commit
stringlengths 40
40
| subject
stringlengths 1
1.49k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| new_contents
stringlengths 1
29.8k
| old_contents
stringlengths 0
9.9k
| lang
stringclasses 3
values | proba
float64 0
1
|
|---|---|---|---|---|---|---|---|
68206c67739abf4f9f4d1ab8aa647a28649b5f5f
|
add figure one comparison between IME and random
|
analysis/figure_1_ime_vs_random.py
|
analysis/figure_1_ime_vs_random.py
|
#!/usr/bin/env python3
import os
import click
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
# from matplotlib import cm
import pandas as pd
font = {'family':'sans-serif',
'sans-serif':['Helvetica'],
'weight' : 'normal',
'size' : 8}
rc('font', **font)
@click.command()
def figure1_ime_vs_random():
print("loading data...")
def setup_plot(csv_paths):
df = pd.DataFrame(data=dict(num=range(1, 500001)))
ax.set_xlabel("Materials")
ax.set_yticks([0, 400, 800, 1200, 1600])
ax.set_xlim(0, 150000)
ax.set_ylim(0, 1600)
# ax.axes.yaxis.set_visible(False)
ax.set_xticks([0, 25000, 50000, 100000, 150000])
ax.axes.xaxis.set_ticklabels(["0", "25K", "50K", "450K", "500K"])
ax.grid(linestyle='-', color='0.8', zorder=0, axis="x")
# ax.axhline(1600, linestyle="--", lw=2, color="black", label=0)
for path in csv_paths:
df[path] = pd.read_csv(path, usecols=["unique_bins"])
return df
fig = plt.figure(figsize=(3.346, 3.346), tight_layout=True)
ax = fig.add_subplot(1, 1, 1)
ax.set_ylabel("Bins Explored")
legend_labels = ["IME", "Random"]
g1 = setup_plot(["reference.csv", "random16_500K.csv"])
ax.plot(g1.num, g1["reference.csv"], lw=1.5, color="black", zorder=10)
ax.plot(g1.num[0:50000], g1["random16_500K.csv"][0:50000], lw=1.5, color="orange", zorder=10)
ax.plot(g1.num[100000:150000], g1["random16_500K.csv"][450000:500000], lw=1.5, color="orange", zorder=10)
ax.plot([50000, 100000],[541, 730], lw=1.5, color="orange", linestyle="--", zorder=10)
# ax.legend(legend_labels, bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0., facecolor='white', framealpha=1)
ax.legend(legend_labels, loc='upper right', facecolor='white', framealpha=1)
ax.axhline(468, lw=1.0, linestyle="--", color="grey", zorder=1)
ax.axhline(732, lw=1.0, linestyle="--", color="grey", zorder=1)
ax.axhline(1062, lw=1.0, linestyle="--", color="grey", zorder=1)
arrow_args = dict(arrowstyle="->")
ax.annotate("732 bins @ 494956", xy=(494956 - 350000, 732), xycoords="data",
textcoords="offset points", xytext=(0, 10), horizontalalignment='right', verticalalignment='bottom',
arrowprops=arrow_args)
ax.annotate("732 bins @ 4283", xy=(4283, 732), xycoords="data",
textcoords="offset points", xytext=(13, 10), horizontalalignment='left', verticalalignment='bottom',
arrowprops=arrow_args)
ax.annotate("468 bins @ 25000", xy=(25000, 468), xycoords="data",
textcoords="offset points", xytext=(0, -10), horizontalalignment='left', verticalalignment='top',
arrowprops=arrow_args)
ax.annotate("468 bins @ 1786", xy=(1786, 468), xycoords="data",
textcoords="offset points", xytext=(10, 15), horizontalalignment='left', verticalalignment='bottom',
arrowprops=arrow_args)
ax.annotate("1062 bins @ 25000", xy=(25000, 1062), xycoords="data",
textcoords="offset points", xytext=(0, 10), horizontalalignment='left', verticalalignment='bottom',
arrowprops=arrow_args)
fig.savefig("figure1_ime_vs_random.png", dpi=1200)
plt.close(fig)
if __name__ == '__main__':
figure1_ime_vs_random()
|
Python
| 0
|
|
e3aa781fe60e3ce293e34767c78e947ffc169cbc
|
Allow module contributions to return dict
|
src/ggrc/extensions.py
|
src/ggrc/extensions.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
import sys
from ggrc import settings
import ggrc
def get_extension_name(extension_setting, default):
extension_name = getattr(settings, extension_setting, default)
if extension_name is not None:
extension_name = extension_name if not callable(extension_name) else \
extension_name()
else:
extension_name = default
return extension_name
def get_extension_modules(modules=[]):
if len(modules) == 0:
extension_names = getattr(settings, 'EXTENSIONS')
if extension_names is None:
modules.append(None)
else:
for m in settings.EXTENSIONS:
modules.append(get_extension_module(m))
if len(modules) == 0 or modules[0] is None:
return []
else:
return modules
def get_extension_module(module_name):
__import__(module_name)
return sys.modules[module_name]
def get_extension_module_for(extension_setting, default, extension_modules={}):
if extension_setting not in extension_modules:
extension_name = get_extension_name(extension_setting, default)
if not extension_name:
extension_modules[extension_setting] = extension_name
else:
__import__(extension_name)
extension_modules[extension_setting] = sys.modules[extension_name]
return extension_modules[extension_setting]
def get_extension_instance(extension_setting, default, extensions={}):
if extension_setting not in extensions:
extension_name = get_extension_name(extension_setting, default)
idx = extension_name.rfind('.')
module_name = extension_name[0:idx]
class_name = extension_name[idx + 1:]
__import__(module_name)
module = sys.modules[module_name]
extensions[extension_setting] = getattr(module, class_name)(settings)
return extensions[extension_setting]
def _get_contribution(module, name):
"""Fetch contributions from a single module.
Args:
module: Python module that will be checked for a given attribute.
name: Name of the attribute that we want to collect from a module. The
attribute must be a list or a callable that returns a list.
Returns:
List of contributions found
Raises:
TypeError: If the attribute is not a list or a callable that returns a
list.
"""
contributions = getattr(module, name, [])
if callable(contributions):
contributions = contributions()
if isinstance(contributions, dict):
contributions = contributions.items()
if not isinstance(contributions, list):
raise TypeError("Contributed item must be a list or a callable that "
"returns a list")
return contributions
def get_module_contributions(name):
"""Fetch contributions from all modules if they exist.
This function loops through all modules and checks if the main module package
contains attribute with a given name or if it cotnains contributions which
have an attribute with the said name. It gathers all such attributes in a
list and returns it.
Args:
name (string): name of the contributed attribute that will be collected.
Returns:
A list of all collected atributes.
"""
all_contributions = []
all_modules = [ggrc] + get_extension_modules()
for module in all_modules:
all_contributions.extend(_get_contribution(module, name))
contributions_module = getattr(module, "contributions", None)
if contributions_module:
all_contributions.extend(_get_contribution(contributions_module, name))
if all(isinstance(val, tuple) for val in all_contributions):
all_contributions = dict(all_contributions)
return all_contributions
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
import sys
from ggrc import settings
import ggrc
def get_extension_name(extension_setting, default):
extension_name = getattr(settings, extension_setting, default)
if extension_name is not None:
extension_name = extension_name if not callable(extension_name) else \
extension_name()
else:
extension_name = default
return extension_name
def get_extension_modules(modules=[]):
if len(modules) == 0:
extension_names = getattr(settings, 'EXTENSIONS')
if extension_names is None:
modules.append(None)
else:
for m in settings.EXTENSIONS:
modules.append(get_extension_module(m))
if len(modules) == 0 or modules[0] is None:
return []
else:
return modules
def get_extension_module(module_name):
__import__(module_name)
return sys.modules[module_name]
def get_extension_module_for(extension_setting, default, extension_modules={}):
if extension_setting not in extension_modules:
extension_name = get_extension_name(extension_setting, default)
if not extension_name:
extension_modules[extension_setting] = extension_name
else:
__import__(extension_name)
extension_modules[extension_setting] = sys.modules[extension_name]
return extension_modules[extension_setting]
def get_extension_instance(extension_setting, default, extensions={}):
if extension_setting not in extensions:
extension_name = get_extension_name(extension_setting, default)
idx = extension_name.rfind('.')
module_name = extension_name[0:idx]
class_name = extension_name[idx + 1:]
__import__(module_name)
module = sys.modules[module_name]
extensions[extension_setting] = getattr(module, class_name)(settings)
return extensions[extension_setting]
def _get_contribution(module, name):
"""Fetch contributions from a single module.
Args:
module: Python module that will be checked for a given attribute.
name: Name of the attribute that we want to collect from a module. The
attribute must be a list or a callable that returns a list.
Returns:
List of contributions found
Raises:
TypeError: If the attribute is not a list or a callable that returns a
list.
"""
contributions = getattr(module, name, [])
if callable(contributions):
contributions = contributions()
if not isinstance(contributions, list):
raise TypeError("Contributed item must be a list or a callable that "
"returns a list")
return contributions
def get_module_contributions(name):
"""Fetch contributions from all modules if they exist.
This function loops through all modules and checks if the main module package
contains attribute with a given name or if it cotnains contributions which
have an attribute with the said name. It gathers all such attributes in a
list and returns it.
Args:
name (string): name of the contributed attribute that will be collected.
Returns:
A list of all collected atributes.
"""
all_contributions = []
all_modules = [ggrc] + get_extension_modules()
for module in all_modules:
all_contributions.extend(_get_contribution(module, name))
contributions_module = getattr(module, "contributions", None)
if contributions_module:
all_contributions.extend(_get_contribution(contributions_module, name))
return all_contributions
|
Python
| 0.000001
|
13f495ddabd1997b7dfdc9e2933b82fd25ecd664
|
Create LevelOrderTraversal.py from LeetCode
|
LevelOrderTraversal.py
|
LevelOrderTraversal.py
|
#https://leetcode.com/problems/binary-tree-level-order-traversal/#/description
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Node(object):
def __init__(self,node,level):
self.node = node
self.level = level
class Solution(object):
def __init__(self):
self.array = []
self.level_counter = 0
self.result = []
self.levelq = []
def queue(self,node):
self.array.append(node)
def isNotEmpty(self):
return self.array
def popValue(self):
value = self.array[0]
del self.array[0]
return value
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return self.result
self.queue(Node(root,0))
while self.isNotEmpty():
bigNode = self.popValue()
if bigNode.level > self.level_counter:
self.level_counter = bigNode.level
self.result.append(self.levelq[:])
self.levelq[:] = []
self.levelq.append(bigNode.node.val)
if bigNode.node.left :
self.queue(Node(bigNode.node.left, bigNode.level + 1))
if bigNode.node.right :
self.queue(Node(bigNode.node.right, bigNode.level + 1))
if self.levelq:
self.result.append(self.levelq[:])
return self.result
|
Python
| 0
|
|
66c00d10ddc1f137deaf9208572a287bbad33de7
|
Add migration script
|
migrations/versions/d756b34061ff_.py
|
migrations/versions/d756b34061ff_.py
|
"""Store privacyIDEA node in eventcounter table
Revision ID: d756b34061ff
Revises: 3d7f8b29cbb1
Create Date: 2019-09-02 13:59:24.244529
"""
# revision identifiers, used by Alembic.
from sqlalchemy import orm
from sqlalchemy.sql.ddl import CreateSequence
from privacyidea.lib.config import get_privacyidea_node
revision = 'd756b34061ff'
down_revision = '3d7f8b29cbb1'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class OldEventCounter(Base):
__tablename__ = 'eventcounter'
counter_name = sa.Column(sa.Unicode(80), nullable=False, primary_key=True)
counter_value = sa.Column(sa.Integer, default=0)
__table_args__ = {'mysql_row_format': 'DYNAMIC'}
class NewEventCounter(Base):
__tablename__ = 'eventcounter_new'
id = sa.Column(sa.Integer, sa.Sequence("eventcounter_seq"), primary_key=True)
counter_name = sa.Column(sa.Unicode(80), nullable=False)
counter_value = sa.Column(sa.Integer, default=0)
node = sa.Column(sa.Unicode(255), nullable=False)
__table_args__ = (sa.UniqueConstraint('counter_name',
'node',
name='evctr_1'),
{'mysql_row_format': 'DYNAMIC'})
def dialect_supports_sequences():
migration_context = context.get_context()
return migration_context.dialect.supports_sequences
def create_seq(seq):
if dialect_supports_sequences():
op.execute(CreateSequence(seq))
def upgrade():
try:
# Step 1: Create sequence on Postgres
seq = sa.Sequence('tokenowner_seq')
try:
create_seq(seq)
except Exception as _e:
pass
# Step 2: Create new eventcounter_new table
op.create_table('eventcounter_new',
sa.Column("id", sa.Integer, sa.Sequence("eventcounter_seq"), primary_key=True),
sa.Column("counter_name", sa.Unicode(80), nullable=False),
sa.Column("counter_value", sa.Integer, default=0),
sa.Column("node", sa.Unicode(255), nullable=False),
sa.UniqueConstraint('counter_name', 'node', name='evctr_1'),
mysql_row_format='DYNAMIC'
)
# Step 3: Migrate data from eventcounter to eventcounter_new
node = get_privacyidea_node()
bind = op.get_bind()
session = orm.Session(bind=bind)
for old_ctr in session.query(OldEventCounter).all():
new_ctr = NewEventCounter(counter_name=old_ctr.counter_name,
counter_value=old_ctr.counter_value,
node=node)
session.add(new_ctr)
print("Migrating counter {}={} on node={} ...".format(new_ctr.counter_name, new_ctr.counter_value, node))
session.commit()
# Step 4: Remove eventcounter
op.drop_table("eventcounter")
op.rename_table("eventcounter_new", "eventcounter")
except Exception as exx:
print("Could not migrate table 'eventcounter'")
print (exx)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('evctr_1', 'eventcounter', type_='unique')
op.drop_column('eventcounter', 'node')
op.drop_column('eventcounter', 'id')
# ### end Alembic commands ###
|
Python
| 0.000001
|
|
891dc05f36ae9084d8511bf3e26e0631eadecef7
|
add medications urls
|
medications/urls.py
|
medications/urls.py
|
from django.conf.urls import url
from medications.views import MedicationsView
urlpatterns = [
url(r'^$', MedicationsView.as_view()),
url(r'^([0-9]+)/$', MedicationsView.as_view()),
]
|
Python
| 0.000009
|
|
aa4f1df448c6d01875ed667e37afe68c114892ed
|
Add initial verification endpoint. Add all balance endpoint
|
api/mastercoin_verify.py
|
api/mastercoin_verify.py
|
import os
import glob
from flask import Flask, request, jsonify, abort, json
data_dir_root = os.environ.get('DATADIR')
app = Flask(__name__)
app.debug = True
@app.route('/addresses')
def addresses():
currency_id = request.args.get('currency_id')
print currency_id
response = []
addr_glob = glob.glob(data_dir_root + '/addr/*.json')
for address_file in addr_glob:
with open(address_file, 'r') as f:
addr = json.load(f)
res = {
'address': addr['address']
}
if currency_id == '0':
btc_balance = [x['value'] for x in addr['balance'] if x['symbol'] == 'BTC'][0]
res['balance'] = float(btc_balance)
response.append(res)
else:
if currency_id == '1' or currency_id == '2':
msc_currency_id = str(int(currency_id) - 1) # Mastercoin-tools is off by one on currency id from the spec
if msc_currency_id in addr:
print addr[currency_id]['balance']
res['balance'] = float(addr[msc_currency_id]['balance'])
response.append(res)
json_response = json.dumps(response)
return json_response
@app.route('/transactions/<address>')
def transactions(address=None):
return ""
|
Python
| 0.000001
|
|
23799c4a33b9d2da82ec0770f15e840459a940c6
|
Add api comtrade
|
app/apis/comtrade_api.py
|
app/apis/comtrade_api.py
|
from flask import Blueprint, jsonify, request
from sqlalchemy import func, distinct
from inflection import singularize
from app.models.comtrade import Comtrade as Model
from app import cache
from app.helpers.cache_helper import api_cache_key
blueprint = Blueprint('comtrade_api', __name__, url_prefix='/comtrade')
@blueprint.route('/<path:path>/')
@cache.cached(key_prefix=api_cache_key("comtrade"))
def api(path):
dimensions = map(singularize, path.split('/'))
if invalid_dimension(dimensions):
return 'Error', 403
filters = {k: v for k, v in request.args.to_dict().iteritems() if k in Model.dimensions()}
counts = [c for c in map(singularize, request.args.getlist('count')) if c in Model.dimensions()]
values = get_values(request)
group_columns = get_columns(dimensions)
count_columns = get_columns(counts)
aggregated_values = [Model.aggregate(v) for v in values]
headers = get_headers(group_columns) + get_headers(count_columns, '_count') + values
entities = group_columns + map(lambda x: func.count(distinct(x)), count_columns) + aggregated_values
query = Model.query.with_entities(*entities).filter_by(**filters).group_by(*group_columns)
return jsonify(data=query.all(), headers=headers)
def get_values(request):
values = [v for v in request.args.getlist('value') if v in Model.values()]
return values if len(values) else Model.values()
def get_headers(columns, suffix=''):
return map(lambda x: x.key + suffix, columns)
def get_columns(dimensions):
return [getattr(Model, dimension) for dimension in dimensions]
def invalid_dimension(dimensions):
return not set(dimensions).issubset(set(Model.dimensions()))
|
Python
| 0
|
|
670e5d017adb24c5adffb38fa59059fec5175c3c
|
Create hello.py
|
hello.py
|
hello.py
|
print('hello, world!')
|
Python
| 0.999503
|
|
1692161ad43fdc6a0e2ce9eba0bacefc04c46b5c
|
Add form generator module.
|
src/epiweb/apps/survey/utils.py
|
src/epiweb/apps/survey/utils.py
|
from django import forms
from epiweb.apps.survey.data import Survey, Section, Question
_ = lambda x: x
def create_field(question):
if question.type == 'yes-no':
field = forms.ChoiceField(widget=forms.RadioSelect,
choices=[('yes', _('Yes')), ('no', _('No'))])
elif question.type == 'option-multiple':
field = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
choices=zip(range(0, len(question.options)), question.options))
elif question.type == 'option-single':
field = forms.ChoiceField(widget=forms.RadioSelect,
choices=zip(range(0, len(question.options)), question.options))
elif question.type == 'date':
field = forms.DateField(input_formats='%m/%d/%y')
else:
field = forms.CharField()
field.label = question.label
field.required = False
return field
def generate_form(section, values=None):
if values:
form = forms.Form(values)
else:
form = forms.Form()
for question in section.questions:
form.fields[question.id] = create_field(question)
return form
|
Python
| 0
|
|
72a5f0d301b2169367c8bcbc42bb53b71c1d635c
|
Create utils.py
|
utils.py
|
utils.py
|
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import memcache
import jinja2
import logging
import json
import os
class BaseHandler(webapp.RequestHandler):
context = {}
def initialize(self, request, response):
"""docstring for __init__"""
self.populateContext()
super(BaseHandler, self).initialize(request, response)
def populateContext(self):
"""Load up the stuff that every web handler will need"""
user = users.get_current_user()
if user:
self.context['logged_in'] = True
self.context['is_admin'] = users.is_current_user_admin()
def render(self, template_name):
"""Rending a template in a base directory by passing the name of the template"""
env = jinja2.Environment(loader=jinja2.FileSystemLoader('views'))
template = env.get_template(template_name)
self.response.out.write(template.render(self.context))
|
Python
| 0.000001
|
|
26bc11340590b0b863527fa12da03cea528feb46
|
Add initial stub of GerritClient class
|
pygerrit/client.py
|
pygerrit/client.py
|
""" Gerrit client interface. """
from Queue import Queue, Empty, Full
from pygerrit.error import GerritError
from pygerrit.events import GerritEventFactory
class GerritClient(object):
""" Gerrit client interface. """
def __init__(self, host):
self._factory = GerritEventFactory()
self._host = host
self._events = Queue()
def get_event(self, block=True, timeout=None):
""" Get the next event from the queue.
Return a `GerritEvent` instance, or None if:
- `block` was False and there is no event available in the queue, or
- `block` was True and no event was available within the time
specified by `timeout`.
"""
try:
return self._events.get(block, timeout)
except Empty:
return None
def put_event(self, json_data):
""" Create event from `json_data` and add it to the queue.
Raise GerritError if the queue is full, or the factory could not
create the event.
"""
try:
event = self._factory.create(json_data)
self._events.put(event)
except Full:
raise GerritError("Unable to add event: queue is full")
|
Python
| 0.000006
|
|
10f99acc11051b37595751b9b9b84e11dd133a64
|
Add functions for getting available checksums for a channel from remote and disk.
|
kolibri/core/content/utils/file_availability.py
|
kolibri/core/content/utils/file_availability.py
|
import json
import os
import re
import requests
from django.core.cache import cache
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.paths import get_content_storage_dir_path
from kolibri.core.content.utils.paths import get_file_checksums_url
checksum_regex = re.compile("^([a-f0-9]{32})$")
def get_available_checksums_from_remote(channel_id, baseurl):
CACHE_KEY = "PEER_AVAILABLE_CHECKSUMS_{baseurl}_{channel_id}".format(
baseurl=baseurl, channel_id=channel_id
)
if CACHE_KEY not in cache:
response = requests.get(get_file_checksums_url(channel_id, baseurl))
checksums = None
# Do something if we got a successful return
if response.status_code == 200:
try:
checksums = json.loads(response.content)
# Filter to avoid passing in bad checksums
checksums = [
checksum for checksum in checksums if checksum_regex.match(checksum)
]
cache.set(CACHE_KEY, checksums, 3600)
except (ValueError, TypeError):
# Bad JSON parsing will throw ValueError
# If the result of the json.loads is not iterable, a TypeError will be thrown
# If we end up here, just set checksums to None to allow us to cleanly continue
pass
return cache.get(CACHE_KEY)
def get_available_checksums_from_disk(channel_id, basepath):
PER_DISK_CACHE_KEY = "DISK_AVAILABLE_CHECKSUMS_{basepath}".format(basepath=basepath)
PER_DISK_PER_CHANNEL_CACHE_KEY = "DISK_AVAILABLE_CHECKSUMS_{basepath}_{channel_id}".format(
basepath=basepath, channel_id=channel_id
)
if PER_DISK_PER_CHANNEL_CACHE_KEY not in cache:
if PER_DISK_CACHE_KEY not in cache:
content_dir = get_content_storage_dir_path(datafolder=basepath)
disk_checksums = []
for _, _, files in os.walk(content_dir):
for name in files:
checksum = os.path.splitext(name)[0]
# Only add valid checksums formatted according to our standard filename
if checksum_regex.match(checksum):
disk_checksums.append(checksum)
# Cache is per device, so a relatively long lived one should
# be fine.
cache.set(PER_DISK_CACHE_KEY, disk_checksums, 3600)
disk_checksums = set(cache.get(PER_DISK_CACHE_KEY))
channel_checksums = set(
LocalFile.objects.filter(
files__contentnode__channel_id=channel_id
).values_list("id", flat=True)
)
cache.set(
PER_DISK_PER_CHANNEL_CACHE_KEY,
channel_checksums.intersection(disk_checksums),
3600,
)
return cache.get(PER_DISK_PER_CHANNEL_CACHE_KEY)
|
Python
| 0
|
|
61ec190ca29187cbf9ad7b721fbf1936d665e4f6
|
Revert "rm client.py"
|
orchestration/containerAPI/client.py
|
orchestration/containerAPI/client.py
|
from docker import Client as docker_client
class Client(object):
'''
Docker engine client
'''
def __init__(self, hostURL, version):
self.client = docker_client(base_url=hostURL, version=version)
self.url = hostURL
self.version = version
def get_url():
return self.url
def get_version():
return self.version
|
Python
| 0
|
|
dcc08986d4e2f0e7940f485d0ece465b1325a711
|
Add barebones FileBlob class
|
python/fileblob.py
|
python/fileblob.py
|
#!/usr/bin/env python
import os
MEGABYTE = 1024 * 1024
class FileBlob:
def __init__(self, path):
self.path = path
def data(self):
return open(self.path).read()
def size(self):
try:
return os.path.getsize(self.path)
except os.error:
return 0
def extname(self):
_, ext = os.path.splitext(self.path)
return ext
def _mime_type(self):
pass
def mime_type(self):
pass
def content_type(self):
pass
def encoding(self):
pass
def is_binary(self):
pass
def is_text(self):
pass
def is_image(self):
self.extname() in ['.png', '.jpg', '.jpeg', '.gif', '.tif', 'tiff']
def is_large(self):
self.size() > MEGABYTE
def is_safe_to_tokenize(self):
return not self.is_large() and self.is_text() and not self.high_ratio_of_long_lines()
def high_ratio_of_long_lines(self):
if self.loc() == 0:
return false
return self.size() / > 5000
def loc(self):
return len(self.lines())
def lines(self):
pass
def is_viewable(self):
pass
def line_split_character(self):
pass
|
Python
| 0.000009
|
|
5c1e1744fa19bf900981d6a40c69195419861357
|
Add snactor sanity-check command (#564)
|
leapp/snactor/commands/workflow/sanity_check.py
|
leapp/snactor/commands/workflow/sanity_check.py
|
from __future__ import print_function
import sys
from leapp.exceptions import LeappError, CommandError
from leapp.logger import configure_logger
from leapp.repository.scan import find_and_scan_repositories
from leapp.snactor.commands.workflow import workflow
from leapp.utils.clicmd import command_arg
from leapp.utils.repository import requires_repository, find_repository_basedir
_DESCRIPTION = 'The following messages are attempted to be consumed before they are produced: {}'
_LONG_DESCRIPTION = '''
Perform workflow sanity checks
- check whether there is a message in the given workflow which is attempted to be consumed before it was produced
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@workflow.command('sanity-check', help='Perform workflow sanity checks', description=_LONG_DESCRIPTION)
@command_arg('name')
@requires_repository
def cli(params):
configure_logger()
repository = find_and_scan_repositories(find_repository_basedir('.'), include_locals=True)
try:
repository.load()
except LeappError as exc:
sys.stderr.write(exc.message)
sys.stderr.write('\n')
sys.exit(1)
wf = repository.lookup_workflow(params.name)
if not wf:
raise CommandError('Could not find any workflow named "{}"'.format(params.name))
instance = wf()
produced_late = set(instance.initial).intersection(set(instance.produces))
if produced_late:
print(_DESCRIPTION.format(' '.join([m.__name__ for m in produced_late])), file=sys.stderr, end='\n')
sys.exit(1)
|
Python
| 0
|
|
489004c5f81b8a5a2a639bc67f3ed5008f18960a
|
fix the naming error of the plotting script
|
doc/source/report/plots/plot_hc_dendrogram.py
|
doc/source/report/plots/plot_hc_dendrogram.py
|
from mousestyles import data
from mousestyles.classification import clustering
from mousestyles.visualization import plot_clustering
# load data
mouse_data = data.load_all_features()
# mouse inidividual
mouse_dayavgstd_rsl = clustering.prep_data(mouse_data, melted=False, std = True, rescale = True)
# optimal parameters
method, dist = clustering.get_optimal_hc_params(mouse_day=mouse_dayavgstd_rsl)
# fit hc
sils_hc, labels_hc = clustering.fit_hc(
mouse_day_X=mouse_dayavgstd_rsl[:,2:],
method=method, dist=dist, num_clusters=range(2,17))
# plot and get the distance matrxix
Z = plot_clustering.plot_dendrogram(
mouse_day=mouse_dayavgstd_rsl, method=method, dist=dist)
|
Python
| 0.000004
|
|
e47f61f22a568a69e74cbf8d0b70c4858879b9b5
|
Temporary workaround for an apprtc bug.
|
chrome/test/functional/webrtc_apprtc_call.py
|
chrome/test/functional/webrtc_apprtc_call.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import time
# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import webrtc_test_base
class WebrtcApprtcCallTest(webrtc_test_base.WebrtcTestBase):
"""Tests calling apprtc.appspot.com and setting up a call.
Prerequisites: This test case must run on a machine with a webcam, either
fake or real, and with some kind of audio device. The machine must have access
to the public Internet.
This should be considered an integration test: test failures could mean
that the AppRTC reference is broken, that WebRTC is broken, or both.
"""
def tearDown(self):
pyauto.PyUITest.tearDown(self)
self.assertEquals('', self.CheckErrorsAndCrashes(),
'Chrome crashed or hit a critical error during test.')
def testApprtcLoopbackCall(self):
self.NavigateToURL('http://apprtc.appspot.com/?debug=loopback')
self.WaitForInfobarCount(1, tab_index=0)
self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=0)
self._WaitForCallEstablishment(tab_index=0)
def testApprtcTabToTabCall(self):
# Randomize the call session id. If we would use the same id we would risk
# getting problems with hung calls and lingering state in AppRTC.
random_call_id = 'pyauto%d' % random.randint(0, 65536)
apprtc_url = 'http://apprtc.appspot.com/?r=%s' % random_call_id
self.NavigateToURL(apprtc_url)
self.AppendTab(pyauto.GURL(apprtc_url))
self.WaitForInfobarCount(1, tab_index=0)
self.WaitForInfobarCount(1, tab_index=1)
self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=0)
# TODO(phoglund): workaround for
# https://code.google.com/p/webrtc/issues/detail?id=1742
time.sleep(1)
self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=1)
self._WaitForCallEstablishment(tab_index=0)
self._WaitForCallEstablishment(tab_index=1)
def _WaitForCallEstablishment(self, tab_index):
# AppRTC will set opacity to 1 for remote video when the call is up.
video_playing = self.WaitUntil(
function=lambda: self.GetDOMValue('remoteVideo.style.opacity',
tab_index=tab_index),
expect_retval='1')
self.assertTrue(video_playing,
msg=('Timed out while waiting for '
'remoteVideo.style.opacity to return 1.'))
if __name__ == '__main__':
pyauto_functional.Main()
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import webrtc_test_base
class WebrtcApprtcCallTest(webrtc_test_base.WebrtcTestBase):
  """Tests calling apprtc.appspot.com and setting up a call.

  Prerequisites: This test case must run on a machine with a webcam, either
  fake or real, and with some kind of audio device. The machine must have access
  to the public Internet.

  This should be considered an integration test: test failures could mean
  that the AppRTC reference is broken, that WebRTC is broken, or both.
  """

  def tearDown(self):
    pyauto.PyUITest.tearDown(self)
    # Fail the test (post-hoc) if Chrome crashed during it.
    self.assertEquals('', self.CheckErrorsAndCrashes(),
                      'Chrome crashed or hit a critical error during test.')

  def testApprtcLoopbackCall(self):
    # Loopback mode makes AppRTC call itself inside one tab.
    self.NavigateToURL('http://apprtc.appspot.com/?debug=loopback')
    self.WaitForInfobarCount(1, tab_index=0)
    # Accept the media-permission infobar so getUserMedia can proceed.
    self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=0)
    self._WaitForCallEstablishment(tab_index=0)

  def testApprtcTabToTabCall(self):
    # Randomize the call session id. If we would use the same id we would risk
    # getting problems with hung calls and lingering state in AppRTC.
    random_call_id = 'pyauto%d' % random.randint(0, 65536)
    apprtc_url = 'http://apprtc.appspot.com/?r=%s' % random_call_id
    # Both tabs join the same AppRTC room id.
    self.NavigateToURL(apprtc_url)
    self.AppendTab(pyauto.GURL(apprtc_url))
    self.WaitForInfobarCount(1, tab_index=0)
    self.WaitForInfobarCount(1, tab_index=1)
    # Grant media permission in both tabs, then wait for both call legs.
    self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=0)
    self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=1)
    self._WaitForCallEstablishment(tab_index=0)
    self._WaitForCallEstablishment(tab_index=1)

  def _WaitForCallEstablishment(self, tab_index):
    """Poll the remote video's opacity until AppRTC reports media flowing."""
    # AppRTC will set opacity to 1 for remote video when the call is up.
    video_playing = self.WaitUntil(
        function=lambda: self.GetDOMValue('remoteVideo.style.opacity',
                                          tab_index=tab_index),
        expect_retval='1')
    self.assertTrue(video_playing,
                    msg=('Timed out while waiting for '
                         'remoteVideo.style.opacity to return 1.'))
if __name__ == '__main__':
pyauto_functional.Main()
|
Python
| 0.998543
|
37d851bb34552edfc3b1abd4d1034d4fdf46408f
|
Implement --remote
|
nvim-remote.py
|
nvim-remote.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2015 Marco Hinz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import subprocess
import argparse
from neovim import attach
def main():
    """Parse nvim --remote* style arguments and forward them to a running
    nvim server; any unhandled arguments spawn a fresh nvim instance.

    The server socket is taken from $NVIM_LISTEN_ADDRESS, falling back to
    /tmp/nvimsocket.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--remote', action='append', help='Edit <files> in a Vim server if possible')
    parser.add_argument('--remote-silent', help="Same, don't complain if there is no server")
    parser.add_argument('--remote-wait', help='As --remote but wait for files to have been edited')
    parser.add_argument('--remote-wait-silent', help="Same, don't complain if there is no server")
    parser.add_argument('--remote-tab', help='As --remote but use tab page per file')
    parser.add_argument('--remote-send', help='Send <keys> to a Vim server and exit')
    parser.add_argument('--remote-expr', help='Evaluate <expr> in a Vim server and print result ')
    args, unused = parser.parse_known_args()

    sockpath = os.environ.get('NVIM_LISTEN_ADDRESS')
    if sockpath is None:
        sockpath = '/tmp/nvimsocket'

    try:
        # Bug fix: the original read $NVIM_LISTEN_ADDRESS into `sockpath`
        # but then ignored it and hardcoded /tmp/nvimsocket here.
        nvim = attach('socket', path=sockpath)
    except FileNotFoundError:
        print("""Problem: Can't find unix socket: {0}
Solution: Start a new server: NVIM_LISTEN_ADDRESS={0} nvim""".format(sockpath))
        sys.exit(1)

    if args.remote:
        for fname in args.remote:
            nvim.command('edit {}'.format(fname))

    if unused:
        # Anything argparse did not recognize is handed to a new nvim process.
        os.putenv('VIMRUNTIME', '/data/repo/neovim/runtime')
        subprocess.Popen(['/data/repo/neovim/build/bin/nvim'] + unused)
if __name__ == '__main__':
main()
|
Python
| 0.000097
|
|
49424b855f043ae2bbb3562481493b1fa83f5090
|
add random selection wip code
|
af_scripts/tmp/randSelect.py
|
af_scripts/tmp/randSelect.py
|
import random as rd

# The 26 lowercase letters to split into random groups.
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
           "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]

div = 3
list_size = len(letters)
# Floor division: each part's nominal size (the original used Py2 int `/`).
num_for_one_part = list_size // div

# Bug fix: the original did `randList = list`, which (a) shadowed the
# builtin `list` and (b) aliased rather than copied, so shuffling also
# scrambled the source list. Shuffle a real copy instead.
randList = letters[:]
rd.shuffle(randList)

# Split the shuffled letters into `div` interleaved groups.
print([randList[i::div] for i in range(div)])
print(randList)
|
Python
| 0
|
|
7d4281574a9ee2a8e7642f14402a452f82a807db
|
Create smarthome.py
|
demos/smarthome/smarthome.py
|
demos/smarthome/smarthome.py
|
import logging
from pabiana import area
from pabiana.area import autoloop, load_interfaces, pulse, register, scheduling, subscribe
from pabiana.node import create_publisher, run
NAME = 'smarthome'
# Publisher socket; created in the __main__ block below before run().
publisher = None


# Triggers: remote-callable handlers registered with the pabiana area.
@register
def increase_temp():
    """Raise the room temperature by 0.25 and re-schedule itself."""
    area.context['temperature'] += 0.25
    autoloop(increase_temp)


@register
def lower_temp():
    """Lower the room temperature by 0.25 and re-schedule itself."""
    area.context['temperature'] -= 0.25
    autoloop(lower_temp)


@register
def keep_temp():
    """No-op trigger; demanding it suppresses pending temperature changes."""
    pass


@register
def window(open):
    # NOTE(review): parameter name `open` shadows the builtin (kept as-is).
    area.context['window-open'] = open


# Reactions
@scheduling
def schedule():
    """Resolve conflicting demands: keep beats both, lower beats increase."""
    if keep_temp in area.demand:
        area.demand.pop(increase_temp, None)
        area.demand.pop(lower_temp, None)
    elif lower_temp in area.demand:
        area.demand.pop(increase_temp, None)


@pulse
def publish():
    # Publish the current context on every 8th clock tick.
    if area.clock % 8 == 0:
        publisher.send_json({
            'temperature': area.context['temperature'],
            'window-open': area.context['window-open']
        })


if __name__ == '__main__':
    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.DEBUG
    )
    load_interfaces('interfaces.json')
    subscribe([], 'pulse', '01')
    publisher = create_publisher(own_name=NAME, host='0.0.0.0')
    # Initial context before the event loop starts.
    area.context['temperature'] = 18
    area.context['window-open'] = False
    run(own_name=NAME, host='0.0.0.0')
|
Python
| 0
|
|
a0493ff48b96056709880804f61e794621886c61
|
Add CoNLL reader tests
|
compattern/dependency/tests/test_conll.py
|
compattern/dependency/tests/test_conll.py
|
# encoding: utf8
from compattern.dependency import conll
def test_read_french():
    """Test that conll.read understands French Bonsai output"""
    # One CoNLL row followed by the blank-line sentence terminator.
    line = (u"6\tchauffé\tchauffer\tV\tVPP\tg=m|m=part|n=s|t=past\t"
            u"1100011\t5\tdep_coord\t_\t_")
    sentence = conll.read([line, '\n'])[0]
    assert len(sentence) == 1
    token = sentence[0]
    assert token.id == 6
    assert token.lemma == "chauffer"
    assert token.cpos == "V"
    assert token.pos == "VPP"
    assert token.feat[0].startswith("g=m")  # morpho features
    assert token.feat[1].startswith("110")  # cluster path
    assert token.head == 5
    assert token.deprel == "dep_coord"
    # Don't really care what happens with undefined phead and pdeprel
def test_read_turboparser():
    """Test that conll.read handles TurboParser's 8-column output."""
    line = "11\tvaccines\tvaccine\tNNS\tNNS\t_\t10\tPMOD"
    sentence = conll.read([line, '\n'])[0]
    assert len(sentence) == 1
    token = sentence[0]
    assert token.id == 11
    assert token.form == "vaccines"
    assert token.lemma == "vaccine"
    assert token.cpos == "NNS"
    assert token.pos == "NNS"
    assert token.head == 10
    assert token.deprel == "PMOD"
def test_read_wacky():
    """Test that conll.read handles the 6-column WaCky export format."""
    line = "was\tbe\tVBD\t18\t11\tPRD"
    sentence = conll.read([line, '\n'])[0]
    assert len(sentence) == 1
    token = sentence[0]
    assert token.id == 18
    assert token.form == "was"
    assert token.lemma == "be"
    assert token.pos == "VBD"
    assert token.head == 11
    assert token.deprel == "PRD"
|
Python
| 0
|
|
4b696c2a54f7afd95013763c098aec30b08409d6
|
Create bulb-switcher-ii.py
|
Python/bulb-switcher-ii.py
|
Python/bulb-switcher-ii.py
|
# Time: O(1)
# Space: O(1)
class Solution(object):
    def flipLights(self, n, m):
        """
        Count the distinct final bulb states reachable with exactly m
        presses of the 4 switches over n bulbs (LeetCode 672).

        Only the first 3 bulbs (and n, m capped small) determine the
        answer, so the result is a closed-form case analysis.

        :type n: int  (number of bulbs)
        :type m: int  (number of operations)
        :rtype: int
        """
        if m == 0:
            return 1          # nothing pressed: only the all-on state
        if n == 1:
            return 2          # single bulb: on or off
        if m == 1 and n == 2:
            return 3
        # Bug fix: this line was `if m == 1 or n == 2 return 4` — the
        # missing ':' made the whole file a SyntaxError.
        if m == 1 or n == 2:
            return 4
        if m == 2:
            return 7
        return 8              # m >= 3 and n >= 3: all 8 states reachable
|
Python
| 0.000001
|
|
0621b935558b6805d2b45fee49bc2e959201fd7a
|
add number-of-digit-one
|
vol5/number-of-digit-one/number-of-digit-one.py
|
vol5/number-of-digit-one/number-of-digit-one.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-03 15:21:00
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-03 15:21:14
import itertools
class Solution(object):
    def countDigitOne(self, n):
        """
        Count occurrences of the digit 1 in all integers 1..n.

        Uses the standard place-value formula: for each decimal place with
        weight m and digit d, the number of 1s contributed at that place is
        derived from the higher digits (n // (m*10)) and lower digits (n % m).

        Fixes the original, which was Python-2 only: `n /= 10` becomes float
        division on Python 3, and `itertools.izip` no longer exists.

        :type n: int
        :rtype: int
        """
        if n <= 0:
            return 0
        total = 0
        m = 1          # weight of the current place: 1, 10, 100, ...
        rest = n       # remaining high digits; rest % 10 is the digit at place m
        while rest:
            d = rest % 10
            if d < 1:
                total += n // (m * 10) * m
            elif d == 1:
                total += n // (m * 10) * m + n % m + 1
            else:
                total += (n // (m * 10) + 1) * m
            rest //= 10
            m *= 10
        return total
|
Python
| 0.999993
|
|
27d37833663842405f159127f30c6351958fcb10
|
Add draft of example using the new @bench
|
bench_examples/bench_dec_insert.py
|
bench_examples/bench_dec_insert.py
|
from csv import DictWriter
from ktbs_bench.utils.decorators import bench
@bench
def batch_insert(graph, file):
    """Insert triples in batch."""
    # Placeholder body; the @bench decorator is expected to time the call.
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    print(graph, file)
if __name__ == '__main__':
    # Define some graph/store to use
    graph_list = ['g1', 'g2']

    # Define some files to get the triples from
    n3file_list = ['f1', 'f2']

    # Testing batch insert: one result row keyed by (graph, file) label.
    res = {'func_name': 'batch_insert'}
    for graph in graph_list:
        for n3file in n3file_list:
            # NOTE(review): assumes @bench returns a (label, seconds) pair —
            # confirm against ktbs_bench.utils.decorators.bench.
            time_res = batch_insert(graph, n3file)
            res[time_res[0]] = time_res[1]

    # Setup the result CSV ('wb' implies Python 2; csv wants 'w' on Py3).
    with open('/tmp/res.csv', 'wb') as outfile:
        res_csv = DictWriter(outfile, fieldnames=res.keys())
        res_csv.writeheader()
        # Write the results
        res_csv.writerow(res)
|
Python
| 0
|
|
70cf735e7bee9c320229432400c775f6da8f0a5d
|
Add astro/osm-randomize-coord.py
|
astro/oskar/osm-randomize-coord.py
|
astro/oskar/osm-randomize-coord.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <weitian@aaronly.me>
# MIT License
#
"""
Create new randomized coordinates by adding random offset to the existing
OSKAR sky model (i.e., osm), and replace original coordinates with the
specified new ones.
"""
import os
import argparse
import numpy as np
class OskarSkyModel:
    """
    OSKAR sky model

    Columns: RA [deg], Dec [deg], flux [Jy]; leading '#' lines are header.
    """
    def __init__(self, infile):
        self.filename = infile
        self.header = self.get_header(infile)
        self.load_data(infile)

    @staticmethod
    def get_header(infile):
        """
        Read the OSM header lines
        """
        header = []
        with open(infile) as f:
            for line in f.readlines():
                if line[0] == "#":
                    header.append(line)
                else:
                    # Header lines are contiguous at the top of the file.
                    break
        print("Read OSM header:\n%s" % "".join(header))
        return header

    def load_data(self, infile):
        # Try whitespace-separated first; fall back to comma-separated.
        try:
            data = np.loadtxt(infile)
        except ValueError:
            data = np.loadtxt(infile, delimiter=",")
        self.ra = data[:, 0]  # [deg]
        self.dec = data[:, 1]  # [deg]
        self.flux = data[:, 2]  # [Jy]
        self.number = len(self.flux)
        print("Loaded OSM data from file: %s" % infile)

    def randomize_coord(self, sigma):
        """
        Randomize the coordinates by adding an offset sampling
        from a Gaussian of sigma.

        sigma : offset standard deviation in [arcsec].
        """
        self.offset_sigma = sigma/3600.0  # [arcsec] -> [deg]
        print("Random offset: %.1f [arcsec] == %.6f [deg]" %
              (sigma, self.offset_sigma))
        self.ra += np.random.normal(loc=0.0, scale=self.offset_sigma,
                                    size=self.number)
        self.dec += np.random.normal(loc=0.0, scale=self.offset_sigma,
                                     size=self.number)
        print("Generated randomized coordinates")

    def replace_coord(self, coordfile):
        """
        Replace the coordinates with the data from the given
        coordinate file.
        """
        try:
            coord_new = np.loadtxt(coordfile)
        except ValueError:
            coord_new = np.loadtxt(coordfile, delimiter=",")
        ra_new = coord_new[:, 0]
        dec_new = coord_new[:, 1]
        # The replacement must have exactly one row per existing source.
        if self.number != len(ra_new):
            raise RuntimeError("invalid coordinate file: %s" % coordfile)
        self.ra = ra_new
        self.dec = dec_new
        print("Replaced coordinates")

    def save_data(self, data, outfile, clobber=False):
        # Write the preserved '#' header (as bytes), then the numeric table.
        if os.path.exists(outfile) and not clobber:
            raise OSError("file already exists: %s" % outfile)
        with open(outfile, "wb") as fb:
            for line in self.header:
                fb.write(line.encode("utf-8"))
            np.savetxt(fb, data)

    def save_coord(self, outfile, clobber=False):
        # Save only the (RA, Dec) columns.
        data = np.column_stack([self.ra, self.dec])
        self.save_data(data=data, outfile=outfile, clobber=clobber)
        print("Wrote coordinates to file: %s" % outfile)

    def save_osm(self, outfile, clobber=False):
        # Save the full model: (RA, Dec, flux).
        data = np.column_stack([self.ra, self.dec, self.flux])
        self.save_data(data=data, outfile=outfile, clobber=clobber)
        print("Wrote OSM to file: %s" % outfile)
def cmd_create(args):
    """
    sub-command: create - create randomized coordinates

    Loads the OSM, adds Gaussian offsets of args.sigma [arcsec], and writes
    the new (RA, Dec) pairs to args.coordfile.
    """
    osm = OskarSkyModel(args.infile)
    osm.randomize_coord(sigma=args.sigma)
    osm.save_coord(outfile=args.coordfile, clobber=args.clobber)
def cmd_replace(args):
    """
    sub-command: replace - replace the OSM's coordinates with those from
    args.coordfile, rewriting the OSM in place (with optional .bak backup).

    (The original docstring was a copy-paste of cmd_create's.)
    """
    osm = OskarSkyModel(args.infile)
    osm.replace_coord(coordfile=args.coordfile)
    if not args.nobackup:
        backfile = args.infile + ".bak"
        os.rename(args.infile, backfile)
        print("Backed up OSM as: %s" % backfile)
    # Overwrite the original OSM in place (backup already taken above).
    osm.save_osm(outfile=args.infile, clobber=True)
def main():
    """Parse the command line and dispatch to the chosen sub-command."""
    parser = argparse.ArgumentParser(
        description="Randomize OSKAR sky model source coordinates")
    subparsers = parser.add_subparsers(dest="subparser_name",
                                       title="sub-commands",
                                       help="additional help")
    # sub-command: "create"
    parser_create = subparsers.add_parser(
        "create", help="create randomized coordinates")
    parser_create.add_argument("-C", "--clobber", dest="clobber",
                               action="store_true",
                               help="overwrite existing output file")
    parser_create.add_argument("-s", "--sigma", dest="sigma",
                               required=True, type=float,
                               help="random offset sigma [arcsec]")
    parser_create.add_argument("-c", "--coord-file", dest="coordfile",
                               required=True,
                               help="output coordinate file")
    parser_create.add_argument("infile", help="input OSKAR sky model")
    parser_create.set_defaults(func=cmd_create)
    # sub-command: "replace"
    parser_replace = subparsers.add_parser(
        "replace", help="replace coordinates of OSM")
    parser_replace.add_argument("-B", "--no-backup", dest="nobackup",
                                action="store_true",
                                help="do NOT backup original OSM")
    parser_replace.add_argument("-c", "--coord-file", dest="coordfile",
                                required=True,
                                help="file of new coordinates")
    parser_replace.add_argument("infile", help="input OSKAR sky model")
    parser_replace.set_defaults(func=cmd_replace)
    #
    # Each sub-parser bound its handler via set_defaults(func=...).
    args = parser.parse_args()
    args.func(args)
if __name__ == "__main__":
main()
|
Python
| 0
|
|
7c0a37e2ad123dfeb409c682a1cab37630678642
|
Improve preprocessing text docs
|
tensorflow/python/keras/preprocessing/text.py
|
tensorflow/python/keras/preprocessing/text.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for text input preprocessing.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_preprocessing import text
from tensorflow.python.util.tf_export import keras_export
hashing_trick = text.hashing_trick
Tokenizer = text.Tokenizer
@keras_export('keras.preprocessing.text.text_to_word_sequence')
def text_to_word_sequence(text,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=True, split=" "):
  """Converts a text to a sequence of words (or tokens).

  This function transforms a string of text into a list of words
  while ignoring `filters` which include punctuations by default.

  >>> text = 'This is a sample sentence.'
  >>> tf.keras.preprocessing.text.text_to_word_sequence(text)
  ['this', 'is', 'a', 'sample', 'sentence']

  Arguments:
      text: Input text (string).
      filters: list (or concatenation) of characters to filter out, such as
          punctuation. Default: `'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n'`,
          includes basic punctuation, tabs, and newlines.
      lower: boolean. Whether to convert the input to lowercase.
      split: str. Separator for word splitting.

  Returns:
      A list of words (or tokens).
  """
  # Bug fix: the parameter `text` shadows the module-level
  # `keras_preprocessing.text` import, so `text.text_to_word_sequence`
  # would be an attribute lookup on the *string argument* and raise
  # AttributeError. Re-import the module under a non-clashing alias.
  from keras_preprocessing import text as keras_text
  return keras_text.text_to_word_sequence(
      text, filters=filters, lower=lower, split=split)
# Consistency fix: the export path was 'tf.keras.preprocessing.text.one_hot',
# unlike the sibling export above which uses the 'keras.' prefix.
@keras_export('keras.preprocessing.text.one_hot')
def one_hot(text, n,
            filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
            lower=True,
            split=' '):
  """One-hot encodes a text into a list of word indexes of size `n`.

  This function receives as input a string of text and returns a
  list of encoded integers each corresponding to a word (or token)
  in the given input string.

  >>> text = 'This is a sample sentence.'
  >>> tf.keras.preprocessing.text.one_hot(text, 20)
  [4, 18, 1, 15, 17]

  Arguments:
      text: Input text (string).
      n: int. Size of vocabulary.
      filters: list (or concatenation) of characters to filter out, such as
          punctuation. Default: ``!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n``,
          includes basic punctuation, tabs, and newlines.
      lower: boolean. Whether to set the text to lowercase.
      split: str. Separator for word splitting.

  Returns:
      List of integers in `[1, n]`. Each integer encodes a word
      (unicity non-guaranteed).
  """
  # Same shadowing bug as text_to_word_sequence: `text` (the parameter)
  # hides the keras_preprocessing.text module, so alias it locally.
  from keras_preprocessing import text as keras_text
  return keras_text.one_hot(
      text, n, filters=filters, lower=lower, split=split)
# text.tokenizer_from_json is only available if keras_preprocessing >= 1.1.0
try:
tokenizer_from_json = text.tokenizer_from_json
keras_export('keras.preprocessing.text.tokenizer_from_json')(
tokenizer_from_json)
except AttributeError:
pass
keras_export('keras.preprocessing.text.hashing_trick')(hashing_trick)
keras_export('keras.preprocessing.text.Tokenizer')(Tokenizer)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for text input preprocessing.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_preprocessing import text
from tensorflow.python.util.tf_export import keras_export
text_to_word_sequence = text.text_to_word_sequence
one_hot = text.one_hot
hashing_trick = text.hashing_trick
Tokenizer = text.Tokenizer
keras_export(
'keras.preprocessing.text.text_to_word_sequence')(text_to_word_sequence)
keras_export('keras.preprocessing.text.one_hot')(one_hot)
keras_export('keras.preprocessing.text.hashing_trick')(hashing_trick)
keras_export('keras.preprocessing.text.Tokenizer')(Tokenizer)
# text.tokenizer_from_json is only available if keras_preprocessing >= 1.1.0
try:
tokenizer_from_json = text.tokenizer_from_json
keras_export('keras.preprocessing.text.tokenizer_from_json')(
tokenizer_from_json)
except AttributeError:
pass
|
Python
| 0.000116
|
8b9c06d4cf29c546c97957a1cb381196e87eb25f
|
Update j2ee roles for 5.5
|
ibmcnx/config/j2ee/RoleAllRestricted55.py
|
ibmcnx/config/j2ee/RoleAllRestricted55.py
|
'''
Set all Roles to Restricted - no anonymous access possible
Description:
Script is tested with IBM Connections 5.0
You have to edit the variables and set them to your administrative Accounts
Author: Klaus Bild
Blog: http://www.kbild.ch
E-Mail:
Documentation: http://scripting101.stoeps.de
Version: 5.0.1
Date: 09/19/2015
License: Apache 2.0
History:
2014-09-10 Christoph Stoettner change the way for changing roles, 5.0 compatible
'''
import ibmcnx.functions
import ConfigParser
configParser = ConfigParser.ConfigParser()
configFilePath = r'ibmcnx/ibmcnx.properties'
configParser.read(configFilePath)
answer = raw_input(
'Do you want to set the j2ee roles with Users and Groups from ibmcnx.properties? (Yes|No) ')
allowed_answer = ['yes', 'y', 'ja', 'j']
if answer.lower() in allowed_answer:
# Get Admin Accounts and Groups for J2EE Roles
connwasadmin = configParser.get('Generic', 'j2ee.cnxwasadmin')
connadmin = configParser.get('Generic', 'j2ee.cnxadmin')
connmoderators = configParser.get('Generic', 'j2ee.cnxmoderators')
connmetrics = configParser.get('Generic', 'j2ee.cnxmetrics')
connmobile = configParser.get('Generic', 'j2ee.connmobile')
cnxmail = configParser.get('Generic', 'j2ee.cnxmail')
cnxreader = configParser.get('Generic', 'j2ee.cnxreader')
cnxcommunitycreator = configParser.get('Generic', 'j2ee.communitycreator')
cnxwikicreator = configParser.get('Generic', 'j2ee.wikicreator')
cnxfilesyncuser = configParser.get('Generic', 'j2ee.filesyncuser')
# Variables for Groupmapping
connadmingroup = configParser.get('Generic', 'j2ee.cnxadmingroup')
connmoderatorgroup = configParser.get('Generic', 'j2ee.cnxmoderatorgroup')
connmetricsgroup = configParser.get('Generic', 'j2ee.cnxmetricsgroup')
connmobilegroup = configParser.get('Generic', 'j2ee.cnxmobilegroup')
cnxmailgroup = configParser.get('Generic', 'j2ee.cnxmailgroup')
cnxreadergroup = configParser.get('Generic', 'j2ee.cnxreadergroup')
cnxcommunitycreatorgroup = configParser.get(
'Generic', 'j2ee.communitycreatorgroup')
cnxwikicreatorgroup = configParser.get('Generic', 'j2ee.wikicreatorgroup')
cnxfilesyncusergroup = configParser.get(
'Generic', 'j2ee.filesyncusergroup')
else:
# Variables for Usermapping
connwasadmin = str(ibmcnx.functions.getAdmin('connwasadmin'))
connadmin = str(ibmcnx.functions.getAdmin('connadmin'))
connmoderators = str(ibmcnx.functions.getAdmin('connmoderators'))
connmetrics = str(ibmcnx.functions.getAdmin('connmetrics'))
connmobile = str(ibmcnx.functions.getAdmin('connmobile'))
cnxmail = str(ibmcnx.functions.getAdmin('cnxmail'))
# Variables for Groupmapping
connadmingroup = str(ibmcnx.functions.getAdmin('connadmingroup'))
connmoderatorgroup = str(ibmcnx.functions.getAdmin('connmoderatorgroup'))
connmetricsgroup = str(ibmcnx.functions.getAdmin('connmetricsgroup'))
connmobilegroup = str(ibmcnx.functions.getAdmin('connmobilegroup'))
cnxmailgroup = str(ibmcnx.functions.getAdmin('cnxmailgroup'))
def setRoleCmd(appName, roleName, everyone, authenticated, users, groups):
    '''
    function to set the j2ee role of a Connections Application
    Values needed appName = Application Name, roleName = Name of the role
    everyone yes|no, authenticated yes|no, users single uid or uid1|uid2, groups like users
    '''
    # Echo the mapping before applying it (wsadmin/Jython Python-2 prints).
    print "\n\tApplication: " + appName
    print "\tRole: " + roleName
    print "\n\tEveryone: " + everyone
    print "\tAuthenticated: " + authenticated
    print "\tUsers: " + users
    print "\tGroups: " + groups + "\n"
    # Apply the role mapping through the WebSphere AdminApp object.
    AdminApp.edit(appName, '[-MapRolesToUsers [[ "' + roleName + '" ' +
                  everyone + ' ' + authenticated + ' ' + users + ' ' + groups + ' ]] ]')
    # example: AdminApp.edit( "Blogs", '[-MapRolesToUsers [["person" No Yes ""
    # ""] ]]' )
def setRole(appName, roleName, connwasadmin, connadmin, connmoderators, connmetrics, connmobile, cnxmail, cnxreader, cnxcommunitycreator, cnxwikicreator, cnxfilesyncuser, connadmingroup, connmoderatorgroup, connmetricsgroup, connmobilegroup, cnxmailgroup, cnxreadergroup, cnxcommunitycreatorgroup, cnxwikicreatorgroup, cnxfilesyncusergroup):
    '''
    Restrict a single role mapping.  Only the "reader" role is handled in
    this version: "allauthenticated" maps it to all authenticated users,
    anything else maps the configured reader users/groups.  All other
    parameters are currently unused placeholders for the remaining roles.
    '''
    if roleName == "reader":
        # Default Access reader, person, authenticated
        if cnxreader == "allauthenticated":
            setRoleCmd(appName, roleName, "No", "Yes", "' '", "' '")
        else:
            setRoleCmd(appName, roleName, "No", "No",
                       cnxreader, cnxreadergroup)
    else:
        print "\n\nApplication " + appName + "- Role " + roleName + " not set!\n\n"
def convertRoles2Dict(appname, list):
    # function to convert backup txt files of Security Role Backup to a dictionary
    # NOTE(review): the parameter `list` shadows the builtin; lines are only
    # parsed after the 12th line (AdminApp.view preamble is skipped).
    # NOTE(review): a "key: value" line appearing before the first "Role:"
    # line would hit an undefined `role` — presumably the format guarantees
    # "Role:" comes first; confirm against AdminApp.view output.
    # print '\tPATH: ' + path
    count = 0
    dict = {}
    for line in list.splitlines():
        # for loop through file to read it line by line
        if (':' in line) and (count > 12):
            value = line.split(':')[0]
            # cred = line.split(':')[1].strip('\n')
            cred = line.split(':')[1]
            # cred = cred.strip(' ')
            cred = cred.strip()
            if value == "Role":
                role = cred
                dict[role] = {}
            dict[role][value] = cred
        count += 1
    return dict
# Iterate every deployed application and re-map each of its roles.
apps = AdminApp.list()
appsList = apps.splitlines()
# only for testing single apps, uncomment following line:
# appsList = ['fncs']
for app in appsList:
    dictionary = convertRoles2Dict(app, AdminApp.view(app, "-MapRolesToUsers"))
    print "\n\tApplication: " + app + "\n\n"
    # app, role
    for role in dictionary.keys():
        # Loop through Roles
        # NOTE(review): in the interactive ("No") branch above, the
        # cnxreader/... variables are never assigned, so this call would
        # raise NameError; the bare except below hides that.
        try:
            setRole(app, role, connwasadmin, connadmin, connmoderators, connmetrics, connmobile, cnxmail, cnxreader, cnxcommunitycreator, cnxwikicreator, cnxfilesyncuser,
                    connadmingroup, connmoderatorgroup, connmetricsgroup, connmobilegroup, cnxmailgroup, cnxreadergroup, cnxcommunitycreatorgroup, cnxwikicreatorgroup, cnxfilesyncusergroup)
        except:
            print "Error setting role: " + role + " in App: " + app
ibmcnx.functions.saveChanges()
|
Python
| 0
|
|
175470eea9716f587a2339932c1cfb6c5240c4df
|
add tools.testing module for asserts (numpy, pandas compat wrapper)
|
statsmodels/tools/testing.py
|
statsmodels/tools/testing.py
|
"""assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
    """Drop a trailing release-candidate suffix, e.g. '0.15.0rc1' -> '0.15.0'."""
    head, marker, tail = version.rpartition("rc")
    if marker and tail.isdigit():
        return head
    return version
def is_pandas_min_version(min_version):
    '''check whether pandas is at least min_version

    NOTE(review): relies on the legacy `pandas.version.short_version`
    attribute, which only exists in old pandas releases — confirm the
    supported pandas range before reusing this helper.
    '''
    from pandas.version import short_version as pversion
    return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
    """Assert equality, dispatching to pandas' asserts for pandas objects.

    Falls back to ``numpy.testing.assert_equal`` for non-pandas objects and
    on pandas versions older than 0.14.1.  ``err_msg`` and ``verbose`` are
    forwarded to numpy (the original accepted them but always passed the
    literal defaults); ``**kwds`` are forwarded to the pandas helpers.
    """
    if not is_pandas_min_version('0.14.1'):
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
    else:
        if isinstance(desired, pandas.Index):
            pdt.assert_index_equal(actual, desired)
        elif isinstance(desired, pandas.Series):
            pdt.assert_series_equal(actual, desired, **kwds)
        elif isinstance(desired, pandas.DataFrame):
            pdt.assert_frame_equal(actual, desired, **kwds)
        else:
            npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
|
Python
| 0
|
|
68cd37c1c1bf279bc67e3d6391c8f4b88e0eb7a0
|
add buggy profiler, not ready each instanciation add 2sec to exec time
|
syntaxnet_wrapper/test/profile_execution.py
|
syntaxnet_wrapper/test/profile_execution.py
|
from syntaxnet_wrapper.wrapper import SyntaxNetWrapper
from time import time
from prettytable import PrettyTable
def profile_exec(niter, action, keep_wrapper):
    """Run `niter` SyntaxNet calls of `action` and return elapsed seconds.

    keep_wrapper=True is meant to reuse the wrapper built on the first
    iteration; False rebuilds it every iteration.
    """
    t = time()
    sentence = 'une phrase de test'
    for i in range(niter):
        if keep_wrapper == False or i == 0:
            sn_wrapper = SyntaxNetWrapper('French')
        if action == 'morpho':
            sn_wrapper.morpho_sentence(sentence)
        elif action == 'tagger':
            sn_wrapper.tag_sentence(sentence)
        elif action == 'parser':
            sn_wrapper.parse_sentence(sentence)
        # NOTE(review): `del` runs on *every* iteration here, so with
        # keep_wrapper=True the second iteration would hit a NameError —
        # presumably this was meant only for the keep_wrapper=False path;
        # confirm against the original (indentation-mangled) source.
        del sn_wrapper
    return time() - t
x = PrettyTable(['Action', 'niter', 'keep wrapper', 'execution_time'])
# Describe test case
test_cases = [
{'action': 'morpho', 'niter': 1, 'keep_wrapper': False},
{'action': 'morpho', 'niter': 10, 'keep_wrapper': True},
#{'action': 'morpho', 'niter': 100, 'keep_wrapper': True},
#{'action': 'morpho', 'niter': 1000, 'keep_wrapper': True},
{'action': 'tagger', 'niter': 1, 'keep_wrapper': True},
#{'action': 'tagger', 'niter': 10, 'keep_wrapper': True},
#{'action': 'tagger', 'niter': 100, 'keep_wrapper': True},
#{'action': 'tagger', 'niter': 1000, 'keep_wrapper': True},
{'action': 'parser', 'niter': 1, 'keep_wrapper': True},
#{'action': 'parser', 'niter': 10, 'keep_wrapper': True},
#{'action': 'parser', 'niter': 100, 'keep_wrapper': True},
#{'action': 'parser', 'niter': 1000, 'keep_wrapper': True},
{'action': 'morpho', 'niter': 1, 'keep_wrapper': False},
#{'action': 'morpho', 'niter': 10, 'keep_wrapper': False},
#{'action': 'morpho', 'niter': 100, 'keep_wrapper': False},
#{'action': 'morpho', 'niter': 1000, 'keep_wrapper': False},
{'action': 'tagger', 'niter': 1, 'keep_wrapper': False},
#{'action': 'tagger', 'niter': 10, 'keep_wrapper': False},
#{'action': 'tagger', 'niter': 100, 'keep_wrapper': False},
#{'action': 'tagger', 'niter': 1000, 'keep_wrapper': False},
{'action': 'parser', 'niter': 1, 'keep_wrapper': False},
#{'action': 'parser', 'niter': 10, 'keep_wrapper': False},
#{'action': 'parser', 'niter': 100, 'keep_wrapper': False},
#{'action': 'parser', 'niter': 1000, 'keep_wrapper': False},
]
for test_case in test_cases:
exec_time = profile_exec(**test_case)
x.add_row([test_case['action'],
test_case['niter'],
test_case['keep_wrapper'],
exec_time])
with open('output_profiling.txt', 'wb') as file_:
file_.write(x.get_string())
|
Python
| 0
|
|
c76c7b19afdf364ade2b7d0793cbdb14cb315131
|
add smalltalk like object model
|
smalltalk_like/obj_model.py
|
smalltalk_like/obj_model.py
|
class Base(object):
    """Root of the object model: every object carries a class reference and
    a dict of named fields (attributes and methods live in the same map)."""

    def __init__(self, cls, fields):
        self.cls = cls
        self.fields = fields

    def read_attribute(self, field_name):
        """Return the field's value, or None when the field is absent."""
        return self.fields.get(field_name)

    def write_attribute(self, field_name, value):
        """Bind `value` under `field_name`, creating the field if needed."""
        self.fields[field_name] = value

    def call_method(self, method_name, *args):
        """Look the method up on our class and invoke it with self first."""
        bound = self.cls.find_method(method_name)
        return bound(self, *args)

    def isinstance(self, cls):
        """True when our class is `cls` or a subclass of it."""
        return self.cls.issubclass(cls)
class Class(Base):
    """A class object: named, optionally derived from a base class, and
    itself an instance of a metaclass (held in Base.cls)."""

    def __init__(self, name, base_class, fields, metaclass):
        Base.__init__(self, metaclass, fields)
        self.name = name
        self.base_class = base_class

    def super_class_traversal(self):
        """Return the lookup chain [self, base, base-of-base, ...]."""
        chain = []
        current = self
        while current is not None:
            chain.append(current)
            current = current.base_class
        return chain

    def issubclass(self, cls):
        """True when `cls` appears anywhere on our superclass chain."""
        return cls in self.super_class_traversal()

    def find_method(self, method_name):
        """Walk the superclass chain for the method; MISSING when absent."""
        for candidate in self.super_class_traversal():
            if method_name in candidate.fields:
                return candidate.fields[method_name]
        return MISSING
class Instance(Base):
    """A plain (non-class) object: starts with an empty field dict."""
    def __init__(self, cls):
        # Instances may only be created from Class objects.
        assert isinstance(cls, Class)
        Base.__init__(self, cls, {})
# Bootstrap the metaclass knot: OBJECT is the root of the class hierarchy
# and TYPE is the class of all classes — including itself — mirroring the
# type/object circularity of Smalltalk and Python.
OBJECT = Class(name='object', base_class=None, fields={}, metaclass=None)
TYPE = Class(name='TYPE', base_class=OBJECT, fields={}, metaclass=None)
TYPE.cls = TYPE
OBJECT.cls = TYPE

# Sentinel returned by Class.find_method when no method is found.
MISSING = object()
|
Python
| 0.000002
|
|
5bcd31440322d19262b694a5df299f43af577e5e
|
Create app.py
|
app.py
|
app.py
|
from flask import Flask

# Minimal Flask application with a single root route.
app = Flask(__name__)


@app.route("/")
def hello():
    """Serve the root page."""
    return "Hello World!"


if __name__ == "__main__":
    # Run the development server (debug off, default host/port).
    app.run()
|
Python
| 0.000003
|
|
f6624531e47c599af42e75d84708359eaa982569
|
Solve AoC 2020-12-25/1
|
adventofcode2020/25.py
|
adventofcode2020/25.py
|
def loop_size_finder(inp, subject_number=7):
    """Return how many AoC-2020-day-25 transform steps reach `inp`.

    One step multiplies by `subject_number` modulo 20201227 (i.e. this is
    a brute-force discrete logarithm).
    """
    value = 1
    steps = 0
    while value != inp:
        value = (value * subject_number) % 20201227
        steps += 1
    return steps
def transformer(iterations, subject_number=7):
    """Apply the AoC 2020 day-25 'transform' `iterations` times.

    Equivalent to subject_number ** iterations mod 20201227, so use
    three-argument pow (fast modular exponentiation) instead of the
    original O(iterations) loop.  `max(..., 0)` preserves the original
    behavior of returning 1 for non-positive iteration counts.
    """
    return pow(subject_number, max(iterations, 0), 20201227)
def test_loop_size_finder():
    """Check both puzzle-example loop sizes and the key-exchange symmetry."""
    assert loop_size_finder(5764801) == 8
    assert loop_size_finder(17807724) == 11
    # Both sides of the handshake derive the same encryption key.
    assert transformer(11, subject_number=5764801) == transformer(8, subject_number=17807724)
if __name__ == '__main__':
    # Part 1: recover each device's secret loop size from its public key,
    # then transform the other device's public key with it.
    card_loops = loop_size_finder(10212254)
    # NOTE(review): door_loops is computed but unused — either side of the
    # handshake yields the same key, so only one is needed.
    door_loops = loop_size_finder(12577395)
    print(transformer(card_loops, 12577395))
|
Python
| 0.999999
|
|
b5433672a4e27db4e8f8698c311d05055462ac00
|
Create main file
|
annotate_clin_virus.py
|
annotate_clin_virus.py
|
import timeit
import subprocess
import glob
import sys
import argparse
start = timeit.default_timer()  # wall-clock start, for timing the whole run
# This program runs some shit and does some shit about clinical virus samples
# Gonna write more as I need too
# parser = argparse.ArgumentParser(description= 'Annotate a set of UW clinical viral samples, pulling virus information from prokka and blast')
# parser.add_argument('file_dir', help='Input file directory, all .fasta files will be processed and .seq and .gbf files will be produced in the format input_dir/output/FASTA_name')
# parser.add_argument('metadata_info_sheet_location', help='.csv file where all of the metadata is stored')
# parser.add_argument('sbt_file_loc', help='location of .sbt file for .gbf file creation')
# args = parser.parse_args()
# Here I assume that the .fasta file has multiple fastas as opposed to being given a directory, this is subject to later change
# NOTE(review): inputs are hard-coded below; the commented-out argparse block
# above suggests they are meant to become CLI arguments eventually.
fasta_filename = '10fasta_UWViroClinSeq.fasta'
metadata_info_sheet = 'UWVIROCLINSEQ - SCCA.csv'
gff_file_loc = 'HPIV3_121416.gff'
# Takes the name of a clincical virus as specified on the metadata sheet and returns a list of the relevant metadata
# NOTE(review): stub — the `if` branch has no statement body, so this file does
# not currently parse; the functions below are design sketches, not working code.
def pull_metadata(virus_name):
    for line in open(metadata_info_sheet):
        if line.split(',')[1] == virus_name:
            # Parse and steal input
# reutrn two strings, one for the cmt file and the other for the .fsa features
def parse_gff(gff_file_loc):
    # First two lines are garbarge
    # One line a sequence format: ##TYPE DNA virus_name
    # then sequences start:
    # FORMAT:
    # RNA NAME
    # SEQUENCE
    # end-
    # all of them, also in the same order as the first list
    # NAME GENEIOUS cds ## ## stupid shit then the names
    # all named, and also in order
    # Write this into lists
    # write the damn files right here
    # pull_metadata(name)
    # write the .tbl and .fsa right here
def write_output():
    # make a folder for each, name it the sample name
    # Go through and make .fsa and .tbl files out of our data
    # TODO: generalize, but first I'mma run it with hard coded filepaths
def run_tbl():
    # run .tbl2asn on all of the folders and process the .sqn files for submission
    # Probobly entails throwing the .sbt file into each folder
    #
# Process the fasta_file
# Now we go through and actually work our magic on the viruses
# NOTE(review): `virus_name_list` is never defined anywhere in this file.
for x in range(0,len(virus_name_list)):
    clin_data_list = pull_metadata(virus_name_list[x])
    # TODO: Modify fasta/cmt file
    # TODO: Run Prokka - with options stolen from sheet
|
Python
| 0.000001
|
|
92aaff39dbd670f65dcbdeb34a2a506e0fcdf58b
|
add basic show_urls test
|
tests/management/commands/test_show_urls.py
|
tests/management/commands/test_show_urls.py
|
# -*- coding: utf-8 -*-
from django.core.management import call_command
from django.utils.six import StringIO
def test_show_urls_format_dense():
    """The default (dense) format emits url, view path and url name, tab separated."""
    buf = StringIO()
    call_command('show_urls', stdout=buf)
    listing = buf.getvalue()
    expected_rows = (
        "/admin/\tdjango.contrib.admin.sites.index\tadmin:index\n",
        "/admin/<app_label>/\tdjango.contrib.admin.sites.app_index\tadmin:app_list\n",
    )
    for row in expected_rows:
        assert row in listing
def test_show_urls_format_verbose():
    """The verbose format prints each url followed by indented detail lines."""
    buf = StringIO()
    call_command('show_urls', format="verbose", stdout=buf)
    expected = ("/login/\n"
                "\tController: django.contrib.auth.views.LoginView\n"
                "\tURL Name: login")
    assert expected in buf.getvalue()
|
Python
| 0
|
|
74a4f56d28497de89415f29ca3e1d6298c2fdd23
|
Create drivers.py
|
chips/sensor/simulation/drivers.py
|
chips/sensor/simulation/drivers.py
|
# This code has to be added to the corresponding __init__.py
# Registers the simulated-sensor chip under the "simulatedsensors" key; the
# list enumerates every measurement type the simulation backend can produce.
# NOTE(review): `DRIVERS` itself is defined in the target __init__.py, not here,
# so this file is a snippet to be pasted, not an importable module.
DRIVERS["simulatedsensors"] = ["PRESSURE", "TEMPERATURE", "LUMINOSITY", "DISTANCE", "HUMIDITY",
                "COLOR", "CURRENT", "VOLTAGE", "POWER",
                "LINEARACCELERATION", "ANGULARACCELERATION", "ACCELERATION", "LINEARVELOCITY", "ANGULARVELOCITY", "VELOCITY",
                "SENSORS"]
|
Python
| 0.000001
|
|
0d77fe363b6e6e8b1a0424cec7631cf13b669968
|
add linear simulation
|
epistasis/simulate/linear.py
|
epistasis/simulate/linear.py
|
__doc__ = """Submodule with various classes for generating/simulating genotype-phenotype maps."""
# ------------------------------------------------------------
# Imports
# ------------------------------------------------------------
import numpy as np
from gpmap.gpm import GenotypePhenotypeMap
# local imports
from epistasis.decomposition import generate_dv_matrix
from epistasis.simulate.base import BaseSimulation
# ------------------------------------------------------------
# ArtificialMap object can be used to quickly generating a toy
# space for testing the EpistasisModels
# ------------------------------------------------------------
class LinearSimulation(BaseSimulation):
    """Simulate a genotype-phenotype map whose phenotypes are a linear
    combination of epistatic coefficients:

        Phenotype = b0 + b1 + b2 + b3 + b12 + b13 + b23 + b123

    Parameters
    ----------
    wildtype : str
        Wildtype genotype.
    mutations : dict
        Mapping from each site to its alphabet of mutations.
    model_type : str
        'local' or 'global' (Walsh space) epistasis model used when
        constructing phenotypes.
    """
    def __init__(self, wildtype, mutations, model_type='local'):
        # Base class builds the (empty) epistasis mapping objects.
        super(LinearSimulation, self).__init__(wildtype, mutations)
        self.model_type = model_type

    @property
    def p_additive(self):
        """Phenotypes produced by the zeroth- and first-order terms alone."""
        by_order = self.epistasis.getorder
        first_two = (by_order[0], by_order[1])
        coef_labels = [lab for order in first_two for lab in order.labels]
        coef_values = [val for order in first_two for val in order.values]
        design = generate_dv_matrix(self.binary.genotypes, coef_labels,
                                    model_type=self.model_type)
        return np.dot(design, coef_values)

    def build(self):
        """Build the phenotype map from the epistatic interactions."""
        # Pre-allocated phenotype buffer (kept for parity with the original
        # implementation; the dot product below yields the actual values).
        _phenotypes = np.zeros(self.n, dtype=float)
        self.X = generate_dv_matrix(self.binary.genotypes,
                                    self.epistasis.labels,
                                    model_type=self.model_type)
        self.phenotypes = np.dot(self.X, self.epistasis.values)
|
Python
| 0.000012
|
|
14e637720d6c80ed88232130b00385ceb4d451da
|
Create manual/__init__.py
|
app/tests/manual/__init__.py
|
app/tests/manual/__init__.py
|
"""
Manual test module.
Note that while `TEST_MODE` should be set as an environment variable for the
unit and integration tests, we want it off here so we can test against
local config data.
"""
|
Python
| 0.000294
|
|
5bd4534b375efed2ce5026a64228a45a9acc1d64
|
add parallel runner
|
microscopes/kernels/parallel.py
|
microscopes/kernels/parallel.py
|
"""Contains a parallel runner implementation, with support
for various backends
"""
from microscopes.common import validator
import multiprocessing as mp
def _mp_work(args):
runner, niters = args
runner.run(niters)
return runner.get_latent()
class runner(object):
    """Drive several single-chain runners side by side.

    Parameters
    ----------
    runners : iterable
        Runner objects, each exposing ``run(niters)`` and ``get_latent()``.
    backend : str
        Parallelism backend; only 'multiprocessing' is supported.
    **kwargs
        Backend options. For multiprocessing: ``processes`` (defaults to
        the machine's CPU count).
    """

    def __init__(self, runners, backend='multiprocessing', **kwargs):
        self._runners = runners
        if backend not in ('multiprocessing',):
            raise ValueError("invalid backend: {}".format(backend))
        self._backend = backend
        if backend == 'multiprocessing':
            validator.validate_kwargs(kwargs, ('processes',))
            nprocs = kwargs.get('processes', mp.cpu_count())
            validator.validate_positive(nprocs, 'processes')
            self._processes = nprocs
        else:
            assert False, 'should not be reached'

    def run(self, niters=10000):
        """Advance every runner by `niters` iterations, in parallel."""
        if self._backend == 'multiprocessing':
            pool = mp.Pool(processes=self._processes)
            jobs = [(r, niters) for r in self._runners]
            # map_async() + get() rather than map(): works around the bug
            # where Ctrl-C fails to kill multiprocessing workers.
            self._latents = pool.map_async(_mp_work, jobs).get(10000000)
            pool.close()
            pool.join()
        else:
            assert False, 'should not be reached'

    def get_latents(self):
        """Latent states captured by the most recent run()."""
        return self._latents
|
Python
| 0.001425
|
|
0cdc87edc4d5e4c967e7bc5bd35c5b30151d5a6e
|
Create admin_pages.py
|
evewspace/API/admin_pages.py
|
evewspace/API/admin_pages.py
|
from core.admin_page_registry import registry
# Expose the SSO admin panel: rendered from 'sso_admin.html' and gated behind
# the API.change_ssoaccesslist permission.
registry.register('SSO', 'sso_admin.html', 'API.change_ssoaccesslist')
|
Python
| 0.000001
|
|
48190b463bcbafc0b1d3af6c41677a295237e3ba
|
Add missing file
|
3rdParty/V8/V8-5.0.71.39/build/has_valgrind.py
|
3rdParty/V8/V8-5.0.71.39/build/has_valgrind.py
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os

# Resolve .../build/.. to the checkout root, then the pinned valgrind dirs.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind')
LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86')
LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64')


def DoMain(_):
  """Hook to be called from gyp without starting a separate python
  interpreter.

  Returns 1 if both pinned valgrind checkouts exist, else 0.
  """
  return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR))


if __name__ == '__main__':
  # print() call form works under both Python 2 and Python 3 for a single
  # argument (the original py2-only `print x` statement broke under py3).
  print(DoMain([]))
|
Python
| 0.000006
|
|
82860a07e361aa5322b7d055c60c7178e40296bd
|
Create search_accepted_nodes_for_queries.py
|
SearchTools/search_accepted_nodes_for_queries.py
|
SearchTools/search_accepted_nodes_for_queries.py
|
# Search and accept, looks for each accept what the previously entered search text
# and the node that was accepted
import gzip
import json
import base64
import sys # Library of system calls
import traceback
import time
import os
from os.path import isfile, join
# Check that the script has been given the right argumets
# Python 2 script (uses print statements throughout).
if len(sys.argv) != 3:
    print "Usage: python search_actions_extract.py path_to_data results_path"
    print "Export the search and accept actions in the logs"
    exit(1)
# Load the arguments into local variables
VERBOSE = True
path = sys.argv[1] # First command line argument (input path)
out_path = sys.argv[2] # Second command line argument (results path)
# Setup the tracking data structures
results = [] # Holds the results
linesCount = 0 # Number of lines processed
searchCount = 0 # Number of search messages processed
err = 0; # Error count
lastSeenSearch = None;
# Print the header row
print time.strftime("%Y-%m-%d %H:%M:%S"), "LinesCount", "SearchesCount", "Errors Count"
# Recursively list the files in sub folders
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
for filePath in files:
    # If the file isn't a sorted file, skip it
    if not filePath.endswith('sorted'):
        continue
    # Open the file, decompressing it as we go
    f = gzip.open (filePath)
    # Walk over every line in the file
    for ln in f:
        linesCount += 1 # Count them
        # If we've seen 10,000 lines emit a progress indicator message
        if linesCount % 10000 == 0:
            print time.strftime("%Y-%m-%d %H:%M:%S"), linesCount, searchCount,err
        try:
            if not ln.startswith("{"):
                continue # It wasn't a valid data line, maybe a header or an error
            data = json.loads(ln) # The data lines are JSON packed, so load them into a map
            # At this point `data` contains a map of all the data fields in the message
            tag = data["Tag"] # Extract the tag
            if tag != "Search" and tag != "Search-NodeAdded": # If it isn't a search message, skip
                continue
            searchCount += 1
            result = {} # Assemble an empty result structure
            # Copy over the relevant data
            result["Session"] = data["SessionID"] # Populate the sessions
            result["MicroTime"] = data["MicroTime"] # Add the timing
            result["Query"] = base64.b64decode(data["Data"]) # The thing that is being searched for
            # Now manually compute a data item called 'Action', what the user was doing
            if tag == "Search":
                result["Action"] = "SEARCH"
                lastSeenSearch = result
            if tag == "Search-NodeAdded":
                result["Action"] = "ACCEPT"
                # Pair the accept with the preceding search from the same session.
                # NOTE(review): if an ACCEPT arrives before any SEARCH,
                # lastSeenSearch is None and this raises; the bare except below
                # counts that as an error line rather than crashing.
                if (lastSeenSearch['Session'] == result['Session']):
                    searchAnswer = {}
                    searchAnswer['Session'] = lastSeenSearch['Session']
                    searchAnswer['Query'] = lastSeenSearch['Query']
                    searchAnswer['Accepted'] = result['Query']
                    searchAnswer['TimeSinceLastSearch'] = int(result['MicroTime']) - int(lastSeenSearch['MicroTime'])
                    results.append(searchAnswer)
                    if VERBOSE:
                        print searchAnswer
        except:
            # If there is a problem, print what went wrong
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            print filePath
            print "FAILED LINE: "+ ln
            print traceback.format_exc()
            err += 1
# Output the results into the output file
print time.strftime("%Y-%m-%d %H:%M:%S"), "Writing results"
out_file = open(out_path, "w")
out_file.write(json.dumps(results))
out_file.close()
print time.strftime("%Y-%m-%d %H:%M:%S"), "Done"
|
Python
| 0.000008
|
|
ccce1108e1deab466fd72c022949fa05fa807a3a
|
add initial files for launch
|
synth.py
|
synth.py
|
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import subprocess
import logging
logging.basicConfig(level=logging.DEBUG)
# run the gapic generator
gapic = gcp.GAPICBazel()
versions = ["v1"]
name = 'policytroubleshooter'
# Generate the client library for each API version and copy it in.
for version in versions:
    library = gapic.node_library(
        name,
        version,
        proto_path = f'google/cloud/policytroubleshooter/{version}')
    s.copy(library, excludes=[])
# Copy common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(
    source_location='build/src', versions=["v1"], default_version="v1")
s.copy(templates, excludes=[])
# Run the standard Node.js post-processing (lint, fix, compile protos).
node.postprocess_gapic_library()
|
Python
| 0.000001
|
|
f480a0a8d51c5c059a05165f30f64bb310299ee3
|
Add 'rescore' command
|
project/apps/api/management/commands/rescore.py
|
project/apps/api/management/commands/rescore.py
|
from django.core.management.base import (
BaseCommand,
)
from apps.api.models import (
Contestant,
Appearance,
Performance,
)
class Command(BaseCommand):
    """Re-save every Performance, Appearance and Contestant so that any
    denormalized fields computed in save() are refreshed."""
    help = "Command to denormailze data."

    def handle(self, *args, **options):
        # Order matters: performances first, then appearances, then contestants.
        for model in (Performance, Appearance, Contestant):
            for obj in model.objects.all():
                obj.save()
        return "Done"
|
Python
| 0.999783
|
|
d4a7bbe27b285e455a3beafefd22fc493edeb161
|
Add unittest for eventlogger config validation.
|
test/test_config_eventlogger.py
|
test/test_config_eventlogger.py
|
#!/usr/bin/env python2
import unittest
import subprocess
import threading
import tempfile
import os
from testdc import *
DAEMON_PATH = './astrond'
TERMINATED = -15
EXITED = 1
class ConfigTest(object):
    """Boot astrond with a given config file and report how it exited."""

    def __init__(self, config):
        self.config = config
        self.process = None

    def run(self, timeout):
        """Run the daemon for at most `timeout` seconds and return its
        return code; a daemon still alive at the deadline is terminated
        (yielding -SIGTERM)."""
        def launch():
            self.process = subprocess.Popen([DAEMON_PATH, self.config])
            self.process.communicate()

        worker = threading.Thread(target=launch)
        worker.start()
        worker.join(timeout)
        if worker.is_alive():
            # Daemon outlived the window: stop it and wait for the thread.
            self.process.terminate()
            worker.join()
        return self.process.returncode
class TestConfigEventLogger(unittest.TestCase):
    """Validate that astrond accepts a well-formed eventlogger role config."""

    @classmethod
    def setUpClass(cls):
        # One temp file is reused for every config the tests write.
        cfg, cls.config_file = tempfile.mkstemp()
        os.close(cfg)
        cls.test_command = ConfigTest(cls.config_file)

    @classmethod
    def tearDownClass(cls):
        if cls.config_file is not None:
            os.remove(cls.config_file)

    @classmethod
    def write_config(cls, config):
        # `with` guarantees the handle is closed even if write() raises
        # (the original leaked the handle on error).
        with open(cls.config_file, "w") as f:
            f.write(config)

    @classmethod
    def run_test(cls, config, timeout = 2):
        cls.write_config(config)
        return cls.test_command.run(timeout)

    def test_eventlogger_good(self):
        config = """\
messagedirector:
    bind: 127.0.0.1:57123
roles:
    - type: eventlogger
        bind: 0.0.0.0:9090
        output: /var/log/astron/eventlogger/el-%Y-%m-%d-%H-%M-%S.log
        rotate_interval: 1d
        """
        # A valid config keeps the daemon alive until the harness TERMs it.
        # assertEqual (not the deprecated assertEquals alias).
        self.assertEqual(self.run_test(config), TERMINATED)
# Allow running this config test module directly.
if __name__ == '__main__':
    unittest.main()
|
Python
| 0
|
|
1578c4328542dd1b1c7ccd1f08dd2b2455055190
|
Add integration test covering all cql types
|
tests/integration/test_types.py
|
tests/integration/test_types.py
|
from decimal import Decimal
from datetime import datetime
from uuid import uuid1, uuid4
import unittest
from cassandra.cluster import Cluster
from cassandra.query import ColumnCollection
class TypeTests(unittest.TestCase):
    """Round-trip every basic CQL type through a live Cassandra cluster.

    NOTE(review): integration test — requires a Cassandra node reachable with
    default Cluster() settings, and it creates (without dropping) the
    `typetests` keyspace. `'blob'.encode('hex')` below is Python-2-only.
    """
    def test_basic_types(self):
        c = Cluster()
        s = c.connect()
        s.execute("""
            CREATE KEYSPACE typetests
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}
            """)
        s.set_keyspace("typetests")
        # One column per CQL type under test; (a, b) is the primary key.
        s.execute("""
            CREATE TABLE mytable (
            a text,
            b text,
            c ascii,
            d bigint,
            e blob,
            f boolean,
            g decimal,
            h double,
            i float,
            j inet,
            k int,
            l list<text>,
            m set<int>,
            n map<text, int>,
            o text,
            p timestamp,
            q uuid,
            r timeuuid,
            s varint,
            PRIMARY KEY (a, b)
            )
            """)
        v1_uuid = uuid1()
        v4_uuid = uuid4()
        mydatetime = datetime(2013, 1, 1, 1, 1, 1)
        # Values inserted, in column order.
        params = (
            "sometext",
            "sometext",
            "ascii", # ascii
            12345678923456789, # bigint
            "blob".encode('hex'), # blob
            True, # boolean
            Decimal('1.234567890123456789'), # decimal
            0.000244140625, # double
            1.25, # float
            "1.2.3.4", # inet
            12345, # int
            ColumnCollection(['a', 'b', 'c']), # list<text> collection
            ColumnCollection({1, 2, 3}), # set<int> collection
            ColumnCollection({'a': 1, 'b': 2}), # map<text, int> collection
            "text", # text
            mydatetime, # timestamp
            v4_uuid, # uuid
            v1_uuid, # timeuuid
            123456789123456789123456789 # varint
        )
        s.execute("""
            INSERT INTO mytable (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, params)
        results = s.execute("SELECT * FROM mytable")
        # What the driver should hand back after deserialization (note the
        # blob decodes back to "blob" and collections come back as plain
        # tuple/set/dict).
        expected = (
            "sometext",
            "sometext",
            "ascii", # ascii
            12345678923456789, # bigint
            "blob", # blob
            True, # boolean
            Decimal('1.234567890123456789'), # decimal
            0.000244140625, # double
            1.25, # float
            "1.2.3.4", # inet
            12345, # int
            ('a', 'b', 'c'), # list<text> collection
            {1, 2, 3}, # set<int> collection
            {'a': 1, 'b': 2}, # map<text, int> collection
            "text", # text
            mydatetime, # timestamp
            v4_uuid, # uuid
            v1_uuid, # timeuuid
            123456789123456789123456789 # varint
        )
        for expected, actual in zip(expected, results[0]):
            self.assertEquals(expected, actual)
|
Python
| 0
|
|
78c9f392a02c0fdb72294e08a3d5ce78262443f5
|
Create 1.py
|
1.py
|
1.py
|
# NOTE(review): placeholder module — `u` is a sample value, never used here.
u=1
|
Python
| 0.000001
|
|
d596bfbbfa725111fb4c0f6d4abf6789669f06de
|
Create sets.py
|
sets.py
|
sets.py
|
#!/usr/bin/env python2
'''
Generates automatically one array, a.
Prints an ordered list with only unique elems
'''
import random
# Number of random elements to generate (Python 2 script — print statements).
SIZE_LIST_A = 10
# Module-level list filled in place by populate_arrays().
a = []
def populate_arrays():
    # Append SIZE_LIST_A random ints in [1, 100]; duplicates are likely,
    # which is the point of the set() dedup below.
    for i in range(0, SIZE_LIST_A):
        a.append(random.randint(1, 100))
if __name__ == "__main__":
    populate_arrays()
    print "a: {:s}".format(str(a))
    # Deduplicate via set, then sort for stable display.
    b = list(set(a))
    b.sort()
    print "b: {:s}".format(str(b))
    exit(0)
|
Python
| 0.000001
|
|
563b9e1f826433179a5e3c5e611d40efc8736c4a
|
Create Hexbin Example
|
altair/examples/hexbins.py
|
altair/examples/hexbins.py
|
"""
Hexbin Chart
-----------------
This example shows a hexbin chart.
"""
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
# Size of the hexbins
size = 15
# Count of distinct x features
xFeaturesCount = 12
# Count of distinct y features
yFeaturesCount = 7
# Name of the x field
xField = 'date'
# Name of the y field
yField = 'date'
# the shape of a hexagon (SVG path string used as the point marker)
hexagon = "M0,-2.3094010768L2,-1.1547005384 2,1.1547005384 0,2.3094010768 -2,1.1547005384 -2,-1.1547005384Z"
# Month on x, weekday on y; each hexagon is colored by mean max temperature.
alt.Chart(source).mark_point(size=size**2, shape=hexagon).encode(
    x=alt.X('xFeaturePos:Q', axis=alt.Axis(title='Month',
                                           grid=False, tickOpacity=0, domainOpacity=0)),
    y=alt.Y('day(' + yField + '):O', axis=alt.Axis(title='Weekday',
                                                   labelPadding=20, tickOpacity=0, domainOpacity=0)),
    stroke=alt.value('black'),
    strokeWidth=alt.value(0.2),
    fill=alt.Color('mean(temp_max):Q', scale=alt.Scale(scheme='darkblue')),
    tooltip=['month(' + xField + '):O', 'day(' + yField + '):O', 'mean(temp_max):Q']
).transform_calculate(
    # This field is required for the hexagonal X-Offset
    # (odd weekdays are shifted half a cell to interlock the hexagons).
    xFeaturePos='(day(datum.' + yField + ') % 2) / 2 + month(datum.' + xField + ')'
).properties(
    # Exact scaling factors to make the hexbins fit
    width=size * xFeaturesCount * 2,
    height=size * yFeaturesCount * 1.7320508076,  # 1.7320508076 is approx. sin(60°)*2
).configure_view(
    strokeWidth=0
)
|
Python
| 0
|
|
ec85dbe3937781f188b9e8d5ae1f8dc7a58c1d0c
|
Add tests for message_processing module
|
tests/messenger/message_processing_test.py
|
tests/messenger/message_processing_test.py
|
import unittest
from app.messenger import message_processing
class MessageProcessingTestCase(unittest.TestCase):
    """Unit tests for app.messenger.message_processing.

    The fixtures mimic Facebook Messenger webhook payloads: a list of
    entries, each with a 'messaging' list of events; individual events
    carry either a 'message' (optionally with a 'quick_reply') or a
    'postback' whose 'payload' encodes the pressed button.
    """
    # extract_all_messaging_events should flatten entries' 'messaging'
    # lists into one list, preserving order.
    def test_extract_all_messaging_events_valid_input(self):
        known_input = [
            {
                'id': 1,
                'messaging': [
                    {
                        'messaging_event_id': 0
                    },
                    {
                        'messaging_event_id': 1
                    },
                    {
                        'messaging_event_id': 2
                    }
                ]
            },
            {
                'id': 2,
                'messaging': [
                    {
                        'messaging_event_id': 3
                    },
                    {
                        'messaging_event_id': 4
                    },
                    {
                        'messaging_event_id': 5
                    }
                ]
            },
            {
                'id': 3,
                'messaging': [
                    {
                        'messaging_event_id': 6
                    },
                    {
                        'messaging_event_id': 7
                    },
                    {
                        'messaging_event_id': 8
                    }
                ]
            }
        ]
        expected_output = [{'messaging_event_id': event_id} for event_id in range(9)]
        output = message_processing.extract_all_messaging_events(known_input)
        self.assertEqual(output, expected_output)
    # --- quick_reply detection -------------------------------------------
    def test_is_quick_button_pressed_true(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
                "quick_reply": {
                    "payload": "DEVELOPER_DEFINED_PAYLOAD"
                }
            }
        }
        self.assertTrue(message_processing.is_quick_button_pressed(known_input))
    def test_is_quick_button_pressed_false(self):
        # Same event but without the 'quick_reply' key.
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
            }
        }
        self.assertFalse(message_processing.is_quick_button_pressed(known_input))
    # --- schedule quick-button -------------------------------------------
    def test_is_schedule_button_pressed_true(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
                "quick_reply": {
                    "payload": "schedule payload"
                }
            }
        }
        self.assertTrue(message_processing.is_schedule_button_pressed(known_input))
    def test_is_schedule_button_pressed_false_not_quick_button(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
            }
        }
        self.assertFalse(message_processing.is_schedule_button_pressed(known_input))
    def test_is_schedule_button_pressed_false_not_schedule_button(self):
        # Quick button pressed, but with a non-schedule payload.
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
                "quick_reply": {
                    "payload": "chat payload"
                }
            }
        }
        self.assertFalse(message_processing.is_schedule_button_pressed(known_input))
    # --- "more info" postback --------------------------------------------
    def test_is_more_talk_info_button_pressed_true(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "postback": {
                "payload": "info talk 1",
            }
        }
        self.assertTrue(message_processing.is_more_talk_info_button_pressed(known_input))
    def test_is_more_talk_info_button_pressed_false_not_postback(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
            }
        }
        self.assertFalse(message_processing.is_more_talk_info_button_pressed(known_input))
    def test_is_more_talk_info_button_pressed_false_payload(self):
        # Postback present but its payload is a "like", not an "info".
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "postback": {
                "payload": "like talk 1",
            }
        }
        self.assertFalse(message_processing.is_more_talk_info_button_pressed(known_input))
    # --- "like talk" postback --------------------------------------------
    def test_is_like_talk_button_pressed_true(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "postback": {
                "payload": "like talk 1",
            }
        }
        self.assertTrue(message_processing.is_like_talk_button_pressed(known_input))
    def test_is_like_talk_button_pressed_false_not_postback(self):
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "message": {
                "mid": "mid.1457764197618:41d102a3e1ae206a38",
                "text": "hello, world!",
            }
        }
        self.assertFalse(message_processing.is_like_talk_button_pressed(known_input))
    def test_is_like_talk_button_pressed_false_payload(self):
        # Postback present but its payload is an "info", not a "like".
        known_input = {
            "sender": {
                "id": "USER_ID"
            },
            "recipient": {
                "id": "PAGE_ID"
            },
            "timestamp": 1458692752478,
            "postback": {
                "payload": "info talk 1",
            }
        }
        self.assertFalse(message_processing.is_like_talk_button_pressed(known_input))
|
Python
| 0.000001
|
|
8118dc283eececdd074bac675c57975ceeba3739
|
Create gateway.py
|
Gateway/gateway.py
|
Gateway/gateway.py
|
# This will be the Gateway.py file for the RPi Gateway
# (fixed: the original line started with `\\`, which is not a Python comment
# and made the module a SyntaxError on import)
|
Python
| 0.000001
|
|
d9dcf34a73b4168885a02c495fb9b808a55b5c9e
|
Add spu debugger printer module
|
corepy/lib/printer/spu_debugger.py
|
corepy/lib/printer/spu_debugger.py
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import corepy.spre.spe as spe
import corepy.spre.syn_util as syn_util
class SPU_Debugger(object):
    """
    InstructionStream printer for the Interactive SPU debugger.

    Output syntax from this printer is designed to be easily used by the SPU
    debugger:

      ilhu(3, 0xDEAD)
      iohl(3, 0xBEEF)
      stqd(3, 0, 1)

    Writes use fd.write() instead of the Python-2-only `print >>fd` chevron
    syntax, producing byte-identical output while remaining valid on both
    Python 2 and 3.
    """

    def __init__(self):
        return

    def __del__(self):
        return

    def header(self, fd):
        # Debugger input needs no file header.
        return

    def footer(self, fd):
        return

    def prologue(self, fd):
        """ Allow the module to print a prologue header if desired.
            The return value should be a boolean indicating whether prologue
            instructions should be printed. """
        return False

    def epilogue(self, fd):
        """ Allow the module to print a prologue header if desired.
            The return value should be a boolean indicating whether epilogue
            instructions should be printed. """
        return False

    def stream(self, fd, stream):
        return

    def string(self, fd, str):
        """Print a string (assumedly representing an instruction)."""
        # Parameter is named `str` (shadowing the builtin) to keep the
        # established printer-module interface unchanged.
        fd.write("\t%s\n" % (str,))
        return

    def instruction(self, fd, inst):
        """Print one instruction as a debugger-executable call: name(op, ...)."""
        op_str = ', '.join([self.str_op(op) for op in inst._supplied_operands])
        # Keyword operands are appended as `name = value` pairs.
        for k, v in inst._supplied_koperands.items():
            op_str += ", %s = %s" % (str(k), str(v))
        fd.write("%s(%s)\n" % (inst.__class__.__name__, op_str))
        return

    def label(self, fd, lbl):
        fd.write("\n%s:\n" % lbl.name)
        return

    def str_op(self, op):
        """Render an operand: registers/variables by register number,
        anything else via str()."""
        if isinstance(op, spe.Register):
            return str(op.reg)
        elif isinstance(op, spe.Variable):
            return str(op.reg.reg)
        return str(op)
|
Python
| 0
|
|
2c0ce3c64720122bf2fdd80aeb2ff8359873ac83
|
Test that noindex flag will only show robots metatag when set
|
municipal_finance/tests/test_analytics.py
|
municipal_finance/tests/test_analytics.py
|
from django.test import TestCase
from django.conf import settings
class TestAnalytics(TestCase):
    """The robots noindex meta tag must be rendered only when NO_INDEX is set."""

    def test_noindex_flag(self):
        # Default settings: the page must be indexable (no robots meta tag).
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn('<meta name="robots" content="noindex">', str(response.content))
        # With the flag on, the tag must appear. Use the settings() context
        # manager so the override is undone after this test instead of
        # leaking into other tests (the original mutated settings in place).
        with self.settings(NO_INDEX="True"):
            response = self.client.get('/')
            self.assertEqual(response.status_code, 200)
            self.assertIn('<meta name="robots" content="noindex">', str(response.content))
|
Python
| 0
|
|
11dd2daf7dd125e0be6a604dd22ae25efed16226
|
Update at 2017-07-20 14-05-11
|
test.py
|
test.py
|
import json
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
# Allocate GPU memory on demand instead of reserving it all at session start.
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from data import *
from utils import get_callbacks
def main():
    """Train a small TimeDistributed-CNN + Conv3D binary classifier on
    window data pre-loaded from the generators in `data`."""
    # NOTE(review): hard-coded to GPU index 3 — adjust for the local machine.
    with tf.device('/gpu:3'):
        # Per-frame 2D convolutions over each of TIMESTEPS frames, then a
        # 3D convolution across time, then a dense sigmoid head.
        model = Sequential()
        model.add(TimeDistributed(BatchNormalization(), input_shape=(TIMESTEPS, 224, 224, 3)))
        model.add(TimeDistributed(Conv2D(4, kernel_size=5, strides=3, activation='relu')))
        model.add(TimeDistributed(Conv2D(8, kernel_size=5, strides=2, activation='relu')))
        model.add(TimeDistributed(Conv2D(12, kernel_size=3, strides=1, activation='relu')))
        model.add(TimeDistributed(BatchNormalization()))
        model.add(TimeDistributed(MaxPooling2D(pool_size=3)))
        model.add(Conv3D(4, kernel_size=5, strides=1, activation='relu'))
        model.add(BatchNormalization())
        model.add(Flatten())
        model.add(Dense(16))
        model.add(Dropout(0.3))
        model.add(Dense(1, activation='sigmoid'))
        model_arg = {
            'loss': 'binary_crossentropy',
            'optimizer': 'sgd',
            'metrics': ['binary_accuracy']
        }
        model.compile(**model_arg)
        model.summary()
        # Materialize fixed-size train/val tensors from the window generators
        # (TIMESTEPS, WINDOW_BATCH_SIZE and the generators come from `data`).
        n_train, n_val = 5000, 1000
        x_train = np.zeros((n_train, TIMESTEPS, 224, 224, 3), dtype=np.float32)
        y_train = np.zeros((n_train, 1), dtype=np.uint8)
        x_val = np.zeros((n_val, TIMESTEPS, 224, 224, 3), dtype=np.float32)
        y_val = np.zeros((n_val, 1), dtype=np.uint8)
        print('Loading data...', end='')
        for i in range(n_train):
            x, y = next(window_train_gen)
            x_train[i] = x
            y_train[i] = y
        for i in range(n_val):
            x, y = next(window_val_gen)
            x_val[i] = x
            y_val[i] = y
        print('ok')
        fit_arg = {
            'x': x_train,
            'y': y_train,
            'batch_size': WINDOW_BATCH_SIZE,
            'epochs': 30,
            'validation_data': (x_val, y_val),
            'shuffle': True
        }
        model.fit(**fit_arg)
if __name__ == '__main__':
    main()
|
Python
| 0
|
|
5383c884257eda3977b32c29874da8402c6a6380
|
Add test cases for Link, missed this file in previous commits.
|
coil/test/test_link.py
|
coil/test/test_link.py
|
"""Tests for coil.struct.Link"""
import unittest
from coil import errors
from coil.struct import Node, Link
class BasicTestCase(unittest.TestCase):
def setUp(self):
self.r = Node()
self.a = Node(self.r, "a")
self.b = Node(self.a, "b")
def assertRelative(self, link, expect):
relative = link.relative_path(link.link_path, '..')
self.assertEquals(relative, expect)
def assertAbsolute(self, link, expect):
absolute = link.absolute_path(link.link_path, '..')
self.assertEquals(absolute, expect)
def testInit(self):
w = Link("@root", self.r, "w")
x = Link("i.j.k", self.r, "x")
y = Link("..j.k", self.a, "y")
z = Link("..k", self.b, "z")
self.assertRelative(w, ".")
self.assertAbsolute(w, "@root")
self.assertRelative(x, "i.j.k")
self.assertAbsolute(x, "@root.i.j.k")
self.assertRelative(y, "..j.k")
self.assertAbsolute(y, "@root.j.k")
self.assertRelative(z, "..k")
self.assertAbsolute(z, "@root.a.k")
self.assertRaises(errors.CoilError, Link, "..z", self.r, "z")
def testCopy1(self):
x = Link("b", self.a, "x")
self.assertEquals(x.node_path, "@root.a.x")
self.assertRelative(x, "b")
self.assertAbsolute(x, "@root.a.b")
a2 = self.a.copy()
x2 = x.copy(a2, "x")
self.assertEquals(x2.node_path, "@root.x")
self.assertRelative(x2, "b")
self.assertAbsolute(x2, "@root.b")
def testCopy2(self):
x = Link("..i", self.a, "x")
y = x.copy(self.b, "y")
self.assertEquals(x.node_path, "@root.a.x")
self.assertRelative(x, "..i")
self.assertAbsolute(x, "@root.i")
self.assertEquals(y.node_path, "@root.a.b.y")
self.assertRelative(y, "..i")
self.assertAbsolute(y, "@root.a.i")
def testCopyTree1(self):
x = Link("i", self.a, "x")
self.assertEquals(x.node_path, "@root.a.x")
self.assertEquals(x.link_path, "i")
self.assertRelative(x, "i")
self.assertAbsolute(x, "@root.a.i")
y = x.copy(self.b, "y")
self.assertEquals(y.node_path, "@root.a.b.y")
self.assertEquals(y.link_path, "i")
self.assertRelative(y, "i")
self.assertAbsolute(y, "@root.a.b.i")
r2 = self.r.copy()
a2 = self.a.copy(r2, "a")
b2 = self.b.copy(a2, "b")
x2 = x.copy(a2, "x")
y2 = y.copy(b2, "y")
self.assertEquals(x2.node_path, "@root.a.x")
self.assertEquals(x2.link_path, "i")
self.assertRelative(x2, "i")
self.assertAbsolute(x2, "@root.a.i")
self.assertEquals(y2.node_path, "@root.a.b.y")
self.assertEquals(y2.link_path, "i")
self.assertRelative(y2, "i")
self.assertAbsolute(y2, "@root.a.b.i")
def testCopyTree2(self):
    """Absolute links still resolve to the same target after a tree copy."""
    x = Link("@root.a.i", self.a, "x")
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(x.node_path, "@root.a.x")
    self.assertEqual(x.link_path, "@root.a.i")
    self.assertRelative(x, "i")
    self.assertAbsolute(x, "@root.a.i")
    y = x.copy(self.b, "y")
    self.assertEqual(y.node_path, "@root.a.b.y")
    self.assertEqual(y.link_path, "@root.a.i")
    self.assertRelative(y, "..i")
    self.assertAbsolute(y, "@root.a.i")
    r2 = self.r.copy()
    a2 = self.a.copy(r2, "a")
    b2 = self.b.copy(a2, "b")
    x2 = x.copy(a2, "x")
    y2 = y.copy(b2, "y")
    self.assertEqual(x2.node_path, "@root.a.x")
    self.assertEqual(x2.link_path, "@root.a.i")
    self.assertRelative(x2, "i")
    self.assertAbsolute(x2, "@root.a.i")
    self.assertEqual(y2.node_path, "@root.a.b.y")
    self.assertEqual(y2.link_path, "@root.a.i")
    self.assertRelative(y2, "..i")
    self.assertAbsolute(y2, "@root.a.i")
def testCopySubTree1(self):
    """Copying a subtree to the root rebinds relative links to the new root."""
    x = Link("i", self.a, "x")
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(x.node_path, "@root.a.x")
    self.assertEqual(x.link_path, "i")
    self.assertRelative(x, "i")
    self.assertAbsolute(x, "@root.a.i")
    y = x.copy(self.b, "y")
    self.assertEqual(y.node_path, "@root.a.b.y")
    self.assertEqual(y.link_path, "i")
    self.assertRelative(y, "i")
    self.assertAbsolute(y, "@root.a.b.i")
    a2 = self.a.copy()
    b2 = self.b.copy(a2, "b")
    x2 = x.copy(a2, "x")
    y2 = y.copy(b2, "y")
    self.assertEqual(x2.node_path, "@root.x")
    self.assertEqual(x2.link_path, "i")
    self.assertRelative(x2, "i")
    self.assertAbsolute(x2, "@root.i")
    self.assertEqual(y2.node_path, "@root.b.y")
    self.assertEqual(y2.link_path, "i")
    self.assertRelative(y2, "i")
    self.assertAbsolute(y2, "@root.b.i")
def testCopySubTree2(self):
    """Copying a subtree rewrites absolute link paths to the new root."""
    x = Link("@root.a.i", self.a, "x")
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(x.node_path, "@root.a.x")
    self.assertEqual(x.link_path, "@root.a.i")
    self.assertRelative(x, "i")
    self.assertAbsolute(x, "@root.a.i")
    y = x.copy(self.b, "y")
    self.assertEqual(y.node_path, "@root.a.b.y")
    self.assertEqual(y.link_path, "@root.a.i")
    self.assertRelative(y, "..i")
    self.assertAbsolute(y, "@root.a.i")
    a2 = self.a.copy()
    b2 = self.b.copy(a2, "b")
    x2 = x.copy(a2, "x")
    y2 = y.copy(b2, "y")
    self.assertEqual(x2.node_path, "@root.x")
    self.assertEqual(x2.link_path, "@root.i")
    self.assertRelative(x2, "i")
    self.assertAbsolute(x2, "@root.i")
    self.assertEqual(y2.node_path, "@root.b.y")
    self.assertEqual(y2.link_path, "@root.i")
    self.assertRelative(y2, "..i")
    self.assertAbsolute(y2, "@root.i")
|
Python
| 0
|
|
0c76fa59e77786c577f0750c65f97d24eb3c4157
|
Test script
|
test.py
|
test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import tables
from sklearn.metrics import f1_score,confusion_matrix
# ===================== Data preparation =====================
# Load data
print("Loading data...")
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
sequence_max_length = 1024  # Twitter has only 140 characters. We pad 4 blanks characters more to the right of tweets to be conformed with the architecture of A. Conneau et al (2016)
from tensorflow.core.protobuf import saver_pb2  # NOTE(review): unused import; kept for compatibility

checkpoint_file = tf.train.latest_checkpoint("./")
graph = tf.Graph()
# Restore the trained graph and evaluate it on the held-out test set.
with graph.as_default():
    # BUG FIX: the original referenced an undefined ``FLAGS`` object here,
    # which crashed the script; use explicit session options instead.
    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        input_y = graph.get_operation_by_name("input_y").outputs[0]
        is_training = graph.get_operation_by_name("phase").outputs[0]
        accuracy_op = graph.get_operation_by_name("accuracy/accuracy").outputs[0]
        predictions = graph.get_operation_by_name("fc-3/predictions").outputs[0]

        hdf5_path = "my_extendable_compressed_data_test.hdf5"
        batch_size = 1000
        extendable_hdf5_file = tables.open_file(hdf5_path, mode='r')
        y_true_ = []
        predictions_ = []
        # Evaluate in fixed-size batches over the 70k test examples.
        for ptr in range(0, 70000, batch_size):
            labels = extendable_hdf5_file.root.clusters[ptr:ptr + batch_size]
            # BUG FIX: the original fed/ran ``cnn.*`` attributes that do not
            # exist in this script; use the tensors fetched from the graph.
            feed_dict = {
                input_x: extendable_hdf5_file.root.data[ptr:ptr + batch_size],
                input_y: labels,
                is_training: False,
            }
            # Compute true labels with numpy instead of growing the TF graph
            # with a new argmax op on every iteration.
            y_true_bis = np.argmax(labels, 1)
            predictions_bis, accuracy = sess.run(
                [predictions, accuracy_op], feed_dict=feed_dict)
            y_true_.extend(y_true_bis)
            predictions_.extend(predictions_bis)
        confusion_matrix_ = confusion_matrix(y_true_, predictions_)
        print(confusion_matrix_)
        print ("f1_score", f1_score(y_true_, predictions_ ,average ='weighted'))
        print ("f1_score", f1_score(y_true_, predictions_ ,average =None))
        extendable_hdf5_file.close()
|
Python
| 0.000001
|
|
77effff7ece070eabb3853ba918d40b7eb1c3de5
|
Create sc.py
|
sc.py
|
sc.py
|
#!/usr/bin/env python
import soundcloud
from clize import clize, run
from subprocess import call
@clize
def sc_load(tracks='', likes='', tags='', group=''):
    """Queue SoundCloud tracks onto an MPD server via mpc.

    Exactly one selector is expected: a user's tracks or likes, a tag
    query, or a group name.  Does nothing if none is given.
    """
    query_opts = {}
    if likes:
        endpoint = 'favorites'
    elif tracks or group:
        endpoint = 'tracks'
    elif tags:
        endpoint = 'tracks'
        query_opts = {'tags': tags}
    else:
        return

    client = soundcloud.Client(client_id='c4c979fd6f241b5b30431d722af212e8')

    # Resolve the user or group name into an API id to build the base URL.
    if likes or tracks:
        username = likes or tracks
        resolved = client.get('/resolve', url='https://soundcloud.com/' + username)
        base = '/users/%d/' % resolved.id
    elif group:
        resolved = client.get('/resolve', url='https://soundcloud.com/groups/' + group)
        base = '/groups/%d/' % resolved.id
    else:
        base = '/'

    for index, sound in enumerate(client.get(base + endpoint, **query_opts)):
        print("%d Loading %s..." % (index, sound.obj['title']))
        call(['mpc', '-h', '<motdepasse>@entrecote', 'load',
              'soundcloud://url/%s' % sound.obj['permalink_url'].replace('http:', 'https:')])
# clize parses sys.argv and dispatches to sc_load.
if __name__ == '__main__':
    run(sc_load)
|
Python
| 0.000005
|
|
2055fc1eda896103931eaba5fb01238506aaac1a
|
Add signup in urls
|
urls.py
|
urls.py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from okupy.login.views import *
from okupy.user.views import *
from okupy.signup.views import *
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()

# URL routing table: login page, user landing page, signup flow, admin.
urlpatterns = patterns('',
    url(r'^login/$', mylogin),
    url(r'^$', user),
    url(r'^signup/', signup),
    url(r'^admin/', include(admin.site.urls)),
)
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from okupy.login.views import *
from okupy.user.views import *
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()

# URL routing table: login page, user landing page, admin.
urlpatterns = patterns('',
    url(r'^login/$', mylogin),
    url(r'^$', user),
    url(r'^admin/', include(admin.site.urls)),
)
|
Python
| 0
|
d5b6299b802810748584b06242f614550155a283
|
Create app.py
|
app.py
|
app.py
|
from flask import Flask, request
import requests
import json
import traceback
import random
import os
from urllib.parse import urlencode
from urllib.request import Request, urlopen
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def main():
    """GroupMe bot callback endpoint.

    POST: GroupMe delivers each group message here; respond with the
    "My Man" GIF unless the sender is the bot itself (prevents the bot
    from replying to its own messages in a loop).
    GET: manual ping; respond with an alternate GIF.
    """
    # Dead commented-out debug code removed.
    if request.method == 'POST':
        data = request.get_json()
        # Skip messages posted by the bot itself.
        if data['name'] != 'My Man':
            msg = 'https://media.giphy.com/media/qPVzemjFi150Q/giphy.gif'
            send_message(msg)
    elif request.method == 'GET':
        msg = 'https://media.giphy.com/media/3o7aCUqs54taGzqDWU/giphy.gif'
        send_message(msg)
    return ("My Man!!")
def send_message(msg):
    """Post *msg* to the group via the GroupMe bot API.

    The bot id is read from the BOT_ID environment variable.
    Returns the decoded API response body.
    """
    url = 'https://api.groupme.com/v3/bots/post'
    data = {
        'bot_id' : os.getenv('BOT_ID'),
        'text' : msg,
    }
    # BUG FIX: the locals were named ``request`` and ``json``, shadowing
    # the flask ``request`` proxy and the imported ``json`` module.
    req = Request(url, urlencode(data).encode())
    return urlopen(req).read().decode()
# Local development entry point.
if __name__ == '__main__':
    app.run()
|
Python
| 0.000003
|
|
4ff22a24a7d681a3c62f7d7e4fe56c0032a83370
|
Improve logging
|
app.py
|
app.py
|
import bottle
from bottle import get, post, static_file, request, route, template
from bottle import SimpleTemplate
from configparser import ConfigParser
from ldap3 import Connection, LDAPBindError, LDAPInvalidCredentialsResult, Server
from ldap3 import AUTH_SIMPLE, SUBTREE
from os import path
@get('/')
def get_index():
    """Render the password-change form."""
    return index_tpl()
@post('/')
def post_index():
    """Validate the submitted form, attempt the change, log the outcome."""
    field = request.forms.get

    def fail(msg):
        # Re-render the form with the username preserved and an error alert.
        return index_tpl(username=field('username'), alerts=[('error', msg)])

    new_password = field('new-password')
    if new_password != field('confirm-password'):
        return fail("Password doesn't match the confirmation!")
    if len(new_password) < 8:
        return fail("Password must be at least 8 characters long!")
    username = field('username')
    if not change_password(username, field('old-password'), new_password):
        print("Unsuccessful attemp to change password for: %s" % username)
        return fail("Username or password is incorrect!")
    print("Password successfully changed for: %s" % username)
    return index_tpl(alerts=[('success', "Password has been changed")])
@route('/static/<filename>', name='static')
def serve_static(filename):
    """Serve assets from the application's ``static`` directory."""
    static_root = path.join(BASE_DIR, 'static')
    return static_file(filename, root=static_root)
def index_tpl(**kwargs):
    """Render the index template with *kwargs* as template variables."""
    return template('index', **kwargs)
def change_password(username, old_pass, new_pass):
    """Attempt an LDAP password change; return True on success.

    Binds as the user's own DN with the old password, then issues the
    standard password-modify extended operation.  Returns False when
    the bind fails or the credentials are invalid.
    """
    server = Server(CONF['ldap']['host'], int(CONF['ldap']['port']))
    user_dn = find_user_dn(server, username)
    try:
        with Connection(server, authentication=AUTH_SIMPLE, raise_exceptions=True,
                        user=user_dn, password=old_pass) as c:
            c.bind()
            c.extend.standard.modify_password(user_dn, old_pass, new_pass)
            return True
    except (LDAPBindError, LDAPInvalidCredentialsResult):
        return False
def find_user_dn(server, uid):
    """Look up the DN for *uid* under the configured search base, or None.

    NOTE(review): *uid* is interpolated into the LDAP filter unescaped —
    a crafted username could alter the filter (LDAP injection); consider
    escaping filter characters before use.
    """
    with Connection(server) as c:
        c.search(CONF['ldap']['base'], "(uid=%s)" % uid, SUBTREE, attributes=['dn'])
        return c.response[0]['dn'] if c.response else None
BASE_DIR = path.dirname(__file__)
# Load settings (LDAP connection details and HTML template defaults).
CONF = ConfigParser()
CONF.read(path.join(BASE_DIR, 'settings.ini'))
bottle.TEMPLATE_PATH = [ BASE_DIR ]
# Set default attributes to pass into templates.
SimpleTemplate.defaults = dict(CONF['html'])
SimpleTemplate.defaults['url'] = bottle.url
# Run bottle internal test server when invoked directly (in development).
if __name__ == '__main__':
    bottle.run(host='0.0.0.0', port=8080)
# Run bottle in application mode (in production under uWSGI server).
else:
    application = bottle.default_app()
|
import bottle
from bottle import get, post, static_file, request, route, template
from bottle import SimpleTemplate
from configparser import ConfigParser
from ldap3 import Connection, LDAPBindError, LDAPInvalidCredentialsResult, Server
from ldap3 import AUTH_SIMPLE, SUBTREE
from os import path
@get('/')
def get_index():
    """Render the password-change form."""
    return index_tpl()
@post('/')
def post_index():
    """Validate the submitted form and attempt the password change."""
    form = request.forms.get
    def error(msg):
        # Re-render the form with the username preserved and an error alert.
        return index_tpl(username=form('username'), alerts=[('error', msg)])
    if form('new-password') != form('confirm-password'):
        return error("Password doesn't match the confirmation!")
    if len(form('new-password')) < 8:
        return error("Password must be at least 8 characters long!")
    if not change_password(form('username'), form('old-password'), form('new-password')):
        return error("Username or password is incorrect!")
    return index_tpl(alerts=[('success', "Password has been changed")])
@route('/static/<filename>', name='static')
def serve_static(filename):
    """Serve assets from the application's ``static`` directory."""
    return static_file(filename, root=path.join(BASE_DIR, 'static'))
def index_tpl(**kwargs):
    """Render the index template with *kwargs* as template variables."""
    return template('index', **kwargs)
def change_password(username, old_pass, new_pass):
    """Attempt an LDAP password change; return True on success.

    Binds as the user's own DN with the old password, then issues the
    standard password-modify extended operation.
    """
    # NOTE(review): logs the attempt before the outcome is known.
    print("Changing password for user: %s" % username)
    server = Server(CONF['ldap']['host'], int(CONF['ldap']['port']))
    user_dn = find_user_dn(server, username)
    try:
        with Connection(server, authentication=AUTH_SIMPLE, raise_exceptions=True,
                        user=user_dn, password=old_pass) as c:
            c.bind()
            c.extend.standard.modify_password(user_dn, old_pass, new_pass)
            return True
    except (LDAPBindError, LDAPInvalidCredentialsResult):
        return False
def find_user_dn(server, uid):
    """Look up the DN for *uid* under the configured search base, or None.

    NOTE(review): *uid* is interpolated into the LDAP filter unescaped
    (possible LDAP injection); consider escaping filter characters.
    """
    with Connection(server) as c:
        c.search(CONF['ldap']['base'], "(uid=%s)" % uid, SUBTREE, attributes=['dn'])
        return c.response[0]['dn'] if c.response else None
BASE_DIR = path.dirname(__file__)
# Load settings (LDAP connection details and HTML template defaults).
CONF = ConfigParser()
CONF.read(path.join(BASE_DIR, 'settings.ini'))
bottle.TEMPLATE_PATH = [ BASE_DIR ]
# Set default attributes to pass into templates.
SimpleTemplate.defaults = dict(CONF['html'])
SimpleTemplate.defaults['url'] = bottle.url
# Run bottle internal test server when invoked directly (in development).
if __name__ == '__main__':
    bottle.run(host='0.0.0.0', port=8080)
# Run bottle in application mode (in production under uWSGI server).
else:
    application = bottle.default_app()
|
Python
| 0.00001
|
b720ecf75634718a122c97bcff29129e321aa9b2
|
Add cat.py.
|
cat.py
|
cat.py
|
"""
Usage: cat.py [FILE]...
Concatenate FILE(s), or standard input, to standard output.
"""
import sys
def iter_files(paths):
    """Yield binary file objects for each path in *paths*.

    Paths that cannot be opened are reported to stderr and skipped.
    """
    for filename in paths:
        try:
            handle = open(filename, 'rb')
        except (IOError, OSError) as exc:
            print("error: {}".format(exc), file=sys.stderr)
        else:
            yield handle
def main(argv=None):
    """Concatenate the files named in *argv* (or stdin) to stdout."""
    args = argv if argv else list(sys.argv)
    if len(args) < 2:
        # No file arguments: copy standard input through.
        sources = [sys.stdin.buffer]
    else:
        sources = iter_files(args[1:])
    out = sys.stdout.buffer
    for source in sources:
        for line in source:
            out.write(line)
        source.close()
if __name__ == "__main__":
main()
|
Python
| 0.000007
|
|
3b58283f613fc827e024c8d971d89c24fc2b3ed0
|
Create knn.py
|
knn.py
|
knn.py
|
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
# ---- Load training data and split into train/test folds ----
data=pd.read_csv('train.csv')
data1=data.values
X=data1[:,1:]
# BUG FIX: the original read ``y = np.ravel(y)`` with ``y`` undefined.
# The label is the first column of the training matrix (assumes the
# Kaggle digit-recognizer layout, consistent with the 'Label' column
# written below — TODO confirm).
y=np.ravel(data1[:,0])
Xtrain,Xtest,ytrain,ytest=train_test_split(X,y,test_size=0.25)

# ---- PCA for dimensionality reduction, then KNN classification ----
pca=PCA(n_components=50).fit(Xtrain)
Xtrain_reduced=pca.transform(Xtrain)
Xtest_reduced=pca.transform(Xtest)
knn=KNeighborsClassifier(n_neighbors=5,weights='distance',p=3)
knn.fit(Xtrain_reduced,ytrain)
pred=knn.predict(Xtest_reduced)
print("Classification report for classifier %s:\n%s\n"
      % (knn, metrics.classification_report(ytest,pred)))

# ---- Predict on the submission test set and write the results ----
test=pd.read_csv('test.csv')
test_reduced=pca.transform(test)
pred2=knn.predict(test_reduced)
pred2 = pd.DataFrame(pred2)
pred2['ImageId'] = pred2.index + 1
pred2 = pred2[['ImageId', 0]]
pred2.columns = ['ImageId', 'Label']
pred2.to_csv('pred2.csv', index=False)
|
Python
| 0.00005
|
|
1faa3c76d1c752de02149af34954ed538fe10fa1
|
Add test
|
app/tests/test_data.py
|
app/tests/test_data.py
|
import unittest
from app import data
class TestProjects(unittest.TestCase):
    """Smoke tests for the bundled project data."""

    def test_load(self) -> None:
        """Loaded data is non-empty and contains the expected entries."""
        projects = data.Projects.load()
        loaded = projects.data
        self.assertNotEqual(loaded, {})
        self.assertIn('Python', loaded)
        python_projects = loaded['Python']
        self.assertIn('Git Browse', python_projects)
        self.assertIn('description', python_projects['Git Browse'])
|
Python
| 0.000005
|
|
5813474651299998fb27c64c6d179a0a59bbe28c
|
Create otc.py
|
otc.py
|
otc.py
|
def tick(a,b,c):
    """IRC-style ticker command: fetch a BTC-e currency-pair ticker.

    a: base currency (or 'help'); b: quote currency or 'none' (defaults
    to USD, falling back to BTC for unknown pairs); c: 'none' for the
    full ticker line, or a '--field' flag selecting a single field.
    """
    if a == 'help':
        msg = '^otc {currency}, specify a 2nd currency for rates, add --last/high/low etc for that alone.'
        return msg
    # NOTE(review): urllib2/StringIO are Python 2 only; StringIO is unused.
    import urllib2,json,StringIO
    a = a.lower()
    b = b.lower()
    c = c.lower()
    # A flag passed in the second slot shifts it to c and defaults b to USD.
    if b.startswith('-'):
        c = b
        b = 'usd'
    if b == 'none':
        b = 'usd'
    btce = urllib2.Request('https://btc-e.com/api/2/' + a + '_' + b + '/ticker')
    get = urllib2.urlopen(btce)
    parse = get.read()
    # Unknown pair: retry once with BTC as the quote currency.
    if parse == '{"error":"invalid pair"}':
        b = 'btc'
        btce = urllib2.Request('https://btc-e.com/api/2/' + a + '_' + b + '/ticker')
        get = urllib2.urlopen(btce)
        parse = get.read()
    try:
        # NOTE(review): due to operator precedence the .replace chain binds
        # only to the "}" literal, so most replacements never apply to the
        # assembled string — this hand-rolled JSON repair looks broken.
        # Parsing the raw response with json.loads would be safer; left
        # untouched here because downstream key access depends on it.
        ticker3 = "{" + parse.split('{',2)[2].split('}',2)[0] + "}".replace('"','\'').replace(':',':"').replace(',','",').replace('}','"}')
        ticker2 = ticker3.replace(':',':"').replace(',','",')
        ticker = json.loads(ticker2)
    except:
        return 'Unknown currency'
    if c == 'none':
        msg = 'BTC-E ' + a.upper() + b.upper() + ' ticker | High: ' + ticker['high'] + ', Low: ' + ticker['low'] + ', avg: ' + ticker['avg'] + ', Last: ' + ticker['last'] + ', Buy: ' + ticker['buy'] + ', Sell: ' + ticker['sell']
    elif c.startswith('--'):
        msg = ticker[c[2:]]
    else:
        msg = 'That flag does not exist'
    return msg
|
Python
| 0.000002
|
|
bf678628cf98b1c18a75f09fa15d26526ea0e3ac
|
Add gender choices fields
|
accelerator/migrations/0028_add_gender_fields.py
|
accelerator/migrations/0028_add_gender_fields.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add gender_self_description and gender_identity fields to the
    entrepreneur, expert, and member profile models."""

    dependencies = [
        # Requires the GenderChoices model created by the previous migration.
        ('accelerator', '0027_add_gender_choices_object'),
    ]
    operations = [
        migrations.AddField(
            model_name='entrepreneurprofile',
            name='gender_self_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='entrepreneurprofile',
            name='gender_identity',
            field=models.ManyToManyField(
                blank=True,
                to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
        ),
        migrations.AddField(
            model_name='expertprofile',
            name='gender_self_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='expertprofile',
            name='gender_identity',
            field=models.ManyToManyField(
                blank=True,
                to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
        ),
        migrations.AddField(
            model_name='memberprofile',
            name='gender_self_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='memberprofile',
            name='gender_identity',
            field=models.ManyToManyField(
                blank=True,
                to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
        ),
    ]
|
Python
| 0.000001
|
|
bac06acb1e6255040f371232776f3da75fb9247a
|
Add data migration to populate preprint_doi_created field on existing published preprints where DOI identifier exists. Set to preprint date_published field.
|
osf/migrations/0069_auto_20171127_1119.py
|
osf/migrations/0069_auto_20171127_1119.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 17:19
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.models import PreprintService
logger = logging.getLogger(__name__)
def add_preprint_doi_created(apps, schema_editor):
    """
    Data migration that makes preprint_doi_created equal to date_published for existing published preprints.
    """
    # NOTE(review): uses the concrete PreprintService model rather than
    # apps.get_model() because get_identifier() is a custom model method
    # that Django's historical models lack; this migration will break if
    # the model is later renamed or removed.
    null_preprint_doi_created = PreprintService.objects.filter(preprint_doi_created__isnull=True, date_published__isnull=False)
    preprints_count = null_preprint_doi_created.count()
    current_preprint = 0
    logger.info('{} published preprints found with preprint_doi_created is null.'.format(preprints_count))
    for preprint in null_preprint_doi_created:
        current_preprint += 1
        # Only backfill the timestamp when a DOI was actually minted.
        if preprint.get_identifier('doi'):
            preprint.preprint_doi_created = preprint.date_published
            preprint.save()
            logger.info('Preprint ID {}, {}/{} preprint_doi_created field populated.'.format(preprint._id, current_preprint, preprints_count))
        else:
            logger.info('Preprint ID {}, {}/{} skipped because a DOI has not been created.'.format(preprint._id, current_preprint, preprints_count))
def reverse_func(apps, schema_editor):
    """
    Reverses data migration. Sets preprint_doi_created field back to null.
    """
    preprint_doi_created_not_null = PreprintService.objects.filter(preprint_doi_created__isnull=False)
    preprints_count = preprint_doi_created_not_null.count()
    current_preprint = 0
    logger.info('Reversing preprint_doi_created migration.')
    for preprint in preprint_doi_created_not_null:
        current_preprint += 1
        preprint.preprint_doi_created = None
        preprint.save()
        logger.info('Preprint ID {}, {}/{} preprint_doi_created field set to None.'.format(preprint._id, current_preprint, preprints_count))
class Migration(migrations.Migration):
    """Backfill preprint_doi_created for published preprints with a DOI."""

    dependencies = [
        # The field itself was added by the previous schema migration.
        ('osf', '0068_preprintservice_preprint_doi_created'),
    ]
    operations = [
        migrations.RunPython(add_preprint_doi_created, reverse_func)
    ]
|
Python
| 0
|
|
167a6497d79a4a18badd5ea85a87e7eefcd02696
|
Add init file to the root acceptance tests folder
|
test/acceptance/__init__.py
|
test/acceptance/__init__.py
|
# -*- coding: utf-8 -*-
"""
Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
This file is part of fiware-orion-pep
fiware-orion-pep is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
fiware-orion-pep is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with fiware-orion-pep.
If not, see http://www.gnu.org/licenses/.
For those usages not covered by the GNU Affero General Public License
please contact with::[iot_support@tid.es]
"""
__author__ = 'Jon Calderin Goñi <jon.caldering@gmail.com>'
import os
"""
Make sure the logs path exists and create it otherwise.
"""
# NOTE(review): exists→makedirs has a benign TOCTOU race if two test
# runners start concurrently; os.makedirs('logs', exist_ok=True) would
# be atomic on Python 3.
if not os.path.exists('logs'):
    os.makedirs('logs')
|
Python
| 0
|
|
d290b3b2cc15a3bab907ed3847da709ab31edace
|
disable unpredictable tests
|
tests/acceptance/test_api.py
|
tests/acceptance/test_api.py
|
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class ApiTokensTest(AcceptanceTestCase):
    """Snapshot the API tokens page.

    The token-creation steps below are deliberately commented out —
    they appear to have been disabled as unpredictable; do not delete.
    """
    def setUp(self):
        super(ApiTokensTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.login_as(self.user)
        self.path = '/api/'
    def test_simple(self):
        self.browser.get(self.path)
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - no tokens')
        # self.browser.click('.ref-create-token')
        # self.browser.wait_until_not('.loading')
        # self.browser.snapshot('api tokens - new token')
        # self.browser.click('.btn-primary')
        # self.browser.wait_until_not('.loading')
        # self.browser.snapshot('api tokens - single token')
class ApiApplicationTest(AcceptanceTestCase):
    """Snapshot the API applications page.

    The application-creation steps below are deliberately commented out —
    they appear to have been disabled as unpredictable; do not delete.
    """
    def setUp(self):
        super(ApiApplicationTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.login_as(self.user)
        self.path = '/api/applications/'
    def test_simple(self):
        self.browser.get(self.path)
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - no applications')
        # self.browser.click('.ref-create-application')
        # self.browser.wait_until_not('.loading')
        # self.browser.snapshot('api applications - new application')
        # self.browser.click('.btn-primary')
        # self.browser.wait_until_not('.loading')
        # self.browser.snapshot('api applications - single application')
|
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class ApiTokensTest(AcceptanceTestCase):
    """Exercise and snapshot the API token creation flow."""
    def setUp(self):
        super(ApiTokensTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.login_as(self.user)
        self.path = '/api/'
    def test_simple(self):
        self.browser.get(self.path)
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - no tokens')
        self.browser.click('.ref-create-token')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - new token')
        self.browser.click('.btn-primary')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - single token')
class ApiApplicationTest(AcceptanceTestCase):
    """Exercise and snapshot the API application creation flow."""
    def setUp(self):
        super(ApiApplicationTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.login_as(self.user)
        self.path = '/api/applications/'
    def test_simple(self):
        self.browser.get(self.path)
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - no applications')
        self.browser.click('.ref-create-application')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - new application')
        self.browser.click('.btn-primary')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - single application')
|
Python
| 0
|
8fa776fd2fa63a44cb048a39fe7359ee9366c5e8
|
Add basic Processor tests
|
tests/003-test-processor.py
|
tests/003-test-processor.py
|
import time
import random
import multiprocessing
from functools import wraps
try:
import queue
except ImportError:
import Queue as queue
import t
import bucky.processor
import bucky.cfg as cfg
cfg.debug = True
def processor(func):
    """Decorator: run the wrapped test against a live CustomProcessor.

    Starts the processor on fresh multiprocessing queues, invokes the
    test body with (inq, outq, proc), then shuts the processor down by
    sending None and polls up to ~0.5s for the process to exit, raising
    if it is still alive afterwards.
    """
    @wraps(func)
    def run():
        inq = multiprocessing.Queue()
        outq = multiprocessing.Queue()
        proc = bucky.processor.CustomProcessor(inq, outq, cfg)
        proc.start()
        func(inq, outq, proc)
        # None is the sentinel that tells the processor to stop.
        inq.put(None)
        dead = False
        for i in range(5):
            if not proc.is_alive():
                dead = True
                break
            time.sleep(0.1)
        if not dead:
            raise RuntimeError("Server didn't die.")
    return run
def send_get_data(indata, inq, outq):
    """Feed *indata* into inq and yield processed samples from outq.

    Stops once outq stays empty for one second.
    """
    for item in indata:
        inq.put(item)
    while True:
        try:
            yield outq.get(True, 1)
        except queue.Empty:
            return
def identity(host, name, val, timestamp):
    """Pass-through processor: return the sample unchanged.

    The last parameter was renamed from ``time`` to ``timestamp`` so it
    no longer shadows the ``time`` module imported by this file, and to
    match ``filter_even``; the processor framework calls positionally.
    """
    return host, name, val, timestamp
@t.set_cfg("processor", identity)
@processor
def test_start_stop(inq, outq, proc):
assert proc.is_alive(), "Processor not alive."
inq.put(None)
time.sleep(0.5)
assert not proc.is_alive(), "Processor not killed by putting None in queue"
@t.set_cfg("processor", identity)
@processor
def test_plumbing(inq, outq, proc):
data = []
times = 100
for i in range(times):
host = "tests.host-%d" % i
name = "test-plumbing-%d" % i
value = i
timestamp = int(time.time() + i)
data.append((host, name, value, timestamp))
i = 0
for sample in send_get_data(data, inq, outq):
t.eq(sample, data[i])
i += 1
t.eq(i, times)
def filter_even(host, name, val, timestamp):
    """Processor that drops samples with an even value, passes odd ones."""
    return None if val % 2 == 0 else (host, name, val, timestamp)
@t.set_cfg("processor", filter_even)
@processor
def test_filter(inq, outq, proc):
data = []
times = 100
for i in range(times):
host = "tests.host-%d" % i
name = "test-filter-%d" % i
timestamp = int(time.time() + i)
data.append((host, name, 0, timestamp))
data.append((host, name, 1, timestamp))
i = 0
for sample in send_get_data(data, inq, outq):
t.eq(sample[2], 1)
i += 1
t.eq(i, times)
|
Python
| 0.000001
|
|
0b185bb6a30cb7c9b02c80051a8426dc736da3d6
|
Add sample WSGI app
|
examples/wsgi.py
|
examples/wsgi.py
|
import cgi
import json
from wsgiref import simple_server
import falcon
from mclib import mc_info
class MCInfo(object):
    """Falcon resource exposing Minecraft server info as JSON or HTML."""

    def on_get(self, req, resp):
        """Handle GET: query the server, render per content negotiation."""
        host = req.get_param('host', required=True)
        # BUG FIX: the upper bound was 65565; TCP ports only go up to 65535.
        port = req.get_param_as_int('port', min=1024,
                                    max=65535)
        try:
            if port is not None:
                info = mc_info.get_info(host=host,
                                        port=port)
            else:
                info = mc_info.get_info(host=host)
        except Exception:
            raise Exception('Couldn\'t retrieve info.')
        # An explicit .json suffix in the URI forces a JSON response.
        if '.json' in req.uri:
            resp.body = self.get_json(info)
            return
        preferred = req.client_prefers(['application/json', 'text/html'])
        if 'html' in preferred:
            resp.content_type = 'text/html'
            resp.body = self.get_html(info)
        else:
            resp.body = self.get_json(info)

    def get_html(self, info):
        """Render *info* as a simple HTML table.

        NOTE(review): uses Python 2-only iteritems/basestring and the
        deprecated cgi.escape.
        """
        html = """<body>
<style>
table,th,td
{
border:1px solid black;
border-collapse:collapse
}
th,td
{
padding: 5px
}
</style>
<table>
"""
        for k,v in info.iteritems():
            items = {'key': cgi.escape(k)}
            if isinstance(v, basestring):
                items['val'] = cgi.escape(v)
            else:
                items['val'] = v
            html = html + '<tr><td>%(key)s</td><td>%(val)s</td></tr>' % items
        html = html + '</table></body>'
        return html

    def get_json(self, info):
        """Serialize *info* to a JSON string."""
        return json.dumps(info)
# Wire up the WSGI app; the resource answers with and without .json suffix.
app = falcon.API()
mcinfo = MCInfo()
app.add_route('/mcinfo', mcinfo)
app.add_route('/mcinfo.json', mcinfo)

# Development server entry point.
if __name__ == '__main__':
    httpd = simple_server.make_server('0.0.0.0', 3000, app)
    httpd.serve_forever()
|
Python
| 0
|
|
b097075f7606563fc8ae80274e73b74dedd8129f
|
prepare a new folder "resources" for json files to replace python dynamic_resources
|
src/alfanous/Data.py
|
src/alfanous/Data.py
|
'''
Created on Jun 15, 2012
@author: assem
'''
class Configs:
    """Placeholder for configuration resources (to be backed by JSON files)."""
    pass
class Indexes:
    """Placeholder for search-index resources (to be backed by JSON files)."""
    pass
class Ressources:
    """Placeholder for static resources (to be backed by JSON files).

    NOTE(review): spelling ("Ressources") kept as-is — renaming would
    break any importers elsewhere in the package.
    """
    pass
|
Python
| 0.000014
|
|
708f1b009b9b23971d73bfc7bc09163969ab6e00
|
Add integration tests for MEDIA_ALLOW_REDIRECTS
|
tests/test_pipeline_crawl.py
|
tests/test_pipeline_crawl.py
|
# -*- coding: utf-8 -*-
import os
import shutil
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from w3lib.url import add_or_replace_parameter
from scrapy.crawler import CrawlerRunner
from scrapy import signals
from tests.mockserver import MockServer
from tests.spiders import SimpleSpider
class MediaDownloadSpider(SimpleSpider):
    """Spider that collects image URLs from a directory-listing page.

    The ImagesPipeline configured by the test case downloads each URL
    in ``image_urls`` and fills in ``images``.
    """
    name = 'mediadownload'

    def _process_url(self, url):
        # Hook for subclasses to rewrite media URLs (identity here).
        return url

    def parse(self, response):
        self.logger.info(response.headers)
        self.logger.info(response.text)
        item = {
            'images': [],
            # Links come from the "Filename" column of the listing table.
            'image_urls': [
                self._process_url(response.urljoin(href))
                for href in response.xpath('''
                    //table[thead/tr/th="Filename"]
                    /tbody//a/@href
                    ''').extract()],
        }
        yield item
class BrokenLinksMediaDownloadSpider(MediaDownloadSpider):
    """Variant that mangles every media URL so downloads 404."""
    name = 'brokenmedia'

    def _process_url(self, url):
        broken = url + '.foo'
        return broken
class RedirectedMediaDownloadSpider(MediaDownloadSpider):
    """Variant that routes every media URL through a redirect endpoint."""
    name = 'redirectedmedia'

    def _process_url(self, url):
        redirect_endpoint = 'http://localhost:8998/redirect-to'
        return add_or_replace_parameter(redirect_endpoint, 'goto', url)
class MediaDownloadCrawlTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
# prepare a directory for storing files
self.tmpmediastore = self.mktemp()
os.mkdir(self.tmpmediastore)
self.settings = {
'ITEM_PIPELINES': {'scrapy.pipelines.images.ImagesPipeline': 1},
'IMAGES_STORE': self.tmpmediastore,
}
self.runner = CrawlerRunner(self.settings)
self.items = []
# these are the checksums for images in test_site/files/images
# - scrapy.png
# - python-powered-h-50x65.png
# - python-logo-master-v3-TM-flattened.png
self.expected_checksums = set([
'a7020c30837f971084834e603625af58',
'acac52d42b63cf2c3b05832641f3a53c',
'195672ac5888feb400fbf7b352553afe'])
def tearDown(self):
shutil.rmtree(self.tmpmediastore)
self.items = []
self.mockserver.__exit__(None, None, None)
def _on_item_scraped(self, item):
self.items.append(item)
def _create_crawler(self, spider_class):
crawler = self.runner.create_crawler(spider_class)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
return crawler
def _assert_files_downloaded(self, items, logs):
self.assertEqual(len(items), 1)
self.assertIn('images', items[0])
# check that logs show the expected number of successful file downloads
file_dl_success = 'File (downloaded): Downloaded file from'
self.assertEqual(logs.count(file_dl_success), 3)
# check that the images checksums are what we know they should be
checksums = set(
i['checksum']
for item in items
for i in item['images'])
self.assertEqual(checksums, self.expected_checksums)
# check that the image files where actually written to the media store
for item in items:
for i in item['images']:
self.assertTrue(
os.path.exists(
os.path.join(self.tmpmediastore, i['path'])))
def _assert_files_download_failure(self, crawler, items, code, logs):
# check that the item does NOT have the "images" field populated
self.assertEqual(len(items), 1)
self.assertIn('images', items[0])
self.assertFalse(items[0]['images'])
# check that there was 1 successful fetch and 3 other responses with non-200 code
self.assertEqual(crawler.stats.get_value('downloader/request_method_count/GET'), 4)
self.assertEqual(crawler.stats.get_value('downloader/response_count'), 4)
self.assertEqual(crawler.stats.get_value('downloader/response_status_count/200'), 1)
self.assertEqual(crawler.stats.get_value('downloader/response_status_count/%d' % code), 3)
# check that logs do show the failure on the file downloads
file_dl_failure = 'File (code: %d): Error downloading file from' % code
self.assertEqual(logs.count(file_dl_failure), 3)
# check that no files were written to the media store
self.assertEqual(os.listdir(self.tmpmediastore), [])
@defer.inlineCallbacks
def test_download_media(self):
crawler = self._create_crawler(MediaDownloadSpider)
with LogCapture() as log:
yield crawler.crawl("http://localhost:8998/files/images/")
self._assert_files_downloaded(self.items, str(log))
@defer.inlineCallbacks
def test_download_media_wrong_urls(self):
crawler = self._create_crawler(BrokenLinksMediaDownloadSpider)
with LogCapture() as log:
yield crawler.crawl("http://localhost:8998/files/images/")
self._assert_files_download_failure(crawler, self.items, 404, str(log))
    @defer.inlineCallbacks
    def test_download_media_redirected_default_failure(self):
        """By default, redirected (302) media URLs fail to download."""
        crawler = self._create_crawler(RedirectedMediaDownloadSpider)
        with LogCapture() as log:
            yield crawler.crawl("http://localhost:8998/files/images/")
        self._assert_files_download_failure(crawler, self.items, 302, str(log))
    @defer.inlineCallbacks
    def test_download_media_redirected_allowed(self):
        """With MEDIA_ALLOW_REDIRECTS=True, redirected media downloads succeed."""
        settings = dict(self.settings)
        settings.update({'MEDIA_ALLOW_REDIRECTS': True})
        self.runner = CrawlerRunner(settings)
        crawler = self._create_crawler(RedirectedMediaDownloadSpider)
        with LogCapture() as log:
            yield crawler.crawl("http://localhost:8998/files/images/")
        self._assert_files_downloaded(self.items, str(log))
        # three media requests were redirected exactly once each
        self.assertEqual(crawler.stats.get_value('downloader/response_status_count/302'), 3)
|
Python
| 0.000001
|
|
b171eb0c77f2d68051b48145f4e49275ed6860b9
|
Add tests for signup code exists method
|
account/tests/test_models.py
|
account/tests/test_models.py
|
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from django.contrib.auth.models import User
from account.models import SignupCode
class SignupCodeModelTestCase(TestCase):
    """Exercises every email/code combination of ``SignupCode.exists()``."""

    EMAIL = 'foobar@example.com'
    CODE = 'FOOFOO'

    def _save_code(self):
        # Persist the single fixture row each test looks up.
        signup_code = SignupCode(email=self.EMAIL, code=self.CODE)
        signup_code.save()

    def test_exists_no_match(self):
        """Falsy when neither the email nor the code (nor anything) matches."""
        self._save_code()
        self.assertFalse(SignupCode.exists(code='BARBAR'))
        self.assertFalse(SignupCode.exists(email='bar@example.com'))
        self.assertFalse(SignupCode.exists(email='bar@example.com', code='BARBAR'))
        self.assertFalse(SignupCode.exists())

    def test_exists_email_only_match(self):
        """Truthy when only a matching email is given."""
        self._save_code()
        self.assertTrue(SignupCode.exists(email='foobar@example.com'))

    def test_exists_code_only_match(self):
        """Truthy when the code matches, alone or with a stray email."""
        self._save_code()
        self.assertTrue(SignupCode.exists(code='FOOFOO'))
        self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))

    def test_exists_email_match_code_mismatch(self):
        """Truthy: a matching email is enough even if the code differs."""
        self._save_code()
        self.assertTrue(SignupCode.exists(email='foobar@example.com', code='BARBAR'))

    def test_exists_code_match_email_mismatch(self):
        """Truthy: a matching code is enough even if the email differs."""
        self._save_code()
        self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))

    def test_exists_both_match(self):
        """Truthy when both email and code match the saved row."""
        self._save_code()
        self.assertTrue(SignupCode.exists(email='foobar@example.com', code='FOOFOO'))
|
Python
| 0
|
|
6c55d840ed22ec584c6adad15d89d9888b408d88
|
[128. Longest Consecutive Sequence][Accepted]committed by Victor
|
128-Longest-Consecutive-Sequence/solution.py
|
128-Longest-Consecutive-Sequence/solution.py
|
class Solution(object):
    def longestConsecutive(self, nums):
        """Length of the longest run of consecutive integers in nums.

        :type nums: List[int]
        :rtype: int

        Bucket approach (O(n) expected time, O(n) space): map each number
        to the (start, end) endpoints of the consecutive run it belongs
        to, merging with its n-1 / n+1 neighbours as numbers arrive.
        """
        bucket = {}
        # BUG FIX: was initialised to 1, which wrongly reported a run of
        # length 1 for an empty input list.
        max_len = 0
        for num in nums:
            # duplicates cannot extend any run; skip them
            # (``in`` replaces the Python2-only dict.has_key())
            if num in bucket:
                continue
            # a lone number is a run of itself
            start = end = num
            if num - 1 in bucket:
                # extend left to the neighbouring run's start
                start = bucket[num - 1][0]
            if num + 1 in bucket:
                # extend right to the neighbouring run's end
                end = bucket[num + 1][1]
            # only the endpoints need accurate values later, but storing
            # num too keeps the duplicate check above a plain lookup
            bucket[start] = bucket[end] = bucket[num] = (start, end)
            max_len = max(end - start + 1, max_len)
        return max_len
|
Python
| 0.999951
|
|
f5140f87e0e4326fe189b2f5f3ff3ac90f8db5c8
|
Add new heroku_worker.py to run as a Heroku worker process
|
blockbuster/heroku_worker.py
|
blockbuster/heroku_worker.py
|
import redis
from rq import Worker, Queue, Connection
import os
# Connection string for the job queue; falls back to a local Redis
# instance when the REDIS_URL env var (set by Heroku's add-on) is absent.
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:32769/1')
print(REDIS_URL)
# Queue names this worker consumes, in priority order.
listen = ['default']
conn = redis.from_url(REDIS_URL)
if __name__ == '__main__':
    with Connection(conn):
        # Blocks here forever, processing RQ jobs from the listed queues.
        worker = Worker(map(Queue, listen))
        worker.work()
|
Python
| 0.000001
|
|
0722624244d107b19a006f07fd884d47597e4eb1
|
Add utility class to filter text through external program
|
lib/filter.py
|
lib/filter.py
|
from subprocess import Popen
from subprocess import PIPE
from subprocess import TimeoutExpired
import threading
from Dart import PluginLogger
from Dart.lib.plat import supress_window
_logger = PluginLogger(__name__)
class TextFilter(object):
    '''Filters text through an external program (sync).

    The external command is (re)started on every ``filter()`` call; input
    is encoded with ``in_encoding`` and the program's stdout is decoded
    with ``out_encoding``.
    '''
    def __init__(self, args, timeout=10):
        # args: argv list for the external program.
        # timeout: seconds to wait for the program's output.
        self.args = args
        self.timeout = timeout
        # Encoding the external program likes to receive.
        self.in_encoding = 'utf-8'
        # Encoding the external program will emit.
        self.out_encoding = 'utf-8'
        self._proc = None

    def encode(self, text):
        # BUG FIX: was ``self.in_ecoding`` (typo), which raised
        # AttributeError whenever encode() was called.
        return text.encode(self.in_encoding)

    def decode(self, encoded_bytes):
        return encoded_bytes.decode(self.out_encoding)

    def clean(self, text):
        # Normalize CRLF output and drop trailing whitespace.
        return text.replace('\r', '').rstrip()

    def _start(self):
        '''Launch the external process; logs (and swallows) OSError.'''
        try:
            self._proc = Popen(self.args,
                               stdout=PIPE,
                               stderr=PIPE,
                               stdin=PIPE,
                               startupinfo=supress_window())
        except OSError as e:
            _logger.error('while starting text filter program: %s', e)
            return

    def filter(self, input_text):
        '''Run ``input_text`` through the program; ``None`` on failure.'''
        self._start()
        try:
            # use self.encode() so a customised in_encoding is honoured
            # (the original inlined input_text.encode(self.in_encoding))
            in_bytes = self.encode(input_text)
            out_bytes, err_bytes = self._proc.communicate(in_bytes,
                                                          self.timeout)
            return self.clean(self.decode(out_bytes))
        except TimeoutExpired:
            _logger.debug('text filter program response timed out')
            return None
        except Exception as e:
            _logger.error('while running TextFilter: %s', e)
            return None
|
Python
| 0
|
|
c7da0ed13838150f0276c4c9f425390822b5b43b
|
Add serializers for API models.
|
vinotes/apps/api/serializers.py
|
vinotes/apps/api/serializers.py
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Note, Trait, Wine, Winery
class WinerySerializer(serializers.ModelSerializer):
    """Serializes a Winery to its id and name."""
    class Meta:
        model = Winery
        fields = ('id', 'name')
class WineSerializer(serializers.ModelSerializer):
    """Serializes a Wine with its winery reference and vintage."""
    class Meta:
        model = Wine
        fields = ('id', 'winery', 'name', 'vintage')
class TraitSerializer(serializers.ModelSerializer):
    """Serializes a tasting Trait to its id and name."""
    class Meta:
        model = Trait
        fields = ('id', 'name')
class NoteSerializer(serializers.ModelSerializer):
    """Serializes a tasting Note, including its four trait groups."""
    class Meta:
        model = Note
        fields = ('id', 'taster', 'tasted', 'wine', 'color_traits',
            'nose_traits', 'taste_traits', 'finish_traits', 'rating')
class UserSerializer(serializers.ModelSerializer):
    """Serializes a Django User together with their tasting notes."""
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'notes')
|
Python
| 0
|
|
383c67da4729886602227b715f65390427ccd8bc
|
Create w3_1.py
|
w3_1.py
|
w3_1.py
|
print ("Hello World!")
|
Python
| 0.000482
|
|
66afbaab9abe51a83d6ea9765b7b8b70d045115e
|
Create question2.py
|
dingshubo/question2.py
|
dingshubo/question2.py
|
#_*_ coding:utf-8 _*_
#!/user/bin/python
import random
number_random = random.randint(1, 100)
# Number-guessing game: the player gets 5 attempts.  Python 2 script --
# ``input()`` evaluates the typed text, so digits arrive as an int.
for chance in range(5):
    number_player = input('请输入一个1-100之间的整数:')
    if number_player == number_random:
        # BUG FIX: originally a correct guess was only recognised after
        # the 5th prompt; end the game as soon as the player is right.
        print('恭喜你答对了')
        break
    elif number_player > number_random:
        print('这个数字偏大')
    else:
        print('这个数字偏小')
    if chance == 4:
        # out of attempts -- reveal the answer
        print('正确答案是:%s' % number_random)
    else:
        print('你还有%d次机会' % (4 - chance))
|
Python
| 0.999772
|
|
3189cd139b868d74caf35aa5b7a80f748f21c231
|
add tool to process brian's files
|
scripts/import/import_brian_files.py
|
scripts/import/import_brian_files.py
|
import glob
import os
# Move Brian's files out of working directory "c" into the /i tree,
# reorganised as /i/<type>/<huc12>/<original filename>.
os.chdir("c")
for filename in glob.glob("*"):
    # filenames look like <prefix>_<huc12>_<name>.<type> -- TODO confirm
    tokens = filename.split("_")
    huc12 = tokens[1]
    typ = tokens[2].split(".")[1]
    newfn = "/i/%s/%s/%s" % (typ, huc12, filename)
    os.rename(filename, newfn)
|
Python
| 0
|
|
e73b5fadbcff141fab2478954345ebaac22d8e63
|
add K-means
|
K-means/K-means.py
|
K-means/K-means.py
|
'''
Created on Apr 30, 2017
@author: Leo Zhong
'''
import numpy as np
# Function: K Means
# -------------
# K-Means is an algorithm that takes in a dataset and a constant
# k and returns k centroids (which define clusters of data in the
# dataset which are similar to one another).
def kmeans(X, k, maxIt):
    """Cluster the rows of X into k groups with the classic k-means loop.

    Args:
        X: (numPoints, numDim) array of samples.
        k: number of clusters; labels are 1..k.
        maxIt: iteration budget for the main loop.

    Returns:
        (numPoints, numDim + 1) array: X with a trailing column holding
        each point's cluster label.
    """
    #get col and row
    numPoints, numDim = X.shape
    dataSet = np.zeros((numPoints, numDim + 1))
    dataSet[:, :-1] = X
    # Initialize centroids as k rows drawn from the data; randint samples
    # with replacement, so duplicate initial centroids are possible.
    centroids = dataSet[np.random.randint(numPoints, size = k), :]
    #Randomly assign labels to initial centorid
    centroids[:, -1] = range(1, k +1)
    # Initialize book keeping vars.
    iterations = 0
    oldCentroids = None
    # Run the main k-means algorithm (debug prints each iteration)
    while not shouldStop(oldCentroids, centroids, iterations, maxIt):
        print ("iteration: \n", iterations)
        print ("dataSet: \n", dataSet)
        print ("centroids: \n", centroids)
        # Save old centroids for convergence test. Book keeping.
        oldCentroids = np.copy(centroids)
        iterations += 1
        # Assign labels to each datapoint based on centroids
        updateLabels(dataSet, centroids)
        # Assign centroids based on datapoint labels
        centroids = getCentroids(dataSet, k)
    # We can get the labels too by calling getLabels(dataSet, centroids)
    return dataSet
# Function: Should Stop
# -------------
# Returns True or False if k-means is done. K-means terminates either
# because it has run a maximum number of iterations OR the centroids
# stop changing.
def shouldStop(oldCentroids, centroids, iterations, maxIt):
    """Termination test: iteration budget exceeded, or centroids unchanged."""
    exceeded_budget = iterations > maxIt
    converged = np.array_equal(oldCentroids, centroids)
    return True if exceeded_budget else converged
# Function: Get Labels
# -------------
# Update a label for each piece of data in the dataset.
def updateLabels(dataSet, centroids):
    """Overwrite each row's label column with its nearest centroid's label."""
    # For each element in the dataset, chose the closest centroid.
    # Make that centroid the element's label.
    numPoints, numDim = dataSet.shape
    for i in range(0, numPoints):
        dataSet[i, -1] = getLabelFromClosestCentroid(dataSet[i, :-1], centroids)
def getLabelFromClosestCentroid(dataSetRow, centroids):
    """Return the label of the centroid nearest (Euclidean) to dataSetRow.

    ``centroids`` rows are [coords..., label]; only the coordinate part
    is compared.  Ties keep the earlier centroid.
    """
    label = centroids[0, -1]
    minDist = np.linalg.norm(dataSetRow - centroids[0, :-1])
    for i in range(1, centroids.shape[0]):
        dist = np.linalg.norm(dataSetRow - centroids[i, :-1])
        if dist < minDist:
            minDist = dist
            label = centroids[i, -1]
    # FIX: removed leftover ``print("minDist:", ...)`` debug output that
    # flooded stdout on every call from the k-means inner loop.
    return label
# Function: Get Centroids
# -------------
# Returns k random centroids, each of dimension n.
def getCentroids(dataSet, k):
    """Recompute each centroid as the mean of its labelled member rows.

    Returns a (k, dim + 1) array whose trailing column holds the label
    (1..k).  NOTE(review): a label with no members yields a NaN mean
    (np.mean over an empty slice) -- same behaviour as before.
    """
    centroids = np.zeros((k, dataSet.shape[1]))
    labels = dataSet[:, -1]
    for row, label in enumerate(range(1, k + 1)):
        members = dataSet[labels == label, :-1]
        centroids[row, :-1] = np.mean(members, axis=0)
        centroids[row, -1] = label
    return centroids
# Smoke test: two well-separated pairs of 2-D points should split into
# two clusters of two points each.
x1 = np.array([1, 1])
x2 = np.array([2, 1])
x3 = np.array([4, 3])
x4 = np.array([5, 4])
testX = np.vstack((x1, x2, x3, x4))
result = kmeans(testX, 2, 10)
print ("final result:")
print (result)
|
Python
| 0.999987
|
|
7e17363eaf8d17f0d595ca5199e59a51c7b1df65
|
Add the core social_pipeline.
|
oneflow/core/social_pipeline.py
|
oneflow/core/social_pipeline.py
|
# -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
It provides {python,django}-social-auth pipeline helpers.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
# from constance import config
# from django.shortcuts import redirect
from social_auth.backends.facebook import FacebookBackend
from social_auth.backends.twitter import TwitterBackend
from social_auth.backends import google
from models import (
TwitterAccount,
# FacebookAccount, FacebookFeed,
)
LOGGER = logging.getLogger(__name__)
def check_feeds(social_user, user, details, request, response, backend,
                is_new=False, *args, **kwargs):
    """ Create Accounts & feeds associated with social networks.

    Dispatches on the social-auth backend type; currently only Twitter
    accounts are checked (Facebook / Google branches are placeholders).
    Failures are logged and swallowed so the auth pipeline keeps running.
    """
    try:
        if isinstance(backend, FacebookBackend):
            pass

        elif isinstance(backend, google.GoogleOAuth2Backend):
            pass

        elif isinstance(backend, TwitterBackend):
            TwitterAccount.check_social_user(social_user, user, backend)

    except Exception:
        # FIX: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed; the deliberate
        # best-effort log-and-continue behaviour is preserved.
        LOGGER.exception(u'Could not check feeds for user %s from '
                         u'backend %s.', user, social_user)
|
Python
| 0
|
|
ee533a5e2a4eff99641383741e1cbe8e57c43e1f
|
add typing stub/compat package
|
gosubl/typing.py
|
gosubl/typing.py
|
# Compatibility shim: use the real ``typing`` module on new Sublime Text
# builds, or minimal subscriptable dummy classes on old ones so that
# annotations like ``Dict[str, int]`` still evaluate without error.
try:
    # ST builds >= 4000
    from mypy_extensions import TypedDict
    from typing import Any
    from typing import Callable
    from typing import Dict
    from typing import Generator
    from typing import IO
    from typing import Iterable
    from typing import Iterator
    from typing import List
    from typing import Mapping
    from typing import Optional
    from typing import Set
    from typing import Tuple
    from typing import Type
    from typing import Union
    from typing_extensions import Protocol
except ImportError:
    # ST builds < 4000
    def _make_type(name: str) -> '_TypeMeta':
        # Forward reference: _TypeMeta is defined just below.
        return _TypeMeta(name, (Type,), {})  # type: ignore

    class _TypeMeta(type):
        # Metaclass that makes the dummy types subscriptable, so that
        # e.g. ``List[int]`` evaluates to another (named) dummy type.
        def __getitem__(self, args: 'Any') -> 'Any':
            if not isinstance(args, tuple):
                args = (args,)
            name = '{}[{}]'.format(
                str(self),
                ', '.join(map(str, args))
            )
            return _make_type(name)

        def __str__(self) -> str:
            return self.__name__

    class Type(metaclass=_TypeMeta):  # type: ignore
        pass

    class TypedDict(Type, dict):  # type: ignore
        def __init__(*args, **kwargs) -> None:  # type: ignore
            pass

    class Any(Type):  # type: ignore
        pass

    class Callable(Type):  # type: ignore
        pass

    class Dict(Type):  # type: ignore
        pass

    class Generator(Type):  # type: ignore
        pass

    class IO(Type):  # type: ignore
        pass

    class Iterable(Type):  # type: ignore
        pass

    class Iterator(Type):  # type: ignore
        pass

    class List(Type):  # type: ignore
        pass

    class Mapping(Type):  # type: ignore
        pass

    class Optional(Type):  # type: ignore
        pass

    class Set(Type):  # type: ignore
        pass

    class Tuple(Type):  # type: ignore
        pass

    class Union(Type):  # type: ignore
        pass

    # Protocol needs no subscript support here; an inert object suffices.
    Protocol = object  # type: ignore
|
Python
| 0
|
|
2761e3bfd8d2c8281db565e54f6e3ea687bd5663
|
add backfill problem_id script
|
private/scripts/extras/backfill_problem_id.py
|
private/scripts/extras/backfill_problem_id.py
|
"""
Copyright (c) 2015-2019 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
# Python 2 / web2py-shell script: ``db`` is assumed to be provided by the
# framework environment this runs inside -- TODO confirm.
ptable = db.problem
stable = db.submission
# Build the problem link -> problem id map once, so each submission row
# can be backfilled without an extra query.
links = db(ptable).select(ptable.id, ptable.link)
plink_to_id = dict([(x.link, x.id) for x in links])
BATCH_SIZE = 25000
# Walk the submission table in batches to bound memory use.
for i in xrange(10000):
    rows = db(stable).select(limitby=(i * BATCH_SIZE, (i + 1) * BATCH_SIZE))
    print rows.first().id, rows.last().id,
    updated = 0
    for srecord in rows:
        # only fill rows that are missing problem_id and whose link is known
        if srecord.problem_id is None and \
           srecord.problem_link in plink_to_id:
            srecord.update_record(problem_id=plink_to_id[srecord.problem_link])
            updated += 1
    if updated > 0:
        db.commit()
        # brief pause so the backfill does not hammer the database
        time.sleep(0.1)
        print "updated", updated
    else:
        print "no updates"
|
Python
| 0.000001
|
|
a3de0337f6e3511cc3381f92f7bbc384d7667dfd
|
Create xmas.py
|
xmas.py
|
xmas.py
|
# Print the full "Twelve Days of Christmas" lyrics.
gifts = ['A Partridge in a Pear Tree', 'Two Turtle Doves, and', 'Three French Hens', 'Four Calling Birds', 'Five Golden Rings', 'Six Geese-a-Laying', 'Seven Swans-a-Swimming', 'Eight Maids-a-Milking', 'Nine Ladies Dancing', 'Ten Lords-a-Leaping', 'Eleven Pipers Piping', 'Twelve Drummers Drumming']
ordinal = ['st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th', 'th', 'th', 'th']
for day in range(12):
    print('On the ' + str(day + 1) + str(ordinal[day]) + ' day of Christmas, my true love sent to me...')
    # recite the gifts newest-first, down to the partridge
    for gift in range(day, -1, -1):
        print(str(gifts[gift]))
    print('\n')
|
Python
| 0.999876
|
|
8fa4888dbf82d225f52b6df347372a0381c08237
|
Add __main__.py for running python -m grip.
|
grip/__main__.py
|
grip/__main__.py
|
"""\
Grip
----
Render local readme files before sending off to Github.
:copyright: (c) 2014 by Joe Esposito.
:license: MIT, see LICENSE for more details.
"""
from command import main
if __name__ == '__main__':
    # NOTE(review): ``from command import main`` above relies on Python 2
    # implicit relative imports -- confirm Python 3 needs ``from .command``.
    main()
|
Python
| 0.000006
|
|
95874a5e06ff70d1cbea49321549beee5cc5abba
|
Create an example of storing units in HDF5
|
examples/store_and_retrieve_units_example.py
|
examples/store_and_retrieve_units_example.py
|
"""
Author: Daniel Berke, berke.daniel@gmail.com
Date: October 27, 2019
Requirements: h5py>=2.10.0, unyt>=v2.4.0
Notes: This short example script shows how to save unit information attached
to a `unyt_array` using `attrs` in HDF5, and recover it upon reading the file.
It uses the Unyt package (https://github.com/yt-project/unyt) because that's
what I'm familiar with, but presumably similar options exist for Pint and
astropy.units.
"""
import h5py
import tempfile
import unyt as u
# Set up a temporary file for this example.
tf = tempfile.TemporaryFile()
f = h5py.File(tf, 'a')
# Create some mock data with moderately complicated units (this is the
# dimensional representation of Joules of energy).
test_data = [1, 2, 3, 4, 5] * u.kg * ( u.m / u.s ) ** 2
print(test_data.units)
# kg*m**2/s**2
# Create a data set to hold the numerical information:
f.create_dataset('stored data', data=test_data)
# Save the units information as a string in `attrs`; HDF5 itself has no
# native notion of physical units, so the string round-trip is the point
# of this example.
f['stored data'].attrs['units'] = str(test_data.units)
# Now recover the data, using the saved units information to reconstruct the
# original quantities.
reconstituted_data = u.unyt_array(f['stored data'],
                                  units=f['stored data'].attrs['units'])
print(reconstituted_data.units)
# kg*m**2/s**2
assert reconstituted_data.units == test_data.units
|
Python
| 0.000001
|
|
406d038232a44c2640d334015ef3485ac115074d
|
Create PyAPT.py
|
PyAPT.py
|
PyAPT.py
|
# -*- coding: utf-8 -*-
"""
APT Motor Controller for Thorlabs
Adopted from
https://github.com/HaeffnerLab/Haeffner-Lab-LabRAD-Tools/blob/master/cdllservers/APTMotor/APTMotorServer.py
With thanks to SeanTanner@ThorLabs for providing APT.dll ad APT.lib
V1.0
20141125 V1.0 First working version
Michael Leung
mcleung@stanford.edu
"""
from ctypes import c_long, c_buffer, c_float, windll, pointer
class APTMotor():
def __init__(self, SerialNum, HWTYPE=31):
'''
HWTYPE_BSC001 11 // 1 Ch benchtop stepper driver
HWTYPE_BSC101 12 // 1 Ch benchtop stepper driver
HWTYPE_BSC002 13 // 2 Ch benchtop stepper driver
HWTYPE_BDC101 14 // 1 Ch benchtop DC servo driver
HWTYPE_SCC001 21 // 1 Ch stepper driver card (used within BSC102,103 units)
HWTYPE_DCC001 22 // 1 Ch DC servo driver card (used within BDC102,103 units)
HWTYPE_ODC001 24 // 1 Ch DC servo driver cube
HWTYPE_OST001 25 // 1 Ch stepper driver cube
HWTYPE_MST601 26 // 2 Ch modular stepper driver module
HWTYPE_TST001 29 // 1 Ch Stepper driver T-Cube
HWTYPE_TDC001 31 // 1 Ch DC servo driver T-Cube
HWTYPE_LTSXXX 42 // LTS300/LTS150 Long Travel Integrated Driver/Stages
HWTYPE_L490MZ 43 // L490MZ Integrated Driver/Labjack
HWTYPE_BBD10X 44 // 1/2/3 Ch benchtop brushless DC servo driver
'''
self.aptdll = windll.LoadLibrary("APT.dll")
self.aptdll.EnableEventDlg(True)
self.aptdll.APTInit()
#print 'APT initialized'
self.HWType = c_long(HWTYPE) # 31 means TDC001 controller
self.SerialNum = c_long(SerialNum)
#self.SerialNum = self.getSerialNumber(0)
#print self.SerialNum
self.initializeHardwareDevice()
def getNumberOfHardwareUnits(self):
numUnits = c_long()
self.aptdll.GetNumHWUnitsEx(self.HWType, pointer(numUnits))
return numUnits.value
def getSerialNumber(self, index):
HWSerialNum = c_long()
hardwareIndex = c_long(index)
self.aptdll.GetHWSerialNumEx(self.HWType, hardwareIndex, pointer(HWSerialNum))
return HWSerialNum
def initializeHardwareDevice(self):
self.aptdll.InitHWDevice(self.SerialNum)
# need some kind of error reporting here
return True
''' Interfacing with the motor settings '''
def getHardwareInformation(self):
model = c_buffer(255)
softwareVersion = c_buffer(255)
hardwareNotes = c_buffer(255)
self.aptdll.GetHWInfo(self.SerialNum, model, 255, softwareVersion, 255, hardwareNotes, 255)
hwinfo = [model.value, softwareVersion.value, hardwareNotes.value]
return hwinfo
def getStageAxisInformation(self):
minimumPosition = c_float()
maximumPosition = c_float()
units = c_long()
pitch = c_float()
self.aptdll.MOT_GetStageAxisInfo(self.SerialNum, pointer(minimumPosition), pointer(maximumPosition), pointer(units), pointer(pitch))
stageAxisInformation = [minimumPosition.value, maximumPosition.value, units.value, pitch.value]
return stageAxisInformation
def setStageAxisInformation(self, minimumPosition, maximumPosition):
minimumPosition = c_float(minimumPosition)
maximumPosition = c_float(maximumPosition)
units = c_long(1) #units of mm
# Get different pitches of lead screw for moving stages for different lasers.
pitch = c_float(self.config.get_pitch())
self.aptdll.MOT_SetStageAxisInfo(self.SerialNum, minimumPosition, maximumPosition, units, pitch)
return True
def getHardwareLimitSwitches(self):
reverseLimitSwitch = c_long()
forwardLimitSwitch = c_long()
self.aptdll.MOT_GetHWLimSwitches(self.SerialNum, pointer(reverseLimitSwitch), pointer(forwardLimitSwitch))
hardwareLimitSwitches = [reverseLimitSwitch.value, forwardLimitSwitch.value]
return hardwareLimitSwitches
def getVelocityParameters(self):
minimumVelocity = c_float()
acceleration = c_float()
maximumVelocity = c_float()
self.aptdll.MOT_GetVelParams(self.SerialNum, pointer(minimumVelocity), pointer(acceleration), pointer(maximumVelocity))
velocityParameters = [minimumVelocity.value, acceleration.value, maximumVelocity.value]
return velocityParameters
def setVelocityParameters(self, minVel, acc, maxVel):
minimumVelocity = c_float(minVel)
acceleration = c_float(acc)
maximumVelocity = c_float(maxVel)
self.aptdll.MOT_SetVelParams(self.SerialNum, minimumVelocity, acceleration, maximumVelocity)
return True
def getVelocityParameterLimits(self):
maximumAcceleration = c_float()
maximumVelocity = c_float()
self.aptdll.MOT_GetVelParamLimits(self.SerialNum, pointer(maximumAcceleration), pointer(maximumVelocity))
velocityParameterLimits = [maximumAcceleration.value, maximumVelocity.value]
return velocityParameterLimits
''' Controlling the motors '''
def getPosition(self):
'''
Obtain the current absolute position of the stage
'''
position = c_float()
self.aptdll.MOT_GetPosition(self.SerialNum, pointer(position))
return position.value
def moveRelative(self, relDistance):
'''
Move a relative distance specified
'''
relativeDistance = c_float(relDistance)
self.aptdll.MOT_MoveRelativeEx(self.SerialNum, relativeDistance, True)
return True
def moveAbsolute(self, absPosition):
'''
Moves the motor to the Absolute position specified
absPosition float Position desired
'''
absolutePosition = c_float(absPosition)
self.aptdll.MOT_MoveAbsoluteEx(self.SerialNum, absolutePosition, True)
return True
def moveRelCon(self, relDistance, moveVel=0.5):
'''
Move a relative distance at a controlled velocity
'''
# Save velocities to reset after move
minVel, acc, maxVel = self.getVelocityParameters()
# Set new desired max velocity
self.setVelocityParameters(minVel,acc,moveVel)
self.moveRelative(relDistance)
self.setVelocityParameters(minVel,acc,maxVel)
return True
''' Miscelaneous '''
def identify(self):
''' Causes the motor to blink the Active LED '''
self.aptdll.MOT_Identify(self.SerialNum)
return True
def cleanUpAPT(self):
''' Closes the APT?? '''
self.aptdll.APTCleanUp()
print 'APT cleaned up'
|
Python
| 0.000001
|
|
4fe50fda289be7db3fb96450e713eb8f1a815026
|
Add weighted linear algorithm
|
autoscaler/server/scaling/algorithms/weighted.py
|
autoscaler/server/scaling/algorithms/weighted.py
|
import math
from autoscaler.server.request_history import RequestHistory
from autoscaler.server.scaling.utils import parse_interval
class WeightedScalingAlgorithm:
    """Derives an instance count from a weighted window of request history."""

    def __init__(self, algorithm_config):
        # Interval length (e.g. "5m") parsed to seconds.
        self.interval_seconds = parse_interval(
            algorithm_config['interval']
        )
        # How many requests one instance is expected to absorb per interval.
        self.requests_per_instance_interval = (
            algorithm_config['requests_per_instance_interval']
        )
        # One weight per past interval; normalised at use time.
        self.weights = algorithm_config['weights']

    def get_instance_count(self, request_history: RequestHistory):
        """Return the desired instance count (always at least 1)."""
        intervals = request_history.get_last_intervals(
            self.interval_seconds, len(self.weights)
        )
        weighted_request_count = 0
        for weight, interval in zip(self._normalized_weights(self.weights),
                                    intervals):
            weighted_request_count += weight * len(interval)
        needed = math.ceil(
            weighted_request_count / self.requests_per_instance_interval
        )
        return max(1, needed)

    @staticmethod
    def _normalized_weights(weights):
        # Rescale so the weights sum to 1.
        total = sum(weights)
        return [weight / total for weight in weights]
|
Python
| 0.000596
|
|
ca43479fc10505b04ec8861de074f25c80c6f5e1
|
add rhythm description module
|
rhythm_features.py
|
rhythm_features.py
|
from __future__ import division, print_function
import os
import numpy as np
import utils
onsets_dir = ''
beats_dir = ''
def compute_and_write(data_dir, track_list=None, features=None):
    """Compute frame-based features for all audio files in a folder.

    Args:
        data_dir (str): where to write features
        track_list (str or None): list of file ids. Set to None to infer from
            files in onsets_dir.
        features (dict): dictionary with (unique) feature names as keys and
            tuples as values, each containing a feature extraction function and a
            parameter dictionary.
            Feature extraction functions can be any function that returns one
            or more 1d or 2d-arrays that share their first dimension.

    Required global variables:
        beats_dir (str): where to find beat data
        onsets_dir (str): where to find onset data
    """
    if track_list is None:
        # BUG FIX: was ``os.listdir(ioi_dir)`` -- ``ioi_dir`` is never
        # defined in this module (NameError); onset files live in onsets_dir.
        track_list = [filename.split('.')[0] for filename in os.listdir(onsets_dir)]
    if features is None:
        features = {'ioihist': (get_ioi_hist, {})}
    for track_id in track_list:
        print("Computing features for track {}...".format(track_id))
        for feature in features:
            # run feature function
            func, params = features[feature]
            X = func(track_id, **params)
            # normalize (!) and flatten
            X = X.flatten() / np.sum(X)
            # write
            utils.write_feature(X, [data_dir, feature, track_id])
def get_ioi_hist(track_id, min_length=-7, max_length=0, step=1):
    """Compute a normalized histogram of log2 inter-onset intervals.

    Bins are spaced between ``min_length`` (def: -7) and ``max_length``
    (def: 0) in steps of ``step``, in log2-IOI units.
    """
    t, ioi = get_norm_ioi(track_id)
    log_ioi = np.log2(ioi)
    halfstep = step / 2.0
    # int(): module uses future division, so this expression is a float,
    # and np.linspace requires an integral ``num``.
    nbins = int((max_length - min_length) / step + 1)
    # BUG FIX: bin edges used undefined names ``minpitch``/``maxpitch``
    # (copied from a pitch-histogram routine); use the IOI bounds.
    binedges = np.linspace(min_length - halfstep, max_length + halfstep, nbins + 1)
    ioi_hist, _ = np.histogram(log_ioi, binedges)
    ioi_hist = ioi_hist / np.sum(ioi_hist)
    return ioi_hist
def get_beats(track_id):
    """Read beat data from file beats_dir + track_id + '.csv'.

    File should contain a time column followed by one column of
    beat intervals.

    Returns:
        (t, beat_intervals) as produced by utils.read_feature.
    """
    beats_file = os.path.join(beats_dir, track_id + '.csv')
    t, beat_intervals = utils.read_feature(beats_file, time=True)
    return t, beat_intervals
def get_onsets(track_id):
    """Read ioi data from file onsets_dir + track_id + '.csv'.

    File should contain a time column followed by one column of
    inter-onset intervals.

    Returns:
        (t, ioi) as produced by utils.read_feature.
    """
    onsets_file = os.path.join(onsets_dir, track_id + '.csv')
    t, ioi = utils.read_feature(onsets_file, time=True)
    return t, ioi
# TODO
def get_norm_ioi(track_id):
    # Unimplemented stub: get_ioi_hist() unpacks its result as (t, ioi),
    # so the current ``None`` return will crash at that call site.
    pass
if __name__ == '__main__':
    # BUG FIX: ``sys`` was used here but never imported anywhere in the
    # module, so running the script raised NameError immediately.
    import sys
    compute_and_write(sys.argv[1], sys.argv[2])
|
Python
| 0.000001
|
|
a726625e13ac08d0b6c2c686de476b6e78bc0f48
|
Add unit test for _skeleton
|
dlstats/fetchers/test__skeleton.py
|
dlstats/fetchers/test__skeleton.py
|
import unittest
from datetime import datetime
from _skeleton import Dataset
class DatasetTestCase(unittest.TestCase):
    """Construction smoke-tests for the fetcher ``Dataset`` model."""
    def test_full_example(self):
        # Every keyword argument supplied, including the optional doc_href.
        self.assertIsInstance(Dataset(provider='Test provider',name='GDP',dataset_code='nama_gdp_fr',dimension_list=[{'name':'COUNTRY','values':[('FR','France'),('DE','Germany')]}],doc_href='rasessr',last_update=datetime(2014,12,2)),Dataset)
    def test_empty_doc_href(self):
        # Same as above but without doc_href, which must be optional.
        self.assertIsInstance(Dataset(provider='Test provider',name='GDP',dataset_code='nama_gdp_fr',dimension_list=[{'name':'COUNTRY','values':[('FR','France'),('DE','Germany')]}],last_update=datetime(2014,12,2)),Dataset)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
|
e54c82c336827c1fc835837006885c245a05e5cb
|
Add html stripper for announcements
|
html_stripper.py
|
html_stripper.py
|
from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
    """HTML parser that discards markup and keeps only the text content."""

    def __init__(self):
        super().__init__()
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        # accumulates the text fragments seen between tags
        self.fed = []

    def handle_data(self, chunk):
        # called by HTMLParser for each run of raw text
        self.fed.append(chunk)

    def get_data(self):
        """Return all collected text as a single string."""
        return ''.join(self.fed)
def strip_tags(html):
    """Return ``html`` with all markup removed, keeping only text."""
    stripper = HTMLStripper()
    stripper.feed(html)
    return stripper.get_data()
|
Python
| 0
|
|
20830e9fb2785eda94bf9e7c0dab70d476bc82b4
|
Add `sample_settings.py`
|
sample_settings.py
|
sample_settings.py
|
# Rename this file to `settings.py` in deployment
# supported_subreddits = 'india'
# Subreddits the bot watches, '+'-joined in Reddit's multi-sub syntax.
supported_subreddits = 'india+indianbooks'
# NOTE(review): implicit string concatenation here produces
# "...goodreadslink to a book..." -- a space seems to be missing.
user_agent = ('Goodreads, v0.1. Gives info of the book whenever goodreads'
              'link to a book is posted. (by /u/avinassh)')
# OAuth scopes the bot requests.
scopes = ['identity', 'submit', 'privatemessages', 'read']
# presumably throttles request rate when True -- confirm against usage
be_gentle_to_reddit = True

# reddit app (placeholder credentials -- replace in deployment)
app_key = 'K...q'
app_secret = 'y...i'

# bot account
access_token = '3...R'
refresh_token = '3...m'

# good reads
goodreads_api_key = '5...v'
goodreads_api_secret = 'T...4'
|
Python
| 0
|
|
638c6383acf4431c95327fd0cbdb535e115e027d
|
Create admin util for user management.
|
flow-admin.py
|
flow-admin.py
|
#!/usr/bin/env python
#
# To ensure you can import rhizo-server modules set PYTHONPATH
# to point to rhize-server base dir.
# E.g.
# export PYTHONPATH=/home/user/rhizo-server/
#
from optparse import OptionParser
from main.users.auth import create_user
from main.users.models import User, OrganizationUser
from main.resources.resource_util import find_resource, _create_folders
from main.app import db
if __name__ == '__main__':
parser = OptionParser()
parser.add_option( '-c',
'--create-user',
dest='flow_user_spec',
help='Create flow user specified in the format email:username:password:fullname',
default='')
parser.add_option( '-d',
'--delete-user',
dest='delete_username',
help='Delete flow user specified by username',
default='')
(options, args) = parser.parse_args()
if options.flow_user_spec:
parts = options.flow_user_spec.split(':')
email = parts[0]
username = parts[1]
password = parts[2]
fullname = parts[3]
assert '.' in email and '@' in email
#
# Create user
#
print("Creating user %s" % (username))
user_id = create_user( email,
username,
password,
fullname,
User.STANDARD_USER)
#
# Add user to flow organization
#
print("Creating organization user.")
org_user = OrganizationUser()
org_user.organization_id = find_resource('/testing').id
org_user.user_id = user_id
org_user.is_admin = False
db.session.add(org_user)
db.session.commit()
#
# Create a folder for this user to store their programs
#
student_folder = 'testing/student-folders/%s' % (username)
print("Creating student folder %s." % (student_folder))
_create_folders(student_folder)
print('Created flow user: %s' % (email))
elif options.delete_username:
#
# Delete the specified user by username
#
username = options.delete_username
user = User.query.filter(User.user_name == username).first()
if user is None:
print("No such user %s." % (username))
exit(1)
#
# Delete user folder
#
student_folder = find_resource('/testing/student-folders/%s' % (username))
if student_folder is not None:
print("Deleting student folder %s." % (student_folder.name))
db.session.delete(student_folder)
db.session.commit()
else:
print("No student folder to delete.")
#
# Delete organization user
#
org_id = find_resource('/testing').id
org_user = OrganizationUser.query.filter(
OrganizationUser.organization_id == org_id,
OrganizationUser.user_id == user.id ).first()
if org_user is not None:
print("Deleting organization user.")
db.session.delete(org_user)
db.session.commit()
else:
print("No organization user to delete.")
#
# Now delete the user
#
db.session.delete(user)
db.session.commit()
print('Deleted flow user: %s.' % (username))
|
Python
| 0
|
|
7af1a75b26ecdf1c7932169f2904a74db50c5b5d
|
Add release.py, updated to use Darcs.
|
sandbox/release.py
|
sandbox/release.py
|
#!/usr/bin/env python
import os
import re
import sys
import ftplib
import shutil
def firstLines(filename, n):
    """Return the first n lines of *filename* with trailing CR/LF stripped.

    If the file has fewer than n lines, the missing entries are empty
    strings (readline() returns '' at EOF), matching the original behavior.
    """
    lines = []
    # open() instead of the Python-2-only file() builtin; the with-block
    # also closes the handle, which the original version leaked.
    with open(filename) as fd:
        for _ in range(n):
            lines.append(fd.readline().rstrip('\r\n'))
    return lines
def firstLine(filename):
    """Return only the first line of *filename* (newline stripped)."""
    (line,) = firstLines(filename, 1)
    return line
def error(s):
    """Print *s* to stderr and abort the script with exit code -1."""
    sys.stderr.write('%s\n' % s)
    sys.exit(-1)
def system(sh, errmsg=None):
    """Run shell command *sh*; on non-zero exit, abort via error().

    errmsg defaults to repr(sh) and has the exit status appended.
    """
    message = repr(sh) if errmsg is None else errmsg
    status = os.system(sh)
    if status:
        error(message + ' (error code: %s)' % status)
# Release driver: check out a fresh tree, verify version metadata, tag,
# package, and upload a Supybot release.  Python 2 only (print statements).
if __name__ == '__main__':
    if len(sys.argv) < 3:
        error('Usage: %s <sf username> <version>\n' % sys.argv[0])
    print 'Check version string for validity.'
    # NOTE(review): unpacking fails with ValueError if MORE than two
    # arguments are given; only the too-few case is checked above.
    (u, v) = sys.argv[1:]
    if not re.match(r'^\d+\.\d+\.\d+\w*$', v):
        error('Invalid version string: '
              'must be of the form MAJOR.MINOR.PATCHLEVEL.')
    # NOTE(review): 'rmeove' typo in the user-facing message below.
    if os.path.exists('supybot'):
        error('I need to make the directory "supybot" but it already exists.'
              ' Change to an appropriate directory or rmeove the supybot '
              'directory to continue.')
    print 'Checking out fresh tree from Darcs.'
    system('darcs get http://source.supybot.com/supybot/')
    os.chdir('supybot')
    # Sanity-check that RELNOTES/ChangeLog were updated for this version.
    print 'Checking RELNOTES version line.'
    if firstLine('RELNOTES') != 'Version %s' % v:
        error('Invalid first line in RELNOTES.')
    print 'Checking ChangeLog version line.'
    (first, _, third) = firstLines('ChangeLog', 3)
    if not re.match(r'^200\d-\d{2}-\d{2}\s+\w+.*<\S+@\S+>$', first):
        error('Invalid first line in ChangeLog.')
    if not re.match(r'^\t\* Version %s!$' % v, third):
        error('Invalid third line in ChangeLog.')
    # Rewrite "version = ..." lines in-place via perl, then record in darcs.
    print 'Updating version in version files.'
    versionFiles = ('src/conf.py', 'scripts/supybot', 'setup.py')
    for fn in versionFiles:
        sh = 'perl -pi -e "s/^version\s*=.*/version = \'%s\'/" %s' % (v, fn)
        system(sh, 'Error changing version in %s' % fn)
    system('darcs record -m "Updated to %s." %s' % (v, ' '.join(versionFiles)))
    print 'Tagging release.'
    system('darcs tag -m release-%s' % v.replace('.', '_'))
    # Strip development-only directories from the release tree.
    print 'Removing test, sandbox, and _darcs.'
    shutil.rmtree('test')
    shutil.rmtree('sandbox')
    system('find . -name _darcs | xargs rm -rf')
    os.chdir('..')
    dirname = 'Supybot-%s' % v
    print 'Renaming directory to %s.' % dirname
    if os.path.exists(dirname):
        shutil.rmtree(dirname)
    shutil.move('supybot', dirname)
    print 'Creating tarball (gzip).'
    system('tar czvf Supybot-%s.tar.gz %s' % (v, dirname))
    print 'Creating tarball (bzip2).'
    system('tar cjvf Supybot-%s.tar.bz2 %s' % (v, dirname))
    print 'Creating zip.'
    system('zip -r Supybot-%s.zip %s' % (v, dirname))
    # Anonymous FTP upload of the three artifacts to SourceForge.
    print 'Uploading package files to upload.sf.net.'
    ftp = ftplib.FTP('upload.sf.net')
    ftp.login()
    ftp.cwd('incoming')
    for filename in ['Supybot-%s.tar.gz',
                     'Supybot-%s.tar.bz2',
                     'Supybot-%s.zip']:
        filename = filename % v
        print 'Uploading %s to SF.net.' % filename
        ftp.storbinary('STOR %s' % filename, file(filename))
    ftp.close()
    # NOTE(review): message says '+darcs' but the version written is '+cvs'
    # — looks like a stale string from the CVS era; confirm intent.
    print 'Committing %s+darcs to version files.' % v
    for fn in versionFiles:
        sh = 'perl -pi -e "s/^version\s*=.*/version = \'%s\'/" %s' % \
             (v + '+cvs', fn)
        system(sh, 'Error changing version in %s' % fn)
    system('darcs record -m "Updated to %s." %s' % (v, ' '.join(versionFiles)))
    print 'Copying new version.txt over to project webserver.'
    system('echo %s > version.txt' % v)
    system('scp version.txt %s@shell.sf.net:/home/groups/s/su/supybot/htdocs'%u)
# print 'Generating documentation.'
# # docFiles is in the format {directory: files}
# docFiles = {'.': ('README', 'INSTALL', 'ChangeLog'),
# 'docs': ('config.html', 'CAPABILITIES', 'commands.html',
# 'CONFIGURATION', 'FAQ', 'GETTING_STARTED',
# 'INTERFACES', 'OVERVIEW', 'PLUGIN-EXAMPLE',
# 'plugins', 'plugins.html', 'STYLE'),
# }
# system('python scripts/supybot-plugin-doc')
# pwd = os.getcwd()
# os.chmod('docs/plugins', 0775)
# sh = 'tar rf %s/docs.tar %%s' % pwd
# for (dir, L) in docFiles.iteritems():
# os.chdir(os.path.join(pwd, dir))
# system(sh % ' '.join(L))
# os.chdir(pwd)
# system('bzip2 docs.tar')
#
# print 'Uploading documentation to webspace.'
# system('scp docs.tar.bz2 %s@supybot.sf.net:/home/groups/s/su/supybot'
# '/htdocs/docs/.' % u)
# system('ssh %s@supybot.sf.net "cd /home/groups/s/su/supybot/htdocs/docs; '
# 'tar jxf docs.tar.bz2"' % u)
#
# print 'Cleaning up generated documentation.'
# shutil.rmtree('docs/plugins')
# configFiles = ('docs/config.html', 'docs/plugins.html',
# 'docs/commands.html', 'docs.tar.bz2', 'test-conf',
# 'test-data', 'test-logs', 'tmp')
# for fn in configFiles:
# os.remove(fn)
# This is the part where we do our release on Freshmeat using XMLRPC and
# <gasp> ESR's software to do it: http://freshmeat.net/p/freshmeat-submit/
|
Python
| 0
|
|
0da1d2edc0f2a01d90cfc7cbf2bb4d37d1cc58d9
|
Add examples from JModelica User's Manual (1.17.0)
|
src/ast_example.py
|
src/ast_example.py
|
# Import library for path manipulations
import os.path
# Import the JModelica.org Python packages
import pymodelica
from pymodelica.compiler_wrappers import ModelicaCompiler
# Import numerical libraries
import numpy as N
import ctypes as ct
import matplotlib.pyplot as plt
# Import JPype
import jpype
# Create a reference to the java package 'org' (via the JPype bridge).
org = jpype.JPackage('org')
# Create a compiler and compiler target object
mc = ModelicaCompiler()
# Build trees as if for an FMU or Model Exchange v 1.0
target = mc.create_target_object("me", "1.0")
# Don't parse the file if it has already been parsed.
# NOTE(review): the bare except also swallows real parse errors, and the
# idiom relies on NameError when source_root is undefined on first run.
try:
    source_root.getProgramRoot()
except:
    # Parse the file CauerLowPassAnalog.mo and get the root node
    # of the AST
    # NOTE(review): "\\Modelica" is a Windows path separator — this setup
    # presumably targets Windows; confirm before running elsewhere.
    model = mc.get_modelicapath() + "\\Modelica"
    source_root = mc.parse_model(model)
# Don't load the standard library if it is already loaded.
# NOTE(review): "except NameError, e" is Python-2-only syntax.
try:
    modelica.getName().getID()
except NameError, e:
    # Load the Modelica standard library and get the class
    # declaration AST node corresponding to the Modelica
    # package.
    modelica = source_root.getProgram().getLibNode(0). \
        getStoredDefinition().getElement(0)
def count_classes(class_decl, depth):
    """Recursively count the classes hierarchically contained in a class
    declaration AST node, printing a summary for shallow packages.

    class_decl exposes a Java-style API: classes().iterator() yields child
    ClassDecl nodes via hasNext()/next().
    """
    total = 0
    # Walk the Java iterator over this declaration's local classes.
    it = class_decl.classes().iterator()
    while it.hasNext():
        child = it.next()
        # Each child counts as one, plus everything nested inside it.
        total += 1 + count_classes(child, depth + 1)
    # Only report packages at the top two levels to keep output readable.
    if class_decl.isPackage() and depth <= 1:
        print("The package %s has %d hierarchically contained classes"%(
            class_decl.qualifiedName(), total))
    return total
# Call count_classes for 'Modelica' (counts the whole standard library).
num_classes = count_classes(modelica, 0)
# Don't re-parse the example model if a previous run already did;
# NOTE(review): bare except also hides genuine parse failures.
try:
    filter_source.getProgramRoot()
except:
    filter_source = mc.parse_model("CauerLowPassAnalog.mo")
# Don't instantiate if instance has been computed already
try:
    filter_instance.components()
except:
    # Retrieve the node
    filter_instance = mc.instantiate_model(
        filter_source, "CauerLowPassAnalog", target)
def dump_inst_ast(inst_node, indent, fid):
    """Pretty-print an instance AST node with its merged modification
    environment, then recurse into components and extends clauses."""
    env = inst_node.getMergedEnvironment()
    # Header line: "<indent><node> {mod1, mod2, ...}"
    mods = ", ".join(env.get(i).toString() for i in range(env.size()))
    fid.write(indent + inst_node.prettyPrint("") + " {" + mods + "}" + "\n")
    # Primitive variables are treated as leaves of the instance AST:
    # their children are not dumped.
    prim_class = org.jmodelica.modelica.compiler.InstPrimitive
    # First the component declarations...
    components = inst_node.instComponentDeclList
    for i in range(components.getNumChild()):
        if inst_node.getClass() is not prim_class:
            dump_inst_ast(components.getChild(i), indent + "  ", fid)
    # ...then the extends clauses.
    extends = inst_node.instExtendsList
    for i in range(extends.getNumChild()):
        if inst_node.getClass() is not prim_class:
            dump_inst_ast(extends.getChild(i), indent + "  ", fid)
# Dump the filter instance tree to out.txt (one line per instance node).
with open('out.txt', 'w') as fid:
    dump_inst_ast(filter_instance, "", fid)
print("Done!")
|
Python
| 0
|
|
55dd21610a2ed1befed6b4560528e8a6bf3602e2
|
Define function to retrieve imgur credentials
|
imgur_cli/cli.py
|
imgur_cli/cli.py
|
import argparse
import logging
import os
import imgurpython
from collections import namedtuple
logger = logging.getLogger(__name__)


def imgur_credentials():
    """Load imgur API credentials from a local config module, falling back
    to environment variables when no config module is importable.

    Returns an ImgurCredentials namedtuple; raises ImgurClientError when
    the mandatory client id/secret pair is missing.
    """
    ImgurCredentials = namedtuple('ImgurCredentials', ['client_id', 'client_secret', 'access_token', 'refresh_token', 'mashape_key'])
    keys = ('IMGUR_CLIENT_ID', 'IMGUR_CLIENT_SECRET', 'IMGUR_ACCESS_TOKEN',
            'IMGUR_REFRESH_TOKEN', 'IMGUR_MASHAPE_KEY')
    try:
        from config import config
        values = [config.get(key) for key in keys]
    except ImportError:
        # No config module available — read the same keys from the env.
        values = [os.environ.get(key) for key in keys]
    # client_id and client_secret (the first two keys) are mandatory.
    if not values[0] or not values[1]:
        raise imgurpython.client.ImgurClientError('Client credentials not found. Ensure you have both client id and client secret')
    return ImgurCredentials(*values)
|
Python
| 0.000006
|
|
d3ebb800c88be18861608f8b174cc652223ac67c
|
Add utils.py with get_options function
|
apps/ivrs/utils.py
|
apps/ivrs/utils.py
|
def get_options(question_number):
    """Return the IVR keypad prompt for the given question number.

    Question 2 uses a 4/5 choice; every other question is yes/no (1/2).
    """
    special_prompts = {2: " Press 4 or 5 "}
    return special_prompts.get(question_number, " Press 1 for Yes or 2 for No")
|
Python
| 0.000001
|
|
2c8752cd586f6d02ce8da4bc3a79660889ed7f3f
|
Add some minimal testing for BandRCModel to the test suite.
|
climlab/tests/test_bandrc.py
|
climlab/tests/test_bandrc.py
|
import numpy as np
import climlab
import pytest
# The fixtures are reusable pieces of code to set up the input to the tests.
# Without fixtures, we would have to do a lot of cutting and pasting
# I inferred which fixtures to use from the notebook
# Latitude-dependent grey radiation.ipynb
# Fixture: a fresh default BandRCModel for each test that requests it.
@pytest.fixture()
def model():
    """Return a new climlab.BandRCModel with default configuration."""
    return climlab.BandRCModel()
# helper for a common test pattern
def _check_minmax(array, amin, amax):
return (np.allclose(array.min(), amin) and
np.allclose(array.max(), amax))
def test_model_creation(model):
    """Just make sure we can create a model."""
    # A default BandRCModel is expected to have 30 atmospheric levels.
    assert len(model.Tatm)==30
def test_integrate_years(model):
    """Check that we can integrate forward the model and get the expected
    surface temperature and water vapor.
    Also check the climate sensitivity to doubling CO2."""
    model.step_forward()
    model.integrate_years(2)
    # Regression values: expected equilibrium-ish state after 2 years.
    Ts = model.Ts.copy()
    assert np.isclose(Ts, 275.43383753)
    assert _check_minmax(model.q, 5.E-6, 3.23764447e-03)
    # Double CO2 and check the warming (climate sensitivity) after 2 more years.
    model.absorber_vmr['CO2'] *= 2.
    model.integrate_years(2)
    assert np.isclose(model.Ts - Ts, 3.180993)
|
Python
| 0
|
|
c1ea660b72ac10fd0a2dea1416b45c6796ca5adb
|
add pascal voc ingest
|
ingest/pascal.py
|
ingest/pascal.py
|
#!/usr/bin/python
import json
import glob
import sys
import getopt
import collections
import os
from os.path import isfile, join
import xml.etree.ElementTree as et
from collections import defaultdict
# http://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree
def etree_to_dict(t):
    """Recursively convert an ElementTree element into nested dicts.

    Attributes become '@'-prefixed keys, repeated child tags collapse into
    lists, and text becomes either the value itself (leaf elements) or a
    '#text' key when attributes/children are also present.
    """
    d = {t.tag: {} if t.attrib else None}
    children = list(t)
    if children:
        # Group converted children by tag so repeated tags become lists.
        dd = defaultdict(list)
        for dc in map(etree_to_dict, children):
            # .items() instead of the Python-2-only .iteritems() —
            # identical behavior on 2, required on 3.
            for k, v in dc.items():
                dd[k].append(v)
        d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
    if t.attrib:
        d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                d[t.tag]['#text'] = text
        else:
            # Pure leaf: the element maps directly to its text.
            d[t.tag] = text
    return d
def validate_metadata(jobj, file):
    """Validate the shape of a parsed Pascal VOC annotation dict.

    Requires jobj['object'] to be a sequence, and each box's optional
    'part' entry to be a sequence as well.  Returns True when valid,
    False (after printing a diagnostic naming *file*) otherwise.
    """
    # Sequence ABC location changed: collections.abc on Python 3
    # (the bare collections alias was removed in 3.10), collections on 2.
    try:
        from collections.abc import Sequence
    except ImportError:  # Python 2
        from collections import Sequence
    boxlist = jobj['object']
    if not isinstance(boxlist, Sequence):
        # Fixed: the original `print('...').format(file)` printed the raw
        # template on Py2 statement-parsing quirks and raised on Py3.
        print('{0} is not a sequence'.format(file))
        return False
    for box in boxlist:
        if 'part' in box:
            parts = box['part']
            if not isinstance(parts, Sequence):
                print('parts {0} is not a sequence'.format(file))
                return False
    return True
def convert_pascal_to_json(input_path, output_path):
    """Convert every Pascal VOC XML annotation under input_path into a
    pretty-printed JSON file of the same basename under output_path.

    Files whose metadata fails validation are reported and skipped.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    xml_files = glob.glob(join(input_path, '*.xml'))
    xml_files.sort()
    for xml_path in xml_files:
        outfile = join(output_path, os.path.basename(xml_path))
        outfile = os.path.splitext(outfile)[0] + '.json'
        print(outfile)
        # Fixed: glob already returns paths rooted at input_path; the
        # original re-joined input_path onto them, which only worked when
        # input_path was absolute.
        trimmed = parse_single_file(xml_path)
        if validate_metadata(trimmed, xml_path):
            result = json.dumps(trimmed, sort_keys=True, indent=4, separators=(',', ': '))
            # with-block closes the output file (the original leaked it).
            with open(outfile, 'w') as f:
                f.write(result)
        else:
            # Fixed: format the message before printing (the original
            # called .format on print's return value).
            print('error parsing metadata {0}'.format(xml_path))
def parse_single_file(path):
    """Parse one Pascal VOC XML file and return its 'annotation' dict,
    normalizing a single 'object' entry into a one-element list."""
    root = et.parse(path).getroot()
    annotation = etree_to_dict(root)['annotation']
    objects = annotation['object']
    # A file with exactly one object yields a bare dict — wrap it so
    # downstream code can always iterate.
    if not isinstance(objects, collections.Sequence):
        annotation['object'] = [objects]
    return annotation
def main(argv):
    """Command-line entry point: -p parses one XML file to stdout,
    -i/-o batch-converts a directory, -h prints usage.

    argv is the argument list WITHOUT the program name (sys.argv[1:]).
    """
    input_path = ''
    output_path = ''
    parse_file = ''
    try:
        # Fixed: register the long options the loop below checks for;
        # the original only declared the short spec, so --input etc.
        # always raised GetoptError.
        opts, args = getopt.getopt(argv, "hi:o:p:",
                                   ["input=", "output=", "parse="])
    except getopt.GetoptError:
        # print() calls instead of Python-2 print statements —
        # same output on 2, required for 3.
        print('ingest.py -i <input> -o <output>')
        sys.exit(2)
    for opt, arg in opts:
        # Fixed: format before printing (the original called .format on
        # print's return value, an AttributeError on Python 3).
        print('opt {0}, arg {1}'.format(opt, arg))
        if opt == '-h':
            print('ingest.py -i <input> -o <output>')
            sys.exit()
        elif opt in ("-i", "--input"):
            input_path = arg
        elif opt in ("-o", "--output"):
            output_path = arg
        elif opt in ("-p", "--parse"):
            parse_file = arg
    print(parse_file)
    if parse_file:
        parsed = parse_single_file(parse_file)
        print(json.dumps(parsed, sort_keys=True, indent=4, separators=(',', ': ')))
    elif input_path:
        convert_pascal_to_json(input_path, output_path)
# Run the CLI when executed as a script (strip the program name from argv).
if __name__ == "__main__":
    main(sys.argv[1:])
# file = '/usr/local/data/VOCdevkit/VOC2007/Annotations/006637.xml'
# tree = et.parse(file)
# root = tree.getroot()
# d = etree_to_dict(root)
# # et.dump(tree)
# json2 = d['annotation']
# json1 = json.dumps(json2, sort_keys=True, indent=4, separators=(',', ': '))
# print(json1)
# path = '/usr/local/data/VOCdevkit/VOC2007/Annotations/*.xml'
# convert_pascal_to_json(path)
|
Python
| 0.999984
|
|
59fd062a65e83bc95e9864e4cfb057ba7ffbe475
|
Add files via upload
|
apriori_temp.py
|
apriori_temp.py
|
"""
Description : Simple Python implementation of the Apriori Algorithm
Usage:
$python apriori.py -f DATASET.csv -s minSupport -c minConfidence
$python apriori.py -f DATASET.csv -s 0.15 -c 0.6
"""
import sys
from itertools import chain, combinations
from collections import defaultdict
from optparse import OptionParser
def subsets(arr):
    """Return an iterator over all non-empty subsets of arr as tuples,
    ordered by increasing size."""
    sizes = range(1, len(arr) + 1)
    return chain.from_iterable(combinations(arr, size) for size in sizes)
def returnItemsWithMinSupport(itemSetVar, transactionList, minSupport, freqSet):
    """Calculate support for each candidate itemset and split candidates
    into those meeting minSupport and those that fail it.

    itemSetVar: iterable of frozenset candidates; transactionList: list of
    transactions (each supporting `issubset` checks); freqSet: running
    defaultdict(int) of global counts, updated in place.
    Returns (kept_itemsets, removed_itemsets).

    NOTE: replaces half-written work-in-progress code that referenced
    undefined names (numVars, tCiclo, item) and contained a bare `for`
    (a syntax error); restored to the canonical Apriori support filter
    that the callers in this file expect.
    """
    _itemSet = set()
    _itemSetRemoved = set()
    localSet = defaultdict(int)
    # Count, for every candidate, how many transactions contain it.
    for item in itemSetVar:
        for transaction in transactionList:
            if item.issubset(transaction):
                freqSet[item] += 1
                localSet[item] += 1
    # Keep candidates whose relative support clears the threshold;
    # remember the rest so joinSet can prune their supersets.
    for item, count in localSet.items():
        support = float(count) / len(transactionList)
        if support >= minSupport:
            _itemSet.add(item)
        else:
            _itemSetRemoved.add(item)
    return _itemSet, _itemSetRemoved
def joinSet(itemSet, removedSet, length):
    """Join itemSet with itself and return the `length`-element candidate
    itemsets, pruning any candidate that contains a previously removed
    (infrequent) itemset — the Apriori downward-closure step.

    Bug fixed: the original used a `flagAddItem` flag that was set False on
    the first pruned candidate and never reset, so every candidate after
    the first pruned one was dropped regardless of its own contents.
    """
    candidates = set([i.union(j) for i in itemSet for j in itemSet
                      if len(i.union(j)) == length])
    filteredSet = set()
    for item in candidates:
        # Prune: any superset of an infrequent itemset is infrequent.
        if any(removed.issubset(item) for removed in removedSet):
            continue
        filteredSet.add(item)
    return filteredSet
def getItemSetTransactionList(data_iterator, numVars=4):
    """Collect all transactions and the per-column sets of distinct values.

    Generalized: the number of columns read from each record is now a
    parameter (default 4, matching the original hard-coded value).
    Returns (itemSetVar, transactionList) where itemSetVar[i] is the set
    of values seen in column i.
    """
    transactionList = list()
    itemSetVar = [set() for _ in range(numVars)]
    for transaction in data_iterator:
        transactionList.append(transaction)
        for i in range(numVars):
            itemSetVar[i].add(transaction[i])  # Generate 1-itemSets
    return itemSetVar, transactionList
def runApriori(data_iter, minSupport, minConfidence):
    """
    run the apriori algorithm. data_iter is a record iterator
    Return both:
     - items (tuple, support)
     - rules ((pretuple, posttuple), confidence)
    """
    # NOTE(review): getItemSetTransactionList returns a LIST of per-column
    # sets, while the (work-in-progress) support filter appears to expect a
    # set of frozenset candidates — this file looks mid-refactor; confirm
    # the intended candidate representation before relying on results.
    itemSet, transactionList = getItemSetTransactionList(data_iter)
    freqSet = defaultdict(int)
    largeSet = dict()
    # Global dictionary which stores (key=n-itemSets,value=support)
    # which satisfy minSupport
    assocRules = dict()
    # Dictionary which stores Association Rules
    oneCSet, removedCSet = returnItemsWithMinSupport(itemSet,
                                                     transactionList,
                                                     minSupport,
                                                     freqSet)
    currentLSet = oneCSet
    removedLSet = removedCSet
    k = 2
    # Level-wise loop: grow candidate size k until no frequent sets remain.
    while(currentLSet != set([])):
        largeSet[k-1] = currentLSet
        currentLSet = joinSet(currentLSet, removedLSet, k)
        currentCSet, removedCSet = returnItemsWithMinSupport(currentLSet,
                                                             transactionList,
                                                             minSupport,
                                                             freqSet)
        currentLSet = currentCSet
        removedLSet = removedCSet
        k = k + 1
    def getSupport(item):
        """local function which Returns the support of an item"""
        return float(freqSet[item])/len(transactionList)
    # Flatten all frequent itemsets into (tuple, support) pairs.
    toRetItems = []
    for key, value in largeSet.items():
        toRetItems.extend([(tuple(item), getSupport(item))
                           for item in value])
    # Derive rules antecedent => consequent from each frequent itemset.
    toRetRules = []
    for key, value in largeSet.items():
        for item in value:
            _subsets = map(frozenset, [x for x in subsets(item)])
            for element in _subsets:
                remain = item.difference(element)
                if len(remain) > 0:
                    confidence = getSupport(item)/getSupport(element)
                    if confidence >= minConfidence:
                        toRetRules.append(((tuple(element), tuple(remain)),
                                           confidence))
    return toRetItems, toRetRules
def printResults(items, rules):
    """Print the generated itemsets sorted by support, then the association
    rules sorted by confidence."""
    by_support = sorted(items, key=lambda pair: pair[1])
    for itemset, support in by_support:
        print("item: %s , %.3f" % (str(itemset), support))
    print("\n------------------------ RULES:")
    by_confidence = sorted(rules, key=lambda pair: pair[1])
    for rule, confidence in by_confidence:
        antecedent, consequent = rule
        print("Rule: %s ==> %s , %.3f" % (str(antecedent), str(consequent), confidence))
def dataFromFile(fname):
    """Generator yielding each CSV line as a list of fields, with the
    trailing comma (if any) stripped.

    Fixed: the 'rU' open mode was deprecated since 3.4 and removed in
    Python 3.11 (ValueError); plain 'r' already gives universal newlines
    on Python 3.  The with-block also closes the file, which the original
    left open.
    """
    with open(fname, 'r') as file_iter:
        for line in file_iter:
            line = line.strip().rstrip(',')  # Remove trailing comma
            record = list(line.split(','))
            yield record
# CLI entry point: parse -f/-s/-c options and run Apriori on the dataset.
if __name__ == "__main__":
    optparser = OptionParser()
    optparser.add_option('-f', '--inputFile',
                         dest='input',
                         help='filename containing csv',
                         default=None)
    optparser.add_option('-s', '--minSupport',
                         dest='minS',
                         help='minimum support value',
                         default=0.15,
                         type='float')
    optparser.add_option('-c', '--minConfidence',
                         dest='minC',
                         help='minimum confidence value',
                         default=0.6,
                         type='float')
    (options, args) = optparser.parse_args()
    inFile = None
    # NOTE(review): the else branch is unreachable — the if/elif pair
    # covers both None and not-None — so the "no dataset" error (which
    # also has a 'system with exit' typo) can never fire.  Also, the
    # stdin path hands raw file lines to runApriori instead of parsed
    # records like dataFromFile produces; confirm intended behavior.
    if options.input is None:
        inFile = sys.stdin
    elif options.input is not None:
        inFile = dataFromFile(options.input)
    else:
        print('No dataset filename specified, system with exit')
        sys.exit('System will exit')
    minSupport = options.minS
    minConfidence = options.minC
    items, rules = runApriori(inFile, minSupport, minConfidence)
    printResults(items, rules)
|
Python
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.