commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
841fb156fff3d257d39afdc9d3d4e587427fe2cf | Add new file missed in earlier commit place holder for projects that do not load for some reason | Source/Scm/wb_scm_project_place_holder.py | Source/Scm/wb_scm_project_place_holder.py | '''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_scm_project_place_holder.py
'''
import pathlib
#
# ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
    """Stand-in SCM project used when the real project cannot be loaded."""

    def __init__(self, app, prefs_project):
        self.app = app
        self.prefs_project = prefs_project
        # a single root node is enough for a project that never loaded
        self.tree = ScmProjectPlaceholderTreeNode(self, prefs_project.name, pathlib.Path('.'))

    def scmType(self):
        # report the SCM type recorded in the preferences
        return self.prefs_project.scm_type

    def isNotEqual(self, other):
        return other.projectName() != self.projectName()

    def getBranchName(self):
        # a placeholder has no checked-out branch
        return ''

    def projectName(self):
        return self.prefs_project.name

    def projectPath(self):
        return pathlib.Path(self.prefs_project.path)

    def updateState(self):
        # nothing to refresh for a project that never loaded
        pass
class ScmProjectPlaceholderTreeNode:
    """Single tree node presented for a placeholder project."""

    def __init__(self, project, name, path):
        self.project = project
        self.name = name
        self.__path = path

    def __repr__(self):
        return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)

    def isNotEqual(self, other):
        # nodes differ when either the path or the owning project differs
        if self.relativePath() != other.relativePath():
            return True
        return self.project.isNotEqual(other.project)

    def __lt__(self, other):
        # order nodes alphabetically by name
        return self.name < other.name

    def relativePath(self):
        return self.__path

    def absolutePath(self):
        return self.project.projectPath() / self.__path

    def getAllFolderNodes(self):
        # a placeholder never has children
        return []

    def getAllFolderNames(self):
        return []

    def getAllFileNames(self):
        return []

    def isByPath(self):
        return False
| Python | 0 | |
f1ba45809e6682235c07ab89e4bc32e56b2fa84f | Create i_love_lance_janice.py | i_love_lance_janice.py | i_love_lance_janice.py | """
I Love Lance & Janice
=====================
You've caught two of your fellow minions passing coded notes back and forth - while they're on duty, no less! Worse, you're pretty sure it's not job-related - they're both huge fans of the space soap opera "Lance & Janice". You know how much Commander Lambda hates waste, so if you can prove that these minions are wasting her time passing non-job-related notes, it'll put you that much closer to a promotion.
Fortunately for you, the minions aren't exactly advanced cryptographers. In their code, every lowercase letter [a..z] is replaced with the corresponding one in [z..a], while every other character (including uppercase letters and punctuation) is left untouched. That is, 'a' becomes 'z', 'b' becomes 'y', 'c' becomes 'x', etc. For instance, the word "vmxibkgrlm", when decoded, would become "encryption".
Write a function called answer(s) which takes in a string and returns the deciphered string so you can show the commander proof that these minions are talking about "Lance & Janice" instead of doing their jobs.
Languages
=========
To provide a Python solution, edit solution.py
To provide a Java solution, edit solution.java
Test cases
==========
Inputs:
(string) s = "wrw blf hvv ozhg mrtsg'h vkrhlwv?"
Output:
(string) "did you see last night's episode?"
Inputs:
(string) s = "Yvzs! I xzm'g yvorvev Lzmxv olhg srh qly zg gsv xlolmb!!"
Output:
(string) "Yeah! I can't believe Lance lost his job at the colony!!"
"""
def strSlice(s):
    """Return every prefix of s, shortest first (empty string -> empty list)."""
    return [s[:end] for end in range(1, len(s) + 1)]
def answer(s):
    """Return the largest n such that s == unit * n for some prefix ``unit``.

    NOTE(review): despite the module docstring above (which describes a
    letter-substitution cipher), this function actually solves the
    repeated-substring problem, e.g. "abcabcabcabc" -> 4.
    """
    str_len_lst = []
    # try every prefix of s as the candidate repeating unit
    for end in range(1, len(s) + 1):
        elmt = s[:end]
        cnt_elmt = s.count(elmt)
        # floor division keeps the code correct on both Python 2 and
        # Python 3; with true division ``elmt * quotient`` would raise
        # TypeError (str * float) on Python 3
        quotient = len(s) // len(elmt)
        if (elmt * quotient) == s:
            str_len_lst.append(cnt_elmt)
    return max(str_len_lst)
# s = "abccbaabccba"
# 2
s = "abcabcabcabc"
# 4
# parenthesised print of a single argument behaves identically on
# Python 2 (prints the parenthesised value) and Python 3
print(answer(s))
| Python | 0.000014 | |
a08a7da41300721e07c1bff8e36e3c3d69af06fb | Add py-asdf package (#12817) | var/spack/repos/builtin/packages/py-asdf/package.py | var/spack/repos/builtin/packages/py-asdf/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAsdf(PythonPackage):
    """The Advanced Scientific Data Format (ASDF) is a next-generation
    interchange format for scientific data. This package contains the Python
    implementation of the ASDF Standard."""

    homepage = "https://github.com/spacetelescope/asdf"
    url = "https://pypi.io/packages/source/a/asdf/asdf-2.4.2.tar.gz"

    version('2.4.2', sha256='6ff3557190c6a33781dae3fd635a8edf0fa0c24c6aca27d8679af36408ea8ff2')

    # build-time tooling
    depends_on('python@3.3:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-setuptools-scm', type='build')
    # runtime dependencies
    depends_on('py-semantic-version@2.3.1:2.6.0', type=('build', 'run'))
    depends_on('py-pyyaml@3.10:', type=('build', 'run'))
    depends_on('py-jsonschema@2.3:3.999', type=('build', 'run'))
    depends_on('py-six@1.9.0:', type=('build', 'run'))
    depends_on('py-numpy@1.8:', type=('build', 'run'))
7f4642fc2e0edba668482f2ebbb64ab8870e709a | Initialize P01_basics | books/AutomateTheBoringStuffWithPython/Chapter01/P01_basics.py | books/AutomateTheBoringStuffWithPython/Chapter01/P01_basics.py | # This program performs basic Python instructions
# Expressions
print(2 + 2)
print(2 + 3 * 6)
print((2 + 3) * 6)
print(48565878 * 578453)
print(2 ** 8)
print(23 / 7)
print(23 // 7)
print(23 % 7)
print(2 + 2)
print((5 - 1) * ((7 + 1) / (3 - 1)))
# Uncomment to see what happens
#print(5 + )
#print(42 + 5 + * 2)
# The Integer, Floating-Point, and String Data Types
#print("Hello world!) # Uncomment to see what happens
print("Alice" + "Bob")
#print("Alice" + 42) # Uncomment to see what happens
print("Alice" * 5)
# Uncomment to see what happens
#print("Alice" * "Bob")
#print("Alice" * 5.0)
# Storing Values in Variables
spam = 40
print(spam)
eggs = 2
print(spam + eggs)
print(spam + eggs + spam)
spam = spam + 2
print(spam)
spam = "Hello"
print(spam)
spam = "Goodbye"
print(spam)
# The len() Function
print(len("hello"))
print(len("My very energetic monster just scarfed nachos."))
print(len(''))
#print("I am" + 29 + " years old.") # Uncomment to see what happens
# The str(), int(), and float() Functions
print(str(29))
print("I am " + str(29) + " years old.")
print(str(0))
print(str(-3.14))
print(int("42"))
print(int("-99"))
print(int(1.25))
print(int(1.99))
print(float("3.14"))
print(float(10))
spam = input("Type 101 here: ") # Type 101 when prompted
print(spam)
spam = int(spam)
print(spam)
print(spam * 10 / 5)
# Uncomment to see what happens
#print(int("99.99"))
#print(int("twelve"))
print(int(7.7))
print(int(7.7) + 1)
| Python | 0.000002 | |
ea6d73ac2b9274eae0a866acd1e729854c59fb17 | Add update.py to drive the update loop. | kettle/update.py | kettle/update.py | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
def modified_today(fname):
    """Return True if fname's mtime falls after the start of the current day.

    "Day" here is an epoch-aligned 24h window (now - now % 86400).
    A missing or unreadable file counts as not modified today.
    """
    now = time.time()
    day_start = now - now % (24 * 60 * 60)
    try:
        mtime = os.stat(fname).st_mtime
    except OSError:
        return False
    return mtime > day_start
def call(cmd):
    """Echo cmd, run it through the shell, and fail loudly on error.

    Raises:
        Exception: when os.system reports a non-zero exit status.
    """
    # Python 2 print statement: echo the command before running it
    print '+', cmd
    status = os.system(cmd)
    if status:
        raise Exception('invocation failed')
def main():
    """Rebuild the local build database and upload day/week/all tables to BigQuery."""
    # regenerate the local build database from the configured GCS buckets
    call('time python make_db.py --buckets ../buckets.yaml --junit --threads 128')

    bq_cmd = 'bq load --source_format=NEWLINE_DELIMITED_JSON --max_bad_records=1000'
    mj_cmd = 'pypy make_json.py'

    mj_ext = ''
    bq_ext = ''
    if not modified_today('build_day.json.gz'):
        # cycle daily/weekly tables
        # first run of the day: replace the BQ tables and reset emitted state
        bq_ext = ' --replace'
        mj_ext = ' --reset-emitted'

    # emit and upload the 1-day, 7-day, and full datasets in turn
    call(mj_cmd + mj_ext + ' --days 1 | pv | gzip > build_day.json.gz')
    call(bq_cmd + bq_ext + ' k8s-gubernator:build.day build_day.json.gz schema.json')

    call(mj_cmd + mj_ext + ' --days 7 | pv | gzip > build_week.json.gz')
    call(bq_cmd + bq_ext + ' k8s-gubernator:build.week build_week.json.gz schema.json')

    call(mj_cmd + ' | pv | gzip > build_all.json.gz')
    call(bq_cmd + ' k8s-gubernator:build.all build_all.json.gz schema.json')

    # stream incremental updates until caught up (--stop_at=1)
    call('python stream.py --poll kubernetes-jenkins/gcs-changes/kettle '
         ' --dataset k8s-gubernator:build --tables all:0 day:1 week:7 --stop_at=1')
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
os.environ['TZ'] = 'America/Los_Angeles'
main()
| Python | 0 | |
5114f177741b105f33819b98415702e53b52eb01 | Add script to update site setup which is used at places like password reset email [skip ci] | corehq/apps/hqadmin/management/commands/update_site_setup.py | corehq/apps/hqadmin/management/commands/update_site_setup.py | from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'site_address',
help="the new site address that should be used. This would get set in the site objects name "
"and domain."
)
parser.add_argument(
'--skip-checks',
action='store_true',
default=False,
help="If you are sure of what you are doing and want to skip checks to ensure safe update."
)
def handle(self, site_address, *args, **options):
if not options['skip_checks']:
if settings.SITE_ID != 1:
raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
sites_count = Site.objects.count()
if sites_count != 1:
raise CommandError("Expected to have only one object added by Site during setup but currently its %s "%
Site.objects.count())
site_object = Site.objects.first()
if site_object.name != "example.com" and site_object.domain != "example.com":
raise CommandError(
"""
Expected the present site object to have dummy example values.
They were probably modified and needs to be rechecked.
Current Values, name -> {name}, domain -> {domain}
""".format(name=site_object.name, domain=site_object.domain
))
site_object = Site.objects.first()
site_object.name = site_address
site_object.domain = site_address
site_object.save()
Site.objects.clear_cache()
site_object = Site.objects.first()
print('Updated!')
print('Site object now is name -> {name}, domain -> {domain}'.format(
name=site_object.name,
domain=site_object.domain
))
| Python | 0 | |
23747d8181e7b9c576d07c4b3d50a0ac8a60cd3e | Add wunderground module | i3pystatus/wunderground.py | i3pystatus/wunderground.py | from i3pystatus import IntervalModule
from i3pystatus.core.util import user_open, internet, require
from datetime import datetime
from urllib.request import urlopen
import json
import re
GEOLOOKUP_URL = 'http://api.wunderground.com/api/%s/geolookup%s/q/%s.json'
STATION_LOOKUP_URL = 'http://api.wunderground.com/api/%s/conditions/q/%s.json'
class Wunderground(IntervalModule):
'''
This module retrieves weather from the Weather Underground API.
.. note::
A Weather Underground API key is required to use this module, you can
sign up for one for a developer API key free at
https://www.wunderground.com/weather/api/
A developer API key is allowed 500 queries per day.
Valid values for ``location_code`` include:
* **State/City_Name** - CA/San_Francisco
* **Country/City** - France/Paris
* **Geolocation by IP** - autoip
* **Zip or Postal Code** - 60616
* **ICAO Airport Code** - icao:LAX
* **Latitude/Longitude** - 41.8301943,-87.6342619
* **Personal Weather Station (PWS)** - pws:KILCHICA30
When not using a PWS, the location will be queried, and the closest
station will be used. When possible, it is recommended to use a PWS
location, as this will result in fewer API calls.
.. rubric:: Available formatters
* `{city}` — Location of weather observation
* `{conditon}` — Current condition (Rain, Snow, Overcast, etc.)
* `{observation_time}` — Time of weather observation (supports strftime format flags)
* `{current_temp}` — Current temperature, excluding unit
* `{degrees}` — ``°C`` if ``units`` is set to ``metric``, otherwise ``°F``
* `{feelslike}` — Wunderground "Feels Like" temperature, excluding unit
* `{current_wind}` — Wind speed in mph/kph, excluding unit
* `{current_wind_direction}` — Wind direction
* `{current_wind_gust}` — Speed of wind gusts in mph/kph, excluding unit
* `{pressure_in}` — Barometric pressure (in inches), excluding unit
* `{pressure_mb}` — Barometric pressure (in millibars), excluding unit
* `{pressure_trend}` — ``+`` (rising) or ``-`` (falling)
* `{visibility}` — Visibility in mi/km, excluding unit
* `{humidity}` — Current humidity, excluding percentage symbol
* `{dewpoint}` — Dewpoint temperature, excluding unit
* `{uv_index}` — UV Index
'''
interval = 300
settings = (
('api_key', 'Weather Underground API key'),
('location_code', 'Location code from www.weather.com'),
('units', 'Celsius (metric) or Fahrenheit (imperial)'),
('use_pws', 'Set to False to use only airport stations'),
('error_log', 'If set, tracebacks will be logged to this file'),
'format',
)
required = ('api_key', 'location_code')
api_key = None
location_code = None
units = "metric"
format = "{current_temp}{degrees}"
use_pws = True
error_log = None
station_id = None
forecast_url = None
on_leftclick = 'open_wunderground'
def open_wunderground(self):
'''
Open the forecast URL, if one was retrieved
'''
if self.forecast_url and self.forecast_url != 'N/A':
user_open(self.forecast_url)
def api_request(self, url):
'''
Execute an HTTP POST to the specified URL and return the content
'''
with urlopen(url) as content:
try:
content_type = dict(content.getheaders())['Content-Type']
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
response = json.loads(content.read().decode(charset))
try:
raise Exception(response['response']['error']['description'])
except KeyError:
pass
return response
def geolookup(self):
'''
Use the location_code to perform a geolookup and find the closest
station. If the location is a pws or icao station ID, no lookup will be
peformed.
'''
if self.station_id is None:
try:
for no_lookup in ('pws', 'icao'):
sid = self.location_code.partition(no_lookup + ':')[-1]
if sid:
self.station_id = self.location_code
return
except AttributeError:
# Numeric or some other type, either way we'll just stringify
# it below and perform a lookup.
pass
extra_opts = '/pws:0' if not self.use_pws else ''
api_url = GEOLOOKUP_URL % (self.api_key,
extra_opts,
self.location_code)
response = self.api_request(api_url)
station_type = 'pws' if self.use_pws else 'airport'
try:
stations = response['location']['nearby_weather_stations']
nearest = stations[station_type]['station'][0]
except (KeyError, IndexError):
raise Exception('No locations matched location_code %s'
% self.location_code)
if self.use_pws:
nearest_pws = nearest.get('id', '')
if not nearest_pws:
raise Exception('No id entry for station')
self.station_id = 'pws:%s' % nearest_pws
else:
nearest_airport = nearest.get('icao', '')
if not nearest_airport:
raise Exception('No icao entry for station')
self.station_id = 'icao:%s' % nearest_airport
def query_station(self):
'''
Query a specific station
'''
# If necessary, do a geolookup to set the station_id
self.geolookup()
query_url = STATION_LOOKUP_URL % (self.api_key, self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('forecast_url', None)
except KeyError:
raise Exception('No weather data found for %s' % self.station_id)
def _find(key, data=None):
data = data or response
return data.get(key, 'N/A')
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
try:
observation_time = int(_find('observation_epoch'))
except TypeError:
observation_time = 0
return dict(
forecast_url=_find('forecast_url'),
city=_find('city', response['observation_location']),
condition=_find('weather'),
observation_time=datetime.fromtimestamp(observation_time),
current_temp=_find('temp_' + temp_unit),
feelslike=_find('feelslike_' + temp_unit),
current_wind=_find('wind_' + speed_unit),
current_wind_direction=_find('wind_dir'),
current_wind_gust=_find('wind_gust_' + speed_unit),
pressure_in=_find('pressure_in'),
pressure_mb=_find('pressure_mb'),
pressure_trend=_find('pressure_trend'),
visibility=_find('visibility_' + distance_unit),
humidity=_find('relative_humidity').rstrip('%'),
dewpoint=_find('dewpoint_' + temp_unit),
uv_index=_find('uv'),
)
@require(internet)
def run(self):
try:
result = self.query_station()
except Exception as exc:
if self.error_log:
import traceback
with open(self.error_log, 'a') as f:
f.write('%s : An exception was raised:\n' %
datetime.isoformat(datetime.now()))
f.write(''.join(traceback.format_exc()))
f.write(80 * '-' + '\n')
raise
result['degrees'] = '°%s' % ('C' if self.units == 'metric' else 'F')
self.output = {
"full_text": self.format.format(**result),
# "color": self.color # TODO: add some sort of color effect
}
| Python | 0 | |
6fd0cee9bca0449aa6aab6a62e470ba8ff909cbb | print all caesar rotations for some string | language/rotN.py | language/rotN.py | #! /usr/bin/env python
import string
ciphered = "LVFU XAN YIJ UVXRB RKOYOFB"
def make_rot_n(n):
# http://stackoverflow.com/questions/3269686/short-rot13-function
lc = string.ascii_lowercase
uc = string.ascii_uppercase
trans = string.maketrans(lc + uc,
lc[n:] + lc[:n] + uc[n:] + uc[:n])
return lambda s: string.translate(s, trans)
for i in range(26):
rotator = make_rot_n(i)
deciphered = rotator(ciphered)
print str(i) + ' ' + deciphered
| Python | 0.031516 | |
d3dbb797575221d574fdda9c3d087d8696f6091a | Add netstring lib | lib/netstring.py | lib/netstring.py | def encode_netstring(s):
return str(len(s)).encode('ascii') + b':' + s + b','
def consume_netstring(s):
    """If s is a bytestring beginning with a netstring, returns (value, rest)
    where value is the contents of the netstring, and rest is the part of s
    after the netstring.

    Raises ValueError if s does not begin with a netstring.
    """
    (digits, sep, remainder) = s.partition(b':')
    if sep != b':':
        raise ValueError("No colon found in s")
    if not digits.isdigit():
        raise ValueError("Length is not numeric")
    n = int(digits)
    # the payload must be followed by a terminating comma, so the
    # remainder has to be strictly longer than the declared length
    if len(remainder) <= n:
        raise ValueError("String not long enough")
    # 0x2c is the ASCII comma that terminates every netstring
    if remainder[n] != 0x2c:
        raise ValueError("String not terminated with comma")
    return (remainder[:n], remainder[n + 1:])
def is_netstring(s):
    """Return True when s is exactly one netstring with nothing after it."""
    try:
        (_, leftover) = consume_netstring(s)
    except ValueError:
        return False
    return len(leftover) == 0
| Python | 0.000002 | |
9f5c3715f4b3cd5bf451bdc504cded6459e8ee79 | add one test file and add content to it | test/unit_test/test_similarity2.py | test/unit_test/test_similarity2.py | from lexos.helpers.error_messages import MATRIX_DIMENSION_UNEQUAL_MESSAGE
count_matrix = [['', 'The', 'all', 'bobcat', 'cat', 'caterpillar',
'day.', 'slept'],
['catBobcat', 9.0, 9.0, 5.0, 4.0, 0.0, 9.0, 9.0],
['catCaterpillar', 9.0, 9.0, 0.0, 4.0, 5.0, 9.0, 9.0],
['test', 9.0, 9.0, 5.0, 4.0, 0.0, 9.0, 9.0]]
assert all(len(line) == len(count_matrix[1])
for line in count_matrix[1:]), MATRIX_DIMENSION_UNEQUAL_MESSAGE
print("pass")
| Python | 0 | |
44863ff1f7064f1d9a9bb897822834eb6755ed59 | Add SMTP auth server | authserver.py | authserver.py | import bcrypt
import asyncore
from secure_smtpd import SMTPServer, FakeCredentialValidator
from srht.objects import User
class UserValidator(object):
    """Validates SMTP AUTH credentials against the srht user database."""
    def validate(self, username, password):
        # look the user up by name; unknown users are rejected
        user = User.query.filter(User.username == username).first()
        if not user:
            return False
        # bcrypt.checkpw compares the plaintext against the stored hash
        return bcrypt.checkpw(password, user.password)
# NOTE(review): the server previously passed FakeCredentialValidator(),
# which is a test stub; UserValidator (defined above, and otherwise
# unused) checks real accounts, which matches this file's purpose.
SMTPServer(
    ('0.0.0.0', 4650),
    None,
    require_authentication=True,
    ssl=False,
    credential_validator=UserValidator(),
)

asyncore.loop()
| Python | 0.000001 | |
f56e390be0e2cea8e08080029aad756a6ab3c91f | Add lc0253_meeting_rooms_ii.py from Copenhagen :) | lc0253_meeting_rooms_ii.py | lc0253_meeting_rooms_ii.py | """Leetcode 253. Meeting Rooms II (Premium)
Medium
URL: https://leetcode.com/problems/meeting-rooms-ii
Given an array of meeting time intervals consisting of start and end times
[[s1,e1],[s2,e2],...] (si < ei),
find the minimum number of conference rooms required.
"""
class Solution2(object):
    # @param {Interval[]} intervals
    # @return {integer}
    def minMeetingRooms(self, intervals):
        """Return the minimum number of rooms needed; not yet implemented."""
        pass


def main():
    # placeholder driver; fill in once minMeetingRooms is implemented
    pass
if __name__ == '__main__':
main()
| Python | 0 | |
8b4c34e84d306b5f9021de47bc3ae9050e2fc2b3 | Fix loading of ply files exported by meshlab | compare_clouds.py | compare_clouds.py | #!/usr/bin/env python3
from pathlib import Path
"""Code for comparing point clouds"""
cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")
from load_ply import load_ply
cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()
#if __name__=='__main__':
#pass
| Python | 0 | |
4f70773bb9041c44b0f83ef61a46d5fa974b366e | Create conwaytesting.py | conwaytesting.py | conwaytesting.py | Python | 0 | ||
73d753e315c7feb18af39360faf4d6fc6d10cedf | test to demonstrate bug 538 | tests/text/ELEMENT_CHANGE_STYLE.py | tests/text/ELEMENT_CHANGE_STYLE.py | #!/usr/bin/env python
'''Test that inline elements can have their style changed, even after text
has been deleted before them. [This triggers bug 538 if it has not yet been fixed.]
To run the test, delete the first line, one character at a time,
verifying that the element remains visible and no tracebacks are
printed to the console.
Press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
import pyglet
from pyglet.text import caret, document, layout
doctext = '''ELEMENT.py test document.
PLACE CURSOR AT THE END OF THE ABOVE LINE, AND DELETE ALL ITS TEXT,
BY PRESSING THE DELETE KEY REPEATEDLY.
IF THIS WORKS OK, AND THE ELEMENT (GRAY RECTANGLE) WITHIN THIS LINE
[element here]
REMAINS VISIBLE BETWEEN THE SAME CHARACTERS, WITH NO ASSERTIONS PRINTED TO
THE CONSOLE, THE TEST PASSES.
(In code with bug 538, the element sometimes moves within the text, and
eventually there is an assertion failure. Note that there is another bug,
unrelated to this one, which sometimes causes the first press of the delete
key to be ignored.)
Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Fusce venenatis
pharetra libero. Phasellus lacinia nisi feugiat felis. Sed id magna in nisl
cursus consectetuer. Aliquam aliquam lectus eu magna. Praesent sit amet ipsum
vitae nisl mattis commodo. Aenean pulvinar facilisis lectus. Phasellus sodales
risus sit amet lectus. Suspendisse in turpis. Vestibulum ac mi accumsan eros
commodo tincidunt. Nullam velit. In pulvinar, dui sit amet ullamcorper dictum,
dui risus ultricies nisl, a dignissim sapien enim sit amet tortor.
Pellentesque fringilla, massa sit amet bibendum blandit, pede leo commodo mi,
eleifend feugiat neque tortor dapibus mauris. Morbi nunc arcu, tincidunt vel,
blandit non, iaculis vel, libero. Vestibulum sed metus vel velit scelerisque
varius. Vivamus a tellus. Proin nec orci vel elit molestie venenatis. Aenean
fringilla, lorem vel fringilla bibendum, nibh mi varius mi, eget semper ipsum
ligula ut urna. Nullam tempor convallis augue. Sed at dui.
'''
element_index = doctext.index('[element here]')
doctext = doctext.replace('[element here]', '')
class TestElement(document.InlineElement):
vertex_list = None
def place(self, layout, x, y):
## assert layout.document.text[self._position] == '\x00'
### in bug 538, this fails after two characters are deleted.
self.vertex_list = layout.batch.add(4, pyglet.gl.GL_QUADS,
layout.top_group,
'v2i',
('c4B', [200, 200, 200, 255] * 4))
y += self.descent
w = self.advance
h = self.ascent - self.descent
self.vertex_list.vertices[:] = (x, y,
x + w, y,
x + w, y + h,
x, y + h)
def remove(self, layout):
self.vertex_list.delete()
del self.vertex_list
class TestWindow(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(TestWindow, self).__init__(*args, **kwargs)
self.batch = pyglet.graphics.Batch()
self.document = pyglet.text.decode_attributed(doctext)
for i in [element_index]:
self.document.insert_element(i, TestElement(60, -10, 70))
self.margin = 2
self.layout = layout.IncrementalTextLayout(self.document,
self.width - self.margin * 2, self.height - self.margin * 2,
multiline=True,
batch=self.batch)
self.caret = caret.Caret(self.layout)
self.push_handlers(self.caret)
self.set_mouse_cursor(self.get_system_mouse_cursor('text'))
def on_draw(self):
pyglet.gl.glClearColor(1, 1, 1, 1)
self.clear()
self.batch.draw()
def on_key_press(self, symbol, modifiers):
super(TestWindow, self).on_key_press(symbol, modifiers)
if symbol == pyglet.window.key.TAB:
self.caret.on_text('\t')
self.document.set_style(0, len(self.document.text), dict(bold = None)) ### trigger bug 538
class TestCase(unittest.TestCase):
def test(self):
self.window = TestWindow(##resizable=True,
visible=False)
self.window.set_visible()
pyglet.app.run()
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
2e796f38dbdc1044c13a768c76fa733ad07b9829 | Add astro/21cm/extract_slice.py | astro/21cm/extract_slice.py | astro/21cm/extract_slice.py | #!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <weitian@aaronly.me>
# MIT License
#
"""
Extract a slice from the 21cm cube and save as FITS image.
"""
import os
import sys
import argparse
import numpy as np
import astropy.io.fits as fits
def main():
    """Parse CLI options, load the flat cube, extract one slice, write FITS."""
    outfile_default = "{prefix}_z{z:05.2f}_N{Nside}_L{Lside}_s{sidx}.fits"

    parser = argparse.ArgumentParser(
        description="Extract a slice from cube and save as FITS image")
    parser.add_argument("-C", "--clobber", dest="clobber",
                        action="store_true",
                        help="overwrite existing files")
    parser.add_argument("-d", "--dtype", dest="dtype", default="float32",
                        help="NumPy dtype of data cubes (default: float32)")
    parser.add_argument("-z", "--redshift", dest="redshift",
                        type=float, required=True,
                        help="redshift of the input data cube")
    parser.add_argument("-L", "--len-side", dest="Lside",
                        type=float, required=True,
                        help="Side length of the cube [comoving Mpc]")
    parser.add_argument("-s", "--slice-idx", dest="sidx",
                        type=int, default=None,
                        help="slice index to be extracted (default: "
                        "the central slice)")
    parser.add_argument("-u", "--unit", dest="unit",
                        help="data unit (e.g., K, mK)")
    parser.add_argument("-i", "--infile", dest="infile", required=True,
                        help="input data cube")
    parser.add_argument("-o", "--outfile", dest="outfile",
                        default=outfile_default,
                        help="output FITS image slice (default: %s)" %
                        outfile_default)
    parser.add_argument("-p", "--prefix", dest="prefix", required=True,
                        help="prefix for the output FITS image")
    args = parser.parse_args()

    cube = np.fromfile(open(args.infile, "rb"), dtype=args.dtype)
    # the file is a flat dump of an Nside^3 cube, so the side length is
    # the cube root of the total number of voxels
    Nside = round(cube.shape[0] ** (1.0/3))
    print("Read cube: %s (Nside=%d)" % (args.infile, Nside))

    if args.sidx is None:
        # default to the central slice
        sidx = int(Nside / 2.0)
    elif args.sidx >= 0 and args.sidx < Nside:
        # BUG FIX: this branch previously read ``args.idx``, which does not
        # exist (the argparse dest is "sidx") and raised AttributeError
        # whenever --slice-idx was supplied.
        sidx = args.sidx
    else:
        raise ValueError("invalid slice index: %s" % args.sidx)

    outfile = args.outfile.format(prefix=args.prefix, z=args.redshift,
                                  Nside=Nside, Lside=args.Lside, sidx=sidx)
    if os.path.exists(outfile) and not args.clobber:
        raise OSError("output file already exists: %s" % outfile)

    cube = cube.reshape((Nside, Nside, Nside))
    simg = cube[:, :, sidx]

    header = fits.Header()
    header["REDSHIFT"] = args.redshift
    header["Lside"] = (args.Lside, "Cube side length [comoving Mpc]")
    header["Nside"] = (Nside, "Number of pixels on each cube side")
    header["SliceIdx"] = (sidx, "Index of this extracted slice")
    if args.unit:
        header["BUNIT"] = (args.unit, "Data unit")
    header.add_history(" ".join(sys.argv))
    hdu = fits.PrimaryHDU(data=simg, header=header)
    try:
        hdu.writeto(outfile, overwrite=args.clobber)
    except TypeError:
        # older astropy releases spell the keyword ``clobber``
        hdu.writeto(outfile, clobber=args.clobber)
    print("Extracted #%d slice: %s" % (sidx, outfile))
if __name__ == "__main__":
main()
| Python | 0.000006 | |
0bf7d9fb20a3d2588ffc0e8341ec2af3df5fe300 | Add test for depot index page | depot/tests/test_depot_index.py | depot/tests/test_depot_index.py | from django.test import TestCase, Client
from depot.models import Depot
def create_depot(name, state):
return Depot.objects.create(name=name, active=state)
class DepotIndexTestCase(TestCase):
def test_depot_index_template(self):
response = self.client.get('/depots/')
self.assertTemplateUsed(
response,
template_name='depot/index.html'
)
def test_depot_index_with_no_depots(self):
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available :(')
def test_depot_index_with_active_depot(self):
depot = create_depot('active depot', True)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, depot.name)
def test_depot_index_with_archived_depot(self):
depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available')
self.assertNotContains(response, depot.name)
def test_depot_index_with_active_and_archived_depot(self):
active_depot = create_depot('active depot', True)
archived_depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, active_depot.name)
self.assertNotContains(response, archived_depot.name)
| Python | 0 | |
2a903d721c44f9c6b53c8516b28b9dd6c1faa5e0 | Create crawler_utils.py | crawler_utils.py | crawler_utils.py | import json
import os.path
def comments_to_json(comments):
    """Recursively convert a list of comment objects into plain dicts.

    Each comment must expose score, permalink, body, id and replies
    (itself a list of comments).
    """
    return [
        {
            "score": c.score,
            "url": c.permalink,
            "body": c.body,
            "id": c.id,
            # replies nest recursively
            "replies": comments_to_json(c.replies),
        }
        for c in comments
    ]
def save_submission(submission, storage_dir):
    """Write one submission (and its comment tree) as JSON to storage_dir/<id>.

    The file name is the submission id; an existing file is overwritten.
    """
    payload = {
        "url": submission.permalink,
        "text": submission.selftext,
        "title": submission.title,
        "score": submission.score,
        "comments": comments_to_json(submission.comments),
    }
    # the context manager closes the file; the explicit f.close() in the
    # original was redundant inside the with-block
    with open(os.path.join(storage_dir, submission.id), "w") as f:
        f.write(json.dumps(payload))
| Python | 0.000017 | |
d81a1f3ef63aef7f003a018f26ea636cf47cfc5d | Add init file for installation | jswatchr/__init__.py | jswatchr/__init__.py | from jswatchr import *
| Python | 0 | |
7850371982cc50dc2a5a59c7b01d5a1bec80cf3f | Add FairFuzz tool spec | benchexec/tools/fairfuzz.py | benchexec/tools/fairfuzz.py | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
import benchexec.model
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for FairFuzz (https://https://github.com/carolemieux/afl-rb/tree/testcomp).
"""
REQUIRED_PATHS = [
"bin"
]
def executable(self):
return util.find_executable('fairfuzz-svtestcomp')
def version(self, executable):
return "FairFuzz, built on AFL 2.52b"
def name(self):
return 'FairFuzz'
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
for line in output:
if "ERROR: couldn't run FairFuzz" in line:
return "Couldn't run FairFuzz"
if "CRASHES FOUND" in line:
return result.RESULT_FALSE_REACH
if "DONE RUNNING" in line:
return result.RESULT_DONE
return result.RESULT_UNKNOWN
| Python | 0 | |
a39fbd74eaf84c757f15d14bfc728bd1d0f63bd4 | Create blockchain.py | blockchain.py/blockchain.py | blockchain.py/blockchain.py | ###########################
# build your own blockchain from scratch in python3!
#
# inspired by
# Let's Build the Tiniest Blockchain In Less Than 50 Lines of Python by Gerald Nash
# see https://medium.com/crypto-currently/lets-build-the-tiniest-blockchain-e70965a248b
#
#
# to run use:
# $ python ./blockchain.py
import hashlib as hasher
import datetime as date
import pprint
class Block:
def __init__(self, index, data, previous_hash):
self.index = index
self.timestamp = date.datetime.now()
self.data = data
self.previous_hash = previous_hash
self.hash = self.calc_hash()
def calc_hash(self):
sha = hasher.sha256()
sha.update(str(self.index).encode("utf-8") +
str(self.timestamp).encode("utf-8") +
str(self.data).encode("utf-8") +
str(self.previous_hash).encode("utf-8"))
return sha.hexdigest()
def __repr__(self):
return "Block<\n index: {},\n timestamp: {},\n data: {},\n previous_hash: {},\n hash: {}>".format(
self.index, self.timestamp, self.data, self.previous_hash, self.hash)
@staticmethod
def first( data="Genesis" ):
return Block(0, data, "0")
@staticmethod
def next( previous, data="Transaction Data..." ):
return Block(previous.index + 1, data, previous.hash)
#####
## let's get started
## build a blockchain a block at a time
b0 = Block.first( "Genesis" )
b1 = Block.next( b0, "Transaction Data..." )
b2 = Block.next( b1, "Transaction Data......" )
b3 = Block.next( b2, "More Transaction Data..." )
blockchain = [b0, b1, b2, b3]
pprint.pprint( blockchain )
######
# will pretty print something like:
#
# [Block<
# index: 0,
# timestamp: 2017-09-19 19:21:04.015584,
# data: Genesis,
# previous_hash: 0,
# hash: b0cb7953bfad60415ea3b5d3b8015ee22c89d43351ea8f53e5367ee06193b1d3>,
# Block<
# index: 1,
# timestamp: 2017-09-19 19:21:04.015584,
# data: Transaction Data...,
# previous_hash: b0cb7953bfad60415ea3b5d3b8015ee22c89d43351ea8f53e5367ee06193b1d3,
# hash: a87707b2867d28e7367c74e4a2800ec112ea2a8b1517a332ad0b4c49c3b3d60b>,
# Block<
# index: 2,
# timestamp: 2017-09-19 19:21:04.015584,
# data: Transaction Data......,
# previous_hash: a87707b2867d28e7367c74e4a2800ec112ea2a8b1517a332ad0b4c49c3b3d60b,
# hash: 9a8aecdd62da47301502f0079aa1bf24dcf39ad392c723baef6b9bfbc927cf4e>,
# Block<
# index: 3,
# timestamp: 2017-09-19 19:21:04.015584,
# data: More Transaction Data...,
# previous_hash: 9a8aecdd62da47301502f0079aa1bf24dcf39ad392c723baef6b9bfbc927cf4e,
# hash: 5ef442875fb8c3e18d08531f3eba26ea75b608604fa0cc75715d76e15edbb5ea>]
| Python | 0.999863 | |
37db687b4167aee0e88036c5d85995de891453ed | Create cbalusek_01.py | Week01/Problem01/cbalusek_01.py | Week01/Problem01/cbalusek_01.py | #This project defines a function that takes any two numbers and sums their multiples to some cutoff value
def sum(val1, val2, test):
i = 1
j = 1
cum = 0
while i*val1 < test:
cum += i*val1
i += 1
while j*val2 < test:
if j*val2%val1 != 0:
cum += j*val2
j += 1
else:
j += 1
return cum
print(sum(3,5,1000))
| Python | 0.000048 | |
089f93bcf7157ee3eaf83964294c5c2df19683f0 | Create retweet.py | settings-search_query-yourHashtag-Leave-empty-for-all-languages-tweet_language-Create-your-app-on-http/retweet.py | settings-search_query-yourHashtag-Leave-empty-for-all-languages-tweet_language-Create-your-app-on-http/retweet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, ConfigParser, tweepy, inspect, hashlib
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# read config
config = ConfigParser.SafeConfigParser()
config.read(os.path.join(path, "config"))
# your hashtag or search query and tweet language (empty = all languages)
hashtag = config.get("settings","search_query")
tweetLanguage = config.get("settings","tweet_language")
# blacklisted users and words
userBlacklist = []
wordBlacklist = ["RT", u"♺"]
# build savepoint path + file
hashedHashtag = hashlib.md5(hashtag).hexdigest()
last_id_filename = "last_id_hashtag_%s" % hashedHashtag
rt_bot_path = os.path.dirname(os.path.abspath(__file__))
last_id_file = os.path.join(rt_bot_path, last_id_filename)
# create bot
auth = tweepy.OAuthHandler(config.get("twitter","consumer_key"), config.get("twitter","consumer_secret"))
auth.set_access_token(config.get("twitter","access_token"), config.get("twitter","access_token_secret"))
api = tweepy.API(auth)
# retrieve last savepoint if available
try:
with open(last_id_file, "r") as file:
savepoint = file.read()
except IOError:
savepoint = ""
print "No savepoint found. Trying to get as many results as possible."
# search query
timelineIterator = tweepy.Cursor(api.search, q=hashtag, since_id=savepoint, lang=tweetLanguage).items()
# put everything into a list to be able to sort/filter
timeline = []
for status in timelineIterator:
timeline.append(status)
try:
last_tweet_id = timeline[0].id
except IndexError:
last_tweet_id = savepoint
# filter @replies/blacklisted words & users out and reverse timeline
timeline = filter(lambda status: status.text[0] != "@", timeline)
timeline = filter(lambda status: not any(word in status.text.split() for word in wordBlacklist), timeline)
timeline = filter(lambda status: status.author.screen_name not in userBlacklist, timeline)
timeline.reverse()
tw_counter = 0
err_counter = 0
# iterate the timeline and retweet
for status in timeline:
try:
print "(%(date)s) %(name)s: %(message)s\n" % \
{ "date" : status.created_at,
"name" : status.author.screen_name.encode('utf-8'),
"message" : status.text.encode('utf-8') }
api.retweet(status.id)
tw_counter += 1
except tweepy.error.TweepError as e:
# just in case tweet got deleted in the meantime or already retweeted
err_counter += 1
#print e
continue
print "Finished. %d Tweets retweeted, %d errors occured." % (tw_counter, err_counter)
# write last retweeted tweet id to file
with open(last_id_file, "w") as file:
file.write(str(last_tweet_id))
| Python | 0.000002 | |
cc79ee252e09ade17961d03265c61a87e270bd88 | Make color emoji use character sequences instead of PUA. | nototools/map_pua_emoji.py | nototools/map_pua_emoji.py | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import ttLib
from nototools import add_emoji_gsub
from nototools import font_data
def get_glyph_name_from_gsub(char_seq, font):
"""Find the glyph name for ligature of a given character sequence from GSUB.
"""
cmap = font_data.get_cmap(font)
# FIXME: So many assumptions are made here.
try:
first_glyph = cmap[char_seq[0]]
rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
except KeyError:
return None
for lookup in font['GSUB'].table.LookupList.Lookup:
ligatures = lookup.SubTable[0].ligatures
try:
for ligature in ligatures[first_glyph]:
if ligature.Component == rest_of_glyphs:
return ligature.LigGlyph
except KeyError:
continue
return None
def add_pua_cmap(source_file, target_file):
"""Add PUA characters to the cmap of the first font and save as second."""
font = ttLib.TTFont(source_file)
cmap = font_data.get_cmap(font)
for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
+ add_emoji_gsub.EMOJI_FLAGS.items()):
if pua not in cmap:
glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
if glyph_name is not None:
cmap[pua] = glyph_name
font.save(target_file)
def main(argv):
"""Save the first font given to the second font."""
add_pua_cmap(argv[1], argv[2])
if __name__ == '__main__':
main(sys.argv)
| Python | 0 | |
2a5b7773af3e9516d8a4a3df25c0b829598ebb1c | Remove redundant str typecasting | nova/tests/uuidsentinel.py | nova/tests/uuidsentinel.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
class UUIDSentinels(object):
def __init__(self):
from oslo_utils import uuidutils
self._uuid_module = uuidutils
self._sentinels = {}
def __getattr__(self, name):
if name.startswith('_'):
raise ValueError('Sentinels must not start with _')
if name not in self._sentinels:
self._sentinels[name] = self._uuid_module.generate_uuid()
return self._sentinels[name]
sys.modules[__name__] = UUIDSentinels()
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
class UUIDSentinels(object):
def __init__(self):
from oslo_utils import uuidutils
self._uuid_module = uuidutils
self._sentinels = {}
def __getattr__(self, name):
if name.startswith('_'):
raise ValueError('Sentinels must not start with _')
if name not in self._sentinels:
self._sentinels[name] = str(self._uuid_module.generate_uuid())
return self._sentinels[name]
sys.modules[__name__] = UUIDSentinels()
| Python | 0.000009 |
ac673650673f7d6b9785d577499037bf9db4435a | refactor prompt abstraction | lib/smashlib/prompt.py | lib/smashlib/prompt.py | """ smash.prompt """
from smashlib.data import PROMPT_DEFAULT as DEFAULT
class Prompt(dict):
def __setitem__(self, k, v, update=True):
if k in self:
raise Exception,'prompt component is already present: ' + str(k)
super(Prompt, self).__setitem__(k, v)
if update:
self.update_prompt()
def update_prompt(self):
parts = self.values()
parts.sort()
parts = [part[1] for part in parts]
self.template = ' '.join(parts)
def _get_template(self):
""" get the current prompt template """
opc = getattr(__IPYTHON__.shell, 'outputcache', None)
if opc:
return opc.prompt1.p_template
else:
return 'error-getting-output-prompt'
def _set_template(self, t):
""" set the current prompt template """
opc = getattr(__IPYTHON__.shell, 'outputcache', None)
if opc:
opc.prompt1.p_template = t
template = property(_get_template, _set_template)
prompt = Prompt()
prompt.__setitem__('working_dir', [100, DEFAULT], update=False)
prompt.template = DEFAULT
| Python | 0.000003 | |
2c178c5ea05d2454ef6896aaf9c58b6536f5a15f | Create bubblesort.py | bubblesort.py | bubblesort.py | def bubblesort(lst):
#from last index to second
for passes in range(len(lst) - 1, 0, -1):
#from [0,passes[ keep swapping to put the largest
#number at index passes
for i in range(passes):
if lst[i] > lst[i+1]:
swap(lst, i, i+1)
return lst
def swap(lst, i, j):
temp = lst[i]
lst[i] = lst[j]
lst[j] = temp
print "{0}".format(bubblesort([23,57,75,33,6,8,56]))
| Python | 0.000003 | |
9e954d5181d36762a8c34e69516c7f5510bae5a7 | add exception class to use for mtconvert errors | oldowan/mtconvert/error.py | oldowan/mtconvert/error.py |
class MtconvertError(Exception):
"""Exception raised for errors in the mtconvert module.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
| Python | 0 | |
3adcefcad4fc3ecb85aa4a22e8b3c4bf5ca4e6f5 | Add tests for revision updates via import | test/integration/ggrc/converters/test_import_update.py | test/integration/ggrc/converters/test_import_update.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for bulk updates with CSV import."""
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 1)
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 2)
| # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
| Python | 0 |
f79d10de2adb99e3a3d07caa2a00359208186c15 | Add twext.python.datetime tests | twext/python/test/test_datetime.py | twext/python/test/test_datetime.py | ##
# Copyright (c) 2006-2010 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from datetime import date, datetime, timedelta
from twext.python.datetime import dateordatetime, timerange, utc
from twistedcaldav.test.util import TestCase, testUnimplemented
class DateTimeTests(TestCase):
def test_date_date(self):
d = date.today()
dodt = dateordatetime(d)
self.assertEquals(dodt.date(), d)
def test_date_date_tz(self):
d = date.today()
dodt = dateordatetime(d, defaultTZ=utc)
self.assertEquals(dodt.date(), d)
def test_date_datetime(self):
d = date.today()
dodt = dateordatetime(d)
self.assertEquals(dodt.datetime(), datetime(d.year, d.month, d.day))
def test_date_datetime_tz(self):
d = date.today()
dodt = dateordatetime(d, defaultTZ=utc)
self.assertEquals(dodt.datetime(), datetime(d.year, d.month, d.day, tzinfo=utc))
def test_datetime_date(self):
dt = datetime.now()
dodt = dateordatetime(dt)
self.assertEquals(dodt.date(), dt.date())
def test_datetime_datetime(self):
dt = datetime.now()
dodt = dateordatetime(dt)
self.assertEquals(dodt.datetime(), dt)
def test_datetime_datetime_tz(self):
dt = datetime.now()
dodt = dateordatetime(dt, defaultTZ=utc)
self.assertEquals(dodt.datetime(), dt)
def test_date_iCalendarString(self):
d = date(2010, 2, 22)
dodt = dateordatetime(d)
self.assertEquals(dodt.iCalendarString(), "20100222")
def test_datetime_iCalendarString(self):
dt = datetime(2010, 2, 22, 17, 44, 42, 98303)
dodt = dateordatetime(dt)
self.assertEquals(dodt.iCalendarString(), "20100222T174442")
def test_datetime_iCalendarString_utc(self):
dt = datetime(2010, 2, 22, 17, 44, 42, 98303, tzinfo=utc)
dodt = dateordatetime(dt)
self.assertEquals(dodt.iCalendarString(), "20100222T174442Z")
@testUnimplemented
def test_datetime_iCalendarString_tz(self):
# Need to test a non-UTC timezone also
raise NotImplementedError()
@testUnimplemented
def test_asTimeZone(self):
raise NotImplementedError()
@testUnimplemented
def test_asUTC(self):
raise NotImplementedError()
class TimeRangeTests(TestCase):
def test_start(self):
start = datetime.now()
tr = timerange(start=start)
self.assertEquals(tr.start(), start)
def test_start_none(self):
tr = timerange()
self.assertEquals(tr.start(), None)
def test_end(self):
end = datetime.now()
tr = timerange(end=end)
self.assertEquals(tr.end(), end)
def test_end_none(self):
tr = timerange()
self.assertEquals(tr.end(), None)
def test_end_none_duration(self):
duration = timedelta(seconds=8)
tr = timerange(duration=duration)
self.assertEquals(tr.end(), None)
def test_end_none_duration_start(self):
start = datetime.now()
duration = timedelta(seconds=8)
tr = timerange(start=start, duration=duration)
self.assertEquals(tr.end(), start + duration)
def test_duration(self):
duration = timedelta(seconds=8)
tr = timerange(duration=duration)
self.assertEquals(tr.duration(), duration)
def test_duration_none(self):
tr = timerange()
self.assertEquals(tr.duration(), None)
def test_duration_none_end(self):
end = datetime.now()
tr = timerange(end=end)
self.assertEquals(tr.duration(), None)
def test_duration_none_start_end(self):
start = datetime.now()
duration = timedelta(seconds=8)
end = start + duration
tr = timerange(start=start, end=end)
self.assertEquals(tr.duration(), duration)
@testUnimplemented
def test_overlapsWith(self):
# Need a few tests; combinations of:
# - start/end are None
# - overlapping and not
# - dates and datetimes
# - timezones
raise NotImplementedError()
| Python | 0.000006 | |
157cb4518412a6e6de9c3d0d64c68ac0af276c6a | Access checking unit tests added for FormPage view. | tests/app/soc/modules/gsoc/views/test_student_forms.py | tests/app/soc/modules/gsoc/views/test_student_forms.py | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for student forms view."""
from tests import profile_utils
from tests import test_utils
class FormPageTest(test_utils.GSoCDjangoTestCase):
"""Test student form page."""
def setUp(self):
self.init()
def testLoneUserAccessForbidden(self):
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testMentorAccessForbidden(self):
self.data.createMentor(self.org)
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testOrgAdminAccessForbidden(self):
self.data.createOrgAdmin(self.org)
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testHostAccessForbidden(self):
self.data.createHost()
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testStudentAccessForbidden(self):
# access should be forbidden because at this point students are not
# permitted to upload their forms
self.timeline.studentsAnnounced()
mentor = self._createNewMentor()
self.data.createStudentWithProject(self.org, mentor)
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testStudentAccessGranted(self):
self.timeline.formSubmission()
mentor = self._createNewMentor()
self.data.createStudentWithProject(self.org, mentor)
# check for enrollment form
url = self._getEnrollmentFormUrl()
response = self.get(url)
self._assertStudentFormsTemplatesUsed(response)
# check for tax form
url = self._getTaxFormUrl()
response = self.get(url)
self._assertStudentFormsTemplatesUsed(response)
def _getEnrollmentFormUrl(self):
"""Returns URL for the student enrollment form upload."""
return '/gsoc/student_forms/enrollment/' + self.gsoc.key().name()
def _getTaxFormUrl(self):
"""Returns URL for the student tax form upload."""
return '/gsoc/student_forms/tax/' + self.gsoc.key().name()
def _assertAccessForbiddenForUrl(self, url):
"""Asserts that GET request will return forbidden response
for the specified URL."""
response = self.get(url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def _assertStudentFormsTemplatesUsed(self, response):
"""Asserts that all the templates from the student forms were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response,
'v2/modules/gsoc/student_forms/base.html')
self.assertTemplateUsed(response, 'v2/modules/gsoc/_form.html')
def _createNewMentor(self):
"""Returns a newly created mentor."""
profile_helper = profile_utils.GSoCProfileHelper(self.gsoc, self.dev_test)
profile_helper.createOtherUser('mentor@example.com')
return profile_helper.createMentor(self.org)
| Python | 0 | |
e48caa4bb61cce466ad5eb9bffbfba8e33312474 | Add Python EC2 TerminateInstances example | python/example_code/ec2/terminate_instances.py | python/example_code/ec2/terminate_instances.py | # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def terminate_instances(instance_ids):
"""Terminate one or more Amazon EC2 instances
:param instance_ids: List of string IDs of EC2 instances to terminate
:return: List of state information for each instance specified in instance_ids.
If error, return None.
"""
# Terminate each instance in the argument list
ec2 = boto3.client('ec2')
try:
states = ec2.terminate_instances(InstanceIds=instance_ids)
except ClientError as e:
logging.error(e)
return None
return states['TerminatingInstances']
def main():
"""Exercise terminate_instances()"""
# Assign these values before running the program
ec2_instance_ids = ['EC2_INSTANCE_ID']
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Terminate the EC2 instance(s)
states = terminate_instances(ec2_instance_ids)
if states is not None:
logging.debug('Terminating the following EC2 instances')
for state in states:
logging.debug(f'ID: {state["InstanceId"]}')
logging.debug(f' Current state: Code {state["CurrentState"]["Code"]}, '
f'{state["CurrentState"]["Name"]}')
logging.debug(f' Previous state: Code {state["PreviousState"]["Code"]}, '
f'{state["PreviousState"]["Name"]}')
if __name__ == '__main__':
main()
| Python | 0 | |
34c0ca7ba0f8d2ac51583dfab4ea2f4cee7a62d5 | add script to read csv files to list | python/read_formated_txt_file/read_csv2list.py | python/read_formated_txt_file/read_csv2list.py | import csv
def csv_to_list(csv_file, delimiter=','):
"""
Reads in a CSV file and returns the contents as list,
where every row is stored as a sublist, and each element
in the sublist represents 1 cell in the table.
"""
with open(csv_file, 'r') as csv_con:
reader = csv.reader(csv_con, delimiter=delimiter)
return list(reader)
data = csv_to_list('./astro.csv')
print data | Python | 0 | |
d402c25a6b257778e08e6db2890ae575432daed0 | Add new linkedlist file for intersection | linkedlist/intersection.py | linkedlist/intersection.py | def intersection(h1, h2):
"""
This function takes two lists and returns the node they have in common, if any.
In this example:
1 -> 3 -> 5
\
7 -> 9 -> 11
/
2 -> 4 -> 6
...we would return 7.
Note that the node itself is the unique identifier, not the value of the node.
"""
count = 0
flag = None
h1_orig = h1
h2_orig = h2
while h1 or h2:
count += 1
if not flag and (h1.next is None or h2.next is None):
# We hit the end of one of the lists, set a flag for this
flag = (count, h1.next, h2.next)
if h1:
h1 = h1.next
if h2:
h2 = h2.next
long_len = count # Mark the length of the longer of the two lists
short_len = flag[0]
if flag[1] is None:
shorter = h1_orig
longer = h2_orig
elif flag[2] is None:
shorter = h2_orig
longer = h1_orig
while longer and shorter:
while long_len > short_len:
# force the longer of the two lists to "catch up"
longer = longer.next
long_len -= 1
if longer == shorter:
# The nodes match, return the node
return longer
else:
longer = longer.next
shorter = shorter.next
return None
class Node(object):
def __init__(self, val=None):
self.val = val
self.next = None
def test():
def printLinkedList(head):
string = ""
while head.next:
string += head.val + " -> "
head = head.next
string += head.val
print(string)
# 1 -> 3 -> 5
# \
# 7 -> 9 -> 11
# /
# 2 -> 4 -> 6
a1 = Node("1")
b1 = Node("3")
c1 = Node("5")
d = Node("7")
a2 = Node("2")
b2 = Node("4")
c2 = Node("6")
e = Node("9")
f = Node("11")
a1.next = b1
b1.next = c1
c1.next = d
a2.next = b2
b2.next = c2
c2.next = d
d.next = e
e.next = f
printLinkedList(a1)
printLinkedList(a2)
print(intersection(a1, a2))
assert intersection(a1, a2).val == d.val
test()
| Python | 0 | |
8f1cf446a0b602e6e64ccebaa794e7ec6a2f840d | add support routines for oversampling | compressible_fv4/initialization_support.py | compressible_fv4/initialization_support.py | """Routines to help initialize cell-average values by oversampling the
initial conditions on a finer mesh and averaging down to the requested
mesh"""
import mesh.fv as fv
def get_finer(myd):
mgf = myd.grid.fine_like(4)
fd = fv.FV2d(mgf)
for v in myd.names:
fd.register_var(v, myd.BCs[v])
fd.create()
return fd
def average_down(myd, fd):
"""average the fine data from fd into the coarser object, myd"""
for v in myd.names:
var = myd.get_var(v)
var[:,:] = fd.restrict(v, N=4)
| Python | 0 | |
4227cef6567023717c8d66f99ce776d9d8aa0929 | Add OS::Contrail::PhysicalRouter | contrail_heat/resources/physical_router.py | contrail_heat/resources/physical_router.py | from heat.engine import properties
from vnc_api import vnc_api
from contrail_heat.resources import contrail
import uuid
class HeatPhysicalRouter(contrail.ContrailResource):
PROPERTIES = (
NAME,
) = (
'name',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Physical Router name'),
update_allowed=True,
),
}
attributes_schema = {
"name": _("The name of the Physical Router."),
"fq_name": _("The FQ name of the Physical Router."),
"physical_interfaces": _("List of Physical Interfaces attached."),
"show": _("All attributes."),
}
def handle_create(self):
config_obj = self.vnc_lib().global_system_config_read(
fq_name=["default-global-system-config"])
pr_obj = vnc_api.PhysicalRouter(name=self.properties[self.NAME],
parent_obj=config_obj)
pr_uuid = self.vnc_lib().physical_router_create(pr_obj)
self.resource_id_set(pr_uuid)
def _show_resource(self):
pr_obj = self.vnc_lib().physical_router_read(id=self.resource_id)
dic = {}
dic['name'] = pr_obj.get_display_name()
dic['fq_name'] = pr_obj.get_fq_name_str()
dic['physical_interfaces'] = (
[pi['to'] for pi in pr_obj.get_physical_interfaces() or []])
return dic
def handle_delete(self):
try:
self.vnc_lib().physical_router_delete(id=self.resource_id)
except Exception:
pass
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
# TODO
pass
def resource_mapping():
return {
'OS::Contrail::PhysicalRouter': HeatPhysicalRouter,
}
| Python | 0.000001 | |
5cebd0b56f81dfc02feb5511dade82ebf6db99ff | add presence.py | litecord/presence.py | litecord/presence.py | '''
presence.py - presence management
Sends PRESENCE_UPDATE to clients when needed
'''
class PresenceManager:
def __init__(self, server):
self.server = server
async def update_presence(self, user_id, status):
'''
PresenceManager.update_presence(user_id, status)
Updates the presence of a user.
Sends a PRESENCE_UPDATE event to relevant clients.
'''
'''
????dummy code????
current_presence = self.presences.get(user_id)
new_presence = self.make_presence(status)
# something like this lol
user = await self.user.get_user(user_id)
for guild_id in user.guilds:
guild = await self.guilds.get_guild(guild_id)
for member in guild:
member = await self.guilds.get_member(guild_id, member_id)
c = await self.server.get_connection(member_id)
if c is not None:
await c.dispatch('PRESENCE_UPDATE', self.diff(current_presence, new_presence))
'''
pass
| Python | 0.000002 | |
02c59aa1d2eec43442f4bcf1d6662535e094bffd | add move pics by modified date | media/pic_date_move.py | media/pic_date_move.py | '''
File: pic_date_move.py
Created: 2021-04-01 10:46:38
Modified: 2021-04-01 10:46:43
Author: mcxiaoke (github@mcxiaoke.com)
License: Apache License 2.0
'''
import os
import sys
import shutil
from datetime import date, datetime
import pathlib
def move_one_file(src_file):
    """Plan the move of one file into a sibling YYYYMMDD directory.

    The target directory (named after the file's modification date) is
    created next to the file's parent if needed. The rename itself is
    still disabled — this is a dry run that only prints the destination.
    """
    source = pathlib.Path(src_file)
    parent = source.parent
    modified = datetime.fromtimestamp(source.stat().st_mtime)
    target_dir = pathlib.Path(parent.parent, modified.strftime('%Y%m%d'))
    target = pathlib.Path(target_dir, source.name)
    # Nothing to do when the file already lives in its date directory.
    if target_dir.exists() and target_dir.samefile(parent):
        return
    target_dir.mkdir(parents=True, exist_ok=True)
    print('Move to', target)
    # source.rename(target)  # rename intentionally left disabled (dry run)
def move_by_date(src_dir):
    """Walk *src_dir* recursively and process every file with move_one_file.

    Each file is (dry-run) moved into a YYYYMMDD directory derived from
    its modification time; see :func:`move_one_file`.
    """
    for root, _, files in os.walk(src_dir):
        print(root)
        for name in files:
            move_one_file(pathlib.Path(root, name))


if __name__ == '__main__':
    # Fix: guard the CLI entry point. The original called
    # move_by_date(sys.argv[1]) unconditionally, which walked the tree on
    # a mere import and raised IndexError when no argument was supplied.
    move_by_date(sys.argv[1])
| Python | 0 | |
3ae5dc9a4325251033d3db9cae0d80eb4812815d | Add lazy iterator | minio/lazy_iterator.py | minio/lazy_iterator.py | # Minimal Object Storage Library, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'minio'
class LazyIterator(object):
    """Iterator pulling values on demand from a chain of populators.

    A *populator* is a zero-argument callable returning
    ``(values, next_populator)``: a list of items to yield and either the
    populator for the following batch or ``None`` when exhausted.
    """

    def __init__(self, populator):
        self.populator = populator
        self.values = []

    def __iter__(self):
        return self

    def next(self):
        """Return the next value, fetching a new batch when the buffer is empty.

        Bug fix: buffered values are now served *before* consulting the
        populator. Previously, once a populator returned ``None`` as its
        successor, any values still in the buffer were dropped and
        StopIteration was raised one element too early.
        """
        if self.values:
            return self.values.pop(0)
        if self.populator is None:
            raise StopIteration()
        self.values, self.populator = self.populator()
        if self.values:
            return self.values.pop(0)
        raise StopIteration()

    # Python 3 spells the iterator protocol __next__; alias it so the
    # class works on both interpreters.
    __next__ = next
| Python | 0.00005 | |
860f93d2bb4c08b63c64fe9e5b7b620b824d8490 | test ++/--/+ | pychecker/pychecker2/utest/ops.py | pychecker/pychecker2/utest/ops.py | from pychecker2 import TestSupport
from pychecker2 import OpChecks
class OpTests(TestSupport.WarningTester):
    """Warnings for C-style unary operator usage."""

    def testOperator(self):
        # '--x' and '++x' parse as doubled unary minus/plus in Python;
        # each should raise the `operator` warning on line 2.
        for oper in ('--', '++'):
            source = 'def f(x):\n' ' return %sx' % oper
            self.warning(source, 2, OpChecks.OpCheck.operator, oper)

    def testOperatorPlus(self):
        # A lone unary '+' is almost always a typo.
        self.warning('def f(x):\n' ' return +x',
                     2, OpChecks.OpCheck.operatorPlus)
| Python | 0.000069 | |
cfb77bbe0c77a67c536614bf9fece1e9fcde4eb0 | make find_configs filter name more descriptive | crosscat/utils/experiment_utils.py | crosscat/utils/experiment_utils.py | import os
#
import crosscat.utils.file_utils as file_utils
import crosscat.utils.geweke_utils as geweke_utils
import crosscat.utils.general_utils as general_utils
result_filename = geweke_utils.summary_filename
writer = geweke_utils.write_result
reader = geweke_utils.read_result
runner = geweke_utils.run_geweke
def find_configs(dirname, filename=result_filename):
    """Return paths of files named *filename* exactly one directory below
    *dirname*.

    Fix: the original used Python-2-only tuple-parameter lambdas
    (``lambda (root, ds, fs): ...``), a SyntaxError on Python 3 (PEP 3113).
    Indexing the os.walk tuple works on both interpreters.
    """
    root_has_filename = lambda walk_tuple: walk_tuple[2].count(filename)
    get_filepath = lambda walk_tuple: os.path.join(walk_tuple[0], filename)
    def is_this_dirname(filepath):
        # Keep only files whose directory is a direct child of dirname.
        _dir, _file = os.path.split(filepath)
        return os.path.split(_dir)[0] == dirname
    tuples = filter(root_has_filename, os.walk(dirname))
    filepaths = map(get_filepath, tuples)
    filepaths = filter(is_this_dirname, filepaths)
    # list() keeps the Python-2 return type (a list) under Python 3 too.
    return list(filepaths)
def read_all_configs(dirname='.'):
    """Load and return the config dict stored in every result file under
    *dirname*."""
    def read_config(filepath):
        # Each pickled result carries its generating config under 'config'.
        result = file_utils.unpickle(filepath, dir=dirname)
        return result['config']
    return map(read_config, find_configs(dirname))
def read_results(config_list, *args, **kwargs):
    """Read the stored result for each config; accepts one config or a list."""
    config_list = general_utils.ensure_listlike(config_list)
    return [reader(config, *args, **kwargs) for config in config_list]
def write_results(results, *args, **kwargs):
    """Persist each result via the module-level writer."""
    for result in results:
        writer(result, *args, **kwargs)
    return
def do_experiments(runner, writer, config_list, *args, **kwargs):
    """Run *runner* on each config and persist its result with *writer*.

    Accepts a single config or a list of configs.
    """
    for config in general_utils.ensure_listlike(config_list):
        writer(runner(config), *args, **kwargs)
    return
def args_to_config(args):
    """Parse a geweke-style argv list into a validated config dict."""
    parser = geweke_utils.generate_parser()
    args = parser.parse_args(args)
    # arbitrate_args validates and fills in derived settings.
    args = geweke_utils.arbitrate_args(args)
    return args.__dict__
if __name__ == '__main__':
    # Grid of experiment settings to run (argv-style lists).
    args_list = [
        ['--num_rows', '10', '--num_cols', '2', '--num_iters', '300', ],
        ['--num_rows', '10', '--num_cols', '3', '--num_iters', '300', ],
        ['--num_rows', '20', '--num_cols', '2', '--num_iters', '300', ],
        ['--num_rows', '20', '--num_cols', '3', '--num_iters', '300', ],
    ]
    configs_list = map(args_to_config, args_list)
    do_experiments(runner, writer, configs_list)

    # Demo read-back: reload the configs from disk and fetch the results
    # for the three-column experiments only.
    configs_list = read_all_configs()
    has_three_cols = lambda config: config['num_cols'] == 3
    configs_list = filter(has_three_cols, configs_list)
    results = read_results(configs_list)
| import os
#
import crosscat.utils.file_utils as file_utils
import crosscat.utils.geweke_utils as geweke_utils
import crosscat.utils.general_utils as general_utils
result_filename = geweke_utils.summary_filename
writer = geweke_utils.write_result
reader = geweke_utils.read_result
runner = geweke_utils.run_geweke
def find_configs(dirname, filename=result_filename):
    """Return paths of files named *filename* exactly one directory below
    *dirname*.

    NOTE(review): the tuple-parameter lambdas below are Python-2-only
    syntax, removed by PEP 3113.
    """
    root_has_filename = lambda (root, ds, filenames): filenames.count(filename)
    get_filepath = lambda (root, ds, fs): os.path.join(root, filename)
    def is_one_deep(filepath):
        # Keep only files whose directory is a direct child of dirname.
        _dir, _file = os.path.split(filepath)
        return os.path.split(_dir)[0] == dirname
    tuples = filter(root_has_filename, os.walk(dirname))
    filepaths = map(get_filepath, tuples)
    filepaths = filter(is_one_deep, filepaths)
    return filepaths
def read_all_configs(dirname='.'):
    """Load and return the config dict stored in every result file under
    *dirname*."""
    def read_config(filepath):
        # Each pickled result carries its generating config under 'config'.
        result = file_utils.unpickle(filepath, dir=dirname)
        config = result['config']
        return config
    filepaths = find_configs(dirname)
    config_list = map(read_config, filepaths)
    return config_list
def read_results(config_list, *args, **kwargs):
    """Read the stored result for each config; accepts one config or a list."""
    _read_result = lambda config: reader(config, *args, **kwargs)
    config_list = general_utils.ensure_listlike(config_list)
    results = map(_read_result, config_list)
    return results
def write_results(results, *args, **kwargs):
    """Persist each result via the module-level writer."""
    _write_result = lambda result: writer(result, *args, **kwargs)
    map(_write_result, results)
    return
def do_experiments(runner, writer, config_list, *args, **kwargs):
    """Run *runner* on each config and persist its result with *writer*."""
    def do_experiment(config):
        result = runner(config)
        writer(result, *args, **kwargs)
        return
    config_list = general_utils.ensure_listlike(config_list)
    map(do_experiment, config_list)
    return
def args_to_config(args):
    """Parse a geweke-style argv list into a validated config dict."""
    parser = geweke_utils.generate_parser()
    args = parser.parse_args(args)
    # arbitrate_args validates and fills in derived settings.
    args = geweke_utils.arbitrate_args(args)
    return args.__dict__
if __name__ == '__main__':
    # Grid of experiment settings to run (argv-style lists).
    args_list = [
        ['--num_rows', '10', '--num_cols', '2', '--num_iters', '300', ],
        ['--num_rows', '10', '--num_cols', '3', '--num_iters', '300', ],
        ['--num_rows', '20', '--num_cols', '2', '--num_iters', '300', ],
        ['--num_rows', '20', '--num_cols', '3', '--num_iters', '300', ],
    ]
    configs_list = map(args_to_config, args_list)
    do_experiments(runner, writer, configs_list)

    # Demo read-back: reload the configs and fetch three-column results.
    configs_list = read_all_configs()
    has_three_cols = lambda config: config['num_cols'] == 3
    configs_list = filter(has_three_cols, configs_list)
    results = read_results(configs_list)
| Python | 0.000001 |
58fee826ab5298f7de036bf320bbc109b853eec8 | Add null check for sds sync thread which can be optional | tendrl/commons/manager/__init__.py | tendrl/commons/manager/__init__.py | import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
    """Coordinates the lifecycle of the central-store, SDS-sync and
    job-consumer threads.

    The SDS sync thread is optional — callers may pass None — so every
    lifecycle method guards against a missing thread.
    """
    def __init__(
        self,
        sds_sync_thread,
        central_store_thread,
    ):
        # sds_sync_thread may be None (optional component).
        self._central_store_thread = central_store_thread
        self._sds_sync_thread = sds_sync_thread
        self._job_consumer_thread = jobs.JobConsumerThread()

    def stop(self):
        """Stop all managed threads."""
        LOG.info("%s stopping" % self.__class__.__name__)
        self._job_consumer_thread.stop()
        if self._sds_sync_thread:
            self._sds_sync_thread.stop()
        self._central_store_thread.stop()

    def start(self):
        """Start all managed threads."""
        LOG.info("%s starting" % self.__class__.__name__)
        self._central_store_thread.start()
        if self._sds_sync_thread:
            self._sds_sync_thread.start()
        self._job_consumer_thread.start()

    def join(self):
        """Block until all managed threads have finished."""
        LOG.info("%s joining" % self.__class__.__name__)
        self._job_consumer_thread.join()
        if self._sds_sync_thread:
            self._sds_sync_thread.join()
        self._central_store_thread.join()
| import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
    """Coordinates the lifecycle of the central-store, SDS-sync and
    job-consumer threads.

    Fix: the SDS sync thread is optional (callers may pass None), so
    stop/start/join now guard against a missing thread instead of raising
    AttributeError on None.
    """

    def __init__(
        self,
        sds_sync_thread,
        central_store_thread,
    ):
        # sds_sync_thread may be None (optional component).
        self._central_store_thread = central_store_thread
        self._sds_sync_thread = sds_sync_thread
        self._job_consumer_thread = jobs.JobConsumerThread()

    def stop(self):
        """Stop all managed threads."""
        LOG.info("%s stopping" % self.__class__.__name__)
        self._job_consumer_thread.stop()
        if self._sds_sync_thread:
            self._sds_sync_thread.stop()
        self._central_store_thread.stop()

    def start(self):
        """Start all managed threads."""
        LOG.info("%s starting" % self.__class__.__name__)
        self._central_store_thread.start()
        if self._sds_sync_thread:
            self._sds_sync_thread.start()
        self._job_consumer_thread.start()

    def join(self):
        """Block until all managed threads have finished."""
        LOG.info("%s joining" % self.__class__.__name__)
        self._job_consumer_thread.join()
        if self._sds_sync_thread:
            self._sds_sync_thread.join()
        self._central_store_thread.join()
| Python | 0 |
4928fbe97dfa59b8a4c1e9726657df5269f2f900 | Test variable records and JSON | tests/test_variable_data_record.py | tests/test_variable_data_record.py | import os
import sys
import json
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import unittest
import meterbus
from meterbus.exceptions import *
from json import encoder
class TestSequenceFunctions(unittest.TestCase):
    """Checks JSON serialization of each record parsed from one fixed
    M-Bus frame.

    Improvement: the twelve near-identical test bodies now share the
    `_assert_record` helper; the public test method names are unchanged.
    """

    def setUp(self):
        # Captured M-Bus long frame containing twelve data records.
        self.frame = "\x68\x53\x53\x68\x08\x05\x72\x34\x08\x00\x54\x96\x15\x32" \
                     "\x00\xf2\x00\x00\x00\x01\xfd\x1b\x00\x02\xfc\x03\x48\x52" \
                     "\x25\x74\xd4\x11\x22\xfc\x03\x48\x52\x25\x74\xc8\x11\x12" \
                     "\xfc\x03\x48\x52\x25\x74\xb4\x16\x02\x65\xd0\x08\x22\x65" \
                     "\x70\x08\x12\x65\x23\x09\x01\x72\x18\x42\x65\xe4\x08\x82" \
                     "\x01\x65\xdd\x08\x0c\x78\x34\x08\x00\x54\x03\xfd\x0f\x00" \
                     "\x00\x04\x1f\x5d\x16"
        self.frame = meterbus.load(self.frame)

    def _assert_record(self, index, expected):
        """Assert that record *index* serializes via to_JSON to *expected*."""
        actual = json.loads(self.frame.records[index].to_JSON())
        self.assertEqual(actual, expected)

    def test_json_record0(self):
        self._assert_record(0, {
            "value": 0,
            "unit": "MeasureUnit.NONE",
            "type": "VIFUnitExt.DIGITAL_INPUT",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record1(self):
        self._assert_record(1, {
            "value": 45.64,
            "unit": "%RH",
            "type": "VIFUnit.VARIABLE_VIF",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record2(self):
        self._assert_record(2, {
            "value": 45.52,
            "unit": "%RH",
            "type": "VIFUnit.VARIABLE_VIF",
            "function": "FunctionType.MINIMUM_VALUE"
        })

    # Skip record 3 since it has a very strange float
    # 58.120000000000005

    def test_json_record4(self):
        self._assert_record(4, {
            "value": 22.56,
            "unit": "MeasureUnit.C",
            "type": "VIFUnit.EXTERNAL_TEMPERATURE",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record5(self):
        self._assert_record(5, {
            "value": 21.60,
            "unit": "MeasureUnit.C",
            "type": "VIFUnit.EXTERNAL_TEMPERATURE",
            "function": "FunctionType.MINIMUM_VALUE"
        })

    def test_json_record6(self):
        self._assert_record(6, {
            "value": 23.39,
            "unit": "MeasureUnit.C",
            "type": "VIFUnit.EXTERNAL_TEMPERATURE",
            "function": "FunctionType.MAXIMUM_VALUE"
        })

    def test_json_record7(self):
        self._assert_record(7, {
            "value": 86400,
            "unit": "MeasureUnit.SECONDS",
            "type": "VIFUnit.AVG_DURATION",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record8(self):
        self._assert_record(8, {
            "value": 22.76,
            "unit": "MeasureUnit.C",
            "type": "VIFUnit.EXTERNAL_TEMPERATURE",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record9(self):
        self._assert_record(9, {
            "value": 22.69,
            "unit": "MeasureUnit.C",
            "type": "VIFUnit.EXTERNAL_TEMPERATURE",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record10(self):
        self._assert_record(10, {
            "value": 54000834,
            "unit": "MeasureUnit.NONE",
            "type": "VIFUnit.FABRICATION_NO",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })

    def test_json_record11(self):
        self._assert_record(11, {
            "value": 262144,
            "unit": "MeasureUnit.NONE",
            "type": "VIFUnitExt.SOFTWARE_VERSION",
            "function": "FunctionType.INSTANTANEOUS_VALUE"
        })


if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
27cce3b6708a17f813f0a82871c988fec3a36517 | Add quart to contrib (#300) | rollbar/contrib/quart/__init__.py | rollbar/contrib/quart/__init__.py | """
Integration with Quart
"""
from quart import request
import rollbar
def report_exception(app, exception):
    """Quart error handler: report the active exception to Rollbar.

    *app* and *exception* are accepted to match Quart's error-handler
    signature; rollbar reads sys.exc_info() itself when no exc_info is
    passed, and the current request is attached for context.
    """
    rollbar.report_exc_info(request=request)
def _hook(request, data):
data['framework'] = 'quart'
if request:
data['context'] = str(request.url_rule)
# Install the hook so every rollbar payload gets the quart annotations.
rollbar.BASE_DATA_HOOK = _hook
| Python | 0 | |
e5a39d4e17a0555cb242731b34f0ee480367b4fe | Add task that sends out notifications | foireminder/foireminder/reminders/tasks.py | foireminder/foireminder/reminders/tasks.py | from django.utils import timezone
from .models import ReminderRequest, EmailReminder
def send_todays_notifications(self=None):
    """Notify every email subscriber of each reminder that starts today.

    Fixes: ``today.da`` was a typo for ``today.day`` and raised
    AttributeError at runtime. The unused *self* parameter is kept but
    made optional, so existing bound-style invocations keep working while
    plain calls no longer need a dummy argument.
    """
    today = timezone.now()
    reminders = ReminderRequest.objects.filter(
        start__year=today.year,
        start__month=today.month,
        start__day=today.day,
    )
    for reminder in reminders:
        # Every email subscription attached to this reminder's rule.
        for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
            subscriber.send_notification()
| Python | 0.999047 | |
9efda5a5a2b7aa16423e68fb10e1a0cb94c1f33e | Create rectangles_into_squares.py | rectangles_into_squares.py | rectangles_into_squares.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Rectangles into Squares
#Problem level: 6 kyu
def sqInRect(lng, wdth):
    """Cut an lng x wdth rectangle into the largest possible squares.

    Returns the square side lengths, largest first, or None when the
    input is already a square.
    """
    if lng == wdth:
        return None
    sides = []
    a, b = lng, wdth
    while a and b:
        # Keep a as the longer side, then carve a b x b square off it.
        if a < b:
            a, b = b, a
        a -= b
        sides.append(b)
    return sides
| Python | 0.999056 | |
c5fc38749dcf966787f6c6a201e23c310a22358c | Add script to update UniProt protein names | src/main/resources/org/clulab/reach/update_uniprot.py | src/main/resources/org/clulab/reach/update_uniprot.py | import os
import re
import csv
import requests
import itertools
from gilda.generate_terms import parse_uniprot_synonyms
# Base URL for UniProt
uniprot_url = 'http://www.uniprot.org/uniprot'
# Get protein names, gene names and the organism
columns = ['id', 'protein%20names', 'genes', 'organism']
# Only get reviewed entries and use TSV format
params = {
'sort': 'id',
'desc': 'no',
'compress': 'no',
'query': 'reviewed:yes',
'format': 'tab',
'columns': ','.join(columns)
}
def process_row(row):
    """Expand one UniProt TSV row into (synonym, uniprot_id, organism) tuples.

    *row* is (entry id, protein names, gene names, organism names). The
    result is the cross-product of gene/protein synonyms with organism
    synonyms, excluding EC codes and synonyms longer than five words.
    """
    entry, protein_names, genes, organisms = row
    # Gene names are space separated
    gene_synonyms = genes.split(' ') if genes else []
    # We use a more complex function to parse protein synonyms which appear
    # as "first synonym (second synonym) (third synonym) ...".
    protein_synonyms = parse_uniprot_synonyms(protein_names) \
        if protein_names else []
    # We remove EC codes as synonyms because they always refer to higher-level
    # enzyme categories shared across species
    protein_synonyms = [p for p in protein_synonyms
                        if not p.startswith('EC ')]
    # Organisms and their synonyms also appear in the format that protein
    # synonyms do
    organism_synonyms = parse_uniprot_synonyms(organisms)
    # ... except we need to deal with a special case in which the first
    # organism name has a strain name in parantheses after it, and make sure
    # that the strain name becomes part of the first synonym.
    if len(organism_synonyms) >= 2 and \
            organism_synonyms[1].startswith('strain'):
        organism_synonyms[0] = '%s (%s)' % (organism_synonyms[0],
                                            organism_synonyms[1])
        organism_synonyms = [organism_synonyms[0]] + organism_synonyms[2:]
    # We now take each gene synonym and each organism synonym and create all
    # combinations of these as entries.
    entries = []
    for gene, organism in itertools.product(gene_synonyms + protein_synonyms,
                                            organism_synonyms):
        # We skip synonyms that are more than 5 words in length (consistent
        # with original KB construction).
        if len(gene.split(' ')) > 5:
            continue
        entries.append((gene, entry, organism))
    return entries
if __name__ == '__main__':
    # Download the reviewed UniProt dump once; reuse the cached TSV after.
    if not os.path.exists('uniprot_entries.tsv'):
        res = requests.get(uniprot_url, params=params)
        res.raise_for_status()
        with open('uniprot_entries.tsv', 'w') as fh:
            fh.write(res.text)
    processed_entries = []
    with open('uniprot_entries.tsv', 'r') as fh:
        reader = csv.reader(fh, delimiter='\t')
        # Skip the header row.
        next(reader)
        for row in reader:
            processed_entries += process_row(row)
    # We sort the entries first by the synonym but in a way that special
    # characters and capitalization is ignored, then sort by ID and then
    # by organism.
    processed_entries = sorted(processed_entries,
                               key=lambda x: (re.sub('[^A-Za-z0-9]', '',
                                                     x[0]).lower(), x[1],
                                              x[2]))
    with open('kb/uniprot-proteins.tsv.update', 'w') as fh:
        writer = csv.writer(fh, delimiter='\t')
        for entry in processed_entries:
            writer.writerow(entry)
| Python | 0 | |
aeadfbd4ae1f915291328f040cda54f309743024 | Add main application code | oline-gangnam-style.py | oline-gangnam-style.py | from jinja2 import Environment, FileSystemLoader
import json
import os
import sys
# Render one ircd config per server listed in the JSON configuration.
env = Environment(loader=FileSystemLoader("."))
template = env.get_template('ircd.conf.jinja')

config = {}

# Config path comes from argv[1], defaulting to config.json.
with open(sys.argv[1] if len(sys.argv) > 1 else "config.json", "r") as fin:
    config = json.loads(fin.read())

network = config["network"]

# Fix: create the output directory up front instead of crashing with an
# IOError when 'confs' does not exist (this also uses the os import).
if not os.path.isdir("confs"):
    os.makedirs("confs")

for server in config["servers"]:
    with open(os.path.join("confs", server["name"] + ".conf"), "w") as fout:
        # locals() exposes `network` and `server` (among others) to the
        # template, matching the original rendering context.
        fout.write(template.render(**locals()))
| Python | 0.000001 | |
7a6b5396ce760eaa206bfb9b556a374c9c17f397 | Add DecisionTree estimator. | bike-sharing/2-decision-tree.py | bike-sharing/2-decision-tree.py | import math
import argparse
from datetime import datetime
import numpy as np
from sklearn import cross_validation
from sklearn import tree
from sklearn import metrics
def load_data(path, **kwargs):
    """Load a delimited text file into a numpy array (np.loadtxt wrapper)."""
    return np.loadtxt(path, **kwargs)
def save_data(path, data, **kwargs):
    """Write array *data* as text to *path* (np.savetxt wrapper)."""
    np.savetxt(path, data, **kwargs)
def hour_from_dt_string(dt_string):
    """Return the hour (0-23) of a 'YYYY-MM-DD HH:MM:SS' timestamp string."""
    parsed = datetime.strptime(dt_string, '%Y-%m-%d %H:%M:%S')
    return parsed.hour
def preprocessing(X, y):
    """Feature-engineering hook; currently passes both inputs through unchanged.

    Fix: dropped the unused ``is_seasons`` allocation left over from an
    abandoned one-hot season encoding.
    """
    return X, y
def cv(estimator, X, y):
    """10-fold cross-validation of *estimator*, printing each fold's RMSLE
    and the mean score.

    Fixes: the fold count was computed from the module-level
    ``train_dataset`` instead of the *X* actually passed in, so the
    function only worked by accident for that one global; the Python-2
    print statements are now the parenthesized form, valid on both 2 and 3.
    """
    k_fold = cross_validation.KFold(n=len(X), n_folds=10, indices=True)
    total = 0.0
    for train_idx, test_idx in k_fold:
        pred = estimator.fit(X[train_idx], y[train_idx]).predict(X[test_idx])
        # Clamp negative predictions to zero before the log transform.
        pred = np.where(pred > 0, pred, 0).astype(np.int)
        score = math.sqrt(metrics.mean_squared_error(np.log(y[test_idx] + 1),
                                                     np.log(pred + 1.0)))
        total += score
        print('Score: {:.4f}'.format(score))
    print('Average score: {:.4f}'.format(total / len(k_fold)))
def loss_func(y_real, y_predicted):
    """Root mean squared logarithmic error between true and predicted values."""
    log_true = np.log(y_real + 1)
    log_pred = np.log(y_predicted + 1)
    return math.sqrt(metrics.mean_squared_error(log_true, log_pred))
if __name__ == '__main__':
    # Command arguments
    parser = argparse.ArgumentParser(description='bike-sharing estimator')
    parser.add_argument('--cv', dest='cv', action='store_const', const=True,
                        default=False, help='Do cross validation')
    parser.add_argument('--no-test', dest='out', action='store_const',
                        const=False, default=True, help='No test dataset')
    args = parser.parse_args()

    # Input: column 0 (the datetime string) is converted to hour-of-day.
    common_input_options = {'delimiter': ',', 'skiprows': 1,
                            'converters': {0: hour_from_dt_string} }
    train_dataset = load_data('data/train.csv', usecols=(0,1,2,3,4,5,6,7,8,11),
                              **common_input_options)
    test_dataset = load_data('data/test.csv', usecols=(0,1,2,3,4,5,6,7,8),
                             **common_input_options)
    # Re-read column 0 raw (as strings) for the output's datetime column.
    common_input_options['converters'] = {}
    out_column = load_data('data/test.csv', usecols=(0,), dtype=str,
                           **common_input_options)

    # Data preprocessing: last training column is the target count.
    X_train, y_train = preprocessing(train_dataset[:,:-1], train_dataset[:,-1])
    X_test, y_test = preprocessing(test_dataset, None)

    # The interesting part
    estimator = tree.DecisionTreeRegressor(max_depth=12)
    if args.cv:
        cv(estimator, X_train, y_train)
    if args.out:
        results = estimator.fit(X_train, y_train).predict(X_test)
        # Counts must be positive integers for the submission format.
        results = np.where(results > 0, results, 0.01).astype(np.int)

        # Output
        save_data('data/out.csv', np.column_stack((out_column.T, results.T)),
                  delimiter=',', header='datetime,count', fmt=('%s', '%s'),
                  comments='')
| Python | 0 | |
c58c58d5bf1394e04e30f5eeb298818558be027f | Add directory for tests of rules removin | tests/rules_tests/clearAfterNonTermRemove/__init__.py | tests/rules_tests/clearAfterNonTermRemove/__init__.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy
""" | Python | 0 | |
9a7091c1502b9758c1492a1c99ace7d4ad74026c | move integer_divions to syntax | tests/pyccel/parser/scripts/syntax/integer_division.py | tests/pyccel/parser/scripts/syntax/integer_division.py | 5 // 3
a // 3
5 // b
a // b
5.// 3.
a // 3.
5.// b
a // b
| Python | 0.000022 | |
17e0b81463e3c4c9b62f95f40912b270652a8e63 | Create new package (#6376) | var/spack/repos/builtin/packages/r-ggridges/package.py | var/spack/repos/builtin/packages/r-ggridges/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGgridges(RPackage):
    """Ridgeline plots provide a convenient way of visualizing changes in
    distributions over time or space."""

    homepage = "https://cran.r-project.org/web/packages/ggridges/index.html"
    url      = "https://cran.r-project.org/src/contrib/ggridges_0.4.1.tar.gz"
    list_url = "https://cran.rstudio.com/src/contrib/Archive/ggridges"

    # Release tarball checksums.
    version('0.4.1', '21d53b3f7263beb17f629f0ebfb7b67a')
    version('0.4.0', 'da94ed1ee856a7fa5fb87712c84ec4c9')

    # Pinned to the R 3.4 series.
    depends_on('r@3.4.0:3.4.9')
    depends_on('r-ggplot2', type=('build', 'run'))
| Python | 0 | |
1c2292dcd47865a3dbd3f7b9adf53433f6f34770 | Create new package. (#6215) | var/spack/repos/builtin/packages/r-timedate/package.py | var/spack/repos/builtin/packages/r-timedate/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTimedate(RPackage):
    """Environment for teaching "Financial Engineering and Computational
    Finance". Managing chronological and calendar objects."""

    homepage = "https://cran.r-project.org/package=timeDate"
    url      = "https://cran.r-project.org/src/contrib/timeDate_3012.100.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/timeDate"

    # Release tarball checksum.
    version('3012.100', '9f69d3724efbf0e125e6b8e6d3475fe4')
| Python | 0.000002 | |
5ba4fce42892634213bede09759bbca1cd56e346 | add package py-brian2 (#3617) | var/spack/repos/builtin/packages/py-brian2/package.py | var/spack/repos/builtin/packages/py-brian2/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBrian2(PythonPackage):
    """A clock-driven simulator for spiking neural networks"""

    homepage = "http://www.briansimulator.org"
    url = "https://pypi.io/packages/source/B/Brian2/Brian2-2.0.1.tar.gz"

    version('2.0.1', 'df5990e9a71f7344887bc02f54dfd0f0')
    version('2.0rc3', '3100c5e4eb9eb83a06ff0413a7d43152')

    # Enables documentation/test extras (pulls in nose and sphinx below).
    variant('docs', default=False)

    # depends on py-setuptools@6: for windows, if spack targets windows,
    # this will need to be added here
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy@1.8.2:', type=('build', 'run'))
    depends_on('py-sympy@0.7.6:', type=('build', 'run'))
    depends_on('py-pyparsing', type=('build', 'run'))
    depends_on('py-jinja2@2.7:', type=('build', 'run'))
    depends_on('py-cpuinfo@0.1.6:', type=('build', 'run'))
    # depends_on('py-nosetests@1.0:', type=('build', 'run')) # extra test
    depends_on('py-nosetests@1.0:', type=('build', 'run'), when='+docs')
    depends_on('py-sphinx@1.4.2:', type=('build', 'run'), when='+docs')
| Python | 0 | |
26525354e7bf2465a561a5172a0d9fef4205e77d | move column defs to singleton object | chart06columns.py | chart06columns.py | '''define columns in all reports produced by chart06'''
import numpy as np
# all possible column definition
# Map of column key -> [width, printf format, (header line 1, header
# line 2), legend text] used when laying out the chart06 reports.
_defs = {
    'median_absolute_error': [6, '%6d', (' ', 'MAE'), 'median absolute error'],
    'model': [5, '%5s', (' ', 'model'),
              'model name (en = elastic net, gd = gradient boosting, rf = random forests)'],
    'n_months_back': [2, '%2d', (' ', 'bk'), 'number of mnths back for training'],
    'max_depth': [4, '%4d', (' ', 'mxd'), 'max depth of any individual decision tree'],
    'n_estimators': [4, '%4d', (' ', 'next'), 'number of estimators (= number of trees)'],
    'max_features': [4, '%4s', (' ', 'mxft'), 'maximum number of features examined to split a node'],
    'learning_rate': [4, '%4.1f', (' ', 'lr'), 'learning rate for gradient boosting'],
    'alpha': [5, '%5.2f', (' ', 'alpha'), 'constant multiplying penalty term for elastic net'],
    'l1_ratio': [4, '%4.2f', (' ', 'l1'), 'l1_ratio mixing L1 and L2 penalties for elastic net'],
    'units_X': [6, '%6s', (' ', 'unitsX'), 'units for the x value; either natural (nat) or log'],
    'units_y': [6, '%6s', (' ', 'unitsY'), 'units for the y value; either natural (nat) or log'],
    'validation_month': [6, '%6d', ('vald', 'month'), 'month used for validation'],
    'rank': [4, '%4d', (' ', 'rank'), 'rank within validation month; 1 == lowest MAE'],
    'median_price': [6, '%6d', ('median', 'price'), 'median price in the validation month'],
    'mae_validation': [6, '%6d', ('vald ', 'MAE'), 'median absolute error in validation month'],
    'mae_next': [6, '%6d', ('next ', 'MAE'),
                 'median absolute error in test month (which follows the validation month)'],
    'note': [15, '%15s', (' ', 'note'),
             'when provided, the next MAE column contains the specified value'],
    'rank_index': [5, '%5d', ('rank', 'index'), 'ranking of model performance in the validation month; 0 == best'],
    'weight': [6, '%6.4f', (' ', 'weight'), 'weight of the model in the ensemble method'],
}
def defs_for_columns(*key_list):
    """Return [key, width, fmt, headers, legend] rows for *key_list*, in order."""
    rows = []
    for key in key_list:
        rows.append([key] + _defs[key])
    return rows
def replace_by_spaces(k, v):
    """Return True when value *v* should be rendered as blanks (a float NaN)."""
    # Only float NaN qualifies; every other value is printed as-is.
    return isinstance(v, float) and bool(np.isnan(v))
| Python | 0.000048 | |
32740172d4258a95145a5bb68be315fe1640db23 | Add alpha version of bootstraps script | bootstraps.py | bootstraps.py | '''
Does N times random stacks of X maps of large L in pixels.
At each stacks it gets the central temperature, makes a histogram for all
stacks, then fits a normal distribution for the histogram.
'''
N = 100000
X = 10
L = 16
import stacklib as sl
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import os
# Input data products: map, weights, beam profile and point-source mask.
path = os.environ["HOME"] + '/FILES/'
m = path + 'ACT_148_equ_season_3_1way_v3_src_free.fits'
w = path + 'ACT_148_equ_season_3_1way_calgc_strictcuts2_weights.fits'
b = path + 'profile_AR1_2009_pixwin_130224.txt'
s = path + 'Equa_mask_15mJy.fits'

# Equatorial strip (degrees) from which the random stacks are drawn.
RA0 = 55.
RA1 = 324.
DEC0 = -1.5
DEC1 = 1.5

# Filter the full map once up front; all submaps are cut from the result.
M = sl.StackMap(m,w,b,s,RA0,RA1,DEC0,DEC1)
M.squeezefullmap()
M.filterfullmap()
M.unsqueezefullmap()

# Central temperatures collected across all N stacks.
DeltaTs = []
def onestack(X, L):
    """Stack X random submaps of side L pixels and record the central value.

    Appends the stacked map's central-pixel temperature to the module-level
    DeltaTs list. Fixes: the old ``return DeltaTs.append(...)`` implied a
    useful return value, but list.append always returns None — the append
    is now a plain statement; and ``L // 2`` keeps the index an integer
    under Python 3's true division as well as Python 2.
    """
    cat = sl.fakecatalog(X)
    M.setsubmapL(L)
    M.setstackmap()
    for item in cat:
        M.setsubmap(item[0], item[1])
        M.stacksubmap()
    M.finishstack()
    DeltaTs.append(M.stackmap[L // 2, L // 2])
# Run N independent random stacks, collecting one central temperature each.
for i in range(N):
    onestack(X,L)

# histogram
n, bins, patches = plt.hist(DeltaTs,bins=50,normed = 1, facecolor = 'blue')

# best fit of data
(mu, sigma) = norm.fit(DeltaTs)

# add a 'best fit' line
y = mlab.normpdf( bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)

plt.xlabel('Temperature (microKelvin)')
plt.ylabel('Probability Density')
plt.show()
a3bfe6b91cbd87cb4292e92d7bf1ac4a44afd462 | Add struct.py mirroring C struct declarations. | tardis/montecarlo/struct.py | tardis/montecarlo/struct.py | from ctypes import Structure, POINTER, c_int, c_int64, c_double, c_ulong
class RPacket(Structure):
_fields_ = [
('nu', c_double),
('mu', c_double),
('energy', c_double),
('r', c_double),
('tau_event', c_double),
('nu_line', c_double),
('current_shell_id', c_int64),
('next_line_id', c_int64),
('last_line', c_int64),
('close_line', c_int64),
('current_continuum_id', c_int64),
('virtual_packet_flag', c_int64),
('virtual_packet', c_int64),
('d_line', c_double),
('d_electron', c_double),
('d_boundary', c_double),
('d_cont', c_double),
('next_shell_id', c_int64),
('status', c_int),
('id', c_int64),
('chi_th', c_double),
('chi_cont', c_double),
('chi_ff', c_double),
('chi_bf', c_double)
]
class StorageModel(Structure):
_fields_ = [
('packet_nus', POINTER(c_double)),
('packet_mus', POINTER(c_double)),
('packet_energies', POINTER(c_double)),
('output_nus', POINTER(c_double)),
('output_energies', POINTER(c_double)),
('last_interaction_in_nu', POINTER(c_double)),
('last_line_interaction_in_id', POINTER(c_int64)),
('last_line_interaction_out_id', POINTER(c_int64)),
('last_line_interaction_shell_id', POINTER(c_int64)),
('last_line_interaction_type', POINTER(c_int64)),
('no_of_packets', c_int64),
('no_of_shells', c_int64),
('r_inner', POINTER(c_double)),
('r_outer', POINTER(c_double)),
('v_inner', POINTER(c_double)),
('time_explosion', c_double),
('inverse_time_explosion', c_double),
('electron_densities', POINTER(c_double)),
('inverse_electron_densities', POINTER(c_double)),
('line_list_nu', POINTER(c_double)),
('continuum_list_nu', POINTER(c_double)),
('line_lists_tau_sobolevs', POINTER(c_double)),
('line_lists_tau_sobolevs_nd', c_int64),
('line_lists_j_blues', POINTER(c_double)),
('line_lists_j_blues_nd', c_int64),
('no_of_lines', c_int64),
('no_of_edges', c_int64),
('line_interaction_id', c_int64),
('transition_probabilities', POINTER(c_double)),
('transition_probabilities_nd', c_int64),
('line2macro_level_upper', POINTER(c_int64)),
('macro_block_references', POINTER(c_int64)),
('transition_type', POINTER(c_int64)),
('destination_level_id', POINTER(c_int64)),
('transition_line_id', POINTER(c_int64)),
('js', POINTER(c_double)),
('nubars', POINTER(c_double)),
('spectrum_start_nu', c_double),
('spectrum_delta_nu', c_double),
('spectrum_end_nu', c_double),
('spectrum_virt_start_nu', c_double),
('spectrum_virt_end_nu', c_double),
('spectrum_virt_nu', POINTER(c_double)),
('sigma_thomson', c_double),
('inverse_sigma_thomson', c_double),
('inner_boundary_albedo', c_double),
('reflective_inner_boundary', c_int64),
('current_packet_id', c_int64),
('chi_bf_tmp_partial', POINTER(c_double)),
('t_electrons', POINTER(c_double)),
('l_pop', POINTER(c_double)),
('l_pop_r', POINTER(c_double)),
('cont_status', c_int),
('virt_packet_nus', POINTER(c_double)),
('virt_packet_energies', POINTER(c_double)),
('virt_packet_last_interaction_in_nu', POINTER(c_double)),
('virt_packet_last_interaction_type', POINTER(c_int64)),
('virt_packet_last_line_interaction_in_id', POINTER(c_int64)),
('virt_packet_last_line_interaction_out_id', POINTER(c_int64)),
('virt_packet_count', c_int64),
('virt_array_size', c_int64)
]
class RKState(Structure):
_fields_ = [
('key', POINTER(c_ulong)),
('pos', c_int),
('has_gauss', c_int),
('gauss', c_double)
]
| Python | 0 | |
3f67c8bfa0f78fbd7832d130d550f2a4d9ded327 | Add an s3_utils acceptance test. | src/encoded/tests/acceptance/test_dcicutils.py | src/encoded/tests/acceptance/test_dcicutils.py | import json
import os
import pytest
import requests
from dcicutils.env_utils import get_bucket_env, is_stg_or_prd_env, get_standard_mirror_env
from dcicutils.misc_utils import find_association
from dcicutils.s3_utils import s3Utils
pytestmark = [pytest.mark.working, pytest.mark.unit]
S3_UTILS_BUCKET_VAR_DATA = [
{
'attribute': 'sys_bucket',
'health_key': 'system_bucket',
'description': "The 'xxx-system' bucket",
'template': 'SYS_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'outfile_bucket',
'health_key': 'processed_file_bucket',
'description': "The 'xxx-wfoutput' bucket",
'template': 'OUTFILE_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'raw_file_bucket',
'health_key': 'file_upload_bucket',
'description': "The 'xxx-files' bucket",
'template': 'RAW_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'blob_bucket',
'health_key': 'blob_bucket',
'description': "The 'xxx-blobs' bucket",
'template': 'BLOB_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'metadata_bucket',
'health_key': 'metadata_bucket',
'description': "The 'xxx-metadata-bundles' bucket",
'template': 'METADATA_BUCKET_TEMPLATE',
'recent': True,
},
{
'attribute': 'tibanna_output_bucket',
'health_key': 'tibanna_output_bucket',
'description': "The 'tibanna-output' bucket",
'template': 'TIBANNA_OUTPUT_BUCKET_TEMPLATE',
'recent': True,
},
]
@pytest.mark.parametrize('env', [None, 'fourfront-mastertest', 'fourfront-green', 'fourfront-blue', 'data', 'staging'])
def test_s3_utils_bare(env):
# Calling without an env argument or explicit bucket names is only expected to work
# in orchestrated environments with a single environment.
s = s3Utils(env=env)
# No matter where invoked, we should at least get an AWS s3 object
assert s.s3
# This is probably the same everywhere. It doesn't need to vary.
assert s.ACCESS_KEYS_S3_KEY == 'access_key_admin'
def apply_template(template, arg):
if '%' in template:
return template % arg
else:
return template
for datum in S3_UTILS_BUCKET_VAR_DATA:
attr_name = datum['attribute']
template_name = datum['template']
# This is behavior we don't want, but it's the normal behavior, so test stability.
# e.g., for env=None, assert s.sys_bucket == 'elasticbeanstalk-None-system'
# but for env='fourfront-mastertest', assert s.sys_bucket == 'elasticbeanstalk-fourfront-mastertest-system'
if hasattr(s, attr_name) and hasattr(s, template_name):
assert getattr(s, attr_name) == apply_template(getattr(s, template_name), get_bucket_env(env))
else:
assert datum['recent'], f"Problem with: {datum}"
if s.url is not '':
assert is_stg_or_prd_env(env)
def health_page(url):
return requests.get(s.url+"/health?format=json").json()
health = health_page(s.url)
for k, v in health.items():
if k.endswith("bucket"):
print(f"Considering health page key {k}...")
entry = find_association(S3_UTILS_BUCKET_VAR_DATA, health_key=k)
assert entry, f"No entry for health key {k}."
if v:
assert getattr(s, entry['attribute']) == v
print("Attribute matches.")
else:
print("No health page value.")
beanstalk_env = health['beanstalk_env']
prd_url = "https://data.4dnucleome.org"
stg_url = "http://staging.4dnucleome.org"
def test_stg_or_prd(me, my_twin):
assert s.url == me
mirror_s = s3Utils(env=get_standard_mirror_env(beanstalk_env))
assert mirror_s.url == my_twin
mirror_health = health_page(mirror_s.url)
assert mirror_health['beanstalk_env'] == beanstalk_env
def test_data():
test_stg_or_prd(prd_url, stg_url)
def test_staging():
test_stg_or_prd(stg_url, prd_url)
if env == 'data':
test_data()
elif env == 'staging':
test_staging()
else:
assert beanstalk_env == env
if s.url == prd_url:
test_data()
else:
test_staging()
| Python | 0 | |
89b10996cfe6e60870e55b7c759aa73448bfa4d8 | remove off curve pen | pens/removeOffcurvesPen.py | pens/removeOffcurvesPen.py | ## use BasePen as base class
from fontTools.pens.basePen import BasePen
class RemoveOffcurvesPen(BasePen):
"""
A simple pen drawing a contour without any offcurves.
"""
def __init__(self, glyphSet):
BasePen.__init__(self, glyphSet)
self._contours = []
self._components = []
def _moveTo(self, pt):
self._contours.append([])
self._contours[-1].append(("moveTo", pt))
def _lineTo(self, pt):
self._contours[-1].append(("lineTo", pt))
def _curveToOne(self, pt1, pt2, pt3):
self._contours[-1].append(("lineTo", pt3))
def qCurveTo(self, *points):
pt = points[-1]
self._contours[-1].append(("lineTo", pt))
def _closePath(self):
self._contours[-1].append(("closePath", None))
def _endpath(self):
self._contours[-1].append(("endPath", None))
def addComponent(self, baseName, transformation):
self._components.append((baseName, transformation))
def draw(self, outPen):
"""
Draw the stored instructions in an other pen.
"""
for contour in self._contours:
for penAttr, pt in contour:
func = getattr(outPen, penAttr)
if pt is None:
func()
else:
func(pt)
for baseGlyph, transformation in self._components:
outPen.addComponent(baseGlyph, transformation)
## get the current glyph
g = CurrentGlyph()
## prepare the glyph for undo
g.prepareUndo("Remove All Offcurves")
## create a pen
pen = RemoveOffcurvesPen(g.getParent())
## draw the glyph in the pen
g.draw(pen)
## clear the glyph
g.clear()
## draw the stored contour from the pen into the emtpy glyph
pen.draw(g.getPen())
## tell the glyph undo watching is over
g.performUndo() | Python | 0.000001 | |
10952213496e8a1cbf80ba1eee7a0e968bdea14a | add missing test | corehq/ex-submodules/couchforms/tests/test_errors.py | corehq/ex-submodules/couchforms/tests/test_errors.py | from django.test import TestCase
from casexml.apps.case.exceptions import IllegalCaseId
from corehq.apps.receiverwrapper import submit_form_locally
from couchforms.models import XFormError
class CaseProcessingErrorsTest(TestCase):
def test_no_case_id(self):
"""
submit form with a case block that has no case_id
check that
- it errors
- the form is not saved under its original id
- an XFormError is saved with the original id as orig_id
- the error was logged (<-- is this hard to test?)
<data xmlns="example.com/foo">
<case case_id="">
<update><foo>bar</foo></update>
</case>
</data>
"""
with self.assertRaises(IllegalCaseId):
submit_form_locally(
"""<data xmlns="example.com/foo">
<meta>
<instanceID>abc-easy-as-123</instanceID>
</meta>
<case case_id="" xmlns="http://commcarehq.org/case/transaction/v2">
<update><foo>bar</foo></update>
</case>
</data>""",
'my_very_special_domain',
)
xform_errors = XFormError.view(
'domain/docs',
startkey=['my_very_special_domain', 'XFormError'],
endkey=['my_very_special_domain', 'XFormError', {}],
)
related_errors = [xform_error for xform_error in xform_errors
if xform_error.orig_id == 'abc-easy-as-123']
self.assertEqual(len(related_errors), 1)
related_error = related_errors[0]
self.assertEqual(related_error.problem,
'IllegalCaseId: case_id must not be empty')
| Python | 0.000288 | |
d4057367d8630df541b8869e33766ed298b9fa43 | Add run state testbench | tests/test_execution.py | tests/test_execution.py | # Copyright 2020 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
from unittest import TestCase
from mock.mock import Mock
from nose.tools import assert_equal
from datetime import datetime
from wa.framework.configuration import RunConfiguration
from wa.framework.configuration.core import JobSpec, Status
from wa.framework.execution import ExecutionContext, Runner
from wa.framework.job import Job
from wa.framework.output import RunOutput, init_run_output
from wa.framework.output_processor import ProcessorManager
import wa.framework.signal as signal
from wa.framework.run import JobState
class MockConfigManager(Mock):
@property
def jobs(self):
return self._joblist
@property
def loaded_config_sources(self):
return []
@property
def run_config(self):
return RunConfiguration()
@property
def plugin_cache(self):
return MockPluginCache()
def __init__(self, *args, **kwargs):
super(MockConfigManager, self).__init__(*args, **kwargs)
self._joblist = None
self._run_config = RunConfiguration()
def to_pod(self):
return {}
class MockPluginCache(Mock):
def list_plugins(self, kind=None):
return []
class MockProcessorManager(Mock):
def __init__(self, *args, **kwargs):
super(MockProcessorManager, self).__init__(*args, **kwargs)
def get_enabled(self):
return []
class JobState_force_retry(JobState):
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if(self.retries != self.times_to_retry) and (value == Status.RUNNING):
self._status = Status.FAILED
if self.output:
self.output.status = Status.FAILED
else:
self._status = value
if self.output:
self.output.status = value
def __init__(self, to_retry, *args, **kwargs):
self.retries = 0
self._status = Status.NEW
self.times_to_retry = to_retry
self.output = None
super(JobState_force_retry, self).__init__(*args, **kwargs)
class Job_force_retry(Job):
'''This class imitates a job that retries as many times as specified by
``retries`` in its constructor'''
def __init__(self, to_retry, *args, **kwargs):
super(Job_force_retry, self).__init__(*args, **kwargs)
self.state = JobState_force_retry(to_retry, self.id, self.label, self.iteration, Status.NEW)
class TestRunState(TestCase):
def setUp(self):
self.path = tempfile.mkstemp()[1]
os.remove(self.path)
self.initialise_signals()
config = MockConfigManager()
output = init_run_output(self.path, config)
self.context = ExecutionContext(config, Mock(), output)
self.job_spec = JobSpec()
self.job_spec.augmentations = {}
self.job_spec.finalize()
def tearDown(self):
signal.disconnect(self._verify_serialized_state, signal.RUN_INITIALIZED)
signal.disconnect(self._verify_serialized_state, signal.JOB_STARTED)
signal.disconnect(self._verify_serialized_state, signal.JOB_RESTARTED)
signal.disconnect(self._verify_serialized_state, signal.JOB_COMPLETED)
signal.disconnect(self._verify_serialized_state, signal.JOB_FAILED)
signal.disconnect(self._verify_serialized_state, signal.JOB_ABORTED)
signal.disconnect(self._verify_serialized_state, signal.RUN_FINALIZED)
def test_job_state_transitions_pass(self):
'''Tests state equality when the job passes first try'''
job = Job(self.job_spec, 1, self.context)
job.workload = Mock()
self.context.cm._joblist = [job]
self.context.run_state.add_job(job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def test_job_state_transitions_fail(self):
'''Tests state equality when job fails completely'''
job = Job_force_retry(3, self.job_spec, 1, self.context)
job.workload = Mock()
self.context.cm._joblist = [job]
self.context.run_state.add_job(job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def test_job_state_transitions_retry(self):
'''Tests state equality when job fails initially'''
job = Job_force_retry(1, self.job_spec, 1, self.context)
job.workload = Mock()
self.context.cm._joblist = [job]
self.context.run_state.add_job(job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def initialise_signals(self):
signal.connect(self._verify_serialized_state, signal.RUN_INITIALIZED)
signal.connect(self._verify_serialized_state, signal.JOB_STARTED)
signal.connect(self._verify_serialized_state, signal.JOB_RESTARTED)
signal.connect(self._verify_serialized_state, signal.JOB_COMPLETED)
signal.connect(self._verify_serialized_state, signal.JOB_FAILED)
signal.connect(self._verify_serialized_state, signal.JOB_ABORTED)
signal.connect(self._verify_serialized_state, signal.RUN_FINALIZED)
def _verify_serialized_state(self, _):
fs_state = RunOutput(self.path).state
ex_state = self.context.run_output.state
assert_equal(fs_state.status, ex_state.status)
fs_js_zip = zip(
[value for key, value in fs_state.jobs.items()],
[value for key, value in ex_state.jobs.items()]
)
for fs_jobstate, ex_jobstate in fs_js_zip:
assert_equal(fs_jobstate.iteration, ex_jobstate.iteration)
assert_equal(fs_jobstate.retries, ex_jobstate.retries)
assert_equal(fs_jobstate.status, ex_jobstate.status)
class TestJobState(TestCase):
def setUp(self):
path = tempfile.mkstemp()[1]
os.remove(path)
self.initialise_signals()
config = MockConfigManager()
output = init_run_output(path, config)
self.context = ExecutionContext(config, Mock(), output)
def test_job_retry_status(self):
job_spec = JobSpec()
job_spec.augmentations = {}
job_spec.finalize()
self.job = Job_force_retry(2, job_spec, 1, self.context)
self.job.workload = Mock()
self.context.cm._joblist = [self.job]
self.context.run_state.add_job(self.job)
runner = Runner(self.context, MockProcessorManager())
runner.run()
def initialise_signals(self):
signal.connect(self._verify_restarted_job_status, signal.JOB_RESTARTED)
def tearDown(self):
signal.disconnect(self._verify_restarted_job_status, signal.JOB_RESTARTED)
def _verify_restarted_job_status(self, _):
assert_equal(self.job.status, Status.PENDING)
| Python | 0.004295 | |
f120e2524f09ed462bca52dbc83863ba74291dd5 | Fix backend import. | tests/test_extension.py | tests/test_extension.py | import unittest
from mopidy_gmusic import GMusicExtension, actor as backend_lib
class ExtensionTest(unittest.TestCase):
def test_get_default_config(self):
ext = GMusicExtension()
config = ext.get_default_config()
self.assertIn('[gmusic]', config)
self.assertIn('enabled = true', config)
def test_get_config_schema(self):
ext = GMusicExtension()
schema = ext.get_config_schema()
self.assertIn('username', schema)
self.assertIn('password', schema)
self.assertIn('deviceid', schema)
def test_get_backend_classes(self):
ext = GMusicExtension()
backends = ext.get_backend_classes()
self.assertIn(backend_lib.GMusicBackend, backends)
| import unittest
from mopidy_gmusic import GMusicExtension, backend as backend_lib
class ExtensionTest(unittest.TestCase):
def test_get_default_config(self):
ext = GMusicExtension()
config = ext.get_default_config()
self.assertIn('[gmusic]', config)
self.assertIn('enabled = true', config)
def test_get_config_schema(self):
ext = GMusicExtension()
schema = ext.get_config_schema()
self.assertIn('username', schema)
self.assertIn('password', schema)
self.assertIn('deviceid', schema)
def test_get_backend_classes(self):
ext = GMusicExtension()
backends = ext.get_backend_classes()
self.assertIn(backend_lib.GMusicBackend, backends)
| Python | 0 |
b705c79b911ae201e9a79786e61ec36bd4a9be0f | add tests for revisions api | tests/test_revisions.py | tests/test_revisions.py | import datetime
import json
from werkzeug.exceptions import BadRequest
from server.models import db, Backup, Group, Message, Version, Score
from tests import OkTestCase
class TestRevision(OkTestCase):
"""Tests revision API submission and scoring."""
def setUp(self):
""" Add submissions for 3 users. """
super(TestRevision, self).setUp()
self.setup_course()
message_dict = {'file_contents': {'backup.py': '1'}, 'analytics': {}}
self.active_user_ids = [self.user1.id, self.user2.id, self.user3.id]
self.assignment.revisions_allowed = True
time = self.assignment.due_date # Set to dt.now(), so future subms are late
for user_id in self.active_user_ids:
time -= datetime.timedelta(minutes=15)
backup = Backup(submitter_id=user_id,
assignment=self.assignment, submit=True)
# Revisions are submitted on time.
backup.created = time
messages = [Message(kind=k, backup=backup,
contents=m) for k, m in message_dict.items()]
db.session.add_all(messages)
db.session.add(backup)
# Put user 3 in a group with user 4
Group.invite(self.user3, self.user4, self.assignment)
group = Group.lookup(self.user3, self.assignment)
group.accept(self.user4)
okversion = Version(name="ok-client", current_version="v1.5.0",
download_link="http://localhost/ok")
db.session.add(okversion)
db.session.commit()
def _submit_revision(self):
data = {
'assignment': self.assignment.name,
'messages': {
'file_contents': {
'hog.py': 'print("Hello world!")'
}
},
'submit': False,
'revision': True,
}
response = self.client.post('/api/v3/revision/?client_version=v1.5.0',
data=json.dumps(data),
headers=[('Content-Type', 'application/json')])
return response
def test_no_revisions(self):
""" Ensure no user has revisions before submitting ."""
for user in self.active_user_ids:
revision = self.assignment.revision({self.user1.id})
self.assertIs(revision, None)
def test_revison_anon(self):
response = self._submit_revision()
self.assert401(response)
def test_revison_submit(self):
self.login(self.user1.email)
response = self._submit_revision()
self.assert200(response)
revision = self.assignment.revision({self.user1.id})
self.assertTrue(revision.is_revision)
def test_revison_disabled(self):
# Disable revisions
self.assignment.revisions_allowed = False
db.session.commit()
self.login(self.user1.email)
response = self._submit_revision()
self.assert403(response)
# Ensure that the backup is still accepted
backups = Backup.query.filter_by(submitter=self.user1).count()
self.assertEquals(backups, 2)
def test_revison_no_submission(self):
""" Revisions are not accepted if there is no final submission. """
self.login(self.user5.email)
response = self._submit_revision()
self.assert403(response)
# Ensure that the backup is still accepted
backups = Backup.query.filter_by(submitter=self.user5).count()
self.assertEquals(backups, 1)
def test_revison_test_group_member(self):
self.login(self.user4.email)
response = self._submit_revision()
self.assert200(response)
group = self.assignment.active_user_ids(self.user4.id)
revision = self.assignment.revision(group)
self.assertEquals(len(revision.owners()), 2)
def test_revison_multiple_submit(self):
group = self.assignment.active_user_ids(self.user3.id)
self.login(self.user3.email)
response = self._submit_revision()
self.assert200(response)
first_revision = self.assignment.revision(group)
self.assertTrue(first_revision.is_revision)
self.login(self.user4.email)
response = self._submit_revision()
self.assert200(response)
second_revision = self.assignment.revision(group)
self.assertTrue(second_revision.is_revision)
self.assertNotEquals(first_revision.id, second_revision.id)
# Check the number of revisions scores is 1
scores = Score.query.filter_by(kind="revision", archived=False).count()
self.assertEquals(scores, 1)
| Python | 0 | |
7e15f973f0ee898a0c06e50151ada675be46263d | add basic data, query method and method scaffolds | notifo/notifo.py | notifo/notifo.py | # encoding: utf-8
""" notifo.py - python wrapper for notifo.com """
import json
import urllib
import urllib2
class Notifo:
""" Class for wrapping notifo.com """
def __init__(self, user, api_secret):
self.user = user
self.api_secret = api_secret
self.root_url = "https://api.notifo.com/v1/"
# status codes (Request successful)
self.status_codes = {
2201 : "OK.",
2202 : "User is already subscribed."
}
# error codes (Something went wrong)
self.error_codes = {
1100 : "An error occurred.",
1101 : "Invalid credentials.",
1102 : "Not allowed to sent to user.",
1105 : "No such user.",
1106 : "Not allowed to subscribe user.",
1107 : "Missing required parameters.",
}
def subsribe_user(self, user):
""" method to subscribe a user to a service
"""
pass
def send_notification(self):
""" method to send a message to a user
"""
pass
def _query(self, url, data = None):
""" query method to do HTTP POST/GET
Parameters:
url -> the url to POST/GET
data -> header_data as a dict (only for POST)
Returns:
Parsed JSON data as dict
or
None on error
"""
if data is not None: # we have POST data if there is data
values = urllib.urlencode(data)
request = urllib2.Request(url, values)
else: # do a GET otherwise
request = urllib2.Request(url)
try:
response = urllib2.urlopen(request)
except IOError: # no connection
return None
json_data = response.read()
data = json.loads(json_data)
return data
| Python | 0.000004 | |
d40fb122d7083b9735728df15120ed682431be79 | Create script for generating analysis seeds. | scripts/make_fhes_seeds.py | scripts/make_fhes_seeds.py | import yaml
import sys
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *
def get_coord(name,tab):
row = tab[tab['Source_Name'] == name]
return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')
def avg_coords(coords):
xyz = np.zeros(3)
for t in coords:
xyz += t.cartesian.xyz
xyz /= np.sum(xyz**2)**0.5
c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
c.representation='spherical'
return c
tab = Table.read(sys.argv[1])
src_names = []
m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)
for row in tab[m]:
src_names += [row['codename']]
src_names = sorted(list(set(src_names)))
o = {}
for name in src_names:
#coords = [get_coord(t,cat.table) for t in names]
#c0 = avg_coords(coords)
print(name)
#print(create_source_name(c0))
names = [name]
row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
name = create_source_name(c0).replace('PS','FHES') + 'e'
#print(c0.ra.deg,c0.dec.deg)
#print(names[0])
#print(row['codename'])
src = {'name' : name,
'ra' : float(c0.ra.deg), 'dec' : float(c0.dec.deg),
'SpectrumType' : 'PowerLaw', 'SpatialModel' : 'RadialGaussian',
'SpatialWidth' : float(row['fit_halo_r68']),
'Index' : float(row['fit_halo_index'])}
o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
'model' : {'sources' : [src]} }
yaml.dump(o,open('out.yaml','w'))
| Python | 0 | |
9696acf13a6b25b1935b7fcaae5763db8e16e83a | Create MyoRemote.py | home/Alessandruino/MyoRemote.py | home/Alessandruino/MyoRemote.py | from com.thalmic.myo.enums import PoseType
remote = Runtime.start("remote","RemoteAdapter")
remote.setDefaultPrefix("raspi")
remote.connect("tcp://192.168.0.5:6767")
roll = 0.0
sleep(2)
python.send("raspiarduino", "connect","/dev/ttyUSB0")
sleep(1)
python.send("raspiarduino", "digitalWrite",2,1)
python.send("raspiarduino", "digitalWrite",3,1)
python.send("raspiarduino", "servoAttach","raspiservo",6)
python.send("raspiservo", "map",5.0,12.0,50.0,110.0)
myo = Runtime.start("myo","MyoThalmic")
myo.connect()
myo.addMyoDataListener(python)
def onMyoData(data):
if (data.getPose() == PoseType.FIST):
global roll
roll = data.getRoll()
python.send("raspiarduino", "analogWrite",5,50)
python.send("raspiservo", "moveTo",roll)
elif (data.getPose() == PoseType.WAVE_OUT):
python.send("raspiarduino", "analogWrite",11,50)
elif (data.getPose() == PoseType.REST):
python.send("raspiarduino", "analogWrite",5,0)
python.send("raspiarduino", "analogWrite",11,0)
| Python | 0 | |
6bd3869a2c2a6041e47da01ddaaa15b309bf90d7 | Add example checkerscript | checker/examples/dummyrunner.py | checker/examples/dummyrunner.py | #!/usr/bin/python3
import sys
import time
import os
import codecs
def generate_flag(tick, payload=None):
if payload is None:
sys.stdout.write("FLAG %d\n" % (tick,))
else:
sys.stdout.write("FLAG %d %s\n" % (tick, codecs.encode(os.urandom(8), 'hex').decode('latin-1')))
sys.stdout.flush()
return sys.stdin.readline().strip()
def place_flag(flag, ip):
return 0
def check_for_flag(flag, ip):
return 0
def main(tick, ip):
result = place_flag(generate_flag(tick), ip)
if 0 != result:
sys.exit(result)
oldesttick = max(tick-7, -1)
for ctick in range(tick-1, oldesttick, -1):
result = check_for_flag(generate_flag(ctick), ip)
if 0 != result:
sys.exit(result)
sys.exit(0)
if __name__ == '__main__':
_, tick, ip = sys.argv
main(tick=int(tick), ip=ip)
| Python | 0 | |
4a782b2930210053bcc2fc705d55e56af5900771 | Create dispatch_curve_cooling_plant.py | cea/plots/supply_system/dispatch_curve_cooling_plant.py | cea/plots/supply_system/dispatch_curve_cooling_plant.py | """
Show a Pareto curve plot for individuals in a given generation.
"""
from __future__ import division
from __future__ import print_function
import plotly.graph_objs as go
import cea.plots.supply_system
from cea.plots.variable_naming import NAMING, COLOR
__author__ = "Jimeno Fonseca"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class DispatchCurveDistrictCoolingPlot(cea.plots.supply_system.SupplySystemPlotBase):
"""Show a pareto curve for a single generation"""
name = "Dispatch curve cooling plant"
expected_parameters = {
'generation': 'plots-supply-system:generation',
'individual': 'plots-supply-system:individual',
'timeframe': 'plots-supply-system:timeframe',
'scenario-name': 'general:scenario-name',
}
def __init__(self, project, parameters, cache):
super(DispatchCurveDistrictCoolingPlot, self).__init__(project, parameters, cache)
self.analysis_fields = [
"Q_DailyStorage_gen_directload_W",
"Q_Trigen_NG_gen_directload_W",
"Q_BaseVCC_WS_gen_directload_W",
"Q_PeakVCC_WS_gen_directload_W",
"Q_BaseVCC_AS_gen_directload_W",
"Q_PeakVCC_AS_gen_directload_W",
"Q_BackupVCC_AS_directload_W",
]
self.analysis_field_demand = ['Q_districtcooling_sys_req_W']
self.timeframe = self.parameters['timeframe']
self.input_files = [(self.locator.get_optimization_slave_cooling_activation_pattern,
[self.individual, self.generation])]
@property
def title(self):
return "Dispatch curve for cooling plant in system #%s (%s)" % (self.individual, self.timeframe)
@property
def output_path(self):
return self.locator.get_timeseries_plots_file(
'gen{generation}_ind{individual}dispatch_curve_cooling_plant'.format(individual=self.individual,
generation=self.generation),
self.category_name)
@property
def layout(self):
return dict(barmode='relative', yaxis=dict(title='Energy Generation [MWh]'))
def calc_graph(self):
# main data about technologies
data = self.process_individual_dispatch_curve_cooling()
graph = []
analysis_fields = self.remove_unused_fields(data, self.analysis_fields)
for field in analysis_fields:
y = (data[field].values) / 1E6 # into MW
trace = go.Bar(x=data.index, y=y, name=NAMING[field],
marker=dict(color=COLOR[field]))
graph.append(trace)
# data about demand
for field in self.analysis_field_demand:
y = (data[field].values) / 1E6 # into MW
trace = go.Scattergl(x=data.index, y=y, name=NAMING[field],
line=dict(width=1, color=COLOR[field]))
graph.append(trace)
return graph
def main():
"""Test this plot"""
import cea.config
import cea.plots.cache
config = cea.config.Configuration()
cache = cea.plots.cache.NullPlotCache()
DispatchCurveDistrictCoolingPlot(config.project,
{'scenario-name': config.scenario_name,
'generation': config.plots_supply_system.generation,
'individual': config.plots_supply_system.individual,
'timeframe': config.plots_supply_system.timeframe},
cache).plot(auto_open=True)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
3026c007a4f9cbb6befa1599c8a8390a96d8396b | test import checks | pychecker2/utest/import.py | pychecker2/utest/import.py | from pychecker2.TestSupport import WarningTester
from pychecker2 import ImportChecks
class ImportTestCase(WarningTester):
def testImportChecks(self):
self.silent('import sys; print sys.argv')
self.silent('import pychecker2; print pychecker2')
self.silent('import pychecker2.utest; print pychecker2.utest')
def testImportChecks(self):
self.warning('import sys\n'
'print sys.argv\n'
'import sys\n',
3, ImportChecks.ImportCheck.duplicateImport,
'sys', ' in current scope')
self.warning('from sys import *\n'
'def f():\n'
' def g():\n'
' from sys import argv\n'
' return argv\n'
' return g() + g()\n'
'print argv\n',
4, ImportChecks.ImportCheck.duplicateImport, 'argv',
' of import in parent scope <ModuleScope: global>')
self.warning('import no_such_module\n',
1, ImportChecks.ImportCheck.importError, 'no_such_module',
'No module named no_such_module')
self.warning('from pychecker2.utest.data import *\n'
'import exceptions\n'
'print exceptions\n',
2, ImportChecks.ImportCheck.shadowImport,
'exceptions', 'pychecker2.utest.data', 1)
| Python | 0 | |
5219e970f1b09d8f2d41bf61a3b9f9803a8aed1d | Add database.py with working db find function | python-backend/database.py | python-backend/database.py | from pymongo import MongoClient
client = MongoClient()
client = MongoClient('localhost', 27017)
# `community` database
db = client.community;
# Database find wrapper
def db_find( db_collection, db_query, find_one = False ):
    """Look up documents in `db_collection` matching `db_query`.

    Returns a single document (or None) when `find_one` is True, otherwise a
    cursor over all matching documents.
    """
    # Get collection
    collection = db[db_collection]
    # BUG FIX: the branches were inverted -- find_one=True previously ran
    # find() and find_one=False ran find_one().
    if (find_one):
        result = collection.find_one(db_query)
    else:
        result = collection.find(db_query)
    return result;
# Database insert wrapper
# Database update wrapper
# Database remove wrapper
print db_find('test', {'name': 'test'})
| Python | 0.000001 | |
ab5dede4b1fdd5e6256d8802034846eda7a66722 | add orm.py | www/transwarp/orm.py | www/transwarp/orm.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'zhu327'
'''
ORM: a small object-relational mapping layer.

    from transwarp.orm import Model, StringField, IntegerField

    class User(Model):
        __table__ = 'user'
        id = IntegerField(primary_key=True)
        name = StringField()

    # query directly by primary key:
    user = User.get('123')
    # create a new instance:
    user = User(id=123, name='Michael')
    # save it to the database:
    user.insert()
'''
import db, logging
class Field(object):
    """Base class describing one mapped database column."""

    def __init__(self, **kw):
        """Capture column options; unspecified options fall back to defaults."""
        option = kw.get
        self.name = option('name', None)
        self._default = option('default', None)
        self.primary_key = option('primary_key', False)
        self.not_null = option('not_null', False)
        self.updateable = option('updateable', True)
        self.insertable = option('insertable', True)
        self.datatype = option('datatype', None)

    @property
    def default(self):
        """Return the default value, invoking it first when it is callable."""
        value = self._default
        if callable(value):
            return value()
        return value
class IntegerField(Field):
    """Column mapped to a SQL bigint; the default value is 0."""

    def __init__(self, **kw):
        kw.setdefault('default', 0)
        kw.setdefault('datatype', 'bigint')
        super(IntegerField, self).__init__(**kw)
class StringField(Field):
    """Column mapped to a SQL varchar(255); the default value is ''."""

    def __init__(self, **kw):
        kw.setdefault('default', '')
        kw.setdefault('datatype', 'varchar(255)')
        super(StringField, self).__init__(**kw)
# Metaclass that scans class attributes for Field objects and builds the ORM mapping.
class ModelMetaClass(type):
    """Metaclass that collects Field attributes into the ORM mapping.

    Builds __mapping__ (attribute name -> Field) and __primary_key__ on each
    Model subclass, then strips the Field objects off the class body.
    """
    def __new__(cls, clsname, bases, attrs):
        # The Model base class itself carries no fields to map.
        if clsname == 'Model':
            return super(ModelMetaClass, cls).__new__(cls, clsname, bases, attrs)
        mapping = {}
        primary_key = None
        for k, v in attrs.iteritems():
            if isinstance(v, Field):
                logging.info('mapping here %s, %s' % (k, v))
                if v.primary_key:
                    if not primary_key:
                        primary_key = k
                    else:
                        raise TypeError(r'primary key is already set %s, %s shuold be nomarl' % (primary_key, k))
                    if not v.not_null:
                        logging.warning(r"primary key can't null")
                        v.not_null = True
                    # BUG FIX: this used to clear v.insertable, which meant the
                    # primary key column was never written by insert(); the
                    # warning text shows the intent was to forbid updates.
                    if v.updateable:
                        logging.warning(r"primary key can't update")
                        v.updateable = False
                if not v.name:
                    v.name = k
                mapping[k] = v
        if not primary_key:
            raise TypeError(r'there is no primary key')
        if not '__table__' in attrs:
            # BUG FIX: was `atter['__table__']`, a NameError at class creation.
            attrs['__table__'] = clsname.lower()
        for k in mapping.iterkeys():
            attrs.pop(k)
        attrs['__mapping__'] = mapping
        attrs['__primary_key__'] = primary_key
        return super(ModelMetaClass, cls).__new__(cls, clsname, bases, attrs)
class Model(dict):
    '''
    ORM base class: a dict whose keys are also readable as attributes.

    >>> d = Model(id=123, name='Timmy')
    >>> d.id
    123
    >>> d.name
    'Timmy'
    >>> d.id=100
    >>> d.id
    100
    >>> d.h
    Traceback (most recent call last):
        ...
    AttributeError: dict obeject has no attr named h
    '''
    __metaclass__ = ModelMetaClass

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r'dict obeject has no attr named %s' % key)

    def __setattr__(self, key, value):
        self[key] = value

    # Fetch a single row by primary key and wrap it in a model instance.
    @classmethod
    def get(cls, pk):
        # BUG FIX: the SQL previously read 'select * form %s' ("form"
        # instead of "from"), which is a syntax error in any SQL dialect.
        rows = db.select('select * from %s where %s=?' % (cls.__table__, cls.__primary_key__,), pk)
        return cls(**rows[0]) if rows else None

    # Insert this instance as a new row, using field defaults where missing.
    def insert(self):
        '''
        insert object to db
        >>> class User(Model):
        ...     __table__ = 'user'
        ...     id = IntegerField(primary_key=True)
        ...     name = StringField()
        >>> user1 = User(id=1, name='a')
        >>> user2 = User(id=2, name='b')
        >>> user1.insert()
        >>> user2.insert()
        >>> d = User.get(1)
        >>> d.id
        1
        >>> d.name
        u'a'
        '''
        params = {}
        for k, v in self.__mapping__.iteritems():
            if v.insertable:
                if v.name in self:
                    params[v.name] = self[k]
                else:
                    params[v.name] = v.default
        db.insert(self.__table__, **params)
        return self
if __name__ == '__main__':
import doctest
doctest.testmod()
| Python | 0.000001 | |
271dca123ff9bb3004cbd2cfa366f606dd250f94 | Add test for configmap | tests/k8s/test_configmap.py | tests/k8s/test_configmap.py | #!/usr/bin/env python
# -*- coding: utf-8
import mock
import pytest
from k8s.client import NotFound
from k8s.models.common import ObjectMeta
from k8s.models.configmap import ConfigMap
NAME = "my-name"
NAMESPACE = "my-namespace"
@pytest.mark.usefixtures("k8s_config")
class TestConfigMap(object):
    """CRUD tests for the ConfigMap model against a mocked API.

    BUG FIX: the class was previously named TestIngress, a leftover from the
    ingress test suite it was copied from; this file tests ConfigMap.
    """
    def test_created_if_not_exists(self, post, api_get):
        # A 404 from the API means the resource must be POSTed as new.
        api_get.side_effect = NotFound()
        configmap = _create_default_configmap()
        call_params = configmap.as_dict()
        assert configmap._new
        configmap.save()
        assert not configmap._new
        pytest.helpers.assert_any_call(post, _uri(NAMESPACE), call_params)

    def test_updated_if_exists(self, get, put):
        # An existing resource is fetched, mutated, and PUT back.
        mock_response = _create_mock_response()
        get.return_value = mock_response
        configmap = _create_default_configmap()
        from_api = ConfigMap.get_or_create(metadata=configmap.metadata, data=configmap.data)
        assert not from_api._new
        assert from_api.data == {"foo": "bar"}
        from_api.data = {"baz": "quux"}
        call_params = from_api.as_dict()
        from_api.save()
        pytest.helpers.assert_any_call(put, _uri(NAMESPACE, NAME), call_params)

    def test_deleted(self, delete):
        ConfigMap.delete(NAME, namespace=NAMESPACE)
        pytest.helpers.assert_any_call(delete, _uri(NAMESPACE, NAME))
def _create_mock_response():
    """Build a mock HTTP response whose .json() yields an existing ConfigMap."""
    payload = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "creationTimestamp": "2017-09-08T13:37:00Z",
            "generation": 1,
            "labels": {
                "test": "true"
            },
            "name": NAME,
            "namespace": NAMESPACE,
            "resourceVersion": "42",
            "selfLink": _uri(NAMESPACE, NAME),
            "uid": "d8f1ba26-b182-11e6-a364-fa163ea2a9c4"
        },
        "data": {
            "foo": "bar",
        },
    }
    mock_response = mock.Mock()
    mock_response.json.return_value = payload
    return mock_response
def _create_default_configmap():
    """Return a ConfigMap named NAME in NAMESPACE with one data entry."""
    metadata = ObjectMeta(name=NAME, namespace=NAMESPACE, labels={"test": "true"})
    return ConfigMap(metadata=metadata, data={"foo": "bar"})
def _uri(namespace, name=""):
return "/api/v1/namespaces/{namespace}/configmaps/{name}".format(name=name, namespace=namespace)
| Python | 0.000001 | |
db912d4097da45c2b14cce4f8f852cbc1e720750 | add test framework | tests/test_clientManager.py | tests/test_clientManager.py | from unittest import TestCase
class TestClientManager(TestCase):
    """Placeholder suite for the client manager.

    Every test below is an unimplemented stub that fails on purpose, so the
    missing coverage is visible in test runs until real tests are written.
    """
    def test_add_http_client(self):
        self.fail()

    def test_add_local_client(self):
        self.fail()

    def test_restrictClient(self):
        self.fail()

    def test_load_clients_from_config(self):
        self.fail()

    def test_federated_featurephenotypeassociaton_query(self):
        # NOTE(review): "associaton" looks like a typo for "association".
        self.fail()
| Python | 0.000001 | |
401a1aabde600336bd129cce8fb3884ed8945272 | Create HCS_interpreter.py | HCS_interpreter.py | HCS_interpreter.py | #!/usr/bin/env python3
from HCS import HCS
def interpret_loop():
    """Run a read-eval-print loop over an HCS instance until EOF or quit."""
    interpreter = HCS()
    while True:
        print(">> ", end="")
        try:
            line = input()
        except EOFError:
            # Ctrl-D: finish the prompt line and leave the loop.
            print()
            return
        if line in ['quit', 'exit']:
            return
        try:
            result = interpreter.eval(line)
            print(result)
        except Exception as err:
            print("Error: \n" + repr(err))
if __name__ == '__main__':
interpret_loop()
| Python | 0.000001 | |
d58d2c6bb2805c8ebc95fe3445dc973560de9c79 | Create generate.py | Names/generate.py | Names/generate.py | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from random import randint
inp = 'name.txt'
with open(inp) as f:
content = f.readlines()
content = [x.lower() for x in content]
vocab = ''
for i in content:
vocab+=str(i).lower()
vocab = sorted(list(set(vocab)))
vocab = vocab[vocab.index('a'):]
vocab.append(('\n'))
vocab_indices = dict((c, i) for i, c in enumerate(vocab))
indices_vocab = dict((i, c) for i, c in enumerate(vocab))
def sample(preds, temperature=1):
    """Draw an index from preds[0] after temperature-scaling the distribution."""
    scaled = np.asarray(preds[0]).astype('float64')
    scaled = np.log(scaled) / temperature
    weights = np.exp(scaled)
    weights = weights / np.sum(weights)
    draw = np.random.multinomial(1, weights, 1)
    return np.argmax(draw)
"""def exists(new):
for j in content:
if i==j[:-1]:
return True
return False"""
seq = randint(1,6)
dataX = []
dataY = []
for x,word in enumerate(content):
for i in range(0, len(word) - seq, 1):
seq_in = word[i:i + seq]
seq_out = word[i + seq]
dataX.append([vocab_indices[char] for char in seq_in])
dataY.append(vocab_indices[seq_out])
n_patterns = len(dataX)
X = np.reshape(dataX, (n_patterns, seq, 1))
X = X / float(len(vocab))
y = np_utils.to_categorical(dataY)
model = Sequential()
model.add(LSTM(256, return_sequences=True,input_shape=(X.shape[1], X.shape[2])))
model.add(LSTM(256))
model.add(Dense(y.shape[1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.summary()
filepath='nn.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y,batch_size=128,epochs=10,verbose=2,callbacks=callbacks_list)
filename = 'nn.hdf5'
model.load_weights(filename)
final=[]
for temp in [.5,.8,.1,1.2]:
j=1
while(j<=500):
new=[]
word = content[randint(0, len(content)-1)][:seq]
for k in word[-seq:]:
new.append(vocab_indices[k])
try:
for t in range(10):
x = np.reshape(new, (1, seq, 1))
x = x / float(len(vocab))
preds = model.predict(x, verbose=2)
word+=indices_vocab[sample(preds,temp)]
new=[]
for k in word[-seq:]:
new.append(vocab_indices[k])
if word[-1] == '\n':
word=word.strip()
word.replace('\n','')
break
if (len(word)>seq and len(word)>=2) and word not in final:
final.append(word.capitalize())
except:
0
j+=1
open("New Names.txt", 'w').close()
new_text = open("New Names.txt", "w")
for i in sorted(final):
new_text.write(i.replace('\n','')+'\n')
new_text.close()
| Python | 0.000002 | |
d4a04d4a0fffd8dbb006d86504fc3593ae800cc6 | add bitly shorten function in proper directory | will/plugins/productivity/bitly.py | will/plugins/productivity/bitly.py | # coding: utf-8
import bitly_api # pip install bitly_api
from will.plugin import WillPlugin
from will.decorators import (respond_to, periodic, hear, randomly, route,
rendered_template, require_settings)
from will import settings
# BITLY_ACCESS_TOKEN = ' <get_access_token_from_bitly.com> '
class BitlyPlugin(WillPlugin):
    """Class for creating Bitly shorten URL's."""

    @respond_to("^bitly (?P<long_url>.*)$")
    @require_settings("BITLY_ACCESS_TOKEN",)
    def get_bitly_shorten_url(self, message, long_url, short_url=None):
        """Shorten `long_url` through the bit.ly API and reply with the result."""
        # use oauth2 endpoints
        api = bitly_api.Connection(access_token=settings.BITLY_ACCESS_TOKEN)
        data = api.shorten(uri=long_url)
        short_url = data['url']
        self.reply("Shorten URL: %s" % short_url, message=message)
| Python | 0 | |
6922ad3922d187a3e05d339a49449292a1d7efd6 | add Prototype pattern | prototype/Prototype.py | prototype/Prototype.py | #
# Python Design Patterns: Prototype
# Author: Jakub Vojvoda [github.com/JakubVojvoda]
# 2016
#
# Source code is licensed under MIT License
# (for more details see LICENSE)
#
import sys
import copy
#
# Prototype
# declares an interface for cloning itself
#
class Prototype:
    """Interface for objects that can produce copies of themselves."""

    def clone(self):
        """Return a copy of this object; concrete subclasses override this."""
        pass

    def getType(self):
        """Return a human-readable type tag; concrete subclasses override this."""
        pass
#
# Concrete Prototypes
# implement an operation for cloning itself
#
class ConcretePrototypeA(Prototype):
    """Concrete prototype "A"; clones itself via deep copy."""

    _kind = "type A"

    def clone(self):
        """Return an independent deep copy of this instance."""
        return copy.deepcopy(self)

    def getType(self):
        """Identify which concrete prototype this is."""
        return self._kind
class ConcretePrototypeB(Prototype):
    """Concrete prototype "B"; clones itself via deep copy."""

    _kind = "type B"

    def clone(self):
        """Return an independent deep copy of this instance."""
        return copy.deepcopy(self)

    def getType(self):
        """Identify which concrete prototype this is."""
        return self._kind
#
# Client
# creates a new object by asking a prototype to clone itself
#
class Client:
    """Builds new objects by cloning the registered prototype instances."""

    def __init__(self):
        self._types = [ConcretePrototypeA(), ConcretePrototypeB()]

    def make(self, index):
        """Return a clone of the prototype registered at *index*."""
        prototype = self._types[index]
        return prototype.clone()
if __name__ == "__main__":
client = Client()
prototype = client.make(0)
print(prototype.getType())
prototype = client.make(1)
print(prototype.getType()) | Python | 0 | |
841487ab4d0e05fa6f0780cf39973072417ec701 | Complete cherry-pick of PR#95 | service/management/commands/start_celery.py | service/management/commands/start_celery.py | import os
from django.core.management.base import BaseCommand
from subprocess import call
class Command(BaseCommand):
    """manage.py command that launches a celery worker for atmosphere."""
    help = 'Custom manage.py command to start celery.'

    def handle(self, *args, **options):
        """Ensure the worker log file exists, then start the celery worker."""
        logfile = "celery_node.log"
        if not os.path.isfile(logfile):
            # Touch the file; the context manager closes it for us (the old
            # code also called f.close() redundantly inside the `with` block).
            with open(logfile, 'w+') as f:
                pass
        call(("celery worker --app=atmosphere --loglevel=INFO -c 5 --logfile=%s" % logfile).split())
| Python | 0 | |
d1ee86414d45c571571d75434b8c2256b0120732 | Add py solution for 563. Binary Tree Tilt | py/binary-tree-tilt.py | py/binary-tree-tilt.py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findTilt(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        _, total_tilt = self.do_findTilt(root)
        return total_tilt

    def do_findTilt(self, node):
        """Return (subtree value sum, subtree tilt sum) for `node`."""
        if node is None:
            return (0, 0)
        left_sum, left_tilts = self.do_findTilt(node.left)
        right_sum, right_tilts = self.do_findTilt(node.right)
        # A node's tilt is the absolute difference of its child subtree sums.
        node_tilt = abs(left_sum - right_sum)
        subtree_sum = left_sum + right_sum + node.val
        return subtree_sum, left_tilts + right_tilts + node_tilt
| Python | 0.005643 | |
34bfea59b600f9dac457e2a16a812ce2fb768d15 | Add graph.py to collect runtime data on workers and tasks (#8) | chtc/graph.py | chtc/graph.py | #!/usr/bin/env python
from __future__ import print_function
import csv
import itertools
import time
from distributed import Client
START_TIMEOUT = 900 # 15 min
MAX_COLLECT_TIME = 86400 # 1 day
def running_task_list(cli):
    """Flatten the per-worker task lists reported by the scheduler client."""
    return [task for worker_tasks in cli.processing().values() for task in worker_tasks]
cli = Client('127.0.0.1:8786')
print("Waiting for tasks to start running")
timeout = time.time() + START_TIMEOUT
while not cli.ncores():
time.sleep(5)
if time.time() > timeout:
raise Exception("workers never started")
print("First worker connected. Starting data collection.")
start_time = time.time()
end_time = time.time() + MAX_COLLECT_TIME
with open('graph.csv', 'wb') as outfile:
writer = csv.writer(outfile)
while cli.ncores() and time.time() < end_time:
n_running_tasks = len(running_task_list(cli))
n_cores = sum(cli.ncores().values())
n_futures = len(cli.who_has().keys())
row = [time.time() - start_time, n_cores, n_running_tasks, n_futures]
print("{0:>6.0f}s {1:>5d} cores {2:>5d} tasks {3:>5d} futures".format(*row))
writer.writerow(row)
time.sleep(5)
print("Done with data collection.")
| Python | 0 | |
925ff38344b5058ce196877e1fdcf79a1d1f6719 | Add basic test for checking messages are received correctly | ue4/tests/test_messaging.py | ue4/tests/test_messaging.py | import pytest
from m2u.ue4 import connection
def test_send_message_size():
    """Send a big message, larger than buffer size, so the server has to
    read multiple chunks.
    """
    payload = "TestMessageSize " + ("abcdefg" * 5000)
    connection.connect()
    echoed_length = connection.send_message(payload)
    # The server replies with the byte count it received.
    assert echoed_length == str(len(payload))
    connection.disconnect()
| Python | 0 | |
ff700e5d6fc5e0c5062f687110563d7f0312a3f0 | Set up test suite to ensure server admin routes are added. | server/tests/test_admin.py | server/tests/test_admin.py | """General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
    """Test the admin site is configured to have all expected views."""

    # Every model the server app registers with the admin site.
    admin_endpoints = {
        'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
        'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
        'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
        'pluginscriptsubmission', 'plugin', 'report', 'salsetting',
        'updatehistoryitem', 'updatehistory', 'userprofile'}

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create(username='test')

    def test_no_access(self):
        """Test that unauthenticated requests redirected to login."""
        for endpoint in self.admin_endpoints:
            response = self.client.get('/admin/server/{}'.format(endpoint))
            # Redirect to login page.
            self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)

    def test_ro_access(self):
        """Test that ro requests are rejected.

        RO users should not have access to the admin site (unless they have
        `is_staff = True`.
        """
        self.user.user_profile = 'RO'
        self.user.save()
        self.client.force_login(self.user)
        for endpoint in self.admin_endpoints:
            url = '/admin/server/{}/'.format(endpoint)
            msg = 'Failed for path: "{}"'.format(endpoint)
            response = self.client.get(url)
            self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
            expected = '/admin/login/?next=/admin/server/{}/'.format(endpoint)
            self.assertEqual(response.url, expected, msg=msg)

    def test_ga_access(self):
        """Ensure GA userprofile grants admin page access."""
        self.user.user_profile = 'GA'
        self.user.save()
        self.client.force_login(self.user)
        for endpoint in self.admin_endpoints:
            url = '/admin/server/{}/'.format(endpoint)
            msg = 'Failed for path: "{}"'.format(endpoint)
            response = self.client.get(url, follow=True)
            self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
| Python | 0 | |
3be7efc31bcbdaac76f0c6761554ec6f84f3f840 | add a class to convert short url to venue id | VenueIdCrawler.py | VenueIdCrawler.py | #! /usr/bin/python2
# vim: set fileencoding=utf-8
from timeit import default_timer as clock
from itertools import izip_longest
import pycurl
POOL_SIZE = 30
def grouper(iterable, n, fillvalue=None):
    """
    from http://docs.python.org/2/library/itertools.html#recipes
    Collect data into fixed-length chunks or blocks
    >>> list(grouper('ABCDEFG', 3, 'x'))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
    """
    # Sharing one iterator n ways makes zip consume n items per output tuple.
    shared_iterator = iter(iterable)
    return izip_longest(*([shared_iterator] * n), fillvalue=fillvalue)
class VenueIdCrawler():
    """Resolve short URLs to foursquare venue ids using a pool of curl handles."""
    cpool = None
    multi = None
    pool_size = 0
    remaining = 0

    def __init__(self, pool_size=POOL_SIZE):
        assert isinstance(pool_size, int) and pool_size > 0
        self.pool_size = pool_size
        # BUG FIX: results and errors used to be class-level mutable
        # attributes, so every crawler instance shared (and polluted) the
        # same dict and list.
        self.results = {}
        self.errors = []
        self.multi = pycurl.CurlMulti()
        self.cpool = [pycurl.Curl() for _ in range(self.pool_size)]
        for c in self.cpool:
            c.setopt(pycurl.FOLLOWLOCATION, 1)
            c.setopt(pycurl.MAXREDIRS, 6)
            c.setopt(pycurl.NOBODY, 1)

    def venue_id_from_url(self, urls):
        """Resolve every URL in `urls` and return the {url: venue_id} mapping."""
        start = clock()
        for batch in grouper(urls, self.pool_size):
            self.prepare_request(batch)
            self.perform_request()
        report = 'query {} urls in {:.2f}s, {} and {} errors'
        fails = [1 for v in self.results.values() if v is None]
        print(report.format(len(urls), clock() - start, len(self.errors),
                            len(fails)))
        self.remaining = 0
        return self.results

    def prepare_request(self, urls):
        # Attach one curl handle per URL; `urls` is padded with None by grouper.
        assert len(urls) == self.pool_size
        for i, u in enumerate(urls):
            if u is not None:
                self.cpool[i].setopt(pycurl.URL, u)
                self.cpool[i].url = u
                self.multi.add_handle(self.cpool[i])
                self.remaining += 1

    def perform_request(self):
        # Drive the multi handle until every queued transfer has completed.
        while self.remaining > 0:
            status = self.multi.select(0.3)
            if status == -1:  # timeout
                continue
            if status > 0:
                self.empty_queue()
            performing = True
            while performing:
                status, self.remaining = self.multi.perform()
                if status is not pycurl.E_CALL_MULTI_PERFORM:
                    performing = False
            self.empty_queue()

    def empty_queue(self):
        # Collect finished transfers, recording failures and venue ids.
        _, ok, ko = self.multi.info_read()
        for failed in ko:
            self.errors.append(failed[1])
            self.multi.remove_handle(failed[0])
        for success in ok:
            self.results[success.url] = VenueIdCrawler.get_venue_id(success)
            self.multi.remove_handle(success)

    @staticmethod
    def get_venue_id(curl_object):
        # The venue id is the last path component of the final redirect target.
        if curl_object.getinfo(pycurl.HTTP_CODE) == 200:
            return curl_object.getinfo(pycurl.EFFECTIVE_URL).split('/')[-1]
        return None
def venue_id_from_url(c, url):
    """
    Return the id of the venue associated with short url
    >>> venue_id_from_url(pycurl.Curl(), 'http://4sq.com/31ZCjK')
    '44d17cecf964a5202b361fe3'
    """
    c.reset()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.FOLLOWLOCATION, 1)
    c.setopt(pycurl.MAXREDIRS, 6)
    c.setopt(pycurl.NOBODY, 1)
    c.perform()
    # Anything other than a 200 after following redirects means no venue.
    if c.getinfo(pycurl.HTTP_CODE) != 200:
        return None
    return c.getinfo(pycurl.EFFECTIVE_URL).split('/')[-1]
if __name__ == '__main__':
import doctest
doctest.testmod()
from persistent import load_var
urls = ['http://4sq.com/1ZNmiJ', 'http://4sq.com/2z5p82',
'http://4sq.com/31ZCjK', 'http://4sq.com/3iFEGH',
'http://4sq.com/4ADi6k', 'http://4sq.com/4FFBOp',
'http://4sq.com/4k1L7c', 'http://4sq.com/5DPD7A',
'http://4sq.com/5NbLrk', 'http://4sq.com/67XL9k',
'http://4sq.com/75SfNv', 'http://4sq.com/7DAVph',
'http://4sq.com/7JaOFa', 'http://4sq.com/7sG6jW',
'http://4sq.com/7yoatn', 'http://4sq.com/81eMd8',
'http://4sq.com/8bg7q4', 'http://4sq.com/8gFJww',
'http://4sq.com/8KuiUi', 'http://4sq.com/9cg9xg',
'http://4sq.com/9E7CnF', 'http://4sq.com/9GoW57',
'http://4sq.com/9hM2SE', 'http://4sq.com/9qN20H',
'http://4sq.com/9yHXf0', 'http://4sq.com/alRmoX',
'http://4sq.com/c7m20Y', 'http://4sq.com/cDCxsE',
'http://4sq.com/ck4qtA', 'http://4sq.com/cXUN8F',
'http://4sq.com/cYesTR', 'http://4sq.com/dlIcOc',
'http://4sq.com/dy5um7', 'http://4sq.com/dz96EL',
'http://t.co/3kGBr2l', 'http://t.co/90Fh8ks',
'http://t.co/9COjxhy', 'http://t.co/axfykVI',
'http://t.co/djzPO2S', 'http://t.co/gFaEd0N',
'http://t.co/HEAq94l', 'http://t.co/Hl8jVOi',
'http://t.co/iQ6yeYi', 'http://t.co/MkpAVDb',
'http://t.co/Mu61K9b', 'http://t.co/n4kqQR0',
'http://t.co/NN1xkiq', 'http://t.co/T88WGpO',
'http://t.co/WQn3bFf', 'http://t.co/y8qxjsT',
'http://t.co/ycbb5kt']
gold = load_var('gold_url')
start = clock()
r = VenueIdCrawler()
res = r.venue_id_from_url(urls[:len(gold)])
print('{:.2f}s'.format(clock() - start))
shared_items = set(gold.items()) & set(res.items())
print('match with gold: {}/{}'.format(len(shared_items), len(gold)))
| Python | 0.000001 | |
5906f3744782ba98e4826b1023bae6075df91a01 | Simple system for logging the outputs | waflib/extras/build_logs.py | waflib/extras/build_logs.py | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2013 (ita)
"""
A system for recording all outputs to a log file. Just add the following to your wscript file::
def init(ctx):
ctx.load('build_logs')
"""
import atexit, sys, time, os, shutil, threading
from waflib import Logs, Context
# adding the logs under the build/ directory will clash with the clean/ command
try:
up = os.path.dirname(Context.g_module.__file__)
except AttributeError:
up = '.'
LOGFILE = os.path.join(up, 'logs', '%s.log' % time.strftime('%Y_%m_%d_%H_%M'))
wlock = threading.Lock()
class log_to_file(object):
def __init__(self, stream, fileobj, filename):
self.stream = stream
self.encoding = self.stream.encoding
self.fileobj = fileobj
self.filename = filename
self.is_valid = True
def replace_colors(self, data):
for x in Logs.colors_lst.values():
if isinstance(x, str):
data = data.replace(x, '')
return data
def write(self, data):
try:
wlock.acquire()
self.stream.write(data)
self.stream.flush()
if self.is_valid:
self.fileobj.write(self.replace_colors(data))
finally:
wlock.release()
def fileno(self):
return self.stream.fileno()
def flush(self):
self.stream.flush()
if self.is_valid:
self.fileobj.flush()
def isatty(self):
return self.stream.isatty()
def init(ctx):
global LOGFILE
filename = os.path.abspath(LOGFILE)
try:
os.makedirs(os.path.dirname(os.path.abspath(filename)))
except OSError:
pass
if hasattr(os, 'O_NOINHERIT'):
fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT)
fileobj = os.fdopen(fd, 'w')
else:
fileobj = open(LOGFILE, 'w')
old_stderr = sys.stderr
# sys.stdout has already been replaced, so __stdout__ will be faster
#sys.stdout = log_to_file(sys.stdout, fileobj, filename)
#sys.stderr = log_to_file(sys.stderr, fileobj, filename)
sys.stdout = log_to_file(sys.__stdout__, fileobj, filename)
sys.stderr = log_to_file(sys.__stderr__, fileobj, filename)
# now mess with the logging module...
for x in Logs.log.handlers:
try:
stream = x.stream
except AttributeError:
pass
else:
if id(stream) == id(old_stderr):
x.stream = sys.stderr
def exit_cleanup():
try:
fileobj = sys.stdout.fileobj
except AttributeError:
pass
else:
sys.stdout.is_valid = False
sys.stderr.is_valid = False
fileobj.close()
filename = sys.stdout.filename
Logs.info('Output logged to %r' % filename)
# then copy the log file to "latest.log" if possible
up = os.path.dirname(os.path.abspath(filename))
try:
shutil.copy(filename, os.path.join(up, 'latest.log'))
except OSError:
# this may fail on windows due to processes spawned
#
pass
atexit.register(exit_cleanup)
| Python | 0.999986 | |
2913d840b63746669ac5695bd244abd6db24fe5a | Create script that prepares LaGeR strings for use with a machine learning training algorithms | lager_ml/lager_training_prep.py | lager_ml/lager_training_prep.py | #!/usr/bin/env python3
# This program prepares LaGeR strings for use with a machine learning training
# algorithm.
#
# It expands the string or set of strings to specific length (number of
# features), then generates variants for each of those. Finally, it converts
# the variants into numbers and adds the result to a dataset file.
import sys
from subprocess import call
if (len(sys.argv) < 5):
print("lager_training_prep [GESTURE_NAME] [GESTURE_LABEL] [NUM_FEATURES] [NUM_VARIANTS]")
exit()
gesture_name = sys.argv[1]
gesture_label = sys.argv[2]
num_features = sys.argv[3]
num_variants = sys.argv[4]
print("Gesture name: ", gesture_name)
print("Gesture label: ", gesture_label)
print("Number of features: ", num_features)
print("Number of variants: ", num_variants)
orig_gesture_filename = gesture_name + ".dat"
gesture_expanded_filename = gesture_name + "_expanded.dat"
gesture_variants_filename = gesture_name + "_expanded_variants.dat"
gesture_numbers_filename = gesture_name + "_expanded_variants_numbers.csv"
call(['./lager_expander.py', orig_gesture_filename, num_features])
call(['../lager_generator/lager_generator.py', gesture_expanded_filename, num_variants])
call(['./lager_file_to_numbers.py', gesture_variants_filename, gesture_label])
call('cat ' + gesture_numbers_filename + ' >>'+ ' dataset.csv', shell=True)
| Python | 0 | |
352379690275e970693a06ed6981f530b6704354 | Add index to Task.status | migrations/versions/181adec926e2_add_status_index_to_task.py | migrations/versions/181adec926e2_add_status_index_to_task.py | """Add status index to task
Revision ID: 181adec926e2
Revises: 43397e521791
Create Date: 2016-10-03 17:41:44.038137
"""
# revision identifiers, used by Alembic.
revision = '181adec926e2'
down_revision = '43397e521791'
from alembic import op
def upgrade():
op.create_index('idx_task_status', 'task', ['status'], unique=False)
def downgrade():
op.drop_index('id_task_status', table_name='task')
| Python | 0.000003 | |
6ccf99966461bd8545654084584d58093dac03d5 | Add missing version file | pyrle/version.py | pyrle/version.py | __version__ = "0.0.17"
| Python | 0.000002 | |
f4e08d41d53cf74f8a53efeb7e238de6a98946cc | add script to find allreferenced hashes | add-ons/tools/get_referenced_hashes.py | add-ons/tools/get_referenced_hashes.py | #!/usr/bin/env python
import sys
import cvmfs
def usage():
print sys.argv[0] + " <local repo name | remote repo url> [root catalog]"
print "This script walks the catalogs and generates a list of all referenced content hashes."
# get referenced hashes from a single catalog (files, chunks, nested catalogs)
def get_hashes_for_catalog(catalog):
print >> sys.stderr, "Processing" , catalog.hash , catalog
query = " SELECT DISTINCT \
lower(hex(hash)) \
FROM catalog \
WHERE hash != 0 \
UNION \
SELECT DISTINCT \
lower(hex(hash)) || 'P' \
FROM chunks \
WHERE hash != 0 \
UNION \
SELECT DISTINCT \
sha1 || 'C' \
FROM nested_catalogs;"
return { res[0] for res in catalog.run_sql(query) }
def get_hashes_for_catalog_tree(repo, root_catalog):
hashes = { root_catalog.hash + "C" }
for catalog in repo.catalogs(root_catalog):
hashes = hashes | get_hashes_for_catalog(catalog)
return hashes
def get_hashes_for_revision(repo, root_hash = None):
root_catalog = repo.retrieve_catalog(root_hash) if root_hash else repo.retrieve_root_catalog()
return get_hashes_for_catalog_tree(repo, root_catalog)
# check input values
if len(sys.argv) != 2 and len(sys.argv) != 3:
usage()
sys.exit(1)
# get input parameters
repo_identifier = sys.argv[1]
root_catalog_hash = sys.argv[2] if len(sys.argv) == 3 else None
repo = cvmfs.open_repository(repo_identifier)
hashes = get_hashes_for_revision(repo, root_catalog_hash)
print '\n'.join(hashes)
| Python | 0.000001 | |
df852b2ee81756fa62a98e425e156530333bf5a1 | add migration to change order of participation choices | meinberlin/apps/plans/migrations/0033_change_order_participation_choices.py | meinberlin/apps/plans/migrations/0033_change_order_participation_choices.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-28 13:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_plans', '0032_rename_topic_field'),
]
operations = [
migrations.AlterField(
model_name='plan',
name='participation',
field=models.SmallIntegerField(choices=[(0, 'Yes'), (1, 'No'), (2, 'Still undecided')], verbose_name='Participation'),
),
]
| Python | 0 | |
8b71de6988b65665d60c696daffb12ab78c35472 | allow passing of 'profile' argument to constructor | crosscat/IPClusterEngine.py | crosscat/IPClusterEngine.py | #
# Copyright (c) 2010-2013, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
#
from IPython.parallel import Client
#
import crosscat.LocalEngine as LE
import crosscat.utils.sample_utils as su
# these imports are necessary to make ipcluster to work
import crosscat
import crosscat.LocalEngine
def partialize(func, args_dict, dview):
# why is this push necessary?
dview.push(args_dict, block=True)
helper = functools.partial(func, **args_dict)
return helper
class IPClusterEngine(LE.LocalEngine):
"""A simple interface to the Cython-wrapped C++ engine
IPClusterEngine
"""
def __init__(self, config_filename, profile=None, seed=0, sshkey=None, packer='json'):
"""Initialize a IPClusterEngine
Do IPython.parallel operations to set up cluster and generate mapper.
"""
super(IPClusterEngine, self).__init__(seed=seed)
rc = Client(config_filename, profile=profile, sshkey=sshkey, packer=packer)
dview = rc.direct_view()
lview = rc.load_balanced_view()
with dview.sync_imports(local=True):
import crosscat
import crosscat.LocalEngine
mapper = lambda f, tuples: self.lview.map(f, *tuples)
# if you're trying to debug issues, consider clearning to start fresh
# rc.clear(block=True)
#
self.rc = rc
self.dview = dview
self.lview = lview
self.mapper = mapper
self.do_initialize = None
self.do_analyze = None
return
def get_initialize_arg_tuples(self, M_c, M_r, T, initialization, n_chains):
args_dict = dict(M_c=M_c, M_r=M_r, T=T, initialization=initialization)
do_initialize = partialize(crosscat.LocalEngine._do_initialize2,
args_dict, self.dview)
seeds = [self.get_next_seed() for seed_idx in range(n_chains)]
arg_tuples = [seeds]
#
self.do_initialize = do_initialize
return arg_tuples
def get_analyze_arg_tuples(self, M_c, T, X_L, X_D, kernel_list=(), n_steps=1, c=(), r=(),
max_iterations=-1, max_time=-1):
n_chains = len(X_L)
args_dict = dict(M_c=M_c, T=T, kernel_list=kernel_list, n_steps=n_steps,
c=c, r=r, max_iterations=max_iterations, max_time=max_time)
do_analyze = partialize(crosscat.LocalEngine._do_analyze2,
args_dict, self.dview)
seeds = [self.get_next_seed() for seed_idx in range(n_chains)]
arg_tuples = [seeds, X_L, X_D]
#
self.do_analyze = do_analyze
return arg_tuples
| #
# Copyright (c) 2010-2013, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
#
from IPython.parallel import Client
#
import crosscat.LocalEngine as LE
import crosscat.utils.sample_utils as su
# these imports are necessary to make ipcluster to work
import crosscat
import crosscat.LocalEngine
def partialize(func, args_dict, dview):
    """Push ``args_dict`` to the engines and bind it into ``func``.

    Returns a callable with every entry of ``args_dict`` pre-applied as a
    keyword argument.
    """
    # NOTE(review): unclear why this push is required -- presumably so the
    # remote engines can resolve these names; confirm before removing.
    dview.push(args_dict, block=True)
    return functools.partial(func, **args_dict)
class IPClusterEngine(LE.LocalEngine):
    """A simple interface to the Cython-wrapped C++ engine

    IPClusterEngine
    """

    def __init__(self, config_filename, seed=0, sshkey=None, packer='json'):
        """Initialize a IPClusterEngine

        Do IPython.parallel operations to set up cluster and generate mapper.
        """
        super(IPClusterEngine, self).__init__(seed=seed)
        # Connect to the running ipcluster described by config_filename.
        rc = Client(config_filename, sshkey=sshkey, packer=packer)
        dview = rc.direct_view()
        lview = rc.load_balanced_view()
        # Make sure crosscat is importable on every remote engine.
        with dview.sync_imports(local=True):
            import crosscat
            import crosscat.LocalEngine
        # ``tuples`` are parallel positional-argument lists; star-map them
        # over the load-balanced view so chains are scheduled dynamically.
        mapper = lambda f, tuples: self.lview.map(f, *tuples)
        # if you're trying to debug issues, consider clearning to start fresh
        # rc.clear(block=True)
        #
        self.rc = rc
        self.dview = dview
        self.lview = lview
        self.mapper = mapper
        # Filled in lazily by get_initialize_arg_tuples / get_analyze_arg_tuples.
        self.do_initialize = None
        self.do_analyze = None
        return

    def get_initialize_arg_tuples(self, M_c, M_r, T, initialization, n_chains):
        # Pre-bind the chain-invariant kwargs; only the seed varies per chain.
        args_dict = dict(M_c=M_c, M_r=M_r, T=T, initialization=initialization)
        do_initialize = partialize(crosscat.LocalEngine._do_initialize2,
                args_dict, self.dview)
        seeds = [self.get_next_seed() for seed_idx in range(n_chains)]
        arg_tuples = [seeds]
        #
        self.do_initialize = do_initialize
        return arg_tuples

    def get_analyze_arg_tuples(self, M_c, T, X_L, X_D, kernel_list=(), n_steps=1, c=(), r=(),
            max_iterations=-1, max_time=-1):
        # One chain per latent state in X_L.
        n_chains = len(X_L)
        args_dict = dict(M_c=M_c, T=T, kernel_list=kernel_list, n_steps=n_steps,
                c=c, r=r, max_iterations=max_iterations, max_time=max_time)
        do_analyze = partialize(crosscat.LocalEngine._do_analyze2,
                args_dict, self.dview)
        seeds = [self.get_next_seed() for seed_idx in range(n_chains)]
        arg_tuples = [seeds, X_L, X_D]
        #
        self.do_analyze = do_analyze
        return arg_tuples
| Python | 0.000001 |
92debba4bb0b0064b865a53b40476effa4d09c78 | Undo Framework example | pyside/demos/framework/undo/document.py | pyside/demos/framework/undo/document.py | from collections import namedtuple
from PySide.QtGui import QWidget, QPalette, QPainter
from PySide.QtCore import Qt, QRect
# Enum-like constants: ShapeType.Rectangle == 0, Circle == 1, Triangle == 2.
ShapeType = namedtuple('ShapeType', 'Rectangle Circle Triangle')(*range(3))


class Shape(object):
    """Immutable description of a drawable shape: kind, color, bounding rect."""

    def __init__(self, type=ShapeType.Rectangle, color=Qt.red, rect=QRect()):
        self._type = type
        self._color = color
        self._rect = rect

    @property
    def type(self):
        # One of the ShapeType constants above.
        return self._type

    @property
    def color(self):
        return self._color

    @property
    def rect(self):
        # Bounding rectangle in widget coordinates.
        return self._rect

    @property
    def name(self):
        # NOTE(review): ``self._name`` is never assigned anywhere in this
        # class, so reading this property raises AttributeError -- confirm
        # whether a ``name`` constructor argument was intended.
        return self._name
class Document(QWidget):
    """Widget that paints a list of Shape objects (``self._shapeList``)."""

    def __init__(self, parent=None):
        super(Document, self).__init__(parent)
        self._shapeList = []
        # Opaque background so stale pixels are cleared between repaints.
        self.setAutoFillBackground(True)
        self.setBackgroundRole(QPalette.Base)
        pal = QPalette()
        pal.setColor(QPalette.HighlightedText, Qt.red)
        self.setPalette(pal)

    def paintEvent(self, event):
        paintRegion = event.region()
        painter = QPainter(self)
        # NOTE(review): ``pal`` is never used below.
        pal = self.palette()
        for shape in self._shapeList:
            rect = shape.rect
            # Skip shapes entirely outside the region being repainted.
            if not paintRegion.contains(rect):
                continue
            shapeType = shape.type
            painter.setBrush(shape.color)
            if shapeType == ShapeType.Rectangle:
                print "rectangle"
                painter.drawRect(rect)
            elif shapeType == ShapeType.Circle:
                print "circle"
                painter.drawEllipse(rect)
            # NOTE(review): ShapeType.Triangle is silently not drawn.
# Demo: show a document containing one green rectangle and one red circle.
s1 = Shape(ShapeType.Rectangle, color=Qt.green, rect=QRect(0, 0, 100, 100))
s2 = Shape(ShapeType.Circle, rect=QRect(200, 200, 100, 100))
d = Document()
d._shapeList = [s1, s2]
d.show()
| Python | 0.000001 | |
fe08242647962af0fdfab0ce34417b6a6079ed65 | add another import now missing | sympy/strategies/tests/test_traverse.py | sympy/strategies/tests/test_traverse.py | from sympy.strategies.traverse import (top_down, bottom_up, sall, top_down_once,
bottom_up_once, basic_fns)
from sympy.strategies.util import expr_fns
from sympy import Basic, symbols, Symbol, S
zero_symbols = lambda x: S.Zero if isinstance(x, Symbol) else x
x,y,z = symbols('x,y,z')
def test_sall():
zero_onelevel = sall(zero_symbols)
assert zero_onelevel(Basic(x, y, Basic(x, z))) == \
Basic(0, 0, Basic(x, z))
def test_bottom_up():
_test_global_traversal(bottom_up)
_test_stop_on_non_basics(bottom_up)
def test_top_down():
_test_global_traversal(top_down)
_test_stop_on_non_basics(top_down)
def _test_global_traversal(trav):
    """Check that ``trav`` applies ``zero_symbols`` at every nesting depth."""
    a, b, c = symbols('x,y,z')
    rule = trav(zero_symbols)
    result = rule(Basic(a, b, Basic(a, c)))
    assert result == Basic(0, 0, Basic(0, 0))
def _test_stop_on_non_basics(trav):
    """Check that ``trav`` still visits leaves that are not Basic instances.

    ``add_one_if_can`` increments the integer leaves and leaves the
    un-addable string leaves untouched.
    """
    def add_one_if_can(expr):
        try:
            return expr + 1
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt /
        # SystemExit; Exception is the widest class that is safe here.
        except Exception:
            return expr

    expr = Basic(1, 'a', Basic(2, 'b'))
    expected = Basic(2, 'a', Basic(3, 'b'))
    rl = trav(add_one_if_can)
    assert rl(expr) == expected
class Basic2(Basic):
pass
rl = lambda x: Basic2(*x.args) if isinstance(x, Basic) else x
def test_top_down_once():
top_rl = top_down_once(rl)
assert top_rl(Basic(1, 2, Basic(3, 4))) == \
Basic2(1, 2, Basic(3, 4))
def test_bottom_up_once():
bottom_rl = bottom_up_once(rl)
assert bottom_rl(Basic(1, 2, Basic(3, 4))) == \
Basic(1, 2, Basic2(3, 4))
def test_expr_fns():
from sympy.strategies.rl import rebuild
from sympy import Add
x, y = map(Symbol, 'xy')
expr = x + y**3
e = bottom_up(lambda x: x + 1, expr_fns)(expr)
b = bottom_up(lambda x: Basic.__new__(Add, x, 1), basic_fns)(expr)
assert rebuild(b) == e
| from sympy.strategies.traverse import (top_down, bottom_up, sall, top_down_once,
bottom_up_once, expr_fns, basic_fns)
from sympy import Basic, symbols, Symbol, S
zero_symbols = lambda x: S.Zero if isinstance(x, Symbol) else x
x,y,z = symbols('x,y,z')
def test_sall():
zero_onelevel = sall(zero_symbols)
assert zero_onelevel(Basic(x, y, Basic(x, z))) == \
Basic(0, 0, Basic(x, z))
def test_bottom_up():
_test_global_traversal(bottom_up)
_test_stop_on_non_basics(bottom_up)
def test_top_down():
_test_global_traversal(top_down)
_test_stop_on_non_basics(top_down)
def _test_global_traversal(trav):
x,y,z = symbols('x,y,z')
zero_all_symbols = trav(zero_symbols)
assert zero_all_symbols(Basic(x, y, Basic(x, z))) == \
Basic(0, 0, Basic(0, 0))
def _test_stop_on_non_basics(trav):
def add_one_if_can(expr):
try: return expr + 1
except: return expr
expr = Basic(1, 'a', Basic(2, 'b'))
expected = Basic(2, 'a', Basic(3, 'b'))
rl = trav(add_one_if_can)
assert rl(expr) == expected
class Basic2(Basic):
pass
rl = lambda x: Basic2(*x.args) if isinstance(x, Basic) else x
def test_top_down_once():
top_rl = top_down_once(rl)
assert top_rl(Basic(1, 2, Basic(3, 4))) == \
Basic2(1, 2, Basic(3, 4))
def test_bottom_up_once():
bottom_rl = bottom_up_once(rl)
assert bottom_rl(Basic(1, 2, Basic(3, 4))) == \
Basic(1, 2, Basic2(3, 4))
def test_expr_fns():
from sympy.strategies.rl import rebuild
from sympy import Add
x, y = map(Symbol, 'xy')
expr = x + y**3
e = bottom_up(lambda x: x + 1, expr_fns)(expr)
b = bottom_up(lambda x: Basic.__new__(Add, x, 1), basic_fns)(expr)
assert rebuild(b) == e
| Python | 0 |
0c29b431a0f5ce9115d7acdcaaabbd27546949c6 | Add test for contact success view. | chmvh_website/contact/tests/views/test_success_view.py | chmvh_website/contact/tests/views/test_success_view.py | from django.test import RequestFactory
from django.urls import reverse
from contact.views import SuccessView
class TestSuccessView(object):
    """Test cases for the success view"""

    # Resolved once at class-definition time; the route is static.
    url = reverse('contact:success')

    def test_get(self, rf: RequestFactory):
        """Test sending a GET request to the view.

        Sending a GET request to the view should render the success
        page.

        ``rf`` is the pytest-django RequestFactory fixture.
        """
        request = rf.get(self.url)
        response = SuccessView.as_view()(request)

        assert response.status_code == 200
        # TemplateResponse is not rendered yet, so check template_name
        # rather than rendered content.
        assert 'contact/success.html' in response.template_name
f66a60411b4e1cb30ac1fde78735ba38e99289cf | Create cfprefs.py | cfprefs.py | cfprefs.py | #!/usr/bin/python
import CoreFoundation
domain = 'com.apple.appstore'
key = 'restrict-store-require-admin-to-install'
key_value = CoreFoundation.CFPreferencesCopyAppValue(key, domain)
print 'Key Value = ', key_value
key_forced = CoreFoundation.CFPreferencesAppValueIsForced(key, domain)
print 'Key Forced = ', key_forced
| Python | 0 | |
e5fecce2693056ac53f7d34d00801829ea1094c3 | add JPEG decoder CPU perf bench | tools/jpegdec_perf/reader_perf_multi.py | tools/jpegdec_perf/reader_perf_multi.py | import cv2
import os
from turbojpeg import TurboJPEG, TJPF_GRAY, TJSAMP_GRAY, TJFLAG_PROGRESSIVE
import time
import threading
# specifying library path explicitly
# jpeg = TurboJPEG(r'D:\turbojpeg.dll')
# jpeg = TurboJPEG('/usr/lib64/libturbojpeg.so')
# jpeg = TurboJPEG('/usr/local/lib/libturbojpeg.dylib')
# using default library installation
def decode():
    """Benchmark TurboJPEG decode latency over every file in image_folder.

    Reads each file's compressed bytes up front so only the decode call is
    timed, then prints the image count and mean decode time in ms.
    """
    jpeg = TurboJPEG()
    image_folder = '/home/matrix/data/val/'
    cnt = 0
    time_sum = 0.0
    for fname in sorted(os.listdir(image_folder)):
        fpath = os.path.join(image_folder, fname)
        # ``with`` guarantees the handle is closed even if decode raises
        # (the original leaked the handle on any exception).
        with open(fpath, 'rb') as in_file:
            jpg = in_file.read()
        cnt += 1
        begin = time.time() * 1000
        raw = jpeg.decode(jpg)
        end = time.time() * 1000
        time_sum += end - begin
    print("image cnt: ", cnt)
    if cnt:
        print("time per image is(ms):", time_sum / cnt)
    else:
        # Guard the original's ZeroDivisionError on an empty folder.
        print("no images found in", image_folder)
# Spawn 52 decoder threads to measure aggregate CPU decode throughput.
# NOTE(review): join() is commented out, so the "ended" message prints
# immediately, before the worker actually finishes.
for i in range(52):
    print('thread %s is running...' % threading.current_thread().name)
    t = threading.Thread(target=decode, name='DecodeThread')
    t.start()
    # t.join()
    print('thread %s ended.' % threading.current_thread().name)
| Python | 0 | |
4e5c0ea499dd596d3719717166172113e7209d1e | check in script, authored by Joseph Bisch | src/github-api-releases.py | src/github-api-releases.py | # Homepage: https://github.com/josephbisch/test-releases-api/blob/master/github-api-releases.py
#
# Copyright
# Copyright (C) 2016 Joseph Bisch <joseph.bisch AT gmail.com>
#
# License
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import requests
import getpass
import json
import sys
import os
import ntpath
import magic
from urllib.parse import urljoin
GITHUB_API = 'https://api.github.com'
def check_status(res, j):
    """Report a failed GitHub API response.

    Prints the API error message and returns 1 when ``res`` carries an
    HTTP error status (>= 400); returns 0 otherwise.
    """
    if res.status_code < 400:
        return 0
    print('ERROR: %s' % j.get('message', 'UNDEFINED'))
    return 1
def create_release(owner, repo, tag, token):
    """Create a GitHub release named after ``tag`` on owner/repo.

    Returns 0 on success, 1 when the API reports an error.
    """
    url = urljoin(GITHUB_API, '/'.join(['repos', owner, repo, 'releases']))
    headers = {'Authorization': token}
    data = {'tag_name': tag, 'name': tag, 'body': 'winetricks - %s' % tag}
    res = requests.post(url, auth=(owner, token), data=json.dumps(data), headers=headers)
    j = json.loads(res.text)
    if check_status(res, j):
        return 1
    return 0
def upload_asset(path, owner, repo, tag):
    """Upload the file at ``path`` as a release asset on owner/repo@tag.

    Creates the release from the tag when it does not exist yet; in that
    case returns 1 so the caller retries the upload.  Returns 0 when done
    (success or unrecoverable API error).  Requires GITHUB_TOKEN in the
    environment.
    """
    token = os.environ['GITHUB_TOKEN']
    url = urljoin(GITHUB_API,
                  '/'.join(['repos', owner, repo, 'releases', 'tags', tag]))
    res = requests.get(url)
    j = json.loads(res.text)
    if check_status(res, j):
        # release must not exist, creating release from tag
        if create_release(owner, repo, tag, token):
            return 0
        else:
            # Need to start over with uploading now that release is created
            # Return 1 to indicate we need to run upload_asset again
            return 1
    upload_url = j['upload_url']
    # The API returns a URI template ("...{?name,label}"); drop the template.
    upload_url = upload_url.split('{')[0]
    fname = ntpath.basename(path)
    # Read in binary mode: assets are typically archives/binaries, and text
    # mode would corrupt them (or fail to decode under Python 3).
    with open(path, 'rb') as f:
        contents = f.read()
    # mime=True makes libmagic return a MIME type; without it the result is
    # a human-readable description, which is not a valid Content-Type.
    content_type = magic.from_file(path, mime=True)
    headers = {'Content-Type': content_type, 'Authorization': token}
    params = {'name': fname}
    res = requests.post(upload_url, data=contents, auth=(owner, token),
                        headers=headers, params=params)
    j = json.loads(res.text)
    if check_status(res, j):
        return 0
    print('SUCCESS: %s uploaded' % fname)
    return 0
if __name__ == '__main__':
    # Usage: github-api-releases.py <asset-path> <owner> <repo> <tag>
    path = sys.argv[1]
    owner = sys.argv[2]
    repo = sys.argv[3]
    tag = sys.argv[4]
    # Make a relative asset path relative to this script, not the CWD.
    if not os.path.isabs(path):
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
    ret = 1  # Run upload_asset at least once.
    # upload_asset returns 1 when it had to create the release first and
    # the upload must be retried.
    while ret:
        ret = upload_asset(path, owner, repo, tag)
e10f6ebf9eba5cf734bbeead68a3b36f9db8dae8 | add ridesharing | source/jormungandr/jormungandr/street_network/ridesharing.py | source/jormungandr/jormungandr/street_network/ridesharing.py | # Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import logging
import copy
import itertools
from jormungandr.street_network.street_network import AbstractStreetNetworkService, StreetNetworkPathType
from jormungandr import utils
from jormungandr.utils import get_pt_object_coord, SectionSorter
from navitiacommon import response_pb2
class Ridesharing(AbstractStreetNetworkService):
    """
    Street-network service wrapper for ridesharing.

    Delegates the actual routing to an inner street-network service built
    from the ``street_network`` sub-configuration, then relabels the
    resulting sections' mode as Ridesharing.
    """
    def __init__(self, instance, service_url, modes=None, id=None, timeout=10, api_key=None, **kwargs):
        self.instance = instance
        self.modes = modes or []
        # Identifier reported by status(); defaults to 'ridesharing'.
        self.sn_system_id = id or 'ridesharing'
        # Sub-configuration of the wrapped street-network service.
        # NOTE(review): if 'street_network' is absent this is None and the
        # subscriptions below raise TypeError -- presumably guaranteed by
        # the caller; confirm.
        config = kwargs.get('street_network', None)
        if 'service_url' not in config['args']:
            config['args'].update({'service_url': None})
        if 'instance' not in config['args']:
            config['args'].update({'instance': instance})
        config['args'].update({'modes': self.modes})
        self.street_network = utils.create_object(config)

    def status(self):
        # unicode() -- this module still targets Python 2.
        return {'id': unicode(self.sn_system_id), 'class': self.__class__.__name__, 'modes': self.modes}

    def _direct_path(
        self, mode, pt_object_origin, pt_object_destination, fallback_extremity, request, direct_path_type
    ):
        # TODO: the ridesharing_speed is stored in car_no_park_speed
        # a proper way to handle this is to override car_no_park_speed use the ridesharing_speed here
        # copy_request = copy.deepcopy(request)
        # copy_request["car_no_park_speed"] = copy_request["ridesharing_speed"]
        response = self.street_network._direct_path(
            mode, pt_object_origin, pt_object_destination, fallback_extremity, request, direct_path_type
        )
        if response:
            # Relabel every section so journeys are exposed as ridesharing.
            for journey in response.journeys:
                for section in journey.sections:
                    section.street_network.mode = response_pb2.Ridesharing
        return response

    def get_street_network_routing_matrix(
        self, origins, destinations, street_network_mode, max_duration, request, **kwargs
    ):
        # TODO: the ridesharing_speed is stored in car_no_park_speed
        # a proper way to handle this is to override car_no_park_speed use the ridesharing_speed here
        # copy_request = copy.deepcopy(request)
        # copy_request["car_no_park_speed"] = copy_request["ridesharing_speed"]
        return self.street_network.get_street_network_routing_matrix(
            origins, destinations, street_network_mode, max_duration, request, **kwargs
        )

    def make_path_key(self, mode, orig_uri, dest_uri, streetnetwork_path_type, period_extremity):
        """
        :param orig_uri, dest_uri, mode: matters obviously
        :param streetnetwork_path_type: whether it's a fallback at
        the beginning, the end of journey or a direct path without PT also matters especially for car (to know if we
        park before or after)
        :param period_extremity: is a PeriodExtremity (a datetime and its meaning on the
        fallback period)
        Nota: period_extremity is not taken into consideration so far because we assume that a
        direct path from A to B remains the same even the departure time are different (no realtime)
        """
        # period_extremity is deliberately dropped (None) -- see Nota above.
        return self.street_network.make_path_key(mode, orig_uri, dest_uri, streetnetwork_path_type, None)
| Python | 0.00041 | |
c650d64247d63d2af7a8168795e7edae5c9ef6ef | Add realtime chart plotting example | realtime-plot.py | realtime-plot.py | import time, random
import math
from collections import deque
start = time.time()
class RealtimePlot:
    """Fixed-length scrolling line plot backed by two bounded deques."""

    def __init__(self, axes, max_entries=100):
        # Bounded deques drop the oldest sample once max_entries is reached.
        self.axis_x = deque(maxlen=max_entries)
        self.axis_y = deque(maxlen=max_entries)
        self.axes = axes
        self.max_entries = max_entries
        self.lineplot, = axes.plot([], [], "ro-")
        self.axes.set_autoscaley_on(True)

    def add(self, x, y):
        """Append one (x, y) sample and rescale the axes to the window."""
        self.axis_x.append(x)
        self.axis_y.append(y)
        self.lineplot.set_data(self.axis_x, self.axis_y)
        # Tiny epsilon keeps xlim valid when only one sample is present.
        self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
        self.axes.relim(); self.axes.autoscale_view()  # rescale the y-axis

    def animate(self, figure, callback, interval=50):
        """Drive the plot from ``callback(frame_index) -> (x, y)``.

        Keeps a reference to (and returns) the FuncAnimation object: the
        original discarded it, and a FuncAnimation with no live reference
        is garbage-collected before it ever fires.
        """
        import matplotlib.animation as animation

        def wrapper(frame_index):
            self.add(*callback(frame_index))
            self.axes.relim(); self.axes.autoscale_view()  # rescale the y-axis
            return self.lineplot

        self._animation = animation.FuncAnimation(figure, wrapper, interval=interval)
        return self._animation
def main():
    from matplotlib import pyplot as plt

    # Demo 1: animation-driven updates (runs until the window is closed).
    fig, axes = plt.subplots()
    display = RealtimePlot(axes)
    display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
    plt.show()

    # Demo 2: manual polling loop with explicit add() + pause().
    fig, axes = plt.subplots()
    display = RealtimePlot(axes)
    while True:
        display.add(time.time() - start, random.random() * 100)
        plt.pause(0.001)

if __name__ == "__main__": main()
| Python | 0 | |
8ae82037dde45019cae8912f45a36cf3a362c444 | Revert "HAProxy uses milliseconds ..." | openstack/network/v2/health_monitor.py | openstack/network/v2/health_monitor.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource2 as resource
class HealthMonitor(resource.Resource):
    """LBaaS v2 health monitor (neutron ``/lbaas/healthmonitors``)."""

    resource_key = 'healthmonitor'
    resources_key = 'healthmonitors'
    base_path = '/lbaas/healthmonitors'
    service = network_service.NetworkService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True

    # NOTE(review): 'adminstate_up' looks like a typo for 'admin_state_up'
    # as the query-parameter name -- confirm against the neutron API.
    _query_mapping = resource.QueryParameters(
        'delay', 'expected_codes', 'http_method', 'max_retries',
        'timeout', 'type', 'url_path',
        is_admin_state_up='adminstate_up',
        project_id='tenant_id',
    )

    # Properties
    #: The time, in seconds, between sending probes to members.
    delay = resource.Body('delay')
    #: Expected HTTP codes for a passing HTTP(S) monitor.
    expected_codes = resource.Body('expected_codes')
    #: The HTTP method that the monitor uses for requests.
    http_method = resource.Body('http_method')
    #: The administrative state of the health monitor, which is up
    #: ``True`` or down ``False``. *Type: bool*
    is_admin_state_up = resource.Body('admin_state_up', type=bool)
    #: Maximum consecutive health probe tries.
    max_retries = resource.Body('max_retries')
    #: Name of the health monitor.
    name = resource.Body('name')
    #: List of pools associated with this health monitor
    #: *Type: list of dicts which contain the pool IDs*
    pool_ids = resource.Body('pools', type=list)
    #: The ID of the project this health monitor is associated with.
    project_id = resource.Body('tenant_id')
    #: The maximum number of seconds for a monitor to wait for a
    #: connection to be established before it times out. This value must
    #: be less than the delay value.
    timeout = resource.Body('timeout')
    #: The type of probe sent by the load balancer to verify the member
    #: state, which is PING, TCP, HTTP, or HTTPS.
    type = resource.Body('type')
    #: Path portion of URI that will be probed if type is HTTP(S).
    url_path = resource.Body('url_path')
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource2 as resource
class HealthMonitor(resource.Resource):
resource_key = 'healthmonitor'
resources_key = 'healthmonitors'
base_path = '/lbaas/healthmonitors'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'delay', 'expected_codes', 'http_method', 'max_retries',
'timeout', 'type', 'url_path',
is_admin_state_up='adminstate_up',
project_id='tenant_id',
)
# Properties
#: The time, in milliseconds, between sending probes to members.
delay = resource.Body('delay')
#: Expected HTTP codes for a passing HTTP(S) monitor.
expected_codes = resource.Body('expected_codes')
#: The HTTP method that the monitor uses for requests.
http_method = resource.Body('http_method')
#: The administrative state of the health monitor, which is up
#: ``True`` or down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: Maximum consecutive health probe tries.
max_retries = resource.Body('max_retries')
#: Name of the health monitor.
name = resource.Body('name')
#: List of pools associated with this health monitor
#: *Type: list of dicts which contain the pool IDs*
pool_ids = resource.Body('pools', type=list)
#: The ID of the project this health monitor is associated with.
project_id = resource.Body('tenant_id')
#: The maximum number of milliseconds for a monitor to wait for a
#: connection to be established before it times out. This value must
#: be less than the delay value.
timeout = resource.Body('timeout')
#: The type of probe sent by the load balancer to verify the member
#: state, which is PING, TCP, HTTP, or HTTPS.
type = resource.Body('type')
#: Path portion of URI that will be probed if type is HTTP(S).
url_path = resource.Body('url_path')
| Python | 0.000006 |
c3add04f098e81b20946abaa99e6f2d81055b168 | Lie algebras: Type A | sympy/liealgebras/type_A.py | sympy/liealgebras/type_A.py | from sympy.core import(Set, Dict, Tuple)
from cartan_type import CartanType_standard
from sympy.matrices import eye
class CartanType(Standard_Cartan):
def __init__(self,n):
assert n >= 1
Standard_Cartan.__init__(self, "A", n)
def dimension(self, n):
"""
Return the dimension of the vector space
V underlying the Lie algebra
Example
========
>>> c = CartanType["A4"]
>>> c.dimension
4
"""
return n+1
def basic_root(self, i, j):
"""
This is a method just to generate roots
with a 1 iin the ith position and a -1
in the jth postion.
"""
n = self.n
root = [0]*(n+1)
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
Returns the ith simple root for the A series.
Examples
========
>>> c = CartanType["A4"]
>>> c.simple_root(1)
[1,-1,0,0,0]
"""
return self.basic_root(i-1,i)
def highest_root(self):
return self.basic_root(0, self.n - 1)
def roots(self):
"""
Returns the total number of roots for A_n
"""
n = self.n
return n(n+1)
def cartan_matrix(self):
"""
Returns the Cartan matrix for A_n.
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>.
Example
=======
>>> c = CartanType['A4']
>>> c.cartan_matrix
[2 -1 0 0 ]
[ ]
[-1 2 -1 0]
[ ]
[0 -1 2 -1]
[ ]
[0 0 -1 2]
"""
n = self.n
m = 2 * eye(n)
i = 1
for i < n-1:
m[i,i+1] = -1
m[i-1,i] = -1
i += 1
m[0,1] = -1
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of A_n
"""
n = self.n
return n**2 - 1
def LieAlgebra(self):
"""
Returns the Lie algebra associated with A_n
"""
n = self.n
return "su(" + str(n + 1) + ")"
| Python | 0.998793 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.