| commit (string, length 40) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
e778bf0bb5a90e6d5d6f4fcaf1cf56ca96e4d291
|
Write clustering result in sorted log line
|
pygraphc/clustering/ClusterUtility.py
|
pygraphc/clustering/ClusterUtility.py
|
from itertools import combinations
from operator import itemgetter
class ClusterUtility(object):
@staticmethod
def get_geometric_mean(weights):
multiplication = 1
for weight in weights:
multiplication = multiplication * weight
gmean = 0.0
if multiplication > 0.0:
k = float(len(weights))
gmean = multiplication ** (1 / k)
return round(gmean, 5)
@staticmethod
def get_weighted_cliques(graph, cliques, threshold):
weighted_kcliques = []
for clique in cliques:
weights = []
for u, v in combinations(clique, 2):
reduced_precision = round(graph[u][v][0]['weight'], 5)
weights.append(reduced_precision)
gmean = ClusterUtility.get_geometric_mean(weights)
if gmean > threshold:
weighted_kcliques.append(frozenset(clique))
return weighted_kcliques
@staticmethod
def set_cluster_id(graph, clusters):
cluster_id = 0
for cluster in clusters:
for node in cluster:
graph.node[node]['cluster'] = cluster_id
cluster_id += 1
@staticmethod
def set_cluster_label_id(graph, clusters, original_logs, analysis_dir):
new_cluster_member_label = {} # store individual cluster id for each cluster member
dominant_cluster_labels = {} # store dominant cluster label from all clusters
cluster_labels = ['accepted password', 'accepted publickey', 'authentication failure', 'check pass',
'connection closed', 'connection reset by peer', 'did not receive identification string',
'failed password', 'ignoring max retries', 'invalid user', 'pam adding faulty module',
'pam unable to dlopen', 'received disconnect', 'received signal',
'reverse mapping checking getaddrinfo', 'server listening', 'session closed',
'session opened', 'this does not map back to the address', 'unknown option']
max_cluster_id = len(cluster_labels) - 1
for cluster in clusters:
logs_per_cluster = []
label_counter = dict((cl, 0) for cl in cluster_labels)
for c in cluster:
# get all original_logs per cluster
members = graph.node[c]['member']
for member in members:
logs_per_cluster.append(original_logs[member])
# get dominant label in cluster
for label in cluster_labels:
for log in logs_per_cluster:
if label in log.lower():
label_counter[label] += 1
# get most dominant cluster label
dominant_label_counter = sorted(label_counter.items(), key=itemgetter(1), reverse=True)
# if cluster label has already been used
if dominant_label_counter[0][0] in [labels[0] for labels in dominant_cluster_labels.values()]:
# get existing counter
existing_counter = 0
for ec in dominant_cluster_labels.values():
if ec[0] == dominant_label_counter[0][0]:
existing_counter = ec[1]
# check for which one is more dominant
if dominant_label_counter[0][1] > existing_counter:
# get existing cluster with lower existing counter
existing_cluster = \
dominant_cluster_labels.keys()[dominant_cluster_labels.values().index(existing_counter)]
for c in cluster:
new_cluster_member_label[c] = cluster_labels.index(dominant_label_counter[0][0])
# set old cluster to max_cluster_id + 1
for c in existing_cluster:
new_cluster_member_label[c] = max_cluster_id + 1
else:
for c in cluster:
new_cluster_member_label[c] = max_cluster_id + 1
# if cluster label has not been used
else:
dominant_cluster_labels[frozenset(cluster)] = dominant_label_counter[0]
for c in cluster:
new_cluster_member_label[c] = cluster_labels.index(dominant_label_counter[0][0])
# set new cluster label
for node_id, new_label in new_cluster_member_label.iteritems():
graph.node[node_id]['cluster'] = new_label
# write clustering result to file (clustering result for all members in a node)
fopen = open(analysis_dir, 'w')
for key, value in new_cluster_member_label.iteritems():
cluster_members = graph.node[key]['member']
for member in cluster_members:
cluster_label = 'undefined' if value > max_cluster_id else cluster_labels[value]
fopen.write(str(value) + '; ' + cluster_label + '; ' + original_logs[member])
fopen.close()
|
Python
| 0.000057
|
@@ -4585,21 +4585,40 @@
#
-write
+get sorted log line id -
cluster
ing
@@ -4617,243 +4617,445 @@
ster
-ing
+ id
result
- to file (clustering result for all members in a node)%0A fopen = open(analysis_dir, 'w')%0A for key, value in new_cluster_member_label.iteritems():%0A cluster_members = graph.node%5Bkey%5D%5B'member'%5D%0A
+s%0A analysis_result = %7B%7D%0A for node in graph.nodes_iter(data=True):%0A members = node%5B1%5D%5B'member'%5D%0A for member in members:%0A analysis_result%5Bmember%5D = new_cluster_member_label%5Bnode%5B0%5D%5D%0A sorted(analysis_result.items(), key=itemgetter(0))%0A%0A # write clustering result to file (clustering result for all members in a node)%0A fopen = open(analysis_dir, 'w')%0A
fo
@@ -5054,45 +5054,98 @@
-
for
-member in cluster_members:%0A
+rowid, cluster_id in analysis_result.iteritems():%0A print rowid, cluster_id%0A
@@ -5183,21 +5183,26 @@
ned' if
-value
+cluster_id
%3E max_c
@@ -5235,19 +5235,20 @@
els%5B
-value%5D%0A
+cluster_id%5D%0A
@@ -5271,21 +5271,26 @@
ite(str(
-value
+cluster_id
) + '; '
@@ -5329,22 +5329,21 @@
al_logs%5B
-member
+rowid
%5D)%0A
|
c8069fff1941d0739bca8716a5e26f5c02ccffe3
|
Add South field tuple.
|
django_enumfield/fields.py
|
django_enumfield/fields.py
|
from django.db import models
class EnumField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, enumeration, *args, **kwargs):
self.enumeration = enumeration
kwargs.setdefault('choices', enumeration.get_choices())
super(EnumField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'IntegerField'
def to_python(self, value):
return self.enumeration.to_item(value)
def get_db_prep_save(self, value, connection=None):
if value is None:
return value
return self.to_python(value).value
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
def prepare(value):
v = self.to_python(value)
return self.get_db_prep_save(v, connection=connection)
if lookup_type == 'exact':
return [prepare(value)]
elif lookup_type == 'in':
return [prepare(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Lookup type %r not supported." % lookup_type)
|
Python
| 0.000001
|
@@ -1111,8 +1111,210 @@
p_type)%0A
+%0A def south_field_triple(self):%0A from south.modelsinspector import introspector%0A args, kwargs = introspector(self)%0A return ('django.db.models.fields.Integerfield', args, kwargs)%0A
|
ad3c5fe06cf6c28b0e9b5d68e2459889c1ade434
|
Version bump to 0.2
|
django_shotgun/__init__.py
|
django_shotgun/__init__.py
|
__version__ = "0.1"
|
Python
| 0
|
@@ -15,7 +15,7 @@
%220.
-1
+2
%22%0A
|
2c73fee5b0a3a527d0ee3c51291c7b4c01c9f688
|
Revert "Создание скрипта изменения группы"
|
fixture/group.py
|
fixture/group.py
|
class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# create a new group
wd.find_element_by_name("new").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
if not wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").click()
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click()#select 1 group
wd.find_element_by_name("delete").click() #delete group
self.return_to_groups_page()
def change_group_properties(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click() # select 1 group
wd.find_element_by_name("edit").click() #delete group
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Best group")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("Header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("Footer")
wd.find_element_by_name("update").click()
self.return_to_groups_page()
|
Python
| 0
|
@@ -1681,801 +1681,6 @@
.wd%0A
- self.open_groups_page()%0A wd.find_element_by_name(%22selected%5B%5D%22).click() # select 1 group%0A wd.find_element_by_name(%22edit%22).click() #delete group%0A wd.find_element_by_name(%22group_name%22).click()%0A wd.find_element_by_name(%22group_name%22).clear()%0A wd.find_element_by_name(%22group_name%22).send_keys(%22Best group%22)%0A wd.find_element_by_name(%22group_header%22).click()%0A wd.find_element_by_name(%22group_header%22).clear()%0A wd.find_element_by_name(%22group_header%22).send_keys(%22Header%22)%0A wd.find_element_by_name(%22group_footer%22).click()%0A wd.find_element_by_name(%22group_footer%22).clear()%0A wd.find_element_by_name(%22group_footer%22).send_keys(%22Footer%22)%0A wd.find_element_by_name(%22update%22).click()%0A%0A self.return_to_groups_page()%0A%0A%0A%0A
%0A%0A
|
4e8177bca4335c34950adb54c0bca4bca59ef0c0
|
fix error: has no attribute __subclass__
|
app/auth/oauth.py
|
app/auth/oauth.py
|
from rauth import OAuth2Service
from flask import current_app, url_for, redirect, request, session
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(self, provider_name):
if self.providers is None:
self.providers = {}
for provider_class in self.__subclass__():
provider = provider_class()
self.providers[provider.provider_name] = provider
return self.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name = 'facebook',
client_id = self.consumer_id,
client_secret = self.consumer_secret,
authorize_url = 'https://graph.facebook.com/oauth/authorize',
access_token_url = 'https://graph.facebook.com/oauth/access_token',
base_url = 'https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri= self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me?fields=id,email').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
|
Python
| 0.000033
|
@@ -686,16 +686,18 @@
subclass
+es
__():%0A%09%09
|
65d91fe8857ab63827f1b85935d8a6647bd57543
|
test refactoring
|
plenum/test/view_change/test_client_req_during_view_change.py
|
plenum/test/view_change/test_client_req_during_view_change.py
|
import functools
import pytest
from plenum.common.constants import NODE, TXN_TYPE, GET_TXN
from plenum.common.exceptions import RequestNackedException
from plenum.test.helper import sdk_send_random_and_check, \
sdk_send_random_requests, sdk_get_and_check_replies, sdk_gen_request, \
checkDiscardMsg
from plenum.test.pool_transactions.helper import sdk_build_get_txn_request, sdk_sign_and_send_prepared_request
from plenum.test.testing_utils import FakeSomething
@pytest.fixture(scope='function')
def test_node(test_node):
test_node.view_changer = FakeSomething(view_change_in_progress=True,
view_no=1)
return test_node
def test_client_write_request_discard_in_view_change_integration(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_client):
'''
Check that client write requests sent during a view change are discarded.
'''
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 4)
for node in txnPoolNodeSet:
node.view_changer.view_change_in_progress = True
discard_reqs = sdk_send_random_requests(looper, sdk_pool_handle,
sdk_wallet_client, 1)
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, discard_reqs)
assert "Client request is discarded since view " \
"change is in progress" in e.args[0]
def test_client_get_request_not_discard_in_view_change_integration(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_client):
'''
Check that client GET requests sent during a view change are not discarded.
'''
for node in txnPoolNodeSet:
node.view_changer.view_change_in_progress = True
_, steward_did = sdk_wallet_client
request = sdk_build_get_txn_request(looper, steward_did, 1)
sdk_request = sdk_sign_and_send_prepared_request(looper,
sdk_wallet_client,
sdk_pool_handle,
request)
sdk_get_and_check_replies(looper, [sdk_request])
def test_client_write_request_discard_in_view_change_with_dict(test_node):
test_node.send_nack_to_client = check_nack_msg
msg = sdk_gen_request({TXN_TYPE: NODE}).as_dict
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg, "view change in progress")
def test_client_get_request_not_discard_in_view_change_with_dict(test_node):
sender = "frm"
msg = sdk_gen_request({TXN_TYPE: GET_TXN}).as_dict
def post_to_client_in_box(received_msg, received_frm):
assert received_frm == sender
assert received_msg == msg
test_node.postToClientInBox = post_to_client_in_box
def discard(received_msg, reason, logLevel):
assert False, "Message {} was discard with '{}'".format(received_msg, reason)
test_node.discard = discard
test_node.unpackClientMsg(msg, sender)
def test_client_msg_discard_in_view_change_with_request(test_node):
test_node.send_nack_to_client = check_nack_msg
msg = sdk_gen_request({TXN_TYPE: NODE})
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg.as_dict, "view change in progress")
def check_nack_msg(req_key, reason, to_client):
assert "Client request is discarded since view " \
"change is in progress" == reason
|
Python
| 0
|
@@ -1,22 +1,4 @@
-import functools%0A%0A
impo
|
fb50596afa08aded35a5c49cd9d4f8a23ae15348
|
Add base_duration property setter.
|
structure/note.py
|
structure/note.py
|
"""
File: note.py
Purpose: Defines the basic Note class that holds a pitch, duration, dots, tie information.
"""
from structure.abstract_note import AbstractNote
from timemodel.duration import Duration
class Note(AbstractNote):
"""
Class representation for a musical note.
"""
STANDARD_NOTES = {'W': Duration(1),
'H': Duration(1, 2),
'Q': Duration(1, 4),
'E': Duration(1, 8),
'S': Duration(1, 16),
'T': Duration(1, 32),
'X': Duration(1, 64),
}
def __init__(self, diatonic_pitch, base_duration, num_dots=0):
"""
Constructor.
Args
diatonic_pitch: ref. class DiatonicPitch.
base_duration: either a Duration, or key in STANDARD_NOTES (upper or lower case).
num_dots: number of duration extension dots.
"""
AbstractNote.__init__(self)
self.__diatonic_pitch = diatonic_pitch
self.__num_dots = num_dots
if type(base_duration) == Duration:
self.__base_duration = base_duration
elif isinstance(base_duration, str):
if base_duration.upper() in Note.STANDARD_NOTES.keys():
self.__base_duration = Note.STANDARD_NOTES[base_duration.upper()]
else:
raise Exception('Base duration can only be a Duration or string in key set [w, h, q, e, s, t, x]')
self.__duration = self.base_duration.apply_dots(num_dots)
self.__tied_to = None
self.__tied_from = None
@property
def diatonic_pitch(self):
return self.__diatonic_pitch
@diatonic_pitch.setter
def diatonic_pitch(self, new_pitch):
self.__diatonic_pitch = new_pitch
@property
def duration(self):
return self.__duration
@property
def base_duration(self):
return self.__base_duration
@property
def num_dots(self):
return self.__num_dots
@property
def is_tied_to(self):
return self.__tied_to is not None
@property
def is_tied_from(self):
return self.__tied_from is not None
@property
def tied_to(self):
return self.__tied_to
@property
def tied_from(self):
return self.__tied_from
@property
def is_rest(self):
return self.diatonic_pitch is None
def get_all_notes(self):
return [self]
def tie(self):
"""
Tie this note to the next note.
"""
original_parent = self.get_original_parent()
if original_parent is None:
raise Exception('Cannot tie note that has no parent')
note = self.next_note()
if note is None:
raise Exception('No next note to tie to.')
# notes must have the same pitch
if note.diatonic_pitch != self.diatonic_pitch:
raise Exception(
'Tied notes are required to have the same pitch {0} != {1}'.format(self.diatonic_pitch, note.diatonic_pitch))
self.__tied_to = note
note.__tied_from = self
def untie(self):
if not self.is_tied_to:
return
self.__tied_to.__tied_from = None
self.__tied_to = None
def next_note(self):
"""
Determine the successor Note within the context of the note structure parentage.
Returns:
The successor Note, or None if there is none, e.g. this is the last note.
"""
child = self
p = child.parent
while True:
if p is None:
break
next_str = p.get_next_child(child)
if next_str is not None:
if isinstance(next_str, Note):
return next_str
else:
return next_str.get_first_note()
else:
child = p
p = p.parent
# At this point, we are the last note in the structure - there is no next
return None
def prior_note(self):
"""
Determine the Note prior to this one within the context of the note structure parentage.
Returns:
The prior Note, or None if there is none, e.g. this is the first note.
"""
child = self
p = child.parent
while True:
if p is None:
break
next_str = p.get_prior_child(child)
if next_str is not None:
if isinstance(next_str, Note):
return next_str
else:
return next_str.get_last_note()
else:
child = p
p = p.parent
# At this point, we are the first note in the structure - there is no prior
return None
def apply_factor(self, factor):
self.__base_duration *= factor
self.__duration *= factor
self.relative_position *= factor
self.contextual_reduction_factor *= factor
def reverse(self):
return self
def __str__(self):
dot_string = str(self.base_duration) + self.num_dots * '@'
return '[{0}<{1}>-({2}){3}] off={4} f={5}'.format(
self.diatonic_pitch if self.diatonic_pitch is not None else 'R', dot_string, self.duration,
'T' if self.is_tied_to else '', self.relative_position, self.contextual_reduction_factor)
|
Python
| 0
|
@@ -1987,32 +1987,219 @@
.__base_duration
+%0A%0A @base_duration.setter%0A def base_duration(self, base_duration):%0A self.__base_duration = base_duration%0A self.__duration = self.base_duration.apply_dots(self.num_dots)
%0A %0A @prope
|
e8056e4e2c5ef55b46a99afaf7664a734b401443
|
add pending as a "sent" state
|
tests/postman.py
|
tests/postman.py
|
import os
from notifications_python_client.errors import HTTPError
from config import config
from tests.test_utils import create_temp_csv, RetryException
def send_notification_via_api(client, template_id, to, message_type):
jenkins_build_id = os.getenv('BUILD_ID', 'No build id')
personalisation = {'build_id': jenkins_build_id}
if message_type == 'sms':
resp_json = client.send_sms_notification(to, template_id, personalisation)
elif message_type == 'email':
resp_json = client.send_email_notification(to, template_id, personalisation)
elif message_type == 'letter':
to.update(personalisation)
resp_json = client.send_letter_notification(template_id, to)
return resp_json['id']
def send_precompiled_letter_via_api(reference, client, pdf_file):
resp_json = client.send_precompiled_letter_notification(reference, pdf_file)
return resp_json['id']
def send_notification_via_csv(upload_csv_page, message_type, seeded=False):
service_id = config['service']['id'] if seeded else config['service']['id']
email = config['service']['seeded_user']['email'] if seeded else config['user']['email']
letter_contact = config['letter_contact_data']
if message_type == 'sms':
template_id = config['service']['templates']['sms']
directory, filename = create_temp_csv({'phone number': config['user']['mobile']})
elif message_type == 'email':
template_id = config['service']['templates']['email']
directory, filename = create_temp_csv({'email address': email})
elif message_type == 'letter':
template_id = config['service']['templates']['letter']
directory, filename = create_temp_csv(letter_contact)
upload_csv_page.go_to_upload_csv_for_service_and_template(service_id, template_id)
upload_csv_page.upload_csv(directory, filename)
notification_id = upload_csv_page.get_notification_id_after_upload()
return notification_id
class NotificationStatuses:
PENDING_VIRUS_CHECK = 'pending-virus-check'
RECEIVED = {'received'}
DELIVERED = {'delivered', 'temporary-failure', 'permanent-failure'}
SENT = RECEIVED | DELIVERED | {'sending'}
def get_notification_by_id_via_api(client, notification_id, expected_statuses):
try:
resp = client.get_notification_by_id(notification_id)
notification_status = resp['status']
if notification_status not in expected_statuses:
raise RetryException(
(
'Notification in wrong status '
'id: {id} '
'status: {status} '
'created_at: {created_at} '
'sent_at: {sent_at} '
'completed_at: {completed_at}'
).format(**resp)
)
return resp
except HTTPError as e:
if e.status_code == 404:
message = 'Notification not created yet for id: {}'.format(notification_id)
raise RetryException(message)
else:
raise
|
Python
| 0.000001
|
@@ -2184,16 +2184,27 @@
sending'
+, 'pending'
%7D%0A%0A%0Adef
|
b951c30a856611ba37bba4cc0e6ef294b55650c9
|
allow code to be defined as an array of string
|
web/Language.py
|
web/Language.py
|
import json
import os
class Language:
def __init__(self, key):
"""
Initialize the Language object, which will contain concepts for a given structure
:param key: ID of the language in the meta_info.json file
"""
# Add an empty string to convert SafeString to str
self.key = str(key + "")
self.friendly_name = None
self.categories = None
self.concepts = None
def has_key(self):
"""
Returns a Boolean if the language key exists or not
:rtype: bool
"""
# Empty string is falsy and text is truthy, but without bool() this would return the text itself
return bool(self.key)
def lang_exists(self):
"""
Returns a Boolean if the language (self.key) exists in the thesauruses or not
:rtype: bool
"""
return os.path.exists(os.path.join("web", "thesauruses", self.key))
def load_structure(self, structure_key):
"""
Loads the structure file into the Language object
:param structure_key: the ID for the structure to load
"""
file_path = os.path.join(
"web", "thesauruses", self.key, structure_key) + ".json"
with open(file_path, 'r') as file:
data = file.read()
# parse file
file_json = json.loads(data)
self.friendly_name = file_json["meta"]["language_name"]
self.categories = file_json["categories"]
self.concepts = file_json[structure_key]
def concept(self, concept_key):
"""
Get the concept (including code and comment) from the concept file for that Language
:param concept_key: key for the concept to look up
:returns: a dict containing the code and comment, and possibly the 'not-implemented' flag. They are empty strings if not specified
:rtype: object
"""
if self.concepts.get(concept_key) is None:
return {
"code": "",
"comment": ""
}
if self.concepts.get(concept_key).get("not-implemented", False):
return {
"not-implemented": True,
"code": "",
"comment": self.concepts.get(concept_key).get("comment", "")
}
return self.concepts.get(concept_key)
def concept_unknown(self, concept_key):
"""
Returns a Boolean if the concept is not known
:param concept_key: ID for the concept
:return: Boolean if the concept is not known
"""
return self.concepts.get(concept_key) is None
def concept_implemented(self, concept_key):
"""
Returns a Boolean if the concept is implemented
:param concept_key: ID for the concept
:return: Boolean if the language defines this concept
"""
return self.concept(concept_key).get("not-implemented", False) is False
def concept_code(self, concept_key):
"""
Returns the code portion of the provided concept
:param concept_key: ID for the concept
:return: the string containing the concept's code
"""
return self.concept(concept_key).get("code")
def concept_comment(self, concept_key):
"""
Returns the comment portion of the provided concept
:param concept_key: ID for the concept
:return: the string containing the concept's comment
"""
return self.concept(concept_key).get("comment", "")
|
Python
| 0.000028
|
@@ -3158,38 +3158,39 @@
%22%22%22%0A
-return
+code =
self.concept(co
@@ -3211,16 +3211,106 @@
(%22code%22)
+%0A if isinstance(code, list):%0A code = %22%5Cn%22.join(code)%0A return code
%0A%0A de
|
f97288b74696efe5b06548b776e08a6586e05eae
|
Add countdown.
|
LSLBroadcaster.py
|
LSLBroadcaster.py
|
# Import modules
from pylsl.pylsl import StreamInfo, StreamOutlet, local_clock
import xdf.xdf as xdf
import collections
import time
import sys
# Python sender example
'''
# first create a new stream info (here we set the name to BioSemi,
# the content-type to EEG, 8 channels, 100 Hz, and float-valued data) The
# last value would be the serial number of the device or some other more or
# less locally unique identifier for the stream as far as available (you
# could also omit it but interrupted connections wouldn't auto-recover).
info = StreamInfo('BioSemi', 'EEG', 8, 100, 'float32', 'myuid2424')
# append some meta-data
info.desc().append_child_value("manufacturer", "BioSemi")
channels = info.desc().append_child("channels")
for c in ["C3", "C4", "Cz", "FPz", "POz", "CPz", "O1", "O2"]:
channels.append_child("channel") \
.append_child_value("label", c) \
.append_child_value("unit", "microvolts") \
.append_child_value("type", "EEG")
# next make an outlet; we set the transmission chunk size to 32 samples and
# the outgoing buffer size to 360 seconds (max.)
outlet = StreamOutlet(info, 32, 360)
print("now sending data...")
while True:
# make a new random 8-channel sample; this is converted into a
# pylsl.vectorf (the data type that is expected by push_sample)
mysample = [rand(), rand(), rand(), rand(), rand(), rand(), rand(), rand()]
# get a time stamp in seconds (we pretend that our samples are actually
# 125ms old, e.g., as if coming from some external hardware)
stamp = local_clock()-0.125
# now send it and wait for a bit
outlet.push_sample(mysample, stamp)
time.sleep(0.01)
'''
# Load file
streams = xdf.load_xdf(r"SampleData.xdf", None, False)[0]
###############################################################################
### CREATE STREAMS WITH INFO HEADERS
###############################################################################
# List for stream outlets
outlets = []
# Go over streams
for i in range(len(streams)):
# Fetch stream info
streamInfo = streams[i]['info']
print("--- STREAM FOUND ---")
# Extract info
name = streamInfo['name'][0]
dataType = streamInfo['type'][0]
channelCount = streamInfo['channel_count'][0]
dataRate = streamInfo['nominal_srate'][0]
dataFormat = streamInfo['channel_format'][0]
identifier = streamInfo['source_id'][0]
# Print results
print("Name: " + name)
print("Data Type: " + dataType)
print("Channel Count: " + channelCount)
print("Data Rate: " + dataRate)
print("Data Format: " + dataFormat)
print("Identifier: " + identifier)
# Create new info header
info = StreamInfo(name, dataType, int(channelCount), int(dataRate), dataFormat, identifier)
# Announce extraction of child values
print("Child Values:")
# Extract child values
children = streamInfo['desc'][0]
# Go over values
for childKey in children:
# Fetch value by key
childValue = children[childKey][0]
# Is value a sequence ("append_child_value")
if(isinstance(childValue, collections.Sequence)):
print(" " + childKey + ": " + childValue)
info.desc().append_child_value(childKey, childValue)
# Is value a mapping ("append_child")
elif(isinstance(childValue, collections.Mapping)):
print(" " + childKey)
child = info.desc().append_child(childKey)
# Go over entries within child
for innerChildKey in childValue:
print(" " + " " + innerChildKey)
child.append_child(innerChildKey)
# Now go over even more inner child
for innererChild in childValue[innerChildKey]:
# Now go over innermost child
for innermostChild in innererChild:
print(" " + " " + " " + innermostChild + ": " + innererChild[innermostChild][0])
child.append_child_value(innermostChild, innererChild[innermostChild][0])
# Add outlet with information
outlets.append(StreamOutlet(info, 32, 360)) # chunk size and buffer for given seconds
print("--------------------")
###############################################################################
### PREPARE STREAMING BY MERGING TIMESTAMPS
###############################################################################
# List to collect timestamps with meta information
events = []
# Save timestamps with pointer to outlet and data
for i in range(len(streams)):
# Fetch timestamps
timeStamps = streams[i]['time_stamps']
# Go through timestamps and store a triple in the global list of timestamps
for j in range(len(timeStamps)):
events.append([timeStamps[j], i, j]) # timeStamp, outletIndex / streamIndex, index of time_series
# Sort global list of events by timestamp
def comparator(x, y):
if x[0] < y[0]:
return -1 # x smaller than y
elif x[0] > y[0]:
return 1 # x greater than y
else:
return 0 # x equal to y
events.sort(comparator)
###############################################################################
### SEND DATA
###############################################################################
# Initial waiting time
print("--- ABOUT TO START STREAMING ---")
sys.stdout.flush()
time.sleep(10)
# Do it while there are events
print("--- START STREAMING ---")
eventCount = len(events)
for i in range(eventCount):
# Extract data from current event
event = events[i]
timestamp = event[0]
outletIndex = event[1]
timeSeriesIndex = event[2]
# Get sample data to push
sampleData = streams[outletIndex]['time_series'][timeSeriesIndex]
# Push sample data to outlet
outlets[outletIndex].push_sample(sampleData) # TODO: stamp from hardware
# Print timestamp
print("Sample at: " + str(timestamp))
# Set delta time (TODO: potentially erroneous; maybe use some global time measurement)
if i < eventCount-1:
time.sleep(events[i+1][0]-timestamp)
print("-----------------------")
|
Python
| 0.000002
|
@@ -5540,16 +5540,92 @@
G ---%22)%0A
+for i in range(10, 0, -1):%0A %0A # Wait one second%0A print(str(i))%0A
sys.stdo
@@ -5635,16 +5635,20 @@
flush()%0A
+
time.sle
@@ -5651,17 +5651,16 @@
.sleep(1
-0
)%0A%0A# Do
|
0781b47512cbab5fc1a090ff68b5f9d434a864af
|
Update examples/API_v2/lookup_users_using_user_ids.py
|
examples/API_v2/lookup_users_using_user_ids.py
|
examples/API_v2/lookup_users_using_user_ids.py
|
import tweepy
# Replace bearer token value with your own
bearer_token = ""
# Initializing the Tweepy client
client = tweepy.Client(bearer_token)
# Replace User IDs
ids = [2244994945, 6253282]
# By default the user ID, name and username are returned. user_fields can be
# used to specify the additional user data that you want returned for each user
# e.g. profile_image_url
users = client.get_users(ids, user_fields=["profile_image_url"])
# Print the username and the user's profile image url
for user in users.data:
print(user.username)
print(user.profile_image_url)
|
Python
| 0
|
@@ -401,16 +401,20 @@
sers(ids
+=ids
, user_f
|
54c81494cbbe9a20db50596e68c57e1caa624043
|
Add a User post_save hook for creating user profiles
|
src-django/authentication/signals/user_post_save.py
|
src-django/authentication/signals/user_post_save.py
|
from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def on_user_post_save(sender, instance=None, created=False, **kwargs):
# Normally, users automatically get a Token created for them (if they do not
# already have one) when they hit
#
# rest_framework.authtoken.views.obtain_auth_token view
#
# This will create an authentication token for newly created users so the
# user registration endpoint can return a token back to Ember
# (thus avoiding the need to hit login endpoint)
if created:
Token.objects.create(user=instance)
# Add new user to the proper user group
normal_users_group, created = Group.objects.get_or_create(name=settings.NORMAL_USER_GROUP)
instance.groups.add(normal_users_group)
|
Python
| 0.000001
|
@@ -1,20 +1,66 @@
+from authentication.models import UserProfile%0A
from django.contrib.
@@ -770,16 +770,136 @@
reated:%0A
+ user_profile = UserProfile.objects.create(user=instance, is_email_confirmed=False)%0A user_profile.save()%0A%0A
|
9b678e184a568baea857ca68fcacb5070db6792d
|
update modulation.py
|
examples/modulation.py
|
examples/modulation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
# Driver to use
from lase.drivers import Oscillo
# Modules to import
import numpy as np
import matplotlib.pyplot as plt
import time
# Connect to Lase
host = '192.168.1.4' # Lase IP address
client = KClient(host)
driver = Oscillo(client) # Replace with appropriate driver
# Enable laser
driver.start_laser()
# Set laser current
current = 15 #mA
driver.set_laser_current(current)
# Modulation on DAC
amp_mod = 0.2
freq_mod = 1e6
driver.dac[1,:] = amp_mod*np.sin(2*np.pi*freq_mod*driver.sampling.t)
driver.set_dac()
# Signal on ADC
driver.get_adc()
signal = driver.adc[0,:]
# Plot
plt.plot(driver.sampling.t, signal)
plt.show()
# Plot
psd_signal = np.abs(np.fft.fft(signal))**2
plt.semilogy(1e-6 * np.fft.fftshift(driver.sampling.f_fft), np.fft.fftshift(psd_signal))
plt.xlabel('Frequency (MHz)')
plt.show()
# Disable laser
driver.stop_laser()
driver.close()
|
Python
| 0.000001
|
@@ -59,17 +59,16 @@
Example%0A
-%0A
from las
@@ -225,17 +225,16 @@
t time%0A%0A
-%0A
# Connec
@@ -263,16 +263,17 @@
168.1.4'
+
# Lase
@@ -331,16 +331,17 @@
(client)
+
# Repla
@@ -438,17 +438,19 @@
nt = 15
-#
+ #
mA%0Adrive
@@ -541,16 +541,17 @@
r.dac%5B1,
+
:%5D = amp
@@ -567,23 +567,27 @@
in(2
-*
+ *
np.pi
-*
+ *
freq_mod
*dri
@@ -582,17 +582,19 @@
freq_mod
-*
+ *
driver.s
@@ -788,10 +788,12 @@
al))
+
**
+
2%0A%0Ap
@@ -972,9 +972,8 @@
.close()
-%0A
|
1014c809638157da85794223c4990b5ae20512fa
|
Add crawled_at field back
|
hackernews_scrapy/items.py
|
hackernews_scrapy/items.py
|
# -*- coding: utf-8 -*-
import scrapy
class HackernewsScrapyItem(scrapy.Item):
title = scrapy.Field()
url = scrapy.Field()
|
Python
| 0
|
@@ -110,24 +110,70 @@
url = scrapy.Field()%0A
+ crawled_at = scrapy.Field(serializer=str)%0A
|
d8cb4384f32f4d0e20f3212a36cc01915260f7a8
|
Support custom actions in search router
|
tests/routers.py
|
tests/routers.py
|
"""Search router."""
from rest_framework.routers import DefaultRouter, Route
class SearchRouter(DefaultRouter):
"""Custom router for search endpoints.
Search endpoints don't follow REST principles and thus don't need
routes that default router provides.
"""
routes = [
Route(
url=r"^{prefix}{trailing_slash}$",
mapping={"get": "list", "post": "list_with_post"},
name="{basename}",
initkwargs={},
detail=False,
)
]
|
Python
| 0
|
@@ -63,16 +63,30 @@
tRouter,
+ DynamicRoute,
Route%0A%0A
@@ -518,15 +518,1030 @@
)
+,%0A # Dynamically generated list routes. Generated using%0A # @action(detail=False) decorator on methods of the viewset.%0A DynamicRoute(%0A url=r'%5E%7Bprefix%7D/%7Burl_path%7D%7Btrailing_slash%7D$',%0A name='%7Bbasename%7D-%7Burl_name%7D',%0A detail=False,%0A initkwargs=%7B%7D%0A ),%0A Route(%0A url=r'%5E%7Bprefix%7D/%7Blookup%7D%7Btrailing_slash%7D$',%0A mapping=%7B%0A 'get': 'retrieve',%0A 'put': 'update',%0A 'patch': 'partial_update',%0A 'delete': 'destroy'%0A %7D,%0A name='%7Bbasename%7D-detail',%0A detail=True,%0A initkwargs=%7B'suffix': 'Instance'%7D%0A ),%0A # Dynamically generated detail routes. Generated using%0A # @action(detail=True) decorator on methods of the viewset.%0A DynamicRoute(%0A url=r'%5E%7Bprefix%7D/%7Blookup%7D/%7Burl_path%7D%7Btrailing_slash%7D$',%0A name='%7Bbasename%7D-%7Burl_name%7D',%0A detail=True,%0A initkwargs=%7B%7D%0A ),
%0A %5D%0A
|
43922bb7cf5015cbf3538195d3d4f93ff8c9ec18
|
Bump version
|
tomb_cli/__about__.py
|
tomb_cli/__about__.py
|
__title__ = 'tomb_cli'
__summary__ = 'Top level CLI command for tomb'
__uri__ = 'http://github.com/tomborine/tomb_cli'
__version__ = '0.0.1'
__author__ = 'John Anderson'
__email__ = 'sontek@gmail.com'
__license__ = 'MIT'
__copyright__ = '2015 John Anderson (sontek)'
|
Python
| 0
|
@@ -131,17 +131,17 @@
= '0.0.
-1
+2
'%0A__auth
|
14d5a55a129546585fad6f9ff8c0db9de791ab72
|
Reset default_n_iters to 10
|
python/examples/conv/conv_1d_bench.py
|
python/examples/conv/conv_1d_bench.py
|
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# This file contains simple test cases that combine various codegen options.
from ..core.experts import *
from ..core.harness import *
from ..core.transforms import *
from .definitions import *
import typing as tp
fun_name = 'conv_1d_nwc_wcf_main'
op_name = 'linalg.conv_1d_nwc_wcf'
################################################################################
### Compilation strategies.
################################################################################
all_names = [ \
"SingleTiling3DPeel",
"SingleTiling3DPad",
"DoubleTile3DPeel",
"DoubleTile3DPad",
]
all_experts = [
e.print_ir(after_all=False, llvm=False) for e in [ \
SingleTilingExpert(
fun_name,
op_name,
# N W C KW F
tile_sizes=[1, 8, 32, 1, 8],
peel=[0, 1, 2, 3, 4])
.then(Vectorize(fun_name, op_name))
.then(Bufferize())
.then(LowerVectors())
.then(LowerToLLVM()),
SingleTilingExpert(
fun_name,
op_name,
# N W C KW F
tile_sizes=[1, 8, 32, 1, 8],
pad=True,
pack_paddings=[1, 1, 0],
hoist_paddings=[3, 0, 0])
.then(Vectorize(fun_name, op_name))
.then(Bufferize())
.then(LowerVectors())
.then(LowerToLLVM()),
DoubleTilingExpert(fun_name,
op_name,
# N W C KW F
tile_sizes1=[1, 32, 128, 3, 32],
tile_sizes2=[1, 8, 32, 1, 8],
peel2=[0, 1, 2, 3, 4])
.then(Vectorize(fun_name, op_name))
.then(Bufferize())
.then(LowerVectors())
.then(LowerToLLVM()),
DoubleTilingExpert(fun_name,
op_name,
# N W C KW F
tile_sizes1=[1, 32, 128, 3, 32],
tile_sizes2=[1, 8, 32, 1, 8],
pad2=True,
pack_paddings2=[1, 1, 0],
hoist_paddings2=[3, 0, 0])
.then(Vectorize(fun_name, op_name))
.then(Bufferize())
.then(LowerVectors(split_transfers='vector-transfers'))
.then(LowerToLLVM()),
]
]
################################################################################
### Problem instantiation
################################################################################
keys = ['N', 'W', 'C', 'KW', 'F', 'strides', 'dilations']
# CHECK-NOT: FAILURE
def main():
# Specify default configuration and parse command line.
args = test_argparser(
"conv 1d benchmark",
default_n_iters=10,
# N W C KW F st dil
default_problem_sizes_list=[
[8, 16, 32, 3, 64, [1], [1]],
[8, 16, 32, 3, 64, [1], [2]],
[8, 16, 32, 3, 64, [2], [1]],
[8, 16, 32, 3, 64, [2], [2]],
[8, 16, 32, 3, 64, [2], [3]],
[8, 16, 32, 3, 64, [3], [2]],
],
default_expert_list=all_names,
default_dynamic_at_compile_time_list=[],
default_spec_list=[])
test_harness(lambda sizes, types: ConvolutionProblem(
'NWC', 'WCF', strides=sizes['strides'], dilations=sizes['dilations']),
[[np.float32] * 3],
test_sizes(keys, args.problem_sizes_list),
test_experts(all_experts, all_names, args.expert_list),
n_iters=args.n_iters,
function_name=fun_name,
dump_data_to_file=args.dump_data)
if __name__ == '__main__':
main()
|
Python
| 0.000016
|
@@ -2872,16 +2872,17 @@
iters=10
+0
,%0A
|
18f373ffc1e49b33708ae2303b61ccf76ffa686e
|
Use pylab.load to read in data.
|
examples/ortho_demo.py
|
examples/ortho_demo.py
|
from matplotlib import rcParams, use
rcParams['numerix'] = 'Numeric' # make sure Numeric is used (to read pickle)
from matplotlib.toolkits.basemap import Basemap
import cPickle
from pylab import *
# read in topo data from pickle (on a regular lat/lon grid)
topodict = cPickle.load(open('etopo20.pickle','rb'))
etopo = topodict['data']; lons = topodict['lons']; lats = topodict['lats']
# create Basemap instance for Orthographic (satellite view) projection.
lon_0 = float(raw_input('enter reference longitude (lon_0):'))
lat_0 = float(raw_input('enter reference latitude (lat_0):'))
fillcont = int(raw_input('fill continents? (1 for yes, 0 for no):'))
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0)
# compute native map projection coordinates for lat/lon grid.
lons, lats = meshgrid(lons,lats)
x,y = m(lons,lats)
# create figure with same aspect ratio as map.
fig=m.createfigure().add_axes([0.05,0.05,0.9,0.9])
# make filled contour plot.
cs = m.contourf(x,y,etopo,30,cmap=cm.jet)
# draw coastlines.
m.drawcoastlines()
# draw a line around the map region.
m.drawmapboundary()
if fillcont:
m.fillcontinents()
# draw parallels and meridians.
m.drawparallels(arange(-90.,120.,30.))
m.drawmeridians(arange(0.,420.,60.))
title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
show()
|
Python
| 0.000094
|
@@ -1,119 +1,4 @@
-from matplotlib import rcParams, use%0ArcParams%5B'numerix'%5D = 'Numeric' # make sure Numeric is used (to read pickle)%0A
from
@@ -45,23 +45,8 @@
map%0A
-import cPickle%0A
from
@@ -125,135 +125,131 @@
id)%0A
+e
topo
-dict = cPickle.load(open('etopo20.pickle','rb'))%0Aetopo = topodict%5B'data'%5D; lons = topodict%5B'lons'%5D; lats = topodict%5B'lats'%5D
+ = array(load('etopo20data.gz'),'f')%0Alons = array(load('etopo20lons.gz'),'f')%0Alats = array(load('etopo20lats.gz'),'f')
%0A# c
@@ -652,24 +652,25 @@
shgrid(lons,
+
lats)%0Ax,y =
|
f41e620de3eedd38fc7444696bd384cf6ed0dfa4
|
Rename blocks => structure and add display_full_screen and exit_url fields.
|
studies/models.py
|
studies/models.py
|
import uuid
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.text import slugify
from guardian.shortcuts import assign_perm
from transitions.extensions import GraphMachine as Machine
from accounts.models import DemographicData, Organization, Child, User
from project.fields.datetime_aware_jsonfield import DateTimeAwareJSONField
from . import workflow
class Study(models.Model):
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
name = models.CharField(max_length=255, blank=False, null=False)
date_modified = models.DateTimeField(auto_now=True)
short_description = models.TextField()
long_description = models.TextField()
criteria = models.TextField()
duration = models.TextField()
contact_info = models.TextField()
image = models.ImageField(null=True)
organization = models.ForeignKey(
Organization,
on_delete=models.DO_NOTHING,
related_name='studies',
related_query_name='study'
)
blocks = DateTimeAwareJSONField(default=dict)
state = models.CharField(
choices=workflow.STATE_CHOICES,
max_length=25,
default=workflow.STATE_CHOICES.created
)
public = models.BooleanField(default=False)
creator = models.ForeignKey(User)
def __init__(self, *args, **kwargs):
super(Study, self).__init__(*args, **kwargs)
self.machine = Machine(
self,
states=workflow.states,
transitions=workflow.transitions,
initial=self.state,
send_event=True,
before_state_change='check_permission',
after_state_change='_finalize_state_change'
)
def __str__(self):
return f'<Study: {self.name}>'
class Meta:
permissions = (
('can_view', 'Can View'),
('can_create', 'Can Create'),
('can_edit', 'Can Edit'),
('can_remove', 'Can Remove'),
('can_activate', 'Can Activate'),
('can_deactivate', 'Can Deactivate'),
('can_pause', 'Can Pause'),
('can_resume', 'Can Resume'),
('can_approve', 'Can Approve'),
('can_submit', 'Can Submit'),
('can_retract', 'Can Retract'),
('can_resubmit', 'Can Resubmit'),
('can_edit_permissions', 'Can Edit Permissions'),
('can_view_permissions', 'Can View Permissions'),
('can_view_responses', 'Can View Responses'),
('can_view_video_responses', 'Can View Video Responses'),
('can_view_demographics', 'Can View Demographics'),
)
# WORKFLOW CALLBACKS
def check_permission(self, ev):
user = ev.kwargs.get('user')
if user.is_superuser:
return
raise
def notify_administrators_of_submission(self, ev):
# TODO
pass
def notify_submitter_of_approval(self, ev):
# TODO
pass
def notify_submitter_of_rejection(self, ev):
# TODO
pass
def notify_administrators_of_retraction(self, ev):
# TODO
pass
def notify_administrators_of_activation(self, ev):
# TODO
pass
def notify_administrators_of_pause(self, ev):
# TODO
pass
def notify_administrators_of_deactivation(self, ev):
# TODO
pass
# Runs for every transition to log action
def _log_action(self, ev):
StudyLog.objects.create(
action=ev.state.name,
study=ev.model,
user=ev.kwargs.get('user')
)
# Runs for every transition to save state and log action
def _finalize_state_change(self, ev):
ev.model.save()
self._log_action(ev)
# TODO Need a post_save hook for edit that pulls studies out of approved state
# TODO or disallows editing in pre_save if they are approved
@receiver(post_save, sender=Study)
def study_post_save(sender, **kwargs):
"""
Add study permissions to organization groups and
create groups for all newly created Study instances. We only
run on study creation to avoid having to check for existence
on each call to Study.save.
"""
study, created = kwargs['instance'], kwargs['created']
if created:
from django.contrib.auth.models import Group
organization_groups = Group.objects.filter(
name__startswith=f'{slugify(study.organization.name)}_ORG_'.upper()
)
# assign study permissions to organization groups
for group in organization_groups:
for perm, _ in Study._meta.permissions:
if 'ADMIN' in group.name:
assign_perm(perm, group, obj=study)
elif 'READ' in group.name and 'view' in perm:
assign_perm(perm, group, obj=study)
# create study groups and assign permissions
for group in ['read', 'admin']:
study_group_instance = Group.objects.create(
name=f'{slugify(study.organization.name)}_{slugify(study.name)}_STUDY_{group}'.upper() # noqa
)
for perm, _ in Study._meta.permissions:
# add only view permissions to non-admin
if group == 'read' and perm != 'can_view':
continue
if 'approve' not in perm:
assign_perm(perm, study_group_instance, obj=study)
class Response(models.Model):
study = models.ForeignKey(
Study, on_delete=models.DO_NOTHING,
related_name='responses'
)
child = models.ForeignKey(Child, on_delete=models.DO_NOTHING)
demographic_snapshot = models.ForeignKey(
DemographicData,
on_delete=models.DO_NOTHING
)
results = DateTimeAwareJSONField(default=dict)
def __str__(self):
return f'<Response: {self.study} {self.child.user.get_short_name}>'
class Meta:
permissions = (
('view_response', 'View Response'),
)
class Log(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
def __str__(self):
return f'<{self.__class__.__name__}: {self.action} @ {self.created_at:%c}>'
class Meta:
abstract = True
class StudyLog(Log):
action = models.CharField(max_length=128)
study = models.ForeignKey(
Study,
on_delete=models.DO_NOTHING,
related_name='logs',
related_query_name='logs'
)
def __str__(self):
return f'<StudyLog: {self.action} on {self.study.name} at {self.created_at} by {self.user.username}' # noqa
class ResponseLog(Log):
action = models.CharField(max_length=128)
response = models.ForeignKey(Response, on_delete=models.DO_NOTHING)
|
Python
| 0
|
@@ -1054,14 +1054,17 @@
-blocks
+structure
= D
@@ -1099,16 +1099,142 @@
t=dict)%0A
+ display_full_screen = models.BooleanField(default=True)%0A exit_url = models.URLField(default=%22https://lookit.mit.edu/%22)%0A
stat
|
35d0ce026741c65cdb834f5828ef4000f6d06150
|
fix for runtest path handling from Marek
|
tests/runtest.py
|
tests/runtest.py
|
#! /usr/bin/env python
"""
Test runner for main pygr tests.
Collects all files ending in _test.py and executes them with
unittest.TextTestRunner.
"""
import os, sys, re, unittest, shutil
from testlib import testutil, testoptions
from pygr import logger
def all_tests():
"Returns all file names that end in _test.py"
patt = re.compile("_test.py$")
mods = os.listdir(os.getcwd())
mods = filter(patt.search, mods)
mods = [ m.rstrip(".py") for m in mods ]
# some predictable order...
mods.sort()
return mods
def run(targets, options):
"Imports and runs the modules names that are contained in the 'targets'"
success = errors = 0
# run the tests by importing the module and getting its test suite
for name in targets:
try:
testutil.info( 'running tests for module %s' % name )
mod = __import__( name )
suite = mod.get_suite()
runner = unittest.TextTestRunner(verbosity=options.verbosity,
descriptions=0)
results = runner.run( suite )
# count tests and errors
success += results.testsRun - \
len(results.errors) - len(results.failures)
errors += len(results.errors) + len(results.failures)
# if we're in strict mode stop on errors
if options.strict and errors:
testutil.error( "strict mode stops on errors" )
break
except ImportError:
testutil.error( "unable to import module '%s'" % name )
# each skipped testsuite generates a message
skipped = len(testutil.SKIP_MESSAGES)
# generate warnings on skipped tests
for message in testutil.SKIP_MESSAGES:
testutil.warn(message)
# summarize the run
testutil.info('=' * 59)
testutil.info('''\
%s tests passed, %s tests failed, %s suites skipped; %d total''' % \
(success, errors, skipped, success + errors + skipped))
if __name__ == '__main__':
# gets the prebuild option parser
parser = testoptions.option_parser()
# parse the options
options, args = parser.parse_args()
# modules: from command line args or all modules
targets = args or all_tests()
# get rid of the .py ending in case full module names were passed in
# the command line
targets = [ t.rstrip(".py") for t in targets ]
# exclusion mode
if options.exclude:
targets = [ name for name in all_tests() if name not in targets ]
# disables debug messages at < 2 verbosity
if options.verbosity != 2:
logger.disable('DEBUG')
# run all the tests
if options.coverage:
testutil.generate_coverage(run, 'coverage', targets=targets,
options=options)
else:
run(targets=targets, options=options)
|
Python
| 0
|
@@ -392,15 +392,47 @@
(os.
-getcwd(
+path.normpath(os.path.dirname(__file__)
))%0A
|
6d4c5618db43725c0af2b37661911a960bfa0aa2
|
Allow an already deleted watch to not fail the stack.delete().
|
heat/engine/cloud_watch.py
|
heat/engine/cloud_watch.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import logging
import json
import os
from heat.common import exception
from heat.db import api as db_api
from heat.engine.resources import Resource
logger = logging.getLogger('heat.engine.cloud_watch')
class CloudWatchAlarm(Resource):
properties_schema = {'ComparisonOperator': {'Type': 'String',
'AllowedValues': ['GreaterThanOrEqualToThreshold',
'GreaterThanThreshold', 'LessThanThreshold',
'LessThanOrEqualToThreshold']},
'AlarmDescription': {'Type': 'String'},
'EvaluationPeriods': {'Type': 'String'},
'MetricName': {'Type': 'String'},
'Namespace': {'Type': 'String'},
'Period': {'Type': 'String'},
'Statistic': {'Type': 'String',
'AllowedValues': ['SampleCount', 'Average', 'Sum',
'Minimum', 'Maximum']},
'AlarmActions': {'Type': 'List'},
'OKActions': {'Type': 'List'},
'InsufficientDataActions': {'Type': 'List'},
'Threshold': {'Type': 'String'},
'Units': {'Type': 'String',
'AllowedValues': ['Seconds', 'Microseconds', 'Milliseconds',
'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits',
'Terabits', 'Percent', 'Count', 'Bytes/Second',
'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
'Terabytes/Second', 'Bits/Second', 'Kilobits/Second',
'Megabits/Second', 'Gigabits/Second', 'Terabits/Second',
'Count/Second', None]}}
def __init__(self, name, json_snippet, stack):
super(CloudWatchAlarm, self).__init__(name, json_snippet, stack)
self.instance_id = ''
def validate(self):
'''
Validate the Properties
'''
return Resource.validate(self)
def create(self):
if self.state in [self.CREATE_IN_PROGRESS, self.CREATE_COMPLETE]:
return
self.state_set(self.CREATE_IN_PROGRESS)
Resource.create(self)
wr_values = {
'name': self.name,
'rule': self.parsed_template()['Properties'],
'state': 'NORMAL',
'stack_name': self.stack.name
}
wr = db_api.watch_rule_create(self.stack.context, wr_values)
self.instance_id = wr.id
self.state_set(self.CREATE_COMPLETE)
def delete(self):
if self.state in [self.DELETE_IN_PROGRESS, self.DELETE_COMPLETE]:
return
self.state_set(self.DELETE_IN_PROGRESS)
Resource.delete(self)
db_api.watch_rule_delete(self.stack.context, self.name)
self.state_set(self.DELETE_COMPLETE)
def FnGetRefId(self):
return unicode(self.name)
def strict_dependency(self):
return False
|
Python
| 0
|
@@ -3297,32 +3297,49 @@
e.delete(self)%0A%0A
+ try:%0A
db_api.w
@@ -3381,24 +3381,73 @@
, self.name)
+%0A except Exception as ex:%0A pass
%0A%0A se
|
2ab2927b2ee4f821fd75050da19a7f1f81aaeca8
|
FIX divide mnist features by 255 in mlp example (#11961)
|
examples/neural_networks/plot_mnist_filters.py
|
examples/neural_networks/plot_mnist_filters.py
|
"""
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier
print(__doc__)
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# rescale the data, use the traditional train/test split
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
Python
| 0
|
@@ -1221,16 +1221,29 @@
_y=True)
+%0AX = X / 255.
%0A%0A# resc
|
45c67e0b9bc168549fdd1eb2cde3599aae921567
|
Update base.py
|
webhook/base.py
|
webhook/base.py
|
"""
Base webhook implementation
"""
import json
from django.http import HttpResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class WebhookBase(View):
"""
Simple Webhook base class to handle the most standard case.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebhookBase, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
self.process_webhook(data)
return HttpResponse(status=200)
def process_webhook(self, data=None):
"""
Unimplemented method
"""
raise NotImplementedError
|
Python
| 0.000001
|
@@ -709,13 +709,8 @@
data
-=None
):%0A
|
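Decoded, the hunk simply drops the default value of the payload argument, so callers must always pass data (sketch from the record):

def process_webhook(self, data):  # was: def process_webhook(self, data=None):
    """
    Unimplemented method
    """
    raise NotImplementedError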
1abd5833ef8936185f4c8870d300b3793da4ce00
|
Fix regex for parsing Solr dates. The solr documentation suggests it will always use 4-digit years. In practice, though, it returns < 4 digits for years before 1000 AD. This fixes the date-parsing regex to account for the discrepancy.
|
sunburnt/dates.py
|
sunburnt/dates.py
|
from __future__ import absolute_import
import datetime
import re
import warnings
try:
import mx.DateTime
except ImportError:
warnings.warn(
"mx.DateTime not found, retricted to Python datetime objects",
ImportWarning)
mx = None
year = r'[+/-]?\d*\d\d\d\d'
tzd = r'Z|((?P<tzd_sign>[-+])(?P<tzd_hour>\d\d):(?P<tzd_minute>\d\d))'
extended_iso_template = r'(?P<year>'+year+r""")
(-(?P<month>\d\d)
(-(?P<day>\d\d)
([T%s](?P<hour>\d\d)
:(?P<minute>\d\d)
(:(?P<second>\d\d)
(.(?P<fraction>\d+))?)?
("""+tzd+""")?)?
)?)?"""
extended_iso = extended_iso_template % " "
extended_iso_re = re.compile('^'+extended_iso+'$', re.X)
def datetime_from_w3_datestring(s):
""" We need to extend ISO syntax (as permitted by the standard) to allow
for dates before 0AD and after 9999AD. This is how to parse such a string"""
m = extended_iso_re.match(s)
if not m:
raise ValueError
d = m.groupdict()
d['year'] = int(d['year'])
d['month'] = int(d['month'] or 1)
d['day'] = int(d['day'] or 1)
d['hour'] = int(d['hour'] or 0)
d['minute'] = int(d['minute'] or 0)
d['fraction'] = d['fraction'] or '0'
d['second'] = float("%s.%s" % ((d['second'] or '0'), d['fraction']))
del d['fraction']
if d['tzd_sign']:
if d['tzd_sign'] == '+':
tzd_sign = 1
elif d['tzd_sign'] == '-':
tzd_sign = -1
try:
tz_delta = datetime_delta_factory(tzd_sign*int(d['tzd_hour']),
tzd_sign*int(d['tzd_minute']))
        except DateTimeRangeError, e:
raise ValueError(e.args[0])
else:
tz_delta = datetime_delta_factory(0, 0)
del d['tzd_sign']
del d['tzd_hour']
del d['tzd_minute']
try:
dt = datetime_factory(**d) + tz_delta
    except DateTimeRangeError, e:
raise ValueError(e.args[0])
return dt
class DateTimeRangeError(ValueError):
pass
if mx:
def datetime_factory(**kwargs):
try:
return mx.DateTime.DateTimeFrom(**kwargs)
        except mx.DateTime.RangeError, e:
raise DateTimeRangeError(e.args[0])
else:
def datetime_factory(**kwargs):
try:
return datetime.datetime(**kwargs)
except ValueError, e:
raise DateTimeRangeError(e.args[0])
if mx:
def datetime_delta_factory(hours, minutes):
return mx.DateTime.DateTimeDelta(0, hours, minutes)
else:
def datetime_delta_factory(hours, minutes):
return datetime.timedelta(hours=hours, minutes=minutes)
|
Python
| 0
|
@@ -274,17 +274,9 @@
%5D?%5Cd
-*%5Cd%5Cd%5Cd%5Cd
++
'%0Atz
|
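Decoded, the hunk relaxes the year pattern from at-least-four digits to one or more digits, matching the pre-1000 AD behaviour described in the subject (sketch):

year = r'[+/-]?\d+'  # was: r'[+/-]?\d*\d\d\d\d'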
4c085d301c3c70c9e8f2299cb9f5dbd0fedd5954
|
Fix add_converter.
|
sunpy/net/attr.py
|
sunpy/net/attr.py
|
from itertools import chain, repeat
from sunpy.util.multimethod import MultiMethod
class Attr(object):
def __and__(self, other):
if isinstance(other, AttrOr):
return AttrOr([elem & self for elem in other.attrs])
if self.collides(other):
return NotImplemented
return AttrAnd([self, other])
def __or__(self, other):
# Optimization.
if self == other:
return self
return AttrOr([self, other])
def collides(self, other):
raise NotImplementedError
class DummyAttr(Attr):
def __and__(self, other):
return other
def __or__(self, other):
return other
def collides(self, other):
return False
def __hash__(self):
return hash(None)
def __eq__(self, other):
return isinstance(other, DummyAttr)
class AttrAnd(Attr):
def __init__(self, attrs):
self.attrs = attrs
def __and__(self, other):
if any(other.collides(elem) for elem in self.attrs):
return NotImplemented
if isinstance(other, AttrAnd):
return AttrAnd(self.attrs + other.attrs)
if isinstance(other, AttrOr):
return AttrOr([elem & self for elem in other.attrs])
return AttrAnd(self.attrs + [other])
__rand__ = __and__
def __repr__(self):
return "<AttrAnd(%r)>" % self.attrs
def __eq__(self, other):
if not isinstance(other, AttrAnd):
return False
return set(self.attrs) == set(other.attrs)
def __hash__(self):
return hash(frozenset(self.attrs))
def collides(self, other):
return any(elem.collides(other) for elem in self)
class AttrOr(Attr):
def __init__(self, attrs):
self.attrs = attrs
def __or__(self, other):
if isinstance(other, AttrOr):
return AttrOr(self.attrs + other.attrs)
return AttrOr(self.attrs + [other])
__ror__ = __or__
def __and__(self, other):
return AttrOr([elem & other for elem in self.attrs])
__rand__ = __and__
def __xor__(self, other):
new = AttrOr([])
for elem in self.attrs:
try:
new |= elem ^ other
except TypeError:
pass
return new
def __contains__(self, other):
for elem in self.attrs:
try:
if other in elem:
return True
except TypeError:
pass
return False
def __repr__(self):
return "<AttrOr(%r)>" % self.attrs
def __eq__(self, other):
if not isinstance(other, AttrOr):
return False
return set(self.attrs) == set(other.attrs)
def __hash__(self):
return hash(frozenset(self.attrs))
def collides(self, other):
return all(elem.collides(other) for elem in self)
class KeysAttr(Attr):
def __init__(self, attrs):
self.attrs = attrs
def __repr__(self):
return "<KeysAttr(%r)>" % (self.attrs)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.attrs == other.attrs
def __hash__(self):
return hash(frozenset(self.attrs))
def collides(self, other):
if not isinstance(other, ValueAttr):
return False
return any(k in other.attrs for k in self.attrs)
class ValueAttr(KeysAttr):
def __init__(self, attrs):
KeysAttr.__init__(self, attrs)
self.attrs = attrs
def __repr__(self):
return "<ValueAttr(%r)>" % (self.attrs)
def __hash__(self):
return hash(frozenset(self.attrs.iteritems()))
class AttrWalker(object):
def __init__(self):
self.applymm = MultiMethod(lambda *a, **kw: (a[1], ))
self.createmm = MultiMethod(lambda *a, **kw: (a[1], ))
def add_creator(self, *types):
def _dec(fun):
for type_ in types:
self.createmm.add(fun, (type_, ))
return fun
return _dec
def add_applier(self, *types):
def _dec(fun):
for type_ in types:
self.applymm.add(fun, (type_, ))
return fun
return _dec
def add_converter(self, *types):
def _dec(fun):
for type_ in types:
self.applymm.add(self.cv_apply(fun), (type_, ))
self.createmm.add(self.cv_create(fun), (type_, ))
return fun
return _dec
def cv_apply(self, fun):
def _fun(*args, **kwargs):
nargs, nkwargs = fun(*args, **kwargs)
return self.apply(*nargs, **nkwargs)
return _fun
def cv_create(self, fun):
def _fun(*args, **kwargs):
nargs, nkwargs = fun(*args, **kwargs)
return self.create(*nargs, **nkwargs)
return _fun
def create(self, *args, **kwargs):
return self.createmm(self, *args, **kwargs)
def apply(self, *args, **kwargs):
return self.applymm(self, *args, **kwargs)
def super_create(self, *args, **kwargs):
return self.createmm.super(self, *args, **kwargs)
def super_apply(self, *args, **kwargs):
return self.applymm.super(self, *args, **kwargs)
def and_(*args):
""" Trick operator precendence.
and_(foo < bar, bar < baz)
"""
value = DummyAttr()
for elem in args:
value &= elem
return value
def or_(*args):
""" Trick operator precendence.
or_(foo < bar, bar < baz)
"""
value = DummyAttr()
for elem in args:
value |= elem
return value
|
Python
| 0
|
@@ -4735,44 +4735,59 @@
-nargs, nkw
+args = list(args)%0A
args
+%5B1%5D
= fun(
-*
args
-, **kwargs
+%5B1%5D
)%0A
@@ -4813,27 +4813,28 @@
lf.apply
+mm
(*
-n
args, **
nkwargs)
@@ -4817,33 +4817,32 @@
pplymm(*args, **
-n
kwargs)%0A
@@ -4939,44 +4939,59 @@
-nargs, nkw
+args = list(args)%0A
args
+%5B1%5D
= fun(
-*
args
-, **kwargs
+%5B1%5D
)%0A
@@ -5022,19 +5022,20 @@
eate
+mm
(*
-n
args, **
nkwa
@@ -5030,17 +5030,16 @@
args, **
-n
kwargs)%0A
|
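The hunks above are hard to read encoded; decoded, cv_apply and cv_create stop unpacking an (args, kwargs) pair from the converter and instead convert the second positional argument in place, calling the underlying multimethods directly (a reconstruction; all names come from the record):

def cv_apply(self, fun):
    def _fun(*args, **kwargs):
        args = list(args)
        args[1] = fun(args[1])
        return self.applymm(*args, **kwargs)
    return _fun

def cv_create(self, fun):
    def _fun(*args, **kwargs):
        args = list(args)
        args[1] = fun(args[1])
        return self.createmm(*args, **kwargs)
    return _fun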
a751e7f51412581e14cc822f1e443ed97746055a
|
Update structures example
|
examples/structures.py
|
examples/structures.py
|
from numba import struct, jit, double
import numpy as np
record_type = struct([('x', double), ('y', double)])
record_dtype = record_type.get_dtype()
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_dtype)
@jit(argtypes=[record_type[:]])
def hypot(data):
# return types of numpy functions are inferred
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print hypot(a)
# Notice inferred return type
print hypot.signature
# Notice native sqrt calls and for.body direct access to memory...
print hypot.lfunc
|
Python
| 0
|
@@ -430,16 +430,123 @@
access%0A
+ # You can also index by field name or field index:%0A # data%5Bi%5D.x == data%5Bi%5D%5B'x'%5D == data%5Bi%5D%5B0%5D%0A
for
|
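Decoded, the hunk adds two comment lines before the loop, documenting the equivalent indexing forms (sketch; context from the record):

# notice access to structure elements 'x' and 'y' via attribute access
# You can also index by field name or field index:
#  data[i].x == data[i]['x'] == data[i][0]
for i in range(data.shape[0]):
    result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)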
f6045517b27bf6f878ab2906aa6b793cfd640786
|
upgrade anymail
|
toucan_conf/settings/prod/__init__.py
|
toucan_conf/settings/prod/__init__.py
|
import os
from .. import *
try:
from ..secrets import ALLOWED_HOSTS
except ImportError:
raise ImportError('Please set ALLOWED_HOSTS in the secrets file when using production config.')
try:
from ..secrets import ANYMAIL
except ImportError:
raise ImportError('Please set ANYMAIL settings in the secrets file when using production config.')
INSTALLED_APPS += [
'anymail'
]
DEBUG = False
DEFAULT_FROM_EMAIL = 'toucan@brickwall.at'
STATIC_ROOT = os.path.join(BASE_DIR, '_static')
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"
# install raven handler if configured
try:
import raven
from ..secrets import RAVEN_DSN
except ImportError:
pass
else:
RAVEN_CONFIG = {
'dsn': RAVEN_DSN,
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
|
Python
| 0
|
@@ -545,15 +545,13 @@
gun.
-M
+Em
ail
-gun
Back
|
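Decoded, the hunk renames the backend class, consistent with the "upgrade anymail" subject (sketch):

EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"  # was: anymail.backends.mailgun.MailgunBackend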
7efa7bc0d0d2c97c8064e2a8f292c040528346e7
|
Test radio widget
|
tests/widgets.py
|
tests/widgets.py
|
from __future__ import unicode_literals
from unittest import TestCase
from wtforms.widgets import html_params, Input
from wtforms.widgets import *
from wtforms.widgets import html5
class DummyField(object):
def __init__(self, data, name='f', label='', id='', type='TextField'):
self.data = data
self.name = name
self.label = label
self.id = id
self.type = type
_value = lambda x: x.data
__unicode__ = lambda x: x.data
__str__ = lambda x: x.data
__call__ = lambda x, **k: x.data
__iter__ = lambda x: iter(x.data)
iter_choices = lambda x: iter(x.data)
class HTMLParamsTest(TestCase):
def test(self):
self.assertEqual(html_params(foo=9, k='wuuu'), 'foo="9" k="wuuu"')
self.assertEqual(html_params(class_='foo'), 'class="foo"')
self.assertEqual(html_params(class__='foo'), 'class_="foo"')
self.assertEqual(html_params(for_='foo'), 'for="foo"')
class ListWidgetTest(TestCase):
def test(self):
# ListWidget just expects an iterable of field-like objects as its
# 'field' so that is what we will give it
field = DummyField([DummyField(x, label='l' + x) for x in ['foo', 'bar']], id='hai')
self.assertEqual(ListWidget()(field), '<ul id="hai"><li>lfoo foo</li><li>lbar bar</li></ul>')
w = ListWidget(html_tag='ol', prefix_label=False)
self.assertEqual(w(field), '<ol id="hai"><li>foo lfoo</li><li>bar lbar</li></ol>')
class TableWidgetTest(TestCase):
def test(self):
inner_fields = [
DummyField('hidden1', type='HiddenField'),
DummyField('foo', label='lfoo'),
DummyField('bar', label='lbar'),
DummyField('hidden2', type='HiddenField'),
]
field = DummyField(inner_fields, id='hai')
self.assertEqual(
TableWidget()(field),
'<table id="hai"><tr><th>lfoo</th><td>hidden1foo</td></tr><tr><th>lbar</th><td>bar</td></tr></table>hidden2'
)
class BasicWidgetsTest(TestCase):
"""Test most of the basic input widget types"""
field = DummyField('foo', name='bar', label='label', id='id')
def test_input_type(self):
a = Input()
self.assertRaises(AttributeError, getattr, a, 'input_type')
b = Input(input_type='test')
self.assertEqual(b.input_type, 'test')
def test_html_marking(self):
html = TextInput()(self.field)
self.assertTrue(hasattr(html, '__html__'))
self.assertTrue(html.__html__() is html)
def test_text_input(self):
self.assertEqual(TextInput()(self.field), '<input id="id" name="bar" type="text" value="foo">')
def test_password_input(self):
self.assertTrue('type="password"' in PasswordInput()(self.field))
self.assertTrue('value=""' in PasswordInput()(self.field))
self.assertTrue('value="foo"' in PasswordInput(hide_value=False)(self.field))
def test_hidden_input(self):
self.assertTrue('type="hidden"' in HiddenInput()(self.field))
def test_checkbox_input(self):
self.assertEqual(CheckboxInput()(self.field, value='v'), '<input checked id="id" name="bar" type="checkbox" value="v">')
field2 = DummyField(False)
self.assertTrue('checked' not in CheckboxInput()(field2))
def test_radio_input(self):
pass # TODO
def test_textarea(self):
# Make sure textareas escape properly and render properly
f = DummyField('hi<>bye')
self.assertEqual(TextArea()(f), '<textarea id="" name="f">hi<>bye</textarea>')
class SelectTest(TestCase):
field = DummyField([('foo', 'lfoo', True), ('bar', 'lbar', False)])
def test(self):
self.assertEqual(
Select()(self.field),
'<select id="" name="f"><option selected value="foo">lfoo</option><option value="bar">lbar</option></select>'
)
self.assertEqual(
Select(multiple=True)(self.field),
'<select id="" multiple name="f"><option selected value="foo">lfoo</option><option value="bar">lbar</option></select>'
)
def test_render_option(self):
# value, label, selected
self.assertEqual(
Select.render_option('bar', 'foo', False),
'<option value="bar">foo</option>'
)
self.assertEqual(
Select.render_option(True, 'foo', True),
'<option selected value="True">foo</option>'
)
class HTML5Test(TestCase):
field = DummyField('42', name='bar', id='id')
def test_number(self):
i1 = html5.NumberInput(step='any')
self.assertEqual(i1(self.field), '<input id="id" name="bar" step="any" type="number" value="42">')
i2 = html5.NumberInput(step=2)
self.assertEqual(i2(self.field, step=3), '<input id="id" name="bar" step="3" type="number" value="42">')
def test_range(self):
i1 = html5.RangeInput(step='any')
self.assertEqual(i1(self.field), '<input id="id" name="bar" step="any" type="range" value="42">')
i2 = html5.RangeInput(step=2)
self.assertEqual(i2(self.field, step=3), '<input id="id" name="bar" step="3" type="range" value="42">')
|
Python
| 0
|
@@ -2089,24 +2089,54 @@
pes%22%22%22%0A%0A
+def setUp(self):%0A self.
field = Dumm
@@ -3376,20 +3376,295 @@
-pass # TODO
+self.field.checked = True%0A expected = '%3Cinput checked id=%22id%22 name=%22bar%22 type=%22radio%22 value=%22foo%22%3E'%0A self.assertEqual(RadioInput()(self.field), expected)%0A self.field.checked = False%0A self.assertEqual(RadioInput()(self.field), expected.replace(' checked', ''))
%0A%0A
|
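Decoded, the hunks move the shared DummyField from a class attribute into setUp, so each test can mutate it, and replace the TODO stub with a real radio-button test (reconstruction; all values come from the record):

def setUp(self):
    self.field = DummyField('foo', name='bar', label='label', id='id')

def test_radio_input(self):
    self.field.checked = True
    expected = '<input checked id="id" name="bar" type="radio" value="foo">'
    self.assertEqual(RadioInput()(self.field), expected)
    self.field.checked = False
    self.assertEqual(RadioInput()(self.field), expected.replace(' checked', ''))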
cf18c2cf2516dfe0e06df336f90d9f0a730d5a23
|
remove pprint output for ClientException
|
hp3parclient/exceptions.py
|
hp3parclient/exceptions.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Hewlett Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions for the client
"""
import pprint
class UnsupportedVersion(Exception):
"""Indicates that the user is trying to use an unsupported
version of the API"""
pass
class CommandError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoUniqueMatch(Exception):
pass
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
error_code = None
error_desc = None
debug1 = None
debug2 = None
def __init__(self, error=None):
pprint.pprint(error)
if 'code' in error:
self.error_code = error['code']
if 'desc' in error:
self.error_desc = error['desc']
if 'debug1' in error:
self.debug1 = error['debug1']
if 'debug2' in error:
self.debug2 = error['debug2']
def __str__(self):
formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
if self.error_code:
formatted_string += " %s" % self.error_code
if self.error_desc:
formatted_string += " - %s" % self.error_desc
if self.debug1:
formatted_string += " (1: '%s')" % self.debug1
if self.debug2:
formatted_string += " (2: '%s')" % self.debug2
return formatted_string
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class MethodNotAllowed(ClientException):
"""
HTTP 405 - Method not Allowed
"""
http_status = 405
message = "Method Not Allowed"
class Conflict(ClientException):
"""
HTTP 409 - Conflict: A Conflict happened on the server
"""
http_status = 409
message = "Conflict"
class OverLimit(ClientException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
# NotImplemented is a python keyword.
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict((c.http_status, c) for c in [BadRequest, Unauthorized,
Forbidden, NotFound, MethodNotAllowed, Conflict,
OverLimit, HTTPNotImplemented])
def from_response(response, body):
"""
Return an instance of an ClientException or subclass
based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status != 200:
            raise from_response(resp, body)
"""
cls = _code_map.get(response.status, ClientException)
return cls(body)
|
Python
| 0.000001
|
@@ -1254,37 +1254,8 @@
e):%0A
- pprint.pprint(error)%0A
|
193d911536799751c9ec29571cb8091bcd187087
|
fix uraseuranta py
|
pdi_integrations/arvo/python_scripts/get_arvo_uraseuranta.py
|
pdi_integrations/arvo/python_scripts/get_arvo_uraseuranta.py
|
#import json
import requests
#import os
from pandas.io.json import json_normalize
#import datetime
import os
try:
api_key = os.environ['AUTH_API_KEY']
except KeyError:
print("API-key missing")
result = []
good_result=[]
filtered_result=[]
urls = []
url = 'https://arvo.csc.fi/api/vipunen/uraseuranta'
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Authorization'] = api_key
#response = requests.get(url, headers=reqheaders).json()
## Not checking the status just downloading
## GET STATUS
##
while url != 'null': ## The url is not null
response = requests.get(url, headers=reqheaders).json()
for uraseuranta in response['data']:
result.append(uraseuranta)
# taustatiedot.append(uraseuranta['taustatiedot'])
url = response['pagination']['next_url']
urls.append(url)
## split result into two sets (with&without taustatiedot)
## test first 300 results
## for item in result[0:300]:
for item in result:
if item.get('taustatiedot') == None:
filtered_result.append(item)
else:
good_result.append(item)
## normalize data from result sets
### if you want to check column names use row below
### data.dtypes.index
data = json_normalize(good_result)
filtered_data = json_normalize(filtered_result)
# print(data[12])
# data['vastaajaid'].head(10)
## data.dtypes
## Export data to csv's
filtered_data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta_vajaadata.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n', escapechar='$')
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$')
#now = datetime.datetime.now()
#print
#print("Current date and time using str method of datetime object:")
#print(str(now))
## data.vastaajaid.nunique()
|
Python
| 0.000001
|
@@ -92,17 +92,30 @@
atetime%0A
+import base64
%0A
-
import o
@@ -199,16 +199,19 @@
API-key
+is
missing%22
@@ -212,17 +212,113 @@
ssing%22)%0A
-%0A
+try:%0A api_user = os.environ%5B'AUTH_API_USER'%5D%0Aexcept KeyError:%0A print(%22API-user is missing%22)
%0A%0Aresult
@@ -487,32 +487,226 @@
s%5B'A
-uthorization'%5D = api_key
+ccept'%5D = 'application/json'%0A%0A### encode API user and API key tothe request headers %0Atmp = %22%25s:%25s%22 %25 (api_user, api_key)%0Areqheaders%5B'Authorization'%5D = %22Basic %25s%22 %25 base64.b64encode(tmp.encode('utf-8')).decode('utf-8')%0A
%0A#re
@@ -839,14 +839,12 @@
!=
-'null'
+None
: ##
@@ -1666,16 +1666,52 @@
csv's%0A%0A
+print(%22Exporting data to csv file%22)%0A
%0Afiltere
|
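Decoded, the main hunks add HTTP Basic authentication built from a new AUTH_API_USER environment variable (base64 is newly imported; api_key is defined earlier in the record), change the pagination guard from url != 'null' to url != None, and print a progress message before the CSV export. The authentication part, as a sketch:

import base64

try:
    api_user = os.environ['AUTH_API_USER']
except KeyError:
    print("API-user is missing")

reqheaders['Accept'] = 'application/json'
# encode API user and API key into the request headers
tmp = "%s:%s" % (api_user, api_key)
reqheaders['Authorization'] = "Basic %s" % base64.b64encode(tmp.encode('utf-8')).decode('utf-8')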
a0f7ca32edd5c924366738e0e6d6b8ab4e483cc8
|
Undo last commit.
|
foliant/utils.py
|
foliant/utils.py
|
'''Various utilities used here and there in the Foliant code.'''
from contextlib import contextmanager
from pkgutil import iter_modules
from importlib import import_module
from shutil import rmtree
from pathlib import Path
from logging import Logger
from typing import Dict, Tuple, Type, Set
from halo import Halo
def get_available_tags() -> Set[str]:
'''Extract ``tags`` attribute values from installed
``foliant.preprocessors.*.Preprocessor`` classes.
:returns: Set of tags
'''
preprocessors_module = import_module('foliant.preprocessors')
result = set()
for importer, modname, _ in iter_modules(preprocessors_module.__path__):
if modname == 'base':
continue
result.update(importer.find_module(modname).load_module(modname).Preprocessor.tags)
return result
def get_available_config_parsers() -> Dict[str, Type]:
'''Get the names of the installed ``foliant.config`` submodules and the corresponding
``Parser`` classes.
Used for construction of the Foliant config parser, which is a class that inherits
from all ``foliant.config.*.Parser`` classes.
:returns: Dictionary with submodule names as keys as classes as values
'''
config_module = import_module('foliant.config')
result = {}
for importer, modname, _ in iter_modules(config_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Parser
return result
def get_available_clis() -> Dict[str, Type]:
'''Get the names of the installed ``foliant.cli`` submodules and the corresponding
``Cli`` classes.
Used for construction of the Foliant CLI, which is a class that inherits
from all ``foliant.cli.*.Cli`` classes.
:returns: Dictionary with submodule names as keys as classes as values
'''
cli_module = import_module('foliant.cli')
result = {}
for importer, modname, _ in iter_modules(cli_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Cli
return result
def get_available_backends() -> Dict[str, Tuple[str]]:
'''Get the names of the installed ``foliant.backends`` submodules and the corresponding
``Backend.targets`` tuples.
Used in the interactive backend selection prompt to list the available backends
and to check if the selected target can be made with the selected backend.
:returns: Dictionary of submodule names as keys and target tuples as values
'''
backends_module = import_module('foliant.backends')
result = {}
for importer, modname, _ in iter_modules(backends_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Backend.targets
return result
@contextmanager
def spinner(text: str, logger: Logger, quiet=False):
'''Spinner decoration for long running processes.
:param text: The spinner's caption
:param logger: Logger to capture the error if it occurs
:param quiet: If ``True``, the spinner is hidden
'''
halo = Halo(text, enabled=not quiet)
halo.start()
try:
logger.info(text)
yield
if not quiet:
halo.succeed()
else:
halo.stop()
except Exception as exception:
logger.error(str(exception))
if not quiet:
halo.fail(str(exception))
else:
halo.stop()
@contextmanager
def tmp(tmp_path: Path, keep_tmp=False):
'''Clean up tmp directory before and after running a code block.
:param tmp_path: Path to the tmp directory
:param keep_tmp: If ``True``, skip the cleanup
'''
rmtree(tmp_path, ignore_errors=True)
yield
if not keep_tmp:
rmtree(tmp_path, ignore_errors=True)
|
Python
| 0
|
@@ -2008,60 +2008,8 @@
_):%0A
- if modname == 'base':%0A continue%0A%0A
|
8c631a450a804c1e0ba2c4d22329e4bc2967d5aa
|
Deal with the differences between different versions of libxml
|
tickets/tests.py
|
tickets/tests.py
|
##
# Copyright (C) 2014 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import test
from django.core import mail, urlresolvers
from django.db.models import Max
import factory
import factory.fuzzy
from inboxen.tests import factories
from inboxen.utils import override_settings
from tickets import models
class QuestionFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Question
subject = factory.fuzzy.FuzzyText()
body = factory.fuzzy.FuzzyText()
class ResponseFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Response
body = factory.fuzzy.FuzzyText()
class MockModel(models.RenderBodyMixin):
def __init__(self, body):
self.body = body
class QuestionViewTestCase(test.TestCase):
def setUp(self):
super(QuestionViewTestCase, self).setUp()
self.user = factories.UserFactory()
self.other_user = factories.UserFactory(username="tester")
QuestionFactory.create_batch(11, author=self.user, status=models.Question.NEW)
QuestionFactory.create_batch(3, author=self.other_user, status=models.Question.RESOLVED)
login = self.client.login(username=self.user.username, password="123456")
if not login:
raise Exception("Could not log in")
def get_url(self):
return urlresolvers.reverse("tickets-index")
def test_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertIn("More Open Tickets", response.content)
self.assertNotIn("More Closed Tickets", response.content)
def test_switch_open_closed(self):
models.Question.objects.filter(status=models.Question.NEW).update(author=self.other_user)
models.Question.objects.filter(status=models.Question.RESOLVED).update(author=self.user)
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
self.assertNotIn("More Open Tickets", response.content)
self.assertIn("More Closed Tickets", response.content)
def test_post(self):
params = {"subject": "hello!", "body": "This is the body of my question"}
response = self.client.post(self.get_url(), params)
question = models.Question.objects.latest("date")
self.assertRedirects(response, urlresolvers.reverse("tickets-detail", kwargs={"pk": question.pk}))
class QuestionListTestCase(test.TestCase):
def setUp(self):
super(QuestionListTestCase, self).setUp()
self.user = factories.UserFactory()
QuestionFactory.create_batch(75, author=self.user, status=models.Question.NEW)
login = self.client.login(username=self.user.username, password="123456")
if not login:
raise Exception("Could not log in")
def get_url(self):
return urlresolvers.reverse("tickets-list", kwargs={"status": "!resolved"})
def test_get(self):
response = self.client.get(self.get_url())
self.assertEqual(response.status_code, 200)
response = self.client.get(self.get_url() + "2/")
self.assertEqual(response.status_code, 200)
response = self.client.get(self.get_url() + "3/")
self.assertEqual(response.status_code, 404)
@override_settings(ADMINS=(("admin", "root@localhost"),))
class QuestionModelTestCase(test.TestCase):
def setUp(self):
super(QuestionModelTestCase, self).setUp()
self.user = factories.UserFactory()
def test_admins_emailed(self):
question = models.Question()
question.author = self.user
question.subject = "Hey"
question.body = "Sort it out!"
question.save()
self.assertEqual(len(mail.outbox), 1)
question2 = models.Question.objects.get(id=question.id)
question2.save()
self.assertEqual(len(mail.outbox), 1)
def test_last_activity(self):
question = models.Question()
question.author = self.user
question.subject = "Hey"
question.body = "Sort it out!"
question.save()
question.refresh_from_db()
question_qs = models.Question.objects.annotate(last_response_date=Max("response__date"))
self.assertEqual(question_qs[0].last_activity, question.last_modified)
response = models.Response()
response.question = question
response.author = self.user
response.body = "Oook"
response.save()
response.refresh_from_db()
question_qs = models.Question.objects.annotate(last_response_date=Max("response__date"))
self.assertEqual(question_qs[0].last_activity, response.date)
class RenderBodyTestCase(test.TestCase):
def test_empty_body(self):
obj = MockModel("")
self.assertEqual(obj.render_body(), "")
def test_normal_html(self):
original = "Hi\n\nAnother < 12\n\n* this one\n* that one"""
expected = "<div><p>Hi</p>\n<p>Another < 12</p>\n<ul>\n<li>this one</li>\n<li>that one</li>\n</ul></div>"
obj = MockModel(original)
self.assertEqual(obj.render_body(), expected)
def test_bad_html(self):
original = "<p class='hide'>Hi</p>\n\n<sometag> </>"
expected = "<div><p>Hi</p>\n\n<p> ></p></div>"
obj = MockModel(original)
self.assertEqual(obj.render_body(), expected)
|
Python
| 0
|
@@ -5541,24 +5541,378 @@
ody(), %22%22)%0A%0A
+ def assertHtmlEqual(self, first, second, *args):%0A %22%22%22Normalise HTML and compare%0A%0A LXML seems to deal with whitespace differently on different systems, so%0A we strip it out before comparing%0A %22%22%22%0A first = %22%22.join(first.split())%0A second = %22%22.join(second.split())%0A%0A self.assertEqual(first, second, *args)%0A%0A
def test
@@ -5923,32 +5923,32 @@
mal_html(self):%0A
-
original
@@ -6161,32 +6161,36 @@
self.assert
+Html
Equal(obj.render
@@ -6383,32 +6383,32 @@
Model(original)%0A
-
self.ass
@@ -6402,32 +6402,36 @@
self.assert
+Html
Equal(obj.render
|
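Decoded, the hunks add a whitespace-normalising assertion helper to RenderBodyTestCase and switch test_normal_html and test_bad_html from assertEqual to assertHtmlEqual (reconstruction; docstring verbatim from the diff):

def assertHtmlEqual(self, first, second, *args):
    """Normalise HTML and compare

    LXML seems to deal with whitespace differently on different systems, so
    we strip it out before comparing
    """
    first = "".join(first.split())
    second = "".join(second.split())
    self.assertEqual(first, second, *args)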
77859dbc019a19222ada36ebccc849ba77649a86
|
add __unicode__ functions to all forum models
|
forums/models.py
|
forums/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255, unique=True)
position = models.IntegerField(_("Position"), default=0)
class Meta:
ordering = ['position']
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums')
name = models.CharField(_("Name"), max_length=255)
position = models.IntegerField(_("Position"), default=0)
description = models.TextField(_("Description"), blank=True)
class Meta:
ordering = ['position']
class Topic(models.Model):
forum = models.ForeignKey(Forum, related_name='topics')
name = models.CharField(_("Name"), max_length=255)
last_post = models.ForeignKey('Post', verbose_name=_("Last post"), related_name='forum_last_post', blank=True, null=True)
class Meta:
ordering = ['-last_post__created']
class Post(models.Model):
topic = models.ForeignKey(Topic, related_name='posts')
user = models.ForeignKey(User, related_name='forum_posts')
created = models.DateTimeField(_("Created"), auto_now_add=True)
    updated = models.DateTimeField(_("Updated"), auto_now=True)
body = models.TextField(_("Body"))
class Meta:
ordering = ['created']
|
Python
| 0
|
@@ -330,24 +330,77 @@
position'%5D%0A%0A
+ def __unicode__(self):%0A return self.name%0A%0A
%0Aclass Forum
@@ -708,24 +708,77 @@
position'%5D%0A%0A
+ def __unicode__(self):%0A return self.name%0A%0A
%0Aclass Topic
@@ -1095,16 +1095,69 @@
ated'%5D%0A%0A
+ def __unicode__(self):%0A return self.name%0A%0A
%0Aclass P
@@ -1476,32 +1476,32 @@
class Meta:%0A
-
ordering
@@ -1507,16 +1507,69 @@
g = %5B'created'%5D%0A
+%0A def __unicode__(self):%0A return self.body%0A
|
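Decoded, the hunks add the same three-line method to Category, Forum and Topic, with Post returning its body instead (sketch):

def __unicode__(self):
    return self.name  # Post's variant returns self.body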
f22ad01d72b8ab2a12bf68a23b79c9a1b2e6f237
|
Standardizing all to uppercase for compare
|
tms/tms_utils.py
|
tms/tms_utils.py
|
# Local modules
from common import telegram_utils
from tms import tms_data
class Verse():
def __init__(self, ref, title, pack, pos):
self.reference = ref
self.title = title
self.pack = pack
self.position = pos
def get_reference(self):
return self.reference
def get_title(self):
return self.title
def get_pack(self):
return self.pack
def get_position(self):
return self.position
def get_pack(pack):
select_pack = tms_data.get_tms().get(pack)
if select_pack is not None:
return select_pack
return None
def find_pack_pos(query):
query = query.strip().split()
query = ''.join(query)
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
try_packpos = pack_key + str(i + 1)
if try_packpos == query:
return pack[i]
return None
def get_all_pack_keys():
return tms_data.get_tms().keys()
def get_verse_by_pack(pack, pos):
select_pack = get_pack(pack)
if select_pack is not None:
select_verse = select_pack[pos - 1]
if select_verse is not None:
return Verse(select_verse[1], select_verse[0], pack, pos)
def get_verse_by_title(title, pos):
verses = get_verses_by_title(title)
if len(verses) > pos:
return verses[pos - 1]
return None
def get_verse_by_reference(ref):
ref = ref.strip().split()
ref = ''.join(ref)
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
select_verse = pack[i]
try_ref = select_verse[1]
try_ref = ''.join(try_ref)
if try_ref == ref:
return Verse(select_verse[1], select_verse[0], pack_key, i + 1)
return None
def get_verses_by_title(title):
verses = []
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
select_verse = pack[i]
if title == select_verse[0]:
verses.append(Verse(select_verse[1], select_verse[0], pack_key, i + 1))
return verses
def get_start_verse():
start_key = 'BWC'
select_pack = get_pack(start_key)
select_verse = select_pack[0]
return Verse(select_verse[1], select_verse[0], start_key, 1)
def format_verse(verse, text):
verse_prep = []
verse_prep.append(verse.get_pack() + ' ' + str(verse.get_position()))
verse_prep.append(text)
verse_prep.append(telegram_utils.bold(verse.reference))
return telegram_utils.join(verse_prep, '\n\n')
|
Python
| 0.99991
|
@@ -620,12 +620,13 @@
def
-find
+query
_pac
@@ -657,16 +657,24 @@
= query.
+upper().
strip().
@@ -983,934 +983,968 @@
def
-get_all_pack_keys():%0A return tms_data.get_tms().keys()%0A%0Adef get_verse_by_pack(pack, pos):%0A select_pack = get_pack(pack)%0A%0A if select_pack is not None:%0A select_verse = select_pack%5Bpos - 1%5D%0A%0A if select_verse is not None:%0A return Verse(select_verse%5B1%5D, select_verse%5B0%5D, pack, pos)%0A%0Adef get_verse_by_title(title, pos):%0A verses = get_verses_by_title(title)%0A%0A if len(verses) %3E pos:%0A
+query_verse_by_reference(ref):%0A ref = ref.upper().strip().split()%0A ref = ''.join(ref)%0A%0A for pack_key in get_all_pack_keys():%0A pack = get_pack(pack_key)%0A size = len(pack)%0A for i in range(0, size):%0A select_verse = pack%5Bi%5D%0A try_ref = select_verse%5B1%5D.upper().strip().split()%0A try_ref = ''.join(try_ref)%0A if try_ref == ref:%0A return Verse(select_verse%5B1%5D, select_verse%5B0%5D, pack_key, i + 1)%0A
-
return
-verses%5Bpos - 1%5D%0A%0A return None%0A%0Adef get_verse_by_reference(ref):%0A ref = ref.strip().split()%0A ref = ''.join(ref)%0A%0A for pack_key in get_all_pack_keys():%0A pack = get_pack(pack_key)%0A size = len(pack)%0A for i in range(0, size):%0A select_verse = pack%5Bi%5D%0A try_ref = select_verse%5B1%5D%0A try_ref = ''.join(try_ref)%0A if try_ref == ref:%0A return Verse(select_verse%5B1%5D, select_verse%5B0%5D, pack_key, i + 1)%0A return None%0A
+None%0A %0A%0Adef get_all_pack_keys():%0A return tms_data.get_tms().keys()%0A%0Adef get_verse_by_pack(pack, pos):%0A select_pack = get_pack(pack)%0A%0A if select_pack is not None:%0A select_verse = select_pack%5Bpos - 1%5D%0A%0A if select_verse is not None:%0A return Verse(select_verse%5B1%5D, select_verse%5B0%5D, pack, pos)%0A%0Adef get_verse_by_title(title, pos):%0A verses = get_verses_by_title(title)%0A%0A if len(verses) %3E pos:%0A return verses%5Bpos - 1%5D%0A%0A return None
%0Adef
|
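Decoded in part (the large hunk mostly reorders functions), the change renames find_pack_pos to query_pack_pos and get_verse_by_reference to query_verse_by_reference, uppercasing both sides of each comparison before matching. The normalisation pattern, reconstructed from the diff:

def query_verse_by_reference(ref):
    ref = ref.upper().strip().split()
    ref = ''.join(ref)

    for pack_key in get_all_pack_keys():
        pack = get_pack(pack_key)
        size = len(pack)
        for i in range(0, size):
            select_verse = pack[i]
            try_ref = select_verse[1].upper().strip().split()
            try_ref = ''.join(try_ref)
            if try_ref == ref:
                return Verse(select_verse[1], select_verse[0], pack_key, i + 1)
    return None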
694df5ba69e4e7123009605e59c2b5417a3b52c5
|
Remove print statement about number of bins
|
tools/fitsevt.py
|
tools/fitsevt.py
|
#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
Python
| 0.003864
|
@@ -475,23 +475,8 @@
ins%0A
-%09%09print(nBins)%0A
%09%09%0A%09
|
cbe5c11a151cc93bed9702289b40c61203e6cca4
|
Fix original file path in mkdiffs tool
|
tools/mkdiffs.py
|
tools/mkdiffs.py
|
#!/usr/bin/env python
#
# Copyright 2016, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import git
import os
import sys
import yaml
CONF_FILE = 'mkdiffs.yml'
EXEC_DIR, SCRIPT_NAME = os.path.split(sys.argv[0])
def _load_config():
conf_file = os.path.join(EXEC_DIR, CONF_FILE)
with open(conf_file, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as ex:
print(ex)
raise
def rm_dir(directory):
if directory != '/':
os.system('rm -rf ' + directory)
else:
print('Tried to remove \'/\' directory')
exit(1)
def clone_all(conf):
print ('Cloning the git projects.')
git_dir = conf['gitsrc_loc']
for project in conf['changes']:
print (' ' + project['git'])
git_clone(project['git'], project['branch'],
git_dir + '/' + project['src_location'])
def confirm_clones(conf):
git_dir = conf['gitsrc_loc']
for project in conf['changes']:
if not os.path.isdir(git_dir + os.sep + project['src_location']):
print ('Clone not found for project: ' + project['git'])
exit(1)
def git_clone(url, branch, tgt_dir):
# Remove the target directory before cloning
rm_dir(tgt_dir)
git.Repo.clone_from(url, tgt_dir, branch=branch)
class CreateDiffs(object):
def __init__(self, conf):
super(CreateDiffs, self).__init__()
self.conf = conf
self.git_dir = conf['gitsrc_loc']
self.diffs_dir = os.path.normpath(
os.path.join(EXEC_DIR, self.conf['temp_diff_loc']))
self.changes_loc = os.path.join(EXEC_DIR, self.conf['changes_loc'])
def create_dir(self):
rm_dir(self.diffs_dir)
os.mkdir(self.diffs_dir, 0755)
def find_project(self, chgs_loc, chg_path):
# Remove the changes location and pull out the first directory
project_name = chg_path.split(chgs_loc)[1].split(os.sep)[1]
# Now find the project in the conf
for project in self.conf['changes']:
if project['src_location'] == project_name:
return project
return None
def create_file_diffs(self):
norm_chg_loc = os.path.normpath(self.changes_loc)
for directory, sub_dir, file_names in (os.walk(self.changes_loc)):
norm_dir = os.path.normpath(directory)
for file_name in file_names:
changed_file = os.path.join(norm_dir, file_name)
print ('Processing: ' + changed_file)
# Calculate the file relative the changes directory
relative_change = changed_file.split(norm_chg_loc)[1]
relative_change = relative_change.lstrip(os.sep)
# Get the project from the config
project = self.find_project(norm_chg_loc, norm_dir)
# Project relative path:
src_loc = project['src_location']
prj_rel_path = relative_change[len(src_loc) + 1:
len(relative_change)]
# Full diff path
diff_path = (project['target_location'] + os.sep +
os.path.dirname(prj_rel_path))
# Original File
orig_file_path = (self.git_dir + project['src_location'] +
os.sep + prj_rel_path)
# Ensure the original file exists.
if not os.path.isfile(orig_file_path):
print (' Original file not found: ' + orig_file_path)
orig_file_path = "None"
# Diff output file
diff_file_name = (project['target_location'] + os.sep +
prj_rel_path)
diff_file_name = diff_file_name.replace(os.sep, '-')
diff_output_file = (self.diffs_dir + os.sep +
diff_file_name.lstrip('-') + '.patch')
call_str = ' '.join([os.path.join(EXEC_DIR, 'mkpatch.sh'),
diff_path, orig_file_path,
changed_file, diff_output_file])
os.system(call_str)
def process_files(skip_git_cloning):
conf = _load_config()
if not skip_git_cloning:
clone_all(conf)
else:
confirm_clones(conf)
crt_diffs = CreateDiffs(conf)
crt_diffs.create_dir()
crt_diffs.create_file_diffs()
print ('\nGenerated patch files are available in directory: %s' %
os.path.normpath(os.path.join(EXEC_DIR, conf['temp_diff_loc'])))
print ('The project source is in directory: %s' %
os.path.normpath(os.path.join(EXEC_DIR, conf['gitsrc_loc'])))
def parse_command():
"""Parse the command arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=("A command to make the patch files. Generated\n"
"patch files will be available in the directory\n"
"<git top-level directory>/.diffs/."))
parser.add_argument('-s', '--skip-git-cloning', action='store_true',
help='Skip the git cloning.')
parser.set_defaults(func=process_files)
return parser
def main():
parser = parse_command()
args = parser.parse_args()
process_files(args.skip_git_cloning)
print('Done.')
if __name__ == "__main__":
main()
|
Python
| 0.00003
|
@@ -3853,16 +3853,25 @@
it_dir +
+ os.sep +
project
|
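Decoded, the one-line fix inserts the missing path separator between the clone directory and the project location (sketch):

orig_file_path = (self.git_dir + os.sep + project['src_location'] +
                  os.sep + prj_rel_path)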
22a4644bd510a8b786d181c01c20f3dc522dac8d
|
Update corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
# Generated by Django 2.2.20 on 2021-05-21 17:32
from django.db import migrations, models
ACCESS_INDEX = "audit_access_couch_10d1b_idx"
ACCESS_TABLE = "auditcare_accessaudit"
NAVIGATION_EVENT_INDEX = "audit_nav_couch_875bc_idx"
NAVIGATION_EVENT_TABLE = "auditcare_navigationeventaudit"
def _create_index_sql(table_name, index_name):
return """
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} (couch_id)
WHERE couch_id IS NOT NULL
""".format(index_name, table_name)
def _drop_index_sql(index_name):
return "DROP INDEX CONCURRENTLY IF EXISTS {}".format(index_name)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('auditcare', '0003_truncatechars'),
]
operations = [
migrations.AddField(
model_name='accessaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(ACCESS_TABLE, ACCESS_INDEX),
reverse_sql=_drop_index_sql(ACCESS_INDEX),
state_operations=[
migrations.AddIndex(
model_name='accessaudit',
index=models.Index(fields=['couch_id'], name=ACCESS_INDEX),
),
]
),
migrations.AddField(
model_name='navigationeventaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(NAVIGATION_EVENT_TABLE, NAVIGATION_EVENT_INDEX),
reverse_sql=_drop_index_sql(NAVIGATION_EVENT_INDEX),
state_operations=[
migrations.AddIndex(
model_name='navigationeventaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=NAVIGATION_EVENT_INDEX),
),
]
),
]
|
Python
| 0
|
@@ -1214,21 +1214,32 @@
=models.
-Index
+UniqueConstraint
(fields=
@@ -1251,16 +1251,60 @@
ch_id'%5D,
+ condition=models.Q(couch_id__isnull=False),
name=AC
|
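Decoded, the hunks make the accessaudit state operation mirror the navigationeventaudit entry later in the same list, a partial unique constraint instead of a plain index (sketch):

migrations.AddIndex(
    model_name='accessaudit',
    index=models.UniqueConstraint(fields=['couch_id'],
                                  condition=models.Q(couch_id__isnull=False),
                                  name=ACCESS_INDEX),
),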
11405d9ee340cdae33eaaa98469f9e9a43de26dc
|
Enable Etherbone by default; also default to Crossover UART when kwargs is empty.
|
litex_boards/targets/sds1104xe.py
|
litex_boards/targets/sds1104xe.py
|
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
# Build/Use ----------------------------------------------------------------------------------------
# Build/Load bitstream:
# ./sds1104xe.py --with-etherbone --uart-name=crossover --csr-csv=csr.csv --build --load
#
# Test Ethernet:
# ping 192.168.1.50
#
# Test Console:
# litex_server --udp
# litex_term crossover
# --------------------------------------------------------------------------------------------------
import os
import argparse
from migen import *
from litex_boards.platforms import sds1104xe
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.modules import MT41K64M16
from litedram.phy import s7ddrphy
from liteeth.phy.mii import LiteEthPHYMII
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_idelay = ClockDomain()
# # #
self.submodules.pll = pll = S7PLL(speedgrade=-1)
pll.register_clkin(ClockSignal("eth_tx"), 25e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_sys4x_dqs, 4*sys_clk_freq, phase=90)
pll.create_clkout(self.cd_idelay, 200e6)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_idelay)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(100e6), with_etherbone=False, eth_ip="192.168.1.50", **kwargs):
platform = sds1104xe.Platform()
# SoCCore ----------------------------------------------------------------------------------
if kwargs["uart_name"] == "serial":
kwargs["uart_name"] = "crossover" # Defaults to Crossover UART.
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Siglent SDS1104X-E",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.A7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq)
self.add_csr("ddrphy")
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT41K64M16(sys_clk_freq, "1:4"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x40000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# Etherbone --------------------------------------------------------------------------------
if with_etherbone:
self.submodules.ethphy = LiteEthPHYMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
self.add_csr("ethphy")
self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on SDS1104X-E")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--sys-clk-freq", default=100e6, help="System clock frequency (default: 100MHz)")
parser.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support")
parser.add_argument("--eth-ip", default="192.168.1.50", type=str, help="Ethernet/Etherbone IP address")
builder_args(parser)
soc_sdram_args(parser)
vivado_build_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
with_etherbone = args.with_etherbone,
eth_ip = args.eth_ip,
**soc_sdram_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(**vivado_build_argdict(args), run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"), device=1)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2258,12 +2258,11 @@
one=
-Fals
+Tru
e, e
@@ -2449,25 +2449,29 @@
if kwargs
-%5B
+.get(
%22uart_name%22%5D
@@ -2469,17 +2469,27 @@
rt_name%22
-%5D
+, %22serial%22)
== %22ser
|
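Decoded, the hunks flip the Etherbone default to True and use kwargs.get() so an empty kwargs no longer raises KeyError before falling back to the Crossover UART (sketch; unchanged setup elided):

def __init__(self, sys_clk_freq=int(100e6), with_etherbone=True, eth_ip="192.168.1.50", **kwargs):
    # ... unchanged SoCCore setup ...
    if kwargs.get("uart_name", "serial") == "serial":  # was: kwargs["uart_name"]
        kwargs["uart_name"] = "crossover"  # Defaults to Crossover UART.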
0c816aaa82ee9fee1ee244c6b96c1a2718ec836e
|
use default python command from the environment
|
testrunner.py
|
testrunner.py
|
#!/usr/bin/python
import os
import sys
import unittest
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps."""
SDK_PATH_manual = '/usr/local/google_appengine'
TEST_PATH_manual = '../unittests'
def main(sdk_path, test_path):
os.chdir('backend')
sys.path.extend([sdk_path, '.', '../lib', '../testlib'])
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
if not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful():
sys.exit(-1)
if __name__ == '__main__':
SDK_PATH = SDK_PATH_manual
TEST_PATH = TEST_PATH_manual
if len(sys.argv)==2:
SDK_PATH = sys.argv[1]
main(SDK_PATH, TEST_PATH)
|
Python
| 0.000001
|
@@ -4,16 +4,20 @@
usr/bin/
+env
python%0Ai
|
ec2aaf86f2002b060f6e5b4d040961a37f89d06a
|
Update rearrange-string-k-distance-apart.py
|
Python/rearrange-string-k-distance-apart.py
|
Python/rearrange-string-k-distance-apart.py
|
# Time: O(nlogc), c is the count of unique characters.
# Space: O(c)
from collections import defaultdict
from heapq import heappush, heappop
class Solution(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
if k == 0:
return str
cnts = defaultdict(int)
for c in str:
cnts[c] += 1
heap = []
for c, cnt in cnts.iteritems():
heappush(heap, [-cnt, c])
result = []
while heap:
used_cnt_chars = []
for _ in xrange(min(k, len(str) - len(result))):
if not heap:
return ""
cnt_char = heappop(heap)
result.append(cnt_char[1])
cnt_char[0] += 1
if cnt_char[0] < 0:
used_cnt_chars.append(cnt_char)
for cnt_char in used_cnt_chars:
heappush(heap, cnt_char)
return "".join(result)
|
Python
| 0.000046
|
@@ -1,8 +1,854 @@
+# Time: O(n)%0A# Space: O(n)%0A%0Aclass Solution(object):%0A def rearrangeString(self, str, k):%0A %22%22%22%0A :type str: str%0A :type k: int%0A :rtype: str%0A %22%22%22%0A cnts = %5B0%5D * 26;%0A for c in str:%0A cnts%5Bord(c) - ord('a')%5D += 1%0A%0A sorted_cnts = %5B%5D%0A for i in xrange(26):%0A sorted_cnts.append((cnts%5Bi%5D, chr(i + ord('a'))))%0A sorted_cnts.sort(reverse=True)%0A%0A max_cnt = sorted_cnts%5B0%5D%5B0%5D%0A blocks = %5B%5B%5D for _ in xrange(max_cnt)%5D%0A i = 0%0A for cnt in sorted_cnts:%0A for _ in xrange(cnt%5B0%5D):%0A blocks%5Bi%5D.append(cnt%5B1%5D)%0A i = (i + 1) %25 max(cnt%5B0%5D, max_cnt - 1)%0A%0A for i in xrange(max_cnt-1):%0A if len(blocks%5Bi%5D) %3C k:%0A return %22%22%0A%0A return %22%22.join(map(lambda x : %22%22.join(x), blocks))%0A%0A%0A
# Time:
@@ -909,17 +909,16 @@
e: O(c)%0A
-%0A
from col
@@ -981,17 +981,16 @@
heappop%0A
-%0A
class So
@@ -987,32 +987,33 @@
p%0Aclass Solution
+2
(object):%0A de
|
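Decoded, the large hunk prepends a bucket-based O(n) solution and renames the original heap-based class to Solution2. The new class, reconstructed from the diff:

# Time:  O(n)
# Space: O(n)

class Solution(object):
    def rearrangeString(self, str, k):
        """
        :type str: str
        :type k: int
        :rtype: str
        """
        cnts = [0] * 26
        for c in str:
            cnts[ord(c) - ord('a')] += 1

        sorted_cnts = []
        for i in xrange(26):
            sorted_cnts.append((cnts[i], chr(i + ord('a'))))
        sorted_cnts.sort(reverse=True)

        max_cnt = sorted_cnts[0][0]
        blocks = [[] for _ in xrange(max_cnt)]
        i = 0
        for cnt in sorted_cnts:
            for _ in xrange(cnt[0]):
                blocks[i].append(cnt[1])
                i = (i + 1) % max(cnt[0], max_cnt - 1)

        for i in xrange(max_cnt - 1):
            if len(blocks[i]) < k:
                return ""

        return "".join(map(lambda x: "".join(x), blocks))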
392cf8f05b6c23600e7a61a51494771ab08f2274
|
add exceptions to should_curry
|
toolz/curried.py
|
toolz/curried.py
|
"""
Alternate namespace for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)]
>>> map(lambda seq: get(0, seq), data)
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> map(get(0), data)
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from .functoolz import curry
import inspect
def nargs(f):
try:
return len(inspect.getargspec(f).args)
except TypeError:
return None
def should_curry(f):
return callable(f) and nargs(f) and nargs(f) > 1
d = dict((name, curry(f) if '__' not in name and should_curry(f) else f)
for name, f in toolz.__dict__.items())
locals().update(d)
|
Python
| 0.000011
|
@@ -812,16 +812,60 @@
n None%0A%0A
+exceptions = set((toolz.map, toolz.filter))%0A
%0Adef sho
@@ -889,16 +889,17 @@
return
+(
callable
@@ -932,16 +932,48 @@
(f) %3E 1%0A
+ or f in exceptions)%0A
%0A%0Ad = di
|
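Decoded, the hunks whitelist toolz.map and toolz.filter for currying (presumably because, as builtins, they defeat inspect.getargspec, so nargs() returns None and should_curry() previously rejected them):

exceptions = set((toolz.map, toolz.filter))

def should_curry(f):
    return (callable(f) and nargs(f) and nargs(f) > 1
            or f in exceptions)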
6c3d1a36f542e7162d962beccd40245fca026521
|
Fix bug with one-liner hashlib
|
tptapi/client.py
|
tptapi/client.py
|
import requests
import six
import hashlib
from . import errors
def md5(data):
return hashlib.md5().update(bytes(data)).hexdigest()
class Client(object):
def __init__(self):
self.base_url = "http://powdertoy.co.uk"
def _get(self, url, params=None):
headers = {
"X-Auth-User-Id": 0,
"X-Auth-Session-Key": 0
}
if hasattr(self, 'loginData'):
headers["X-Auth-User-Id"] = self.loginData["UserID"]
headers["X-Auth-Session-Key"] = self.loginData["SessionKey"]
return requests.get(url, params=params, headers=headers)
def _post(self, url, params=None, data=None):
headers = {
"X-Auth-User-Id": 0,
"X-Auth-Session-Key": 0
}
if hasattr(self, 'loginData'):
headers["X-Auth-User-Id"] = self.loginData["UserID"]
headers["X-Auth-Session-Key"] = self.loginData["SessionKey"]
return requests.post(url, params=params, data=data, headers=headers)
def login(self, user, password):
form = {
"Username": user,
"Hash": md5("{0}-{1}".format(user, md5(password)))
}
r = self._post(self.opts["url"] + "Login.json", data=form)
if r.status_code == requests.codes.ok:
self.loginData = j = r.json()
if len(j["Notifications"]):
six.print_("User has a new notifications: "+", ".join(j["Notifications"]))
del self.loginData["Status"]
del self.loginData["Notifications"]
else:
raise errors.InvalidLogin()
return r.status_code == requests.codes.ok
def checkLogin(self):
r = self._get(self.base_url + "/Login.json").json()
return r["Status"] == 1
def vote(self, ID, action):
# action can be -1 or +1
form = {
"ID": int(ID),
"Action": "Up" if action > 0 else "Down"
}
r = self._post(self.base_url + "/Vote.api", data=form)
return r.text() == "OK"
def comment(self, ID, content):
form = {
"Comment": content
}
qs = {"ID": ID}
r = self._post(self.base_url + "/Browse/Comments.json", data=form, params=qs)
return r.status_code == requests.codes.ok
def addTag(self, ID, tag):
qs = {
"ID": ID,
"Tag": tag,
"Op": "add",
"Key": self.loginData.SessionKey
}
r = self._get(self.base_url + "/Browse/EditTag.json", params=qs)
return r.status_code == requests.codes.ok
def delTag(self, ID, tag):
qs = {
"ID": ID,
"Tag": tag,
"Op": "delete",
"Key": self.loginData.SessionKey
}
r = self._get(self.base_url + "/Browse/EditTag.json", params=qs)
return r.status
def delSave(self, ID):
qs = {
"ID": ID,
"Mode": "Delete",
"Key": self.loginData.SessionKey
}
r = self._get(self.base_url + "/Browse/Delete.json", params=qs)
return r.status_code == requests.codes.ok
def unpublishSave(self, ID):
qs = {
"ID": ID,
"Mode": "Unpublish",
"Key": self.loginData.SessionKey
}
r = self._get(self.base_url + "/Browse/Delete.json", params=qs)
return r.status_code == requests.codes.ok
def publishSave(self, ID, content):
form = {
"ActionPublish": 1
}
qs = {
"ID": ID,
"Key": self.loginData.SessionKey
}
r = self._post(self.base_url + "/Browse/View.json", data=form, params=qs)
return r.text() == "1"
def setProfile(self, p):
# action can be -1 or +1
r = self._post(self.base_url + "/Profile.json", data=p)
return r.text() == "OK"
def browse(self, query, count,start):
qs = {
"Start": start,
"Count": count,
"Search_Query": query
}
r = self._get(self.base_url + "/Browse.json", params=qs)
return r.json()
def listTags(self, c, s):
qs = {
"Start": s,
"Count": c
}
r = self._get(self.base_url + "/Browse/Tags.json", params=qs)
return r.json()["Tags"]
def fav(self, ID):
qs = {
"ID": ID,
"Key": self.loginData.SessionKey
}
r = self._get(self.base_url + "/Browse/Favourite.json", params=qs)
return r.status_code == requests.codes.ok
def remfav(self, ID):
qs = {
"ID": ID,
"Key": self.loginData.SessionKey,
"Mode": "Remove"
}
r = self._get(self.base_url + "/Browse/Tags.json", params=qs)
return r.status_code == requests.codes.ok
def save(self, name, desc, data):
# action can be -1 or +1
form = {
"Name": name,
"Description": desc,
"Data": data
}
r = self._post(self.base_url + "/Save.api", data=form)
if r.text().split(" ")[0] == "OK":
return r.text().split(" ")[1]
def updateSave(self, ID, data, desc):
# action can be -1 or +1
form = {
"ID": int(ID),
"Description": desc,
"Data": data
}
r = self._post(self.base_url + "/Vote.api", data=form)
return r.text() == "OK"
def saveData(self, ID):
qs = {"ID": ID}
r = self._get(self.base_url + "/Browse/View.json", params=qs)
return r.json()
def startup(self):
return self._get(self.base_url + "/Startup.json").json()
def comments(self, ID, count, start):
qs = {
"Start": start,
"Count": count,
"ID": ID
}
r = self._get(self.base_url + "/Browse/Comments.json", params=qs)
return r.json()
|
Python
| 0.000001
|
@@ -99,17 +99,8 @@
md5(
-).update(
byte
|
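The bug here: hashlib.md5().update(...) returns None, so chaining .hexdigest() onto it fails at runtime. Applying the hunk gives roughly this one-liner (decoded from the diff):

def md5(data):
    # pass the data to the constructor so the call chain stays valid
    return hashlib.md5(bytes(data)).hexdigest()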
f5810cb764e81ba2acf3253891fb66197c5f3105
|
Bump version number
|
VMEncryption/main/Common.py
|
VMEncryption/main/Common.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommonVariables:
utils_path_name = 'Utils'
extension_name = 'AzureDiskEncryptionForLinuxTest'
extension_version = '0.1.0.999233'
extension_type = extension_name
extension_media_link = 'https://amextpaas.blob.core.windows.net/prod/' + extension_name + '-' + str(extension_version) + '.zip'
extension_label = 'Windows Azure VMEncryption Extension for Linux IaaS'
extension_description = extension_label
"""
disk/file system related
"""
sector_size = 512
luks_header_size = 4096 * 512
default_block_size = 52428800
min_filesystem_size_support = 52428800 * 3
#TODO for the sles 11, we should use the ext3
default_file_system = 'ext4'
default_mount_name = 'encrypted_disk'
dev_mapper_root = '/dev/mapper/'
disk_by_id_root = '/dev/disk/by-id'
"""
parameter key names
"""
PassphraseFileNameKey = 'BekFileName'
KeyEncryptionKeyURLKey = 'KeyEncryptionKeyURL'
KeyVaultURLKey = 'KeyVaultURL'
AADClientIDKey = 'AADClientID'
AADClientCertThumbprintKey = 'AADClientCertThumbprint'
KeyEncryptionAlgorithmKey = 'KeyEncryptionAlgorithm'
DiskFormatQuerykey = "DiskFormatQuery"
PassphraseKey = 'Passphrase'
"""
value for VolumeType could be OS or Data
"""
VolumeTypeKey = 'VolumeType'
AADClientSecretKey = 'AADClientSecret'
SecretUriKey = 'SecretUri'
SecretSeqNum = 'SecretSeqNum'
VolumeTypeOS = 'OS'
VolumeTypeData = 'Data'
VolumeTypeAll = 'All'
SupportedVolumeTypes = [ VolumeTypeOS, VolumeTypeData, VolumeTypeAll ]
"""
command types
"""
EnableEncryption = 'EnableEncryption'
EnableEncryptionFormat = 'EnableEncryptionFormat'
UpdateEncryptionSettings = 'UpdateEncryptionSettings'
DisableEncryption = 'DisableEncryption'
QueryEncryptionStatus = 'QueryEncryptionStatus'
"""
encryption config keys
"""
EncryptionEncryptionOperationKey = 'EncryptionOperation'
EncryptionDecryptionOperationKey = 'DecryptionOperation'
EncryptionVolumeTypeKey = 'VolumeType'
EncryptionDiskFormatQueryKey = 'DiskFormatQuery'
"""
crypt ongoing item config keys
"""
OngoingItemMapperNameKey = 'MapperName'
OngoingItemHeaderFilePathKey = 'HeaderFilePath'
OngoingItemOriginalDevNamePathKey = 'DevNamePath'
OngoingItemOriginalDevPathKey = 'DevicePath'
OngoingItemPhaseKey = 'Phase'
OngoingItemHeaderSliceFilePathKey = 'HeaderSliceFilePath'
OngoingItemFileSystemKey = 'FileSystem'
OngoingItemMountPointKey = 'MountPoint'
OngoingItemDeviceSizeKey = 'Size'
OngoingItemCurrentSliceIndexKey = 'CurrentSliceIndex'
OngoingItemFromEndKey = 'FromEnd'
OngoingItemCurrentDestinationKey = 'CurrentDestination'
OngoingItemCurrentTotalCopySizeKey = 'CurrentTotalCopySize'
OngoingItemCurrentLuksHeaderFilePathKey = 'CurrentLuksHeaderFilePath'
OngoingItemCurrentSourcePathKey = 'CurrentSourcePath'
OngoingItemCurrentBlockSizeKey = 'CurrentBlockSize'
"""
encryption phase definitions
"""
EncryptionPhaseBackupHeader = 'BackupHeader'
EncryptionPhaseCopyData = 'EncryptingData'
EncryptionPhaseRecoverHeader = 'RecoverHeader'
EncryptionPhaseEncryptDevice = 'EncryptDevice'
EncryptionPhaseDone = 'Done'
"""
decryption phase constants
"""
DecryptionPhaseCopyData = 'DecryptingData'
DecryptionPhaseDone = 'Done'
"""
logs related
"""
InfoLevel = 'Info'
WarningLevel = 'Warning'
ErrorLevel = 'Error'
"""
error codes
"""
extension_success_status = 'success'
extension_error_status = 'error'
process_success = 0
success = 0
os_not_supported = 1
luks_format_error = 2
scsi_number_not_found = 3
device_not_blank = 4
environment_error = 5
luks_open_error = 6
mkfs_error = 7
folder_conflict_error = 8
mount_error = 9
mount_point_not_exists = 10
passphrase_too_long_or_none = 11
parameter_error = 12
create_encryption_secret_failed = 13
encrypttion_already_enabled = 14
passphrase_file_not_found = 15
command_not_support = 16
volue_type_not_support = 17
copy_data_error = 18
encryption_failed = 19
tmpfs_error = 20
backup_slice_file_error = 21
unmount_oldroot_error = 22
operation_lookback_failed = 23
unknown_error = 100
class TestHooks:
search_not_only_ide = False
use_hard_code_passphrase = False
hard_code_passphrase = "Quattro!"
class DeviceItem(object):
def __init__(self):
#NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL
self.name = None
self.type = None
self.file_system = None
self.mount_point = None
self.label = None
self.uuid = None
self.model = None
self.size = None
def __str__(self):
return ("name:" + str(self.name) + " type:" + str(self.type) +
" fstype:" + str(self.file_system) + " mountpoint:" + str(self.mount_point) +
" label:" + str(self.label) + " model:" + str(self.model) +
" size:" + str(self.size))
class CryptItem(object):
def __init__(self):
self.mapper_name = None
self.dev_path = None
self.mount_point = None
self.file_system = None
self.luks_header_path = None
self.uses_cleartext_key = None
self.current_luks_slot = None
def __str__(self):
return ("name: " + str(self.mapper_name) + " dev_path:" + str(self.dev_path) +
" mount_point:" + str(self.mount_point) + " file_system:" + str(self.file_system) +
" luks_header_path:" + str(self.luks_header_path) +
" uses_cleartext_key:" + str(self.uses_cleartext_key) +
" current_luks_slot:" + str(self.current_luks_slot))
|
Python
| 0.000002
|
@@ -731,20 +731,16 @@
ForLinux
-Test
'%0A ex
@@ -769,17 +769,17 @@
.0.99923
-3
+4
'%0A ex
|
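Applying both hunks, the two changed constants become the following (decoded from the diff; note the commit also drops the 'Test' suffix from extension_name, which the subject line does not mention):

    extension_name = 'AzureDiskEncryptionForLinux'
    extension_version = '0.1.0.999234'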
bd3559a51eebaeb409e203c738aaa81e0071deec
|
update code in compute.py resource to match other resource pattern
|
xos/tosca/resources/compute.py
|
xos/tosca/resources/compute.py
|
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
from core.models import Slice,Sliver,User,Flavor,Node,Image
from nodeselect import XOSNodeSelector
from imageselect import XOSImageSelector
from flavorselect import XOSFlavorSelector
from xosresource import XOSResource
class XOSCompute(XOSResource):
provides = "tosca.nodes.Compute"
xos_model = Sliver
def select_compute_node(self, user, v, hostname=None):
mem_size = v.get_property_value("mem_size")
num_cpus = v.get_property_value("num_cpus")
disk_size = v.get_property_value("disk_size")
flavor = XOSFlavorSelector(user, mem_size=mem_size, num_cpus=num_cpus, disk_size=disk_size).get_flavor()
compute_node = XOSNodeSelector(user, mem_size=mem_size, num_cpus=num_cpus, disk_size=disk_size, hostname=hostname).get_nodes(1)[0]
return (compute_node, flavor)
def select_image(self, user, v):
distribution = v.get_property_value("distribution")
version = v.get_property_value("version")
type = v.get_property_value("type")
architecture = v.get_property_value("architecture")
return XOSImageSelector(user, distribution=distribution, version=version, type=type, architecture=architecture).get_image()
def get_xos_args(self, name=None, index=None):
nodetemplate = self.nodetemplate
if not name:
name = nodetemplate.name
host=None
flavor=None
image=None
sliceName = self.get_requirement("tosca.relationships.MemberOfSlice", throw_exception=True)
slice = self.get_xos_object(Slice, name=sliceName)
        # locate it on the same host as some other sliver
colocate_host = None
colocate_sliver_name = self.get_requirement("tosca.relationships.SameHost")
if index is not None:
colocate_sliver_name = "%s-%d" % (colocate_sliver_name, index)
colocate_slivers = Sliver.objects.filter(name=colocate_sliver_name)
if colocate_slivers:
colocate_host = colocate_slivers[0].node.name
self.info("colocating on %s" % colocate_host)
capabilities = nodetemplate.get_capabilities()
for (k,v) in capabilities.items():
if (k=="host"):
(compute_node, flavor) = self.select_compute_node(self.user, v, hostname=colocate_host)
elif (k=="os"):
image = self.select_image(self.user, v)
if not compute_node:
raise Exception("Failed to pick a host")
if not image:
raise Exception("Failed to pick an image")
if not flavor:
raise Exception("Failed to pick a flavor")
return {"name": name,
"image": image,
"slice": slice,
"flavor": flavor,
"node": compute_node,
"deployment": compute_node.site_deployment.deployment}
def create(self, name = None, index = None):
xos_args = self.get_xos_args(name=name, index=index)
sliver = Sliver(**xos_args)
sliver.caller = self.user
sliver.no_sync = True
sliver.save()
self.deferred_sync.append(sliver)
self.info("Created Sliver '%s' on node '%s' using flavor '%s' and image '%s'" %
(str(sliver), str(sliver.node), str(sliver.flavor), str(sliver.image)))
def create_or_update(self):
scalable = self.get_scalable()
if scalable:
default_instances = scalable.get("default_instances",1)
for i in range(0, default_instances):
name = "%s-%d" % (self.nodetemplate.name, i)
existing_slivers = Sliver.objects.filter(name=name)
if existing_slivers:
self.info("%s %s already exists" % (self.xos_model.__name__, name))
self.update(existing_slivers[0])
else:
self.create(name, index=i)
else:
super(XOSCompute,self).create_or_update()
def get_existing_objs(self):
scalable = self.get_scalable()
if scalable:
existing_slivers = []
max_instances = scalable.get("max_instances",1)
for i in range(0, max_instances):
name = "%s-%d" % (self.nodetemplate.name, i)
existing_slivers = existing_slivers + list(Sliver.objects.filter(name=name))
return existing_slivers
else:
return super(XOSCompute,self).get_existing_objs()
|
Python
| 0
|
@@ -1494,16 +1494,47 @@
e.name%0A%0A
+ args = %7B%22name%22: name%7D%0A%0A
@@ -2807,61 +2807,29 @@
-return %7B%22name%22: name,%0A
+args%5B
%22image%22
-:
+%5D =
image
-,
%0A
@@ -2837,31 +2837,29 @@
-
+args%5B
%22slice%22
-:
+%5D =
slice
-,
%0A
@@ -2867,24 +2867,21 @@
-
+args%5B
%22flavor%22
: fl
@@ -2880,17 +2880,18 @@
vor%22
-:
+%5D =
flavor
-,
%0A
@@ -2899,23 +2899,22 @@
-
+args%5B
%22node%22
-:
+%5D =
com
@@ -2922,17 +2922,16 @@
ute_node
-,
%0A
@@ -2931,24 +2931,21 @@
-
+args%5B
%22deploym
@@ -2948,17 +2948,19 @@
loyment%22
-:
+%5D =
compute
@@ -2986,18 +2986,29 @@
nt.d
-eployment%7D
+%0A%0A return args
%0A%0A
|
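Decoded, the refactor replaces the single return-dict with incremental assignments into an args dict, matching the pattern used by the other tosca resources (a sketch of only the changed lines; the surrounding selection logic is unchanged):

        args = {"name": name}
        # ... host/image/flavor selection as before ...
        args["image"] = image
        args["slice"] = slice
        args["flavor"] = flavor
        args["node"] = compute_node
        args["deployment"] = compute_node.site_deployment.deployment

        return args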
778bab1b4f57eb03137c00203d7b5f32c018ca83
|
fix error
|
ImagePaste.py
|
ImagePaste.py
|
# import sublime
import sublime_plugin
import os
package_file = os.path.normpath(os.path.abspath(__file__))
package_path = os.path.dirname(package_file)
lib_path = os.path.join(package_path, "lib")
if lib_path not in sys.path:
sys.path.append(lib_path)
print(sys.path)
from PIL import ImageGrab
from PIL import ImageFile
class ImagePasteCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
rel_fn = self.paste()
if not rel_fn:
view.run_command("paste")
return
for pos in view.sel():
# print("scope name: %r" % (view.scope_name(pos.begin())))
if 'text.html.markdown' in view.scope_name(pos.begin()):
view.insert(edit, pos.begin(), "" % rel_fn)
else:
view.insert(edit, pos.begin(), "%s" % rel_fn)
# only the first cursor add the path
break
def paste(self):
ImageFile.LOAD_TRUNCATED_IMAGES = True
im = ImageGrab.grabclipboard()
if im:
abs_fn, rel_fn = self.get_filename()
im.save(abs_fn,'PNG')
return rel_fn
else:
print('clipboard buffer is not image!')
return None
def get_filename(self):
view = self.view
filename = view.file_name()
# create dir in current path with the name of current filename
dirname, _ = os.path.splitext(filename)
# create new image file under currentdir/filename_without_ext/filename_without_ext%d.png
fn_without_ext = os.path.basename(dirname)
if not os.path.lexists(dirname):
os.mkdir(dirname)
i = 0
while True:
# relative file path
rel_filename = os.path.join("%s/%s%d.png" % (fn_without_ext, fn_without_ext, i))
# absolute file path
abs_filename = os.path.join(dirname, "%s%d.png" % ( fn_without_ext, i))
if not os.path.exists(abs_filename):
break
i += 1
print("save file: " + abs_filename + "\nrel " + rel_filename)
return abs_filename, rel_filename
|
Python
| 0.000002
|
@@ -41,16 +41,27 @@
mport os
+%0Aimport sys
%0A%0Apackag
|
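The old module referenced sys.path without ever importing sys, so the plugin died with a NameError on load. The hunk simply adds the missing import (decoded):

import sublime_plugin
import os
import sys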
734457ed995a3dfcacf8556ed4e98e7536e63a66
|
Fix typos
|
nodeconductor/openstack/management/commands/initsecuritygroups.py
|
nodeconductor/openstack/management/commands/initsecuritygroups.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from nodeconductor.openstack import models, executors, handlers
class Command(BaseCommand):
help_text = "Add default security groups with given names to all tenants to tenants."
def add_arguments(self, parser):
parser.add_argument('names', nargs='+', type=str)
def handle(self, *args, **options):
names = options['names']
default_security_groups = getattr(settings, 'NODECONDUCTOR', {}).get('DEFAULT_SECURITY_GROUPS')
security_groups = []
for name in names:
try:
group = next(sg for sg in default_security_groups if sg['name'] == name)
except StopIteration:
raise CommandError('There is no default security group with name %s' % name)
else:
security_groups.append(group)
for spl in models.OpenStackServiceProjectLink.objects.all():
if not spl.tenant:
continue
for group in security_groups:
if spl.security_groups.filter(name=group['name']).exists():
self.stdout.write('Tenant %s already have security group %s' % (spl.tenant, group['name']))
continue
spl.security_groups.create(name=group['name'], description=group['description'])
try:
db_security_group = handlers.create_security_group(spl, group)
except handlers.SecurityGroupCreateException as e:
self.stdout.write(
'Failed to add security_group %s to tenant %s. Error: %s' % (group['name'], spl.teannt, e))
else:
executors.SecurityGroupCreateExecutor.execute(db_security_group, async=False)
self.stdout.write(
'Security group %s has been successfully added to tenant %s' % (group['name'], spl.tenant))
|
Python
| 0.999537
|
@@ -301,19 +301,8 @@
all
-tenants to
tena
@@ -1237,18 +1237,17 @@
ready ha
-ve
+s
securit
|
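Applied, the two typo fixes read as below (decoded from the diff; the unrelated spl.teannt typo on the error path is left untouched by this commit):

    help_text = "Add default security groups with given names to all tenants."

    self.stdout.write('Tenant %s already has security group %s'
                      % (spl.tenant, group['name']))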
b5b38d5ba76e61bc14e25a45394424436e323c5d
|
fix reduction2 index dict
|
utils/speedyvec/vectorizers/subject_verb_agreement.py
|
utils/speedyvec/vectorizers/subject_verb_agreement.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Generate a model capable of detecting subject-verb agreement errors"""
from pattern.en import lexeme, tenses
from pattern.en import pluralize, singularize
from textstat.textstat import textstat
from time import sleep
import hashlib
import os
import pika
import psycopg2
import random
import re
import sqlite3
import textacy
from sva_reducer import get_reduction
print("You just imported get_reduction from sva_reducer. This reduction"
"algorithm should be the same as the one used to create your previous"
"reducutions.")
RABBIT = os.environ.get('RABBITMQ_LOCATION', 'localhost')
DB_PASSWORD = os.environ.get('SVA_PASSWORD', '')
DB_NAME = os.environ.get('SVA_DB', 'sva')
DB_USER = os.environ.get('SVA_USER', DB_NAME)
# Indexing the sentence keys ################################################
print("Indexing sentence keys...")
# Connect to postgres
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD)
cur = conn.cursor()
# Select unique reductions in order of regularity, must occur at least thrice
reductions = cur.execute('SELECT reduction, count(*) from reductions group by'
' reduction having count(*) > 2 order by count(*) desc;')
# with ~2 million total sentences the number of unique reductions was a little
# over 12k. ~5k had more than 2 occurrences
reduction2idx = {n: i for i, n[1] in enumerate(reductions)}
num_reductions = len(reduction2idx)
# close connections to database
cur.close()
conn.close()
# Vectorizing sentence keys ################################################
print('Vectorizing sentence keys...')
# vectors must be convertable to a numpy array.
# NOTE: storing the number of reductions on each object is not necessary and is
# increasing db size. The advantage is that each row can compute its numpy
# vector with no database calls which is why we choose it. We might undecide
# this at some point.
# Ex:
# {indices={5:1, 6:2, 500:1, 6003:2} num_reductions=5000}
# {indicies={index:count, index:count, ...} reductions=num_reductions}
def get_vector(string):
result = {'indices':{}, 'reductions':num_reductions}
for reduction in get_reduction(string):
index = reduction2idx.get(reduction)
if index:
result['indices'][index] = x['indices'].get(index, 0) + 1
result = repr(result) # transform to a string
return result
def handle_message(ch, method, properties, body):
labeled_sent_dict = dict(body.decode("utf-8"))
sent_str = labeled_sent_dict['sent_str']
label = labeled_sent_dict['label']
for vector in get_vector(sent_str):
labeled_vector = repr({'vector':vector, 'label':label})
channel.basic_publish(exchange='', routing_key='vectors',
body=labeled_vector)
ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == '__main__':
connection = pika.BlockingConnection(pika.ConnectionParameters(RABBIT))
channel = connection.channel()
channel.queue_declare(queue='fstrings') # create queue if doesn't exist
channel.queue_declare(queue='reductions')
# NOTE: if the prefetch count is too high, some workers could starve. If it
    # is too low, we make an unnecessary amount of requests to rabbitmq server
channel.basic_qos(prefetch_count=10) # limit num of unackd msgs on channel
channel.basic_consume(handle_message, queue='fstrings', no_ack=False)
channel.start_consuming()
|
Python
| 0
|
@@ -1128,29 +1128,16 @@
thrice%0A
-reductions =
cur.exec
@@ -1402,16 +1402,19 @@
idx = %7Bn
+%5B0%5D
: i for
@@ -1421,11 +1421,8 @@
i, n
-%5B1%5D
in
@@ -1431,26 +1431,19 @@
umerate(
-reductions
+cur
)%7D %0Anum_
|
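Two fixes in one hunk: psycopg2's cursor.execute() returns None (so binding its result to reductions was useless), and {n: i for i, n[1] in ...} is not valid comprehension syntax. Decoded, the indexing block becomes:

cur.execute('SELECT reduction, count(*) from reductions group by'
            ' reduction having count(*) > 2 order by count(*) desc;')
# iterate the cursor itself; each n is a (reduction, count) row
reduction2idx = {n[0]: i for i, n in enumerate(cur)}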
b14133db8127fd4584de3399893c3f7fcbc7f0ba
|
Optimise get_most_common_from_counter func
|
matasano-crypto-solutions/set1.py
|
matasano-crypto-solutions/set1.py
|
from base64 import b64encode
from binascii import hexlify
from collections import Counter, defaultdict
from string import ascii_letters
from typing import Dict
from pathlib import Path
import os
current_dir = str(Path(__file__).parent)
def get_most_common_from_counter(counter: Counter, n: int):
"""Like counter.most_common(n), but includes elements with
equal counts, which means the returned
list may be longer than n. Elements with the same count are ordered in
reverse alphabetical order (easier to code), but all elements with a
given count will be returned if one is returned.
>>> get_most_common_from_counter(Counter('test'), 1)
[('t', 2)]
>>> get_most_common_from_counter(Counter('test'), 2)
[('t', 2), ('s', 1), ('e', 1)]
>>> get_most_common_from_counter(Counter('test'), 1)
[('t', 2)]
>>> get_most_common_from_counter(Counter('teest'), 1)
[('t', 2), ('e', 2)]
>>> get_most_common_from_counter(Counter('teest'), 9)
[('t', 2), ('e', 2), ('s', 1)]"""
if n < 0:
return []
elems = counter.most_common()
# nums holds all the nums we have seen so far, including duplicates
nums = [elems[0][1]]
i = 1
for elem, num in elems[1:]:
if len(nums) >= n and num < nums[-1]:
break
# we either haven't got enough nums or we haven't finished collecting
# the same num
nums.append(num)
i += 1
return sorted(elems[:i], key=lambda tup: str(
tup[1]) + tup[0], reverse=True)
def hex_to_base64(bstr: str):
return b64encode(bytes.fromhex(bstr))
def xor(x: str, y: str):
x = bytes.fromhex(x)
y = bytes.fromhex(y)
return hexlify(bytes((a ^ b for a, b in zip(x, y))))
def find_english_text(texts: list):
scores = defaultdict(list)
for text in texts:
score = 0
count = Counter(text.lower())
most_common = {x for x, _ in get_most_common_from_counter(count, 5)}
least_common = {x for x, _ in count.most_common()[:-5 - 1:-1]}
for e in ['e', 't', 'a', 'o']:
if e in most_common:
score += 1
for e in ['z', 'q', 'x']:
if e in least_common or e not in count:
score += 1
for e in text:
if e not in ascii_letters:
score -= 1
if count[' '] < 2:
score -= 2
scores[score].append(text)
max_score = max(scores.keys())
res = scores[max_score]
choice = 0
if len(res) > 1:
if max_score < 0:
return ''
choice = int(input('Select one of these: '))
if not 0 <= choice < len(res):
return ''
return res[choice]
def decode_1_byte_xor(bstr: str):
x = bytes.fromhex(bstr)
strings = {} # type: Dict[bytes, str]
for e in range(256):
try:
y = bytes((a ^ e for a in x)).decode('ascii')
except UnicodeDecodeError:
continue
strings[y] = chr(e)
if not strings:
return '', ''
res = find_english_text(strings.keys())
if not res:
return '', ''
return strings[res], res
def find_and_decrypt_ciphertexts(ciphertexts: list):
plaintexts = {}
for c in ciphertexts:
key, string = decode_1_byte_xor(c)
if not key or not string:
continue
plaintexts[string] = key
res = find_english_text(plaintexts.keys())
return plaintexts[res], res
res1 = hex_to_base64(
'49276d206b696c6c696e6720796f757220627261696e206c6'
'96b65206120706f69736f6e6f7573206d757368726f6f6d')
print('Task 1')
print(res1)
assert res1 == (b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc'
b'29ub3VzIG11c2hyb29t')
print('Task 2')
x = '1c0111001f010100061a024b53535009181c'
y = '686974207468652062756c6c277320657965'
res2 = xor(x, y)
print(res2)
assert res2 == b'746865206b696420646f6e277420706c6179'
print('Task 3')
ciphertext = ('1b37373331363f78151b7f2b783431333d78397828372d'
'363c78373e783a393b3736')
res3 = decode_1_byte_xor(ciphertext)
print(res3[1])
assert res3[1] == "Cooking MC's like a pound of bacon"
print('Task 4')
ciphertexts = open(os.path.join(current_dir, '4.txt'), 'r').read().split('\n')
res4 = find_and_decrypt_ciphertexts(ciphertexts)
print('Key: {0}\nPlaintext: {1}'.format(*res4))
assert res4[1] == 'Now that the party is jumping\n'
|
Python
| 0.998997
|
@@ -1161,16 +1161,19 @@
+last_
num
-s
=
-%5B
elem
@@ -1179,21 +1179,40 @@
ms%5B0%5D%5B1%5D
-%5D
%0A
+ total_nums_so_far =
i = 1%0A
@@ -1257,17 +1257,25 @@
if
-len(nums)
+total_nums_so_far
%3E=
@@ -1290,16 +1290,16 @@
m %3C
-nums%5B-1%5D
+last_num
:%0A
@@ -1401,16 +1401,27 @@
#
+ elems with
the sam
@@ -1422,19 +1422,21 @@
he same
-num
+count
%0A
@@ -1440,24 +1440,53 @@
-nums.append(num)
+last_num = num%0A total_nums_so_far += 1
%0A
|
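Decoded, the optimisation swaps the growing nums list for two scalars, so the loop no longer appends to or indexes a list on every iteration (a reconstruction; indentation is approximate):

    last_num = elems[0][1]
    total_nums_so_far = i = 1
    for elem, num in elems[1:]:
        if total_nums_so_far >= n and num < last_num:
            break
        # we either haven't got enough nums or we haven't finished collecting
        # elems with the same count
        last_num = num
        total_nums_so_far += 1
        i += 1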
8463c22898210990e911580d217559efdbbfe5d7
|
Make disk space test optional
|
earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/disk_space_test.py
|
earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/disk_space_test.py
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.etree.ElementTree as ET
# Need to use unittest2 for Python 2.6.
try:
import unittest2 as unittest
except ImportError:
import unittest
def getDiskInfo():
"""Returns disk usage represented as percent of total available."""
tree = ET.parse('/etc/opt/google/systemrc')
root = tree.getroot()
sys_rc = {}
for child in root:
sys_rc[child.tag] = child.text
asset_root = sys_rc["assetroot"];
mount_point = getMountPoint(asset_root)
available_space, size = getFsFreespace(mount_point)
percentage_used = (size - available_space) * 100 / size
return percentage_used
def getMountPoint(pathname):
"""Get the mount point of the filesystem containing pathname."""
pathname = os.path.normcase(os.path.realpath(pathname))
parent_device = path_device = os.stat(pathname).st_dev
while parent_device == path_device:
mount_point = pathname
pathname = os.path.dirname(pathname)
if pathname == mount_point:
break
return mount_point
def getFsFreespace(pathname):
"""Get the free space of the filesystem containing pathname."""
statvfs = os.statvfs(pathname)
# Size of filesystem in bytes
size = statvfs.f_frsize * statvfs.f_blocks
# Number of free bytes that ordinary users are allowed to use.
avail = statvfs.f_frsize * statvfs.f_bavail
return avail, size
class TestDiskSpace(unittest.TestCase):
def testAdequateDiskSpace(self):
"""Check that the remaining disk space is at least 20%."""
self.assertLessEqual(20, getDiskInfo())
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000007
|
@@ -639,16 +639,50 @@
ee as ET
+%0Afrom geecheck_tests import common
%0A%0A# Need
@@ -2010,16 +2010,96 @@
Case):%0A%0A
+ @unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')%0A
def
|
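Decoded, the test is now gated on Fusion being present: a new import plus a skipUnless decorator (reconstructed from the diff, keeping the file's 2-space indent):

from geecheck_tests import common

class TestDiskSpace(unittest.TestCase):

  @unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
  def testAdequateDiskSpace(self):
    """Check that the remaining disk space is at least 20%."""
    self.assertLessEqual(20, getDiskInfo())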
88db3ab0e09639d07a0374f9e1877ae3a3669fd4
|
Use more unittest.TestCase.assertIn instead of *.assertTrue(foo in bar).
|
utils/swift_build_support/tests/products/test_llvm.py
|
utils/swift_build_support/tests/products/test_llvm.py
|
# tests/products/test_llvm.py -----------------------------------*- python -*-
#
# This source file is part of the LLVM.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the LLVM project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of LLVM project authors
# ----------------------------------------------------------------------------
import argparse
import os
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
from swift_build_support.products import LLVM
from swift_build_support.toolchain import host_toolchain
from swift_build_support.workspace import Workspace
class LLVMTestCase(unittest.TestCase):
def setUp(self):
# Setup workspace
tmpdir1 = os.path.realpath(tempfile.mkdtemp())
tmpdir2 = os.path.realpath(tempfile.mkdtemp())
os.makedirs(os.path.join(tmpdir1, 'llvm'))
self.workspace = Workspace(source_root=tmpdir1,
build_root=tmpdir2)
# Setup toolchain
self.toolchain = host_toolchain()
self.toolchain.cc = '/path/to/cc'
self.toolchain.cxx = '/path/to/cxx'
# Setup args
self.args = argparse.Namespace(
llvm_targets_to_build='X86;ARM;AArch64;PowerPC;SystemZ',
llvm_assertions='true',
darwin_deployment_version_osx='10.9')
# Setup shell
shell.dry_run = True
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
shutil.rmtree(self.workspace.build_root)
shutil.rmtree(self.workspace.source_root)
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
shell.dry_run = False
self.workspace = None
self.toolchain = None
self.args = None
def test_llvm_targets_to_build(self):
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
expected_targets = 'X86;ARM;AArch64;PowerPC;SystemZ'
expected_arg = '-DLLVM_TARGETS_TO_BUILD=%s' % expected_targets
self.assertTrue(expected_arg in llvm.cmake_options)
def test_llvm_enable_assertions(self):
self.args.llvm_assertions = True
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertTrue('-DLLVM_ENABLE_ASSERTIONS=TRUE' in
llvm.cmake_options)
self.args.llvm_assertions = False
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertTrue('-DLLVM_ENABLE_ASSERTIONS=FALSE' in
llvm.cmake_options)
|
Python
| 0
|
@@ -2550,20 +2550,18 @@
f.assert
-True
+In
(expecte
@@ -2565,19 +2565,17 @@
cted_arg
- in
+,
llvm.cm
@@ -2849,36 +2849,34 @@
self.assert
-True
+In
('-DLLVM_ENABLE_
@@ -2895,35 +2895,9 @@
RUE'
- in%0A
+,
llv
@@ -3141,20 +3141,18 @@
f.assert
-True
+In
('-DLLVM
@@ -3180,35 +3180,9 @@
LSE'
- in%0A
+,
llv
|
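Decoded, the three assertions become assertIn calls, which report both operands on failure instead of the opaque "False is not true":

        self.assertIn(expected_arg, llvm.cmake_options)
        self.assertIn('-DLLVM_ENABLE_ASSERTIONS=TRUE', llvm.cmake_options)
        self.assertIn('-DLLVM_ENABLE_ASSERTIONS=FALSE', llvm.cmake_options)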
a602ed873d71253723f07dfa043d959cd247d734
|
Add latest version of py-typing (#13287)
|
var/spack/repos/builtin/packages/py-typing/package.py
|
var/spack/repos/builtin/packages/py-typing/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTyping(PythonPackage):
"""This is a backport of the standard library typing module to Python
versions older than 3.6."""
homepage = "https://docs.python.org/3/library/typing.html"
url = "https://pypi.io/packages/source/t/typing/typing-3.6.1.tar.gz"
import_modules = ['typing']
version('3.6.4', sha256='d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2')
version('3.6.1', sha256='c36dec260238e7464213dcd50d4b5ef63a507972f5780652e835d0228d0edace')
# You need Python 2.7 or 3.3+ to install the typing package
depends_on('python@2.7:2.8,3.3:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
|
Python
| 0
|
@@ -480,17 +480,19 @@
yping-3.
-6
+7.4
.1.tar.g
@@ -528,16 +528,114 @@
ping'%5D%0A%0A
+ version('3.7.4.1', sha256='91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23')%0A
vers
@@ -823,72 +823,8 @@
')%0A%0A
- # You need Python 2.7 or 3.3+ to install the typing package%0A
@@ -852,17 +852,17 @@
7:2.8,3.
-3
+4
:', type
|
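Decoded, the package points its url at the new release, adds the 3.7.4.1 version, and raises the Python floor from 3.3 to 3.4 (the explanatory comment about installable Pythons is dropped):

    url = "https://pypi.io/packages/source/t/typing/typing-3.7.4.1.tar.gz"

    version('3.7.4.1', sha256='91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23')
    version('3.6.4', sha256='d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2')

    depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))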
efc5d0a3b49d193910b426198d24a3440483f71f
|
version update + fix for compilers newer than gcc5 (#28243)
|
var/spack/repos/builtin/packages/snap-korf/package.py
|
var/spack/repos/builtin/packages/snap-korf/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class SnapKorf(MakefilePackage):
"""SNAP is a general purpose gene finding program suitable for both
eukaryotic and prokaryotic genomes."""
homepage = "http://korflab.ucdavis.edu/software.html"
url = "http://korflab.ucdavis.edu/Software/snap-2013-11-29.tar.gz"
version('2013-11-29', sha256='e2a236392d718376356fa743aa49a987aeacd660c6979cee67121e23aeffc66a')
depends_on('perl', type=('build', 'run'))
depends_on('boost')
depends_on('sqlite')
depends_on('sparsehash')
conflicts('%gcc@5:', when='@2013-11-29')
def install(self, spec, prefix):
mkdirp(prefix.bin)
progs = ['snap', 'fathom', 'forge', 'depend', 'exonpairs', 'hmm-info']
for p in progs:
install(p, prefix.bin)
install('*.pl', prefix.bin)
install_tree('Zoe', prefix.Zoe)
install_tree('HMM', prefix.HMM)
install_tree('DNA', prefix.DNA)
def setup_run_environment(self, env):
env.set('ZOE', self.prefix)
env.prepend_path('PATH', self.prefix)
|
Python
| 0
|
@@ -478,16 +478,146 @@
tar.gz%22%0A
+ git = %22https://github.com/KorfLab/SNAP.git%22%0A%0A version('2021-11-04', commit='62ff3120fceccb03b5eea9d21afec3167dedfa94')
%0A ver
@@ -761,129 +761,682 @@
'))%0A
+%0A
de
-pends_on('boost')%0A depends_on('sqlite')%0A depends_on('sparsehash')%0A%0A conflicts('%25gcc@5:', when='@2013-11-29
+f edit(self, spec, prefix):%0A if spec.satisfies('@2013-11-29%25gcc@6:'):%0A rstr = '%5C%5C1 -Wno-tautological-compare -Wno-misleading-indentation'%0A filter_file('(-Werror)', rstr, 'Zoe/Makefile')%0A rstr = '%5C%5C1 -Wno-error=format-overflow -Wno-misleading-indentation'%0A filter_file('(-Werror)', rstr, 'Makefile')%0A%0A filter_file(r'(%5Econst char %5C* zoeFunction;)', 'extern %5C%5C1',%0A 'Zoe/zoeTools.h')%0A filter_file(r'(%5Econst char %5C* zoeConstructor;)', 'extern %5C%5C1',%0A 'Zoe/zoeTools.h')%0A filter_file(r'(%5Econst char %5C* zoeMethod;)', 'extern %5C%5C1',%0A 'Zoe/zoeTools.h
')%0A%0A
@@ -1543,18 +1543,89 @@
'forge'
-,
+%5D%0A if spec.satisfies('@2013-11-29'):%0A progs = progs + %5B
'depend'
|
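Decoded, the recipe gains a git source plus a 2021 version, and swaps the blanket conflicts('%gcc@5:') for an edit() step that patches the Makefiles and headers so newer compilers can build (a partial reconstruction; the zoeConstructor and zoeMethod filter_file calls follow the same pattern as the zoeFunction one shown):

    git = "https://github.com/KorfLab/SNAP.git"

    version('2021-11-04', commit='62ff3120fceccb03b5eea9d21afec3167dedfa94')

    def edit(self, spec, prefix):
        if spec.satisfies('@2013-11-29%gcc@6:'):
            rstr = '\\1 -Wno-tautological-compare -Wno-misleading-indentation'
            filter_file('(-Werror)', rstr, 'Zoe/Makefile')
            rstr = '\\1 -Wno-error=format-overflow -Wno-misleading-indentation'
            filter_file('(-Werror)', rstr, 'Makefile')
        # declare the zoe globals extern in the header to avoid duplicate symbols
        filter_file(r'(^const char \* zoeFunction;)', 'extern \\1', 'Zoe/zoeTools.h')

The install list is also split, so the extra tools are only installed for the 2013 release:

        progs = ['snap', 'fathom', 'forge']
        if spec.satisfies('@2013-11-29'):
            progs = progs + ['depend', 'exonpairs', 'hmm-info']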
d3af10b713056b68220a58571a2d06bd426342a2
|
Correct terrible, stupid omission
|
types_builtin.py
|
types_builtin.py
|
from base import *
def prim_equal(p1, p2):
return match((p1, p2),
("(PInt(), PInt())", lambda: True),
("(PStr(), PStr())", lambda: True),
("(PChar(), PChar())", lambda: True),
("(PBool(), PBool())", lambda: True),
("_", lambda: False))
def _type_tuple_equal(ts1, ts2):
if len(ts1) != len(ts2):
return False
for t1, t2 in ezip(ts1, ts2):
if not type_equal(t1, t2):
return False
return True
def _type_func_equal(as1, r1, as2, r2):
if len(as1) != len(as2):
return False
for a1, a2 in ezip(as1, as2):
if not type_equal(a1, a2):
return False
return type_equal(r1, r2)
def _type_data_equal(d1, ts1, d2, ts2):
if d1 is not d2:
return False
if len(ts1) != len(ts2):
return False
for t1, t2 in ezip(ts1, ts2):
if not type_equal(t1, t2):
return False
return True
def type_equal(a, b):
if a is b:
return True
return match((a, b),
("(TVar(a), TVar(b))", lambda a, b: a is b),
("(TPrim(a), TPrim(b))", prim_equal),
("(TVoid(), TVoid())", lambda: True),
("(TTuple(ts1), TTuple(ts2))", _type_tuple_equal),
("(TFunc(args1, r1), TFunc(args2, r2))", _type_func_equal),
("(TData(d1, ts1), TData(d2, ts2))", _type_data_equal),
("(TArray(a), TArray(b))", type_equal),
("(TWeak(a), TWeak(b))", type_equal),
("_", lambda: False))
def _get_name(a):
if not a or not has_extrinsic(Name, a):
return '?? %r' % (a,)
return extrinsic(Name, a)
REPR_ENV = None
def _meta_type_repr(t, j):
assert t is not j
return _type_repr(j)
def _type_repr(t):
global REPR_ENV
if t in REPR_ENV:
return '<cyclic 0x%x>' % id(t)
REPR_ENV.add(t)
rstr = match(t, ("TVar(a)", _get_name),
("TPrim(PInt())", lambda: 'int'),
("TPrim(PStr())", lambda: 'str'),
("TPrim(PChar())", lambda: 'char'),
("TPrim(PBool())", lambda: 'bool'),
("TVoid()", lambda: 'void'),
("TTuple(ts)", lambda ts: '(%s)' %
(', '.join(_type_repr(t) for t in ts),)),
("TFunc(s, r)", lambda s, r: '(%s)' %
(' -> '.join(_type_repr(t) for t in s + [r]),)),
("TData(d, [])", _get_name),
("_", lambda: '<bad type %s>' % type(t)))
REPR_ENV.remove(t)
return rstr
def _cyclic_check_type_repr(t):
global REPR_ENV
REPR_ENV = set()
r = _type_repr(t)
REPR_ENV = None
return r
def _inject_type_reprs():
temp = locals().copy()
for t in temp:
if len(t) > 1 and t[0] == 'T' and t[1].lower() != t[1]:
temp[t].__repr__ = _cyclic_check_type_repr
_inject_type_reprs()
def map_type_vars(f, t):
"""Applies f to every typevar in the given type."""
return match(t, ("tv==TVar(_)", f),
("TFunc(args, ret)", lambda args, ret:
TFunc([map_type_vars(f, a) for a in args],
map_type_vars(f, ret))),
("TTuple(ts)", lambda ts:
TTuple([map_type_vars(f, t) for t in ts])),
("TArray(t)", lambda t: TArray(map_type_vars(f, t))),
("TWeak(t)", lambda t: TWeak(map_type_vars(f, t))),
("_", lambda: t))
def _var(n): return TVar(n)
# Tuples are a shortcut for functions
builtins_types = {
'True': TBool, 'False': TBool,
'not': (TBool, TBool),
'ord': (TChar, TInt),
'+': (TInt, TInt, TInt), '-': (TInt, TInt, TInt),
'*': (TInt, TInt, TInt), '//': (TInt, TInt, TInt), '%': (TInt, TInt, TInt),
'negate': (TInt, TInt),
'&': (TInt, TInt, TInt), '|': (TInt, TInt, TInt), '^': (TInt, TInt, TInt),
'==': (TInt, TInt, TBool), '!=': (TInt, TInt, TBool),
'<': (TInt, TInt, TBool), '>': (TInt, TInt, TBool),
'<=': (TInt, TInt, TBool), '>=': (TInt, TInt, TBool),
'is': (_var(1), _var(1), TBool), 'is not': (_var(1), _var(1), TBool),
}
# vi: set sw=4 ts=4 sts=4 tw=79 ai et nocindent:
|
Python
| 0.999965
|
@@ -2984,16 +2984,140 @@
)%22, f),%0A
+ (%22TData(dt, ts)%22, lambda dt, ts:%0A TData(dt, %5Bmap_type_vars(f, t) for t in ts%5D)),%0A
|
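The "terrible, stupid omission": map_type_vars never recursed into TData type arguments, so typevars inside data types were silently left untouched. The hunk adds one match case (decoded):

                 ("TData(dt, ts)", lambda dt, ts:
                     TData(dt, [map_type_vars(f, t) for t in ts])),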
d4adaaf52a81c0d471657672fee5b5ed2ad4e306
|
update export agency stats
|
export_agency_stats.py
|
export_agency_stats.py
|
#!/usr/bin/env python2
import requests
import unicodecsv
from utils import get_api_key
token = get_api_key()
url = 'https://www.muckrock.com/api_v1/'
headers = {'Authorization': 'Token %s' % token, 'content-type': 'application/json'}
next_ = url + 'agency'
fields = (
"id",
"name",
"slug",
"status",
"twitter",
"twitter_handles",
"parent",
"appeal_agency",
"url",
"foia_logs",
"foia_guide",
"public_notes",
"absolute_url",
"average_response_time",
"fee_rate",
"success_rate",
"has_portal",
"has_email",
"has_fax",
"has_address",
"number_requests",
"number_requests_completed",
"number_requests_rejected",
"number_requests_no_docs",
"number_requests_ack",
"number_requests_resp",
"number_requests_fix",
"number_requests_appeal",
"number_requests_pay",
"number_requests_partial",
"number_requests_lawsuit",
"number_requests_withdrawn"
)
jurisdiction_fields = (
'name',
'parent',
'level',
)
page = 1
# This allows you to cache jurisdiction lookups
jurisdictions = {}
def get_jurisdiction(jurisdiction_id):
global jurisdictions
if jurisdiction_id in jurisdictions:
return jurisdictions[jurisdiction_id]
else:
# print 'getting jurisdiction', jurisdiction_id
r = requests.get(url + 'jurisdiction/' + str(jurisdiction_id), headers=headers)
jurisdiction_json = r.json()
        if jurisdiction_json['parent']: # USA has no parent
parent = get_jurisdiction(jurisdiction_json['parent'])
jurisdiction_json['parent'] = parent['name'] # replace parent id with parent name in jurisdiction json
jurisdictions[jurisdiction_id] = jurisdiction_json
return jurisdiction_json
csv_file = open('agency_stats.csv', 'w')
csv_writer = unicodecsv.writer(csv_file)
jurisdiction_field_names = tuple('jurisdiction {}'.format(f) for f in jurisdiction_fields)
csv_writer.writerow(fields + jurisdiction_field_names)
while next_ is not None:
r = requests.get(next_, headers=headers)
try:
json = r.json()
next_ = json['next']
for datum in json['results']:
agency_values = [datum[field] for field in fields]
jurisdiction = get_jurisdiction(datum['jurisdiction'])
jurisdiction_values = [jurisdiction[field] for field in jurisdiction_fields]
csv_writer.writerow(agency_values + jurisdiction_values)
print 'Page %d of %d' % (page, json['count'] / 20 + 1)
break
page += 1
except Exception as e:
print 'Error', e
|
Python
| 0
|
@@ -17,16 +17,40 @@
ython2%0A%0A
+from time import sleep%0A%0A
import r
@@ -1217,16 +1217,102 @@
ge = 1%0A%0A
+# make this true while exporting data to not crash on errors%0ASUPRESS_ERRORS = False %0A%0A
# This a
@@ -1588,16 +1588,46 @@
tion_id%0A
+ sleep(1) # rate limit%0A
@@ -2832,22 +2832,8 @@
1)%0A
- break%0A
@@ -2873,16 +2873,16 @@
n as e:%0A
-
@@ -2898,8 +2898,57 @@
ror', e%0A
+ if not SUPRESS_ERRORS:%0A raise%0A
|
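Decoded, the script gains a one-second pause per jurisdiction fetch, drops the stray break that ended the export after a single page, and re-raises errors unless a SUPRESS_ERRORS flag (spelled as in the commit) is set. The changed fragments, with their locations noted in comments:

from time import sleep

# make this true while exporting data to not crash on errors
SUPRESS_ERRORS = False

# inside get_jurisdiction(), before the requests.get call:
        sleep(1)  # rate limit

# and in the paging loop's error handler:
    except Exception as e:
        print 'Error', e
        if not SUPRESS_ERRORS:
            raise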
70249d061e92f6d1e7254a9c9899c40f3c2083c8
|
Add Route.from_environ.
|
webstar/core.py
|
webstar/core.py
|
"""Module containing tools to assist in building of WSGI routers.
This routing system works by tracking the UNrouted part of the request, and
watching how it changes as it passes through various routers.
"""
import collections
import logging
import posixpath
log = logging.getLogger(__name__)
HISTORY_ENVIRON_KEY = 'webstar.route'
def normalize_path(*segments):
path = '/'.join(x for x in segments if x)
if not path:
return ''
return '/' + posixpath.normpath(path).lstrip('/')
GenerateStep = collections.namedtuple('GenerateStep', 'segment head'.split())
RouteStep = collections.namedtuple('RouteStep', 'head consumed unrouted data router')
class Route(list):
def __init__(self, path, steps):
self.append(RouteStep(
unrouted=path,
head=None,
consumed=None,
data={},
router=None,
))
self.extend(steps)
def url_for(self, _strict=True, **kwargs):
for i, chunk in enumerate(self):
if chunk.router is not None:
data = self.data.copy()
data.update(kwargs)
url = chunk.router.generate(data)
if _strict and not url:
raise GenerationError('could not generate URL for %r, relative to %r' % (data, self[0].unrouted))
return url
if _strict:
raise GenerationError('no routers')
@property
def consumed(self):
return ''.join(x.consumed or '' for x in self)
@property
def app(self):
return self[-1].head
@property
def unrouted(self):
return self[-1].unrouted
@property
def data(self):
data = {}
for step in self:
data.update(step.data)
return data
def __repr__(self):
return '<%s:%s>' % (self.__class__.__name__, list.__repr__(self))
def get_route_data(environ):
route = environ.get(HISTORY_ENVIRON_KEY, None)
return route.data if route else {}
class GenerationError(ValueError):
pass
class RouterInterface(object):
def __repr__(self):
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
def route_step(self, path):
"""Yield a RouteStep for each possible route from this node."""
raise NotImplementedError()
def generate_step(self, data):
"""Yield a GenerateStep for each possible route from this node."""
raise NotImplementedError()
def route(self, path):
"""Route a given path, starting at this router."""
path = normalize_path(path)
steps = self._route(self, path)
if not steps:
return
return Route(path, steps)
def _route(self, node, path):
log.debug('_route: %r, %r' % (node, path))
if not isinstance(node, RouterInterface):
return []
for step in node.route_step(path):
res = self._route(step.head, step.unrouted)
if res is not None:
return [step] + res
def __call__(self, environ, start):
route = self.route(environ.get('PATH_INFO', ''))
if route is None:
return self.not_found_app(environ, start)
# Build up wsgi.routing_args data
args, kwargs = environ.setdefault('wsgiorg.routing_args', ((), {}))
for step in route:
kwargs.update(step.data)
environ[HISTORY_ENVIRON_KEY] = route
environ['PATH_INFO'] = route.unrouted
environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + route.consumed
return route.app(environ, start)
def not_found_app(self, environ, start):
start('404 Not Found', [('Content-Type', 'text/html')])
return ['''
<html><head>
<title>404 Not Found</title>
</head><body>
<h1>Not Found</h1>
<p>The requested URL %s was not found on this server.</p>
</body></html>
'''.strip() % environ.get('PATH_INFO', 'UNKNOWN')]
def generate(self, *args, **kwargs):
data = dict()
for arg in args:
data.update(arg)
data.update(kwargs)
steps = self._generate(self, data)
if not steps:
return
return normalize_path('/'.join(step.segment for step in steps))
def _generate(self, node, data):
data = data.copy()
log.debug('_generate: %r, %r' % (node, data))
if not isinstance(node, RouterInterface):
return []
for step in node.generate_step(data):
res = self._generate(step.head, data)
if res is not None:
return [step] + res
def url_for(self, _strict=True, **data):
url = self.generate(data)
if _strict and not url:
raise GenerationError('could not generate URL for %r' % data)
return url
|
Python
| 0
|
@@ -685,32 +685,134 @@
ute(list):%0A %0A
+ @staticmethod%0A def from_environ(environ):%0A return environ.get(HISTORY_ENVIRON_KEY)%0A %0A
def __init__
|
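Decoded, Route gains a static constructor that pulls a previously stored route out of a WSGI environ, complementing the module-level get_route_data helper:

class Route(list):

    @staticmethod
    def from_environ(environ):
        return environ.get(HISTORY_ENVIRON_KEY)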
24f485f256279e2fc86c12cdf383c93850f7f328
|
Add utility method to check if message sender is admin
|
IrcMessage.py
|
IrcMessage.py
|
import time
import Constants
class IrcMessage(object):
"""Parses incoming messages into usable parts like the command trigger"""
def __init__(self, messageType, bot, user=None, source=None, rawText=""):
self.createdAt = time.time()
#MessageType is what kind of message it is. A 'say', 'action' or 'quit', for instance
self.messageType = messageType
self.bot = bot
#Info about the user that sent the message
self.user = user
if self.user and '!' in self.user:
self.userNickname, self.userAddress = self.user.split("!", 1)
else:
self.userNickname = None
self.userAddress = None
#Info about the source the message came from, either a channel, or a PM from a user
#If there is no source provided, or the source isn't a channel, assume it's a PM
if not source or source[0] not in Constants.CHANNEL_PREFIXES:
self.source = self.userNickname
self.isPrivateMessage = True
else:
self.source = source
self.isPrivateMessage = False
#Handle the text component, including seeing if it starts with the bot's command character
self.rawText = rawText.strip()
#There isn't always text
if not self.rawText:
self.trigger = None
self.message = ""
self.messageParts = []
self.messagePartsLength = 0
else:
#Collect information about the possible command in this message
if self.rawText.startswith(bot.commandPrefix):
#Get the part from the end of the command prefix to the first space (the 'help' part of '!help say')
self.trigger = self.rawText[bot.commandPrefixLength:].split(" ", 1)[0].lower()
self.message = self.rawText[bot.commandPrefixLength + len(self.trigger):].lstrip()
#Check if the text starts with the nick of the bot, 'DideRobot: help'
elif bot.nickname and self.rawText.startswith(bot.nickname + ": ") and len(self.rawText) > len(bot.nickname) + 2:
self.trigger = self.rawText.split(" ", 2)[1].strip().lower()
self.message = self.rawText[len(bot.nickname) + len(self.trigger) + 3:].lstrip() #+3 because of the colon and space
#In private messages we should respond too if there's no command character, because there's no other reason to PM a bot
elif self.isPrivateMessage:
self.trigger = self.rawText.split(" ", 1)[0].lower()
self.message = self.rawText[len(self.trigger)+1:]
else:
self.trigger = None
self.message = self.rawText
if self.message:
self.messageParts = self.message.split(" ")
self.messagePartsLength = len(self.messageParts)
else:
self.messageParts = []
self.messagePartsLength = 0
def reply(self, replytext, messagetype=None):
if not messagetype:
#Reply with a notice to a user's notice (not a channel one!), and with a 'say' to anything else
messagetype = 'notice' if self.messageType == 'notice' and self.isPrivateMessage else 'say'
self.bot.sendMessage(self.source, replytext, messagetype)
|
Python
| 0.000005
|
@@ -2866,8 +2866,210 @@
getype)%0A
+%0A%09def isSenderAdmin(self):%0A%09%09%22%22%22%0A%09%09:return: True if the person that sent this message is a bot admin, False otherwise%0A%09%09%22%22%22%0A%09%09return self.bot.isUserAdmin(self.user, self.userNickname, self.userAddress)%0A
|
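Decoded (the source indents with tabs; spaces are used here), the new utility method simply delegates the admin check to the bot object:

    def isSenderAdmin(self):
        """
        :return: True if the person that sent this message is a bot admin, False otherwise
        """
        return self.bot.isUserAdmin(self.user, self.userNickname, self.userAddress)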
cfb5f0692bc381507691e8bcaa85a61ec37f8c12
|
change validator to test for valid source case insensitive
|
anyway/backend_constants.py
|
anyway/backend_constants.py
|
from enum import Enum
from typing import List
class BackEndConstants(object):
MARKER_TYPE_ACCIDENT = 1
MARKER_TYPE_DISCUSSION = 2
CBS_ACCIDENT_TYPE_1_CODE = 1
UNITED_HATZALA_CODE = 2
CBS_ACCIDENT_TYPE_3_CODE = 3
RSA_PROVIDER_CODE = 4
BIKE_ACCIDENTS = 21
AGE_GROUPS_NUMBER = 18
ALL_AGE_GROUPS_LIST = list(range(1, AGE_GROUPS_NUMBER + 1)) + [99]
# This is a type for the field 'injury_severity' in the table 'involved_markers_hebrew'
class InjurySeverity:
DEAD = 1
SEVERE = 2
LIGHT = 3
# This is a type for the 'accident_severity' table field name
class AccidentSeverity:
FATAL = 1
SEVERE = 2
LIGHT = 3
class AccidentType:
PEDESTRIAN_INJURY = 1
COLLISION_OF_FRONT_TO_SIDE = 2
COLLISION_OF_FRONT_TO_REAR_END = 3
COLLISION_OF_SIDE_TO_SIDE_LATERAL = 4
HEAD_ON_FRONTAL_COLLISION = 5
COLLISION_WITH_A_STOPPED_NON_PARKED_VEHICLE = 6
COLLISION_WITH_A_PARKED_VEHICLE = 7
COLLISION_WITH_AN_INANIMATE_OBJECT = 8
SWERVING_OFF_THE_ROAD_OR_ONTO_THE_PAVEMENT = 9
OVERTURNED_VEHICLE = 10
SKID = 11
INJURY_OF_A_PASSENGER_IN_A_VEHICLE = 12
A_FALL_FROM_A_MOVING_VEHICLE = 13
FIRE = 14
OTHER = 15
COLLISION_OF_REAR_END_TO_FRONT = 17
COLLISION_OF_REAR_END_TO_SIDE = 18
COLLISION_WITH_AN_ANIMAL = 19
DAMAGE_CAUSED_BY_A_FALLING_LOAD_OFF_A_VEHICLE = 20
class DriverType:
PROFESSIONAL_DRIVER = 1
PRIVATE_VEHICLE_DRIVER = 2
OTHER_DRIVER = 3
# This class should be correlated with the Roles table
class Roles2Names(Enum):
Admins = "admins"
Or_yarok = "or_yarok"
# This is a type for the 'road_type' table field name
ROAD_TYPE_NOT_IN_CITY_IN_INTERSECTION = 3
ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION = 4
NON_CITY_ROAD_TYPES = [
ROAD_TYPE_NOT_IN_CITY_IN_INTERSECTION,
ROAD_TYPE_NOT_IN_CITY_NOT_IN_INTERSECTION,
]
# other global constants (python only)
DEFAULT_NUMBER_OF_YEARS_AGO = 5
# years ago to store in cache
INFOGRAPHICS_CACHE_YEARS_AGO = [1, 3, 5, 8]
SOURCE_MAPPING = {"walla": "וואלה", "twitter": "מד״א", "ynet": "ynet"}
UNKNOWN = "UNKNOWN"
DEFAULT_REDIRECT_URL = "https://anyway-infographics.web.app/"
ANYWAY_CORS_SITE_LIST_PROD = [
"https://anyway-infographics-staging.web.app/*",
"https://anyway-infographics.web.app/*",
"https://www.anyway.co.il/*",
"https://anyway-infographics-demo.web.app/*",
]
ANYWAY_CORS_SITE_LIST_DEV = ANYWAY_CORS_SITE_LIST_PROD + [
"https://dev.anyway.co.il/*",
"http://localhost:3000/*",
"https://localhost:3000/*",
"http://127.0.0.1:3000/*",
"https://127.0.0.1:3000/*",
]
class ResolutionCategories(Enum):
REGION = "מחוז"
DISTRICT = "נפה"
CITY = "עיר"
STREET = "רחוב"
URBAN_JUNCTION = "צומת עירוני"
SUBURBAN_ROAD = "כביש בינעירוני"
SUBURBAN_JUNCTION = "צומת בינעירוני"
OTHER = "אחר"
SUPPORTED_RESOLUTIONS: List[ResolutionCategories] = [
ResolutionCategories.STREET,
ResolutionCategories.SUBURBAN_ROAD,
]
class Source(Enum):
YNET = "ynet"
WALLA = "walla"
TWITTER = "twitter"
SUPPORTED_SOURCES: List[Source] = [
Source.YNET,
Source.WALLA,
Source.TWITTER,
]
# If in the future there will be a number of organizations or a need for a dynamic setting change, move this
# data to a table in the DB.
OR_YAROK_WIDGETS = [
"accident_count_by_severity",
"most_severe_accidents_table",
"most_severe_accidents",
"vision_zero_2_plus_1",
"head_on_collisions_comparison",
]
BE_CONST = BackEndConstants()
|
Python
| 0.000001
|
@@ -3295,32 +3295,203 @@
s Source(Enum):%0A
+ @classmethod%0A def _missing_(cls, value):%0A for member in cls:%0A if member.value == value.lower():%0A return member%0A
YNET = %22
|
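Decoded, the Source enum gains Enum's _missing_ hook, so a lookup like Source("YNET") falls back to a case-insensitive match on the member values instead of raising ValueError:

class Source(Enum):
    @classmethod
    def _missing_(cls, value):
        for member in cls:
            if member.value == value.lower():
                return member

    YNET = "ynet"
    WALLA = "walla"
    TWITTER = "twitter"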
6641fd1275c27dfb27787ed25b80af3b6ba14b9f
|
debug by further reduction
|
apdflash/scarecrowDreams.py
|
apdflash/scarecrowDreams.py
|
import sys,os
sys.path.insert(0, '../helpers')
from mpi4py import MPI
|
Python
| 0
|
@@ -1,72 +1,22 @@
-import sys,os%0Asys.path.insert(0, '../helpers')%0A%0Afrom mpi4py import MPI
+print %22hellow world%22
%0A%0A
|
14b1f9bde45b66f8752778469f1daae77b49f4e0
|
Add comment
|
bluebottle/bb_orders/signals.py
|
bluebottle/bb_orders/signals.py
|
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.dispatch.dispatcher import Signal
from django_fsm.signals import post_transition
from bluebottle.donations.models import Donation
from bluebottle.payments.models import OrderPayment
from bluebottle.payments.services import PaymentService
from bluebottle.utils.utils import StatusDefinition
order_requested = Signal(providing_args=["order"])
@receiver(post_save, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount_post_save(sender, instance, **kwargs):
instance.order.update_total()
@receiver(post_delete, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount(sender, instance, **kwargs):
# If we're deleting order and donations do nothing.
# If we're just deleting a donation then we should update the order total.
from bluebottle.orders.models import Order
try:
instance.order.update_total()
except Order.DoesNotExist:
pass
@receiver(post_transition, sender=OrderPayment)
def _order_payment_status_changed(sender, instance, **kwargs):
"""
TODO: Here we need to get the status from the Order Payment and update the
associated Order.
"""
# Get the Order from the OrderPayment
order = instance.order
# Get the mapped status OrderPayment to Order
new_order_status = order.get_status_mapping(kwargs['target'])
order.transition_to(new_order_status)
@receiver(order_requested)
def _order_requested(sender, order, **kwargs):
# Check the status at PSP if status is still locked
if order.status == StatusDefinition.LOCKED:
order_payment = OrderPayment.get_latest_by_order(order)
service = PaymentService(order_payment)
service.check_payment_status()
|
Python
| 0
|
@@ -919,16 +919,64 @@
r total.
+%0A%0A # Import it here to avoid circular imports
%0A fro
|
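Decoded, this commit only adds a comment explaining why the Order import lives inside the signal handler rather than at module level:

    # Import it here to avoid circular imports
    from bluebottle.orders.models import Order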
5e371a6aea1c3c0eb126849ef4c5855202b05cfa
|
Use standard name for output files
|
packages/cardpay-reward-programs/cardpay_reward_programs/utils.py
|
packages/cardpay-reward-programs/cardpay_reward_programs/utils.py
|
import tempfile
from pathlib import PosixPath
import pyarrow.parquet as pq
import yaml
from cloudpathlib import AnyPath, CloudPath
from cachetools import cached, TTLCache
def get_local_file(file_location):
if isinstance(file_location, PosixPath):
return file_location.as_posix()
elif isinstance(file_location, CloudPath):
if file_location._local.exists():
# Our files are immutable so if the local cache exists
# we can just return that
return file_location._local.as_posix()
else:
# Otherwise this downloads the file and returns the local path
return file_location.fspath
else:
raise Exception("Unsupported path type")
@cached(TTLCache(maxsize=1000, ttl=60))
def get_latest_details(config_location):
with open(config_location / "latest.yaml", "r") as stream:
return yaml.safe_load(stream)
def get_partition_iterator(min_partition, max_partition, partition_sizes):
for partition_size in sorted(partition_sizes, reverse=True):
start_partition_allowed = (min_partition // partition_size) * partition_size
end_partition_allowed = (max_partition // partition_size) * partition_size
last_max_partition = None
for start_partition in range(
start_partition_allowed, end_partition_allowed, partition_size
):
last_max_partition = start_partition + partition_size
yield partition_size, start_partition, start_partition + partition_size
if last_max_partition is not None:
min_partition = last_max_partition
def get_partition_files(config_location, table, min_partition, max_partition):
# Get config
with open(get_local_file(config_location / "config.yaml"), "r") as stream:
config = yaml.safe_load(stream)
latest = get_latest_details(config_location)
latest_block = latest.get("latest_block")
# Get table
table_config = config["tables"][table]
partition_sizes = sorted(table_config["partition_sizes"], reverse=True)
table_dir = config_location.joinpath(
"data", f"subgraph={latest['subgraph_deployment']}", f"table={table}"
)
files = []
for partition_size, start_partition, end_partition in get_partition_iterator(
min_partition, latest_block, partition_sizes):
if start_partition < max_partition:
files.append(table_dir.joinpath(
f"partition_size={partition_size}",
f"start_partition={start_partition}",
f"end_partition={end_partition}",
"data.parquet",
))
return files
def get_files(config_location, table, min_partition, max_partition):
file_list = get_partition_files(AnyPath(config_location), table, min_partition, max_partition)
return list(map(get_local_file, file_list))
def get_parameters(parameters):
"""
TODO: take hex blob as input instead of parameters
"""
core_parameters = parameters.get("core")
user_defined_parameters = parameters.get("user_defined")
return core_parameters, user_defined_parameters
def get_payment_cycle(start_block, end_block, payment_cycle_length):
"""
by default, the payment cycle is the tail of the compute range
"""
return max(end_block, start_block + payment_cycle_length)
def write_parquet_file(file_location, table):
# Pyarrow can't take a file object so we have to write to a temp file
# and upload directly
if isinstance(file_location, CloudPath):
with tempfile.TemporaryDirectory() as temp_dir:
pq_file_location = AnyPath(temp_dir).joinpath("data.parquet")
pq.write_table(table, pq_file_location)
file_location.joinpath("data.parquet").upload_from(pq_file_location)
else:
pq.write_table(table, file_location / "results.parquet")
|
Python
| 0.000009
|
@@ -3638,31 +3638,27 @@
emp_dir)
-.joinpath(%22data
+ / %22results
.parquet
@@ -3658,17 +3658,16 @@
parquet%22
-)
%0A
@@ -3747,20 +3747,23 @@
inpath(%22
-data
+results
.parquet
|
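Unescaped, the diff standardizes the uploaded file name: the CloudPath branch previously staged "data.parquet" while the local branch already wrote "results.parquet". A reconstruction of the patched function, with the imports declared at the top of the file; indentation is approximated:

import tempfile

import pyarrow.parquet as pq
from cloudpathlib import AnyPath, CloudPath


def write_parquet_file(file_location, table):
    # Pyarrow can't take a file object, so stage to a temp file and upload
    if isinstance(file_location, CloudPath):
        with tempfile.TemporaryDirectory() as temp_dir:
            pq_file_location = AnyPath(temp_dir) / "results.parquet"
            pq.write_table(table, pq_file_location)
            file_location.joinpath("results.parquet").upload_from(pq_file_location)
    else:
        pq.write_table(table, file_location / "results.parquet")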
e1b9bcccbc3d4f29a7f7c66623f695bd062cb8e1
|
clean up usage of query file in collector
|
synt/collector.py
|
synt/collector.py
|
# -*- coding: utf-8 -*-
import time, datetime
import os
import bz2
import urllib2
from sqlite3 import IntegrityError
from cStringIO import StringIO
from synt.utils.db import db_init
from synt import config
from kral import stream
def collect(db_name='', commit_every=1000, max_collect=400000, queries_file=''):
"""
Will continuously populate the sample database if it exists
else it will create a new one.
Keyword Arguments:
db_name (str) -- Custom name for database.
commit_every (int) -- Commit to sqlite after commit_every executes.
max_collect (int) -- Will stop collecting at this number.
queries_file (str) -- If queries file is provided should be a path to a text file
containing the queries in the format:
label
query1
queryN
"""
if not db_name:
d = datetime.datetime.now()
#if no dbname is provided we'll store a timestamped db name
db_name = "samples-%s-%s-%s.db" % (d.year, d.month, d.day)
db = db_init(db=db_name)
cursor = db.cursor()
queries = {}
if queries_file:
try:
f = open(queries_file)
words = [line.strip() for line in f.readlines()]
label = words[0]
for w in words:
queries[w] = label
except IOError:
pass
else:
queries[':)'] = 'positive'
queries[':('] = 'negative'
#collect on twitter with kral
g = stream(query_list=queries.keys(), service_list="twitter")
c = 0
for item in g:
text = unicode(item['text'])
sentiment = queries.get(item['query'], None)
if sentiment:
try:
cursor.execute('INSERT INTO item VALUES (NULL,?,?)', [text, sentiment])
c += 1
if c % commit_every == 0:
db.commit()
print("Commited {}".format(commit_every))
if c == max_collect:
break
except IntegrityError: #skip duplicates
continue
db.close()
def import_progress():
global logger, output_count, prcount
try:
prcount
output_count
except:
prcount=0
output_count = 500000
prcount += 20
output_count += 20
if output_count >= 500000:
output_count = 0
percent = round((float(prcount) / 40423300 )*100, 2)
print("Processed %s of 40423300 records (%0.2f%%)" % (prcount,percent))
return 0
def fetch(db_name='samples.db'):
"""
Pre-populates training database from public archive of ~2mil tweets.
Stores training database as db_name in ~/.synt/
Keyword Arguments:
db_name (str) -- Custom name for database.
"""
response = urllib2.urlopen('https://github.com/downloads/Tawlk/synt/sample_data.bz2')
total_bytes = int(response.info().getheader('Content-Length').strip())
saved_bytes = 0
start_time = time.time()
last_seconds = 0
last_seconds_start = 0
data_buffer = StringIO()
decompressor = bz2.BZ2Decompressor()
fp = os.path.join(os.path.expanduser(config.DB_PATH), db_name)
if os.path.exists(fp):
os.remove(fp)
db = db_init(db=db_name, create=False)
db.set_progress_handler(import_progress,20)
while True:
seconds = (time.time() - start_time)
chunk = response.read(8192)
if not chunk:
break
saved_bytes += len(chunk)
data_buffer.write(decompressor.decompress(chunk))
if seconds > 1:
percent = round((float(saved_bytes) / total_bytes)*100, 2)
speed = round((float(total_bytes / seconds ) / 1024),2)
speed_type = 'Kb/s'
if speed > 1000:
speed = round((float(total_bytes / seconds ) / 1048576),2)
speed_type = 'Mb/s'
if last_seconds >= 0.5:
last_seconds = 0
last_seconds_start = time.time()
print("Downloaded %d of %d Mb, %s%s (%0.2f%%)\r" % (saved_bytes/1048576, total_bytes/1048576, speed, speed_type, percent))
else:
last_seconds = (time.time() - last_seconds_start)
if saved_bytes == total_bytes:
print("Downloaded %d of %d Mb, %s%s (100%%)\r" % (saved_bytes/1048576, total_bytes/1048576, speed, speed_type))
try:
db.executescript(data_buffer.getvalue())
except Exception, e:
print("Sqlite3 import failed with: %s" % e)
break
if __name__ == '__main__':
max_collect = 2000000
commit_every = 500
qf = 'negwords.txt'
collect(commit_every = commit_every, max_collect = max_collect, queries_file=qf)
|
Python
| 0.000001
|
@@ -287,27 +287,25 @@
400000, quer
-ies
+y
_file=''):%0A
@@ -619,27 +619,25 @@
er.%0A quer
-ies
+y
_file (str)
@@ -646,19 +646,17 @@
If quer
-ies
+y
file is
@@ -676,16 +676,23 @@
uld be a
+bsolute
path to
@@ -692,18 +692,16 @@
path to
-a
text fil
@@ -705,172 +705,9 @@
file
-%0A containing the queries in the format:%0A%0A label%0A query1%0A queryN%0A
+.
%0A
@@ -988,24 +988,65 @@
if quer
-ies
+y_file:%0A if not os.path.exists(query
_file
+)
:%0A
@@ -1039,36 +1039,76 @@
_file):%0A
+
-try:
+ return %22Query file path does not exist.%22
%0A f =
@@ -1100,20 +1100,25 @@
+%0A
+
f = open
@@ -1118,27 +1118,25 @@
= open(quer
-ies
+y
_file)%0A
@@ -1138,20 +1138,16 @@
-
-
words =
@@ -1195,20 +1195,16 @@
-
label =
@@ -1220,20 +1220,16 @@
-
-
for w in
@@ -1236,20 +1236,16 @@
words:%0A
-
@@ -1270,49 +1270,8 @@
abel
-%0A except IOError:%0A pass
%0A%0A
|
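Unescaped, the diff renames queries_file to query_file, rewrites its docstring to describe an absolute path to a text file, and replaces the silent IOError handler with an explicit existence check. A reconstruction of the patched query-loading block:

def collect(db_name='', commit_every=1000, max_collect=400000, query_file=''):
    ...
    queries = {}

    if query_file:
        if not os.path.exists(query_file):
            return "Query file path does not exist."

        f = open(query_file)
        words = [line.strip() for line in f.readlines()]
        label = words[0]
        for w in words:
            queries[w] = label
    else:
        queries[':)'] = 'positive'
        queries[':('] = 'negative'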
cd0123b2cce81c063f42ff5a9f80665b602bdefd
|
use the right product
|
addons/hr_timesheet_project/wizard/timesheet_hour_encode.py
|
addons/hr_timesheet_project/wizard/timesheet_hour_encode.py
|
##############################################################################
#
# Copyright (c) 2004 TINY SPRL. (http://tiny.be) All Rights Reserved.
# Fabien Pinckaers <fp@tiny.Be>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import wizard
import netsvc
import time
import pooler
from osv import osv
def _action_line_create(self, cr, uid, data, context):
tw = pooler.get_pool(cr.dbname).get('project.task.work')
ids = tw.search(cr, uid, [('user_id','=',uid), ('date','>=',time.strftime('%Y-%m-%d 00:00:00')), ('date','<=',time.strftime('%Y-%m-%d 23:59:59'))])
ts = pooler.get_pool(cr.dbname).get('hr.analytic.timesheet')
for work in tw.browse(cr, uid, ids, context):
if work.task_id.project_id.category_id:
unit_id = ts._getEmployeeUnit(cr, uid, context)
product_id = ts._getEmployeeUnit(cr, uid, context)
res = {
'name': work.name,
'date': time.strftime('%Y-%m-%d'),
'unit_amount': work.hours,
'product_uom_id': unit_id,
'product_id': product_id,
'amount': work.hours or 0.0,
'account_id': work.task_id.project_id.category_id.id
}
res2 = ts.on_change_unit_amount(cr, uid, False, product_id, work.hours or 0.0,unit_id, context)
if res2:
res.update(res2['value'])
id = ts.create(cr, uid, res, context)
else:
print 'not found', work.task_id.project_id.name
value = {
'domain': "[('user_id','=',%d),('date','>=','%s'), ('date','<=','%s')]" % (uid, time.strftime('%Y-%m-%d 00:00:00'), time.strftime('%Y-%m-%d 23:59:59')),
'name': 'Create Analytic Line',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.analytic.timesheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
return value
class wiz_hr_timesheet_project(wizard.interface):
states = {
'init': {
'actions': [],
'result': {'type': 'action', 'action': _action_line_create, 'state':'end'}
}
}
wiz_hr_timesheet_project('hr_timesheet_project.encode.hour')
|
Python
| 0.000214
|
@@ -1923,35 +1923,38 @@
ts._getEmployee
-Uni
+Produc
t(cr, uid, conte
|
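Decoded, the fix is a one-token copy-paste repair: product_id was being fetched with the unit getter. After the patch:

unit_id = ts._getEmployeeUnit(cr, uid, context)
product_id = ts._getEmployeeProduct(cr, uid, context)  # was _getEmployeeUnit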
5a9358147c9930faf5b6d153344fb012c4a8f304
|
Add MSISDN in the phone_number provider
|
faker/providers/phone_number/pt_BR/__init__.py
|
faker/providers/phone_number/pt_BR/__init__.py
|
from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
formats = (
'+55 (011) #### ####',
'+55 (021) #### ####',
'+55 (031) #### ####',
'+55 (041) #### ####',
'+55 (051) #### ####',
'+55 (061) #### ####',
'+55 (071) #### ####',
'+55 (081) #### ####',
'+55 11 #### ####',
'+55 21 #### ####',
'+55 31 #### ####',
'+55 41 #### ####',
'+55 51 ### ####',
'+55 61 #### ####',
'+55 71 #### ####',
'+55 81 #### ####',
'+55 (011) ####-####',
'+55 (021) ####-####',
'+55 (031) ####-####',
'+55 (041) ####-####',
'+55 (051) ####-####',
'+55 (061) ####-####',
'+55 (071) ####-####',
'+55 (081) ####-####',
'+55 11 ####-####',
'+55 21 ####-####',
'+55 31 ####-####',
'+55 41 ####-####',
'+55 51 ### ####',
'+55 61 ####-####',
'+55 71 ####-####',
'+55 81 ####-####',
'(011) #### ####',
'(021) #### ####',
'(031) #### ####',
'(041) #### ####',
'(051) #### ####',
'(061) #### ####',
'(071) #### ####',
'(081) #### ####',
'11 #### ####',
'21 #### ####',
'31 #### ####',
'41 #### ####',
'51 ### ####',
'61 #### ####',
'71 #### ####',
'81 #### ####',
'(011) ####-####',
'(021) ####-####',
'(031) ####-####',
'(041) ####-####',
'(051) ####-####',
'(061) ####-####',
'(071) ####-####',
'(081) ####-####',
'11 ####-####',
'21 ####-####',
'31 ####-####',
'41 ####-####',
'51 ### ####',
'61 ####-####',
'71 ####-####',
'81 ####-####',
'#### ####',
'####-####',
)
|
Python
| 0.000001
|
@@ -1938,8 +1938,398 @@
',%0A )
+%0A msisdn_formats = (%0A '5511#########',%0A '5521#########',%0A '5531#########',%0A '5541#########',%0A '5551#########',%0A '5561#########',%0A '5571#########',%0A '5581#########',%0A )%0A%0A @classmethod%0A def msisdn(cls):%0A %22%22%22 https://en.wikipedia.org/wiki/MSISDN %22%22%22%0A return cls.numerify(cls.random_element(cls.msisdn_formats))%0A
|
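Decoded, the diff appends an msisdn_formats tuple (country code 55, a two-digit area code, nine digits) plus an msisdn() classmethod to the provider:

    msisdn_formats = (
        '5511#########',
        '5521#########',
        '5531#########',
        '5541#########',
        '5551#########',
        '5561#########',
        '5571#########',
        '5581#########',
    )

    @classmethod
    def msisdn(cls):
        """ https://en.wikipedia.org/wiki/MSISDN """
        return cls.numerify(cls.random_element(cls.msisdn_formats))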
d8778ac6f6f0b99cacf5f08c28422b7ffe88d420
|
Refactor test list credential
|
tests/test.py
|
tests/test.py
|
import os
import shutil
import sys
import unittest
import gnupg
TEST_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pysswords.db
import pysswords.crypt
from pysswords.utils import touch
class PysswordsTests(unittest.TestCase):
def setUp(self):
self.database_path = os.path.join(TEST_DIR, ".pysswords")
self.gnupg_path = os.path.join(self.database_path, ".gnupg")
pysswords.db.init(database_path=self.database_path)
def tearDown(self):
shutil.rmtree(self.database_path)
def some_credential(self, **kwargs):
return {
"name": kwargs.get("name", "example"),
"login": kwargs.get("login", "john"),
"password": kwargs.get("password", "my-great-password"),
"login_url": kwargs.get("login_url", "http://example.org/login"),
"description": kwargs.get("description",
"This is login credentials for example"),
}
def test_init_database_creates_gnupg_hidden_directory(self):
self.assertTrue(os.path.exists(self.database_path))
self.assertTrue(os.path.exists(self.gnupg_path))
def test_add_credential_creates_directory_with_credential_name(self):
credential_name = "email"
self.assertIn(credential_name, os.listdir(self.database_path))
def test_get_credential_returns_expected_credential_dictionary(self):
credential_name = "email"
credential_login = "email@example.com"
credential_password = "p4ssw0rd"
credential_comments = "email"
credential_path = os.path.join(self.database_path, credential_name)
os.makedirs(credential_path)
with open(credential_path + "/login", "w") as f:
f.write(credential_login)
with open(credential_path + "/password", "w") as f:
f.write(credential_password)
with open(credential_path + "/comments", "w") as f:
f.write(credential_comments)
credential = pysswords.db.get_credential(credential_path)
self.assertIsInstance(credential, dict)
self.assertEqual(credential.get('name'), credential_name)
self.assertEqual(credential.get('login'), credential_login)
self.assertEqual(credential.get('password'), credential_password)
self.assertEqual(credential.get('comments'), credential_comments)
def test_list_credentials_return_credentials_from_database_dir(self):
credential_name = "email"
credential_login = "email@example.com"
credential_password = "p4ssw0rd"
credential_comments = "email"
credential_path = os.path.join(self.database_path, credential_name)
os.makedirs(credential_path)
with open(credential_path + "/login", "w") as f:
f.write(credential_login)
with open(credential_path + "/password", "w") as f:
f.write(credential_password)
with open(credential_path + "/comments", "w") as f:
f.write(credential_comments)
credentials = pysswords.db.list_credentials(self.database_path)
self.assertIn(credential_name, (c["name"] for c in credentials))
self.assertIn(credential_login, (c["login"] for c in credentials))
self.assertIn(credential_password,
(c["password"] for c in credentials))
self.assertIn(credential_comments,
(c["comments"] for c in credentials))
class PysswordsCryptTests(unittest.TestCase):
def setUp(self):
self.database_path = os.path.join(TEST_DIR, ".pysswords")
self.gnupg_path = os.path.join(self.database_path, ".gnupg")
os.makedirs(self.database_path)
self.gpg = pysswords.crypt.get_gpg(self.gnupg_path)
def tearDown(self):
shutil.rmtree(self.database_path)
def test_get_gpg_creates_keyrings_in_database_path(self):
pysswords.crypt.get_gpg(self.database_path)
self.assertIn("pubring.gpg", os.listdir(self.gnupg_path))
self.assertIn("secring.gpg", os.listdir(self.gnupg_path))
def test_get_gpg_return_valid_gpg_object(self):
gpg = pysswords.crypt.get_gpg(self.database_path)
self.assertIsInstance(gpg, gnupg.GPG)
if __name__ == "__main__":
if sys.version_info >= (3, 1):
unittest.main(warnings="ignore")
else:
unittest.main()
|
Python
| 0.000001
|
@@ -2577,134 +2577,8 @@
il%22%0A
- credential_login = %22email@example.com%22%0A credential_password = %22p4ssw0rd%22%0A credential_comments = %22email%22%0A
@@ -2690,33 +2690,29 @@
th)%0A
-with open
+touch
(credential_
@@ -2730,688 +2730,245 @@
gin%22
-, %22w%22) as f:%0A f.write(credential_login)%0A with open(credential_path + %22/password%22, %22w%22) as f:%0A f.write(credential_password)%0A with open(credential_path + %22/comments%22, %22w%22) as f:%0A f.write(credential_comments)%0A%0A credentials = pysswords.db.list_credentials(self.database_path)%0A self.assertIn(credential_name, (c%5B%22name%22%5D for c in credentials))%0A self.assertIn(credential_login, (c%5B%22login%22%5D for c in credentials))%0A self.assertIn(credential_password,%0A (c%5B%22password%22%5D for c in credentials))%0A self.assertIn(credential_comments,%0A (c%5B%22comments%22%5D for c in credentials))%0A
+)%0A touch(credential_path + %22/password%22)%0A touch(credential_path + %22/comments%22)%0A%0A credentials = pysswords.db.list_credentials(self.database_path)%0A self.assertIn(credential_name, (c%5B%22name%22%5D for c in credentials))
%0A%0A%0Ac
|
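Decoded, the refactor replaces the hand-written credential files with the already-imported touch() helper and trims the assertions down to the name check:

    def test_list_credentials_return_credentials_from_database_dir(self):
        credential_name = "email"
        credential_path = os.path.join(self.database_path, credential_name)
        os.makedirs(credential_path)
        touch(credential_path + "/login")
        touch(credential_path + "/password")
        touch(credential_path + "/comments")

        credentials = pysswords.db.list_credentials(self.database_path)
        self.assertIn(credential_name, (c["name"] for c in credentials))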
faf3c4fe474733965ab301465f695e3cc311169c
|
Fix PostgresToGCSOperator bool dtype (#25475)
|
airflow/providers/google/cloud/transfers/postgres_to_gcs.py
|
airflow/providers/google/cloud/transfers/postgres_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""PostgreSQL to GCS operator."""
import datetime
import json
import time
import uuid
from decimal import Decimal
from typing import Dict
import pendulum
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
class _PostgresServerSideCursorDecorator:
"""
Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.
Decorator for allowing description to be available for postgres cursor in case server side
cursor is used. It doesn't provide other methods except those needed in BaseSQLToGCSOperator,
which is more of a safety feature.
"""
def __init__(self, cursor):
self.cursor = cursor
self.rows = []
self.initialized = False
def __iter__(self):
return self
def __next__(self):
if self.rows:
return self.rows.pop()
else:
self.initialized = True
return next(self.cursor)
@property
def description(self):
"""Fetch first row to initialize cursor description when using server side cursor."""
if not self.initialized:
element = self.cursor.fetchone()
if element is not None:
self.rows.append(element)
self.initialized = True
return self.cursor.description
class PostgresToGCSOperator(BaseSQLToGCSOperator):
"""
Copy data from Postgres to Google Cloud Storage in JSON or CSV format.
:param postgres_conn_id: Reference to a specific Postgres hook.
:param use_server_side_cursor: If server-side cursor should be used for querying postgres.
For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors
:param cursor_itersize: How many records are fetched at a time in case of server-side cursor.
"""
ui_color = '#a0e08c'
type_map = {
1114: 'DATETIME',
1184: 'TIMESTAMP',
1082: 'DATE',
1083: 'TIME',
1005: 'INTEGER',
1007: 'INTEGER',
1016: 'INTEGER',
20: 'INTEGER',
21: 'INTEGER',
23: 'INTEGER',
16: 'BOOLEAN',
700: 'FLOAT',
701: 'FLOAT',
1700: 'FLOAT',
}
def __init__(
self,
*,
postgres_conn_id='postgres_default',
use_server_side_cursor=False,
cursor_itersize=2000,
**kwargs,
):
super().__init__(**kwargs)
self.postgres_conn_id = postgres_conn_id
self.use_server_side_cursor = use_server_side_cursor
self.cursor_itersize = cursor_itersize
def _unique_name(self):
return f"{self.dag_id}__{self.task_id}__{uuid.uuid4()}" if self.use_server_side_cursor else None
def query(self):
"""Queries Postgres and returns a cursor to the results."""
hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
conn = hook.get_conn()
cursor = conn.cursor(name=self._unique_name())
cursor.execute(self.sql, self.parameters)
if self.use_server_side_cursor:
cursor.itersize = self.cursor_itersize
return _PostgresServerSideCursorDecorator(cursor)
return cursor
def field_to_bigquery(self, field) -> Dict[str, str]:
return {
'name': field[0],
'type': self.type_map.get(field[1], "STRING"),
'mode': 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE',
}
def convert_type(self, value, schema_type, stringify_dict=True):
"""
Takes a value from Postgres, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery.
Timezone-aware Datetime values are converted to UTC seconds.
Unaware Datetime, Date and Time are converted to ISO formatted strings.
Decimals are converted to floats.
:param value: Postgres column value.
:param schema_type: BigQuery data type.
:param stringify_dict: Specify whether to convert dict to string.
"""
if isinstance(value, datetime.datetime):
iso_format_value = value.isoformat()
if value.tzinfo is None:
return iso_format_value
return pendulum.parse(iso_format_value).float_timestamp
if isinstance(value, datetime.date):
return value.isoformat()
if isinstance(value, datetime.time):
formatted_time = time.strptime(str(value), "%H:%M:%S")
time_delta = datetime.timedelta(
hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
)
return str(time_delta)
if stringify_dict and isinstance(value, dict):
return json.dumps(value)
if isinstance(value, Decimal):
return float(value)
return value
|
Python
| 0
|
@@ -2961,11 +2961,8 @@
BOOL
-EAN
',%0A
|
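Decoded, the entire fix is one token in type_map: Postgres OID 16 now maps to BigQuery's standard-SQL type name BOOL rather than the legacy alias BOOLEAN:

    type_map = {
        # ... other OIDs unchanged ...
        16: 'BOOL',  # was 'BOOLEAN'
    }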
ff6e8cc14112b2248c7f68701e288b28ff49c0c2
|
return code in get_submission
|
backend/service/submission.py
|
backend/service/submission.py
|
from req import Service
from service.base import BaseService
import config
import os
import re
class Submission(BaseService):
def get_submission_list_admin(self, data={}):
required_args = [{
'name': '+count',
'type': int,
}, {
'name': '+page',
'type': int,
}, {
'name': 'user',
'type': str,
}, {
'name': 'problem_id',
'type': int,
}, {
'name': 'verdict_id',
'type': int,
}]
err = self.form_validation(data, required_args)
if err: return (err, None)
where = {}
if 'user' in data and data['user'] is not None:
pass
if 'problem_id' in data and data['problem_id'] is not None:
pass
if 'verdict_id' in data and data['verdict_id'] is not None:
pass
self.log(data)
limit, offset = self.calc_limit_offset(data['page'], data['count'])
res = {}
res['data'] = (yield self.db.execute("SELECT * FROM submissions ORDER BY id DESC LIMIT %s OFFSET %s", (limit, offset,))).fetchall()
res['count'] = (yield self.db.execute("SELECT COUNT(*) as count FROM submissions")).fetchone()['count']
return (None, res)
def get_submission_list(self, data={}):
required_args = [{
'name': '+count',
'type': int,
}, {
'name': '+page',
'type': int,
}, {
'name': '+user_id',
'type': int,
}]
err = self.form_validation(data, required_args)
if err: return (err, None)
limit, offset = self.calc_limit_offset(data['page'], data['count'])
res = {}
res['data'] = (yield self.db.execute("SELECT * FROM submissions WHERE user_id=%s ORDER BY id DESC LIMIT %s OFFSET %s", (data['user_id'], limit, offset,))).fetchall()
res['count'] = (yield self.db.execute("SELECT COUNT(*) as count FROM submissions WHERE user_id=%s", (data['user_id'],))).fetchone()['count']
return (None, res)
def get_submission(self, data={}):
required_args = [{
'name': '+id',
'type': int,
}]
err = self.form_validation(data, required_args)
if err: return (err, None)
res = yield self.db.execute("SELECT * FROM submissions WHERE id=%s", (data['id'],))
res = res.fetchone()
return (None, res)
def fixed_file_name(self, file_name):
pass
def post_submission_code(self, data={}):
required_args = [{
'name': '+problem_id',
'type': int,
}, {
'name': '+user_id',
'type': int,
}, {
'name': '+execute_type_id',
'type': int,
}, {
'name': '+code',
'type': str,
}, {
'name': '+file_name',
'type': str,
}, {
'name': '+ip',
}]
err = self.form_validation(data, required_args)
if err: return (err, None)
if data['file_name'] == '':
err, res = yield from Service.Execute.get_execute({'id': data['execute_type_id']})
data['file_name'] = res['file_name']
code = data.pop('code')
data['length'] = len(code)
sql, param = self.gen_insert_sql("submissions", data)
res = yield self.db.execute(sql, param)
res = res.fetchone()
folder = os.path.join(config.DATA_ROOT, 'data/submissions' , str(res['id']))
file_path = os.path.join(folder, data['file_name'])
try: os.makedirs(folder)
except: pass
with open(file_path, 'w+') as f:
f.write(code)
yield self.db.execute("INSERT INTO wait_submissions (submission_id) VALUES (%s)", (res['id'],))
return (None, res)
def post_submission_file(self, data={}):
required_args = [{
'name': '+problem_id',
'type': int,
}, {
'name': '+user_id',
'type': int,
}, {
'name': '+execute_type_id',
'type': int,
}, {
'name': '+file',
}, {
'name': '+ip',
}]
err = self.form_validation(data, required_args)
if err: return (err, None)
code_file = data.pop('file')
data['file_name'] = code_file['filename']
data['length'] = len(code_file['body'])
sql, param = self.gen_insert_sql("submissions", data)
res = yield self.db.execute(sql, param)
res = res.fetchone()
folder = os.path.join(config.DATA_ROOT, 'data/submissions', str(res['id']))
file_path = os.path.join(folder, data['file_name'])
try: os.makedirs(folder)
except: pass
with open(file_path, 'wb+') as f:
f.write(code_file['body'])
yield self.db.execute("INSERT INTO wait_submissions (submission_id) VALUES (%s)", (res['id'],))
return (None, res)
|
Python
| 0.00003
|
@@ -2433,32 +2433,152 @@
res.fetchone()%0A
+ res%5B'code'%5D = open(os.path.join(config.DATA_ROOT, 'data/submissions', str(res%5B'id'%5D), res%5B'file_name'%5D)).read()%0A
return (
|
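Decoded, the diff makes get_submission read the stored source file back into the returned row, using the same directory layout that post_submission_code and post_submission_file write to (line wrapping approximated):

    def get_submission(self, data={}):
        ...
        res = yield self.db.execute("SELECT * FROM submissions WHERE id=%s", (data['id'],))
        res = res.fetchone()
        res['code'] = open(os.path.join(config.DATA_ROOT, 'data/submissions',
                                        str(res['id']), res['file_name'])).read()
        return (None, res)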
374ef731e658097f9b2d2d7593ed1126ec52d282
|
Fix issues with `skip_unknown`
|
ycml/transformers/sequences.py
|
ycml/transformers/sequences.py
|
import logging
from .base import PureTransformer
from .text import ListCountVectorizer
__all__ = ['TokensToIndexTransformer']
logger = logging.getLogger(__name__)
class TokensToIndexTransformer(PureTransformer):
def __init__(self, ignore_unknown=False, pad_sequences=None, count_vectorizer_args={}, pad_sequences_args={}, **kwargs):
super(TokensToIndexTransformer, self).__init__(**kwargs)
self.ignore_unknown = ignore_unknown
self.pad_sequences = pad_sequences
self.count_vectorizer_args = count_vectorizer_args
self.pad_sequences_args = pad_sequences_args
#end def
def fit(self, X, **kwargs):
self.count_vectorizer_ = ListCountVectorizer(**self.count_vectorizer_args).fit(X)
logger.debug('TokensToIndexTransformer vocabulary fitted with size {}.'.format(len(self.vocabulary_)))
return self
#end def
def _transform(self, X, y=None):
if 'maxlen' in self.pad_sequences_args:
raise ValueError('The `maxlen` argument should not be set in `pad_sequences_args`. Set it in `pad_sequences` instead.')
analyzer = self.count_vectorizer_.build_analyzer()
V = self.vocabulary_
unknown_index = 1 if self.ignore_unknown else len(V)
X_transformed = []
for seq in X:
indexes = []
for j, tok in enumerate(analyzer(seq)):
index = V.get(tok, unknown_index)
if index >= 0:
indexes.append(index)
#end for
X_transformed.append(indexes)
#end for
if self.pad_sequences is not None:
from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences
maxlen = getattr(self, 'pad_sequences_maxlen_', None if self.pad_sequences is True else self.pad_sequences)
X_transformed = keras_pad_sequences(X_transformed, maxlen=maxlen, **self.pad_sequences_args)
if self.pad_sequences is True or maxlen is not None:
logger.debug('TokensToIndexTransformer transformed sequences has max length {}.'.format(X_transformed.shape[1]))
self.pad_sequences_maxlen_ = X_transformed.shape[1]
#end if
return X_transformed
#end def
@property
def vocabulary_(self): return self.count_vectorizer_.vocabulary_
@property
def stop_words_(self): return self.count_vectorizer_.stop_words_
def __repr__(self):
count_vectorizer_repr = '{}(vocabulary_={}, stop_words_={})'.format(self.count_vectorizer_.__class__.__name__, len(getattr(self.count_vectorizer_, 'vocabulary_', [])), len(getattr(self.count_vectorizer_, 'stop_words_', []))) if hasattr(self, 'count_vectorizer_') else None
return '{}(ignore_unknown={}, pad_sequences={}, count_vectorizer_args={}, pad_sequences_args={}, count_vectorizer_={})'.format(self.__class__.__name__, self.ignore_unknown, self.pad_sequences, self.count_vectorizer_args, self.pad_sequences_args, count_vectorizer_repr)
#end def
#end class
|
Python
| 0
|
@@ -233,22 +233,20 @@
_(self,
-ignore
+skip
_unknown
@@ -408,30 +408,28 @@
self.
-ignore
+skip
_unknown = i
@@ -427,22 +427,20 @@
known =
-ignore
+skip
_unknown
@@ -1186,69 +1186,8 @@
ary_
-%0A unknown_index = 1 if self.ignore_unknown else len(V)
%0A%0A
@@ -1347,23 +1347,8 @@
(tok
-, unknown_index
)%0A%0A
@@ -1369,39 +1369,122 @@
if
-index %3E= 0:%0A
+not self.skip_unknown: indexes.append(0 if index is None else (index + 1))%0A elif index is not None:
ind
@@ -2755,22 +2755,20 @@
urn '%7B%7D(
-ignore
+skip
_unknown
@@ -2903,14 +2903,12 @@
elf.
-ignore
+skip
_unk
|
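Decoded, the diff renames ignore_unknown to skip_unknown throughout and reworks the indexing: with skip_unknown=False every token is kept (unknowns become 0 and vocabulary indexes shift up by one), while with skip_unknown=True unknown tokens are dropped. The patched inner loop, as reconstructed:

        for seq in X:
            indexes = []
            for j, tok in enumerate(analyzer(seq)):
                index = V.get(tok)
                if not self.skip_unknown: indexes.append(0 if index is None else (index + 1))
                elif index is not None:
                    indexes.append(index)
            X_transformed.append(indexes)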
de6babf92252ea5828a9c17d76766357cff3e440
|
Extend _VALID_URL (Closes #10812)
|
youtube_dl/extractor/tvland.py
|
youtube_dl/extractor/tvland.py
|
# coding: utf-8
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
class TVLandIE(MTVServicesInfoExtractor):
IE_NAME = 'tvland.com'
_VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.tvland.com/feeds/mrss/'
_TESTS = [{
# Geo-restricted. Without a proxy metadata are still there. With a
# proxy it redirects to http://m.tvland.com/app/
'url': 'http://www.tvland.com/episodes/hqhps2/everybody-loves-raymond-the-invasion-ep-048',
'info_dict': {
'description': 'md5:80973e81b916a324e05c14a3fb506d29',
'title': 'The Invasion',
},
'playlist': [],
}, {
'url': 'http://www.tvland.com/video-clips/zea2ev/younger-younger--hilary-duff---little-lies',
'md5': 'e2c6389401cf485df26c79c247b08713',
'info_dict': {
'id': 'b8697515-4bbe-4e01-83d5-fa705ce5fa88',
'ext': 'mp4',
'title': 'Younger|December 28, 2015|2|NO-EPISODE#|Younger: Hilary Duff - Little Lies',
'description': 'md5:7d192f56ca8d958645c83f0de8ef0269',
'upload_date': '20151228',
'timestamp': 1451289600,
},
}]
|
Python
| 0
|
@@ -228,16 +228,26 @@
o-clips%7C
+(?:full-)?
episodes
@@ -1246,19 +1246,166 @@
%0A %7D,%0A
+ %7D, %7B%0A 'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301',%0A 'only_matching': True,%0A
%7D%5D%0A
|
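Decoded, the diff widens the path alternation to accept /full-episodes/ URLs and registers an only_matching test for the new form:

    _VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'

    _TESTS = [{
        # ... existing tests unchanged ...
    }, {
        'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301',
        'only_matching': True,
    }]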
59d4678e8319320b9b5ccd304b1034188c02ae61
|
insert pth eggs at index of site-packages they come from
|
pkgs/development/python-modules/site/site.py
|
pkgs/development/python-modules/site/site.py
|
def __boot():
import sys, imp, os, os.path
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d,nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p,np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
|
Python
| 0
|
@@ -1620,16 +1620,80 @@
ONPATH:%0A
+ sys.__egginsert = sys.path.index(os.path.abspath(item))%0A
|
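Decoded, the diff points sys.__egginsert at the sys.path position of each PYTHONPATH entry before addsitedir() processes it, so eggs named in that directory's .pth files are inserted at the index of the site-packages directory they came from:

    for item in PYTHONPATH:
        sys.__egginsert = sys.path.index(os.path.abspath(item))
        addsitedir(item)

    sys.__egginsert += oldpos  # restore effective old position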
fcc5ae524736bffed0a394afe4be31777e74c6b8
|
Fix regression w/email vs. delivery_email.
|
zerver/lib/email_validation.py
|
zerver/lib/email_validation.py
|
from typing import Callable, Dict, Optional, Set, Tuple
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from zerver.lib.name_restrictions import is_disposable_domain
# TODO: Move DisposableEmailError, etc. into here.
from zerver.models import (
email_to_username,
email_to_domain,
get_users_by_delivery_email,
is_cross_realm_bot_email,
DisposableEmailError,
DomainNotAllowedForRealmError,
EmailContainsPlusError,
Realm,
RealmDomain,
)
def validate_disposable(email: str) -> None:
if is_disposable_domain(email_to_domain(email)):
raise DisposableEmailError
def get_realm_email_validator(realm: Realm) -> Callable[[str], None]:
if not realm.emails_restricted_to_domains:
# Should we also do '+' check for non-restricted realms?
if realm.disallow_disposable_email_addresses:
return validate_disposable
# allow any email through
return lambda email: None
'''
RESTRICTIVE REALMS:
Some realms only allow emails within a set
of domains that are configured in RealmDomain.
We get the set of domains up front so that
folks can validate multiple emails without
multiple round trips to the database.
'''
query = RealmDomain.objects.filter(realm=realm)
rows = list(query.values('allow_subdomains', 'domain'))
allowed_domains = {
r['domain'] for r in rows
}
allowed_subdomains = {
r['domain'] for r in rows
if r['allow_subdomains']
}
def validate(email: str) -> None:
'''
We don't have to do a "disposable" check for restricted
domains, since the realm is already giving us
a small whitelist.
'''
if '+' in email_to_username(email):
raise EmailContainsPlusError
domain = email_to_domain(email)
if domain in allowed_domains:
return
while len(domain) > 0:
subdomain, sep, domain = domain.partition('.')
if domain in allowed_subdomains:
return
raise DomainNotAllowedForRealmError
return validate
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email: str, realm: Realm) -> None:
'''
Avoid calling this in a loop!
Instead, call get_realm_email_validator()
outside of the loop.
'''
get_realm_email_validator(realm)(email)
def validate_email_is_valid(
email: str,
validate_email_allowed_in_realm: Callable[[str], None],
) -> Optional[str]:
try:
validators.validate_email(email)
except ValidationError:
return _("Invalid address.")
try:
validate_email_allowed_in_realm(email)
except DomainNotAllowedForRealmError:
return _("Outside your domain.")
except DisposableEmailError:
return _("Please use your real email address.")
except EmailContainsPlusError:
return _("Email addresses containing + are not allowed.")
return None
def email_reserved_for_system_bots_error(email: str) -> str:
return '%s is reserved for system bots' % (email,)
def get_existing_user_errors(
target_realm: Realm,
emails: Set[str],
verbose: bool=False,
) -> Dict[str, Tuple[str, bool]]:
'''
We use this function even for a list of just one email.
It checks "new" emails to make sure that they don't
already exist. There's a bit of fiddly logic related
to cross-realm bots and mirror dummies too.
'''
errors = {} # type: Dict[str, Tuple[str, bool]]
users = get_users_by_delivery_email(emails, target_realm).only(
'email',
'is_active',
'is_mirror_dummy',
)
'''
A note on casing: We will preserve the casing used by
the user for email in most of this code. The only
exception is when we do existence checks against
the `user_dict` dictionary. (We don't allow two
users in the same realm to have the same effective
delivery email.)
'''
user_dict = {user.email.lower(): user for user in users}
def process_email(email: str) -> None:
if is_cross_realm_bot_email(email):
if verbose:
msg = email_reserved_for_system_bots_error(email)
else:
msg = _('Reserved for system bots.')
deactivated = False
errors[email] = (msg, deactivated)
return
existing_user_profile = user_dict.get(email.lower())
if existing_user_profile is None:
# HAPPY PATH! Most people invite users that don't exist yet.
return
if existing_user_profile.is_mirror_dummy:
if existing_user_profile.is_active:
raise AssertionError("Mirror dummy user is already active!")
return
'''
Email has already been taken by a "normal" user.
'''
deactivated = not existing_user_profile.is_active
if existing_user_profile.is_active:
if verbose:
msg = _('%s already has an account') % (email,)
else:
msg = _("Already has an account.")
else:
msg = _("Account has been deactivated.")
errors[email] = (msg, deactivated)
for email in emails:
process_email(email)
return errors
def validate_email_not_already_in_realm(target_realm: Realm,
email: str,
verbose: bool=True) -> None:
'''
NOTE:
Only use this to validate that a single email
is not already used in the realm.
We should start using bulk_check_new_emails()
for any endpoint that takes multiple emails,
such as the "invite" interface.
'''
error_dict = get_existing_user_errors(target_realm, {email}, verbose)
# Loop through errors, the only key should be our email.
for key, error_info in error_dict.items():
assert key == email
msg, deactivated = error_info
raise ValidationError(msg)
|
Python
| 0.00001
|
@@ -3929,16 +3929,25 @@
'
+delivery_
email',%0A
@@ -4334,16 +4334,25 @@
= %7Buser.
+delivery_
email.lo
|
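Decoded, the regression fix is to query and key on delivery_email rather than email, since the two fields can diverge (the docstring above already talks in terms of the effective delivery email):

    users = get_users_by_delivery_email(emails, target_realm).only(
        'delivery_email',
        'is_active',
        'is_mirror_dummy',
    )
    ...
    user_dict = {user.delivery_email.lower(): user for user in users}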
a34d08cec2cdcf259070ca51c69dcd425a04c5be
|
move use_container into execkwargs
|
tests/util.py
|
tests/util.py
|
from __future__ import absolute_import
import os
import functools
from pkg_resources import (Requirement, ResolutionError, # type: ignore
resource_filename)
import distutils.spawn
import pytest
from cwltool.utils import onWindows, windows_default_container_id
from cwltool.factory import Factory
def get_windows_safe_factory(**execkwargs):
if onWindows():
opts = {'find_default_container': functools.partial(
force_default_container, windows_default_container_id),
'use_container': True,
'default_container': windows_default_container_id}
else:
opts = {}
return Factory(makekwargs=opts, **execkwargs)
def force_default_container(default_container_id, builder):
return default_container_id
def get_data(filename):
filename = os.path.normpath(
filename) # normalizing path depending on OS or else it will cause problem when joining path
filepath = None
try:
filepath = resource_filename(
Requirement.parse("cwltool"), filename)
except ResolutionError:
pass
if not filepath or not os.path.isfile(filepath):
filepath = os.path.join(os.path.dirname(__file__), os.pardir, filename)
# warning, __file__ is all lowercase on Windows systems, this can
# sometimes conflict with docker toolkit. Workaround: pip install .
# and run the tests elsewhere via python -m pytest --pyarg cwltool
return filepath
needs_docker = pytest.mark.skipif(not bool(distutils.spawn.find_executable('docker')),
reason="Requires the docker executable on the "
"system path.")
|
Python
| 0.000009
|
@@ -388,27 +388,33 @@
():%0A
-opt
+makekwarg
s = %7B'find_d
@@ -535,16 +535,22 @@
+
'use_con
@@ -562,17 +562,17 @@
r': True
-,
+%7D
%0A
@@ -572,24 +572,27 @@
-
+execkwargs%5B
'default
@@ -632,17 +632,17 @@
ainer_id
-%7D
+%5D
%0A els
@@ -692,19 +692,25 @@
ekwargs=
-opt
+makekwarg
s, **exe
|
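Read as literally as the char-level hunks allow, the diff renames opts to makekwargs, closes that dict right after 'use_container': True, and assigns default_container through execkwargs instead (despite the subject line naming use_container). An approximate reconstruction with the file's imports; the hunks leave the else branch and some punctuation ambiguous, so those are normalized here:

def get_windows_safe_factory(**execkwargs):
    if onWindows():
        makekwargs = {'find_default_container': functools.partial(
            force_default_container, windows_default_container_id),
            'use_container': True}
        execkwargs['default_container'] = windows_default_container_id
    else:
        makekwargs = {}
    return Factory(makekwargs=makekwargs, **execkwargs)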
b06863b3bd9b12c47380362b3d4182167a6d2eaa
|
Update openssl.py
|
wigs/openssl.py
|
wigs/openssl.py
|
class openssl(Wig):
tarball_uri = 'https://github.com/openssl/openssl/archive/OpenSSL_$RELEASE_VERSION$.tar.gz'
git_uri = 'https://github.com/openssl/openssl'
last_release_version = 'v1_0_2d'
def setup(self):
self.configure_flags += [S.FPIC_FLAG]
def gen_configure_snippet(self):
return './config %s' % ' '.join(self.configure_flags)
|
Python
| 0.000002
|
@@ -186,13 +186,15 @@
'v1_
-0_2d'
+1_0e'%0A%09
%0A%0A%09d
|
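Decoded, the change just bumps the pinned release tag:

    last_release_version = 'v1_1_0e'  # was 'v1_0_2d'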
e23ef45a125a3bee8df274a90f48ffd94c632451
|
update docstring
|
buncuts/utils.py
|
buncuts/utils.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import io
import re
default_delimeter = list("。!?▲")
default_quote_dict = {"「": "」", "『": "』"}
class QuoteChecker:
"""Quote checker.
The instance is expected to be used in an iteration,
to check whether a character is inside a (nested) quotation
in a sequence of text.
"""
def __init__(self, quote_dict):
self.quote_dict = quote_dict
self.open_quotes = quote_dict.keys()
self.quotes_remained = []
def outside_quote(self, char):
if char in self.open_quotes:
close_quote = self.quote_dict[char]
self.quotes_remained.append(close_quote)
if len(self.quotes_remained) == 0:
return True
else:
if char == self.quotes_remained[-1]:
self.quotes_remained.pop()
return False
def split_line(chunk,
sentence_delim=default_delimeter,
check_quote=True,
quote_dict=default_quote_dict):
"""Split a chunk.
Returns:
A tuple that contains the split string and the count of
sentence delimiters in the chunk: (result, count)
"""
result = ""
count = 0
length = len(chunk)
qc = QuoteChecker(quote_dict)
for i, char in enumerate(chunk):
# Always append original char to the result.
result = ''.join((result, char))
if check_quote:
if qc.outside_quote(char):
pass
else:
continue
# Additionally, append a newline after a sentence delimiter.
if char in sentence_delim:
count += 1
if i < length - 1 and chunk[i+1] != '\n':
result = ''.join((result, '\n'))
elif i == length - 1:
result = ''.join((result, '\n'))
return result, count
def process_single_file(input=sys.stdin,
input_enc='sjis',
output=sys.stdout,
output_enc='sjis',
output_newline=None,
append=False,
is_dir=False,
sentence_delim=default_delimeter,
quote_dict=default_quote_dict,
limit=float('inf'),
echo=False):
"""Perform line breaks on one file.
Call ``split_line()`` for each line in the input file.
Args:
input: Path of the input file.
input_enc: Character encoding of the input files.
Defaults to Shift-JIS.
output: Path of output destination.
Defaults to stdout.
output_enc: Character encoding of the output file(s).
Defaults to Shift-JIS.
output_newline: The newline format of the output file(s).
append: Whether append to output or not.
is_dir: Whether the output is a directory or a regular file.
sentence_delim: A list of sentence delimiters.
quote_dict: A dict that maps opening quote marks
to its closing counterpart.
limit: The limit for the maximum amount of sentences
that should be extracted.
echo: Whether echo the output or not.
"""
count = 0
if append:
mode = 'a'
else:
mode = 'w'
if input is not sys.stdin:
input_file = io.open(input,
mode='r',
encoding=input_enc)
else:
input_file = input
# determine the newline format.
# io.TextIOBase.newlines only indicates the newlines translated so far.
# so you have to read one line in order to determine the newline.
input_file.readline()
input_newline = input_file.newlines
input_file.seek(0)
if output_newline is None:
output_newline = input_newline
if output is not sys.stdout:
if is_dir:
path = os.path.join(output, os.path.basename(input))
output_file = io.open(path,
mode=mode,
encoding=output_enc,
newline=output_newline)
else:
output_file = io.open(output,
mode=mode,
encoding=output_enc,
newline=output_newline)
else:
output_file = output
for line in input_file:
# strip half/full width spaces
# strip() somehow doesn't work very well.
# use re instead.
line = re.sub(r"^[ ]+|[ ]+$", "", line)
if line == '\n':
continue
line_splitted, count_added = split_line(line,
sentence_delim,
quote_dict)
output_file.write(line_splitted)
count += count_added
# close files
if input is not sys.stdin:
input_file.close()
if output is not sys.stdout:
output_file.close()
def split_sentences(input_list,
input_enc='sjis',
output=sys.stdout,
output_enc='sjis',
append=False,
is_dir=False,
sentence_delim=default_delimeter,
quote_dict=default_quote_dict,
limit=float('inf'),
echo=False):
"""Split the text from input files into sentences.
Call ``process_single_file()`` for each file in the input file list.
"""
for f in input_list:
process_single_file(input=f,
output=output,
append=append,
is_dir=is_dir,
sentence_delim=sentence_delim,
quote_dict=quote_dict,
limit=limit,
echo=echo)
|
Python
| 0
|
@@ -661,24 +661,100 @@
elf, char):%0A
+ %22%22%22Return True if the char is outside a quotation, False if not.%22%22%22%0A
if c
|
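Decoded, the whole diff adds a one-line docstring to QuoteChecker.outside_quote:

    def outside_quote(self, char):
        """Return True if the char is outside a quotation, False if not."""
        ...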
170aa86b851e34525e150c9cd26c33577438ac87
|
interpolate scratch string
|
polyjit/buildbot/builders/slurm.py
|
polyjit/buildbot/builders/slurm.py
|
import sys
from polyjit.buildbot.builders import register
from polyjit.buildbot import slaves
from polyjit.buildbot.utils import (builder, define, git, cmd, ucmd, ucompile,
upload_file, trigger, ip, mkdir,
rmdir, s_sbranch, s_force, s_trigger,
hash_download_from_master,
property_is_false, clean_unpack)
from polyjit.buildbot.repos import make_cb, codebases
from polyjit.buildbot.master import URL
from buildbot.plugins import util
from buildbot.changes import filter
codebase = make_cb(['benchbuild'])
P = util.Property
BuildFactory = util.BuildFactory
def has_munged(host):
if "has_munged" in host["properties"]:
return host["properties"]["has_munged"]
return False
accepted_builders = slaves.get_hostlist(slaves.infosun, predicate = has_munged)
# yapf: disable
def configure(c):
llvm_dl = hash_download_from_master("public_html/llvm.tar.gz",
"llvm.tar.gz", "llvm")
polyjit_dl = hash_download_from_master("public_html/polyjit.tar.gz",
"polyjit.tar.gz", "polyjit")
steps = [
# trigger(schedulerNames=['trigger-build-llvm', 'trigger-build-jit']),
define("scratch", "/scratch/pjtest/%(prop:buildnumber)s")
]
steps.extend(llvm_dl)
steps.extend(clean_unpack("llvm.tar.gz", "llvm"))
steps.extend(polyjit_dl)
steps.extend(clean_unpack("polyjit.tar.gz", "polyjit"))
steps.extend([
define("BENCHBUILD_ROOT", ip("%(prop:builddir)s/build/benchbuild/")),
git('benchbuild', 'develop', codebases, workdir=P("BENCHBUILD_ROOT")),
])
steps.extend([
ucmd('virtualenv', '-ppython3', 'env/'),
ucmd('/mnt/build/env/bin/pip3', 'install', '--upgrade', '.',
workdir='build/benchbuild'),
ucmd('/mnt/build/env/bin/benchbuild', 'bootstrap', env={
'PATH': '/opt/cmake/bin:/usr/sbin:/sbin:/usr/bin:/bin',
'BB_ENV_COMPILER_PATH':
ip('%(prop:scratch)s/llvm/bin:'
'/mnt/build/llvm/bin:'
'/mnt/build/polyjit/bin'),
'BB_ENV_COMPILER_LD_LIBRARY_PATH':
ip('%(prop:scratch)s/llvm/lib:'
'/mnt/build/llvm/lib:'
'/mnt/build/polyjit/lib'),
'BB_ENV_LOOKUP_PATH':
ip('%(prop:scratch)s/llvm/lib:'
'/mnt/build/llvm/lib:'
'/mnt/build/polyjit/bin'),
'BB_ENV_LOOKUP_LD_LIBRARY_PATH':
ip('%(prop:scratch)s/polyjit/lib:'
'/mnt/build/llvm/lib:'
'/mnt/build/polyjit/lib'),
'BB_LLVM_DIR': '/mnt/build/llvm',
'BB_LIKWID_PREFIX': '/usr/local',
'BB_PAPI_INCLUDE': '/usr/include',
'BB_PAPI_LIBRARY': '/usr/lib',
'BB_SRC_DIR': '/mnt/build/benchbuild',
'BB_UNIONFS_ENABLE': 'false'
},
workdir='build/benchbuild'),
mkdir(P("scratch")),
cmd("cp", "-var", "build/*", P("scratch"))
])
c['builders'].append(builder("build-slurm-set", None, accepted_builders,
factory=BuildFactory(steps)))
# yapf: enable
def schedule(c):
c['schedulers'].extend([
s_sbranch("build-slurm-set-sched", codebase, ["build-slurm-set"],
change_filter=filter.ChangeFilter(branch_re='next|develop'),
treeStableTimer=2 * 60),
s_force("force-build-slurm-set", codebase, ["build-slurm-set"]),
s_trigger("trigger-slurm-set", codebase, ['build-slurm-set'])
])
register(sys.modules[__name__])
|
Python
| 0.000064
|
@@ -1332,16 +1332,19 @@
ratch%22,
+ip(
%22/scratc
@@ -1374,16 +1374,17 @@
mber)s%22)
+)
%0A %5D%0A
|
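Decoded, the diff wraps the scratch path in ip() so buildbot actually interpolates the %(prop:buildnumber)s placeholder instead of passing it through literally:

        define("scratch", ip("/scratch/pjtest/%(prop:buildnumber)s"))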
07a77f02ed3d7bd31c0a315c2f4fa76491e0f915
|
Use MySQL default port when not set explicitly
|
tooz/drivers/mysql.py
|
tooz/drivers/mysql.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pymysql
import tooz
from tooz import coordination
from tooz.drivers import _retry
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
class MySQLLock(locking.Lock):
"""A MySQL based lock."""
def __init__(self, name, parsed_url, options):
super(MySQLLock, self).__init__(name)
self._conn = MySQLDriver.get_connection(parsed_url, options)
self.acquired = False
def acquire(self, blocking=True):
@_retry.retry(stop_max_delay=blocking)
def _lock():
# NOTE(sileht): mysql-server (<5.7.5) allows only one lock per
# connection at a time:
# select GET_LOCK("a", 0);
# select GET_LOCK("b", 0); <-- this release lock "a" ...
# Or
# select GET_LOCK("a", 0);
# select GET_LOCK("a", 0); release and lock again "a"
#
# So, we track locally the lock status with self.acquired
if self.acquired is True:
if blocking:
raise _retry.Retry
return False
try:
with self._conn as cur:
cur.execute("SELECT GET_LOCK(%s, 0);", self.name)
# Can return NULL on error
if cur.fetchone()[0] is 1:
self.acquired = True
return True
except pymysql.MySQLError as e:
coordination.raise_with_cause(coordination.ToozError,
utils.exception_message(e),
cause=e)
if blocking:
raise _retry.Retry
return False
return _lock()
def release(self):
try:
with self._conn as cur:
cur.execute("SELECT RELEASE_LOCK(%s);", self.name)
cur.fetchone()
self.acquired = False
except pymysql.MySQLError as e:
coordination.raise_with_cause(coordination.ToozError,
utils.exception_message(e),
cause=e)
def __del__(self):
if self.acquired:
LOG.warn("unreleased lock %s garbage collected" % self.name)
class MySQLDriver(coordination.CoordinationDriver):
"""A `MySQL`_ based driver.
This driver uses `MySQL`_ database tables to
provide the coordination driver semantics and required API(s). It **is**
missing some functionality but in the future these not implemented API(s)
will be filled in.
.. _MySQL: http://dev.mysql.com/
"""
def __init__(self, member_id, parsed_url, options):
"""Initialize the MySQL driver."""
super(MySQLDriver, self).__init__()
self._parsed_url = parsed_url
self._options = utils.collapse(options)
def _start(self):
self._conn = MySQLDriver.get_connection(self._parsed_url,
self._options)
def _stop(self):
self._conn.close()
def get_lock(self, name):
return MySQLLock(name, self._parsed_url, self._options)
@staticmethod
def watch_join_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_join_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def watch_leave_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_leave_group(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def watch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def get_connection(parsed_url, options):
host = parsed_url.hostname
port = parsed_url.port
dbname = parsed_url.path[1:]
username = parsed_url.username
password = parsed_url.password
unix_socket = options.get("unix_socket")
try:
if unix_socket:
return pymysql.Connect(unix_socket=unix_socket,
port=port,
user=username,
passwd=password,
database=dbname)
else:
return pymysql.Connect(host=host,
port=port,
user=username,
passwd=password,
database=dbname)
except (pymysql.err.OperationalError, pymysql.err.InternalError) as e:
coordination.raise_with_cause(coordination.ToozConnectionError,
utils.exception_message(e),
cause=e)
|
Python
| 0.000222
|
@@ -843,24 +843,55 @@
d lock.%22%22%22%0A%0A
+ MYSQL_DEFAULT_PORT = 3306%0A%0A
def __in
@@ -4593,16 +4593,48 @@
url.port
+ or MySQLLock.MYSQL_DEFAULT_PORT
%0A
|
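Decoded, the diff introduces a MYSQL_DEFAULT_PORT constant on MySQLLock and falls back to it when the connection URL carries no port (parsed_url.port is None in that case). An excerpt of the patched pieces:

from tooz import locking


class MySQLLock(locking.Lock):
    """A MySQL based lock."""

    MYSQL_DEFAULT_PORT = 3306

    # (methods unchanged)


def get_connection(parsed_url, options):
    # excerpt of MySQLDriver.get_connection after the patch
    port = parsed_url.port or MySQLLock.MYSQL_DEFAULT_PORT
    ...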
66c4b93ae78c98928946f0ceeee3a2c16be7655d
|
Add coding line
|
app/tests/integration/test_database.py
|
app/tests/integration/test_database.py
|
"""
Database tests.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from unittest import TestCase
from lib import database
from models.trends import Trend
class TestDatabaseSetup(TestCase):
"""
Test the database library module.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_drop(self):
database._dropTables()
def test_create(self):
database._createTables()
def test_baseLabels(self):
database._createTables(verbose=False)
database._baseLabels()
def test_populate(self):
database._createTables(verbose=False)
limit = 1
database._populate(limit)
class TestModel(TestCase):
"""
Test ORM operations on the SQL database.
In particular, edgecases such as unicode character handling.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_insert(self):
database._dropTables(verbose=False)
database._createTables(verbose=False)
database._baseLabels()
t = Trend(topic="abc", volume=1)
self.assertEqual(t.topic, "abc")
self.assertEqual(t.volume, 1)
t = Trend(topic="a b Ç 😊", volume=1000)
self.assertEqual(t.topic, "a b Ç 😊")
database._dropTables(verbose=False)
|
Python
| 0.000878
|
@@ -1,12 +1,36 @@
+# -*- coding: utf-8 -*-%0A
%22%22%22%0ADatabase
|
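Decoded, the diff prepends a PEP 263 encoding declaration; under Python 2 a source file defaults to ASCII, and this module's literals include non-ASCII characters (Ç, 😊):

# -*- coding: utf-8 -*-
"""
Database tests.
"""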
005eea5e467c3a0aa6b942ce377a5c72b9177e21
|
Fix build_lines() - s/bw/image
|
textinator.py
|
textinator.py
|
import click
from PIL import Image
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
|
Python
| 0
|
@@ -2069,18 +2069,21 @@
pixel =
-bw
+image
.getpixe
|
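Decoded, the fix renames the leftover bw reference inside build_lines to its image parameter, curing a NameError once the helper runs outside convert()'s scope. (The palette reference inside build_lines has the same problem, but this diff does not touch it.)

def build_lines(image, newlines=True):
    width, height = image.size
    for y in range(height):
        line = ''
        for x in range(width):
            pixel = image.getpixel((x, y))  # was bw.getpixel((x, y))
            ...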
1f130a8577f16809008bd301ab8c47aab4677750
|
Add build_lines function, move image generation there.
|
textinator.py
|
textinator.py
|
import click
from PIL import Image
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False, writable=True)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width and not height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
o_width, o_height = bw.size
for y in range(o_height):
line = ''
for x in range(o_width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
click.echo(line)
|
Python
| 0
|
@@ -2217,23 +2217,129 @@
L%22)%0A
+%0A
-o_
+for line in build_lines(bw, newlines):%0A click.echo(line)%0A%0A%0Adef build_lines(image, newlines=True):%0A
width,
-o_
heig
@@ -2343,18 +2343,21 @@
eight =
-bw
+image
.size%0A%0A
@@ -2374,18 +2374,16 @@
n range(
-o_
height):
@@ -2401,16 +2401,17 @@
ne = ''%0A
+%0A
@@ -2425,18 +2425,16 @@
n range(
-o_
width):%0A
@@ -2527,29 +2527,79 @@
te)%0A
-click.echo(line)
+ if newlines:%0A line += '%5Cn'%0A%0A yield line%0A
%0A
|
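The commit above extracts row generation into a build_lines() generator, and the later commit on this file fixes the bw free variable it left behind; palette is still read from the enclosing scope even after that fix. A hedged sketch of a fully decoupled version that takes both the image and the palette as parameters:

from PIL import Image

def build_lines(image, palette, newlines=True):
    """Yield one text row per pixel row of a greyscale PIL image."""
    width, height = image.size
    for y in range(height):
        line = ''.join(
            palette[int(image.getpixel((x, y)) / 256 * len(palette))]
            for x in range(width)
        )
        yield line + ('\n' if newlines else '')

# usage (the filename is illustrative):
# img = Image.open('photo.png').convert('L')
# for line in build_lines(img, '█▓▒░ '):
#     print(line, end='')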
bf7fd4e606901fae6a434e4a375ac72bcbc66e00
|
Fix plugin
|
tgp/plugin.py
|
tgp/plugin.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import traceback
import sys
from os.path import exists, join, dirname, abspath
from os import makedirs, environ
from future import standard_library
from qiita_client import QiitaClient, format_payload
from tgp.split_libraries import split_libraries, split_libraries_fastq
from tgp.pick_otus import pick_closed_reference_otus
with standard_library.hooks():
from configparser import ConfigParser
TASK_DICT = {
'Split libraries FASTQ': split_libraries_fastq,
'Split libraries': split_libraries,
'Pick closed-reference OTUs': pick_closed_reference_otus
}
def execute_job(server_url, job_id, output_dir):
"""Starts the plugin and executes the assigned task
Parameters
----------
server_url : str
The url of the server
job_id : str
The job id
output_dir : str
The output directory for job results
Raises
------
RuntimeError
If there is a problem gathering the job information
"""
# Set up the Qiita Client
try:
conf_fp = environ['QP_TARGET_GENE_CONFIG_FP']
except KeyError:
conf_fp = join(dirname(abspath(__file__)), 'support_files',
'config_file.cfg')
config = ConfigParser()
with open(conf_fp, 'U') as conf_file:
config.readfp(conf_file)
qclient = QiitaClient(server_url, config.get('main', 'CLIENT_ID'),
config.get('main', 'CLIENT_SECRET'),
server_cert=config.get('main', 'SERVER_CERT'))
# Request job information
job_info = qclient.get_job_info(job_id)
# Check if we have received the job information so we can start it
if job_info and job_info['success']:
# Starting the heartbeat
qclient.start_heartbeat(job_id)
# Execute the given task
task_name = job_info['command']
task = TASK_DICT[task_name]
if not exists(output_dir):
makedirs(output_dir)
try:
payload = task(qclient, job_id, job_info['parameters'],
output_dir)
except Exception:
exc_str = repr(traceback.format_exception(*sys.exc_info()))
error_msg = ("Error executing %s:\n%s" % (task_name, exc_str))
payload = format_payload(False, error_msg=error_msg)
# The job completed
qclient.complete_job(job_id, payload)
else:
raise RuntimeError("Can't get job (%s) information" % job_id)
|
Python
| 0.000001
|
@@ -533,24 +533,8 @@
ient
-, format_payload
%0A%0Afr
@@ -1270,96 +1270,13 @@
-try:%0A conf_fp = environ%5B'QP_TARGET_GENE_CONFIG_FP'%5D%0A except KeyError:%0A
+dflt_
conf
@@ -1354,16 +1354,17 @@
+
'config_
@@ -1374,16 +1374,83 @@
e.cfg')%0A
+ conf_fp = environ.get('QP_TARGET_GENE_CONFIG_FP', dflt_conf_fp)
%0A con
@@ -1545,16 +1545,17 @@
f_file)%0A
+%0A
qcli
@@ -1787,92 +1787,50 @@
tion
-%0A job_info = qclient.get_job_info(job_id)%0A # Check if we have received
+. If there is a problem retrieving
the job
inf
@@ -1817,32 +1817,38 @@
trieving the job
+%0A #
information so
@@ -1847,34 +1847,53 @@
tion
- so we can start it%0A
+, the QiitaClient already raises an error%0A
-if
job
@@ -1902,41 +1902,43 @@
nfo
-and job_info%5B'success'%5D:%0A
+= qclient.get_job_info(job_id)%0A
-
# St
@@ -1958,20 +1958,16 @@
artbeat%0A
-
qcli
@@ -1998,20 +1998,16 @@
id)%0A
-
-
# Execut
@@ -2023,20 +2023,16 @@
en task%0A
-
task
@@ -2063,20 +2063,16 @@
d'%5D%0A
-
-
task = T
@@ -2096,20 +2096,16 @@
e%5D%0A%0A
-
-
if not e
@@ -2131,20 +2131,16 @@
-
-
makedirs
@@ -2148,28 +2148,24 @@
output_dir)%0A
-
try:%0A
@@ -2173,27 +2173,63 @@
- payload = task(
+success, artifacts_info, error_msg = task(%0A
qcli
@@ -2268,35 +2268,8 @@
s'%5D,
-%0A
out
@@ -2277,20 +2277,16 @@
ut_dir)%0A
-
exce
@@ -2307,20 +2307,16 @@
-
-
exc_str
@@ -2367,20 +2367,16 @@
nfo()))%0A
-
@@ -2450,69 +2450,24 @@
- payload = format_payload(False, error_msg=error_msg)%0A
+success = False%0A
@@ -2486,20 +2486,16 @@
mpleted%0A
-
qcli
@@ -2523,93 +2523,90 @@
id,
-payload)%0A else:%0A raise RuntimeError(%22Can't get job (%25s) information%22 %25 job_id
+success, error_msg=error_msg,%0A artifacts_info=artifacts_info
)%0A
|
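The diff above replaces a try/except KeyError around os.environ[...] with environ.get() plus an explicit default path, which reads as a single expression. The pattern in isolation (paths and variable name copied from the record):

from os import environ
from os.path import abspath, dirname, join

dflt_conf_fp = join(dirname(abspath(__file__)), 'support_files',
                    'config_file.cfg')
conf_fp = environ.get('QP_TARGET_GENE_CONFIG_FP', dflt_conf_fp)
print(conf_fp)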
fe81916434e6aa04d9672589cb75fde3c676e19f
|
Fix revision chain
|
src/ggrc/migrations/versions/20151216132037_5410607088f9_delete_background_tasks.py
|
src/ggrc/migrations/versions/20151216132037_5410607088f9_delete_background_tasks.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Delete background tasks
Revision ID: 5410607088f9
Revises: 504f541411a5
Create Date: 2015-12-16 13:20:37.341342
"""
# pylint: disable=C0103,E1101
from alembic import op
# revision identifiers, used by Alembic.
revision = '5410607088f9'
down_revision = '504f541411a5'
def upgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
def downgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
|
Python
| 0.000003
|
@@ -296,28 +296,28 @@
evises:
-504f541411a5
+1ef8f4f504ae
%0ACreate
@@ -498,20 +498,20 @@
= '
-504f541411a5
+1ef8f4f504ae
'%0A%0A%0A
|
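The fix above only repoints down_revision: Alembic migrations form a linked list in which each revision's down_revision must name its parent's revision, and a wrong value breaks the chain. A sketch of the invariant (the child file is shown in comments since each migration lives in its own module):

# parent migration file
revision = '1ef8f4f504ae'
down_revision = None  # root of the chain

# a child migration (in its own file) must point back at the parent:
# revision = '5410607088f9'
# down_revision = '1ef8f4f504ae'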
701aee990300c777417777ca963f068840532e03
|
Add function to delete docker containers for a project
|
api/library/dockerhelper.py
|
api/library/dockerhelper.py
|
import logging
# config
from config import shared_config
from docker import Client
logger = logging.getLogger(shared_config.api_log_root_name + __name__)
"""
Call get_client first, and then pass it to the other functions
"""
def get_client(host_url):
""" Returns a docker api client """
c = Client(base_url=host_url)
# TODO: Test connection
return c
def create_container(docker_client, image_name, container_name, env=None, links=None, ports=None, volumes=None, volumes_from=None, command=None):
# Links can be specified with the links argument. They can either be specified as a dictionary mapping name to alias or as a list of (name, alias) tuples.
logger.debug("Creating a new container with: %r" % locals())
container = docker_client.create_container(image=image_name, environment=env, name=container_name, command=command)
if volumes is not None:
vol_binding = {}
for vol in volumes:
vol_binding[volumes[vol]] = {
'bind': vol,
'ro': False
}
else:
vol_binding = None
response = docker_client.start(container=container.get('Id'), links=links, binds=vol_binding, volumes_from=volumes_from)
# Return the container id
return container.get('Id')
def create_mysql(docker_client, container_name, db_name, db_user, db_pass=None, db_sql=None):
env = {
"MYSQL_USER": db_user,
"MYSQL_PASS": db_pass if db_pass is not None else "**Random**",
}
return create_container(docker_client, "mysql:latest", container_name, env)
def create_nginx(docker_client, container_name, links=None):
# Links can be specified with the links argument. They can either be specified as a dictionary mapping name to alias or as a list of (name, alias) tuples.
return create_container(docker_client=docker_client, image_name="debian", container_name=container_name, env=None, links=links)
def create_storage(docker_client, container_name, storage_url):
if 'local://' in storage_url:
# Only local storage is supported right now. Path has to exist on the docker host machine
local_path = storage_url.replace('local://', '')
return create_container(docker_client=docker_client, container_name=container_name, image_name="debian", volumes={'/data': local_path}, command="/bin/bash -c 'while /bin/true; do sleep 300;done'")
else:
raise RuntimeError("%s not supported. Use local://" % storage_url)
def create_apache(docker_client, container_name, volumes_from, links):
return create_container(docker_client=docker_client, container_name=container_name, image_name="apache", volumes_from=volumes_from, links=links)
def create_phpfpm(docker_client, container_name, volumes_from, links):
return create_container(docker_client=docker_client, image_name="php-fpm", container_name=container_name, volumes_from=[volumes_from], links=links)
def get_ip(docker_client, container_id):
return docker_client.inspect_container(container=container_id)['NetworkSettings']['IPAddress']
def create_containers_from_proj(docker_client, project_name, project_containers):
prefix = project_name.strip().replace(' ', '_').lower() + "_"
# TODO: Get a list of existing containers for project
# TODO: Error handling
for container in project_containers:
clean_name = container['name'].strip().replace(' ', '_').lower()
container_name = "%s%s" % (prefix, clean_name)
logger.debug("Checking if we need to create container_name: %s container_type: %s" % (container_name, container['type']))
if 'link' in container:
links = []
for link in container['link']:
links.append(("%s%s" % (prefix, link.strip().replace(" ", '').lower()), link))
else:
links = None
if 'volumes_from' in container:
volumes_from = "%s%s" % (prefix, container['volumes_from'].strip().replace(" ", '').lower())
else:
volumes_from = None
if container['type'] == "storage":
result = create_storage(docker_client=docker_client, container_name=container_name, storage_url=container['data_source'])
elif container['type'] == "nginx":
result = create_nginx(docker_client=docker_client, container_name=container_name, links=links)
elif container['type'] == "apache":
result = create_apache(docker_client=docker_client, container_name=container_name, links=links, volumes_from=volumes_from)
elif container['type'] == "mysql":
result = create_mysql(docker_client=docker_client, container_name=container_name, db_name=container['mysql_name'], db_user=container['mysql_user'], db_pass=container['mysql_pass'], db_sql=container['mysql_sql'])
elif container['type'] == "php":
result = create_phpfpm(docker_client=docker_client, container_name=container_name, volumes_from=volumes_from, links=links)
return True
|
Python
| 0.000001
|
@@ -5033,8 +5033,555 @@
rn True%0A
+%0Adef delete_all_containers_from_proj(docker_client, project_name):%0A %22%22%22 Looks for any vms that begin with the prefix of the project name. TODO: Do something else. %22%22%22%0A containers = docker_client.containers(all=True)%0A prefix = project_name.strip().replace(' ', '_').lower() + %22_%22 %0A for c in containers:%0A for container_name in c%5B'Names'%5D:%0A if prefix in container_name:%0A logger.info(%22Deleting container with name: %25s%22 %25 container_name)%0A c.remove_container(container=c%5B'Id'%5D, force=True)%0A
|
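Note that the function added in the diff above calls c.remove_container(...) on the listing dict, which would raise AttributeError; in the pre-2.0 docker-py Client API used throughout this file, remove_container lives on the client. A hedged sketch of the corrected cleanup:

from docker import Client  # pre-2.0 docker-py, as used in the record above

def delete_project_containers(docker_client, project_name):
    """Remove every container whose name carries the project prefix."""
    prefix = project_name.strip().replace(' ', '_').lower() + '_'
    for c in docker_client.containers(all=True):
        # 'Names' entries look like '/myproject_web'
        if any(prefix in name for name in c['Names']):
            docker_client.remove_container(container=c['Id'], force=True)

# client = Client(base_url='unix://var/run/docker.sock')
# delete_project_containers(client, 'My Project')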
0ce840cf43b06fc810bbe45d2aed5fcc591be87c
|
Add ShortenedUrl class
|
url_shortener.py
|
url_shortener.py
|
# -*- coding: utf-8 -*-
import os
from bisect import bisect_left
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import types
class SlugValueError(ValueError):
'''The value of slug is incorrect '''
class Slug(object):
''' An identifier for a shortened url
It has two values used as its representations: a string value
and an integer value, used in short urls and in the database,
respectively.
:var CHARS: string containing characters allowed to be used
in a slug. The characters are used as digits of a numerical system
used to convert between the string and integer representations.
:var BASE: a base of numeral system used to convert between
the string and integer representations.
'''
CHARS = '0123456789abcdefghijkmnopqrstuvwxyz'
BASE = len(CHARS)
def __init__(self, integer=None, string=None):
''' Initialize new instance
:param integer: a value representing the slug as an integer.
It can not be None while string is None. If it is None, a
corresponding property of the object will be based on
the string parameter
:param string: a value representing the slug as a string.
It can not be None while integer is None, and it has to consist
only of characters specified by the CHARS class property.
If it is None, a value of corresponding property of the object
will be based on the integer parameter
:raises SlugValueError: if the slug contains characters that are not
in self.CHARS property, or if both string and integer params
are None
'''
if string is not None:
forbidden = [d for d in string if d not in self.CHARS]
if forbidden:
msg_tpl = "The slug '{}' contains forbidden characters: '{}'"
raise SlugValueError(msg_tpl.format(string, forbidden))
elif integer is None:
raise SlugValueError(
'The string and integer arguments cannot both be None'
)
self._string = string
self.integer = integer
if integer is None:
value = 0
for exponent, char in enumerate(reversed(string)):
digit_value = bisect_left(self.CHARS, char)
value += digit_value*self.BASE**exponent
self.integer = value
def __str__(self):
''' Get string representation of the slug
:returns: a string representing value of the slug as a numeral
of base specified for the class. If the object has been
initialized with integer as its only representation,
the numeral will be derived from it using the base.
'''
if self._string is None:
value = ''
integer = self.integer
while True:
integer, remainder = divmod(integer, self.BASE)
value = self.CHARS[remainder] + value
if integer == 0:
break
self._string = value
return self._string
class IntegerSlug(types.TypeDecorator):
''' Converts between database integers and
instances of Slug
'''
impl = types.Integer
def process_bind_param(self, value, dialect):
return value.integer
process_literal_param = process_bind_param
def process_result_value(self, value, dialect):
return Slug(integer=value)
app = Flask(__name__)
DATABASE_URI_NAME = 'URL_SHORTENER_DATABASE_URI'
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ[DATABASE_URI_NAME]
db = SQLAlchemy(app)
if __name__ == '__main__':
app.run()
|
Python
| 0
|
@@ -3617,16 +3617,745 @@
(app)%0A%0A%0A
+class ShortenedUrl(db.Model):%0A ''' Represents a url for which a short alias has been created%0A%0A :var slug: a value representing a registered url in short urls and%0A in database%0A '''%0A slug = db.Column(IntegerSlug, primary_key=True)%0A target = db.Column(db.String(2083), unique=True)%0A redirect = db.Column(db.Boolean(), default=True)%0A%0A def __init__(self, target, redirect=True):%0A ''' Constructor%0A%0A :param target: url represented by the instance%0A :param redirect: True if automatic redirection should be%0A performed when handling http requests for this url%0A '''%0A self.target = target%0A self.redirect = redirect%0A%0A def __str__(self):%0A return self.target%0A%0A%0A
if __nam
|
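The Slug class above treats CHARS as the digits of a base-35 numeral system (35, not 36, because the ambiguous 'l' is omitted), and bisect_left works as a digit lookup only because CHARS is sorted. A standalone sketch of the two conversions:

from bisect import bisect_left

CHARS = '0123456789abcdefghijkmnopqrstuvwxyz'  # sorted; no 'l', so base 35
BASE = len(CHARS)

def to_int(string):
    value = 0
    for exponent, char in enumerate(reversed(string)):
        value += bisect_left(CHARS, char) * BASE ** exponent
    return value

def to_str(integer):
    value = ''
    while True:
        integer, remainder = divmod(integer, BASE)
        value = CHARS[remainder] + value
        if integer == 0:
            return value

assert to_int('10') == BASE
assert to_str(to_int('cabin')) == 'cabin'
print(to_int('zz'), to_str(to_int('zz')))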
07bda8adbeb798dfd100b63a784e14a00cf33927
|
add new views to urls
|
urlsaver/urls.py
|
urlsaver/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.main_view, name='main'),
]
|
Python
| 0
|
@@ -110,10 +110,181 @@
main'),%0A
+ url(r'%5Eregister/', views.register_view, name='register'),%0A url(r'%5Elogin/', views.login_view, name='login'),%0A url(r'%5Elogout/', views.logout_view, name='logout'),%0A
%5D%0A
|
c8fd797319b7ce79a420942535e08fd291e36491
|
Fix Python 2 issue in "fill" image operation
|
wagtail/wagtailimages/image_operations.py
|
wagtail/wagtailimages/image_operations.py
|
from __future__ import division
import inspect
import math
from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
class Operation(object):
def __init__(self, method, *args):
self.method = method
self.args = args
# Check arguments
try:
inspect.getcallargs(self.construct, *args)
except TypeError as e:
raise InvalidFilterSpecError(e)
# Call construct
try:
self.construct(*args)
except ValueError as e:
raise InvalidFilterSpecError(e)
def construct(self, *args):
raise NotImplementedError
def run(self, willow, image):
raise NotImplementedError
class DoNothingOperation(Operation):
def construct(self):
pass
def run(self, willow, image):
pass
class FillOperation(Operation):
vary_fields = ('focal_point_width', 'focal_point_height', 'focal_point_x', 'focal_point_y')
def construct(self, size, *extra):
# Get width and height
width_str, height_str = size.split('x')
self.width = int(width_str)
self.height = int(height_str)
# Crop closeness
self.crop_closeness = 0
for extra_part in extra:
if extra_part.startswith('c'):
self.crop_closeness = int(extra_part[1:])
else:
raise ValueError("Unrecognised filter spec part: %s" % extra_part)
# Divide it by 100 (as it's a percentage)
self.crop_closeness /= 100
# Clamp it
if self.crop_closeness > 1:
self.crop_closeness = 1
def run(self, willow, image):
image_width, image_height = willow.get_size()
focal_point = image.get_focal_point()
# Get crop aspect ratio
crop_aspect_ratio = self.width / self.height
# Get crop max
crop_max_scale = min(image_width, image_height * crop_aspect_ratio)
crop_max_width = crop_max_scale
crop_max_height = crop_max_scale / crop_aspect_ratio
# Initialise crop width and height to max
crop_width = crop_max_width
crop_height = crop_max_height
# Use crop closeness to zoom in
if focal_point is not None:
# Get crop min
crop_min_scale = max(focal_point.width, focal_point.height * crop_aspect_ratio)
crop_min_width = crop_min_scale
crop_min_height = crop_min_scale / crop_aspect_ratio
# Sometimes, the focal point may be bigger than the image...
if not crop_min_scale >= crop_max_scale:
# Calculate max crop closeness to prevent upscaling
max_crop_closeness = max(
1 - (self.width - crop_min_width) / (crop_max_width - crop_min_width),
1 - (self.height - crop_min_height) / (crop_max_height - crop_min_height)
)
# Apply max crop closeness
crop_closeness = min(self.crop_closeness, max_crop_closeness)
if 1 >= crop_closeness >= 0:
# Get crop width and height
crop_width = crop_max_width + (crop_min_width - crop_max_width) * crop_closeness
crop_height = crop_max_height + (crop_min_height - crop_max_height) * crop_closeness
# Find focal point UV
if focal_point is not None:
fp_x, fp_y = focal_point.centroid
else:
# Fall back to positioning in the centre
fp_x = image_width / 2
fp_y = image_height / 2
fp_u = fp_x / image_width
fp_v = fp_y / image_height
# Position crop box based on focal point UV
crop_x = fp_x - (fp_u - 0.5) * crop_width
crop_y = fp_y - (fp_v - 0.5) * crop_height
# Convert crop box into rect
left = crop_x - crop_width / 2
top = crop_y - crop_height / 2
right = crop_x + crop_width / 2
bottom = crop_y + crop_height / 2
# Make sure the entire focal point is in the crop box
if focal_point is not None:
if left > focal_point.left:
right -= left - focal_point.left
left = focal_point.left
if top > focal_point.top:
bottom -= top - focal_point.top
top = focal_point.top
if right < focal_point.right:
left += focal_point.right - right
right = focal_point.right
if bottom < focal_point.bottom:
top += focal_point.bottom - bottom
bottom = focal_point.bottom
# Don't allow the crop box to go over the image boundary
if left < 0:
right -= left
left = 0
if top < 0:
bottom -= top
top = 0
if right > image_width:
left -= right - image_width
right = image_width
if bottom > image_height:
top -= bottom - image_height
bottom = image_height
# Crop!
willow.crop((math.floor(left), math.floor(top), math.ceil(right), math.ceil(bottom)))
# Get scale for resizing
# The scale should be the same for both the horizontal and
# vertical axes
aftercrop_width, aftercrop_height = willow.get_size()
scale = self.width / aftercrop_width
# Only resize if the image is too big
if scale < 1.0:
# Resize!
willow.resize((self.width, self.height))
class MinMaxOperation(Operation):
def construct(self, size):
# Get width and height
width_str, height_str = size.split('x')
self.width = int(width_str)
self.height = int(height_str)
def run(self, willow, image):
image_width, image_height = willow.get_size()
horz_scale = self.width / image_width
vert_scale = self.height / image_height
if self.method == 'min':
if image_width <= self.width or image_height <= self.height:
return
if horz_scale > vert_scale:
width = self.width
height = int(image_height * horz_scale)
else:
width = int(image_width * vert_scale)
height = self.height
elif self.method == 'max':
if image_width <= self.width and image_height <= self.height:
return
if horz_scale < vert_scale:
width = self.width
height = int(image_height * horz_scale)
else:
width = int(image_width * vert_scale)
height = self.height
else:
# Unknown method
return
willow.resize((width, height))
class WidthHeightOperation(Operation):
def construct(self, size):
self.size = int(size)
def run(self, willow, image):
image_width, image_height = willow.get_size()
if self.method == 'width':
if image_width <= self.size:
return
scale = self.size / image_width
width = self.size
height = int(image_height * scale)
elif self.method == 'height':
if image_height <= self.size:
return
scale = self.size / image_height
width = int(image_width * scale)
height = self.size
else:
# Unknown method
return
willow.resize((width, height))
|
Python
| 0.00004
|
@@ -5090,61 +5090,129 @@
op((
-math.floor(left), math.floor(top), math.ceil(right),
+%0A int(math.floor(left)),%0A int(math.floor(top)),%0A int(math.ceil(right)),%0A int(
math
@@ -5225,16 +5225,26 @@
bottom))
+%0A )
)%0A%0A
|
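In FillOperation above, crop_closeness linearly interpolates the crop box between the widest crop with the target aspect ratio (closeness 0) and the tightest crop around the focal point (closeness 1). A worked arithmetic sketch with made-up numbers:

# fill to 100x50 (aspect 2.0) from a 400x300 image with an 80x40 focal point
width, height = 100, 50
aspect = width / height                               # 2.0
crop_max_scale = min(400, 300 * aspect)               # 400
crop_max = (crop_max_scale, crop_max_scale / aspect)  # (400.0, 200.0)
crop_min_scale = max(80, 40 * aspect)                 # 80
crop_min = (crop_min_scale, crop_min_scale / aspect)  # (80.0, 40.0)

closeness = 0.5  # halfway between the two crops
crop_w = crop_max[0] + (crop_min[0] - crop_max[0]) * closeness  # 240.0
crop_h = crop_max[1] + (crop_min[1] - crop_max[1]) * closeness  # 120.0
print(crop_w, crop_h)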
38d47061b6c1ea3250b99f7376d7479e970974a5
|
define checkInradius
|
MonteCarlo.py
|
MonteCarlo.py
|
from math import *
N = int(raw_input('Insert your N (random) :: '))
print N
|
Python
| 0.99981
|
@@ -13,16 +13,123 @@
port *%0A%0A
+def checkInradius(x, y):%0A%09z = x**2 + y**2%0A%09z = sqrt(z)%0A%0A%09if z %3C 1.0:%0A%09%09return True%0A%09else:%0A%09%09return False%0A%0A%0A
N = int(
@@ -173,13 +173,4 @@
'))%0A
-%0Aprint N%0A
|
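The checkInradius() helper added above tests whether a point falls inside the unit circle, which is the core of a Monte Carlo estimate of pi: points drawn uniformly from the unit square land in the quarter circle with probability pi/4. A hedged sketch of that use:

import random
from math import sqrt

def checkInradius(x, y):
    return sqrt(x ** 2 + y ** 2) < 1.0

N = 100000
hits = sum(checkInradius(random.random(), random.random()) for _ in range(N))
print('pi ~= %.4f' % (4.0 * hits / N))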
2ff82cd1e34472173cd8631b8e353515d2c38a41
|
Rename get_update_db() into get_wrapupdater()
|
wrapweb/hook.py
|
wrapweb/hook.py
|
# Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
# GitHub secret key support
import hashlib
import hmac
from mesonwrap import wrapupdater
from wrapweb.app import APP
RESTRICTED_PROJECTS = [
'mesonbuild/meson',
'mesonbuild/wrapweb',
'mesonbuild/meson-ci',
]
def get_update_db():
db = getattr(flask.g, "_update_database", None)
if db is None:
dbdir = APP.config['DB_DIRECTORY']
db = flask.g._update_database = wrapupdater.WrapUpdater(dbdir)
return db
def json_ok():
jsonout = flask.jsonify({'output': 'ok'})
jsonout.status_code = 200
return jsonout
def json_error(code, message):
jsonout = flask.jsonify({'output': 'notok', 'error': message})
jsonout.status_code = code
return jsonout
@APP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return json_error(401, 'Not a GitHub hook')
signature = ('sha1=%s' %
hmac.new(APP.config['SECRET_KEY'].encode('utf-8'),
flask.request.data, hashlib.sha1).hexdigest())
if headers.get('X-Hub-Signature') != signature:
return json_error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return json_error(405, 'Not a Pull Request hook')
d = flask.request.get_json()
base = d['pull_request']['base']
if not base['repo']['full_name'].startswith('mesonbuild/'):
return json_error(406, 'Not a mesonbuild project')
if base['repo']['full_name'] in RESTRICTED_PROJECTS:
return json_error(406, "We don't run hook for "
"restricted project names")
if d['action'] == 'closed' and d['pull_request']['merged']:
project = base['repo']['name']
branch = base['ref']
repo_url = base['repo']['clone_url']
if branch == 'master':
return json_error(406, 'No bananas for you')
db_updater = get_update_db()
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
db_updater.update_db(project, repo_url, branch)
return json_ok()
except Exception as e:
return json_error(500, 'Wrap generation failed. %s' % e)
else:
APP.logger.warning(flask.request.data)
return json_error(417, 'We got hook which is not merged pull request')
|
Python
| 0.004751
|
@@ -830,25 +830,27 @@
def get_
+wrap
update
-_db
+r
():%0A
@@ -873,31 +873,27 @@
sk.g, %22_
+wrap
update
-_database
+r
%22, None)
@@ -981,23 +981,19 @@
.g._
+wrap
update
-_database
+r
= w
@@ -2569,25 +2569,27 @@
r = get_
+wrap
update
-_db
+r
()%0A
|
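get_wrapupdater() above follows the standard Flask idiom of building an expensive resource once per application context and caching it on flask.g. The same shape with a sqlite3 connection standing in for the WrapUpdater (names are illustrative):

import sqlite3

import flask

app = flask.Flask(__name__)

def get_db():
    db = getattr(flask.g, '_database', None)
    if db is None:
        db = flask.g._database = sqlite3.connect(':memory:')
    return db

@app.teardown_appcontext
def close_db(exc):
    db = getattr(flask.g, '_database', None)
    if db is not None:
        db.close()

with app.app_context():
    print(get_db().execute('select 1').fetchone())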
e97755dffbc834853f3f46a8233a295671b53f5d
|
Disable pylint broad-except in wrapweb.hook
|
wrapweb/hook.py
|
wrapweb/hook.py
|
# Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib # GitHub secret key support
import hmac
import flask
from mesonwrap import inventory
from mesonwrap import wrapupdater
from wrapweb import flaskutil
from wrapweb import jsonstatus
BP = flask.Blueprint('hook', __name__)
@flaskutil.appcontext_var(BP)
def _wrapupdater():
dbdir = flask.current_app.config['DB_DIRECTORY']
return wrapupdater.WrapUpdater(dbdir)
@_wrapupdater.teardown
def _close_connection(db):
db.close()
def update_project(project, repo_url, branch):
if branch == 'master':
return jsonstatus.error(406, 'Will not update master branch')
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
_wrapupdater().update_db(project, repo_url, branch)
return jsonstatus.ok()
except Exception as e:
return jsonstatus.error(500, 'Wrap generation failed. %s' % e)
def check_allowed_project(full_repo_name):
if not inventory.is_wrap_full_project_name(full_repo_name):
raise jsonstatus.WrapWebError(406, 'Not a mesonwrap project')
def github_pull_request():
d = flask.request.get_json()
base = d['pull_request']['base']
check_allowed_project(base['repo']['full_name'])
if d['action'] != 'closed' or not d['pull_request']['merged']:
flask.current_app.logger.warning(flask.request.data)
return jsonstatus.error(
417, 'We got hook which is not merged pull request')
return update_project(project=base['repo']['name'],
repo_url=base['repo']['clone_url'],
branch=base['ref'])
@BP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return jsonstatus.error(401, 'Not a GitHub hook')
secret_key = flask.current_app.config['SECRET_KEY'].encode('utf-8')
digest = hmac.new(secret_key, flask.request.data, hashlib.sha1).hexdigest()
signature = 'sha1=%s' % digest
if headers.get('X-Hub-Signature') != signature:
return jsonstatus.error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return jsonstatus.error(405, 'Not a Pull Request hook')
return github_pull_request()
|
Python
| 0
|
@@ -1417,16 +1417,48 @@
on as e:
+ # pylint: disable=broad-except
%0A
|
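github_hook() above authenticates the webhook by recomputing the sha1 HMAC of the raw request body and comparing it to the X-Hub-Signature header. The check in isolation (the record compares with !=; hmac.compare_digest is the constant-time stdlib alternative shown here):

import hashlib
import hmac

def valid_signature(secret_key, body, header_value):
    digest = hmac.new(secret_key, body, hashlib.sha1).hexdigest()
    return hmac.compare_digest('sha1=%s' % digest, header_value)

secret = b'my-secret'                 # illustrative values
body = b'{"action": "closed"}'
sig = 'sha1=%s' % hmac.new(secret, body, hashlib.sha1).hexdigest()
assert valid_signature(secret, body, sig)
assert not valid_signature(secret, body, 'sha1=' + '0' * 40)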
7b717c778f02e642c564e9afaf406b0cc4f399ac
|
Move EE Initialize
|
geebam/geebam.py
|
geebam/geebam.py
|
#! /usr/bin/env python
import argparse
import json
import logging
import logging.config
import os
import sys
import ee
import batch_uploader
def setup_logging(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
def delete_collection(id):
logging.info('Attempting to delete collection %s', id)
if 'users' not in id:
root_path_in_gee = ee.data.getAssetRoots()[0]['id']
id = root_path_in_gee + '/' + id
params = {'id': id}
items_in_collection = ee.data.getList(params)
for item in items_in_collection:
ee.data.deleteAsset(item['id'])
ee.data.deleteAsset(id)
logging.info('Collection %s removed', id)
def cancel_all_running_tasks():
logging.info('Attempting to cancel all running tasks')
running_tasks = [task for task in ee.data.getTaskList() if task['state'] == 'RUNNING']
for task in running_tasks:
ee.data.cancelTask(task['id'])
logging.info('Cancel all request completed')
def get_filename_from_path(path):
return os.path.splitext(os.path.basename(os.path.normpath(path)))[0]
def cancel_all_running_tasks_from_parser(args):
cancel_all_running_tasks()
def delete_collection_from_parser(args):
delete_collection(args.id)
def upload_from_parser(args):
batch_uploader.upload(user=args.user,
path_for_upload=args.directory,
metadata_path=args.metadata,
collection_name=args.collection or get_filename_from_path(args.directory))
def main(args=None):
setup_logging(path=os.path.join(os.path.dirname(__file__), 'logconfig.json'))
ee.Initialize()
parser = argparse.ArgumentParser(description='Google Earth Engine Batch Asset Manager', prog='GEE asset manager')
subparsers = parser.add_subparsers()
parser_delete = subparsers.add_parser('delete', help='Deletes collection and all items inside.')
parser_delete.add_argument('id', help='ID of the collection, either fully qualified or abbreviated (no need to pass users/username).')
parser_delete.set_defaults(func=delete_collection_from_parser)
parser_upload = subparsers.add_parser('upload', help='Batch Asset Uploader.')
required_named = parser_upload.add_argument_group('Required named arguments.')
required_named.add_argument('-u', '--user', help='Google account name (gmail address).', required=True)
required_named.add_argument('-d', '--directory', help='Path to the directory with images.', required=True)
optional_named = parser_upload.add_argument_group('Optional named arguments')
optional_named.add_argument('-m', '--metadata', help='Path to CSV with metadata.')
optional_named.add_argument('-c', '--collection', help='Name of the collection to create. If not provided, '
'directory name will be used.')
parser_upload.set_defaults(func=upload_from_parser)
parser_cancel = subparsers.add_parser('cancel', help='Cancel all running tasks')
parser_cancel.set_defaults(func=cancel_all_running_tasks_from_parser)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -1672,28 +1672,8 @@
'))%0A
- ee.Initialize()%0A
@@ -1762,34 +1762,8 @@
ger'
-, prog='GEE asset manager'
)%0A%0A
@@ -3111,24 +3111,44 @@
arse_args()%0A
+ ee.Initialize()%0A
args.fun
|
33c1db03e6b52d73ee6571f3f645f1b8d01e9a25
|
Comment to clarify the use of a custom field source
|
snippets/serializers.py
|
snippets/serializers.py
|
from django.forms import widgets
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
# Add a field to display a list of related snippets.
snippets = serializers.PrimaryKeyRelatedField(many=True)
class Meta:
model = User
fields = ('id', 'username', 'snippets')
#class SnippetSerializer(serializers.Serializer):
# pk = serializers.Field() # Note: `Field` is an untyped read-only field.
# title = serializers.CharField(required=False,
# max_length=100)
# code = serializers.CharField(widget=widgets.Textarea,
# max_length=100000)
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES,
# default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES,
# default='friendly')
#
# def restore_object(self, attrs, instance=None):
# """
# Create or update a new snippet instance, given a dictionary
# of deserialized field values.
#
# Note that if we don't define this method, then deserializing
# data will simply return a dictionary of items.
# """
# if instance:
# # Update existing instance
# instance.title = attrs.get('title', instance.title)
# instance.code = attrs.get('code', instance.code)
# instance.linenos = attrs.get('linenos', instance.linenos)
# instance.language = attrs.get('language', instance.language)
# instance.style = attrs.get('style', instance.style)
# return instance
#
# # Create new instance
# return Snippet(**attrs)
class SnippetSerializer(serializers.ModelSerializer):
owner = serializers.Field(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
|
Python
| 0
|
@@ -1938,24 +1938,146 @@
erializer):%0A
+ # To make it more user-friendly, let's use the username instead of the default pk. This is%0A # optional, obviously.%0A
owner =
|
dd9e53cbe02c0652cca35cde6d859512de4f9e44
|
fix user_detail pipeline issue
|
social/pipeline/user.py
|
social/pipeline/user.py
|
from uuid import uuid4
from social.utils import slugify, module_member
USER_FIELDS = ['username', 'email']
def get_username(strategy, details, user=None, *args, **kwargs):
if 'username' not in strategy.setting('USER_FIELDS', USER_FIELDS):
return
storage = strategy.storage
if not user:
email_as_username = strategy.setting('USERNAME_IS_FULL_EMAIL', False)
uuid_length = strategy.setting('UUID_LENGTH', 16)
max_length = storage.user.username_max_length()
do_slugify = strategy.setting('SLUGIFY_USERNAMES', False)
do_clean = strategy.setting('CLEAN_USERNAMES', True)
if do_clean:
clean_func = storage.user.clean_username
else:
clean_func = lambda val: val
if do_slugify:
override_slug = strategy.setting('SLUGIFY_FUNCTION')
if override_slug:
slug_func = module_member(override_slug)
else:
slug_func = slugify
else:
slug_func = lambda val: val
if email_as_username and details.get('email'):
username = details['email']
elif details.get('username'):
username = details['username']
else:
username = uuid4().hex
short_username = username[:max_length - uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
# Generate a unique username for current user using username
# as base but adding a unique hash at the end. Original
# username is cut to avoid any field max_length.
# The final_username may be empty and will skip the loop.
while not final_username or \
storage.user.user_exists(username=final_username):
username = short_username + uuid4().hex[:uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
else:
final_username = storage.user.get_username(user)
return {'username': final_username}
def create_user(strategy, details, user=None, *args, **kwargs):
if user:
return {'is_new': False}
fields = dict((name, kwargs.get(name) or details.get(name))
for name in strategy.setting('USER_FIELDS',
USER_FIELDS))
if not fields:
return
return {
'is_new': True,
'user': strategy.create_user(**fields)
}
def user_details(strategy, details, user=None, *args, **kwargs):
"""Update user details using data from provider."""
if user:
changed = False # flag to track changes
protected = ('username', 'id', 'pk', 'email') + \
tuple(strategy.setting('PROTECTED_USER_FIELDS', []))
# Update user model attributes with the new data sent by the current
# provider. Update on some attributes is disabled by default, for
# example username and id fields. It's also possible to disable update
# on fields defined in SOCIAL_AUTH_PROTECTED_FIELDS.
for name, value in details.items():
if value and hasattr(user, name):
current_value = getattr(user, name, None)
if current_value is None or name not in protected:
changed |= current_value != value
setattr(user, name, value)
if changed:
strategy.storage.user.changed(user)
|
Python
| 0
|
@@ -3195,16 +3195,20 @@
if
+not
current_
@@ -3217,16 +3217,8 @@
lue
-is None
or n
|
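The one-character fix above swaps `current_value is None` for `not current_value`, so that empty strings (common for blank model fields) are also treated as unset and may be overwritten by provider data:

current_value = ''             # e.g. a blank first_name on the user model
print(current_value is None)   # False: the old guard refused the update
print(not current_value)       # True: the fixed guard allows it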
e8542d56aa8d81198351c7354fdbbba2ab7ca455
|
Fix portability warning with recent versions of bs4
|
cabin-porn-it.py
|
cabin-porn-it.py
|
#!/usr/bin/env python
'''
Sets the current screen to a cabin from CabinPorn.com.
Default is set to the most recent picture, which it downloads and puts into a
folder in the current directory.
Optional arguments:
1. '-r', choose a random image from the current page
2. '-i', choose a specific image by the page id
3. '-l', limit files to images larger than 1024x768
4. '-p PATH', choose a directory for the photos to be put into
Based originally on code to set desktop pictures for all screens from:
https://github.com/grahamgilbert/macscripts/tree/master/set_desktops
'''
from bs4 import BeautifulSoup
from AppKit import NSWorkspace, NSScreen
from Foundation import NSURL
from optparse import OptionParser
import requests, glob, random, re, urllib, os, fnmatch, sys
from os import listdir
from os.path import isfile, join
# Set the options
parser = OptionParser()
parser.add_option("-p", "--path", dest="base_dir",
help="write cabins to PATH", metavar="PATH")
parser.add_option("-r", "--random", action="store_true",
dest="random_cabin", default=False,
help="pick a random cabin")
parser.add_option("-i", "--image", dest="image",
type="int", default=0,
help="pick a specific cabin, by image id")
parser.add_option("-l", "--large-only", dest="large_only",
help="only use large images", default=False,
action="store_true")
parser.add_option("-o", "--offline", dest="offline",
help="use offline", default=False,
action="store_true")
(options, args) = parser.parse_args()
# Create a directory for image storage
if options.base_dir:
base_dir = options.base_dir
if base_dir[-1] != "/":
base_dir = base_dir + "/"
# If the base directory is not set, we use ./Cabins (next to this script) instead
else:
base_dir = os.path.dirname(os.path.realpath(__file__)) + "/Cabins/"
if not os.path.exists(base_dir):
os.makedirs(base_dir)
# Set the root URL
url = "http://cabinporn.com"
if options.image:
url = url + "/post/" + str(options.image)
# Grab the html
r = requests.get(url)
if r.status_code == 200:
data = r.text
soup = BeautifulSoup(data)
# Get a list of all of the images of cabins
cabins = []
for post in soup.find_all("li", "post"):
for photo in post.find_all("div", "photo_div") + post.find_all("div", "photoset_div"):
image = photo.find("img")
image_src = str(image.get("src"))
if re.search("jpg|png", image_src):
link_str = str(image.get("title"))
if link_str == "None":
link_str = post.find("div", "fb-like").get("data-href")
cabins.append({ "src" : image_src, "link": link_str })
# Choose one of the pictures to download. If random is flagged, pick one from
# the top page. Else, just choose the most recent.
if options.random_cabin:
cabin = random.choice(cabins)
else:
cabin = cabins[0]
# Local filename for the image is the descriptive post link
post_name = re.search('/post/(.+)$', cabin["link"]).group(1)
image_ext = os.path.splitext(cabin["src"])[1]
# eg: 12345-some-cool-place.jpg
image_file = post_name.replace('/', '-') + image_ext
# Previously, images were stored using the raw image name, so if an older image
# exists, just move the old file to the new location
old_file = cabin["src"].split('/')[-1]
if os.path.isfile(base_dir + old_file):
os.rename(base_dir + old_file, base_dir + image_file)
# If the image has not already been downloaded, get it now
if not os.path.isfile(base_dir + image_file):
urllib.urlretrieve(cabin["src"], base_dir + image_file)
elif r.status_code != 200:
print('You are not connected to the internet. Choosing old cabin')
# Select all of the cabins in the folder, and pick a random one
cabins = [ f for f in listdir(base_dir) if isfile(join(base_dir, f)) ]
image_file = random.choice(cabins)
def setFile():
# generate a fileURL for the desktop picture
file_path = NSURL.fileURLWithPath_(base_dir + image_file)
# get shared workspace
ws = NSWorkspace.sharedWorkspace()
# iterate over all screens
for screen in NSScreen.screens():
# tell the workspace to set the desktop picture
(result, error) = ws.setDesktopImageURL_forScreen_options_error_(
file_path, screen, ws.desktopImageOptionsForScreen_(screen), None)
# Check the size of the file
if options.large_only:
from PIL import Image
im = Image.open(base_dir + image_file).size
if im[0] >= 1024 and im[1] >= 768:
print('Image is large:', im)
setFile()
else:
print('Image is too small:', im) # (width,height) tuple
else:
setFile()
|
Python
| 0
|
@@ -2214,16 +2214,31 @@
oup(data
+, %22html.parser%22
)%0A%0A # G
|
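The fix above pins the parser when constructing BeautifulSoup; without the second argument, newer bs4 versions emit a warning and may choose different parsers on different machines, changing behaviour. A tiny sketch with markup shaped like the record's posts:

from bs4 import BeautifulSoup

html = ('<li class="post"><div class="photo_div">'
        '<img src="cabin.jpg" title="/post/1-a-cabin"></div></li>')
soup = BeautifulSoup(html, 'html.parser')  # parser named explicitly
img = soup.find('div', 'photo_div').find('img')
print(img.get('src'), img.get('title'))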
2b553e23791adaa9e333d6f8feded8e95fd348c9
|
Bump version to 0.2.0a0
|
cachy/version.py
|
cachy/version.py
|
# -*- coding: utf-8 -*-
VERSION = '0.1.1'
|
Python
| 0.000001
|
@@ -35,9 +35,11 @@
'0.
-1.1
+2.0a0
'%0A
|
91129eb86958590c2ec5c4fd5529b20fbc0cc8cf
|
Make overdue tasks have a red due date
|
todoman/ui.py
|
todoman/ui.py
|
from datetime import datetime
import urwid
import ansi.colour.fx
import ansi.sequence
from dateutil.tz import tzlocal
class TodoEditor:
"""
The UI for a single todo entry.
"""
def __init__(self, todo, databases, formatter):
"""
:param model.Todo todo: The todo object which will be edited.
"""
self.todo = todo
self.databases = databases
self.formatter = formatter
self.saved = False
if todo.due:
# TODO: use proper date_format
due = formatter.format_date(todo.due)
else:
due = ""
self._summary = urwid.Edit(edit_text=todo.summary)
self._description = urwid.Edit(edit_text=todo.description,
multiline=True)
self._location = urwid.Edit(edit_text=todo.location)
self._due = urwid.Edit(edit_text=due)
self._completed = urwid.CheckBox("", state=todo.is_completed)
self._urgent = urwid.CheckBox("", state=todo.priority != 0)
save_btn = urwid.Button('Save', on_press=self._save)
cancel_btn = urwid.Button('Cancel', on_press=self._cancel)
buttons = urwid.Columns([(10, cancel_btn), (8, save_btn)],
dividechars=2)
pile_items = []
for label, field in [("Summary", self._summary),
("Description", self._description),
("Location", self._location),
("Due", self._due),
("Completed", self._completed),
("Urgent", self._urgent),
]:
label = urwid.Text(label + ":", align='right')
column = urwid.Columns([(13, label), field], dividechars=1)
pile_items.append(('pack', column))
grid = urwid.Pile(pile_items)
spacer = urwid.Divider()
items = [grid, spacer, buttons]
self._ui = urwid.ListBox(items)
def edit(self):
"""
Shows the UI for editing a given todo. Returns True if modifications
were saved.
"""
loop = urwid.MainLoop(self._ui, unhandled_input=self._keypress,
handle_mouse=False)
loop.run()
return self.saved
def _save(self, btn):
self.todo.summary = self.summary
self.todo.description = self.description
self.todo.location = self.location
if self.due:
self.todo.due = self.formatter.unformat_date(self.due)
else:
self.todo.due = None
self.todo.is_completed = self._completed.get_state()
# If it was already non-zero, keep it that way. Let's not overwrite
# values 1 thru 8.
if self._urgent.get_state() and not self.todo.priority:
self.todo.priority = 9
elif not self._urgent.get_state():
self.todo.priority = 0
# TODO: categories
# TODO: comment
# TODO: priority (0: undef. 1: max, 9: min)
# https://tools.ietf.org/html/rfc5545#section-3.8
# geo (lat, lon)
# RESOURCE: the main room
self.saved = True
raise urwid.ExitMainLoop()
def _cancel(self, btn):
raise urwid.ExitMainLoop()
def _keypress(self, key):
if key in ('q', 'Q'):
raise urwid.ExitMainLoop()
@property
def summary(self):
return self._summary.edit_text
@property
def description(self):
return self._description.edit_text
@property
def location(self):
return self._location.edit_text
@property
def due(self):
return self._due.edit_text
class TodoFormatter:
# This one looks good with [X]
compact_format = \
"[{completed}] {urgent} {due} {summary} {list}{percent}"
# compact_format = "{completed} {urgent} {due} {summary}"
def __init__(self, date_format):
self.date_format = date_format
self.empty_date = " " * len(self.format_date(datetime.now()))
self._localtimezone = tzlocal()
def compact(self, todo, database):
"""
Returns a brief representation of a task, suitable for displaying
one per line.
:param Todo todo: The todo component.
"""
# completed = "✓" if todo.percent_complete == 100 else " "
completed = "X" if todo.is_completed else " "
percent = todo.percent_complete or ''
if percent:
percent = " ({}%)".format(percent)
urgent = " " if todo.priority in [None, 0] else "!"
due = self.format_date(todo.due)
summary = todo.summary
list = self.format_database(database)
return self.compact_format.format(completed=completed, urgent=urgent,
due=due, summary=summary, list=list,
percent=percent)
def detailed(self, todo, database):
"""
Returns a detailed representation of a task.
:param Todo todo: The todo component.
"""
rv = self.compact(todo, database)
if todo.description:
rv = "{}\n\n{}".format(rv, todo.description)
return rv
def format_date(self, date):
if date:
return date.strftime(self.date_format)
else:
return self.empty_date
def unformat_date(self, date):
if date:
date = datetime.strptime(date, self.date_format)
return date.replace(tzinfo=self._localtimezone)
else:
return None
def format_database(self, database):
return '{}@{}{}'.format(database.color_ansi or '',
database.name,
ansi.colour.fx.reset)
|
Python
| 0.999719
|
@@ -59,16 +59,38 @@
lour.fx%0A
+import ansi.colour.fg%0A
import a
@@ -4042,103 +4042,167 @@
elf.
-empty_date = %22 %22 * len(self.format_date(datetime.now()))%0A self._localtimezone = tzlocal(
+_localtimezone = tzlocal()%0A self.now = datetime.now().replace(tzinfo=self._localtimezone)%0A self.empty_date = %22 %22 * len(self.format_date(self.now)
)%0A%0A
@@ -4699,16 +4699,17 @@
lse %22!%22%0A
+%0A
@@ -4733,32 +4733,186 @@
_date(todo.due)%0A
+ if todo.due and todo.due %3C= self.now and not todo.is_completed:%0A due = '%7B%7D%7B%7D%7B%7D'.format(ansi.colour.fg.red, due, ansi.colour.fx.reset)%0A%0A
summary
@@ -5560,29 +5560,27 @@
r
-eturn
+v =
date.strfti
@@ -5592,32 +5592,54 @@
lf.date_format)%0A
+ return rv%0A
else:%0A
|
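The change above wraps an overdue due date in ansi.colour.fg.red / fx.reset before printing. The same effect with raw ANSI escape codes, as a hedged standalone sketch:

from datetime import datetime, timedelta

RED, RESET = '\033[31m', '\033[0m'

def format_due(due, now, date_format='%Y-%m-%d'):
    text = due.strftime(date_format)
    if due <= now:  # overdue: paint it red
        text = '%s%s%s' % (RED, text, RESET)
    return text

now = datetime.now()
print(format_due(now - timedelta(days=1), now))  # rendered in red
print(format_due(now + timedelta(days=1), now))  # rendered plainly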
a9240cd8bcfced47b402fdbff0162ad939eaa631
|
Fix typo
|
Yank/multistate/__init__.py
|
Yank/multistate/__init__.py
|
#!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultiState
==========
Multistate Sampling simulation algorithms, specific variants, and analyzers
This module provides a general facility for running multistate simulations over multiple
thermodynamic states, as well as derived classes for special cases such as parallel tempering
(in which the states differ only in temperature).
Provided classes include:
- :class:`yank.multistate.MultiStateSampler`
Base class for general, multi-thermodynamic state parallel multistate
- :class:`yank.multistate.ReplicaExchangeSampler`
Derived class from MultiStateSampler which allows sampled thermodynamic states
to swap based on Hamiltonian Replica Exchange
- :class:`yank.multistate.ParallelTemperingSampler`
Convenience subclass of ReplicaExchange for parallel tempering simulations
(one System object, many temperatures).
- :class:`yank.multistate.SAMSSampler`
Single-replica sampler which samples through multiple thermodynamic states
on the fly.
- :class:`yank.multistate.MultiStateReporter`
Replica Exchange reporter class to store all variables and data
Analyzers
---------
The MultiState module also provides analysis modules to analyze simulations and compute observables from data generated
under any of the MultiStateSampler's
Extending and Subclassing
-------------------------
Subclassing a sampler and analyzer is done by importing and extending any of the following:
* The base ``MultiStateSampler`` from ``multistatesampler``
* The base ``MultiStateReporter`` from ``multistatereporter``
* The base ``MultiStateAnalyzer`` or ``PhaseAnalyzer`` and base `ObservablesRegistry`` from ``multistateanalyzer``
COPYRIGHT
---------
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
-------
This code is licensed under the latest available version of the MIT License.
"""
import warning
warnings.warn("The yank.multistate package is deprecated and it will be "
"available as openmmtools.multistate with openmmtools >= 0.18",
DeprecationWarning, stacklevel=2)
from .multistatesampler import MultiStateSampler
from .multistatereporter import MultiStateReporter
from .replicaexchange import ReplicaExchangeSampler, ReplicaExchangeAnalyzer
from .paralleltempering import ParallelTemperingSampler, ParallelTemperingAnalyzer
from .sams import SAMSSampler, SAMSAnalyzer
from .multistateanalyzer import *
from .utils import *
|
Python
| 0.999999
|
@@ -2347,16 +2347,17 @@
warning
+s
%0Awarning
|
4a18649367e6593724cdde6cf821eced595bb3cf
|
use list comprehension for auto-parse
|
genson/genson.py
|
genson/genson.py
|
import argparse
import sys
import re
import json
from .generator import SchemaNode
DESCRIPTION = """
Generate one, unified JSON Schema from one or more
JSON objects and/or JSON Schemas.
(uses Draft 4 - http://json-schema.org/draft-04/schema)
"""
def main():
args = parse_args()
s = SchemaNode()
for schema_file in args.schema:
add_json_from_file(s, schema_file, args.delimiter, schema=True)
for object_file in args.object:
add_json_from_file(s, object_file, args.delimiter)
print(s.to_json(indent=args.indent))
def parse_args():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-d', '--delimiter', metavar='DELIM',
help='''set a delimiter - Use this option if the
input files contain multiple JSON objects/schemas.
You can pass any string. A few cases ('newline', 'tab',
'space') will get converted to a whitespace character,
and if empty string ('') is passed, the parser will
try to auto-detect where the boundary is.''')
parser.add_argument('-i', '--indent', type=int, metavar='SPACES',
help='''pretty-print the output, indenting SPACES
spaces''')
parser.add_argument('-s', '--schema', action='append', default=[],
type=argparse.FileType('r'),
help='''file containing a JSON Schema (can be
specified multiple times to merge schemas)''')
parser.add_argument('object', nargs=argparse.REMAINDER,
type=argparse.FileType('r'), help='''files containing
JSON objects (defaults to stdin if no arguments
are passed and the -s option is not present)''')
args = parser.parse_args()
args.delimiter = get_delim(args.delimiter)
# default to stdin if no objects or schemas
if not args.object and not args.schema:
args.object.append(get_stdin())
return args
def get_delim(delim):
"""
manage special conversions for difficult bash characters
"""
if delim == 'newline':
delim = '\n'
elif delim == 'tab':
delim = '\t'
elif delim == 'space':
delim = ' '
return delim
def get_stdin():
"""
Grab stdin, printing simple instructions if it's interactive.
"""
if sys.stdin.isatty():
print('Enter a JSON object, then press ctrl-D')
return sys.stdin
def add_json_from_file(s, fp, delimiter, schema=False):
method = getattr(s, 'add_schema' if schema else 'add_object')
raw_text = fp.read().strip()
fp.close()
for json_string in get_json_strings(raw_text, delimiter):
method(json.loads(json_string))
def get_json_strings(raw_text, delim):
if delim is None:
json_strings = [raw_text]
elif delim == '':
json_strings = detect_json_strings(raw_text)
else:
json_strings = raw_text.split(delim)
# sanitize data before returning
return [string.strip() for string in json_strings if string.strip()]
def detect_json_strings(raw_text):
"""
Use regex with lookaround to spot the boundaries between JSON objects.
Unfortunately, it has to match *something*, so at least one character
must be removed and replaced.
"""
strings = re.split('}\s*(?={)', raw_text)
json_strings = []
for string in strings:
# put back the stripped character
json_strings.append(string + '}')
# the last one doesn't need to be modified
json_strings[-1] = strings[-1]
return json_strings
|
Python
| 0.000002
|
@@ -3467,61 +3467,8 @@
t)%0A%0A
- json_strings = %5B%5D%0A for string in strings:%0A
@@ -3501,20 +3501,16 @@
aracter%0A
-
json
@@ -3517,24 +3517,20 @@
_strings
-.append(
+ = %5B
string +
@@ -3533,17 +3533,44 @@
ng + '%7D'
-)
+ for string in strings%5B:-1%5D%5D
%0A%0A #
@@ -3630,15 +3630,16 @@
ings
-%5B-1%5D =
+.append(
stri
@@ -3645,16 +3645,17 @@
ings%5B-1%5D
+)
%0A%0A re
|
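detect_json_strings() above splits on the pattern '}\s*(?={)': the lookahead leaves each opening brace intact, but the closing brace before it is consumed by the split and has to be re-appended, which is what the list comprehension in the diff does. A runnable demonstration:

import re

raw = '{"a": 1} {"b": 2} {"c": 3}'
strings = re.split(r'}\s*(?={)', raw)
# every '}' except the last was consumed by the split; put them back
json_strings = [s + '}' for s in strings[:-1]]
json_strings.append(strings[-1])
print(json_strings)  # ['{"a": 1}', '{"b": 2}', '{"c": 3}']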
c3d454d5d7272620ab83b9e02cc8063d0e16da0f
|
Add an embedded favicon for error pages
|
edgedb/lang/common/markup/renderers/dhtml/__init__.py
|
edgedb/lang/common/markup/renderers/dhtml/__init__.py
|
##
# Copyright (c) 2011 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
from semantix.rendering.css import dumps as scss_dumps, reload as scss_reload
from .. import json
from ... import serialize
__all__ = 'render',
_HTML_TPL_START = '''<!DOCTYPE html>
<!--
Copyright (c) 2011 Sprymix Inc.
All rights reserved.
-->
<html>
<head>
<style type="text/css">
{styles}
</style>
<script type="text/javascript">
{scripts}
(function() {{
var exc_info = ''';
_HTML_END = ''';
sx.dom.on(window, 'load', function(exc_info) {
var spec = sx.Markup.Renderer.unpack_markup(exc_info);
var renderer = new sx.Markup.Renderer(spec);
renderer.render('body');
if (renderer.top_exc_title) {
document.title = renderer.top_exc_title;
}
}, this, exc_info);
})();
</script>
</head>
<body>
<div id="body">
</div>
</body>
</html>
'''
class Renderer:
TPL_START = None
@classmethod
def _init(cls):
from semantix.utils.lang import javascript
with open(os.path.join(os.path.dirname(javascript.__file__), 'sx.js')) as f:
scripts = f.read()
with open(os.path.join(os.path.dirname(__file__), 'render.js')) as f:
scripts += ';\n' + f.read()
from . import styles
scss_reload(styles)
rendered_styles = scss_dumps(styles)
cls.TPL_START = _HTML_TPL_START.format(styles=rendered_styles, scripts=scripts)
@classmethod
def render(cls, markup, reload=False):
if reload:
cls._init()
exc_info = json.render(markup)
return ''.join((cls.TPL_START, exc_info, _HTML_END))
Renderer._init()
render = Renderer.render
|
Python
| 0
|
@@ -246,16 +246,460 @@
der',%0A%0A%0A
+_FAVICON = ('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAGXRFWHRTb'%0A '2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAABhQTFRF6+np2VZWmVVVqxUV1qioxhMTuzw8////'%0A 'fKlS/gAAAAh0Uk5T/////////wDeg71ZAAAAbElEQVR42lyOCwrAMAhD47f3v/ES3aAsBY0PteL8hAl'%0A 'p3Zb4QLZJRAveWigFMB6TmqUa+IDuGcIhp4CQjIBVReSCFjC5C7gaPvksrargDiUtRcsCDDXfbkuRxh'%0A '5G4jHI93QA6aOkXXDpPAIMAD0IA95480JWAAAAAElFTkSuQmCC')%0A%0A%0A
_HTML_TP
@@ -808,16 +808,81 @@
%3Chead%3E%0A
+ %3Clink rel=%22shortcut icon%22 href=%22''' + _FAVICON + '''%22 %3E%0A%0A
|
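The diff above inlines the favicon as a base64 data URI so error pages render it without a second HTTP request. Producing such a URI from a PNG file, as a hedged sketch (the path is illustrative):

import base64

def favicon_data_uri(path):
    with open(path, 'rb') as f:
        payload = base64.b64encode(f.read()).decode('ascii')
    return 'data:image/png;base64,' + payload

# html = '<link rel="shortcut icon" href="%s">' % favicon_data_uri('favicon.png')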
8f1d2f0e821724f010291d340f30d5842ad32c76
|
add word2vec yahoo for shoes
|
extractVecMat_shoes.py
|
extractVecMat_shoes.py
|
#/datastore/zhenyang/bin/python
import sys
import os
import gensim, logging
import numpy as np
import scipy.io as sio
def main():
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
#pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
#model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#pretrained_model = './vectors.output'
pretrained_model = '../yahoo_100m_words_30d.output'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=False)
##############
classnames = open('shoes_gclasses_vps.txt', 'r').read().splitlines()
cc = 0
clsid = 0
vec_size = 30
word2vec_mat = np.zeros((len(classnames), vec_size))
for classname in classnames:
idc = 1
for cls in classname.split(';'):
wordvec = np.zeros((1, vec_size))
for cls_word in cls.split(' '):
try:
np.add(wordvec, model[cls_word])
idc = 0
except:
print cls_word
idc = 1
break
if idc == 0:
break
word2vec_mat[clsid, :] = wordvec
clsid = clsid + 1
cc = cc + idc
#np.savetxt('attr_word2vec_GoogleNews.txt', word2vec_mat)
#sio.savemat('attr_word2vec_GoogleNews.mat', {'word2vec':word2vec_mat})
np.savetxt('shoes_word2vec_yahoo_30d.txt', word2vec_mat)
sio.savemat('shoes_word2vec_yahoo_30d.mat', {'word2vec':word2vec_mat})
print cc
if __name__ == "__main__":
main()
|
Python
| 0.000238
|
@@ -1101,16 +1101,26 @@
+ wordvec =
np.add(
|
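The one-line diff above is the entire fix: np.add returns a new array rather than mutating its first argument, so the old loop discarded every word vector it looked up. A self-contained sketch of the corrected accumulation, with a two-word dictionary standing in for the gensim model:

import numpy as np

# Stand-in for model[cls_word] lookups against the trained word2vec model.
model = {'running': np.ones(30), 'shoe': np.full(30, 2.0)}

wordvec = np.zeros(30)
for word in ['running', 'shoe']:
    wordvec = np.add(wordvec, model[word])  # assigning the result is the fix
    # equivalent in-place form: wordvec += model[word]

print(wordvec[0])  # 3.0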
57acc4cb74a9fa0747c628fa5abc30e19883febc
|
add hint to speed up the bounding box calculation
|
examples/addons/drawing/render_model_space_as_tiles.py
|
examples/addons/drawing/render_model_space_as_tiles.py
|
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
from typing import Iterator
from pathlib import Path
import random
import matplotlib.pyplot as plt
import ezdxf
from ezdxf.addons.drawing import RenderContext, Frontend
from ezdxf.addons.drawing.matplotlib import MatplotlibBackend
from ezdxf import bbox
from ezdxf.math import BoundingBox2d
assert ezdxf.__version__ >= "0.18", "requires newer ezdxf version"
# This example renders the DXF file in rows by cols tiles including filtering
# the DXF entities outside the rendering area.
# But the calculation of the bounding boxes is also costly and entities
# expanding into several tiles are rendered multiple times, therefore this
# solution takes longer than a single-pass rendering, but it shows the concept.
DIR = Path("~/Desktop/Outbox").expanduser()
if not DIR.exists():
DIR = Path(".")
COLORS = list(range(1, 7))
DPI = 300
WIDTH = 400
HEIGHT = 200
def random_points(count: int, width: float, height: float):
for _ in range(count):
yield width * random.random(), height * random.random()
def create_content(msp):
for s, e in zip(
random_points(100, WIDTH, HEIGHT), random_points(100, WIDTH, HEIGHT)
):
msp.add_line(s, e, dxfattribs={"color": random.choice(COLORS)})
def render_areas(extents, grid=(2, 2)) -> Iterator[BoundingBox2d]:
"""Returns a bounding box for each tile to render."""
rows, cols = grid
tile_width = extents.size.x / cols
tile_height = extents.size.y / rows
for row in range(rows):
for col in range(cols):
x_min = extents.extmin.x + col * tile_width
y_min = extents.extmin.y + row * tile_height
# BoundingBox2d ignores the z-axis!
yield BoundingBox2d(
[(x_min, y_min), (x_min + tile_width, y_min + tile_height)]
)
def main(rows: int, cols: int):
doc = ezdxf.new()
msp = doc.modelspace()
create_content(msp)
# Detecting the drawing extents by ezdxf:
# Reuse bounding box calculation for entity filtering.
cache = bbox.Cache()
    # This can take a long time for big DXF files!
extents = bbox.extents(msp, cache=cache)
ctx = RenderContext(doc)
for tile, render_area in enumerate(render_areas(extents, (rows, cols))):
# Setup drawing add-on:
fig = plt.figure(dpi=DPI)
ax = fig.add_axes([0, 0, 1, 1])
out = MatplotlibBackend(ax)
ax.set_xlim(render_area.extmin.x, render_area.extmax.x)
ax.set_ylim(render_area.extmin.y, render_area.extmax.y)
# Disable rendering of entities outside of the render area:
def is_intersecting_render_area(entity):
# returns True if entity should be rendered
entity_bbox = bbox.extents([entity], cache=cache)
return render_area.has_intersection(entity_bbox)
# Finalizing invokes auto-scaling!
Frontend(ctx, out).draw_layout(
msp, finalize=False, filter_func=is_intersecting_render_area
)
# Set output size in inches
# width = 6 in x 300 dpi = 1800 px
# height = 3 in x 300 dpi = 900 px
fig.set_size_inches(6, 3, forward=True)
filename = f"tile-{tile:02d}.png"
print(f'saving tile #{tile} to "{filename}"')
fig.savefig(DIR / filename, dpi=DPI)
plt.close(fig)
if __name__ == "__main__":
main(3, 3)
|
Python
| 0
|
@@ -398,27 +398,30 @@
res
-newer
ezdxf v
-ersion
+0.18b0 or newer
%22%0A%0A#
@@ -2013,12 +2013,10 @@
#
-Reus
+Th
e bo
@@ -2028,25 +2028,33 @@
g box ca
-lculation
+che can be reused
for ent
@@ -2093,24 +2093,25 @@
Cache()%0A
+%0A
# Th
is can t
@@ -2102,18 +2102,42 @@
# Th
-is
+e bounding box calculation
can tak
@@ -2168,16 +2168,350 @@
files!%0A
+ # If flatten=0 the bounding box calculation for curves (SPLINE, ELLIPSE, ...)%0A # is based on the control points of the Path class, this is less precise but%0A # can speed up the calculation and for this task is a precise bounding box%0A # not required.%0A # This has no impact on this example which uses only straight lines%0A
exte
@@ -2545,16 +2545,27 @@
he=cache
+, flatten=0
)%0A%0A c
@@ -3070,11 +3070,12 @@
-# r
+%22%22%22R
etur
@@ -3110,16 +3110,21 @@
rendered
+. %22%22%22
%0A
@@ -3176,16 +3176,27 @@
he=cache
+, flatten=0
)%0A
@@ -3453,16 +3453,17 @@
n inches
+:
%0A
@@ -3477,16 +3477,18 @@
h = 6 in
+ch
x 300 d
@@ -3523,16 +3523,18 @@
t = 3 in
+ch
x 300 d
|
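The diff above threads flatten=0 into both bbox.extents() calls: with flattening disabled, curved entities (SPLINE, ELLIPSE, ...) are bounded by their Path control points rather than a flattened approximation, which is less precise but cheaper, and makes no difference for a drawing of straight lines. A minimal sketch of the hint in isolation, assuming ezdxf 0.18:

import ezdxf
from ezdxf import bbox

doc = ezdxf.new()
msp = doc.modelspace()
msp.add_line((0, 0), (100, 50))

cache = bbox.Cache()
# flatten=0 skips curve flattening; for straight lines the result is
# identical, just computed faster.
extents = bbox.extents(msp, cache=cache, flatten=0)
print(extents.extmin, extents.extmax)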
24f5be6e6a409b7447ccd6fede81b8c55662def4
|
add return data
|
util/strategy.py
|
util/strategy.py
|
import numpy as np
from trendy import segtrends
import pandas as pd
import tradingWithPython as twp
from filter import movingaverage
def orders_from_trends(x, segments=2, charts=True, window=7, momentum=False):
''' generate orders from segtrends '''
x_maxima, maxima, x_minima, minima = segtrends(x, segments, charts, window)
n = len(x)
y = np.array(x)
movy = movingaverage(y, window)
# generate order strategy
orders = np.zeros(n)
last_buy = y[0]
last_sale = y[0]
for i in range(1,n):
# get 2 latest support point y values prior to x
pmin = list(minima[np.where(x_minima<=i)][-2:])
pmax = list(maxima[np.where(x_maxima<=i)][-2:])
        # sell if support slope is negative
min_sell = True if ((len(pmin)==2) and (pmin[1]-pmin[0])<0) else False
max_sell = True if ((len(pmax)==2) and (pmax[1]-pmax[0])<0) else False
# if support down, sell
buy = -1 if (min_sell and max_sell) else 0
        # buy only if below the moving average, else sell
        buy = 1 if ((buy == 0) and (y[i]<movy[i])) else -1
        # sell only if ...
        buy = -1 if ((buy == -1) and y[i]>last_buy) else 1
buy_price_dec = y[i]<last_buy
sale_price_dec = y[i]<last_sale
orders[i] = buy
last_buy = y[i] if (buy==1) else last_buy
last_sale = y[i] if (buy==-1) else last_sale
import math
if momentum:
# add momentum for buy
if (buy==1) and (orders[i-1]>=1):
#if buy_price_dec:
orders[i]=round(math.log(2*orders[i-1])+1)
#else:
# orders[i]=max(1, round(orders[i-1]/2))
# add momentum for sale
elif (buy==-1) and (orders[i-1]<=-1):
#if sale_price_dec:
orders[i]*=round(math.log(abs(orders[i-1]*2))+1)
#else:
# orders[i]=max(1, round(orders[i-1]/2))
# OUTPUT
return orders
def orders2strategy(orders, price, min_stocks=1):
strategy = pd.Series(index=price.index)
orders=[el*min_stocks for el in orders]
    # create a strategy from the orders
for i, idx in enumerate(price.index):
if orders[i]!=0:
strategy[idx] = orders[i]
return strategy
def eval(stockname='TSLA', field='open', months=12,
initialCash=20000, min_stocks=30, charts=True):
import tradingWithPython.lib.yahooFinance as yahoo
from pylab import title, figure
n = (5*4)*months
price = yahoo.getHistoricData(stockname)[field][-n:]
if charts:
title('automatic strategy')
orders = orders_from_trends(price, segments=n/5, charts=charts,
momentum=True);
strategy = orders2strategy(orders, price, min_stocks)
# do the backtest
bt = twp.Backtest(price, strategy, initialCash=initialCash, signalType='shares')
if charts:
bt.plotTrades()
figure()
bt.pnl.plot()
title('pnl')
bt.data.plot()
title('all strategy data')
|
Python
| 0.025135
|
@@ -3096,10 +3096,29 @@
y data')
+%0A return bt.data
%0A%0A
|
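With the appended return statement, callers can inspect the backtest data after the charts are drawn. A hypothetical usage sketch in the file's own Python 2 style (the import path and parameters are assumptions):

from util.strategy import eval as eval_strategy

# Run the backtest headless and keep the resulting frame (bt.data).
data = eval_strategy(stockname='TSLA', months=6, charts=False)
print data.tail()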
efc26d1e3065f18a80b601e8416abe0a19c83103
|
Simplify a test case, NFC
|
packages/Python/lldbsuite/test/lang/swift/variables/bridged_string/TestSwiftBridgedStringVariables.py
|
packages/Python/lldbsuite/test/lang/swift/variables/bridged_string/TestSwiftBridgedStringVariables.py
|
# coding=utf-8
# TestSwiftBridgedStringVariables.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Tests simple swift expressions
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftBridgedStringVariables(TestBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.skipUnlessDarwin
@decorators.swiftTest
def test_swift_bridged_string_variables(self):
"""Test that Swift.String formats properly"""
self.build()
self.do_test()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
def do_test(self):
"""Test that Swift.String formats properly"""
exe_name = "a.out"
exe = self.getBuildArtifact(exe_name)
# Create the target
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set the breakpoints
breakpoint = target.BreakpointCreateBySourceRegex(
'Set breakpoint here', self.main_source_spec)
self.assertTrue(breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
# Frame #0 should be at our breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
s1 = self.frame.FindVariable("s1")
s2 = self.frame.FindVariable("s2")
s3 = self.frame.FindVariable("s3")
s4 = self.frame.FindVariable("s4")
s5 = self.frame.FindVariable("s5")
s6 = self.frame.FindVariable("s6")
lldbutil.check_variable(self, s1, summary='"Hello world"')
lldbutil.check_variable(self, s2, summary='"ΞΕΛΛΘ"')
lldbutil.check_variable(self, s3, summary='"Hello world"')
lldbutil.check_variable(self, s4, summary='"ΞΕΛΛΘ"')
lldbutil.check_variable(self, s5, use_dynamic=True, summary='"abc"')
lldbutil.check_variable(self, s6, summary='"abc"')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
Python
| 0.000002
|
@@ -1235,818 +1235,129 @@
-exe_name = %22a.out%22%0A exe = self.getBuildArtifact(exe_name)%0A%0A # Create the target%0A target = self.dbg.CreateTarget(exe)%0A self.assertTrue(target, VALID_TARGET)%0A%0A # Set the breakpoints%0A breakpoint = target.BreakpointCreateBySourceRegex(%0A 'Set breakpoint here', self.main_source_spec)%0A self.assertTrue(breakpoint.GetNumLocations() %3E 0, VALID_BREAKPOINT)%0A%0A # Launch the process, and do not stop at the entry point.%0A process = target.LaunchSimple(None, None, os.getcwd())%0A%0A self.assertTrue(process, PROCESS_IS_VALID)%0A%0A # Frame #0 should be at our breakpoint.%0A threads = lldbutil.get_threads_stopped_at_breakpoint(%0A process, breakpoint)%0A%0A self.assertTrue(len(threads) == 1)%0A self.thread = threads%5B0%5D
+(_, _, thread, _) = lldbutil.run_to_source_breakpoint(self,%0A %22Set breakpoint here%22, self.main_source_spec)
%0A
@@ -1374,21 +1374,16 @@
frame =
-self.
thread.f
|
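The diff collapses some twenty lines of target/breakpoint/launch boilerplate into a single helper call; lldbutil.run_to_source_breakpoint builds the target, sets the breakpoint, launches, and returns a (target, process, thread, breakpoint) tuple. A sketch of the pattern as it would appear inside a TestBase-derived method (shown as comments because the helper needs a live test fixture):

import lldb
import lldbsuite.test.lldbutil as lldbutil

# (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
#     self, "Set breakpoint here", lldb.SBFileSpec("main.swift"))
# frame = thread.frames[0]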