Dataset schema (one record per commit; fields appear in this order below):
  commit        string, length 40 (git SHA-1)
  subject       string, length 1 to 3.25k (commit message subject)
  old_file      string, length 4 to 311 (file path before the change)
  new_file      string, length 4 to 311 (file path after the change)
  old_contents  string, length 0 to 26.3k (file contents before the change)
  lang          string, 3 classes (programming language)
  proba         float64, 0 to 1 (language-classification confidence)
  diff          string, length 0 to 7.82k (patch from old to new contents)
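The diff field appears to be a Google diff-match-patch patch string: hunk headers carry character offsets and lengths, and newlines inside patch content are percent-encoded as %0A (with %25 and %5B escaping "%" and "["). Below is a minimal sketch, under that assumption, of reconstructing a record's post-change file; the helper name is illustrative. For example, applying the first record's diff below to its old_contents should yield version.py with VERSION = '0.1.2'.

# Sketch: apply a record's diff to its old_contents, assuming the
# diff-match-patch patch_toText format (inferred from the "@@ -a,b +c,d @@"
# headers and %0A-escaped newlines in the rows below).
from diff_match_patch import diff_match_patch  # pip install diff-match-patch

def apply_record_diff(old_contents, diff_text):
    dmp = diff_match_patch()
    patches = dmp.patch_fromText(diff_text)            # parses the escaped format
    new_contents, hunk_ok = dmp.patch_apply(patches, old_contents)
    if not all(hunk_ok):
        raise ValueError("some hunks failed to apply cleanly")
    return new_contents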
a6ac3a7a0955fab9cce1d2866a064ff6d4943dd0
bump version to 0.1.2
json5/version.py
json5/version.py
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = '0.1.1'
Python
0.000001
@@ -604,11 +604,11 @@
  = '0.1.
-1
+2
 '%0A
8d40b5369a8c38477f004ac2eff467efe44ff3ce
Split device log processing by section #prefactor
corehq/ex-submodules/phonelog/utils.py
corehq/ex-submodules/phonelog/utils.py
from corehq.apps.users.util import format_username
from corehq.apps.users.dbaccessors import get_user_id_by_username
from phonelog.models import UserEntry, DeviceReportEntry


def device_users_by_xform(xform_id):
    return list(
        UserEntry.objects.filter(xform_id__exact=xform_id)
        .distinct('username').values_list('username', flat=True)
    )


def _force_list(obj_or_list):
    return obj_or_list if isinstance(obj_or_list, list) else [obj_or_list]


def _get_logs(form, report_name, report_slug):
    report = form.get(report_name, {}) or {}
    if isinstance(report, list):
        return filter(None, [log.get(report_slug) for log in report])
    return report.get(report_slug, [])


def process_device_log(domain, xform):
    form_data = xform.form_data
    userlogs = _get_logs(form_data, 'user_subreport', 'user')
    UserEntry.objects.filter(xform_id=xform.form_id).delete()
    DeviceReportEntry.objects.filter(xform_id=xform.form_id).delete()
    to_save = []
    for i, log in enumerate(_force_list(userlogs)):
        to_save.append(UserEntry(
            xform_id=xform.form_id,
            i=i,
            user_id=log["user_id"],
            username=log["username"],
            sync_token=log["sync_token"],
        ))
    UserEntry.objects.bulk_create(to_save)

    logs = _get_logs(form_data, 'log_subreport', 'log')
    logged_in_username = None
    logged_in_user_id = None
    to_save = []
    for i, log in enumerate(_force_list(logs)):
        if not log:
            continue
        if log["type"] == 'login':
            # j2me log = user_id_prefix-username
            logged_in_username = log["msg"].split('-')[1]
            cc_username = format_username(logged_in_username, domain)
            logged_in_user_id = get_user_id_by_username(cc_username)
        elif log["type"] == 'user' and log["msg"][:5] == 'login':
            # android log = login|username|user_id
            msg_split = log["msg"].split('|')
            logged_in_username = msg_split[1]
            logged_in_user_id = msg_split[2]
        to_save.append(DeviceReportEntry(
            xform_id=xform.form_id,
            i=i,
            domain=domain,
            type=log["type"],
            msg=log["msg"],
            # must accept either date or datetime string
            date=log["@date"],
            server_date=xform.received_on,
            app_version=form_data.get('app_version'),
            device_id=form_data.get('device_id'),
            username=logged_in_username,
            user_id=logged_in_user_id,
        ))
    DeviceReportEntry.objects.bulk_create(to_save)
Python
0
@@ -745,35 +745,118 @@
-form_data = xform.form_data
+_process_user_subreport(xform)%0A    _process_log_subreport(domain, xform)%0A%0A%0Adef _process_user_subreport(xform):
 %0A
@@ -869,32 +869,38 @@
 ogs = _get_logs(
+xform.
 form_data, 'user
@@ -1379,16 +1379,92 @@
 _save)%0A%0A
+%0Adef _process_log_subreport(domain, xform):%0A    form_data = xform.form_data%0A    logs
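The hunks above split process_device_log into two helpers, one per log section. A sketch of the post-refactor shape, assembled from the diff alone ("..." marks body text the hunks do not show):

# Structure implied by the diff above (sketch, not the full refactored file).
def process_device_log(domain, xform):
    _process_user_subreport(xform)
    _process_log_subreport(domain, xform)


def _process_user_subreport(xform):
    userlogs = _get_logs(xform.form_data, 'user_subreport', 'user')
    ...  # delete stale UserEntry rows and bulk_create new ones, as before


def _process_log_subreport(domain, xform):
    form_data = xform.form_data
    logs = _get_logs(form_data, 'log_subreport', 'log')
    ...  # build and bulk_create DeviceReportEntry rows, as before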
c269d9f7398be2b7740aecf2a48f307d29107744
FIX really?
api/sonetworks/buckets/facebook.py
api/sonetworks/buckets/facebook.py
import os
import sys
import logging
import json

from django.conf import settings
from django.db import migrations, models
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
from oauthlib.oauth2 import WebApplicationClient


class Facebook:

    def __init__(self, account_id=None):
        self.account_id = account_id
        self.graph_url = 'https://graph.facebook.com/'
        self.token_url = self.graph_url + 'oauth/access_token'
        self.client_id = settings.SOCIAL_AUTH_FACEBOOK_KEY
        self.client_secret = settings.SOCIAL_AUTH_FACEBOOK_SECRET
        self.redirect_uri = os.environ["OAUTH2_REDIRECT_URI"] + "?chan=facebook"
        self.tagname = 'facebook'
        self.oauth = None
        self.url = 'https://www.facebook.com/'

    def fav(self, social_account):
        """Like an existing post"""
        pass

    def get_user_image(self):
        """Get the user profile image"""
        self.image = json.loads(self.oauth.get(
            self.graph_url + self.account_id +
            "/picture?width=160&height=160&redirect=0").content)
        return

    def get_user_detail(self):
        """Get user details"""
        user = json.loads(
            self.oauth.get(self.graph_url + "me?fields=id,name,email").content)
        # Fetch the user profile image
        self.account_id = user["id"]
        self.get_user_image()
        return {
            "id": user["id"],
            "name": user["name"],
            "email": user["email"] if "email" in user else "sin@email.com",
            "image": self.image["data"]["url"]
            if "data" in self.image and "url" in self.image["data"] else None}

    def get_page_image(self, page_id, token):
        """Get a page profile image"""
        self.image = json.loads(self.oauth.get(
            self.graph_url + page_id + "/picture?access_token=" + token +
            "&width=160&height=160&redirect=0").content)
        return

    def get_user_pages(self, token, account_id):
        """Get the user's pages"""
        response = json.loads(self.oauth.get(
            self.graph_url + "me/accounts?access_token=" + token).content)
        pages_list = []
        for page in response["data"]:
            pages = {}
            if "CREATE_CONTENT" in page["perms"]:
                pages["page_id"] = page["id"]
                pages["name"] = page["name"]
                pages["token"] = page["access_token"]
                pages_list.append(pages)
        return pages_list

    def get_oauthsession(self):
        """Return a Facebook requests_oauthlib OAuth2Session"""
        return self.get_oauth2session()

    def get_oauth2session(self):
        """Return a Facebook requests_oauthlib OAuth2Session"""
        self.oauth = OAuth2Session(client_id=self.client_id,
                                   redirect_uri=self.redirect_uri)
        self.oauth = facebook_compliance_fix(self.oauth)
        return self.oauth

    def get_token(self, redirect_response):
        """Get a Facebook OAuth2 token"""
        token = self.oauth.fetch_token(
            token_url=self.token_url,
            client_secret=self.client_secret,
            authorization_response=redirect_response
        )
        return token

    def post(self, token, post, account_id, staff=False):
        """Create a new Facebook post"""
        copy = post.content["txt"]
        if staff == 1:
            node = self.graph_url + account_id + "/"
        else:
            node = self.graph_url + "me/"
        payload = {"message": copy}
        parameter_token = ""
        if staff:
            parameter_token = "access_token=" + token
        # If the content carries an img/link element
        if ("link" in post.content and
                (post.content["link"] is not None and post.content["link"] != "")):
            imagen = post.content["link"]
            # [Posting an image with user accounts]
            if (post.content["linkType"] == "img"):
                # Posting through /photos
                node = node + "photos?" + parameter_token
                payload["url"] = imagen
            else:
                # Posting through a feed link
                node = node + "feed?" + parameter_token + "&link=" + imagen + \
                    "&caption=" + settings.FACEBOOK_URL_CAPTION
        else:
            node = node + "feed?" + parameter_token
        if staff:
            self.oauth.access_token = token
            # pass
        else:
            self.oauth.token = token
        response = self.oauth.post(node, data=payload)
        return response

    def share(self, token, permalink, account_id, post_id):
        """Share an existing post through its permalink"""
        # copy = post.content["txt"]
        parameter_token = "access_token=" + token["access_token"]
        self.oauth.access_token = token
        node = self.graph_url + account_id + "/"
        node = node + "feed?" + parameter_token
        payload = {
            "link": permalink
            # If we add a "message", the shares are not counted
            # ,"message": 'Elige Bien'
        }
        response = self.oauth.post(node, data=payload)
        return response

    def set_account_id(self, account_id):
        self.account_id = account_id
Python
0
@@ -3419,16 +3419,484 @@
 )%0A
+ ltoken = self.oauth.fetch_token(%0A token_url = token_url +%0A 'grant_type=fb_exchange_token&' +%0A 'client_id='+settings.SOCIAL_AUTH_FACEBOOK_KEY +%0A '&client_secret=' + settings.SOCIAL_AUTH_FACEBOOK_SECRET +%0A '&fb_exchange_token='+token%5B'access_token'%5D,%0A client_secret = self.client_secret,%0A authorization_response = redirect_response%0A )%0A%0A
 %0A
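The added hunk exchanges the short-lived user token for a long-lived one. A stand-alone sketch of that exchange with plain requests; the endpoint and grant_type follow Facebook's documented fb_exchange_token flow, while the function and variable names here are illustrative:

# Sketch of the long-lived-token exchange the hunk above assembles by string
# concatenation; parameters per the documented fb_exchange_token flow.
import requests

def exchange_for_long_lived_token(app_id, app_secret, short_lived_token):
    resp = requests.get(
        "https://graph.facebook.com/oauth/access_token",
        params={
            "grant_type": "fb_exchange_token",
            "client_id": app_id,
            "client_secret": app_secret,
            "fb_exchange_token": short_lived_token,
        },
    )
    resp.raise_for_status()
    return resp.json()["access_token"]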
d999e0164a346114d70222581ed37af8f44c457b
Update gapminder_data_analysis.py
gapminder_data_analysis.py
gapminder_data_analysis.py
# Importing the required libraries
# Note: %matplotlib inline works only for the IPython notebook. It will not work in PyCharm. It is used to show the plot distributions
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(color_codes=True)

# Reading the data, where low_memory=False increases the program efficiency
data = pd.read_csv("gapminder.csv", low_memory=False)

# Setting the variables you will be working with to numeric
data['breastcancerper100th'] = data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate'] = data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption'] = data['alcconsumption'].convert_objects(convert_numeric=True)

# Shows the number of rows and columns
print (len(data))
print (len(data.columns))
print (len(data.index))

# Print the column headers/headings
names = data.columns.values
print names

# Using the describe function to get the standard deviation and other
# descriptive statistics of our variables
desc1 = data['breastcancerper100th'].describe()
desc2 = data['femaleemployrate'].describe()
desc3 = data['alcconsumption'].describe()
print "\nBreast Cancer per 100th person\n", desc1
print "\nfemale employ rate\n", desc2
print "\nAlcohol consumption in litres\n", desc3
data.describe()

# Show the frequency distribution
print "\nAlcohol Consumption\nFrequency Distribution (in %)"
c1 = data['alcconsumption'].value_counts(sort=False, dropna=False)
print c1
print "\nBreast Cancer per 100th"
c2 = data['breastcancerper100th'].value_counts(sort=False)
print c2
print "\nFemale Employee Rate"
c3 = data['femaleemployrate'].value_counts(sort=False)
print c3

# Show the frequency distribution of the quantitative variable using the groupby function
ac1 = data.groupby('alcconsumption').size()
print "ac1\n", ac1

# Creating a subset of the data
sub1 = data[(data['femaleemployrate'] > 40) & (data['alcconsumption'] >= 20) & (data['breastcancerper100th'] < 50)]

# Creating a copy of the subset. This copy will be used for subsequent analysis
sub2 = sub1.copy()
print "\nCountries where Female Employee Rate is greater than 40 &" \
      " Alcohol Consumption is greater than 20L & new breast cancer cases reported are less than 50\n"
print sub2
print "\nCountries where Female Employee Rate is greater than 50 &" \
      " Alcohol Consumption is greater than 10L & new breast cancer cases reported are greater than 70\n"
sub3 = data[(data['alcconsumption'] > 10) & (data['breastcancerper100th'] > 70) & (data['femaleemployrate'] > 50)]
print sub3

# Checking for missing values in the data row-wise
print "Missing data rows count: ", sum([True for idx, row in data.iterrows() if any(row.isnull())])

# Checking for missing values in the data column-wise
print "Showing missing data column-wise"
print data.isnull().sum()

# Create a copy of the original dataset as sub4 by using the copy() method
sub4 = data.copy()

# Now showing the count of null values in the variables
print sub4.isnull().sum()

# Since the data is all continuous variables, use the mean() for missing value
# imputation; if dealing with categorical data, use the mode() instead
sub4.fillna(sub4['breastcancerper100th'].mean(), inplace=True)
sub4.fillna(sub4['femaleemployrate'].mean(), inplace=True)
sub4.fillna(sub4['alcconsumption'].mean(), inplace=True)

# Showing the count of null values after imputation
print sub4.isnull().sum()

# Categorize the quantitative variables into labelled bins using the qcut function
sub4['alco'] = pd.qcut(sub4.alcconsumption, 6, labels=["0", "1-4", "5-9", "10-14", "15-19", "20-24"])
sub4['brst'] = pd.qcut(sub4.breastcancerper100th, 5, labels=["1-20", "21-40", "41-60", "61-80", "81-90"])
sub4['emply'] = pd.qcut(sub4.femaleemployrate, 4, labels=["30-39", "40-59", "60-79", "80-90"])

# Showing the frequency distribution of the categorised quantitative variables
print "Frequency distribution of the categorized quantitative variables\n"
fd1 = sub4['alco'].value_counts(sort=False, dropna=False)
fd2 = sub4['brst'].value_counts(sort=False, dropna=False)
fd3 = sub4['emply'].value_counts(sort=False, dropna=False)
print "Alcohol Consumption\n", fd1
print "\n------------------------\n"
print "Breast Cancer per 100th\n", fd2
print "\n------------------------\n"
print "Female Employee Rate\n", fd3
print "\n------------------------\n"

# Now plotting the univariate quantitative variables using the distribution plot
sub5 = sub4.copy()
sns.distplot(sub5['alcconsumption'].dropna(), kde=True)
plt.xlabel('Alcohol consumption in litres')
plt.title('Breast cancer in working class women')
plt.show()
# Note: although there is no need to use the show() method in an IPython
# notebook, since %matplotlib inline does the trick, it is added here because
# %matplotlib inline does not work in an IDE like PyCharm, where plt.show() is needed
sns.distplot(sub5['breastcancerper100th'].dropna(), kde=True)
plt.xlabel('Breast cancer per 100th women')
plt.title('Breast cancer in working class women')
plt.show()
sns.distplot(sub5['femaleemployrate'].dropna(), kde=True)
plt.xlabel('Female employee rate')
plt.title('Breast cancer in working class women')
plt.show()

# Using a scatter plot to visualize the quantitative variables;
# if the variable is categorical, use a histogram instead
scat1 = sns.regplot(x='alcconsumption', y='breastcancerper100th', data=data)
plt.xlabel('Alcohol consumption in liters')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Alcohol Consumption and Breast Cancer 100th person')
scat2 = sns.regplot(x='femaleemployrate', y='breastcancerper100th', data=data)
plt.xlabel('Female Employ Rate')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Female Employ Rate and Breast Cancer per 100th Rate')
Python
0
@@ -159,16 +159,155 @@
 butions%0A
+# Make sure to put %25matplotlib inline as the first line of code when visualising plots. Also in pyCharm IDE use plt.show() to see the plot%0A
 %25matplot
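Two asides on the script above. It bins with pd.qcut, which splits at quantiles, not at the customized edges its labels suggest; pd.cut is the tool for fixed edges. Its fillna calls also fill every column with one variable's mean, and convert_objects is long deprecated. A short sketch with illustrative values:

# qcut bins by quantiles (equal counts per bin); cut bins by explicit edges.
import pandas as pd

s = pd.Series([0.5, 3.2, 7.1, 12.4, 18.9, 23.0])
by_quantile = pd.qcut(s, 3, labels=["low", "mid", "high"])
by_edges = pd.cut(s, bins=[0, 5, 15, 25], labels=["low", "mid", "high"])

# Modern replacement for the deprecated convert_objects used above:
numeric = pd.to_numeric(pd.Series(["1.5", "x", "3"]), errors="coerce")  # "x" -> NaN

# Column-wise mean imputation, one column at a time:
df = pd.DataFrame({"a": [1.0, None, 3.0], "b": [None, 2.0, 4.0]})
df["a"] = df["a"].fillna(df["a"].mean())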
007c6283b3ed05b31f1cb9a2dd5a3166f465b828
Move graphs.js to dot dir
generator/generate_data.py
generator/generate_data.py
# -*- coding: utf-8 -*-
import itertools
import json
import multiprocessing
import os
import re
import shutil
import urllib

import workerpool

import jobs

LAST_BUILD_URL_BASE = ('https://fuel-jenkins.mirantis.com/job/'
                       'nailgun_performance_tests/lastCompletedBuild/')
LAST_BUILD_INFO = LAST_BUILD_URL_BASE + 'api/json'
LAST_BUILD_TAR_BASE = LAST_BUILD_URL_BASE + 'artifact/results/results/'
CSV_URL = LAST_BUILD_URL_BASE + 'artifact/nailgun/nailgun_perf_test_report.csv'
CSV_TARGET_PATH = '/usr/share/nginx/html/test_report.csv'
DOT_TARGET_DIR = 'dot/'
DOT_INDEX_PATH = 'graphs.json'

try:
    with open('build_number', 'r') as bn_file:
        previous_build_number = int(bn_file.read())
except (IOError, ValueError):
    previous_build_number = 0

current_build_info = json.loads(urllib.urlopen(LAST_BUILD_INFO).read())
current_build_number = current_build_info['number']

if current_build_number > previous_build_number:
    with open('build_number', 'w') as bn_file:
        bn_file.write(str(current_build_number))

    urllib.urlretrieve(CSV_URL, CSV_TARGET_PATH)

    shutil.rmtree(DOT_TARGET_DIR)
    os.mkdir(DOT_TARGET_DIR)

    arts = [x['fileName'] for x in current_build_info['artifacts']
            if 'tar.gz' in x['fileName']]

    pool = workerpool.WorkerPool(size=2)
    for filename in arts:
        job = jobs.DownloadArtifactJob(
            LAST_BUILD_TAR_BASE + filename,
            DOT_TARGET_DIR,
            filename
        )
        pool.put(job)
    pool.shutdown()
    pool.wait()

    tests = [x for x in os.listdir(DOT_TARGET_DIR)
             if 'tar.gz' not in x and 'txt' not in x]

    processing_jobs = []
    for test in tests:
        name = re.search(r'[^0-9._].*', test).group(0)
        extractor = jobs.GraphExtractor(DOT_TARGET_DIR + test, name)
        for graph in extractor.get_files():
            job = jobs.ProcessGraphJob(graph, name)
            processing_jobs.append(job)

    def run_job(job):
        job.run()
        return {'test_name': job.test_name, 'graph': job.graph}

    process_pool = multiprocessing.Pool(2)
    processed_data_index = process_pool.map(run_job, processing_jobs)
    process_pool.close()

    graphs_index = {k: list(v) for k, v in
                    itertools.groupby(processed_data_index,
                                      lambda x: x['test_name'])}

    with open(DOT_INDEX_PATH, 'w') as graphs_file:
        graphs_file.write(json.dumps(graphs_index))
Python
0.000001
@@ -585,16 +585,20 @@
 PATH = '
+dot/
 graphs.j
d8c7d33aae350d0be1abdc9aedbb8a26e1eec1d2
reformat template_loaders
tenant_schemas/template_loaders.py
tenant_schemas/template_loaders.py
""" Adaptations of the cached and filesystem template loader working in a multi-tenant setting """ import hashlib from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.template.base import TemplateDoesNotExist from django.template.loader import (BaseLoader, get_template_from_string, find_template_loader, make_origin) from django.utils.encoding import force_bytes from django.utils._os import safe_join from django.db import connection from tenant_schemas.postgresql_backend.base import FakeTenant class CachedLoader(BaseLoader): is_usable = True def __init__(self, loaders): self.template_cache = {} self._loaders = loaders self._cached_loaders = [] @property def loaders(self): # Resolve loaders on demand to avoid circular imports if not self._cached_loaders: # Set self._cached_loaders atomically. Otherwise, another thread # could see an incomplete list. See #17303. cached_loaders = [] for loader in self._loaders: cached_loaders.append(find_template_loader(loader)) self._cached_loaders = cached_loaders return self._cached_loaders def find_template(self, name, dirs=None): for loader in self.loaders: try: template, display_name = loader(name, dirs) return template, make_origin(display_name, loader, name, dirs) except TemplateDoesNotExist: pass raise TemplateDoesNotExist(name) def load_template(self, template_name, template_dirs=None): if connection.tenant: key = '-'.join([str(connection.tenant.pk), template_name]) else: key = template_name if template_dirs: # If template directories were specified, use a hash to # differentiate if connection.tenant: key = '-'.join([str(connection.tenant.pk), template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()]) else: key = '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()]) if key not in self.template_cache: template, origin = self.find_template(template_name, template_dirs) if not hasattr(template, 'render'): try: template = get_template_from_string(template, origin, template_name) except TemplateDoesNotExist: # If compiling the template we found raises TemplateDoesNotExist, # back off to returning the source and display name for the template # we were asked to load. This allows for correct identification (later) # of the actual template that does not exist. return template, origin self.template_cache[key] = template return self.template_cache[key], None def reset(self): "Empty the template cache." self.template_cache.clear() class FilesystemLoader(BaseLoader): is_usable = True def get_template_sources(self, template_name, template_dirs=None): """ Returns the absolute paths to "template_name", when appended to each directory in "template_dirs". Any paths that don't lie inside one of the template dirs are excluded from the result set, for security reasons. """ if not connection.tenant or isinstance(connection.tenant, FakeTenant): return if not template_dirs: try: template_dirs = settings.MULTITENANT_TEMPLATE_DIRS except AttributeError: raise ImproperlyConfigured('To use %s.%s you must define the MULTITENANT_TEMPLATE_DIRS' % (__name__, FilesystemLoader.__name__)) for template_dir in template_dirs: try: if '%s' in template_dir: yield safe_join(template_dir % connection.tenant.domain_url, template_name) else: yield safe_join(template_dir, connection.tenant.domain_url, template_name) except UnicodeDecodeError: # The template dir name was a bytestring that wasn't valid UTF-8. 
raise except ValueError: # The joined path was located outside of this particular # template_dir (it might be inside another one, so this isn't # fatal). pass def load_template_source(self, template_name, template_dirs=None): tried = [] for filepath in self.get_template_sources(template_name, template_dirs): try: with open(filepath, 'rb') as fp: return (fp.read().decode(settings.FILE_CHARSET), filepath) except IOError: tried.append(filepath) if tried: error_msg = "Tried %s" % tried else: error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory." raise TemplateDoesNotExist(error_msg) load_template_source.is_usable = True
Python
0.000001
@@ -2206,16 +2206,48 @@
 te_name,
+%0A
 hashlib
@@ -2553,16 +2553,41 @@
 _string(
+%0A
 template
@@ -4474,16 +4474,34 @@
 't valid
+%0A #
 UTF-8.%0A
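For context, a sketch of how these loaders are typically wired into a pre-Django-1.8 style settings module. The nesting follows Django's usual cached-loader convention; MULTITENANT_TEMPLATE_DIRS is the setting FilesystemLoader requires above, and the directory value here is a hypothetical example:

# settings.py sketch ('%s' is substituted with the tenant's domain_url by
# FilesystemLoader.get_template_sources above).
TEMPLATE_LOADERS = (
    ('tenant_schemas.template_loaders.CachedLoader', (
        'tenant_schemas.template_loaders.FilesystemLoader',
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)

MULTITENANT_TEMPLATE_DIRS = [
    '/usr/src/project/tenants/%s/templates',  # hypothetical path
]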
9ffd9eee7247adebd0f48d53a8d593fc6b9b7bf6
Update forward compatibility horizon to 2020-05-20
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 5, 19) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) if date < _FORWARD_COMPATIBILITY_HORIZON: logging.warning("Trying to set the forward compatibility date to the past" " date %s. This will be ignored by TensorFlow." % (date)) return _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. 
The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibility, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
Python
0
@@ -1448,10 +1448,10 @@
 5, 
-19
+20
 )%0A_F
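The one-digit hunks in these compat.py records work because the horizon is compared as a packed integer: _date_to_date_number, verbatim in the file above, keeps calendar order while fitting a date into one int. A tiny check:

# Date packing from compat.py: day in bits 0-4, month in bits 5-8, year above.
def _date_to_date_number(year, month, day):
    return (year << 9) | (month << 5) | day

assert _date_to_date_number(2020, 5, 20) > _date_to_date_number(2020, 5, 19)
assert _date_to_date_number(2021, 1, 1) > _date_to_date_number(2020, 12, 31)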
300e0e74fcc4ef7e1f2ec8c7866bd0c192d2578c
Update forward compatibility horizon to 2020-09-24
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 9, 23) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) if date < _FORWARD_COMPATIBILITY_HORIZON: logging.warning("Trying to set the forward compatibility date to the past" " date %s. This will be ignored by TensorFlow." % (date)) return _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. 
The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibility, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
Python
0
@@ -1445,17 +1445,17 @@
 20, 9, 2
-3
+4
 )%0A_FORWA
dba43fa59df7eda4bcda93ff0a0ac7a9d0db8b7e
Update forward compatibility horizon to 2020-03-30
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 3, 29) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibility, use the old implementation. 
return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
Python
0
@@ -1382,18 +1382,18 @@
 020, 3, 
-29
+30
 )%0A_FORWA
09792df012c22622324f085f46edde33006c7355
Update forward compatibility horizon to 2018-08-26
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 8, 25) @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). month: A month (1 <= month <= 12) in year. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). 
To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args : year: A year (e.g. 2018). month: A month (1 <= month <= 12) in year. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Yields: Nothing. """ global _FORWARD_COMPATIBILITY_HORIZON try: old_compat_date = _FORWARD_COMPATIBILITY_HORIZON _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day) yield finally: _FORWARD_COMPATIBILITY_HORIZON = old_compat_date
Python
0
@@ -1142,9 +1142,9 @@
 8, 2
-5
+6
 )%0A%0A%0A
b9a8a022573447f2560e31f0a106cf842634ec19
Update forward compatibility horizon to 2019-02-18
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 2, 17) @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). month: A month (1 <= month <= 12) in year. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). 
To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args : year: A year (e.g. 2018). month: A month (1 <= month <= 12) in year. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Yields: Nothing. """ global _FORWARD_COMPATIBILITY_HORIZON try: old_compat_date = _FORWARD_COMPATIBILITY_HORIZON _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day) yield finally: _FORWARD_COMPATIBILITY_HORIZON = old_compat_date
Python
0
@@ -1143,9 +1143,9 @@
 2, 1
-7
+8
 )%0A%0A%0A
b49bb4851c87890894eaf306ec49954b1cec38c7
Update forward compatibility horizon to 2021-08-06
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 8, 5) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) if date < _FORWARD_COMPATIBILITY_HORIZON: logging.warning("Trying to set the forward compatibility date to the past" " date %s. This will be ignored by TensorFlow." % (date)) return _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. 
The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibility, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
Python
0
@@ -1444,17 +1444,17 @@
 021, 8, 
-5
+6
 )%0A_FORWA
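These newer revisions also honor the TF_FORWARD_COMPATIBILITY_DELTA_DAYS environment variable: an integer number of days added to the horizon at import time. A pure-Python restatement of that logic from the file above:

# Restatement of the delta-days handling in
# _update_forward_compatibility_date_number (horizon date from this record).
import datetime
import os

horizon = datetime.date(2021, 8, 6)
delta_days = os.getenv("TF_FORWARD_COMPATIBILITY_DELTA_DAYS")
if delta_days:
    horizon += datetime.timedelta(days=int(delta_days))  # shifts the effective date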
2b42112aa540701809b228e1a9e9439f7ee1dbc6
Update forward compatibility horizon to 2021-10-02
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ import datetime import os from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 10, 1) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) if date < _FORWARD_COMPATIBILITY_HORIZON: logging.warning("Trying to set the forward compatibility date to the past" " date %s. This will be ignored by TensorFlow." % (date)) return _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. 
return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibility, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
Python
0
@@ -1335,17 +1335,17 @@ 21, 10, -1 +2 )%0A_FORWA
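Editor's note: the diff above only advances the horizon date, but this record's `_date_to_date_number` helper deserves a worked example. Because the day field needs at most 5 bits and the month at most 4, the bit shifts keep the three fields disjoint, so the packed integers compare in the same order as the dates themselves. A minimal sketch of that helper, with illustrative asserts:

```python
def date_to_date_number(year, month, day):
    # day occupies bits 0-4 (max 31), month bits 5-8 (max 12),
    # year everything from bit 9 up, so the fields never overlap
    return (year << 9) | (month << 5) | day

# packed integers order exactly like the (year, month, day) tuples
assert date_to_date_number(2021, 10, 1) < date_to_date_number(2021, 10, 2)
assert date_to_date_number(2021, 12, 31) < date_to_date_number(2022, 1, 1)
```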
b504d86ca21f9ce57700d017ab8f6d411ca5afde
Update forward compatibility horizon to 2019-01-08
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 1, 7) @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). month: A month (1 <= month <= 12) in year. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). 
To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args : year: A year (e.g. 2018). month: A month (1 <= month <= 12) in year. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Yields: Nothing. """ global _FORWARD_COMPATIBILITY_HORIZON try: old_compat_date = _FORWARD_COMPATIBILITY_HORIZON _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day) yield finally: _FORWARD_COMPATIBILITY_HORIZON = old_compat_date
Python
0
@@ -1142,9 +1142,9 @@ 1, -7 +8 )%0A%0A%0A
75ba615684492a49e67fd2c2a59af4ee0e56838b
Update forward compatibility horizon to 2020-02-04
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 2, 3) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None def _date_to_date_number(year, month, day): return (year << 9) | (month << 5) | day def _update_forward_compatibility_date_number(date_to_override=None): """Update the base date to compare in forward_compatible function.""" global _FORWARD_COMPATIBILITY_DATE_NUMBER if date_to_override: date = date_to_override else: date = _FORWARD_COMPATIBILITY_HORIZON delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) if delta_days: date += datetime.timedelta(days=int(delta_days)) _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( date.year, date.month, date.day) _update_forward_compatibility_date_number() @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. 
return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ try: _update_forward_compatibility_date_number(datetime.date(year, month, day)) yield finally: _update_forward_compatibility_date_number()
Python
0
@@ -1382,17 +1382,17 @@ 020, 2, -3 +4 )%0A_FORWA
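Editor's note: this revision of the module also reads `TF_FORWARD_COMPATIBILITY_DELTA_DAYS` and adds it to the compiled-in horizon. A hedged sketch of that override in isolation (the environment variable name comes from the record; the standalone function is illustrative):

```python
import datetime
import os

_HORIZON = datetime.date(2020, 2, 4)  # the value after the diff above

def effective_horizon():
    # An integer day count from the environment shifts the baked-in date,
    # mirroring _update_forward_compatibility_date_number in the record.
    date = _HORIZON
    delta_days = os.getenv("TF_FORWARD_COMPATIBILITY_DELTA_DAYS")
    if delta_days:
        date += datetime.timedelta(days=int(delta_days))
    return date

os.environ["TF_FORWARD_COMPATIBILITY_DELTA_DAYS"] = "7"
assert effective_horizon() == datetime.date(2020, 2, 11)
```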
8021e91b13443a0d5f26fb997da883db1636df73
Make "kevlar filter" summary output more correct
kevlar/filter.py
kevlar/filter.py
#!/usr/bin/env python # # ----------------------------------------------------------------------------- # Copyright (c) 2017 The Regents of the University of California # # This file is part of kevlar (http://github.com/dib-lab/kevlar) and is # licensed under the MIT license: see LICENSE. # ----------------------------------------------------------------------------- import re import sys import khmer from khmer import khmer_args import kevlar from kevlar.sketch import KevlarUnsuitableFPRError def load_mask(maskfiles, ksize, memory, maxfpr=0.001, savefile=None, logstream=sys.stderr): """Load reference genome and/or contaminant database from a file.""" if maskfiles is None: return None timer = kevlar.Timer() timer.start('loadmask') print('[kevlar::filter] Loading mask from', maskfiles, file=logstream) if len(maskfiles) == 1 and maskfiles[0].endswith(('.nt', '.nodetable')): mask = kevlar.sketch.load(maskfiles[0]) message = ' nodetable loaded' else: buckets = memory * khmer._buckets_per_byte['nodegraph'] / 4 mask = khmer.Nodetable(ksize, buckets, 4) nr, nk = 0, 0 for maskfile in maskfiles: numreads, numkmers = mask.consume_seqfile(maskfile) nr += numreads nk += numkmers message = ' {:d} sequences and {:d} k-mers consumed'.format(nr, nk) fpr = kevlar.sketch.estimate_fpr(mask) message += '; estimated false positive rate is {:1.3f}'.format(fpr) print(message, file=logstream) if fpr > maxfpr: raise KevlarUnsuitableFPRError('FPR too high, bailing out!!!') if savefile: mask.save(savefile) message = ' nodetable saved to "{:s}"'.format(savefile) print(message, file=logstream) elapsed = timer.stop('loadmask') print('[kevlar::filter]', 'Mask loaded in {:.2f} sec'.format(elapsed), file=logstream) return mask def summarize_readset(readset, logfile): fpr = kevlar.sketch.estimate_fpr(readset._counts) message = ' {:d} instances'.format(readset.read_instances) message += ' of {:d} reads consumed,\n'.format(readset.distinct_reads) message += ' annotated with' message += ' {:d} instances '.format(readset.ikmer_instances) message += 'of {:d} distinct'.format(readset.distinct_ikmers) message += ' "interesting" k-mers;\n' message += ' estimated false positive rate is {:1.3f}'.format(fpr) if logfile is not None: print(message, file=logfile) return fpr def summarize_validate(readset, n, logfile=sys.stderr): int_distinct = readset.masked[0] + readset.lowabund[0] + readset.valid[0] int_instances = readset.masked[1] + readset.lowabund[1] + readset.valid[1] message = ' processed {:d} instances'.format(int_instances) message += ' of {:d} distinct "interesting" k-mers'.format(int_distinct) message += ' in {:d} reads'.format(len(readset)) message += '\n ' message += '{:d} instances'.format(readset.masked[1]) message += ' of {:d} distinct k-mers'.format(readset.masked[0]) message += ' masked by the reference genome' message += '\n ' message += '{:d} instances'.format(readset.lowabund[1]) message += ' of {:d} distinct k-mers'.format(readset.lowabund[0]) message += ' discarded due to low abundance' message += '\n ' message += '{:d} instances'.format(readset.valid[1]) message += ' of {:d} distinct k-mers'.format(readset.valid[0]) message += ' validated as novel' message += '\n ' message += '{:d} reads'.format(readset.discarded) message += ' with no surviving valid k-mers ignored' message += '\n ' message += '{:d} reads written to output'.format(n) if logfile is not None: print(message, file=logfile) def filter(readstream, mask=None, minabund=5, ksize=31, memory=1e6, maxfpr=0.001, logstream=sys.stderr): timer = kevlar.Timer() 
timer.start('recalc') print('[kevlar::filter] Loading input; recalculate k-mer abundances,', 'de-duplicate reads and merge k-mers', file=logstream) readset = kevlar.seqio.AnnotatedReadSet(ksize, memory) for record in readstream: readset.add(record) fpr = summarize_readset(readset, logstream) if fpr > maxfpr: raise KevlarUnsuitableFPRError('FPR too high, bailing out!!!') elapsed = timer.stop('recalc') print('[kevlar::filter] Input loaded in {:.2f} sec'.format(elapsed), file=logstream) timer.start('validate') print('[kevlar::filter] Validate k-mers and print reads', file=logstream) readset.validate(mask=mask, minabund=minabund) for n, record in enumerate(readset, 1): yield record summarize_validate(readset, n, logstream) elapsed = timer.stop('validate') print('[kevlar::filter] k-mers validated and reads printed', 'in {:.2f} sec'.format(elapsed), file=logstream) def main(args): timer = kevlar.Timer() timer.start() mask = load_mask( args.mask, args.ksize, args.mask_memory, maxfpr=args.mask_max_fpr, savefile=args.save_mask, logstream=args.logfile ) readstream = kevlar.seqio.afxstream(args.augfastq) outstream = kevlar.open(args.out, 'w') filterstream = filter( readstream, mask, minabund=args.min_abund, ksize=args.ksize, memory=args.abund_memory, maxfpr=args.abund_max_fpr, logstream=args.logfile ) for record in filterstream: kevlar.print_augmented_fastx(record, outstream) total = timer.stop() message = 'Total time: {:.2f} seconds'.format(total) print('[kevlar::filter]', message, file=args.logfile)
Python
0.999999
@@ -3145,14 +3145,37 @@ ked -by the +as contaminant or included in ref
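Editor's note: the diff only rewords the masked-k-mer summary line, but the record's `_hash_file` shows the standard block-wise hashing idiom for files too large to read in one go. A compact equivalent using the same stdlib API (function name illustrative):

```python
import hashlib

def hash_file(path, block_size=65536):
    # Feed the digest in fixed-size blocks so memory use stays constant
    # regardless of file size, as _hash_file in the record does.
    hasher = hashlib.md5()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(block_size), b""):
            hasher.update(block)
    return hasher.hexdigest()
```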
8ba62b47d2d94eb56122f9061b8309e06cc62cdd
add .get()
kibitzr/stash.py
kibitzr/stash.py
import contextlib import logging logger = logging.getLogger(__name__) class Stash(object): FILENAME = 'stash.db' @contextlib.contextmanager def open(self): import shelve with contextlib.closing(shelve.open(self.FILENAME)) as db: yield db def read(self): with self.open() as db: return dict(db) def write(self, data): with self.open() as db: for key, value in data.items(): db[key] = value @classmethod def print_content(cls): for key, value in cls().read().items(): print("{0}: {1}".format(key, value)) class LazyStash(Stash): def __init__(self): self._stash = None @property def stash(self): if self._stash is None: self._stash = self.read() return self._stash def __getitem__(self, key): return self.stash[key]
Python
0.000001
@@ -706,16 +706,19 @@ f._stash +obj = None%0A @@ -740,16 +740,17 @@ def +_ stash(se @@ -776,16 +776,19 @@ f._stash +obj is None @@ -812,16 +812,19 @@ f._stash +obj = self. @@ -856,16 +856,19 @@ f._stash +obj %0A%0A de @@ -913,16 +913,17 @@ rn self. +_ stash%5Bke @@ -925,8 +925,148 @@ sh%5Bkey%5D%0A +%0A def get(self, key, default=None):%0A try:%0A return self._stash%5Bkey%5D%0A except KeyError:%0A return default%0A
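Editor's note: the diff above renames the cached attribute and adds a `get()` mirroring `dict.get`: `__getitem__` keeps raising `KeyError`, while `get()` catches it and returns a default. A self-contained sketch of the resulting lazy, dict-like access (the class and loader names are illustrative, not from kibitzr):

```python
class LazyMapping:
    def __init__(self, loader):
        self._obj = None
        self._loader = loader

    @property
    def _data(self):
        if self._obj is None:       # load at most once, on first access
            self._obj = self._loader()
        return self._obj

    def __getitem__(self, key):
        return self._data[key]      # missing key -> KeyError

    def get(self, key, default=None):
        try:
            return self._data[key]  # same lookup, but swallow the miss
        except KeyError:
            return default

stash = LazyMapping(lambda: {"a": 1})
assert stash["a"] == 1 and stash.get("missing", 0) == 0
```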
c749d82035f72b9d57c52dfc2dbdd70f42fbdf66
add missing import
ktbh/__init__.py
ktbh/__init__.py
import time import pika import json import landing_page def hand_off(amqp_host, out_queue, body): connection = pika.BlockingConnection( pika.ConnectionParameters(host=amqp_host)) channel = connection.channel() channel.queue_declare(queue=out_queue, durable=True) channel.basic_publish(exchange='', routing_key=out_queue, body=body, properties=pika.BasicProperties(delivery_mode=2)) connection.close() def add_landing_page(url, config): amqp_host = config.get("main", "amqp_host") out_queue = config.get("main", "lp_queue") payload = json.dumps({ "url": url }) hand_off(amqp_host, out_queue, payload) def get_connection(host): count = 0.4 while count < 60: try: connection = pika.BlockingConnection( pika.ConnectionParameters(host=host)) return connection except: time.sleep(count) count *= 1.7 sys.exit(1) def handle_queue(amqp_host, queue_name, callback_fn): connection = get_connection(amqp_host) try: channel = connection.channel() channel.queue_declare(queue=queue_name, durable=True) channel.basic_qos(prefetch_count=1) channel.basic_consume(callback_fn, queue=queue_name) channel.start_consuming() except: pass finally: connection.close() def examine_landing_pages(config): out_queue = config.get("main", "lp_queue") url_queue = config.get("main", "url_queue") broken_queue = config.get("main", "broken_lp_queue") amqp_host = config.get("main", "amqp_host") def callback(ch, method, properties, body): try: args = json.loads(body) url = args["url"] count = 0 for text, href in landing_page.scrape(url): payload = json.dumps({ "link_text": text, "link_href": href }) hand_off(amqp_host, url_queue, payload) count += 1 if count == 0: hand_off(amqp_host, broken_queue, json.dumps({"url": url})) finally: ch.basic_ack(delivery_tag = method.delivery_tag) while True: handle_queue(amqp_host, out_queue, callback)
Python
0.000042
@@ -1,12 +1,23 @@ +import sys%0A import time%0A
2d08761a898ba96ff84fdbecef4a6d71cdb54926
Use base64 to store pickled data in text fields.
src/django_future/models.py
src/django_future/models.py
import datetime import cPickle from django.db import models from django.conf import settings from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType __all__ = ['ScheduledJob'] END_OF_TIME = datetime.datetime(2047, 9, 14) class ScheduledJob(models.Model): STATUSES = ( ('scheduled', 'Scheduled'), ('running', 'Running'), ('failed', 'Failed'), ('complete', 'Complete'), ('expired', 'Expired'), ) time_slot_start = models.DateTimeField() time_slot_end = models.DateTimeField() execution_start = models.DateTimeField(blank=True, null=True) status = models.CharField(choices=STATUSES, max_length=32, default='scheduled') content_type = models.ForeignKey(ContentType, blank=True, null=True) object_id = models.PositiveIntegerField(blank=True, null=True) content_object = generic.GenericForeignKey() callable_name = models.CharField(max_length=255) args_pickled = models.TextField() kwargs_pickled = models.TextField() def _get_args(self): return cPickle.loads(str(self.args_pickled)) def _set_args(self, value): self.args_pickled = cPickle.dumps(tuple(value)) args = property(_get_args, _set_args) def _get_kwargs(self): return cPickle.loads(str(self.kwargs_pickled)) def _set_kwargs(self, value): self.kwargs_pickled = cPickle.dumps(value) kwargs = property(_get_kwargs, _set_kwargs) def __repr__(self): return '<ScheduledJob (%s) callable=%r>' % ( self.status, self.callable_name) def run(self): # TODO: logging? args = self.args kwargs = self.kwargs if '.' in self.callable_name: module_name, function_name = self.callable_name.rsplit('.', 1) module = __import__(module_name, fromlist=[function_name]) callable_func = getattr(module, function_name) if self.content_object is not None: args = [self.content_object] + list(args) else: callable_func = getattr(self.content_object, self.callable_name) if hasattr(callable_func, 'job_as_parameter'): args = [self] + list(args) callable_func(*args, **kwargs) def reschedule(self, date, callable_name=None, content_object=None, expires='7d', args=None, kwargs=None): """Schedule a clone of this job.""" if callable_name is None: callable_name = self.callable_name if content_object is None: content_object = self.content_object if args is None: args = self.args if kwargs is None: kwargs = self.kwargs from django_future import schedule_job return schedule_job(date, callable_name, content_object=content_object, expires=expires, args=args, kwargs=kwargs)
Python
0
@@ -1125,33 +1125,30 @@ return -cPickle.loads(str +self._unpickle (self.ar @@ -1150,33 +1150,32 @@ lf.args_pickled) -) %0A def _set_ar @@ -1223,25 +1223,18 @@ d = -cPickle.dumps(tup +self._pick le(v @@ -1238,17 +1238,16 @@ e(value) -) %0A arg @@ -1328,25 +1328,22 @@ urn -cPickle.loads(str +self._unpickle (sel @@ -1359,17 +1359,16 @@ pickled) -) %0A def @@ -1428,21 +1428,20 @@ d = -cPickle.dumps +self._pickle (val @@ -1485,32 +1485,199 @@ , _set_kwargs)%0A%0A + def _pickle(self, value):%0A return cPickle.dumps(value).encode('base64')%0A%0A def _unpickle(self, s):%0A return cPickle.loads(str(s).decode('base64'))%0A%0A def __repr__
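Editor's note: the diff wraps the pickled bytes in base64 so they survive storage in a text column. The record targets Python 2 (`str.encode('base64')`); a hedged Python 3 equivalent of the same roundtrip:

```python
import base64
import pickle

def pickle_to_text(value):
    # pickle yields arbitrary bytes; base64 maps them to ASCII that is
    # safe inside a TextField, which is the point of the diff above.
    return base64.b64encode(pickle.dumps(value)).decode("ascii")

def unpickle_from_text(text):
    return pickle.loads(base64.b64decode(text.encode("ascii")))

args = (1, "two", [3])
assert unpickle_from_text(pickle_to_text(args)) == args
```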
ba927036c6170c754f4f95d90e62928b5da5d726
Write config file while init
gitcd/Git/Commands/Init.py
gitcd/Git/Commands/Init.py
from gitcd.Git.Command import Command class Init(Command): # no special subcommands, only run which is meant to be default def execute(self, dummy: str): self.config.setMaster( self.interface.askFor( "Branch name for production releases?", False, self.config.getMaster() ) ) featureDefault = self.config.getFeature() if featureDefault is None: featureDefault = '<none>' self.config.setFeature( self.interface.askFor( "Branch name for feature development?", False, featureDefault ) ) testDefault = self.config.getTest() if testDefault is None: testDefault = '<none>' self.config.setTest( self.interface.askFor( "Branch name for test releases?", False, testDefault ) ) tagDefault = self.config.getTag() if tagDefault is None: tagDefault = '<none>' self.config.setTag( self.interface.askFor( "Version tag prefix?", False, tagDefault ) ) # ask for version type, manual or date versionType = self.interface.askFor( "Version type? You can either set your tag number" + " manually or generate it by date.", ['manual', 'date'], self.config.getVersionType() ) self.config.setVersionType(versionType) # if type is date ask for scheme if versionType == 'date': versionScheme = self.interface.askFor( "Scheme for your date-tag?" + " Year: %Y / Month: %m / Day: %d /" + " Hour: %H / Minute: %M / Second: %S", '%Y.%m.%d%H%M', self.config.getVersionScheme() ) else: # you'll be asked for it while a release versionScheme = None # pass version scheme to config self.config.setVersionScheme(versionScheme)
Python
0.000001
@@ -2185,8 +2185,37 @@ Scheme)%0A +%0A self.config.write()%0A
8319a938e1a511073094ba49d95d91c64ccac118
Refactor how the API handles options.
api/common.py
api/common.py
#!/usr/bin/python import mechanize import urllib import simplejson from urllib2 import HTTPError import time class HumbugAPI(): def __init__(self, email, api_key, verbose=False, site="https://app.humbughq.com"): self.browser = mechanize.Browser() self.browser.set_handle_robots(False) self.browser.add_password("https://app.humbughq.com/", "tabbott", "xxxxxxxxxxxxxxxxx", "wiki") self.api_key = api_key self.email = email self.verbose = verbose self.base_url = site def send_message(self, submit_hash): submit_hash["email"] = self.email submit_hash["api-key"] = self.api_key submit_data = urllib.urlencode([(k, v.encode('utf-8')) for k,v in submit_hash.items()]) res = self.browser.open(self.base_url + "/api/v1/send_message", submit_data) return simplejson.loads(res.read()) def get_messages(self, last_received = None): submit_hash = {} submit_hash["email"] = self.email submit_hash["api-key"] = self.api_key if last_received is not None: submit_hash["first"] = "0" submit_hash["last"] = str(last_received) submit_data = urllib.urlencode([(k, v.encode('utf-8')) for k,v in submit_hash.items()]) res = self.browser.open(self.base_url + "/api/v1/get_updates", submit_data) return simplejson.loads(res.read())['zephyrs'] def call_on_each_message(self, callback): max_message_id = None while True: try: messages = self.get_messages(max_message_id) except HTTPError, e: # 502/503 typically means the server was restarted; sleep # a bit, then try again if self.verbose: print "HTTP Error getting zephyrs; trying again soon." print e time.sleep(1) except Exception, e: # For other errors, just try again print e time.sleep(2) continue for message in sorted(messages, key=lambda x: x["id"]): max_message_id = max(max_message_id, message["id"]) callback(message)
Python
0.000008
@@ -907,55 +907,22 @@ lf, -last_received = None):%0A submit_hash +options = %7B%7D +): %0A @@ -922,35 +922,31 @@ %7D):%0A -submit_hash +options %5B%22email%22%5D = @@ -960,35 +960,31 @@ ail%0A -submit_hash +options %5B%22api-key%22%5D @@ -1002,138 +1002,8 @@ key%0A - if last_received is not None:%0A submit_hash%5B%22first%22%5D = %220%22%0A submit_hash%5B%22last%22%5D = str(last_received)%0A @@ -1068,35 +1068,31 @@ for k,v in -submit_hash +options .items()%5D)%0A @@ -1273,16 +1273,30 @@ callback +, options = %7B%7D ):%0A @@ -1357,16 +1357,108 @@ try:%0A + options%5B%22first%22%5D = %220%22%0A options%5B%22last%22%5D = str(last_received)%0A @@ -1494,30 +1494,23 @@ essages( -max_message_id +options )%0A
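Editor's note: the refactor replaces the dedicated `last_received` parameter with a caller-supplied `options` dict that the client merges its credentials into before form-encoding. A sketch of that merge-then-encode pattern, with Python 3's `urllib.parse.urlencode` standing in for the record's `urllib.urlencode` (the function and sample values are illustrative):

```python
from urllib.parse import urlencode

def build_request_body(email, api_key, options=None):
    # Start from whatever the caller passed, then lay the fixed
    # credentials on top, as the refactored get_messages does.
    payload = dict(options or {})
    payload["email"] = email
    payload["api-key"] = api_key
    return urlencode(payload)

body = build_request_body("bot@example.com", "secret",
                          {"first": "0", "last": "42"})
assert "email=bot%40example.com" in body and "last=42" in body
```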
e2c92e8b6e8fb10addc73986914014b278598470
Fix docstring in standardnormal example
spotpy/examples/spot_setup_standardnormal.py
spotpy/examples/spot_setup_standardnormal.py
''' Copyright 2015 by Tobias Houska This file is part of Statistical Parameter Estimation Tool (SPOTPY). :author: Tobias Houska This example implements the Rosenbrock function into SPOT. ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import spotpy class spot_setup(object): def __init__(self,mean=0,std=1): self.params = [spotpy.parameter.Uniform('x',-5,5,1.5,3.0) ] self.mean=mean self.std=std def parameters(self): return spotpy.parameter.generate(self.params) def simulation(self,x): simulations= (1.0/(std*np.sqrt(2*np.pi)))**((-1.0/2.0)*(((x-self.mean)/self.std)**2)) return simulations def evaluation(self): observations = [0] return observations def objectivefunction(self, simulation,evaluation): objectivefunction = -spotpy.objectivefunctions.rmse(evaluation = evaluation,simulation = simulation) return objectivefunction
Python
0.000051
@@ -155,18 +155,23 @@ the -Rosenbrock +Standard Normal fun
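Editor's note: the diff only corrects the docstring, but observe that the record's `simulation` raises the normalising prefactor to a power (`**`) where the normal density calls for an exponential. For comparison, a hedged sketch of the conventional density (1/(σ√(2π)))·exp(−((x−µ)/σ)²/2):

```python
import numpy as np

def normal_pdf(x, mean=0.0, std=1.0):
    # Gaussian density: the prefactor *multiplies* exp(...); it is not
    # raised to the quadratic term as in the record's old_contents.
    z = (x - mean) / std
    return (1.0 / (std * np.sqrt(2.0 * np.pi))) * np.exp(-0.5 * z ** 2)

assert abs(normal_pdf(0.0) - 0.3989422804014327) < 1e-12
```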
d33a54c56215bf01abd6d119f7ab12585720edee
remove notices from nickserv plugin
gozerlib/plugs/nickserv.py
gozerlib/plugs/nickserv.py
# gozerlib/plugs/nickserv.py # # """ authenticate to NickServ. """ __author__ = "Wijnand 'tehmaze' Modderman - http://tehmaze.com" __license__ ='BSD' ## gozerlib imports from gozerlib.examples import examples from gozerlib.callbacks import callbacks from gozerlib.commands import cmnds from gozerlib.datadir import datadir from gozerlib.fleet import fleet from gozerlib.utils.pdod import Pdod from gozerlib.config import cfg as config ## basic imports import os import time import logging ## NSAuth class class NSAuth(Pdod): """ nickserve auth. """ def __init__(self): self.registered = False Pdod.__init__(self, datadir + os.sep + 'plugs' + os.sep + 'gozerlib.plugs.nickserv' + os.sep + 'nickserv') def add(self, bot, **kwargs): """ add a nickserv entry. """ options = { 'nickserv': 'NickServ', 'identify': 'IDENTIFY', } options.update(kwargs) assert options.has_key('password'), 'A password must be set' for key in options.keys(): Pdod.set(self, bot.name, key, options[key]) self.save() def remove(self, bot): """ remove a nickserv entry. """ if self.has_key(bot.name): del self[bot.name] self.save() def has(self, bot): """ check if a bot is in the nickserv list. """ return self.has_key(bot.name) def register(self, bot, passwd): """ register a bot to nickserv. """ if self.has_key(bot.name) and self.has_key2(bot.name, 'nickserv'): bot.sendraw('PRIVMSG %s :%s %s' % (self.get(bot.name, 'nickserv'), 'REGISTER', passwd)) logging.debug('nickserv - register sent on %s' % bot.server) def identify(self, bot): """ identify a bot to nickserv. """ if self.has_key(bot.name) and self.has_key2(bot.name, 'nickserv'): bot.outnocb(self.get(bot.name, 'nickserv', ), '%s %s' % (self.get(bot.name, 'identify'), self.get(bot.name, 'password')), how="notice") logging.warn('nickserv - identify sent on %s' % bot.server) def listbots(self): """ list all bots know. """ all = [] for bot in self.data.keys(): all.append((bot, self.data[bot]['nickserv'])) return all def sendstring(self, bot, txt): """ send string to nickserver. """ nickservnick = self.get(bot.name, 'nickserv') bot.outnocb(nickservnick, txt, how="msg") def handle_001(self, bot, ievent): self.identify(bot) try: for i in self.data[bot.name]['nickservtxt']: self.sendstring(bot, i) logging.warn('nickserv - sent %s' % i) except: pass ## basic init stuff nsauth = NSAuth() if not nsauth.data: nsauth = NSAuth() ## register clallback callbacks.add('001', nsauth.handle_001, threaded=True) ## ns-add command def handle_nsadd(bot, ievent): """ add a bot to the nickserv. """ if bot.jabber: return if len(ievent.args) < 1: ievent.missing('<password> [<nickserv nick>] [<identify command>]') return if nsauth.has(bot): ievent.reply('replacing previous configuration') options = {} if len(ievent.args) >= 1: options.update({'password': ievent.args[0]}) if len(ievent.args) >= 2: options.update({'nickserv': ievent.args[1]}) if len(ievent.args) >= 3: options.update({'identify': ' '.join(ievent.args[2:])}) nsauth.add(bot, **options) ievent.reply('ok') cmnds.add('ns-add', handle_nsadd, 'OPER', threaded=True) examples.add('ns-add', 'ns-add <password> [<nickserv nick>] [<identify command>] .. add nickserv', 'ns-add mekker') ## ns-del command def handle_nsdel(bot, ievent): """ remove a bot from nickserv. 
""" if bot.jabber: return if len(ievent.args) != 1: ievent.missing('<fleetbot name>') return botname = ievent.args[0] fbot = fleet.byname(botname) if not fbot: ievent.reply('fleet bot %s not found' % botname) return if not nsauth.has(fbot): ievent.reply('nickserv not configured on %s' % fbot.name) return nsauth.remove(fbot) ievent.reply('ok') cmnds.add('ns-del', handle_nsdel, 'OPER', threaded=True) examples.add('ns-del', 'ns-del <fleetbot name>', 'ns-del test') ## ns-send command def handle_nssend(bot, ievent): """ send string to the nickserv. """ if bot.jabber: return if not ievent.rest: ievent.missing('<txt>') return nsauth.sendstring(bot, ievent.rest) ievent.reply('send') cmnds.add('ns-send', handle_nssend, 'OPER', threaded=True) examples.add('ns-send', 'ns-send <txt> .. send txt to nickserv', 'ns-send identify bla') ## ns-auth command def handle_nsauth(bot, ievent): """ perform an auth request. """ if bot.jabber: return if len(ievent.args) != 1: name = bot.name else: name = ievent.args[0] fbot = fleet.byname(name) if not fbot: ievent.reply('fleet bot %s not found' % name) return if not nsauth.has(fbot): ievent.reply('nickserv not configured on %s' % fbot.name) return nsauth.identify(fbot) ievent.reply('ok') cmnds.add('ns-auth', handle_nsauth, 'OPER', threaded=True) examples.add('ns-auth','ns-auth [<botname>]', '1) ns-auth 2) ns-auth test') ## ns-list command def handle_nslist(bot, ievent): """ show a list of all bots know with nickserv. """ if bot.jabber: return all = dict(nsauth.listbots()) rpl = [] for bot in all.keys(): rpl.append('%s: authenticating through %s' % (bot, all[bot])) rpl.sort() ievent.reply(' .. '.join(rpl)) cmnds.add('ns-list', handle_nslist, 'OPER') examples.add('ns-list', 'list all nickserv entries', 'ns-list')
Python
0
@@ -2003,14 +2003,11 @@ ow=%22 -notice +msg %22)%0A
5db4d1f0b98b2dbef3041e4dd72ea634450e67ee
Use absolute directory path to avoid errors with empty path strings
maxmindupdater/__init__.py
maxmindupdater/__init__.py
"""Function to keep a maxmind database file up to date""" import hashlib import os import shutil import sys import tarfile import requests __version__ = '0.1.0' __url__ = 'https://github.com/yola/maxmind-updater' def _hash_file(filename): if not os.path.exists(filename): return '' block_size = 65536 hasher = hashlib.md5() with open(filename, 'rb') as f: buf = f.read(block_size) while len(buf) > 0: hasher.update(buf) buf = f.read(block_size) return hasher.hexdigest() def update_db(db_path, license_key, edition_id): db_dir_path = os.path.dirname(db_path) db_archive_path = '%s.tar.gz' % db_path def maxmind_download(suffix, **kwargs): return requests.get('https://download.maxmind.com/app/geoip_download', params={'license_key': license_key, 'edition_id': edition_id, 'suffix': suffix, }, **kwargs) expected_md5 = maxmind_download('tar.gz.md5').content curr_md5 = _hash_file(db_archive_path) if expected_md5 == curr_md5 and os.path.exists(db_path): return with open(db_archive_path, 'wb') as local_zip: for chunk in maxmind_download('tar.gz', stream=True ).iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks local_zip.write(chunk) with tarfile.open(db_archive_path) as tar_file: # We only want the mmdb file. Maxmind kindly includes things # we don't want. extract_members = [member for member in tar_file.getmembers() if member.name.endswith('.mmdb')] assert len(extract_members) == 1 tar_file.extractall(path=db_dir_path, members=extract_members) # extractall keeps the subfolder structure. Account for this by # appending the path to the db_dir_path where it was extracted. new_db = os.path.join(db_dir_path, extract_members[0].path) try: pass # TODO # test_ip('8.8.8.8', new_db) # test_ip('2001:420::', new_db) except Exception: sys.stderr.write('Retrieved invalid GeoIP database - ' 'check MaxMind account details.\n') raise if not os.path.exists(db_dir_path): os.makedirs(db_dir_path) shutil.move(new_db, db_path) os.rmdir(os.path.dirname(new_db))
Python
0.000001
@@ -608,16 +608,32 @@ _path = +os.path.abspath( os.path. @@ -644,24 +644,25 @@ ame(db_path) +) %0A db_arch
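Editor's note: the fix exists because `os.path.dirname` of a bare filename is the empty string, which then feeds useless values to `os.path.exists` and `os.makedirs`; `os.path.abspath` resolves it against the working directory first. A quick illustration (the filename is hypothetical):

```python
import os

relative = "GeoLite2-City.mmdb"            # bare filename, no directory part
assert os.path.dirname(relative) == ""     # the empty string the commit avoids
resolved = os.path.abspath(os.path.dirname(relative))
assert os.path.isabs(resolved)             # now the CWD, a usable directory
```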
ac98be78363b98def729e129484a06c26324dccd
Use Undefined instead of the now deprecated INVALID (#1143)
graphene/types/datetime.py
graphene/types/datetime.py
from __future__ import absolute_import import datetime from aniso8601 import parse_date, parse_datetime, parse_time from graphql.error import INVALID from graphql.language import StringValueNode from .scalars import Scalar class Date(Scalar): """ The `Date` scalar type represents a Date value as specified by [iso8601](https://en.wikipedia.org/wiki/ISO_8601). """ @staticmethod def serialize(date): if isinstance(date, datetime.datetime): date = date.date() assert isinstance( date, datetime.date ), 'Received not compatible date "{}"'.format(repr(date)) return date.isoformat() @classmethod def parse_literal(cls, node): if isinstance(node, StringValueNode): return cls.parse_value(node.value) @staticmethod def parse_value(value): try: if isinstance(value, datetime.date): return value elif isinstance(value, str): return parse_date(value) except ValueError: return INVALID class DateTime(Scalar): """ The `DateTime` scalar type represents a DateTime value as specified by [iso8601](https://en.wikipedia.org/wiki/ISO_8601). """ @staticmethod def serialize(dt): assert isinstance( dt, (datetime.datetime, datetime.date) ), 'Received not compatible datetime "{}"'.format(repr(dt)) return dt.isoformat() @classmethod def parse_literal(cls, node): if isinstance(node, StringValueNode): return cls.parse_value(node.value) @staticmethod def parse_value(value): try: if isinstance(value, datetime.datetime): return value elif isinstance(value, str): return parse_datetime(value) except ValueError: return INVALID class Time(Scalar): """ The `Time` scalar type represents a Time value as specified by [iso8601](https://en.wikipedia.org/wiki/ISO_8601). """ @staticmethod def serialize(time): assert isinstance( time, datetime.time ), 'Received not compatible time "{}"'.format(repr(time)) return time.isoformat() @classmethod def parse_literal(cls, node): if isinstance(node, StringValueNode): return cls.parse_value(node.value) @classmethod def parse_value(cls, value): try: if isinstance(value, datetime.time): return value elif isinstance(value, str): return parse_time(value) except ValueError: return INVALID
Python
0
@@ -127,22 +127,16 @@ phql -.error import INVA @@ -131,23 +131,25 @@ import -INVALID +Undefined %0Afrom gr @@ -1062,39 +1062,41 @@ return -INVALID +Undefined %0A%0A%0Aclass DateTim @@ -1893,23 +1893,25 @@ return -INVALID +Undefined %0A%0A%0Aclass @@ -2687,12 +2687,14 @@ urn -INVALID +Undefined %0A
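Editor's note: the migration swaps graphql-core's deprecated `INVALID` sentinel for `Undefined`, which a scalar's `parse_value` returns to signal that coercion failed. A minimal sketch of the pattern on one parser (imports as in the patched record; the standalone function and the assumption that aniso8601 raises a `ValueError` subclass on garbage input are illustrative):

```python
import datetime

from aniso8601 import parse_date
from graphql import Undefined

def parse_date_value(value):
    # Returning Undefined rather than raising lets graphql-core report
    # the coercion failure, matching the patched Date scalar above.
    try:
        if isinstance(value, datetime.date):
            return value
        if isinstance(value, str):
            return parse_date(value)
    except ValueError:
        return Undefined

assert parse_date_value("2020-01-15") == datetime.date(2020, 1, 15)
assert parse_date_value("not-a-date") is Undefined
```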
d46368024ee89143bca15a2bdf23f8792970cf5c
add property 'external' to menu nodes
menu_external_urls/menu.py
menu_external_urls/menu.py
from menus.base import Modifier from menus.menu_pool import menu_pool from menu_external_urls.models import MenuExternalUrl class MenuExternalUrlMod(Modifier): """ Adds ability to link page to an external URL. """ def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb): if post_cut: return nodes if breadcrumb: return nodes for node in nodes: try: #Load External URL into nodes menu_external_url = MenuExternalUrl.objects.get(page=(node.id-1)) node.url = menu_external_url.menu_external_url except: pass return nodes menu_pool.register_modifier(MenuExternalUrlMod)
Python
0
@@ -630,16 +630,53 @@ nal_url%0A + node.external = True%0A
c973385f877d940231deb8d81e929647eadc280a
Use standard env var for DATABASE_URL
app/config.py
app/config.py
# -*- coding: utf-8 -*- """ Application configuration """ import os from os.path import dirname, join # get settings from environment, or credstash if running in AWS env = os.environ if env.get('SETTINGS') == 'AWS': from lib.aws_env import env ASSETS_DEBUG = False DEBUG = bool(env.get('DEBUG', True)) HUMANIZE_USE_UTC = True MARKDOWN_EXTENSIONS = [ 'markdown.extensions.nl2br', 'markdown.extensions.sane_lists', 'markdown.extensions.smart_strong', 'markdown.extensions.smarty', ] SECRET_KEY = env.get('SECRET_KEY', os.urandom(24)) SESSION_COOKIE_SECURE = False SQLALCHEMY_DATABASE_PATH = join(dirname(__file__), '../development.db') SQLALCHEMY_DATABASE_URI = env.get( 'DATABASE_URI', 'sqlite:///{}'.format(SQLALCHEMY_DATABASE_PATH)) SQLALCHEMY_TRACK_MODIFICATIONS = bool(env.get( 'SQLALCHEMY_TRACK_MODIFICATIONS', False)) TESTING = bool(env.get('TESTING', False))
Python
0.000001
@@ -710,17 +710,17 @@ ABASE_UR -I +L ',%0A '
fb8a69706f38b2c21f7ab284f8ec9b0ace2216b2
use correct label in warning
addons/hr_attendance/models/hr_employee.py
addons/hr_attendance/models/hr_employee.py
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import pytz from datetime import datetime from dateutil.relativedelta import relativedelta from odoo import models, fields, api, exceptions, _, SUPERUSER_ID class HrEmployeeBase(models.AbstractModel): _inherit = "hr.employee.base" attendance_ids = fields.One2many('hr.attendance', 'employee_id', help='list of attendances for the employee') last_attendance_id = fields.Many2one('hr.attendance', compute='_compute_last_attendance_id', store=True) last_check_in = fields.Datetime(related='last_attendance_id.check_in', store=True) last_check_out = fields.Datetime(related='last_attendance_id.check_out', store=True) attendance_state = fields.Selection(string="Attendance Status", compute='_compute_attendance_state', selection=[('checked_out', "Checked out"), ('checked_in', "Checked in")]) hours_last_month = fields.Float(compute='_compute_hours_last_month') hours_today = fields.Float(compute='_compute_hours_today') hours_last_month_display = fields.Char(compute='_compute_hours_last_month') def _compute_presence_state(self): """ Override to include checkin/checkout in the presence state Attendance has the second highest priority after login """ super()._compute_presence_state() employees = self.filtered(lambda employee: employee.hr_presence_state != 'present') for employee in employees: if employee.attendance_state == 'checked_out' and employee.hr_presence_state == 'to_define': employee.hr_presence_state = 'absent' for employee in employees: if employee.attendance_state == 'checked_in': employee.hr_presence_state = 'present' def _compute_hours_last_month(self): for employee in self: now = datetime.now() start = now + relativedelta(months=-1, day=1) end = now + relativedelta(days=-1, day=1) attendances = self.env['hr.attendance'].search([ ('employee_id', '=', employee.id), ('check_in', '>=', start), ('check_out', '<=', end), ]) employee.hours_last_month = sum(attendances.mapped('worked_hours')) employee.hours_last_month_display = "%g" % employee.hours_last_month def _compute_hours_today(self): now = fields.Datetime.now() now_utc = pytz.utc.localize(now) for employee in self: # start of day in the employee's timezone might be the previous day in utc tz = pytz.timezone(employee.tz) now_tz = now_utc.astimezone(tz) start_tz = now_tz + relativedelta(hour=0, minute=0) # day start in the employee's timezone start_naive = start_tz.astimezone(pytz.utc).replace(tzinfo=None) attendances = self.env['hr.attendance'].search([ ('employee_id', '=', employee.id), ('check_in', '<=', now), '|', ('check_out', '>=', start_naive), ('check_out', '=', False), ]) worked_hours = 0 for attendance in attendances: delta = (attendance.check_out or now) - max(attendance.check_in, start_naive) worked_hours += delta.total_seconds() / 3600.0 employee.hours_today = worked_hours @api.depends('attendance_ids') def _compute_last_attendance_id(self): for employee in self: employee.last_attendance_id = self.env['hr.attendance'].search([ ('employee_id', '=', employee.id), ], limit=1) @api.depends('last_attendance_id.check_in', 'last_attendance_id.check_out', 'last_attendance_id') def _compute_attendance_state(self): for employee in self: att = employee.last_attendance_id.sudo() employee.attendance_state = att and not att.check_out and 'checked_in' or 'checked_out' @api.model def attendance_scan(self, barcode): """ Receive a barcode scanned from the Kiosk Mode and change the attendances of corresponding employee. 
Returns either an action or a warning. """ employee = self.sudo().search([('barcode', '=', barcode)], limit=1) if employee: return employee._attendance_action('hr_attendance.hr_attendance_action_kiosk_mode') return {'warning': _('No employee corresponding to barcode %(barcode)s') % {'barcode': barcode}} def attendance_manual(self, next_action, entered_pin=None): self.ensure_one() can_check_without_pin = not self.env.user.has_group('hr_attendance.group_hr_attendance_use_pin') or (self.user_id == self.env.user and entered_pin is None) if can_check_without_pin or entered_pin is not None and entered_pin == self.sudo().pin: return self._attendance_action(next_action) return {'warning': _('Wrong PIN')} def _attendance_action(self, next_action): """ Changes the attendance of the employee. Returns an action to the check in/out message, next_action defines which menu the check in/out message should return to. ("My Attendances" or "Kiosk Mode") """ self.ensure_one() employee = self.sudo() action_message = self.env.ref('hr_attendance.hr_attendance_action_greeting_message').read()[0] action_message['previous_attendance_change_date'] = employee.last_attendance_id and (employee.last_attendance_id.check_out or employee.last_attendance_id.check_in) or False action_message['employee_name'] = employee.name action_message['barcode'] = employee.barcode action_message['next_action'] = next_action action_message['hours_today'] = employee.hours_today if employee.user_id: modified_attendance = employee.with_user(employee.user_id)._attendance_action_change() else: modified_attendance = employee._attendance_action_change() action_message['attendance'] = modified_attendance.read()[0] return {'action': action_message} def _attendance_action_change(self): """ Check In/Check Out action Check In: create a new attendance record Check Out: modify check_out field of appropriate attendance record """ self.ensure_one() action_date = fields.Datetime.now() if self.attendance_state != 'checked_in': vals = { 'employee_id': self.id, 'check_in': action_date, } return self.env['hr.attendance'].create(vals) attendance = self.env['hr.attendance'].search([('employee_id', '=', self.id), ('check_out', '=', False)], limit=1) if attendance: attendance.check_out = action_date else: raise exceptions.UserError(_('Cannot perform check out on %(empl_name)s, could not find corresponding check in. ' 'Your attendances have probably been modified manually by human resources.') % {'empl_name': self.sudo().name, }) return attendance @api.model def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True): if 'pin' in groupby or 'pin' in self.env.context.get('group_by', '') or self.env.context.get('no_group_by'): raise exceptions.UserError(_('Such grouping is not allowed.')) return super(HrEmployeeBase, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
Python
0
@@ -4456,17 +4456,17 @@ ing': _( -' +%22 No emplo @@ -4486,24 +4486,26 @@ ding to -barcode +Badge ID ' %25(barcod @@ -4507,17 +4507,19 @@ arcode)s -' +.'%22 ) %25 %7B'ba
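Editor's note: beyond the label change, the record shows the usual translation idiom: translate the template with its named placeholder first, then interpolate, so translators can move `%(barcode)s` around freely. A sketch with stdlib gettext standing in for Odoo's `_` (no catalog bound here, so the msgid passes through unchanged):

```python
from gettext import gettext as _

def badge_warning(barcode):
    # Translate the whole template, placeholder included, *then*
    # substitute, as the patched attendance_scan message does.
    return _("No employee corresponding to Badge ID '%(barcode)s'.") % {
        "barcode": barcode,
    }

assert badge_warning("B123") == "No employee corresponding to Badge ID 'B123'."
```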
2f5c855336a0d182ad731fc50b6dc652f64b80d3
remove lazy=dynamic
app/models.py
app/models.py
from app import db class User(db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column('username', db.String(32), unique=True, index=True) password = db.Column('password', db.String(32)) rounds = db.relationship('Round', backref='user_o', lazy='dynamic') @property def is_authenticated(self): return True @property def is_active(self): return True @property def is_anonymous(self): return False def get_id(self): try: return unicode(self.id) except NameError: return str(self.id) def __repr__(self): return '<User %r>' % (self.username) class Round(db.Model): id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) course_id = db.Column(db.Integer, db.ForeignKey('course.id')) date = db.Column(db.DateTime) tee_color = db.Column(db.String(32)) course_handicap = db.Column(db.Integer) adj_score = db.Column(db.Integer) handicap_index = db.Column(db.Float) scores = db.relationship('Score', backref='round_o', lazy='dynamic') def __repr__(self): return '<Round %r>' % (self.date) class Score(db.Model): id = db.Column(db.Integer, primary_key=True) round_id = db.Column(db.Integer, db.ForeignKey('round.id')) hole = db.Column(db.Integer) score = db.Column(db.Integer) putts = db.Column(db.Integer) gir = db.Column(db.Integer) def __repr__(self): return '<Score %r>' % (self.id) class Course(db.Model): id = db.Column(db.Integer, primary_key=True) nickname = db.Column(db.String(32), unique=True, index=True) name = db.Column(db.String(64)) rounds = db.relationship('Round', backref='course_o', lazy='dynamic') tees = db.relationship('Tee', backref='course_o', lazy='dynamic') def __repr__(self): return '<Course %r>' % (self.name) class Tee(db.Model): id = db.Column(db.Integer, primary_key=True) course_id = db.Column(db.Integer, db.ForeignKey('course.id')) tee_color = db.Column(db.String(32)) rating = db.Column(db.Float) slope = db.Column(db.Integer) holes = db.relationship('Hole', backref='tee_o', lazy='dynamic') def __repr__(self): return '<Tee %r>' % (self.tee_color) class Hole(db.Model): id = db.Column(db.Integer, primary_key=True) tee_id = db.Column(db.Integer, db.ForeignKey('tee.id')) hole = db.Column(db.Integer) yardage = db.Column(db.Integer) par = db.Column(db.Integer) rating = db.Column(db.Float) slope = db.Column(db.Integer) def __repr__(self): return '<Hole %r>' % (self.hole)
Python
0.99834
@@ -270,34 +270,18 @@ 'user_o' -, lazy='dynamic' )%0A + %0A @pr @@ -1120,32 +1120,16 @@ round_o' -, lazy='dynamic' )%0A%0A d @@ -1760,32 +1760,16 @@ ourse_o' -, lazy='dynamic' )%0A te @@ -1814,32 +1814,16 @@ ourse_o' -, lazy='dynamic' )%0A%0A d @@ -2128,24 +2128,24 @@ b.Integer)%0A%0A + holes = @@ -2187,24 +2187,8 @@ e_o' -, lazy='dynamic' )%0A%0A
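Editor's note: dropping `lazy='dynamic'` changes what relationship access returns: the default loader yields a plain list of row objects, while `'dynamic'` yields an unevaluated query to refine further. A hedged sketch of the post-diff behaviour, with plain SQLAlchemy standing in for Flask-SQLAlchemy and the tables trimmed to the relevant columns:

```python
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    # default loading, as after the diff: .rounds is a list-like collection
    # (with lazy="dynamic" it would be a Query object instead)
    rounds = relationship("Round", backref="user_o")

class Round(Base):
    __tablename__ = "round"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("user.id"))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(User(rounds=[Round(), Round()]))
    session.commit()
    user = session.query(User).one()
    assert isinstance(user.rounds, list) and len(user.rounds) == 2
```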
b07fd0f19de4c1f25ce4fbf6caef9473858e7c40
Change diskusage message.
helpers/resource_helper.py
helpers/resource_helper.py
import collections import datetime from copy import copy from numpy import logspace, log10, where from psutil import virtual_memory, cpu_percent, disk_partitions, disk_usage from helpers import slack_helper from helpers import utils def mem_percents_logspaced(start_percent=None, end_percent=90, bins_count=30): if not start_percent: start_percent = virtual_memory().percent + 1 logspaced = logspace(log10(start_percent), log10(end_percent), bins_count) return logspaced def disk_percents_logspaced(start_percent=None, end_percent=90, bins_count=30): if not start_percent: start_percent = virtual_memory().percent logspaced = logspace(log10(start_percent), log10(end_percent), bins_count) return logspaced def current_mem_percent(resource_store): resource_store.mem_time_last_run = datetime.datetime.now() return virtual_memory().percent def current_cpu_percent(resource_store): resource_store.cpu_time_last_run_local = datetime.datetime.now() resource_store.cpu_time_last_run_utc = datetime.datetime.now().utcnow() return cpu_percent(interval=1, percpu=True) def current_disk_percent(resource_store, filter_by_threshold=False): # physical devices only dfkh = [] for dp in disk_partitions(all=False): du = disk_usage(dp.mountpoint) dfkh.append((dp.device, dp.mountpoint, du.percent, du.total, du.used, du.free)) if filter_by_threshold: dfkh = [df for df in dfkh if df[2] > resource_store.disk_usage_percent_threshold] return dfkh if len(dfkh) > 0 else None def cpu_save_current_usage(resource_store, current_cpu_usage, cpu_time_last_run_utc): try: for t in range(resource_store.logical_cpu_count): resource_store.cpu_circular_buffer[t].append((current_cpu_usage[t], cpu_time_last_run_utc)) except Exception as e: ex = "Exception @cpu_save_current_usage: %s" % e print(ex) def process_cpu_ring_buffer(resource_store): should_notify_cpu_usage = False cpu_gone_wild = [] # (logical_cpu_id, copy_of_ring_buffer) for t in range(resource_store.logical_cpu_count): num_above_threshold = sum(prcnt >= resource_store.cpu_usage_percent_threshold for prcnt, _ in resource_store.cpu_circular_buffer[t]) if num_above_threshold >= resource_store.cpu_notification_threshold_count: should_notify_cpu_usage = True cpu_gone_wild.append((t, copy(resource_store.cpu_circular_buffer[t]))) return should_notify_cpu_usage, cpu_gone_wild def get_cpu_ring_buffer_snapshot(resource_store): # (logical_cpu_id, copy_of_ring_buffer) snapshot = [(t, copy(resource_store.cpu_circular_buffer[t])) for t in range(resource_store.logical_cpu_count)] return snapshot def to_friendly_cpu_notification_message(resource_store, id_and_buff): try: msg = [] header = "\n".join(["{} @{}".format(resource_store.hostname, utils.time_to_str(datetime.datetime.now())), "CPU usage threshold percent: %{}".format(str(resource_store.cpu_usage_percent_threshold)), "Num. 
occurrences to trigger a notification: {}".format( str(resource_store.cpu_notification_threshold_count)) ]) msg.extend(["~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"]) for cpu_id, ring_buffer in id_and_buff: msg.extend(["{} Core #{}\t%{}\t@{}".format(resource_store.hostname, "%#02d" % (cpu_id + 1), "%#04.1f" % percent, utils.datetime_to_slang(timestamp)) for percent, timestamp in ring_buffer]) msg.extend(["~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"]) msg = "\n".join([header, "\n".join(m for m in msg)]) return msg except Exception as e: ex = "Sorry, I've failed @to_friendly_cpu_notification_message\n{}".format(e) print(ex) return ex def next_threshold(percents_logspaced, current): greater_values = percents_logspaced[where(percents_logspaced > current)] try: return round(greater_values[0], 2) except: return current def to_friendly_mem_notification_message(resource_store): msg = "\n".join(["{hostname} @{time_last_measure}".format(hostname=resource_store.hostname, time_last_measure=utils.time_to_str( resource_store.mem_time_last_run)), "".join(["Current memory usage: %", str(resource_store.mem_last_percent)]), "".join(["Next notification threshold: %", str(resource_store.mem_threshold_next_percent)]) ]) return msg def to_friendly_disk_notification_message(resource_store, dfkh): try: msg = [] header = "\n".join(["{} @{}".format(resource_store.hostname, utils.time_to_str(datetime.datetime.now())), "Disk usage threshold percent: %{}".format( str(resource_store.disk_usage_percent_threshold)), ]) msg.extend(["~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"]) for device, mountpoint, disk_percent, total, used, free in dfkh: msg.extend(["{}\nDevice: {}\nMount Point: {}\nUsed: %{}\nTotal: {}\nUsed: {}\nFree: {}".format( resource_store.hostname, device, mountpoint, "%#04.1f" % disk_percent, utils.byte_size_format(total), utils.byte_size_format(used), utils.byte_size_format(free) )]) msg.extend(["~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"]) msg = "\n".join([header, "\n".join(m for m in msg)]) return msg except Exception as e: ex = "Sorry, I've failed @to_friendly_disk_notification_message\n{}".format(e) print(ex) return ex def send_friendly_message(slack_store, friendly_message, userid): if userid: slack_helper.send_message_to_user(slack_store, friendly_message, "resource", userid) else: slack_helper.send_message(slack_store, friendly_message, "resource")
Python
0
@@ -3394,37 +3394,32 @@ ~~~~~~~~~~~~~~~~ -~~~~~ %22%5D)%0A for @@ -3893,37 +3893,32 @@ ~~~~~~~~~~~~~~~~ -~~~~~ %22%5D)%0A%0A msg @@ -5431,37 +5431,8 @@ ~~~~ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %22%5D)%0A @@ -5962,37 +5962,8 @@ ~~~~ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %22%5D)%0A
c313550b52369edad73e97769ca509075d9e4ef6
Establish table relationships
app/models.py
app/models.py
from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash from app import db class User(UserMixin, db.Model): """This class represents the user table.""" __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(255), nullable=False, unique=True) email = db.Column(db.String(256), nullable=False, unique=True) user_password = db.Column(db.String(255), nullable=False) bucketlists = db.relationship('BucketList', order_by="BucketList.id", cascade="all,delete-orphan") def __init__(self, username, password, email): self.username = username self.password = password self.email = email @property def password(self): raise AttributeError('You cannot access password') @password.setter def password(self): self.user_password = generate_password_hash(self.password) def verify_password(self, password): return check_password_hash(self.password_hash, password) class BucketList(db.Model): """This class represents the bucketlist table.""" __tablename__ = 'bucketlists' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255)) date_created = db.Column(db.DateTime, default=db.func.current_timestamp()) date_modified = db.Column( db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp()) def __init__(self, name): """initialize with name.""" self.name = name def save(self): db.session.add(self) db.session.commit() @staticmethod def get_all(): return BucketList.query.all() def delete(self): db.session.delete(self) db.session.commit() def __repr__(self): return "<BucketList: {}>".format(self.name) class BucketListItem(db.Model): """This class represents the bucketlist_item table""" __tablename__ = 'bucketlistitems' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255)) date_created = db.Column(db.DateTime, default=db.func.current_timestamp()) date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp()) done = db.Column(db.Boolean, default=False) bucketlist_id = db.Column(db.Integer, db.ForeignKey(BucketList.id)) def __init__(self, name, bucketlist_id): """Initialize with name and bucketlist_id""" self.name = name self.bucketlist_id = bucketlist_id def save(self): db.session.add(self) db.session.commit() @staticmethod def get_all_items(): return BucketListItem.query.filter_by(bucketlist_id=BucketList.id) def delete(self): db.session.delete(self) db.session.commit()
Python
0.000023
@@ -1500,16 +1500,79 @@ stamp()) +%0A created_by = db.Column(db.Integer, db.ForeignKey(User.id)) %0A%0A de
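Before this diff, User.bucketlists declares a relationship to BucketList with no foreign key linking the two tables, so SQLAlchemy cannot infer the join condition; the added created_by column supplies it. A hedged usage sketch, assuming the models above and an initialised Flask-SQLAlchemy db bound to an app context; note that the password setter above takes no value argument as written and would raise on assignment, so the sketch also assumes it is corrected to def password(self, password).

# Minimal sketch, not part of the record; assumptions are stated above.
user = User(username='alice', password='secret', email='alice@example.com')
db.session.add(user)
db.session.commit()

bucketlist = BucketList(name='travel')
bucketlist.created_by = user.id   # the foreign key this diff introduces
bucketlist.save()

# The relationship on User now resolves through created_by, and the declared
# cascade="all,delete-orphan" removes a user's bucketlists with the user.
assert user.bucketlists[0].name == 'travel'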
7fe6130c1f94bdfcab1508bd9102d02f8fd123a0
update tests
corehq/apps/cloudcare/tests/test_session.py
corehq/apps/cloudcare/tests/test_session.py
import uuid from django.test import TestCase from casexml.apps.case.models import CommCareCase from corehq.apps.cloudcare.touchforms_api import ( get_user_contributions_to_touchforms_session, ) from corehq.apps.custom_data_fields.models import ( CustomDataFieldsDefinition, CustomDataFieldsProfile, Field, PROFILE_SLUG, ) from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView from corehq.apps.users.models import CommCareUser, WebUser class SessionUtilsTest(TestCase): def test_load_session_data_for_mobile_worker(self): user = CommCareUser( domain='cloudcare-tests', username='worker@cloudcare-tests.commcarehq.org', _id=uuid.uuid4().hex ) data = get_user_contributions_to_touchforms_session('cloudcare-tests', user) self.assertEqual('worker', data['username']) self.assertEqual(user._id, data['user_id']) self.assertTrue(isinstance(data['user_data'], dict)) def test_default_user_data(self): user = CommCareUser( domain='cloudcare-tests', username='worker@cloudcare-tests.commcarehq.org', _id=uuid.uuid4().hex ) user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data'] for key in ['commcare_first_name', 'commcare_last_name', 'commcare_phone_number']: self.assertEqual(None, user_data[key]) user.first_name = 'first' user.last_name = 'last' user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data'] self.assertEqual('first', user_data['commcare_first_name']) self.assertEqual('last', user_data['commcare_last_name']) def test_user_data_profile(self): definition = CustomDataFieldsDefinition(domain='cloudcare-tests', field_type=UserFieldsView.field_type) definition.save() definition.set_fields([ Field(slug='word', label='A Word'), ]) definition.save() profile = CustomDataFieldsProfile(name='prof', fields={'word': 'supernova'}, definition=definition) profile.save() user = CommCareUser.create( 'cloudcare-tests', 'worker@cloudcare-tests.commcarehq.org', 'do you want to know a secret', None, None, uuid=uuid.uuid4().hex, metadata={PROFILE_SLUG: profile.id}, ) user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data'] self.assertEqual(profile.id, user_data[PROFILE_SLUG]) self.assertEqual('supernova', user_data['word']) definition.delete() def test_load_session_data_for_web_user(self): user = WebUser( username='web-user@example.com', _id=uuid.uuid4().hex ) data = get_user_contributions_to_touchforms_session('cloudcare-tests', user) self.assertEqual('web-user@example.com', data['username']) self.assertEqual(user._id, data['user_id']) self.assertTrue(isinstance(data['user_data'], dict)) def test_load_session_data_for_commconnect_case(self): user = CommCareCase( name='A case', _id=uuid.uuid4().hex ) data = get_user_contributions_to_touchforms_session('cloudcare-tests', user) self.assertEqual('A case', data['username']) self.assertEqual(user._id, data['user_id']) self.assertEqual({}, data['user_data'])
Python
0.000001
@@ -981,32 +981,114 @@ r_data'%5D, dict)) +%0A self.assertTrue(data%5B'user_data'%5D%5B'commcare_project'%5D, 'cloudcare-tests') %0A%0A def test_d @@ -3228,16 +3228,98 @@ , dict)) +%0A self.assertTrue(data%5B'user_data'%5D%5B'commcare_project'%5D, 'cloudcare-tests') %0A%0A de
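The two assertions this diff adds call assertTrue with two positional arguments; in unittest the second argument is the failure message, not an expected value, so the check passes for any truthy first argument. A self-contained illustration (the dictionary contents are illustrative):

import unittest

class AssertDemo(unittest.TestCase):
    def test_truthy_passes(self):
        data = {'user_data': {'commcare_project': 'some-other-domain'}}
        # Passes even though the value differs from 'cloudcare-tests': the
        # second argument to assertTrue is only the failure message.
        self.assertTrue(data['user_data']['commcare_project'], 'cloudcare-tests')
        # The strict form of the intended check would be:
        # self.assertEqual(data['user_data']['commcare_project'], 'cloudcare-tests')

if __name__ == '__main__':
    unittest.main()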
962fbf0b493a9d759faeccaa50c7cb34e9991527
allow external command hooks to fail backup
holland/commands/backup.py
holland/commands/backup.py
import os, sys from subprocess import Popen, PIPE import time import errno import fcntl import logging from holland.core.command import Command, option, run from holland.core.backup import BackupRunner, BackupError from holland.core.exceptions import BackupError from holland.core.config import hollandcfg, ConfigError from holland.core.spool import spool from holland.core.util.fmt import format_interval, format_bytes from holland.core.util.path import disk_free, disk_capacity, getmount from holland.core.util.lock import Lock, LockError from holland.core.util.pycompat import Template LOG = logging.getLogger(__name__) class Backup(Command): """${cmd_usage} Backup the specified backupsets or all active backupsets specified in holland.conf ${cmd_option_list} """ name = 'backup' aliases = [ 'bk' ] options = [ option('--abort-immediately', action='store_true', help="Abort on the first backupset that fails."), option('--dry-run', '-n', action='store_true', help="Print backup commands without executing them."), option('--no-lock', '-f', action='store_true', default=False, help="Run even if another copy of Holland is running.") ] description = 'Run backups for active backupsets' def run(self, cmd, opts, *backupsets): if not backupsets: backupsets = hollandcfg.lookup('holland.backupsets') # strip empty items from backupsets list backupsets = [name for name in backupsets if name] if not backupsets: LOG.info("Nothing to backup") return 1 runner = BackupRunner(spool) # dry-run implies no-lock if opts.dry_run: opts.no_lock = True # don't purge if doing a dry-run, or when simultaneous backups may be running if not opts.no_lock: purge_mgr = PurgeManager() runner.register_cb('pre-backup', purge_mgr) runner.register_cb('post-backup', purge_mgr) runner.register_cb('backup-failure', purge_backup) runner.register_cb('post-backup', report_low_space) runner.register_cb('pre-backup', call_hooks) runner.register_cb('post-backup', call_hooks) runner.register_cb('backup-failure', call_hooks) error = 1 LOG.info("--- Starting %s run ---", opts.dry_run and 'dry' or 'backup') for name in backupsets: try: config = hollandcfg.backupset(name) # ensure we have at least an empty holland:backup section config.setdefault('holland:backup', {}) except (SyntaxError, IOError), exc: LOG.error("Could not load backupset '%s': %s", name, exc) break if not opts.no_lock: lock = Lock(config.filename) try: lock.acquire() LOG.info("Acquired lock %s : '%s'", lock.path, lock.lock.name) except LockError: LOG.error("Failed to acquire lock on backupset %s (%s)", name, config.filename) break try: try: runner.backup(name, config, opts.dry_run) except BackupError, exc: LOG.error("Backup failed: %s", exc.args[0]) break except ConfigError, exc: break finally: if not opts.no_lock: if lock.is_locked(): lock.release() LOG.info("Released lock %s", lock.path) else: error = 0 LOG.info("--- Ending %s run ---", opts.dry_run and 'dry' or 'backup') return error def purge_backup(event, entry): if entry.config['holland:backup']['auto-purge-failures']: entry.purge() LOG.info("Purged failed backup: %s", entry.name) else: LOG.info("auto-purge-failures not enabled. 
Failed backup not purged.") def call_hooks(event, entry): hook = event + "-hook" if entry.config['holland:backup'][hook] is not None: cmd = entry.config['holland:backup'][hook] try: cmd = Template(cmd).safe_substitute( hook=hook, backupset=entry.backupset, backupdir=entry.path ) LOG.info("Calling: %s", cmd) process = Popen(cmd, shell=True, stdin=open("/dev/null", "r"), stdout=PIPE, stderr=PIPE, close_fds=True) output, errors = process.communicate() except OSError, exc: raise BackupError("%s", exc) for line in errors.splitlines(): LOG.error(" ! %s", line) for line in output.splitlines(): LOG.info(" + %s", line) else: return 0 class PurgeManager(object): def __call__(self, event, entry): purge_policy = entry.config['holland:backup']['purge-policy'] if event == 'pre-backup' and purge_policy != 'before-backup': return if event == 'post-backup' and purge_policy != 'after-backup': return backupset = spool.find_backupset(entry.backupset) if not backupset: LOG.info("Nothing to purge") return retention_count = entry.config['holland:backup']['backups-to-keep'] retention_count = int(retention_count) if event == 'post-backup' and retention_count == 0: # Always maintain latest backup LOG.warning("!! backups-to-keep set to 0, but " "purge-policy = after-backup. This would immediately " "purge all backups which is probably not intended. " "Setting backups-to-keep to 1") retention_count = 1 if event == 'pre-backup': retention_count += 1 self.purge_backupset(backupset, retention_count) backupset.update_symlinks() def purge_backupset(self, backupset, retention_count): purge_count = 0 for backup in backupset.purge(retention_count): purge_count += 1 LOG.info("Purged %s", backup.name) if purge_count == 0: LOG.info("No backups purged") else: LOG.info("%d backups purged", purge_count) def report_low_space(event, entry): total_space = disk_capacity(entry.path) free_space = disk_free(entry.path) if free_space < 0.10*total_space: LOG.warning("Extremely low free space on %s's filesystem (%s).", entry.path, getmount(entry.path)) LOG.warning("%s of %s [%.2f%%] remaining", format_bytes(free_space), format_bytes(total_space), (float(free_space) / total_space)*100)
Python
0.000001
@@ -4527,16 +4527,22 @@ ng: %25s%22, + hook, cmd)%0A @@ -5073,31 +5073,110 @@ , line)%0A -%0A + -else:%0A + if process.returncode != 0:%0A raise BackupError(%22%25s command failed%22 %25 hook)%0A retu
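After this diff, a hook command that exits nonzero raises BackupError and fails the backup instead of only having its output logged. A standalone sketch of the same pattern; function and hook names are illustrative, not Holland's API, and it assumes a POSIX shell providing the true and false commands.

from subprocess import Popen, PIPE

class BackupError(Exception):
    pass

def run_hook(hook, cmd):
    process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
    output, errors = process.communicate()
    for line in errors.decode().splitlines():
        print(" ! %s" % line)
    for line in output.decode().splitlines():
        print(" + %s" % line)
    if process.returncode != 0:        # the check the diff introduces
        raise BackupError("%s command failed" % hook)

run_hook("pre-backup-hook", "true")    # exits 0: fine
# run_hook("pre-backup-hook", "false") # exits 1: raises BackupError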
172c7a3ee0c75462f08e726716bf906ad88eadab
add test of plugin registry options
altair/utils/tests/test_plugin_registry.py
altair/utils/tests/test_plugin_registry.py
from ..plugin_registry import PluginRegistry class RegistryTest(PluginRegistry): pass def test_plugin_registry(): plugins = RegistryTest() assert plugins.names() == [] assert plugins.active == '' assert plugins.get() is None assert repr(plugins) == "RegistryTest(active='', registered=[])" plugins.register('new_plugin', lambda x: x ** 2) assert plugins.names() == ['new_plugin'] assert plugins.active == '' assert plugins.get() is None assert repr(plugins) == ("RegistryTest(active='', " "registered=['new_plugin'])") plugins.enable('new_plugin') assert plugins.names() == ['new_plugin'] assert plugins.active == 'new_plugin' assert plugins.get()(3) == 9 assert repr(plugins) == ("RegistryTest(active='new_plugin', " "registered=['new_plugin'])")
Python
0
@@ -42,28 +42,145 @@ try%0A -%0A%0Aclass RegistryTest +from typing import Callable%0A%0A%0Aclass TypedCallableRegistry(PluginRegistry%5BCallable%5B%5Bint%5D, int%5D%5D):%0A pass%0A%0A%0Aclass GeneralCallableRegistry (Plu @@ -246,24 +246,37 @@ ugins = +TypedCallable Registry Test()%0A%0A @@ -267,20 +267,16 @@ Registry -Test ()%0A%0A @@ -398,24 +398,37 @@ ns) == %22 +TypedCallable Registry Test(act @@ -411,36 +411,32 @@ CallableRegistry -Test (active='', regi @@ -637,32 +637,45 @@ ugins) == (%22 +TypedCallable Registry Test(active= @@ -654,36 +654,32 @@ CallableRegistry -Test (active='', %22%0A @@ -919,24 +919,37 @@ s) == (%22 +TypedCallable Registry Test(act @@ -944,12 +944,8 @@ stry -Test (act @@ -1003,28 +1003,325 @@ egistered=%5B'new_plugin'%5D)%22)%0A +%0A%0Adef test_plugin_registry_extra_options():%0A plugins = GeneralCallableRegistry()%0A%0A plugins.register('metadata_plugin', lambda x, p=2: x ** p)%0A plugins.enable('metadata_plugin')%0A assert plugins.get()(3) == 9%0A%0A plugins.enable('metadata_plugin', p=3)%0A assert plugins.get()(3) == 27%0A
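The rewritten test covers two behaviours: a registry parameterised with a typing.Callable signature, and enable() forwarding extra keyword options into the active plugin. A toy stand-in for the second behaviour; this encodes an assumption about the semantics the test checks and is not Altair's actual PluginRegistry implementation.

from functools import partial

class ToyRegistry(object):
    def __init__(self):
        self._plugins, self._active = {}, None

    def register(self, name, fn):
        self._plugins[name] = fn

    def enable(self, name, **options):
        # Extra keyword options are bound into the active plugin, mirroring
        # what the new test checks for GeneralCallableRegistry.
        self._active = partial(self._plugins[name], **options)

    def get(self):
        return self._active

plugins = ToyRegistry()
plugins.register('metadata_plugin', lambda x, p=2: x ** p)
plugins.enable('metadata_plugin')
assert plugins.get()(3) == 9
plugins.enable('metadata_plugin', p=3)
assert plugins.get()(3) == 27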
ff1de15061b94cf9caedb22488664001b6b1e55c
Fix by_gene_role_type function.
indra/db/query_db_stmts.py
indra/db/query_db_stmts.py
from __future__ import absolute_import, print_function, unicode_literals from builtins import dict, str import json from indra.statements import * from indra.sources.signor import SignorProcessor, _default_csv_file from indra.db import get_primary_db from indra.databases import hgnc_client def by_gene_role_type(agent_id=None, agent_ns='HGNC', role=None, stmt_type=None, count=1000, do_stmt_count=True, db=None): """Get statements from the DB by stmt type, agent, and/or agent role. Parameters ---------- agent_id : str String representing the identifier of the agent from the given namespace. Note: if the agent namespace argument, `agent_ns`, is set to 'HGNC', this function will treat `agent_id` as an HGNC gene symbol and perform an internal lookup of the corresponding HGNC ID. agent_ns : str Namespace for the identifier given in `agent_id`. role : str String corresponding to the role of the agent in the statement. Options are 'SUBJECT', 'OBJECT', or 'OTHER' (in the case of `Complex`, `SelfModification`, and `ActiveForm` Statements). stmt_type : str Name of the Statement class. count : int Number of statements to retrieve in each batch (passed to :py:func:`get_statements`). do_stmt_count : bool Whether or not to perform an initial statement counting step to give more meaningful progress messages. db : indra.db.DatabaseManager object. Optionally specify a database manager that attaches to something besides the primary database, for example a local databse instance. Returns ------- list of Statements from the database corresponding to the query. """ if db is None: db = get_primary_db() if not (agent_id or role or stmt_type): raise ValueError('At least one of agent_id, role, or stmt_type ' 'must be specified.') clauses = [] if agent_id and agent_ns == 'HGNC': hgnc_id = hgnc_client.get_hgnc_id(agent_id) if not hgnc_id: logger.warning('Invalid gene name: %s' % agent_id) return [] clauses.extend([db.Agents.db_name == 'HGNC', db.Agents.db_id == hgnc_id]) elif agent_id: clauses.extend([db.Agents.db_name == agent_ns, db.Agents.db_id == agent_id]) if role: clauses.append(db.Agents.role == role) if agent_id or role: clauses.append(db.Agents.stmt_id == db.Statements.id) if stmt_type: clauses.append(db.Statements.type == stmt_type) stmts = get_statements(clauses, count=count, do_stmt_count=do_stmt_count) return stmts def get_statements(clauses, count=1000, do_stmt_count=True, db=None): """Select statements according to a given set of clauses. Parameters ---------- clauses : list list of sqlalchemy WHERE clauses to pass to the filter query. count : int Number of statements to retrieve and process in each batch. do_stmt_count : bool Whether or not to perform an initial statement counting step to give more meaningful progress messages. db : indra.db.DatabaseManager object. Optionally specify a database manager that attaches to something besides the primary database, for example a local database instance. Returns ------- list of Statements from the database corresponding to the query. 
""" if db is None: db = get_primary_db() stmts = [] q = db.filter_query('statements', *clauses) if do_stmt_count: print("Counting statements...") num_stmts = q.count() print("Total of %d statements" % num_stmts) db_stmts = q.yield_per(count) subset = [] total_counter = 0 for stmt in db_stmts: subset.append(stmt) if len(subset) == count: stmts.extend(_stmts_from_db_list(subset)) subset = [] total_counter += 1 if total_counter % count == 0: if do_stmt_count: print("%d of %d statements" % (total_counter, num_stmts)) else: print("%d statements" % total_counter) stmts.extend(_stmts_from_db_list(subset)) return stmts def _stmts_from_db_list(db_stmt_objs): stmt_json_list = [] for st_obj in db_stmt_objs: stmt_json_list.append(json.loads(st_obj.json.decode('utf8'))) return stmts_from_json(stmt_json_list)
Python
0
@@ -2723,24 +2723,58 @@ o_stmt_count +,%0A db=db )%0A return
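The bug this one-line diff fixes: by_gene_role_type accepts a db argument but never forwards it, so get_statements always falls back to get_primary_db() even when a local database manager is supplied. The pattern in isolation, with illustrative names:

def get_statements(clauses, db=None):
    if db is None:
        db = 'PRIMARY-DB'                       # default fallback
    return db, clauses

def by_gene_role_type_buggy(clauses, db=None):
    return get_statements(clauses)              # db silently dropped

def by_gene_role_type_fixed(clauses, db=None):
    return get_statements(clauses, db=db)       # what the diff adds

assert by_gene_role_type_buggy([], db='LOCAL-DB')[0] == 'PRIMARY-DB'
assert by_gene_role_type_fixed([], db='LOCAL-DB')[0] == 'LOCAL-DB'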
f591b6fde4d59147146c3566973f737caa63d0a6
corrected rval to rval**2
treetime/utils.py
treetime/utils.py
from __future__ import division, print_function import numpy as np from scipy.interpolate import interp1d import config as ttconf from scipy.integrate import quad from scipy import stats import datetime from scipy.ndimage import binary_dilation class DateConversion(object): """ Small container class to store parameters to convert between branch length as it is used in ML computations and the dates of the nodes. It is assumed that the conversion formula is 'length = k*date + b' """ def __init__(self): self.slope = 0 self.intercept = 0 self.r_val = 0 self.p_val = 0 self.sigma = 0 def __str__(self): outstr = ('Root-Tip-Regression:\n --slope:\t%f\n --intercept:\t%f\n --R^2:\t\t%f\n' %(self.slope, self.intercept, self.r_val)) return outstr @classmethod def from_tree(cls, t, slope=None): """ Create the conversion object automatically from the tree """ dates = [] for node in t.find_clades(): if hasattr(node, "numdate_given") and node.numdate_given is not None: dates.append((np.mean(node.numdate_given), node.dist2root)) if len(dates) == 0: raise RuntimeError("Cannot proceed with the TreeTime computations: " "No date has been assigned to the terminal nodes!") dates = np.array(dates) dc = cls() if slope is None: if len(dates) < 3: raise(RuntimeError("There are to few dates set at the leaves of the tree." " Cannot make the conversion function. Aborting.")) # simple regression dc.slope,\ dc.intercept,\ dc.r_val,\ dc.p_val,\ dc.sigma = stats.linregress(dates[:, 0], dates[:, 1]) else: # TODO this seems awkward dc.slope = slope # slope is given min_numdate_given = ttconf.BIG_NUMBER max_numdate_given = -ttconf.BIG_NUMBER max_diam = 0.0 for node in t.get_terminals(): # NOTE: raw_date is time before present in years if hasattr(node, 'numdate_given') and node.numdate_given is not None: if node.numdate_given < min_numdate_given: min_numdate_given = node.numdate_given if node.numdate_given > max_numdate_given: max_numdate_given = node.numdate_given max_diam = node.dist2root if max_numdate_given == -ttconf.BIG_NUMBER: print ("Warning! cannot set the minimal raw date. using today") max_numdate_given = 0.0 if max_diam == 0.0: print ("Error! cannot set the intercept for the date2dist conversion!" "Cannot read tree diameter") dc.intercept = max_diam - slope * max_numdate_given # set the root-mean-square deviation: dc.rms = np.sqrt(np.sum((dates[:, 1] - (dc.intercept + dc.slope * dates[:, 0]))**2) / dates.shape[0]) return dc def get_branch_len(self, date1, date2): """ Compute branch length given the dates of the two nodes. Args: - date1 (int): date of the first node (days before present) - date2 (int): date of the second node (days before present) Returns: - branch length (double): Branch length, assuming that the dependence between the node date and the node depth in the the tree is linear. """ return abs(date1 - date2) * self.slope def get_time_before_present(self, numdate): """ Convert the numeric date to the branch-len scale """ return (numeric_date() - numdate) * abs(self.slope) def get_date(self, abs_t): """ Get the approximate date of the tree node, assuming that the dependence between the node date and the node depth int the tree is linear. Args: - node(Phylo.Tree.Clade): node of the tree. Must be from the TreeAnc class (or its derivative), to contain the necessary attributes ( dist2root). """ days = abs_t / abs(self.slope) #(self.intercept - abs_t) / self.slope if days < 0: print ("The inferred date of the node is later than today!") #print ("Warning: got the negative date! 
Returning the inverse.") #days = abs(days) return days def min_interp(interp_object): """ Find the global minimum of a function represented as an interpolation object. """ try: return interp_object.x[interp_object(interp_object.x).argmin()] except Exception, e: s = "Cannot find minimum of tthe interpolation object" + str(interp_object.x) + \ "Minimal x: " + str(interp_object.x.min()) + "Maximal x: " + str(interp_object.x.max()) raise e def median_interp(interp_object): """ Find the median of the function represented as an interpolation object. """ new_grid = np.sort(np.concatenate([interp_object.x[:-1] + 0.1*ii*np.diff(interp_object.x) for ii in range(10)]).flatten()) tmp_prop = np.exp(-(interp_object(new_grid)-interp_object.y.min())) tmp_cumsum = np.cumsum(0.5*(tmp_prop[1:]+tmp_prop[:-1])*np.diff(new_grid)) median_index = min(len(tmp_cumsum)-3, max(2,np.searchsorted(tmp_cumsum, tmp_cumsum[-1]*0.5)+1)) return new_grid[median_index] def numeric_date(dt=None): """ Convert datetime object to the numeric date. The numeric date format is YYYY.F, where F is the fraction of the year passed Args: - dt: (datetime.datetime) date of to be converted. if None, assume today """ if dt is None: dt = datetime.datetime.now() try: res = dt.year + dt.timetuple().tm_yday / 365.25 except: res = 0.0 return res if __name__ == '__main__': pass
Python
0.999415
@@ -818,16 +818,19 @@ lf.r_val +**2 ))%0A
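scipy.stats.linregress returns the correlation coefficient r, while the __str__ label above reads R^2, so squaring r_val makes the printed value match its label (the coefficient of determination). A quick check:

import numpy as np
from scipy import stats

x = np.arange(10.0)
y = 2.0 * x + np.sin(x)               # nearly linear data
slope, intercept, r_val, p_val, sigma = stats.linregress(x, y)
print('r   = %f' % r_val)             # correlation coefficient
print('R^2 = %f' % r_val ** 2)        # coefficient of determination, as labelled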
6b64a544e9d410e274ac1e36375122d0ba05d9f4
Use something more different; #360
judge/admin/runtime.py
judge/admin/runtime.py
from django.db.models import TextField from django.forms import TextInput, ModelForm, ModelMultipleChoiceField from django.utils.html import format_html from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from reversion.admin import VersionAdmin from django_ace import AceWidget from judge.models import Problem from judge.widgets import AdminPagedownWidget, HeavySelect2MultipleWidget class LanguageForm(ModelForm): problems = ModelMultipleChoiceField( label=_('Disallowed problems'), queryset=Problem.objects.all(), required=False, help_text=_('These problems are NOT allowed to be submitted in this language'), widget=HeavySelect2MultipleWidget(data_view='problem_select2')) class Meta: if AdminPagedownWidget is not None: widgets = {'description': AdminPagedownWidget} class LanguageAdmin(VersionAdmin): fields = ('key', 'name', 'short_name', 'common_name', 'ace', 'pygments', 'info', 'description', 'template', 'problems') list_display = ('key', 'name', 'common_name', 'info') form = LanguageForm def save_model(self, request, obj, form, change): super(LanguageAdmin, self).save_model(request, obj, form, change) obj.problem_set = Problem.objects.exclude(id__in=form.cleaned_data['problems'].values('id')) def get_form(self, request, obj=None, **kwargs): self.form.base_fields['problems'].initial = \ Problem.objects.exclude(id__in=obj.problem_set.values('id')).values_list('pk', flat=True) if obj else [] if obj is not None: self.form.base_fields['template'].widget = AceWidget(obj.ace, request.user.profile.ace_theme) return super(LanguageAdmin, self).get_form(request, obj, **kwargs) class GenerateKeyTextInput(TextInput): def render(self, name, value, attrs=None): text = super(TextInput, self).render(name, value, attrs) return mark_safe(text + format_html( '''\ <a href="#" onclick="return false;" class="button" id="id_{0}_regen">Regenerate</a> <script type="text/javascript"> (function ($) {{ $(document).ready(function () {{ $('#id_{0}_regen').click(function () {{ var length = 100, charset = "abcdefghijklnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`~!@#$%^&*()_+-=|[]{{}};:,<>./?", key = ""; for (var i = 0, n = charset.length; i < length; ++i) {{ key += charset.charAt(Math.floor(Math.random() * n)); }} $('#id_{0}').val(key); }}); }}); }})(django.jQuery); </script> ''', name)) class JudgeAdminForm(ModelForm): class Meta: widgets = {'auth_key': GenerateKeyTextInput} if AdminPagedownWidget is not None: widgets['description'] = AdminPagedownWidget class JudgeAdmin(VersionAdmin): form = JudgeAdminForm readonly_fields = ('created', 'online', 'start_time', 'ping', 'load', 'last_ip', 'runtimes', 'problems') fieldsets = ( (None, {'fields': ('name', 'auth_key')}), (_('Description'), {'fields': ('description',)}), (_('Information'), {'fields': ('created', 'online', 'last_ip', 'start_time', 'ping', 'load')}), (_('Capabilities'), {'fields': ('runtimes', 'problems')}), ) list_display = ('name', 'online', 'start_time', 'ping', 'load', 'last_ip') ordering = ['-online', 'name'] def get_readonly_fields(self, request, obj=None): if obj is not None and obj.online: return self.readonly_fields + ('name',) return self.readonly_fields def has_delete_permission(self, request, obj=None): result = super(JudgeAdmin, self).has_delete_permission(request, obj) if result and obj is not None: return not obj.online return result if AdminPagedownWidget is not None: formfield_overrides = { TextField: {'widget': AdminPagedownWidget}, }
Python
0.000001
@@ -1600,16 +1600,91 @@ else %5B%5D%0A + form = super(LanguageAdmin, self).get_form(request, obj, **kwargs)%0A @@ -1707,37 +1707,32 @@ ne:%0A -self. form.base_fields @@ -1823,67 +1823,12 @@ urn -super(LanguageAdmin, self).get_form(request, obj, **kwargs) +form %0A%0A%0Ac
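The mechanical change: get_form now fetches the form class from super().get_form() and attaches the per-user AceWidget to that, instead of mutating the class-level self.form before delegating. The commit subject is terse; one plausible motivation (an assumption, not stated in the source) is that the returned form class carries its own field copies, so a user's editor theme no longer leaks into the shared declared form. A toy, non-Django illustration of the leakage pattern:

class SharedForm(object):
    base_fields = {'template': 'plain-widget'}

def get_form_shared(theme):
    SharedForm.base_fields['template'] = theme          # mutates shared state
    return SharedForm

def get_form_isolated(theme):
    form = type('Form', (object,),
                {'base_fields': dict(SharedForm.base_fields)})
    form.base_fields['template'] = theme                # per-call copy
    return form

first = get_form_shared('ace-dark')
second = get_form_shared('ace-light')
assert first.base_fields['template'] == 'ace-light'     # first user clobbered

a = get_form_isolated('ace-dark')
b = get_form_isolated('ace-light')
assert a.base_fields['template'] == 'ace-dark'          # isolated per call
assert b.base_fields['template'] == 'ace-light'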
ebe5d80075ce818181a154b6ec772a08e335ae4a
fix test name
trtools/core/tests/test_timeseries.py
trtools/core/tests/test_timeseries.py
from unittest import TestCase import pandas as pd from pandas.core.groupby import BinGrouper import trtools.util.testing as tm import numpy as np import trtools.core.timeseries as ts # start on friday, so second day is saturday df = tm.fake_ohlc(1000000, freq="5min", start="2000-01-07") # business days and trading hours df = df.ix[df.index.dayofweek < 5] df = ts.trading_hours(df) class TestBinning(TestCase): def __init__(self, *args, **kwargs): TestCase.__init__(self, *args, **kwargs) def runTest(self): pass def setUp(self): pass def downsample(self): # these should be equivalent grouped = df.downsample('D', drop_empty=False) test = grouped.mean() correct = df.resample('D', how='mean') tm.assert_frame_equal(test, correct) def test_downsample_drop_empty(self): """ the drop_empty which is the default will not include empty groups into the GroupBy. """ grouped = df.downsample('D') test = grouped.mean() correct = df.resample('D', how='mean').dropna(how='all') tm.assert_frame_equal(test, correct) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
Python
0.000546
@@ -352,18 +352,17 @@ eek %3C 5%5D - %0A + df = ts. @@ -581,16 +581,21 @@ def +test_ downsamp @@ -872,17 +872,16 @@ %22%22%22 - %0A @@ -1209,78 +1209,8 @@ nose - %0A @@ -1289,12 +1289,9 @@ t=False) - %0A
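The substantive fix here is the rename from downsample to test_downsample: nose, like unittest and pytest, only collects methods whose names match the test pattern, so the original method was silently never run. A minimal demonstration with unittest:

import unittest

class TestBinning(unittest.TestCase):
    def downsample(self):                # never collected: no test prefix
        raise AssertionError("would fail, but is never run")

    def test_downsample(self):           # collected and run
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main()                      # reports 1 test, not 2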
15d0a23e8a15fe5bb05d00a4c5b9bcbaec43832c
Fix missing help text for option groups
selvbetjening/core/events/forms.py
selvbetjening/core/events/forms.py
from django import forms from django.utils.translation import ugettext_lazy as _ from django.forms import ModelForm from crispy_forms.helper import FormHelper from crispy_forms.layout import Submit, Fieldset, Layout from selvbetjening.core.translation.utility import translate_model from selvbetjening.core.invoice.models import Payment from models import AttendState class OptionGroupForm(forms.Form): def __init__(self, optiongroup, *args, **kwargs): self.optiongroup = translate_model(optiongroup) self.selected_total = 0 self.selected_initally = False self.save_options = [] self.attendee = kwargs.pop('attendee', None) if self.attendee is not None: selections = [selection for selection in self.attendee.selections if selection.option.group == optiongroup] else: selections = [] kwargs['initial'] = {} for selection in selections: kwargs['initial'][self._get_id(selection.option)] = True if selection.suboption: kwargs['initial'][self._get_sub_id(selection.option)] = selection.suboption.id super(OptionGroupForm, self).__init__(*args, **kwargs) self.selected_option_pks = [selection.option.pk for selection in selections] display_params = None # package controls if optiongroup.package_solution: self.fields[self._get_package_id(optiongroup)] = \ forms.BooleanField(label=_(u'Package'), required=False, widget=forms.CheckboxInput(attrs={'class': 'package'})) display_params = {'class': 'in_package in_package_%s' % optiongroup.pk} for option in optiongroup.options: translate_model(option) selected = option.pk in self.selected_option_pks disabled = self.attendee is not None and \ self.optiongroup.lock_selections_on_acceptance == True and \ self.attendee.state != AttendState.waiting disabled = disabled or (option.max_attendees_reached() and not selected) disabled = disabled or option.is_frozen() suboptions = option.suboptions if self._should_save(option, suboptions, disabled): self.save_options.append((option, suboptions)) self._display_option(option, disabled, suboptions, display_params=display_params) self._register_clean_function(option, selected, disabled) # setup display related settings fields = [self.optiongroup.name,] + [field_id for field_id in self.fields] options = {'help_text' : self.optiongroup.description, 'large_hints' : True} layout = Layout(Fieldset(*fields, **options)) self.helper = FormHelper() self.helper.add_layout(layout) self.helper.form_tag = False self.helper.use_csrf_protection = True def _should_save(self, option, suboptions, disabled): return True def _display_option(self, option, disabled, suboptions, display_params=None): if display_params is None: display_params = {} #if len(suboptions) > 0: # display_params['children'] = (self._get_sub_id(option),) if option.price == 0: label = option.name else: label = '%s (%s,-)' % (option.name, option.price) self.fields[self._get_id(option)] = forms.BooleanField(label=label, required=False, help_text=option.description, widget=forms.CheckboxInput(attrs=display_params)) if len(suboptions) > 0: choices = [(suboption.id, suboption.name) for suboption in suboptions] self.fields[self._get_sub_id(option)] = forms.ChoiceField(label=_('Choices'), choices=choices, required=False) def _register_clean_function(self, option, selected_initially, disabled): pass def clean(self): return self.cleaned_data def save(self, attendee=None): if self.attendee is None: self.attendee = attendee for option, suboptions in self.save_options: if self.cleaned_data.get(self._get_id_pk(option.pk), False): suboption_id = self.cleaned_data.get(self._get_sub_id(option), None) 
if suboption_id: suboption = suboptions.get(pk=suboption_id) else: suboption = None self.attendee.select_option(option, suboption) else: self.attendee.deselect_option(option) def is_selected(self, option_pk): if option_pk in [option.pk for option, suboptions in self.save_options]: return self.cleaned_data.get(self._get_id_pk(option_pk), False) else: return option_pk in self.selected_option_pks @staticmethod def _get_id_pk(option_pk): return 'option_' + str(option_pk) @staticmethod def _get_id(option): return 'option_' + str(option.pk) @staticmethod def _get_sub_id(option): return 'suboptions_' + str(option.pk) @staticmethod def _get_package_id(optiongroup): return 'package_' + str(optiongroup.pk) class OptionForms(object): optiongroupform = OptionGroupForm def __init__(self, event, post=None, attendee=None): self.forms = [] for optiongroup in event.optiongroups.order_by('order'): if post is None: self.forms.append(self.optiongroupform(optiongroup, attendee=attendee)) else: self.forms.append(self.optiongroupform(optiongroup, post, attendee=attendee)) def is_valid(self): is_valid = True for form in self.forms: is_valid = is_valid and form.is_valid() return is_valid def save(self, attendee=None): for form in self.forms: form.save(attendee=attendee) def is_selected(self, option_pk): for form in self.forms: if form.is_selected(option_pk): return True return False def __iter__(self): for form in self.forms: yield form class PaymentForm(ModelForm): class Meta: model = Payment fields = ('amount', 'note') layout = Layout(Fieldset(_(u'Payment'), *Meta.fields)) submit = Submit('submit_payment', _('Pay')) helper = FormHelper() helper.add_layout(layout) helper.add_input(submit) helper.form_tag = True helper.use_csrf_protection = True
Python
0.9986
@@ -196,18 +196,8 @@ mit, - Fieldset, Lay @@ -323,16 +323,76 @@ ayment%0A%0A +from selvbetjening.viewbase.forms.helpers import SFieldset%0A%0A from mod @@ -2713,17 +2713,16 @@ lp_text' - : self.o @@ -2777,17 +2777,16 @@ e_hints' - : True%7D%0A @@ -2802,32 +2802,33 @@ layout = Layout( +S Fieldset(*fields @@ -6738,16 +6738,17 @@ d form%0A%0A +%0A class Pa @@ -6866,16 +6866,17 @@ Layout( +S Fieldset
c22024d29548f93d5bf6cfac6fc9f5bd02915e92
Try reverse with strings
judge/views/problem.py
judge/views/problem.py
from django.contrib.auth.decorators import login_required from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.http import Http404, HttpResponseRedirect from django.shortcuts import render_to_response from django.template import RequestContext from judge.forms import ProblemSubmitForm from judge.models import Problem, Profile, Submission def get_result_table(code): results = {} for submission in Submission.objects.filter(problem__code=code) if code else Submission.objects.all(): r = None if submission.result and submission.result not in ["IE"]: r = submission.result results[r] = results.get(r, 0) + 1 return results def problem(request, code): try: problem = Problem.objects.get(code=code) return render_to_response('problem.html', {'problem': problem, 'results': get_result_table(code), 'title': 'Problem %s' % problem.name}, context_instance=RequestContext(request)) except ObjectDoesNotExist: return Http404() def problems(request): return render_to_response('problems.html', {'problems': Problem.objects.all(), 'title': 'Problems'}, context_instance=RequestContext(request)) @login_required def problem_submit(request, problem=None): if request.method == 'POST': form = ProblemSubmitForm(request.POST, instance=Submission(user=request.user.profile)) if form.is_valid(): model = form.save() model.judge() return HttpResponseRedirect(reverse('judge.view.submission_status', args=[model.id])) else: initial = {'language': request.user.profile.language} if problem is not None: try: initial['problem'] = Problem.objects.get(code=problem) except ObjectDoesNotExist: return Http404() form = ProblemSubmitForm(initial=initial) return render_to_response('problem_submit.html', {'form': form, 'title': 'Submit'}, context_instance=RequestContext(request))
Python
0.999083
@@ -1711,16 +1711,20 @@ gs=%5B +str( model.id %5D))%0A @@ -1719,16 +1719,17 @@ model.id +) %5D))%0A
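The diff wraps model.id in str() before handing it to reverse(). reverse() ultimately interpolates the argument into the URL pattern and matches it against the pattern's regex, which operates on text; whether the Django version in use coerced integers automatically is not clear from the source, hence the 'Try' in the subject. The matching step in isolation, with an illustrative pattern rather than the project's actual urlconf:

import re

pattern = re.compile(r'^submission/(\d+)/$')
model_id = 42
url = 'submission/%s/' % str(model_id)   # explicit str(), as in the diff
assert pattern.match(url)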
f386fce820fb60abfe1b18c141dfd8ce268c5f4f
Update queue_on_list.py (#851)
data_structures/queue/queue_on_list.py
data_structures/queue/queue_on_list.py
"""Queue represented by a python list""" class Queue(): def __init__(self): self.entries = [] self.length = 0 self.front=0 def __str__(self): printed = '<' + str(self.entries)[1:-1] + '>' return printed """Enqueues {@code item} @param item item to enqueue""" def put(self, item): self.entries.append(item) self.length = self.length + 1 """Dequeues {@code item} @requirement: |self.length| > 0 @return dequeued item that was dequeued""" def get(self): self.length = self.length - 1 dequeued = self.entries[self.front] self.front-=1 self.entries = self.entries[self.front:] return dequeued """Rotates the queue {@code rotation} times @param rotation number of times to rotate queue""" def rotate(self, rotation): for i in range(rotation): self.put(self.get()) """Enqueues {@code item} @return item at front of self.entries""" def front(self): return self.entries[0] """Returns the length of this.entries""" def size(self): return self.length
Python
0
@@ -664,32 +664,33 @@ front%5D%0D%0A +# self.front-=1%0D%0A @@ -688,32 +688,33 @@ ont-=1%0D%0A +# self.entries = s @@ -739,16 +739,57 @@ ront:%5D%0D%0A + self.entries = self.entries%5B1:%5D%0D%0A
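The front-pointer arithmetic being commented out here is subtly broken: get() decrements self.front below zero, and once it underruns -len(self.entries) the slice self.entries[self.front:] stops discarding consumed items, so later dequeues return stale elements (for example, after get, get, put('c'), get on an initially two-element queue, the old code returns 'b' twice). The patched behaviour as a clean, self-contained queue:

class Queue(object):
    def __init__(self):
        self.entries = []

    def put(self, item):
        self.entries.append(item)

    def get(self):
        dequeued = self.entries[0]
        self.entries = self.entries[1:]   # what the patched get() does
        return dequeued

q = Queue()
q.put('a')
q.put('b')
assert q.get() == 'a'
q.put('c')
assert q.get() == 'b'
assert q.get() == 'c'                     # the old slicing returned 'b' here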
91540aefccdccdeb0c668b8ce5a99bb5471a3200
Change order slightly.
avenue/web.py
avenue/web.py
# -*- coding: utf-8 -*- # Copyright (c) 2012 Michael Babich # See LICENSE.txt or http://opensource.org/licenses/MIT '''Acts as an interface between what Flask serves and what goes on in the rest of the application. ''' from avenue import app, api from avenue.database import content from flask import render_template, make_response, redirect def url_generator(): '''This function acts on a list of URLs, a text rule for each URL, and a function that says what to do to that text rule to serve a page. The action_list associates a subset of URLs with a particular function to be used as the action for that group. ''' data = api.read_data('forum') threads = data['threads'] content.insert_data() themes = content.get_themes() nav = content.get_nav() tags = content.get_tags() redirects = content.get_urls() css = {} for theme in themes: css[themes[theme]['url']] = theme def forum_set_tags(): '''Turns strings containing tag names into tag objects that can be used to generate HTML/CSS renderings of the tag. ''' for thread in threads: for post in threads[thread]['posts']: if 'tags' in post: for i in range(len(post['tags'])): post['tags'][i] = tags[post['tags'][i]] def forum_page(name): '''Makes a forum page of the given thread name. ''' thread = threads[name] html = '%s :: %s :: %s' % (thread['title'], data['forum'], data['site']) main = '%s -- %s' % (data['site'], data['forum']) title = { 'html' : html, 'main' : main, 'thread' : thread['title'], 'url' : data['forum_url'] } return render_template('forum.html', style='night', sidebar=nav, title=title, posts=thread['posts'], threaded=thread['threaded']) def setup_url_rule(urls, action): '''Sets up URL rules, given a dictionary of urls and a function that they will act on. It passes an anonymous function to add_url_rule that always does a particular action to a particular string when that URL is accessed. ''' is_dict = type(urls) == dict for url in urls: text = urls[url] if is_dict else url app.add_url_rule(url, url, lambda text: lambda: action(text)) forum_set_tags() setup_url_rule(redirects, redirect) setup_url_rule(css, lambda theme: api.make_css(themes[theme])) setup_url_rule(threads.keys(), forum_page)
Python
0
@@ -722,16 +722,17 @@ _data()%0A +%0A them @@ -853,89 +853,8 @@ ls() -%0A css = %7B%7D%0A%0A for theme in themes:%0A css%5Bthemes%5Btheme%5D%5B'url'%5D%5D = theme %0A%0A @@ -2492,24 +2492,106 @@ set_tags()%0A%0A + css = %7B%7D%0A%0A for theme in themes:%0A css%5Bthemes%5Btheme%5D%5B'url'%5D%5D = theme%0A%0A setup_ur @@ -2655,39 +2655,16 @@ a theme: -%0A api.mak @@ -2685,17 +2685,16 @@ heme%5D))%0A -%0A setu
50d3fcb1ad4326a55bb156fd641ce40bf52a9a51
rework router
ldapdb/router.py
ldapdb/router.py
# -*- coding: utf-8 -*- # # django-ldapdb # Copyright (c) 2009-2010, Bolloré telecom # All rights reserved. # # See AUTHORS file for a full list of contributors. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Bolloré telecom nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # class Router(object): """A router to control all database operations on models in the myapp application""" def db_for_read(self, model, **hints): "Point all operations on LDAP models to 'ldap'" from ldapdb.models import Model if Model in model.__bases__: return 'ldap' return None def db_for_write(self, model, **hints): "Point all operations on LDAP models to 'ldap'" from ldapdb.models import Model if Model in model.__bases__: return 'ldap' return None
Python
0.000001
@@ -1686,16 +1686,149 @@ AGE.%0A#%0A%0A +def is_ldap_model(model):%0A # FIXME: there is probably a better check than testing 'base_dn'%0A return hasattr(model, 'base_dn')%0A%0A class Ro @@ -2046,75 +2046,31 @@ -from +if is_ ldap -db. +_ model -s import Model%0A if Model in model.__bases__ +(model) :%0A @@ -2226,75 +2226,31 @@ -from +if is_ ldap -db. +_ model -s import Model%0A if Model in model.__bases__ +(model) :%0A
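The old check, Model in model.__bases__, only matches direct subclasses of the ldapdb Model; the rework switches to duck typing on the base_dn attribute, which covers the whole subclass hierarchy. The FIXME in the new code concedes this is a heuristic (anything exposing base_dn routes to 'ldap'; issubclass(model, Model) would be the type-safe alternative). Why __bases__ misses grandchildren:

class Model(object):
    base_dn = 'dc=example,dc=org'

class Person(Model):
    pass

class Admin(Person):                     # two levels below Model
    pass

print(Model in Admin.__bases__)          # False: old check -> default database
print(hasattr(Admin, 'base_dn'))         # True:  new check -> 'ldap'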
84bbcdcb547468385291a420f6d88eb594c03fd1
Add lex and handle semicolons correctly
twosheds/shell.py
twosheds/shell.py
""" twosheds.shell ~~~~~~~~~~~~~~ This module implements the central user interface for access to an operating system's kernel services. """ import os import subprocess import sys import traceback class Shell(object): """The shell is an sh-compatible command language interpreter that executes commands read from standard input. """ BUILTINS = {'cd': os.chdir} def __init__(self, aliases=None, builtins=None): self.aliases = aliases or {} self.builtins = builtins or self.BUILTINS @property def prompt(self): """Indicate to the user that the shell is waiting for a command.""" return "$ " def output(self, msg): """Output a message.""" sys.stdout.write(msg) def error(self, msg): """Output an error.""" sys.stderr.write(msg) def read(self): """Accept a command from the user.""" try: return self.expand(raw_input(self.prompt)) except EOFError: raise SystemExit() def expand_aliases(self, line): """Expand aliases in a line.""" try: command, args = line.split(" ", 1) except ValueError: command, args = line, "" try: return "%s %s" % (self.aliases[command], args) except KeyError: return line def expand_variables(self, line): """Expand environmental variables in a line.""" tokens = line.split() new_tokens = [] for token in tokens: if token.startswith("$"): try: token = os.environ[token[1:]] except KeyError: pass new_tokens.append(token) return " ".join(new_tokens) def expand(self, line): """Expand any macros in a command.""" return self.expand_variables(self.expand_aliases(line)) def eval(self, line): """Evaluate an input.""" if not line: return tokens = line.split() command, args = tokens[0], tokens[1:] try: self.builtins[command](*args) except KeyError: subprocess.call(line, shell=True) def interact(self, banner=None): """Interact with the user. The optional banner argument specifies the banner to print before the first interaction. By default, no banner is printed. """ if banner: print(banner) while True: try: self.eval(self.read()) except SystemExit: break except: self.error(traceback.format_exc())
Python
0.000003
@@ -935,34 +935,32 @@ -return +lines = self. +l ex -pand (raw_inp @@ -979,63 +979,211 @@ pt)) -%0A except EOFError:%0A raise SystemExit( +.split(%22;%22)%0A for line in lines:%0A yield self.expand(line)%0A except EOFError:%0A raise SystemExit()%0A%0A def lex(self, line):%0A return line.replace(%22;%22, %22 ; %22 )%0A%0A @@ -2696,29 +2696,73 @@ -self.eval(self.read() +for command in self.read():%0A self.eval(command )%0A
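After this diff, read() becomes a generator: lex() pads each ';' with spaces, the input is split on ';', and each piece is expanded and yielded, so 'cd /tmp; ls' executes as two commands. A simplified sketch in which strip() merely stands in for the alias and variable expansion the real shell performs on each piece:

def lex(line):
    return line.replace(";", " ; ")

def read_commands(line):
    for piece in lex(line).split(";"):
        yield piece.strip()              # stand-in for self.expand(piece)

assert list(read_commands("cd /tmp; ls")) == ["cd /tmp", "ls"]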
9b8ac5d82771a39773d39d6fd607dc7b2b304e37
factor out common code in use_*()
txaio/__init__.py
txaio/__init__.py
############################################################################### # # The MIT License (MIT) # # Copyright (c) Tavendo GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### from txaio.interfaces import IFailedFuture # This is the API # see tx.py for Twisted implementation # see aio.py for asyncio/trollius implementation class _Config: """ This holds all valid configuration options, accessed as class-level variables. For example, if you were using asyncio: .. sourcecode:: python txaio.config.loop = asyncio.get_event_loop() ``loop`` is populated automatically (while importing one of the framework-specific libraries) but can be changed before any call into this library. Currently, it's only used by :meth:`call_later` If using asyncio, you must set this to an event-loop (by default, we use asyncio.get_event_loop). 
If using Twisted, set this to a reactor instance (by default we "from twisted.internet import reactor" on the first call to call_later) """ #: the event-loop object to use loop = None __all__ = ( 'using_twisted', # True if we're using Twisted 'using_asyncio', # True if we're using asyncio 'use_twisted', # sets the library to use Twisted, or exception 'use_asyncio', # sets the library to use asyncio, or exception 'config', # the config instance, access via attributes 'create_future', # create a Future (can be already resolved/errored) 'as_future', # call a method, and always return a Future 'reject', # errback a Future 'resolve', # callback a Future 'add_callbacks', # add callback and/or errback 'gather', # return a Future waiting for several other Futures 'IFailedFuture', # describes API for arg to errback()s ) def use_twisted(): from txaio import tx import txaio txaio.using_twisted = True txaio.using_asyncio = False for method_name in __all__: if method_name in ['use_twisted', 'use_asyncio']: continue twisted_method = getattr(tx, method_name) setattr(txaio, method_name, twisted_method) def use_asyncio(): from txaio import aio import txaio txaio.using_twisted = False txaio.using_asyncio = True for method_name in __all__: if method_name in ['use_twisted', 'use_asyncio']: continue twisted_method = getattr(aio, method_name) setattr(txaio, method_name, twisted_method) try: from txaio.tx import * # noqa using_twisted = True except ImportError: try: from txaio.aio import * # noqa using_asyncio = True except ImportError: # pragma: no cover # pragma: no cover raise ImportError("Neither asyncio nor Twisted found.")
Python
0.998509
@@ -3024,16 +3024,39 @@ port tx%0A + _use_framework(tx)%0A impo @@ -3056,32 +3056,32 @@ import txaio%0A - txaio.using_ @@ -3131,221 +3131,8 @@ lse%0A - for method_name in __all__:%0A if method_name in %5B'use_twisted', 'use_asyncio'%5D:%0A continue%0A twisted_method = getattr(tx, method_name)%0A setattr(txaio, method_name, twisted_method)%0A %0A%0Ade @@ -3170,24 +3170,48 @@ import aio%0A + _use_framework(aio)%0A import t @@ -3274,24 +3274,181 @@ ncio = True%0A +%0A%0Adef _use_framework(module):%0A %22%22%22%0A Internal helper, to set this modules methods to a specified%0A framework helper-methods.%0A %22%22%22%0A import txaio%0A for meth @@ -3558,33 +3558,18 @@ -twisted_method = g +s etattr( +tx aio, @@ -3580,17 +3580,17 @@ hod_name -) +, %0A @@ -3586,37 +3586,46 @@ me,%0A -s + g etattr( -txaio +module , method_nam @@ -3629,24 +3629,9 @@ name -, twisted_method +) )%0A%0A%0A
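The refactor extracts the copy loop into _use_framework(module), which rebinds every public name in __all__ (minus the two use_* selectors) from the chosen backend onto the txaio package. The same delegation idiom in standalone form, with toy modules standing in for txaio.tx and txaio.aio:

import types

tx = types.ModuleType('tx')
tx.create_future = lambda: 'twisted-future'
aio = types.ModuleType('aio')
aio.create_future = lambda: 'asyncio-future'

facade = types.ModuleType('facade')      # stands in for the txaio package
_API = ('create_future',)

def _use_framework(module):
    for name in _API:
        setattr(facade, name, getattr(module, name))

_use_framework(tx)
assert facade.create_future() == 'twisted-future'
_use_framework(aio)
assert facade.create_future() == 'asyncio-future'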
cf1137ff1f64f2a13b40583ff5ab9a8563165d2f
Send messages terminating in \r\n, not just \n
txircd/ircbase.py
txircd/ircbase.py
from twisted.protocols.basic import LineOnlyReceiver class IRCBase(LineOnlyReceiver): delimiter = "\n" # Default to splitting by \n, and then we'll also split \r in the handler def lineReceived(self, data): for line in data.split("\r"): command, params, prefix, tags = self._parseLine(line) if command: self.handleCommand(command, params, prefix, tags) def _parseLine(self, line): line = line.replace("\0", "") if not line: return None, None, None, None if line[0] == "@": if " " not in line: return None, None, None, None tagLine, line = line.split(" ", 1) tags = self._parseTags(tagLine[1:]) else: tags = {} prefix = None if line[0] == ":": if " " not in line: return None, None, None, None prefix, line = line.split(" ", 1) prefix = prefix[1:] if " :" in line: linePart, lastParam = line.split(" :", 1) else: linePart = line lastParam = None if not linePart: return None, None, None, None if " " in linePart: command, paramLine = linePart.split(" ", 1) params = paramLine.split(" ") else: command = linePart params = [] while "" in params: params.remove("") if lastParam is not None: params.append(lastParam) return command.upper(), params, prefix, tags def _parseTags(self, tagLine): tags = {} for tagval in tagLine.split(";"): if not tagval: continue if "=" in tagval: tag, escapedValue = tagval.split("=", 1) escaped = False valueChars = [] for char in escapedValue: if char == "\\": escaped = True continue if escaped: if char == "\\": valueChars.append("\\") elif char == ":": valueChars.append(";") elif char == "r": valueChars.append("\r") elif char == "n": valueChars.append("\n") elif char == "s": valueChars.append(" ") escaped = False continue valueChars.append(char) value = "".join(valueChars) else: tag = tagval value = None tags[tag] = value return tags def handleCommand(self, command, params, prefix, tags): pass def sendMessage(self, command, *params, **kw): if "tags" in kw: tags = self._buildTagString(kw["tags"]) else: tags = None if "prefix" in kw: prefix = kw["prefix"] else: prefix = None params = list(params) if params: for param in params[:-1]: for badChar in (" ", "\r", "\n", "\0"): if badChar in param: raise ValueError("Illegal character {!r} found in parameter {!r}".format(badChar, param)) for badChar in ("\r", "\n", "\0"): if badChar in params[-1]: raise ValueError("Illegal character {!r} found in parameter {!r}".format(badChar, params[-1])) if not params[-1] or " " in params[-1] or params[-1][0] == ":": params[-1] = ":{}".format(params[-1]) lineToSend = "" if tags: lineToSend += "@{} ".format(tags) if prefix: lineToSend += ":{} ".format(prefix) lineToSend += "{} {}".format(command, " ".join(params)) self.sendLine(lineToSend.replace("\0", "")) def _buildTagString(self, tags): tagList = [] for tag, value in tags.iteritems(): for char in tag: if not char.isalnum() and char not in ("-", "/", "."): raise ValueError("Illegal character {!r} found in key {!r}".format(char, tag)) if value is None: tagList.append(tag) else: if "\0" in value: raise ValueError("Illegal character '\\0' found in value for key {!r}".format(tag)) escapedValue = value.replace("\\", "\\\\").replace(";", "\\:").replace(" ", "\\s").replace("\r", "\\r").replace("\n", "\\n") tagList.append("{}={}".format(tag, escapedValue)) return ";".join(tagList)
Python
0.000013
@@ -3013,24 +3013,38 @@ lf.sendLine( +%22%7B%7D%5Cr%22.format( lineToSend.r @@ -3060,16 +3060,17 @@ 0%22, %22%22)) +) %0A%09%0A%09def
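Twisted's LineOnlyReceiver.sendLine writes the line followed by self.delimiter, and IRCBase sets the delimiter to "\n" for receive-side splitting; prefixing the outgoing payload with "\r" (after the existing NUL strip) therefore puts the "\r\n" terminator the IRC RFCs require on the wire. The string handling in isolation:

delimiter = "\n"                          # IRCBase's class-level delimiter

def send_line(line):                      # what LineOnlyReceiver.sendLine does
    return line + delimiter

line_to_send = "PRIVMSG #chan :hi\0"
wire = send_line("{}\r".format(line_to_send.replace("\0", "")))
assert wire == "PRIVMSG #chan :hi\r\n"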
9501ab023a51ca6f3e37fcad3c9c9ff04223986b
update version to 0.4
txjsonrpc/meta.py
txjsonrpc/meta.py
display_name = "txJSON-RPC" library_name = "txjsonrpc" version = "0.3.1" author = "Duncan McGreggor" author_email = "oubiwann@adytum.us" license = "BSD, GPL" url = "http://launchpad.net/%s" % library_name description = "Code for creatig Twisted JSON-RPC servers and clients."
Python
0.000001
@@ -65,11 +65,9 @@ %220. -3.1 +4 %22%0Aau
0dc29df1e97b8c5f36320b55c659c8290f021c69
Fix parallelization of number of topics script
DilipadTopicModelling/experiment_number_of_topics.py
DilipadTopicModelling/experiment_number_of_topics.py
import logging import glob from multiprocessing import Process from CPTCorpus import CPTCorpus from CPT_Gibbs import GibbsSampler def run_sampler(corpus, nTopics, nIter, beta, out_dir): alpha = 50.0/nTopics logger.info('running Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, ' 'beta: {})'.format(nTopics, nIter, alpha, beta)) sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha, beta=beta, beta_o=beta, out_dir=out_dir.format(nTopics)) sampler._initialize() sampler.run() logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) #logger.setLevel(logging.INFO) files = glob.glob('/home/jvdzwaan/data/tmp/test/*') out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}' corpus = CPTCorpus(files, testSplit=20) corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100) corpus.save_dictionaries(directory=out_dir.format('')) corpus.save(out_dir.format('corpus.json')) #corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir), # topicDict='{}/topicDict.dict'.format(out_dir), # opinionDict='{}/opinionDict.dict'.format(out_dir)) nIter = 200 beta = 0.02 nTopics = range(20, 201, 20) logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics))) processes = [Process(target=run_sampler, args=(corpus, n, nIter, beta, out_dir)) for n in nTopics] # Run processes for p in processes: p.start() # Exit the completed processes for p in processes: p.join()
Python
0.000141
@@ -49,22 +49,19 @@ import P -rocess +ool %0A%0Afrom C @@ -803,16 +803,32 @@ ata/ -tmp/test +dilipad/20112012/gov_opp /*') @@ -864,30 +864,28 @@ ata/ -tmp/generated/test_exp +dilipad/res_20112012 /%7B%7D' @@ -1121,61 +1121,45 @@ pus. -CPTCorpus.load('%7B%7Dcorpus.json'.format(out_dir),%0A# +load(out_dir.format('corpus.json'),%0A# @@ -1178,22 +1178,16 @@ - topicDic @@ -1192,50 +1192,43 @@ ict= -'%7B%7D/topicDict.dict'.format(out_dir),%0A# +out_dir.format('topicDict.dict'),%0A# @@ -1247,22 +1247,16 @@ - opinionD @@ -1263,12 +1263,24 @@ ict= -'%7B%7D/ +out_dir.format(' opin @@ -1296,23 +1296,8 @@ ict' -.format(out_dir ))%0A%0A @@ -1436,98 +1436,149 @@ )%0A%0Ap -rocesses = %5BProcess(target=run_sampler,%0A args=(corpus, n, nIter, beta, +ool = Pool(processes=3)%0Aresults = %5Bpool.apply_async(run_sampler, args=(corpus, n, nIter, beta,%0A out @@ -1584,18 +1584,16 @@ t_dir))%0A - @@ -1617,116 +1617,25 @@ cs%5D%0A -%0A# Run processes%0Afor p in processes:%0A p.start()%0A%0A# Exit the completed processes%0Afor p in processes:%0A p +pool.close()%0Apool .joi
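The fix replaces one Process per configuration, which launched all ten Gibbs samplers simultaneously, with a Pool of three workers driven by apply_async, capping concurrency. The same scheduling pattern, self-contained; the worker body is a stand-in for the real sampler call:

from multiprocessing import Pool
import os

def run_sampler(n_topics):
    return (n_topics, os.getpid())           # stand-in for the Gibbs sampler

if __name__ == '__main__':
    n_topics_grid = range(20, 201, 20)       # 10 configurations
    pool = Pool(processes=3)                 # at most 3 run concurrently
    results = [pool.apply_async(run_sampler, args=(n,)) for n in n_topics_grid]
    pool.close()
    pool.join()
    print([r.get() for r in results])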
8ec5a19bcac5676cee85a122e1bf9a2554a61b01
bump back to 2 concurrent jobs. too many 503s
ingestor/l8_process_run.py
ingestor/l8_process_run.py
#!/usr/bin/env python

import argparse
import sys
import os
import shutil
import pprint
import sets
import time

from usgs import api
from usgs import USGSError

import pusher
import scene_info
import l8_process_scene
from puller_usgs import backoff_factor

def retry_login(retries=4, verbose=False):
    """
    Retry USGS login multiple times, with exponential backoff between
    """
    if verbose:
        print 'logging in...'
    sleep_time = 5
    for _ in xrange(retries + 1):
        try:
            api_key = api.login(os.environ['USGS_USERID'],
                                os.environ['USGS_PASSWORD'])
            if verbose:
                print ' api_key = %s' % api_key
            return api_key
        except USGSError:
            pass
        print 'USGS login failed, retry in %s' % sleep_time
        time.sleep(sleep_time)
        sleep_time *= backoff_factor(2)
    return None

def query_for_scenes(start_date, end_date, verbose=False, limit=None):
    if 'USGS_PASSWORD' in os.environ:
        api_key = retry_login(verbose=verbose)
        if not api_key:
            print "Failed to authenticate with USGS servers"
            sys.exit(1)

    full_list = []
    list_offset = 0
    these_scenes = 'start'
    chunk_size = 500
    if limit is not None and limit < chunk_size:
        chunk_size = limit

    if verbose:
        print 'search...'
    while these_scenes == 'start' or len(these_scenes) == chunk_size:
        these_scenes = api.search("LANDSAT_8", "EE",
                                  start_date=start_date,
                                  end_date=end_date,
                                  starting_number = 1+list_offset,
                                  max_results=chunk_size)
        if verbose:
            print '... %d scenes' % len(these_scenes)
        full_list += these_scenes
        list_offset += len(these_scenes)
        if limit is not None and list_offset >= limit:
            break

    scene_ids = [scene['entityId'] for scene in full_list]
    return scene_ids

def remove_processed_ids(scene_ids, scene_list_file):
    prev_scene_ids = sets.Set([
        line[:line.find(',')] for line in open(
            scene_list_file).readlines()])
    missing_ids = []
    for scene_id in scene_ids:
        if scene_id not in prev_scene_ids:
            missing_ids.append(scene_id)
    return missing_ids

def remove_queued_ids(scene_ids):
    missing_ids = []
    for scene_id in scene_ids:
        if not pusher.check_file_existance('tarq/%s.tar.gz' % scene_id):
            missing_ids.append(scene_id)
    return missing_ids

def process_scene_set_internal(args, scene_ids, scene_list_file):
    run_id = pusher.acquire_run_id('(l8_process_run.py)',
                                   force=args.break_run_lock)
    run_file = 'this_run.csv'
    scene_info.init_list_file(run_file)

    results = []
    for scene_id in scene_ids:
        scene_dict = l8_process_scene.process(
            args.source, scene_id, clean = True, verbose = args.verbose)
        if scene_dict is not None:
            open(run_file,'a').write(
                scene_info.make_scene_line(scene_dict)+'\n')
            open(scene_list_file,'a').write(
                scene_info.make_scene_line(scene_dict)+'\n')

    pusher.upload_run_list(run_id, run_file, scene_list_file,
                           verbose = args.verbose)

def process_scene_set_external(args, scene_ids, scene_list_file):
    run_id = pusher.acquire_run_id('(l8_process_run.py)',
                                   force=args.break_run_lock)
    run_file = 'this_run.csv'
    scene_info.init_list_file(run_file)

    in_file = 'this_run.lst'
    open(in_file,'w').write(('\n'.join(scene_ids)) + '\n')

    cmd = 'parallel -j 6 %s %s -l %s < %s' % (
        'l8_process_scene.py',
        '--verbose' if args.verbose else '',
        run_file,
        in_file)
    if args.verbose:
        print cmd
    rc = os.system(cmd)

    new_lines = open(run_file).read().strip().split('\n')[1:]
    open(scene_list_file,'a').write(('\n'.join(new_lines)) + '\n')

    pusher.upload_run_list(run_id, run_file, scene_list_file,
                           verbose = args.verbose)

def copy_scene_set_external(args, scene_ids):
    in_file = 'this_run.lst'
    open(in_file,'w').write(('\n'.join(scene_ids)) + '\n')

    cmd = 'parallel -j 3 %s %s --source %s < %s' % (
        'l8_queue_tar.py',
        '--verbose' if args.verbose else '',
        args.source,
        in_file)
    if args.verbose:
        print cmd
    rc = os.system(cmd)

    if args.verbose:
        print 'status=%d, %d scenes requested.' % (rc, len(scene_ids))

def get_parser():
    aparser = argparse.ArgumentParser(
        description='Query for new scenes and ingest them to S3.')
    aparser.add_argument('-s', '--source', default='usgs',
                         choices=['gcs', 'usgs', 'auto'],
                         help='Source service for tar')
    aparser.add_argument('-v', '--verbose', action='store_true',
                         help='Report details on progress.')
    aparser.add_argument('--limit', type=int)
    aparser.add_argument('--start-date')
    aparser.add_argument('--end-date')
    aparser.add_argument('--run-directly', action='store_true')
    aparser.add_argument('--queue', action='store_true')
    aparser.add_argument('--parallel', action='store_true')
    aparser.add_argument('--break-run-lock', action='store_true')
    return aparser

def main(rawargs):
    args = get_parser().parse_args(rawargs)

    scene_ids = query_for_scenes(args.start_date, args.end_date,
                                 verbose=args.verbose, limit=args.limit)

    scene_list_file = pusher.get_past_list()
    scene_ids = remove_processed_ids(scene_ids, scene_list_file)
    if args.queue:
        scene_ids = remove_queued_ids(scene_ids)

    print '%d scenes identified for processing.' % len(scene_ids)
    sys.stdout.flush()

    if not args.run_directly and not args.queue:
        for i in scene_ids:
            print i
        sys.exit(1)

    if args.queue:
        copy_scene_set_external(args, scene_ids)
    elif args.parallel:
        process_scene_set_external(args, scene_ids, scene_list_file)
    else:
        process_scene_set_internal(args, scene_ids, scene_list_file)

    api.logout()

if __name__ == '__main__':
    status = main(sys.argv[1:])
    sys.exit(status)
Python
0.000216
@@ -4397,17 +4397,17 @@ llel -j -3 +2 %25s %25s -
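A note on the diff fields in these records: the format resembles Google diff-match-patch patch text. Each hunk header carries character offsets into old_contents rather than line numbers, and hunk bodies are percent-encoded (%0A is a newline, %25 a literal %, %C3%A9 an e-acute); this rendering additionally collapses the newlines between the per-op lines of a hunk. Below is a minimal sketch of decoding and applying such a patch with the diff_match_patch library — the identification of the field format is an inference from the encoding, not documented in the dataset itself, and it assumes the stored diff still has its internal newlines intact:

import urllib.parse
import diff_match_patch  # pip install diff-match-patch

def apply_commit_diff(old_contents: str, diff_text: str) -> str:
    # patch_fromText() parses the '@@ -start,len +start,len @@' hunks and
    # percent-decodes each op line (' '=context, '-'=delete, '+'=insert).
    dmp = diff_match_patch.diff_match_patch()
    patches = dmp.patch_fromText(diff_text)
    new_contents, hunk_results = dmp.patch_apply(patches, old_contents)
    assert all(hunk_results)  # every hunk found its context
    return new_contents

# For merely reading a hunk, urllib.parse.unquote() undoes the encoding:
# urllib.parse.unquote("%25s %25s") == "%s %s"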
23e609ba624ad1eb4cf33f609f873afec38d6c85
Fix Instruments to use self.
labtoolkit/__init__.py
labtoolkit/__init__.py
# -*- coding: utf-8 -*-

__author__ = """David A Lutton"""
__email__ = 'david@dalun.space'
__version__ = '0.1.0'

import sys
import visa
from pprint import pprint

from . import PowerMeter
from . import SignalGenerator
from . import WaveformGenerator
from . import SpectrumAnalyser
from . import NetworkAnalyser
from . import ElectronicAttenuator
from . import DigitalMultimeter
from . import EnviromentalChamber
from . import PowerAnalyser
from . import SurgeGenerator
from . import AudioAnalyser
from . import Osciliscope
from . import FieldStrength
from . import Positioner
from . import FrequencyCounter
from . import SourceDC
from . import SourceAC
from . import SwitchMatrix
from . import ModulationMeter
from . import ElectronicLoad
# from . import PortableApplianceTester

def what():
    for name in set(sys.modules):
        if name.startswith(__name__):

            module = sys.modules[name]
            try:
                pprint(module.REGISTER)
                print()
            except AttributeError:
                pass

driverclasses = []
for name in set(sys.modules):
    if name.startswith(__name__+'.'):
        module = (sys.modules[name])
        try:
            if module.REGISTER:  # has drivers
                name = module.__name__.split('.')[1]
                # print(name)
                driverclasses.append(name)
        except AttributeError:
            pass
# pprint(driverclasses)

class ResourceManager(object):
    """ResourceManager as a context manager."""

    def __init__(self, rm):
        self.rm = visa.ResourceManager(rm)

    def __enter__(self):
        return(self.rm)

    def __exit__(self, exc_type, exc, exc_tb):
        del(self.rm)

class Instrument(object):
    """Instrument.open_resource as a context manager."""

    def __init__(self, rm, resource, *, read_termination=False, write_termination=False, **kwargs):
        if read_termination is False:
            read_termination = '\n'
        if write_termination is False:
            write_termination = '\r\n' if resource.startswith('ASRL') else '\n'

        self.inst = rm.open_resource(resource,
                                     read_termination=read_termination,
                                     write_termination=write_termination,
                                     **kwargs)
        # return self.inst

    def __enter__(self):
        # print(repr(self.inst))
        return(self.inst)

    def __exit__(self, *args):
        self.inst.close()

def visaaddresslist(listofaddresses, prefix="GPIB0", suffix="65535::INSTR"):
    """Generate full address for list of bus address.

    :param listofaddresses: list of integers which are instrument addresses
    :param prefix: prefix for the bus
    :param suffix: suffix for the bus
    :returns: list of fully formed address of instuments
    """
    instrs = []
    for inst in listofaddresses:
        instrs.append("{}::{}::{}".format(prefix, inst, suffix))
    return(instrs)

# 'TCPIP::192.168.1.113::INSTR'

def visaenumerate(rm, list_resources):
    """Try to discover IDNs for autodiscoverd instruments on a bus."""
    try:
        pool = {}
        # if rm.list_resources() is not None:
        for resource in list_resources:
            # print(resource)
            inst = rm.open_resource(
                resource,
                read_termination='\n',
                write_termination='\r\n' if resource.startswith('ASRL') else '\n'
            )
            try:
                query = "*IDN?"
                response = inst.query(query)
                # log.debug("Query {}:{} {}".format(resource, query, response))
                IDN = response

            except visa.VisaIOError:  # This was found with print(dir(visa))
                pass
                # traceback.print_exc(file=sys.stdout)
                IDN = "NONE"

            finally:
                pool[inst] = IDN
        return(pool)

    except ValueError:
        pass
    except OSError:
        pass
    except FileNotFoundError:
        print("File for SIM not found")

def driverdispatcher(pool, driverlist):
    """Pair IDN with corresponding driver class."""
    alloc = {}
    insts = 0
    for inst in pool:
        for driver in driverlist:
            if pool[inst].startswith(driver):
                alloc[insts] = driverlist[driver](inst)
                insts += 1
    return(alloc)

class Instruments(object):

    def __repr__(self):
        # return str(self.__dict__)
        buffer = []
        for key, value in instrument.__dict__.items():
            buffer.append('{}:'.format(key))
            for valu in value:
                buffer.append(' {}: {}'.format(valu, value[valu]))
        return str('\n'.join(buffer))

'''
print()
print()
with ResourceManager('@sim') as rm:
    print(rm.list_resources())
    print(rm)

    with Instrument(rm, 'USB::0x1111::0x2222::0x4444::INSTR') as inst:
        # print(inst)
        print(inst.query("*IDN?"))
        # print(inst.session)
    # print(inst.session)
# print(rm.session)
'''
Python
0
@@ -861,17 +861,16 @@ ame__):%0A -%0A @@ -4398,26 +4398,20 @@ alue in -instrument +self .__dict_
443774e9adbd59d13ac4f6c076a22f67a9a113f1
Fix courses liste
Rules/Courses.py
Rules/Courses.py
from ._shared import *

class Courses(Rule):
    """Handles shopping list"""
    def __init__(self, bot):
        self.bot = bot

    def __call__(self, serv, author, args):
        """Handles shopping list"""
        if len(args) < 3:
            raise InvalidArgs
        try:
            comment = " ".join(args[3:])
        except KeyError:
            comment = ""
        if args[1] == "acheter":
            query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
                     "comment LIKE %s")
            values = (args[2], '%'+comment+'%')
            try:
                bdd = self.bot.mysql_connect(serv)
                assert(bdd is not None)
            except AssertionError:
                return
            bdd_cursor = bdd.cursor()
            bdd_cursor.execute(query, values)
            row = bdd_cursor.fetchone()
            if row[0] > 0:
                self.bot.ans(serv, author,
                             "Item déjà présent dans la liste de courses")
                return
            query = ("INSERT INTO shopping(item, author, comment, date, " +
                     "bought) VALUES(%s, %s, %s, %s, 0)")
            values = (args[2], author, comment, datetime.datetime.now())
            bdd_cursor.execute(query, values)
            self.bot.ans(serv, author, "Item ajouté à la liste de courses.")
            bdd_cursor.close()
            bdd.close()

        elif args[1] == "annuler":
            query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
                     "comment LIKE %s")
            values = (args[2], '%'+comment+'%')
            try:
                bdd = self.bot.mysql_connect(serv)
                assert(bdd is not None)
            except AssertionError:
                return
            bdd_cursor = bdd.cursor()
            bdd_cursor.execute(query, values)
            row = bdd_cursor.fetchone()
            if row[0] > 1:
                self.bot.ans(serv, author,
                             "Requêtes trop ambiguë. Plusieurs entrées " +
                             "correspondent.")
                return
            query = ("DELETE FROM shopping WHERE item=%s AND " +
                     "comment LIKE %s")
            bdd_cursor.execute(query, values)
            self.bot.ans(serv, author, "Item supprimé de la liste de courses.")
            bdd_cursor.close()
            bdd.close()

        elif args[1] == "acheté":
            query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
                     "comment LIKE %s AND bought=0")
            values = (args[2], '%'+comment+'%')
            try:
                bdd = self.bot.mysql_connect(serv)
                assert(bdd is not None)
            except AssertionError:
                return
            bdd_cursor = bdd.cursor()
            bdd_cursor.execute(query, values)
            row = bdd_cursor.fetchone()
            if row[0] > 1:
                self.bot.ans(serv, author,
                             "Requêtes trop ambiguë. Plusieurs entrées " +
                             "correspondent.")
                return
            query = ("UPDATE shopping SET bought=1 WHERE item=%s AND " +
                     "comment LIKE %s AND bought=0")
            bdd_cursor.execute(query, values)
            self.bot.ans(serv, author, "Item marqué comme acheté.")
            bdd_cursor.close()
            bdd.close()

        elif args[1] == "liste":
            query = ("SELECT item, author, date FROM shopping WHERE bought=0 AND item LIKE %s")
            values = (args[2],)
            try:
                bdd = self.bot.mysql_connect(serv)
                assert(bdd is not None)
            except AssertionError:
                return
            bdd_cursor = bdd.cursor()
            bdd_cursor.execute(query, values)
            for row in bdd_cursor:
                serv.privmsg(author, '{item} (ajouté par {author} le {date})'.format(**row))
            self.bot.ans(serv, author, "Liste de courses envoyée en PM.")

        else:
            raise InvalidArgs

    def close(self):
        pass
Python
0.000019
@@ -231,16 +231,779 @@ s) %3C 3:%0A + if len(args) == 2 and args%5B1%5D == %22liste%22:%0A query = (%22SELECT item, author, date FROM shopping WHERE bought=0%22)%0A try:%0A bdd = self.bot.mysql_connect(serv)%0A assert(bdd is not None)%0A except AssertionError:%0A return%0A bdd_cursor = bdd.cursor()%0A bdd_cursor.execute(query)%0A serv.privmsg(author, 'Voici la liste de courses (%C3%A9galement consultable sur http://hackens.org/jarvis?do=courses)')%0A for row in bdd_cursor:%0A serv.privmsg(author, '%7B0%7D (ajout%C3%A9 par %7B1%7D le %7B2%7D)'.format(**row))%0A self.bot.ans(serv, author, %22Liste de courses envoy%C3%A9e en PM.%22)%0A else:%0A @@ -4218,622 +4218,8 @@ ()%0A%0A - elif args%5B1%5D == %22liste%22:%0A query = (%22SELECT item, author, date FROM shopping WHERE bought=0 AND item LIKE %25s%22)%0A values = (args%5B2%5D,)%0A try:%0A bdd = self.bot.mysql_connect(serv)%0A assert(bdd is not None)%0A except AssertionError:%0A return%0A bdd_cursor = bdd.cursor()%0A bdd_cursor.execute(query, values)%0A for row in bdd_cursor:%0A serv.privmsg(author, '%7Bitem%7D (ajout%C3%A9 par %7Bauthor%7D le %7Bdate%7D)'.format(**row))%0A self.bot.ans(serv, author, %22Liste de courses envoy%C3%A9e en PM.%22)%0A%0A
0f10ec94a7a62968aeafe10c55913e08bb0c7ce6
Fix Bug: Type Error
Scripts/Judge.py
Scripts/Judge.py
#--coding:utf-8--

import re
import json
import chardet

class Judge():
    def __init__(self, SurnameCharacter = 'Surname.Chinese.json', SurnamePinyin = 'Surname.Pinyin.json'):
        # self.SurnameCharacter = json.load(open(SurnameCharacter, 'rb'))
        self.SurnamePinyin = json.load(open(SurnamePinyin, 'rb'))
        self.Extractor = re.compile(r'^([\w]+)[ ]?.*?[ ]?(?:([\w]*)$)')
        self.NotChineseCharacter = re.compile(ur'^[^\u4e00-\u9fa5]*$')

    def SurnameJudge(self, Name):
        Name = Name.decode(chardet.detect(Name)['encoding'])
        if self.NotChineseCharacter.search(Name) == None:
            # True if Name contains Chinese Characters.
            return True
        Name = Name.lower()
        Surname = self.Extractor.findall(Name)[0]
        for element in Surname:
            try:
                if self.SurnamePinyin[element]:
                    return True
            except KeyError:
                pass
        return False

    def DescriptionJudge(self, Description):
        Description = Description.decode(chardet.detect(Description)['encoding'])
        if self.NotChineseCharacter.search(Description) == None:
            # Ture if Description contains Chinese Characters.
            return True
        return False
Python
0
@@ -549,17 +549,21 @@ ct(Name) -%5B +.get( 'encodin @@ -556,33 +556,42 @@ ).get('encoding' -%5D +, 'utf-8') )%0A if sel @@ -1095,17 +1095,21 @@ ription) -%5B +.get( 'encodin @@ -1110,17 +1110,26 @@ ncoding' -%5D +, 'utf-8') )%0A
16a54fc100874159da7212e35361e5c7110a7ab2
Add /start route for expeditions
kancolle/api/expedition.py
kancolle/api/expedition.py
"""Expedition blueprint.""" from flask import Blueprint from util import prepare_api_blueprint api_mission = Blueprint("api_mission", __name__) prepare_api_blueprint(api_mission)
Python
0
@@ -21,16 +21,33 @@ int.%22%22%22%0A +import datetime%0A%0A from fla @@ -65,17 +65,119 @@ lueprint -%0A +, g%0Afrom flask import request, abort%0Aimport time%0A%0Aimport util%0Afrom db import Expedition, Fleet, Admiral %0Afrom ut @@ -207,16 +207,24 @@ lueprint +, svdata %0A%0Aapi_mi @@ -300,8 +300,1568 @@ mission) +%0A%0A%0A@api_mission.route(%22/start%22, methods=%5B%22GET%22, %22POST%22%5D)%0Adef start_mission():%0A # This is mostly an internal method.%0A # This sets up the fleet for an expedition, sending them out.%0A%0A # First, get the required data from the request.%0A fleet_id = int(request.values.get(%22api_deck_id%22)) - 1%0A expedition_id = int(request.values.get(%22api_mission%22))%0A # There's an extra value, api_mission.%0A # No idea what it does.%0A%0A # Also, api_serial_cid%0A # This is presumably an anti-bot method by DMM.%0A # We don't have these, because we don't have the game source code (and never will)%0A # So we ignore this%0A%0A # Get the expedition requested by the ID.%0A expedition = Expedition.query.filter(Expedition.id == expedition_id).first_or_404()%0A%0A # Get the fleet requested by the ID.%0A try:%0A fleet = g.admiral.fleets%5Bfleet_id%5D%0A except IndexError:%0A abort(404)%0A return%0A%0A # Set the fleet up.%0A if fleet.expedition is not None:%0A # Nice try.%0A abort(400)%0A return%0A%0A # Set the expedition && time.%0A fleet.expedition = expedition%0A fleet.expedition_completed = time.time() + expedition.time_taken%0A%0A # Internal state updated, now to reflect this state on the rest of the app.%0A return svdata(%0A %7B%22api_complatetime%22: util.%0A millisecond_timestamp(datetime.datetime.now() + datetime.timedelta(seconds=expedition.time_taken)),%0A %22api_complatetime_str%22: datetime.datetime.fromtimestamp(fleet.expedition_completed / 1000)%0A .strftime('%25Y-%25m-%25d %25H:%25M:%25S')%0A %7D)%0A
e75dca8d0b5b1872c509d6f1fa4bc880743a7f45
fix crash with control layers
GlyphNote.glyphsPalette/Contents/Resources/plugin.py
GlyphNote.glyphsPalette/Contents/Resources/plugin.py
# encoding: utf-8

#######################################################################################
#
#	Palette Plugin
#
#	Read the docs:
#	https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Palette
#
#######################################################################################

from objc import IBOutlet, IBAction, nil
from GlyphsApp.plugins import *
from GlyphsApp import UPDATEINTERFACE

class GlyphNote (PalettePlugin):
	dialogName = "com.mekkablue.GlyphNote"
	dialog = objc.IBOutlet()
	noteTextField = objc.IBOutlet()

	def settings(self):
		self.name = Glyphs.localize({
			'en': u'Glyph Note',
			'de': u'Glyphennotiz'
		})

		# The minimum/maximum height of the view in pixels. 'max' must be bigger than 'min'.
		self.min = 30
		self.max = 700

		# Load .nib dialog (without .extension)
		self.loadNib('IBdialog', __file__)

	def start(self):
		Glyphs.addCallback(self.update, UPDATEINTERFACE)

	def __del__(self):
		Glyphs.removeCallback(self.update, UPDATEINTERFACE)

	@objc.IBAction
	def setNote_(self, sender):
		"""
		Sets the glyph note to whatever has been entered
		into the text field in the palette.
		"""
		# Extract font from sender
		thisFont = self.windowController().document().font

		# We’re in the Edit View
		if thisFont.currentTab:
			theseGlyphs = [l.parent for l in thisFont.selectedLayers]
		# We’re in the Font view
		else:
			theseGlyphs = [g for g in thisFont.selection]

		for thisGlyph in theseGlyphs:
			thisGlyph.note = self.noteTextField.stringValue()

	def update(self, sender):
		# only update if there is a window:
		if self.windowController():
			theseGlyphs = []
			thisFont = self.windowController().document().font

			# We’re in the Edit View
			if thisFont.currentTab:
				theseGlyphs = [l.parent for l in thisFont.selectedLayers]
			# We’re in the Font view
			else:
				theseGlyphs = [g for g in thisFont.selection]

			allNotes = []
			for thisGlyph in theseGlyphs:
				thisNote = thisGlyph.note
				if thisNote == "":
					thisNote = None
				allNotes.append(thisNote)
			numberOfDifferentNotes = len(set(allNotes))

			# update glyph note in palette:
			if numberOfDifferentNotes == 1:
				self.noteTextField.setPlaceholderString_(
					Glyphs.localize({
						'en': u'Empty glyph note%s.' % ("s" if len(theseGlyphs)>1 else ""),
						'de': u'Leere Glyphennotiz%s.' % ("en" if len(theseGlyphs)>1 else ""),
					}))
				thisGlyphNote = theseGlyphs[0].note
				if not thisGlyphNote:
					thisGlyphNote = ""
				self.noteTextField.setStringValue_(thisGlyphNote)
			elif numberOfDifferentNotes == 0:
				self.noteTextField.setPlaceholderString_(Glyphs.localize({
					'en': u'No glyph selected.',
					'de': u'Keine Glyphe ausgewählt.',
				}))
				self.noteTextField.setStringValue_("")
			else:
				self.noteTextField.setPlaceholderString_(Glyphs.localize({
					'en': u'Multiple values.',
					'de': u'Mehrere Werte.',
				}))
				self.noteTextField.setStringValue_("")

	def __file__(self):
		"""Please leave this method unchanged"""
		return __file__

	# Temporary Fix
	# Sort ID for compatibility with v919 to v976
	def setSortID_(self, id):
		pass

	def sortID(self):
		return 0
Python
0
@@ -1961,16 +1961,26 @@ Glyphs:%0A +%09%09%09%09try:%0A%09 %09%09%09%09this @@ -1997,24 +1997,25 @@ sGlyph.note%0A +%09 %09%09%09%09if thisN @@ -2026,24 +2026,25 @@ == %22%22:%0A%09%09%09%09%09 +%09 thisNote = N @@ -2047,16 +2047,17 @@ = None%0A +%09 %09%09%09%09allN @@ -2078,16 +2078,70 @@ isNote)%0A +%09%09%09%09except:%0A%09%09%09%09%09pass # can happen with control layers %0A%09%09%09numb @@ -2496,27 +2496,19 @@ e = -theseGlyphs%5B0%5D.note +allNotes%5B0%5D %0A%09%09%09
e7e244c3a9914bc2d562b008b00341cea31d2ef7
add boolean_labels
lcdblib/utils/utils.py
lcdblib/utils/utils.py
import os
import contextlib
from collections.abc import Iterable

@contextlib.contextmanager
def temp_env(env):
    """
    Context manager to temporarily set os.environ.
    """
    env = dict(env)
    orig = os.environ.copy()
    _env = {k: str(v) for k, v in env.items()}
    os.environ.update(_env)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(orig)

def flatten(iter):
    """
    Flatten an arbitrarily nested iterable whose innermost items are strings
    into a flat list of strings.
    """
    if isinstance(iter, dict):
        iter = iter.values()

    def gen():
        for item in iter:
            if isinstance(item, dict):
                item = item.values()
            if isinstance(item, Iterable) and not isinstance(item, str):
                yield from flatten(item)
            else:
                yield item
    return list(gen())

def test_flatten():
    assert sorted(flatten({
        'a': {
            'b': {
                'c': ['a','b','c'],
            },
        },
        'x': ['e', 'f', 'g'],
        'y': {
            'z': 'd'
        },
    })) == ['a', 'b', 'c', 'd', 'e', 'f', 'g']

def updatecopy(orig, update_with, keys=None, override=False):
    """
    Update a copy of a dictionary, with a bit more control than the built-in
    dict.update.

    Parameters
    -----------
    orig : dict
        Dict to update

    update_with : dict
        Dict with new values

    keys : list or None
        If not None, then only consider these keys in `update_with`.
        Otherwise consider all.

    override : bool
        If True, then this is similar to `dict.update`, except only those
        keys in `keys` will be considered. If False (default), then if a key
        exists in both `orig` and `update_with`, no updating will occur so
        `orig` will retain its original value.
    """
    d = orig.copy()
    if keys is None:
        keys = update_with.keys()
    for k in keys:
        if k in update_with:
            if k in d and not override:
                continue
            d[k] = update_with[k]
    return d
Python
0.998838
@@ -2120,8 +2120,943 @@ eturn d%0A +%0A%0Adef boolean_labels(names, idx, mapping=%7BTrue: 'AND', False: 'NOT'%7D,%0A strip='AND_'):%0A %22%22%22%0A Creates labels for boolean lists.%0A%0A For example:%0A%0A %3E%3E%3E names = %5B'exp1', 'exp2', 'exp3'%5D%0A %3E%3E%3E idx = %5BTrue, True, False%5D%0A %3E%3E%3E boolean_labels(names, idx)%0A 'exp1_AND_exp2_NOT_exp3'%0A%0A Parameters%0A ----------%0A%0A names : list%0A List of names to include in output%0A%0A idx : list%0A List of booleans, same size as %60names%60%0A%0A mapping : dict%0A Linking words to use for True and False%0A%0A strip : str%0A Strip this text off the beginning of labels.%0A%0A given a list of names and a same-size boolean, return strings like%0A%0A a_NOT_b_AND_c%0A%0A or%0A%0A a_AND_b_AND_c_NOT_d_AND_e%0A %22%22%22%0A s = %5B%5D%0A for i, (n, x) in enumerate(zip(names, idx)):%0A s.append(mapping%5Bx%5D + '_' + n)%0A s = '_'.join(s)%0A if s.startswith(strip):%0A s = s.replace(strip, '', 1)%0A return s%0A
bbc167ca39beb7a1df5192d61c0385ca34eb129a
Fix bug when id cannot be coerced to right type
src/cerberus_ac/apps.py
src/cerberus_ac/apps.py
# -*- coding: utf-8 -*- """App module providing the application settings class.""" import importlib from django.apps import AppConfig import appsettings as aps class CerberusACConfig(AppConfig): name = 'cerberus_ac' verbose_name = 'Cerberus AC' def ready(self): AppSettings.check() def _import(complete_path): module_name = '.'.join(complete_path.split('.')[:-1]) imported_module = importlib.import_module(name=module_name) function_or_class = getattr(imported_module, complete_path.split('.')[-1]) return function_or_class class Mapping(object): """Mapping class to map roles/resources names to their classes.""" def __init__(self, mapping): """ Initialization method. Args: mapping (dict): CERBERUS_MAPPING setting. """ self.mapping = mapping def class_from_name(self, name): """ Return the class given the name of a role/resource. Args: name (str): the type of role/resource. Returns: class: the corresponding the role/resource class. """ for k, v in self.mapping: if v['name'] == name: return _import(k) return None def instance_from_name_and_id(self, name, id): """ Return an instance given a role/resource type and an ID. Args: name (str): the type of role/resource. id (int): an integer or None. Returns: obj: the instance or a (name, id) tuple if not found. """ cls = self.class_from_name(name) if cls: if hasattr(cls, 'objects') and id: try: return cls.objects.get(id=id) except cls.DoesNotExist: return None return None from .models import Role try: return Role.objects.get(type=name, rid=id) except Role.DoesNotExist: return None def name_from_instance(self, obj): """ Return the type of a role/resource given a Python object. Args: obj (obj): a Python object. Returns: str: the role/resource type. """ for k, v in self.mapping: # FIXME: use complete path, not just the end if k.split('.')[-1] == obj.__class__.__name__: return v['name'] return obj.__class__.__name__ def user_classes(self): """Return the user-role classes.""" return [_import(k) for k, v in self.mapping if 'user' in v['attr'].split()] def group_classes(self): """Return the group-role classes.""" return [_import(k) for k, v in self.mapping if 'group' in v['attr'].split()] def role_types(self): """Return the role types.""" return [v['name'] for k, v in self.mapping if len({'user', 'group', 'role'} & set(v['attr'].split())) > 0] def role_classes(self): """Return the role classes.""" return [_import(k) for k, v in self.mapping if len({'user', 'group', 'role'} & set(v['attr'].split())) > 0] def resource_types(self): """Return the resource types.""" return [v['name'] for k, v in self.mapping if 'resource' in v['attr'].split()] def resource_classes(self): """Return the resource classes.""" return [_import(k) for k, v in self.mapping if 'resource' in v['attr'].split()] def check_mapping(name, value): """Check the value of given mapping setting.""" if not isinstance(value, tuple): raise ValueError('%s must be a tuple' % name) if not all(isinstance(o, tuple) for o in value): raise ValueError('%s must be a tuple of (key, value) tuples' % name) for k, v in value: if not isinstance(k, str): raise ValueError('Keys in %s must be str' % name) if not isinstance(v, dict): raise ValueError('Values in %s must be dict' % name) if set(v.keys()) != {'name', 'attr'}: raise ValueError('Values in %s must be dict ' 'with name and attr keys' % name) _ = [o[1] for o in value] if {x['name'] for x in _ if _.count(x['name']) > 1}: raise ValueError('Names in %s values must be unique' % name) class AppSettings(aps.AppSettings): """ Application settings class. 
Settings: - default_response (bool): - skip_implicit (bool): - log_access (bool): - log_privileges (bool): - log_hierarchy (bool): - mapping (tuple): - namespace (str): """ allow_update_own_privileges = aps.BoolSetting(default=False) default_response = aps.BoolSetting(default=False) skip_implicit = aps.BoolSetting(default=False) log_access = aps.BoolSetting(default=True) log_privileges = aps.BoolSetting(default=True) log_hierarchy = aps.BoolSetting(default=True) namespace = aps.StringSetting(default='') mapping = aps.Setting(checker=check_mapping, transformer=Mapping, default=()) class Meta: setting_prefix = 'CERBERUS_'
Python
0
@@ -1768,16 +1768,17 @@ except +( cls.Does @@ -1777,32 +1777,45 @@ cls.DoesNotExist +, ValueError) :%0A
684a7b8f972595722b664dfc002faaf63dba25fe
version bump
ubelt/__init__.py
ubelt/__init__.py
# -*- coding: utf-8 -*-
# flake8: noqa
"""
CommandLine:
    # Partially regenerate __init__.py
    python -c "import ubelt"
    python -c "import ubelt" --print-ubelt-init --dyn
    python -c "import ubelt" --update-ubelt-init --dyn

    # TODO: ensure this new way will work with multilevel modules
    python -c "import ubelt._internal as a; a.autogen_init('ubelt', dry=True)"

TODO:
    The following functions and classes are candidates to be ported from utool:
    * reload_class
    * inject_func_as_method
    * inject_func_as_property
    * parse_cfgstr3
    * accumulate
    * itertwo
    * iterwin
    * ParamInfo - move to dtool
    * embed
    * rsync
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys

__version__ = '0.0.34'

GLOBAL_MODULES = [
    'util_arg',
    'util_cache',
    'util_colors',
    'util_const',
    'util_decor',
    'util_dict',
    'util_download',
    'util_func',
    'util_format',
    'util_io',
    'util_list',
    'util_mixins',
    'util_path',
    'util_platform',
    'util_str',
    'util_stress',
    'util_time',
    'progiter',
    'meta',
]

__DYNAMIC__ = '--dyn' in sys.argv
if __DYNAMIC__:
    # If dynamic, imports can be autogenerated
    from ubelt._internal import dynamic_make_init
    dynamic_make_init.dynamic_import(__name__, GLOBAL_MODULES)
    _DOELSE = False
else:
    # Run the autogenerated imports
    _DOELSE = True

if _DOELSE:
    # <AUTOGEN_INIT>
    from ubelt import util_arg
    from ubelt import util_cache
    from ubelt import util_colors
    from ubelt import util_const
    from ubelt import util_decor
    from ubelt import util_dict
    from ubelt import util_download
    from ubelt import util_func
    from ubelt import util_format
    from ubelt import util_io
    from ubelt import util_list
    from ubelt import util_mixins
    from ubelt import util_path
    from ubelt import util_platform
    from ubelt import util_str
    from ubelt import util_stress
    from ubelt import util_time
    from ubelt import progiter
    from ubelt import meta
    from ubelt.util_arg import (argflag, argval,)
    from ubelt.util_cache import (Cacher,)
    from ubelt.util_colors import (color_text, highlight_code,)
    from ubelt.util_const import (NoParam,)
    from ubelt.util_decor import (memoize,)
    from ubelt.util_dict import (AutoDict, AutoOrderedDict, ddict, dict_hist,
                                 dict_subset, dict_take, find_duplicates,
                                 group_items, invert_dict, map_keys, map_vals,
                                 odict,)
    from ubelt.util_download import (download, grabdata,)
    from ubelt.util_func import (identity,)
    from ubelt.util_format import (repr2,)
    from ubelt.util_io import (delete, readfrom, touch, writeto,)
    from ubelt.util_list import (argsort, boolmask, chunks, compress, flatten,
                                 iterable, take, unique, unique_flags,)
    from ubelt.util_mixins import (NiceRepr,)
    from ubelt.util_path import (augpath, compressuser, truepath, userhome,)
    from ubelt.util_platform import (DARWIN, LINUX, POSIX, PY2, PY3, WIN32,
                                     cmd, editfile, ensure_app_cache_dir,
                                     ensure_app_resource_dir, ensuredir,
                                     get_app_cache_dir, get_app_resource_dir,
                                     platform_cache_dir, platform_resource_dir,
                                     startfile,)
    from ubelt.util_str import (CaptureStdout, codeblock, ensure_unicode,
                                hzcat, indent,)
    from ubelt.util_stress import (find_nth_prime,)
    from ubelt.util_time import (Timer, Timerit, timestamp,)
    from ubelt.progiter import (ProgIter,)
    # </AUTOGEN_INIT>
del _DOELSE
Python
0.000001
@@ -779,9 +779,9 @@ .0.3 -4 +5 '%0A%0AG
ae03b5bf4c0ff2c5104bb7b7826adc135528788d
Change ProductMedia url property to return correctly
shoop/core/models/product_media.py
shoop/core/models/product_media.py
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import with_statement

from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails.files import get_thumbnailer
from enumfields import Enum, EnumIntegerField
from filer.fields.file import FilerFileField
from parler.models import TranslatableModel, TranslatedFields

from shoop.core.fields import InternalIdentifierField


class ProductMediaKind(Enum):
    GENERIC_FILE = 1
    IMAGE = 2
    DOCUMENTATION = 3
    SAMPLE = 4

    class Labels:
        GENERIC_FILE = _('file')
        IMAGE = _('image')
        DOCUMENTATION = _('documentation')
        SAMPLE = _('sample')


@python_2_unicode_compatible
class ProductMedia(TranslatableModel):
    identifier = InternalIdentifierField(unique=True)
    product = models.ForeignKey("Product", related_name="media", on_delete=models.CASCADE)
    shops = models.ManyToManyField("Shop", related_name="product_media")
    kind = EnumIntegerField(
        ProductMediaKind, db_index=True, default=ProductMediaKind.GENERIC_FILE,
        verbose_name=_('kind')
    )
    file = FilerFileField(blank=True, null=True, verbose_name=_('file'), on_delete=models.CASCADE)
    external_url = models.URLField(
        blank=True, null=True, verbose_name=u'URL',
        help_text=_("Enter URL to external file. If this field is filled, the selected media doesn't apply.")
    )
    ordering = models.IntegerField(default=0)

    # Status
    enabled = models.BooleanField(db_index=True, default=True, verbose_name=_("enabled"))
    public = models.BooleanField(default=True, blank=True, verbose_name=_('public (shown on product page)'))
    purchased = models.BooleanField(
        default=False, blank=True, verbose_name=_('purchased (shown for finished purchases)')
    )

    translations = TranslatedFields(
        title=models.CharField(blank=True, max_length=128, verbose_name=_('title')),
        description=models.TextField(blank=True, verbose_name=_('description')),
    )

    class Meta:
        verbose_name = _('product attachment')
        verbose_name_plural = _('product attachments')
        ordering = ["ordering", ]

    def __str__(self):  # pragma: no cover
        return self.effective_title

    @property
    def effective_title(self):
        title = self.safe_translation_getter("title")
        if title:
            return title

        if self.file_id:
            return self.file.label

        if self.external_url:
            return self.external_url

        return _('attachment')

    @property
    def url(self):
        if not self.public:
            raise ValueError("`get_effective_url()` may not be used on non-public media")
        if self.file_id:
            return self.file.url
        else:
            return self.external_url

    @property
    def easy_thumbnails_thumbnailer(self):
        """
        Get `Thumbnailer` instance.

        Will return `None` if file cannot be thumbnailed.

        :rtype:easy_thumbnails.files.Thumbnailer|None
        """
        if not self.file_id:
            return None

        if self.kind != ProductMediaKind.IMAGE:
            return None

        return get_thumbnailer(self.file)

    def get_thumbnail(self, **kwargs):
        """
        Get thumbnail for image

        This will return `None` if there is no file or kind is not `ProductMediaKind.IMAGE`

        :rtype: easy_thumbnails.files.ThumbnailFile|None
        """
        kwargs.setdefault("size", (64, 64))
        kwargs.setdefault("crop", True)  # sane defaults
        kwargs.setdefault("upscale", True)  # sane defaults

        if kwargs["size"] is (0, 0):
            return None

        thumbnailer = self.easy_thumbnails_thumbnailer

        if not thumbnailer:
            return None

        return thumbnailer.get_thumbnail(thumbnail_options=kwargs)
Python
0
@@ -2857,23 +2857,25 @@ if -not self. -public +external_url :%0A @@ -2889,85 +2889,31 @@ r -aise ValueError(%22%60get_effective_url()%60 may not be used on non-public media%22)%0A +eturn self.external_url %0A @@ -2921,35 +2921,32 @@ if self.file -_id :%0A re @@ -2976,50 +2976,17 @@ -else:%0A return self.external_url +return %22%22 %0A%0A
49d7ba5c4ddf858129bbdd3dea1c968aff8345c1
Update hackerland_radio_transmitters.py
python/hackerrank/practice/hackerland_radio_transmitters.py
python/hackerrank/practice/hackerland_radio_transmitters.py
n, k = map(int, input().split())
arr = list(map(int, input().split()))
# arr=[1,7,8,15,16,18,19,21,23]
# n=9
# k=2
# sorted_arr = sorted(arr)
sorted_arr = []
coverage = (2 * k)
my_set = set()
for i in arr:
    my_set.add(i)
for i in my_set:
    sorted_arr.append(i)

# 7 2 4 6 5 9 12 11 - input representation of indexes
# 1 2 3 4 5 6 7 8 9 10 11 12 -
# - 2 - 3
# instead of binary search get next big element t
# print(sorted_arr)

def binary_search(l, r, x):
    while l <= r:
        mid = l + (r - l) // 2
        # print(mid, '---', n)
        if mid==0:
            return -2
        if sorted_arr[mid] == x:
            return mid + 1
        elif sorted_arr[mid] < x and sorted_arr[mid + 1] > x:
            return mid + 1
        elif sorted_arr[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return -2

count = 1
# for i in sorted_arr:
index = 0
while index <= n - 1:
    next_range = binary_search(0, len(sorted_arr) - 2, sorted_arr[index] + coverage)
    # print(index, '---', sorted_arr[index], ' -- ', next_range)
    if next_range == -2:
        break
    else:
        index = next_range
        count += 1
print(count)

# while True:
#     # print("current index:{}".format(index))
#     index += coverage
#     count += 1
#     nextrange = get_next_range(index)
#     # print("next range:{}".format(nextrange))
#     if nextrange < 0:
#         # if index < n-1:
#         # print("coming here")
#         # count += 1
#         break
# print(count)
Python
0.000001
@@ -549,16 +549,17 @@ l %3C= r: + %0A
b5a0c3424b83c779c80e94b3ccfd795eb0e23642
FIX show only paid invoices in together participant barometer
crowdfunding_compassion/controllers/main.py
crowdfunding_compassion/controllers/main.py
##############################################################################
#
#    Copyright (C) 2020 Compassion CH (http://www.compassion.ch)
#    @author: Quentin Gigon
#
#    The licence is in the file __manifest__.py
#
##############################################################################

from odoo.http import request, route

from odoo.addons.website_event_compassion.controllers.events_controller import (
    EventsController,
)


class CrowdFundingWebsite(EventsController):
    @route(["/my_account"], type="http", auth="user", website=True)
    def my_account(self, form_id=None, **kw):
        """ Inject data for forms. """
        values = {}
        partner = request.env.user.partner_id
        participations = request.env["crowdfunding.participant"].search(
            [
                ("partner_id", "=", partner.id),
                ("project_id.project_owner_id", "!=", partner.id),
            ]
        )
        donations = participations.mapped("invoice_line_ids").filtered(
            lambda l: l.state != "cancel")

        owned_projects = request.env["crowdfunding.project"].search(
            [("project_owner_id", "=", partner.id)]
        )
        kw["form_model_key"] = "cms.form.partner.coordinates"
        coordinates_form = self.get_form("res.partner", partner.id, **kw)
        if form_id is None or form_id == coordinates_form.form_id:
            coordinates_form.form_process()

        values.update(
            {
                "partner": partner,
                "owned_projects": owned_projects,
                "participating_projects": participations,
                "donations": donations,
                "coordinates_form": coordinates_form,
            }
        )
        result = request.render(
            "crowdfunding_compassion.myaccount_crowdfunding_view_template", values
        )
        return result

    @route(["/my_account/project/update/"], type="http", auth="user", website=True)
    def my_account_projects_update(self, project_id=None, **kw):
        project = request.env["crowdfunding.project"].search([("id", "=", project_id)])
        kw["form_model_key"] = "cms.form.crowdfunding.project.update"
        project_update_form = self.get_form("crowdfunding.project", project.id, **kw)
        project_update_form.form_process()
        values = {
            "form": project_update_form,
        }
        if project_update_form.form_success:
            result = request.redirect("/my_account")
        else:
            result = request.render(
                "crowdfunding_compassion.crowdfunding_form_template", values
            )
        return result

    @route(
        ["/my_account/participation/update/"], type="http", auth="user", website=True
    )
    def my_account_participants_update(self, participant_id=None, **kw):
        participant = request.env["crowdfunding.participant"].search(
            [("id", "=", participant_id)]
        )
        kw["form_model_key"] = "cms.form.crowdfunding.participant.update"
        participant_update_form = self.get_form(
            "crowdfunding.participant", participant.id, **kw
        )
        participant_update_form.form_process()
        values = {
            "form": participant_update_form,
        }
        if participant_update_form.form_success:
            result = request.redirect("/my_account")
        else:
            result = request.render(
                "crowdfunding_compassion.crowdfunding_form_template", values
            )
        return result
Python
0
@@ -1039,18 +1039,16 @@ ate -!= %22cancel +== %22paid %22)%0A%0A
62fa3e075284b275c5b65a9d3e7a80c95af5d4cb
Fix bug that could cause crash when opening the GMN home page
d1_mn_generic/src/gmn/app/views/internal.py
d1_mn_generic/src/gmn/app/views/internal.py
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
#   Copyright 2009-2016 DataONE
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""Views for GMN web pages

Functionality that is not part of the DataONE Member Node API yet is designed
to be available when the MN is in production.
"""

# Stdlib.
import ctypes
import datetime
import os
import platform

# Django.
from django.conf import settings
from django.db.models import Avg, Count, Sum
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
import django

# DataONE APIs.
import d1_common.const
import d1_common.date_time
import d1_common.types.dataoneTypes
import d1_common.types.exceptions

# App.
import app.auth
import app.db_filter
import app.event_log
import app.models
import app.psycopg_adapter
import app.sysmeta_validate
import app.util
import app.views.view_asserts
import app.views.view_util


def home(request):
  """Home page. Root of web server should redirect to here."""
  if request.path.endswith('/'):
    return HttpResponseRedirect(request.path[:-1])

  gmn_version = app.__version__
  django_version = ', '.join(map(str, django.VERSION))

  n_science_objects = '{:,}'.format(app.models.ScienceObject.objects.count())

  avg_sci_data_size_bytes = app.models.ScienceObject.objects\
    .aggregate(Avg('size'))['size__avg'] or 0
  avg_sci_data_size = '{:,}'.format(int(avg_sci_data_size_bytes))

  n_objects_by_format = app.models.ScienceObject.objects.values(
    'format', 'format__format'
  ).annotate(dcount=Count('format'))

  n_connections_total = '{:,}'.format(app.models.EventLog.objects.count())

  n_connections_in_last_hour = '{:,}'.format(
    app.models.EventLog.objects.filter(
      timestamp__gte=datetime.datetime.utcnow() - datetime.timedelta(hours=1)
    ).count()
  )

  n_unique_subjects = '{:,}'.format(app.models.Subject.objects.count())

  n_storage_used = app.models.ScienceObject.objects\
    .aggregate(Sum('size'))['size__sum'] or 0
  n_storage_free = _get_free_space(settings.MEDIA_ROOT)
  storage_space = u'{} GiB / {} GiB'.format(
    n_storage_used / 1024**3, n_storage_free / 1024**3
  )

  n_permissions = '{:,}'.format(app.models.Permission.objects.count())

  server_time = datetime.datetime.utcnow()

  node_identifier = settings.NODE_IDENTIFIER
  node_name = settings.NODE_NAME
  node_description = settings.NODE_DESCRIPTION

  return render_to_response(
    'home.html', locals(),
    content_type=d1_common.const.CONTENT_TYPE_XHTML
  )


def _get_free_space(folder):
  """Return folder/drive free space (in bytes)
  """
  if platform.system() == 'Windows':
    free_bytes = ctypes.c_ulonglong(0)
    ctypes.windll.kernel32.GetDiskFreeSpaceExW(
      ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes)
    )
    return free_bytes.value
  else:
    return os.statvfs(folder).f_bfree * os.statvfs(folder).f_frsize
Python
0
@@ -2699,18 +2699,25 @@ ngs. -MEDIA_ROOT +OBJECT_STORE_PATH )%0A
4ea4247f531c78e3d26f135c5b85bbe4b5f2ca5e
Reorder imports
coalib/tests/parsing/ConfParserTest.py
coalib/tests/parsing/ConfParserTest.py
from collections import OrderedDict
import os

import sys

sys.path.insert(0, ".")
from coalib.misc.Compatability import FileNotFoundError
from coalib.parsing.ConfParser import ConfParser
from coalib.settings.Section import Section
import unittest
import tempfile


class ConfParserTest(unittest.TestCase):
    example_file = """to be ignored
a_default, another = val
TEST = tobeignored # do you know that thats a comment
test = push
t =
[MakeFiles]
j , another = a
multiline
value
# just a omment
# just a omment
nokey. = value
default.test = content
makefiles.lastone = val
"""

    def setUp(self):
        self.tempdir = tempfile.gettempdir()
        self.file = os.path.join(self.tempdir, ".coafile")
        self.nonexistentfile = os.path.join(self.tempdir, "e81k7bd98t")
        with open(self.file, "w") as filehandler:
            filehandler.write(self.example_file)

        self.uut = ConfParser()
        try:
            os.remove(self.nonexistentfile)
        except FileNotFoundError:
            pass

    def tearDown(self):
        os.remove(self.file)

    def test_parse(self):
        default_should = OrderedDict([
            ('a_default', 'val'),
            ('another', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', '')
        ])
        makefiles_should = OrderedDict([
            ('j', 'a\nmultiline\nvalue'),
            ('another', 'a\nmultiline\nvalue'),
            ('comment1', '# just a omment'),
            ('comment2', '# just a omment'),
            ('lastone', 'val'),
            ('comment3', ''),
            ('a_default', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', '')
        ])

        self.assertRaises(FileNotFoundError,
                          self.uut.parse,
                          self.nonexistentfile)
        sections = self.uut.parse(self.file)
        self.assertNotEqual(self.uut.parse(self.file, True), sections)

        key, val = sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'default')
        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, default_should)

        key, val = sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'makefiles')
        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, makefiles_should)

        self.assertEqual(val["comment1"].key, "comment1")

        self.assertRaises(IndexError,
                          self.uut.get_section,
                          "inexistent section")

    def test_config_directory(self):
        self.uut.parse(self.tempdir)


if __name__ == '__main__':
    unittest.main(verbosity=2)
Python
0
@@ -39,17 +39,16 @@ port os%0A -%0A import s @@ -49,16 +49,48 @@ port sys +%0Aimport tempfile%0Aimport unittest %0A%0Asys.pa @@ -260,40 +260,8 @@ ion%0A -import unittest%0Aimport tempfile%0A %0A%0Acl
008b466b561a923c1cb0b19a4788e0f5bb540dac
Version bump for Pandas connector.
uproot/version.py
uproot/version.py
#!/usr/bin/env python

# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import re

__version__ = "2.2.0"
version = __version__
version_info = tuple(re.split(r"[-\.]", __version__))

del re
Python
0
@@ -1586,17 +1586,17 @@ __ = %222. -2 +3 .0%22%0Avers
7cee7de43fc77e362cf19a9484f243d66e034f59
Refactor from_json
upstream/chunk.py
upstream/chunk.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from upstream.exc import ChunkError


class Chunk(object):
    def __init__(self, filehash=None, decryptkey=None, filename=None,
                 filepath=None):
        """ Stores information about an encryted chunk. Allows for
        format conversions.

        :param filehash: The hash for a file.
        :param decryptkey: The decryption key for a file.
        :param filename: Name of the file(destroyed on encryption).
        :param filepath: Location of the file.
        """
        self.filehash = filehash
        self.decryptkey = decryptkey
        self.filename = filename
        self.filepath = filepath

    def from_uri(self, uri):
        """

        :param uri: URI as a string
        :return:
        """
        try:
            self.filehash, self.decryptkey = str(uri).split("?key=")
        except:
            raise ChunkError("%s not format of <hash>?key=<key>")

    def load_json(self, raw):
        self.raw_json = raw
        data = json.loads(raw)
        self.filehash = data['filehash']
        self.decryptkey = data['key']
        return self

    # Gets
    def get_uri(self):
        if not self.has_hashes():
            return
        return self.filehash + "?key=" + self.decryptkey

    def get_hashes(self):
        if not self.has_hashes():
            return
        return self.filehash, self.decryptkey

    def get_json(self):
        if not self.has_hashes():
            return
        return json.dumps(
            {
                "key": self.decryptkey,
                "filehash": self.filehash,
            }
        )

    def has_hashes(self):
        return self.filehash and self.decryptkey

    # Extra metadata
    def set_filename(self, filename):
        self.filename = filename

    def set_filepath(self, filepath):
        self.filepath = filepath

    def get_filename(self):
        return self.filename

    def get_filepath(self):
        return self.filepath
Python
0.000009
@@ -959,20 +959,20 @@ def -load +from _json(se @@ -975,19 +975,24 @@ n(self, -raw +json_str ):%0A @@ -1003,22 +1003,27 @@ elf. -raw_json = raw +json_str = json_str %0A @@ -1049,11 +1049,16 @@ ads( -raw +json_str )%0A @@ -1133,36 +1133,16 @@ a%5B'key'%5D -%0A return self %0A%0A #
1a089c634bc608e5862ce549ed598e50c02b8d09
Bump version
users/__init__.py
users/__init__.py
__version__ = '0.1.2'
Python
0
@@ -16,7 +16,7 @@ 0.1. -2 +3 '%0A
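This one-line record makes the hunk arithmetic easy to check by hand: '@@ -16,7 +16,7 @@' addresses the seven characters starting at character 16 (1-based) of the old file, the context tokens 0.1. and '%0A frame the single-character edit, and -2 +3 bumps the version from 0.1.2 to 0.1.3. A quick sanity check in Python, assuming the percent-encoding and offset convention described earlier:

old = "__version__ = '0.1.2'\n"
# Characters 16..22 (1-based) are the hunk's seven-character window.
assert old[15:22] == "0.1.2'\n"
new = old[:19] + "3" + old[20:]  # delete the '2', insert a '3'
assert new == "__version__ = '0.1.3'\n"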
a070334b7088bbb4f364d2a2f0c9b6da8d652609
add spitzer m81 project dir
util/2mass_get.py
util/2mass_get.py
#! /usr/bin/env python
import re
import sys
import socket
import time
from urllib2 import urlopen, URLError
from urllib import urlencode
from urlparse import urlparse, urljoin
from os.path import basename
from optparse import OptionParser
from xml.dom import minidom

from astrometry.util.starutil_numpy import *
from astrometry.util.file import *
from astrometry.util.run_command import run_command

# args: radius in deg
# returns list of local filenames to which images were written
def get_2mass_images(ra, dec, radius=1, basefn=None):

	formvals = {
		'type': 'at', # Atlas images (not quicklook)
		'INTERSECT': 'OVERLAPS', # Images overlapping region
		'asky': 'asky', # All-sky release
		'POS': '%g %g' % (ra,dec), # RA,Dec position
		'SIZE': '%g' % radius, # Search radius (deg)
		# scan, coadd, hem, date
		'band': 'A', # All bands (J,H,K_s)
		}

	if basefn is None:
		#basefn = '2mass-%g-%g-' % (ra,dec)
		basefn = '2mass-'

	queryurl = 'http://irsa.ipac.caltech.edu/cgi-bin/2MASS/IM/nph-im_inv'

	print 'submitting form values:'
	for k,v in formvals.items():
		print ' ',k,'=',v
	print 'encoded as:'
	print ' ' + urlencode(formvals)
	print
	print 'waiting for results...'

	socket.setdefaulttimeout(300)

	f = urlopen(queryurl, urlencode(formvals))
	doc = f.read()
	write_file(doc, 'res.html')

	m = re.search(r'<base href="(.*)" />', doc)
	if not m:
		raise 'no results page: server output written to file'
	resurl = m.group(1)
	print 'result base url', resurl

	resurl += 'found.xml'
	print 'requesting result url', resurl
	res = urlopen(resurl)
	print
	doc = res.read()
	write_file(doc, 'res2.xml')

	xmldoc = minidom.parseString(doc)
	imgs = xmldoc.getElementsByTagName('TR')
	if len(imgs) == 0:
		print 'no <TR> tags found'
		return None

	fns = []

	for imgtag in imgs:
		print
		if not imgtag.hasChildNodes():
			print '<TR> tag has no child node:', imgtag
			return None
		#print 'Image:', imgtag
		tds = imgtag.getElementsByTagName('TD')
		if not len(tds):
			print '<TR> tag has no <TD> child nodes:', imgtag
			return None
		print 'Image:', tds[0].firstChild.data
		print ' URL:', tds[1].firstChild.data
		print ' Band:', tds[11].firstChild.data
		print ' dataset (asky):', tds[18].firstChild.data
		print ' date (yymmdd):', tds[22].firstChild.data
		print ' hem (n/s):', tds[23].firstChild.data
		print ' scan:', tds[24].firstChild.data
		print ' image num:', tds[25].firstChild.data

		url = tds[1].firstChild.data
		band = tds[11].firstChild.data
		dataset = tds[18].firstChild.data
		date = tds[22].firstChild.data
		hem = tds[23].firstChild.data
		scan = int(tds[24].firstChild.data)
		imgnum = int(tds[25].firstChild.data)

		fn = basefn + '%s_%s_%s%s%03i%04i.fits' % (band, dataset, date, hem, scan, imgnum)

		# -t: num retries
		cmd = "wget -t 1 -c '%s' -O %s" % (url, fn)
		print 'Running command:', cmd
		rtn = os.system(cmd)
		# ctrl-C caught: quit, or continue?
		if os.WIFSIGNALED(rtn):
			print 'wget exited with signal', os.WTERMSIG(rtn)
			break
		if os.WIFEXITED(rtn) and os.WEXITSTATUS(rtn):
			# returned non-zero.
			print 'wget exited with value', os.WEXITSTATUS(rtn)
			continue

		fns.append(fn)
	return fns

if __name__ == '__main__':
	parser = OptionParser(usage='%prog [options] <ra> <dec>')
	parser.add_option('-r', dest='radius', type='float', help='Search radius, in deg (default 1 deg)')
	parser.add_option('-b', dest='basefn', help='Base filename (default: 2mass-)')
	parser.set_defaults(radius=1.0)

	(opt, args) = parser.parse_args()
	if len(args) != 2:
		parser.print_help()
		print
		print 'Got extra arguments:', args
		sys.exit(-1)

	# parse RA,Dec.
	ra = float(args[0])
	dec = float(args[1])

	# ugh!
	opts = {}
	for k in ['radius', 'basefn']:
		opts[k] = getattr(opt, k)

	get_2mass_images(ra, dec, **opts)
Python
0
@@ -531,16 +531,26 @@ efn=None +, band='A' ):%0A%0A%09for @@ -832,34 +832,50 @@ d': -'A', # All bands ( +band, # 'A'=All bands; alternatives: J,H,K -_s) %0A%09%09%7D @@ -3451,24 +3451,105 @@ : 2mass-)')%0A +%09parser.add_option('-B', dest='band', help='Band (J, H, K); default: all three')%0A %09parser.set_ @@ -3567,16 +3567,26 @@ dius=1.0 +, band='A' )%0A%0A%09(opt @@ -3827,16 +3827,24 @@ 'basefn' +, 'band' %5D:%0A%09%09opt
14f7767ed95346ea89b13ddc0dcb6369292f6105
Fix initial view creation with an empty db.
been/couch.py
been/couch.py
from hashlib import sha1
import couchdb
from core import Store

# Add time serialization to couchdb's json repertoire.
import json
import time
import calendar

class TimeEncoder(json.JSONEncoder):
    def default(self, obj):
        if type(obj) is time.struct_time:
            return calendar.timegm(obj)
        else:
            return json.JSONEncoder.default(self, obj)

couchdb.json.use(
    decode=json.JSONDecoder().decode,
    encode=TimeEncoder().encode)

class CouchStore(Store):
    def load(self):
        self.server = couchdb.client.Server()
        db_name = self.config.get('db_name', 'activity')
        if not db_name in self.server:
            self.server.create(db_name)
        self.db = self.server[db_name]
        self.init_views()
        return self

    def init_views(self):
        doc = self.db.get('_design/activity', {})
        doc.update({
            "language": "javascript",
            "views": {
                "sources": {
                    "map": "function(doc) { if (doc.type == 'source') { emit(doc._id, doc) } }"
                },
                "events": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.timestamp, doc) } }"
                },
                "events-by-source": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.source, doc) } }"
                },
                "events-by-source-count": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.source, doc) } }",
                    "reduce": "_count"
                },
                "events-by-slug": {
                    "map": "function(doc) { if (doc.type == 'event' && doc.slug) { emit(doc.slug, doc) } }"
                }
            }
        })
        self.db[doc.id] = doc

    def get_sources(self):
        return dict((row.key, row.value) for row in self.db.view('activity/sources'))

    def store_source(self, source):
        source_data = source.config.copy()
        source_data['type'] = 'source'
        self.db[source.source_id] = source_data

    def store_events(self, events):
        ids = {}
        for event in events:
            event.setdefault('_id', sha1(event['summary'].encode('utf-8')+str(event['timestamp'])).hexdigest())
            ids[event['_id']] = event
            event['type'] = 'event'
        tries = 3
        while ids and tries:
            tries -= 1
            result = self.db.update(ids.values())
            for success, _id, info in result:
                if success:
                    del ids[_id]
                else:
                    ids[_id]['_rev'] = self.db[_id]['_rev']
        if ids:
            raise couchdb.ResourceConflict

    def store_update(self, source, events):
        for event in events:
            event['kind'] = source.kind
            event['source'] = source.source_id
        self.store_events(events)
        self.db[source.source_id] = source.config

    def events(self, count=100):
        return (event.value for event in self.db.view('activity/events', limit=count, descending=True))

    def events_by_slug(self, slug):
        return (event.value for event in self.db.view('activity/events-by-slug')[slug])

    def events_by_source_count(self):
        return dict((count.key, count.value) for count in self.db.view('activity/events-by-source-count', group_level=1))

    def empty(self):
        for event in self.db.view('activity/events'):
            self.db.delete(event.value)
        for row in self.db.view('activity/sources'):
            source = row.value
            source['since'] = {}
            self.db[row.id] = source
Python
0
@@ -819,27 +819,38 @@ -doc = self.db.get(' +views = %7B%0A %22_id%22: %22 _des @@ -865,36 +865,12 @@ vity -', %7B%7D) +%22, %0A - doc.update(%7B%0A @@ -902,16 +902,17 @@ cript%22,%0A + @@ -940,16 +940,17 @@ + %22sources @@ -950,24 +950,25 @@ sources%22: %7B%0A + @@ -1042,32 +1042,33 @@ ._id, doc) %7D %7D%22%0A + %7D @@ -1080,24 +1080,25 @@ + %22events%22: %7B%0A @@ -1093,24 +1093,25 @@ %22events%22: %7B%0A + @@ -1205,35 +1205,37 @@ %0A + %7D,%0A + %22 @@ -1251,24 +1251,25 @@ -source%22: %7B%0A + @@ -1360,35 +1360,37 @@ %0A + %7D,%0A + %22 @@ -1412,24 +1412,25 @@ e-count%22: %7B%0A + @@ -1534,16 +1534,17 @@ + %22reduce%22 @@ -1554,16 +1554,17 @@ _count%22%0A + @@ -1580,32 +1580,33 @@ %0A + %22events-by-slug%22 @@ -1605,24 +1605,25 @@ by-slug%22: %7B%0A + @@ -1724,34 +1724,36 @@ %0A + %7D%0A + %7D%0A @@ -1758,25 +1758,24 @@ %7D -) %0A self.db @@ -1766,24 +1766,30 @@ %0A + doc = self.db %5Bdoc.id%5D @@ -1784,15 +1784,85 @@ f.db -%5Bdoc.id +.get(views%5B'_id'%5D, %7B%7D)%0A doc.update(views)%0A self.db%5Bviews%5B'_id'%5D %5D =
2cc15f71d00eb226cc5072cd28cc7eb134dedd52
correct typo error in generation script.
test/scripts/make_snp_deletions.py
test/scripts/make_snp_deletions.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# base import
import argparse
import sys
import os

# specific import
import random
from collections import defaultdict

# log import
import logging


# logger configuration
def conf_logger():
    """ Set configuraion of root logger """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter('%(levelname)s : %(message)s')

    steam_handler = logging.StreamHandler()
    steam_handler.setLevel(logging.DEBUG)
    steam_handler.setFormatter(formatter)

    logger.addHandler(steam_handler)


# argparse control argument function
def file_exist(filename):
    """ Check if file exist """
    if not os.path.isfile(filename):
        raise argparse.ArgumentTypeError("we can't access to %s file" % filename)
    return str(filename)


def unsigned_int(numbre):
    """ Check if numbre is a unsigned integer"""
    inumber = int(numbre)
    if numbre < 0:
        raise argparse.ArgumentTypeError("%s isn't a positive int value" % numbre)
    return inumber


def file_with_extension_exist(filename):
    """ Check if file with vde or fasta doesn't exist """
    for ext in ["eva", "fasta"]:
        if os.path.isfile(str(filename)+"."+str(ext)):
            raise argparse.ArgumentTypeError(
                "we need %s.%s file not exist" % str(filename), str(ext))
    return str(filename)


# user argument check
def check_interval(first, second):
    """ If intervale isn't valid generate a warning and return the good
    interval"""
    if first > second:
        logging.getLogger().warning(
            "[%d, %d] isn't a valid interval we use [%d, %d]" %
            (first, second, second, first))
        return second, first
    if first == second:
        logging.getLogger().warning(
            "[%d, %d] interval content just one number" % (first, second))
        return first, second
    return first, second


# specific function
# def pos_near(base_list, pos, min_dist):
#     """ Check if pos isn't near a pos in base_list """
#     for base_pos in base_list:
#         if abs(base_pos - pos) < min_dist:
#             return True
#     return False


def generate_snp_del(seq, pos_del, pos_snp, del_size):
    """ Create a SNP and a deletion """
    nuc = ['A', 'C', 'T', 'G']
    nuc.remove(seq[pos_snp])
    seq = seq[:pos_snp] + nuc[random.randint(0, 2)] + seq[pos_snp+1:]
    seq = seq[:pos_del] + seq[pos_del+del_size:]
    return seq


def write_vde(file_handler, pos, type, comment):
    """ Write information in vde format """
    file_handler.write("%s,%s,%s\n" % (pos, type, comment))


def write_seq(file_handler, comment, seq):
    """ Write information in fasta format """
    file_handler.write(">%s\n" % comment)
    file_handler.write("%s\n" % seq)


# Main programme function
def main():
    """ The main function of make_snp_deletions no argument """
    logger = logging.getLogger()

    parser = argparse.ArgumentParser(
        prog="make_snp_deletions",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-g", "--genome", type=file_exist,
                        help="fasta file content the genome", required=True)
    parser.add_argument("-o", "--output", type=file_with_extension_exist,
                        help="""prefix of output file .fasta is genome with
                        SNP and deletion and .vde is the postion and type
                        for each 'variant""", required=True)
    parser.add_argument("-n", "--number-del", type=unsigned_int,
                        help="number of deletions to generate", default=1)
    parser.add_argument("-m", "--min-size-del", type=unsigned_int,
                        help="minimal size of the deletions (in bp)",
                        default=100)
    parser.add_argument("-M", "--max-size-del", type=unsigned_int,
                        help="maximal size of the deletions (in bp)",
                        default=150)
    parser.add_argument("-s", "--min-dist-snp", type=unsigned_int,
                        help="minimal distance between snp and deletion (in bp)",
                        default=5)
    parser.add_argument("-S", "--max-dist-snp", type=unsigned_int,
                        help="maximal distance between snp and deletion (in bp)",
                        default=31)
    parser.add_argument("-d", "--variant-dist", type=unsigned_int,
                        help="distance minimal between two variant (in bp)",
                        default=232)

    # Parse cli argument
    arg = vars(parser.parse_args())

    # Check interval
    del_size_min, del_size_max = check_interval(
        arg["min_size_del"], arg["max_size_del"])
    dist_snp_min, dist_snp_max = check_interval(
        arg["min_dist_snp"], arg["max_dist_snp"])

    # Check variant distance
    if arg["variant_dist"] <= (arg["max_size_del"] + arg["max_dist_snp"]):
        logger().warning(
            "variant distance is minus possible variant max size.")

    comment = ""
    comment2seq = defaultdict(str)
    genome_size = 0
    with open(arg["genome"]) as genome_file:
        for line in genome_file:
            if line.startswith(">"):
                comment = line.lstrip(">").rstrip()
            else:
                genome_size += len(line.rstrip())
                comment2seq[comment] += line.rstrip()

    nuc_per_del = genome_size / arg["number_del"]

    # output file openning
    vde_file = open(arg["output"]+".vde", "a")
    output_file = open(arg["output"]+".fasta", "a")

    # generate snp and deletion
    seq_del_cpt = 0
    del_cpt = 0
    list_pos = list()
    for comment in comment2seq.keys():
        del_pos = 0
        while seq_del_cpt < (len(comment2seq[comment]) / nuc_per_del):
            del_cpt += 1
            seq_del_cpt += 1
            del_pos_max = del_pos + arg["variant_dist"] * 2
            del_pos = random.randint(del_pos+arg["variant_dist"], del_pos_max)
            snp_pos = del_pos - random.randint(dist_snp_min, dist_snp_max)
            del_size = random.randint(del_size_min, del_size_max);
            if (del_pos + del_size) > len(comment2seq[comment]):
                logger.warning("""We can't create another deletion in this
                sequence we create %d deletion""" % seq_del_cpt)
                break
            comment2seq[comment] = generate_snp_del(
                comment2seq[comment], del_pos, snp_pos, del_size)
            write_vde(vde_file, snp_pos, "snp", comment)
            write_vde(vde_file, del_pos, "homo", comment)
        seq_del_cpt = 0
        write_seq(output_file, comment, comment2seq[comment])

    # output file closeing
    vde_file.close()
    output_file.close()


if __name__ == "__main__":
    conf_logger()
    main()
Python
0
@@ -5576,19 +5576,19 @@
 put%22%5D+%22.
-vde
+eva
 %22, %22a%22)%0A
e90cb967a9bfdaafd2833d5005142c321615a8f2
Add a change log when importing a report. (#1746)
components/server/src/routes/report.py
components/server/src/routes/report.py
"""Report routes.""" import os from urllib import parse import bottle import requests from pymongo.database import Database from database import sessions from database.datamodels import latest_datamodel from database.measurements import recent_measurements_by_metric_uuid from database.reports import insert_new_report, latest_reports from initialization.report import import_json_report from model.actions import copy_report from model.data import ReportData from model.transformations import hide_credentials, summarize_report from server_utilities.functions import iso_timestamp, report_date_time, uuid from server_utilities.type import ReportId @bottle.post("/api/v3/report/import") def post_report_import(database: Database): """Import a preconfigured report into the database.""" report = dict(bottle.request.json) result = import_json_report(database, report) result["new_report_uuid"] = report["report_uuid"] return result @bottle.post("/api/v3/report/new") def post_report_new(database: Database): """Add a new report.""" report_uuid = uuid() user = sessions.user(database) report = dict( report_uuid=report_uuid, title="New report", subjects={}, delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."), ) result = insert_new_report(database, report) result["new_report_uuid"] = report_uuid return result @bottle.post("/api/v3/report/<report_uuid>/copy") def post_report_copy(report_uuid: ReportId, database: Database): """Copy a report.""" data_model = latest_datamodel(database) reports = latest_reports(database) data = ReportData(data_model, reports, report_uuid) report_copy = copy_report(data.report, data.datamodel) user = sessions.user(database) report_copy["delta"] = dict( uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"], description=f"{user['user']} copied the report '{data.report_name}'.", ) result = insert_new_report(database, report_copy) result["new_report_uuid"] = report_copy["report_uuid"] return result @bottle.get("/api/v3/report/<report_uuid>/pdf") def export_report_as_pdf(report_uuid: ReportId): """Download the report as pdf.""" renderer_host = os.environ.get("RENDERER_HOST", "renderer") renderer_port = os.environ.get("RENDERER_PORT", "9000") render_url = f"http://{renderer_host}:{renderer_port}/api/render" proxy_host = os.environ.get("PROXY_HOST", "www") proxy_port = os.environ.get("PROXY_PORT", "80") query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else "" report_url = parse.quote(f"http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}") margins = "&".join([f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right")]) # Set pdf scale to 70% or otherwise the dashboard falls off the page options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}" response = requests.get(f"{render_url}?url={report_url}&{options}") response.raise_for_status() bottle.response.content_type = "application/pdf" return response.content @bottle.delete("/api/v3/report/<report_uuid>") def delete_report(report_uuid: ReportId, database: Database): """Delete a report.""" data_model = latest_datamodel(database) reports = latest_reports(database) data = ReportData(data_model, reports, report_uuid) data.report["deleted"] = "true" user = sessions.user(database) data.report["delta"] = dict( uuids=[report_uuid], email=user["email"], description=f"{user['user']} deleted the report '{data.report_name}'." 
) return insert_new_report(database, data.report) @bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>") def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database): """Set a report attribute.""" data_model = latest_datamodel(database) reports = latest_reports(database) data = ReportData(data_model, reports, report_uuid) value = dict(bottle.request.json)[report_attribute] old_value = data.report.get(report_attribute) or "" data.report[report_attribute] = value value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'" user = sessions.user(database) data.report["delta"] = dict( uuids=[report_uuid], email=user["email"], description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'" f"{value_change_description}.", ) return insert_new_report(database, data.report) @bottle.get("/api/v3/tagreport/<tag>") def get_tag_report(tag: str, database: Database): """Get a report with all metrics that have the specified tag.""" date_time = report_date_time() reports = latest_reports(database, date_time) data_model = latest_datamodel(database, date_time) subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag) tag_report = dict( title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}", timestamp=iso_timestamp(), subjects=subjects, ) hide_credentials(data_model, tag_report) summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model) return tag_report def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str): """Return all subjects and metrics that have the tag.""" subjects = {} for report in reports: for subject_uuid, subject in list(report.get("subjects", {}).items()): for metric_uuid, metric in list(subject.get("metrics", {}).items()): if tag not in metric.get("tags", []): del subject["metrics"][metric_uuid] if subject.get("metrics", {}): subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"] subject["name"] = report["title"] + " / " + subject_name subjects[subject_uuid] = subject return subjects
Python
0
@@ -827,16 +827,194 @@
 t.json)%0A
+    user = sessions.user(database)%0A    report%5B%22delta%22%5D = dict(%0A        uuids=%5Breport%5B%22report_uuid%22%5D%5D, email=user%5B%22email%22%5D, description=f%22%7Buser%5B'user'%5D%7D imported a report.%22%0A    )%0A
     resu
03fe02df027ef34cada5417205e641c5238c2403
__init__
TBFW/__init__.py
TBFW/__init__.py
# coding=utf-8
"""
TBFW library
"""

__version__ = '2.0.0'
__author__ = 'Nephy Project Team'
__license__ = 'MIT'

from TBFW.core import Core
from TBFW.api import Plugin

if __name__ == "__main__":
    pass
Python
0.998994
@@ -148,25 +148,121 @@
 BFW.
-api import Plugin
+database import DBProvider%0Afrom TBFW.api import Plugin%0Afrom TBFW.exceptions import GeneralError, OutOfMemoryError
 %0A%0Aif
85a13b7ad7d10c5ff431090cb1de63b84e68ff08
Add proper translation contexts to RemovableDriveOutputDevice
plugins/RemovableDriveOutputDevice/RemovableDriveOutputDevice.py
plugins/RemovableDriveOutputDevice/RemovableDriveOutputDevice.py
import os.path

from UM.Application import Application
from UM.Logger import Logger
from UM.Message import Message
from UM.Mesh.WriteMeshJob import WriteMeshJob
from UM.Mesh.MeshWriter import MeshWriter
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.OutputDevice.OutputDevice import OutputDevice
from UM.OutputDevice import OutputDeviceError

from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")

class RemovableDriveOutputDevice(OutputDevice):
    def __init__(self, device_id, device_name):
        super().__init__(device_id)

        self.setName(device_name)
        self.setShortDescription(catalog.i18nc("", "Save to Removable Drive"))
        self.setDescription(catalog.i18nc("", "Save to Removable Drive {0}").format(device_name))
        self.setIconName("save_sd")
        self.setPriority(1)

    def requestWrite(self, node):
        gcode_writer = Application.getInstance().getMeshFileHandler().getWriterByMimeType("text/x-gcode")
        if not gcode_writer:
            Logger.log("e", "Could not find GCode writer, not writing to removable drive %s", self.getName())
            raise OutputDeviceError.WriteRequestFailedError()

        file_name = None
        for n in BreadthFirstIterator(node):
            if n.getMeshData():
                file_name = n.getName()
                if file_name:
                    break

        if not file_name:
            Logger.log("e", "Could not determine a proper file name when trying to write to %s, aborting", self.getName())
            raise OutputDeviceError.WriteRequestFailedError()

        file_name = os.path.join(self.getId(), os.path.splitext(file_name)[0] + ".gcode")

        try:
            Logger.log("d", "Writing to %s", file_name)
            stream = open(file_name, "wt")
            job = WriteMeshJob(gcode_writer, stream, node, MeshWriter.OutputMode.TextMode)
            job.setFileName(file_name)
            job.progress.connect(self._onProgress)
            job.finished.connect(self._onFinished)

            message = Message(catalog.i18nc("", "Saving to Removable Drive {0}").format(self.getName()), 0, False, -1)
            message.show()

            job._message = message
            job.start()
        except PermissionError as e:
            raise OutputDeviceError.PermissionDeniedError() from e
        except OSError as e:
            raise OutputDeviceError.WriteRequestFailedError() from e

    def _onProgress(self, job, progress):
        if hasattr(job, "_message"):
            job._message.setProgress(progress)
        self.writeProgress.emit(self, progress)

    def _onFinished(self, job):
        if hasattr(job, "_message"):
            job._message.hide()
            job._message = None
        self.writeFinished.emit(self)
        if job.getResult():
            message = Message(catalog.i18nc("", "Saved to Removable Drive {0} as {1}").format(self.getName(), os.path.basename(job.getFileName())))
            message.addAction("eject", catalog.i18nc("", "Eject"), "eject", catalog.i18nc("", "Eject removable device {0}").format(self.getName()))
            message.actionTriggered.connect(self._onActionTriggered)
            message.show()
            self.writeSuccess.emit(self)
        else:
            message = Message(catalog.i18nc("", "Could not save to removable drive {0}: {1}").format(self.getName(), str(job.getError())))
            message.show()
            self.writeError.emit(self)
        job.getStream().close()

    def _onActionTriggered(self, message, action):
        if action == "eject":
            Application.getInstance().getOutputDeviceManager().getOutputDevicePlugin("RemovableDriveOutputDevice").ejectDevice(self)
Python
0.999928
@@ -642,32 +642,46 @@
 (catalog.i18nc(%22
+@action:button
 %22, %22Save to Remo
@@ -730,32 +730,45 @@
 (catalog.i18nc(%22
+@info:tooltip
 %22, %22Save to Remo
@@ -2101,24 +2101,36 @@
 alog.i18nc(%22
+@info:status
 %22, %22Saving t
@@ -2139,35 +2139,56 @@
 Removable Drive 
-%7B0%7D
+%3Cfilename%3E%7B0%7D%3C/filename%3E
 %22).format(self.g
44202d1c178d76c5db22a9b9ce4e7138a0cb73c7
upgrade to v3.9.4
kiteconnect/__version__.py
kiteconnect/__version__.py
__title__ = "kiteconnect" __description__ = "The official Python client for the Kite Connect trading API" __url__ = "https://kite.trade" __download_url__ = "https://github.com/zerodhatech/pykiteconnect" __version__ = "3.9.2" __author__ = "Zerodha Technology Pvt ltd. (India)" __author_email__ = "talk@zerodha.tech" __license__ = "MIT"
Python
0.000001
@@ -219,9 +219,9 @@
 3.9.
-2
+4
 %22%0A__
f1217f04f17daa3d77c9a3197b33d87b8f775056
Replace OpenERP by Odoo
l10n_ch_zip/__openerp__.py
l10n_ch_zip/__openerp__.py
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Nicolas Bessi. Copyright Camptocamp SA
# Contributor: WinGo SA
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
##############################################################################
{'name': 'Switzerland - Postal codes (ZIP) list',
 'summary': 'Loads all Swiss postal codes',
 'version': '1.0.1',
 'depends': ['base', 'base_location'],
 'author': 'Camptocamp',
 'description': """
Swiss postal code (ZIP) list
============================

This module will load all Swiss postal codes (ZIP) in OpenERP to
ease the input of partners.

It is not mandatory to use OpenERP in Switzerland, but can improve the
user experience.
""",
 'website': 'http://www.camptocamp.com',
 'data': ['l10n_ch_better_zip.xml'],
 'demo_xml': [],
 'installable': True,
 'active': False}
Python
0.000144
@@ -1652,22 +1652,19 @@
 IP) in O
-penERP
+doo
  to%0Aease
@@ -1720,14 +1720,11 @@
 se O
-penERP
+doo
  in
9c029788cf438ec5c796e22b77006559c1e59b99
needs json
lablog/controllers/node.py
lablog/controllers/node.py
from flask import Blueprint, Response, render_template, request, g
from flask.views import MethodView
from lablog.app import App
from lablog import config
from lablog.util.jsontools import jsonify
from lablog.util import aes
from flask_oauthlib.provider import OAuth2Provider
import logging
from lablog.controllers.auth import oauth
from datetime import datetime

node = Blueprint(
    'node',
    __name__,
    template_folder=config.TEMPLATES,
    url_prefix="/node",
)

SKEY = bytearray(["1","1","1","1","1","1","1","1","1","1","1","1","1","1","1",0x00])
KEY = buffer(SKEY)

@node.route("/nodes", methods=["GET"])
@oauth.require_oauth('inoffice')
def get_nodes():
    res = g.INFLUX.query(query="SHOW SERIES FROM light")
    nodes = []
    for v in res.get_points():
        nodes.append(v.get('node'))

    return jsonify({"nodes":nodes})

@node.route("/<node_id>/sensors", methods=["POST"])
def node_sensors(node_id):
    logging.info(request.data)
    logging.info(config.SKEY)
    logging.info(KEY)
    j = aes.decrypt(request.data, KEY)
    j = json.loads(j)
    points = []
    for k,v in j.iteritems():
        p = dict(
            measurement=k,
            tags=dict(
                node=str(node_id),
            ),
            time=datetime.utcnow(),
            fields=dict(
                value=v
            )
        )
        g.MONGO['lablog']['node_stream'].insert(p)
        points.append(p)

    g.INFLUX.write_points(points)
    return jsonify({'success':True})
Python
0.999702
@@ -273,23 +273,8 @@
 der%0A
-import logging%0A
 from
@@ -340,16 +340,43 @@
 datetime
+%0Aimport logging%0Aimport json
 %0A%0Anode =
dc5eebe521c480fa06c13eae0f41922d3cb08b63
Convert newlines in command subject into spaces.
lambda/control/__init__.py
lambda/control/__init__.py
from __future__ import print_function

import datetime
import hmac
import hashlib
import re

timestamp_format = '%Y%m%d%H%M%S'
signed_cmd_regex = re.compile(r'^(?P<cmd>.+) (?P<timestamp>\d{14}) (?P<signature>[\da-f]{40})$')

from config import signing_key, signed_validity_interval
from sestools import msg_get_header, msg_get_response_address

import boto3
ses = boto3.client('ses')

from commands import run


class NotSignedException(Exception):
    pass


class ExpiredSignatureException(Exception):
    pass


class InvalidSignatureException(Exception):
    pass


def handle_command(command_address, msg):
    auto_submitted = msg_get_header(msg, 'auto-submitted')
    if auto_submitted and auto_submitted.lower() != 'no':
        # Auto-submitted: header, https://www.iana.org/assignments/auto-submitted-keywords/auto-submitted-keywords.xhtml
        print("Message appears to be automatically generated ({}), so ignoring it.".format(auto_submitted))
        return

    # Grab the address to which to respond and the subject
    reply_to = msg_get_response_address(msg)
    if reply_to is None:
        print("Failed to get an email address from the Reply-To, From, or Sender headers.")
        return
    subject = msg_get_header(msg, 'subject')
    print("Subject: " + subject)
    print("Responding to: " + reply_to)

    # Strip off any re:, fwd:, etc. (everything up to the last :, then trim whitespace)
    if ':' in subject:
        subject = subject[subject.rfind(':') + 1:]
    subject = subject.strip()

    try:
        cmd = get_signed_command(subject, reply_to)
    except ExpiredSignatureException:
        # TODO (maybe): Reply to the sender to tell them the signature was expired? Or send a newly-signed message?
        print("Expired signature.")
        return
    except InvalidSignatureException:
        # Do nothing.
        print("Invalid signature.")
        return
    except NotSignedException:
        # If the subject isn't a signed command...
        # TODO (maybe): ... check if the reply_to is allowed to run the specific command with the given parameters...
        # ... and reply with a signed command for the recipient to send back (by replying).
        print("Signing command: {}".format(subject))
        response = send_response(
            source=command_address,
            destination=reply_to,
            subject='Re: {}'.format(sign(subject, reply_to)),
            body='To verify the origin of the command, please reply to this email.',
        )
        return

    # TODO (maybe): allow commands in body?

    # Execute the command.
    output = run(user=reply_to, cmd=cmd)

    # Reply with the output/response.
    response = send_response(
        source=command_address,
        destination=reply_to,
        subject='Re: {}'.format(subject),
        body='Output of "{}":\n\n{}'.format(cmd, output),
    )


def send_response(source, destination, subject, body):
    return ses.send_email(
        Source=source,
        Destination={
            'ToAddresses': [
                destination,
            ],
        },
        Message={
            'Subject': {
                'Data': subject,
            },
            'Body': {
                'Text': {
                    'Data': body,
                },
            }
        },
    )


def get_signed_command(subject, address):
    match = signed_cmd_regex.match(subject)
    if not match:
        raise NotSignedException
    (cmd, timestamp, sig) = (' ' + subject).rsplit(' ', 2)
    cmd = match.group('cmd').strip()
    sig = match.group('signature')
    timestamp = match.group('timestamp')
    if not sig or not timestamp:
        raise NotSignedException

    timestamp_age = datetime.datetime.now() - datetime.datetime.strptime(timestamp, timestamp_format)
    try:
        # Check that the timestamp is recent enough.
        if timestamp_age > signed_validity_interval:
            raise ExpiredSignatureException
    except ValueError:
        raise InvalidSignatureException

    if not hmac.compare_digest(unicode(signature(' '.join([
        address,
        cmd,
        timestamp,
    ]))), unicode(sig)):
        raise InvalidSignatureException

    return cmd


def signature(cmd):
    return hmac.new(signing_key, cmd.strip(), hashlib.sha1).hexdigest()


def sign(subject, reply_to):
    timestamp = datetime.datetime.now().strftime(timestamp_format)
    return ' '.join([
        subject,
        timestamp,
        signature(' '.join([
            reply_to,
            subject,
            timestamp,
        ])),
    ])
Python
0.000358
@@ -1245,16 +1245,35 @@
 ubject')
+.replace('%5Cn', ' ')
 %0A    pri
b37988c7d6b260793cc8e88e0057f1a59d2fcc0b
fix migration file
custom/icds_reports/migrations/0060_added_phone_number_to_views.py
custom/icds_reports/migrations/0060_added_phone_number_to_views.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-10 14:05
from __future__ import unicode_literals
from __future__ import absolute_import

from django.db import migrations

from corehq.sql_db.operations import RawSQLMigration

migrator = RawSQLMigration(('custom', 'icds_reports', 'migrations', 'sql_templates', 'database_views'))


class Migration(migrations.Migration):

    dependencies = [
        ('icds_reports', '0057_aggregateccsrecordpostnatalcareforms_is_ebf'),
    ]

    operations = [
        migrator.get_migration('agg_awc_daily.sql'),
        migrator.get_migration('agg_ccs_record_monthly.sql'),
        migrator.get_migration('agg_child_health_monthly.sql'),
        migrator.get_migration('child_health_monthly.sql'),
        migrator.get_migration('daily_attendance.sql'),
    ]
Python
0.000001
@@ -434,53 +434,27 @@
 '005
-7_aggregateccsrecordpostnatalcareforms_is_ebf
+9_update_blob_paths
 '),%0A
84a025793ded83302212ef902660642f26d76d24
fix bug
spider/src/mydm/pipelines/image.py
spider/src/mydm/pipelines/image.py
# -*- coding: utf-8 -*-

import logging
import base64

from io import BytesIO
from urllib.parse import urlparse, urljoin

from PIL import Image as ImageLib
from lxml.html import fromstring, HTMLParser
from scrapy.http import Request

from mydm.exceptions import ImgException
from scrapy.pipelines.media import MediaPipeline

logger = logging.getLogger(__name__)


class Image():
    IMAGE_MAX_WIDTH = 800

    def __init__(self, data):
        self._image = ImageLib.open(BytesIO(data))

    @property
    def size(self):
        return self._image.size

    @property
    def type(self):
        return self._image.format

    def optimize(self, q=75):
        image = self._image
        w, h = self._image.size
        if w > self.IMAGE_MAX_WIDTH:
            h = int(float(h)/w*self.IMAGE_MAX_WIDTH)
            w = self.IMAGE_MAX_WIDTH
            image = self._image.resize((w, h), ImageLib.ANTIALIAS)
        buf = BytesIO()
        image.save(
            buf,
            format=self._image.format,
            quality=q
        )
        return buf.getvalue()


class ImagesDlownloadPipeline(MediaPipeline):
    MEDIA_NAME = 'image'
    IMAGE_MAX_SIZE = 1024*256

    def __init__(self, filters):
        self.filters = filters

    @classmethod
    def from_settings(cls, settings):
        return cls(settings['IMAGE_OPTIMIZE_FILTER'])

    def _need_optimize(self, size):
        if self.spiderinfo.spider._id in self.filters:
            return False
        if size < self.IMAGE_MAX_SIZE:
            return False
        return True

    def get_media_requests(self, item, info):
        doc = item['content']
        if isinstance(doc, (str, bytes)):
            doc = fromstring(doc, parser=HTMLParser(encoding=item['encoding']))
            item['content'] = doc
        try:
            attr = self.spiderinfo.spider.image_url_attr
        except AttributeError:
            attr = 'src'
        urls = []
        for e in doc.xpath('//img'):
            if attr in e.attrib:
                url = e.get(attr).strip('\t\n\r ')
                if url.startswith('/'):
                    url = urljoin(item['link'], url)
                elif url.startswith('//'):
                    r = urlparse(item['link'])
                    url = r.scheme + url
                urls.append((url, e))
        reqs = []
        for url, e in urls:
            if url.startswith('data'):
                continue
            try:
                r = Request(url, meta={'img': e})
            except ValueError:
                logger.error('invalid url[%s]', url)
            else:
                reqs.append(r)
        return reqs

    def media_failed(self, failure, request, info):
        logger.error(
            'spider[%s] failed to download image[%s]',
            self.spiderinfo.spider.name,
            request.url
        )
        try:
            attr = self.spiderinfo.spider.image_url_attr
            img = request.meta['img']
            src = img.get(attr)
            img.set('src', src)
        except AttributeError:
            pass

    def media_downloaded(self, response, request, info):
        if response.status != 200:
            raise ImgException(
                f'download image[{response.url}] got status[{response.status}]'
            )
        if not response.body:
            raise ImgException('image size is 0')

        img = response.meta['img']
        src = response.url
        data = response.body
        imgsize = len(data)
        img.set('src', src)
        try:
            image = Image(data)
            w, _ = image.size
            if self._need_optimize(imgsize):
                data = image.optimize()
            imgtype = image.type
        except OSError:
            logger.error(
                'spider[%s] got unsupported image type[%s]',
                self.spiderinfo.spider.name,
                src
            )
            try:
                imgtype = response.headers['Content-Type'].split('/')[-1]
            except KeyError:
                logger.error(
                    "spider[%s] can't find Content-Type header for %s",
                    self.spiderinfo.spider.name,
                    src
                )
                return
        img.set('source', src)
        data = base64.b64encode(data).decode('ascii')
        img.set('src', 'data:image/{};base64,{}'.format(imgtype, data))

    def item_completed(self, results, item, info):
        return item
Python
0.000001
@@ -1120,24 +1120,25 @@
 aPipeline):%0A
+%0A
     MEDIA_NA
@@ -1208,17 +1208,62 @@
 lf, 
-filter
+settings):%0A        super().__init__(settings=setting
 s)
-:
 %0A
@@ -1268,24 +1268,30 @@
 self.
+image_
 filters = fi
@@ -1292,69 +1292,131 @@
 s = 
-filters%0A%0A    @classmethod%0A    def from_settings(cls, 
+settings%5B'IMAGE_OPTIMIZE_FILTER'%5D%0A%0A    @classmethod%0A    def from_crawler(cls, crawler):%0A        settings = crawler.
 settings
@@ -1403,34 +1403,32 @@
 crawler.settings
-):
 %0A        return
@@ -1420,22 +1420,22 @@
 
-return
+pipe =
  cls(set
@@ -1443,34 +1443,60 @@
 ings
-%5B'IMAGE_OPTIMIZE_FILTER'%5D)
+)%0A        pipe.crawler = crawler%0A        return pipe
 %0A%0A
@@ -1501,17 +1501,16 @@
 def 
-_
 need_opt
@@ -1574,16 +1574,22 @@
 in self.
+image_
 filters:
@@ -3723,38 +3723,8 @@
 ta)%0A
-            w, _ = image.size%0A
@@ -3739,17 +3739,16 @@
 if self.
-_
 need_opt
c5f2b65aa172b10206950a5981a06afef5742173
Improve reliability of galera_consistency.py
galera_consistency.py
galera_consistency.py
import optparse
import subprocess


def table_checksum(user, password, host):
    args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
    if host:
        args.extend(['-h', host])

    proc = subprocess.Popen(args, stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    return (proc.return_code, out, err)


def main():
    usage = "Usage: %prog [-h] [-H] username password"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-H', '--host', action='store', dest='host',
                      default=None)
    (options, args) = parser.parse_args()

    if len(args) != 2:
        parser.print_help()
        raise SystemExit(True)

    (status, _, err) = table_checksum(args[0], args[1], options.host)
    if status != 0:
        print "status err %s" % err
        raise SystemExit(True)

    print "status ok"


if __name__ == '__main__':
    main()
Python
0
@@ -1,12 +1,22 @@
+import io%0A
 import optpa
@@ -81,16 +81,93 @@
 host):%0A
+    %22%22%22Run pt-table-checksum with the user, password, and host specified.%22%22%22%0A
 args
@@ -276,16 +276,64 @@
 host%5D)%0A%0A
+    out = io.StringIO()%0A    err = io.StringIO()%0A
 proc
@@ -386,18 +386,218 @@
 PE)%0A
+%0A
-(
+# Let's poll the process to make sure it finishes before we return from%0A    # this function.%0A    while proc.poll() is None:%0A        # Avoid the OS Pipe buffer from blocking the process%0A        (std
 out,
+std
 err)
@@ -618,16 +618,367 @@
 icate()%0A
+        # Let's store the aggregated output in buffers%0A        out.write(stdout)%0A        err.write(stderr)%0A%0A    # The process has terminated, let's get the rest of stdout/stderr%0A    (stdout, stderr) = proc.communicate()%0A    out.write(stdout)%0A    err.write(stderr)%0A    # At this point we have a valid return code and the full stdout, stderr%0A    # logs%0A    retu
@@ -1002,21 +1002,43 @@
 ode, out
-, err
+.getvalue(), err.getvalue()
 )%0A%0A%0Adef 
@@ -1171,16 +1171,25 @@
 _option(
+%0A        
 '-H', '-
@@ -1195,16 +1195,24 @@
 --host',
+%0A        
 action=
@@ -1219,16 +1219,24 @@
 'store',
+%0A        
 dest='h
@@ -1253,161 +1253,605 @@
 
-default=None)%0A    (options, args) = parser.parse_args()%0A%0A    if len(args) != 2:%0A        parser.print_help()%0A        raise SystemExit(True)%0A
+default=None,%0A        help=%22Allow user to connect to something other than localhost%22%0A    )%0A    (options, args) = parser.parse_args()%0A%0A    # We will need the username and password to connect to the database%0A    if len(args) != 2:%0A        parser.print_help()%0A        raise SystemExit(True)%0A%0A    # According to%0A    # http://www.percona.com/doc/percona-toolkit/2.2/pt-table-checksum.html%0A    # If the exit status is 0, everything is okay, otherwise the exit status%0A    # will be non-zero. We don't need stdout at the moment so we can discard%0A    # it. Stderr should contain any problems we run across.
 %0A
@@ -1972,16 +1972,24 @@
 s%22 %25 err
+.strip()
 %0A
1b2fa45766b1ea5945f246d74bc4adf0114abe84
Fix typo in description of config item
astroquery/splatalogue/__init__.py
astroquery/splatalogue/__init__.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Splatalogue Catalog Query Tool
-----------------------------------

:Author: Adam Ginsburg (adam.g.ginsburg@gmail.com)
:Originally contributed by: Magnus Vilhelm Persson (magnusp@vilhelm.nu)
"""
from astropy import config as _config


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astroquery.splatalogue`.
    """
    slap_url = _config.ConfigItem(
        'http://find.nrao.edu/splata-slap/slap',
        'Splatalogue SLAP interface URL (not used).')
    query_url = _config.ConfigItem(
        'http://www.cv.nrao.edu/php/splat/c_export.php',
        'SSplatalogue web interface URL.')
    timeout = _config.ConfigItem(
        60,
        'Time limit for connecting to Splatalogue server.')
    lines_limit = _config.ConfigItem(
        1000,
        'Limit to number of lines exported.')


conf = Conf()

from . import load_species_table
from . import utils
from .core import Splatalogue, SplatalogueClass

__all__ = ['Splatalogue', 'SplatalogueClass',
           'Conf', 'conf',
           ]
Python
0.000107
@@ -655,17 +655,16 @@
 '
-S
 Splatalo
8223f308eb1e17ea93981eceaf96aa5210e6724c
Update common.py
bin/common.py
bin/common.py
#!/usr/bin/python

import os
import numpy as np
import multiprocessing as mp
import subprocess as sub
import shlex


#Make sure a directory path contains a trailing /#
def fixDirName(dirpath):
    if dirpath[-1] != '/':
        dirpath += '/'
    return dirpath

#Check if a directory exists, make it if needed#
def makeDir(dirpath):
    if not os.path.exists(dirpath):
        os.mkdir(dirpath)

#Import sample names from a text file, if specifying a subset to process#
def importSampleList(infile):
    if os.path.exists(infile):
        files = []
        with open(infile, 'r') as IN:
            for x in IN:
                if len(x.rstrip().split('\t')) > 1:
                    files.append(x.rstrip().split('\t')[0])
                else:
                    files.append(x.rstrip())
    else:
        errorText = '\nERROR: the specified sample name file does not exist, please fix\n\t' + infile + '\n'
        print(errorText)
        raise SystemExit
    return files

#Get a list of samples to run processing for a specific step of the pipeline on#
def getSampleList(folder, sampleArg, extension):
    #to process all samples in the input folder#
    fileList = [ x for x in os.listdir(folder) if extension in x.split('.') ]
    #to process a specific subset of samples#
    if sampleArg:
        sampleList = importSampleList(sampleArg)
        fileList = [ x for x in fileList if any(y in x for y in sampleList) ]
        if len(sampleList) != len(fileList):
            errorText = '\nERROR: ' + str(len(fileList)) + ' samples exist for processing, but '
            errorText += str(len(sampleList)) + ' were specified in the sample list file, please fix any discrepancies\n'
            print(errorText)
            raise SystemExit
    fileList = [folder + x for x in fileList]
    return fileList

#Import information about samples from a reference .txt file#
def importInfoFile(infoFile, columns, useFunction, skiprows=0):
    functionDict = {
        'normalize': {'names': ('name', 'method', 'cells'),
                      'formats': ('S50', 'S50', 'int')},
        'normref': {'names': ('chrom', 'chrStart', 'abspos', 'size', 'gc'),
                    'formats': ('S10', 'int64', 'int64', 'int64', 'float64')},
        'interpret': {'names': ('name', 'cells', 'group'),
                      'formats': ('S50', 'int', 'S50')}
        }
    if not infoFile:
        return functionDict[useFunction]
    if skiprows == 0:
        data = np.loadtxt(infoFile, usecols=columns, dtype=functionDict[useFunction])
    else:
        data = np.loadtxt(infoFile, usecols=columns, dtype=functionDict[useFunction], skiprows=skiprows)
    return data

#import segment data and remove any nonsense lines#
def importSegData(sample, segDir, binArray):
    segDtype1 = {'names': ('start', 'end', 'logCN'),
                 'formats': ('int', 'int', 'float')}
    segDtype2 = {'names': ('chrom', 'start', 'end', 'CN'),
                 'formats': ('S10', 'int', 'int', 'float')}
    chromDict = {x['abspos']: x['chrom'] for x in binArray}
    binDict = {y['abspos']: x for x,y in enumerate(binArray)}
    segData = np.loadtxt(segDir + sample + '.segments.txt', dtype=segDtype1)
    segDataGood = segData[segData['end'] > segData['start']]
    segDataFix = np.zeros(len(segDataGood), dtype=segDtype2)
    segDataFix['chrom'] = [chromDict[x] for x in segDataGood['start']]
    segDataFix['start'] = segDataGood['start']
    segDataFix['end'] = segDataGood['end']
    segDataFix['CN'] = [2 ** x if x != np.inf else 2 ** 0 for x in segDataGood['logCN']]
    segArray = np.zeros(len(binArray))
    for i in segDataFix:
        segArray[binDict[i['start']]:] = len(segArray[binDict[i['start']]:]) * [i['logCN']]
    return segDataFix, segArray

###daemon to run multiprocessing and parallelize tasks###
def daemon(target, argList, name, cpuPerProcess=1, kwargs=False):
    print( str( '\t' + str(len(argList)) + ' processes to run to ' + name ) )
    numCPU = mp.cpu_count()
    numWorkers = min( [int(numCPU / cpuPerProcess), len(argList)] )
    pool = mp.Pool(numWorkers)
    if not kwargs:
        processes = [pool.apply_async(target, args=x) for x in argList]
    else:
        processes = [pool.apply_async(target, args=x, kwargs=kwargs) for x in argList]
    pool.close()
    for i,j in enumerate(processes):
        j.wait()
        if not j.successful():
            pool.terminate()
            print '\n\n\nprocessing failed, getting traceback now...'
            p = mp.Process(target=target, args=argList[i])
            p.start()
            p.join()
    print( str( '\tAll processing to ' + name + ' complete\n' ) )

def zipping(filepath, gunzip=True):
    if filepath.split('.')[-1] != 'gz' and gunzip:
        return filepath
    elif filepath.split('.')[-1] == 'gz' and not gunzip:
        return filepath
    if gunzip:
        cmd = 'gunzip ' + filepath
        fixname = filepath[:-3]
    else:
        cmd = 'gzip ' + filepath
        fixname = filepath + '.gz'
    cmd = shlex.split(cmd)
    p = sub.Popen(cmd)
    p.wait()
    return fixname
Python
0.000001
@@ -2013,24 +2013,27 @@
 , 'int64', '
+np.
 float64')%7D,%0A
3300caac7c7b420d31010e37aebbdde519f4723f
change 开通虚拟消费卡
backend/server/route/user_route.py
backend/server/route/user_route.py
# -*- coding: UTF-8 -*-
"""
@author: Bingwei Chen
@time: 8/4/17
@desc: user route
1. user register
2. user login
"""
from flask import Blueprint
from flask import jsonify
from flask import request
from flask_jwt_extended import create_access_token
from playhouse.shortcuts import model_to_dict
from peewee import DoesNotExist
from flask_jwt_extended import jwt_required, get_jwt_identity

from server.service import user_service
from server.utility.exception import PasswordError
from server.utility.exception import Error
from server.wx import wx_service

PREFIX = '/user'

user_app = Blueprint("user_app", __name__, url_prefix=PREFIX)


@user_app.route('/register', methods=['POST'])
def register():
    """
    eg = {
    "username": "bingwei",
    "password": "123456",
    "name": "陈炳蔚",
    "phone": 15988731660,
    "school": "浙江大学",
    "student_id": "12358",
    "identify_number": "30032323232322"
    "openid": "o48xV1b3vqrGQgGX--UrLACbmbHY"
    }

    "username": "Shuo_Ren",
    "password": "123456",
    "name": "Ren",
    "phone": 15701683747,
    "school": "浙江大学",
    "student_id": "00001",
    "identify_number": "3003232323232211"
    :return:
    :rtype:
    """
    data = request.get_json()
    username = data.pop('username')
    password = data.pop('password')
    if username is None or password is None:
        return jsonify({'response': 'invalid user or password'}), 400
    try:
        added_user = user_service.add(
            username=username,
            password=password,
            name=data.pop("name"),
            school=data.pop("school"),
            student_id=data.pop("student_id"),
            phone=data.pop("phone"),
            identify_number=data.pop("identify_number"),
            # 注释项 是可选项
            we_chat_id=data.pop("openid"),
            # account=data.pop("account"),
            # account_type=data.pop("account_type"),
            **data)
        added_user = model_to_dict(added_user)
        added_user.pop('password')
        return jsonify({'response': added_user}), 200
    except Error:
        return jsonify({'response': {
            "message": "xxx",
            "error": Error,
        }}), 400
    except Exception as e:
        print(e)
        error = e.args[1]
        message = "字段错误"
        if "PRIMARY" in error:
            message = "用户名已存在"
        if "user_identify_number" in error:
            message = "身份证号码重复"
        if "phone" in error:
            message = "手机号码重复"
        return jsonify({'response': {
            "message": message,
            "error": error,
        }}), 400


@user_app.route('/login', methods=['POST'])
def login():
    """
    eg = {
    "username": "bingwei",
    "password": "123456"
    }
    :return:
    :rtype:
    """
    username = request.json.get('username', None)
    password = request.json.get('password', None)
    try:
        user = user_service.login(username, password)
        try:
            wx_user = wx_service.get_user_detail(open_id=user.we_chat_id)
        except DoesNotExist as e:
            wx_user = None
        user = model_to_dict(user)
        user.pop('password')
        # Identity can be any data that is json serializable
        response = {
            'response': {
                'token': create_access_token(identity=user),
                'user': user,
                'wx_user': wx_user
            }}
        return jsonify(response), 200
    except DoesNotExist as e:
        return jsonify({
            'response': {
                "error": '%s: %s' % (str(DoesNotExist), e.args),
                "message": "用户名不存在"
            }}), 400
    except PasswordError as e:
        return jsonify({
            'response': {
                "error": '%s: %s' % (str(PasswordError), e.args),
                "message": "用户名密码错误"
            }}), 400


@user_app.route('/openid_login', methods=['get'])
def openid_to_token():
    openid = request.args.get("openid")
    if not openid:
        return jsonify({
            'response': {
                "message": "openid不存在"
            }}), 400
    try:
        user = user_service.get(we_chat_id=openid)
        wx_user = wx_service.get_user_detail(open_id=user.we_chat_id)
        user = model_to_dict(user)
        user.pop('password')
        # Identity can be any data that is json serializable
        response = {
            'response': {
                'token': create_access_token(identity=user),
                'user': user,
                'wx_user': wx_user
            }}
        return jsonify(response), 200
    except DoesNotExist as e:
        return jsonify({
            'response': {
                "error": '%s: %s' % (str(DoesNotExist), e.args),
                "message": "用户不存在"
            }}), 400


@user_app.route('/manager/login', methods=['POST'])
def login_manager():
    """
    eg = {
    "username": "bingwei",
    "password": "123456"
    }
    :return:
    :rtype:
    """
    username = request.json.get('username', None)
    password = request.json.get('password', None)
    try:
        user = user_service.login(username, password)
        if not user.admin:
            return jsonify({
                'response': {
                    "message": "非管理员用户"
                }}), 400
        user = model_to_dict(user)
        user.pop('password')
        # Identity can be any data that is json serializable
        response = {
            'response': {
                'token': create_access_token(identity=user),
                'user': user}}
        return jsonify(response), 200
    except DoesNotExist as e:
        return jsonify({
            'response': {
                "error": '%s: %s' % (str(DoesNotExist), e.args),
                "message": "用户名不存在"
            }}), 400
    except PasswordError as e:
        return jsonify({
            'response': {
                "error": '%s: %s' % (str(PasswordError), e.args),
                "message": "用户名密码错误"
            }}), 400


# 开通虚拟消费卡
@user_app.route('/virtual_card', methods=['PUT'])
@jwt_required
def create_virtual_card():
    """
    eg = {
        # "username": "Shuo_Ren"
    }
    :return:
    :rtype:
    """
    username = get_jwt_identity()
    data = request.get_json()
    try:
        virtual_card = user_service.create_virtual_card(
            card_no=username,
            **data
        )
        return jsonify({
            'response': model_to_dict(virtual_card, recurse=False)}), 200
    except Error as e:
        return jsonify({'response': '%s: %s' % (str(Error), e.args)}), 400
Python
0
@@ -6222,37 +6222,9 @@
 = %7B%0A
-        # %22username%22: %22Shuo_Ren%22
 %0A
+    
@@ -6297,38 +6297,8 @@
 y()%0A
-    data = request.get_json()%0A
     
@@ -6359,16 +6359,16 @@
 l_card(%0A
+
@@ -6393,27 +6393,8 @@
 me,%0A
-            **data%0A
     
a1c60939302bd60d0e7708d19b7eee3d2970bbfb
Fix minion state assertions - multiple keys possible
assertions.py
assertions.py
import re
import shlex
import subprocess

from config import SALT_KEY_CMD


def assert_minion_key_state(env, expected_state):
    STATES_MAPPING = dict(
        unaccepted=re.compile("Unaccepted Keys:\n{HOSTNAME}".format(**env)),
        accepted=re.compile("Accepted Keys:\n{HOSTNAME}".format(**env))
    )
    assert expected_state in STATES_MAPPING
    cmd = shlex.split(SALT_KEY_CMD.format(**env))
    cmd.append("-L")
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
    output, unused_err = process.communicate()
    assert STATES_MAPPING[expected_state].search(output)


def assert_proxyminion_key_state(env, expected_state):
    STATES_MAPPING = dict(
        unaccepted=re.compile("Unaccepted Keys:\n{PROXY_ID}".format(**env)),
        accepted=re.compile("Accepted Keys:\n{PROXY_ID}".format(**env))
    )
    assert expected_state in STATES_MAPPING
    cmd = shlex.split(SALT_KEY_CMD.format(**env))
    cmd.append("-L")
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
    output, unused_err = process.communicate()
    assert STATES_MAPPING[expected_state].search(output)
Python
0.000004
@@ -76,235 +76,57 @@
 def 
+h
 as
-sert_minion_key_state(env, expected_state):%0A    STATES_MAPPING = dict(%0A        unaccepted=re.compile(%22Unaccepted Keys:%5Cn%7BHOSTNAME%7D%22.format(**env)),%0A        accepted=re.compile(%22Accepted Keys:%5Cn%7BHOSTNAME%7D%22.format(**env))%0A    )
+_expected_state(expected_state, mapping, env):
 %0A
@@ -147,38 +147,31 @@
 ed_state in 
-STATES_MAPPING
+mapping
 %0A    cmd = s
@@ -350,37 +350,30 @@
 e()%0A
-    assert STATES_MAPPING
+    return mapping
 %5Bexpecte
@@ -395,16 +395,28 @@
 (output)
+ is not None
 %0A%0A%0Adef a
@@ -425,13 +425,8 @@
 ert_
-proxy
 mini
@@ -530,35 +530,42 @@
 cepted Keys:
-%5Cn%7BPROXY_ID
+(%5Cn.+)*%5Cn%7BHOSTNAME
 %7D%22.format(**
@@ -618,19 +618,26 @@
 eys:
-%5Cn%7BPROXY_ID
+(%5Cn.+)*%5Cn%7BHOSTNAME
 %7D%22.f
@@ -659,32 +659,36 @@
 )%0A    assert 
+has_
 expected_state i
@@ -689,267 +689,362 @@
 tate
+(expected_state, STATES_MAPPING, env)%0A%0A%0Adef assert_proxyminion_key_state(env, expected_state):%0A    STATES_MAPPING = dict(%0A        unaccepted=re.compile(%22Unaccepted Keys:(%5Cn.+)*%5Cn%7BPROXY_ID%7D%22.format(**env)),%0A        accepted=re.compile(%22Accepted Keys:(%5Cn.+)*%5Cn%7BPROXY_ID%7D%22.format(**env))%0A    )%0A    assert has_expected_state(expected_state, STATES_MAPPING, env
 )%0A
9845a0566e1c96ae72ce0ac6438e8ddd9f6db053
Add better instructions on changing kernels
metatlas/tools/notebook.py
metatlas/tools/notebook.py
"""Jupyter notebook helper functions""" import logging import os import shutil import sys from pathlib import Path import pandas as pd from IPython.core.display import display, HTML from metatlas.tools.logging import activate_logging logger = logging.getLogger(__name__) def configure_environment(log_level): """ Sets environment variables and configures logging inputs: log_level: one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL' """ activate_logging(console_level=log_level) os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE" def validate_kernel(): """ Raise error if problem with kernel When on NERSC, this will install the correct kernel if needed """ allowed_exe = [ "/global/common/software/m2650/metatlas-targeted-20210521/bin/python", ] error_msg = "Invalid kernel setting in Jupyter Notebook." on_nersc = "METATLAS_LOCAL" not in os.environ if on_nersc and sys.executable not in allowed_exe: install_kernel() logger.critical('Please check that the kernel is set to "Metatlas Targeted".') raise ValueError(error_msg) try: # pylint: disable=import-outside-toplevel,unused-import import dataset # noqa: F401 except ModuleNotFoundError as module_error: logger.critical( 'Could not find dataset module. Please check that the kernel is set to "Metatlas Targeted".' ) raise ModuleNotFoundError from module_error def install_kernel(): """ Copies kernel.json from repo to active location under home directory. Only for use on NERC! """ logger.info('Installing kernel.json for "Metatlas Targeted".') repo_path = Path(__file__).resolve().parent.parent.parent source = repo_path / "notebooks" / "kernels" / "metatlas-targeted.kernel.json" dest_dir = Path.home() / ".local" / "share" / "jupyter" / "kernels" / "metatlas-targeted" os.makedirs(dest_dir, exist_ok=True) shutil.copyfile(source, dest_dir / "kernel.json") logger.info('Reload the page and change kernel to "Metatlas Targeted".') def configure_pandas_display(max_rows=5000, max_columns=500, max_colwidth=100): """Set pandas display options""" pd.set_option("display.max_rows", max_rows) pd.set_option("display.max_columns", max_columns) pd.set_option("display.max_colwidth", max_colwidth) def configure_notebook_display(): """Configure output from Jupyter""" # set notebook to have minimal side margins display(HTML("<style>.container { width:100% !important; }</style>")) def setup(log_level): """High level function to prepare the metatlas notebook""" configure_environment(log_level) validate_kernel() configure_notebook_display() configure_pandas_display()
Python
0.005869
@@ -2032,16 +2032,17 @@
 er.info(
+(
 'Reload 
@@ -2054,16 +2054,21 @@
 age and 
+then 
 change k
@@ -2096,17 +2096,185 @@
 rgeted%22.
-'
+ '%0A                %22On the menu bar at the top of this page select 'Kernel'%3E'Change Kernel..' %22%0A                %22then find 'Metatlas Targeted' in the drop down list.%22)
 )%0A%0A%0Adef 
c8b772542dffda0de95034b54b3eaea465dac2e7
Correct exception handling.
bin/sender.py
bin/sender.py
#!/usr/bin/env python

# Copyright (C) 2012 STFC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Script to run a sending SSM.

@author: Will Rogers
'''

from ssm import __version__, set_up_logging
from ssm.ssm2 import Ssm2, Ssm2Exception
from ssm.crypto import CryptoException
from ssm.brokers import StompBrokerGetter, STOMP_SERVICE, STOMP_SSL_SERVICE

import logging.config
import ldap
import sys
import os
from optparse import OptionParser
import ConfigParser


def main():
    '''
    Set up connection, send all messages and quit.
    '''
    ver = "SSM %s.%s.%s" % __version__
    op = OptionParser(description=__doc__, version=ver)

    op.add_option('-c', '--config', help='location of config file',
                  default='/etc/apel/sender.cfg')
    op.add_option('-l', '--log_config',
                  help='location of logging config file (optional)',
                  default='/etc/apel/logging.cfg')
    (options, unused_args) = op.parse_args()

    cp = ConfigParser.ConfigParser()
    cp.read(options.config)

    # set up logging
    try:
        if os.path.exists(options.log_config):
            logging.config.fileConfig(options.log_config)
        else:
            set_up_logging(cp.get('logging', 'logfile'),
                           cp.get('logging', 'level'),
                           cp.getboolean('logging', 'console'))
    except (ConfigParser.Error, ValueError, IOError), err:
        print 'Error configuring logging: %s' % str(err)
        print 'The system will exit.'
        sys.exit(1)

    log = logging.getLogger('ssmsend')

    log.info('========================================')
    log.info('Starting sending SSM version %s.%s.%s.' % __version__)

    # If we can't get a broker to connect to, we have to give up.
    try:
        bdii_url = cp.get('broker','bdii')
        log.info('Retrieving broker details from %s ...' % bdii_url)
        bg = StompBrokerGetter(bdii_url)
        use_ssl = cp.getboolean('broker', 'use_ssl')
        if use_ssl:
            service = STOMP_SSL_SERVICE
        else:
            service = STOMP_SERVICE
        brokers = bg.get_broker_hosts_and_ports(service, cp.get('broker','network'))
        log.info('Found %s brokers.' % len(brokers))
    except ConfigParser.NoOptionError, e:
        try:
            host = cp.get('broker', 'host')
            port = cp.get('broker', 'port')
            brokers = [(host, int(port))]
        except ConfigParser.NoOptionError:
            log.error('Options incorrectly supplied for either single broker or \
broker network. Please check configuration')
            log.error('System will exit.')
            log.info('========================================')
            print 'SSM failed to start. See log file for details.'
            sys.exit(1)
    except ldap.LDAPError, e:
        log.error('Could not connect to LDAP server: %s' % e)
        log.error('System will exit.')
        log.info('========================================')
        print 'SSM failed to start. See log file for details.'
        sys.exit(1)

    if len(brokers) == 0:
        log.error('No brokers available.')
        log.error('System will exit.')
        log.info('========================================')

    try:
        server_cert = cp.get('certificates','server')
    except ConfigParser.NoOptionError:
        log.info('No server certificate supplied. Will not encrypt messages.')
        server_cert = None

    try:
        try:
            destination = cp.get('messaging', 'destination')
            if destination == '':
                raise Ssm2Exception('No destination queue is configured.')
        except ConfigParser.NoOptionError, e:
            raise Ssm2Exception(e)

        sender = Ssm2(brokers,
                      cp.get('messaging','path'),
                      cert=cp.get('certificates','certificate'),
                      key=cp.get('certificates','key'),
                      dest=cp.get('messaging','destination'),
                      use_ssl=cp.getboolean('broker','use_ssl'),
                      capath=cp.get('certificates', 'capath'),
                      enc_cert=server_cert)

        if sender.has_msgs():
            sender.handle_connect()
            sender.send_all()
            log.info('SSM run has finished.')
        else:
            log.info('No messages found to send.')

    except (Ssm2Exception, CryptoException), e:
        print 'SSM failed to complete successfully. See log file for details.'
        log.error('SSM failed to complete successfully: %s' % e)
    except Exception, e:
        print 'SSM failed to complete successfully. See log file for details.'
        log.error('Unexpected exception in SSM: %s, %s' % (type(e), e))
        log.error('Exception type: %s' % type(e))

    try:
        sender.close_connection()
    except UnboundLocalError:
        # SSM not set up.
        pass

    log.info('SSM has shut down.')
    log.info('========================================')


if __name__ == '__main__':
    main()
Python
0.000001
@@ -3828,32 +3828,49 @@
 %0A    try:%0A
+        try:%0A    
 server_c
@@ -3907,16 +3907,144 @@
 erver')%0A
+            if not os.path.isfile(server_cert):%0A                raise Ssm2Exception('Server cerficate location incorrect.')%0A
     exce
@@ -4066,32 +4066,36 @@
 .NoOptionError:%0A
+    
         log.info
@@ -4158,32 +4158,36 @@
 ages.')%0A
+    
         server_cert = No
@@ -4193,33 +4193,28 @@
 one%0A
-%0A
     
-try:
 %0A    try
f3ec85cd7baf65036ed76a2c4ab4fe935b81b805
introduce logging
midas/scripts/md_config.py
midas/scripts/md_config.py
# -*- coding: utf-8 -*- import sys from midas.scripts import MDCommand import midas.config as md_cfg class MDConfig(MDCommand): """ Read all configuration files, print the final configuration and exit. This can be used to see how a configuration file (e.g. a job file) alters the whole configuration or to generate a default configuration file which is going to be altered in a second step. """ POS_ARG = { 'dest': 'job_cfg', 'nargs': '?', 'metavar': 'FILE', 'help': 'additional configuration file to read'} def __init__(self, argv): MDCommand.__init__(self, argv) if self.args.job_cfg: md_cfg.read(self.args.job_cfg) def run(self): md_cfg.get_configparser().write(sys.stdout)
Python
0
@@ -18,16 +18,31 @@
 -8 -*-%0A%0A
+import logging%0A
 import s
@@ -113,16 +113,54 @@
 md_cfg%0A%0A
+logger = logging.getLogger(__name__)%0A%0A
 class MD
8790eec0fdd94beeb4d0ceac8b24a1de77bd3eee
Update sql2rf.py
bin/sql2rf.py
bin/sql2rf.py
#!/usr/bin/env python
# -*- coding: utf8 -*-

"""Script to search for records within an SQL database created using
snapshot2sql and convert to Researcher Format."""

# Import required modules
import datetime
import getopt
# import sys
from iams2rf import *

__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'


def usage():
    print('========================================')
    print('sql2rf')
    print('IAMS data extraction for Researcher Format')
    print('========================================')
    print('This utility searches an SQL database of IAMS records')
    print('created using the utility snapshot2sql')
    print('and converts matching records to Researcher Format')
    print('\nCorrect syntax is:')
    print('sql2rf -d DB_PATH -r REQUEST_PATH -o OUTPUT_FOLDER [OPTIONS]')
    print('\nSearch DB_PATH for records meeting criteria in REQUEST_PATH.')
    print(' -d Path to the SQL database')
    print(' -r Path to Outlook message containing details of the request')
    print(' -o Folder to save Researcher Format output files')
    print('\nUse quotation marks (") around arguments which contain spaces')
    print('\nIf REQUEST_PATH is not specified you will be given the option to set parameters for the output')
    print('\nOptions:')
    print(' --debug Debug mode.')
    print(' --help Show this message and exit.')
    exit_prompt()


def main(argv=None):
    if argv is None:
        name = str(sys.argv[1])

    db_path, request_path, output_folder = '', '', ''
    debug = False

    try:
        opts, args = getopt.getopt(argv, 'd:r:o:',
                                   ['db_path=', 'request_path=', 'output_folder=', 'debug', 'help'])
    except getopt.GetoptError as err:
        exit_prompt('Error: {}'.format(err))
    if opts is None or not opts:
        usage()
    for opt, arg in opts:
        if opt == '--help':
            usage()
        elif opt == '--debug':
            debug = True
        elif opt in ['-d', '--db_path']:
            db_path = arg
        elif opt in ['-r', '--request_path']:
            request_path = arg
        elif opt in ['-o', '--output_folder']:
            output_folder = arg
        else:
            exit_prompt('Error: Option {} not recognised'.format(opt))

    iams2rf_sql2rf(db_path, request_path, output_folder, debug)

    print('\n\nAll processing complete')
    print('----------------------------------------')
    print(str(datetime.datetime.now()))
    sys.exit()


if __name__ == '__main__':
    main(sys.argv[1:])
Python
0.000001
@@ -400,24 +400,82 @@
 f usage():%0D%0A
+    %22%22%22Function to print information about the script%22%22%22%0D%0A
 print('=
@@ -509,32 +509,32 @@
 ============')%0D%0A
-    
 print('sql2r
@@ -920,25 +920,8 @@
 ATH 
--o OUTPUT_FOLDER 
 %5BOPT
@@ -1141,78 +1141,8 @@
 ')%0D%0A
-    print(' -o Folder to save Researcher Format output files')%0D%0A
@@ -1322,24 +1322,24 @@
 e output')%0D%0A
-    
 print('%5C
@@ -1347,24 +1347,91 @@
 Options:')%0D%0A
+    print(' -o OUTPUT_FOLDER to save output files.')%0D%0A
 print('
544ddadc244df4c77c08cf6a97a4de84f3379f38
raise error if more than one result by id
atws/query.py
atws/query.py
from __future__ import absolute_import
import sys
from datetime import datetime
from xml.etree.ElementTree import Element, SubElement, tostring
from xml.sax.saxutils import escape
from .helpers import datetime_to_api_timezone
from .constants import (AUTOTASK_API_QUERY_ID_LIMIT,
                        AUTOTASK_API_QUERY_DATEFORMAT,
                        WRAPPER_DEFAULT_GET_ALL_ENTITIES)

PY3 = sys.version_info >= (3, 0)
QUERY_ENCODING = None
if PY3:
    QUERY_ENCODING = 'unicode'

XML_QUERY_ESCAPE = {
    '"' : '&quot;',
    "'" : '&apos;'}


def get_userdefined_field_list_items(wrapper,entity):
    query = Query('UserDefinedFieldListItem')
    query.WHERE('UdfFieldId', query.Equals, entity.id)
    return wrapper.query(query).fetch_all()


def get_entity_by_id(wrapper,entity_type,entity_id):
    result = get_entities_by_field_equals(wrapper, entity_type, 'id', entity_id, False)
    return result[0]


def get_entities_by_field_equals(wrapper,entity_type,field,value,udf=False):
    query = Query(entity_type)
    query.WHERE(field,query.Equals,value,udf)
    return wrapper.query(query)


def format_datetime_for_api_query(dt):
    return datetime_to_api_timezone(dt).strftime(AUTOTASK_API_QUERY_DATEFORMAT)


def query_escape(value):
    return escape(value, XML_QUERY_ESCAPE)


def get_id_query(entity_type,id_list):
    query = Query(entity_type)
    for entity_id in id_list:
        query.OR('id',query.Equals,entity_id)
    return query


def get_queries_for_entities_by_id(entity_type,
                                   id_list,
                                   id_limit=AUTOTASK_API_QUERY_ID_LIMIT,
                                   query_function=get_id_query):
    queries = yield_queries_for_entities_by_id(entity_type,
                                               id_list,
                                               id_limit,
                                               query_function)
    return list(queries)


def yield_queries_for_entities_by_id(entity_type,
                                     id_list,
                                     id_limit=AUTOTASK_API_QUERY_ID_LIMIT,
                                     query_function=get_id_query):
    query_ids = []
    for _id in id_list:
        if len(query_ids) == id_limit:
            yield query_function(entity_type, query_ids)
            query_ids = []
        else:
            query_ids.append(_id)
    if query_ids:
        yield query_function(entity_type, query_ids)


class Query(object):
    Equals='Equals'
    NotEqual='NotEqual'
    GreaterThan='GreaterThan'
    LessThan='LessThan'
    GreaterThanorEquals='GreaterThanorEquals'
    LessThanOrEquals='LessThanOrEquals'
    BeginsWith='BeginsWith'
    EndsWith='EndsWith'
    Contains='Contains'
    IsNotNull='IsNotNull'
    IsNull='IsNull'
    IsThisDay='IsThisDay'
    Like='Like'
    NotLike='NotLike'
    SoundsLike='SoundsLike'
    get_all_entities = None

    def FROM(self,entity_type):
        self.entity_type = entity_type

    def WHERE(self,field_name,field_condition,field_value,udf=False):
        self._add_field(None, field_name, field_condition, field_value, udf)

    def OR(self,field_name,field_condition,field_value,udf=False):
        self._add_field('OR', field_name, field_condition, field_value, udf)

    def AND(self,field_name,field_condition,field_value,udf=False):
        self._add_field('AND', field_name, field_condition, field_value, udf)

    def open_bracket(self,operator=None):
        attrib = {}
        if operator:
            attrib = {'operator':operator}
        self._cursor = SubElement(self._cursor,'condition',attrib=attrib)

    def close_bracket(self):
        self._close_cursor()

    def reset(self):
        self._query_elements = []
        self._query_elements.append(self._query)
        self._query.clear()
        self.minimum_id_xml = None
        self.minimum_id = None
        self.minimum_id_field = 'id'

    def get_query_xml(self):
        self._entityxml.text = self.entity_type
        if self.minimum_id:
            self._add_min_id_field()
        query_xml = tostring(self._queryxml, encoding=QUERY_ENCODING)
        #query_xml = query_xml.encode('utf-8')
        return query_xml

    def pretty_print(self):
        import xml.dom.minidom
        return xml.dom.minidom.parseString(self.get_query_xml()).toprettyxml()

    def set_minimum_id(self,minimum_id,field='id'):
        self.minimum_id = minimum_id
        self.minimum_id_field = field

    @property
    def _cursor(self):
        return self._query_elements[-1]

    @_cursor.setter
    def _cursor(self, element):
        self._query_elements.append(element)

    def _close_cursor(self):
        del(self._query_elements[-1])

    def _add_field(self,operator,field_name,field_condition,field_value,udf=False):
        attributes = {}
        if udf:
            attributes['udf'] = 'true'
        if operator:
            self.open_bracket(operator)
        field = SubElement(self._cursor,'field', attrib=attributes)
        field.text = field_name
        expression = SubElement(field,'expression',attrib={'op':field_condition})
        expression.text = self._process_field_value(field_value)
        if operator:
            self.close_bracket()
        return field,expression

    def _add_min_id_field(self):
        try:
            self._update_min_id_xml()
        except AttributeError:
            self._create_min_id_xml()

    def _update_min_id_xml(self):
        self.minimum_id_xml.text = self._process_field_value(self.minimum_id)

    def _create_min_id_xml(self):
        minimum_id = self._process_field_value(self.minimum_id)
        # use _add_field instead of self.AND because we need to keep the
        # expression
        expression = self._add_field('AND',
                                     self.minimum_id_field,
                                     self.GreaterThan,
                                     minimum_id)[1]
        self.minimum_id_xml = expression

    def _process_field_value(self,value):
        if type(value) is datetime:
            return format_datetime_for_api_query(value)
        return query_escape(str(value))

    def __init__(self,entity_type = None):
        self.get_all_entities = WRAPPER_DEFAULT_GET_ALL_ENTITIES
        self.entity_type = entity_type
        self._queryxml = Element('queryxml')
        self._entityxml = SubElement(self._queryxml, 'entity')
        self._query = SubElement(self._queryxml, 'query')
        self.reset()

    def __str__(self):
        return repr(self.get_query_xml())
Python
0.000001
@@ -1075,25 +1075,164 @@ -return result%5B0%5D%0A +for i, e in enumerate(result):%0A first_entity = e%0A if i == 1:%0A raise ValueError('too many results')%0A return first_entity%0A %0A%0Ade
2d35031cfdb98503f326cc375f6d9962daf1faf8
Set BaseAnimation.sleep_time as late as possible.
bibliopixel/animation/animation.py
bibliopixel/animation/animation.py
import contextlib, threading, time
from . runner import Runner
from .. import log
from .. threads.animation_threading import AnimationThreading


class BaseAnimation(object):
    free_run = False

    def __init__(self, led):
        self._led = led
        self.internal_delay = None

    def preRun(self, amt=1):
        self._led.all_off()

    def step(self, amt=1):
        raise RuntimeError("Base class step() called. This shouldn't happen")

    def cleanup(self):
        self.threading.stop_thread(wait=True)
        self._led.cleanup()

    def is_running(self):
        if self.threading.stop_event.isSet():
            return False

        if self.runner.max_steps:
            return self.cur_step < self.runner.max_steps

        return not (self.runner.until_complete and self.completed)

    def run_one_frame(self):
        timestamps = []

        def stamp():
            timestamps.append(time.time())

        stamp()

        self.step(self.runner.amt)
        stamp()

        self._led.frame_render_time = timestamps[1] - timestamps[0]
        self._led.push_to_driver()
        stamp()

        _report_framerate(timestamps)

        self.cur_step += 1
        if self.completed and self.runner.max_cycles > 0:
            if self.cycle_count < self.runner.max_cycles - 1:
                self.cycle_count += 1
                self.completed = False

        stamp()
        self.threading.wait(self.sleep_time, timestamps)

    @contextlib.contextmanager
    def run_context(self):
        self.preRun(self.runner.amt)
        try:
            yield
        finally:
            self.cleanup()

    def run_all_frames(self):
        with self.run_context():
            while self.is_running():
                self.run_one_frame()

    def set_runner(self, runner):
        self.runner = runner
        self.completed = False
        self._step = 0
        self.cur_step = 0
        self.cycle_count = 0

        if self.free_run:
            self.sleep_time = None
        elif self.internal_delay:
            self.sleep_time = self.internal_delay
        else:
            self.sleep_time = self.runner.sleep_time
        self._led.animation_sleep_time = self.sleep_time or 0

    def start(self):
        self.threading = AnimationThreading(self.runner, self.run_all_frames)
        self.threading.start()

    def run(self, **kwds):
        # DEPRECATED
        self.set_runner(Runner(**kwds))
        self.start()


def _report_framerate(timestamps):
    total_time = timestamps[-1] - timestamps[0]
    fps = int(1.0 / max(total_time, 0.001))
    log.debug("%dms/%dfps / Frame: %dms / Update: %dms",
              1000 * total_time, fps,
              1000 * (timestamps[1] - timestamps[0]),
              1000 * (timestamps[2] - timestamps[1]))
Python
0
@@ -1498,32 +1498,307 @@ _context(self):%0A + if self.free_run:%0A self.sleep_time = None%0A elif self.internal_delay:%0A self.sleep_time = self.internal_delay%0A else:%0A self.sleep_time = self.runner.sleep_time%0A self._led.animation_sleep_time = self.sleep_time or 0%0A%0A self.pre @@ -2209,283 +2209,8 @@ 0%0A%0A - if self.free_run:%0A self.sleep_time = None%0A elif self.internal_delay:%0A self.sleep_time = self.internal_delay%0A else:%0A self.sleep_time = self.runner.sleep_time%0A self._led.animation_sleep_time = self.sleep_time or 0%0A%0A
796952dca75a78e0b71b5809112bd0815fa87986
Method instructions format: text/x-web-intelligent
bika/lims/content/method.py
bika/lims/content/method.py
from AccessControl import ClassSecurityInfo
from Products.CMFCore.permissions import ModifyPortalContent, View
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.ATExtensions.ateapi import RecordsField as RecordsField
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.config import PROJECTNAME
import sys
from bika.lims import bikaMessageFactory as _
from zope.interface import implements

schema = BikaSchema.copy() + Schema((
    TextField('Instructions',
        default_content_type = 'text/plain',
        allowable_content_types = ('text/plain',),
        widget = TextAreaWidget(
            label = _("Method Instructions", "Instructions"),
            description = _("Technical description and instructions intended for analysts"),
        ),
    ),
    FileField('MethodDocument',  # XXX Multiple Method documents please
        widget = FileWidget(
            label = _("Method Document"),
            description = _("Load documents describing the method here"),
        )
    ),
))

schema['description'].schemata = 'default'
schema['description'].widget.visible = True
schema['description'].widget.label = _("Description")
schema['description'].widget.description = _("Describes the method in layman terms. This information is made available to lab clients")

class Method(BaseFolder):
    security = ClassSecurityInfo()
    displayContentsTab = False
    schema = schema

    _at_rename_after_creation = True
    def _renameAfterCreation(self, check_auto_id=False):
        from bika.lims.idserver import renameAfterCreation
        renameAfterCreation(self)

registerType(Method, PROJECTNAME)
Python
0.999106
@@ -621,21 +621,33 @@ = 'text/ -plain +x-web-intelligent ',%0A @@ -686,16 +686,69 @@ ext/ -plain',) +x-web-intelligent',),%0A default_output_type=%22text/html%22 ,%0A
5132d4f7719862db733ab7cbfe8eec79af90487e
Update the latest_query_at on ohloh
apps/applications/models.py
apps/applications/models.py
import os.path
import datetime
import urllib, re
from xml.dom.minidom import parse, parseString

# Django
from django.conf import settings
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.sites.managers import CurrentSiteManager
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse as django_reverse

# External
from easy_thumbnails.fields import ThumbnailerImageField
from autoslug.fields import AutoSlugField
from taggit.models import Tag
from taggit.managers import TaggableManager
from subdomains.utils import reverse
from licenses.fields import LicenseField
from jsonfield.fields import JSONField

# Methodmint
from publications.models import Reference, AutoReference
from authors.models import Author

from core.actions import object_saved


def application_file_path(instance=None, filename=None):
    return os.path.join('application', str(instance.id), filename)


# Application class
class Application(models.Model):
    def __unicode__(self):
        return "%s" % (self.name)

    def get_absolute_url(self):
        return reverse('application',kwargs={'application_id':str(self.id), 'application_slug':str(self.slug)}, subdomain='install')

    def get_absolute_path(self):
        return django_reverse('application', kwargs={'application_id':str(self.id), 'application_slug':str(self.slug)})

    # Fields
    name = models.CharField('Name', max_length = 50, blank = False)
    tagline = models.CharField('Tagline', max_length = 200, blank = False)
    slug = AutoSlugField(populate_from='name')
    description = models.TextField(blank = True)
    tags = TaggableManager() #through=TaggedMethod)

    url = models.URLField('URL', blank = True)
    source_url = models.URLField('Source URL (e.g. Github)', blank = True)

    icon = ThumbnailerImageField('Icon', max_length=255, upload_to=application_file_path, blank=True)
    image = ThumbnailerImageField('Image', max_length=255, upload_to=application_file_path, blank=True)

    license = LicenseField(required=False)

    objects = models.Manager()

    created_at = models.DateTimeField(auto_now_add = True, editable = False)
    updated_at = models.DateTimeField(auto_now = True, editable = False)

    created_by = models.ForeignKey(User, related_name='created_applications') # Author originally submitted method
    edited_by = models.ForeignKey(User, related_name='edited_applications', blank=True, null=True) # Author of latest edit

    authors = generic.GenericRelation(Author)
    references = generic.GenericRelation(Reference)
    autoreference = generic.GenericRelation(AutoReference)


# Release class
class Release(models.Model):
    def __unicode__(self):
        return "%s" % (self.release_date)

    def get_absolute_url(self):
        return reverse('application',kwargs={'application_id':str(self.id)})

    def version(self):
        # We use date-based versioning on all software yyyy.mm.dd
        return '%4d.%2d.%2d' % ( self.release_date.year, self.release_date.month, self.release_date.day )

    # Fields
    application = models.ForeignKey(Application, related_name='releases')
    # This is the date a release will be made available, for partners the software will be available before this date
    # as part of the partner benefits
    release_date = models.DateField()
    notes = models.TextField(blank = True)

    objects = models.Manager()

    created_at = models.DateTimeField(auto_now_add = True, editable = False)
    updated_at = models.DateTimeField(auto_now = True, editable = False)


# Feature class
class Feature(models.Model):
    def __unicode__(self):
        return "%s (%s)" % (self.title, self.application)

    # Fields
    application = models.ForeignKey(Application, related_name='features')
    title = models.CharField('Title', max_length = 50, blank = False)
    description = models.TextField(blank = True)
    image = ThumbnailerImageField(max_length=255, upload_to=application_file_path, blank=True)

    objects = models.Manager()

    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)

    class Meta:
        order_with_respect_to = 'application'


# Holds ohloh identity, and helper functions for retrieving, parsing and handling OhLoh project metadata
class Ohloh(models.Model):
    # def __unicode__(self):
    #     return "

    # On first save, check if we have an associated application; if not create it and autopopulate
    # this allows creating applications directly from just their ohloh id
    def save(self, force_insert=False, force_update=False):
        if self.pk == None:
            self.get_updated_data()
            self.update_application_data()
        super(Ohloh, self).save(force_insert, force_update)

    # Use ohloh data to autopopulate the parent application where data isn't currently set
    def update_application_data(self):
        if self.application.name == '':
            self.application.name = self.data['name']
        if self.application.description == '':
            self.application.description = self.data['description']
        for tag in self.data['tags']:
            self.application.tags.add( tag.replace('_','-') ) # replace is due to ohloh style tags being fugyly_as
        self.application.save()

    # Request data for this project as xml, parse out the data into a local data JSON structure for handling
    def get_updated_data(self):
        f = urllib.urlopen("https://www.ohloh.net/p/%s.xml?api_key=%s" % ( self.project_id, settings.OHLOH_API_KEY ) )
        # Build DOM for requested data
        dom = parse(f)
        f.close()

        if dom:
            data = {
                'languages':[],
                'tags':[],
            }
            # Iterate over available basic fields and pull them into our model
            for tag in [ 'name', 'description', 'user_count', 'twelve_month_contributor_count']:
                if dom.getElementsByTagName(tag):
                    data[ tag ] = dom.getElementsByTagName(tag)[0].childNodes[0].data.strip()

            # Find multiple tag elements, tags & languages and build lists
            for field, tag in [ ('tags','tag'), ('languages','language')]:
                if dom.getElementsByTagName(tag):
                    for el in dom.getElementsByTagName(tag):
                        data[ field ].append( el.childNodes[0].data.strip() )

            # Cleanup data
            self.data = data

    application = models.OneToOneField('Application', related_name='ohloh')
    project_id = models.CharField('Ohloh project ID/name', max_length = 50, blank = False)
    # data
    data = JSONField(editable=False,blank=True,default=dict())

    created_at = models.DateTimeField(auto_now_add = True, editable = False)
    updated_at = models.DateTimeField(auto_now = True, editable = False)
    # Below used to delay requests for data < 1/month or similar
    latest_query_at = models.DateTimeField(editable = False, null=True, blank=False)


# Action Stream
post_save.connect(object_saved, sender=Application)
Python
0.000001
@@ -6986,24 +6986,83 @@ data = data%0A + self.latest_query_at = datetime.datetime.now()%0A %0A app
39ea9c83646146ef09ce46acd646942a4cc9f03f
Update fabrictask to add devilry_statistics javascript.
devilry/project/develop/fabrictasks.py
devilry/project/develop/fabrictasks.py
import os
from os.path import exists, join, relpath
from os import remove, getcwd
import shutil

from fabric.api import local, abort, task
from fabric.context_managers import shell_env, lcd


DB_FILE = join('devilry_developfiles', 'db.sqlite3')
LANGUAGES = ['en', 'nb']


def _managepy(args, djangoenv='develop', environment={}, working_directory=None):
    working_directory = working_directory or getcwd()
    managepy_path = relpath('manage.py', working_directory)
    with shell_env(DJANGOENV=djangoenv, **environment):
        with lcd(working_directory):
            local('python {managepy_path} {args}'.format(managepy_path=managepy_path, args=args))


@task
def remove_db(djangoenv='develop'):
    """
    Remove ``db.sqlite3`` if it exists.
    """
    if djangoenv == 'sqlite_develop':
        if exists(DB_FILE):
            remove(DB_FILE)
    else:
        _managepy('dbdev_destroy', djangoenv=djangoenv)


@task
def syncmigrate(djangoenv='develop'):
    """
    Run ``bin/django_dev.py syncmigrate -v0 --noinput``
    """
    # _managepy('syncdb -v0 --noinput', djangoenv=djangoenv)
    _managepy('migrate --noinput', djangoenv=djangoenv)


@task
def reset_db(djangoenv='develop'):
    """
    Run ``remove_db`` followed by ``syncmigrate``.
    """
    remove_db(djangoenv=djangoenv)
    if djangoenv != 'sqlite_develop':
        _managepy('dbdev_init', djangoenv=djangoenv)
    syncmigrate(djangoenv=djangoenv)


# @task
# def sandbox():
#     _managepy('devilry_sandboxcreate -s "duck2050" -l "DUCK2050 - Programmering uten grenser"')


@task
def autodb(djangoenv='develop', no_groups=False):
    """
    Run ``remove_db``, ``syncmigrate`` and ``bin/django_dev.py devilry_developer_old_autodb -v2``

    :param djangoenv: The DJANGOENV to use.
    :param no_groups: Use ``autodb:no_groups=yes`` to run devilry_developer_old_autodb with --no-groups.
    """
    no_groups = no_groups == 'yes'
    autodb_args = ''
    if no_groups:
        autodb_args = '--no-groups'
    reset_db(djangoenv=djangoenv)
    _managepy('devilry_developer_old_autodb -v2 {}'.format(autodb_args))
    # _managepy('rebuild_index --noinput')


@task
def demodb(djangoenv='develop'):
    """
    Run ``remove_db``, ``syncmigrate`` and ``bin/django_dev.py devilry.project.develop_demodb``

    :param djangoenv: The DJANGOENV to use.
    """
    reset_db(djangoenv=djangoenv)
    _managepy('devilry_developer_demodb', djangoenv=djangoenv, environment={
        'DEVILRY_EMAIL_BACKEND': 'django.core.mail.backends.dummy.EmailBackend'
    })


def _demodb_managepy(command, djangoenv):
    _managepy(command, djangoenv=djangoenv, environment={
        'DEVILRY_EMAIL_BACKEND': 'django.core.mail.backends.dummy.EmailBackend'
    })


@task
def new_demodb(djangoenv='develop'):
    """
    Run ``remove_db``, ``syncmigrate`` and ... TODO

    :param djangoenv: The DJANGOENV to use.
    """
    reset_db(djangoenv=djangoenv)
    _demodb_managepy('devilry_developer_demodb_createusers', djangoenv=djangoenv)


@task
def makemessages():
    for languagecode in LANGUAGES:
        _managepy(
            'makemessages '
            '--ignore "static*" '
            '-l {}'.format(languagecode),
            working_directory='devilry')


@task
def makemessages_javascript():
    """
    Build
    """
    for languagecode in LANGUAGES:
        _managepy(
            'makemessages '
            '-d djangojs '
            '--ignore "app-all.js" '
            '--ignore "all-classes.js" '
            '--ignore "node_modules" '
            '--ignore "bower_components" '
            '-l {}'.format(languagecode),
            working_directory='devilry')


@task
def compilemessages():
    _managepy('compilemessages', working_directory='devilry')


@task
def sync_cradmin_theme_into_devilry_theme(cradmin_root_dir):
    """
    Copies ``cradmin_base/`` and ``cradmin_theme_default/`` into the
    devilry_theme ``less/`` directory.
    """
    devilry_theme_lessdir = os.path.join(*'devilry/devilry_theme3/staticsources/devilry_theme3/styles'.split('/'))
    cradmin_lessdir = os.path.join(cradmin_root_dir, *'django_cradmin/static/django_cradmin/src/less'.split('/'))
    for directory in 'cradmin_base', 'cradmin_theme_default', 'cradmin_theme_topmenu':
        sourcedir = os.path.join(cradmin_lessdir, directory)
        destinationdir = os.path.join(devilry_theme_lessdir, directory)
        print 'Syncing', sourcedir, 'to', destinationdir
        if os.path.exists(destinationdir):
            shutil.rmtree(destinationdir)
        shutil.copytree(sourcedir, destinationdir)
        local('git add {}'.format(destinationdir))


@task
def test():
    """
    Run all the tests.
    """
    _managepy('test devilry', djangoenv='test')


@task
def codeship_test():
    """
    Run all the tests with settings for CodeShip CI.
    """
    _managepy('test devilry', djangoenv='codeship_test')


@task
def remove_pyc_files():
    os.system('find . -name "*.pyc" -exec rm {} \;')


@task
def make_source_dist():
    local('git rm -r devilry/devilry_theme3/static/devilry_theme3/')
    local('ievv buildstatic --production --yarn-clean-node-modules')
    local('git add devilry/devilry_theme3/static/devilry_theme3/')
    local('python setup.py sdist')
Python
0
@@ -5104,32 +5104,109 @@ vilry_theme3/')%0A + local('git rm -r devilry/devilry_statistics/static/devilry_statistics/')%0A local('ievv @@ -5317,32 +5317,107 @@ vilry_theme3/')%0A + local('git add devilry/devilry_statistics/static/devilry_statistics/')%0A local('pytho
2ddf45b958f45aa1490e95ded2224fb307fe6b27
clean up
budget/database/sqlitedb.py
budget/database/sqlitedb.py
import sqlite3


class Datastore():
    """This class represents data-storage object.

    The Datastore will store all the account information for food and
    miscellaneous accounts.

    Attributes:
        _conn (sqlite3.Connection): A sqlite3.Connection object that
            represents the database
        _cursor (sqlite3.Cursor): A sqlite3.Cursor object that enables
            traversal over the records in the database
        _databaseFileName (str): The database filename on the disc
    """

    def __init__(self, dbFileName = None):
        if not dbFileName:
            self._databaseFileName = "budgetManager.db"
        else:
            self._databaseFileName = dbFileName
        self._conn = None
        self._cursor = None

    def connect(self):
        """Opens a database connection to the database file and creates tables."""
        try:
            self._conn = sqlite3.connect(self._databaseFileName)
            self._cursor = self._conn.cursor()

            # Create FoodAccount and MiscAccount database tables
            self._cursor.execute("CREATE TABLE IF NOT EXISTS FoodAccount (month text, year text, total real)")
            self._cursor.execute("CREATE TABLE IF NOT EXISTS MiscAccount (month text, year text, total real)")

            # Commit the change
            self._conn.commit()
        except Exception as e:
            self._conn.rollback()
            print "Error: while creating tables FoodAccount and/or MiscAccount:", e

    def disconnect(self):
        """Closes the database connection."""
        # Save (commit) the changes
        self._conn.commit()

        # Close the connection
        self._conn.close()

    def insertFoodAccount(self, month, year, amount):
        """Insert food account details into FoodAccount.

        Args:
            month (str): The month to insert the amount to
            year (str): The year to insert the amount to
            amount (float): The amount to be inserted
        """
        try:
            # We can use the sqlite3.Connection object as context manager to automatically commit or rollback transactions
            with self._conn:
                # First get the total amount for the month and year combination
                self._cursor.execute("SELECT total FROM FoodAccount WHERE month=:month and year=:year", {"month": month, "year": year})
                totalList = self._cursor.fetchall()

                # Check if the entry for arguments month and year exists in the database or not
                if not totalList:
                    # Create a new row in FoodAccount for the month and year
                    self._cursor.execute("INSERT INTO FoodAccount VALUES (?, ?, ?)", (month, year, amount))
                else:
                    totalAmount = float(amount) + totalList[0][0]
                    self._cursor.execute("UPDATE FoodAccount SET total = ? WHERE month = ? AND year = ?", (totalAmount, month, year))
        except sqlite3.IntegrityError:
            print "Error: couldn't add to FoodAccount", e

    def insertMiscAccount(self, month, year, amount):
        """Insert misc. account details into MiscAccount.

        Args:
            month (str): The month to insert the amount to
            year (str): The year to insert the amount to
            amount (float): The amount to be inserted
        """
        try:
            # We can use the sqlite3.Connection object as context manager to automatically commit or rollback transactions
            with self._conn:
                # First get the total amount for the month and year combination
                self._cursor.execute("SELECT total FROM MiscAccount WHERE month=:month and year=:year", {"month": month, "year": year})
                totalList = self._cursor.fetchall()

                # Check if the entry for arguments month and year exists in the database or not
                if not totalList:
                    # Create a new row in MiscAccount for the month and year
                    self._cursor.execute("INSERT INTO MiscAccount VALUES (?, ?, ?)", (month, year, amount))
                else:
                    totalAmount = float(amount) + totalList[0][0]
                    self._cursor.execute("UPDATE MiscAccount SET total = ? WHERE month = ? AND year = ?", (totalAmount, month, year))
        except sqlite3.IntegrityError:
            print "Error: couldn't add to MiscAccount", e

    def fetchFoodAccount(self, month=None, year=None):
        # NOTE: This method can combined with insertMiscAccount to refactor the code
        """Fetch the details from FoodAccount.

        Args:
            month (str): The month to fetch amount from
            year (str): The year to fetch amount from
        """
        if month is None or year is None:
            return str(-1) # Error case
        else:
            try:
                # We can use the sqlite3.Connection object as context manager to automatically commit or rollback transactions
                with self._conn:
                    self._cursor.execute("SELECT total FROM FoodAccount WHERE month=:month and year=:year", {"month": month, "year": year})
                    totalAmount = self._cursor.fetchall()
                    return totalAmount[0][0]
            except sqlite3.IntegrityError:
                print "Error: couldn't fetch from the FoodAccount", e
                return str(-1) # Error case

    def fetchMiscAccount(self, month=None, year=None):
        """Fetch the details from MiscAccount.

        Args:
            month (str): The month to fetch amount from
            year (str): The year to fetch amount from
        """
        if month is None or year is None:
            return str(-1) # Error case
        else:
            try:
                # We can use the sqlite3.Connection object as context manager to automatically commit or rollback transactions
                with self._conn:
                    self._cursor.execute("SELECT total FROM MiscAccount WHERE month=:month and year=:year", {"month": month, "year": year})
                    totalAmount = self._cursor.fetchall()
                    return totalAmount[0][0]
            except sqlite3.IntegrityError:
                print "Error: couldn't fetch from the MiscAccount", e
                return str(-1) # Error case


# XXX
db = Datastore()
db.connect()
db.insertFoodAccount("May", "2016", 5.5)
db.insertFoodAccount("May", "2016", 15.3)
db.insertMiscAccount("October", "2016", 44.3)
print db.fetchFoodAccount("May", "2016")
print db.fetchMiscAccount("October", "2016")
Python
0.000001
@@ -511,19 +511,17 @@ FileName - = += None):%0A
7c382a33fa3f691fcbf89621b48c0c9e3a921d03
update version number
vaspy/__init__.py
vaspy/__init__.py
__version__ = '0.1.0'

# add electro module


class VasPy(object):
    def __init__(self, filename):
        "Base class to be inherited by all classes in VASPy."
        self.filename = filename


class CarfileValueError(Exception):
    "Exception raised for errors in the CONTCAR-like file."
    pass


class UnmatchedDataShape(Exception):
    "Exception raised for errors in unmatched data shape."
    pass
Python
0.000002
@@ -12,17 +12,17 @@ = '0.1. -0 +1 ' # add @@ -26,22 +26,33 @@ add -electro module +d-band center calculation %0A%0A%0Ac
4d63320c2bf077e90cffb98286e0354dcab1fc64
Make runTestCases.py possible to run independently
build-tools/runTestCases.py
build-tools/runTestCases.py
#! /usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re
import subprocess
import sys

testCmdFile = 'build-tools/test.conf'
tcCmdReg = re.compile('^mvn\s.*$')
tcNameReg = re.compile('-Dtest=(.+?)\s')
tcModuleReg = re.compile('-pl\s(.+?)\s')

with open(testCmdFile) as fp:
    for line in fp:
        match = tcCmdReg.findall(line)
        if match:
            logFilePath = "testlog/" + tcNameReg.findall(line)[0] + ".log"
            print("[INFO] Running " + tcNameReg.findall(line)[0] + " test case for \"" + tcModuleReg.findall(line)[0] + "\"...")
            try:
                #maven build
                subprocess.check_call(match[0] + ">" + logFilePath, stderr=subprocess.STDOUT, shell=True)
                print("[SUCCESS] Test case " + tcNameReg.findall(line)[0] + " for \"" + tcModuleReg.findall(line)[0]+ "\" is completed!")
            except subprocess.CalledProcessError as e:
                print("[ERROR] This test case requires \"pmalloc\" memory service to pass, please check if \"pmalloc\" has been configured correctly! If \"pmalloc\" is installed, please refer to testlog/" + tcNameReg.findall(line)[0] + ".log for detailed information.")
                sys.exit(1)

print("[DONE] All test cases are completed! Log files are available under folder testlog!")
Python
0
@@ -840,16 +840,115 @@ port sys +%0Aimport os%0A%0AtestLogDir = %22testlog/%22%0A%0Aif not os.path.exists(testLogDir):%0A os.makedirs(testLogDir) %0A%0AtestCm @@ -1229,26 +1229,26 @@ ePath = -%22 test -log/%22 +LogDir + tcNam
cb7574b1f4f4362988ba8dbfa279a7ce78e29b6a
version bumb
vespa/__init__.py
vespa/__init__.py
__version__ = '0.4.6'

try:
    __VESPA_SETUP__
except NameError:
    __VESPA_SETUP__ = False

if not __VESPA_SETUP__:
    __all__ = ['FPPCalculation',
               'EclipsePopulation',
               'EBPopulation',
               'HEBPopulation',
               'BEBPopulation',
               'PlanetPopulation',
               'PopulationSet',
               'StarPopulation',
               'MultipleStarPopulation',
               'ColormatchMultipleStarPopulation',
               'Spectroscopic_MultipleStarPopulation',
               'BGStarPopulation',
               'BGStarPopulation_TRILEGAL',
               'BinaryPopulation',
               'Simulated_BinaryPopulation',
               'Raghavan_BinaryPopulation',
               'TriplePopulation',
               'MAInterpolationFunction']

    #StarPopulation & children
    from .stars.populations import StarPopulation
    from .stars.populations import MultipleStarPopulation
    from .stars.populations import ColormatchMultipleStarPopulation
    from .stars.populations import Spectroscopic_MultipleStarPopulation
    from .stars.populations import BGStarPopulation, BGStarPopulation_TRILEGAL
    from .stars.populations import BinaryPopulation
    from .stars.populations import Simulated_BinaryPopulation
    from .stars.populations import Raghavan_BinaryPopulation
    from .stars.populations import TriplePopulation

    from .transit_basic import MAInterpolationFunction

    #EclipsePopulation & children
    from .populations import EclipsePopulation
    from .populations import EBPopulation, HEBPopulation, BEBPopulation
    from .populations import PlanetPopulation
    from .populations import PopulationSet
    #from .populations import calculate_eclipses

    from .transitsignal import TransitSignal

    from .fpp import FPPCalculation
Python
0.000001
@@ -16,9 +16,9 @@ 0.4. -6 +7 '%0A%0At
a6270ec08d8c5945435a1b3334b9b363026a3b56
Use BadRequest exception
app/soc/modules/gci/views/student_forms.py
app/soc/modules/gci/views/student_forms.py
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module for students in GCI to upload their forms.
"""

__authors__ = [
  '"Lennard de Rijk" <ljvderijk@gmail.com>',
  ]


from django import forms
from django.utils.translation import ugettext

from google.appengine.dist import httplib
from google.appengine.ext import blobstore

from soc.views.helper import blobstore as bs_helper
from soc.views.helper import url_patterns

from soc.modules.gci.models.profile import GCIStudentInfo
from soc.modules.gci.views import forms as gci_forms
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper.url_patterns import url


DEF_NO_UPLOAD = ugettext('Please choose at least one file to upload.')


class UploadForm(gci_forms.GCIModelForm):
  """Django form to upload student forms
  """

  class Meta:
    model = GCIStudentInfo
    css_prefix = 'gci_student_forms'
    fields = ['consent_form', 'student_id_form']

  consent_form = forms.FileField(required=False)
  student_id_form = forms.FileField(required=False)

  def __init__(self, r, *args, **kwargs):
    """Initializes the FileFields.
    """
    super(UploadForm, self).__init__(*args, **kwargs)

    if self.instance:
      self.fields['consent_form']._file = self.instance.consent_form
      download_url = '%s?consent_form' %r.program().urlOf('gci_student_form_upload')
      self.fields['consent_form']._link = download_url
      self.fields['student_id_form']._file = self.instance.student_id_form
      download_url = '%s?student_id_form' %r.program().urlOf('gci_student_form_upload')
      self.fields['student_id_form']._link = download_url

  def clean(self):
    """Ensure that at least one of the fields has data.
    """
    cleaned_data = self.cleaned_data

    consent_form = cleaned_data.get('consent_form')
    student_id_form = cleaned_data.get('student_id_form')

    if not (consent_form or student_id_form):
      raise gci_forms.ValidationError(DEF_NO_UPLOAD)

    return cleaned_data


class StudentFormUpload(RequestHandler):
  """View for uploading your student forms.
  """

  def djangoURLPatterns(self):
    """The URL pattern for the view.
    """
    return [
        url(r'student/forms/%s$' % url_patterns.PROGRAM, self,
            name='gci_student_form_upload')]

  def checkAccess(self):
    """Denies access if you are not a student.
    """
    self.check.isActiveStudent()

  def templatePath(self):
    """Returns the path to the template.
    """
    return 'v2/modules/gci/student_forms/base.html'

  def get(self):
    """Handles download of the forms otherwise resumes normal rendering.
    """
    if 'consent_form' in self.data.GET:
      download = self.data.student_info.consent_form
    elif 'student_id_form' in self.data.GET:
      download = self.data.student_info.student_id_form
    else:
      return super(StudentFormUpload, self).get()

    # download has been requested
    if not download:
      self.error(httplib.NOT_FOUND, 'File not found')

    self.response = bs_helper.send_blob(download, save_as=True)

  def context(self):
    """Handler for default HTTP GET request.
    """
    context = {
        'page_name': 'Student form upload'
        }

    if self.data.POST:
      upload_form = UploadForm(self.redirect, self.data.POST,
                               instance=self.data.student_info)
    else:
      upload_form = UploadForm(self.redirect, instance=self.data.student_info)

    context['form'] = upload_form
    url = self.redirect.program().urlOf('gci_student_form_upload')
    context['upload_url'] = blobstore.create_upload_url(url)

    return context

  def post(self):
    """Handles POST requests for the bulk create page.
    """
    form = UploadForm(
        self.redirect,data=self.data.POST, instance=self.data.student_info,
        files=self.data.request.file_uploads)

    if not form.is_valid():
      # we are not storing this form, remove the uploaded blobs from the cloud
      for file in self.data.request.file_uploads.itervalues():
        file.delete()
      return self.get()

    # delete existing data
    cleaned_data = form.cleaned_data
    for field_name in self.data.request.file_uploads.keys():
      if field_name in cleaned_data:
        existing = getattr(self.data.student_info, field_name)
        if existing:
          existing.delete()

    form.save()

    self.redirect.program().to('gci_student_form_upload')


class StudentFormDownload(RequestHandler):
  """View for uploading your student forms.
  """

  def djangoURLPatterns(self):
    """The URL pattern for the view.
    """
    return [
        url(r'student/forms/%s$' % url_patterns.PROFILE, self,
            name='gci_student_form_download')]

  def checkAccess(self):
    """Denies access if you are not a host.
    """
    self.check.isHost()
    self.mutator.studentFromKwargs()

  def get(self):
    """Allows hosts to download the student forms.
    """
    download = None

    if 'consent_form' in self.data.GET:
      download = self.data.url_student_info.consent_form
    elif 'student_id_form' in self.data.GET:
      download = self.data.url_student_info.student_id_form
    else:
      return self.error(httplib.BAD_REQUEST, 'No file requested')

    # download has been requested
    if not download:
      self.error(httplib.NOT_FOUND, 'File not found')

    self.response = bs_helper.send_blob(download, save_as=True)
Python
0
@@ -885,16 +885,60 @@ bstore%0A%0A +from soc.logic.exceptions import BadRequest%0A from soc @@ -5769,46 +5769,24 @@ r -eturn self.error(httplib.BAD_REQUEST, +aise BadRequest( 'No
bbdc969214e698a62020603dafac9165d7bf6a84
add tests that train is called if no model passed
tests/cli/test_rasa_interactive.py
tests/cli/test_rasa_interactive.py
import argparse
from typing import Callable, Text
from unittest.mock import Mock

from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult

import rasa
from rasa.cli import interactive, train


def test_interactive_help(run: Callable[..., RunResult]):
    output = run("interactive", "--help")

    help_text = """usage: rasa interactive [-h] [-v] [-vv] [--quiet] [--e2e] [-m MODEL]
                        [--data DATA [DATA ...]] [--skip-visualization]
                        [--endpoints ENDPOINTS] [-c CONFIG] [-d DOMAIN]
                        [--out OUT] [--augmentation AUGMENTATION]
                        [--debug-plots] [--dump-stories] [--force]
                        [--persist-nlu-data]
                        {core} ... [model-as-positional-argument]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert output.outlines[i] == line


def test_interactive_core_help(run: Callable[..., RunResult]):
    output = run("interactive", "core", "--help")

    help_text = """usage: rasa interactive core [-h] [-v] [-vv] [--quiet] [-m MODEL] [-s STORIES]
                             [--skip-visualization] [--endpoints ENDPOINTS]
                             [-c CONFIG] [-d DOMAIN] [--out OUT]
                             [--augmentation AUGMENTATION] [--debug-plots]
                             [--dump-stories]
                             [model-as-positional-argument]"""

    lines = help_text.split("\n")

    for i, line in enumerate(lines):
        assert output.outlines[i] == line


def test_pass_arguments_to_rasa_train(
    default_stack_config: Text, monkeypatch: MonkeyPatch
) -> None:
    # Create parser
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])

    # Parse interactive command
    args = parser.parse_args(["interactive", "--config", default_stack_config])
    interactive._set_not_required_args(args)

    # Mock actual training
    mock = Mock()
    monkeypatch.setattr(rasa, "train", mock.method)

    # If the `Namespace` object does not have all required fields this will throw
    train.train(args)

    # Assert `train` was actually code
    mock.method.assert_called_once()
Python
0
@@ -211,16 +211,53 @@ , train%0A +from rasa import train as rasa_train%0A %0A%0Adef te @@ -2255,27 +2255,1713 @@ ly c -ode%0A mock.method +alled%0A mock.method.assert_called_once()%0A%0A%0Adef test_train_called_when_no_model_passed(%0A default_stack_config: Text, monkeypatch: MonkeyPatch,%0A) -%3E None:%0A parser = argparse.ArgumentParser()%0A sub_parser = parser.add_subparsers()%0A interactive.add_subparser(sub_parser, %5B%5D)%0A%0A args = parser.parse_args(%0A %5B%0A %22interactive%22,%0A %22--config%22,%0A default_stack_config,%0A %22--data%22,%0A %22examples/moodbot/data%22,%0A %5D%0A )%0A interactive._set_not_required_args(args)%0A%0A # Mock actual training and interactive learning methods%0A mock = Mock()%0A monkeypatch.setattr(train, %22train%22, mock.train_model)%0A monkeypatch.setattr(interactive, %22perform_interactive_learning%22, mock.method)%0A%0A interactive.interactive(args)%0A mock.train_model.assert_called_once()%0A%0A%0Adef test_train_core_called_when_no_model_passed_and_core(%0A default_stack_config: Text, monkeypatch: MonkeyPatch,%0A) -%3E None:%0A parser = argparse.ArgumentParser()%0A sub_parser = parser.add_subparsers()%0A interactive.add_subparser(sub_parser, %5B%5D)%0A%0A args = parser.parse_args(%0A %5B%0A %22interactive%22,%0A %22core%22,%0A %22--config%22,%0A default_stack_config,%0A %22--stories%22,%0A %22examples/moodbot/data/stories.md%22,%0A %22--domain%22,%0A %22examples/moodbot/domain.yml%22,%0A %5D%0A )%0A interactive._set_not_required_args(args)%0A%0A # Mock actual training and interactive learning methods%0A mock = Mock()%0A monkeypatch.setattr(train, %22train_core%22, mock.train_core)%0A monkeypatch.setattr(interactive, %22perform_interactive_learning%22, mock.method)%0A%0A interactive.interactive(args)%0A mock.train_core .ass
200a2492129cbfab4024c435e8971e79c8aa836f
Build scripts: the insertValue transformation no longer uses the re module
buildlib/transformations.py
buildlib/transformations.py
'''
The MIT License

Copyright (c) 2011 Steven G. Brown

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''

# Developed with Python v3.0.1

import io, os, re, sys


def insertValue(variableName, variableValue):
    '''
    Return a function that will transform the script contents by replacing an
    inline #INCLUDE tag with variableValue. For example, if this function is
    called with variableName='version' and variableValue='1.0', then any
    occurances of '#INCLUDE version' in the given script will be replaced with
    '1.0'.
    '''
    def insertValueTransformation(fileContents):
        return re.sub(r'#INCLUDE ' + variableName + '#', variableValue, fileContents)
    return insertValueTransformation


def insertExternalFiles(*includesDirectories):
    '''
    Return a function that will transform the script contents by including the
    contents of external files. For example, if the script contains the line:
    '#INCLUDE Frames.js;', then the file 'Frames.js' will be found in one of
    the includes directories and inserted in this location. If the inserted
    file has a license header, it will be removed. If the file to be inserted
    cannot be found, a ValueError will be thrown.
    '''
    includesRegex = re.compile(r'^#INCLUDE ([^;]*);$', re.MULTILINE)
    def insertExternalFilesTransformation(fileContents):
        while True:
            includesMatch = includesRegex.search(fileContents)
            if not includesMatch:
                break
            with io.open(_findFile(includesDirectories, includesMatch.group(1))) as includeFile:
                includeFileContents = _removeLicenseHeader(includeFile.read())
            leadingFileContents = fileContents[:includesMatch.start()]
            trailingFileContents = fileContents[includesMatch.end():]
            if len(trailingFileContents) >= 2 and trailingFileContents[:2] != '\n\n':
                trailingFileContents = '\n\n' + trailingFileContents
            fileContents =\
                leadingFileContents +\
                '//' + includesMatch.group() + '\n' +\
                '\n' +\
                includeFileContents.strip() +\
                trailingFileContents
        return fileContents
    return insertExternalFilesTransformation


def _findFile(searchDirectories, filename):
    '''
    Find a file in the given list of search directories. If found, the
    absolute path to this file will be returned. Otherwise, a ValueError will
    be thrown.
    '''
    for directory in searchDirectories:
        absolutePath = os.path.join(directory, filename)
        if os.path.exists(absolutePath):
            return absolutePath
    raise ValueError('\'' + filename + '\' not found in ' + str(searchDirectories))


def _removeLicenseHeader(scriptContents):
    '''
    Return the given script contents with the license header removed.
    '''
    licenseHeaderRegex = re.compile(r'^.*?\n\s\*/\n\n\s*(.*)', re.DOTALL)
    licenseHeaderMatch = licenseHeaderRegex.match(scriptContents)
    if licenseHeaderMatch:
        scriptContents = licenseHeaderMatch.group(1)
    return scriptContents


def prepend(filePath):
    '''
    Return a function that will transform the script contents by prepending
    the contents of the given file.
    '''
    with io.open(filePath) as fileToPrepend:
        fileToPrependContents = fileToPrepend.read()
    def prependTransformation(fileContents):
        return fileToPrependContents + '\n' + fileContents
    return prependTransformation
Python
0
@@ -1156,31 +1156,25 @@ lue( -variableName, variableV +includeTagName, v alue @@ -1285,25 +1285,27 @@ ag with -variableV +the given v alue. Fo @@ -1349,24 +1349,26 @@ ed with -variable +includeTag Name='ve @@ -1380,24 +1380,16 @@ ' and va -riableVa lue='1.0 @@ -1399,18 +1399,16 @@ then any -%0A occuran @@ -1413,16 +1413,18 @@ ances of +%0A '#INCLU @@ -1476,18 +1476,16 @@ ced with -%0A '1.0'.%0A @@ -1553,16 +1553,29 @@ urn -re.sub(r +fileContents.replace( '#IN @@ -1584,24 +1584,26 @@ UDE ' + -variable +includeTag Name + ' @@ -1612,33 +1612,11 @@ , va -riableValue, fileContents +lue )%0A%0A
6afc7a4b8ae60a85e804980bbeace8395d899683
Append self doctype to get_applicable_for_doctype_list
frappe/core/doctype/user_permission/user_permission.py
frappe/core/doctype/user_permission/user_permission.py
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
from frappe.permissions import (get_valid_perms, update_permission_property)
from frappe import _
from frappe.core.utils import find
from frappe.desk.form.linked_with import get_linked_doctypes

class UserPermission(Document):
	def validate(self):
		duplicate_exists = frappe.db.get_all(self.doctype, filters={
			'allow': self.allow,
			'for_value': self.for_value,
			'user': self.user,
			'applicable_for': self.applicable_for,
			'apply_to_all_doctypes': self.apply_to_all_doctypes,
			'name': ['!=', self.name]
		}, limit=1)
		if duplicate_exists:
			frappe.throw(_("User permission already exists"), frappe.DuplicateEntryError)

	def on_update(self):
		frappe.cache().delete_value('user_permissions')
		frappe.publish_realtime('update_user_permissions')

	def on_trash(self): # pylint: disable=no-self-use
		frappe.cache().delete_value('user_permissions')
		frappe.publish_realtime('update_user_permissions')

@frappe.whitelist()
def get_user_permissions(user=None):
	'''Get all users permissions for the user as a dict of doctype'''
	# if this is called from client-side,
	# user can access only his/her user permissions
	if frappe.request and frappe.local.form_dict.cmd == 'get_user_permissions':
		user = frappe.session.user

	if not user:
		user = frappe.session.user

	cached_user_permissions = frappe.cache().hget("user_permissions", user)

	if cached_user_permissions is not None:
		return cached_user_permissions

	out = {}

	def add_doc_to_perm(perm, doc_name):
		# group rules for each type
		# for example if allow is "Customer", then build all allowed customers
		# in a list
		if not out.get(perm.allow):
			out[perm.allow] = []

		out[perm.allow].append({
			'doc': doc_name,
			'applicable_for': perm.get('applicable_for')
		})

	try:
		for perm in frappe.get_all('User Permission',
			fields=['allow', 'for_value', 'applicable_for'],
			filters=dict(user=user)):

			meta = frappe.get_meta(perm.allow)
			add_doc_to_perm(perm, perm.for_value)

			if meta.is_nested_set():
				decendants = frappe.db.get_descendants(perm.allow, perm.for_value)
				for doc in decendants:
					add_doc_to_perm(perm, doc)

		frappe.cache().hset("user_permissions", user, out)
	except frappe.SQLError as e:
		if e.args[0]==1146:
			# called from patch
			pass

	return out

def user_permission_exists(user, allow, for_value, applicable_for=None):
	'''Checks if similar user permission already exists'''
	user_permissions = get_user_permissions(user).get(allow, [])
	if not user_permissions:
		return None

	has_same_user_permission = find(user_permissions,
		lambda perm:perm["doc"] == for_value and perm.get('applicable_for') == applicable_for)

	return has_same_user_permission

def get_applicable_for_doctype_list(doctype, txt, searchfield, start, page_len, filters):
	linked_doctypes = get_linked_doctypes(doctype, True).keys()

	if txt:
		linked_doctypes = [d for d in linked_doctypes if txt in d.lower()]

	linked_doctypes.sort()

	return_list = []
	for doctype in linked_doctypes[start:page_len]:
		return_list.append([doctype])

	return return_list
Python
0
@@ -3059,16 +3059,46 @@ .keys()%0A +%09linked_doctypes += %5Bdoctype%5D%0A %09if txt:
e977d997ab66196b519c60dea34e360dfa4fb15d
Complete decreasing pivot swap reverse sol
lc0031_next_permutation.py
lc0031_next_permutation.py
"""Leetcode 31. Next Permutation Medium URL: https://leetcode.com/problems/next-permutation/ Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers. If such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order). The replacement must be in-place and use only constant extra memory. Here are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column. 1,2,3 -> 1,3,2 3,2,1 -> 1,2,3 1,1,5 -> 1,5,1 """ class Solution(object): def nextPermutation(self, nums): """ :type nums: List[int] :rtype: None Do not return anything, modify nums in-place instead. """ pass def main(): pass if __name__ == '__main__': main()
Python
0.000002
@@ -396,17 +396,16 @@ memory.%0A -%0A Here are @@ -579,16 +579,42 @@ Solution +DecreasingPivotSwapReverse (object) @@ -773,54 +773,1296 @@ ad.%0A - %22%22%22%0A pass%0A%0A%0Adef main():%0A pas +%0A Time complexity: O(n).%0A Space complexity: O(1).%0A %22%22%22%0A # From backward find the first pos (pivot) which is not in decreasing order.%0A i = len(nums) - 1%0A while i %3E 0 and nums%5Bi - 1%5D %3E= nums%5Bi%5D:%0A i -= 1%0A%0A pivot = i - 1%0A%0A # If we cannot find that number, all numbers are increasing. Reverse them.%0A if pivot == -1:%0A nums.reverse()%0A return None%0A%0A # Find the first pos j with num which is bigger than pivot number. Swap them.%0A j = len(nums) - 1%0A while j %3E pivot and nums%5Bj%5D %3C= nums%5Bpivot%5D:%0A j -= 1%0A%0A nums%5Bpivot%5D, nums%5Bj%5D = nums%5Bj%5D, nums%5Bpivot%5D%0A%0A # Reverse the remaining numbers on the right of pivot.%0A left, right = pivot + 1, len(nums) - 1%0A while left %3C right:%0A nums%5Bleft%5D, nums%5Bright%5D = nums%5Bright%5D, nums%5Bleft%5D%0A%0A left += 1%0A right -= 1%0A%0A%0Adef main():%0A # 1,2,3 -%3E 1,3,2%0A nums = %5B1,2,3%5D%0A SolutionDecreasingPivotSwapReverse().nextPermutation(nums)%0A print nums%0A%0A # 3,2,1 -%3E 1,2,3%0A nums = %5B3,2,1%5D%0A SolutionDecreasingPivotSwapReverse().nextPermutation(nums)%0A print nums%0A%0A # 1,1,5 -%3E 1,5,1%0A nums = %5B1,1,5%5D%0A SolutionDecreasingPivotSwapReverse().nextPermutation(nums)%0A print num s%0A%0A%0A