_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q259100
OMXPlayer.quit
validation
def quit(self):
    """ Quit the player, blocking until the process has died """
    if self._process is None:
        logger.debug('Quit was called after self._process had already been released')
        return
    try:
        logger.debug('Quitting OMXPlayer')
        # Signal the whole process group so any children omxplayer spawned
        # receive SIGTERM as well.
        process_group_id = os.getpgid(self._process.pid)
        os.killpg(process_group_id, signal.SIGTERM)
        logger.debug('SIGTERM Sent to pid: %s' % process_group_id)
        # Block until the monitor thread has observed the process exit.
        self._process_monitor.join()
    except OSError:
        # getpgid/killpg raise OSError if the process is already gone.
        logger.error('Could not find the process to kill')
    # Always release the handle so repeated quit() calls are harmless.
    self._process = None
python
{ "resource": "" }
q259101
BlogDetailView.render_to_response
validation
def render_to_response(self, context, **response_kwargs):
    """
    Render the given context to a response, choosing the page template for
    ajax requests and the regular template names otherwise.
    """
    chosen_template = (self.page_template if self.request.is_ajax()
                       else self.get_template_names())
    return self.response_class(
        request=self.request,
        template=chosen_template,
        context=context,
        **response_kwargs
    )
python
{ "resource": "" }
q259102
translate_value
validation
def translate_value(document_field, form_value):
    """
    Given a document_field and a form_value this will translate the value
    to the correct result for mongo to use.
    """
    value = form_value
    if isinstance(document_field, ReferenceField):
        # Dereference: look the target document up by id; an empty form
        # value maps to None rather than triggering a lookup.
        value = document_field.document_type.objects.get(id=form_value) if form_value else None
    return value
python
{ "resource": "" }
q259103
trim_field_key
validation
def trim_field_key(document, field_key):
    """
    Return the longest "_"-delimited prefix of ``field_key`` that is an
    attribute on ``document``, together with the trailing pieces that were
    stripped off.

    return (key, left_over_array)
    """
    stripped_pieces = []
    candidate = field_key
    # Peel one "_"-separated word off the right end until the remaining
    # prefix is a real attribute (or nothing is left).
    while candidate and not hasattr(document, candidate):
        words = candidate.split("_")
        stripped_pieces.append(words.pop())
        candidate = u"_".join(words)
    stripped_pieces.reverse()
    return candidate, stripped_pieces
python
{ "resource": "" }
q259104
BaseMongoAdmin.has_edit_permission
validation
def has_edit_permission(self, request):
    """
    Can edit this object.

    Requires an authenticated, active staff user.
    """
    user = request.user
    return user.is_authenticated and user.is_active and user.is_staff
python
{ "resource": "" }
q259105
BaseMongoAdmin.has_add_permission
validation
def has_add_permission(self, request):
    """
    Can add this object.

    Requires an authenticated, active staff user.
    """
    user = request.user
    return user.is_authenticated and user.is_active and user.is_staff
python
{ "resource": "" }
q259106
BaseMongoAdmin.has_delete_permission
validation
def has_delete_permission(self, request):
    """
    Can delete this object.

    Stricter than edit/add: only an authenticated, active superuser may delete.
    """
    user = request.user
    return user.is_authenticated and user.is_active and user.is_superuser
python
{ "resource": "" }
q259107
MongoModelFormBaseMixin.set_form_fields
validation
def set_form_fields(self, form_field_dict, parent_key=None, field_type=None):
    """
    Set the form fields for every key in the form_field_dict.

    Params:
        form_field_dict -- a dictionary created by get_form_field_dict
        parent_key -- the key for the previous key in the recursive call
        field_type -- used to determine what kind of field we are setting
    """
    for form_key, field_value in form_field_dict.items():
        form_key = make_key(parent_key, form_key) if parent_key is not None else form_key
        if isinstance(field_value, tuple):
            set_list_class = False
            base_key = form_key

            # Style list fields
            if ListField in (field_value.field_type, field_type):
                # Nested lists/embedded docs need special care to get
                # styles to work out nicely.
                if parent_key is None or ListField == field_value.field_type:
                    if field_type != EmbeddedDocumentField:
                        field_value.widget.attrs['class'] += ' listField {0}'.format(form_key)
                        set_list_class = True
                else:
                    field_value.widget.attrs['class'] += ' listField'

                # Compute number value for list key
                list_keys = [field_key for field_key in self.form.fields.keys()
                             if has_digit(field_key)]
                key_int = 0
                while form_key in list_keys:
                    key_int += 1
                    form_key = make_key(form_key, key_int)

            if parent_key is not None:
                # Get the base key for our embedded field class
                valid_base_keys = [model_key for model_key in self.model_map_dict.keys()
                                   if not model_key.startswith("_")]
                while base_key not in valid_base_keys and base_key:
                    base_key = make_key(base_key, exclude_last_string=True)

                # We need to remove the trailing number from the key
                # so that grouping will occur on the front end when we have a list.
                embedded_key_class = None
                if set_list_class:
                    # BUGFIX: the format string previously had no placeholder
                    # (" listField".format(base_key) was a no-op); mirror the
                    # ' listField {0}' pattern used above so the CSS class
                    # actually carries the base key.
                    field_value.widget.attrs['class'] += " listField {0}".format(base_key)
                    # NOTE(review): ``field_key`` here is the variable leaked
                    # from the list_keys comprehension above (Python 2
                    # scoping); on Python 3 this raises NameError — confirm
                    # whether ``form_key`` was intended.
                    embedded_key_class = make_key(field_key, exclude_last_string=True)

                field_value.widget.attrs['class'] += " embeddedField"

                # Setting the embedded key correctly allows to visually nest the
                # embedded documents on the front end.
                if base_key == parent_key:
                    field_value.widget.attrs['class'] += ' {0}'.format(base_key)
                else:
                    field_value.widget.attrs['class'] += ' {0} {1}'.format(base_key, parent_key)

                if embedded_key_class is not None:
                    field_value.widget.attrs['class'] += ' {0}'.format(embedded_key_class)

            default_value = self.get_field_value(form_key)

            # Style embedded documents
            if isinstance(default_value, list) and len(default_value) > 0:
                key_index = int(form_key.split("_")[-1])
                new_base_key = make_key(form_key, exclude_last_string=True)
                for list_value in default_value:
                    # Note, this is copied every time so each widget gets a different class
                    list_widget = deepcopy(field_value.widget)
                    new_key = make_key(new_base_key, six.text_type(key_index))
                    list_widget.attrs['class'] += " {0}".format(make_key(base_key, key_index))
                    self.set_form_field(list_widget, field_value.document_field, new_key, list_value)
                    key_index += 1
            else:
                self.set_form_field(field_value.widget, field_value.document_field, form_key, default_value)
        elif isinstance(field_value, dict):
            self.set_form_fields(field_value, form_key, field_value.get("_field_type", None))
python
{ "resource": "" }
q259108
MongoModelFormBaseMixin.get_field_value
validation
def get_field_value(self, field_key):
    """
    Given field_key will return value held at self.model_instance.  If
    model_instance has not been provided will return None.
    """

    def get_value(document, field_key):
        # Short circuit the function if we do not have a document
        if document is None:
            return None

        current_key, new_key_array = trim_field_key(document, field_key)
        key_array_digit = int(new_key_array[-1]) if new_key_array and has_digit(new_key_array) else None
        new_key = make_key(new_key_array)

        if key_array_digit is not None and len(new_key_array) > 0:
            # Handling list fields
            if len(new_key_array) == 1:
                return_data = document._data.get(current_key, [])
            elif isinstance(document, BaseList):
                # Recurse into every element of the list.
                return_list = []
                if len(document) > 0:
                    return_list = [get_value(doc, new_key) for doc in document]
                return_data = return_list
            else:
                return_data = get_value(getattr(document, current_key), new_key)
        elif len(new_key_array) > 0:
            # Still more key segments to consume: descend one level.
            return_data = get_value(document._data.get(current_key), new_key)
        else:
            # Handling all other fields and id
            try:
                # mongoengine's data structures can raise
                # "TypeError: getattr(): attribute name must be string" here;
                # BUGFIX: catch that specific TypeError instead of a bare
                # except, which also swallowed KeyboardInterrupt/SystemExit.
                return_data = (document._data.get(None, None) if current_key == "id" else
                               document._data.get(current_key, None))
            except TypeError:
                return_data = document._data.get(current_key, None)
        return return_data

    if self.is_initialized:
        return get_value(self.model_instance, field_key)
    else:
        return None
python
{ "resource": "" }
q259109
has_digit
validation
def has_digit(string_or_list, sep="_"):
    """
    Return True if the last word or element of ``string_or_list`` is a digit.

    ``sep`` is used when a string is given to know what separates one word
    from another.
    """
    if not isinstance(string_or_list, (tuple, list)):
        # Normalize strings into a list of words, then re-check.
        return has_digit(string_or_list.split(sep))
    if len(string_or_list):
        return six.text_type(string_or_list[-1]).isdigit()
    return False
python
{ "resource": "" }
q259110
make_key
validation
def make_key(*args, **kwargs):
    """
    Join any number of lists and strings in order as one string separated by
    the ``sep`` kwarg.  ``sep`` defaults to u"_".

    Add exclude_last_string=True as a kwarg to exclude the last item in a
    given string after being split by sep.  Note if you only have one word
    in your string you can end up getting an empty string.

    Example uses:

    >>> make_key('hi', 'my', 'friend')
    u'hi_my_friend'
    >>> make_key('hi', 'my', 'friend', sep='i')
    'hiimyifriend'
    >>> make_key('hi', 'my', 'friend', ['this', 'be', 'what'], sep='i')
    'hiimyifriendithisibeiwhat'
    >>> make_key('hi', 'my', 'friend', ['this', 'be', 'what'])
    u'hi_my_friend_this_be_what'
    """
    sep = kwargs.get('sep', u"_")
    exclude_last_string = kwargs.get('exclude_last_string', False)
    string_array = []
    for arg in args:
        if isinstance(arg, list):
            string_array.append(six.text_type(sep.join(arg)))
        else:
            if exclude_last_string:
                new_key_array = arg.split(sep)[:-1]
                if len(new_key_array) > 0:
                    # BUGFIX: propagate the caller's separator; the recursive
                    # call previously always used the default u"_" even when a
                    # custom ``sep`` was supplied.
                    string_array.append(make_key(new_key_array, sep=sep))
            else:
                string_array.append(six.text_type(arg))
    return sep.join(string_array)
python
{ "resource": "" }
q259111
MongoModelForm.set_fields
validation
def set_fields(self): """Sets existing data to form fields.""" # Get dictionary map of current model if self.is_initialized: self.model_map_dict = self.create_document_dictionary(self.model_instance) else: self.model_map_dict = self.create_document_dictionary(self.model) form_field_dict = self.get_form_field_dict(self.model_map_dict) self.set_form_fields(form_field_dict)
python
{ "resource": "" }
q259112
MongoModelForm.set_post_data
validation
def set_post_data(self):
    """
    Need to set form data so that validation on all post data occurs and
    places newly entered form data on the form object.
    """
    self.form.data = self.post_data_dict

    # Specifically adding list field keys to the form so they are included
    # in form.cleaned_data after the call to is_valid
    for field_key, field in self.form.fields.items():
        if has_digit(field_key):
            # We have a list field.
            base_key = make_key(field_key, exclude_last_string=True)

            # Add new key value with field to form fields so validation
            # will work correctly
            # NOTE(review): ``base_key in key`` is a substring test, so a base
            # key like "tag" would also match "tags_0" — confirm field names
            # cannot collide this way.
            for key in self.post_data_dict.keys():
                if base_key in key:
                    self.form.fields.update({key: field})
python
{ "resource": "" }
q259113
MongoModelForm.get_form
validation
def get_form(self):
    """
    Generate the form for view.
    """
    self.set_fields()
    if self.post_data_dict is not None:
        # Bind submitted POST data so validation runs against it.
        self.set_post_data()
    return self.form
python
{ "resource": "" }
q259114
MongoModelForm.create_list_dict
validation
def create_list_dict(self, document, list_field, doc_key):
    """
    Generates a dictionary representation of the list field.  Document
    should be the document the list_field comes from.

    DO NOT CALL DIRECTLY
    """
    list_dict = {"_document": document}
    if isinstance(list_field.field, EmbeddedDocumentField):
        # Lists of embedded documents also get the embedded document's own
        # field map merged in, so its members can be rendered.
        list_dict.update(self.create_document_dictionary(document=list_field.field.document_type_obj,
                                                         owner_document=document))

    # Set the list_dict after it may have been updated
    list_dict.update({"_document_field": list_field.field,
                      "_key": doc_key,
                      "_field_type": ListField,
                      "_widget": get_widget(list_field.field),
                      "_value": getattr(document, doc_key, None)})
    return list_dict
python
{ "resource": "" }
q259115
MongoModelForm.create_document_dictionary
validation
def create_document_dictionary(self, document, document_key=None, owner_document=None):
    """
    Given document generates a dictionary representation of the document.
    Includes the widget for each for each field in the document.
    """
    doc_dict = self.create_doc_dict(document, document_key, owner_document)

    for doc_key, doc_field in doc_dict.items():
        # Base fields should not be evaluated
        if doc_key.startswith("_"):
            continue

        if isinstance(doc_field, ListField):
            doc_dict[doc_key] = self.create_list_dict(document, doc_field, doc_key)
        elif isinstance(doc_field, EmbeddedDocumentField):
            # Recurse into the embedded document's own fields.
            doc_dict[doc_key] = self.create_document_dictionary(doc_dict[doc_key].document_type_obj,
                                                                doc_key)
        else:
            # Plain field: wrap it with the metadata the form builder expects.
            doc_dict[doc_key] = {"_document": document,
                                 "_key": doc_key,
                                 "_document_field": doc_field,
                                 "_widget": get_widget(doc_dict[doc_key], getattr(doc_field, 'disabled', False))}
    return doc_dict
python
{ "resource": "" }
q259116
get_widget
validation
def get_widget(model_field, disabled=False):
    """Choose which widget to display for a field."""
    attrs = get_attrs(model_field, disabled)

    if hasattr(model_field, "max_length") and not model_field.max_length:
        # No max_length set: treat as free-form text and use a textarea.
        return forms.Textarea(attrs=attrs)
    elif isinstance(model_field, DateTimeField):
        return forms.DateTimeInput(attrs=attrs)
    elif isinstance(model_field, BooleanField):
        return forms.CheckboxInput(attrs=attrs)
    elif isinstance(model_field, ReferenceField) or model_field.choices:
        # References and choice fields render as a select box.
        return forms.Select(attrs=attrs)
    elif (isinstance(model_field, ListField) or
          isinstance(model_field, EmbeddedDocumentField) or
          isinstance(model_field, GeoPointField)):
        # Compound fields have no single widget; they are handled elsewhere.
        return None
    else:
        return forms.TextInput(attrs=attrs)
python
{ "resource": "" }
q259117
get_attrs
validation
def get_attrs(model_field, disabled=False):
    """Set attributes on the display widget."""
    # Every widget shares the same base sizing class.
    attrs = {'class': 'span6 xlarge'}
    # ObjectId fields are never editable, so they render read-only too.
    if disabled or isinstance(model_field, ObjectIdField):
        attrs['class'] += ' disabled'
        attrs['readonly'] = 'readonly'
    return attrs
python
{ "resource": "" }
q259118
get_form_field_class
validation
def get_form_field_class(model_field):
    """Gets the default form field for a mongoengine field."""
    FIELD_MAPPING = {
        IntField: forms.IntegerField,
        StringField: forms.CharField,
        FloatField: forms.FloatField,
        BooleanField: forms.BooleanField,
        DateTimeField: forms.DateTimeField,
        DecimalField: forms.DecimalField,
        URLField: forms.URLField,
        EmailField: forms.EmailField
    }
    # Any unmapped mongoengine field type falls back to a plain CharField.
    return FIELD_MAPPING.get(model_field.__class__, forms.CharField)
python
{ "resource": "" }
q259119
DocumentListView.get_qset
validation
def get_qset(self, queryset, q):
    """Performs filtering against the default queryset returned by
    mongoengine.
    """
    if self.mongoadmin.search_fields and q:
        params = {}
        for field in self.mongoadmin.search_fields:
            if field == 'id':
                # check to make sure this is a valid ID, otherwise we just continue
                if is_valid_object_id(q):
                    # An exact pk match short-circuits all other search fields.
                    return queryset.filter(pk=q)
                continue
            search_key = "{field}__icontains".format(field=field)
            params[search_key] = q
        queryset = queryset.filter(**params)
    return queryset
python
{ "resource": "" }
q259120
DocumentListView.get_context_data
validation
def get_context_data(self, **kwargs):
    """Injects data into the context to replicate CBV ListView."""
    context = super(DocumentListView, self).get_context_data(**kwargs)
    context = self.set_permissions_in_context(context)

    # NOTE(review): returning an HttpResponse from get_context_data is
    # unusual for a CBV — confirm the caller actually short-circuits on it.
    if not context['has_view_permission']:
        return HttpResponseForbidden("You do not have permissions to view this content.")

    context['object_list'] = self.get_queryset()
    context['document'] = self.document
    context['app_label'] = self.app_label
    context['document_name'] = self.document_name
    context['request'] = self.request

    # pagination bits
    context['page'] = self.page
    context['documents_per_page'] = self.documents_per_page
    if self.page > 1:
        previous_page_number = self.page - 1
    else:
        previous_page_number = None
    if self.page < self.total_pages:
        next_page_number = self.page + 1
    else:
        next_page_number = None
    context['previous_page_number'] = previous_page_number
    context['has_previous_page'] = previous_page_number is not None
    context['next_page_number'] = next_page_number
    context['has_next_page'] = next_page_number is not None
    context['total_pages'] = self.total_pages

    # Part of upcoming list view form functionality
    if self.queryset.count():
        context['keys'] = ['id', ]

        # Show those items for which we've got list_fields on the mongoadmin
        for key in [x for x in self.mongoadmin.list_fields if x != 'id' and x in self.document._fields.keys()]:
            # TODO - Figure out why this EmbeddedDocumentField and ListField breaks this view
            # Note - This is the challenge part, right? :)
            if isinstance(self.document._fields[key], EmbeddedDocumentField):
                continue
            if isinstance(self.document._fields[key], ListField):
                continue
            context['keys'].append(key)
    if self.mongoadmin.search_fields:
        context['search_field'] = True
    return context
python
{ "resource": "" }
q259121
DocumentListView.post
validation
def post(self, request, *args, **kwargs): """Creates new mongoengine records.""" # TODO - make sure to check the rights of the poster #self.get_queryset() # TODO - write something that grabs the document class better form_class = self.get_form_class() form = self.get_form(form_class) mongo_ids = self.get_initial()['mongo_id'] for form_mongo_id in form.data.getlist('mongo_id'): for mongo_id in mongo_ids: if form_mongo_id == mongo_id: self.document.objects.get(pk=mongo_id).delete() return self.form_invalid(form)
python
{ "resource": "" }
q259122
MongonautViewMixin.get_mongoadmins
validation
def get_mongoadmins(self):
    """ Returns a list of all mongoadmin implementations for the site """
    apps = []
    for app_name in settings.INSTALLED_APPS:
        mongoadmin = "{0}.mongoadmin".format(app_name)
        try:
            module = import_module(mongoadmin)
        except ImportError as e:
            # Apps without a mongoadmin module are skipped; any other import
            # failure raised from inside the module is propagated.
            if str(e).startswith("No module named"):
                continue
            raise e

        app_store = AppStore(module)
        apps.append(dict(
            app_name=app_name,
            obj=app_store
        ))
    return apps
python
{ "resource": "" }
q259123
MongonautViewMixin.set_mongonaut_base
validation
def set_mongonaut_base(self):
    """ Sets a number of commonly used attributes """
    if hasattr(self, "app_label"):
        # prevents us from calling this multiple times
        return None
    self.app_label = self.kwargs.get('app_label')
    self.document_name = self.kwargs.get('document_name')

    # TODO Allow this to be assigned via url variable
    self.models_name = self.kwargs.get('models_name', 'models')

    # import the models file
    self.model_name = "{0}.{1}".format(self.app_label, self.models_name)
    self.models = import_module(self.model_name)
python
{ "resource": "" }
q259124
MongonautViewMixin.set_permissions_in_context
validation
def set_permissions_in_context(self, context=None):
    """ Provides permissions for mongoadmin for use in the context.

    BUGFIX: the default used to be the mutable ``context={}``; that single
    dict is shared across every call using the default, leaking permission
    values between requests.  A fresh dict is now created per call.
    """
    if context is None:
        context = {}
    context['has_view_permission'] = self.mongoadmin.has_view_permission(self.request)
    context['has_edit_permission'] = self.mongoadmin.has_edit_permission(self.request)
    context['has_add_permission'] = self.mongoadmin.has_add_permission(self.request)
    context['has_delete_permission'] = self.mongoadmin.has_delete_permission(self.request)
    return context
python
{ "resource": "" }
q259125
MongonautFormViewMixin.process_post_form
validation
def process_post_form(self, success_message=None): """ As long as the form is set on the view this method will validate the form and save the submitted data. Only call this if you are posting data. The given success_message will be used with the djanog messages framework if the posted data sucessfully submits. """ # When on initial args are given we need to set the base document. if not hasattr(self, 'document') or self.document is None: self.document = self.document_type() self.form = MongoModelForm(model=self.document_type, instance=self.document, form_post_data=self.request.POST).get_form() self.form.is_bound = True if self.form.is_valid(): self.document_map_dict = MongoModelForm(model=self.document_type).create_document_dictionary(self.document_type) self.new_document = self.document_type # Used to keep track of embedded documents in lists. Keyed by the list and the number of the # document. self.embedded_list_docs = {} if self.new_document is None: messages.error(self.request, u"Failed to save document") else: self.new_document = self.new_document() for form_key in self.form.cleaned_data.keys(): if form_key == 'id' and hasattr(self, 'document'): self.new_document.id = self.document.id continue self.process_document(self.new_document, form_key, None) self.new_document.save() if success_message: messages.success(self.request, success_message) return self.form
python
{ "resource": "" }
q259126
MongonautFormViewMixin.process_document
validation
def process_document(self, document, form_key, passed_key):
    """
    Given the form_key will evaluate the document and set values correctly
    for the document given.
    """
    if passed_key is not None:
        current_key, remaining_key_array = trim_field_key(document, passed_key)
    else:
        current_key, remaining_key_array = trim_field_key(document, form_key)

    key_array_digit = remaining_key_array[-1] if remaining_key_array and has_digit(remaining_key_array) else None
    remaining_key = make_key(remaining_key_array)

    if current_key.lower() == 'id':
        raise KeyError(u"Mongonaut does not work with models which have fields beginning with id_")

    # Create boolean checks to make processing document easier
    is_embedded_doc = (isinstance(document._fields.get(current_key, None), EmbeddedDocumentField)
                       if hasattr(document, '_fields') else False)
    # FIX: idiomatic identity test ("not x is None" -> "x is not None").
    is_list = key_array_digit is not None
    key_in_fields = current_key in document._fields.keys() if hasattr(document, '_fields') else False

    # This ensures you only go through each documents keys once, and do not duplicate data
    if key_in_fields:
        if is_embedded_doc:
            self.set_embedded_doc(document, form_key, current_key, remaining_key)
        elif is_list:
            self.set_list_field(document, form_key, current_key, remaining_key, key_array_digit)
        else:
            value = translate_value(document._fields[current_key],
                                    self.form.cleaned_data[form_key])
            setattr(document, current_key, value)
python
{ "resource": "" }
q259127
MongonautFormViewMixin.set_embedded_doc
validation
def set_embedded_doc(self, document, form_key, current_key, remaining_key):
    """Get the existing embedded document if it exists, else created it."""
    # False is used as the getattr sentinel so that both a missing attribute
    # and a falsy existing value trigger creation of a fresh embedded doc.
    embedded_doc = getattr(document, current_key, False)
    if not embedded_doc:
        embedded_doc = document._fields[current_key].document_type_obj()

    new_key, new_remaining_key_array = trim_field_key(embedded_doc, remaining_key)
    # Recurse so nested embedded fields are processed with the shortened key.
    self.process_document(embedded_doc, form_key, make_key(new_key, new_remaining_key_array))
    setattr(document, current_key, embedded_doc)
python
{ "resource": "" }
q259128
MongonautFormViewMixin.set_list_field
validation
def set_list_field(self, document, form_key, current_key, remaining_key, key_array_digit):
    """1. Figures out what value the list ought to have
       2. Sets the list
    """
    document_field = document._fields.get(current_key)

    # Figure out what value the list ought to have
    # None value for ListFields make mongoengine very un-happy
    # NOTE(review): "(not list_value and not bool(list_value))" is logically
    # equivalent to just "not list_value".
    list_value = translate_value(document_field.field, self.form.cleaned_data[form_key])
    if list_value is None or (not list_value and not bool(list_value)):
        return None

    current_list = getattr(document, current_key, None)

    if isinstance(document_field.field, EmbeddedDocumentField):
        # Each list slot gets its own cache key so repeated form keys for the
        # same slot update one document instead of creating duplicates.
        embedded_list_key = u"{0}_{1}".format(current_key, key_array_digit)

        # Get the embedded document if it exists, else create it.
        embedded_list_document = self.embedded_list_docs.get(embedded_list_key, None)
        if embedded_list_document is None:
            embedded_list_document = document_field.field.document_type_obj()

        new_key, new_remaining_key_array = trim_field_key(embedded_list_document, remaining_key)
        self.process_document(embedded_list_document, form_key, new_key)

        list_value = embedded_list_document
        self.embedded_list_docs[embedded_list_key] = embedded_list_document

        if isinstance(current_list, list):
            # Do not add the same document twice
            if embedded_list_document not in current_list:
                current_list.append(embedded_list_document)
        else:
            setattr(document, current_key, [embedded_list_document])
    elif isinstance(current_list, list):
        current_list.append(list_value)
    else:
        setattr(document, current_key, [list_value])
python
{ "resource": "" }
q259129
with_tz
validation
def with_tz(request):
    """ Get the time with TZ enabled.

    Renders the active timezone name via the template engine with
    ``localtime`` switched on.

    BUGFIX: removed the unused local ``dt = datetime.now()`` — the rendered
    template derives everything from the request context.
    """
    t = Template('{% load tz %}{% localtime on %}{% get_current_timezone as TIME_ZONE %}{{ TIME_ZONE }}{% endlocaltime %}')
    c = RequestContext(request)
    response = t.render(c)
    return HttpResponse(response)
python
{ "resource": "" }
q259130
without_tz
validation
def without_tz(request):
    """ Get the time without TZ enabled """
    template = Template('{% load tz %}{% get_current_timezone as TIME_ZONE %}{{ TIME_ZONE }}')
    rendered = template.render(RequestContext(request))
    return HttpResponse(rendered)
python
{ "resource": "" }
q259131
is_valid_ip
validation
def is_valid_ip(ip_address):
    """ Check Validity of an IP address.

    Returns True for any syntactically valid IPv4/IPv6 address, else False.

    Cleanup: dropped the unused ``ip`` local and the unused ``as e`` binding.
    """
    try:
        # u'' + keeps the text-type coercion behaviour of the original
        # (Python 2 bytes/unicode compatibility).
        ipaddress.ip_address(u'' + ip_address)
        return True
    except ValueError:
        return False
python
{ "resource": "" }
q259132
is_local_ip
validation
def is_local_ip(ip_address):
    """ Check if IP is local (loopback).

    Returns True/False for parseable addresses, or None when the address is
    invalid — preserving the original tri-state contract.

    Cleanup: dropped the unused ``as e`` exception binding.
    """
    try:
        ip = ipaddress.ip_address(u'' + ip_address)
        return ip.is_loopback
    except ValueError:
        return None
python
{ "resource": "" }
q259133
EasyTimezoneMiddleware.process_request
validation
def process_request(self, request):
    """
    If we can get a valid IP from the request, look up that address in the
    database to get the appropriate timezone and activate it.

    Else, use the default.
    """
    if not request:
        return

    if not db_loaded:
        # GeoIP databases are loaded lazily on the first request.
        load_db()

    tz = request.session.get('django_timezone')
    if not tz:
        # use the default timezone (settings.TIME_ZONE) for localhost
        tz = timezone.get_default_timezone()

        client_ip = get_ip_address_from_request(request)
        # The header may carry a comma-separated chain (e.g. proxies);
        # use the first valid, non-local address.
        ip_addrs = client_ip.split(',')
        for ip in ip_addrs:
            if is_valid_ip(ip) and not is_local_ip(ip):
                if ':' in ip:
                    # IPv6 addresses go to the v6 database.
                    tz = db_v6.time_zone_by_addr(ip)
                    break
                else:
                    tz = db.time_zone_by_addr(ip)
                    break

    if tz:
        timezone.activate(tz)
        # Cache the result on the session so later requests skip the lookup.
        request.session['django_timezone'] = str(tz)
        if getattr(settings, 'AUTH_USER_MODEL', None) and getattr(request, 'user', None):
            detected_timezone.send(sender=get_user_model(), instance=request.user, timezone=tz)
    else:
        timezone.deactivate()
python
{ "resource": "" }
q259134
ElasticQuery.search
validation
def search(self):
    """ This is the most important method """
    try:
        parsed = json.loads(self.query)
    except ValueError:
        # A malformed JSON query string is reported as a plain False.
        return False

    outcome = self.model_query
    if 'filter' in parsed.keys():
        outcome = self.parse_filter(parsed['filter'])
    if 'sort' in parsed.keys():
        outcome = outcome.order_by(*self.sort(parsed['sort']))
    return outcome
python
{ "resource": "" }
q259135
ElasticQuery.parse_filter
validation
def parse_filter(self, filters):
    """ This method process the filters """
    for filter_type in filters:
        if filter_type == 'or' or filter_type == 'and':
            # Boolean group: collect the allowed sub-conditions first.
            conditions = []
            for field in filters[filter_type]:
                if self.is_field_allowed(field):
                    conditions.append(self.create_query(self.parse_field(field, filters[filter_type][field])))
            # Combine the collected conditions with the requested operator.
            if filter_type == 'or':
                self.model_query = self.model_query.filter(or_(*conditions))
            elif filter_type == 'and':
                self.model_query = self.model_query.filter(and_(*conditions))
        else:
            # A bare field name acts as a single AND-ed condition.
            if self.is_field_allowed(filter_type):
                conditions = self.create_query(self.parse_field(filter_type, filters[filter_type]))
                self.model_query = self.model_query.filter(conditions)
    return self.model_query
python
{ "resource": "" }
q259136
ElasticQuery.create_query
validation
def create_query(self, attr):
    """ Mix all values and make the query """
    field, operator, value = attr
    model = self.model

    if '.' not in field:
        # Simple column directly on the model.
        return OPERATORS[operator](getattr(model, field, None), value)

    # Dotted path: apply the operator to a column of the related model,
    # wrapped in .has() on the relationship attribute.
    parent_name, child_name = field.split('.')[:2]
    relationship = getattr(model, parent_name, None)
    related_class = relationship.property.mapper.class_
    related_column = getattr(related_class, child_name)
    return relationship.has(OPERATORS[operator](related_column, value))
python
{ "resource": "" }
q259137
SMTP_dummy.sendmail
validation
def sendmail(self, msg_from, msg_to, msg):
    """Remember the recipients."""
    # Stored on the class (not the instance) so callers can inspect the last
    # message sent regardless of which dummy instance sent it.
    SMTP_dummy.msg_from = msg_from
    SMTP_dummy.msg_to = msg_to
    SMTP_dummy.msg = msg
python
{ "resource": "" }
q259138
parsemail
validation
def parsemail(raw_message):
    """Parse message headers, then remove BCC header."""
    message = email.parser.Parser().parsestr(raw_message)

    # Detect encoding
    detected = chardet.detect(bytearray(raw_message, "utf-8"))
    encoding = detected["encoding"]
    print(">>> encoding {}".format(encoding))
    for part in message.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        part.set_charset(encoding)

    # Extract recipients
    addrs = email.utils.getaddresses(message.get_all("TO", [])) + \
        email.utils.getaddresses(message.get_all("CC", [])) + \
        email.utils.getaddresses(message.get_all("BCC", []))
    recipients = [x[1] for x in addrs]
    # BCC addresses stay in the recipient list but are stripped from the
    # headers so they are never transmitted.
    message.__delitem__("bcc")
    message.__setitem__('Date', email.utils.formatdate())
    sender = message["from"]

    return (message, sender, recipients)
python
{ "resource": "" }
q259139
_create_boundary
validation
def _create_boundary(message):
    """Add boundary parameter to multipart message if they are not present."""
    # Nothing to do for non-multipart messages or ones that already carry a
    # boundary.
    if not message.is_multipart() or message.get_boundary() is not None:
        return message

    # HACK: Python2 lists do not natively have a `copy` method. Unfortunately,
    # due to a bug in the Backport for the email module, the method
    # `Message.set_boundary` converts the Message headers into a native list,
    # so that other methods that rely on "copying" the Message headers fail.
    # `Message.set_boundary` is called from `Generator.handle_multipart` if the
    # message does not already have a boundary present. (This method itself is
    # called from `Message.as_string`.)
    # Hence, to prevent `Message.set_boundary` from being called, add a
    # boundary header manually.
    from future.backports.email.generator import Generator
    # pylint: disable=protected-access
    boundary = Generator._make_boundary(message.policy.linesep)
    message.set_param('boundary', boundary)
    return message
python
{ "resource": "" }
q259140
make_message_multipart
validation
def make_message_multipart(message):
    """Convert a message into a multipart message."""
    if not message.is_multipart():
        multipart_message = email.mime.multipart.MIMEMultipart('alternative')
        for header_key in set(message.keys()):
            # Preserve duplicate headers
            values = message.get_all(header_key, failobj=[])
            for value in values:
                multipart_message[header_key] = value
        # The original flat body becomes the first (plain-text) part.
        original_text = message.get_payload()
        multipart_message.attach(email.mime.text.MIMEText(original_text))
        message = multipart_message

    # HACK: For Python2 (see comments in `_create_boundary`)
    message = _create_boundary(message)
    return message
python
{ "resource": "" }
q259141
convert_markdown
validation
def convert_markdown(message):
    """Convert markdown in message text to HTML."""
    # Caller contract: only invoked for markdown-typed messages.
    assert message['Content-Type'].startswith("text/markdown")
    del message['Content-Type']
    # Convert the text from markdown and then make the message multipart
    message = make_message_multipart(message)
    # NOTE(review): iterating a set() of message parts has no defined order —
    # confirm ordering of attached HTML parts does not matter here.
    for payload_item in set(message.get_payload()):
        # Assume the plaintext item is formatted with markdown.
        # Add corresponding HTML version of the item as the last part of
        # the multipart message (as per RFC 2046)
        if payload_item['Content-Type'].startswith('text/plain'):
            original_text = payload_item.get_payload()
            html_text = markdown.markdown(original_text)
            html_payload = future.backports.email.mime.text.MIMEText(
                "<html><body>{}</body></html>".format(html_text),
                "html",
            )
            message.attach(html_payload)
    return message
python
{ "resource": "" }
q259142
addattachments
validation
def addattachments(message, template_path):
    """Attach the files named in the message's 'attachment' headers.

    Returns the (possibly multipart) message and the number of
    'attachment' headers that were present. Exits the process if any
    named attachment cannot be found on disk.
    """
    if 'attachment' not in message:
        return message, 0

    message = make_message_multipart(message)
    attachment_filepaths = message.get_all('attachment', failobj=[])
    template_parent_dir = os.path.dirname(template_path)

    for raw_path in attachment_filepaths:
        cleaned_path = os.path.expanduser(raw_path.strip())
        if not cleaned_path:
            continue
        if not os.path.isabs(cleaned_path):
            # Relative paths are relative to the template's parent directory
            cleaned_path = os.path.join(template_parent_dir, cleaned_path)
        normalized_path = os.path.abspath(cleaned_path)

        # Check that the attachment exists
        if not os.path.exists(normalized_path):
            print("Error: can't find attachment " + normalized_path)
            sys.exit(1)

        filename = os.path.basename(normalized_path)
        with open(normalized_path, "rb") as attachment:
            part = email.mime.application.MIMEApplication(
                attachment.read(), Name=filename)
        part.add_header(
            'Content-Disposition',
            'attachment; filename="{}"'.format(filename))
        message.attach(part)
        print(">>> attached {}".format(normalized_path))

    del message['attachment']
    return message, len(attachment_filepaths)
python
{ "resource": "" }
q259143
sendmail
validation
def sendmail(message, sender, recipients, config_filename): """Send email message using Python SMTP library.""" # Read config file from disk to get SMTP server host, port, username if not hasattr(sendmail, "host"): config = configparser.RawConfigParser() config.read(config_filename) sendmail.host = config.get("smtp_server", "host") sendmail.port = config.getint("smtp_server", "port") sendmail.username = config.get("smtp_server", "username") sendmail.security = config.get("smtp_server", "security") print(">>> Read SMTP server configuration from {}".format( config_filename)) print(">>> host = {}".format(sendmail.host)) print(">>> port = {}".format(sendmail.port)) print(">>> username = {}".format(sendmail.username)) print(">>> security = {}".format(sendmail.security)) # Prompt for password if not hasattr(sendmail, "password"): if sendmail.security == "Dummy" or sendmail.username == "None": sendmail.password = None else: prompt = ">>> password for {} on {}: ".format(sendmail.username, sendmail.host) sendmail.password = getpass.getpass(prompt) # Connect to SMTP server if sendmail.security == "SSL/TLS": smtp = smtplib.SMTP_SSL(sendmail.host, sendmail.port) elif sendmail.security == "STARTTLS": smtp = smtplib.SMTP(sendmail.host, sendmail.port) smtp.ehlo() smtp.starttls() smtp.ehlo() elif sendmail.security == "Never": smtp = smtplib.SMTP(sendmail.host, sendmail.port) elif sendmail.security == "Dummy": smtp = smtp_dummy.SMTP_dummy() else: raise configparser.Error("Unrecognized security type: {}".format( sendmail.security)) # Send credentials if sendmail.username != "None": smtp.login(sendmail.username, sendmail.password) # Send message. Note that we can't use the elegant # "smtp.send_message(message)" because that's python3 only smtp.sendmail(sender, recipients, message.as_string()) smtp.close()
python
{ "resource": "" }
q259144
create_sample_input_files
validation
def create_sample_input_files(template_filename,
                              database_filename,
                              config_filename):
    """Create sample template email and database.

    Refuses to overwrite: exits with an error if any target file exists.
    """
    print("Creating sample template email {}".format(template_filename))
    if os.path.exists(template_filename):
        print("Error: file exists: " + template_filename)
        sys.exit(1)
    with io.open(template_filename, "w") as template_file:
        template_file.write(
            u"TO: {{email}}\n"
            u"SUBJECT: Testing mailmerge\n"
            u"FROM: My Self <myself@mydomain.com>\n"
            u"\n"
            u"Hi, {{name}},\n"
            u"\n"
            u"Your number is {{number}}.\n"
        )
    print("Creating sample database {}".format(database_filename))
    if os.path.exists(database_filename):
        print("Error: file exists: " + database_filename)
        sys.exit(1)
    with io.open(database_filename, "w") as database_file:
        database_file.write(
            u'email,name,number\n'
            u'myself@mydomain.com,"Myself",17\n'
            u'bob@bobdomain.com,"Bob",42\n'
        )
    print("Creating sample config file {}".format(config_filename))
    if os.path.exists(config_filename):
        print("Error: file exists: " + config_filename)
        sys.exit(1)
    with io.open(config_filename, "w") as config_file:
        # The sample config documents several server setups; only the first
        # (GMail) section is uncommented.
        config_file.write(
            u"# Example: GMail\n"
            u"[smtp_server]\n"
            u"host = smtp.gmail.com\n"
            u"port = 465\n"
            u"security = SSL/TLS\n"
            u"username = YOUR_USERNAME_HERE\n"
            u"#\n"
            u"# Example: Wide open\n"
            u"# [smtp_server]\n"
            u"# host = open-smtp.example.com\n"
            u"# port = 25\n"
            u"# security = Never\n"
            u"# username = None\n"
            u"#\n"
            u"# Example: University of Michigan\n"
            u"# [smtp_server]\n"
            u"# host = smtp.mail.umich.edu\n"
            u"# port = 465\n"
            u"# security = SSL/TLS\n"
            u"# username = YOUR_USERNAME_HERE\n"
            u"#\n"
            u"# Example: University of Michigan EECS Dept., with STARTTLS security\n"  # noqa: E501
            u"# [smtp_server]\n"
            u"# host = newman.eecs.umich.edu\n"
            u"# port = 25\n"
            u"# security = STARTTLS\n"
            u"# username = YOUR_USERNAME_HERE\n"
            u"#\n"
            u"# Example: University of Michigan EECS Dept., with no encryption\n"  # noqa: E501
            u"# [smtp_server]\n"
            u"# host = newman.eecs.umich.edu\n"
            u"# port = 25\n"
            u"# security = Never\n"
            u"# username = YOUR_USERNAME_HERE\n"
        )
    print("Edit these files, and then run mailmerge again")
python
{ "resource": "" }
q259145
cli
validation
def cli(sample, dry_run, limit, no_limit, database_filename,
        template_filename, config_filename):
    """Command line interface."""
    # pylint: disable=too-many-arguments
    # Thin wrapper: all the work happens in the API entry point.
    mailmerge.api.main(sample=sample,
                       dry_run=dry_run,
                       limit=limit,
                       no_limit=no_limit,
                       database_filename=database_filename,
                       template_filename=template_filename,
                       config_filename=config_filename)
python
{ "resource": "" }
q259146
with_continuations
validation
def with_continuations(**c):
    """ A decorator for defining tail-call optimized functions.

        Example
        -------

            @with_continuations()
            def factorial(n, k, self=None):
                return self(n-1, k*n) if n > 1 else k

            @with_continuations()
            def identity(x, self=None):
                return x

            @with_continuations(out=identity)
            def factorial2(n, k, self=None, out=None):
                return self(n-1, k*n) if n > 1 else out(k)

            print(factorial(7,1))
            print(factorial2(7,1))
    """
    # Split the continuation mapping into parallel tuples of names and
    # callables so they can be re-zipped per call below.
    if len(c):
        keys, k = zip(*c.items())
    else:
        keys, k = tuple([]), tuple([])

    def d(f):
        # NOTE(review): `C` is presumably the trampoline wrapper defined
        # elsewhere in this module — confirm. The decorated function
        # receives itself as `self` plus each named continuation as a
        # keyword argument.
        return C(
            lambda kself, *conts:
                lambda *args:
                    f(*args, self=kself, **dict(zip(keys, conts))))(*k)
    return d
python
{ "resource": "" }
q259147
parse_int_list
validation
def parse_int_list(string):
    """Parse a string of numbers and ranges into a list of integers.

    Ranges are separated by dashes and inclusive of both the start and
    end number.

    Example: parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
    """
    result = []
    tokens = (token
              for comma_part in string.split(",")
              for token in comma_part.split(" "))
    for token in tokens:
        if not token:
            continue
        if "-" in token:
            # Inclusive range, e.g. "11-13" -> 11, 12, 13
            low, high = (int(bound.strip()) for bound in token.split("-"))
            result.extend(range(low, high + 1))
        else:
            result.append(int(token.strip()))
    return result
python
{ "resource": "" }
q259148
BasePeonyClient._get_base_url
validation
def _get_base_url(base_url, api, version): """ create the base url for the api Parameters ---------- base_url : str format of the base_url using {api} and {version} api : str name of the api to use version : str version of the api Returns ------- str the base url of the api you want to use """ format_args = {} if "{api}" in base_url: if api == "": base_url = base_url.replace('{api}.', '') else: format_args['api'] = api if "{version}" in base_url: if version == "": base_url = base_url.replace('/{version}', '') else: format_args['version'] = version return base_url.format(api=api, version=version)
python
{ "resource": "" }
q259149
BasePeonyClient.request
validation
async def request(self, method, url, future,
                  headers=None,
                  session=None,
                  encoding=None,
                  **kwargs):
    """
    Make requests to the REST API

    Parameters
    ----------
    future : asyncio.Future
        Future used to return the response
    method : str
        Method to be used by the request
    url : str
        URL of the resource
    headers : .oauth.PeonyHeaders
        Custom headers (doesn't overwrite `Authorization` headers)
    session : aiohttp.ClientSession, optional
        Client session used to make the request

    Returns
    -------
    data.PeonyResponse
        Response to the request
    """
    await self.setup

    # prepare request arguments, particularly the headers
    req_kwargs = await self.headers.prepare_request(
        method=method,
        url=url,
        headers=headers,
        proxy=self.proxy,
        **kwargs
    )

    if encoding is None:
        encoding = self.encoding

    # fall back on the client's own session when none is given
    session = session if (session is not None) else self._session

    logger.debug("making request with parameters: %s" % req_kwargs)

    async with session.request(**req_kwargs) as response:
        if response.status < 400:
            data = await data_processing.read(response, self._loads,
                                              encoding=encoding)
            future.set_result(data_processing.PeonyResponse(
                data=data,
                headers=response.headers,
                url=response.url,
                request=req_kwargs
            ))
        else:
            # throw exception if status is not 2xx
            await exceptions.throw(response, loads=self._loads,
                                   encoding=encoding, url=url)
python
{ "resource": "" }
q259150
BasePeonyClient.stream_request
validation
def stream_request(self, method, url, headers=None, _session=None,
                   *args, **kwargs):
    """
    Make requests to the Streaming API

    Parameters
    ----------
    method : str
        Method to be used by the request
    url : str
        URL of the resource
    headers : dict
        Custom headers (doesn't overwrite `Authorization` headers)
    _session : aiohttp.ClientSession, optional
        The session to use for this specific request, the session
        given as argument of :meth:`__init__` is used by default

    Returns
    -------
    .stream.StreamResponse
        Stream context for the request
    """
    stream_kwargs = dict(kwargs,
                         method=method,
                         url=url,
                         client=self,
                         headers=headers,
                         session=_session,
                         proxy=self.proxy)
    return StreamResponse(**stream_kwargs)
python
{ "resource": "" }
q259151
BasePeonyClient.get_tasks
validation
def get_tasks(self):
    """
    Get the tasks attached to the instance

    Returns
    -------
    list
        List of tasks (:class:`asyncio.Task`)
    """
    # Combine the client's own tasks with those of its streams
    return [*self._get_tasks(), *self._streams.get_tasks(self)]
python
{ "resource": "" }
q259152
BasePeonyClient.run_tasks
validation
async def run_tasks(self):
    """ Run the tasks attached to the instance """
    tasks = self.get_tasks()
    # NOTE(review): the ``loop`` argument was removed from asyncio.gather
    # in Python 3.10 — this code targets an older asyncio.
    self._gathered_tasks = asyncio.gather(*tasks, loop=self.loop)
    try:
        await self._gathered_tasks
    except CancelledError:
        # cancellation is the expected way to stop the client's tasks
        pass
python
{ "resource": "" }
q259153
BasePeonyClient.close
validation
async def close(self):
    """ properly close the client """
    pending = self._get_close_tasks()
    if pending:
        await asyncio.wait(pending)
    # drop the session so it cannot be reused after closing
    self._session = None
python
{ "resource": "" }
q259154
PeonyClient._chunked_upload
validation
async def _chunked_upload(self, media, media_size,
                          path=None,
                          media_type=None,
                          media_category=None,
                          chunk_size=2**20,
                          **params):
    """
    upload media in chunks

    Parameters
    ----------
    media : file object
        a file object of the media
    media_size : int
        size of the media
    path : str, optional
        filename of the media
    media_type : str, optional
        mime type of the media
    media_category : str, optional
        twitter media category, must be used with ``media_type``
    chunk_size : int, optional
        size of a chunk in bytes
    params : dict, optional
        additional parameters of the request

    Returns
    -------
    .data_processing.PeonyResponse
        Response of the request
    """
    if isinstance(media, bytes):
        media = io.BytesIO(media)

    chunk = media.read(chunk_size)
    # the media may be an async reader; detect it from the first read
    is_coro = asyncio.iscoroutine(chunk)

    if is_coro:
        chunk = await chunk

    if media_type is None:
        # guess both type and category from the file's first bytes
        media_metadata = await utils.get_media_metadata(chunk, path)
        media_type, media_category = media_metadata
    elif media_category is None:
        media_category = utils.get_category(media_type)

    response = await self.upload.media.upload.post(
        command="INIT",
        total_bytes=media_size,
        media_type=media_type,
        media_category=media_category,
        **params
    )

    media_id = response['media_id']
    i = 0

    while chunk:
        if is_coro:
            req = self.upload.media.upload.post(command="APPEND",
                                                media_id=media_id,
                                                media=chunk,
                                                segment_index=i)
            # read the next chunk while the current one uploads
            chunk, _ = await asyncio.gather(media.read(chunk_size), req)
        else:
            await self.upload.media.upload.post(command="APPEND",
                                                media_id=media_id,
                                                media=chunk,
                                                segment_index=i)
            chunk = media.read(chunk_size)

        i += 1

    status = await self.upload.media.upload.post(command="FINALIZE",
                                                 media_id=media_id)

    if 'processing_info' in status:
        # poll the STATUS endpoint until processing succeeds or fails
        while status['processing_info'].get('state') != "succeeded":
            processing_info = status['processing_info']
            if processing_info.get('state') == "failed":
                error = processing_info.get('error', {})
                message = error.get('message', str(status))
                raise exceptions.MediaProcessingError(data=status,
                                                      message=message,
                                                      **params)

            delay = processing_info['check_after_secs']
            await asyncio.sleep(delay)
            status = await self.upload.media.upload.get(
                command="STATUS",
                media_id=media_id,
                **params
            )

    return response
python
{ "resource": "" }
q259155
PeonyClient.upload_media
validation
async def upload_media(self, file_,
                       media_type=None,
                       media_category=None,
                       chunked=None,
                       size_limit=None,
                       **params):
    """
    upload a media on twitter

    Parameters
    ----------
    file_ : str or pathlib.Path or file
        Path to the file or file object
    media_type : str, optional
        mime type of the media
    media_category : str, optional
        Twitter's media category of the media, must be used with
        ``media_type``
    chunked : bool, optional
        If True, force the use of the chunked upload for the media
    size_limit : int, optional
        If set, the media will be sent using a multipart upload if
        its size is over ``size_limit`` bytes
    params : dict
        parameters used when making the request

    Returns
    -------
    .data_processing.PeonyResponse
        Response of the request
    """
    if isinstance(file_, str):
        url = urlparse(file_)
        if url.scheme.startswith('http'):
            # remote media: fetch it over HTTP
            media = await self._session.get(file_)
        else:
            # local media: strip quotes/spaces from the path and open it
            path = urlparse(file_).path.strip(" \"'")
            media = await utils.execute(open(path, 'rb'))
    elif hasattr(file_, 'read') or isinstance(file_, bytes):
        media = file_
    else:
        raise TypeError("upload_media input must be a file object or a "
                        "filename or binary data or an aiohttp request")

    media_size = await utils.get_size(media)
    if chunked is not None:
        # an explicit `chunked` overrides the size-based decision
        size_test = False
    else:
        size_test = await self._size_test(media_size, size_limit)

    if isinstance(media, aiohttp.ClientResponse):
        # send the content of the response
        media = media.content

    if chunked or (size_test and chunked is None):
        args = media, media_size, file_, media_type, media_category
        response = await self._chunked_upload(*args, **params)
    else:
        response = await self.upload.media.upload.post(media=media,
                                                       **params)

    # close the media only when this function opened it
    if not hasattr(file_, 'read') and not getattr(media, 'closed', True):
        media.close()

    return response
python
{ "resource": "" }
q259156
_parse_iedb_response
validation
def _parse_iedb_response(response): """Take the binding predictions returned by IEDB's web API and parse them into a DataFrame Expect response to look like: allele seq_num start end length peptide ic50 percentile_rank HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7 HLA-A*01:01 1 5 13 9 TVATLYCVH 2216.49 3.9 HLA-A*01:01 1 7 15 9 ATLYCVHQR 2635.42 5.1 HLA-A*01:01 1 4 12 9 NTVATLYCV 6829.04 20 HLA-A*01:01 1 1 9 9 SLYNTVATL 8032.38 24 HLA-A*01:01 1 8 16 9 TLYCVHQRI 8853.90 26 HLA-A*01:01 1 3 11 9 YNTVATLYC 9865.62 29 HLA-A*01:01 1 6 14 9 VATLYCVHQ 27575.71 58 HLA-A*01:01 1 10 18 9 YCVHQRIDV 48929.64 74 HLA-A*01:01 1 9 17 9 LYCVHQRID 50000.00 75 """ if len(response) == 0: raise ValueError("Empty response from IEDB!") df = pd.read_csv(io.BytesIO(response), delim_whitespace=True, header=0) # pylint doesn't realize that df is a DataFrame, so tell is assert type(df) == pd.DataFrame df = pd.DataFrame(df) if len(df) == 0: raise ValueError( "No binding predictions in response from IEDB: %s" % (response,)) required_columns = [ "allele", "peptide", "ic50", "start", "end", ] for column in required_columns: if column not in df.columns: raise ValueError( "Response from IEDB is missing '%s' column: %s. Full " "response:\n%s" % ( column, df.ix[0], response)) # since IEDB has allowed multiple column names for percentile rank, # we're defensively normalizing all of them to just 'rank' df = df.rename(columns={ "percentile_rank": "rank", "percentile rank": "rank"}) return df
python
{ "resource": "" }
q259157
IedbBasePredictor.predict_subsequences
validation
def predict_subsequences(self, sequence_dict, peptide_lengths=None):
    """Given a dictionary mapping unique keys to amino acid sequences,
    run MHC binding predictions on all candidate epitopes extracted from
    sequences and return a EpitopeCollection.

    Parameters
    ----------
    fasta_dictionary : dict or string
        Mapping of protein identifiers to protein amino acid sequences.
        If string then converted to dictionary.
    """
    sequence_dict = check_sequence_dictionary(sequence_dict)
    peptide_lengths = self._check_peptide_lengths(peptide_lengths)

    # take each mutated sequence in the dataframe
    # and general MHC binding scores for all k-mer substrings
    binding_predictions = []
    expected_peptides = set([])

    normalized_alleles = []
    for key, amino_acid_sequence in sequence_dict.items():
        # collect every k-mer substring for later result validation
        for l in peptide_lengths:
            for i in range(len(amino_acid_sequence) - l + 1):
                expected_peptides.add(amino_acid_sequence[i:i + l])
        self._check_peptide_inputs(expected_peptides)
        for allele in self.alleles:
            # IEDB MHCII predictor expects DRA1 to be omitted.
            allele = normalize_allele_name(allele, omit_dra1=True)
            normalized_alleles.append(allele)
            request = self._get_iedb_request_params(
                amino_acid_sequence, allele)
            logger.info(
                "Calling IEDB (%s) with request %s",
                self.url,
                request)
            response_df = _query_iedb(request, self.url)
            for _, row in response_df.iterrows():
                # IEDB reports 1-based start positions; store 0-based
                binding_predictions.append(
                    BindingPrediction(
                        source_sequence_name=key,
                        offset=row['start'] - 1,
                        allele=row['allele'],
                        peptide=row['peptide'],
                        affinity=row['ic50'],
                        percentile_rank=row['rank'],
                        prediction_method_name="iedb-" + self.prediction_method))
    self._check_results(
        binding_predictions,
        alleles=normalized_alleles,
        peptides=expected_peptides)
    return BindingPredictionCollection(binding_predictions)
python
{ "resource": "" }
q259158
get_args
validation
def get_args(func, skip=0):
    """
    Hackish way to get the arguments of a function

    Parameters
    ----------
    func : callable
        Function to get the arguments from
    skip : int, optional
        Arguments to skip, defaults to 0
        set it to 1 to skip the ``self`` argument of a method.

    Returns
    -------
    tuple
        Function's arguments
    """
    try:
        code = func.__code__
    except AttributeError:
        # callables without __code__ (e.g. instances) expose it on __call__
        code = func.__call__.__code__
    return code.co_varnames[skip:code.co_argcount]
python
{ "resource": "" }
q259159
log_error
validation
def log_error(msg=None, exc_info=None, logger=None, **kwargs):
    """
    log an exception and its traceback on the logger defined

    Parameters
    ----------
    msg : str, optional
        A message to add to the error
    exc_info : tuple, optional
        Information about the current exception, defaults to
        :func:`sys.exc_info`
    logger : logging.Logger, optional
        logger to use, defaults to the module logger
    """
    if logger is None:
        logger = _logger

    if not exc_info:
        exc_info = sys.exc_info()

    if msg is None:
        msg = ""

    # Fixed: removed the unused `exc_class, exc_msg, _` unpacking, which
    # also made the function fail on non-3-tuple inputs.
    # Only log when there is an actual exception being handled.
    if all(info is not None for info in exc_info):
        logger.error(msg, exc_info=exc_info)
python
{ "resource": "" }
q259160
get_media_metadata
validation
async def get_media_metadata(data, path=None):
    """
    Get all the file's metadata and read any kind of file object

    Parameters
    ----------
    data : bytes
        first bytes of the file (the mimetype should be guessed from the
        file headers)
    path : str, optional
        path to the file

    Returns
    -------
    str
        The mimetype of the media
    str
        The category of the media on Twitter
    """
    if isinstance(data, bytes):
        media_type = await get_type(data, path)
    else:
        raise TypeError("get_metadata input must be a bytes")

    media_category = get_category(media_type)

    _logger.info("media_type: %s, media_category: %s" % (media_type,
                                                         media_category))

    return media_type, media_category
python
{ "resource": "" }
q259161
get_size
validation
async def get_size(media):
    """
    Get the size of a file

    Parameters
    ----------
    media : file object
        The file object of the media

    Returns
    -------
    int
        The size of the file
    """
    if hasattr(media, 'seek'):
        # seekable file: measure by seeking to the end, then rewind
        await execute(media.seek(0, os.SEEK_END))
        size = await execute(media.tell())
        await execute(media.seek(0))
    elif hasattr(media, 'headers'):
        # HTTP response: trust the Content-Length header
        size = int(media.headers['Content-Length'])
    elif isinstance(media, bytes):
        size = len(media)
    else:
        raise TypeError("Can't get size of media of type:",
                        type(media).__name__)

    _logger.info("media size: %dB" % size)
    return size
python
{ "resource": "" }
q259162
set_debug
validation
def set_debug():
    """ activates error messages, useful during development """
    # configure the root logger, then make peony's own logger verbose
    logging.basicConfig(level=logging.WARNING)
    peony.logger.setLevel(logging.DEBUG)
python
{ "resource": "" }
q259163
BindingPrediction.clone_with_updates
validation
def clone_with_updates(self, **kwargs):
    """Returns new BindingPrediction with updated fields"""
    # merge the current fields with the overrides, overrides winning
    merged_fields = dict(self.to_dict(), **kwargs)
    return BindingPrediction(**merged_fields)
python
{ "resource": "" }
q259164
IdIterator.get_data
validation
def get_data(self, response):
    """ Get the data from the response """
    if self._response_list:
        # previously detected that responses are bare lists
        return response
    elif self._response_key is None:
        if hasattr(response, "items"):
            # first mapping response: find the first list-like value whose
            # items carry an 'id', and remember its key for later calls
            for key, data in response.items():
                if (hasattr(data, "__getitem__")
                        and not hasattr(data, "items")
                        and len(data) > 0
                        and 'id' in data[0]):
                    self._response_key = key
                    return data
        else:
            # the response itself is the list of items
            self._response_list = True
            return response
    else:
        return response[self._response_key]

    raise NoDataFound(response=response, url=self.request.get_url())
python
{ "resource": "" }
q259165
SinceIdIterator.call_on_response
validation
async def call_on_response(self, data):
    """
    Try to fill the gaps and strip last tweet from the response
    if its id is that of the first tweet of the last response

    Parameters
    ----------
    data : list
        The response data
    """
    since_id = self.kwargs.get(self.param, 0) + 1

    if self.fill_gaps:
        if data[-1]['id'] != since_id:
            # fetch older tweets until the gap down to since_id is filled
            max_id = data[-1]['id'] - 1
            responses = with_max_id(self.request(**self.kwargs,
                                                 max_id=max_id))

            async for tweets in responses:
                data.extend(tweets)

    if data[-1]['id'] == self.last_id:
        # drop the tweet already seen in the previous response
        data = data[:-1]

        if not data and not self.force:
            raise StopAsyncIteration

    await self.set_param(data)
python
{ "resource": "" }
q259166
get_oauth_token
validation
async def get_oauth_token(consumer_key, consumer_secret, callback_uri="oob"):
    """
    Get a temporary oauth token

    Parameters
    ----------
    consumer_key : str
        Your consumer key
    consumer_secret : str
        Your consumer secret
    callback_uri : str, optional
        Callback uri, defaults to 'oob'

    Returns
    -------
    dict
        Temporary tokens
    """
    # unauthenticated client: only consumer credentials are needed here
    client = BasePeonyClient(consumer_key=consumer_key,
                             consumer_secret=consumer_secret,
                             api_version="",
                             suffix="")

    response = await client.api.oauth.request_token.post(
        _suffix="",
        oauth_callback=callback_uri
    )

    return parse_token(response)
python
{ "resource": "" }
q259167
get_oauth_verifier
validation
async def get_oauth_verifier(oauth_token):
    """
    Open authorize page in a browser,
    print the url if it didn't work

    Arguments
    ---------
    oauth_token : str
        The oauth token received in :func:`get_oauth_token`

    Returns
    -------
    str
        The PIN entered by the user
    """
    url = "https://api.twitter.com/oauth/authorize?oauth_token=" + oauth_token

    opened = False
    try:
        opened = webbrowser.open(url)
        # leave the browser a moment to come up before prompting
        await asyncio.sleep(2)
    except RuntimeError:
        pass

    if not opened:
        print("could not open a browser\ngo here to enter your PIN: " + url)

    return input("\nEnter your PIN: ")
python
{ "resource": "" }
q259168
get_access_token
validation
async def get_access_token(consumer_key, consumer_secret,
                           oauth_token, oauth_token_secret,
                           oauth_verifier, **kwargs):
    """
    get the access token of the user

    Parameters
    ----------
    consumer_key : str
        Your consumer key
    consumer_secret : str
        Your consumer secret
    oauth_token : str
        OAuth token from :func:`get_oauth_token`
    oauth_token_secret : str
        OAuth token secret from :func:`get_oauth_token`
    oauth_verifier : str
        OAuth verifier from :func:`get_oauth_verifier`

    Returns
    -------
    dict
        Access tokens
    """
    # client authenticated with the temporary tokens to exchange them
    # for the user's permanent access tokens
    client = BasePeonyClient(consumer_key=consumer_key,
                             consumer_secret=consumer_secret,
                             access_token=oauth_token,
                             access_token_secret=oauth_token_secret,
                             api_version="",
                             suffix="")

    response = await client.api.oauth.access_token.get(
        _suffix="",
        oauth_verifier=oauth_verifier
    )

    return parse_token(response)
python
{ "resource": "" }
q259169
parse_token
validation
def parse_token(response):
    """
    parse the responses containing the tokens

    Parameters
    ----------
    response : str
        The response containing the tokens, as ``key=value`` pairs
        joined by ``&``

    Returns
    -------
    dict
        The parsed tokens
    """
    # Fixed: split on the first '=' only, so values that themselves
    # contain '=' (e.g. base64 padding) no longer break the unpacking.
    items = [item.split("=", 1) for item in response.split("&")]
    return {key: value for key, value in items}
python
{ "resource": "" }
q259170
NetChop.predict
validation
def predict(self, sequences):
    """
    Return netChop predictions for each position in each sequence.

    Parameters
    -----------
    sequences : list of string
        Amino acid sequences to predict cleavage for

    Returns
    -----------
    list of list of float
        The i'th list corresponds to the i'th sequence. Each list gives
        the cleavage probability for each position in the sequence.
    """
    # Write the sequences as a temporary FASTA file for the netChop binary
    with tempfile.NamedTemporaryFile(suffix=".fsa", mode="w") as input_fd:
        for (i, sequence) in enumerate(sequences):
            input_fd.write("> %d\n" % i)
            input_fd.write(sequence)
            input_fd.write("\n")
        input_fd.flush()
        try:
            output = subprocess.check_output(["netChop", input_fd.name])
        except subprocess.CalledProcessError as e:
            logging.error("Error calling netChop: %s:\n%s" % (e, e.output))
            raise

        parsed = self.parse_netchop(output)
        # sanity-check that netChop returned one score per residue
        assert len(parsed) == len(sequences), \
            "Expected %d results but got %d" % (
                len(sequences), len(parsed))
        assert [len(x) for x in parsed] == [len(x) for x in sequences]
        return parsed
python
{ "resource": "" }
q259171
NetChop.parse_netchop
validation
def parse_netchop(netchop_output):
    """Parse netChop stdout into one list of cleavage scores per sequence."""
    lines = iter(netchop_output.decode().split("\n"))
    all_scores = []
    for line in lines:
        # A header line marks the start of a new sequence's score table
        if not ("pos" in line and 'AA' in line and 'score' in line):
            continue
        current = []
        all_scores.append(current)
        if "----" not in next(lines):
            raise ValueError("Dashes expected")
        row = next(lines)
        while '-------' not in row:
            # The score is the fourth whitespace-separated column
            current.append(float(row.split()[3]))
            row = next(lines)
    return all_scores
python
{ "resource": "" }
q259172
BindingPredictionCollection.to_dataframe
validation
def to_dataframe(
        self,
        columns=BindingPrediction.fields + ("length",)):
    """
    Converts collection of BindingPrediction objects to DataFrame

    One row per prediction; column order follows `columns`, which
    defaults to every BindingPrediction field plus "length".
    """
    return pd.DataFrame.from_records(
        [tuple([getattr(x, name) for name in columns]) for x in self],
        columns=columns)
python
{ "resource": "" }
q259173
NetMHC
validation
def NetMHC(alleles, default_peptide_lengths=[9], program_name="netMHC"):
    """
    This function wraps NetMHC3 and NetMHC4 to automatically detect which
    class to use. Currently based on running the '-h' command and looking
    for discriminating substrings between the versions.
    """
    # run NetMHC's help command and parse discriminating substrings out of
    # the resulting str output
    with open(os.devnull, 'w') as devnull:
        help_output = check_output([program_name, "-h"], stderr=devnull)
    help_output_str = help_output.decode("ascii", "ignore")

    substring_to_netmhc_class = {
        "-listMHC": NetMHC4,
        "--Alleles": NetMHC3,
    }
    successes = [klass
                 for substring, klass in substring_to_netmhc_class.items()
                 if substring in help_output_str]

    if len(successes) > 1:
        raise SystemError("Command %s is valid for multiple NetMHC versions. "
                          "This is likely an mhctools bug." % program_name)
    if not successes:
        raise SystemError("Command %s is not a valid way of calling any NetMHC software."
                          % program_name)

    return successes[0](
        alleles=alleles,
        default_peptide_lengths=default_peptide_lengths,
        program_name=program_name)
python
{ "resource": "" }
q259174
MHCflurry.predict_peptides
validation
def predict_peptides(self, peptides): """ Predict MHC affinity for peptides. """ # importing locally to avoid slowing down CLI applications which # don't use MHCflurry from mhcflurry.encodable_sequences import EncodableSequences binding_predictions = [] encodable_sequences = EncodableSequences.create(peptides) for allele in self.alleles: predictions_df = self.predictor.predict_to_dataframe( encodable_sequences, allele=allele) for (_, row) in predictions_df.iterrows(): binding_prediction = BindingPrediction( allele=allele, peptide=row.peptide, affinity=row.prediction, percentile_rank=( row.prediction_percentile if 'prediction_percentile' in row else nan), prediction_method_name="mhcflurry" ) binding_predictions.append(binding_prediction) return BindingPredictionCollection(binding_predictions)
python
{ "resource": "" }
q259175
seq_to_str
validation
def seq_to_str(obj, sep=","):
    """
    Given a sequence convert it to a comma separated string.
    If, however, the argument is a single object, return its string
    representation.
    """
    if isinstance(obj, string_classes):
        return obj
    if isinstance(obj, (list, tuple)):
        return sep.join(str(element) for element in obj)
    return str(obj)
python
{ "resource": "" }
q259176
create_input_peptides_files
validation
def create_input_peptides_files(
        peptides,
        max_peptides_per_file=None,
        group_by_length=False):
    """
    Creates one or more files containing one peptide per line,
    returns names of files.
    """
    if group_by_length:
        # one group per distinct peptide length
        peptide_lengths = {len(p) for p in peptides}
        peptide_groups = {l: [] for l in peptide_lengths}
        for p in peptides:
            peptide_groups[len(p)].append(p)
    else:
        # single unnamed group containing every peptide
        peptide_groups = {"": peptides}

    file_names = []
    for key, group in peptide_groups.items():
        n_peptides = len(group)
        if not max_peptides_per_file:
            max_peptides_per_file = n_peptides
        input_file = None
        for i, p in enumerate(group):
            if i % max_peptides_per_file == 0:
                # roll over to a new file every max_peptides_per_file lines
                if input_file is not None:
                    file_names.append(input_file.name)
                    input_file.close()
                input_file = make_writable_tempfile(
                    prefix_number=i // max_peptides_per_file,
                    prefix_name=key,
                    suffix=".txt")
            input_file.write("%s\n" % p)
        if input_file is not None:
            file_names.append(input_file.name)
            input_file.close()
    return file_names
python
{ "resource": "" }
q259177
BasePredictor._check_peptide_lengths
validation
def _check_peptide_lengths(self, peptide_lengths=None):
    """Validate peptide lengths, falling back to the predictor defaults.

    If no lengths are given, uses `self.default_peptide_lengths`. Raises
    ValueError when no lengths are available or any length falls outside
    the predictor's [min, max] range; otherwise returns the lengths as a
    list of ints.
    """
    peptide_lengths = peptide_lengths or self.default_peptide_lengths
    if not peptide_lengths:
        raise ValueError(
            ("Must either provide 'peptide_lengths' argument "
             "or set 'default_peptide_lengths"))
    if isinstance(peptide_lengths, int):
        peptide_lengths = [peptide_lengths]
    require_iterable_of(peptide_lengths, int)

    minimum = self.min_peptide_length
    maximum = self.max_peptide_length
    for length in peptide_lengths:
        if minimum is not None and length < minimum:
            raise ValueError(
                "Invalid peptide length %d, shorter than min %d" % (
                    length, minimum))
        elif maximum is not None and length > maximum:
            raise ValueError(
                "Invalid peptide length %d, longer than max %d" % (
                    length, maximum))
    return peptide_lengths
python
{ "resource": "" }
q259178
BasePredictor._check_peptide_inputs
validation
def _check_peptide_inputs(self, peptides):
    """
    Check peptide sequences to make sure they are valid for this
    predictor.

    Parameters
    ----------
    peptides : iterable of str

    Raises
    ------
    ValueError
        If any peptide contains non-alphabetic characters, a disallowed
        'X' or lowercase letter, or violates the predictor's length
        limits.
    """
    require_iterable_of(peptides, string_types)
    # hoist per-predictor flags out of the loop
    check_X = not self.allow_X_in_peptides
    check_lower = not self.allow_lowercase_in_peptides
    check_min_length = self.min_peptide_length is not None
    min_length = self.min_peptide_length
    check_max_length = self.max_peptide_length is not None
    max_length = self.max_peptide_length
    for p in peptides:
        if not p.isalpha():
            raise ValueError("Invalid characters in peptide '%s'" % p)
        elif check_X and "X" in p:
            raise ValueError("Invalid character 'X' in peptide '%s'" % p)
        elif check_lower and not p.isupper():
            raise ValueError("Invalid lowercase letters in peptide '%s'" % p)
        elif check_min_length and len(p) < min_length:
            raise ValueError(
                "Peptide '%s' too short (%d chars), must be at least %d" % (
                    p, len(p), min_length))
        elif check_max_length and len(p) > max_length:
            # BUGFIX: this message previously said "must be at least" for
            # the too-long case; corrected to state the maximum.
            raise ValueError(
                "Peptide '%s' too long (%d chars), must be no more than %d" % (
                    p, len(p), max_length))
python
{ "resource": "" }
q259179
BasePredictor.predict_subsequences
validation
def predict_subsequences(self, sequence_dict, peptide_lengths=None):
    """
    Predict binding for every subsequence of the given sequences.

    Given a dictionary mapping sequence names to amino acid strings (or a
    bare string, or a list/tuple of strings), and an optional list of
    peptide lengths, returns a BindingPredictionCollection whose entries
    carry the source sequence name and offset of each peptide.
    """
    # normalize the input into a {name: sequence} dict
    if isinstance(sequence_dict, string_types):
        sequence_dict = {"seq": sequence_dict}
    elif isinstance(sequence_dict, (list, tuple)):
        sequence_dict = {seq: seq for seq in sequence_dict}

    peptide_lengths = self._check_peptide_lengths(peptide_lengths)

    # slide windows of every requested length over each sequence,
    # remembering every (name, offset) a peptide could have come from
    peptide_to_name_offset_pairs = defaultdict(list)
    for name, sequence in sequence_dict.items():
        for length in peptide_lengths:
            last_start = len(sequence) - length
            for start in range(last_start + 1):
                fragment = sequence[start:start + length]
                peptide_to_name_offset_pairs[fragment].append((name, start))

    peptide_list = sorted(peptide_to_name_offset_pairs)
    binding_predictions = self.predict_peptides(peptide_list)

    # expand each prediction into one result per source location
    results = []
    for binding_prediction in binding_predictions:
        origins = peptide_to_name_offset_pairs[binding_prediction.peptide]
        results.extend(
            binding_prediction.clone_with_updates(
                source_sequence_name=name,
                offset=offset)
            for (name, offset) in origins)
    self._check_results(
        results,
        peptides=set(peptide_to_name_offset_pairs),
        alleles=self.alleles)
    return BindingPredictionCollection(results)
python
{ "resource": "" }
q259180
BasePredictor._check_hla_alleles
validation
def _check_hla_alleles(
        alleles,
        valid_alleles=None):
    """
    Normalize a list of HLA allele names, returning the unique set as a
    list. If an optional collection of valid alleles is given, raise
    UnsupportedAllele for any normalized allele not in it.
    """
    require_iterable_of(alleles, string_types, "HLA alleles")

    # Normalize and deduplicate so the MHC predictor is not run twice
    # for homozygous alleles.
    normalized = set()
    for allele in alleles:
        normalized.add(normalize_allele_name(allele.strip().upper()))

    if valid_alleles:
        # For some reason netMHCpan drops the '*' in names, so
        # 'HLA-A*03:01' becomes 'HLA-A03:01'
        unsupported = [
            allele
            for allele in normalized
            if allele not in valid_alleles]
        if unsupported:
            raise UnsupportedAllele(
                "Unsupported HLA alleles: %s" % unsupported)

    return list(normalized)
python
{ "resource": "" }
q259181
StreamResponse._connect
validation
async def _connect(self):
    """
    Connect to the stream.

    Returns
    -------
    aiohttp.ClientResponse
        The streaming response
    """
    logger.debug("connecting to the stream")
    # make sure the client has finished its setup before using it
    await self.client.setup
    if self.session is None:
        # fall back to the client's own HTTP session
        self.session = self.client._session
    # build the signed request arguments (headers, auth, ...) from the
    # stored keyword arguments
    kwargs = await self.client.headers.prepare_request(**self.kwargs)
    # wrap session.request with the client's error handler
    request = self.client.error_handler(self.session.request)
    # NOTE(review): timeout=0 presumably disables the per-request timeout
    # for this long-lived streaming connection -- confirm against the
    # error_handler / session.request wrapper
    return await request(timeout=0, **kwargs)
python
{ "resource": "" }
q259182
StreamResponse.connect
validation
async def connect(self):
    """
    Create the connection and set self.state from the response status.

    Raises
    ------
    exception.PeonyException
        On a response status in 4xx that is not 420 or 429.
        Also on statuses in 1xx or 3xx since those should not be the
        status received here.
    """
    with async_timeout.timeout(self.timeout):
        self.response = await self._connect()
    if self.response.status in range(200, 300):
        # successful connection: reset the error backoff
        self._error_timeout = 0
        self.state = NORMAL
    elif self.response.status == 500:
        # internal server error: treat as a disconnection
        self.state = DISCONNECTION
    elif self.response.status in range(501, 600):
        # other 5xx errors: schedule a reconnection
        self.state = RECONNECTION
    elif self.response.status in (420, 429):
        # rate limited ("enhance your calm"): back off
        self.state = ENHANCE_YOUR_CALM
    else:
        # 1xx/3xx or 4xx other than 420/429: unexpected, raise
        logger.debug("raising error during stream connection")
        raise await exceptions.throw(self.response,
                                     loads=self.client._loads,
                                     url=self.kwargs['url'])
    logger.debug("stream state: %d" % self.state)
python
{ "resource": "" }
q259183
Handler.with_prefix
validation
def with_prefix(self, prefix, strict=False):
    """
    Decorator factory for commands triggered by a prefix.

    Parameters
    ----------
    prefix : str
        the prefix of the command
    strict : bool, optional
        If set to True the command must be at the beginning of
        the message. Defaults to False.

    Returns
    -------
    function
        a decorator that returns an :class:`EventHandler` instance
    """
    def wrapper(func):
        return EventHandler(
            func=func,
            event=self.event,
            prefix=prefix,
            strict=strict)

    return wrapper
python
{ "resource": "" }
q259184
BDClient.set_tz
validation
async def set_tz(self):
    """
    Set the process timezone to the timezone configured in the
    authenticated user's Twitter account settings.

    Side effects: mutates ``os.environ['TZ']`` and calls
    :func:`time.tzset`, which affects local-time conversions for the
    whole process.
    """
    # fetch the account settings from the Twitter API
    settings = await self.api.account.settings.get()
    # tzinfo_name is an identifier such as "Europe/Paris"
    tz = settings.time_zone.tzinfo_name
    os.environ['TZ'] = tz
    time.tzset()
python
{ "resource": "" }
q259185
run_command
validation
def run_command(args, **kwargs):
    """
    Execute a command given as a list (first element is the command
    name, the rest are its arguments), block until it finishes, and log
    how long it took.
    """
    assert len(args) > 0
    started = time.time()
    AsyncProcess(args, **kwargs).wait()
    logger.info("%s took %0.4f seconds", args[0], time.time() - started)
python
{ "resource": "" }
q259186
run_multiple_commands_redirect_stdout
validation
def run_multiple_commands_redirect_stdout(
        multiple_args_dict,
        print_commands=True,
        process_limit=-1,
        polling_freq=0.5,
        **kwargs):
    """
    Run multiple shell commands in parallel, write each of their
    stdout output to files associated with each command.

    Parameters
    ----------
    multiple_args_dict : dict
        A dictionary whose keys are files and values are args list.
        Run each args list as a subprocess and write stdout to the
        corresponding file.

    print_commands : bool
        Print shell commands before running them.

    process_limit : int
        Limit the number of concurrent processes to this number. 0
        if there is no limit, -1 to use max number of processors

    polling_freq : int
        Number of seconds between checking for done processes, if
        we have a process limit
    """
    assert len(multiple_args_dict) > 0
    assert all(len(args) > 0 for args in multiple_args_dict.values())
    # keys must be file-like objects (their .name is used for logging
    # and stdout redirection)
    assert all(hasattr(f, 'name') for f in multiple_args_dict.keys())
    if process_limit < 0:
        logger.debug("Using %d processes" % cpu_count())
        process_limit = cpu_count()

    start_time = time.time()
    # bounded queue of currently-running processes; Queue.full() is the
    # concurrency gate
    processes = Queue(maxsize=process_limit)

    def add_to_queue(process):
        # start the subprocess and (optionally) log its command line
        # into its own stdout file
        process.start()
        if print_commands:
            handler = logging.FileHandler(process.redirect_stdout_file.name)
            handler.setLevel(logging.DEBUG)
            logger.addHandler(handler)
            logger.debug(" ".join(process.args))
            logger.removeHandler(handler)
        processes.put(process)

    for f, args in multiple_args_dict.items():
        p = AsyncProcess(
            args,
            redirect_stdout_file=f,
            **kwargs)
        if not processes.full():
            add_to_queue(p)
        else:
            # queue is full: poll until at least one running process
            # finishes, then enqueue the new one
            while processes.full():
                # Are there any done processes?
                to_remove = []
                for possibly_done in processes.queue:
                    if possibly_done.poll() is not None:
                        possibly_done.wait()
                        to_remove.append(possibly_done)
                # Remove them from the queue and stop checking
                if to_remove:
                    for process_to_remove in to_remove:
                        processes.queue.remove(process_to_remove)
                    break
                # Check again in a second if there weren't
                time.sleep(polling_freq)
            add_to_queue(p)

    # Wait for all the rest of the processes
    while not processes.empty():
        processes.get().wait()

    elapsed_time = time.time() - start_time
    logger.info(
        "Ran %d commands in %0.4f seconds",
        len(multiple_args_dict),
        elapsed_time)
python
{ "resource": "" }
q259187
loads
validation
def loads(json_data, encoding="utf-8", **kwargs):
    """
    Custom loads function with an object_hook and automatic decoding

    Parameters
    ----------
    json_data : str or bytes
        The JSON data to decode
    encoding : :obj:`str`, optional
        The encoding of the bytestring
    **kwargs
        Keyword arguments passed to :func:`json.loads`

    Returns
    -------
    :obj:`dict` or :obj:`list`
        Decoded json data
    """
    # decode bytes first so json.loads always receives text
    text = (json_data.decode(encoding)
            if isinstance(json_data, bytes)
            else json_data)
    return json.loads(text, object_hook=JSONData, **kwargs)
python
{ "resource": "" }
q259188
read
validation
async def read(response, loads=loads, encoding=None):
    """
    Read the data of the response, decoding it according to its
    Content-Type header.

    Parameters
    ----------
    response : aiohttp.ClientResponse
        response
    loads : callable
        json loads function
    encoding : :obj:`str`, optional
        character encoding of the response, if set to None
        aiohttp should guess the right encoding

    Returns
    -------
    :obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list`
        the data returned depends on the response

    Raises
    ------
    exceptions.PeonyDecodeError
        when JSON or text decoding fails; carries the raw payload and
        the original exception
    """
    ctype = response.headers.get('Content-Type', "").lower()
    try:
        if "application/json" in ctype:
            logger.info("decoding data as json")
            return await response.json(encoding=encoding, loads=loads)
        if "text" in ctype:
            logger.info("decoding data as text")
            return await response.text(encoding=encoding)
    except (UnicodeDecodeError, json.JSONDecodeError) as exc:
        # decoding failed: wrap the raw bytes and the original error
        data = await response.read()
        raise exceptions.PeonyDecodeError(response=response,
                                          data=data,
                                          exception=exc)
    # any other content type is returned as raw bytes
    return await response.read()
python
{ "resource": "" }
q259189
doc
validation
def doc(func):
    """
    Find the message shown when someone calls the help command:
    the first line of the function's docstring, stripped of
    surrounding whitespace.

    Parameters
    ----------
    func : function
        the function

    Returns
    -------
    str
        The help message for this command, or "" when the function
        has no docstring.
    """
    stripped_chars = " \t"

    # BUGFIX: hasattr(func, '__doc__') is always True for functions, and
    # __doc__ is None when no docstring was written, which used to crash
    # on None.lstrip(); test for a truthy docstring instead.
    if func.__doc__:
        docstring = func.__doc__.lstrip(" \n\t")
        if "\n" in docstring:
            # keep only the first line of the docstring
            i = docstring.index("\n")
            return docstring[:i].rstrip(stripped_chars)
        elif docstring:
            return docstring.rstrip(stripped_chars)

    return ""
python
{ "resource": "" }
q259190
permission_check
validation
def permission_check(data, command_permissions, command=None, permissions=None):
    """
    Check whether the sender of a message has the permissions required
    to run a command.

    Parameters
    ----------
    data : dict
        message data
    command_permissions : dict
        permissions of the command, contains all the roles as key and
        users with these permissions as values
    command : function
        the command that is run
    permissions : tuple or list
        a list of permissions for the command

    Returns
    -------
    bool
        True if the user has the right permissions, False otherwise

    Raises
    ------
    RuntimeError
        when neither ``command`` nor ``permissions`` is given
    """
    if not permissions:
        if command is None:
            msg = "{name} must be called with command or permissions argument"
            raise RuntimeError(msg.format(name="_permission_check"))
        permissions = getattr(command, 'permissions', None)
        if permissions is None:
            # no permission required by the command
            return True

    sender_id = data['sender']['id']
    allowed_ids = (command_permissions[permission]
                   for permission in permissions
                   if permission in command_permissions)
    return any(sender_id in ids for ids in allowed_ids)
python
{ "resource": "" }
q259191
main
validation
def main(args_list=None):
    """
    Script to make pMHC binding predictions from amino acid sequences.

    Usage example:
        mhctools
            --sequence SFFPIQQQQQAAALLLI
            --sequence SILQQQAQAQQAQAASSSC
            --extract-subsequences
            --mhc-predictor netmhc
            --mhc-alleles HLA-A0201 H2-Db
            --mhc-predictor netmhc
            --output-csv epitope.csv

    Parameters
    ----------
    args_list : list of str, optional
        Command-line arguments; when None, sys.argv is presumably used
        by parse_args -- confirm against its implementation.
    """
    args = parse_args(args_list)
    # run the chosen MHC predictor over the input sequences
    binding_predictions = run_predictor(args)
    df = binding_predictions.to_dataframe()
    logger.info('\n%s', df)
    if args.output_csv:
        df.to_csv(args.output_csv, index=False)
        print("Wrote: %s" % args.output_csv)
python
{ "resource": "" }
q259192
NetMHCIIpan._prepare_drb_allele_name
validation
def _prepare_drb_allele_name(self, parsed_beta_allele):
    """
    Format a human DRB beta allele in the form NetMHCIIpan expects
    (e.g. "DRB1_0101"). DRB alleles are treated differently because
    there is little population diversity in the DR-alpha gene.
    """
    gene = parsed_beta_allele.gene
    if "DRB" not in gene:
        raise ValueError("Unexpected allele %s" % parsed_beta_allele)
    return "%s_%s%s" % (
        gene,
        parsed_beta_allele.allele_family,
        parsed_beta_allele.allele_code)
python
{ "resource": "" }
q259193
get_error
validation
def get_error(data):
    """ Return the error dict from a response payload when its code
    matches a known exception, otherwise None """
    if not isinstance(data, dict):
        return None

    if 'errors' in data:
        error = data['errors'][0]
    else:
        error = data.get('error', None)

    if isinstance(error, dict) and error.get('code') in errors:
        return error
python
{ "resource": "" }
q259194
throw
validation
async def throw(response, loads=None, encoding=None, **kwargs):
    """
    Get the response data if possible and raise the most specific
    exception available for it.

    Raises
    ------
    PeonyException
        a subclass matched by error code or HTTP status when one exists,
        otherwise the generic PeonyException
    """
    if loads is None:
        loads = data_processing.loads

    data = await data_processing.read(response, loads=loads,
                                      encoding=encoding)

    # prefer an exception matched by the API error code in the payload
    error = get_error(data)
    if error is not None:
        exception = errors[error['code']]
        raise exception(response=response, error=error, data=data, **kwargs)

    # otherwise fall back to an exception matched by the HTTP status
    if response.status in statuses:
        exception = statuses[response.status]
        raise exception(response=response, data=data, **kwargs)

    # raise PeonyException if no specific exception was found
    raise PeonyException(response=response, data=data, **kwargs)
python
{ "resource": "" }
q259195
ErrorDict.code
validation
def code(self, code):
    """ Decorator to register an exception class under the given
    error code """
    def register(exception):
        self[code] = exception
        return exception

    return register
python
{ "resource": "" }
q259196
PeonyHeaders.prepare_request
validation
async def prepare_request(self, method, url,
                          headers=None,
                          skip_params=False,
                          proxy=None,
                          **kwargs):
    """
    Prepare all the arguments for the request.

    Parameters
    ----------
    method : str
        HTTP method used by the request
    url : str
        The url to request
    headers : dict, optional
        Additionnal headers
    proxy : str
        proxy of the request
    skip_params : bool
        Don't use the parameters to sign the request

    Returns
    -------
    dict
        Parameters of the request correctly formatted
    """
    # POST payloads go in 'data', everything else in 'params'
    if method.lower() == "post":
        key = 'data'
    else:
        key = 'params'

    # only include the parameters in the signature when requested
    if key in kwargs and not skip_params:
        request_params = {key: kwargs.pop(key)}
    else:
        request_params = {}

    request_params.update(dict(method=method.upper(), url=url))

    # self.sign may be sync or async; utils.execute handles both
    coro = self.sign(**request_params, skip_params=skip_params,
                     headers=headers)
    request_params['headers'] = await utils.execute(coro)
    request_params['proxy'] = proxy

    kwargs.update(request_params)
    return kwargs
python
{ "resource": "" }
q259197
PeonyHeaders._user_headers
validation
def _user_headers(self, headers=None):
    """ Merge user-supplied headers into a copy of these headers,
    never letting the user override an existing Authorization header """
    merged = self.copy()
    if headers is None:
        return merged

    # protect the Authorization header only when one is already set
    skip_auth = bool(merged.get('Authorization', False))
    for key, value in headers.items():
        if skip_auth and key == 'Authorization':
            continue
        merged[key] = value

    return merged
python
{ "resource": "" }
q259198
process_keys
validation
def process_keys(func):
    """
    Decorator that rejects non-string keys and prepends the instance's
    prefix to the key when it is missing.
    """
    @wraps(func)
    def wrapper(self, k, *args):
        if not isinstance(k, str):
            raise ValueError(
                "%s: key must be a string" % self.__class__.__name__)

        if not k.startswith(self.prefix):
            k = self.prefix + k

        return func(self, k, *args)

    return wrapper
python
{ "resource": "" }
q259199
Functions._get
validation
def _get(self, text):
    """
    Return the name of the registered function that the text calls,
    or None when there is no match.

    Parameters
    ----------
    text : str
        The text that could call a function
    """
    if self.strict:
        # only accept a command at the very beginning of the text
        match = self.prog.match(text)
        if match is not None and match.group() in self:
            return match.group()
        return None

    # otherwise accept the first registered word anywhere in the text
    for candidate in self.prog.findall(text):
        if candidate in self:
            return candidate
python
{ "resource": "" }