text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import re
import typing
from typing import List, Tuple
from anchore_engine.common import os_package_types
from anchore_engine.db.entities.policy_engine import ImageCpe, ImagePackage
def compare_fields(lhs, rhs):
    """
    Comparison function for cpe fields for ordering.

    - "*" is the wildcard and is considered least specific: any concrete value
      sorts ahead of (returns -1 against) a "*".
    - when both sides are concrete, plain lexicographic comparison is used so
      the ordering stays deterministic.

    :return: 0 when equal, -1 when lhs orders before rhs, 1 otherwise
    """
    lhs_is_wildcard = lhs == "*"
    rhs_is_wildcard = rhs == "*"
    if lhs_is_wildcard and rhs_is_wildcard:
        return 0
    if lhs_is_wildcard:
        return 1
    if rhs_is_wildcard:
        return -1
    # both concrete: fall back to lexicographic ordering for consistency
    if lhs == rhs:
        return 0
    return 1 if lhs < rhs else -1
def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):
    """
    Compares the cpes based on business logic and returns -1, 0 or 1 if the lhs
    is lower than, equal to or greater than the rhs respectively.

    Business logic here is to compare the vendor, name, version, update and
    meta fields, in that order of precedence; the first field that differs
    decides the result.
    """
    for field in ("vendor", "name", "version", "update", "meta"):
        result = compare_fields(getattr(lhs, field), getattr(rhs, field))
        if result != 0:
            return result
    # every field compared equal - the cpes are the same for all practical purposes
    return 0
class FuzzyCandidateCpeGenerator:
    """
    A generator for CPEs from package metadata that generates candidates predictively
    """

    # Captures an embedded x.y.z version substring anywhere in a version string.
    embedded_semver_regex = re.compile(r".*([0-9]+\.[0-9]+\.[0-9]+).*")

    def for_distro_package(self, package: ImagePackage) -> typing.List[ImageCpe]:
        """
        Create the cpes for a single distro package

        :param package:
        :return: list of ImageCpes for the package
        """
        # Candidate names: the raw name plus a "-" -> "_" variant; the set
        # collapses them into one when the name contains no dashes.
        candidate_names = {package.name, package.name.replace("-", "_")}
        # OS packages without an explicit path are recorded under the package db.
        if not package.pkg_path and package.pkg_type in os_package_types:
            path = "pkgdb"
        else:
            path = package.pkg_path
        candidates = []
        for candidate_name in candidate_names:
            image_cpe = ImageCpe()
            image_cpe.name = candidate_name
            image_cpe.version = package.version
            image_cpe.pkg_type = package.pkg_type
            image_cpe.cpetype = "a"
            image_cpe.vendor = "*"  # vendor matches anything
            image_cpe.meta = "-"
            image_cpe.update = "-"
            image_cpe.pkg_path = path
            candidates.append(image_cpe)
        return candidates

    def _fuzzy_products(self, package: ImagePackage) -> typing.List[str]:
        """
        General handler for fuzzy CPE generation

        :param package:
        :return:
        """
        # TODO: add the generic product generation code (including nomatch exclusions here)
        return list({package.name})

    def _fuzzy_versions(self, package: ImagePackage) -> typing.List[str]:
        """Return the package version plus any embedded x.y.z substring found in it."""
        candidates = {package.version}
        match = self.embedded_semver_regex.match(package.version)
        if match:
            candidates.add(match.group(1))
        return list(candidates)
class BasicVersionCpeMatcher:
    """
    Simple matcher that only examines the product and version fields for exact matches
    """

    def matches(self, cpe_a, cpe_b) -> bool:
        """Return True when both cpes have identical product and version fields."""
        return (cpe_a.product, cpe_a.version) == (cpe_b.product, cpe_b.version)
class VendorEnabledMatcher:
    """
    Matcher that extends the basic product/version match with an exact vendor match
    """

    def matches(self, cpe_a, cpe_b) -> bool:
        """Return True when vendor, product and version are all equal across the two cpes."""
        lhs = (cpe_a.vendor, cpe_a.product, cpe_a.version)
        rhs = (cpe_b.vendor, cpe_b.product, cpe_b.version)
        return lhs == rhs
def dedup_cpe_vulnerabilities(image_vuln_tuples: List[Tuple]) -> List[Tuple]:
    """
    Due to multiple cpes per package in the analysis data, the list of matched
    vulnerabilities may contain duplicates. This function filters the list and
    yields one record aka image vulnerability cpe tuple per vulnerability
    affecting a package, keeping the "best" (lowest-ordering per compare_cpes)
    image cpe for each.
    """
    if not image_vuln_tuples:
        return []
    # map (vulnerability, namespace, package path) -> best (image_cpe, vuln_cpe) seen so far
    best_matches = {}
    for image_cpe, vuln_cpe in image_vuln_tuples:
        key = (
            vuln_cpe.vulnerability_id,
            vuln_cpe.namespace_name,
            image_cpe.pkg_path,
        )
        existing = best_matches.get(key)
        # first sighting, or the new cpe orders ahead of the stored one: keep the new pair
        if existing is None or compare_cpes(existing[0], image_cpe) > 0:
            best_matches[key] = (image_cpe, vuln_cpe)
    return list(best_matches.values())
|
{
"content_hash": "c66aa24f037001ba462c9c132fcb4d9c",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 140,
"avg_line_length": 32.32044198895028,
"alnum_prop": 0.607008547008547,
"repo_name": "anchore/anchore-engine",
"id": "34b75445907221f4fbaa899c816013f3757d7e73",
"size": "5850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anchore_engine/services/policy_engine/engine/vulns/cpes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
}
|
import enum
from collections import namedtuple
class GameOptions:
    """Per-game inventory options: a Steam app id paired with an inventory context id."""

    PredefinedOptions = namedtuple('PredefinedOptions', ['app_id', 'context_id'])

    # Frequently-traded games, keyed by their Steam app ids.
    STEAM = PredefinedOptions(app_id='753', context_id='6')
    DOTA2 = PredefinedOptions(app_id='570', context_id='2')
    CS = PredefinedOptions(app_id='730', context_id='2')
    TF2 = PredefinedOptions(app_id='440', context_id='2')
    PUBG = PredefinedOptions(app_id='578080', context_id='2')
    RUST = PredefinedOptions(app_id='252490', context_id='2')

    def __init__(self, app_id: str, context_id: str) -> None:
        self.app_id = app_id
        self.context_id = context_id
class Asset:
    """A tradable inventory item: an asset id within a game's inventory context."""

    def __init__(self, asset_id: str, game: GameOptions, amount: int = 1) -> None:
        self.asset_id = asset_id
        self.game = game
        self.amount = amount

    def to_dict(self):
        """Serialize to the dict shape used when composing a trade offer."""
        # appid is numeric in the payload while contextid/assetid stay strings.
        return dict(
            appid=int(self.game.app_id),
            contextid=self.game.context_id,
            amount=self.amount,
            assetid=self.asset_id,
        )
class Currency(enum.IntEnum):
    # Wallet currency codes; values are presumably Steam's numeric currency
    # ids (ECurrencyCode) -- TODO confirm against the Steam economy docs.
    USD = 1
    GBP = 2
    EURO = 3
    CHF = 4
    RUB = 5
    UAH = 18
    AUD = 21
class TradeOfferState(enum.IntEnum):
    # Trade offer lifecycle states; values presumably mirror Steam's
    # ETradeOfferState enumeration -- TODO confirm against the Web API docs.
    Invalid = 1
    Active = 2
    Accepted = 3
    Countered = 4
    Expired = 5
    Canceled = 6
    Declined = 7
    InvalidItems = 8
    ConfirmationNeed = 9
    CanceledBySecondaryFactor = 10
    StateInEscrow = 11
class SteamUrl:
    """Base URLs for the Steam web properties."""

    API_URL = "https://api.steampowered.com"
    COMMUNITY_URL = "https://steamcommunity.com"
    STORE_URL = "https://store.steampowered.com"
class Endpoints:
    """Chat endpoints of the ISteamWebUserPresenceOAuth web API."""

    CHAT_LOGIN = f"{SteamUrl.API_URL}/ISteamWebUserPresenceOAuth/Logon/v1"
    SEND_MESSAGE = f"{SteamUrl.API_URL}/ISteamWebUserPresenceOAuth/Message/v1"
    CHAT_LOGOUT = f"{SteamUrl.API_URL}/ISteamWebUserPresenceOAuth/Logoff/v1"
    CHAT_POLL = f"{SteamUrl.API_URL}/ISteamWebUserPresenceOAuth/Poll/v1"
|
{
"content_hash": "6b3765359e1cd4e8e1183e1d131568b9",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 82,
"avg_line_length": 26.33823529411765,
"alnum_prop": 0.6231155778894473,
"repo_name": "bukson/steampy",
"id": "2ef244f8f85b92abdb21f676dad1a7a3fcdaeed3",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steampy/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67976"
}
],
"symlink_target": ""
}
|
import urllib2
import json
import sys
import datetime
from bs4 import BeautifulSoup
def parse_matches(url='http://en.wikipedia.org/wiki/2014_FIFA_World_Cup'):
    """Scrape the group-stage match schedule from the Wikipedia page at *url*.

    Returns a dict mapping each group heading text (e.g. "Group A") to a list
    of match dicts with keys 'Date' (ISO-8601, shifted +3h from the listed
    UTC-3 kickoff times), 'Home Team', 'Away Team', 'Match' (number), 'Venue'.

    NOTE(review): Python 2 code (urllib2) tightly coupled to the 2014 page
    markup ('vevent' divs, three tables per match) -- verify before reusing
    on other pages.
    """
    response = urllib2.urlopen(url)
    soup = BeautifulSoup(response.read())
    ret = dict()
    # Each group is introduced by an <h3> heading starting with "Group".
    for h3 in soup('h3'):
        if not h3.text.startswith('Group'):
            continue
        matches = list()
        # Each group renders (at most) 6 matches as 'vevent' divs after the heading.
        for div in h3.find_next_siblings('div', {'class' : 'vevent'}, limit=6):
            match = dict()
            tables = div('table')
            # First table: kickoff as "<day> <month> <year> <HH:MM>".
            date_string = tables[0].text.replace('\n', ' ').strip()
            date = datetime.datetime.strptime(date_string, '%d %B %Y %H:%M')
            date += datetime.timedelta(hours=3) # times listed in UTC-3
            match['Date'] = date.isoformat()
            # Second table: home team / "Match N" / away team on separate lines.
            teams = tables[1].text.strip().split('\n')
            match['Home Team'] = teams[0].strip()
            match['Match'] = int(teams[1].split(' ')[1])
            match['Away Team'] = teams[2].strip()
            # Third table: venue name.
            venue = tables[2].text.strip()
            match['Venue'] = venue
            matches.append(match)
        ret[h3.text] = matches
    return ret
if __name__ == '__main__':
    # Scrape the schedule and pretty-print it as JSON.
    matches = parse_matches()
    dump = json.dumps(matches, sort_keys=True, indent=4, separators=(',', ': '))
    if len(sys.argv) > 1:
        # The first CLI argument, when given, is the output file path.
        with open(sys.argv[1], 'w') as f:
            f.write(dump)
    else:
        # No output path: write to stdout (Python 2 print statement).
        print dump
|
{
"content_hash": "2dae92f6e15b3966bf48dc23d0c3efe2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 26.3265306122449,
"alnum_prop": 0.5984496124031008,
"repo_name": "shadow-broker/WorldCup2014",
"id": "27e31b87771ac59d876db7dc0a4a70c272244c77",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/matches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3986"
}
],
"symlink_target": ""
}
|
import math
import numpy as np
# Back-propagation weight-update computation.
# (the misspelled name "back_propergation" is kept for caller compatibility)
def back_propergation(errs, unit_output, preunit_output, func, learning_rate, solved='fitting'):
    """Compute the gradient-descent update for one layer.

    :param errs: error signal propagated to this layer
    :param unit_output: activations of this layer's units
    :param preunit_output: activations of the previous layer
    :param func: activation function object (sigmoid, tanh, linear, perceptron, softmax)
    :param learning_rate: scalar step size
    :param solved: problem type, 'fitting'/'fit' or 'classification'/'class'
    :return: [delta, delta_part] where delta = learning_rate * dot(preunit_output, delta_part)
    :raises ValueError: for activation/problem-type combinations that have no
        update rule defined (these previously fell through and crashed with
        an UnboundLocalError on delta_part)
    """
    delta_part = None
    if solved in ('fitting', 'fit'):
        # Fitting: multiply the error by the activation's derivative.
        if func == sigmoid:
            delta_part = errs * unit_output * (1 - unit_output)
        elif func == tanh:
            delta_part = errs * (1 - np.square(unit_output))
        elif func == linear:
            delta_part = errs
        elif func == perceptron:
            # perceptron has zero gradient almost everywhere
            delta_part = errs * 0
    elif solved in ('classification', 'class'):
        # Classification: sigmoid/softmax (with cross-entropy) and linear
        # reduce to the raw error term.
        if func == sigmoid or func == linear or func == softmax:
            delta_part = errs
    if delta_part is None:
        raise ValueError(
            "unsupported activation/problem combination: func=%r solved=%r" % (func, solved))
    delta = learning_rate * np.dot(preunit_output, delta_part)
    return [delta, delta_part]
# Sigmoid function
# src_data : Value of input signal
def sigmoid(src_data):
    """Return the logistic sigmoid 1 / (1 + exp(-x)) of a scalar input."""
    return 1.0 / (1.0 + math.exp(-src_data))
# Hyperbolic tangent function
# src_data : Value of input signal
def tanh(src_data):
    """Return the hyperbolic tangent of a scalar input."""
    result = math.tanh(src_data)
    return result
# Linear function
# src_data : Value of input signal
def linear(src_data):
    """Identity activation: return the input signal unchanged."""
    return src_data
# Perceptron function
# src_data : Value of input signal
def perceptron(src_data):
    """Step activation: 1 for a strictly positive input, 0 otherwise."""
    if src_data > 0:
        return 1
    return 0
# SoftMax function
# src_data : Value of input signal
def softmax(src_data):
    """Return the softmax of src_data as a vector summing to 1.

    Subtracts the max before exponentiating so that large inputs cannot
    overflow np.exp; the result is mathematically unchanged because softmax
    is invariant under a constant shift of its inputs.
    """
    shifted = np.exp(src_data - np.max(src_data))
    return shifted / np.sum(shifted)
|
{
"content_hash": "6c5eb85a326778bf603a6d9fe783fa06",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 98,
"avg_line_length": 30.015625,
"alnum_prop": 0.5929203539823009,
"repo_name": "WaterIsland/DLStudy",
"id": "0b65fdbc0a3d4303a8ae45d725e32d9ab526d240",
"size": "2100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mln/BiosLearning/neuro_function.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "385300"
}
],
"symlink_target": ""
}
|
from ..osid import managers as osid_managers
from ..osid import sessions as osid_sessions
class LocaleProfile(osid_managers.OsidProfile):
    """The locale profile describes the interoperability of locale services."""
    # NOTE(review): interface/stub class -- every accessor below returns None;
    # the trailing "# osid.type.TypeList" comments record the documented return
    # type from the OSID spec, not actual behavior.

    def get_language_types_for_source(self, source_language_type, source_script_type):
        """Gets the list of target language types for a given source language type.

        :param source_language_type: the type of the source language
        :type source_language_type: ``osid.type.Type``
        :param source_script_type: the type of the source script
        :type source_script_type: ``osid.type.Type``
        :return: the list of supported types for the given source language type
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_language_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_language_types(self):
        """Gets all the source language types supported.

        :return: the list of supported language types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_language_types = property(fget=get_source_language_types)

    def get_script_types_for_language_type(self, language_type):
        """Gets the list of script types available for a given language type.

        :param language_type: the type of the language
        :type language_type: ``osid.type.Type``
        :return: the list of supported script types for the given language type
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``language_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_numeric_format_types(self):
        """Gets all the numeric format types supported.

        :return: the list of supported numeric format types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    numeric_format_types = property(fget=get_numeric_format_types)

    def get_calendar_types_for_formatting(self):
        """Gets all the calendar types for which formats are available.

        :return: the list of calendar types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    calendar_types_for_formatting = property(fget=get_calendar_types_for_formatting)

    def get_date_format_types_for_calendar_type(self, calendar_type):
        """Gets the list of date format types for a given calendar type.

        :param calendar_type: the type of the calendar
        :type calendar_type: ``osid.type.Type``
        :return: the list of supported date format types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``calendar_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_time_types_for_formatting(self):
        """Gets all the time types for which formatting is available.

        :return: the list of time types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    time_types_for_formatting = property(fget=get_time_types_for_formatting)

    def get_time_format_types_for_time_type(self, time_type):
        """Gets the list of time format types for a given time type.

        :param time_type: the type of the time
        :type time_type: ``osid.type.Type``
        :return: the list of supported time format types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``time_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_currency_types_for_formatting(self):
        """Gets all the currency types for which formatting is available.

        :return: the list of currency types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    currency_types_for_formatting = property(fget=get_currency_types_for_formatting)

    def get_currency_format_types_for_currency_type(self, currency_type):
        """Gets the list of currency format types for a given currency type.

        :param currency_type: the type of the currency
        :type currency_type: ``osid.type.Type``
        :return: the list of supported currency format types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``currency_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_coordinate_types_for_formatting(self):
        """Gets all the coordinate types for which formatting is available.

        :return: the list of coordinate types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    coordinate_types_for_formatting = property(fget=get_coordinate_types_for_formatting)

    def get_coordinate_format_types_for_coordinate_type(self, coordinate_type):
        """Gets the list of coordinate format types for a given coordinate type.

        :param coordinate_type: the type of the coordinate
        :type coordinate_type: ``osid.type.Type``
        :return: the list of supported coordinate format types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``coordinate_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_unit_types_for_source(self, source_unit_type):
        """Gets the list of target measure types for a given source measure type.

        :param source_unit_type: the type of the source measure
        :type source_unit_type: ``osid.type.Type``
        :return: the list of supported target measure types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_unit_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_unit_types(self):
        """Gets all the source unit types supported.

        :return: the list of supported source unit types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_unit_types = property(fget=get_source_unit_types)

    def get_currency_types_for_source(self, source_currency_type):
        """Gets the list of target currency types for a given source currency type.

        :param source_currency_type: the type of the source currency
        :type source_currency_type: ``osid.type.Type``
        :return: the list of supported currency types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_currency_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_currency_types(self):
        """Gets the list of source currency types.

        :return: the list of supported source currency types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_currency_types = property(fget=get_source_currency_types)

    def get_calendar_types_for_source(self, source_calendar_type):
        """Gets the list of target calendar types for a given source calendar type.

        :param source_calendar_type: the type of the source calendar
        :type source_calendar_type: ``osid.type.Type``
        :return: the list of supported calendar types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_calendar_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_calendar_types(self):
        """Gets the list of source calendar types.

        :return: the list of supported source calendar types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_calendar_types = property(fget=get_source_calendar_types)

    def get_time_types_for_source(self, source_time_type):
        """Gets the list of target time types for a given source time type.

        :param source_time_type: the type of the source time
        :type source_time_type: ``osid.type.Type``
        :return: the list of supported time types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_time_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_time_types(self):
        """Gets the list of source time types.

        :return: the list of supported source time types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_time_types = property(fget=get_source_time_types)

    def get_time_types_for_calendar_type(self, calendar_type):
        """Gets the list of time types supported for a given calendar type where they are both used in a ``DateTime``.

        :param calendar_type: the type of the calendar
        :type calendar_type: ``osid.type.Type``
        :return: the list of supported time types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``calendar_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_calendar_types_for_time_type(self, time_type):
        """Gets the list of calendar types supported for a given time type where they are both used in a ``DateTime``.

        :param time_type: the type of the time system
        :type time_type: ``osid.type.Type``
        :return: the list of supported calendar types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``time_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_coordinate_types_for_source(self, source_coordinate_type):
        """Gets the list of target coordinate types for a given source coordinate type.

        :param source_coordinate_type: the type of the source coordinate
        :type source_coordinate_type: ``osid.type.Type``
        :return: the list of supported target coordinate types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_coordinate_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_coordinate_types(self):
        """Gets the list of source coordinate types.

        :return: the list of supported source coordinate types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_coordinate_types = property(fget=get_source_coordinate_types)

    def get_spatial_unit_record_types_for_source(self, source_spatial_unit_record_type):
        """Gets the list of target spatial unit types for a given source spatial unit type.

        :param source_spatial_unit_record_type: the type of the source spatial unit record
        :type source_spatial_unit_record_type: ``osid.type.Type``
        :return: the list of supported target spatial unit record types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_spatial_unit_record_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_spatial_unit_record_types(self):
        """Gets the list of source spatial unit record types.

        :return: the list of supported source spatial unit record types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_spatial_unit_record_types = property(fget=get_source_spatial_unit_record_types)

    def get_format_types_for_source(self, source_format_type):
        """Gets the list of target format types for a given source spatial unit type.

        :param source_format_type: the type of the source format
        :type source_format_type: ``osid.type.Type``
        :return: the list of supported target format types
        :rtype: ``osid.type.TypeList``
        :raise: ``NullArgument`` -- ``source_format_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    def get_source_format_types(self):
        """Gets the list of source format types.

        :return: the list of supported source format types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.type.TypeList

    source_format_types = property(fget=get_source_format_types)
class LocaleManager(osid_managers.OsidManager, osid_sessions.OsidSession, LocaleProfile):
    """The locale manager provides access to locale sessions and provides interoperability tests for various aspects of this service.

    The sessions included in this manager are:

      * ``TranslationSession:`` a session translate strings
      * ``TranslationAdminSession: a`` session to update the string
        translations for a locale
      * ``NumericFormattingSession:`` a session for formatting and
        parsing numbers
      * ``CalendarFormattingSession:`` a session for formatting and
        parsing dates and times
      * ``CurrencyFormattingSession`` : a session for formatting and
        parsing currency amounts
      * ``CoordinateFormattingSession:`` a session for formatting and
        parsing coordinates
      * ``UnitConversionSession:`` a session to convert measurement
        units ``None``
      * ``CurrencyConversionSession:`` a session to convert currency
      * ``CalendarConversionSession:`` a session to convert dates across
        calendars
      * ``CoordinateConversionSession:`` a session to convert coordinate
        systems
      * ``SpatialUnitConversionSession:`` a session to convert spatial
        units
      * ``FormatConversionSession:`` a session to convert text formats
      * ``CalendarInfoSession:`` a session for examining calendaring and
        time systems

    """
    # NOTE(review): documentation-only stub -- no session accessor methods are
    # defined in this class body; behavior comes entirely from the bases.
class LocaleProxyManager(osid_managers.OsidProxyManager, LocaleProfile):
    """The locale manager provides access to locale sessions and provides interoperability tests for various aspects of this service.

    Methods in this manager support the passing of a ``Proxy`` for
    passing information from server environments. The sessions included
    in this manager are:

      * ``TranslationSession:`` a session translate strings
      * ``TranslationAdminSession: a`` session to update the string
        translations for a locale
      * ``NumericFormattingSession:`` a session for formatting and
        parsing numbers
      * ``CalendarFormattingSession:`` a session for formatting and
        parsing dates and times
      * ``CurrencyFormattingSession`` : a session for formatting and
        parsing currency amounts
      * ``CoordinateFormattingSession:`` a session for formatting and
        parsing coordinates
      * ``UnitConversionSession:`` a session to convert measurement
        units ``None``
      * ``CurrencyConversionSession:`` a session to convert currency
      * ``CalendarConversionSession:`` a session to convert dates across
        calendars
      * ``CoordinateConversionSession:`` a session to convert coordinate
        systems
      * ``SpatialUnitConversionSession:`` a session to convert spatial
        units
      * ``FormatConversionSession:`` a session to convert text formats
      * ``CalendarInfoSession:`` a session for examining calendaring and
        time systems

    """
    # NOTE(review): documentation-only stub -- no session accessor methods are
    # defined in this class body; behavior comes entirely from the bases.
|
{
"content_hash": "37717493a7e599bd171904c9cf9b64f7",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 133,
"avg_line_length": 35.57203389830509,
"alnum_prop": 0.6527695056581299,
"repo_name": "birdland/dlkit-doc",
"id": "5733f6fbca68d297e8f4d6102c86f4995ec96cc3",
"size": "16791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlkit/locale/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12458859"
}
],
"symlink_target": ""
}
|
import dj_database_url
from decouple import config
from .base import * # noqa
# NOTE(review): presumably a test-only settings module (per its filename) --
# DEBUG on and a hard-coded SECRET_KEY are acceptable only outside production.
DEBUG = True
SECRET_KEY = 'secret'

# Local PostgreSQL database; host/user/password are presumably picked up from
# environment defaults -- TODO confirm.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'fixmydjango',
    }
}

# Static and media files served from relative local directories.
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
MEDIA_ROOT = 'mediafiles'
MEDIA_URL = '/media/'

# Blank reCAPTCHA keys -- presumably captcha validation is not exercised here.
RECAPTCHA_KEY = ''
RECAPTCHA_SECRET_KEY = ''
|
{
"content_hash": "d2ce789320719ddd9cf5dc66919ec477",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 50,
"avg_line_length": 14.923076923076923,
"alnum_prop": 0.634020618556701,
"repo_name": "vintasoftware/fixmydjango",
"id": "db7a53bc2cd08248400c0feca9b84808e9e75475",
"size": "388",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fix_my_django/settings/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42107"
},
{
"name": "HTML",
"bytes": "22116"
},
{
"name": "JavaScript",
"bytes": "68"
},
{
"name": "Makefile",
"bytes": "316"
},
{
"name": "Python",
"bytes": "57045"
}
],
"symlink_target": ""
}
|
import bitcoin.core.script
import colorcore.caching
import openassets.protocol
import tests.helpers
import unittest
class SqliteCacheTests(unittest.TestCase):
    """Tests for colorcore.caching.SqliteCache using an in-memory SQLite database."""

    @tests.helpers.async_test
    def test_colored_output(self, loop):
        # A colored output (asset_id/asset_quantity set) survives a put/get round trip.
        target = colorcore.caching.SqliteCache(':memory:')
        output = openassets.protocol.TransactionOutput(
            150,
            bitcoin.core.script.CScript(b'abcd'),
            b'1234',
            75,
            openassets.protocol.OutputType.issuance
        )
        yield from target.put(b'transaction', 5, output)
        result = yield from target.get(b'transaction', 5)
        self.assert_output(result, 150, b'abcd', b'1234', 75, openassets.protocol.OutputType.issuance)

    @tests.helpers.async_test
    def test_commit(self, loop):
        # The stored output must remain readable after an explicit commit.
        target = colorcore.caching.SqliteCache(':memory:')
        output = openassets.protocol.TransactionOutput(
            150,
            bitcoin.core.script.CScript(b'abcd'),
            b'1234',
            75,
            openassets.protocol.OutputType.issuance
        )
        yield from target.put(b'transaction', 5, output)
        yield from target.commit()
        result = yield from target.get(b'transaction', 5)
        self.assert_output(result, 150, b'abcd', b'1234', 75, openassets.protocol.OutputType.issuance)

    @tests.helpers.async_test
    def test_uncolored_output(self, loop):
        # An uncolored output (no asset id, zero quantity) also round-trips.
        target = colorcore.caching.SqliteCache(':memory:')
        output = openassets.protocol.TransactionOutput(
            150,
            bitcoin.core.script.CScript(b'abcd'),
            None,
            0,
            openassets.protocol.OutputType.uncolored
        )
        yield from target.put(b'transaction', 5, output)
        result = yield from target.get(b'transaction', 5)
        self.assert_output(result, 150, b'abcd', None, 0, openassets.protocol.OutputType.uncolored)

    @tests.helpers.async_test
    def test_max_values(self, loop):
        # Boundary case: 64-bit max values and a large (16 KiB) script.
        target = colorcore.caching.SqliteCache(':memory:')
        output = openassets.protocol.TransactionOutput(
            2 ** 63 - 1,
            bitcoin.core.script.CScript(b'a' * 16384),
            b'1234',
            2 ** 63 - 1,
            openassets.protocol.OutputType.issuance
        )
        yield from target.put(b'transaction', 5, output)
        result = yield from target.get(b'transaction', 5)
        self.assert_output(
            result, 2 ** 63 - 1, b'a' * 16384, b'1234', 2 ** 63 - 1, openassets.protocol.OutputType.issuance)

    @tests.helpers.async_test
    def test_cache_miss(self, loop):
        # A key never stored must yield None rather than raising.
        target = colorcore.caching.SqliteCache(':memory:')
        result = yield from target.get(b'transaction', 5)
        self.assertIsNone(result)

    def assert_output(self, output, value, script, asset_id, asset_quantity, output_type):
        # Helper: compare every field of a TransactionOutput to expectations.
        self.assertEqual(value, output.value)
        self.assertEqual(script, bytes(output.script))
        self.assertEqual(asset_id, output.asset_id)
        self.assertEqual(asset_quantity, output.asset_quantity)
        self.assertEqual(output_type, output.output_type)
|
{
"content_hash": "410f56862b5b0998903ffe08e146f649",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 109,
"avg_line_length": 33.90217391304348,
"alnum_prop": 0.629368387303623,
"repo_name": "OpenAssets/colorcore",
"id": "d3248e6101e9906745e14c62c09b7d933327ef18",
"size": "4267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_caching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96358"
}
],
"symlink_target": ""
}
|
import struct
import sys
import tempfile
from mock import Mock, patch
from nose.tools import *
from behave.formatter import formatters
from behave.formatter import pretty
from behave.formatter import tag_count
from behave.model import Tag, Feature, Match, Scenario, Step
class TestGetTerminalSize(object):
    """Tests for pretty.get_terminal_size covering platform and error fallbacks."""

    def setUp(self):
        # fcntl is unavailable on some platforms (e.g. Windows); in that case
        # run without the ioctl mock.
        try:
            self.ioctl_patch = patch('fcntl.ioctl')
            self.ioctl = self.ioctl_patch.start()
        except ImportError:
            self.ioctl_patch = None
            self.ioctl = None
        # An all-zero winsize struct as passed to the TIOCGWINSZ ioctl.
        self.zero_struct = struct.pack('HHHH', 0, 0, 0, 0)

    def tearDown(self):
        if self.ioctl_patch:
            self.ioctl_patch.stop()

    def test_windows_fallback(self):
        # On a 'windows' platform the size falls back to the 80x24 default.
        platform = sys.platform
        sys.platform = 'windows'
        eq_(pretty.get_terminal_size(), (80, 24))
        sys.platform = platform

    def test_termios_fallback(self):
        # Only meaningful when termios is missing; skip (return) when present.
        try:
            import termios
            return
        except ImportError:
            pass
        eq_(pretty.get_terminal_size(), (80, 24))

    def test_exception_in_ioctl(self):
        try:
            import termios
        except ImportError:
            return
        def raiser(*args, **kwargs):
            raise Exception('yeehar!')
        # An ioctl failure must fall back to the 80x24 default.
        self.ioctl.side_effect = raiser
        eq_(pretty.get_terminal_size(), (80, 24))
        self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)

    def test_happy_path(self):
        try:
            import termios
        except ImportError:
            return
        # winsize packs (rows, cols, ...); get_terminal_size returns (cols, rows).
        self.ioctl.return_value = struct.pack('HHHH', 17, 23, 5, 5)
        eq_(pretty.get_terminal_size(), (23, 17))
        self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)

    def test_zero_size_fallback(self):
        try:
            import termios
        except ImportError:
            return
        # A zero-sized report from the terminal also falls back to 80x24.
        self.ioctl.return_value = self.zero_struct
        eq_(pretty.get_terminal_size(), (80, 24))
        self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
def _tf():
'''Open a temp file that looks a bunch like stdout.
'''
if sys.version_info[0] == 3:
# in python3 it's got an encoding and accepts new-style strings
return tempfile.TemporaryFile(mode='w', encoding='UTF-8')
# pre-python3 it's not got an encoding and accepts encoded data
# (old-style strings)
return tempfile.TemporaryFile(mode='w')
class FormatterTests(object):
    """Shared smoke tests for behave formatters.

    Subclasses set ``formatter_name``; each test only checks that formatting
    does not explode (e.g. on unicode input) — the formatted output itself is
    not inspected.
    """
    def setUp(self):
        self.config = Mock()
        self.config.color = True
        self.config.format = [self.formatter_name]
    _line = 0
    @property
    def line(self):
        """Monotonically increasing fake line number for model fixtures."""
        self._line += 1
        return self._line
    def _formatter(self, file, config):
        """Build the formatter under test, bound to *file*."""
        f = formatters.get_formatter(config, file)
        f.uri('<string>')
        return f
    def _feature(self, keyword=u'k\xe9yword', name=u'name', tags=None,
                 location=u'location', description=None, scenarios=[],
                 background=None):
        """Create a Feature fixture.

        Fix: the original used mutable default arguments ([u'spam', u'ham'],
        [u'description']) that are shared across calls; fresh lists are now
        created per call, with the same default values.
        """
        if tags is None:
            tags = [u'spam', u'ham']
        if description is None:
            description = [u'description']
        line = self.line
        tags = [Tag(name, line) for name in tags]
        return Feature('<string>', line, keyword, name, tags=tags,
                       description=description, scenarios=scenarios,
                       background=background)
    def _scenario(self, keyword=u'k\xe9yword', name=u'name', tags=None, steps=None):
        """Create a Scenario fixture; mutable defaults created per call."""
        if tags is None:
            tags = []
        if steps is None:
            steps = []
        line = self.line
        tags = [Tag(name, line) for name in tags]
        return Scenario('<string>', line, keyword, name, tags=tags, steps=steps)
    def _step(self, keyword=u'k\xe9yword', step_type='given', name=u'name',
              text=None, table=None):
        """Create a Step fixture."""
        line = self.line
        return Step('<string>', line, keyword, step_type, name, text=text,
                    table=table)
    def _match(self, arguments=None):
        """Create a Match around a no-op callable."""
        def dummy():
            pass
        return Match(dummy, arguments)
    def test_feature(self):
        # this test does not actually check the result of the formatting; it
        # just exists to make sure that formatting doesn't explode in the face of
        # unicode and stuff
        p = self._formatter(_tf(), self.config)
        f = self._feature()
        p.feature(f)
    def test_scenario(self):
        p = self._formatter(_tf(), self.config)
        f = self._feature()
        p.feature(f)
        s = self._scenario()
        p.scenario(s)
    def test_step(self):
        p = self._formatter(_tf(), self.config)
        f = self._feature()
        p.feature(f)
        s = self._scenario()
        p.scenario(s)
        s = self._step()
        p.step(s)
        p.match(self._match([]))
        s.status = u'passed'
        p.result(s)
class TestPretty(FormatterTests):
    # Run the shared smoke tests against the 'pretty' formatter.
    formatter_name = 'pretty'
class TestPlain(FormatterTests):
    # Run the shared smoke tests against the 'plain' formatter.
    formatter_name = 'plain'
class TestJson(FormatterTests):
    # Run the shared smoke tests against the 'json' formatter.
    formatter_name = 'json'
class TestTagCount(FormatterTests):
    """Wrap the plain formatter in a TagCountFormatter and verify the tally."""
    formatter_name = 'plain'
    def _formatter(self, stream, config, tag_counts=None):
        """Build the plain formatter wrapped so tag hits land in *tag_counts*."""
        tag_counts = {} if tag_counts is None else tag_counts
        base = formatters.get_formatter(config, stream)
        base.uri('<string>')
        wrapped = tag_count.TagCountFormatter(base, tag_counts)
        wrapped.uri('<string>')
        return wrapped
    def test_tag_count(self):
        tallies = {}
        formatter = self._formatter(_tf(), self.config, tallies)
        scenario = self._scenario()
        feature = self._feature(scenarios=[scenario])
        formatter.feature(feature)
        formatter.scenario(scenario)
        # The feature carries the default tags; each maps to its location.
        eq_(tallies, {'ham': ['<string>:1'], 'spam': ['<string>:1']})
|
{
"content_hash": "e3a182ccc208fccae18686a42ae341c4",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 83,
"avg_line_length": 27.71356783919598,
"alnum_prop": 0.585312783318223,
"repo_name": "memee/behave",
"id": "5efca9c9e25089389965116bfff42a93d689d71d",
"size": "5515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_formatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "261402"
}
],
"symlink_target": ""
}
|
import datetime
import glance_store
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client as http
import webob
import glance.api.v2.image_members
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
# Fixed timestamp used by fixtures, and its ISO-8601 rendering as the API
# serializes it.
DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355)
ISOTIME = '2012-05-16T15:27:36Z'
CONF = cfg.CONF
BASE_URI = unit_test_utils.BASE_URI
# Well-known image ids reused across the tests below.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc'
UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'
UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86'
UUID5 = '3eee7cc2-eae7-4c0f-b50d-a7160b0c62ed'
# Well-known tenant (project) ids playing owner/member roles in the tests.
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
def _db_fixture(id, **kwargs):
obj = {
'id': id,
'name': None,
'visibility': 'shared',
'properties': {},
'checksum': None,
'owner': None,
'status': 'queued',
'tags': [],
'size': None,
'locations': [],
'protected': False,
'disk_format': None,
'container_format': None,
'deleted': False,
'min_ram': None,
'min_disk': None,
}
obj.update(kwargs)
return obj
def _db_image_member_fixture(image_id, member_id, **kwargs):
obj = {
'image_id': image_id,
'member': member_id,
'status': 'pending',
}
obj.update(kwargs)
return obj
def _domain_fixture(id, **kwargs):
    """Construct an ImageMembership domain object from *id* plus overrides."""
    attributes = dict(id=id)
    attributes.update(kwargs)
    return glance.domain.ImageMembership(**attributes)
class TestImageMembersController(test_utils.BaseTestCase):
    """Controller-level tests for the v2 image-members API: index/show/
    create/update/delete, including policy enforcement and quota cases."""
    def setUp(self):
        super(TestImageMembersController, self).setUp()
        # Fake backends so no real DB/store/policy/notifier is touched.
        self.db = unit_test_utils.FakeDB(initialize=False)
        self.store = unit_test_utils.FakeStoreAPI()
        self.policy = unit_test_utils.FakePolicyEnforcer()
        self.notifier = unit_test_utils.FakeNotifier()
        self._create_images()
        self._create_image_members()
        self.controller = glance.api.v2.image_members.ImageMembersController(
            self.db,
            self.policy,
            self.notifier,
            self.store)
        glance_store.register_opts(CONF)
        self.config(default_store='filesystem',
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")
        glance_store.create_stores()
    def _create_images(self):
        # UUID1 public/TENANT1; UUID2, UUID5 owned by TENANT1;
        # UUID3 owned by TENANT3; UUID4 owned by TENANT4.
        self.images = [
            _db_fixture(UUID1, owner=TENANT1, name='1', size=256,
                        visibility='public',
                        locations=[{'url': '%s/%s' % (BASE_URI, UUID1),
                                    'metadata': {}, 'status': 'active'}]),
            _db_fixture(UUID2, owner=TENANT1, name='2', size=512),
            _db_fixture(UUID3, owner=TENANT3, name='3', size=512),
            _db_fixture(UUID4, owner=TENANT4, name='4', size=1024),
            _db_fixture(UUID5, owner=TENANT1, name='5', size=1024),
        ]
        [self.db.image_create(None, image) for image in self.images]
        self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
    def _create_image_members(self):
        # Memberships: UUID2->TENANT4, UUID3->{TENANT4, TENANT2},
        # UUID4->TENANT1; UUID5 has none.
        self.image_members = [
            _db_image_member_fixture(UUID2, TENANT4),
            _db_image_member_fixture(UUID3, TENANT4),
            _db_image_member_fixture(UUID3, TENANT2),
            _db_image_member_fixture(UUID4, TENANT1),
        ]
        [self.db.image_member_create(None, image_member)
            for image_member in self.image_members]
    def test_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT4])
        self.assertEqual(expected, actual)
    def test_index_no_members(self):
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID5)
        self.assertEqual(0, len(output['members']))
        self.assertEqual({'members': []}, output)
    def test_index_member_view(self):
        # UUID3 is a shared image owned by TENANT3
        # UUID3 has members TENANT2 and TENANT4
        # When TENANT4 lists members for UUID3, should not see TENANT2
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        output = self.controller.index(request, UUID3)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT4])
        self.assertEqual(expected, actual)
    def test_index_private_image(self):
        # A non-member cannot even see that the image exists.
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
                          request, UUID5)
    def test_index_public_image(self):
        # Public images have no member list to expose.
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.index,
                          request, UUID1)
    def test_index_private_image_visible_members_admin(self):
        request = unit_test_utils.get_fake_request(is_admin=True)
        output = self.controller.index(request, UUID4)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT1])
        self.assertEqual(expected, actual)
    def test_index_allowed_by_get_members_policy(self):
        rules = {"get_members": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(1, len(output['members']))
    def test_index_forbidden_by_get_members_policy(self):
        rules = {"get_members": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.index,
                          request, image_id=UUID2)
    def test_show(self):
        # Owner (TENANT1) can view a member record of its image.
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        output = self.controller.show(request, UUID2, TENANT4)
        expected = self.image_members[0]
        self.assertEqual(expected['image_id'], output.image_id)
        self.assertEqual(expected['member'], output.member_id)
        self.assertEqual(expected['status'], output.status)
    def test_show_by_member(self):
        # A member can view its own membership record.
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        output = self.controller.show(request, UUID2, TENANT4)
        expected = self.image_members[0]
        self.assertEqual(expected['image_id'], output.image_id)
        self.assertEqual(expected['member'], output.member_id)
        self.assertEqual(expected['status'], output.status)
    def test_show_forbidden(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          request, UUID2, TENANT4)
    def test_show_not_found(self):
        # one member should not be able to view status of another member
        # of the same image
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          request, UUID3, TENANT4)
    def test_create(self):
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        output = self.controller.create(request, image_id=image_id,
                                        member_id=member_id)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
    def test_create_allowed_by_add_policy(self):
        rules = {"add_member": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        output = self.controller.create(request, image_id=UUID2,
                                        member_id=TENANT3)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
    def test_create_forbidden_by_add_policy(self):
        rules = {"add_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          request, image_id=UUID2, member_id=TENANT3)
    def test_create_duplicate_member(self):
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        output = self.controller.create(request, image_id=image_id,
                                        member_id=member_id)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
        # Creating the same membership twice must conflict.
        self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
                          request, image_id=image_id, member_id=member_id)
    def test_create_overlimit(self):
        # Quota of zero members forbids any creation.
        self.config(image_member_quota=0)
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.create, request,
                          image_id=image_id, member_id=member_id)
    def test_create_unlimited(self):
        # Quota of -1 means unlimited members.
        self.config(image_member_quota=-1)
        request = unit_test_utils.get_fake_request()
        image_id = UUID2
        member_id = TENANT3
        output = self.controller.create(request, image_id=image_id,
                                        member_id=member_id)
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT3, output.member_id)
    def test_update_done_by_member(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        image_id = UUID2
        member_id = TENANT4
        output = self.controller.update(request, image_id=image_id,
                                        member_id=member_id,
                                        status='accepted')
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT4, output.member_id)
        self.assertEqual('accepted', output.status)
    def test_update_done_by_member_forbidden_by_policy(self):
        rules = {"modify_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          request, image_id=UUID2, member_id=TENANT4,
                          status='accepted')
    def test_update_done_by_member_allowed_by_policy(self):
        rules = {"modify_member": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        output = self.controller.update(request, image_id=UUID2,
                                        member_id=TENANT4,
                                        status='accepted')
        self.assertEqual(UUID2, output.image_id)
        self.assertEqual(TENANT4, output.member_id)
        self.assertEqual('accepted', output.status)
    def test_update_done_by_owner(self):
        # Only the member (not the image owner) may change the status.
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          request, UUID2, TENANT4, status='accepted')
    def test_update_non_existent_image(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          request, '123', TENANT4, status='accepted')
    def test_update_invalid_status(self):
        # 'accept' is not a valid status value ('accepted' is).
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          request, UUID2, TENANT4, status='accept')
    def test_create_private_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          request, UUID4, TENANT2)
    def test_create_public_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          request, UUID1, TENANT2)
    def test_create_image_does_not_exist(self):
        request = unit_test_utils.get_fake_request()
        image_id = 'fake-image-id'
        member_id = TENANT3
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          request, image_id=image_id, member_id=member_id)
    def test_delete(self):
        request = unit_test_utils.get_fake_request()
        member_id = TENANT4
        image_id = UUID2
        res = self.controller.delete(request, image_id, member_id)
        self.assertEqual(b'', res.body)
        self.assertEqual(http.NO_CONTENT, res.status_code)
        # The membership must be gone from the DB afterwards.
        found_member = self.db.image_member_find(
            request.context, image_id=image_id, member=member_id)
        self.assertEqual([], found_member)
    def test_delete_by_member(self):
        # A member cannot remove itself; the membership must survive.
        request = unit_test_utils.get_fake_request(tenant=TENANT4)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID2, TENANT4)
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(1, len(output['members']))
        actual = set([image_member.member_id
                      for image_member in output['members']])
        expected = set([TENANT4])
        self.assertEqual(expected, actual)
    def test_delete_allowed_by_policies(self):
        rules = {"get_member": True, "delete_member": True}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        output = self.controller.delete(request, image_id=UUID2,
                                        member_id=TENANT4)
        request = unit_test_utils.get_fake_request()
        output = self.controller.index(request, UUID2)
        self.assertEqual(0, len(output['members']))
    def test_delete_forbidden_by_get_member_policy(self):
        rules = {"get_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID2, TENANT4)
    def test_delete_forbidden_by_delete_member_policy(self):
        rules = {"delete_member": False}
        self.policy.set_rules(rules)
        request = unit_test_utils.get_fake_request(tenant=TENANT1)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID2, TENANT4)
    def test_delete_private_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID4, TENANT1)
    def test_delete_public_image(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          request, UUID1, TENANT1)
    def test_delete_image_does_not_exist(self):
        request = unit_test_utils.get_fake_request()
        member_id = TENANT2
        image_id = 'fake-image-id'
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          request, image_id, member_id)
    def test_delete_member_does_not_exist(self):
        request = unit_test_utils.get_fake_request()
        member_id = 'fake-member-id'
        image_id = UUID2
        found_member = self.db.image_member_find(
            request.context, image_id=image_id, member=member_id)
        self.assertEqual([], found_member)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          request, image_id, member_id)
class TestImageMembersSerializer(test_utils.BaseTestCase):
    """Response-serializer tests: index/show/create/update must emit the
    documented JSON shape with ISO timestamps and schema links."""
    def setUp(self):
        super(TestImageMembersSerializer, self).setUp()
        self.serializer = glance.api.v2.image_members.ResponseSerializer()
        # Two memberships on the same image with different statuses.
        self.fixtures = [
            _domain_fixture(id='1', image_id=UUID2, member_id=TENANT1,
                            status='accepted',
                            created_at=DATETIME, updated_at=DATETIME),
            _domain_fixture(id='2', image_id=UUID2, member_id=TENANT2,
                            status='pending',
                            created_at=DATETIME, updated_at=DATETIME),
        ]
    def test_index(self):
        expected = {
            'members': [
                {
                    'image_id': UUID2,
                    'member_id': TENANT1,
                    'status': 'accepted',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME,
                    'schema': '/v2/schemas/member',
                },
                {
                    'image_id': UUID2,
                    'member_id': TENANT2,
                    'status': 'pending',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME,
                    'schema': '/v2/schemas/member',
                },
            ],
            'schema': '/v2/schemas/members',
        }
        request = webob.Request.blank('/v2/images/%s/members' % UUID2)
        response = webob.Response(request=request)
        result = {'members': self.fixtures}
        self.serializer.index(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
    def test_show(self):
        expected = {
            'image_id': UUID2,
            'member_id': TENANT1,
            'status': 'accepted',
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'schema': '/v2/schemas/member',
        }
        request = webob.Request.blank('/v2/images/%s/members/%s'
                                      % (UUID2, TENANT1))
        response = webob.Response(request=request)
        result = self.fixtures[0]
        self.serializer.show(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
    def test_create(self):
        expected = {'image_id': UUID2,
                    'member_id': TENANT1,
                    'status': 'accepted',
                    'schema': '/v2/schemas/member',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME}
        request = webob.Request.blank('/v2/images/%s/members/%s'
                                      % (UUID2, TENANT1))
        response = webob.Response(request=request)
        result = self.fixtures[0]
        self.serializer.create(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
    def test_update(self):
        expected = {'image_id': UUID2,
                    'member_id': TENANT1,
                    'status': 'accepted',
                    'schema': '/v2/schemas/member',
                    'created_at': ISOTIME,
                    'updated_at': ISOTIME}
        request = webob.Request.blank('/v2/images/%s/members/%s'
                                      % (UUID2, TENANT1))
        response = webob.Response(request=request)
        result = self.fixtures[0]
        self.serializer.update(response, result)
        actual = jsonutils.loads(response.body)
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)
class TestImagesDeserializer(test_utils.BaseTestCase):
    """Request-deserializer tests for the image-members API."""
    def setUp(self):
        super(TestImagesDeserializer, self).setUp()
        self.deserializer = glance.api.v2.image_members.RequestDeserializer()
    def test_create(self):
        # A valid 'member' body becomes a member_id dict.
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes({'member': TENANT1})
        self.assertEqual({'member_id': TENANT1},
                         self.deserializer.create(req))
    def test_create_invalid(self):
        # An unknown key is rejected.
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes({'mem': TENANT1})
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.create, req)
    def test_create_no_body(self):
        req = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.create, req)
    def test_create_member_empty(self):
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes({'member': ''})
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.create, req)
    def test_create_list_return_error(self):
        # A JSON list (not an object) is rejected.
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes([TENANT1])
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.create, req)
    def test_update_list_return_error(self):
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes([TENANT1])
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.update, req)
    def test_update(self):
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes({'status': 'accepted'})
        self.assertEqual({'status': 'accepted'},
                         self.deserializer.update(req))
    def test_update_invalid(self):
        req = unit_test_utils.get_fake_request()
        req.body = jsonutils.dump_as_bytes({'mem': TENANT1})
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.update, req)
    def test_update_no_body(self):
        req = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.update, req)
|
{
"content_hash": "6d9f2aa2fcd0889068404c39f8e745d5",
"timestamp": "",
"source": "github",
"line_count": 557,
"max_line_length": 77,
"avg_line_length": 40.94075403949731,
"alnum_prop": 0.5995439396597089,
"repo_name": "rajalokan/glance",
"id": "6a4a12cf8df2fbfc74f0c525aa7f8d7080e446c6",
"size": "23440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/v2/test_image_members_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "673"
},
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "3840692"
}
],
"symlink_target": ""
}
|
"""\
=====================================
Parse RDF data received from a uri
=====================================
Fetch and parse RDF data, and then send out TopologyViewer commands
Example Usage
-------------
A simple console driven RDF parser and draw them with 3D topology viewer::
Pipeline( ConsoleReader(),
RDFParser(),
TopologyViewer3DWithParams(),
ConsoleEchoer(),
).run()
Then at runtime try typing these commands to change RDF data in real time::
>>> http://fooshed.net/foaf.rdf 2 10
>>> http://bigasterisk.com/foaf.rdf
>>> http://apassant.net/about/#alex
How does it work?
-----------------
RDFParser is a Kamaelia component which fetches data from a uri which is an RDF data file
or a web page which embeds RDF data.
The input format:
"uri max_layer max_nodePerLayer":
- uri -- is the uri of the data file. If the uri is a rdf data file, it will parse it directly;
if not, it will extract rdf data first before parsing.
- max_layer -- the maximum layers of the rdf hierarchy structure (how deep) to parse
- max_nodePerLayer -- the maximum nodes in one layer (how wide) to parse
The output is TopologyViewer commands.
Typically, it receives inputs from ConsoleReader or DataSource
and send output to TopologyViewer3D.
You may also need to install librdf, a rdf parsing lib from redland.
See http://librdf.org/ for more information and
http://librdf.org/bindings/INSTALL.html for installation information.
See http://www.w3.org/TR/rdf-sparql-query/ for more information about SPARQL query,
http://www.w3.org/RDF/ for more information about RDF.
"""
from Kamaelia.Util.DataSource import DataSource
from Kamaelia.Util.Console import ConsoleReader,ConsoleEchoer
from Kamaelia.Visualisation.PhysicsGraph3D.TopologyViewer3DWithParams import TopologyViewer3DWithParams
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Apps.CL.FOAFViewer.RDFParsing import RDFParser
# Wire console input -> RDF parser -> 3D topology viewer -> console echo,
# then run the pipeline until interrupted.
Graphline(
    CONSOLEREADER = ConsoleReader('>>>'),
    PARSER = RDFParser(),
    VIEWER = TopologyViewer3DWithParams(),
    CONSOLEECHOER = ConsoleEchoer(),
    linkages = {
        # "uri max_layer max_nodePerLayer" lines feed the parser
        ("CONSOLEREADER","outbox") : ("PARSER","inbox"),
        # parser emits TopologyViewer commands
        ("PARSER","outbox")        : ("VIEWER","inbox"),
        ("VIEWER","outbox")        : ("CONSOLEECHOER","inbox"),
        #("PARSER","outbox") : ("CONSOLEECHOER","inbox"),
    }
).run()
|
{
"content_hash": "a3b5a8193efb28c6486bd8531fa43175",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 116,
"avg_line_length": 35.38028169014085,
"alnum_prop": 0.6540605095541401,
"repo_name": "sparkslabs/kamaelia",
"id": "830daf3328ada01e1d1d6f285cab7c776d2b0188",
"size": "3445",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/GraphVisualisation/FOAFViewer/FOAFViewer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "M4",
"bytes": "12224"
},
{
"name": "Makefile",
"bytes": "150947"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "OCaml",
"bytes": "643"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Python",
"bytes": "18900785"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707588"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import # Avoid importing `importlib` from this package.
import imp
from importlib import import_module
import os
import sys
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
def import_by_path(dotted_path, error_prefix=''):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImproperlyConfigured if something goes wrong.
    """
    # A path with no dot cannot be split into module + attribute.
    if '.' not in dotted_path:
        raise ImproperlyConfigured("%s%s doesn't look like a module path" % (
            error_prefix, dotted_path))
    module_path, class_name = dotted_path.rsplit('.', 1)
    try:
        module = import_module(module_path)
    except ImportError as e:
        msg = '%sError importing module %s: "%s"' % (
            error_prefix, module_path, e)
        # Re-raise as ImproperlyConfigured, preserving the original traceback.
        six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
                    sys.exc_info()[2])
    if not hasattr(module, class_name):
        raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % (
            error_prefix, module_path, class_name))
    return getattr(module, class_name)
def module_has_submodule(package, module_name):
    """See if 'module' is in 'package'.

    Probes, in order: sys.modules, the meta-path finders, then each
    package __path__ entry via the path-importer cache, path hooks, and
    finally the implicit (filesystem) import machinery. Returns a bool.
    """
    name = ".".join([package.__name__, module_name])
    try:
        # None indicates a cached miss; see mark_miss() in Python/import.c.
        return sys.modules[name] is not None
    except KeyError:
        pass
    try:
        package_path = package.__path__    # No __path__, then not a package.
    except AttributeError:
        # The remainder of this function assumes we're dealing with a
        # package (a module with a __path__), so bail out if it's not one.
        return False
    for finder in sys.meta_path:
        if finder.find_module(name, package_path):
            return True
    for entry in package_path:
        try:
            # Try the cached finder.
            finder = sys.path_importer_cache[entry]
            if finder is None:
                # Implicit import machinery should be used.
                try:
                    # imp.find_module may return an open file; close it.
                    file_, _, _ = imp.find_module(module_name, [entry])
                    if file_:
                        file_.close()
                    return True
                except ImportError:
                    continue
            # Else see if the finder knows of a loader.
            elif finder.find_module(name):
                return True
            else:
                continue
        except KeyError:
            # No cached finder, so try and make one.
            for hook in sys.path_hooks:
                try:
                    finder = hook(entry)
                    # XXX Could cache in sys.path_importer_cache
                    if finder.find_module(name):
                        return True
                    else:
                        # Once a finder is found, stop the search.
                        break
                except ImportError:
                    # Continue the search for a finder.
                    continue
            else:
                # No finder found.
                # Try the implicit import machinery if searching a directory.
                if os.path.isdir(entry):
                    try:
                        file_, _, _ = imp.find_module(module_name, [entry])
                        if file_:
                            file_.close()
                        return True
                    except ImportError:
                        pass
                # XXX Could insert None or NullImporter
    else:
        # Exhausted the search, so the module cannot be found.
        return False
|
{
"content_hash": "a3d2c1590215398a3489fbcc2c1df248",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 93,
"avg_line_length": 37.49,
"alnum_prop": 0.5380101360362763,
"repo_name": "adambrenecki/django",
"id": "9c8ea98d509a66970e21e933a8af116c7a66812c",
"size": "3749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/utils/module_loading.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100819"
},
{
"name": "Python",
"bytes": "8829204"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Account(models.Model):
    """A bank account with a mutable decimal balance (demo model)."""
    # Monetary balance: up to 12 digits total, 2 decimal places.
    balance = models.DecimalField(decimal_places=2, max_digits=12)
    def withdraw(self, amount):
        """Subtract *amount* from the in-memory balance.

        NOTE(review): no overdraft check, and save() is not called — the
        caller is responsible for validation and persistence.
        """
        print(f"Withdraw {amount} from {self}")
        self.balance -= amount
    def deposit(self, amount):
        """Add *amount* to the in-memory balance (save() is not called)."""
        print(f"Deposit {amount} in {self}")
        self.balance += amount
|
{
"content_hash": "94ab09c26944a062443d6ac6fa0ba395",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 23.5,
"alnum_prop": 0.651595744680851,
"repo_name": "amolenaar/roles",
"id": "c6e322aec3e959e914db2972bb6be0b934b52bef",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_dci/account/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "49615"
},
{
"name": "Shell",
"bytes": "823"
}
],
"symlink_target": ""
}
|
import sys
from src.functions import multiply, add
def test_multiply():
    """multiply() should behave like the * operator for small ints."""
    assert multiply(1, 2) == 2
    assert multiply(0, 1) == 0
def test_add():
    """add() should behave like the + operator for small ints."""
    assert add(1, 1) == 2
def test_python3():
    """Placeholder check: passes regardless of the interpreter's major version.

    The original branched on sys.version_info but asserted True on both
    paths; this keeps that (vacuous) behavior in a single expression.
    """
    major = sys.version_info[0]
    assert True if major == 3 else True
|
{
"content_hash": "8665f41753eadd496cab47879e197c23",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 39,
"avg_line_length": 15.833333333333334,
"alnum_prop": 0.5929824561403508,
"repo_name": "filaton/basic_travis",
"id": "6a0d5101c6ebb9de1ecd707a91441d02022c4438",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "356"
}
],
"symlink_target": ""
}
|
import os.path
import saml2
def create_conf(sp_host='sp.example.com', idp_hosts=['idp.example.com']):
    """Build a pysaml2 SP configuration dict for *sp_host*, trusting the
    SimpleSAMLphp IdPs named in *idp_hosts*."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    # One entry per IdP, keyed by its metadata entity id.
    idp_settings = {}
    for idp in idp_hosts:
        entity_id = 'https://%s/simplesaml/saml2/idp/metadata.php' % idp
        idp_settings[entity_id] = {
            'single_sign_on_service': {
                saml2.BINDING_HTTP_REDIRECT: 'https://%s/simplesaml/saml2/idp/SSOService.php' % idp,
            },
            'single_logout_service': {
                saml2.BINDING_HTTP_REDIRECT: 'https://%s/simplesaml/saml2/idp/SingleLogoutService.php' % idp,
            },
        }
    return {
        'xmlsec_binary': '/usr/bin/xmlsec1',
        'entityid': 'http://%s/saml2/metadata/' % sp_host,
        'attribute_map_dir': os.path.join(base_dir, 'attribute-maps'),
        'service': {
            'sp': {
                'name': 'Test SP',
                'endpoints': {
                    'assertion_consumer_service': [
                        ('http://%s/saml2/acs/' % sp_host,
                         saml2.BINDING_HTTP_POST),
                    ],
                    'single_logout_service': [
                        ('http://%s/saml2/ls/' % sp_host,
                         saml2.BINDING_HTTP_REDIRECT),
                    ],
                },
                'required_attributes': ['uid'],
                'optional_attributes': ['eduPersonAffiliation'],
                'idp': idp_settings,
            },
        },
        'metadata': {
            'local': [os.path.join(base_dir, 'remote_metadata.xml')],
        },
        'debug': 1,
        # certificates
        'key_file': os.path.join(base_dir, 'mycert.key'),
        'cert_file': os.path.join(base_dir, 'mycert.pem'),
        # These fields are only used when generating the metadata
        'contact_person': [
            {'given_name': 'Technical givenname',
             'sur_name': 'Technical surname',
             'company': 'Example Inc.',
             'email_address': 'technical@sp.example.com',
             'contact_type': 'technical'},
            {'given_name': 'Administrative givenname',
             'sur_name': 'Administrative surname',
             'company': 'Example Inc.',
             'email_address': 'administrative@sp.example.ccom',
             'contact_type': 'administrative'},
        ],
        'organization': {
            'name': [('Ejemplo S.A.', 'es'), ('Example Inc.', 'en')],
            'display_name': [('Ejemplo', 'es'), ('Example', 'en')],
            'url': [('http://www.example.es', 'es'),
                    ('http://www.example.com', 'en')],
        },
    }
|
{
"content_hash": "c1988088551fb352ed84208af89f3f14",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 109,
"avg_line_length": 36.53333333333333,
"alnum_prop": 0.46970802919708027,
"repo_name": "sdelements/djangosaml2",
"id": "3c4119b9c240bf2ebf2229b90e905b8391e71cf2",
"size": "3428",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "djangosaml2/tests/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123167"
}
],
"symlink_target": ""
}
|
def checkio(text):
    """Return the most frequent letter in *text*, case-insensitively.

    Non-letter characters are ignored. Ties are broken by returning the
    alphabetically first of the tied letters. Assumes *text* contains at
    least one letter (guaranteed by the CheckiO problem statement).

    Note: the original body was a SyntaxError (unbalanced brackets), called
    an undefined name ``append`` and unconditionally returned ``'a'``; this
    implementation satisfies the self-checks below.
    """
    counts = {}
    for ch in text.lower():
        if ch.isalpha():
            counts[ch] = counts.get(ch, 0) + 1
    top = max(counts.values())
    # Alphabetically-first letter among those that reach the top count.
    return min(ch for ch, cnt in counts.items() if cnt == top)
# Self-test entry point: run the CheckiO example cases only when the file is
# executed directly (not when imported).
if __name__ == '__main__':
    # These "asserts" are used only for self-checking and are not necessary for auto-testing
    assert checkio("Hello World!") == "l", "Hello test"
    assert checkio("How do you do?") == "o", "O is most wanted"
    assert checkio("One") == "e", "All letter only once."
    assert checkio("Oops!") == "o", "Don't forget about lower case."
    assert checkio("AAaooo!!!!") == "a", "Only letters."
    assert checkio("abe") == "a", "The First."
    print("Start the long test")
    # Performance check: must handle a 10,000-character input quickly.
    assert checkio("a" * 9000 + "b" * 1000) == "a", "Long."
    print("The local tests are done.")
|
{
"content_hash": "6762ca9a280ce85244d61a0add0a480d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 84,
"avg_line_length": 38.94444444444444,
"alnum_prop": 0.5891583452211127,
"repo_name": "hkaushalya/CheckIO",
"id": "03e363492e8945f17680a6649f45e8fff5729469",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checkio_home_4_highestfreq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10578"
}
],
"symlink_target": ""
}
|
"""
Django settings for BusyBot project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
HOSTNAME = 'localhost'
PORT = 8000
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_rp)-5@a3dc+q^200us64^fgi@e1w*&0toq5r_c52x^lk8@m4='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'Site',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.static',
'django.core.context_processors.media',
'django.contrib.auth.context_processors.auth',
)
ROOT_URLCONF = 'BusyBot.urls'
WSGI_APPLICATION = 'BusyBot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
CELERY_TIMEZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'), )
WEBSOCKET_LOCATION = 'ws://localhost:8083'
LOGIN_EXEMPT_URLS = (
'log/'
)
ZWAVE_MODULE = 'Site.zwave.zwaveme'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
try:
from settings_local import *
except ImportError:
pass
|
{
"content_hash": "f0b9fa99ad06eaa65e9f2724b3ca6a58",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 71,
"avg_line_length": 23.931623931623932,
"alnum_prop": 0.7160714285714286,
"repo_name": "bytedreamer/BusyBot",
"id": "3f0601ed7f8b6df3c5bd8ef18818ba7a019ba480",
"size": "2800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BusyBot/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35265"
},
{
"name": "HTML",
"bytes": "11326"
},
{
"name": "JavaScript",
"bytes": "594624"
},
{
"name": "Python",
"bytes": "36015"
}
],
"symlink_target": ""
}
|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Raised when an openshiftcli operation cannot be completed.'''
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Thin wrapper around the `oc` / `oadm` command line tools.

    Every helper shells out through openshift_cmd() and returns a result
    dict shaped like {'returncode': int, 'results': ..., 'cmd': str, ...}.
    NOTE(review): this module is Python 2 code (print statements, has_key).
    '''

    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

        namespace      -- project most commands run against
        kubeconfig     -- kubeconfig path exported to the child process
        verbose        -- when True, echo commands and output to stdout
        all_namespaces -- when True, _get() queries across all namespaces
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' Fetch resource `rname`, apply the key/value pairs in `content`
        via Yedit, and `oc replace` it only if something actually changed.
        Returns {'returncode': 0, 'updated': False} when nothing changed. '''
        res = self._get(resource, rname)
        if not res['results']:
            # Nothing to replace; propagate the (empty) query result.
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            # yed.put returns (changed, document); collect the flags.
            changes.append(yed.put(key, value))
        if any([change[0] for change in changes]):
            yed.write()
            # Clean the temp file up when the module process exits.
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''run `oc replace` with the object definition in file fname '''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''write `content` to a temp file and `oc create` it '''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)

    def _create(self, fname):
        '''run `oc create` with the object definition in file fname '''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])

    def _delete(self, resource, rname, selector=None):
        '''run `oc delete` on a named resource, optionally by selector '''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):
        '''run `oc process` on a template, optionally creating the result '''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # Template body arrives on stdin instead of by name.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])

    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name (or all of a kind) as parsed JSON '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
        # NOTE(review): dict.has_key exists only on Python 2.
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node --evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform `oc import-image`, optionally from an external registry url '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    # pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command runner for oc / oadm.

        cmd         -- argument list appended after the binary
        oadm        -- use /usr/bin/oadm instead of /usr/bin/oc
        output      -- when True, capture stdout into rval['results']
        output_type -- 'json' (parsed) or 'raw' (string)
        input_data  -- optional string piped to the child's stdin
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        # KUBECONFIG is the only env var passed down to the child process.
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # NOTE(review): err.message is Python 2 only; empty
                        # stdout is tolerated, other parse errors fall through
                        # to the err-reporting branch below.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            # Non-zero exit: report streams, clear results.
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules (temp files, result lookup,
    recursive definition comparison). NOTE(review): Python 2 code. '''

    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in /tmp with name `rname` and serialized contents;
        returns the file path and registers it for cleanup at exit '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                # Any other ftype: write the data verbatim.
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit: remove each path (file or directory tree) '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the result whose metadata.name equals `_name`; None if absent '''
        rval = None
        for result in results:
            # NOTE(review): dict.has_key is Python 2 only.
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' read and parse a resource definition file (yaml or json) '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Iterates the server-side `result_def`; keys in `skip` (metadata,
        status, plus caller-provided `skip_keys`) are ignored. Recurses into
        nested dicts and dict-valued list entries. Returns True when every
        compared key matches.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False
                # Pairwise compare list entries; only dict pairs recurse.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False
        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for resource options handed to the oc CLI.'''

    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' Expose the raw options dictionary. '''
        return self._options

    def to_option_list(self):
        '''Alias for stringify(): render options as CLI parameters.'''
        return self.stringify()

    def stringify(self):
        ''' Render the included options as a list of --key=value strings.

        An option is emitted when its 'include' flag is set and its value is
        truthy or an int (so an explicit 0 is still emitted). Underscores in
        option names become dashes.
        '''
        params = []
        for opt_name, opt in self.config_options.items():
            wanted = opt['include'] and (opt['value'] or isinstance(opt['value'], int))
            if wanted:
                params.append('--%s=%s' % (opt_name.replace('_', '-'), opt['value']))
        return params
class YeditException(Exception):
    ''' Error raised by Yedit for load, write, and path-manipulation failures. '''
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
class OCSecretAdd(OpenShiftCLI):
    ''' Manage the `secrets` list of a service account through `oc`. '''
    # Queries run against the `sa` (serviceaccount) resource kind.
    kind = 'sa'

    # pylint allows 5. we need 6
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OpenshiftOC

        config -- ServiceAccountConfig carrying name, namespace, kubeconfig
                  and the secrets to reconcile
        '''
        super(OCSecretAdd, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.verbose = verbose
        self._service_account = None

    @property
    def service_account(self):
        ''' lazily fetched ServiceAccount wrapper for the configured account '''
        if not self._service_account:
            self.get()
        return self._service_account

    @service_account.setter
    def service_account(self, data):
        ''' setter for the cached service account '''
        self._service_account = data

    def exists(self, in_secret):
        ''' return whether the named secret is attached to the service account '''
        result = self.service_account.find_secret(in_secret)
        if not result:
            return False
        return True

    def get(self):
        '''fetch the service account; on success cache the wrapper and return
        its secrets list in the result dict '''
        env = self._get(OCSecretAdd.kind, self.config.name)
        if env['returncode'] == 0:
            self.service_account = ServiceAccount(content=env['results'][0])
            env['results'] = self.service_account.get('secrets')
        return env

    def delete(self):
        '''remove all secrets from the service account and replace it server-side '''
        modified = []
        # NOTE(review): iterates self.service_account.secrets while
        # delete_secret mutates that same list, and passes the secret *dict*
        # where delete_secret compares against a name string — looks like
        # this can never match / can skip entries. Verify against callers.
        for rem_secret in self.service_account.secrets:
            modified.append(self.service_account.delete_secret(rem_secret))
        if any(modified):
            return self._replace_content(OCSecretAdd.kind, self.config.name, self.service_account.yaml_dict)
        return {'returncode': 0, 'changed': False}

    def put(self):
        '''attach any configured secrets missing from the service account and
        replace it server-side; no-op result when nothing changed '''
        modified = False
        for add_secret in self.config.secrets:
            if not self.service_account.find_secret(add_secret):
                self.service_account.add_secret(add_secret)
                modified = True
        if modified:
            return self._replace_content(OCSecretAdd.kind, self.config.name, self.service_account.yaml_dict)
        return {'returncode': 0, 'changed': False}
class ServiceAccountConfig(object):
    '''Options holder that materializes a default v1 ServiceAccount resource.'''

    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' Populate self.data with a v1 ServiceAccount definition built from
        the configured name, namespace and secret lists. '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'ServiceAccount'
        self.data['metadata'] = {'name': self.name, 'namespace': self.namespace}
        # Secret references use the {"name": <secret>} wire format.
        self.data['secrets'] = [{"name": sec} for sec in self.secrets]
        self.data['imagePullSecrets'] = [{"name": sec} for sec in self.image_pull_secrets]
# pylint: disable=too-many-public-methods
class ServiceAccount(Yedit):
    ''' Yedit wrapper exposing helpers for a ServiceAccount's `secrets` and
    `imagePullSecrets` lists. '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' lazily loaded imagePullSecrets list from the underlying document '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' lazily loaded secrets list from the underlying document.

        BUGFIX: removed a leftover debug `print` statement (Python 2 syntax)
        that spammed stdout on every access.
        '''
        # Re-fetches while the cached list is empty (not just None) so a
        # secret added via put() becomes visible; behavior preserved from
        # the original.
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove the secret named `inc_secret`; True when removed '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break
        # BUGFIX: `if remove_idx:` treated index 0 as "not found", so the
        # first secret could never be deleted.
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True
        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove the image pull secret named `inc_secret`; True when removed '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break
        # BUGFIX: same index-0 fix as delete_secret.
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True
        return False

    def find_secret(self, inc_secret):
        '''return the secret entry named `inc_secret`, or None'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret
        return None

    def find_image_pull_secret(self, inc_secret):
        '''return the image pull secret entry named `inc_secret`, or None'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret
        return None

    def add_secret(self, inc_secret):
        '''attach a secret by name'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})
        else:
            # No secrets yet: create the list in the document itself.
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''attach an image pull secret by name'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
def main():
    '''
    ansible oc module for adding secrets to a service account
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            kind=dict(default='rc', choices=['dc', 'rc', 'pods'], type='str'),
            namespace=dict(default='default', type='str'),
            secrets=dict(default=None, type='list'),
            service_account=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
    )
    sconfig = ServiceAccountConfig(module.params['service_account'],
                                   module.params['namespace'],
                                   module.params['kubeconfig'],
                                   module.params['secrets'],
                                   None)
    oc_secret_add = OCSecretAdd(sconfig,
                                verbose=module.params['debug'])
    state = module.params['state']
    api_rval = oc_secret_add.get()
    # BUGFIX: the spec default for `secrets` is None (the key is always
    # present), so the original `module.params.get('secrets', [])` still
    # yielded None and the loops below crashed with TypeError when no
    # secrets were supplied.
    secrets = module.params['secrets'] or []
    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")
    ########
    # Delete
    ########
    if state == 'absent':
        for secret in secrets:
            if oc_secret_add.exists(secret):
                if module.check_mode:
                    module.exit_json(changed=False, msg='Would have performed a delete.')
                api_rval = oc_secret_add.delete()
                module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")
    if state == 'present':
        ########
        # Create
        ########
        for secret in secrets:
            if not oc_secret_add.exists(secret):
                if module.check_mode:
                    module.exit_json(changed=False, msg='Would have performed a create.')
                # Create it here
                api_rval = oc_secret_add.put()
                if api_rval['returncode'] != 0:
                    module.fail_json(msg=api_rval)
                # return the created object
                api_rval = oc_secret_add.get()
                if api_rval['returncode'] != 0:
                    module.fail_json(msg=api_rval)
                module.exit_json(changed=True, results=api_rval, state="present")
        module.exit_json(changed=False, results=api_rval, state="present")
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")


# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *

main()
|
{
"content_hash": "ce8952e4b8ad4b0f245ea40bb015166e",
"timestamp": "",
"source": "github",
"line_count": 1180,
"max_line_length": 118,
"avg_line_length": 32.947457627118645,
"alnum_prop": 0.5296568753536705,
"repo_name": "themurph/openshift-tools",
"id": "94332a3eb3ba95a99c8d73d177c96ea9a463a6e7",
"size": "39289",
"binary": false,
"copies": "3",
"ref": "refs/heads/prod",
"path": "ansible/roles/lib_openshift_3.2/library/oc_secret_add.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "108987"
},
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "43950"
},
{
"name": "JavaScript",
"bytes": "229"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "11349806"
},
{
"name": "Shell",
"bytes": "752773"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
from itertools import chain
from contextlib import contextmanager
from plumbum.machines.local import CommandsProvider
from plumbum.commands.base import BaseCommand
from plumbum.commands.processes import run_proc, CommandNotFound, ProcessExecutionError
class EmptyCluster(Exception):
    """Raised by :class:`Cluster <plumbum.machines.parallel.Cluster>` when an
    operation is attempted on a cluster that contains no machines."""
def make_concurrent(self, rhs):
    """Combine two commands for concurrent execution (the ``&`` operator).

    Flattens existing :class:`ConcurrentCommand` operands so that
    ``a & b & c`` produces a single ConcurrentCommand with three
    sub-commands rather than a nested structure.
    """
    if not isinstance(rhs, BaseCommand):
        return NotImplemented
    if isinstance(self, ConcurrentCommand):
        # Left side is already concurrent: absorb the right side in place.
        if isinstance(rhs, ConcurrentCommand):
            self.commands.extend(rhs.commands)
        else:
            self.commands.append(rhs)
        return self
    if isinstance(rhs, ConcurrentCommand):
        # Right side is concurrent: prepend ourselves to keep ordering.
        rhs.commands.insert(0, self)
        return rhs
    return ConcurrentCommand(self, rhs)

# Install as the ``&`` operator on every command object.
BaseCommand.__and__ = make_concurrent
class ConcurrentPopen(object):
    """Aggregates several ``Popen``-like objects and exposes a subset of the
    ``subprocess.Popen`` interface over the whole group."""

    def __init__(self, procs):
        self.procs = procs
        # No combined stdio streams exist for a process group.
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.encoding = None
        self.returncode = None
        self.machine = list(set(proc.machine for proc in procs))

    @property
    def argv(self):
        """The argv of each underlying process (empty list when unknown)."""
        return [getattr(proc, "argv", []) for proc in self.procs]

    def poll(self):
        """Return the group's exit status, or None while any process runs.

        The combined status is 0 when every process succeeded, otherwise
        the first non-zero exit code encountered.
        """
        if self.returncode is not None:
            return self.returncode
        statuses = [proc.poll() for proc in self.procs]
        if any(status is None for status in statuses):
            return None
        self.returncode = next((s for s in statuses if s != 0), 0)
        return self.returncode

    def wait(self):
        """Block until every process terminates; return the combined status."""
        for proc in self.procs:
            proc.wait()
        return self.poll()

    def communicate(self, input=None):
        """Collect output from all processes as (stdouts, stderrs).

        Feeding stdin to a whole process group is unsupported.
        """
        if input:
            raise ValueError("Cannot pass input to ConcurrentPopen.communicate")
        out_err_pairs = [proc.communicate() for proc in self.procs]
        self.wait()
        return tuple(zip(*out_err_pairs))

    def _decode(self, byte_streams):
        # Delegate decoding to each process, pairing streams positionally.
        return [proc._decode(b) for (proc, b) in zip(self.procs, byte_streams)]
class ConcurrentCommand(BaseCommand):
    """A command composed of several sub-commands that are launched together
    (built by the ``&`` operator / :func:`make_concurrent`)."""

    def __init__(self, *commands):
        assert commands, EmptyConcurrentCommand()
        self.commands = list(commands)

    @property
    def machine(self):
        return list(set(cmd.machine for cmd in self.commands))

    def formulate(self, level=0, args=()):
        """Render as a shell sub-shell: ``(cmd1 & cmd2 & ... &)``."""
        parts = ["("]
        for cmd in self.commands:
            parts.extend(cmd.formulate(level, args))
            parts.append("&")
        parts.append(")")
        return parts

    def popen(self, *args, **kwargs):
        """Launch every sub-command; returns a :class:`ConcurrentPopen`."""
        return ConcurrentPopen([cmd[args].popen(**kwargs) for cmd in self.commands])

    def __getitem__(self, args):
        """Creates a bound-command with the given arguments"""
        if not isinstance(args, (tuple, list)):
            args = [args, ]
        if not args:
            return self
        return ConcurrentCommand(*(cmd[args] for cmd in self.commands))
class Cluster(CommandsProvider):
    """A group of machines that can be operated on collectively."""

    def __init__(self, *machines):
        self.machines = list(machines)

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def close(self):
        """Close every machine and empty the cluster."""
        for machine in self.machines:
            machine.close()
        del self.machines[:]

    def add_machine(self, machine):
        """Append another machine to the cluster."""
        self.machines.append(machine)

    def __len__(self):
        return len(self.machines)

    def empty(self):
        """True when the cluster holds no machines."""
        return not self

    def __iter__(self):
        return iter(self.machines)

    def __add__(self, other):
        """Return a new cluster holding the machines of both operands."""
        return self.__class__(*chain(self, other))

    def filter(self, pred):
        """Return a new cluster with only the machines matching *pred*."""
        return self.__class__(*filter(pred, self))

    def which(self, progname):
        """Locate *progname* on every machine."""
        return [machine.which(progname) for machine in self]

    def list_processes(self):
        return [machine.list_processes() for machine in self]

    def pgrep(self, pattern):
        return [machine.pgrep(pattern) for machine in self]

    def path(self, *parts):
        """Build the given path object on every machine."""
        return [machine.path(*parts) for machine in self]

    def __getitem__(self, progname):
        # Integer/slice indexing selects machines; a string looks up the
        # program on every machine and yields a concurrent command.
        if isinstance(progname, int):
            return self.machines[progname]
        if isinstance(progname, slice):
            return self.__class__(*self.machines[progname])
        if not isinstance(progname, str):
            raise TypeError("progname must be a string, not %r" % (type(progname,)))
        if not self.machines:
            raise EmptyCluster("Cluster is empty")
        return ConcurrentCommand(*(machine[progname] for machine in self))

    def __contains__(self, cmd):
        try:
            self[cmd]
        except CommandNotFound:
            return False
        return True

    @property
    def python(self):
        """A concurrent command running each machine's Python interpreter."""
        if not self.machines:
            raise EmptyCluster()
        return ConcurrentCommand(*(machine.python for machine in self))

    def session(self):
        """Open a shell session on every machine; returns a ClusterSession."""
        if not self.machines:
            raise EmptyCluster()
        return ClusterSession(*(machine.session() for machine in self))

    @contextmanager
    def as_user(self, user=None):
        """Temporarily switch every machine to *user* (root by default)."""
        with nested(*(machine.as_user(user) for machine in self)):
            yield self

    def as_root(self):
        return self.as_user()
class ClusterSession(object):
    """Aggregates shell sessions opened on several machines."""

    def __init__(self, *sessions):
        # Store as a list: close() empties the collection in place with
        # ``del self.sessions[:]``, which raises TypeError on the tuple
        # produced by the *sessions parameter.
        self.sessions = list(sessions)

    def __iter__(self):
        return iter(self.sessions)

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()

    def __del__(self):
        # Best-effort cleanup; never let an exception escape a destructor.
        try:
            self.close()
        except Exception:
            pass

    def alive(self):
        """Returns ``True`` if the underlying shells are all alive, ``False`` otherwise"""
        # NOTE(review): if ``session.alive`` is a method elsewhere in plumbum,
        # this truthiness test is always True -- confirm against ShellSession.
        return all(session.alive for session in self)

    def close(self):
        """Closes (terminates) all underlying shell sessions"""
        for session in self.sessions:
            session.close()
        del self.sessions[:]

    def popen(self, cmd):
        """Run *cmd* in every session; returns a :class:`ConcurrentPopen`."""
        return ConcurrentPopen([session.popen(cmd) for session in self])

    def run(self, cmd, retcode=None):
        """Run *cmd* in every session and wait for completion."""
        return run_proc(self.popen(cmd), retcode)
# Manual smoke test: exercises concurrent commands and clusters against the
# local machine.  Run this module directly to execute it.
if __name__ == "__main__":
    from plumbum import local
    from plumbum.cmd import ls, date, sleep
    # Three commands launched concurrently; c() waits for all of them.
    c = ls & date & sleep[1]
    print(c())
    # 'sleep -z' must fail, so the combined run is expected to raise.
    c = ls & date & sleep[1] & sleep["-z"]
    try:
        c()
    except ProcessExecutionError as ex:
        print(ex)
    else:
        assert False
    clst = Cluster(local, local, local)
    print(clst["ls"]())
    # A single-machine session works fine
    print(local.session().run("echo $$"))
    # NOTE(review): the cluster-session case below was flagged as broken
    # ("this does not" in the original); it expects three distinct shell PIDs.
    ret, stdout, stderr = clst.session().run("echo $$")
    print(ret)
    ret = [int(pid) for pid in stdout]
    assert(len(set(ret))==3)
# Compatibility shim: make a nested() context manager available everywhere.
try:
    from contextlib import nested
except ImportError:
    try:
        from contextlib import ExitStack
    except ImportError:
        # we're probably on python 3.2, so we'll need to redefine the deprecated 'nested' function
        import sys
        @contextmanager
        def nested(*managers):
            # Re-implementation of the removed contextlib.nested: enter all
            # managers in order, then unwind them in reverse, replaying any
            # exception raised along the way.
            exits = []
            vars = []
            exc = (None, None, None)
            try:
                for mgr in managers:
                    exit, enter = mgr.__exit__, mgr.__enter__
                    vars.append(enter())
                    exits.append(exit)
                yield vars
            except:
                exc = sys.exc_info()
            finally:
                while exits:
                    exit = exits.pop()
                    try:
                        # An __exit__ returning True swallows the pending exception.
                        if exit(*exc):
                            exc = (None, None, None)
                    except:
                        exc = sys.exc_info()
                if exc != (None, None, None):
                    e, v, t = exc
                    # Re-raise the last unhandled exception with its traceback.
                    raise v.with_traceback(t)
    else:
        @contextmanager
        def nested(*managers):
            # Modern path: emulate nested() with an ExitStack.
            with ExitStack() as stack:
                yield [stack.enter_context(ctx) for ctx in managers]
|
{
"content_hash": "773a3a7cce1e1769cd77cc208dd18763",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 109,
"avg_line_length": 30.965384615384615,
"alnum_prop": 0.5704881381194883,
"repo_name": "weka-io/plumbum",
"id": "d1574befc5f5d52ef2fdaed95e1618d5a01b7ba0",
"size": "8051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plumbum/machines/parallel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "367248"
},
{
"name": "Shell",
"bytes": "590"
}
],
"symlink_target": ""
}
|
import re
# Extract variable definitions from a Makefile.
# Return a dictionary mapping names to values.
# May raise IOError.
# Matches "NAME = value" Makefile variable definitions.
makevardef = re.compile('^([a-zA-Z0-9_]+)[ \t]*=(.*)')

def getmakevars(filename):
    """Extract variable definitions from the Makefile *filename*.

    Returns a dict mapping variable names to their comment-stripped,
    whitespace-trimmed values.  Backslash-continued lines are joined
    before matching.  May raise IOError.
    """
    variables = {}
    fp = open(filename)
    pendingline = ""
    try:
        while 1:
            line = fp.readline()
            if pendingline:
                line = pendingline + line
                pendingline = ""
            if not line:
                break
            if line.endswith('\\\n'):
                # Line continuation: buffer it and keep reading before
                # matching (mirrors the handling in getsetupinfo).
                pendingline = line[:-2]
                continue
            matchobj = makevardef.match(line)
            if not matchobj:
                continue
            (name, value) = matchobj.group(1, 2)
            # Strip trailing comment
            i = value.find('#')
            if i >= 0:
                value = value[:i]
            variables[name] = value.strip()
    finally:
        fp.close()
    return variables
# Parse a Python Setup(.in) file.
# Return two dictionaries, the first mapping modules to their
# definitions, the second mapping variable names to their values.
# May raise IOError.
# Matches "NAME=value" variable definitions in a Setup file.
setupvardef = re.compile('^([a-zA-Z0-9_]+)=(.*)')

def getsetupinfo(filename):
    """Parse a Python Setup(.in) file.

    Return two dictionaries: the first maps module names to their
    definition words, the second maps variable names to values.
    May raise IOError.
    """
    modules = {}
    variables = {}
    stream = open(filename)
    carry = ""
    try:
        while 1:
            line = stream.readline()
            if carry:
                line = carry + line
                carry = ""
            if not line:
                break
            # Drop everything after a comment marker.
            hash_pos = line.find('#')
            if hash_pos >= 0:
                line = line[:hash_pos]
            if line.endswith('\\\n'):
                # Continued line: save it (minus the backslash) for later.
                carry = line[:-2]
                continue
            m = setupvardef.match(line)
            if m:
                name, value = m.group(1, 2)
                variables[name] = value.strip()
            else:
                words = line.split()
                if words:
                    modules[words[0]] = words[1:]
    finally:
        stream.close()
    return modules, variables
# Test the above functions.
def test():
    """Command-line driver (Python 2): parse each argument as a Makefile
    or Setup file and pretty-print the parsed dictionaries."""
    import sys
    import os
    if not sys.argv[1:]:
        print 'usage: python parsesetup.py Makefile*|Setup* ...'
        sys.exit(2)
    for arg in sys.argv[1:]:
        base = os.path.basename(arg)
        # Dispatch on the file-name prefix.
        if base[:8] == 'Makefile':
            print 'Make style parsing:', arg
            v = getmakevars(arg)
            prdict(v)
        elif base[:5] == 'Setup':
            print 'Setup style parsing:', arg
            m, v = getsetupinfo(arg)
            prdict(m)
            prdict(v)
        else:
            print arg, 'is neither a Makefile nor a Setup file'
            print '(name must begin with "Makefile" or "Setup")'
def prdict(d):
    """Print dict *d* as aligned 'key value' lines, sorted by key.

    Python 2 idiom: relies on dict.keys() returning a sortable list.
    """
    keys = d.keys()
    keys.sort()
    for key in keys:
        value = d[key]
        print "%-15s" % key, str(value)

# Allow running this module as a script.
if __name__ == '__main__':
    test()
|
{
"content_hash": "e1d8d0699771a0ce035327c74f95903b",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 65,
"avg_line_length": 27.693693693693692,
"alnum_prop": 0.47397527651268706,
"repo_name": "MattDevo/edk2",
"id": "b14a6dac47b1989abecbc150222e9720dbead68a",
"size": "3122",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "AppPkg/Applications/Python/Python-2.7.2/Tools/freeze/parsesetup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "4545237"
},
{
"name": "Batchfile",
"bytes": "93042"
},
{
"name": "C",
"bytes": "94289702"
},
{
"name": "C++",
"bytes": "20170310"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "GAP",
"bytes": "698245"
},
{
"name": "GDB",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "472114"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "231845"
},
{
"name": "NSIS",
"bytes": "2229"
},
{
"name": "Objective-C",
"bytes": "4147834"
},
{
"name": "PHP",
"bytes": "674"
},
{
"name": "PLSQL",
"bytes": "24782"
},
{
"name": "Perl",
"bytes": "6218"
},
{
"name": "Python",
"bytes": "27130096"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Roff",
"bytes": "28192"
},
{
"name": "Shell",
"bytes": "104362"
},
{
"name": "SourcePawn",
"bytes": "29427"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
}
|
# PyInstaller hook: declare a module that scipy.io.matlab needs at runtime
# but that static import analysis does not detect.
hiddenimports = ['scipy.io.matlab.streams']
|
{
"content_hash": "a87f3f25e95c560086fa822fba91038f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 44,
"alnum_prop": 0.75,
"repo_name": "TeamSWAP/swap",
"id": "60da0fdc4e2b50b33dee6b9c28e6724cbb42dd28",
"size": "554",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "external/pyinstaller/PyInstaller/hooks/hook-scipy.io.matlab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "884174"
},
{
"name": "C++",
"bytes": "578"
},
{
"name": "CSS",
"bytes": "3410"
},
{
"name": "Objective-C",
"bytes": "30562"
},
{
"name": "Python",
"bytes": "3447566"
},
{
"name": "Shell",
"bytes": "1323"
},
{
"name": "TeX",
"bytes": "64614"
},
{
"name": "Visual Basic",
"bytes": "166"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from issues.views import *
# URL routes for the issues app.  Class-based views are wrapped in
# staff_member_required; plain string view names resolve against the
# "issues.views" prefix given to patterns().
# NOTE(review): patterns() is a legacy Django API (removed in Django 1.10);
# this file targets an older Django release.
urlpatterns = patterns("issues.views",
    (r"^delete-comment/(\d+)/$", "delete_comment", {}, "delete_comment"),
    (r"^update-issue/(\d+)/(delete)/$", "update_issue", {}, "update_issue"),
    (r"^update-issue/(\d+)/(closed|progress)/(on|off|\d+)/$", "update_issue", {}, "update_issue"),
    (r"^update-issue-detail/(?P<mfpk>\d+)/$", staff_member_required(UpdateIssue.as_view()), {}, "update_issue_detail"),
    (r"^reports/$", staff_member_required(ReportList.as_view()), {}, "reports"),
    (r"^create-issue/$", staff_member_required(CreateIssue.as_view()), {}, "create_issue"),
    (r"^create-report/$", staff_member_required(CreateReport.as_view()), {}, "create_report"),
    (r"^update-report/(?P<mfpk>\d+)/$", staff_member_required(UpdateReport.as_view()), {}, "update_report"),
    (r"^duplicate-report/(?P<dpk>\d+)/$", staff_member_required(DuplicateReport.as_view()), {}, "duplicate_report"),
    (r"^issue/(?P<dpk>\d+)/$", staff_member_required(ViewIssue.as_view()), {}, "issue"),
    (r"^attachments/(?P<dpk>\d+)/$", staff_member_required(AttachmentsView.as_view()), {}, "attachments"),
    # (r"^attachments/(?P<dpk>\d+)/$", staff_member_required(attachments_view), {}, "attachments"),
    (r"^update-comment/(?P<mfpk>\d+)/$", staff_member_required(UpdateComment.as_view()), {}, "update_comment"),
    (r"^add-issues/$", staff_member_required(AddIssues.as_view()), {}, "add_issues"),
    (r"^report/(?P<dpk>\d+)/$", staff_member_required(ReportView.as_view()), {}, "report"),
)
|
{
"content_hash": "26e8191e147036a5f2cae12536c84c6c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 119,
"avg_line_length": 46.37837837837838,
"alnum_prop": 0.6357808857808858,
"repo_name": "akulakov/mangotrac",
"id": "07d2419f07f2ec1e57156d1f4ebbe77d666964c2",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proj_issues/issues/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37861"
},
{
"name": "JavaScript",
"bytes": "85995"
},
{
"name": "Python",
"bytes": "315517"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch every model's ``id`` primary key to BigAutoField."""

    dependencies = [
        ('human_resources', '0001_initial'),
    ]

    # One identical AlterField per model, generated in declaration order.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        )
        for model in (
            'identification',
            'identificationtype',
            'staff',
            'timesheet',
            'timesheetentry',
        )
    ]
|
{
"content_hash": "9002175814a3f506e227e0ab60b6c9e5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 111,
"avg_line_length": 35.02777777777778,
"alnum_prop": 0.5900079302141158,
"repo_name": "Semprini/cbe",
"id": "6ca6e4ef99a94831ff01d021e750332a38e092b5",
"size": "1310",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cbe/cbe/human_resources/migrations/0002_auto_20210617_2350.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2292"
},
{
"name": "HTML",
"bytes": "3112"
},
{
"name": "PowerShell",
"bytes": "20448"
},
{
"name": "Python",
"bytes": "241197"
}
],
"symlink_target": ""
}
|
"""Generates various info tables from SPIR-V JSON grammar."""
from __future__ import print_function
import errno
import json
import os.path
import re
# Prefix for all C variables generated by this script.
PYGEN_VARIABLE_PREFIX = 'pygen_variable'
# Extensions to recognize, but which don't necessarily come from the SPIR-V
# core or KHR grammar files. Get this list from the SPIR-V registry web page.
# NOTE: Only put things on this list if it is not in those grammar files.
EXTENSIONS_FROM_SPIRV_REGISTRY_AND_NOT_FROM_GRAMMARS = """
SPV_AMD_gcn_shader
SPV_AMD_gpu_shader_half_float
SPV_AMD_gpu_shader_int16
SPV_AMD_shader_trinary_minmax
"""
def make_path_to_file(f):
    """Create every missing ancestor directory of the given file.

    Arguments:
      f: The file whose ancestor directories are to be created.
    """
    parent = os.path.dirname(os.path.abspath(f))
    try:
        os.makedirs(parent)
    except OSError as e:
        # A pre-existing directory is fine; re-raise anything else.
        if e.errno != errno.EEXIST or not os.path.isdir(parent):
            raise
def convert_min_required_version(version):
    """Converts the minimal required SPIR-V version encoded in the
    grammar to the symbol used in SPIRV-Tools."""
    if version is None:
        # No version listed: mapped to SPIR-V 1.0.
        return 'SPV_SPIRV_VERSION_WORD(1, 0)'
    if version == 'None':
        # The literal string 'None' marks "no core version".
        return '0xffffffffu'
    major_minor = version.replace('.', ',')
    return 'SPV_SPIRV_VERSION_WORD({})'.format(major_minor)
def compose_capability_list(caps):
    """Returns a string containing a braced list of capabilities as enums.

    Arguments:
      - caps: a sequence of capability names

    Returns:
      a string containing the braced list of SpvCapability* enums named by caps.
    """
    enums = ('SpvCapability{}'.format(cap) for cap in caps)
    return '{{{}}}'.format(', '.join(enums))
def get_capability_array_name(caps):
    """Returns the name of the array containing all the given capabilities,
    or 'nullptr' when the sequence is empty.

    Args:
      - caps: a sequence of capability names
    """
    if not caps:
        return 'nullptr'
    return '{}_caps_{}'.format(PYGEN_VARIABLE_PREFIX, ''.join(caps))
def generate_capability_arrays(caps):
    """Returns C array definitions for every distinct capability set.

    Arguments:
      - caps: a sequence of sequences of capability names
    """
    # De-duplicate as tuples (hashable) and drop empty sets.
    distinct = sorted(set(tuple(c) for c in caps if c))
    return '\n'.join(
        'static const SpvCapability {}[] = {};'.format(
            get_capability_array_name(c), compose_capability_list(c))
        for c in distinct)
def compose_extension_list(exts):
    """Returns a string containing a braced list of extensions as enums.

    Arguments:
      - exts: a sequence of extension names

    Returns:
      a string containing the braced list of extensions named by exts.
    """
    enums = ('libspirv::Extension::k{}'.format(ext) for ext in exts)
    return '{{{}}}'.format(', '.join(enums))
def get_extension_array_name(extensions):
    """Returns the name of the array containing all the given extensions,
    or 'nullptr' when the sequence is empty.

    Args:
      - extensions: a sequence of extension names
    """
    if not extensions:
        return 'nullptr'
    return '{}_exts_{}'.format(PYGEN_VARIABLE_PREFIX, ''.join(extensions))
def generate_extension_arrays(extensions):
    """Returns C array definitions for every distinct extension set.

    Arguments:
      - extensions: a sequence of sequences of extension names
    """
    # De-duplicate as tuples (hashable) and drop empty sets.
    distinct = sorted(set(tuple(e) for e in extensions if e))
    return '\n'.join(
        'static const libspirv::Extension {}[] = {};'.format(
            get_extension_array_name(e), compose_extension_list(e))
        for e in distinct)
def convert_operand_kind(operand_tuple):
    """Returns the corresponding operand type used in spirv-tools for
    the given operand kind and quantifier used in the JSON grammar.

    Arguments:
      - operand_tuple: a tuple of two elements:
        - operand kind: used in the JSON grammar
        - quantifier: '', '?', or '*'

    Returns:
      a string of the enumerant name in spv_operand_type_t
    """
    kind, quantifier = operand_tuple
    # Kinds whose spirv-tools name differs from the JSON grammar name.
    renames = {
        'IdResultType': 'TypeId',
        'IdResult': 'ResultId',
        'IdMemorySemantics': 'MemorySemanticsId',
        'MemorySemantics': 'MemorySemanticsId',
        'IdScope': 'ScopeId',
        'Scope': 'ScopeId',
        'IdRef': 'Id',
        'ImageOperands': 'Image',
        'Dim': 'Dimensionality',
        'ImageFormat': 'SamplerImageFormat',
        'KernelEnqueueFlags': 'KernelEnqFlags',
        'LiteralExtInstInteger': 'ExtensionInstructionNumber',
        'LiteralSpecConstantOpInteger': 'SpecConstantOpNumber',
        'LiteralContextDependentNumber': 'TypedLiteralNumber',
        'PairLiteralIntegerIdRef': 'LiteralIntegerId',
        'PairIdRefLiteralInteger': 'IdLiteralInteger',
        # Used by OpPhi in the grammar.
        'PairIdRefIdRef': 'Id',
        # CamelCase fixups so the snake_case conversion splits correctly.
        'FPRoundingMode': 'FpRoundingMode',
        'FPFastMathMode': 'FpFastMathMode',
    }
    kind = renames.get(kind, kind)
    if quantifier == '?':
        kind = 'Optional{}'.format(kind)
    elif quantifier == '*':
        kind = 'Variable{}'.format(kind)
    snake = re.sub(r'([a-z])([A-Z])', r'\1_\2', kind)
    return 'SPV_OPERAND_TYPE_{}'.format(snake.upper())
class InstInitializer(object):
    """Instances holds a SPIR-V instruction suitable for printing as
    the initializer for spv_opcode_desc_t."""

    def __init__(self, opname, caps, exts, operands, version):
        """Initialization.

        Arguments:
          - opname: opcode name (with the 'Op' prefix)
          - caps: a sequence of capability names required by this opcode
          - exts: a sequence of names of extensions enabling this enumerant
          - operands: a sequence of (operand-kind, operand-quantifier) tuples
          - version: minimal SPIR-V version required for this opcode
        """
        assert opname.startswith('Op')
        self.opname = opname[2:]  # Remove the "Op" prefix.
        self.num_caps = len(caps)
        # Name of the C capability array ('nullptr' when caps is empty).
        self.caps_mask = get_capability_array_name(caps)
        self.num_exts = len(exts)
        # Name of the C extension array ('nullptr' when exts is empty).
        self.exts = get_extension_array_name(exts)
        # Operand kinds translated to spv_operand_type_t enumerant names.
        self.operands = [convert_operand_kind(o) for o in operands]
        self.fix_syntax()
        operands = [o[0] for o in operands]
        # Whether the instruction references a result-type id / defines a result id.
        self.ref_type_id = 'IdResultType' in operands
        self.def_result_id = 'IdResult' in operands
        self.version = convert_min_required_version(version)

    def fix_syntax(self):
        """Fix an instruction's syntax, adjusting for differences between
        the officially released grammar and how SPIRV-Tools uses the grammar.

        Fixes:
          - ExtInst should not end with SPV_OPERAND_VARIABLE_ID.
            https://github.com/KhronosGroup/SPIRV-Tools/issues/233
        """
        if (self.opname == 'ExtInst'
                and self.operands[-1] == 'SPV_OPERAND_TYPE_VARIABLE_ID'):
            self.operands.pop()

    def __str__(self):
        # Doubled braces emit literal '{'/'}' in the C initializer.
        template = ['{{"{opname}"', 'SpvOp{opname}',
                    '{num_caps}', '{caps_mask}',
                    '{num_operands}', '{{{operands}}}',
                    '{def_result_id}', '{ref_type_id}',
                    '{num_exts}', '{exts}',
                    '{min_version}}}']
        return ', '.join(template).format(
            opname=self.opname,
            num_caps=self.num_caps,
            caps_mask=self.caps_mask,
            num_operands=len(self.operands),
            operands=', '.join(self.operands),
            def_result_id=(1 if self.def_result_id else 0),
            ref_type_id=(1 if self.ref_type_id else 0),
            num_exts=self.num_exts,
            exts=self.exts,
            min_version=self.version)
class ExtInstInitializer(object):
    """Instances holds a SPIR-V extended instruction suitable for printing as
    the initializer for spv_ext_inst_desc_t."""

    def __init__(self, opname, opcode, caps, operands):
        """Initialization.

        Arguments:
          - opname: opcode name
          - opcode: enumerant value for this opcode
          - caps: a sequence of capability names required by this opcode
          - operands: a sequence of (operand-kind, operand-quantifier) tuples
        """
        self.opname = opname
        self.opcode = opcode
        self.num_caps = len(caps)
        # Name of the C capability array ('nullptr' when caps is empty).
        self.caps_mask = get_capability_array_name(caps)
        self.operands = [convert_operand_kind(o) for o in operands]
        # Terminate the operand pattern, as expected by the C table.
        self.operands.append('SPV_OPERAND_TYPE_NONE')

    def __str__(self):
        # Doubled braces emit literal '{'/'}' in the C initializer.
        template = ['{{"{opname}"', '{opcode}', '{num_caps}', '{caps_mask}',
                    '{{{operands}}}}}']
        return ', '.join(template).format(
            opname=self.opname,
            opcode=self.opcode,
            num_caps=self.num_caps,
            caps_mask=self.caps_mask,
            operands=', '.join(self.operands))
def generate_instruction(inst, is_ext_inst):
    """Returns the C initializer for the given SPIR-V instruction.

    Arguments:
      - inst: a dict containing information about a SPIR-V instruction
      - is_ext_inst: a bool indicating whether |inst| is an extended
                     instruction.

    Returns:
      a string containing the C initializer for spv_opcode_desc_t or
      spv_ext_inst_desc_t
    """
    opname = inst.get('opname')
    assert opname is not None

    caps = inst.get('capabilities', [])
    operands = [(op['kind'], op.get('quantifier', ''))
                for op in inst.get('operands', {})]

    if is_ext_inst:
        return str(ExtInstInitializer(opname, inst.get('opcode'), caps, operands))

    exts = inst.get('extensions', [])
    min_version = inst.get('version', None)
    return str(InstInitializer(opname, caps, exts, operands, min_version))
def generate_instruction_table(inst_table):
    """Returns the info table containing all SPIR-V instructions,
    sorted by opcode, and prefixed by capability arrays.

    Note:
      - the built-in sorted() function is guaranteed to be stable.
        https://docs.python.org/3/library/functions.html#sorted

    Arguments:
      - inst_table: a list containing all SPIR-V instructions.
    """
    # Sort by (opcode, opname) so aliases sharing an opcode are ordered by name.
    inst_table = sorted(inst_table, key=lambda k: (k['opcode'], k['opname']))
    caps_arrays = generate_capability_arrays(
        [inst.get('capabilities', []) for inst in inst_table])
    exts_arrays = generate_extension_arrays(
        [inst.get('extensions', []) for inst in inst_table])
    insts = [generate_instruction(inst, False) for inst in inst_table]
    insts = ['static const spv_opcode_desc_t kOpcodeTableEntries[] = {{\n'
             ' {}\n}};'.format(',\n '.join(insts))]
    return '{}\n\n{}\n\n{}'.format(caps_arrays, exts_arrays, '\n'.join(insts))
def generate_extended_instruction_table(inst_table, set_name):
    """Returns the info table containing all SPIR-V extended instructions,
    sorted by opcode, and prefixed by capability arrays.

    Arguments:
      - inst_table: a list containing all SPIR-V instructions.
      - set_name: the name of the extended instruction set.
    """
    inst_table = sorted(inst_table, key=lambda k: k['opcode'])
    caps = [inst.get('capabilities', []) for inst in inst_table]
    caps_arrays = generate_capability_arrays(caps)
    insts = [generate_instruction(inst, True) for inst in inst_table]
    # The generated C table is named after the extended instruction set.
    insts = ['static const spv_ext_inst_desc_t {}_entries[] = {{\n'
             ' {}\n}};'.format(set_name, ',\n '.join(insts))]
    return '{}\n\n{}'.format(caps_arrays, '\n'.join(insts))
class EnumerantInitializer(object):
    """Prints an enumerant as the initializer for spv_operand_desc_t."""

    def __init__(self, enumerant, value, caps, exts, parameters, version):
        """Initialization.

        Arguments:
          - enumerant: enumerant name
          - value: enumerant value
          - caps: a sequence of capability names required by this enumerant
          - exts: a sequence of names of extensions enabling this enumerant
          - parameters: a sequence of (operand-kind, operand-quantifier) tuples
          - version: minimal SPIR-V version required for this opcode
        """
        self.enumerant = enumerant
        self.value = value
        self.num_caps = len(caps)
        # C array names ('nullptr' when the corresponding sequence is empty).
        self.caps = get_capability_array_name(caps)
        self.num_exts = len(exts)
        self.exts = get_extension_array_name(exts)
        self.parameters = [convert_operand_kind(p) for p in parameters]
        self.version = convert_min_required_version(version)

    def __str__(self):
        # Doubled braces emit literal '{'/'}' in the C initializer.
        template = ['{{"{enumerant}"', '{value}', '{num_caps}',
                    '{caps}', '{num_exts}', '{exts}',
                    '{{{parameters}}}', '{min_version}}}']
        return ', '.join(template).format(
            enumerant=self.enumerant,
            value=self.value,
            num_caps=self.num_caps,
            caps=self.caps,
            num_exts=self.num_exts,
            exts=self.exts,
            parameters=', '.join(self.parameters),
            min_version=self.version)
def generate_enum_operand_kind_entry(entry):
    """Returns the C initializer for the given operand enum entry.

    Arguments:
      - entry: a dict containing information about an enum entry

    Returns:
      a string containing the C initializer for spv_operand_desc_t
    """
    enumerant = entry.get('enumerant')
    value = entry.get('value')
    caps = entry.get('capabilities', [])
    exts = entry.get('extensions', [])
    params = entry.get('parameters', [])
    params = [p.get('kind') for p in params]
    # Parameters carry no quantifier in the grammar; pair each kind with ''.
    params = zip(params, [''] * len(params))
    version = entry.get('version', None)
    assert enumerant is not None
    assert value is not None
    return str(EnumerantInitializer(
        enumerant, value, caps, exts, params, version))
def generate_enum_operand_kind(enum):
    """Returns (kind, table-name, table-definition) for the given operand kind.

    The table definition is a C array of spv_operand_desc_t initializers.
    """
    kind = enum.get('kind')
    assert kind is not None

    # Sort enumerants by (value, name) so symbols sharing a value are grouped
    # together; BitEnum values are hexadecimal strings.
    if enum.get('category') == 'ValueEnum':
        def sort_key(e):
            return (e['value'], e['enumerant'])
    else:
        def sort_key(e):
            return (int(e['value'], 16), e['enumerant'])

    sorted_entries = sorted(enum.get('enumerants', []), key=sort_key)
    name = '{}_{}Entries'.format(PYGEN_VARIABLE_PREFIX, kind)
    body = ',\n'.join(' {}'.format(generate_enum_operand_kind_entry(e))
                      for e in sorted_entries)
    entries = 'static const spv_operand_desc_t {name}[] = {{\n{entries}\n}};'.format(
        name=name, entries=body)
    return kind, name, entries
def generate_operand_kind_table(enums):
    """Returns the info table containing all SPIR-V operand kinds."""
    # We only need to output info tables for those operand kinds that are enums.
    enums = [e for e in enums if e.get('category') in ['ValueEnum', 'BitEnum']]
    caps = [entry.get('capabilities', [])
            for enum in enums
            for entry in enum.get('enumerants', [])]
    caps_arrays = generate_capability_arrays(caps)
    exts = [entry.get('extensions', [])
            for enum in enums
            for entry in enum.get('enumerants', [])]
    exts_arrays = generate_extension_arrays(exts)
    # Each element becomes a (kind, table-name, table-definition) tuple.
    enums = [generate_enum_operand_kind(e) for e in enums]
    # We have three operand kinds that requires their optional counterpart to
    # exist in the operand info table.
    three_optional_enums = ['ImageOperands', 'AccessQualifier', 'MemoryAccess']
    three_optional_enums = [e for e in enums if e[0] in three_optional_enums]
    enums.extend(three_optional_enums)
    enum_kinds, enum_names, enum_entries = zip(*enums)
    # Mark the last three as optional ones.
    enum_quantifiers = [''] * (len(enums) - 3) + ['?'] * 3
    # And we don't want redefinition of them.
    enum_entries = enum_entries[:-3]
    # The optional duplicates reuse the same entry arrays under OPTIONAL_* kinds.
    enum_kinds = [convert_operand_kind(e)
                  for e in zip(enum_kinds, enum_quantifiers)]
    table_entries = zip(enum_kinds, enum_names, enum_names)
    table_entries = [' {{{}, ARRAY_SIZE({}), {}}}'.format(*e)
                     for e in table_entries]
    template = [
        'static const spv_operand_desc_group_t {p}_OperandInfoTable[] = {{',
        '{enums}', '}};']
    table = '\n'.join(template).format(
        p=PYGEN_VARIABLE_PREFIX, enums=',\n'.join(table_entries))
    return '\n\n'.join((caps_arrays,) + (exts_arrays,) + enum_entries + (table,))
def get_extension_list(instructions, operand_kinds):
    """Returns extensions as an alphabetically sorted list of strings."""
    # Both instructions and operand-kind enumerants may carry an
    # 'extensions' field; collect them all.
    items = list(instructions)
    for kind in operand_kinds:
        items.extend(kind.get('enumerants', []))
    extensions = []
    for item in items:
        extensions.extend(item.get('extensions', []))
    registry_only = EXTENSIONS_FROM_SPIRV_REGISTRY_AND_NOT_FROM_GRAMMARS.split()
    for name in registry_only:
        # If it's already listed in a grammar, then don't put it in the
        # special exceptions list.
        assert name not in extensions, "Extension %s is already in a grammar file" % name
    extensions.extend(registry_only)
    # Validator would ignore type declaration unique check. Should only be used
    # for legacy autogenerated test files containing multiple instances of the
    # same type declaration, if fixing the test by other methods is too
    # difficult. Shouldn't be used for any other reasons.
    extensions.append('SPV_VALIDATOR_ignore_type_decl_unique')
    return sorted(set(extensions))
def get_capabilities(operand_kinds):
    """Returns capabilities as a list of JSON objects, in order of
    appearance.
    """
    return [
        enumerant
        for item in operand_kinds
        if item.get('kind') in ['Capability']
        for enumerant in item.get('enumerants', [])
    ]
def generate_extension_enum(extensions):
    """Returns enumeration containing extensions declared in the grammar."""
    return ',\n'.join('k{}'.format(ext) for ext in extensions)
def generate_extension_to_string_mapping(extensions):
    """Returns mapping function from extensions to corresponding strings."""
    case_template = ' case Extension::k{extension}:\n' \
        ' return "{extension}";\n'
    pieces = ['const char* ExtensionToString(Extension extension) {\n',
              ' switch (extension) {\n']
    pieces.extend(case_template.format(extension=extension)
                  for extension in extensions)
    pieces.append(' };\n\n return "";\n}')
    return ''.join(pieces)
def generate_string_to_extension_mapping(extensions):
    """Returns mapping function from strings to corresponding extensions.

    The emitted C++ code binary-searches with std::equal_range, so it relies
    on *extensions* being passed in sorted order.
    """
    known_strs = ', '.join('"{}"'.format(e) for e in extensions)
    known_ids = ', '.join('Extension::k{}'.format(e) for e in extensions)
    return '''
bool GetExtensionFromString(const char* str, Extension* extension) {{
static const char* known_ext_strs[] = {{ {strs} }};
static const Extension known_ext_ids[] = {{ {ids} }};
const auto b = std::begin(known_ext_strs);
const auto e = std::end(known_ext_strs);
const auto found = std::equal_range(
b, e, str, [](const char* str1, const char* str2) {{
return std::strcmp(str1, str2) < 0;
}});
if (found.first == e || found.first == found.second) return false;
*extension = known_ext_ids[found.first - b];
return true;
}}
'''.format(strs=known_strs, ids=known_ids)
def generate_capability_to_string_mapping(operand_kinds):
    """Returns mapping function from capabilities to corresponding strings.

    We take care to avoid emitting duplicate values.
    """
    case_template = ' case SpvCapability{capability}:\n' \
        ' return "{capability}";\n'
    pieces = ['const char* CapabilityToString(SpvCapability capability) {\n',
              ' switch (capability) {\n']
    seen_values = set()  # capability values already emitted
    for capability in get_capabilities(operand_kinds):
        value = capability.get('value')
        if value in seen_values:
            continue
        seen_values.add(value)
        pieces.append(case_template.format(capability=capability.get('enumerant')))
    pieces.append(' case SpvCapabilityMax:\n'
                  ' assert(0 && "Attempting to convert SpvCapabilityMax to string");\n'
                  ' return "";\n')
    pieces.append(' };\n\n return "";\n}')
    return ''.join(pieces)
def generate_all_string_enum_mappings(extensions, operand_kinds):
    """Returns all string-to-enum / enum-to-string mapping tables."""
    tables = [
        generate_extension_to_string_mapping(extensions),
        generate_string_to_extension_mapping(extensions),
        generate_capability_to_string_mapping(operand_kinds),
    ]
    return '\n\n'.join(tables)
def main():
    """Command-line entry point.

    Parses the arguments, validates the allowed input/output combinations,
    and writes each requested generated table.

    Fixes over the previous version:
    - Output files were opened with ``print(..., file=open(...))`` and never
      closed; all writes now use ``with`` so files are closed and flushed
      deterministically.
    - The error message for a missing DebugInfo grammar misspelled the
      option as '--exinst-debuginfo-grammar'.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Generate SPIR-V info tables')
    parser.add_argument('--spirv-core-grammar', metavar='<path>',
                        type=str, required=False,
                        help='input JSON grammar file for core SPIR-V '
                        'instructions')
    parser.add_argument('--extinst-debuginfo-grammar', metavar='<path>',
                        type=str, required=False, default=None,
                        help='input JSON grammar file for DebugInfo extended '
                        'instruction set')
    parser.add_argument('--extinst-glsl-grammar', metavar='<path>',
                        type=str, required=False, default=None,
                        help='input JSON grammar file for GLSL extended '
                        'instruction set')
    parser.add_argument('--extinst-opencl-grammar', metavar='<path>',
                        type=str, required=False, default=None,
                        help='input JSON grammar file for OpenCL extended '
                        'instruction set')
    parser.add_argument('--core-insts-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for core SPIR-V instructions')
    parser.add_argument('--glsl-insts-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for GLSL extended instruction set')
    parser.add_argument('--opencl-insts-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for OpenCL extended instruction set')
    parser.add_argument('--operand-kinds-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for operand kinds')
    parser.add_argument('--extension-enum-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for extension enumeration')
    parser.add_argument('--enum-string-mapping-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for enum-string mappings')
    parser.add_argument('--extinst-vendor-grammar', metavar='<path>',
                        type=str, required=False, default=None,
                        help='input JSON grammar file for vendor extended '
                        'instruction set')
    parser.add_argument('--vendor-insts-output', metavar='<path>',
                        type=str, required=False, default=None,
                        help='output file for vendor extended instruction set')
    args = parser.parse_args()

    # Inputs and their outputs must be requested in consistent pairs.
    if (args.core_insts_output is None) != (args.operand_kinds_output is None):
        print('error: --core-insts-output and --operand-kinds-output '
              'should be specified together.')
        exit(1)
    if args.operand_kinds_output and not (args.spirv_core_grammar and
                                          args.extinst_debuginfo_grammar):
        print('error: --operand-kinds-output requires --spirv-core-grammar '
              'and --extinst-debuginfo-grammar')
        exit(1)
    if (args.glsl_insts_output is None) != (args.extinst_glsl_grammar is None):
        print('error: --glsl-insts-output and --extinst-glsl-grammar '
              'should be specified together.')
        exit(1)
    if (args.opencl_insts_output is None) != (args.extinst_opencl_grammar is None):
        print('error: --opencl-insts-output and --extinst-opencl-grammar '
              'should be specified together.')
        exit(1)
    if (args.vendor_insts_output is None) != (args.extinst_vendor_grammar is None):
        print('error: --vendor-insts-output and '
              '--extinst-vendor-grammar should be specified together.')
        exit(1)
    if all([args.core_insts_output is None,
            args.glsl_insts_output is None,
            args.opencl_insts_output is None,
            args.vendor_insts_output is None,
            args.extension_enum_output is None,
            args.enum_string_mapping_output is None]):
        print('error: at least one output should be specified.')
        exit(1)

    if args.spirv_core_grammar is not None:
        # Core and DebugInfo grammars are merged before table generation.
        with open(args.spirv_core_grammar) as json_file:
            core_grammar = json.loads(json_file.read())
        with open(args.extinst_debuginfo_grammar) as debuginfo_json_file:
            debuginfo_grammar = json.loads(debuginfo_json_file.read())
        instructions = []
        instructions.extend(core_grammar['instructions'])
        instructions.extend(debuginfo_grammar['instructions'])
        operand_kinds = []
        operand_kinds.extend(core_grammar['operand_kinds'])
        operand_kinds.extend(debuginfo_grammar['operand_kinds'])
        extensions = get_extension_list(instructions, operand_kinds)
        if args.core_insts_output is not None:
            make_path_to_file(args.core_insts_output)
            make_path_to_file(args.operand_kinds_output)
            with open(args.core_insts_output, 'w') as out:
                print(generate_instruction_table(core_grammar['instructions']),
                      file=out)
            with open(args.operand_kinds_output, 'w') as out:
                print(generate_operand_kind_table(operand_kinds), file=out)
        if args.extension_enum_output is not None:
            make_path_to_file(args.extension_enum_output)
            with open(args.extension_enum_output, 'w') as out:
                print(generate_extension_enum(extensions), file=out)
        if args.enum_string_mapping_output is not None:
            make_path_to_file(args.enum_string_mapping_output)
            with open(args.enum_string_mapping_output, 'w') as out:
                print(generate_all_string_enum_mappings(
                    extensions, operand_kinds), file=out)
    if args.extinst_glsl_grammar is not None:
        with open(args.extinst_glsl_grammar) as json_file:
            grammar = json.loads(json_file.read())
        make_path_to_file(args.glsl_insts_output)
        with open(args.glsl_insts_output, 'w') as out:
            print(generate_extended_instruction_table(
                grammar['instructions'], "glsl"), file=out)
    if args.extinst_opencl_grammar is not None:
        with open(args.extinst_opencl_grammar) as json_file:
            grammar = json.loads(json_file.read())
        make_path_to_file(args.opencl_insts_output)
        with open(args.opencl_insts_output, 'w') as out:
            print(generate_extended_instruction_table(
                grammar['instructions'], "opencl"), file=out)
    if args.extinst_vendor_grammar is not None:
        with open(args.extinst_vendor_grammar) as json_file:
            grammar = json.loads(json_file.read())
        make_path_to_file(args.vendor_insts_output)
        # Derive the set name from the grammar file name:
        # extinst.<name>.grammar.json -> <name>, with '-' replaced by '_'.
        name = args.extinst_vendor_grammar
        start = name.find("extinst.") + len("extinst.")
        name = name[start:-len(".grammar.json")].replace("-", "_")
        with open(args.vendor_insts_output, 'w') as out:
            print(generate_extended_instruction_table(
                grammar['instructions'], name), file=out)


if __name__ == '__main__':
    main()
|
{
"content_hash": "0ea0ea8a5b8218dbddb65779dd2f33ec",
"timestamp": "",
"source": "github",
"line_count": 735,
"max_line_length": 102,
"avg_line_length": 39.310204081632655,
"alnum_prop": 0.6122244142179767,
"repo_name": "septag/termite",
"id": "ae948d3886e542e7bd08120a61c9fdd07ab8d6f7",
"size": "29493",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "deps/bgfx/3rdparty/spirv-tools/utils/generate_grammar_tables.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13601"
},
{
"name": "C++",
"bytes": "1210874"
},
{
"name": "CMake",
"bytes": "153739"
},
{
"name": "Java",
"bytes": "122029"
},
{
"name": "JavaScript",
"bytes": "2241"
},
{
"name": "Objective-C++",
"bytes": "3177"
},
{
"name": "Python",
"bytes": "19559"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
}
|
"""
list of subjects that are to be excluded, can be a text file or a list
"""
#exclusionSubjectList = '/home/data/settings/ex_subjects.txt'
#exclusionSubjectList = ['sub001', 'sub003']
exclusionSubjectList = None
"""
list of subjects that are included, can be a text file or a list
if None, extract data runs on all the subjects
"""
#subjectList = '/home/data/settings/include_subjects.txt'
#subjectList = ['sub002', 'sub003']
subjectList = None
"""
Put %s where site and subjects are in the path
"""
anatomicalTemplate = '/path/to/data/%s/%s/session_*/anat_*/mprage.nii.gz'
"""
Functional Path
Put %s where site and subjects are in the path
"""
functionalTemplate = '/path/to/data/%s/%s/session_*/rest_*/rest.nii.gz'
"""
list of sites, can be a text file or a list
if None, extract data runs on all sites
"""
#siteList = ['ABIDE', 'ADHD-200']
siteList = None
"""
Scan Parameters csv file path. This file is mandatory for
slice timing correction. please use the right format for the csv,
refer to user guide. If None, CPAC does not slice timing correction
"""
#scanParametersCSV = /home/data/settings/scan_parameters.csv
scanParametersCSV = None
|
{
"content_hash": "acb4d87a2dd8832b11c48c576a339879",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 25.755555555555556,
"alnum_prop": 0.722174288179465,
"repo_name": "sgiavasis/C-PAC",
"id": "163c1bad23f6aabe999ea4b202355dda0088fc29",
"size": "1159",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "CPAC/GUI/resources/html/_static/data_config_adhd.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16855"
},
{
"name": "JavaScript",
"bytes": "52711"
},
{
"name": "Perl",
"bytes": "480"
},
{
"name": "Python",
"bytes": "5521856"
},
{
"name": "Shell",
"bytes": "4507"
}
],
"symlink_target": ""
}
|
"""
Implements operations on volumes residing on VMware datastores.
"""
from oslo_log import log as logging
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware import pbm
from oslo_vmware import vim_util
import six
from six.moves import urllib
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
LOG = logging.getLogger(__name__)
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
def split_datastore_path(datastore_path):
    """Split the datastore path to components.

    return the datastore name, relative folder path and the file name

    E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
    (datastore1, my_volume/, my_volume.vmdk)

    :param datastore_path: Datastore path of a file
    :return: Parsed datastore name, relative folder path and file name
    """
    pieces = datastore_path.split('[', 1)[1].split(']', 1)
    if len(pieces) == 1:
        ds_name, folder, fname = pieces[0], None, None
    else:
        ds_name, tail = pieces
        # tail looks like " my_volume/my_volume.vmdk": the file name is the
        # last path component and the folder is everything before it.
        fname = tail.split('/')[-1]
        folder = tail[:-len(fname)]
    return (ds_name.strip(), folder.strip(), fname.strip())
class VirtualDiskPath(object):
    """Class representing paths of files comprising a virtual disk."""

    def __init__(self, ds_name, folder_path, disk_name):
        """Creates path object for the given disk.

        :param ds_name: name of the datastore where disk is stored
        :param folder_path: absolute path of the folder containing the disk
        :param disk_name: name of the virtual disk
        """
        descriptor = "%s%s.vmdk" % (folder_path, disk_name)
        self._descriptor_file_path = descriptor
        self._descriptor_ds_file_path = self.get_datastore_file_path(
            ds_name, descriptor)

    def get_datastore_file_path(self, ds_name, file_path):
        """Get datastore path corresponding to the given file path.

        :param ds_name: name of the datastore containing the file represented
                        by the given file path
        :param file_path: absolute path of the file
        :return: datastore file path
        """
        return "[%s] %s" % (ds_name, file_path)

    def get_descriptor_file_path(self):
        """Get absolute file path of the virtual disk descriptor."""
        return self._descriptor_file_path

    def get_descriptor_ds_file_path(self):
        """Get datastore file path of the virtual disk descriptor."""
        return self._descriptor_ds_file_path
class FlatExtentVirtualDiskPath(VirtualDiskPath):
    """Paths of files in a non-monolithic disk with a single flat extent."""

    def __init__(self, ds_name, folder_path, disk_name):
        """Creates path object for the given disk.

        :param ds_name: name of the datastore where disk is stored
        :param folder_path: absolute path of the folder containing the disk
        :param disk_name: name of the virtual disk
        """
        super(FlatExtentVirtualDiskPath, self).__init__(
            ds_name, folder_path, disk_name)
        flat_extent = "%s%s-flat.vmdk" % (folder_path, disk_name)
        self._flat_extent_file_path = flat_extent
        self._flat_extent_ds_file_path = self.get_datastore_file_path(
            ds_name, flat_extent)

    def get_flat_extent_file_path(self):
        """Get absolute file path of the flat extent."""
        return self._flat_extent_file_path

    def get_flat_extent_ds_file_path(self):
        """Get datastore file path of the flat extent."""
        return self._flat_extent_ds_file_path
class MonolithicSparseVirtualDiskPath(VirtualDiskPath):
    """Paths of files comprising a monolithic sparse disk.

    No extent files beyond the descriptor are modeled, so the base class
    behavior is sufficient.
    """
    pass
class VirtualDiskType(object):
    """Supported virtual disk types."""

    EAGER_ZEROED_THICK = "eagerZeroedThick"
    PREALLOCATED = "preallocated"
    THIN = "thin"

    # thick in extra_spec means lazy-zeroed thick disk
    EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK,
                                 'thick': PREALLOCATED,
                                 'thin': THIN
                                 }

    @staticmethod
    def is_valid(extra_spec_disk_type):
        """Check if the given disk type in extra_spec is valid.

        :param extra_spec_disk_type: disk type in extra_spec
        :return: True if valid
        """
        return extra_spec_disk_type in VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT

    @staticmethod
    def validate(extra_spec_disk_type):
        """Validate the given disk type in extra_spec.

        This method throws an instance of InvalidDiskTypeException if the
        given disk type is invalid.

        :param extra_spec_disk_type: disk type in extra_spec
        :raises: InvalidDiskTypeException
        """
        if VirtualDiskType.is_valid(extra_spec_disk_type):
            return
        raise vmdk_exceptions.InvalidDiskTypeException(
            disk_type=extra_spec_disk_type)

    @staticmethod
    def get_virtual_disk_type(extra_spec_disk_type):
        """Return disk type corresponding to the extra_spec disk type.

        :param extra_spec_disk_type: disk type in extra_spec
        :return: virtual disk type
        :raises: InvalidDiskTypeException
        """
        VirtualDiskType.validate(extra_spec_disk_type)
        return VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[extra_spec_disk_type]
class VirtualDiskAdapterType(object):
    """Supported virtual disk adapter types."""

    LSI_LOGIC = "lsiLogic"
    BUS_LOGIC = "busLogic"
    LSI_LOGIC_SAS = "lsiLogicsas"
    IDE = "ide"

    @staticmethod
    def is_valid(adapter_type):
        """Check if the given adapter type is valid.

        :param adapter_type: adapter type to check
        :return: True if valid
        """
        known_types = (VirtualDiskAdapterType.LSI_LOGIC,
                       VirtualDiskAdapterType.BUS_LOGIC,
                       VirtualDiskAdapterType.LSI_LOGIC_SAS,
                       VirtualDiskAdapterType.IDE)
        return adapter_type in known_types

    @staticmethod
    def validate(extra_spec_adapter_type):
        """Validate the given adapter type in extra_spec.

        This method throws an instance of InvalidAdapterTypeException if the
        given adapter type is invalid.

        :param extra_spec_adapter_type: adapter type in extra_spec
        :raises: InvalidAdapterTypeException
        """
        if VirtualDiskAdapterType.is_valid(extra_spec_adapter_type):
            return
        raise vmdk_exceptions.InvalidAdapterTypeException(
            invalid_type=extra_spec_adapter_type)

    @staticmethod
    def get_adapter_type(extra_spec_adapter_type):
        """Get the adapter type to be used in VirtualDiskSpec.

        :param extra_spec_adapter_type: adapter type in the extra_spec
        :return: adapter type to be used in VirtualDiskSpec
        """
        VirtualDiskAdapterType.validate(extra_spec_adapter_type)
        # We set the adapter type as lsiLogic for lsiLogicsas since it is not
        # supported by VirtualDiskManager APIs. This won't be a problem because
        # we attach the virtual disk to the correct controller type and the
        # disk adapter type is always resolved using its controller key.
        if extra_spec_adapter_type == VirtualDiskAdapterType.LSI_LOGIC_SAS:
            return VirtualDiskAdapterType.LSI_LOGIC
        return extra_spec_adapter_type
class ControllerType(object):
    """Encapsulate various controller types."""

    LSI_LOGIC = 'VirtualLsiLogicController'
    BUS_LOGIC = 'VirtualBusLogicController'
    LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
    IDE = 'VirtualIDEController'

    CONTROLLER_TYPE_DICT = {
        VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
        VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
        VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
        VirtualDiskAdapterType.IDE: IDE}

    @staticmethod
    def get_controller_type(adapter_type):
        """Get the disk controller type based on the given adapter type.

        :param adapter_type: disk adapter type
        :return: controller type corresponding to the given adapter type
        :raises: InvalidAdapterTypeException
        """
        try:
            return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
        except KeyError:
            raise vmdk_exceptions.InvalidAdapterTypeException(
                invalid_type=adapter_type)

    @staticmethod
    def is_scsi_controller(controller_type):
        """Check if the given controller is a SCSI controller.

        :param controller_type: controller type
        :return: True if the controller is a SCSI controller
        """
        scsi_types = (ControllerType.LSI_LOGIC,
                      ControllerType.BUS_LOGIC,
                      ControllerType.LSI_LOGIC_SAS)
        return controller_type in scsi_types
class VMwareVolumeOps(object):
"""Manages volume operations."""
    def __init__(self, session, max_objects):
        """Create a volume-operations helper.

        :param session: session object used for all vSphere API calls
                        (must support invoke_api and wait_for_task)
        :param max_objects: maximum number of objects to fetch per
                            retrieval page (passed to 'get_objects')
        """
        self._session = session
        self._max_objects = max_objects
        # Cache of inventory-folder morefs keyed by "/"-joined path;
        # populated by create_vm_inventory_folder().
        self._folder_cache = {}
    def get_backing(self, name):
        """Get the backing based on name.

        Pages through all VirtualMachine objects and returns the first one
        whose first retrieved property value equals the given name.

        :param name: Name of the backing
        :return: Managed object reference to the backing, or None (implicit)
                 when no match is found
        """
        retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
                                                   self._session.vim,
                                                   'VirtualMachine',
                                                   self._max_objects)
        while retrieve_result:
            vms = retrieve_result.objects
            for vm in vms:
                if vm.propSet[0].val == name:
                    # We got the result, so cancel further retrieval.
                    self.cancel_retrieval(retrieve_result)
                    return vm.obj
            # Result not obtained, continue retrieving results.
            retrieve_result = self.continue_retrieval(retrieve_result)
        LOG.debug("Did not find any backing with name: %s", name)
    def delete_backing(self, backing):
        """Delete the backing.

        Blocks until the vCenter Destroy_Task completes.

        :param backing: Managed object reference to the backing
        """
        LOG.debug("Deleting the VM backing: %s.", backing)
        task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
                                        backing)
        LOG.debug("Initiated deletion of VM backing: %s.", backing)
        self._session.wait_for_task(task)
        LOG.info(_LI("Deleted the VM backing: %s."), backing)
    # TODO(kartikaditya) Keep the methods not specific to volume in
    # a different file
    def get_host(self, instance):
        """Get host under which instance is present.

        :param instance: Managed object reference of the instance VM
        :return: Host managing the instance VM (the VM's 'runtime.host'
                 property)
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, instance,
                                        'runtime.host')
    def is_host_usable(self, host):
        """Check if the given ESX host is usable.

        A host is usable if it is connected to vCenter server and not in
        maintenance mode.

        :param host: Managed object reference to the ESX host
        :return: True if host is usable, False otherwise
        """
        runtime_info = self._session.invoke_api(vim_util,
                                                'get_object_property',
                                                self._session.vim,
                                                host,
                                                'runtime')
        # Both conditions come from the host's 'runtime' property.
        return (runtime_info.connectionState == 'connected' and
                not runtime_info.inMaintenanceMode)
    def get_hosts(self):
        """Get all host from the inventory.

        NOTE(review): this returns the raw 'get_objects' result -- i.e. a
        retrieve result object of up to self._max_objects items, not a plain
        list; callers may need continue_retrieval() for further pages.

        :return: All the hosts from the inventory
        """
        return self._session.invoke_api(vim_util, 'get_objects',
                                        self._session.vim,
                                        'HostSystem', self._max_objects)
    def continue_retrieval(self, retrieve_result):
        """Continue retrieval of results if necessary.

        :param retrieve_result: Result from RetrievePropertiesEx
        :return: the next page of results; falsy when exhausted (see the
                 paging loop in get_backing)
        """
        return self._session.invoke_api(vim_util, 'continue_retrieval',
                                        self._session.vim, retrieve_result)
    def cancel_retrieval(self, retrieve_result):
        """Cancel retrieval of results if necessary.

        Used to stop paging early once the wanted object has been found
        (see get_backing). No return value.

        :param retrieve_result: Result from RetrievePropertiesEx
        """
        self._session.invoke_api(vim_util, 'cancel_retrieval',
                                 self._session.vim, retrieve_result)
def _is_usable(self, mount_info):
"""Check if a datastore is usable as per the given mount info.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param mount_info: Host mount information
:return: True if datastore is usable
"""
writable = mount_info.accessMode == 'readWrite'
# If mounted attribute is not set, then default is True
mounted = getattr(mount_info, 'mounted', True)
# If accessible attribute is not set, then default is False
accessible = getattr(mount_info, 'accessible', False)
return writable and mounted and accessible
    def get_connected_hosts(self, datastore):
        """Get all the hosts to which the datastore is connected and usable.

        The datastore is considered to be usable for a host only if it is
        writable, mounted and accessible.

        NOTE(review): this collects host_mount.key.value -- the string
        values of the host morefs, not the moref objects themselves
        (is_datastore_accessible relies on exactly this).

        :param datastore: Reference to the datastore entity
        :return: List of managed object references of all connected
                 hosts
        """
        summary = self.get_summary(datastore)
        if not summary.accessible:
            return []
        host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
                                               self._session.vim, datastore,
                                               'host')
        # A datastore with no host mounts has no DatastoreHostMount attribute.
        if not hasattr(host_mounts, 'DatastoreHostMount'):
            return []
        connected_hosts = []
        for host_mount in host_mounts.DatastoreHostMount:
            if self._is_usable(host_mount.mountInfo):
                connected_hosts.append(host_mount.key.value)
        return connected_hosts
    def is_datastore_accessible(self, datastore, host):
        """Check if the datastore is accessible to the given host.

        :param datastore: datastore reference
        :param host: managed object reference of the host
        :return: True if the datastore is accessible
        """
        hosts = self.get_connected_hosts(datastore)
        # get_connected_hosts returns moref *values* (strings), hence the
        # comparison against host.value rather than host itself.
        return host.value in hosts
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
:param summary: Summary information about the datastore
:return: True if the datastore is entering maintenance or in
maintenance
"""
if hasattr(summary, 'maintenanceMode'):
return summary.maintenanceMode in ['enteringMaintenance',
'inMaintenance']
return False
    def _is_valid(self, datastore, host):
        """Check if the datastore is valid for the given host.

        A datastore is considered valid for a host only if the datastore is
        writable, mounted and accessible. Also, the datastore should not be
        in maintenance mode.

        :param datastore: Reference to the datastore entity
        :param host: Reference to the host entity
        :return: True if datastore can be used for volume creation
        """
        summary = self.get_summary(datastore)
        in_maintenance = self._in_maintenance(summary)
        if not summary.accessible or in_maintenance:
            return False
        host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
                                               self._session.vim, datastore,
                                               'host')
        for host_mount in host_mounts.DatastoreHostMount:
            if host_mount.key.value == host.value:
                return self._is_usable(host_mount.mountInfo)
        # The datastore is not mounted on the given host at all.
        return False
    def get_dss_rp(self, host):
        """Get accessible datastores and resource pool of the host.

        :param host: Managed object reference of the host
        :return: Datastores accessible to the host and resource pool to which
                 the host belongs to
        :raises: VimException if no valid datastore is attached to the host
        """
        props = self._session.invoke_api(vim_util, 'get_object_properties',
                                         self._session.vim, host,
                                         ['datastore', 'parent'])
        # Get datastores and compute resource or cluster compute resource
        datastores = []
        compute_resource = None
        for elem in props:
            for prop in elem.propSet:
                if prop.name == 'datastore' and prop.val:
                    # Consider only if datastores are present under host
                    datastores = prop.val.ManagedObjectReference
                elif prop.name == 'parent':
                    compute_resource = prop.val
        LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
                  {'host': host, 'ds': datastores})
        # Filter datastores based on if it is accessible, mounted and writable
        valid_dss = []
        for datastore in datastores:
            if self._is_valid(datastore, host):
                valid_dss.append(datastore)
        # Get resource pool from compute resource or cluster compute resource
        resource_pool = self._session.invoke_api(vim_util,
                                                 'get_object_property',
                                                 self._session.vim,
                                                 compute_resource,
                                                 'resourcePool')
        if not valid_dss:
            msg = _("There are no valid datastores attached to %s.") % host
            LOG.error(msg)
            raise exceptions.VimException(msg)
        else:
            LOG.debug("Valid datastores are: %s", valid_dss)
        return (valid_dss, resource_pool)
    def _get_parent(self, child, parent_type):
        """Get immediate parent of given type via 'parent' property.

        Walks up the inventory tree recursively, one 'parent' hop per call.

        :param child: Child entity reference (None yields None)
        :param parent_type: Entity type of the parent
        :return: Immediate parent of specific type up the hierarchy via
                 'parent' property
        """
        if not child:
            return None
        if child._type == parent_type:
            return child
        parent = self._session.invoke_api(vim_util, 'get_object_property',
                                          self._session.vim, child, 'parent')
        return self._get_parent(parent, parent_type)
    def get_dc(self, child):
        """Get parent datacenter up the hierarchy via 'parent' property.

        :param child: Reference of the child entity
        :return: Parent Datacenter of the param child entity, or None if
                 none is found on the way up
        """
        return self._get_parent(child, 'Datacenter')
    def get_vmfolder(self, datacenter):
        """Get the vmFolder.

        :param datacenter: Reference to the datacenter entity
        :return: vmFolder property of the datacenter
        """
        return self._session.invoke_api(vim_util, 'get_object_property',
                                        self._session.vim, datacenter,
                                        'vmFolder')
    def _get_child_folder(self, parent_folder, child_folder_name):
        """Find a direct child folder by name.

        :param parent_folder: Reference to the parent folder
        :param child_folder_name: name to find; compared against the
                                  URL-unquoted entity name
        :return: Reference to the child folder, or None (implicit) when no
                 folder with that name exists
        """
        # Get list of child entities for the parent folder
        prop_val = self._session.invoke_api(vim_util, 'get_object_property',
                                            self._session.vim, parent_folder,
                                            'childEntity')
        if prop_val and hasattr(prop_val, 'ManagedObjectReference'):
            child_entities = prop_val.ManagedObjectReference
            # Return if the child folder with input name is already present
            for child_entity in child_entities:
                if child_entity._type != 'Folder':
                    continue
                child_entity_name = self.get_entity_name(child_entity)
                if (child_entity_name
                        and (urllib.parse.unquote(child_entity_name)
                             == child_folder_name)):
                    LOG.debug("Child folder: %s exists.", child_folder_name)
                    return child_entity
    def create_folder(self, parent_folder, child_folder_name):
        """Creates child folder with given name under the given parent folder.

        The method first checks if a child folder already exists, if it does,
        then it returns a moref for the folder, else it creates one and then
        return the moref.

        :param parent_folder: Reference to the folder entity
        :param child_folder_name: Name of the child folder
        :return: Reference to the child folder with input name if it already
                 exists, else create one and return the reference
        """
        LOG.debug("Creating folder: %(child_folder_name)s under parent "
                  "folder: %(parent_folder)s.",
                  {'child_folder_name': child_folder_name,
                   'parent_folder': parent_folder})
        child_folder = self._get_child_folder(parent_folder, child_folder_name)
        if not child_folder:
            # Need to create the child folder.
            try:
                child_folder = self._session.invoke_api(self._session.vim,
                                                        'CreateFolder',
                                                        parent_folder,
                                                        name=child_folder_name)
                LOG.debug("Created child folder: %s.", child_folder)
            except exceptions.DuplicateName:
                # Another thread is trying to create the same folder, ignore
                # the exception. Benign race: the losing thread re-reads the
                # folder created by the winner.
                child_folder = self._get_child_folder(parent_folder,
                                                      child_folder_name)
        return child_folder
    def create_vm_inventory_folder(self, datacenter, path_comp):
        """Create and return a VM inventory folder.

        This method caches references to inventory folders returned.

        :param datacenter: Reference to datacenter
        :param path_comp: Path components as a list
        :return: Reference to the deepest folder in path_comp
        """
        LOG.debug("Creating inventory folder: %(path_comp)s under VM folder "
                  "of datacenter: %(datacenter)s.",
                  {'path_comp': path_comp,
                   'datacenter': datacenter})
        # Cache keys are "/"-joined paths rooted at the datacenter moref
        # value, so folders of different datacenters never collide.
        path = "/" + datacenter.value
        parent = self._folder_cache.get(path)
        if not parent:
            parent = self.get_vmfolder(datacenter)
            self._folder_cache[path] = parent
        folder = None
        for folder_name in path_comp:
            path = "/".join([path, folder_name])
            folder = self._folder_cache.get(path)
            if not folder:
                folder = self.create_folder(parent, folder_name)
                self._folder_cache[path] = folder
            parent = folder
        LOG.debug("Inventory folder for path: %(path)s is %(folder)s.",
                  {'path': path,
                   'folder': folder})
        return folder
    def extend_virtual_disk(self, requested_size_in_gb, path, dc_ref,
                            eager_zero=False):
        """Extend the virtual disk to the requested size.

        Blocks until the ExtendVirtualDisk_Task completes.

        :param requested_size_in_gb: Size of the volume in GB
        :param path: Datastore path of the virtual disk to extend
        :param dc_ref: Reference to datacenter
        :param eager_zero: Boolean determining if the free space
                           is zeroed out
        """
        LOG.debug("Extending virtual disk: %(path)s to %(size)s GB.",
                  {'path': path, 'size': requested_size_in_gb})
        diskMgr = self._session.vim.service_content.virtualDiskManager
        # VMWare API needs the capacity unit to be in KB, so convert the
        # capacity unit from GB to KB (1 GB = units.Mi KB).
        size_in_kb = requested_size_in_gb * units.Mi
        task = self._session.invoke_api(self._session.vim,
                                        "ExtendVirtualDisk_Task",
                                        diskMgr,
                                        name=path,
                                        datacenter=dc_ref,
                                        newCapacityKb=size_in_kb,
                                        eagerZero=eager_zero)
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully extended virtual disk: %(path)s to "
                     "%(size)s GB."),
                 {'path': path, 'size': requested_size_in_gb})
    def _create_controller_config_spec(self, adapter_type):
        """Returns config spec for adding a disk controller.

        :param adapter_type: disk adapter type
        :return: VirtualDeviceConfigSpec with operation 'add'
        """
        cf = self._session.vim.client.factory
        controller_type = ControllerType.get_controller_type(adapter_type)
        controller_device = cf.create('ns0:%s' % controller_type)
        # Negative key: presumably a temporary placeholder for a device
        # added in this same spec (the disk spec references it as its
        # controllerKey) -- TODO confirm against vSphere API docs.
        controller_device.key = -100
        controller_device.busNumber = 0
        if ControllerType.is_scsi_controller(controller_type):
            controller_device.sharedBus = 'noSharing'
        controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
        controller_spec.operation = 'add'
        controller_spec.device = controller_device
        return controller_spec
    def _create_disk_backing(self, disk_type, vmdk_ds_file_path):
        """Creates file backing for virtual disk.

        :param disk_type: VirtualDiskType value selecting provisioning flags
        :param vmdk_ds_file_path: datastore file path of an existing vmdk,
                                  or None/'' for a new backing
        :return: VirtualDiskFlatVer2BackingInfo object
        """
        cf = self._session.vim.client.factory
        disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
        # PREALLOCATED (lazy-zeroed thick) needs neither flag set.
        if disk_type == VirtualDiskType.EAGER_ZEROED_THICK:
            disk_device_bkng.eagerlyScrub = True
        elif disk_type == VirtualDiskType.THIN:
            disk_device_bkng.thinProvisioned = True
        disk_device_bkng.fileName = vmdk_ds_file_path or ''
        disk_device_bkng.diskMode = 'persistent'
        return disk_device_bkng
    def _create_virtual_disk_config_spec(self, size_kb, disk_type,
                                         controller_key, vmdk_ds_file_path):
        """Returns config spec for adding a virtual disk.

        :param size_kb: requested disk size in KB
        :param disk_type: disk provisioning type
        :param controller_key: device key of the controller the disk
                               attaches to
        :param vmdk_ds_file_path: datastore path of an existing vmdk, or
                                  None to create a new backing file
        :return: VirtualDeviceConfigSpec with operation 'add'
        """
        cf = self._session.vim.client.factory
        disk_device = cf.create('ns0:VirtualDisk')
        # disk size should be at least 1024KB
        disk_device.capacityInKB = max(units.Ki, int(size_kb))
        # Keep the disk's (temporary negative) key distinct from the
        # controller's key.
        if controller_key < 0:
            disk_device.key = controller_key - 1
        else:
            disk_device.key = -101
        disk_device.unitNumber = 0
        disk_device.controllerKey = controller_key
        disk_device.backing = self._create_disk_backing(disk_type,
                                                        vmdk_ds_file_path)
        disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
        disk_spec.operation = 'add'
        # Create the backing file only for brand-new disks; an existing
        # vmdk path means the file is already there.
        if vmdk_ds_file_path is None:
            disk_spec.fileOperation = 'create'
        disk_spec.device = disk_device
        return disk_spec
def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type,
vmdk_ds_file_path=None):
"""Create controller and disk config specs for adding a new disk.
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: Optional datastore file path of an existing
virtual disk. If specified, file backing is
not created for the virtual disk.
:return: list containing controller and disk config specs
"""
controller_spec = None
if adapter_type == 'ide':
# For IDE disks, use one of the default IDE controllers (with keys
# 200 and 201) created as part of backing VM creation.
controller_key = 200
else:
controller_spec = self._create_controller_config_spec(adapter_type)
controller_key = controller_spec.device.key
disk_spec = self._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
vmdk_ds_file_path)
specs = [disk_spec]
if controller_spec is not None:
specs.append(controller_spec)
return specs
def _get_extra_config_option_values(self, extra_config):
cf = self._session.vim.client.factory
option_values = []
for key, value in six.iteritems(extra_config):
opt = cf.create('ns0:OptionValue')
opt.key = key
opt.value = value
option_values.append(opt)
return option_values
    def _get_create_spec_disk_less(self, name, ds_name, profileId=None,
                                   extra_config=None):
        """Return spec for creating disk-less backing.

        :param name: Name of the backing
        :param ds_name: Datastore name where the disk is to be provisioned
        :param profileId: Storage profile ID for the backing
        :param extra_config: Key-value pairs to be written to backing's
                             extra-config
        :return: Spec for creation
        """
        cf = self._session.vim.client.factory
        vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
        # Only the datastore is given; vCenter picks the VM directory path.
        vm_file_info.vmPathName = '[%s]' % ds_name
        create_spec = cf.create('ns0:VirtualMachineConfigSpec')
        create_spec.name = name
        # Minimal shadow VM: generic guest OS, 1 vCPU, 128 MB memory.
        create_spec.guestId = 'otherGuest'
        create_spec.numCPUs = 1
        create_spec.memoryMB = 128
        create_spec.files = vm_file_info
        # Set the hardware version to a compatible version supported by
        # vSphere 5.0. This will ensure that the backing VM can be migrated
        # without any incompatibility issues in a mixed cluster of ESX hosts
        # with versions 5.0 or above.
        create_spec.version = "vmx-08"
        if profileId:
            vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
            vmProfile.profileId = profileId
            create_spec.vmProfile = [vmProfile]
        if extra_config:
            create_spec.extraConfig = self._get_extra_config_option_values(
                extra_config)
        return create_spec
def get_create_spec(self, name, size_kb, disk_type, ds_name,
profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Return spec for creating backing with a single disk.
:param name: name of the backing
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param ds_name: datastore name where the disk is to be provisioned
:param profileId: storage profile ID for the backing
:param adapter_type: disk adapter type
:param extra_config: key-value pairs to be written to backing's
extra-config
:return: spec for creation
"""
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
create_spec.deviceChange = self._create_specs_for_disk_add(
size_kb, disk_type, adapter_type)
return create_spec
    def _create_backing_int(self, folder, resource_pool, host, create_spec):
        """Helper for create backing methods.

        Invokes CreateVM_Task and waits for it to complete.

        :param folder: Folder in which to create the backing VM
        :param resource_pool: Resource pool reference
        :param host: Host reference
        :param create_spec: VM creation spec
        :return: Reference to the created backing VM
        """
        LOG.debug("Creating volume backing with spec: %s.", create_spec)
        task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
                                        folder, config=create_spec,
                                        pool=resource_pool, host=host)
        task_info = self._session.wait_for_task(task)
        # The task result is the managed object reference of the new VM.
        backing = task_info.result
        LOG.info(_LI("Successfully created volume backing: %s."), backing)
        return backing
    def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
                       host, ds_name, profileId=None, adapter_type='lsiLogic',
                       extra_config=None):
        """Create backing for the volume.

        Creates a VM with one VMDK based on the given inputs.

        :param name: Name of the backing
        :param size_kb: Size in KB of the backing
        :param disk_type: VMDK type for the disk
        :param folder: Folder, where to create the backing under
        :param resource_pool: Resource pool reference
        :param host: Host reference
        :param ds_name: Datastore name where the disk is to be provisioned
        :param profileId: Storage profile ID to be associated with backing
        :param adapter_type: Disk adapter type
        :param extra_config: Key-value pairs to be written to backing's
                             extra-config
        :return: Reference to the created backing entity
        """
        LOG.debug("Creating volume backing with name: %(name)s "
                  "disk_type: %(disk_type)s size_kb: %(size_kb)s "
                  "adapter_type: %(adapter_type)s profileId: %(profile)s at "
                  "folder: %(folder)s resource_pool: %(resource_pool)s "
                  "host: %(host)s datastore_name: %(ds_name)s.",
                  {'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
                   'folder': folder, 'resource_pool': resource_pool,
                   'ds_name': ds_name, 'profile': profileId, 'host': host,
                   'adapter_type': adapter_type})
        # Build a spec for a minimal VM containing a single virtual disk.
        create_spec = self.get_create_spec(
            name, size_kb, disk_type, ds_name, profileId=profileId,
            adapter_type=adapter_type, extra_config=extra_config)
        return self._create_backing_int(folder, resource_pool, host,
                                        create_spec)
    def create_backing_disk_less(self, name, folder, resource_pool,
                                 host, ds_name, profileId=None,
                                 extra_config=None):
        """Create disk-less volume backing.

        This type of backing is useful for creating volume from image. The
        downloaded image from the image service can be copied to a virtual
        disk of desired provisioning type and added to the backing VM.

        :param name: Name of the backing
        :param folder: Folder where the backing is created
        :param resource_pool: Resource pool reference
        :param host: Host reference
        :param ds_name: Name of the datastore used for VM storage
        :param profileId: Storage profile ID to be associated with backing
        :param extra_config: Key-value pairs to be written to backing's
                             extra-config
        :return: Reference to the created backing entity
        """
        LOG.debug("Creating disk-less volume backing with name: %(name)s "
                  "profileId: %(profile)s at folder: %(folder)s "
                  "resource pool: %(resource_pool)s host: %(host)s "
                  "datastore_name: %(ds_name)s.",
                  {'name': name, 'profile': profileId, 'folder': folder,
                   'resource_pool': resource_pool, 'host': host,
                   'ds_name': ds_name})
        # Same creation path as create_backing(), minus the disk specs.
        create_spec = self._get_create_spec_disk_less(
            name, ds_name, profileId=profileId, extra_config=extra_config)
        return self._create_backing_int(folder, resource_pool, host,
                                        create_spec)
def get_datastore(self, backing):
"""Get datastore where the backing resides.
:param backing: Reference to the backing
:return: Datastore reference to which the backing belongs
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'datastore').ManagedObjectReference[0]
def get_summary(self, datastore):
"""Get datastore summary.
:param datastore: Reference to the datastore
:return: 'summary' property of the datastore
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'summary')
def _create_relocate_spec_disk_locator(self, datastore, disk_type,
disk_device):
"""Creates spec for disk type conversion during relocate."""
cf = self._session.vim.client.factory
disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator")
disk_locator.datastore = datastore
disk_locator.diskId = disk_device.key
disk_locator.diskBackingInfo = self._create_disk_backing(disk_type,
None)
return disk_locator
    def _get_relocate_spec(self, datastore, resource_pool, host,
                           disk_move_type, disk_type=None, disk_device=None):
        """Return spec for relocating volume backing.

        :param datastore: Reference to the datastore
        :param resource_pool: Reference to the resource pool
        :param host: Reference to the host
        :param disk_move_type: Disk move type option
        :param disk_type: Destination disk type
        :param disk_device: Virtual device corresponding to the disk
        :return: Spec for relocation
        """
        cf = self._session.vim.client.factory
        relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
        relocate_spec.datastore = datastore
        relocate_spec.pool = resource_pool
        relocate_spec.host = host
        relocate_spec.diskMoveType = disk_move_type
        # A disk locator is added only when converting the disk type.
        if disk_type is not None and disk_device is not None:
            disk_locator = self._create_relocate_spec_disk_locator(datastore,
                                                                   disk_type,
                                                                   disk_device)
            relocate_spec.disk = [disk_locator]
        LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
        return relocate_spec
    def relocate_backing(
            self, backing, datastore, resource_pool, host, disk_type=None):
        """Relocates backing to the input datastore and resource pool.

        The implementation uses moveAllDiskBackingsAndAllowSharing disk move
        type.

        :param backing: Reference to the backing
        :param datastore: Reference to the datastore
        :param resource_pool: Reference to the resource pool
        :param host: Reference to the host
        :param disk_type: destination disk type
        """
        LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
                  "and resource pool: %(rp)s with destination disk type: "
                  "%(disk_type)s.",
                  {'backing': backing,
                   'ds': datastore,
                   'rp': resource_pool,
                   'disk_type': disk_type})
        # Relocate the volume backing
        disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
        # The disk device is needed only when converting the disk type.
        disk_device = None
        if disk_type is not None:
            disk_device = self._get_disk_device(backing)
        relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
                                                disk_move_type, disk_type,
                                                disk_device)
        task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
                                        backing, spec=relocate_spec)
        LOG.debug("Initiated relocation of volume backing: %s.", backing)
        # Block until the relocation completes.
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
                     "to datastore: %(ds)s and resource pool: %(rp)s."),
                 {'backing': backing, 'ds': datastore, 'rp': resource_pool})
    def move_backing_to_folder(self, backing, folder):
        """Move the volume backing to the folder.

        :param backing: Reference to the backing
        :param folder: Reference to the folder
        """
        LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.",
                  {'backing': backing, 'fol': folder})
        task = self._session.invoke_api(self._session.vim,
                                        'MoveIntoFolder_Task', folder,
                                        list=[backing])
        LOG.debug("Initiated move of volume backing: %(backing)s into the "
                  "folder: %(fol)s.", {'backing': backing, 'fol': folder})
        # Block until the move completes.
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully moved volume "
                     "backing: %(backing)s into the "
                     "folder: %(fol)s."), {'backing': backing, 'fol': folder})
    def create_snapshot(self, backing, name, description, quiesce=False):
        """Create snapshot of the backing with given name and description.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        :param description: Snapshot description
        :param quiesce: Whether to quiesce the backing when taking snapshot
        :return: Created snapshot entity reference
        """
        LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.",
                  {'backing': backing, 'name': name})
        # memory=False: the VM's memory state is not captured in the snapshot.
        task = self._session.invoke_api(self._session.vim,
                                        'CreateSnapshot_Task',
                                        backing, name=name,
                                        description=description,
                                        memory=False, quiesce=quiesce)
        LOG.debug("Initiated snapshot of volume backing: %(backing)s "
                  "named: %(name)s.", {'backing': backing, 'name': name})
        task_info = self._session.wait_for_task(task)
        # The task result is the reference to the newly created snapshot.
        snapshot = task_info.result
        LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
                     "backing: %(backing)s."),
                 {'snap': snapshot, 'backing': backing})
        return snapshot
    @staticmethod
    def _get_snapshot_from_tree(name, root):
        """Get snapshot by name from the snapshot tree root.

        Performs a depth-first search starting at the given node.

        :param name: Snapshot name
        :param root: Current root node in the snapshot tree
        :return: Snapshot in the tree with the given name, or None if not
                 found
        """
        if not root:
            return None
        if root.name == name:
            return root.snapshot
        if (not hasattr(root, 'childSnapshotList') or
                not root.childSnapshotList):
            # When root does not have children, the childSnapshotList attr
            # is missing sometime. Adding an additional check.
            return None
        for node in root.childSnapshotList:
            snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
            if snapshot:
                return snapshot
        # Falls through to an implicit None when no subtree has the snapshot.
    def get_snapshot(self, backing, name):
        """Get snapshot of the backing with given name.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        :return: Snapshot entity of the backing with given name, or None when
                 the backing has no snapshots or no match is found
        """
        snapshot = self._session.invoke_api(vim_util, 'get_object_property',
                                            self._session.vim, backing,
                                            'snapshot')
        if not snapshot or not snapshot.rootSnapshotList:
            return None
        # NOTE(review): only the first root snapshot tree is searched —
        # presumably backings have a single root snapshot; confirm before
        # relying on this for multi-root trees.
        for root in snapshot.rootSnapshotList:
            return VMwareVolumeOps._get_snapshot_from_tree(name, root)
def snapshot_exists(self, backing):
"""Check if the given backing contains snapshots."""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if snapshot is None or snapshot.rootSnapshotList is None:
return False
return len(snapshot.rootSnapshotList) != 0
    def delete_snapshot(self, backing, name):
        """Delete a given snapshot from volume backing.

        This is a no-op when the named snapshot does not exist.

        :param backing: Reference to the backing entity
        :param name: Snapshot name
        """
        LOG.debug("Deleting the snapshot: %(name)s from backing: "
                  "%(backing)s.",
                  {'name': name, 'backing': backing})
        snapshot = self.get_snapshot(backing, name)
        if not snapshot:
            LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
                         "%(backing)s. Need not delete anything."),
                     {'name': name, 'backing': backing})
            return
        # removeChildren=False: child snapshots are preserved.
        task = self._session.invoke_api(self._session.vim,
                                        'RemoveSnapshot_Task',
                                        snapshot, removeChildren=False)
        LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
                  "%(backing)s.",
                  {'name': name, 'backing': backing})
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
                     "%(backing)s."), {'backing': backing, 'name': name})
    def _get_folder(self, backing):
        """Get parent folder of the backing.

        :param backing: Reference to the backing entity
        :return: Reference to parent folder of the backing entity
        """
        # 'Folder' is the managed object type to walk up to in the inventory.
        return self._get_parent(backing, 'Folder')
    def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
                        disk_type, host=None, resource_pool=None,
                        extra_config=None):
        """Get the clone spec.

        :param datastore: Reference to datastore
        :param disk_move_type: Disk move type
        :param snapshot: Reference to snapshot
        :param backing: Source backing VM
        :param disk_type: Disk type of clone
        :param host: Target host
        :param resource_pool: Target resource pool
        :param extra_config: Key-value pairs to be written to backing's
                             extra-config
        :return: Clone spec
        """
        # The disk device is needed only for disk type conversion.
        if disk_type is not None:
            disk_device = self._get_disk_device(backing)
        else:
            disk_device = None
        relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
                                                disk_move_type, disk_type,
                                                disk_device)
        cf = self._session.vim.client.factory
        clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
        clone_spec.location = relocate_spec
        # The clone is a regular, powered-off VM (not a template).
        clone_spec.powerOn = False
        clone_spec.template = False
        clone_spec.snapshot = snapshot
        if extra_config:
            config_spec = cf.create('ns0:VirtualMachineConfigSpec')
            config_spec.extraConfig = self._get_extra_config_option_values(
                extra_config)
            clone_spec.config = config_spec
        LOG.debug("Spec for cloning the backing: %s.", clone_spec)
        return clone_spec
    def clone_backing(self, name, backing, snapshot, clone_type, datastore,
                      disk_type=None, host=None, resource_pool=None,
                      extra_config=None):
        """Clone backing.

        If the clone_type is 'full', then a full clone of the source volume
        backing will be created. Else, if it is 'linked', then a linked clone
        of the source volume backing will be created.

        :param name: Name for the clone
        :param backing: Reference to the backing entity
        :param snapshot: Snapshot point from which the clone should be done
        :param clone_type: Whether a full clone or linked clone is to be made
        :param datastore: Reference to the datastore entity
        :param disk_type: Disk type of the clone
        :param host: Target host
        :param resource_pool: Target resource pool
        :param extra_config: Key-value pairs to be written to backing's
                             extra-config
        """
        LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
                  "clone type: %(type)s from snapshot: %(snap)s on "
                  "resource pool: %(resource_pool)s, host: %(host)s, "
                  "datastore: %(ds)s with disk type: %(disk_type)s.",
                  {'back': backing, 'name': name, 'type': clone_type,
                   'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
                   'host': host, 'resource_pool': resource_pool})
        # The clone is created in the same folder as the source backing.
        folder = self._get_folder(backing)
        # Linked clones share the parent's disk via a child delta backing;
        # full clones copy all disk backings.
        if clone_type == LINKED_CLONE_TYPE:
            disk_move_type = 'createNewChildDiskBacking'
        else:
            disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
        clone_spec = self._get_clone_spec(
            datastore, disk_move_type, snapshot, backing, disk_type, host=host,
            resource_pool=resource_pool, extra_config=extra_config)
        task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
                                        backing, folder=folder, name=name,
                                        spec=clone_spec)
        LOG.debug("Initiated clone of backing: %s.", name)
        task_info = self._session.wait_for_task(task)
        # The task result is the reference to the newly cloned VM.
        new_backing = task_info.result
        LOG.info(_LI("Successfully created clone: %s."), new_backing)
        return new_backing
    def _reconfigure_backing(self, backing, reconfig_spec):
        """Reconfigure backing VM with the given spec.

        :param backing: Reference to the backing VM
        :param reconfig_spec: VirtualMachineConfigSpec to apply
        """
        LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
                  {'backing': backing,
                   'spec': reconfig_spec})
        reconfig_task = self._session.invoke_api(self._session.vim,
                                                 "ReconfigVM_Task",
                                                 backing,
                                                 spec=reconfig_spec)
        LOG.debug("Task: %s created for reconfiguring backing VM.",
                  reconfig_task)
        # Block until the reconfiguration completes.
        self._session.wait_for_task(reconfig_task)
    def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
                               adapter_type, vmdk_ds_file_path):
        """Attach an existing virtual disk to the backing VM.

        :param backing: reference to the backing VM
        :param size_in_kb: disk size in KB
        :param disk_type: virtual disk type
        :param adapter_type: disk adapter type
        :param vmdk_ds_file_path: datastore file path of the virtual disk to
                                  be attached
        """
        LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
                  "%(path)s with size (KB): %(size)d and adapter type: "
                  "%(adapter_type)s.",
                  {'backing': backing,
                   'path': vmdk_ds_file_path,
                   'size': size_in_kb,
                   'adapter_type': adapter_type})
        cf = self._session.vim.client.factory
        reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
        # Passing the existing file path means no new backing file is created.
        specs = self._create_specs_for_disk_add(size_in_kb,
                                                disk_type,
                                                adapter_type,
                                                vmdk_ds_file_path)
        reconfig_spec.deviceChange = specs
        self._reconfigure_backing(backing, reconfig_spec)
        LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
    def rename_backing(self, backing, new_name):
        """Rename backing VM.

        :param backing: VM to be renamed
        :param new_name: new VM name
        """
        LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
                 {'backing': backing,
                  'new_name': new_name})
        rename_task = self._session.invoke_api(self._session.vim,
                                               "Rename_Task",
                                               backing,
                                               newName=new_name)
        LOG.debug("Task: %s created for renaming VM.", rename_task)
        # Block until the rename completes.
        self._session.wait_for_task(rename_task)
        LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
                 {'backing': backing,
                  'new_name': new_name})
    def change_backing_profile(self, backing, profile_id):
        """Change storage profile of the backing VM.

        The current profile is removed if the new profile is None.

        :param backing: Reference to the backing VM
        :param profile_id: New profile ID object, or None to clear the profile
        """
        LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
                  " %(profile)s.",
                  {'backing': backing,
                   'profile': profile_id})
        cf = self._session.vim.client.factory
        reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
        # An empty profile spec detaches the currently associated profile.
        if profile_id is None:
            vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
            vm_profile.dynamicType = 'profile'
        else:
            vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
            vm_profile.profileId = profile_id.uniqueId
        reconfig_spec.vmProfile = [vm_profile]
        self._reconfigure_backing(backing, reconfig_spec)
        LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
                  "%(profile)s.",
                  {'backing': backing,
                   'profile': profile_id})
    def update_backing_disk_uuid(self, backing, disk_uuid):
        """Update backing VM's disk UUID.

        :param backing: Reference to backing VM
        :param disk_uuid: New disk UUID
        """
        LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID "
                  "to: %(disk_uuid)s.",
                  {'backing': backing,
                   'disk_uuid': disk_uuid})
        # Fetch the current disk device, mutate its backing UUID and submit
        # it back as an 'edit' device change.
        disk_device = self._get_disk_device(backing)
        disk_device.backing.uuid = disk_uuid
        cf = self._session.vim.client.factory
        disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
        disk_spec.device = disk_device
        disk_spec.operation = 'edit'
        reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
        reconfig_spec.deviceChange = [disk_spec]
        self._reconfigure_backing(backing, reconfig_spec)
        LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: "
                  "%(disk_uuid)s.",
                  {'backing': backing,
                   'disk_uuid': disk_uuid})
    def delete_file(self, file_path, datacenter=None):
        """Delete file or folder on the datastore.

        :param file_path: Datastore path of the file or folder
        :param datacenter: Datacenter containing the target datastore
        """
        LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.",
                  {'file': file_path, 'dc': datacenter})
        fileManager = self._session.vim.service_content.fileManager
        task = self._session.invoke_api(self._session.vim,
                                        'DeleteDatastoreFile_Task',
                                        fileManager,
                                        name=file_path,
                                        datacenter=datacenter)
        LOG.debug("Initiated deletion via task: %s.", task)
        # Block until the deletion completes.
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully deleted file: %s."), file_path)
    def create_datastore_folder(self, ds_name, folder_path, datacenter):
        """Creates a datastore folder.

        This method returns silently if the folder already exists.

        :param ds_name: datastore name
        :param folder_path: path of folder to create
        :param datacenter: datacenter of target datastore
        """
        fileManager = self._session.vim.service_content.fileManager
        ds_folder_path = "[%s] %s" % (ds_name, folder_path)
        LOG.debug("Creating datastore folder: %s.", ds_folder_path)
        try:
            self._session.invoke_api(self._session.vim,
                                     'MakeDirectory',
                                     fileManager,
                                     name=ds_folder_path,
                                     datacenter=datacenter)
            LOG.info(_LI("Created datastore folder: %s."), folder_path)
        except exceptions.FileAlreadyExistsException:
            # An existing folder is fine; treat creation as idempotent.
            LOG.debug("Datastore folder: %s already exists.", folder_path)
def get_path_name(self, backing):
"""Get path name of the backing.
:param backing: Reference to the backing entity
:return: Path name of the backing
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'config.files').vmPathName
def get_entity_name(self, entity):
"""Get name of the managed entity.
:param entity: Reference to the entity
:return: Name of the managed entity
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, entity, 'name')
    def _get_disk_device(self, backing):
        """Get the virtual device corresponding to disk.

        :param backing: Reference to the backing VM
        :return: first VirtualDisk device found on the backing
        :raises: VirtualDiskNotFoundException when the backing has no disk
        """
        hardware_devices = self._session.invoke_api(vim_util,
                                                    'get_object_property',
                                                    self._session.vim,
                                                    backing,
                                                    'config.hardware.device')
        # The API binding may wrap the device list in ArrayOfVirtualDevice.
        if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
            hardware_devices = hardware_devices.VirtualDevice
        for device in hardware_devices:
            if device.__class__.__name__ == "VirtualDisk":
                return device
        LOG.error(_LE("Virtual disk device of "
                      "backing: %s not found."), backing)
        raise vmdk_exceptions.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
"""Get the vmdk file name of the backing.
The vmdk file path of the backing returned is of the form:
"[datastore1] my_folder/my_vm.vmdk"
:param backing: Reference to the backing
:return: VMDK file path of the backing
"""
disk_device = self._get_disk_device(backing)
backing = disk_device.backing
if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo":
msg = _("Invalid disk backing: %s.") % backing.__class__.__name__
LOG.error(msg)
raise AssertionError(msg)
return backing.fileName
def get_disk_size(self, backing):
"""Get disk size of the backing.
:param backing: backing VM reference
:return: disk size in bytes
"""
disk_device = self._get_disk_device(backing)
return disk_device.capacityInKB * units.Ki
def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type,
disk_type):
"""Return spec for file-backed virtual disk creation."""
cf = self._session.vim.client.factory
spec = cf.create('ns0:FileBackedVirtualDiskSpec')
spec.capacityKb = size_in_kb
spec.adapterType = VirtualDiskAdapterType.get_adapter_type(
adapter_type)
spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type)
return spec
    def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb,
                            adapter_type='busLogic', disk_type='preallocated'):
        """Create virtual disk with the given settings.

        :param dc_ref: datacenter reference
        :param vmdk_ds_file_path: datastore file path of the virtual disk
        :param size_in_kb: disk size in KB
        :param adapter_type: disk adapter type
        :param disk_type: vmdk type
        """
        virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb,
                                                               adapter_type,
                                                               disk_type)
        LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec)
        disk_manager = self._session.vim.service_content.virtualDiskManager
        task = self._session.invoke_api(self._session.vim,
                                        'CreateVirtualDisk_Task',
                                        disk_manager,
                                        name=vmdk_ds_file_path,
                                        datacenter=dc_ref,
                                        spec=virtual_disk_spec)
        LOG.debug("Task: %s created for virtual disk creation.", task)
        # Block until disk creation completes.
        self._session.wait_for_task(task)
        LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec)
    def create_flat_extent_virtual_disk_descriptor(
            self, dc_ref, path, size_in_kb, adapter_type, disk_type):
        """Create descriptor for a single flat extent virtual disk.

        To create the descriptor, we create a virtual disk and delete its flat
        extent.

        :param dc_ref: reference to the datacenter
        :param path: descriptor datastore file path
        :param size_in_kb: size of the virtual disk in KB
        :param adapter_type: virtual disk adapter type
        :param disk_type: type of the virtual disk
        """
        LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, "
                  "adapter_type: %(adapter_type)s and disk_type: "
                  "%(disk_type)s.",
                  {'path': path.get_descriptor_ds_file_path(),
                   'size': size_in_kb,
                   'adapter_type': adapter_type,
                   'disk_type': disk_type
                   })
        self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(),
                                 size_in_kb, adapter_type, disk_type)
        # Remove the data (flat extent) file, leaving only the descriptor.
        self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref)
        LOG.debug("Created descriptor: %s.",
                  path.get_descriptor_ds_file_path())
    def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path,
                       dest_vmdk_file_path, dest_dc_ref=None):
        """Copy contents of the src vmdk file to dest vmdk file.

        :param src_dc_ref: Reference to datacenter containing src datastore
        :param src_vmdk_file_path: Source vmdk file path
        :param dest_vmdk_file_path: Destination vmdk file path
        :param dest_dc_ref: Reference to datacenter of dest datastore.
                            If unspecified, source datacenter is used.
        """
        LOG.debug('Copying disk: %(src)s to %(dest)s.',
                  {'src': src_vmdk_file_path,
                   'dest': dest_vmdk_file_path})
        dest_dc_ref = dest_dc_ref or src_dc_ref
        diskMgr = self._session.vim.service_content.virtualDiskManager
        # NOTE(review): force=True presumably overwrites an existing
        # destination file — confirm against CopyVirtualDisk_Task semantics.
        task = self._session.invoke_api(self._session.vim,
                                        'CopyVirtualDisk_Task',
                                        diskMgr,
                                        sourceName=src_vmdk_file_path,
                                        sourceDatacenter=src_dc_ref,
                                        destName=dest_vmdk_file_path,
                                        destDatacenter=dest_dc_ref,
                                        force=True)
        LOG.debug("Initiated copying disk data via task: %s.", task)
        self._session.wait_for_task(task)
        LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s."),
                 {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
    def delete_vmdk_file(self, vmdk_file_path, dc_ref):
        """Delete given vmdk files.

        :param vmdk_file_path: VMDK file path to be deleted
        :param dc_ref: Reference to datacenter that contains this VMDK file
        """
        LOG.debug("Deleting vmdk file: %s.", vmdk_file_path)
        diskMgr = self._session.vim.service_content.virtualDiskManager
        task = self._session.invoke_api(self._session.vim,
                                        'DeleteVirtualDisk_Task',
                                        diskMgr,
                                        name=vmdk_file_path,
                                        datacenter=dc_ref)
        LOG.debug("Initiated deleting vmdk file via task: %s.", task)
        # Block until the deletion completes.
        self._session.wait_for_task(task)
        LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path)
    def get_profile(self, backing):
        """Query storage profile associated with the given backing.

        :param backing: backing reference
        :return: profile name, or None if no profile is associated
        """
        profile_ids = pbm.get_profiles(self._session, backing)
        # Use the first associated profile; falls through to None otherwise.
        if profile_ids:
            return pbm.get_profiles_by_ids(self._session, profile_ids)[0].name
    def _get_all_clusters(self):
        """Get all compute clusters in the vCenter inventory.

        :return: dict mapping cluster name to cluster reference
        """
        clusters = {}
        retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
                                                   self._session.vim,
                                                   'ClusterComputeResource',
                                                   self._max_objects)
        # Page through the property collector results until exhausted.
        while retrieve_result:
            if retrieve_result.objects:
                for cluster in retrieve_result.objects:
                    # Names come back URL-encoded; decode before keying.
                    name = urllib.parse.unquote(cluster.propSet[0].val)
                    clusters[name] = cluster.obj
            retrieve_result = self.continue_retrieval(retrieve_result)
        return clusters
def get_cluster_refs(self, names):
"""Get references to given clusters.
:param names: list of cluster names
:return: Dictionary of cluster names to references
"""
clusters = self._get_all_clusters()
for name in names:
if name not in clusters:
LOG.error(_LE("Compute cluster: %s not found."), name)
raise vmdk_exceptions.ClusterNotFoundException(cluster=name)
return {name: clusters[name] for name in names}
def get_cluster_hosts(self, cluster):
"""Get hosts in the given cluster.
:param cluster: cluster reference
:return: references to hosts in the cluster
"""
hosts = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
cluster,
'host')
host_refs = []
if hosts and hosts.ManagedObjectReference:
host_refs.extend(hosts.ManagedObjectReference)
return host_refs
|
{
"content_hash": "88096f72ab7d4ca81b0a362a119cfb3b",
"timestamp": "",
"source": "github",
"line_count": 1554,
"max_line_length": 79,
"avg_line_length": 43.68790218790219,
"alnum_prop": 0.5665699429968626,
"repo_name": "scality/cinder",
"id": "c238e32c62f59a176e26b7588eab50c2efcf1b3c",
"size": "68523",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/vmware/volumeops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13431518"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
}
|
import http.client as httplib
from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
from ..exceptions import HeaderParsingError
def is_fp_closed(obj: object) -> bool:
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    :raises ValueError:
        If the object exposes none of the known closed-state attributes.
    """
    # The probes below are ordered; each falls through only on
    # AttributeError (raised by the lookup OR inside the accessed member).
    try:
        # Check `isclosed()` first, in case Python3 doesn't set `closed`.
        # GH Issue #928
        return obj.isclosed()  # type: ignore[no-any-return, attr-defined]
    except AttributeError:
        pass
    try:
        # Check via the official file-like-object way.
        return obj.closed  # type: ignore[no-any-return, attr-defined]
    except AttributeError:
        pass
    try:
        # Check if the object is a container for another file-like object that
        # gets released on exhaustion (e.g. HTTPResponse).
        return obj.fp is None  # type: ignore[attr-defined]
    except AttributeError:
        pass
    raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers: httplib.HTTPMessage) -> None:
    """
    Asserts whether all headers have been successfully parsed.
    Extracts encountered errors from the result of parsing headers.
    Only works on Python 3.

    :param http.client.HTTPMessage headers: Headers to verify.
    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """
    # The checks below fail silently on the wrong parameter type, so make
    # debugging easier with an explicit type check up front.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError(f"expected httplib.Message, got {type(headers)}.")

    # A leftover payload on a non-multipart message means header parsing
    # stopped early and the remainder was stuffed into the body.
    unparsed_data = None
    if not headers.is_multipart():
        payload = headers.get_payload()
        if isinstance(payload, (bytes, str)):
            unparsed_data = payload

    # httplib feeds only header data to parse_headers(), while the email
    # machinery assumes a body is present. That produces spurious defects on
    # multipart responses (see https://github.com/urllib3/urllib3/issues/800):
    # - StartBoundaryNotFoundDefect: claimed start boundary never found
    # - MultipartInvariantViolationDefect: multipart without subparts
    # Both are expected noise here and are filtered out.
    ignored = (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
    defects = [
        defect for defect in headers.defects
        if not isinstance(defect, ignored)
    ]

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response: httplib.HTTPResponse) -> bool:
    """
    Report whether the request that produced *response* used the HEAD method.

    :param http.client.HTTPResponse response:
        Response to check if the originating request
        used 'HEAD' as a method.
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    request_method: str = response._method  # type: ignore[attr-defined]
    return request_method.upper() == "HEAD"
|
{
"content_hash": "92599db31d73e6559802ce35ed2ab331",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 87,
"avg_line_length": 33.717171717171716,
"alnum_prop": 0.6824445775913721,
"repo_name": "sigmavirus24/urllib3",
"id": "38ed9d3bed425ac9bde346f9d1e4299c51cbd84c",
"size": "3338",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/urllib3/util/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "877710"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
}
|
import asyncio
import os
from datetime import datetime
import pytest
import sqlalchemy as sa
from asyncpg.pool import Pool
from puckdb import db, fetch
from puckdb.db import get_connection_str, metadata
db_name = os.getenv('PUCKDB_DB_TEST_DATABASE', os.getenv('PUCKDB_DB_DATABASE'))
@pytest.fixture(scope='function')
async def pool():
    """Provide an asyncpg connection pool bound to the test database."""
    connection_pool = await db.get_pool(database=db_name)
    return connection_pool
@pytest.fixture(scope='function')
def database(event_loop: asyncio.AbstractEventLoop, pool: Pool):
    """Provide a freshly created schema, dropping it again on teardown."""
    sa_engine = sa.create_engine(get_connection_str(db_name))
    # Drop first in case a previous run left tables behind.
    metadata.drop_all(sa_engine)
    metadata.create_all(sa_engine)
    yield sa_engine
    metadata.drop_all(sa_engine)
@pytest.fixture(scope='function')
async def database_teams(event_loop: asyncio.AbstractEventLoop, database, pool: Pool):
    """Like ``database``, but with the teams already fetched into it."""
    await fetch.get_teams(pool=pool)
    yield database
class TestFetch:
    """Integration tests for ``puckdb.fetch`` against a test database."""

    @pytest.mark.asyncio
    async def test_get_games(self, database_teams, pool):
        """A single day's schedule comes back with the expected game count."""
        target_day = datetime(2016, 2, 23)
        fetched = await fetch.get_games(from_date=target_day, to_date=target_day, pool=pool)
        assert fetched is not None
        assert len(fetched) == 9

    @pytest.mark.asyncio
    async def test_get_game(self, database_teams, pool):
        """A regular-season game returns its full summary fields."""
        game = await fetch.get_game(2016021207, pool=pool)
        assert game['id'] == 2016021207
        assert game['version'] == 20170920092415
        assert game['season'] == 20162017
        assert game['type'] == 'regular'
        assert game['home'] == 9
        assert game['away'] == 3
        assert game['first_star'] == 8473544
        assert game['second_star'] == 8476419
        assert game['third_star'] == 8474884

    @pytest.mark.asyncio
    async def test_get_playoff_game(self, database_teams, pool):
        """A playoff game is reported with the 'playoff' type."""
        game = await fetch.get_game(2016030313, pool=pool)
        assert game['id'] == 2016030313
        assert game['season'] == 20162017
        assert game['type'] == 'playoff'
        assert game['home'] == 9
        assert game['away'] == 5
        assert game['first_star'] == 8467950
        assert game['second_star'] == 8471676
        assert game['third_star'] == 8470602

    @pytest.mark.asyncio
    async def test_get_teams(self, database, pool):
        """The full set of teams is retrieved."""
        teams = await fetch.get_teams(pool=pool)
        assert teams is not None
        assert len(teams) == 82
|
{
"content_hash": "33b0070e9a6577ae76863baf411525a0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 31.944444444444443,
"alnum_prop": 0.6569565217391304,
"repo_name": "aaront/puckdb",
"id": "31f403ed8c1faf45ce10764a4965674ac664c2fc",
"size": "2300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fetch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "145"
},
{
"name": "Jupyter Notebook",
"bytes": "3531"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "34750"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# Standard library imports
import json
import unittest
# Local imports
from binstar_client.errors import Conflict
from binstar_client.scripts.cli import main
from binstar_client.tests.urlmock import urlpatch
from binstar_client.tests.fixture import CLITestCase
class Test(CLITestCase):
    """CLI tests for the ``copy`` sub-command."""

    def _assert_label_payload(self, copy):
        """Assert the recorded copy request names the expected labels."""
        req = json.loads(copy.req.body)
        self.assertEqual(req['from_channel'], 'dev')
        self.assertEqual(req['to_channel'], 'release/xyz')

    @urlpatch
    def test_copy_label(self, urls):
        """A plain copy issues a POST to the copy endpoint."""
        urls.register(method='GET', path='/channels/u1', content='["dev"]')
        copy = urls.register(
            method='POST', path='/copy/package/u1/p1/1.0/', content='[{"basename": "copied-file_1.0.tgz"}]')
        main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
        urls.assertAllCalled()
        self._assert_label_payload(copy)

    @urlpatch
    def test_copy_replace(self, urls):
        """``--replace`` issues a PUT to the copy endpoint."""
        urls.register(method='GET', path='/channels/u1', content='["dev"]')
        copy = urls.register(
            method='PUT', path='/copy/package/u1/p1/1.0/', content='[{"basename": "copied-file_1.0.tgz"}]')
        main(
            ['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0', '--replace'],
            False)
        urls.assertAllCalled()
        self._assert_label_payload(copy)

    @urlpatch
    def test_copy_update(self, urls):
        """``--update`` issues a PATCH to the copy endpoint."""
        urls.register(method='GET', path='/channels/u1', content='["dev"]')
        copy = urls.register(
            method='PATCH', path='/copy/package/u1/p1/1.0/', content='[{"basename": "copied-file_1.0.tgz"}]')
        main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0', '--update'],
             False)
        urls.assertAllCalled()
        self._assert_label_payload(copy)

    @urlpatch
    def test_copy_file_conflict(self, urls):
        """A 409 response from the server surfaces as a Conflict error."""
        urls.register(method='GET', path='/channels/u1', content='["dev"]')
        copy = urls.register(
            method='POST', path='/copy/package/u1/p1/1.0/', status=409
        )
        with self.assertRaises(Conflict):
            main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
        urls.assertAllCalled()
        self._assert_label_payload(copy)

    @urlpatch
    def test_copy_argument_error(self, urls):
        """``--update`` and ``--replace`` together are rejected."""
        urls.register(method='GET', path='/channels/u1', content='["dev"]')
        with self.assertRaises(SystemExit):
            main([
                '--show-traceback', 'copy', '--from-label', 'dev',
                '--to-label', 'release/xyz', 'u1/p1/1.0', '--update', '--replace'], False)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "9cbd8b2076a0114e8750aa81d014f372",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 119,
"avg_line_length": 38.675,
"alnum_prop": 0.5872656755009696,
"repo_name": "Anaconda-Platform/anaconda-client",
"id": "3ede632933e7a574728729e1ee41bfe7aaa78a51",
"size": "3094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binstar_client/tests/test_copy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "101"
},
{
"name": "Jupyter Notebook",
"bytes": "2976"
},
{
"name": "Python",
"bytes": "318160"
},
{
"name": "Ruby",
"bytes": "8"
},
{
"name": "Shell",
"bytes": "10280"
}
],
"symlink_target": ""
}
|
import re
import os
import csv
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
from urlparse import urlparse
import time
HERE = os.path.abspath(os.path.dirname(__file__))
class ScrewfixComSpider(BaseSpider):
    """Spider that re-crawls a fixed list of screwfix.com product pages.

    URL/SKU pairs are read from ``screwfix_com.csv`` located next to this
    module; each row is fetched and parsed into a ``Product`` item.
    """
    name = 'screwfix.com-pro'
    allowed_domains = ['screwfix.com', 'www.screwfix.com']
    start_urls = ['http://www.screwfix.com/']

    def parse(self, response):
        """Yield one product-page request per non-empty CSV row."""
        with open(os.path.join(HERE, 'screwfix_com.csv')) as f:
            reader = csv.DictReader(f)
            for row in reader:
                if not row['url'].strip():
                    continue
                # Strip URL fragments; they would only create duplicate requests.
                url = re.sub(r'#.+$', '', row['url'])
                log.msg('URL: %s' % url)
                request = Request(url, callback=self.parse_product)
                request.meta['sku'] = row['sku']
                yield request

    def parse_product(self, response):
        """Extract name, price, sku and url into a ``Product`` item."""
        hxs = HtmlXPathSelector(response)
        product_loader = ProductLoader(item=Product(), selector=hxs)
        product_loader.add_xpath('name', '//h1[@id="product_description"]/text()')
        # Raw string: '\d' is an invalid escape sequence outside raw literals.
        prices = hxs.select('//p[@id="product_price"]/span/text()').re(r'(\d+(?:\.\d+))')
        if prices:
            product_loader.add_value('price', prices[0])
        else:
            # Previously this indexed [0] unconditionally and raised
            # IndexError on pages without a matching price element.
            log.msg('No price found on %s' % response.url, level=log.WARNING)
        product_loader.add_value('sku', response.meta['sku'])
        product_loader.add_value('url', response.url)
        yield product_loader.load_item()
|
{
"content_hash": "aedd7c6bd5c982b11f0dff600fbaf18a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 117,
"avg_line_length": 39.30952380952381,
"alnum_prop": 0.6238643246517263,
"repo_name": "ddy88958620/lib",
"id": "687f9c7513aa0c333ab159f2df793ed6e17f0483",
"size": "1651",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/scrapy/bosch_uk_professional/screwfix_com.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
# Proxy module: re-export everything from pyface.viewer.content_provider.
from pyface.viewer.content_provider import *
|
{
"content_hash": "41532c61fb387d56fa4edccd5507b871",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 45,
"alnum_prop": 0.8222222222222222,
"repo_name": "enthought/etsproxy",
"id": "cd633287d96bfaf02d2af0844762b4067e6c9051",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/viewer/content_provider.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2008 Georgios Giannoudovardis, <vardis.g@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import logging
from direct.interval.IntervalGlobal import *
from pano.constants import PanoConstants
from pano.control.fsm import FSMState
from pano.control.ExploreState import ExploreState
from pano.view.VideoPlayer import VideoPlayer
class IntroState(FSMState):
    '''
    Controls the state of the game when displaying the introductory videos.
    It displays the video sequences defined in the configuration and allows the user to interrupt
    the introduction by pressing a key.
    '''
    def __init__(self, gameRef = None, node = None):
        FSMState.__init__(self, gameRef, PanoConstants.STATE_INTRO)
        self.log = logging.getLogger('pano.introState')
        # player for the currently displayed video; created lazily per video
        self.videoPlayer = None
        # list of video files to play, we assume they have an audio as well
        self.videos = []
        # index to the active video
        self.activeVid = -1
        # delay between videos or images
        self.delay = 0
        # set to True when we are transitioning between videos or images
        self.transitioning = False
    def enter(self):
        """Read the configured video list and delay, and push the 'intro' input mappings."""
        # get a comma separated list of video to play in sequence
        vids = self.getGame().getConfig().get(PanoConstants.CVAR_INTRO_STATE_VIDEOS, '')
        if len(vids) == 0:
            self.videos = []
        else:
            self.videos = vids.split(',')
        self.delay = self.getGame().getConfig().getFloat(PanoConstants.CVAR_INTRO_STATE_DELAY, 0.0)
        self.log.debug("video intro delay: %f" % self.delay)
        self.transitioning = False
        self.game.getInput().pushMappings('intro')
    def exit(self):
        """Restore the previous input mappings and drop all playback state."""
        self.game.getInput().popMappings()
        self.videos = None
        self.activeVid = -1
        self.transitioning = False
    def update(self, millis):
        """Advance playback: start the first video, chain to the next when one
        finishes (honouring the configured delay), and schedule a switch to the
        explore state once the list is exhausted or empty."""
        if self.videos is None or len(self.videos) == 0:
            self.log.debug('No videos defined, switching to explore state')
            self.getGame().getState().scheduleStateChange(PanoConstants.STATE_EXPLORE)
            return
        # wait for transition to end
        if self.transitioning:
            return
        if self.activeVid < 0:
            # nothing started yet: begin with the first video in the list
            self.activeVid = 0
            self._playActiveIdVideo()
        else:
            if self.videoPlayer is None or self.videoPlayer.hasFinished():
                # active video ended, check if there are more
                self.activeVid += 1
                if self.activeVid < len(self.videos):
                    if self.delay > 0.0:
                        # defer the next video; the Wait interval fires
                        # _playActiveIdVideo asynchronously and clears the flag
                        self.transitioning = True
                        Sequence(Wait(self.delay), Func(self._playActiveIdVideo)).start()
                    else:
                        self._playActiveIdVideo()
                else:
                    # played through all the list, we are done
                    self._stopPlayback()
                    self.log.debug('all videos done')
                    self.getGame().getState().scheduleStateChange(PanoConstants.STATE_EXPLORE)
    def onInputAction(self, action):
        """Handle the 'interrupt' action by aborting the remaining videos.

        Returns True when the action was consumed, False otherwise."""
        if action == "interrupt":
            if self.log.isEnabledFor(logging.DEBUG):
                self.log.debug('Intro interrupted')
            self._stopPlayback()
            # mark the playlist as exhausted so update() leaves the intro state
            self.activeVid = len(self.videos)
            return True
        else:
            return False
    def _stopPlayback(self):
        """Stop and dispose the current video player, if any."""
        if self.videoPlayer is not None:
            self.videoPlayer.stop()
            self.videoPlayer.dispose()
            self.videoPlayer = None
    def _playActiveIdVideo(self):
        """Start full-screen playback of the video at index ``activeVid``."""
        assert self.activeVid < len(self.videos), 'activeId is out of range'
        # we are out of transition phase
        self.transitioning = False
        if self.videoPlayer is not None:
            self.videoPlayer.stop()
            self.videoPlayer.dispose()
        videoFile = self.videos[self.activeVid]
        self.videoPlayer = VideoPlayer('intro_player', self.getGame().getResources())
        # NOTE(review): the same filename is passed twice — presumably once
        # for the video and once for its audio track; confirm against VideoPlayer.
        self.videoPlayer.playFullScreen(videoFile, videoFile)
        anim = self.videoPlayer.getAnimInterface()
        if anim is None:
            self.log.error('Could not playback video ' + videoFile)
        else:
            anim.play()
|
{
"content_hash": "cb69452fc7ff49a7c28336bf8109aaeb",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 99,
"avg_line_length": 37.89473684210526,
"alnum_prop": 0.5868055555555556,
"repo_name": "vardis/pano",
"id": "e1c9ef79fc8832cf4a4836f09852ee8537875242",
"size": "5760",
"binary": false,
"copies": "1",
"ref": "refs/heads/Panorama",
"path": "src/pano/control/IntroState.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "566158"
}
],
"symlink_target": ""
}
|
import mock
from heat.engine import resource
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
from ..resources import role # noqa
# Minimal HOT template declaring a single OS::Keystone::Role resource;
# used as the fixture stack for the tests below.
keystone_role_template = {
    'heat_template_version': '2013-05-23',
    'resources': {
        'test_role': {
            'type': 'OS::Keystone::Role',
            'properties': {
                'name': 'test_role_1'
            }
        }
    }
}
# Resource type name under test.
RESOURCE_TYPE = 'OS::Keystone::Role'
class KeystoneRoleTest(common.HeatTestCase):
    """Unit tests for the contrib OS::Keystone::Role resource plug-in."""

    def setUp(self):
        super(KeystoneRoleTest, self).setUp()
        self.ctx = utils.dummy_context()
        # For unit testing purpose. Register resource provider explicitly.
        resource._register_class(RESOURCE_TYPE, role.KeystoneRole)
        self.stack = stack.Stack(
            self.ctx, 'test_stack_keystone',
            template.Template(keystone_role_template)
        )
        self.test_role = self.stack['test_role']
        # Stub out the keystone client so no real API calls are made;
        # ``self.roles`` is the mocked roles manager the assertions inspect.
        self.keystoneclient = mock.MagicMock()
        self.test_role.keystone = mock.MagicMock()
        self.test_role.keystone.return_value = self.keystoneclient
        self.roles = self.keystoneclient.client.roles

    def _get_mock_role(self):
        """Return a mock role object carrying a fixed id."""
        value = mock.MagicMock()
        role_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        value.id = role_id
        return value

    def test_resource_mapping(self):
        """The plug-in maps exactly the expected resource type."""
        mapping = role.resource_mapping()
        self.assertEqual(1, len(mapping))
        self.assertEqual(role.KeystoneRole, mapping[RESOURCE_TYPE])
        self.assertIsInstance(self.test_role, role.KeystoneRole)

    def test_role_handle_create(self):
        """handle_create creates the role by name and stores its id."""
        mock_role = self._get_mock_role()
        self.roles.create.return_value = mock_role
        # validate the properties
        self.assertEqual('test_role_1',
                         self.test_role.properties.get(role.KeystoneRole.NAME))
        self.test_role.handle_create()
        # validate role creation with given name
        self.roles.create.assert_called_once_with(name='test_role_1')
        # validate physical resource id
        self.assertEqual(mock_role.id, self.test_role.resource_id)

    def test_role_handle_create_default_name(self):
        """Without a NAME property the physical resource name is used."""
        # reset the NAME value to None, to make sure role is
        # created with physical_resource_name
        self.test_role.properties = mock.MagicMock()
        self.test_role.properties.get.return_value = None
        self.test_role.handle_create()
        # validate role creation with default name
        physical_resource_name = self.test_role.physical_resource_name()
        self.roles.create.assert_called_once_with(name=physical_resource_name)

    def test_role_handle_update(self):
        """handle_update renames the existing role."""
        self.test_role.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        # update the name property
        prop_diff = {role.KeystoneRole.NAME: 'test_role_1_updated'}
        self.test_role.handle_update(json_snippet=None,
                                     tmpl_diff=None,
                                     prop_diff=prop_diff)
        self.roles.update.assert_called_once_with(
            role=self.test_role.resource_id,
            name=prop_diff[role.KeystoneRole.NAME]
        )

    def test_role_handle_delete(self):
        """handle_delete removes the role by its resource id."""
        self.test_role.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        self.roles.delete.return_value = None
        self.assertIsNone(self.test_role.handle_delete())
        self.roles.delete.assert_called_once_with(
            self.test_role.resource_id
        )

    def test_role_handle_delete_resource_id_is_none(self):
        """Deleting a resource that was never created is a no-op."""
        # Bug fix: this previously assigned ``self.resource_id`` on the test
        # case itself, leaving the resource's id untouched; set the id on the
        # resource so the no-op delete path is exercised deliberately.
        self.test_role.resource_id = None
        self.assertIsNone(self.test_role.handle_delete())
        # assertFalse for consistency with the unittest-style assertions above.
        self.assertFalse(self.roles.delete.called)

    def test_role_handle_delete_not_found(self):
        """A NotFound from keystone during delete is tolerated."""
        exc = self.keystoneclient.NotFound
        self.roles.delete.side_effect = exc
        self.assertIsNone(self.test_role.handle_delete())
|
{
"content_hash": "d7a12528b5a4c269347936c298346f55",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 32.49180327868852,
"alnum_prop": 0.636226034308779,
"repo_name": "pshchelo/heat",
"id": "a2e7b557b8e71679cd846351eb07c2c51fdc8eb9",
"size": "4539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/heat_keystone/heat_keystone/tests/test_role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5949441"
},
{
"name": "Shell",
"bytes": "25070"
}
],
"symlink_target": ""
}
|
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.utils.http import int_to_base36
from inviter.models import OptOut
from inviter.views import import_attribute, TOKEN_GENERATOR
import shortuuid
FROM_EMAIL = getattr(settings, 'INVITER_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)
token_generator = import_attribute(TOKEN_GENERATOR)
def send_invite(invitee, inviter, url=None, opt_out_url=None, **kwargs):
    """
    Send the default invitation email assembled from
    ``inviter/email/subject.txt`` and ``inviter/email/body.txt``

    Both templates will receive all the ``kwargs``.

    :param invitee: The invited user
    :param inviter: The inviting user
    :param url: The invite URL
    :param subject_template: The template to render for the subject
    :param body_template: The template to render for the body
    :param opt_out_url: A URL where users can permanently opt out of invitations
    """
    # Pop the template overrides first so they do not leak into the
    # template context built below.
    subject_template = kwargs.pop('subject_template', 'inviter/email/subject.txt')
    body_template = kwargs.pop('body_template', 'inviter/email/body.txt')

    ctx = {'invitee': invitee, 'inviter': inviter}
    ctx.update(kwargs)
    # Bug fix: ``opt_out_url`` was accepted (and passed by ``invite``) but
    # never added to the context, so templates could not render the
    # opt-out link.
    ctx.update(site=Site.objects.get_current(), url=url,
               opt_out_url=opt_out_url)
    ctx = template.Context(ctx)

    subject = template.loader.get_template(subject_template)
    body = template.loader.get_template(body_template)

    subject = subject.render(ctx)
    body = body.render(ctx)

    subject = ' '.join(subject.split('\n'))  # No newlines in subject lines allowed
    send_mail(subject, body, FROM_EMAIL, [invitee.email])
def invite(email, inviter, sendfn=send_invite, resend=True, **kwargs):
    """
    Invite a given email address and return a ``(User, sent)`` tuple similar
    to the Django :meth:`django.db.models.Manager.get_or_create` method.

    * No user with ``email`` exists: create an inactive
      :class:`django.contrib.auth.models.User` with an unusable password,
      send the invitation and return ``(user, True)``.
    * An inactive user exists and ``resend`` is set: re-send the invitation
      and return ``(user, True)``.
    * An active user exists (or ``resend`` is unset): send nothing and
      return ``(user, False)``.
    * The email address is blocked: send nothing and return ``(None, False)``.

    To customize sending, pass in a new ``sendfn`` function as documented by
    :attr:`inviter.utils.send_invite`:

    ::

        sendfn = lambda invitee, inviter, **kwargs: 1
        invite("foo@bar.com", request.user, sendfn = sendfn)

    :param email: The email address
    :param inviter: The user inviting the email address
    :param sendfn: An email sending function. Defaults to :attr:`inviter.utils.send_invite`
    :param resend: Resend email to users that are not registered yet
    """
    if OptOut.objects.is_blocked(email):
        return None, False

    try:
        user = User.objects.get(email=email)
        # Active users are never re-invited; inactive ones only when
        # ``resend`` is requested.
        if user.is_active or not resend:
            return user, False
    except User.DoesNotExist:
        user = User.objects.create(username=shortuuid.uuid(),
                                   email=email,
                                   is_active=False)
        user.set_unusable_password()
        user.save()

    uid, token = int_to_base36(user.id), token_generator.make_token(user)
    url = reverse('inviter:register', args=(uid, token))
    opt_out_url = reverse('inviter:opt-out', args=(uid, token))
    kwargs.update(opt_out_url=opt_out_url)

    sendfn(user, inviter, url=url, **kwargs)
    return user, True
|
{
"content_hash": "4a8c25caa46b2182811e9f8bf453a53d",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 91,
"avg_line_length": 33.80508474576271,
"alnum_prop": 0.6565555277011782,
"repo_name": "caffeinehit/django-inviter",
"id": "1e332f380a23e68ccd998fc599bdd44ec48aed28",
"size": "3989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inviter/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32229"
}
],
"symlink_target": ""
}
|
"""Definitions for the `Line` class."""
import numexpr as ne
import numpy as np
from astropy import constants as c
from astropy import units as u
from mosfit.modules.seds.sed import SED
from mosfit.constants import SQRT_2_PI
# Important: Only define one ``Module`` class per file.
class Line(SED):
    """Line spectral energy distribution, modifies existing SED."""

    # Speed of light in cgs units (from astropy.constants).
    C_CONST = c.c.cgs.value

    def process(self, **kwargs):
        """Process module."""
        kwargs = self.prepare_input(self.key('luminosities'), **kwargs)
        prt = self._printer
        self._rest_t_explosion = kwargs[self.key('resttexplosion')]
        self._times = kwargs[self.key('rest_times')]
        self._seds = kwargs.get(self.key('seds'))
        self._bands = kwargs['all_bands']
        self._band_indices = kwargs['all_band_indices']
        self._sample_wavelengths = kwargs['sample_wavelengths']
        self._frequencies = kwargs['all_frequencies']
        self._luminosities = kwargs[self.key('luminosities')]
        self._line_wavelength = kwargs[self.key('line_wavelength')]
        self._line_width = kwargs[self.key('line_width')]
        self._line_time = kwargs[self.key('line_time')]
        self._line_duration = kwargs[self.key('line_duration')]
        self._line_amplitude = kwargs[self.key('line_amplitude')]
        # Short aliases. ``lw`` and ``ls`` are referenced *by name* inside
        # the numexpr expression string below — do not rename them.
        lw = self._line_wavelength  # noqa: F841
        ls = self._line_width
        cc = self.C_CONST
        # Some temp vars for speed.
        zp1 = 1.0 + kwargs[self.key('redshift')]
        czp1A = cc / (zp1 * u.Angstrom.cgs.scale)
        # Gaussian-in-time envelope of the line strength, one value per epoch.
        amps = self._line_amplitude * np.array([
            np.exp(-0.5 * (
                (x - self._rest_t_explosion - self._line_time) /
                self._line_duration) ** 2) for x in self._times])
        if self._seds is None:
            raise ValueError(prt.message('line_sed'))
        # Scale the continuum down by the line amplitude fraction; the line
        # component is added back per band below (presumably so total flux is
        # conserved — TODO confirm).
        seds = [x * (1.0 - amps[xi]) for xi, x in enumerate(self._seds)]
        amps *= self._luminosities / (ls * SQRT_2_PI)
        # Cache the Gaussian wavelength profile per band index (or per rest
        # frequency for synthetic bands) so it is computed at most once each.
        amps_dict = {}
        evaled = False
        for li, lum in enumerate(self._luminosities):
            bi = self._band_indices[li]
            if lum == 0.0:
                continue
            bind = czp1A / self._frequencies[li] if bi < 0 else bi
            if bind not in amps_dict:
                # Leave `rest_wavs` in Angstroms.
                if bi >= 0:
                    rest_wavs = self._sample_wavelengths[bi] / zp1
                else:
                    rest_wavs = np.array([bind])  # noqa: F841
                if not evaled:
                    amps_dict[bind] = ne.evaluate(
                        'exp(-0.5 * ((rest_wavs - lw) / ls) ** 2)')
                    evaled = True
                else:
                    # re_evaluate reuses the expression compiled above with
                    # the *current* local values of rest_wavs/lw/ls.
                    amps_dict[bind] = ne.re_evaluate()
            seds[li] += amps[li] * amps_dict[bind]
            # seds[li][np.isnan(seds[li])] = 0.0
        # Units of `seds` is ergs / s / Angstrom.
        return {'sample_wavelengths': self._sample_wavelengths,
                self.key('seds'): seds}
|
{
"content_hash": "9cc19b804a195a6fbd4ce1014c1964d7",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 72,
"avg_line_length": 36.59036144578313,
"alnum_prop": 0.548238393151136,
"repo_name": "villrv/MOSFiT",
"id": "8642868fc9f095c6c8607f843e3be3c3edeacb83",
"size": "3037",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mosfit/modules/seds/line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "15866"
},
{
"name": "Python",
"bytes": "529748"
},
{
"name": "Shell",
"bytes": "2454"
}
],
"symlink_target": ""
}
|
import socket
import pytest
import concurrent_server
from multiprocessing import Process
from server import ADDR
from time import sleep
###################################################
# Constants
###################################################
# NOTE(review): this module targets Python 2 — it calls ``.format`` on bytes
# literals below, which is unavailable on Python 3, and mixes a native-str
# "Date: {date}" into a bytes join.
CRLF = b'\r\n'
# NOTE(review): "GTM" looks like a typo for "GMT" — confirm before fixing,
# since the tests compare against this exact byte string.
DUMMY_DATE = b"Sun, 21 Jul 2001 23:32:15 GTM"
# Canned full responses for 200 and 500 statuses.
STATUS200 = b"".join([b"HTTP/1.1 200 OK\r\n",
                      b"DATE: Sun, 21 Jul 2001 23:32:15 GTM\r\n",
                      b"SERVER: Python/2.7.6\r\n",
                      b"\r\n"])
STATUS500 = b"".join([b"HTTP 500 Internal Server Error\r\n",
                      b"DATE: Sun, 21 Jul 2001 23:32:15 GTM\r\n",
                      b"SERVER: Python/2.7.6\r\n",
                      b"\r\n"])
# Skeleton request with named placeholders filled in by the tests below.
Response_SKEL = CRLF.join([b"{Response} {requri} {protocol}",
                           b"Host: {host}", "Date: {date}", CRLF]).lstrip(CRLF)
# Pre-built requests: one well-formed, plus variants with a bad method,
# bad protocol version, and a missing Host header.
REQ_GOOD = Response_SKEL.format(Response=b'get',
                                requri=b'http://www.host.com/images',
                                protocol=b"HTTP/1.1",
                                host=b"www.host.com",
                                date=DUMMY_DATE)
REQ_BAD_METHOD = Response_SKEL.format(Response=b'post',
                                      requri=b'http://www.host.com/images',
                                      protocol=b"HTTP/1.1",
                                      host=b"www.host.com",
                                      date=DUMMY_DATE)
REQ_BAD_PROTOCOL = Response_SKEL.format(Response=b'get',
                                        requri=b'http://www.host.com/images',
                                        protocol=b"HTTP/1.0",
                                        host=b"www.host.com",
                                        date=DUMMY_DATE)
REQ_BAD_HOST = CRLF.join([b"{Response} {requri} {protocol}",
                          b"Date: {date}"]).lstrip(CRLF).format(
    Response=b'Get',
    requri=b'http://www.host.com/images',
    protocol=b"HTTP/1.1", date=DUMMY_DATE)
###################################################
# Fixtures
###################################################
@pytest.yield_fixture()
def server_process(request):
    # Run the concurrent server in a daemon subprocess for the duration of
    # one test, terminating it on teardown.
    process = Process(target=concurrent_server.start_server)
    process.daemon = True
    process.start()
    # Give the server a moment to start before the test connects.
    sleep(0.1)
    def cleanup():
        process.terminate()
    # NOTE(review): ``pytest.yield_fixture`` is deprecated in modern pytest;
    # with a yield fixture the cleanup could simply run after the ``yield``
    # instead of via ``addfinalizer``.
    request.addfinalizer(cleanup)
    yield process
@pytest.fixture()
def client_socket():
    """Return a TCP client socket with address reuse enabled."""
    sock = socket.socket(
        socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP
    )
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    return sock
###################################################
# Helper Functions
###################################################
def parse_response(response):
    """Take an HTTP response and determine whether it is valid.

    Validates that:
    * the response is HTTP/1.1
    * the response includes a Date header
    * the response carries a numeric status code

    Returns ``(status_code, body)`` when all validations pass, and
    ``(False, None)`` otherwise.
    """
    response = response.strip(CRLF).strip()
    lines = response.split(CRLF)
    initial_line = lines[0]
    protocol = initial_line.split()[0].strip().upper()
    response_code = initial_line.split()[1].strip()
    # Bug fix: only lines containing ':' contribute header names. The
    # original filtered into ``headers`` and then immediately rebuilt the
    # list from every line, silently discarding the filter.
    header_lines = [line for line in lines if b':' in line]
    headers = {line.split(b':')[0].strip().upper() for line in header_lines}
    # ``isdigit`` works for both py2 str and py3 bytes, whereas int(bytes)
    # raises TypeError (not ValueError) on py3.
    if not response_code.isdigit():
        to_return = False, None
    elif b'DATE' not in headers:
        to_return = False, None
    elif b'HTTP/1.1' not in protocol:
        to_return = False, None
    else:
        # HTTP response passes all prior checks, pass response code back
        to_return = response_code, lines[-1]
    return to_return
###################################################
# Functional Tests
###################################################
def test_functional_test_of_bad_request(server_process, client_socket):
    """A malformed request yields a 405 response."""
    client_socket.connect(ADDR)
    client_socket.sendall(b"Hello there.")
    while True:
        response = client_socket.recv(1024)
        if len(response) < 1024:
            break
    # Compare against bytes for consistency with the other tests in this
    # module; the native-str literal "405" only matched under Python 2.
    assert parse_response(response)[0] == b"405"
def test_functional_test_of_good_request(server_process, client_socket):
    """A well-formed request yields a 200 response."""
    client_socket.connect(ADDR)
    client_socket.sendall(REQ_GOOD)
    while True:
        response = client_socket.recv(1024)
        if len(response) < 1024:
            break
    # Compare against bytes for consistency with the other tests in this
    # module; the native-str literal "200" only matched under Python 2.
    assert parse_response(response)[0] == b"200"
def test_functional_request_of_dir(server_process, client_socket):
    # Request the server root and expect an HTML directory listing that
    # names the HTML file but not the image in a subdirectory.
    request = Response_SKEL.format(Response=b'get',
                                   requri=b'http://www.host.com/',
                                   protocol=b"HTTP/1.1", host=b"www.host.com",
                                   date=DUMMY_DATE)
    client_socket.connect(ADDR)
    client_socket.sendall(request)
    while True:
        response = client_socket.recv(1024)
        if len(response) < 1024:
            break
    assert b'text/html' in response
    assert parse_response(response)[0] == b'200'
    assert b'a_web_page.html' in response
    assert b'JPEG_example.jpg' not in response
def test_functional_request_of_dir_listing(server_process, client_socket):
    """Request the server root and expect an HTML directory listing.

    Renamed: this was a byte-for-byte duplicate of
    ``test_functional_request_of_dir``; the duplicate name shadowed the
    earlier definition, so only one of the two ever ran.
    """
    request = Response_SKEL.format(Response=b'get',
                                   requri=b'http://www.host.com/',
                                   protocol=b"HTTP/1.1", host=b"www.host.com",
                                   date=DUMMY_DATE)
    client_socket.connect(ADDR)
    client_socket.sendall(request)
    while True:
        response = client_socket.recv(1024)
        if len(response) < 1024:
            break
    assert b'text/html' in response
    assert parse_response(response)[0] == b'200'
    assert b'a_web_page.html' in response
    assert b'JPEG_example.jpg' not in response
def test_functional_request_of_text_file(server_process, client_socket):
    """A .txt resource is served as text/plain with its contents intact."""
    fields = dict(Response=b'get',
                  requri=b'http://www.host.com/sample.txt',
                  protocol=b"HTTP/1.1", host=b"www.host.com",
                  date=DUMMY_DATE)
    client_socket.connect(ADDR)
    client_socket.sendall(Response_SKEL.format(**fields))
    while True:
        # a short read marks the end; only the final chunk is inspected
        response = client_socket.recv(1024)
        if len(response) < 1024:
            break
    assert b'text/plain' in response
    assert parse_response(response)[0] == b'200'
    assert b'This is a very simple text file.' in response
def test_functional_request_of_image_file(server_process, client_socket):
    """A .jpg resource is served with status 200 and an image/jpeg type."""
    request = Response_SKEL.format(Response=b'get',
                                   requri=b'http://www.host.com/images/JPEG_example.jpg',
                                   protocol=b"HTTP/1.1", host=b"www.host.com",
                                   date=DUMMY_DATE)
    client_socket.connect(ADDR)
    client_socket.sendall(request)
    # Accumulate the whole multi-chunk response. Start from a bytes literal
    # (not a native str) so the += concatenation stays type-correct.
    response = b""
    while True:
        acc = client_socket.recv(1024)
        response += acc
        if len(acc) < 1024:
            break
    code, body = parse_response(response)
    assert code == b'200'
    assert b'image/jpeg' in response[:-2]
|
{
"content_hash": "cb7cfe2e99df633b61ac735b4a2904f0",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 78,
"avg_line_length": 33.47663551401869,
"alnum_prop": 0.5501116694584032,
"repo_name": "jay-tyler/http-server",
"id": "4e7336299a42afa7177054a34bde99f1a9574619",
"size": "7188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_concurrent_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "136"
},
{
"name": "Python",
"bytes": "23871"
}
],
"symlink_target": ""
}
|
from ...errors.httpbadrequestexception import HttpBadRequestException
import saklient
# module saklient.cloud.errors.paramresnotfoundexception
class ParamResNotFoundException(HttpBadRequestException):
    ## Invalid request: the resource specified by the parameter does not
    ## exist. Please check the ID.

    ## @param {int} status
    #  @param {str} code=None
    #  @param {str} message=""
    def __init__(self, status, code=None, message=""):
        # The default (Japanese) message is part of the public API surface
        # and is kept verbatim; a caller-supplied message overrides it.
        super(ParamResNotFoundException, self).__init__(status, code, "不適切な要求です。パラメータで指定されたリソースが存在しません。IDをご確認ください。" if message is None or message == "" else message)
|
{
"content_hash": "2119a50660e5c4e51d6d77616c23045e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 165,
"avg_line_length": 40.785714285714285,
"alnum_prop": 0.7338003502626971,
"repo_name": "hnakamur/saklient.python",
"id": "eb1edbe2a382b6334d5d843eeeaab55799771f11",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saklient/cloud/errors/paramresnotfoundexception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "539448"
},
{
"name": "Shell",
"bytes": "874"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import unittest
from unittest.mock import call, patch
import pytest
from hdfs import HdfsError
from airflow.models.connection import Connection
from airflow.providers.apache.hdfs.hooks.webhdfs import AirflowWebHDFSHookException, WebHDFSHook
class TestWebHDFSHook(unittest.TestCase):
    """Unit tests for WebHDFSHook.

    Covers connection-URL construction across port/schema/SSL variants,
    failover over comma-separated hosts, Kerberos vs. insecure clients, and
    the check_for_path/load_file helpers.  All network layers are mocked.
    """

    def setUp(self):
        # Fresh hook per test; connections come from the patched get_connection.
        self.webhdfs_hook = WebHDFSHook()

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.requests.Session", return_value="session")
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1.com,host_2.com", port=321, login="user"),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_get_conn_without_schema(
        self, socket_mock, mock_get_connection, mock_insecure_client, mock_session
    ):
        """URLs are http://host:port (no schema path); first-host failure fails over."""
        # First client raises HdfsError, so the hook must try the second host.
        mock_insecure_client.side_effect = [HdfsError("Error"), mock_insecure_client.return_value]
        socket_mock.socket.return_value.connect_ex.return_value = 0
        conn = self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        hosts = connection.host.split(",")
        mock_insecure_client.assert_has_calls(
            [
                call(
                    f"http://{host}:{connection.port}",
                    user=connection.login,
                    session=mock_session.return_value,
                )
                for host in hosts
            ]
        )
        mock_insecure_client.return_value.status.assert_called_once_with("/")
        assert conn == mock_insecure_client.return_value

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.requests.Session", return_value="session")
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1.com,host_2.com", port=321, schema="schema", login="user"),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_get_conn_with_schema(self, socket_mock, mock_get_connection, mock_insecure_client, mock_session):
        """Connection.schema is appended to the URL path: http://host:port/schema."""
        mock_insecure_client.side_effect = [HdfsError("Error"), mock_insecure_client.return_value]
        socket_mock.socket.return_value.connect_ex.return_value = 0
        conn = self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        hosts = connection.host.split(",")
        mock_insecure_client.assert_has_calls(
            [
                call(
                    f"http://{host}:{connection.port}/{connection.schema}",
                    user=connection.login,
                    session=mock_session.return_value,
                )
                for host in hosts
            ]
        )
        mock_insecure_client.return_value.status.assert_called_once_with("/")
        assert conn == mock_insecure_client.return_value

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.requests.Session", return_value="session")
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1.com,host_2.com", login="user"),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_get_conn_without_port_schema(
        self, socket_mock, mock_get_connection, mock_insecure_client, mock_session
    ):
        """Neither port nor schema: the URL is just http://host."""
        mock_insecure_client.side_effect = [HdfsError("Error"), mock_insecure_client.return_value]
        socket_mock.socket.return_value.connect_ex.return_value = 0
        conn = self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        hosts = connection.host.split(",")
        mock_insecure_client.assert_has_calls(
            [
                call(
                    f"http://{host}",
                    user=connection.login,
                    session=mock_session.return_value,
                )
                for host in hosts
            ]
        )
        mock_insecure_client.return_value.status.assert_called_once_with("/")
        assert conn == mock_insecure_client.return_value

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.requests.Session", create=True)
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1.com,host_2.com", login="user", password="password"),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_get_conn_with_password_without_port_schema(
        self, socket_mock, mock_get_connection, mock_insecure_client, mock_session
    ):
        """A password on the connection still yields an InsecureClient with http URLs."""
        mock_insecure_client.side_effect = [HdfsError("Error"), mock_insecure_client.return_value]
        socket_mock.socket.return_value.connect_ex.return_value = 0
        conn = self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        hosts = connection.host.split(",")
        mock_insecure_client.assert_has_calls(
            [
                call(
                    f"http://{host}",
                    user=connection.login,
                    session=mock_session.return_value,
                )
                for host in hosts
            ]
        )
        mock_insecure_client.return_value.status.assert_called_once_with("/")
        assert conn == mock_insecure_client.return_value

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient", side_effect=HdfsError("Error"))
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_2", port=321, login="user"),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_get_conn_hdfs_error(self, socket_mock, mock_get_connection, mock_insecure_client):
        """If every host raises HdfsError the hook raises AirflowWebHDFSHookException."""
        socket_mock.socket.return_value.connect_ex.return_value = 0
        with pytest.raises(AirflowWebHDFSHookException):
            self.webhdfs_hook.get_conn()

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.requests.Session", return_value="session")
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.KerberosClient", create=True)
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1", port=123),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs._kerberos_security_mode", return_value=True)
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_get_conn_kerberos_security_mode(
        self,
        socket_mock,
        mock_kerberos_security_mode,
        mock_get_connection,
        mock_kerberos_client,
        mock_session,
    ):
        """With kerberos security enabled, a KerberosClient is built instead."""
        socket_mock.socket.return_value.connect_ex.return_value = 0
        conn = self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        mock_kerberos_client.assert_called_once_with(
            f"http://{connection.host}:{connection.port}", session=mock_session.return_value
        )
        assert conn == mock_kerberos_client.return_value

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook._find_valid_server", return_value=None)
    def test_get_conn_no_connection_found(self, mock_get_connection):
        """No reachable server at all also raises AirflowWebHDFSHookException."""
        with pytest.raises(AirflowWebHDFSHookException):
            self.webhdfs_hook.get_conn()

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_conn")
    def test_check_for_path(self, mock_get_conn):
        """check_for_path() truth-tests client.status(path, strict=False)."""
        hdfs_path = "path"
        exists_path = self.webhdfs_hook.check_for_path(hdfs_path)
        mock_get_conn.assert_called_once_with()
        mock_status = mock_get_conn.return_value.status
        mock_status.assert_called_once_with(hdfs_path, strict=False)
        assert exists_path == bool(mock_status.return_value)

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_conn")
    def test_load_file(self, mock_get_conn):
        """load_file() delegates to client.upload with overwrite=True, one thread."""
        source = "source"
        destination = "destination"
        self.webhdfs_hook.load_file(source, destination)
        mock_get_conn.assert_called_once_with()
        mock_upload = mock_get_conn.return_value.upload
        mock_upload.assert_called_once_with(
            hdfs_path=destination, local_path=source, overwrite=True, n_threads=1
        )

    def test_simple_init(self):
        """Default construction leaves proxy_user unset."""
        hook = WebHDFSHook()
        assert hook.proxy_user is None

    def test_init_proxy_user(self):
        """proxy_user passed to the constructor is stored as-is."""
        hook = WebHDFSHook(proxy_user="someone")
        assert "someone" == hook.proxy_user

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.KerberosClient", create=True)
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(
            host="host_1", port=123, extra={"use_ssl": "True", "verify": "/ssl/cert/path"}
        ),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs._kerberos_security_mode", return_value=True)
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_conn_kerberos_ssl(
        self, socket_mock, mock_kerberos_security_mode, mock_get_connection, mock_kerberos_client
    ):
        """use_ssl=True switches to https and wires `verify` into the session."""
        socket_mock.socket.return_value.connect_ex.return_value = 0
        self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        assert f"https://{connection.host}:{connection.port}" == mock_kerberos_client.call_args[0][0]
        assert "/ssl/cert/path" == mock_kerberos_client.call_args[1]["session"].verify

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(
            host="host_1", port=123, schema="schema", extra={"use_ssl": "True", "verify": False}
        ),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_conn_insecure_ssl_with_port_schema(self, socket_mock, mock_get_connection, mock_insecure_client):
        """https URL includes port and schema; verify=False disables cert checks."""
        socket_mock.socket.return_value.connect_ex.return_value = 0
        self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        assert (
            f"https://{connection.host}:{connection.port}/{connection.schema}"
            == mock_insecure_client.call_args[0][0]
        )
        assert not mock_insecure_client.call_args[1]["session"].verify

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1", schema="schema", extra={"use_ssl": "True", "verify": False}),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_conn_insecure_ssl_without_port(self, socket_mock, mock_get_connection, mock_insecure_client):
        """https URL with schema but no port component."""
        socket_mock.socket.return_value.connect_ex.return_value = 0
        self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        assert f"https://{connection.host}/{connection.schema}" == mock_insecure_client.call_args[0][0]
        assert not mock_insecure_client.call_args[1]["session"].verify

    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.InsecureClient")
    @patch(
        "airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook.get_connection",
        return_value=Connection(host="host_1", port=123, extra={"use_ssl": "True", "verify": False}),
    )
    @patch("airflow.providers.apache.hdfs.hooks.webhdfs.socket")
    def test_conn_insecure_ssl_without_schema(self, socket_mock, mock_get_connection, mock_insecure_client):
        """https URL with port but no schema component."""
        socket_mock.socket.return_value.connect_ex.return_value = 0
        self.webhdfs_hook.get_conn()
        connection = mock_get_connection.return_value
        assert f"https://{connection.host}:{connection.port}" == mock_insecure_client.call_args[0][0]
        assert not mock_insecure_client.call_args[1]["session"].verify
|
{
"content_hash": "0d288d50aa57d0d675deb0d68b180a58",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 110,
"avg_line_length": 46.22348484848485,
"alnum_prop": 0.6599196918790461,
"repo_name": "apache/airflow",
"id": "71e1231dac3d9cd9026db5f3ffd6406ef716febf",
"size": "12990",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/providers/apache/hdfs/hooks/test_webhdfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
}
|
import logging
from gensim.models.word2vec import Word2Vec, Text8Corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def pprint(result):
    """Print one 'word<TAB>score' line per (word, score) pair in *result*."""
    for word, score in result:
        line = "%s\t%f" % (word, score)
        print(line)
def generate_model(filename, **params):
    """Train a Word2Vec model over *filename* and persist it to disk.

    The save path encodes the corpus filename and the repr of the
    hyper-parameter dict, e.g. w2v-<file>-<params>.model.
    """
    corpus = Text8Corpus(filename)
    model = Word2Vec(corpus, **params)
    model.save("w2v-%s-%s.model" % (filename, repr(params)))
    return model
# Sweep the context window size; other hyper-parameters stay fixed.
for window in [1, 3, 5, 7, 10]:
    model = generate_model("prettified.txt",
                           window=window, size=150,
                           workers=4, min_count=10)
    print(window)
    print("-" * 10)
    for word in ["初音ミク", "炎上", "フォロー"]:
        print("---")
        pprint(model.most_similar([word]))
# Sweep the vector size. (size=200 caused a segfault, so it is skipped.)
for size in [10, 50, 100, 150]:
    # Bug fix: the trained model was previously discarded here, so the
    # similarity dump below kept reusing the last model from the window sweep.
    model = generate_model("prettified.txt",
                           window=5, size=size,
                           workers=4, min_count=10)
    print(size)
    print("-" * 10)
    for word in ["初音ミク", "炎上", "フォロー"]:
        print("---")
        pprint(model.most_similar([word]))
|
{
"content_hash": "eeeb8a862759a8050e1f2eaccca77aca",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 91,
"avg_line_length": 28.45945945945946,
"alnum_prop": 0.597340930674264,
"repo_name": "yamitzky/word2vec-japanese-twitter",
"id": "b59b16417de24162518c1445cd9e476432f0ef57",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2219"
}
],
"symlink_target": ""
}
|
import inspect
from ...listener import EventListener
class Trampoline(object):
    """Callable stub that forwards its keyword arguments onto an event queue.

    Each instance remembers the listener-function name it stands in for and,
    when invoked, enqueues a {'fn': name, 'kwargs': ...} record.
    """

    def __init__(self, name, events, lock):
        self.name = name
        self.events = events
        self.lock = lock

    def __call__(self, **kwargs):
        event = {'fn': self.name, 'kwargs': kwargs}
        with self.lock:
            try:
                self.events.put_nowait(event)
            except Exception:
                # Best-effort delivery: a full/closed queue drops the event silently.
                pass
class WuiListener(object):
    """Mirror of EventListener whose callbacks forward events onto a queue.

    Every function defined on EventListener is replaced on this instance by a
    Trampoline bound to the shared *events* queue and *lock*.
    """

    def __init__(self, events, lock):
        members = inspect.getmembers(EventListener, predicate=inspect.isfunction)
        for name, _ in members:
            setattr(self, name, Trampoline(name=name, events=events, lock=lock))
|
{
"content_hash": "e03bb0f8d904b54cacb28231b4c628bb",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 25.6,
"alnum_prop": 0.5890625,
"repo_name": "renatahodovan/fuzzinator",
"id": "2e32e18053ee7470f848655429b6f72892060f32",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fuzzinator/ui/wui/wui_listener.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "339"
},
{
"name": "C++",
"bytes": "3678"
},
{
"name": "CSS",
"bytes": "10728"
},
{
"name": "HTML",
"bytes": "44477"
},
{
"name": "JavaScript",
"bytes": "25491"
},
{
"name": "Makefile",
"bytes": "1755"
},
{
"name": "Python",
"bytes": "412762"
}
],
"symlink_target": ""
}
|
import json
import datetime
from libdlt.protocol.exceptions import AllocationError
from libdlt.protocol.ibp.allocation import IBPExtent
from libdlt.depot import Depot
import libdlt.protocol.ibp.services as services
import libdlt.protocol.ibp.flags as flags
from lace import logging
from lace.logging import trace
from unis.models import Extent
# construct adaptor from existing metadata
@trace.info("libdlt.IBP.factory")
def buildAllocation(json):
    """Build an IBPAdaptor from existing allocation metadata.

    :param json: allocation metadata as a JSON string, a dict, or an IBPExtent.
    :return: IBPAdaptor wrapping the (decoded) extent.
    :raises AllocationError: if the string cannot be decoded or the type is wrong.
    """
    if type(json) is str:
        # Bug fix: the parameter shadows the stdlib `json` module, so the old
        # `json.loads(json)` called .loads on a *string* (AttributeError).
        # Re-import the module under an alias for the decode.
        import json as _json
        try:
            json = _json.loads(json)
        except Exception as exp:
            # Bug fix: `logger` was undefined here; obtain the module logger the
            # same way IBPAdaptor.__init__ does (lace.logging mirrors stdlib).
            logging.getLogger("libdlt").warn("{func:>20}| Could not decode allocation - {exp}".format(func = "buildAllocation", exp = exp))
            raise AllocationError("Could not decode json")
    if isinstance(json, IBPExtent):
        alloc = json
    elif isinstance(json, dict):
        alloc = IBPExtent(json)
    else:
        raise AllocationError("Invalid input type")
    alloc.depot = Depot(alloc.location)
    return IBPAdaptor(alloc)
# create a new object and metadata given data and depot target
@trace.info("libdlt.IBP.factory")
def makeAllocation(data, offset, depot, **kwds):
    """Allocate space on *depot* and write *data* at *offset*.

    :return: IBPAdaptor for the new allocation.
    :raises AllocationError: if allocation or the initial write fails.
    """
    try:
        return IBPAdaptor(data=data, offset=offset, depot=depot, **kwds)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed into an AllocationError.
        raise AllocationError("Failed to generate allocation")
class IBPAdaptor(object):
    """Adaptor around one IBP allocation, exposing read/write/copy/manage
    operations through the IBP protocol service."""

    @trace.debug("libdlt.IBPAdaptor")
    def __init__(self, alloc=None, data=None, offset=None, depot=None, **kwds):
        """Wrap an existing allocation, or allocate on *depot* and write *data*."""
        self.log = logging.getLogger("libdlt")
        self._service = services.ProtocolService()
        if data:
            self._allocation = self._service.allocate(depot, offset, len(data), **kwds)
            self.write(data, **kwds)
        else:
            self._allocation = alloc

    @trace.info("libdlt.IBPAdaptor")
    def getMetadata(self):
        """Return the underlying allocation metadata object."""
        return self._allocation

    @trace.info("libdlt.IBPAdaptor")
    def read(self, **kwds):
        """Load and return the allocation's data."""
        return self._service.load(self._allocation, **kwds)

    @trace.info("libdlt.IBPAdaptor")
    def write(self, data, **kwds):
        """Store *data* into the allocation; prints a traceback and re-raises on failure."""
        try:
            self._service.store(self._allocation, data, len(data), **kwds)
        except Exception:
            import traceback
            traceback.print_exc()
            raise
    @trace.info("libdlt.IBPAdaptor")
    def check(self, **kwds):
        """Probe the depot and allocation; refresh the expiry from 'duration'.

        :raises AllocationError: if the depot or allocation cannot be reached.
        """
        depot_status = self._service.getStatus(self._allocation.depot)
        if not depot_status:
            raise AllocationError("could not contact Depot")
        alloc_status = self._service.probe(self._allocation)
        # Bug fix: was `self._log`, but __init__ stores the logger as `self.log`,
        # so this line always raised AttributeError.
        self.log.debug("IBPAdapter.Check: {status}".format(status = alloc_status))
        if not alloc_status:
            raise AllocationError("Could not retrieve status")
        if "duration" in alloc_status:
            self._allocation.end = datetime.datetime.utcnow() + datetime.timedelta(seconds = int(alloc_status["duration"]))
        return True

    @trace.info("libdlt.IBPAdaptor")
    def copy(self, destination, src_kwargs=None, dst_kwargs=None, **kwds):
        """Allocate on *destination* and transfer this allocation's data to it.

        :return: the new adaptor on success, False on failure.

        src_kwargs/dst_kwargs were never used in the body; they now default to
        None (backward compatible) so callers such as move() may omit them.
        """
        offset = kwds.get("offset", 0)
        size = kwds.get("size", self._allocation.depotSize - offset)
        dest_alloc = buildAllocation(self._allocation.to_JSON())
        response = self._service.allocate(destination, size, **kwds)
        if not response:
            return False
        dest_alloc._allocation.setReadCapability(str(response.getReadCapability()))
        dest_alloc._allocation.setWriteCapability(str(response.getWriteCapability()))
        dest_alloc._allocation.setManageCapability(str(response.getManageCapability()))
        dest_alloc._allocation.depot = response.depot
        dest_alloc._allocation.location = response.location
        dest_alloc.offset = offset
        del dest_alloc._allocation.function
        duration = self._service.send(self._allocation, dest_alloc, **kwds)
        if not duration:
            return False
        dest_alloc._allocation.start = datetime.datetime.utcnow()
        dest_alloc._allocation.end = datetime.datetime.utcnow() + datetime.timedelta(seconds = duration)
        return dest_alloc

    @trace.info("libdlt.IBPAdaptor")
    def move(self, destination, **kwds):
        """Copy this allocation to *destination* (previously broken: copy()
        required two extra positional arguments that move() never supplied)."""
        return self.copy(destination, **kwds)

    @trace.info("libdlt.IBPAdaptor")
    def release(self):
        """Decrement every read reference so the depot can reclaim the allocation.

        :return: True when all decrements succeed, False otherwise.
        """
        details = self._service.probe(self._allocation)
        self._allocation.end = datetime.datetime.utcnow()
        if details:
            for i in range(1, int(details["read_count"]) + 1):
                result = self._service.manage(self._allocation, mode = flags.IBP_DECR, cap_type = flags.IBP_READCAP)
                if not result:
                    return False
            return True
        else:
            return False

    @trace.info("libdlt.IBPAdaptor")
    def manage(self, **kwds):
        """Apply a manage operation to the allocation.

        Returns False on failure and (historically) None on success; kept
        as-is so existing truthiness checks by callers are unaffected.
        """
        if not self._service.manage(self._allocation, **kwds):
            return False
        #####################
        # FOR DEBUGGING ONLY#
        status = self._service.probe(self._allocation)
        self.log.debug("Manage result: {status}".format(status = status))
        #####################
        if "duration" in kwds:
            self._allocation.end = datetime.datetime.utcnow() + datetime.timedelta(seconds = kwds["duration"])

    def __eq__(self, other):
        """Adaptors are equal when they share the same read capability."""
        if type(other) is IBPAdaptor:
            return str(self._allocation.getReadCapability()) == str(other._allocation.getReadCapability())
        else:
            return NotImplemented

    def __ne__(self, other):
        """Inverse of __eq__.  Bug fix: previously compared self's capability
        against itself, so __ne__ always returned False."""
        if type(other) is IBPAdaptor:
            return str(self._allocation.getReadCapability()) != str(other._allocation.getReadCapability())
        else:
            return NotImplemented

    def __cmp__(self, other):
        """Python-2-style ordering: by allocation timestamp against another
        adaptor, or by expiry against a datetime.

        :raises TypeError: for any other operand type.
        """
        if type(other) is IBPAdaptor:
            if self._allocation.timestamp < other._allocation.timestamp:
                return -1
            elif self._allocation.timestamp == other._allocation.timestamp:
                return 0
            else:
                return 1
        elif type(other) is datetime.datetime:
            if self._allocation.end < other:
                return -1
            elif self._allocation.end == other:
                return 0
            else:
                return 1
        else:
            raise TypeError("Cannot compare {t1} and {t2}".format(t1 = type(self), t2 = type(other)))
|
{
"content_hash": "2ddc89e4fda4baac5426ffc1b24ed9ec",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 123,
"avg_line_length": 36.05464480874317,
"alnum_prop": 0.6048802667474993,
"repo_name": "datalogistics/libdlt",
"id": "7e38891ca70388aa1b903fd881ecdb0a10a39c3b",
"size": "6599",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libdlt/protocol/ibp/factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "151148"
},
{
"name": "Ruby",
"bytes": "4876"
},
{
"name": "Shell",
"bytes": "11385"
}
],
"symlink_target": ""
}
|
from lxml import etree
from pykml.factory import KML_ElementMaker as KML
import shapefile
import sys
def create_reader(args):
    """Build a shapefile.Reader from argv: args[1] is the .shp, args[2] the .dbf."""
    shp_file = open(args[1], 'rb')
    dbf_file = open(args[2], 'rb')
    # NOTE(review): handles are deliberately left open -- the Reader keeps
    # reading from them for its lifetime.
    return shapefile.Reader(shp=shp_file, dbf=dbf_file)
def create_kml_root():
    """Create an empty <kml> tree and return its <Document> element."""
    root = KML.kml()
    return etree.SubElement(root, 'Document')
def main(args):
    """Convert each shape record to a KML Placemark/LineString and print the KML."""
    reader = create_reader(args)
    doc = create_kml_root()
    for shape_rec in reader.shapeRecords():
        # Flatten each point's list repr into a bare "x,y" coordinate token.
        coord_text = ' '.join(
            str(point).strip('[]').replace(' ', '') for point in shape_rec.shape.points)
        placemark = KML.Placemark(
            KML.name(shape_rec.record[1]),
            KML.LineString(KML.coordinates(coord_text)))
        doc.append(placemark)
    print(etree.tostring(doc))
    return 0
if __name__ == '__main__':
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main(sys.argv))
|
{
"content_hash": "de8a30d4d1a39af56212fc2d9d243d4b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 93,
"avg_line_length": 24.5,
"alnum_prop": 0.5704081632653061,
"repo_name": "cthrall/shp2kml",
"id": "3076925d12dd4203f533c7b8d0f656d40678f85f",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shp2kml.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "980"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from . import __version__ as library_version
from .user import User
from .member import Member
from .channel import Channel, PrivateChannel
from .server import Server
from .message import Message
from .invite import Invite
from .object import Object
from .reaction import Reaction
from .role import Role
from .errors import *
from .state import ConnectionState
from .permissions import Permissions, PermissionOverwrite
from . import utils, compat
from .enums import ChannelType, ServerRegion, VerificationLevel, Status
from .voice_client import VoiceClient
from .iterators import LogsFromIterator
from .gateway import *
from .emoji import Emoji
from .http import HTTPClient
import asyncio
import aiohttp
import websockets
import logging, traceback
import sys, re, io, enum
import tempfile, os, hashlib
import itertools
import datetime
from collections import namedtuple
from os.path import split as path_split
PY35 = sys.version_info >= (3, 5)  # feature gate for Python 3.5+ code paths
log = logging.getLogger(__name__)
# Lightweight result records returned/used by the client API.
AppInfo = namedtuple('AppInfo', 'id name description icon owner')
WaitedReaction = namedtuple('WaitedReaction', 'reaction user')
def app_info_icon_url(self):
    """Return the application's icon URL, or the empty string when unset."""
    if self.icon:
        return 'https://cdn.discordapp.com/app-icons/{0.id}/{0.icon}.jpg'.format(self)
    return ''
# Expose as a read-only property on the AppInfo namedtuple.
AppInfo.icon_url = property(app_info_icon_url)
class WaitForType(enum.Enum):
    """Discriminates which event kind a registered listener is waiting on."""
    message = 0
    reaction = 1
# (target, overwrite) pair for channel-permission edits; `overwrite` defaults
# to an empty PermissionOverwrite so callers may supply only the target.
ChannelPermissions = namedtuple('ChannelPermissions', 'target overwrite')
ChannelPermissions.__new__.__defaults__ = (PermissionOverwrite(),)
class Client:
"""Represents a client connection that connects to Discord.
This class is used to interact with the Discord WebSocket and API.
A number of options can be passed to the :class:`Client`.
.. _deque: https://docs.python.org/3.4/library/collections.html#collections.deque
.. _event loop: https://docs.python.org/3/library/asyncio-eventloops.html
.. _connector: http://aiohttp.readthedocs.org/en/stable/client_reference.html#connectors
.. _ProxyConnector: http://aiohttp.readthedocs.org/en/stable/client_reference.html#proxyconnector
Parameters
----------
max_messages : Optional[int]
The maximum number of messages to store in :attr:`messages`.
This defaults to 5000. Passing in `None` or a value less than 100
will use the default instead of the passed in value.
loop : Optional[event loop].
The `event loop`_ to use for asynchronous operations. Defaults to ``None``,
in which case the default event loop is used via ``asyncio.get_event_loop()``.
cache_auth : Optional[bool]
Indicates if :meth:`login` should cache the authentication tokens. Defaults
to ``True``. The method in which the cache is written is done by writing to
disk to a temporary directory.
connector : aiohttp.BaseConnector
The `connector`_ to use for connection pooling. Useful for proxies, e.g.
with a `ProxyConnector`_.
shard_id : Optional[int]
Integer starting at 0 and less than shard_count.
shard_count : Optional[int]
The total number of shards.
Attributes
-----------
user : Optional[:class:`User`]
Represents the connected client. None if not logged in.
voice_clients : iterable of :class:`VoiceClient`
Represents a list of voice connections. To connect to voice use
:meth:`join_voice_channel`. To query the voice connection state use
:meth:`is_voice_connected`.
servers : iterable of :class:`Server`
The servers that the connected client is a member of.
private_channels : iterable of :class:`PrivateChannel`
The private channels that the connected client is participating on.
messages
A deque_ of :class:`Message` that the client has received from all
servers and private messages. The number of messages stored in this
deque is controlled by the ``max_messages`` parameter.
email
The email used to login. This is only set if login is successful,
otherwise it's None.
ws
The websocket gateway the client is currently connected to. Could be None.
loop
The `event loop`_ that the client uses for HTTP requests and websocket operations.
"""
def __init__(self, *, loop=None, **options):
    # Gateway/login state; populated once connected / logged in.
    self.ws = None
    self.email = None
    self.loop = asyncio.get_event_loop() if loop is None else loop
    self._listeners = []
    self.cache_auth = options.get('cache_auth', True)
    self.shard_id = options.get('shard_id')
    self.shard_count = options.get('shard_count')
    max_messages = options.get('max_messages')
    # None or anything below 100 falls back to the documented default of 5000.
    if max_messages is None or max_messages < 100:
        max_messages = 5000
    self.connection = ConnectionState(self.dispatch, self.request_offline_members,
                                      self._syncer, max_messages, loop=self.loop)
    connector = options.pop('connector', None)
    self.http = HTTPClient(connector, loop=self.loop)
    # Internal lifecycle flags: closed, logged-in, READY received.
    self._closed = asyncio.Event(loop=self.loop)
    self._is_logged_in = asyncio.Event(loop=self.loop)
    self._is_ready = asyncio.Event(loop=self.loop)
    if VoiceClient.warn_nacl:
        # Warn exactly once per process about missing PyNaCl.
        VoiceClient.warn_nacl = False
        log.warning("PyNaCl is not installed, voice will NOT be supported")
# internals
@asyncio.coroutine
def _syncer(self, guilds):
    # Forward a guild-sync request to the active gateway websocket.
    yield from self.ws.request_sync(guilds)
def _get_cache_filename(self, email):
    """Return the temp-dir path used to cache the auth token for *email*."""
    digest = hashlib.md5(email.encode('utf-8')).hexdigest()
    return os.path.join(tempfile.gettempdir(), 'discord_py', digest)
def _get_cache_token(self, email, password):
    """Return a previously cached auth token for *email*, or None.

    NOTE(review): *password* is unused -- the cache file is keyed on email
    only; a token stale after a password change is caught by the login
    retry in _login_2.
    """
    try:
        log.info('attempting to login via cache')
        cache_file = self._get_cache_filename(email)
        self.email = email
        with open(cache_file, 'r') as f:
            log.info('login cache file found')
            return f.read()
        # at this point our check failed
        # so we have to login and get the proper token and then
        # redo the cache
    except OSError:
        log.info('a problem occurred while opening login cache')
        return None # file not found et al
def _update_cache(self, email, password):
    """Write the current HTTP auth token to the on-disk login cache (best effort)."""
    try:
        cache_file = self._get_cache_filename(email)
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        # 0o600: the token is a credential -- keep it owner-only.
        fd = os.open(cache_file, os.O_WRONLY | os.O_CREAT, 0o0600)
        with os.fdopen(fd, 'w') as f:
            log.info('updating login cache')
            f.write(self.http.token)
    except OSError:
        # Caching is optional; failure to persist must never break login.
        log.info('a problem occurred while updating the login cache')
def handle_reaction_add(self, reaction, user):
    """Resolve registered reaction listeners matching this reaction.

    Each listener is a (condition, future, event_type) triple: a matching
    condition resolves the future with a WaitedReaction; a condition that
    raises propagates its exception into the future.
    """
    removed = []
    for i, (condition, future, event_type) in enumerate(self._listeners):
        if event_type is not WaitForType.reaction:
            continue
        if future.cancelled():
            removed.append(i)
            continue
        try:
            result = condition(reaction, user)
        except Exception as e:
            future.set_exception(e)
            removed.append(i)
        else:
            if result:
                future.set_result(WaitedReaction(reaction, user))
                removed.append(i)
    # Delete in reverse so earlier indices remain valid while popping.
    for idx in reversed(removed):
        del self._listeners[idx]
def handle_message(self, message):
    """Resolve registered message listeners matching this message.

    Mirrors handle_reaction_add: matching conditions resolve the listener's
    future with the message; raising conditions propagate the exception.
    """
    removed = []
    for i, (condition, future, event_type) in enumerate(self._listeners):
        if event_type is not WaitForType.message:
            continue
        if future.cancelled():
            removed.append(i)
            continue
        try:
            result = condition(message)
        except Exception as e:
            future.set_exception(e)
            removed.append(i)
        else:
            if result:
                future.set_result(message)
                removed.append(i)
    # Delete in reverse so earlier indices remain valid while popping.
    for idx in reversed(removed):
        del self._listeners[idx]
def handle_ready(self):
    # Mark the client as fully ready (unblocks waiters on _is_ready).
    self._is_ready.set()
def _resolve_invite(self, invite):
    """Extract the invite ID from an Invite/Object or a discord.gg URL string;
    any other string is returned unchanged."""
    if isinstance(invite, (Invite, Object)):
        return invite.id
    m = re.match(r'(?:https?\:\/\/)?discord\.gg\/(.+)', invite)
    return m.group(1) if m else invite
@asyncio.coroutine
def _resolve_destination(self, destination):
    """Return (channel_id, server_id) for any message destination type.

    Accepts Channel, PrivateChannel, Server, User, or a bare Object; the
    second element is None for private/DM destinations.
    May start a private channel with a User that has none yet.
    :raises InvalidArgument: for unsupported destination types.
    """
    if isinstance(destination, Channel):
        return destination.id, destination.server.id
    elif isinstance(destination, PrivateChannel):
        return destination.id, None
    elif isinstance(destination, Server):
        return destination.id, destination.id
    elif isinstance(destination, User):
        found = self.connection._get_private_channel_by_user(destination.id)
        if found is None:
            # Couldn't find the user, so start a PM with them first.
            channel = yield from self.start_private_message(destination)
            return channel.id, None
        else:
            return found.id, None
    elif isinstance(destination, Object):
        found = self.get_channel(destination.id)
        if found is not None:
            return (yield from self._resolve_destination(found))
        # couldn't find it in cache so YOLO
        return destination.id, destination.id
    else:
        fmt = 'Destination must be Channel, PrivateChannel, User, or Object. Received {0.__class__.__name__}'
        raise InvalidArgument(fmt.format(destination))
def __getattr__(self, name):
    """Proxy a fixed set of attributes to the underlying ConnectionState."""
    proxied = ('user', 'servers', 'private_channels', 'messages', 'voice_clients')
    if name in proxied:
        return getattr(self.connection, name)
    msg = "'{}' object has no attribute '{}'"
    raise AttributeError(msg.format(self.__class__, name))
def __setattr__(self, name, value):
    """Proxy writes to the same ConnectionState attributes __getattr__ reads."""
    proxied = ('user', 'servers', 'private_channels', 'messages', 'voice_clients')
    if name in proxied:
        return setattr(self.connection, name, value)
    object.__setattr__(self, name, value)
@asyncio.coroutine
def _run_event(self, event, *args, **kwargs):
    """Run one user event handler, routing uncaught errors to on_error.

    Cancellation is swallowed at both levels so task teardown stays quiet.
    """
    try:
        yield from getattr(self, event)(*args, **kwargs)
    except asyncio.CancelledError:
        pass
    except Exception:
        try:
            yield from self.on_error(event, *args, **kwargs)
        except asyncio.CancelledError:
            pass
def dispatch(self, event, *args, **kwargs):
log.debug('Dispatching event {}'.format(event))
method = 'on_' + event
handler = 'handle_' + event
if hasattr(self, handler):
getattr(self, handler)(*args, **kwargs)
if hasattr(self, method):
compat.create_task(self._run_event(method, *args, **kwargs), loop=self.loop)
@asyncio.coroutine
def on_error(self, event_method, *args, **kwargs):
"""|coro|
The default error handler provided by the client.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
Check :func:`discord.on_error` for more details.
"""
print('Ignoring exception in {}'.format(event_method), file=sys.stderr)
traceback.print_exc()
# login state management
@asyncio.coroutine
def _login_1(self, token, **kwargs):
log.info('logging in using static token')
is_bot = kwargs.pop('bot', True)
data = yield from self.http.static_login(token, bot=is_bot)
self.email = data.get('email', None)
self.connection.is_bot = is_bot
self._is_logged_in.set()
@asyncio.coroutine
def _login_2(self, email, password, **kwargs):
# attempt to read the token from cache
self.connection.is_bot = False
if self.cache_auth:
token = self._get_cache_token(email, password)
try:
yield from self.http.static_login(token, bot=False)
except:
log.info('cache auth token is out of date')
else:
self._is_logged_in.set()
return
yield from self.http.email_login(email, password)
self.email = email
self._is_logged_in.set()
# since we went through all this trouble
# let's make sure we don't have to do it again
if self.cache_auth:
self._update_cache(email, password)
@asyncio.coroutine
def login(self, *args, **kwargs):
"""|coro|
Logs in the client with the specified credentials.
This function can be used in two different ways.
.. code-block:: python
await client.login('token')
# or
await client.login('email', 'password')
More than 2 parameters or less than 1 parameter raises a
:exc:`TypeError`.
Parameters
-----------
bot : bool
Keyword argument that specifies if the account logging on is a bot
token or not. Only useful for logging in with a static token.
Ignored for the email and password combo. Defaults to ``True``.
Raises
------
LoginFailure
The wrong credentials are passed.
HTTPException
An unknown HTTP related error occurred,
usually when it isn't 200 or the known incorrect credentials
passing status code.
TypeError
The incorrect number of parameters is passed.
"""
n = len(args)
if n in (2, 1):
yield from getattr(self, '_login_' + str(n))(*args, **kwargs)
else:
raise TypeError('login() takes 1 or 2 positional arguments but {} were given'.format(n))
@asyncio.coroutine
def logout(self):
"""|coro|
Logs out of Discord and closes all connections.
"""
yield from self.close()
self._is_logged_in.clear()
    @asyncio.coroutine
    def connect(self):
        """|coro|

        Creates a websocket connection and lets the websocket listen
        to messages from discord, reconnecting or resuming as the
        gateway requests until the client is closed.

        Raises
        -------
        GatewayNotFound
            If the gateway to connect to discord is not found. Usually if this
            is thrown then there is a discord API outage.
        ConnectionClosed
            The websocket connection has been terminated.
        """
        self.ws = yield from DiscordWebSocket.from_client(self)
        while not self.is_closed:
            try:
                yield from self.ws.poll_event()
            except (ReconnectWebSocket, ResumeWebSocket) as e:
                # the gateway asked for a reconnect; a RESUME keeps the
                # current session, a plain reconnect starts a fresh one
                resume = type(e) is ResumeWebSocket
                log.info('Got ' + type(e).__name__)
                self.ws = yield from DiscordWebSocket.from_client(self, resume=resume)
            except ConnectionClosed as e:
                yield from self.close()
                # 1000 is a clean close; anything else is re-raised
                if e.code != 1000:
                    raise
@asyncio.coroutine
def close(self):
"""|coro|
Closes the connection to discord.
"""
if self.is_closed:
return
for voice in list(self.voice_clients):
try:
yield from voice.disconnect()
except:
# if an error happens during disconnects, disregard it.
pass
self.connection._remove_voice_client(voice.server.id)
if self.ws is not None and self.ws.open:
yield from self.ws.close()
yield from self.http.close()
self._closed.set()
self._is_ready.clear()
@asyncio.coroutine
def start(self, *args, **kwargs):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
"""
yield from self.login(*args, **kwargs)
yield from self.connect()
def run(self, *args, **kwargs):
"""A blocking call that abstracts away the `event loop`_
initialisation from you.
If you want more control over the event loop then this
function should not be used. Use :meth:`start` coroutine
or :meth:`connect` + :meth:`login`.
Roughly Equivalent to: ::
try:
loop.run_until_complete(start(*args, **kwargs))
except KeyboardInterrupt:
loop.run_until_complete(logout())
# cancel all tasks lingering
finally:
loop.close()
Warning
--------
This function must be the last function to call due to the fact that it
is blocking. That means that registration of events or anything being
called after this function call will not execute until it returns.
"""
try:
self.loop.run_until_complete(self.start(*args, **kwargs))
except KeyboardInterrupt:
self.loop.run_until_complete(self.logout())
pending = asyncio.Task.all_tasks(loop=self.loop)
gathered = asyncio.gather(*pending, loop=self.loop)
try:
gathered.cancel()
self.loop.run_until_complete(gathered)
# we want to retrieve any exceptions to make sure that
# they don't nag us about it being un-retrieved.
gathered.exception()
except:
pass
finally:
self.loop.close()
# properties
    @property
    def is_logged_in(self):
        """bool: Indicates if the client has logged in successfully."""
        # Backed by an Event that the _login_* coroutines set and
        # logout() clears.
        return self._is_logged_in.is_set()
    @property
    def is_closed(self):
        """bool: Indicates if the websocket connection is closed."""
        # Set by close(); the connect() poll loop uses it as its exit flag.
        return self._closed.is_set()
# helpers/getters
    def get_channel(self, id):
        """Returns a :class:`Channel` or :class:`PrivateChannel` with the following ID. If not found, returns None."""
        # Thin proxy over the connection state's channel cache.
        return self.connection.get_channel(id)
    def get_server(self, id):
        """Returns a :class:`Server` with the given ID. If not found, returns None."""
        # Thin proxy over the connection state's server cache.
        return self.connection._get_server(id)
def get_all_emojis(self):
"""Returns a generator with every :class:`Emoji` the client can see."""
for server in self.servers:
for emoji in server.emojis:
yield emoji
def get_all_channels(self):
"""A generator that retrieves every :class:`Channel` the client can 'access'.
This is equivalent to: ::
for server in client.servers:
for channel in server.channels:
yield channel
Note
-----
Just because you receive a :class:`Channel` does not mean that
you can communicate in said channel. :meth:`Channel.permissions_for` should
be used for that.
"""
for server in self.servers:
for channel in server.channels:
yield channel
def get_all_members(self):
"""Returns a generator with every :class:`Member` the client can see.
This is equivalent to: ::
for server in client.servers:
for member in server.members:
yield member
"""
for server in self.servers:
for member in server.members:
yield member
# listeners/waiters
@asyncio.coroutine
def wait_until_ready(self):
"""|coro|
This coroutine waits until the client is all ready. This could be considered
another way of asking for :func:`discord.on_ready` except meant for your own
background tasks.
"""
yield from self._is_ready.wait()
@asyncio.coroutine
def wait_until_login(self):
"""|coro|
This coroutine waits until the client is logged on successfully. This
is different from waiting until the client's state is all ready. For
that check :func:`discord.on_ready` and :meth:`wait_until_ready`.
"""
yield from self._is_logged_in.wait()
@asyncio.coroutine
def wait_for_message(self, timeout=None, *, author=None, channel=None, content=None, check=None):
"""|coro|
Waits for a message reply from Discord. This could be seen as another
:func:`discord.on_message` event outside of the actual event. This could
also be used for follow-ups and easier user interactions.
The keyword arguments passed into this function are combined using the logical and
operator. The ``check`` keyword argument can be used to pass in more complicated
checks and must be a regular function (not a coroutine).
The ``timeout`` parameter is passed into `asyncio.wait_for`_. By default, it
does not timeout. Instead of throwing ``asyncio.TimeoutError`` the coroutine
catches the exception and returns ``None`` instead of a :class:`Message`.
If the ``check`` predicate throws an exception, then the exception is propagated.
This function returns the **first message that meets the requirements**.
.. _asyncio.wait_for: https://docs.python.org/3/library/asyncio-task.html#asyncio.wait_for
Examples
----------
Basic example:
.. code-block:: python
:emphasize-lines: 5
@client.event
async def on_message(message):
if message.content.startswith('$greet'):
await client.send_message(message.channel, 'Say hello')
msg = await client.wait_for_message(author=message.author, content='hello')
await client.send_message(message.channel, 'Hello.')
Asking for a follow-up question:
.. code-block:: python
:emphasize-lines: 6
@client.event
async def on_message(message):
if message.content.startswith('$start'):
await client.send_message(message.channel, 'Type $stop 4 times.')
for i in range(4):
msg = await client.wait_for_message(author=message.author, content='$stop')
fmt = '{} left to go...'
await client.send_message(message.channel, fmt.format(3 - i))
await client.send_message(message.channel, 'Good job!')
Advanced filters using ``check``:
.. code-block:: python
:emphasize-lines: 9
@client.event
async def on_message(message):
if message.content.startswith('$cool'):
await client.send_message(message.channel, 'Who is cool? Type $name namehere')
def check(msg):
return msg.content.startswith('$name')
message = await client.wait_for_message(author=message.author, check=check)
name = message.content[len('$name'):].strip()
await client.send_message(message.channel, '{} is cool indeed'.format(name))
Parameters
-----------
timeout : float
The number of seconds to wait before returning ``None``.
author : :class:`Member` or :class:`User`
The author the message must be from.
channel : :class:`Channel` or :class:`PrivateChannel` or :class:`Object`
The channel the message must be from.
content : str
The exact content the message must have.
check : function
A predicate for other complicated checks. The predicate must take
a :class:`Message` as its only parameter.
Returns
--------
:class:`Message`
The message that you requested for.
"""
def predicate(message):
result = True
if author is not None:
result = result and message.author == author
if content is not None:
result = result and message.content == content
if channel is not None:
result = result and message.channel.id == channel.id
if callable(check):
# the exception thrown by check is propagated through the future.
result = result and check(message)
return result
future = asyncio.Future(loop=self.loop)
self._listeners.append((predicate, future, WaitForType.message))
try:
message = yield from asyncio.wait_for(future, timeout, loop=self.loop)
except asyncio.TimeoutError:
message = None
return message
@asyncio.coroutine
def wait_for_reaction(self, emoji=None, *, user=None, timeout=None, message=None, check=None):
"""|coro|
Waits for a message reaction from Discord. This is similar to :meth:`wait_for_message`
and could be seen as another :func:`on_reaction_add` event outside of the actual event.
This could be used for follow up situations.
Similar to :meth:`wait_for_message`, the keyword arguments are combined using logical
AND operator. The ``check`` keyword argument can be used to pass in more complicated
checks and must a regular function taking in two arguments, ``(reaction, user)``. It
must not be a coroutine.
The ``timeout`` parameter is passed into asyncio.wait_for. By default, it
does not timeout. Instead of throwing ``asyncio.TimeoutError`` the coroutine
catches the exception and returns ``None`` instead of a the ``(reaction, user)``
tuple.
If the ``check`` predicate throws an exception, then the exception is propagated.
The ``emoji`` parameter can be either a :class:`Emoji`, a ``str`` representing
an emoji, or a sequence of either type. If the ``emoji`` parameter is a sequence
then the first reaction emoji that is in the list is returned. If ``None`` is
passed then the first reaction emoji used is returned.
This function returns the **first reaction that meets the requirements**.
Examples
---------
Basic Example:
.. code-block:: python
@client.event
async def on_message(message):
if message.content.startswith('$react'):
msg = await client.send_message(message.channel, 'React with thumbs up or thumbs down.')
res = await client.wait_for_reaction(['\N{THUMBS UP SIGN}', '\N{THUMBS DOWN SIGN}'], message=msg)
await client.send_message(message.channel, '{0.user} reacted with {0.reaction.emoji}!'.format(res))
Checking for reaction emoji regardless of skin tone:
.. code-block:: python
@client.event
async def on_message(message):
if message.content.startswith('$react'):
msg = await client.send_message(message.channel, 'React with thumbs up or thumbs down.')
def check(reaction, user):
e = str(reaction.emoji)
return e.startswith(('\N{THUMBS UP SIGN}', '\N{THUMBS DOWN SIGN}'))
res = await client.wait_for_reaction(message=msg, check=check)
await client.send_message(message.channel, '{0.user} reacted with {0.reaction.emoji}!'.format(res))
Parameters
-----------
timeout: float
The number of seconds to wait before returning ``None``.
user: :class:`Member` or :class:`User`
The user the reaction must be from.
emoji: str or :class:`Emoji` or sequence
The emoji that we are waiting to react with.
message: :class:`Message`
The message that we want the reaction to be from.
check: function
A predicate for other complicated checks. The predicate must take
``(reaction, user)`` as its two parameters, which ``reaction`` being a
:class:`Reaction` and ``user`` being either a :class:`User` or a
:class:`Member`.
Returns
--------
namedtuple
A namedtuple with attributes ``reaction`` and ``user`` similar to :func:`on_reaction_add`.
"""
if emoji is None:
emoji_check = lambda r: True
elif isinstance(emoji, (str, Emoji)):
emoji_check = lambda r: r.emoji == emoji
else:
emoji_check = lambda r: r.emoji in emoji
def predicate(reaction, reaction_user):
result = emoji_check(reaction)
if message is not None:
result = result and message.id == reaction.message.id
if user is not None:
result = result and user.id == reaction_user.id
if callable(check):
# the exception thrown by check is propagated through the future.
result = result and check(reaction, reaction_user)
return result
future = asyncio.Future(loop=self.loop)
self._listeners.append((predicate, future, WaitForType.reaction))
try:
return (yield from asyncio.wait_for(future, timeout, loop=self.loop))
except asyncio.TimeoutError:
return None
# event registration
def event(self, coro):
"""A decorator that registers an event to listen to.
You can find more info about the events on the :ref:`documentation below <discord-api-events>`.
The events must be a |corourl|_, if not, :exc:`ClientException` is raised.
Examples
---------
Using the basic :meth:`event` decorator: ::
@client.event
@asyncio.coroutine
def on_ready():
print('Ready!')
Saving characters by using the :meth:`async_event` decorator: ::
@client.async_event
def on_ready():
print('Ready!')
"""
if not asyncio.iscoroutinefunction(coro):
raise ClientException('event registered must be a coroutine function')
setattr(self, coro.__name__, coro)
log.info('{0.__name__} has successfully been registered as an event'.format(coro))
return coro
def async_event(self, coro):
"""A shorthand decorator for ``asyncio.coroutine`` + :meth:`event`."""
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
return self.event(coro)
# Message sending/management
@asyncio.coroutine
def start_private_message(self, user):
"""|coro|
Starts a private message with the user. This allows you to
:meth:`send_message` to the user.
Note
-----
This method should rarely be called as :meth:`send_message`
does it automatically for you.
Parameters
-----------
user : :class:`User`
The user to start the private message with.
Raises
------
HTTPException
The request failed.
InvalidArgument
The user argument was not of :class:`User`.
"""
if not isinstance(user, User):
raise InvalidArgument('user argument must be a User')
data = yield from self.http.start_private_message(user.id)
channel = PrivateChannel(me=self.user, **data)
self.connection._add_private_channel(channel)
return channel
@asyncio.coroutine
def add_reaction(self, message, emoji):
"""|coro|
Add a reaction to the given message.
The message must be a :class:`Message` that exists. emoji may be a unicode emoji,
or a custom server :class:`Emoji`.
Parameters
------------
message : :class:`Message`
The message to react to.
emoji : :class:`Emoji` or str
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The message or emoji you specified was not found.
InvalidArgument
The message or emoji parameter is invalid.
"""
if not isinstance(message, Message):
raise InvalidArgument('message argument must be a Message')
if not isinstance(emoji, (str, Emoji)):
raise InvalidArgument('emoji argument must be a string or Emoji')
if isinstance(emoji, Emoji):
emoji = '{}:{}'.format(emoji.name, emoji.id)
yield from self.http.add_reaction(message.id, message.channel.id, emoji)
@asyncio.coroutine
def remove_reaction(self, message, emoji, member):
"""|coro|
Remove a reaction by the member from the given message.
If member != server.me, you need Manage Messages to remove the reaction.
The message must be a :class:`Message` that exists. emoji may be a unicode emoji,
or a custom server :class:`Emoji`.
Parameters
------------
message : :class:`Message`
The message.
emoji : :class:`Emoji` or str
The emoji to remove.
member : :class:`Member`
The member for which to delete the reaction.
Raises
--------
HTTPException
Removing the reaction failed.
Forbidden
You do not have the proper permissions to remove the reaction.
NotFound
The message or emoji you specified was not found.
InvalidArgument
The message or emoji parameter is invalid.
"""
if not isinstance(message, Message):
raise InvalidArgument('message argument must be a Message')
if not isinstance(emoji, (str, Emoji)):
raise InvalidArgument('emoji must be a string or Emoji')
if isinstance(emoji, Emoji):
emoji = '{}:{}'.format(emoji.name, emoji.id)
if member == self.user:
member_id = '@me'
else:
member_id = member.id
yield from self.http.remove_reaction(message.id, message.channel.id, emoji, member_id)
@asyncio.coroutine
def get_reaction_users(self, reaction, limit=100, after=None):
"""|coro|
Get the users that added a reaction to a message.
Parameters
------------
reaction : :class:`Reaction`
The reaction to retrieve users for.
limit : int
The maximum number of results to return.
after : :class:`Member` or :class:`Object`
For pagination, reactions are sorted by member.
Raises
--------
HTTPException
Getting the users for the reaction failed.
NotFound
The message or emoji you specified was not found.
InvalidArgument
The reaction parameter is invalid.
"""
if not isinstance(reaction, Reaction):
raise InvalidArgument('reaction must be a Reaction')
emoji = reaction.emoji
if isinstance(emoji, Emoji):
emoji = '{}:{}'.format(emoji.name, emoji.id)
if after:
after = after.id
data = yield from self.http.get_reaction_users(
reaction.message.id, reaction.message.channel.id,
emoji, limit, after=after)
return [User(**user) for user in data]
@asyncio.coroutine
def clear_reactions(self, message):
"""|coro|
Removes all the reactions from a given message.
You need Manage Messages permission to use this.
Parameters
-----------
message: :class:`Message`
The message to remove all reactions from.
Raises
--------
HTTPException
Removing the reactions failed.
Forbidden
You do not have the proper permissions to remove all the reactions.
"""
yield from self.http.clear_reactions(message.id, message.channel.id)
@asyncio.coroutine
def send_message(self, destination, content=None, *, tts=False, embed=None):
"""|coro|
Sends a message to the destination given with the content given.
The destination could be a :class:`Channel`, :class:`PrivateChannel` or :class:`Server`.
For convenience it could also be a :class:`User`. If it's a :class:`User` or :class:`PrivateChannel`
then it sends the message via private message, otherwise it sends the message to the channel.
If the destination is a :class:`Server` then it's equivalent to calling
:attr:`Server.default_channel` and sending it there.
If it is a :class:`Object` instance then it is assumed to be the
destination ID. The destination ID is a *channel* so passing in a user
ID will not be a valid destination.
.. versionchanged:: 0.9.0
``str`` being allowed was removed and replaced with :class:`Object`.
The content must be a type that can convert to a string through ``str(content)``.
If the content is set to ``None`` (the default), then the ``embed`` parameter must
be provided.
If the ``embed`` parameter is provided, it must be of type :class:`Embed` and
it must be a rich embed type.
Parameters
------------
destination
The location to send the message.
content
The content of the message to send. If this is missing,
then the ``embed`` parameter must be present.
tts : bool
Indicates if the message should be sent using text-to-speech.
embed: :class:`Embed`
The rich embed for the content.
Raises
--------
HTTPException
Sending the message failed.
Forbidden
You do not have the proper permissions to send the message.
NotFound
The destination was not found and hence is invalid.
InvalidArgument
The destination parameter is invalid.
Examples
----------
Sending a regular message:
.. code-block:: python
await client.send_message(message.channel, 'Hello')
Sending a TTS message:
.. code-block:: python
await client.send_message(message.channel, 'Goodbye.', tts=True)
Sending an embed message:
.. code-block:: python
em = discord.Embed(title='My Embed Title', description='My Embed Content.', colour=0xDEADBF)
em.set_author(name='Someone', icon_url=client.user.default_avatar_url)
await client.send_message(message.channel, embed=em)
Returns
---------
:class:`Message`
The message that was sent.
"""
channel_id, guild_id = yield from self._resolve_destination(destination)
content = str(content) if content is not None else None
if embed is not None:
embed = embed.to_dict()
data = yield from self.http.send_message(channel_id, content, guild_id=guild_id, tts=tts, embed=embed)
channel = self.get_channel(data.get('channel_id'))
message = self.connection._create_message(channel=channel, **data)
return message
@asyncio.coroutine
def send_typing(self, destination):
"""|coro|
Send a *typing* status to the destination.
*Typing* status will go away after 10 seconds, or after a message is sent.
The destination parameter follows the same rules as :meth:`send_message`.
Parameters
----------
destination
The location to send the typing update.
"""
channel_id, guild_id = yield from self._resolve_destination(destination)
yield from self.http.send_typing(channel_id)
@asyncio.coroutine
def send_file(self, destination, fp, *, filename=None, content=None, tts=False):
"""|coro|
Sends a message to the destination given with the file given.
The destination parameter follows the same rules as :meth:`send_message`.
The ``fp`` parameter should be either a string denoting the location for a
file or a *file-like object*. The *file-like object* passed is **not closed**
at the end of execution. You are responsible for closing it yourself.
.. note::
If the file-like object passed is opened via ``open`` then the modes
'rb' should be used.
The ``filename`` parameter is the filename of the file.
If this is not given then it defaults to ``fp.name`` or if ``fp`` is a string
then the ``filename`` will default to the string given. You can overwrite
this value by passing this in.
Parameters
------------
destination
The location to send the message.
fp
The *file-like object* or file path to send.
filename : str
The filename of the file. Defaults to ``fp.name`` if it's available.
content
The content of the message to send along with the file. This is
forced into a string by a ``str(content)`` call.
tts : bool
If the content of the message should be sent with TTS enabled.
Raises
-------
HTTPException
Sending the file failed.
Returns
--------
:class:`Message`
The message sent.
"""
channel_id, guild_id = yield from self._resolve_destination(destination)
try:
with open(fp, 'rb') as f:
buffer = io.BytesIO(f.read())
if filename is None:
_, filename = path_split(fp)
except TypeError:
buffer = fp
content = str(content) if content is not None else None
data = yield from self.http.send_file(channel_id, buffer, guild_id=guild_id,
filename=filename, content=content, tts=tts)
channel = self.get_channel(data.get('channel_id'))
message = self.connection._create_message(channel=channel, **data)
return message
@asyncio.coroutine
def delete_message(self, message):
"""|coro|
Deletes a :class:`Message`.
Your own messages could be deleted without any proper permissions. However to
delete other people's messages, you need the proper permissions to do so.
Parameters
-----------
message : :class:`Message`
The message to delete.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
HTTPException
Deleting the message failed.
"""
channel = message.channel
guild_id = channel.server.id if not getattr(channel, 'is_private', True) else None
yield from self.http.delete_message(channel.id, message.id, guild_id)
@asyncio.coroutine
def delete_messages(self, messages):
"""|coro|
Deletes a list of messages. This is similar to :func:`delete_message`
except it bulk deletes multiple messages.
The channel to check where the message is deleted from is handled via
the first element of the iterable's ``.channel.id`` attributes. If the
channel is not consistent throughout the entire sequence, then an
:exc:`HTTPException` will be raised.
Usable only by bot accounts.
Parameters
-----------
messages : iterable of :class:`Message`
An iterable of messages denoting which ones to bulk delete.
Raises
------
ClientException
The number of messages to delete is less than 2 or more than 100.
Forbidden
You do not have proper permissions to delete the messages or
you're not using a bot account.
HTTPException
Deleting the messages failed.
"""
messages = list(messages)
if len(messages) > 100 or len(messages) < 2:
raise ClientException('Can only delete messages in the range of [2, 100]')
channel = messages[0].channel
message_ids = [m.id for m in messages]
guild_id = channel.server.id if not getattr(channel, 'is_private', True) else None
yield from self.http.delete_messages(channel.id, message_ids, guild_id)
    @asyncio.coroutine
    def purge_from(self, channel, *, limit=100, check=None, before=None, after=None, around=None):
        """|coro|

        Purges a list of messages that meet the criteria given by the predicate
        ``check``. If a ``check`` is not provided then all messages are deleted
        without discrimination.

        You must have Manage Messages permission to delete messages even if they
        are your own. The Read Message History permission is also needed to
        retrieve message history.

        Usable only by bot accounts.

        Parameters
        -----------
        channel : :class:`Channel`
            The channel to purge from.
        limit : int
            The number of messages to search through. This is not the number
            of messages that will be deleted, though it can be.
        check : predicate
            The function used to check if a message should be deleted.
            It must take a :class:`Message` as its sole parameter.
        before : :class:`Message` or `datetime`
            The message or date before which all deleted messages must be.
            If a date is provided it must be a timezone-naive datetime representing UTC time.
        after : :class:`Message` or `datetime`
            The message or date after which all deleted messages must be.
            If a date is provided it must be a timezone-naive datetime representing UTC time.
        around : :class:`Message` or `datetime`
            The message or date around which all deleted messages must be.
            If a date is provided it must be a timezone-naive datetime representing UTC time.

        Raises
        -------
        Forbidden
            You do not have proper permissions to do the actions required or
            you're not using a bot account.
        HTTPException
            Purging the messages failed.

        Examples
        ---------

        Deleting bot's messages ::

            def is_me(m):
                return m.author == client.user

            deleted = await client.purge_from(channel, limit=100, check=is_me)
            await client.send_message(channel, 'Deleted {} message(s)'.format(len(deleted)))

        Returns
        --------
        list
            The list of messages that were deleted.
        """
        if check is None:
            check = lambda m: True
        # datetime bounds are converted to snowflake-id sentinels
        if isinstance(before, datetime.datetime):
            before = Object(utils.time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(utils.time_snowflake(after, high=True))
        if isinstance(around, datetime.datetime):
            around = Object(utils.time_snowflake(around, high=True))
        iterator = LogsFromIterator(self, channel, limit, before=before, after=after, around=around)
        ret = []
        count = 0
        while True:
            try:
                msg = yield from iterator.iterate()
            except asyncio.QueueEmpty:
                # no more messages to poll; flush whatever is still pending
                if count >= 2:
                    # two or more messages pending -> bulk delete
                    # (delete_messages requires at least 2)
                    to_delete = ret[-count:]
                    yield from self.delete_messages(to_delete)
                elif count == 1:
                    # delete a single message
                    yield from self.delete_message(ret[-1])
                return ret
            else:
                if count == 100:
                    # 100 pending matches is a full batch (delete_messages'
                    # maximum); flush before accumulating more
                    to_delete = ret[-100:]
                    yield from self.delete_messages(to_delete)
                    count = 0
                    # pause between batches — presumably to avoid rate
                    # limits; TODO confirm
                    yield from asyncio.sleep(1, loop=self.loop)
                if check(msg):
                    count += 1
                    ret.append(msg)
@asyncio.coroutine
def edit_message(self, message, new_content=None, *, embed=None):
"""|coro|
Edits a :class:`Message` with the new message content.
The new_content must be able to be transformed into a string via ``str(new_content)``.
If the ``new_content`` is not provided, then ``embed`` must be provided, which must
be of type :class:`Embed`.
The :class:`Message` object is not directly modified afterwards until the
corresponding WebSocket event is received.
Parameters
-----------
message : :class:`Message`
The message to edit.
new_content
The new content to replace the message with.
embed: :class:`Embed`
The new embed to replace the original embed with.
Raises
-------
HTTPException
Editing the message failed.
Returns
--------
:class:`Message`
The new edited message.
"""
channel = message.channel
content = str(new_content) if new_content else None
embed = embed.to_dict() if embed else None
guild_id = channel.server.id if not getattr(channel, 'is_private', True) else None
data = yield from self.http.edit_message(message.id, channel.id, content, guild_id=guild_id, embed=embed)
return self.connection._create_message(channel=channel, **data)
@asyncio.coroutine
def get_message(self, channel, id):
"""|coro|
Retrieves a single :class:`Message` from a :class:`Channel`.
This can only be used by bot accounts.
Parameters
------------
channel: :class:`Channel` or :class:`PrivateChannel`
The text channel to retrieve the message from.
id: str
The message ID to look for.
Returns
--------
:class:`Message`
The message asked for.
Raises
--------
NotFound
The specified channel or message was not found.
Forbidden
You do not have the permissions required to get a message.
HTTPException
Retrieving the message failed.
"""
data = yield from self.http.get_message(channel.id, id)
return self.connection._create_message(channel=channel, **data)
@asyncio.coroutine
def pin_message(self, message):
"""|coro|
Pins a message. You must have Manage Messages permissions
to do this in a non-private channel context.
Parameters
-----------
message: :class:`Message`
The message to pin.
Raises
-------
Forbidden
You do not have permissions to pin the message.
NotFound
The message or channel was not found.
HTTPException
Pinning the message failed, probably due to the channel
having more than 50 pinned messages.
"""
yield from self.http.pin_message(message.channel.id, message.id)
@asyncio.coroutine
def unpin_message(self, message):
    """|coro|

    Removes the given message from its channel's pins.

    Requires the Manage Messages permission when used outside a
    private channel.

    Parameters
    -----------
    message: :class:`Message`
        The message to unpin.

    Raises
    -------
    Forbidden
        Missing permission to unpin the message.
    NotFound
        The message or channel no longer exists.
    HTTPException
        The unpin request failed.
    """
    channel_id = message.channel.id
    yield from self.http.unpin_message(channel_id, message.id)
@asyncio.coroutine
def pins_from(self, channel):
    """|coro|

    Returns the list of :class:`Message` currently pinned in the
    given :class:`Channel` or :class:`PrivateChannel`.

    Parameters
    -----------
    channel: :class:`Channel` or :class:`PrivateChannel`
        The channel whose pins to fetch.

    Raises
    -------
    NotFound
        The channel was not found.
    HTTPException
        Retrieving the pinned messages failed.
    """
    raw_messages = yield from self.http.pins_from(channel.id)
    pinned = []
    for raw in raw_messages:
        pinned.append(self.connection._create_message(channel=channel, **raw))
    return pinned
def _logs_from(self, channel, limit=100, before=None, after=None, around=None):
    """|coro|

    Obtains a single page of message logs from the given channel.

    Parameters
    -----------
    channel : :class:`Channel` or :class:`PrivateChannel`
        The channel to obtain the logs from.
    limit : int
        The number of messages to retrieve.
    before : :class:`Message` or `datetime`
        The message or date before which all returned messages must be.
        If a date is provided it must be a timezone-naive datetime
        representing UTC time.
    after : :class:`Message` or `datetime`
        The message or date after which all returned messages must be.
        If a date is provided it must be a timezone-naive datetime
        representing UTC time.
    around : :class:`Message` or `datetime`
        The message or date around which all returned messages must be.
        If a date is provided it must be a timezone-naive datetime
        representing UTC time.

    Raises
    ------
    Forbidden
        You do not have permissions to get channel logs.
    NotFound
        The channel you are requesting for doesn't exist.
    HTTPException
        The request to get logs failed.

    Yields
    -------
    :class:`Message`
        The message with the message data parsed.

    Examples
    ---------
    Basic logging: ::

        logs = yield from client.logs_from(channel)
        for message in logs:
            if message.content.startswith('!hello'):
                if message.author == client.user:
                    yield from client.edit_message(message, 'goodbye')

    Python 3.5 Usage ::

        counter = 0
        async for message in client.logs_from(channel, limit=500):
            if message.author == client.user:
                counter += 1
    """
    # Reduce each bound to its snowflake ID; plain datetimes and None
    # simply fall through as None here (callers convert datetimes first).
    bounds = {
        'before': getattr(before, 'id', None),
        'after': getattr(after, 'id', None),
        'around': getattr(around, 'id', None),
    }
    return self.http.logs_from(channel.id, limit, **bounds)
if PY35:
    # Python 3.5+: expose logs_from as an async iterator so callers can
    # use ``async for``. Datetime bounds are converted to snowflake IDs
    # wrapped in Object so LogsFromIterator treats them like messages.
    def logs_from(self, channel, limit=100, *, before=None, after=None, around=None, reverse=False):
        if isinstance(before, datetime.datetime):
            # high=False: lowest snowflake for that timestamp (exclusive upper bound)
            before = Object(utils.time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            # high=True: highest snowflake for that timestamp (exclusive lower bound)
            after = Object(utils.time_snowflake(after, high=True))
        if isinstance(around, datetime.datetime):
            around = Object(utils.time_snowflake(around))
        return LogsFromIterator(self, channel, limit, before=before, after=after, around=around, reverse=reverse)
else:
    # Python 3.4 fallback: paginate synchronously inside the coroutine
    # and hand back a plain generator of parsed messages. ``around`` is
    # not supported on this path.
    @asyncio.coroutine
    def logs_from(self, channel, limit=100, *, before=None, after=None):
        if isinstance(before, datetime.datetime):
            before = Object(utils.time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(utils.time_snowflake(after, high=True))

        def generator(data):
            # Lazily convert raw payloads into Message objects.
            for message in data:
                yield self.connection._create_message(channel=channel, **message)

        result = []
        while limit > 0:
            # The HTTP endpoint caps a single page at 100 messages.
            retrieve = limit if limit <= 100 else 100
            data = yield from self._logs_from(channel, retrieve, before, after)
            if len(data):
                limit -= retrieve
                result.extend(data)
                # Continue paging backwards from the oldest message seen.
                before = Object(id=data[-1]['id'])
            else:
                break
        return generator(result)

# Both variants share the documentation written on _logs_from.
logs_from.__doc__ = _logs_from.__doc__
# Member management
@asyncio.coroutine
def request_offline_members(self, server):
    """|coro|

    Asks Discord to fill :attr:`Server.members` with the server's
    offline members. Rarely needed directly.

    Discord omits offline members from the initial connection payload
    for servers with more than 250 members; :attr:`Server.large` is
    ``True`` for such servers.

    Parameters
    -----------
    server : :class:`Server` or iterable
        The server to request offline members for. An iterable is
        treated as a sequence of servers to request members for.
    """
    # A single server contributes its ID; an iterable contributes a list of IDs.
    if hasattr(server, 'id'):
        guild_id = server.id
    else:
        guild_id = [s.id for s in server]

    # Opcode 8 is REQUEST_GUILD_MEMBERS; an empty query with limit 0
    # requests every member.
    request = {
        'op': 8,
        'd': {
            'guild_id': guild_id,
            'query': '',
            'limit': 0
        }
    }
    yield from self.ws.send_as_json(request)
@asyncio.coroutine
def kick(self, member):
    """|coro|

    Kicks a :class:`Member` from their server.

    Warning
    --------
    The target server is taken from :attr:`Member.server`, so you
    must hold the appropriate permissions in that server.

    Parameters
    -----------
    member : :class:`Member`
        The member to kick from their server.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to kick.
    HTTPException
        Kicking failed.
    """
    server = member.server
    yield from self.http.kick(member.id, server.id)
@asyncio.coroutine
def ban(self, member, delete_message_days=1):
    """|coro|

    Bans a :class:`Member` from their server.

    Warning
    --------
    The target server is taken from :attr:`Member.server`, so you
    must hold the appropriate permissions in that server.

    Parameters
    -----------
    member : :class:`Member`
        The member to ban from their server.
    delete_message_days : int
        How many days' worth of the user's messages to delete in the
        server, between 0 and 7 inclusive.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to ban.
    HTTPException
        Banning failed.
    """
    server = member.server
    yield from self.http.ban(member.id, server.id, delete_message_days)
@asyncio.coroutine
def unban(self, server, user):
    """|coro|

    Lifts a :class:`User`'s ban from the given server.

    Parameters
    -----------
    server : :class:`Server`
        The server to unban the user from.
    user : :class:`User`
        The user to unban.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to unban.
    HTTPException
        Unbanning failed.
    """
    yield from self.http.unban(user.id, server.id)
@asyncio.coroutine
def server_voice_state(self, member, *, mute=None, deafen=None):
    """|coro|

    Server-mutes or server-deafens a specific :class:`Member`.

    Warning
    --------
    The target server is taken from :attr:`Member.server`, so you
    must hold the appropriate permissions in that server.

    Parameters
    -----------
    member : :class:`Member`
        The member whose voice state to change.
    mute: Optional[bool]
        Whether the member should be server muted (``True``) or
        un-muted (``False``). ``None`` leaves it unchanged.
    deafen: Optional[bool]
        Whether the member should be server deafened (``True``) or
        un-deafened (``False``). ``None`` leaves it unchanged.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to deafen or mute.
    HTTPException
        The operation failed.
    """
    server = member.server
    yield from self.http.server_voice_state(member.id, server.id, mute=mute, deafen=deafen)
@asyncio.coroutine
def edit_profile(self, password=None, **fields):
    """|coro|

    Edits the current profile of the client.

    If a bot account is used then the password field is optional,
    otherwise it is required.

    The :attr:`Client.user` object is not modified directly afterwards until the
    corresponding WebSocket event is received.

    Note
    -----
    To upload an avatar, a *bytes-like object* must be passed in that
    represents the image being uploaded. If this is done through a file
    then the file must be opened via ``open('some_filename', 'rb')`` and
    the *bytes-like object* is given through the use of ``fp.read()``.

    The only image formats supported for uploading is JPEG and PNG.

    Parameters
    -----------
    password : str
        The current password for the client's account. Not used
        for bot accounts.
    new_password : str
        The new password you wish to change to.
    email : str
        The new email you wish to change to.
    username :str
        The new username you wish to change to.
    avatar : bytes
        A *bytes-like object* representing the image to upload.
        Could be ``None`` to denote no avatar.

    Raises
    ------
    HTTPException
        Editing your profile failed.
    InvalidArgument
        Wrong image format passed for ``avatar``.
    ClientException
        Password is required for non-bot accounts.
    """
    # Distinguish "avatar not supplied" (keep current) from
    # "avatar=None supplied" (remove avatar) via KeyError.
    try:
        avatar_bytes = fields['avatar']
    except KeyError:
        avatar = self.user.avatar
    else:
        if avatar_bytes is not None:
            # The API expects the image as a base64 data URI string.
            avatar = utils._bytes_to_base64_data(avatar_bytes)
        else:
            avatar = None

    not_bot_account = not self.user.bot
    if not_bot_account and password is None:
        raise ClientException('Password is required for non-bot accounts.')

    args = {
        'password': password,
        'username': fields.get('username', self.user.name),
        'avatar': avatar
    }

    # email/new_password only apply to user (non-bot) accounts.
    if not_bot_account:
        args['email'] = fields.get('email', self.email)
        if 'new_password' in fields:
            args['new_password'] = fields['new_password']

    data = yield from self.http.edit_profile(**args)

    if not_bot_account:
        self.email = data['email']
        # A password/email change rotates the auth token; adopt it
        # immediately so subsequent requests keep working.
        if 'token' in data:
            self.http._token(data['token'], bot=False)
        if self.cache_auth:
            self._update_cache(self.email, password)
@asyncio.coroutine
@utils.deprecated('change_presence')
def change_status(self, game=None, idle=False):
    """|coro|

    Changes the client's status.

    ``game`` is a :class:`Game` object (not a string) describing the
    game currently being played; ``idle`` indicates whether the client
    should appear idle.

    .. deprecated:: v0.13.0
        Use :meth:`change_presence` instead.

    Parameters
    ----------
    game : Optional[:class:`Game`]
        The game being played. None if no game is being played.
    idle : bool
        Indicates if the client should go idle.

    Raises
    ------
    InvalidArgument
        If the ``game`` parameter is not :class:`Game` or None.
    """
    yield from self.ws.change_presence(game=game, idle=idle)
@asyncio.coroutine
def change_presence(self, *, game=None, status=None, afk=False):
    """|coro|

    Changes the client's presence.

    ``game`` is a :class:`Game` object (not a string) describing the
    game currently being played.

    Parameters
    ----------
    game: Optional[:class:`Game`]
        The game being played. None if no game is being played.
    status: Optional[:class:`Status`]
        The status to switch to. ``None`` means :attr:`Status.online`.
    afk: bool
        Indicates if you are going AFK. This lets the official client
        handle push notifications better when you are genuinely idle.

    Raises
    ------
    InvalidArgument
        If the ``game`` parameter is not :class:`Game` or None.
    """
    # The gateway has no "offline" status; it is expressed as "invisible".
    if status is Status.offline:
        status_str = 'invisible'
    elif status is None:
        status_str = 'online'
    else:
        status_str = str(status)
    yield from self.ws.change_presence(game=game, status=status_str, afk=afk)
@asyncio.coroutine
def change_nickname(self, member, nickname):
    """|coro|

    Changes a member's nickname.

    You must have the proper permissions to change someone's
    (or your own) nickname.

    Parameters
    ----------
    member : :class:`Member`
        The member whose nickname to change.
    nickname : Optional[str]
        The new nickname. ``None`` removes the nickname.

    Raises
    ------
    Forbidden
        You do not have permissions to change the nickname.
    HTTPException
        Changing the nickname failed.
    """
    # The API clears the nickname when given an empty string.
    nick = nickname or ''
    if member == self.user:
        # Changing our own nickname uses a dedicated endpoint.
        yield from self.http.change_my_nickname(member.server.id, nick)
    else:
        yield from self.http.change_nickname(member.server.id, member.id, nick)
# Channel management
@asyncio.coroutine
def edit_channel(self, channel, **options):
    """|coro|

    Edits a :class:`Channel`.

    You must have the proper permissions to edit the channel.
    To move the channel's position use :meth:`move_channel` instead.

    The :class:`Channel` object is not directly modified afterwards until the
    corresponding WebSocket event is received.

    Parameters
    ----------
    channel : :class:`Channel`
        The channel to update.
    name : str
        The new channel name.
    topic : str
        The new channel's topic.
    bitrate : int
        The new channel's bitrate. Voice only.
    user_limit : int
        The new channel's user limit. Voice only.

    Raises
    ------
    Forbidden
        You do not have permissions to edit the channel.
    HTTPException
        Editing the channel failed.
    """
    # The API requires name/topic/position on every edit, so any that
    # the caller did not supply are backfilled from the channel itself.
    for attr in ('name', 'topic', 'position'):
        if attr not in options:
            options[attr] = getattr(channel, attr)
    yield from self.http.edit_channel(channel.id, **options)
@asyncio.coroutine
def move_channel(self, channel, position):
    """|coro|

    Moves the specified :class:`Channel` to the given position in the GUI.
    Note that voice channels and text channels have different position values.

    The :class:`Channel` object is not directly modified afterwards until the
    corresponding WebSocket event is received.

    .. warning::

        :class:`Object` instances do not work with this function.

    Parameters
    -----------
    channel : :class:`Channel`
        The channel to change positions of.
    position : int
        The position to insert the channel to.

    Raises
    -------
    InvalidArgument
        If position is less than 0 or greater than the number of channels.
    Forbidden
        You do not have permissions to change channel order.
    HTTPException
        If moving the channel failed, or you are of too low rank to move the channel.
    """
    if position < 0:
        raise InvalidArgument('Channel position cannot be less than 0.')

    # Positions are only meaningful among channels of the same kind
    # (text vs voice), ordered by their current position.
    same_kind = sorted(
        (c for c in channel.server.channels if c.type is channel.type),
        key=lambda c: c.position)
    if position >= len(same_kind):
        raise InvalidArgument('Channel position cannot be greater than {}'.format(len(same_kind) - 1))

    if channel not in same_kind:
        # Channel is not tracked in the cache; nothing sensible to reorder.
        return

    # Re-slot the channel at the requested index and renumber everything.
    same_kind.remove(channel)
    same_kind.insert(position, channel)
    payload = [{'id': c.id, 'position': index} for index, c in enumerate(same_kind)]
    yield from self.http.move_channel_position(channel.server.id, payload)
@asyncio.coroutine
def create_channel(self, server, name, *overwrites, type=None):
    """|coro|

    Creates a :class:`Channel` in the specified :class:`Server`.

    Note that you need the proper permissions to create the channel.

    The ``overwrites`` argument list can be used to create a 'secret'
    channel upon creation. A namedtuple of :class:`ChannelPermissions`
    is exposed to create a channel-specific permission overwrite in a more
    self-documenting matter. You can also use a regular tuple of ``(target, overwrite)``
    where the ``overwrite`` expected has to be of type :class:`PermissionOverwrite`.

    Examples
    ----------

    Creating a voice channel:

    .. code-block:: python

        await client.create_channel(server, 'Voice', type=discord.ChannelType.voice)

    Creating a 'secret' text channel:

    .. code-block:: python

        everyone_perms = discord.PermissionOverwrite(read_messages=False)
        my_perms = discord.PermissionOverwrite(read_messages=True)

        everyone = discord.ChannelPermissions(target=server.default_role, overwrite=everyone_perms)
        mine = discord.ChannelPermissions(target=server.me, overwrite=my_perms)
        await client.create_channel(server, 'secret', everyone, mine)

    Or in a more 'compact' way:

    .. code-block:: python

        everyone = discord.PermissionOverwrite(read_messages=False)
        mine = discord.PermissionOverwrite(read_messages=True)
        await client.create_channel(server, 'secret', (server.default_role, everyone), (server.me, mine))

    Parameters
    -----------
    server : :class:`Server`
        The server to create the channel in.
    name : str
        The channel's name.
    type : :class:`ChannelType`
        The type of channel to create. Defaults to :attr:`ChannelType.text`.
    overwrites:
        An argument list of channel specific overwrites to apply on the channel on
        creation. Useful for creating 'secret' channels.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to create the channel.
    NotFound
        The server specified was not found.
    HTTPException
        Creating the channel failed.
    InvalidArgument
        The permission overwrite array is not in proper form.

    Returns
    -------
    :class:`Channel`
        The channel that was just created. This channel is
        different than the one that will be added in cache.
    """
    if type is None:
        type = ChannelType.text

    perms = []
    for overwrite in overwrites:
        target = overwrite[0]
        perm = overwrite[1]
        if not isinstance(perm, PermissionOverwrite):
            # NOTE: the ``type`` parameter shadows the builtin here, so
            # calling ``type(perm)`` would invoke a ChannelType value and
            # raise TypeError instead of this InvalidArgument. Use
            # ``__class__`` (equivalent to builtin type()) instead.
            raise InvalidArgument('Expected PermissionOverwrite received {0.__name__}'.format(perm.__class__))

        allow, deny = perm.pair()
        payload = {
            'allow': allow.value,
            'deny': deny.value,
            'id': target.id
        }

        # The API distinguishes member and role overwrites via a type tag.
        if isinstance(target, User):
            payload['type'] = 'member'
        elif isinstance(target, Role):
            payload['type'] = 'role'
        else:
            # Same builtin-shadowing pitfall as above; report the class name
            # via __class__ rather than the shadowed ``type``.
            raise InvalidArgument('Expected Role, User, or Member target, received {0.__name__}'.format(target.__class__))

        perms.append(payload)

    data = yield from self.http.create_channel(server.id, name, str(type), permission_overwrites=perms)
    channel = Channel(server=server, **data)
    return channel
@asyncio.coroutine
def delete_channel(self, channel):
    """|coro|

    Deletes a :class:`Channel`.

    The client must have the proper permissions in the server the
    channel belongs to.

    Parameters
    ------------
    channel : :class:`Channel`
        The channel to delete.

    Raises
    -------
    Forbidden
        You do not have proper permissions to delete the channel.
    NotFound
        The specified channel was not found.
    HTTPException
        Deleting the channel failed.
    """
    yield from self.http.delete_channel(channel.id)
# Server management
@asyncio.coroutine
def leave_server(self, server):
    """|coro|

    Leaves a :class:`Server`.

    Note
    --------
    A server you own cannot be left; it must be deleted via
    :meth:`delete_server` instead.

    Parameters
    ----------
    server : :class:`Server`
        The server to leave.

    Raises
    --------
    HTTPException
        If leaving the server failed.
    """
    yield from self.http.leave_server(server.id)
@asyncio.coroutine
def delete_server(self, server):
    """|coro|

    Deletes a :class:`Server`. Only the server owner can do this.

    Parameters
    ----------
    server : :class:`Server`
        The server to delete.

    Raises
    --------
    HTTPException
        If deleting the server failed.
    Forbidden
        You do not have permissions to delete the server.
    """
    yield from self.http.delete_server(server.id)
@asyncio.coroutine
def create_server(self, name, region=None, icon=None):
    """|coro|

    Creates a :class:`Server`.

    Bot accounts generally are not allowed to create servers;
    see Discord's official documentation for details.

    Parameters
    ----------
    name : str
        The name of the server.
    region : :class:`ServerRegion`
        The region for the voice communication server.
        Defaults to :attr:`ServerRegion.us_west`.
    icon : bytes
        The *bytes-like* object representing the icon. See
        :meth:`edit_profile` for more details on what is expected.

    Raises
    ------
    HTTPException
        Server creation failed.
    InvalidArgument
        Invalid icon image format given. Must be PNG or JPG.

    Returns
    -------
    :class:`Server`
        The server created. This is not the same server that is
        added to cache.
    """
    # The API wants the icon as a base64 data URI and the region as
    # its raw string value.
    icon_b64 = None if icon is None else utils._bytes_to_base64_data(icon)
    region_value = ServerRegion.us_west.value if region is None else region.value

    data = yield from self.http.create_server(name, region_value, icon_b64)
    return Server(**data)
@asyncio.coroutine
def edit_server(self, server, **fields):
    """|coro|

    Edits a :class:`Server`.

    You must have the proper permissions to edit the server.

    The :class:`Server` object is not directly modified afterwards until the
    corresponding WebSocket event is received.

    Parameters
    ----------
    server: :class:`Server`
        The server to edit.
    name: str
        The new name of the server.
    icon: bytes
        A *bytes-like* object representing the icon. See :meth:`edit_profile`
        for more details. Could be ``None`` to denote no icon.
    splash: bytes
        A *bytes-like* object representing the invite splash. See
        :meth:`edit_profile` for more details. Could be ``None`` to denote
        no invite splash. Only available for partnered servers with
        ``INVITE_SPLASH`` feature.
    region: :class:`ServerRegion`
        The new region for the server's voice communication.
    afk_channel: :class:`Channel`
        The new channel that is the AFK channel. Could be ``None`` for no AFK channel.
    afk_timeout: int
        The number of seconds until someone is moved to the AFK channel.
    owner: :class:`Member`
        The new owner of the server to transfer ownership to. Note that you must
        be owner of the server to do this.
    verification_level: :class:`VerificationLevel`
        The new verification level for the server.

    Raises
    -------
    Forbidden
        You do not have permissions to edit the server.
    NotFound
        The server you are trying to edit does not exist.
    HTTPException
        Editing the server failed.
    InvalidArgument
        The image format passed in to ``icon`` is invalid. It must be
        PNG or JPG. This is also raised if you are not the owner of the
        server and request an ownership transfer.
    """
    # "icon absent" (keep current) vs "icon=None" (remove) is
    # distinguished via KeyError, like edit_profile does for avatars.
    try:
        icon_bytes = fields['icon']
    except KeyError:
        icon = server.icon
    else:
        if icon_bytes is not None:
            icon = utils._bytes_to_base64_data(icon_bytes)
        else:
            icon = None

    # Same keep/remove dance for the invite splash image.
    try:
        splash_bytes = fields['splash']
    except KeyError:
        splash = server.splash
    else:
        if splash_bytes is not None:
            splash = utils._bytes_to_base64_data(splash_bytes)
        else:
            splash = None

    fields['icon'] = icon
    fields['splash'] = splash
    # Translate high-level objects into the raw IDs/strings the API expects.
    if 'afk_channel' in fields:
        fields['afk_channel_id'] = fields['afk_channel'].id

    if 'owner' in fields:
        # Ownership transfer is only valid when we currently own the server.
        if server.owner != server.me:
            raise InvalidArgument('To transfer ownership you must be the owner of the server.')

        fields['owner_id'] = fields['owner'].id

    if 'region' in fields:
        fields['region'] = str(fields['region'])

    level = fields.get('verification_level', server.verification_level)
    if not isinstance(level, VerificationLevel):
        raise InvalidArgument('verification_level field must of type VerificationLevel')

    fields['verification_level'] = level.value
    yield from self.http.edit_server(server.id, **fields)
@asyncio.coroutine
def get_bans(self, server):
    """|coro|

    Retrieves every :class:`User` banned from the specified server.

    You must have proper permissions to get this information.

    Parameters
    ----------
    server : :class:`Server`
        The server to get ban information from.

    Raises
    -------
    Forbidden
        You do not have proper permissions to get the information.
    HTTPException
        An error occurred while fetching the information.

    Returns
    --------
    list
        A list of :class:`User` that have been banned.
    """
    raw_bans = yield from self.http.get_bans(server.id)
    banned = []
    for entry in raw_bans:
        banned.append(User(**entry['user']))
    return banned
@asyncio.coroutine
def prune_members(self, server, *, days):
    """|coro|

    Prunes a :class:`Server` of its inactive members.

    A member is inactive when they have not logged on in ``days``
    days and hold no roles. Requires the "Kick Members" permission.

    To count how many members would be pruned without pruning, use
    :meth:`estimate_pruned_members`.

    Parameters
    -----------
    server: :class:`Server`
        The server to prune from.
    days: int
        The number of days before counting as inactive.

    Raises
    -------
    Forbidden
        You do not have permissions to prune members.
    HTTPException
        An error occurred while pruning members.
    InvalidArgument
        An integer was not passed for ``days``.

    Returns
    ---------
    int
        The number of members pruned.
    """
    # This is destructive, so validate eagerly instead of letting the
    # API reject a malformed value.
    if not isinstance(days, int):
        raise InvalidArgument('Expected int for ``days``, received {0.__class__.__name__} instead.'.format(days))

    response = yield from self.http.prune_members(server.id, days)
    return response['pruned']
@asyncio.coroutine
def estimate_pruned_members(self, server, *, days):
    """|coro|

    Like :meth:`prune_members`, except nothing is actually pruned:
    only the would-be count is returned.

    Parameters
    -----------
    server: :class:`Server`
        The server to estimate a prune from.
    days: int
        The number of days before counting as inactive.

    Raises
    -------
    Forbidden
        You do not have permissions to prune members.
    HTTPException
        An error occurred while fetching the prune members estimate.
    InvalidArgument
        An integer was not passed for ``days``.

    Returns
    ---------
    int
        The number of members estimated to be pruned.
    """
    if not isinstance(days, int):
        raise InvalidArgument('Expected int for ``days``, received {0.__class__.__name__} instead.'.format(days))

    response = yield from self.http.estimate_pruned_members(server.id, days)
    return response['pruned']
@asyncio.coroutine
def create_custom_emoji(self, server, *, name, image):
    """|coro|

    Creates a custom :class:`Emoji` for a :class:`Server`.

    This endpoint is only allowed for user bots or white listed
    bots. If this is done by a user bot then this is a local
    emoji that can only be used inside that server.

    There is currently a limit of 50 local emotes per server.

    Parameters
    -----------
    server: :class:`Server`
        The server to add the emoji to.
    name: str
        The emoji name. Must be at least 2 characters.
    image: bytes
        The *bytes-like* object representing the image data to use.
        Only JPG and PNG images are supported.

    Returns
    --------
    :class:`Emoji`
        The created emoji.

    Raises
    -------
    Forbidden
        You are not allowed to create emojis.
    HTTPException
        An error occurred creating an emoji.
    """
    # The API takes the image as a base64 data URI string.
    encoded = utils._bytes_to_base64_data(image)
    data = yield from self.http.create_custom_emoji(server.id, name, encoded)
    return Emoji(server=server, **data)
@asyncio.coroutine
def delete_custom_emoji(self, emoji):
    """|coro|

    Deletes a custom :class:`Emoji` from a :class:`Server`.

    This follows the same rules as :meth:`create_custom_emoji`.

    Parameters
    -----------
    emoji: :class:`Emoji`
        The emoji to delete.

    Raises
    -------
    Forbidden
        You are not allowed to delete emojis.
    HTTPException
        An error occurred deleting the emoji.
    """
    server = emoji.server
    yield from self.http.delete_custom_emoji(server.id, emoji.id)
@asyncio.coroutine
def edit_custom_emoji(self, emoji, *, name):
    """|coro|

    Edits a custom :class:`Emoji`.

    Parameters
    -----------
    emoji: :class:`Emoji`
        The emoji to edit.
    name: str
        The new emoji name.

    Raises
    -------
    Forbidden
        You are not allowed to edit emojis.
    HTTPException
        An error occurred editing the emoji.
    """
    server = emoji.server
    yield from self.http.edit_custom_emoji(server.id, emoji.id, name=name)
# Invite management
def _fill_invite_data(self, data):
    """Resolve the raw guild/channel payload of an invite into cached
    objects, or stub :class:`Object` instances with names patched in
    when the server is not in our cache."""
    guild = data['guild']
    server = self.connection._get_server(guild['id'])
    if server is None:
        # Server not cached (e.g. we are not a member): fall back to
        # lightweight Object stubs carrying just id and name.
        server = Object(id=guild['id'])
        server.name = guild['name']
        channel = Object(id=data['channel']['id'])
        channel.name = data['channel']['name']
    else:
        channel = server.get_channel(data['channel']['id'])
    data['server'] = server
    data['channel'] = channel
@asyncio.coroutine
def create_invite(self, destination, **options):
    """|coro|

    Creates an invite for the destination, which may be either a
    :class:`Server` or a :class:`Channel`.

    Parameters
    ------------
    destination
        The :class:`Server` or :class:`Channel` to create the invite to.
    max_age : int
        How long the invite should last. If it's 0 then the invite
        doesn't expire. Defaults to 0.
    max_uses : int
        How many uses the invite could be used for. If it's 0 then there
        are unlimited uses. Defaults to 0.
    temporary : bool
        Denotes that the invite grants temporary membership
        (i.e. they get kicked after they disconnect). Defaults to False.
    unique: bool
        Indicates if a unique invite URL should be created. Defaults to True.
        If this is set to False then it will return a previously created
        invite.

    Raises
    -------
    HTTPException
        Invite creation failed.

    Returns
    --------
    :class:`Invite`
        The invite that was created.
    """
    payload = yield from self.http.create_invite(destination.id, **options)
    # Resolve the raw guild/channel payload into real objects first.
    self._fill_invite_data(payload)
    return Invite(**payload)
@asyncio.coroutine
def get_invite(self, url):
    """|coro|

    Gets an :class:`Invite` from a discord.gg URL or ID.

    Note
    ------
    If the invite is for a server you have not joined, the server and
    channel attributes of the returned invite will be :class:`Object`
    with the names patched in.

    Parameters
    -----------
    url : str
        The discord invite ID or URL (must be a discord.gg URL).

    Raises
    -------
    NotFound
        The invite has expired or is invalid.
    HTTPException
        Getting the invite failed.

    Returns
    --------
    :class:`Invite`
        The invite from the URL/ID.
    """
    code = self._resolve_invite(url)
    payload = yield from self.http.get_invite(code)
    self._fill_invite_data(payload)
    return Invite(**payload)
@asyncio.coroutine
def invites_from(self, server):
    """|coro|

    Returns all active instant invites of a :class:`Server`.

    You must have proper permissions to get this information.

    Parameters
    ----------
    server : :class:`Server`
        The server to get invites from.

    Raises
    -------
    Forbidden
        You do not have proper permissions to get the information.
    HTTPException
        An error occurred while fetching the information.

    Returns
    -------
    list of :class:`Invite`
        The list of invites that are currently active.
    """
    raw_invites = yield from self.http.invites_from(server.id)
    invites = []
    for raw in raw_invites:
        # Swap the raw channel payload for the cached channel object.
        raw['channel'] = server.get_channel(raw['channel']['id'])
        raw['server'] = server
        invites.append(Invite(**raw))
    return invites
@asyncio.coroutine
def accept_invite(self, invite):
    """|coro|

    Accepts an :class:`Invite`, URL or ID to an invite.

    The URL must be a discord.gg URL, e.g. "http://discord.gg/codehere";
    the ID is just the "codehere" portion of such a URL.

    Parameters
    -----------
    invite
        The :class:`Invite` or URL to an invite to accept.

    Raises
    -------
    HTTPException
        Accepting the invite failed.
    NotFound
        The invite is invalid or expired.
    Forbidden
        You are a bot user and cannot use this endpoint.
    """
    code = self._resolve_invite(invite)
    yield from self.http.accept_invite(code)
@asyncio.coroutine
def delete_invite(self, invite):
    """|coro|

    Revokes an :class:`Invite`, URL, or ID to an invite.

    The ``invite`` parameter follows the same rules as
    :meth:`accept_invite`.

    Parameters
    ----------
    invite
        The invite to revoke.

    Raises
    -------
    Forbidden
        You do not have permissions to revoke invites.
    NotFound
        The invite is invalid or expired.
    HTTPException
        Revoking the invite failed.
    """
    code = self._resolve_invite(invite)
    yield from self.http.delete_invite(code)
# Role management
@asyncio.coroutine
def move_role(self, server, role, position):
    """|coro|

    Moves the specified :class:`Role` to the given position in the :class:`Server`.

    The :class:`Role` object is not directly modified afterwards until the
    corresponding WebSocket event is received.

    Parameters
    -----------
    server : :class:`Server`
        The server the role belongs to.
    role : :class:`Role`
        The role to edit.
    position : int
        The position to insert the role to.

    Raises
    -------
    InvalidArgument
        If position is 0, or role is server.default_role
    Forbidden
        You do not have permissions to change role order.
    HTTPException
        If moving the role failed, or you are of too low rank to move the role.
    """
    # Position 0 is reserved for @everyone (the default role), which
    # cannot be moved.
    if position == 0:
        raise InvalidArgument("Cannot move role to position 0")

    if role == server.default_role:
        raise InvalidArgument("Cannot move default role")

    if role.position == position:
        return  # Save discord the extra request.

    # Only positions between the old and new slot (inclusive) shift;
    # everything outside this window keeps its position.
    change_range = range(min(role.position, position), max(role.position, position) + 1)

    # All affected roles except the moved one, ordered by current position.
    roles = [r.id for r in sorted(filter(lambda x: (x.position in change_range) and x != role, server.roles), key=lambda x: x.position)]

    # Moving down: the role takes the lowest slot of the window;
    # moving up: the highest. zip() below pairs each ID with its new position.
    if role.position > position:
        roles.insert(0, role.id)
    else:
        roles.append(role.id)

    payload = [{"id": z[0], "position": z[1]} for z in zip(roles, change_range)]
    yield from self.http.move_role_position(server.id, payload)
@asyncio.coroutine
def edit_role(self, server, role, **fields):
    """|coro|

    Edits the specified :class:`Role` for the entire :class:`Server`.

    The :class:`Role` object is not directly modified afterwards until the
    corresponding WebSocket event is received.

    All fields except ``server`` and ``role`` are optional. To change
    the position of a role, use :func:`move_role` instead.

    .. versionchanged:: 0.8.0
        Editing now uses keyword arguments instead of editing the :class:`Role` object directly.

    Parameters
    -----------
    server : :class:`Server`
        The server the role belongs to.
    role : :class:`Role`
        The role to edit.
    name : str
        The new role name to change to.
    permissions : :class:`Permissions`
        The new permissions to change to.
    colour : :class:`Colour`
        The new colour to change to. (aliased to color as well)
    hoist : bool
        Indicates if the role should be shown separately in the online list.
    mentionable : bool
        Indicates if the role should be mentionable by others.

    Raises
    -------
    Forbidden
        You do not have permissions to change the role.
    HTTPException
        Editing the role failed.
    """
    # Accept both spellings; 'colour' wins, then 'color', then the
    # role's current colour.
    new_colour = fields.get('colour')
    if new_colour is None:
        new_colour = fields.get('color', role.colour)

    payload = {
        'name': fields.get('name', role.name),
        'permissions': fields.get('permissions', role.permissions).value,
        'color': new_colour.value,
        'hoist': fields.get('hoist', role.hoist),
        'mentionable': fields.get('mentionable', role.mentionable),
    }
    yield from self.http.edit_role(server.id, role.id, **payload)
@asyncio.coroutine
def delete_role(self, server, role):
"""|coro|
Deletes the specified :class:`Role` for the entire :class:`Server`.
Parameters
-----------
server : :class:`Server`
The server the role belongs to.
role : :class:`Role`
The role to delete.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
"""
yield from self.http.delete_role(server.id, role.id)
    @asyncio.coroutine
    def _replace_roles(self, member, roles):
        # Internal helper: overwrite the member's entire role list on the API
        # side with ``roles`` (a sequence of role ids). Callers are expected
        # to compute the final id list; no validation happens here.
        yield from self.http.replace_roles(member.id, member.server.id, roles)
@asyncio.coroutine
def add_roles(self, member, *roles):
"""|coro|
Gives the specified :class:`Member` a number of :class:`Role` s.
You must have the proper permissions to use this function.
The :class:`Member` object is not directly modified afterwards until the
corresponding WebSocket event is received.
Parameters
-----------
member : :class:`Member`
The member to give roles to.
\*roles
An argument list of :class:`Role` s to give the member.
Raises
-------
Forbidden
You do not have permissions to add roles.
HTTPException
Adding roles failed.
"""
new_roles = utils._unique(role.id for role in itertools.chain(member.roles, roles))
yield from self._replace_roles(member, new_roles)
@asyncio.coroutine
def remove_roles(self, member, *roles):
"""|coro|
Removes the :class:`Role` s from the :class:`Member`.
You must have the proper permissions to use this function.
The :class:`Member` object is not directly modified afterwards until the
corresponding WebSocket event is received.
Parameters
-----------
member : :class:`Member`
The member to revoke roles from.
\*roles
An argument list of :class:`Role` s to revoke the member.
Raises
-------
Forbidden
You do not have permissions to revoke roles.
HTTPException
Removing roles failed.
"""
new_roles = [x.id for x in member.roles]
for role in roles:
try:
new_roles.remove(role.id)
except ValueError:
pass
yield from self._replace_roles(member, new_roles)
@asyncio.coroutine
def replace_roles(self, member, *roles):
"""|coro|
Replaces the :class:`Member`'s roles.
You must have the proper permissions to use this function.
This function **replaces** all roles that the member has.
For example if the member has roles ``[a, b, c]`` and the
call is ``client.replace_roles(member, d, e, c)`` then
the member has the roles ``[d, e, c]``.
The :class:`Member` object is not directly modified afterwards until the
corresponding WebSocket event is received.
Parameters
-----------
member : :class:`Member`
The member to replace roles from.
\*roles
An argument list of :class:`Role` s to replace the roles with.
Raises
-------
Forbidden
You do not have permissions to revoke roles.
HTTPException
Removing roles failed.
"""
new_roles = utils._unique(role.id for role in roles)
yield from self._replace_roles(member, new_roles)
@asyncio.coroutine
def create_role(self, server, **fields):
"""|coro|
Creates a :class:`Role`.
This function is similar to :class:`edit_role` in both
the fields taken and exceptions thrown.
Returns
--------
:class:`Role`
The newly created role. This not the same role that
is stored in cache.
"""
data = yield from self.http.create_role(server.id)
role = Role(server=server, **data)
# we have to call edit because you can't pass a payload to the
# http request currently.
yield from self.edit_role(server, role, **fields)
return role
@asyncio.coroutine
def edit_channel_permissions(self, channel, target, overwrite=None):
"""|coro|
Sets the channel specific permission overwrites for a target in the
specified :class:`Channel`.
The ``target`` parameter should either be a :class:`Member` or a
:class:`Role` that belongs to the channel's server.
You must have the proper permissions to do this.
Examples
----------
Setting allow and deny: ::
overwrite = discord.PermissionOverwrite()
overwrite.read_messages = True
overwrite.ban_members = False
yield from client.edit_channel_permissions(message.channel, message.author, overwrite)
Parameters
-----------
channel : :class:`Channel`
The channel to give the specific permissions for.
target
The :class:`Member` or :class:`Role` to overwrite permissions for.
overwrite: :class:`PermissionOverwrite`
The permissions to allow and deny to the target.
Raises
-------
Forbidden
You do not have permissions to edit channel specific permissions.
NotFound
The channel specified was not found.
HTTPException
Editing channel specific permissions failed.
InvalidArgument
The overwrite parameter was not of type :class:`PermissionOverwrite`
or the target type was not :class:`Role` or :class:`Member`.
"""
overwrite = PermissionOverwrite() if overwrite is None else overwrite
if not isinstance(overwrite, PermissionOverwrite):
raise InvalidArgument('allow and deny parameters must be PermissionOverwrite')
allow, deny = overwrite.pair()
if isinstance(target, Member):
perm_type = 'member'
elif isinstance(target, Role):
perm_type = 'role'
else:
raise InvalidArgument('target parameter must be either Member or Role')
yield from self.http.edit_channel_permissions(channel.id, target.id, allow.value, deny.value, perm_type)
@asyncio.coroutine
def delete_channel_permissions(self, channel, target):
"""|coro|
Removes a channel specific permission overwrites for a target
in the specified :class:`Channel`.
The target parameter follows the same rules as :meth:`edit_channel_permissions`.
You must have the proper permissions to do this.
Parameters
----------
channel : :class:`Channel`
The channel to give the specific permissions for.
target
The :class:`Member` or :class:`Role` to overwrite permissions for.
Raises
------
Forbidden
You do not have permissions to delete channel specific permissions.
NotFound
The channel specified was not found.
HTTPException
Deleting channel specific permissions failed.
"""
yield from self.http.delete_channel_permissions(channel.id, target.id)
# Voice management
@asyncio.coroutine
def move_member(self, member, channel):
"""|coro|
Moves a :class:`Member` to a different voice channel.
You must have proper permissions to do this.
Note
-----
You cannot pass in a :class:`Object` instead of a :class:`Channel`
object in this function.
Parameters
-----------
member : :class:`Member`
The member to move to another voice channel.
channel : :class:`Channel`
The voice channel to move the member to.
Raises
-------
InvalidArgument
The channel provided is not a voice channel.
HTTPException
Moving the member failed.
Forbidden
You do not have permissions to move the member.
"""
if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
raise InvalidArgument('The channel provided must be a voice channel.')
yield from self.http.move_member(member.id, member.server.id, channel.id)
    @asyncio.coroutine
    def join_voice_channel(self, channel):
        """|coro|
        Joins a voice channel and creates a :class:`VoiceClient` to
        establish your connection to the voice server.
        After this function is successfully called, :attr:`voice` is
        set to the returned :class:`VoiceClient`.
        Parameters
        ----------
        channel : :class:`Channel`
            The voice channel to join to.
        Raises
        -------
        InvalidArgument
            The channel was not a voice channel.
        asyncio.TimeoutError
            Could not connect to the voice channel in time.
        ClientException
            You are already connected to a voice channel.
        OpusNotLoaded
            The opus library has not been loaded.
        Returns
        -------
        :class:`VoiceClient`
            A voice client that is fully connected to the voice server.
        """
        # Unlike move_member, a bare Object is accepted and resolved here.
        if isinstance(channel, Object):
            channel = self.get_channel(channel.id)
        if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
            raise InvalidArgument('Channel passed must be a voice channel')
        server = channel.server
        if self.is_voice_connected(server):
            raise ClientException('Already connected to a voice channel in this server')
        log.info('attempting to join voice channel {0.name}'.format(channel))
        # Predicate matching the VOICE_STATE_UPDATE event for us in this server.
        def session_id_found(data):
            user_id = data.get('user_id')
            guild_id = data.get('guild_id')
            return user_id == self.user.id and guild_id == server.id
        # register the futures for waiting -- this must happen BEFORE the
        # voice_state request below, or the events could be missed.
        session_id_future = self.ws.wait_for('VOICE_STATE_UPDATE', session_id_found)
        voice_data_future = self.ws.wait_for('VOICE_SERVER_UPDATE', lambda d: d.get('guild_id') == server.id)
        # request joining
        yield from self.ws.voice_state(server.id, channel.id)
        session_id_data = yield from asyncio.wait_for(session_id_future, timeout=10.0, loop=self.loop)
        data = yield from asyncio.wait_for(voice_data_future, timeout=10.0, loop=self.loop)
        kwargs = {
            'user': self.user,
            'channel': channel,
            'data': data,
            'loop': self.loop,
            'session_id': session_id_data.get('session_id'),
            'main_ws': self.ws
        }
        voice = VoiceClient(**kwargs)
        try:
            yield from voice.connect()
        except asyncio.TimeoutError as e:
            # Connection failed; best-effort teardown before re-raising.
            try:
                yield from voice.disconnect()
            except:
                # we don't care if disconnect failed because connection failed
                pass
            raise e # re-raise
        self.connection._add_voice_client(server.id, voice)
        return voice
def is_voice_connected(self, server):
"""Indicates if we are currently connected to a voice channel in the
specified server.
Parameters
-----------
server : :class:`Server`
The server to query if we're connected to it.
"""
voice = self.voice_client_in(server)
return voice is not None
def voice_client_in(self, server):
"""Returns the voice client associated with a server.
If no voice client is found then ``None`` is returned.
Parameters
-----------
server : :class:`Server`
The server to query if we have a voice client for.
Returns
--------
:class:`VoiceClient`
The voice client associated with the server.
"""
return self.connection._get_voice_client(server.id)
def group_call_in(self, channel):
"""Returns the :class:`GroupCall` associated with a private channel.
If no group call is found then ``None`` is returned.
Parameters
-----------
channel: :class:`PrivateChannel`
The group private channel to query the group call for.
Returns
--------
Optional[:class:`GroupCall`]
The group call.
"""
return self.connection._calls.get(channel.id)
# Miscellaneous stuff
@asyncio.coroutine
def application_info(self):
"""|coro|
Retrieve's the bot's application information.
Returns
--------
:class:`AppInfo`
A namedtuple representing the application info.
Raises
-------
HTTPException
Retrieving the information failed somehow.
"""
data = yield from self.http.application_info()
return AppInfo(id=data['id'], name=data['name'],
description=data['description'], icon=data['icon'],
owner=User(**data['owner']))
@asyncio.coroutine
def get_user_info(self, user_id):
"""|coro|
Retrieves a :class:`User` based on their ID. This can only
be used by bot accounts. You do not have to share any servers
with the user to get this information, however many operations
do require that you do.
Parameters
-----------
user_id: str
The user's ID to fetch from.
Returns
--------
:class:`User`
The user you requested.
Raises
-------
NotFound
A user with this ID does not exist.
HTTPException
Fetching the user failed.
"""
data = yield from self.http.get_user_info(user_id)
return User(**data)
|
{
"content_hash": "009287e1e2483dc2cd446c8d94c59ab8",
"timestamp": "",
"source": "github",
"line_count": 3304,
"max_line_length": 140,
"avg_line_length": 33.301755447941886,
"alnum_prop": 0.5829554026665698,
"repo_name": "OthmanEmpire/project_othbot",
"id": "c7c7f08ae3ab7d671f64834f4905d264df12aca1",
"size": "110054",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "discord/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "474615"
}
],
"symlink_target": ""
}
|
async def f11(x):
y = (await<error descr="Expression expected"> </error>for await<error descr="Expression expected"> </error>in []) # fail
await x
def f12(x):
y = (await for await in [])
return x
async def f21(x):
y = (mapper(await<error descr="Expression expected">)</error> for await<error descr="Expression expected"> </error>in []) # fail
await x
def f22(x):
y = (mapper(await) for await in [])
return x
async def f31(x):
await<error descr="Expression expected"> </error>= [] # fail
y = (i for i in await<error descr="Expression expected">)</error> # fail
await x
def f32(x):
await = []
y = (i for i in await)
return x
async def f41(x):
y = (<error descr="Python version 3.5 does not support 'await' inside comprehensions">await</error> z for z in []) # fail
await x
async def f42(x):
y = (mapper(<error descr="Python version 3.5 does not support 'await' inside comprehensions">await</error> z) for z in []) # fail
await x
async def f43(x):
y = (z for <error descr="Cannot assign to await expression">await z</error> in []) # fail
await x
async def f44(x):
y = (z for z in await x)
await x
|
{
"content_hash": "dd8cc0a83b187f95bbb490fd851ee39e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 134,
"avg_line_length": 24.12,
"alnum_prop": 0.6194029850746269,
"repo_name": "dahlstrom-g/intellij-community",
"id": "3e27aca6991c8b6ce70fdc0447a665143735e734",
"size": "1206",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "python/testData/highlighting/awaitInGenerator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Classes and functions related to IRI/URI processing, validation, resolution, etc.
Copyright 2008-2020 Uche Ogbuji and Mike Brown
"""
# Explicit public API for ``from ... import *`` consumers.
__all__ = [
    'IriError',
    'I',
    # IRI tools
    "iri_to_uri",
    "nfc_normalize",
    "convert_ireg_name",
    # RFC 3986 implementation
    'matches_uri_ref_syntax', 'matches_uri_syntax',
    'percent_encode', 'percent_decode',
    'split_uri_ref', 'unsplit_uri_ref',
    'split_authority', 'split_fragment',
    'absolutize', 'relativize', 'remove_dot_segments',
    'normalize_case', 'normalize_percent_encoding',
    'normalize_path_segments', 'normalize_path_segments_in_uri',
    # RFC 3151 implementation
    'urn_to_public_id', 'public_id_to_urn',
    # Miscellaneous
    'is_absolute', 'get_scheme', 'strip_fragment',
    'os_path_to_uri', 'uri_to_os_path', 'basejoin', 'join',
    'WINDOWS_SLASH_COMPAT', 'path_resolve',
]
import os, sys
import urllib, urllib.request
import re, io
import email
from string import ascii_letters
from email.utils import formatdate as _formatdate
from uuid import UUID, uuid1, uuid4
from .irihelper import I
# whether os_path_to_uri should treat "/" same as "\" in a Windows path
WINDOWS_SLASH_COMPAT = True
# default separator between hierarchical path segments (RFC 3986 sec. 3.3)
DEFAULT_HIERARCHICAL_SEP = '/'
# octets safe to percent-decode by default: the RFC 3986 "unreserved" set
# (ASCII alphanumerics plus "-._~"), as a bytes object
PERCENT_DECODE_BYTES = ('0123456789%s-._~' % ascii_letters).encode('ascii')
class IriError(Exception):
    """
    Exception raised for errors encountered during URI/IRI processing
    (validation, resolution, conversion, etc.).
    """
#FIXME: Re-incorporate the exception details below
'''
return {
IriError.INVALID_BASE_URI: _(
"Invalid base URI: %(base)r cannot be used to resolve "
" reference %(ref)r"),
IriError.RELATIVE_BASE_URI: _(
"Invalid base URI: %(base)r cannot be used to resolve "
"reference %(ref)r; the base URI must be absolute, not "
"relative."),
IriError.NON_FILE_URI: _(
"Only a 'file' URI can be converted to an OS-specific path; "
"URI given was %(uri)r"),
IriError.UNIX_REMOTE_HOST_FILE_URI: _(
"A URI containing a remote host name cannot be converted to a "
" path on posix; URI given was %(uri)r"),
IriError.RESOURCE_ERROR: _(
"Error retrieving resource %(loc)r: %(msg)s"),
IriError.UNSUPPORTED_PLATFORM: _(
"Platform %(platform)r not supported by URI function "
"%(function)s"),
IriError.SCHEME_REQUIRED: _(
"Scheme-based resolution requires a URI with a scheme; "
"neither the base URI %(base)r nor the reference %(ref)r "
"have one."),
IriError.INVALID_PUBLIC_ID_URN: _(
"A public ID cannot be derived from URN %(urn)r "
"because it does not conform to RFC 3151."),
IriError.UNSUPPORTED_SCHEME: _(
"The URI scheme %(scheme)s is not supported by resolver "),
IriError.DENIED_BY_RULE: _(
"Access to IRI %(uri)r was denied by action of an IRI restriction"),
}
'''
def iri_to_uri(iri, convertHost=False):
    r"""
    Converts an IRI or IRI reference to a URI or URI reference,
    implementing sec. 3.1 of draft-duerst-iri-10.

    :param iri: the IRI to convert. Ordinarily this should be a unicode
        string. If given as a byte string it is assumed to be UTF-8
        encoded, is decoded accordingly, and (as per the requirements of
        the conversion algorithm) is NOT normalized.
    :param convertHost: when True, also convert the ireg-name (host)
        component of the IRI to an RFC 2396-compatible URI reg-name
        (IDNA encoded), e.g.
        iri_to_uri('http://r\xe9sum\xe9.example.org/', convertHost=False)
        => 'http://r%C3%A9sum%C3%A9.example.org/'
        iri_to_uri('http://r\xe9sum\xe9.example.org/', convertHost=True)
        => 'http://xn--rsum-bpad.example.org/'
    :return: the URI (reference) as a str
    :raises ValueError: on characters forbidden in an IRI, or on invalid
        surrogate pairing.
    """
    if not isinstance(iri, str):
        # BUG FIX: byte strings are decoded as UTF-8 and not normalized,
        # as documented; previously nfc_normalize() was called on the
        # bytes object, which raises TypeError.
        iri = iri.decode('utf-8')
    if convertHost:
        # BUG FIX: the IDNA host conversion is now gated on convertHost;
        # previously it ran unconditionally, contradicting the documented
        # convertHost=False behavior.
        (scheme, auth, path, query, frag) = split_uri_ref(iri)
        if auth and auth.find('@') > -1:
            userinfo, hostport = auth.split('@')
        else:
            userinfo = None
            hostport = auth
        if hostport and hostport.find(':') > -1:
            host, port = hostport.split(':')
        else:
            host = hostport
            port = None
        if host:
            host = convert_ireg_name(host)
            auth = ''
            if userinfo:
                auth += userinfo + '@'
            auth += host
            if port:
                auth += ':' + port
            iri = unsplit_uri_ref((scheme, auth, path, query, frag))
    res = ''
    pos = 0
    #FIXME: use re.subn with substitution function for big speed-up
    surrogate = None
    # NOTE: error messages were previously wrapped in _(), a gettext alias
    # that is never defined in this module (it raised NameError).
    for c in iri:
        cp = ord(c)
        # BUG FIX: was ``cp > 128``, which let U+0080 (a C1 control that the
        # C1 check below is meant to reject) pass through unencoded.
        if cp >= 128:
            if cp < 160:
                # C1 controls (U+0080-U+009F) are not legal in an IRI
                raise ValueError("Illegal character at position %d (0-based) of IRI %r" % (pos, iri))
            # 'for c in iri' may give us surrogate pairs
            elif cp > 55295:
                if cp < 56320:
                    # d800-dbff: high surrogate; hold it until the low half
                    surrogate = c
                    continue
                elif cp < 57344:
                    # dc00-dfff: low surrogate; must follow a high surrogate
                    if surrogate is None:
                        raise ValueError("Illegal surrogate pair in %r" % iri)
                    c = surrogate + c
                else:
                    raise ValueError("Illegal surrogate pair in %r" % iri)
                surrogate = None
            # BUG FIX: iterating a bytes object yields ints in Python 3;
            # the original applied ord() to each int, raising TypeError
            # for every non-ASCII character.
            for octet in c.encode('utf-8'):
                res += '%%%02X' % octet
        else:
            res += c
        pos += 1
    return res
def nfc_normalize(iri):
    """
    Return *iri* normalized according to Unicode Normalization Form C
    (NFC), so that it can be used as an IRI or IRI reference.
    """
    # Imported lazily; unicodedata is only needed if this is ever called.
    import unicodedata
    return unicodedata.normalize('NFC', iri)
def convert_ireg_name(iregname):
    """
    Converts the given ireg-name (host) component of an IRI to a string
    suitable for use as a URI reg-name in pre-rfc2396bis schemes and
    resolvers, and returns it.
    """
    # The stdlib 'idna' codec has not been verified against the exact
    # algorithm the IRI spec requires, but it handles the spec's example.
    encoded = iregname.encode('idna')
    return encoded.decode('ascii')
#=============================================================================
# Functions that implement aspects of RFC 3986
#
# Lazy-init guard: set to True once URI_PATTERN / URI_REF_PATTERN exist.
_validation_setup_completed = False
def _init_uri_validation_regex():
    """
    Called internally to compile the regular expressions needed by
    URI validation functions, just once, the first time a function
    that needs them is called.

    Publishes two module globals: URI_PATTERN (strict URI matcher) and
    URI_REF_PATTERN (strict URI-reference matcher).
    """
    global _validation_setup_completed
    if _validation_setup_completed:
        return
    #-------------------------------------------------------------------------
    # Regular expressions for determining the non-URI-ness of strings
    #
    # A given string's designation as a URI or URI reference comes from the
    # context in which it is being used, not from its syntax; a regular
    # expression can at most only determine whether a given string COULD be a
    # URI or URI reference, based on its lexical structure.
    #
    # 1. Altova's regex (in the public domain; courtesy Altova)
    #
    # # based on the BNF grammar in the original RFC 2396
    # ALTOVA_REGEX = r"(([a-zA-Z][0-9a-zA-Z+\-\.]*:)?/{0,2}" + \
    #                r"[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?" + \
    #                r"(#[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?"
    #
    # This regex matches URI references, and thus URIs as well. It is also
    # lenient; some strings that are not URI references can falsely match.
    #
    # It is also not very useful as-is, because it essentially has the form
    # (group1)?(group2)? -- this matches the empty string, and in fact any
    # string or substring can be said to match this pattern. To be useful,
    # this regex (and any like it) must be changed so that it only matches
    # an entire string. This is accomplished in Python by using the \A and \Z
    # delimiters around the pattern:
    #
    # BETTER_ALTOVA_REGEX = r"\A(?!\n)%s\Z" % ALTOVA_REGEX
    #
    # The (?!\n) takes care of an edge case where a string consisting of a
    # sole linefeed character would falsely match.
    #
    # 2. Python regular expressions for strict validation of URIs and URI
    #    references (in the public domain; courtesy Fourthought, Inc.)
    #
    # Note that we do not use any \d or \w shortcuts, as these are
    # potentially locale or Unicode sensitive.
    #
    # # based on the ABNF in RFC 3986,
    # # "Uniform Resource Identifier (URI): Generic Syntax"
    pchar = r"(?:[0-9A-Za-z\-_\.!~*'();:@&=+$,]|(?:%[0-9A-Fa-f]{2}))"
    fragment = r"(?:[0-9A-Za-z\-_\.!~*'();:@&=+$,/?]|(?:%[0-9A-Fa-f]{2}))*"
    query = fragment
    segment_nz_nc = r"(?:[0-9A-Za-z\-_\.!~*'();@&=+$,]|(?:%[0-9A-Fa-f]{2}))+"
    segment_nz = r'%s+' % pchar
    segment = r'%s*' % pchar
    #path_empty = r''  # zero characters
    path_rootless = r'%s(?:/%s)*' % (segment_nz, segment)  # begins with a segment
    path_noscheme = r'%s(?:/%s)*' % (segment_nz_nc, segment)  # begins with a non-colon segment
    path_absolute = r'/(?:%s)?' % path_rootless  # begins with "/" but not "//"
    path_abempty = r'(?:/%s)*' % segment  # begins with "/" or is empty
    #path = r'(?:(?:%s)|(?:%s)|(?:%s)|(?:%s))?' % (path_abempty, path_absolute, path_noscheme, path_rootless)
    domainlabel = r'[0-9A-Za-z](?:[0-9A-Za-z\-]{0,61}[0-9A-Za-z])?'
    qualified = r'(?:\.%s)*\.?' % domainlabel
    reg_name = r"(?:(?:[0-9A-Za-z\-_\.!~*'();&=+$,]|(?:%[0-9A-Fa-f]{2}))*)"
    dec_octet = r'(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])'
    IPv4address = r'(?:%s\.){3}(?:%s)' % (dec_octet, dec_octet)
    h16 = r'[0-9A-Fa-f]{1,4}'
    ls32 = r'(?:(?:%s:%s)|%s)' % (h16, h16, IPv4address)
    IPv6address = r'(?:' + \
                  r'(?:(?:%s:){6}%s)' % (h16, ls32) + \
                  r'|(?:::(?:%s:){5}%s)' % (h16, ls32) + \
                  r'|(?:%s?::(?:%s:){4}%s)' % (h16, h16, ls32) + \
                  r'|(?:(?:(?:%s:)?%s)?::(?:%s:){3}%s)' % (h16, h16, h16, ls32) + \
                  r'|(?:(?:(?:%s:)?%s){0,2}::(?:%s:){2}%s)' % (h16, h16, h16, ls32) + \
                  r'|(?:(?:(?:%s:)?%s){0,3}::%s:%s)' % (h16, h16, h16, ls32) + \
                  r'|(?:(?:(?:%s:)?%s){0,4}::%s)' % (h16, h16, ls32) + \
                  r'|(?:(?:(?:%s:)?%s){0,5}::%s)' % (h16, h16, h16) + \
                  r'|(?:(?:(?:%s:)?%s){0,6}::)' % (h16, h16) + \
                  r')'
    IPvFuture = r"(?:v[0-9A-Fa-f]+\.[0-9A-Za-z\-\._~!$&'()*+,;=:]+)"
    IP_literal = r'\[(?:%s|%s)\]' % (IPv6address, IPvFuture)
    port = r'[0-9]*'
    host = r'(?:%s|%s|%s)?' % (IP_literal, IPv4address, reg_name)
    userinfo = r"(?:[0-9A-Za-z\-_\.!~*'();:@&=+$,]|(?:%[0-9A-Fa-f]{2}))*"
    authority = r'(?:%s@)?%s(?::%s)?' % (userinfo, host, port)
    scheme = r'[A-Za-z][0-9A-Za-z+\-\.]*'
    #absolute_URI = r'%s:%s(?:\?%s)?' % (scheme, hier_part, query)
    relative_part = r'(?:(?://%s%s)|(?:%s)|(?:%s))?' % (authority, path_abempty,
                                                        path_absolute, path_noscheme)
    relative_ref = r'%s(?:\?%s)?(?:#%s)?' % (relative_part, query, fragment)
    hier_part = r'(?:(?://%s%s)|(?:%s)|(?:%s))?' % (authority, path_abempty,
                                                    path_absolute, path_rootless)
    URI = r'%s:%s(?:\?%s)?(?:#%s)?' % (scheme, hier_part, query, fragment)
    URI_reference = r'(?:%s|%s)' % (URI, relative_ref)
    # \A..\Z anchor the whole string; (?!\n) rejects a lone trailing newline
    STRICT_URI_PYREGEX = r"\A%s\Z" % URI
    STRICT_URIREF_PYREGEX = r"\A(?!\n)%s\Z" % URI_reference
    global URI_PATTERN, URI_REF_PATTERN
    URI_PATTERN = re.compile(STRICT_URI_PYREGEX)  # strict checking for URIs
    URI_REF_PATTERN = re.compile(STRICT_URIREF_PYREGEX)  # strict checking for URI refs
    _validation_setup_completed = True
    return
def matches_uri_ref_syntax(s):
    """
    Return True if the given string could lexically be a URI reference, as
    defined in RFC 3986. A URI reference can be a URI or certain portions
    of one (including the empty string), and it can have a fragment
    component.
    """
    if not _validation_setup_completed:
        _init_uri_validation_regex()
    return bool(URI_REF_PATTERN.match(s))
def matches_uri_syntax(s):
    """
    Return True if the given string could lexically be a URI, as defined in
    RFC 3986. A URI is by definition absolute (begins with a scheme) and
    does not end with a #fragment; it must also adhere to various other
    syntax rules.
    """
    if not _validation_setup_completed:
        _init_uri_validation_regex()
    return bool(URI_PATTERN.match(s))
_split_uri_ref_setup_completed = False
def _init_split_uri_ref_pattern():
"""
Called internally to compile the regular expression used by
split_uri_ref() just once, the first time the function is called.
"""
global _split_uri_ref_setup_completed
if _split_uri_ref_setup_completed:
return
# Like the others, this regex is also in the public domain.
# It is based on this one, from RFC 3986 appendix B
# (unchanged from RFC 2396 appendix B):
# ^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?
regex = r"^(?:(?P<scheme>[^:/?#]+):)?(?://(?P<authority>[^/?#]*))?(?P<path>[^?#]*)(?:\?(?P<query>[^#]*))?(?:#(?P<fragment>.*))?$"
global SPLIT_URI_REF_PATTERN
SPLIT_URI_REF_PATTERN = re.compile(regex)
_split_uri_ref_setup_completed = True
return
def split_uri_ref(iri_ref):
    """
    Given a valid URI reference as a string, returns a tuple of the generic
    URI components, as per RFC 3986 appendix B. The tuple's structure is
    (scheme, authority, path, query, fragment); each value is a (possibly
    empty) string, or None if undefined.
    Note that per RFC 3986, there is no distinction between a path and
    an "opaque part", as there was in RFC 2396.
    """
    if not _split_uri_ref_setup_completed:
        _init_split_uri_ref_pattern()
    # The pattern matches every possible string, so .match() cannot
    # return None here.
    parts = SPLIT_URI_REF_PATTERN.match(iri_ref).groupdict()
    return (parts['scheme'], parts['authority'], parts['path'],
            parts['query'], parts['fragment'])
def unsplit_uri_ref(iri_refSeq):
    """
    Given a (scheme, authority, path, query, fragment) sequence, as would
    be produced by split_uri_ref(), assembles and returns the URI
    reference as a string. None components are omitted along with their
    delimiter; empty-string components are kept.

    :raises TypeError: if iri_refSeq is not a tuple or list.
    """
    if not isinstance(iri_refSeq, (tuple, list)):
        # BUG FIX: the message was wrapped in _(), a gettext alias never
        # defined in this module, so the intended TypeError was masked by
        # a NameError.
        raise TypeError("sequence expected, got %s" % type(iri_refSeq))
    (scheme, authority, path, query, fragment) = iri_refSeq
    uri = ''
    if scheme is not None:
        uri += scheme + ':'
    if authority is not None:
        uri += '//' + authority
    uri += path
    if query is not None:
        uri += '?' + query
    if fragment is not None:
        uri += '#' + fragment
    return uri
_split_authority_setup_completed = False
def _init_split_authority_pattern():
"""
Called internally to compile the regular expression used by
split_authority() just once, the first time the function is called.
"""
global _split_authority_setup_completed
if _split_authority_setup_completed:
return
global SPLIT_AUTHORITY_PATTERN
regex = r'(?:(?P<userinfo>[^@]*)@)?(?P<host>[^:]*)(?::(?P<port>.*))?'
SPLIT_AUTHORITY_PATTERN = re.compile(regex)
_split_authority_setup_completed = True
return
def split_authority(authority):
    """
    Given a string representing the authority component of a URI, returns
    a tuple of the subcomponents (userinfo, host, port). No
    percent-decoding is performed.
    """
    if not _split_authority_setup_completed:
        _init_split_authority_pattern()
    match = SPLIT_AUTHORITY_PATTERN.match(authority)
    if match is None:
        # Fallback: treat the whole string as the host.
        return (None, authority, None)
    return match.groups()
def split_fragment(uri):
"""
Given a URI or URI reference, returns a tuple consisting of
(base, fragment), where base is the portion before the '#' that
precedes the fragment component.
"""
# The only '#' in a legit URI will be the fragment separator,
# but in the wild, people get sloppy. Assume the last '#' is it.
pos = uri.rfind('#')
if pos == -1:
return (uri, uri[:0])
else:
return (uri[:pos], uri[pos+1:])
# "unreserved" characters are allowed in a URI, and do not have special
# meaning as delimiters of URI components or subcomponents. They may
# appear raw or percent-encoded, but percent-encoding is discouraged.
# This set of characters is sufficiently long enough that using a
# compiled regex is faster than using a string with the "in" operator.
#UNRESERVED_PATTERN = re.compile(r"[0-9A-Za-z\-\._~!*'()]") # RFC 2396
UNRESERVED_PATTERN = re.compile(r'[0-9A-Za-z\-\._~]') # RFC 3986
# "reserved" characters are allowed in a URI, but they may or always do
# have special meaning as delimiters of URI components or subcomponents.
# When being used as delimiters, they must be raw, and when not being
# used as delimiters, they must be percent-encoded.
# This set of characters is sufficiently short enough that using a
# string with the "in" operator is faster than using a compiled regex.
# The characters in the string are ordered according to how likely they
# are to be found (approximately), for faster operation with "in".
#RESERVED = "/&=+?;@,:$[]" # RFC 2396 + RFC 2732
RESERVED = "/=&+?#;@,:$!*[]()'" # RFC 3986
def percent_encode(s, encoding='utf-8', encodeReserved=True, spaceToPlus=False,
nlChars=None, reservedChars=RESERVED):
"""
[*** Experimental API ***] This function applies percent-encoding, as
described in RFC 3986 sec. 2.1, to the given string, in order to prepare
the string for use in a URI. It replaces characters that are not allowed
in a URI. By default, it also replaces characters in the reserved set,
which normally includes the generic URI component delimiters ":" "/"
"?" \"#\" "[" "]" "@" and the subcomponent delimiters "!" "$" "&" "\'" "("
")" "*" "+" "," ";" "=".
Ideally, this function should be used on individual components or
subcomponents of a URI prior to assembly of the complete URI, not
afterward, because this function has no way of knowing which characters
in the reserved set are being used for their reserved purpose and which
are part of the data. By default it assumes that they are all being used
as data, thus they all become percent-encoded.
The characters in the reserved set can be overridden from the default by
setting the reservedChars argument. The percent-encoding of characters
in the reserved set can be disabled by unsetting the encodeReserved flag.
Do this if the string is an already-assembled URI or a URI component,
such as a complete path.
The encoding argument will be used to determine the percent-encoded octets
for characters that are not in the U+0000 to U+007F range. The codec
identified by the encoding argument must return a byte string.
The spaceToPlus flag controls whether space characters are changed to
"+" characters in the result, rather than being percent-encoded.
Generally, this is not required, and given the status of "+" as a
reserved character, is often undesirable. But it is required in certain
situations, such as when generating application/x-www-form-urlencoded
content or RFC 3151 public identifier URNs, so it is supported here.
The nlChars argument, if given, is a sequence type in which each member
is a substring that indicates a "new line". Occurrences of this substring
will be replaced by '%0D%0A' in the result, as is required when generating
application/x-www-form-urlencoded content.
This function is similar to urllib.quote(), but is more conformant and
Unicode-friendly. Suggestions for improvements welcome.
>>> from amara3 import iri
>>> iri.percent_encode('http://bibfra.me/vocab/relation/論定')
http%3A%2F%2Fbibfra.me%2Fvocab%2Frelation%2F%E8%AB%96%E5%AE%9A
"""
res = ''
if nlChars is not None:
for c in nlChars:
s.replace(c, '\r\n')
#FIXME: use re.subn with substitution function for big speed-up
for c in s:
# surrogates? -> percent-encode according to given encoding
if UNRESERVED_PATTERN.match(c) is None:
cp = ord(c)
# ASCII range?
if cp < 128:
# space? -> plus if desired
if spaceToPlus and c == ' ':
res += '+'
# reserved? -> percent-encode if desired
elif c in reservedChars:
if encodeReserved:
res += '%%%02X' % cp
else:
res += c
# not unreserved or reserved, so percent-encode
# FIXME: should percent-encode according to given encoding;
# ASCII range is not special!
else:
res += '%%%02X' % cp
# non-ASCII-range unicode?
else:
# percent-encode according to given encoding
for octet in c.encode(encoding):
res += '%%%02X' % octet
# unreserved -> safe to use as-is
else:
res += c
return res
_ASCII_PAT = re.compile('([\x00-\x7f]+)')
_HEXDIG = '0123456789ABCDEFabcdef'
_HEXTOBYTE = None
def _unquote_to_bytes(s, decodable=None):
"""_unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not s:
# Is it a string-like object?
s.split
return b''
if isinstance(s, str):
s = s.encode('utf-8')
bits = s.split(b'%')
if len(bits) == 1:
return s
res = [bits[0]]
append = res.append
# Delay the initialization of the table to not waste memory
# if the function is never called
global _HEXTOBYTE
if _HEXTOBYTE is None:
_HEXTOBYTE = {(a + b).encode(): bytes([int(a + b, 16)])
for a in _HEXDIG for b in _HEXDIG}
for item in bits[1:]:
try:
c = chr(int(item[:2], 16)).encode('ascii')
if decodable is None or c in decodable:
append(_HEXTOBYTE[item[:2]])
append(item[2:])
#FIXME: We'll need to do our own surrogate pair decoding because:
#>>> '\ud800'.encode('utf-8') -> UnicodeEncodeError: 'utf-8' codec can't encode character '\ud800' in position 0: surrogates not allowed
else:
append(b'%')
append(item)
except (ValueError, KeyError):
append(b'%')
append(item)
return b''.join(res)
#>>> from amara3.iri import percent_decode
#>>> u0 = 'example://A/b/c/%7bfoo%7d'
#>>> u1 = percent_decode(u0)
#>>> u1
#'example://A/b/c/{foo}'
def percent_decode(s, encoding='utf-8', decodable=None, errors='replace'):
    """
    [*** Experimental API ***] Reverses the percent-encoding of the given
    string.
    Similar to urllib.parse.unquote()
    By default every percent-encoded sequence is decoded, but if a byte
    string is supplied via the 'decodable' argument, only the sequences
    corresponding to those octets are decoded. Decoded bytes are converted
    back to text (Unicode) using the given encoding and error policy; for
    example, 'abc%E2%80%A2' becomes 'abc\u2022' under UTF-8, because the
    byte sequence E2 80 A2 represents U+2022.
    This function is intended for use on the portions of a URI that are
    delimited by reserved characters (see percent_encode), or on a value from
    data of media type application/x-www-form-urlencoded.
    >>> from amara3.iri import percent_decode
    >>> u0 = 'http://host/abc%E2%80%A2/x/y/z'
    >>> u1 = percent_decode(u0)
    >>> hex(ord(u1[15]))
    '0x2022'
    """
    # Fast path: nothing to decode.
    if '%' not in s:
        return s
    encoding = 'utf-8' if encoding is None else encoding
    errors = 'replace' if errors is None else errors
    # _ASCII_PAT.split() alternates non-ASCII runs (even indices) with the
    # captured ASCII runs (odd indices). Percent sequences consist solely of
    # ASCII characters, so only the odd pieces need unquoting.
    pieces = _ASCII_PAT.split(s)
    decoded = [pieces[0]]
    for idx in range(1, len(pieces), 2):
        raw = _unquote_to_bytes(pieces[idx], decodable=decodable)
        decoded.append(raw.decode(encoding, errors))
        decoded.append(pieces[idx + 1])
    return ''.join(decoded)
def absolutize(iri_ref, base_iri, limit_schemes=None):
    """
    Resolves a IRI reference to absolute form, effecting the result of RFC
    3986 section 5. The IRI reference is considered to be relative to the
    given base IRI.
    iri_ref - relative URI to be resolved into absolute form. If already
    absolute, it will be returned as is.
    base_iri - base IRI for resolving iri_ref. If '' or None iri_ref will be
    returned as is. base_iri should match the absolute-URI syntax rule of
    RFC 3986, and its path component should not contain '.' or '..' segments
    if the scheme is hierarchical. If these are violated you may get unexpected
    results.
    limit_schemes - optional collection of scheme names; if given and the
    base IRI's scheme is not in the collection, ValueError is raised.
    This function only conducts a minimal sanity check in order to determine
    if relative resolution is possible: it raises a ValueError if the base
    URI does not have a scheme component. While it is true that the base URI
    is irrelevant if the URI reference has a scheme, an exception is raised
    in order to signal that the given string does not even come close to
    meeting the criteria to be usable as a base URI.
    It is the caller's responsibility to make a determination of whether the
    URI reference constitutes a "same-document reference", as defined in RFC
    2396 or RFC 3986. As per the spec, dereferencing a same-document
    reference "should not" involve retrieval of a new representation of the
    referenced resource. Note that the two specs have different definitions
    of same-document reference: RFC 2396 says it is *only* the cases where the
    reference is the empty string, or "#" followed by a fragment; RFC 3986
    requires making a comparison of the base URI to the absolute form of the
    reference (as is returned by the spec), minus its fragment component,
    if any.
    This function is similar to urlparse.urljoin() and urllib.basejoin().
    Those functions, however, are (as of Python 2.3) outdated, buggy, and/or
    designed to produce results acceptable for use with other core Python
    libraries, rather than being earnest implementations of the relevant
    specs. Their problems are most noticeable in their handling of
    same-document references and 'file:' URIs, both being situations that
    come up far too often to consider the functions reliable enough for
    general use.
    """
    # This procedure is based on the pseudocode in RFC 3986 sec. 5.2.
    # (urllib.basejoin()/urlparse.urljoin() are deliberately avoided: both
    # are partial implementations of long-obsolete specs, accept relative
    # bases, and mishandle '' / '..' references, empty 'file:' authorities
    # and dot-segment collapsing.)
    #
    # An empty base means "no resolution": return the reference as is, as
    # we also do when the reference is already absolute.
    if not base_iri or is_absolute(iri_ref):
        return iri_ref
    # ensure base URI is absolute (the empty-base case returned above)
    if not is_absolute(base_iri):
        raise ValueError("Invalid base URI: {base} cannot be used to resolve "
                         "reference {ref}; the base URI must be absolute, not "
                         "relative.".format(base=base_iri, ref=iri_ref))
    if limit_schemes and get_scheme(base_iri) not in limit_schemes:
        scheme = get_scheme(base_iri)
        raise ValueError("The URI scheme {scheme} is not supported by resolver".format(scheme=scheme))
    # shortcut for the simplest same-document reference cases
    if iri_ref == '' or iri_ref[0] == '#':
        return base_iri.split('#')[0] + iri_ref
    # ensure a clean slate
    tScheme = tAuth = tPath = tQuery = None
    # parse the reference into its components
    (rScheme, rAuth, rPath, rQuery, rFrag) = split_uri_ref(iri_ref)
    # if the reference is absolute, eliminate '.' and '..' path segments
    # and skip to the end
    if rScheme is not None:
        tScheme = rScheme
        tAuth = rAuth
        tPath = remove_dot_segments(rPath)
        tQuery = rQuery
    else:
        # the base URI's scheme, and possibly more, will be inherited
        (bScheme, bAuth, bPath, bQuery, bFrag) = split_uri_ref(base_iri)
        # if the reference is a net-path, just eliminate '.' and '..' path
        # segments; no other changes needed.
        if rAuth is not None:
            tAuth = rAuth
            tPath = remove_dot_segments(rPath)
            tQuery = rQuery
        # if it's not a net-path, we need to inherit pieces of the base URI
        else:
            # use base URI's path if the reference's path is empty
            if not rPath:
                tPath = bPath
                # use the reference's query, if any, or else the base URI's.
                # (Bug fix: the old ``rQuery is not None and rQuery or
                # bQuery`` idiom dropped an empty-but-present reference
                # query, wrongly inheriting the base query; RFC 3986
                # sec. 5.2.2 tests for "defined", not truthiness.)
                tQuery = rQuery if rQuery is not None else bQuery
            # the reference's path is not empty
            else:
                # just use the reference's path if it's absolute
                if rPath[0] == '/':
                    tPath = remove_dot_segments(rPath)
                # merge the reference's relative path with the base URI's path
                else:
                    if bAuth is not None and not bPath:
                        tPath = '/' + rPath
                    else:
                        tPath = bPath[:bPath.rfind('/')+1] + rPath
                    tPath = remove_dot_segments(tPath)
                # use the reference's query
                tQuery = rQuery
            # since the reference isn't a net-path,
            # use the authority from the base URI
            tAuth = bAuth
        # inherit the scheme from the base URI
        tScheme = bScheme
    # always use the reference's fragment (but no need to define another var)
    #tFrag = rFrag
    # now compose the target URI (RFC 3986 sec. 5.3)
    return unsplit_uri_ref((tScheme, tAuth, tPath, tQuery, rFrag))
def relativize(targetUri, againstUri, subPathOnly=False):
    """
    This method returns a relative URI that is consistent with `targetURI`
    when resolved against `againstUri`. If no such relative URI exists, for
    whatever reason, this method returns `None`.
    To be precise, if a string called `rel` exists such that
    ``absolutize(rel, againstUri) == targetUri``, then `rel` is returned by
    this function. In these cases, `relativize` is in a sense the inverse
    of `absolutize`. In all other cases, `relativize` returns `None`.
    The following idiom may be useful for obtaining compliant relative
    reference strings (e.g. for `path`) for use in other methods of this
    package::
        path = relativize(os_path_to_uri(path), os_path_to_uri('.'))
    If `subPathOnly` is `True`, then this method will only return a relative
    reference if such a reference exists relative to the last hierarchical
    segment of `againstUri`. In particular, this relative reference will
    not start with '/' or '../'.
    """
    # We might want to change the semantics slightly to allow a relative
    # target URI to be a valid "relative path" (and just return it). For
    # now, though, absolute URIs only.
    if not is_absolute(targetUri) or not is_absolute(againstUri):
        return None
    # Normalize out '.'/'..' first so the segment-by-segment comparison
    # below is not confused by unresolved dot segments.
    targetUri = normalize_path_segments_in_uri(targetUri)
    againstUri = normalize_path_segments_in_uri(againstUri)
    # absolutize(x, x) returns x unchanged for an absolute URI; it is used
    # here purely as a defensive canonicalization before splitting.
    splitTarget = list(split_uri_ref(absolutize(targetUri, targetUri)))
    splitAgainst = list(split_uri_ref(absolutize(againstUri, againstUri)))
    # No relative reference can cross a scheme or authority boundary.
    if not splitTarget[:2] == splitAgainst[:2]:
        return None
    # NOTE(review): subPathSplit is never read below -- looks like leftover
    # scaffolding; candidate for removal.
    subPathSplit = [None, None] + splitTarget[2:]
    targetPath = splitTarget[2]
    # An empty basis path is treated as the root path.
    againstPath = splitAgainst[2] or '/'
    leadingSlash = False
    # Both paths must agree on absoluteness (a leading '/') or no relative
    # form exists.
    if targetPath[:1] == '/' or againstPath[:1] == '/':
        if targetPath[:1] == againstPath[:1]:
            targetPath = targetPath[1:]
            againstPath = againstPath[1:]
            leadingSlash = True
        else:
            return None
    targetPathSegments = targetPath.split('/')
    againstPathSegments = againstPath.split('/')
    # Count the number of path segments in common.
    i = 0
    while True:
        # Stop if we get to the end of either segment list.
        if not(len(targetPathSegments) > i and
               len(againstPathSegments) > i):
            break
        # Increment the count when the lists agree, unless we are at the
        # last segment of either list and that segment is an empty segment.
        # We bail on this case because an empty ending segment in one path
        # must not match a mid-path empty segment in the other.
        if (targetPathSegments[i] == againstPathSegments[i]
            and not (i + 1 == len(againstPathSegments) and
                     '' == againstPathSegments[i])
            and not (i + 1 == len(targetPathSegments) and
                     '' == targetPathSegments[i])):
            i = i + 1
        # Otherwise stop.
        else:
            break
    # The target path has `i` segments in common with the basis path, and
    # the last segment (after the final '/') doesn't matter; we'll need to
    # traverse the rest.
    traverse = len(againstPathSegments) - i - 1
    relativePath = None
    # If the two paths do not agree on any segments, we have two special
    # cases.
    if i == 0 and leadingSlash:
        # First, if the ruling path only had one segment, then our result
        # can be a relative path.
        if len(againstPathSegments) == 1:
            relativePath = targetPath
        # Otherwise, the ruling path had a number of segments, so our result
        # must be an absolute path (unless we only want a subpath result, in
        # which case none exists).
        elif subPathOnly:
            return None
        else:
            relativePath = '/' + targetPath
    elif traverse > 0:
        if subPathOnly:
            return None
        relativePath = (("../" * traverse) +
                        '/'.join(targetPathSegments[i:]))
    # If the ith segment of the target path is empty and that is not the
    # final segment, then we need to precede the path with "./" to make it a
    # relative path.
    elif (len(targetPathSegments) > i + 1 and
          '' == targetPathSegments[i]):
        relativePath = "./" + '/'.join(targetPathSegments[i:])
    else:
        relativePath = '/'.join(targetPathSegments[i:])
    # Reuse the target's query and fragment; scheme and authority are
    # dropped from the relative form.
    return unsplit_uri_ref([None, None, relativePath] + splitTarget[3:])
def remove_dot_segments(path):
    """
    Collapses most '.' and '..' segments out of *path* without eliminating
    empty segments, implementing the remove_dot_segments function described
    in RFC 3986 sec. 5.2 in support of absolutize(). It is intended for the
    path-merging process and may give unexpected results when used
    independently; prefer normalize_path_segments() or
    normalize_path_segments_in_uri() for general normalization.
    """
    # A path that is nothing but "." or ".." collapses to the empty string
    # (sliced from the input so the string type is preserved).
    if path in ('.', '..'):
        return path[0:0]
    # Strip any run of leading "./" and "../" segments.
    while True:
        if path.startswith('./'):
            path = path[2:]
        elif path.startswith('../'):
            path = path[3:]
        else:
            break
    # Remember and drop a leading slash, so that split('/') below cannot
    # produce an ambiguous empty first segment.
    rooted = path.startswith('/')
    if rooted:
        path = path[1:]
    # A trailing "/." is equivalent to a plain trailing "/".
    if path.endswith('/.'):
        path = path[:-1]
    # Walk the segments left to right, keeping the survivors.
    kept = []
    segments = path.split('/')
    final = len(segments) - 1
    for index, segment in enumerate(segments):
        if segment == '..':
            # '..' cancels the previously kept segment; if there is none
            # and the path is relative, the '..' itself is retained.
            if kept:
                kept.pop()
            elif not rooted:
                kept.append(segment)
            # A terminal '..' must leave the result ending with '/'.
            if index == final:
                kept.append('')
        elif segment != '.':
            # '.' segments vanish; everything else (even '') is kept.
            kept.append(segment)
    return ('/' if rooted else '') + '/'.join(kept)
def normalize_case(iri_ref, doHost=False):
    """
    Returns the given URI reference with the case of the scheme,
    percent-encoded octets, and, optionally, the host, all normalized,
    implementing section 6.2.2.1 of RFC 3986. The normal form of
    scheme and host is lowercase, and the normal form of
    percent-encoded octets is uppercase.
    The URI reference can be given as either a string or as a sequence as
    would be provided by the split_uri_ref function. The return value will
    be a string or tuple.
    """
    if not isinstance(iri_ref, (tuple, list)):
        iri_ref = split_uri_ref(iri_ref)
        tup = None
    else:
        tup = True
    # normalize percent-encoded octets to uppercase hex.
    # (Bug fix: the old pattern '%([0-9a-f][0-9a-f])' matched only
    # all-lowercase pairs, leaving mixed-case sequences such as '%aF'
    # unnormalized; matching both cases is safe because uppercasing an
    # already-uppercase pair is a no-op.)
    newRef = []
    for component in iri_ref:
        if component:
            newRef.append(re.sub('%([0-9A-Fa-f]{2})',
                          lambda m: m.group(0).upper(), component))
        else:
            newRef.append(component)
    # normalize scheme
    scheme = newRef[0]
    if scheme:
        scheme = scheme.lower()
    # normalize host (only when requested; userinfo and port are preserved)
    authority = newRef[1]
    if doHost:
        if authority:
            userinfo, host, port = split_authority(authority)
            authority = ''
            if userinfo is not None:
                authority += '%s@' % userinfo
            authority += host.lower()
            if port is not None:
                authority += ':%s' % port
    res = (scheme, authority, newRef[2], newRef[3], newRef[4])
    if tup:
        return res
    else:
        return unsplit_uri_ref(res)
def normalize_percent_encoding(s):
    """
    Given a string representing a URI reference or a component thereof,
    returns the string with all percent-encoded octets that correspond to
    unreserved characters decoded, implementing section 6.2.2.2 of RFC
    3986.
    >>> u0 = 'http://host/abc%E2%80%A2/x/y/z'
    >>> u1 = normalize_percent_encoding(u0)
    >>> hex(ord(u1[15]))
    '0x2022'
    """
    # NOTE(review): the doctest above appears copied from percent_decode();
    # octets E2 80 A2 are not in RFC 3986's unreserved set, so if
    # PERCENT_DECODE_BYTES (defined elsewhere in this module) restricts
    # decoding to unreserved octets, that example would not decode --
    # verify against the constant's definition.
    return percent_decode(s, decodable=PERCENT_DECODE_BYTES)
def normalize_path_segments(path):
    """
    Given a string representing the path component of a URI reference having a
    hierarchical scheme, returns the string with dot segments ('.' and '..')
    removed, implementing section 6.2.2.3 of RFC 3986. A relative (or empty)
    path is returned unchanged.
    """
    # Guard clause: only absolute paths are collapsed.
    if path.startswith('/'):
        return remove_dot_segments(path)
    return path
def normalize_path_segments_in_uri(uri):
    """
    Given a string representing a URI or URI reference having a hierarchical
    scheme, returns the string with dot segments ('.' and '..') removed from
    the path component, implementing section 6.2.2.3 of RFC 3986. If the
    path is relative, the URI or URI reference is returned with no changes.
    """
    # Split, normalize just the path, and reassemble.
    scheme, authority, path, query, fragment = split_uri_ref(uri)
    normalized = normalize_path_segments(path)
    return unsplit_uri_ref((scheme, authority, normalized, query, fragment))
#=============================================================================
# RFC 3151 implementation
#
def urn_to_public_id(urn):
    """
    Converts a URN that conforms to RFC 3151 to a public identifier.
    For example, the URN
    "urn:publicid:%2B:IDN+example.org:DTD+XML+Bookmarks+1.0:EN:XML"
    will be converted to the public identifier
    "+//IDN example.org//DTD XML Bookmarks 1.0//EN//XML"
    Raises a ValueError if the given URN cannot be converted.
    Query and fragment components, if present, are ignored.
    """
    if urn:
        scheme, _auth, path, _query, _frag = split_uri_ref(urn)
        if scheme is not None and scheme.lower() == 'urn':
            head, sep, rest = path.partition(':')
            # The first colon-delimited field must be the 'publicid' scheme.
            if sep and percent_decode(head) == 'publicid':
                # Reverse the RFC 3151 transcription, then percent-decode.
                translated = rest.replace('+', ' ')
                translated = translated.replace(':', '//')
                translated = translated.replace(';', '::')
                return percent_decode(translated)
    raise ValueError("A public ID cannot be derived from URN {urn} "
                     "because it does not conform to RFC 3151.".format(urn=urn))
def public_id_to_urn(publicid):
    """
    Converts a public identifier to a URN that conforms to RFC 3151.
    """
    # 1. condense whitespace, XSLT-style
    condensed = re.sub('[ \t\r\n]+', ' ', publicid.strip())
    # 2. transcribe per RFC 3151:
    #      '//' -> ':'   '::' -> ';'   space -> '+'
    #    and percent-encode everything outside the RFC 2396 unreserved set
    #    (so '+', ';', "'", '?', '#', '%', '/', ':' are escaped).
    transcribed = []
    for dspart in condensed.split('//'):
        encoded = [percent_encode(piece, spaceToPlus=True)
                   for piece in dspart.split('::')]
        transcribed.append(';'.join(encoded))
    return 'urn:publicid:%s' % ':'.join(transcribed)
#=============================================================================
# Miscellaneous public functions
#
SCHEME_PATTERN = re.compile(r'([a-zA-Z][a-zA-Z0-9+\-.]*):')
def get_scheme(iri_ref):
    """
    Obtains, with optimum efficiency, just the scheme from a URI reference.
    Returns a string, or if no scheme could be found, returns None.
    """
    # A single anchored match against a precompiled pattern is far cheaper
    # than fully parsing the reference (split_uri_ref/urlparse).
    match = SCHEME_PATTERN.match(iri_ref)
    return match.group(1) if match else None
def strip_fragment(iri_ref):
    """
    Returns the given URI or URI reference with the fragment component, if
    any, removed.
    """
    # split_fragment() separates the reference from its fragment; element 0
    # is the reference minus any '#fragment' part.
    return split_fragment(iri_ref)[0]
def is_absolute(identifier):
    """
    Given a string believed to be a URI or URI reference, tests that it is
    absolute (as per RFC 3986), not relative -- i.e., that it has a scheme.
    """
    # Reuse get_scheme() rather than compiling another massive regex.
    scheme = get_scheme(identifier)
    return scheme is not None
# Guards one-time lazy compilation of NT_PATH_PATTERN (see below).
_ntPathToUriSetupCompleted = False
def _initNtPathPattern():
    """
    Called internally to compile the regular expression used by
    os_path_to_uri() on Windows just once, the first time the function is
    called.
    Defines the module-level global NT_PATH_PATTERN as a side effect and
    sets _ntPathToUriSetupCompleted so subsequent calls are no-ops.
    """
    global _ntPathToUriSetupCompleted
    if _ntPathToUriSetupCompleted:
        return
    # path variations we try to handle:
    #
    # a\b\c (a relative path)
    # file:a/b/c is the best we can do.
    # Dot segments should not be collapsed in the final URL.
    #
    # \a\b\c
    # file:///a/b/c is correct
    #
    # C:\a\b\c
    # urllib.urlopen() requires file:///C|/a/b/c or ///C|/a/b/c
    # because it currently relies on urllib.url2pathname().
    # Windows resolver will accept the first or file:///C:/a/b/c
    #
    # \\host\share\x\y\z
    # Windows resolver accepts file://host/share/x/y/z
    # Netscape (4.x?) accepts file:////host/share/x/y/z
    #
    # If an entire drive is shared, the share name might be
    # $drive$, like this: \\host\$c$\a\b\c
    # We could recognize it as a drive letter, but it's probably
    # best not to treat it specially, since it will never occur
    # without a host. It's just another share name.
    #
    # There's also a weird C:\\host\share\x\y\z convention
    # that is hard to find any information on. Presumably the C:
    # is ignored, but the question is do we put it in the URI?
    #
    # So the format, in ABNF, is roughly:
    # [ drive ":" ] ( [ "\\" host "\" share ] abs-path ) / rel-path
    drive = r'(?P<drive>[A-Za-z])'
    host = r'(?P<host>[^\\]*)'
    share = r'(?P<share>[^\\]+)'
    abs_path = r'(?P<abspath>\\(?:[^\\]+\\?)*)'
    rel_path = r'(?P<relpath>(?:[^\\]+\\?)*)'
    NT_PATH_REGEX = r"^(?:%s:)?(?:(?:(?:\\\\%s\\%s)?%s)|%s)$" % (
        drive,
        host,
        share,
        abs_path,
        rel_path)
    global NT_PATH_PATTERN
    NT_PATH_PATTERN = re.compile(NT_PATH_REGEX)
    # We can now use NT_PATH_PATTERN.match(path) to parse the path and use
    # the returned object's .groupdict() method to get a dictionary of
    # path subcomponents. For example,
    # NT_PATH_PATTERN.match(r"\\h\$c$\x\y\z").groupdict()
    # yields
    # {'abspath': r'\x\y\z',
    # 'share': '$c$',
    # 'drive': None,
    # 'host': 'h',
    # 'relpath': None
    # }
    # Note that invalid paths such as r'\\foo\bar'
    # (a UNC path with no trailing '\') will not match at all.
    _ntPathToUriSetupCompleted = True
    return
def _splitNtPath(path):
    """
    Called internally to get a tuple representing components of the given
    Windows path.
    Returns (drive, host, share, abspath, relpath); components that did not
    match are None. Raises ValueError if the path cannot be parsed.
    """
    # Make sure the lazily-compiled NT path regex exists.
    if not _ntPathToUriSetupCompleted:
        _initNtPathPattern()
    match = NT_PATH_PATTERN.match(path)
    if match is None:
        raise ValueError("Path {path} is not a valid Windows path.".format(path=path))
    groups = match.groupdict()
    return tuple(groups[key]
                 for key in ('drive', 'host', 'share', 'abspath', 'relpath'))
def _get_drive_letter(s):
"""
Called internally to get a drive letter from a string, if the string
is a drivespec.
"""
if len(s) == 2 and s[1] in ':|' and s[0] in ascii_letters:
return s[0]
return
def os_path_to_uri(path, attemptAbsolute=True, osname=None):
    r"""This function converts an OS-specific file system path to a URI of
    the form 'file:///path/to/the/file'.
    In addition, if the path is absolute, any dot segments ('.' or '..') will
    be collapsed, so that the resulting URI can be safely used as a base URI
    by functions such as absolutize().
    The given path will be interpreted as being one that is appropriate for
    use on the local operating system, unless a different osname argument is
    given.
    If the given path is relative, an attempt may be made to first convert
    the path to absolute form by interpreting the path as being relative
    to the current working directory. This is the case if the attemptAbsolute
    flag is True (the default). If attemptAbsolute is False, a relative
    path will result in a URI of the form file:relative/path/to/a/file .
    attemptAbsolute has no effect if the given path is not for the
    local operating system.
    On Windows, the drivespec will become the first step in the path component
    of the URI. If the given path contains a UNC hostname, this name will be
    used for the authority component of the URI.
    Warning: Some libraries, such as urllib.urlopen(), may not behave as
    expected when given a URI generated by this function. On Windows you may
    want to call re.sub('(/[A-Za-z]):', r'\1|', uri) on the URI to prepare it
    for use by functions such as urllib.url2pathname() or urllib.urlopen().
    This function is similar to urllib.pathname2url(), but is more featureful
    and produces better URIs.
    """
    # urllib.pathname2url() is deliberately avoided on the main platforms:
    # it produces schemeless URLs, percent-encodes incorrectly, and on
    # Windows ignores trailing backslashes, mishandles UNC hostnames,
    # writes drivespecs with "|" rather than ":", and treats "/" like "\".
    # The drivespec is kept in the path component (not the authority) so
    # the authority remains available for a UNC host; see the module
    # history for the tradeoffs of that choice.
    osname = osname or os.name
    if osname == 'nt':
        if WINDOWS_SLASH_COMPAT:
            path = path.replace('/','\\')
        (drive, host, share, abspath, relpath) = _splitNtPath(path)
        # Only resolve a relative path against the cwd when converting for
        # the OS we are actually running on.
        if attemptAbsolute and relpath is not None and osname == os.name:
            path = os.path.join(os.getcwd(), relpath)
            (drive, host, share, abspath, relpath) = _splitNtPath(path)
        path = abspath or relpath
        # percent-encode each backslash-delimited segment, joined with '/'
        path = '/'.join([percent_encode(seg) for seg in path.split('\\')])
        uri = 'file:'
        if host:
            uri += '//%s' % percent_encode(host)
        elif abspath:
            uri += '//'
        if drive:
            uri += '/%s:' % drive.upper()
        if share:
            uri += '/%s' % percent_encode(share)
        if abspath:
            # dot segments are only collapsed for absolute paths
            path = remove_dot_segments(path)
        uri += path
    elif osname == 'posix':
        try:
            from posixpath import isabs
        except ImportError:
            isabs = lambda p: p[:1] == '/'
        pathisabs = isabs(path)
        if pathisabs:
            path = remove_dot_segments(path)
        elif attemptAbsolute and osname == os.name:
            path = os.path.join(os.getcwd(), path)
            pathisabs = isabs(path)
        path = '/'.join([percent_encode(seg) for seg in path.split('/')])
        if pathisabs:
            uri = 'file://%s' % path
        else:
            uri = 'file:%s' % path
    else:
        # Only posix and nt get first-class treatment; other platforms fall
        # back to the stdlib conversion.
        if osname == os.name:
            # Bug fix: Python 3 moved pathname2url into urllib.request;
            # the old 'from urllib import pathname2url' raises ImportError
            # there.
            from urllib.request import pathname2url
            if attemptAbsolute and not os.path.isabs(path):
                path = os.path.join(os.getcwd(), path)
        else:
            try:
                # Bug fix: this previously used exec() with a locals() dict,
                # whose binding is not visible to the function body in
                # Python 3 (exec cannot create local variables), so the
                # pathname2url() call below would raise NameError.
                import importlib
                module = importlib.import_module('%surl2path' % osname)
                pathname2url = module.pathname2url
            except ImportError:
                raise RuntimeError("Platform {platform} not supported by URI function "
                                   "{function}".format(platform=osname, function="os_path_to_uri"))
        uri = 'file:' + pathname2url(path)
    return uri
def uri_to_os_path(uri, attemptAbsolute=True, encoding='utf-8', osname=None):
    r"""
    This function converts a URI reference to an OS-specific file system path.
    If the URI reference is given as a Unicode string, then the encoding
    argument determines how percent-encoded components are interpreted, and
    the result will be a Unicode string. If the URI reference is a regular
    byte string, the encoding argument is ignored and the result will be a
    byte string in which percent-encoded octets have been converted to the
    bytes they represent. For example, the trailing path segment of
    'file:///a/b/%E2%80%A2' will by default be converted to '\u2022',
    because sequence E2 80 A2 represents character U+2022 in UTF-8. If the
    string were not Unicode, the trailing segment would become the 3-byte
    string '\xe2\x80\xa2'.
    The osname argument determines for what operating system the resulting
    path is appropriate. It defaults to os.name and is typically the value
    'posix' on Unix systems (including Mac OS X and Cygwin), and 'nt' on
    Windows NT/2000/XP.
    This function is similar to urllib.url2pathname(), but is more featureful
    and produces better paths.
    If the given URI reference is not relative, its scheme component must be
    'file', and an exception will be raised if it isn't.
    In accordance with RFC 3986, RFC 1738 and RFC 1630, an authority
    component that is the string 'localhost' will be treated the same as an
    empty authority.
    Dot segments ('.' or '..') in the path component are NOT collapsed.
    If the path component of the URI reference is relative and the
    attemptAbsolute flag is True (the default), then the resulting path
    will be made absolute by considering the path to be relative to the
    current working directory. There is no guarantee that such a result
    will be an accurate interpretation of the URI reference.
    attemptAbsolute has no effect if the
    result is not being produced for the local operating system.
    Fragment and query components of the URI reference are ignored.
    If osname is 'posix', the authority component must be empty or just
    'localhost'. An exception will be raised otherwise, because there is no
    standard way of interpreting other authorities. Also, if '%2F' is in a
    path segment, it will be converted to r'\/' (a backslash-escaped forward
    slash). The caller may need to take additional steps to prevent this from
    being interpreted as if it were a path segment separator.
    If osname is 'nt', a drivespec is recognized as the first occurrence of a
    single letter (A-Z, case-insensitive) followed by '|' or ':', occurring as
    either the first segment of the path component, or (incorrectly) as the
    entire authority component. A UNC hostname is recognized as a non-empty,
    non-'localhost' authority component that has not been recognized as a
    drivespec, or as the second path segment if the first path segment is
    empty. If a UNC hostname is detected, the result will begin with
    '\\<hostname>\'. If a drivespec was detected also, the first path segment
    will be '$<driveletter>$'. If a drivespec was detected but a UNC hostname
    was not, then the result will begin with '<driveletter>:'.
    Windows examples:
    'file:x/y/z' => r'x\y\z';
    'file:/x/y/z' (not recommended) => r'\x\y\z';
    'file:///x/y/z' => r'\x\y\z';
    'file:///c:/x/y/z' => r'C:\x\y\z';
    'file:///c|/x/y/z' => r'C:\x\y\z';
    'file:///c:/x:/y/z' => r'C:\x:\y\z' (bad path, valid interpretation);
    'file://c:/x/y/z' (not recommended) => r'C:\x\y\z';
    'file://host/share/x/y/z' => r'\\host\share\x\y\z';
    'file:////host/share/x/y/z' => r'\\host\share\x\y\z'
    'file://host/x:/y/z' => r'\\host\x:\y\z' (bad path, valid interp.);
    'file://localhost/x/y/z' => r'\x\y\z';
    'file://localhost/c:/x/y/z' => r'C:\x\y\z';
    'file:///C:%5Cx%5Cy%5Cz' (not recommended) => r'C:\x\y\z'
    """
    # Only the scheme, authority and path components matter here; query and
    # fragment (elements 3 and 4 of the split) are deliberately ignored.
    (scheme, authority, path) = split_uri_ref(uri)[0:3]
    if scheme and scheme != 'file':
        raise ValueError("Only a 'file' URI can be converted to an OS-specific path; "
                         "URI given was {uri}".format(uri=uri))
    # enforce 'localhost' URI equivalence mandated by RFCs 1630, 1738, 3986
    if authority == 'localhost':
        authority = None
    osname = osname or os.name
    if osname == 'nt':
        # Get the drive letter and UNC hostname, if any. Fragile!
        unchost = None
        driveletter = None
        if authority:
            # An authority is either a (nonstandard) drivespec like 'C:'
            # or a UNC host name.
            authority = percent_decode(authority, encoding=encoding)
            if _get_drive_letter(authority):
                driveletter = authority[0]
            else:
                unchost = authority
        if not (driveletter or unchost):
            # Note that we have to treat %5C (backslash) as a path separator
            # in order to catch cases like file:///C:%5Cx%5Cy%5Cz => C:\x\y\z
            # We will also treat %2F (slash) as a path separator for
            # compatibility.
            if WINDOWS_SLASH_COMPAT:
                regex = '%2[fF]|%5[cC]'
            else:
                regex = '%5[cC]'
            path = re.sub(regex, '/', path)
            segs = path.split('/')
            if not segs[0]:
                # //host/... => [ '', '', 'host', '...' ]
                if len(segs) > 2 and not segs[1]:
                    unchost = percent_decode(segs[2], encoding=encoding)
                    path = len(segs) > 3 and '/' + '/'.join(segs[3:]) or ''
                # /C:/... => [ '', 'C:', '...' ]
                elif len(segs) > 1:
                    driveletter = _get_drive_letter(percent_decode(segs[1],
                                                    encoding=encoding))
                    if driveletter:
                        path = len(segs) > 2 and '/' + '/'.join(segs[2:]) or ''
            else:
                # C:/... => [ 'C:', '...' ]
                driveletter = _get_drive_letter(percent_decode(segs[0],
                                                encoding=encoding))
                if driveletter:
                    path = len(segs) > 1 and path[2:] or ''
        # Do the conversion of the path part
        sep = '\\' # we could try to import from ntpath,
                   # but at this point it would just waste cycles.
        path = percent_decode(path.replace('/', sep), encoding=encoding)
        # Assemble and return the path
        if unchost:
            # It's a UNC path of the form \\host\share\path.
            # driveletter is ignored.
            path = r'%s%s%s' % (sep * 2, unchost, path)
        elif driveletter:
            # It's an ordinary Windows path of the form C:\x\y\z
            path = r'%s:%s' % (driveletter.upper(), path)
        # It's an ordinary Windows path of the form \x\y\z or x\y\z.
        # We need to make sure it doesn't end up looking like a UNC
        # path, so we discard extra leading backslashes
        elif path[:1] == '\\':
            path = re.sub(r'^\\+', '\\\\', path)
        # It's a relative path. If the caller wants it absolute, attempt to comply
        elif attemptAbsolute and osname == os.name:
            path = os.path.join(os.getcwd(), path)
        return path
    elif osname == 'posix':
        # a non-empty, non-'localhost' authority component is ambiguous on Unix
        if authority:
            raise ValueError("A URI containing a remote host name cannot be converted to a "
                             " path on posix; URI given was {uri}".format(uri=uri))
        # %2F in a path segment would indicate a literal '/' in a
        # filename, which is possible on posix, but there is no
        # way to consistently represent it. We'll backslash-escape
        # the literal slash and leave it to the caller to ensure it
        # gets handled the way they want.
        path = percent_decode(re.sub('%2[fF]', '\\/', path), encoding=encoding)
        # If it's relative and the caller wants it absolute, attempt to comply
        if attemptAbsolute and osname == os.name and not os.path.isabs(path):
            path = os.path.join(os.getcwd(), path)
        return path
    else:
        # 4Suite only supports posix and nt, so we're not going to worry about
        # improving upon urllib.pathname2url() for other OSes.
        # NOTE(review): 'from urllib import url2pathname' is the Python 2
        # location; under Python 3 it is urllib.request -- confirm target.
        if osname == os.name:
            from urllib import url2pathname
        else:
            try:
                module = '%surl2path' % osname
                exec('from %s import url2pathname' % module, globals(), locals())
            except ImportError:
                raise RuntimeError("Platform {platform} not supported by URI function "
                                   "{function}".format(platform=osname, function="uri_to_os_path"))
        # drop the scheme before passing to url2pathname
        if scheme:
            uri = uri[len(scheme)+1:]
        return url2pathname(uri)
# Matches a string consisting only of URI unreserved/sub-delims-style
# characters and percent-encoded octets (%XX); presumably used to validate
# registry-based ("reg-name") authority host names -- confirm at call sites.
REG_NAME_HOST_PATTERN = re.compile(r"^(?:(?:[0-9A-Za-z\-_\.!~*'();&=+$,]|(?:%[0-9A-Fa-f]{2}))*)$")
def path_resolve(paths):
    """
    This function takes a list of file URIs. The first can be
    absolute or relative to the URI equivalent of the current working
    directory. The rest must be relative to the first.
    The function converts them all to OS paths appropriate for the local
    system, and then creates a single final path by resolving each path
    in the list against the following one. This final path is returned
    as a URI.
    """
    if not paths: return paths
    # Convert every URI to a local OS path; keep relatives relative for now.
    paths = [uri_to_os_path(p, attemptAbsolute=False) for p in paths]
    if not os.path.isabs(paths[0]):
        paths[0] = os.path.join(os.getcwd(), paths[0])
    # Left-fold: resolve each subsequent path against the accumulated one.
    # A directory base gets a trailing separator via os.path.join(a, '') so
    # basejoin treats it as a base directory; the [5:] strips the leading
    # 'file:' so the right-hand side is treated as a relative reference.
    # NOTE(review): relies on the builtin `reduce` (Python 2); Python 3
    # would need functools.reduce -- confirm target version.
    resolved = reduce(lambda a, b: \
        basejoin(os.path.isdir(a)
            and os_path_to_uri(
                os.path.join(a, ''),
                attemptAbsolute=False,
            ) or os_path_to_uri(a, attemptAbsolute=False),
            os_path_to_uri(b, attemptAbsolute=False)[5:]),
        paths)
    return resolved
def basejoin(base, iri_ref):
    """
    Merge a base URI reference with another URI reference and return
    the resulting URI reference.

    Behaves exactly like absolutize() with the argument order reversed,
    except that any URI reference -- even a relative one -- is accepted
    as the base.  A scheme-less base is temporarily given a dummy scheme
    so RFC 3986 resolution can proceed; the dummy scheme is then stripped
    from the result unless iri_ref supplied a scheme of its own.  Thus if
    neither argument has a scheme component, neither does the result.

    This is much like urllib.basejoin(), but follows the RFC 3986
    algorithms for path merging, dot-segment elimination, and inheritance
    of query and fragment components.

    WARNING: retained only for two legacy 4Suite needs (base URIs stored
    as absolute paths in RDF statement subjects, and relative repo paths
    in a product setup.xml file).  It may go away with those needs, so it
    is not advisable to use it.
    """
    if is_absolute(base):
        return absolutize(iri_ref, base)

    # Relative base: resolve under a throwaway placeholder scheme.
    placeholder = 'basejoin'
    merged = absolutize(iri_ref, '{0}:{1}'.format(placeholder, base))
    if is_absolute(iri_ref):
        # iri_ref carried its own scheme, which the resolution preserved.
        return merged
    # No scheme in, no scheme out: strip the placeholder and its colon.
    return merged[len(placeholder) + 1:]
def join(*uriparts):
    """
    Merge a series of URI reference parts, returning a new URI reference.

    Much like iri.basejoin, but takes multiple arguments: each part is
    joined onto the accumulated base in turn, with exactly one
    hierarchical separator ensured between consecutive parts.

    Raises TypeError if called with no arguments.
    """
    if len(uriparts) == 0:
        # The original raised TypeError("FIXME...") -- a leftover
        # placeholder; give callers an actionable message instead.
        raise TypeError("join() requires at least one URI part")
    elif len(uriparts) == 1:
        return uriparts[0]
    else:
        base = uriparts[0]
        for part in uriparts[1:]:
            # Strip then re-append the separator so the base always ends
            # with exactly one separator before joining.
            base = basejoin(base.rstrip(DEFAULT_HIERARCHICAL_SEP) + DEFAULT_HIERARCHICAL_SEP, part)
        return base
#generate_iri
#Use:
#from uuid import *; newuri = uuid4().urn
#=======================================================================
#
# Further reading re: percent-encoding
#
# http://lists.w3.org/Archives/Public/ietf-http-wg/2004JulSep/0009.html
#
#=======================================================================
#
# 'file:' URI notes
#
# 'file:' URI resolution is difficult to get right, because the 'file'
# URL scheme is underspecified, and is handled by resolvers in very
# lenient and inconsistent ways.
#
# RFC 3986 provides definitive clarification on how all URIs,
# including the quirky 'file:' ones, are to be interpreted for purposes
# of resolution to absolute form, so that is what we implement to the
# best of our ability.
#
#-----------------------------------------------------------------------
#
# Notes from our previous research on 'file:' URI resolution:
#
# According to RFC 2396 (original), these are valid absolute URIs:
# file:/autoexec.bat (scheme ":" abs_path)
# file:///autoexec.bat (scheme ":" net_path)
#
# This one is valid but is not what you'd expect it to be:
#
# file://autoexec.bat (authority = autoexec.bat, no abs_path)
#
# If you have any more than 3 slashes, it's OK because each path segment
# can be an empty string.
#
# This one is valid too, although everything after 'file:' is
# considered an opaque_part (note that RFC 3986 changes this):
#
# file:etc/passwd
#
# Unescaped backslashes are NOT allowed in URIs, ever.
# It is not possible to use them as path segment separators.
# Yet... Windows Explorer will accept these:
# file:C:\WINNT\setuplog.txt
# file:/C:\WINNT\setuplog.txt
# file:///C:\WINNT\setuplog.txt
# However, it will also accept "|" in place of the colon after the drive:
# file:C|/WINNT/setuplog.txt
# file:/C|/WINNT/setuplog.txt
# file:///C|/WINNT/setuplog.txt
#
# RFC 1738 says file://localhost/ and file:/// are equivalent;
# localhost in this case is always the local machine, no matter what
# your DNS says.
#
# Basically, any file: URI is valid. Good luck resolving them, though.
#
# Jeremy's idea is to not use open() or urllib.urlopen() on Windows;
# instead, use a C function that wraps Windows' generic open function,
# which resolves any path or URI exactly as Explorer would (he thinks).
#
#-----------------------------------------------------------------------
#
# References for further research on 'file:' URI resolution:
# http://mail.python.org/pipermail/xml-sig/2001-February/004572.html
# http://mail.python.org/pipermail/xml-sig/2001-March/004791.html
# http://mail.python.org/pipermail/xml-sig/2002-August/008236.html
# http://www.perldoc.com/perl5.8.0/lib/URI/file.html
# http://lists.w3.org/Archives/Public/uri/2004Jul/0013.html
#
#=======================================================================
|
{
"content_hash": "b28214696063eb65f750fa859b91a3d4",
"timestamp": "",
"source": "github",
"line_count": 1693,
"max_line_length": 148,
"avg_line_length": 41.54459539279386,
"alnum_prop": 0.6042653017701002,
"repo_name": "uogbuji/amara3-iri",
"id": "866471b0c73f2f93666fdb97ccbb217f73144489",
"size": "70339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylib/iri.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "154671"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User
class Fox(models.Model):
    """Django model associating an auth User with a single IPv4 address."""
    # One-to-one link to the built-in auth User: each user has at most one Fox.
    # NOTE(review): no on_delete argument -- required from Django 2.0 on;
    # confirm the target Django version before upgrading.
    user = models.OneToOneField(User)
    # protocol='IPv4' restricts the field to IPv4 addresses only.
    ip = models.GenericIPAddressField(protocol='IPv4')
    def __unicode__(self):
        # Python 2 string representation: the stored IP address.
        return self.ip
|
{
"content_hash": "e9cf2344df10b03a538227d58db85e46",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 27,
"alnum_prop": 0.7160493827160493,
"repo_name": "TechnicalFox/Fox-Latch",
"id": "786832bbc85cb6d9654239c3591dfdac71c7fccb",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site/fox/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22911"
}
],
"symlink_target": ""
}
|
from gunstar.http import RequestHandler
class IndexHandler(RequestHandler):
    """Serves GET requests for the index page."""
    def get(self):
        # No request data needed; just render the static template.
        self.render_template('index.html')
class OtherHandler(RequestHandler):
    """Serves GET requests that carry a `name` path argument."""
    def get(self, name):
        # Forward the captured URL segment into the template context.
        self.render_template('index.html', name=name)
|
{
"content_hash": "71f3e184bf911bde97a8adb6c820d496",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 20.53846153846154,
"alnum_prop": 0.6853932584269663,
"repo_name": "allisson/gunstar",
"id": "bbc22e62fc74c5fe8c4d90fc2a579af51f6da6b0",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hello_world/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63482"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration for the flisol_event app.

    Alters the slug fields of flisolevent and flisolinstance to unique,
    non-editable AutoSlugFields, and redefines the flisolinstance
    email_contact field (help text, max length, verbose name).
    """
    dependencies = [
        ('flisol_event', '0007_auto_20150110_1133'),
    ]
    operations = [
        migrations.AlterField(
            model_name='flisolevent',
            name='slug',
            field=autoslug.fields.AutoSlugField(unique=True, editable=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='flisolinstance',
            name='email_contact',
            field=models.EmailField(help_text='please ask for a mail list', max_length=75, verbose_name='email contact'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='flisolinstance',
            name='slug',
            field=autoslug.fields.AutoSlugField(unique=True, editable=False),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "e2caeae4a33a9916ffb9fc688db3a51e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 121,
"avg_line_length": 30.1875,
"alnum_prop": 0.6004140786749482,
"repo_name": "ikks/flisol-connect",
"id": "8f61646ee350ff8f145e4aaf50f879d5214b1a97",
"size": "990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flisol_event/migrations/0008_auto_20150112_2155.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "663"
},
{
"name": "JavaScript",
"bytes": "7742"
},
{
"name": "Python",
"bytes": "81440"
}
],
"symlink_target": ""
}
|
""" This is just a very basic module to find a specific prime number. This is my first python program. Everything I do and plan to do is CC-BY-SA. Thanks for stopping by. Alpha32
"""
how_high = int(input("Enter the range: "))
nth_prime = int(input("Enter the prime to find: "))
def find_a_prime(nth_prime, how_high):
primes = [1,3,5,7]
""" I did this because I ran into issues getting the single-digit primes to show up.
If you have a better solution, let me know.
"""
for i in range(10, how_high):
if i%2==0:
continue
elif i%3==0:
continue
elif i%5==0:
continue
else:
primes.append(i)
print(primes[nth_prime])
if __name__ == '__main__':
find_a_prime(nth_prime, how_high)
|
{
"content_hash": "a8ebabbd6090b82823f213223165a8c5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 178,
"avg_line_length": 26.14814814814815,
"alnum_prop": 0.6628895184135978,
"repo_name": "amneher/python_stuff",
"id": "beac28f90b379562dc0c8aa9f8c32633f0640a17",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "find_a_prime/find_a_prime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3583"
}
],
"symlink_target": ""
}
|
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
# Docstrings in this module use reStructuredText markup (English).
__docformat__ = 'reStructuredText en'
# Public API, grouped to mirror the table of contents further below.
__all__ = [
    # 0. Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'SKIP',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    # 1. Utility Functions
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Tester
    'Tester',
    # 8. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 9. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile, ast
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry mapping option-flag names to their distinct bit values.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Return the flag value registered for `name`, allocating the next
    free bit (1 << number-of-known-flags) if the name is new."""
    if name not in OPTIONFLAGS_BY_NAME:
        OPTIONFLAGS_BY_NAME[name] = 1 << len(OPTIONFLAGS_BY_NAME)
    return OPTIONFLAGS_BY_NAME[name]
# Register each flag; values are distinct bits assigned in order.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
# Flags that affect how example output is compared to the expected output.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)
# Flags that affect how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
    """Return a (file_contents, filename) pair for a doctest file.

    If `module_relative` is true, `filename` is resolved relative to
    `package` (normalized via _normalize_module); when the package has a
    loader exposing get_data(), the contents are read through it instead
    of via open().
    """
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
        if hasattr(package, '__loader__'):
            if hasattr(package.__loader__, 'get_data'):
                file_contents = package.__loader__.get_data(filename)
                # get_data() opens files as 'rb', so one must do the equivalent
                # conversion as universal newlines would do.
                return file_contents.replace(os.linesep, '\n'), filename
    with open(filename) as f:
        return f.read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in `s`, and return the result.
    If the string `s` is Unicode, it is encoded using the stdout
    encoding and the `backslashreplace` error handler.
    """
    # Python 2: encode unicode to bytes so the result mixes cleanly with
    # other byte-string output destined for stdout.
    if isinstance(s, unicode):
        s = s.encode(_encoding, 'backslashreplace')
    # This regexp matches the start of non-blank lines:
    return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """StringIO stand-in for sys.stdout that normalizes captured output
    so it can be compared against a doctest's expected output."""
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        if hasattr(self, "softspace"):
            del self.softspace
        return result
    def truncate(self, size=None):
        # Also drop any pending softspace and keep the internal buffer
        # (Python 2 StringIO attribute) as a plain byte string.
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
        if not self.buf:
            # Reset it to an empty string, to make sure it's not unicode.
            self.buf = ''
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Report whether `got` matches `want`, where each occurrence of
    ELLIPSIS_MARKER ('...') in `want` stands for any substring of `got`.

    Runs in worst-case linear time.  Essentially the only subtle case:

    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # The literal fragments between the ellipses.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    lo, hi = 0, len(got)

    # Anchor an exact match at the start, if the pattern demands one.
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]

    # Likewise anchor an exact match at the end.
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]

    if lo > hi:
        # The anchored ends needed more characters than `got` has, as in
        # _ellipsis_match('aa...aa', 'aaa').
        return False

    # For the remaining fragments, greedily take the leftmost
    # non-overlapping occurrence of each; if that fails, no arrangement
    # can succeed.  A fragment may be '' (consecutive ellipses, or an
    # ellipsis at either end); find('') succeeds without advancing lo.
    for piece in pieces:
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)
    return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
def _strip_exception_details(msg):
# Support for IGNORE_EXCEPTION_DETAIL.
# Get rid of everything except the exception name; in particular, drop
# the possibly dotted module path (if any) and the exception message (if
# any). We assume that a colon is never part of a dotted name, or of an
# exception name.
# E.g., given
# "foo.bar.MyError: la di da"
# return "MyError"
# Or for "abc.def" or "abc.def:\n" return "def".
start, end = 0, len(msg)
# The exception name must appear on the first line.
i = msg.find("\n")
if i >= 0:
end = i
# retain up to the first colon (if any)
i = msg.find(':', 0, end)
if i >= 0:
end = i
# retain just the exception name
i = msg.rfind('.', 0, end)
if i >= 0:
start = i+1
return msg[start: end]
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user. Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives the debugger's own (interactive) output.
        self.__out = out
        # Tracks whether set_trace() was ever invoked; see set_continue().
        self.__debugger_used = False
        pdb.Pdb.__init__(self, stdout=out)
        # still use input() to get user input
        self.use_rawinput = 1
    def set_trace(self, frame=None):
        """Enter the debugger at `frame` (default: the caller's frame)."""
        self.__debugger_used = True
        if frame is None:
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)
    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)
    def trace_dispatch(self, *args):
        """Dispatch a trace event with stdout temporarily redirected."""
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a fragment of source code together with its
    expected output.  Attributes:

    - source: one Python statement; always ends with a newline (the
      constructor appends one if it is missing).
    - want: the expected output of running the source (stdout, or a
      traceback in case of exception); ends with a newline unless it is
      the empty string (the constructor appends one if missing).
    - exc_msg: the exception message the example is expected to produce
      (compared against traceback.format_exception_only()), or None when
      no exception is expected; ends with a newline when not None.
    - lineno: zero-based line number of this example within the DocTest
      string it was parsed from.
    - indent: number of space characters preceding the example's first
      prompt in the DocTest string.
    - options: dict mapping option flags to True/False, overriding the
      runner's default optionflags for this example only; defaults to
      an empty dict (no overrides).
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: guarantee the trailing newlines the contract promises.
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
        self.exc_msg = exc_msg

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.source, self.want, self.lineno, self.indent,
                self.options, self.exc_msg) == \
               (other.source, other.want, other.lineno, other.indent,
                other.options, other.exc_msg)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # `options` (a dict) is deliberately excluded: it is unhashable.
        return hash((self.source, self.want, self.lineno, self.indent,
                     self.exc_msg))
class DocTest:
    """
    A group of doctest examples that run in one shared namespace.

    Attributes:
    - examples: the list of Example objects to run.
    - globs: the globals namespace the examples run in (a private copy
      of the mapping given to the constructor).
    - name: identifies this DocTest (typically the dotted name of the
      object whose docstring the examples came from).
    - filename: file this DocTest was extracted from, or None if unknown.
    - lineno: 0-based line within `filename` where this DocTest begins,
      or None if unavailable.
    - docstring: the raw string the examples were parsed out of, or None.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        # Passing a raw string here is a programming error: it must be
        # parsed into Example objects first.
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Private copy so running the tests cannot mutate the caller's dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno
    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.examples, self.docstring, self.globs, self.name,
                self.filename, self.lineno) == \
               (other.examples, other.docstring, other.globs, other.name,
                other.filename, other.lineno)
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # examples/globs are mutable; hash only the immutable identity.
        return hash((self.docstring, self.name, self.filename, self.lineno))
    # Python 2 fallback comparison: lets DocTests sort by name/file/line.
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string. It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
            (?:\n [ ]* \.\.\. .*)*) # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$) # Not a blank line
                     (?![ ]*>>>) # Not a line starting with PS1
                     .+$\n? # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)
    # A regular expression for handling `want` strings that contain
    # expected exceptions. It divides `want` into three pieces:
    # - the traceback header line (`hdr`)
    # - the traceback stack (`stack`)
    # - the exception message (`msg`), as generated by
    # traceback.format_exception_only()
    # `msg` may have multiple lines. We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header. Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
        (?: most\ recent\ call\ last
        | innermost\ last
        ) \) :
        )
        \s* $ # toss trailing whitespace on the header.
        (?P<stack> .*?) # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based. The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        # charno: index of the first not-yet-consumed character;
        # lineno: 0-based line number of that character.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            # (Blank/comment-only sources are parsed but not emitted.)
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.
        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object. See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)
    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects. Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.
        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        # parse() alternates strings and Examples; keep only the Examples.
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where
        `source` is the matched example's source code (with prompts and
        indentation stripped); `options` is the option-directive dict;
        `want` is the example's expected output (with indentation
        stripped); and `exc_msg` is the expected exception message, or
        None.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # Prompts ('>>> ' / '... ') are 4 characters wide, hence indent+4.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation. Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1] # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example. Option directives are comments
    # starting with "doctest:". Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:". Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            # Directives look like "# doctest: +FLAG, -OTHERFLAG".
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        # A directive on a line with no example code is meaningless.
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0
    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character. If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # line[indent+3] is the character right after the 3-char
            # prompt ('>>>' or '...'); it must be a space.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))
    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects. Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest). The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object. If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module. The object's module is used:
        - As a default namespace, if `globs` is not specified.
        - To prevent the DocTestFinder from extracting DocTests
        from objects that are imported from other modules.
        - To find the name of the file containing the object.
        - To help find the line number of the object within its
        file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`). A new copy of the globals dictionary is created
        for each DocTest. If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise. If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.). Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code. This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            if module is not None:
                # Supply the module globals in case the module was
                # originally loaded via a PEP 302 loader and
                # file is not a valid filesystem path
                source_lines = linecache.getlines(file, module.__dict__)
            else:
                # No access to a loader, so assume it's a normal
                # filesystem path
                source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__' # provide a default module name
        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output. This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4. It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            # func_globals is the Python 2 name for a function's globals
            # (renamed __globals__ in Python 3).
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        `seen` maps id(obj) -> 1 for every object already visited, to
        avoid reprocessing objects reachable by more than one path.
        """
        if self._verbose:
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                # (Unwrap to the underlying function; im_func is the
                # Python 2 bound-method attribute.)
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring. If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Map compiled file names back to the .py source.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring. Note:
        this method assumes that the object has a docstring.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.
        # (im_func/func_code/tb_frame/f_code are the Python 2 attribute
        # names; unwrap down to a code object.)
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # co_firstlineno is 1-based; convert to 0-based.
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts. Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
# If want can be `eval`ed compare the result of the last
# expression with want, otherwise compare the want and
# got strings.
try:
# If the last node in the source is not an expression
# `_` will be set to an arbitrary value.
tree = ast.parse(example.source)
if not isinstance(tree.body[-1], ast.Expr):
raise Exception # Fallback
actual = test.globs['__builtins__'].get('_')
expected = eval(example.want, test.globs)
except:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
else:
if expected == actual:
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
if check(_strip_exception_details(example.exc_msg),
_strip_exception_details(exc_msg),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>.+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
source = example.source
if isinstance(source, unicode):
source = source.encode('ascii', 'backslashreplace')
return source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Make sure sys.displayhook just prints the value to stdout
save_displayhook = sys.displayhook
sys.displayhook = sys.__displayhook__
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
sys.displayhook = save_displayhook
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
    """
    Print a summary of all the test cases that have been run by
    this DocTestRunner, and return a tuple `(f, t)`, where `f` is
    the total number of failed examples, and `t` is the total
    number of tried examples.

    The optional `verbose` argument controls how detailed the
    summary is.  If the verbosity is not specified, then the
    DocTestRunner's verbosity is used.
    """
    if verbose is None:
        verbose = self._verbose
    # Partition the accumulated per-test (failures, tries) counts
    # into three buckets: tests with no examples, tests that fully
    # passed, and tests with at least one failure.
    notests = []
    passed = []
    failed = []
    totalt = totalf = 0
    for x in self._name2ft.items():
        name, (f, t) = x
        # A test can never fail more examples than it tried.
        assert f <= t
        totalt += t
        totalf += f
        if t == 0:
            notests.append(name)
        elif f == 0:
            passed.append( (name, t) )
        else:
            failed.append(x)
    # Verbose mode additionally lists the empty and fully-passing
    # tests; failures are always listed.
    if verbose:
        if notests:
            print len(notests), "items had no tests:"
            notests.sort()
            for thing in notests:
                print " ", thing
        if passed:
            print len(passed), "items passed all tests:"
            passed.sort()
            for thing, count in passed:
                print " %3d tests in %s" % (count, thing)
    if failed:
        print self.DIVIDER
        print len(failed), "items had failures:"
        failed.sort()
        for thing, (f, t) in failed:
            print " %3d of %3d in %s" % (f, t, thing)
    if verbose:
        print totalt, "tests in", len(self._name2ft), "items."
        print totalt - totalf, "passed and", totalf, "failed."
    if totalf:
        print "***Test Failed***", totalf, "failures."
    elif verbose:
        print "Test passed."
    return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
    # Fold the per-test (failures, tries) counters recorded by
    # `other` into this runner's tally; counts for a test known to
    # both runners are summed.
    combined = self._name2ft
    for test_name, (fails, tries) in other._name2ft.items():
        previous = combined.get(test_name)
        if previous is not None:
            # Deliberately silent on collisions: printing a notice
            # here used to break some of the buildbots.
            fails += previous[0]
            tries += previous[1]
        combined[test_name] = (fails, tries)
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """

    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            # (Raw strings keep the regex escapes out of Python's
            # string-escape processing and silence invalid-escape
            # warnings on modern interpreters.)
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """
        Return True if `output_difference` should render this
        (want, got) pair as a difflib-based diff rather than a plain
        Expected/Got listing.
        """
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want

        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when an example's actual output does
    not match its expected output.

    Instance attributes:

      - test: the DocTest object being run
      - example: the Example object that failed
      - got: the actual output
    """
    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        # Identify the failure by its DocTest.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised (in debugging mode) when an example raises an exception
    that its expected output did not predict.

    Instance attributes:

      - test: the DocTest object being run
      - example: the Example object that failed
      - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        # Identify the failure by its DocTest.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ...                                    {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except UnexpectedException, failure:
    ...     pass
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 1
    ...      >>> x
    ...      2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ...    runner.run(test)
    ... except DocTestFailure, failure:
    ...    pass
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      >>> raise KeyError
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    TestResults(failed=0, attempted=1)
    >>> test.globs
    {}
    """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always run the base class with clear_globs=False so the
        # globals survive when report_* raises out of the run; clear
        # them here only after a clean run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, wrapping the original exception
        # so the caller can reach the test/example that triggered it.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort the run immediately on the first output mismatch.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
# (testmod()/testfile() merge each run's results into this runner, so
# repeated calls can be summarized together via master.summarize().)
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
    optionflags=0, extraglobs=None, raise_on_error=False,
    exclude_empty=False

    Run the doctest examples reachable from module `m` (or from the
    `__main__` module when `m` is omitted), starting with m.__doc__.
    Examples reachable from dict m.__test__, if it exists and is not
    None, are run too: m.__test__ maps names to functions, classes and
    strings; function and class docstrings are tested even if the name
    is private, and strings are treated directly as docstrings.

    Return (#failures, #tests).  See help(doctest) for an overview.

    Optional keyword arguments:

      "name": name of the module; defaults to m.__name__.
      "globs": dict used as the globals when executing examples;
        defaults to m.__dict__.  Each docstring runs against its own
        copy, so every docstring starts with a clean slate.
      "extraglobs": dict merged into the execution globals; by default
        no extra globals are used.
      "verbose": print everything if true, only failures if false; by
        default, it's true iff "-v" is in sys.argv.
      "report": print a summary at the end when true, else nothing.
        In verbose mode the summary is detailed, otherwise very brief.
      "optionflags": or's together module constants, defaulting to 0
        (DONT_ACCEPT_TRUE_FOR_1, DONT_ACCEPT_BLANKLINE,
        NORMALIZE_WHITESPACE, ELLIPSIS, SKIP, IGNORE_EXCEPTION_DETAIL,
        REPORT_UDIFF, REPORT_CDIFF, REPORT_NDIFF,
        REPORT_ONLY_FIRST_FAILURE).
      "raise_on_error": raise an exception on the first unexpected
        exception or failure, so it can be post-mortem debugged.
      "exclude_empty": exclude tests for objects with empty docstrings.

    Results are also merged into the global doctest.master runner;
    invoke doctest.master.summarize(verbose) to re-summarize later
    (pass report=0 here to delay the summary until then).
    """
    global master

    # Fall back to __main__ when no module was supplied (the common
    # case when invoked from the command line).
    if m is None:
        m = sys.modules.get('__main__')

    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    if name is None:
        name = m.__name__

    # A DebugRunner aborts on the first problem; a plain
    # DocTestRunner just records failures.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    # Find, parse, and run every test in the module.
    for test in DocTestFinder(exclude_empty=exclude_empty).find(
            m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Keep the backward-compatibility accumulator up to date.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Run the doctest examples in the given file.  Return
    (#failures, #tests).

    Optional keyword arguments:

      "module_relative" (default True): when true, "filename" is an
        os-independent module-relative path -- relative to the calling
        module's directory, or to "package" when that is given -- that
        must use "/" separators and may not be absolute.  When false,
        "filename" is an ordinary os-specific path, absolute or
        relative to the current working directory.
      "name": name of the test; defaults to the file's basename.
      "package": a Python package, or the name of one, whose directory
        anchors a module-relative filename.  It is an error to specify
        "package" when "module_relative" is False.
      "globs": dict used as the globals when executing examples;
        defaults to {}.  A copy is used, so each docstring's examples
        start with a clean slate.
      "extraglobs": dict merged into the execution globals; by default
        no extra globals are used.
      "verbose": print everything if true, only failures if false; by
        default, it's true iff "-v" is in sys.argv.
      "report": print a summary at the end when true, else nothing.
        In verbose mode the summary is detailed, otherwise very brief.
      "optionflags": or's together module constants, defaulting to 0
        (DONT_ACCEPT_TRUE_FOR_1, DONT_ACCEPT_BLANKLINE,
        NORMALIZE_WHITESPACE, ELLIPSIS, SKIP, IGNORE_EXCEPTION_DETAIL,
        REPORT_UDIFF, REPORT_CDIFF, REPORT_NDIFF,
        REPORT_ONLY_FIRST_FAILURE).
      "raise_on_error": raise an exception on the first unexpected
        exception or failure, so it can be post-mortem debugged.
      "parser": a DocTestParser (or subclass) used to extract the
        tests from the file.
      "encoding": an encoding used to convert the file to unicode.

    Results are also merged into the global doctest.master runner;
    invoke doctest.master.summarize(verbose) to re-summarize later
    (pass report=0 here to delay the summary until then).
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the test file (relativizing the path as needed).
    text, filename = _load_testfile(filename, package, module_relative)

    if name is None:
        name = os.path.basename(filename)

    # Build the execution globals from a *copy* of the caller's dict
    # so the caller's namespace is never mutated.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    globs.setdefault('__name__', '__main__')

    # A DebugRunner aborts on the first problem; a plain
    # DocTestRunner just records failures.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Convert the whole file to a single DocTest and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Keep the backward-compatibility accumulator up to date.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Run the doctest examples in the docstring of `f`, using `globs` as
    the globals.  `name` is used in failure messages.  When `verbose`
    is true, output is generated even if every example passes.

    `compileflags` gives the set of flags for the Python compiler when
    running the examples; if not specified it defaults to the set of
    future-import flags that apply to `globs`.  `optionflags` selects
    doctest options; see the documentation for `testmod`.
    """
    # recurse=False: only f's own docstring is searched, not the
    # docstrings of its contained objects.
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in DocTestFinder(verbose=verbose, recurse=False).find(
            f, name, globs=globs):
        runner.run(test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    # Deprecated pre-2.4 interface, kept only for backward
    # compatibility; it wraps a DocTestFinder/DocTestRunner pair.

    def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
        # Warn immediately so callers migrate to DocTestRunner.
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.optionflags = optionflags
        self.testfinder = DocTestFinder()
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse the string as a single DocTest and run it.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return TestResults(f,t)

    def rundoc(self, object, name=None, module=None):
        # Run every doctest found on `object`, summing the
        # (failures, tries) counts across all of them.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return TestResults(f,t)

    def rundict(self, d, name, module=None):
        import types
        # Wrap the dict in a synthetic module so rundoc can walk it.
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        import types
        # Expose `d` via the __test__ protocol of a synthetic module.
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        # Delegate to the wrapped runner.
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        # Delegate to the wrapped runner.
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################

# Module-level default reporting flags.  DocTestCase.runTest or's these
# into any optionflags that carry no reporting flags of their own;
# managed via set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> import doctest
    >>> old = doctest._unittest_reportflags
    >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
    ...                                  REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> doctest.set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                           REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags

    # Reject any bits outside the reporting-flag mask.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    # Install the new default and hand back the previous one.
    old = _unittest_reportflags
    _unittest_reportflags = flags
    return old
class DocTestCase(unittest.TestCase):
    # A unittest.TestCase that runs a single DocTest through a
    # DocTestRunner and reports failures via the normal unittest
    # failure mechanism.

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags  # doctest option flags
        self._dt_checker = checker          # optional custom OutputChecker
        self._dt_test = test                # the DocTest to run
        self._dt_setUp = setUp              # per-test setup hook
        self._dt_tearDown = tearDown        # per-test teardown hook

    def setUp(self):
        # Invoke the user-supplied setUp hook (if any) with the
        # DocTest object, so it can prime test.globs.
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        # Invoke the user-supplied tearDown hook (if any), then clear
        # the globals to help garbage collection.
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        test.globs.clear()

    def runTest(self):
        # Run the DocTest, capturing the runner's report into an
        # in-memory buffer; any failure is re-raised as a standard
        # unittest failureException with the formatted report.
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        # Build a unittest-style failure message pointing at the
        # file/line of the doctest.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging.  The test code
        is run in such a way that errors are not caught.  This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                                    {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException, failure:
        ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 1
        ...      >>> x
        ...      2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...    case.debug()
        ... except DocTestFailure, failure:
        ...    pass
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        # Use a DebugRunner so the first problem propagates as a
        # DocTestFailure/UnexpectedException instead of being reported.
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test, clear_globs=False)
        self.tearDown()

    def id(self):
        # Use the doctest's dotted name as the unittest id.
        return self._dt_test.name

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented

        return self._dt_test == other._dt_test and \
               self._dt_optionflags == other._dt_optionflags and \
               self._dt_setUp == other._dt_setUp and \
               self._dt_tearDown == other._dt_tearDown and \
               self._dt_checker == other._dt_checker

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Note: _dt_test is deliberately not part of the hash (it is
        # compared in __eq__ above).
        return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
                     self._dt_checker))

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    """A placeholder test case that reports a module's doctests as
    skipped.

    Used when no doctests can be collected because Python is running
    with -O2 or above (docstrings stripped).
    """

    def __init__(self, module):
        # Remember which module we stand in for; there is no real
        # DocTest to run, so pass None to the base class.
        self.module = module
        DocTestCase.__init__(self, None)

    def setUp(self):
        self.skipTest("DocTestSuite will not work with -O2 and above")

    def test_skip(self):
        pass

    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__

    __str__ = shortDescription
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Build a unittest test suite from the doctests in a module.

    Each documentation string in the module that contains doctest
    examples becomes one unittest test case; if any example in a
    docstring fails, that test case fails, reporting the file name
    and a (sometimes approximate) line number.

    `module` is the module to test, given either as a module object or
    as a module name; if omitted, the calling module is used.

    Keyword options:

      setUp
        A set-up function, called before running the tests in each
        docstring.  It is passed the DocTest object, whose globals are
        accessible via the test's `globs` attribute.
      tearDown
        A tear-down function, called after running the tests in each
        docstring.  It is passed the DocTest object, whose globals are
        accessible via the test's `globs` attribute.
      globs
        A dictionary containing initial global variables for the tests.
      optionflags
        A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)

    if not tests:
        if sys.flags.optimize >= 2:
            # Under -O2 docstrings are stripped, so finding nothing is
            # expected: report a skip rather than an error.
            suite = unittest.TestSuite()
            suite.addTest(SkipDocTestCase(module))
            return suite
        # Raising reveals modules with no docstrings at all, which
        # might otherwise hide a bug.  It is arguably a bug that a
        # module whose doctests contain zero examples does not also
        # raise, but changing that now would break compatibility
        # (see issue #14649).
        raise ValueError(module, "has no docstrings")

    tests.sort()
    suite = unittest.TestSuite()

    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Fill in the source filename, mapping a compiled file
            # back to its .py source.
            filename = module.__file__
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase for doctests loaded from a text file rather than
    a docstring."""

    def id(self):
        # Use the test name with dots flattened to underscores.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        # File-based tests always report line 0 of the file.
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (self._dt_test.name, self._dt_test.filename, err))
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    # Build a DocFileCase for the doctest file at `path`.  Remaining
    # keyword options are forwarded to the DocFileCase constructor.
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Work on a copy of the caller's globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()

    # Relativize the path and read the file.
    doc, path = _load_testfile(path, package, module_relative)
    globs.setdefault("__file__", path)

    # The test is named after the file.
    name = os.path.basename(path)

    # If an encoding is specified, use it to convert the file to unicode.
    if encoding is not None:
        doc = doc.decode(encoding)

    # Convert the text to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """Build a unittest suite from one or more doctest files.

    Each path is given as a string whose interpretation depends on the
    keyword argument "module_relative":

      - True (the default): paths are os-independent module-relative
        paths -- relative to the calling module's directory, or to
        "package" when that keyword is given.  They must use "/"
        separators and may not be absolute.
      - False: paths are os-specific, absolute or relative to the
        current working directory.

    Other keyword options:

      package
        A Python package, or the name of one, whose directory anchors
        module-relative paths.  It is an error to specify "package"
        together with module_relative=False.
      setUp
        A set-up function, called before running the tests in each
        file.  It is passed the DocTest object, whose globals are
        accessible via the test's `globs` attribute.
      tearDown
        A tear-down function, called after running the tests in each
        file.  It is passed the DocTest object, whose globals are
        accessible via the test's `globs` attribute.
      globs
        A dictionary containing initial global variables for the tests.
      optionflags
        A set of doctest option flags expressed as an integer.
      parser
        A DocTestParser (or subclass) used to extract tests from the
        files.
      encoding
        An encoding used to convert the files to unicode.
    """
    suite = unittest.TestSuite()

    # Resolve the package here rather than in DocFileTest, so that
    # _normalize_module sees the *caller* of DocFileSuite and guesses
    # the right package.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script.  Example input is
    converted to regular code.  Example output and all other words
    are converted to comments:
    >>> text = '''
    ...       Here are examples of simple math.
    ...
    ...           Python has super accurate integer addition
    ...
    ...           >>> 2 + 2
    ...           5
    ...
    ...           And very friendly error messages:
    ...
    ...           >>> 1/0
    ...           To Infinity
    ...           And
    ...           Beyond
    ...
    ...           You can use logic if you want:
    ...
    ...           >>> if 0:
    ...           ...    blah
    ...           ...    blah
    ...           ...
    ...
    ...           Ho hum
    ...           '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    #     Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    #     And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    #     You can use logic if you want:
    #
    if 0:
       blah
       blah
    #
    #     Ho hum
    <BLANKLINE>
    """
    output = []
    # The parser yields a mix of Example objects (interactive
    # examples) and plain strings (the prose between them).
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                output.append('# Expected:')
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]

    # Trim junk on both ends (bare '#' lines produced from blank
    # prose lines).
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(output) + '\n'
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    `module` is the module (or dotted name of the module) containing the
    test to be debugged; `name` is the name, within that module, of the
    object whose docstring holds the tests.  Raises ValueError when no
    test of that name is found.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring given as the string `src`."""
    # Convert the doctest examples to a plain script, then debug that.
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script. `src` is the script, as a string."
    import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()
    try:
        # Copy the caller's globals so the debugged script cannot mutate them.
        if globs:
            globs = globs.copy()
        else:
            globs = {}
        if pm:
            # Post-mortem mode: run the script, then drop into pdb at the
            # point of failure (if any).
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here. '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        # Always clean up the temporary script file.
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    `module` is the module (or dotted name of the module) containing the
    test to be debugged; `name` is the name, within that module, of the
    object whose docstring holds the tests.
    """
    mod = _normalize_module(module)
    script = testsource(mod, name)
    debug_script(script, pm, mod.__dict__)
######################################################################
## 10. Example Usage
######################################################################
# NOTE: every docstring in this class is a live doctest exercised via
# __test__ below -- do not edit the example text casually.
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
        square()
        get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """
        self.val = val
    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        self.val = self.val ** 2
        return self
    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """
        return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
"eval-ed expectation": r"""
If the expected value is `eval`able, then it will be
compared to the result with `==`.
>>> {'any', 'order', 'works'}
{'any', 'order', 'works'}
>>> {'order', 'any', 'works'}
{'any', 'order', 'works'}
>>> {'a': 1, 'b': 2}
{'a': 1, 'b': 2}
>>> {'a': 1, 'b': 2}
{'b': 2, 'a': 1}
""",
}
def _test():
    """Run doctests on files named on the command line.

    Returns a process exit status: 0 on success, 1 on any test failure,
    2 when no files were given (usage error).
    """
    testfiles = [a for a in sys.argv[1:] if a and a[0] != '-']
    if not testfiles:
        name = os.path.basename(sys.argv[0])
        if '__loader__' in globals():  # python -m
            name, _ = os.path.splitext(name)
        print("usage: {0} [-v] file ...".format(name))
        return 2
    for filename in testfiles:
        if filename.endswith(".py"):
            # Treat the file as a module: insert its directory into sys.path
            # and import it. If it is part of a package, package imports may
            # not resolve.
            dirname, modfile = os.path.split(filename)
            sys.path.insert(0, dirname)
            module = __import__(modfile[:-3])
            del sys.path[0]
            failures, _ = testmod(module)
        else:
            failures, _ = testfile(filename, module_relative=False)
        if failures:
            return 1
    return 0
# Script entry point: exit status mirrors _test()'s result.
if __name__ == "__main__":
    sys.exit(_test())
|
{
"content_hash": "e619d15b8bdbfbeaf4aaa5a9ded6b6e1",
"timestamp": "",
"source": "github",
"line_count": 2841,
"max_line_length": 79,
"avg_line_length": 37.404787046814505,
"alnum_prop": 0.5683137756782444,
"repo_name": "mrgriffin/doctest-eval",
"id": "17b498d7c181d7af264baa4e609cffe1f1b0cc17",
"size": "106514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doctest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "106514"
}
],
"symlink_target": ""
}
|
from bson.objectid import ObjectId
from datetime import datetime
from scraper.repository import Repository, Property, Collection
# Lifecycle states a Job's `status` field may hold.
STATUS_CREATED = "created"
STATUS_EXECUTING = "executing"
STATUS_FINISHED = "finished"


class Job(Collection, Repository):
    """Mongo-backed record of a scraping job.

    `status` holds one of the STATUS_* constants above; `started_at` and
    `completed_at` bracket the job's execution.
    """
    __collection__ = 'job'
    _id = Property(ObjectId, "job id")
    global_id = Property(unicode, "global id")
    status = Property(unicode, "job status")
    # Fix copy-pasted descriptions: both fields previously read "updated time".
    started_at = Property(datetime, "started time")
    completed_at = Property(datetime, "completed time")
|
{
"content_hash": "ab1af9a5fd384ffa52033c0c6e7bda47",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.7230769230769231,
"repo_name": "victorpantoja/webscraper",
"id": "ded33489300e3821524185a821190c12fb9ce651",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webscraper/models/job.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24628"
},
{
"name": "JavaScript",
"bytes": "1356"
},
{
"name": "Python",
"bytes": "20100"
}
],
"symlink_target": ""
}
|
from flask import current_app, redirect, request, session, url_for
from flask.views import MethodView
from oauth2client.client import OAuth2WebServerFlow
from freight.config import db
from freight.constants import PYTHON_VERSION
from freight.models import User
GOOGLE_AUTH_URI = "https://accounts.google.com/o/oauth2/auth"
GOOGLE_REVOKE_URI = "https://accounts.google.com/o/oauth2/revoke"
GOOGLE_TOKEN_URI = "https://accounts.google.com/o/oauth2/token"
def get_auth_flow(redirect_uri=None):
    """Construct a Google OAuth2 web-server flow for the current request.

    XXX(dcramer): this is rebuilt on every request because oauth2client
    doesn't want redirect_uri set as part of the request, which causes a
    lot of runtime issues.
    """
    domain = current_app.config["GOOGLE_DOMAIN"]
    # When a hosted domain is configured, hint it to the account chooser.
    auth_uri = GOOGLE_AUTH_URI + "?hd=" + domain if domain else GOOGLE_AUTH_URI
    return OAuth2WebServerFlow(
        client_id=current_app.config["GOOGLE_CLIENT_ID"],
        client_secret=current_app.config["GOOGLE_CLIENT_SECRET"],
        scope="https://www.googleapis.com/auth/userinfo.email",
        redirect_uri=redirect_uri,
        user_agent=f"freight (python {PYTHON_VERSION})",
        auth_uri=auth_uri,
        token_uri=GOOGLE_TOKEN_URI,
        revoke_uri=GOOGLE_REVOKE_URI,
    )
class LoginView(MethodView):
    """Starts OAuth2 login by redirecting the user to Google's consent page."""

    def __init__(self, authorized_url):
        self.authorized_url = authorized_url
        super().__init__()

    def get(self):
        callback = url_for(self.authorized_url, _external=True)
        flow = get_auth_flow(redirect_uri=callback)
        return redirect(flow.step1_get_authorize_url())
class AuthorizedView(MethodView):
    """OAuth2 callback: exchanges the auth code, provisions the user, and
    stores identity in the session."""
    def __init__(self, complete_url, authorized_url):
        self.complete_url = complete_url
        self.authorized_url = authorized_url
        super().__init__()
    def get(self):
        # Recreate the flow with the same redirect_uri used at step 1.
        redirect_uri = url_for(self.authorized_url, _external=True)
        flow = get_auth_flow(redirect_uri=redirect_uri)
        # Exchange the authorization code for credentials (includes id_token).
        resp = flow.step2_exchange(request.args["code"])
        if current_app.config["GOOGLE_DOMAIN"]:
            # Enforce hosted-domain restriction via the 'hd' claim.
            if resp.id_token.get("hd") != current_app.config["GOOGLE_DOMAIN"]:
                # TODO(dcramer): this should show some kind of error
                return redirect(url_for(self.complete_url))
        # Auto-provision a user record keyed by email on first login.
        user = User.query.filter(User.name == resp.id_token["email"]).first()
        if user is None:
            user = User(name=resp.id_token["email"])
            db.session.add(user)
            db.session.flush()
        session["uid"] = user.id
        session["access_token"] = resp.access_token
        session["email"] = resp.id_token["email"]
        return redirect(url_for(self.complete_url))
class LogoutView(MethodView):
    """Clears the login session and redirects to the completion page."""

    def __init__(self, complete_url):
        self.complete_url = complete_url
        super().__init__()

    def get(self):
        # Drop every piece of identity state; missing keys are ignored.
        for key in ("uid", "access_token", "email"):
            session.pop(key, None)
        return redirect(url_for(self.complete_url))
|
{
"content_hash": "7086e594245b838bc2d858ba5e1b6485",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 35.916666666666664,
"alnum_prop": 0.6476632416307591,
"repo_name": "getsentry/freight",
"id": "468159446081847a85add0e92c7d3baade962a42",
"size": "3017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freight/web/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3875"
},
{
"name": "HTML",
"bytes": "243"
},
{
"name": "JavaScript",
"bytes": "63952"
},
{
"name": "Less",
"bytes": "15455"
},
{
"name": "Makefile",
"bytes": "749"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "274562"
},
{
"name": "Ruby",
"bytes": "4941"
},
{
"name": "Shell",
"bytes": "864"
}
],
"symlink_target": ""
}
|
import argparse
import sys
from ros_buildfarm.argument import add_argument_source_dir
from ros_buildfarm.common import Scope
from ros_buildfarm.sourcedeb_job import build_sourcedeb
def main(argv=sys.argv[1:]):
    """Parse command-line arguments and build the package sourcedeb.

    NOTE(review): the default argv is captured once at import time; pass an
    explicit list to override.
    """
    with Scope('SUBSECTION', 'build sourcedeb'):
        arg_parser = argparse.ArgumentParser(
            description='Build package sourcedeb')
        add_argument_source_dir(arg_parser)
        options = arg_parser.parse_args(argv)
        return build_sourcedeb(options.source_dir)
# Script entry point; propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
{
"content_hash": "d4d917321e018324ff52cd3d3c736b88",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 26.65,
"alnum_prop": 0.6923076923076923,
"repo_name": "mani-monaj/ros_buildfarm",
"id": "c3d8210ef931cb4cae3e59dc92eebd6b98f222d8",
"size": "557",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/release/build_sourcedeb.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4148"
},
{
"name": "EmberScript",
"bytes": "175019"
},
{
"name": "JavaScript",
"bytes": "10890"
},
{
"name": "Python",
"bytes": "348910"
},
{
"name": "Shell",
"bytes": "5932"
}
],
"symlink_target": ""
}
|
import datetime
from django.contrib import messages
from django.core.mail import BadHeaderError
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse_lazy
from django.utils import timezone
from django.views.generic import DetailView, ListView, FormView
from pydotorg.mixins import LoginRequiredMixin
from .models import Calendar, Event, EventCategory, EventLocation
from .forms import EventForm
class CalendarList(ListView):
    # Generic ListView over all Calendar objects.
    model = Calendar
class EventListBase(ListView):
    """Shared behavior for event list pages: pagination, the featured event,
    and the category/location sidebars."""
    model = Event
    paginate_by = 6

    def get_object(self, queryset=None):
        # Subclasses override this to expose a "current" object in context.
        return None

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        featured_qs = self.get_queryset().filter(featured=True)
        try:
            context['featured'] = featured_qs[0]
        except IndexError:
            # No featured event; templates handle the missing key.
            pass
        context['event_categories'] = EventCategory.objects.all()[:10]
        context['event_locations'] = EventLocation.objects.all()[:10]
        context['object'] = self.get_object()
        return context
class EventHomepage(ListView):
    """Main event landing page: upcoming events, soonest first."""
    template_name = 'events/event_list.html'

    def get_queryset(self):
        upcoming = Event.objects.for_datetime(timezone.now())
        return upcoming.order_by('occurring_rule__dt_start')
class EventDetail(DetailView):
    """Detail page for one event, with dates 7/30/90/365 days after its
    next occurrence."""
    model = Event

    def get_queryset(self):
        return super().get_queryset().select_related()

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        next_time = data['object'].next_time
        if next_time:
            start = next_time.dt_start
            data.update({
                f'next_{days}': start + datetime.timedelta(days=days)
                for days in (7, 30, 90, 365)
            })
        return data
class EventList(EventListBase):
    """Upcoming events for one calendar, soonest first."""

    def get_queryset(self):
        slug = self.kwargs['calendar_slug']
        upcoming = Event.objects.for_datetime(timezone.now()).filter(calendar__slug=slug)
        return upcoming.order_by('occurring_rule__dt_start')

    def get_context_data(self, **kwargs):
        slug = self.kwargs['calendar_slug']
        context = super().get_context_data(**kwargs)
        recent = Event.objects.until_datetime(timezone.now()).filter(calendar__slug=slug)
        context['events_today'] = recent[:2]
        context['calendar'] = get_object_or_404(Calendar, slug=slug)
        return context
class PastEventList(EventList):
    """Past events for one calendar."""
    template_name = 'events/event_list_past.html'

    def get_queryset(self):
        slug = self.kwargs['calendar_slug']
        return Event.objects.until_datetime(timezone.now()).filter(calendar__slug=slug)
class EventListByDate(EventList):
    """Events for one calendar that occur on a specific date taken from the URL."""

    def get_object(self):
        parts = (int(self.kwargs[key]) for key in ('year', 'month', 'day'))
        return datetime.date(*parts)

    def get_queryset(self):
        return Event.objects.for_datetime(self.get_object()).filter(
            calendar__slug=self.kwargs['calendar_slug'])
class EventListByCategory(EventList):
    """Upcoming events restricted to one category of a calendar."""

    def get_object(self, queryset=None):
        return get_object_or_404(
            EventCategory,
            calendar__slug=self.kwargs['calendar_slug'],
            slug=self.kwargs['slug'])

    def get_queryset(self):
        return super().get_queryset().filter(categories__slug=self.kwargs['slug'])
class EventListByLocation(EventList):
    """Upcoming events restricted to one venue of a calendar."""

    def get_object(self, queryset=None):
        return get_object_or_404(
            EventLocation,
            calendar__slug=self.kwargs['calendar_slug'],
            pk=self.kwargs['pk'])

    def get_queryset(self):
        return super().get_queryset().filter(venue__pk=self.kwargs['pk'])
class EventCategoryList(ListView):
    """Paginated categories belonging to one calendar."""
    model = EventCategory
    paginate_by = 30

    def get_queryset(self):
        slug = self.kwargs['calendar_slug']
        return self.model.objects.filter(calendar__slug=slug)

    def get_context_data(self, **kwargs):
        # Sidebar shows only the first ten categories.
        kwargs['event_categories'] = self.get_queryset()[:10]
        return super().get_context_data(**kwargs)
class EventLocationList(ListView):
    """Paginated venues belonging to one calendar."""
    model = EventLocation
    paginate_by = 30

    def get_queryset(self):
        slug = self.kwargs['calendar_slug']
        return self.model.objects.filter(calendar__slug=slug)
class EventSubmit(LoginRequiredMixin, FormView):
    """Authenticated form that submits a proposed event via email."""
    template_name = 'events/event_form.html'
    form_class = EventForm
    success_url = reverse_lazy('events:event_thanks')

    def form_valid(self, form):
        try:
            form.send_email(self.request.user)
        except BadHeaderError:
            # Likely header injection in a form field; bounce back to the form.
            messages.error(self.request, 'Invalid header found.')
            return redirect('events:event_submit')
        else:
            return super().form_valid(form)
|
{
"content_hash": "cf131c40e47ae1440c672220414cf233",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 146,
"avg_line_length": 32.38513513513514,
"alnum_prop": 0.6645107448362195,
"repo_name": "manhhomienbienthuy/pythondotorg",
"id": "2490626e3d97e7e1e6c28e616f6a10eb658160b4",
"size": "4793",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "events/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "491673"
},
{
"name": "JavaScript",
"bytes": "20834"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1075699"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "197973"
}
],
"symlink_target": ""
}
|
"""TensorSpec factory for ragged tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops.ragged import ragged_tensor
def ragged_tensor_spec(shape=None, dtype=dtypes.float32,
                       ragged_rank=None, row_splits_dtype=dtypes.int64,
                       name=None):
  """Returns a tensor specification for a RaggedTensor.
  Returns an object which can be passed to `tf.function` (or other
  functions that expect `TensorSpec`s) to specify shape constraints
  for a `RaggedTensor` argument.
  Args:
    shape: The shape of the RaggedTensor, or `None` to allow any shape.
    dtype: Data type of values in the RaggedTensor.
    ragged_rank: Python integer, the ragged rank of the RaggedTensor
      to be described. Defaults to `shape.ndims - 1`.
    row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor.
      One of `tf.int32` or `tf.int64`.
    name: Optional name prefix for the `TensorSpec`s.
  Returns:
    An object describing the `flat_values` and `nested_row_splits` tensors
    that comprise the `RaggedTensor`.
  """
  dtype = dtypes.as_dtype(dtype)
  shape = tensor_shape.TensorShape(shape)
  if ragged_rank is None:
    if shape.ndims is None:
      raise ValueError("Must specify ragged_rank or a shape with known rank.")
    ragged_rank = shape.ndims - 1
  elif not isinstance(ragged_rank, int):
    raise TypeError("ragged_rank must be an int")
  if ragged_rank == 0:
    # No ragged dimensions at all: an ordinary dense TensorSpec suffices.
    return tensor_spec.TensorSpec(shape=shape, dtype=dtype, name=name)
  # Spec for the flat_values tensor: leading dim unknown, inner dims from
  # the non-ragged tail of `shape`.
  result = tensor_spec.TensorSpec(
      tensor_shape.TensorShape([None]).concatenate(shape[ragged_rank + 1:]),
      dtype, name)
  # Wrap with row_splits specs from the innermost ragged dimension outward.
  for i in range(ragged_rank - 1, 0, -1):
    splits = tensor_spec.TensorSpec(
        [None], row_splits_dtype,
        "%s.row_splits_%d" % (name, i) if name else None)
    result = ragged_tensor.RaggedTensor.from_row_splits(result, splits)
  outer_dim = tensor_shape.dimension_at_index(shape, 0)
  # NOTE(review): dimension_at_index returns a Dimension object; presumably
  # this guard intends `outer_dim.value is None` -- confirm, though
  # Dimension(None) + 1 also yields an unknown dimension.
  splits_shape = [None if outer_dim is None else outer_dim + 1]
  splits = tensor_spec.TensorSpec(
      splits_shape, row_splits_dtype,
      "%s.row_splits_0" % name if name else None)
  result = ragged_tensor.RaggedTensor.from_row_splits(result, splits)
  return result
|
{
"content_hash": "259455a88d45fdc70fb64f8dc8824f72",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 38.77777777777778,
"alnum_prop": 0.7028243962341384,
"repo_name": "ghchinoy/tensorflow",
"id": "9da282cea3c99af4f05fb6926e091f4c9ec7bdca",
"size": "3132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/ragged/ragged_tensor_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
}
|
# Sphinx configuration for the English documentation build.
import sys, os
# Append the top level directory of the docs, so we can import from the config dir.
sys.path.insert(0, os.path.abspath('..'))
# Pull in all the configuration options defined in the global config file.
from config.all import *
language = 'en'
|
{
"content_hash": "073863d4cb059d64974c322996805d48",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 83,
"avg_line_length": 29,
"alnum_prop": 0.7318007662835249,
"repo_name": "cakephp/chronos",
"id": "f638bda2257a926dc478e25b869358c894214189",
"size": "261",
"binary": false,
"copies": "6",
"ref": "refs/heads/2.x",
"path": "docs/en/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1017"
},
{
"name": "PHP",
"bytes": "445425"
}
],
"symlink_target": ""
}
|
import sys
class Message(object):
    """Colored console logger that tags output with the (sub)class name.

    `classname` is taken from the concrete class, so subclasses' messages
    identify their origin automatically.
    """

    def __init__(self):
        self.classname = self.__class__.__name__
        # ANSI terminal color escape sequences.
        self.errorcolor = "\033[31m"#red
        self.infocolor = "\033[34m"#blue
        self.warningcolor = "\033[33m"#yellow
        self.successcolor = "\033[32m"#green
        self.endcolor = "\033[0m"#reset

    def _location(self, functionname):
        """Return 'Class' or 'Class::function' for message tags."""
        if functionname == "":
            return self.classname
        return self.classname + "::" + functionname

    def error(self, message, functionname=""):
        """Print a red error message and terminate the process via sys.exit."""
        printstring = ("\n" + self.errorcolor + "*** Error [" +
                       self._location(functionname) + "]: " + message +
                       " ***\n" + self.endcolor)
        sys.exit(printstring)

    def info(self, message, newline=True):
        """Print a blue info message; with newline=False, omit the class prefix.

        NOTE(review): despite its name, `newline` selects the prefix format --
        print() always emits a trailing newline. Name kept for compatibility.
        """
        # Bug fix: the original built the prefixed string unconditionally and
        # then discarded it on the newline=False path; build only what we print.
        if newline:
            print(self.infocolor + "[" + self.classname + "]: " + message + self.endcolor)
        else:
            print(self.infocolor + message + self.endcolor)
        sys.stdout.flush()

    def warning(self, message, functionname=""):
        """Print a yellow warning, optionally tagged with a function name."""
        print(self.warningcolor + "[" + self._location(functionname) +
              "] Warning: " + message + self.endcolor)

    def success(self, message):
        """Print a green success message."""
        print(self.successcolor + "[" + self.classname + "]: " + message + self.endcolor)
|
{
"content_hash": "7951b4670976e0f9add0c570cf2aa2d0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 130,
"avg_line_length": 39.61538461538461,
"alnum_prop": 0.574757281553398,
"repo_name": "gammapy/enrico",
"id": "c7d2630ea1e0477fe3793279895e814e08d84c83",
"size": "1545",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "enrico/Loggin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "380955"
},
{
"name": "Shell",
"bytes": "8497"
}
],
"symlink_target": ""
}
|
from gi.repository import Gdk
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
#from FlatCAMApp import *
from FlatCAM_GTK import FlatCAMApp
class PlotCanvas:
    """
    Class handling the plotting area in the application.
    """
    def __init__(self, container):
        """
        The constructor configures the Matplotlib figure that
        will contain all plots, creates the base axes and connects
        events to the plotting area.
        :param container: The parent container in which to draw plots.
        :rtype: PlotCanvas
        """
        # Options
        self.x_margin = 15 # pixels
        self.y_margin = 25 # Pixels
        # Parent container
        self.container = container
        # Plots go onto a single matplotlib.figure
        self.figure = Figure(dpi=50) # TODO: dpi needed?
        self.figure.patch.set_visible(False)
        # These axes show the ticks and grid. No plotting done here.
        # New axes must have a label, otherwise mpl returns an existing one.
        self.axes = self.figure.add_axes([0.05, 0.05, 0.9, 0.9], label="base", alpha=0.0)
        self.axes.set_aspect(1)
        self.axes.grid(True)
        # The canvas is the top level container (Gtk.DrawingArea)
        self.canvas = FigureCanvas(self.figure)
        self.canvas.set_hexpand(1)
        self.canvas.set_vexpand(1)
        self.canvas.set_can_focus(True)  # For key press
        # Attach to parent
        self.container.attach(self.canvas, 0, 0, 600, 400)  # TODO: Height and width are num. columns??
        # Events: mouse/key go through Matplotlib, scroll/resize through GTK.
        self.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
        self.canvas.connect('configure-event', self.auto_adjust_axes)
        self.canvas.add_events(Gdk.EventMask.SMOOTH_SCROLL_MASK)
        self.canvas.connect("scroll-event", self.on_scroll)
        self.canvas.mpl_connect('key_press_event', self.on_key_down)
        self.canvas.mpl_connect('key_release_event', self.on_key_up)
        # Last known mouse position (data coords) and currently held key.
        self.mouse = [0, 0]
        self.key = None
    def on_key_down(self, event):
        """
        Records the key currently held down (used by on_scroll).
        :param event: Matplotlib key_press_event.
        :return: None
        """
        self.key = event.key
    def on_key_up(self, event):
        """
        Clears the recorded key on release.
        :param event: Matplotlib key_release_event.
        :return: None
        """
        self.key = None
    def mpl_connect(self, event_name, callback):
        """
        Attach an event handler to the canvas through the Matplotlib interface.
        :param event_name: Name of the event
        :type event_name: str
        :param callback: Function to call
        :type callback: func
        :return: Connection id
        :rtype: int
        """
        return self.canvas.mpl_connect(event_name, callback)
    def mpl_disconnect(self, cid):
        """
        Disconnect callback with the give id.
        :param cid: Callback id.
        :return: None
        """
        self.canvas.mpl_disconnect(cid)
    def connect(self, event_name, callback):
        """
        Attach an event handler to the canvas through the native GTK interface.
        :param event_name: Name of the event
        :type event_name: str
        :param callback: Function to call
        :type callback: function
        :return: Nothing
        """
        self.canvas.connect(event_name, callback)
    def clear(self):
        """
        Clears axes and figure.
        :return: None
        """
        # Clear
        self.axes.cla()
        try:
            self.figure.clf()
        except KeyError:
            FlatCAMApp.App.log.warning("KeyError in MPL figure.clf()")
        # Re-build: re-attach the base axes with grid and 1:1 aspect.
        self.figure.add_axes(self.axes)
        self.axes.set_aspect(1)
        self.axes.grid(True)
        # Re-draw
        self.canvas.queue_draw()
    def adjust_axes(self, xmin, ymin, xmax, ymax):
        """
        Adjusts all axes while maintaining the use of the whole canvas
        and an aspect ratio to 1:1 between x and y axes. The parameters are an original
        request that will be modified to fit these restrictions.
        :param xmin: Requested minimum value for the X axis.
        :type xmin: float
        :param ymin: Requested minimum value for the Y axis.
        :type ymin: float
        :param xmax: Requested maximum value for the X axis.
        :type xmax: float
        :param ymax: Requested maximum value for the Y axis.
        :type ymax: float
        :return: None
        """
        FlatCAMApp.App.log.debug("PC.adjust_axes()")
        width = xmax - xmin
        height = ymax - ymin
        try:
            r = width / height
        except ZeroDivisionError:
            FlatCAMApp.App.log.error("Height is %f" % height)
            return
        canvas_w, canvas_h = self.canvas.get_width_height()
        canvas_r = float(canvas_w) / canvas_h
        x_ratio = float(self.x_margin) / canvas_w
        y_ratio = float(self.y_margin) / canvas_h
        # Expand one dimension (around its center) so the requested region
        # fits the canvas while keeping the 1:1 data aspect ratio.
        if r > canvas_r:
            ycenter = (ymin + ymax) / 2.0
            newheight = height * r / canvas_r
            ymin = ycenter - newheight / 2.0
            ymax = ycenter + newheight / 2.0
        else:
            xcenter = (xmax + xmin) / 2.0
            newwidth = width * canvas_r / r
            xmin = xcenter - newwidth / 2.0
            xmax = xcenter + newwidth / 2.0
        # Adjust axes: apply the limits to every axes; non-base axes get no
        # frame/ticks/background so only the base axes shows chrome.
        for ax in self.figure.get_axes():
            if ax._label != 'base':
                ax.set_frame_on(False)  # No frame
                ax.set_xticks([])  # No tick
                ax.set_yticks([])  # No ticks
                ax.patch.set_visible(False)  # No background
                ax.set_aspect(1)
            ax.set_xlim((xmin, xmax))
            ax.set_ylim((ymin, ymax))
            ax.set_position([x_ratio, y_ratio, 1 - 2 * x_ratio, 1 - 2 * y_ratio])
        # Re-draw
        self.canvas.queue_draw()
    def auto_adjust_axes(self, *args):
        """
        Calls ``adjust_axes()`` using the extents of the base axes.
        :rtype : None
        :return: None
        """
        xmin, xmax = self.axes.get_xlim()
        ymin, ymax = self.axes.get_ylim()
        self.adjust_axes(xmin, ymin, xmax, ymax)
    def zoom(self, factor, center=None):
        """
        Zooms the plot by factor around a given
        center point. Takes care of re-drawing.
        :param factor: Number by which to scale the plot.
        :type factor: float
        :param center: Coordinates [x, y] of the point around which to scale the plot.
        :type center: list
        :return: None
        """
        xmin, xmax = self.axes.get_xlim()
        ymin, ymax = self.axes.get_ylim()
        width = xmax - xmin
        height = ymax - ymin
        # Default to zooming around the view center.
        if center is None or center == [None, None]:
            center = [(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]
        # For keeping the point at the pointer location
        relx = (xmax - center[0]) / width
        rely = (ymax - center[1]) / height
        new_width = width / factor
        new_height = height / factor
        xmin = center[0] - new_width * (1 - relx)
        xmax = center[0] + new_width * relx
        ymin = center[1] - new_height * (1 - rely)
        ymax = center[1] + new_height * rely
        # Adjust axes
        for ax in self.figure.get_axes():
            ax.set_xlim((xmin, xmax))
            ax.set_ylim((ymin, ymax))
        # Re-draw
        self.canvas.queue_draw()
    def pan(self, x, y):
        # Shift the view by (x, y) expressed as fractions of the visible span.
        xmin, xmax = self.axes.get_xlim()
        ymin, ymax = self.axes.get_ylim()
        width = xmax - xmin
        height = ymax - ymin
        # Adjust axes
        for ax in self.figure.get_axes():
            ax.set_xlim((xmin + x*width, xmax + x*width))
            ax.set_ylim((ymin + y*height, ymax + y*height))
        # Re-draw
        self.canvas.queue_draw()
    def new_axes(self, name):
        """
        Creates and returns an Axes object attached to this object's Figure.
        :param name: Unique label for the axes.
        :return: Axes attached to the figure.
        :rtype: Axes
        """
        return self.figure.add_axes([0.05, 0.05, 0.9, 0.9], label=name)
    def on_scroll(self, canvas, event):
        """
        Scroll event handler: zooms, or pans when shift/ctrl is held.
        :param canvas: The widget generating the event. Ignored.
        :param event: Event object containing the event information.
        :return: None
        """
        # So it can receive key presses
        self.canvas.grab_focus()
        # Event info
        z, direction = event.get_scroll_direction()
        if self.key is None:
            if direction is Gdk.ScrollDirection.UP:
                self.zoom(1.5, self.mouse)
            else:
                self.zoom(1/1.5, self.mouse)
            return
        if self.key == 'shift':
            # Shift + scroll pans horizontally.
            if direction is Gdk.ScrollDirection.UP:
                self.pan(0.3, 0)
            else:
                self.pan(-0.3, 0)
            return
        # NOTE(review): matplotlib apparently reports 'ctrl+control' while
        # Ctrl alone is held -- confirm across backends before changing.
        if self.key == 'ctrl+control':
            # Ctrl + scroll pans vertically.
            if direction is Gdk.ScrollDirection.UP:
                self.pan(0, 0.3)
            else:
                self.pan(0, -0.3)
            return
    def on_mouse_move(self, event):
        """
        Mouse movement event handler. Stores the coordinates.
        :param event: Contains information about the event.
        :return: None
        """
        self.mouse = [event.xdata, event.ydata]
|
{
"content_hash": "be7a6dc488dc189c1ee1e520f9cfbcf5",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 103,
"avg_line_length": 30.31290322580645,
"alnum_prop": 0.5560285197403426,
"repo_name": "silasb/flatcam",
"id": "2096c11c1c0ac695a1580afb077485d05f7be96a",
"size": "9825",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "FlatCAM_GTK/PlotCanvas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12363"
},
{
"name": "JavaScript",
"bytes": "54735"
},
{
"name": "Python",
"bytes": "462732"
},
{
"name": "Shell",
"bytes": "7305"
}
],
"symlink_target": ""
}
|
"""Utility functions for managing customer supplied encryption keys."""
import base64
import json
from googlecloudsdk.calliope import exceptions
# Keys that every record in a CSEK key file must contain.
EXPECTED_RECORD_KEY_KEYS = set(['uri', 'key'])
# Length of a base64-encoded key including '=' padding (44 chars decode to
# 32 bytes -- presumably a 256-bit key).
BASE64_KEY_LENGTH_IN_CHARS = 44
class MissingCsekKeyException(exceptions.ToolException):
  """Raised when a resource requires a CSEK key but none was found."""

  def __init__(self, resource):
    msg = 'Key required for resource [{0}], but none found.'.format(resource)
    super(MissingCsekKeyException, self).__init__(msg)
class InvalidKeyFileException(exceptions.ToolException):
  """There's a problem in a CSEK file."""

  def __init__(self, base_message):
    msg = '{0}'.format(base_message)
    super(InvalidKeyFileException, self).__init__(msg)
# TODO(user) Update this message to include
# a link to friendly documentation.
class BadPatternException(InvalidKeyFileException):
  """Raised for a malformed (e.g. uri) pattern, recording which and why."""

  def __init__(self, pattern_type, pattern):
    self.pattern_type = pattern_type
    self.pattern = pattern
    msg = 'Invalid value for [{0}] pattern: [{1}]'.format(
        pattern_type, pattern)
    super(BadPatternException, self).__init__(msg)
class InvalidKeyException(InvalidKeyFileException):
  """Indicate that a particular key is bad, why, and where."""

  def __init__(self, key, key_id, issue):
    self.key = key
    self.key_id = key_id
    self.issue = issue
    msg = 'Invalid key, [{0}], for [{1}]: {2}'.format(key, key_id, issue)
    super(InvalidKeyException, self).__init__(msg)
def ValidateKey(base64_encoded_string, key_for):
  """ValidateKey(s, k) returns None or raises InvalidKeyException.

  Checks length (including padding), trailing '=' padding, and that the
  string decodes as base64.

  Args:
    base64_encoded_string: str, candidate base64-encoded key.
    key_for: str, identifier of the resource the key is for (messages only).

  Raises:
    InvalidKeyException: if the key is malformed.
  """
  # Use the named constant instead of a magic 44 -- the error message below
  # already reports BASE64_KEY_LENGTH_IN_CHARS, so keep the check consistent.
  if len(base64_encoded_string) != BASE64_KEY_LENGTH_IN_CHARS:
    raise InvalidKeyException(
        base64_encoded_string, key_for,
        'Key should contain {0} characters (including padding), '
        'but is [{1}] characters long.'.format(
            BASE64_KEY_LENGTH_IN_CHARS,
            len(base64_encoded_string)))
  if base64_encoded_string[-1] != '=':
    raise InvalidKeyException(
        base64_encoded_string, key_for,
        'Bad padding. Keys should end with an \'=\' character.')
  try:
    base64.standard_b64decode(base64_encoded_string)
  except TypeError as t:
    # NOTE: Python 2 semantics (py3 raises binascii.Error and has no
    # `.message` attribute); kept as-is for this py2 codebase.
    raise InvalidKeyException(
        base64_encoded_string, key_for,
        'Key is not valid base64: [{0}].'.format(t.message))
def AddCsekKeyArgs(parser, flags_about_creation=True):
    """Adds arguments related to csek keys.

    NOTE: currently a deliberate no-op -- see the TODO below.  Everything
    after the bare `return` is intentionally unreachable until CSEK support
    is re-enabled.

    Args:
      parser: argparse-style parser to add the flags to.
      flags_about_creation: bool, whether to also add the flag that controls
        creation of resources without a CSEK key.
    """
    # TODO(b/20883005)
    # We're temporarily disabling CSEK to allow cl/92889254 to land without
    # breaking our tests.
    return
    # pylint: disable=unreachable
    csek_key_file = parser.add_argument(
        '--csek-key-file',
        help=('Path to a csek key file'),
        metavar='FILE')
    csek_key_file.detailed_help = (
        'Path to a csek key file, mapping GCE resources to user managed '
        'keys to be used when creating, mounting, or snapshotting disks. ')
    # TODO(user)
    # Argument - indicates the key file should be read from stdin.'
    if flags_about_creation:
        no_require_csek_key_create = parser.add_argument(
            '--no-require-csek-key-create',
            help=('Allow creating of resources not protected by csek key.'),
            action='store_true')
        no_require_csek_key_create.detailed_help = (
            'When invoked with --csek-key-file gcloud will refuse to create '
            'resources not protected by a user managed key in the key file. This '
            'is intended to prevent incorrect gcloud invocations from accidentally '
            'creating resources with no user managed key. This flag disables the '
            'check and allows creation of resources without csek keys.')
    # TODO(b/20883005) remove:
    # pylint: enable=unreachable
class UriPattern(object):
    """A URI-based pattern that can be matched against resource objects."""

    def __init__(self, path_as_string):
        # Only http(s) URIs are accepted as patterns.
        if not path_as_string.startswith('http'):
            raise BadPatternException('uri', path_as_string)
        self._path_as_string = path_as_string

    def Matches(self, resource):
        """Return True iff the resource's self link equals this pattern."""
        return resource.SelfLink() == self._path_as_string

    def __str__(self):
        return 'Uri Pattern: ' + self._path_as_string
class CsekKeyStore(object):
    """Represents a map from resource patterns to CSEK keys.

    Attributes:
      state: dict mapping UriPattern to a valid, base64-encoded key.
    """

    def __init__(self, json_string):
        self.state = CsekKeyStore._ParseAndValidate(json_string)

    @staticmethod
    def FromFile(fname):
        """FromFile loads a CsekKeyStore from a file.

        Args:
          fname: str, the name of a file intended to contain a well-formed
            key file.

        Returns:
          A CsekKeyStore, if found.

        Raises:
          exceptions.BadFileException: there's a problem reading fname.
          exceptions.InvalidKeyFileException: the key file failed to parse
            or was otherwise invalid.
        """
        with open(fname) as infile:
            content = infile.read()
        return CsekKeyStore(content)

    @staticmethod
    def FromArgs(args):
        """FromArgs attempts to load a CsekKeyStore from a command's args.

        Args:
          args: CLI args with a csek_key_file field set.

        Returns:
          A CsekKeyStore, if a valid key file name is provided as
          csek_key_file; None, if args.csek_key_file is None.

        Raises:
          exceptions.BadFileException: there's a problem reading fname.
          exceptions.InvalidKeyFileException: the key file failed to parse
            or was otherwise invalid.
        """
        assert hasattr(args, 'csek_key_file')
        if args.csek_key_file is None:
            return None
        return CsekKeyStore.FromFile(args.csek_key_file)

    @staticmethod
    def _ParseAndValidate(s):
        """_ParseAndValidate(s) interprets s as a csek key file.

        Args:
          s: str, an input to parse.

        Returns:
          A valid state dict (UriPattern -> base64 key).

        Raises:
          InvalidKeyFileException: if the input doesn't parse or is not
            well-formed.
        """
        assert type(s) is str
        state = {}
        try:
            records = json.loads(s)
            if type(records) is not list:
                raise InvalidKeyFileException(
                    "Key file's top-level element must be a JSON list.")
            for key_record in records:
                if type(key_record) is not dict:
                    raise InvalidKeyFileException(
                        'Key file records must be JSON objects, but [{0}] found.'.format(
                            json.dumps(key_record)))
                if set(key_record.keys()) != EXPECTED_RECORD_KEY_KEYS:
                    raise InvalidKeyFileException(
                        'Record [{0}] has incorrect json keys; [{1}] expected'.format(
                            json.dumps(key_record),
                            ','.join(EXPECTED_RECORD_KEY_KEYS)))
                pattern = UriPattern(key_record['uri'])
                ValidateKey(key_record['key'], pattern)
                state[pattern] = key_record['key']
        except ValueError as e:
            # BUG FIX: InvalidKeyFileException has no FromCurrent() factory;
            # calling it raised AttributeError and masked the real JSON parse
            # error.  Raise the intended exception with the parse message.
            raise InvalidKeyFileException(
                'Key file is not valid JSON: [{0}].'.format(e))
        assert type(state) is dict
        return state

    def __len__(self):
        return len(self.state)

    def LookupKey(self, resource, raise_if_missing=False):
        """Search for the unique key corresponding to a given resource.

        Args:
          resource: the resource to find a key for.
          raise_if_missing: bool, raise an exception if the resource is not
            found.

        Returns:
          The base64 encoded string corresponding to the resource, or None if
          not found and not raise_if_missing.

        Raises:
          InvalidKeyFileException: if there are two records matching the
            resource.
          MissingCsekKeyException: if raise_if_missing and no key is found
            for the provided resource.
        """
        assert type(self.state) is dict
        search_state = (None, None)
        # .items() works on both Python 2 and Python 3 (iteritems is Py2-only).
        for pat, key in self.state.items():
            if pat.Matches(resource):
                # TODO(user): what's the best thing to do if there are
                # multiple matches?
                if search_state[0]:
                    # BUG FIX: raise the module-level InvalidKeyFileException;
                    # googlecloudsdk.calliope.exceptions does not define one,
                    # so the previous exceptions.InvalidKeyFileException
                    # reference raised AttributeError instead.
                    raise InvalidKeyFileException(
                        'Uri patterns [{0}] and [{1}] both match '
                        'resource [{2}]. Bailing out.'.format(
                            search_state[0], pat, str(resource)))
                search_state = (pat, key)
        if raise_if_missing and (search_state[1] is None):
            raise MissingCsekKeyException(resource)
        return search_state[1]
def MaybeLookupKey(csek_keys_or_none, resource):
    """Return the key for resource from the store, or None if either is falsy."""
    if not (csek_keys_or_none and resource):
        return None
    return csek_keys_or_none.LookupKey(resource)


def MaybeLookupKeys(csek_keys_or_none, resources):
    """Look up a key (or None) for every resource in resources."""
    return [MaybeLookupKey(csek_keys_or_none, res) for res in resources]


def MaybeLookupKeysByUri(csek_keys_or_none, parser, uris):
    """Parse each URI into a resource, then look up keys for all of them."""
    resources = [(parser.Parse(u) if u else None) for u in uris]
    return MaybeLookupKeys(csek_keys_or_none, resources)
|
{
"content_hash": "7f58123a8267111af1420c655dba68cb",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 80,
"avg_line_length": 30.49824561403509,
"alnum_prop": 0.6585365853658537,
"repo_name": "wemanuel/smry",
"id": "45ce64edac8479cd76816b1ab1c096b289424987",
"size": "8742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/lib/csek_utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
import matplotlib as mpl
mpl.use('Agg') # required to prevent DISPLAY error; must be before pyplot (REF 050)
import matplotlib.pyplot as plt
import numpy as np
class PlotTestCase(object):
    '''Base class for plot tests; adapted from seaborn.

    Seeds numpy's global RNG before each test for reproducibility, and closes
    every matplotlib figure afterwards so figures cannot leak between tests.
    '''
    def setUp(self):
        # Fixed seed so figures built from random data are reproducible.
        np.random.seed(33)
    def tearDown(self):
        # Release figure memory and reset matplotlib state for the next test.
        plt.close('all')
|
{
"content_hash": "501d695ee1872f8f42b995d8cbd6c7b3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 127,
"avg_line_length": 28,
"alnum_prop": 0.5989010989010989,
"repo_name": "par2/lamana",
"id": "3586331909a4517a66040042bc3c67e40d945141",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lamana/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "546664"
}
],
"symlink_target": ""
}
|
import numpy as np
def init_random_normal(lower, upper, n_points, mean=None, std=None, rng=None):
    """
    Returns as initial design N data points sampled from a normal
    distribution, clipped to the input bounds.

    Parameters
    ----------
    lower: np.ndarray (D)
        Lower bound of the input space
    upper: np.ndarray (D)
        Upper bound of the input space
    n_points: int
        The number of initial data points
    mean: np.ndarray (D)
        Mean of the normal distribution for each dimension
    std: np.ndarray (D)
        Std of the normal distribution for each dimension
    rng: np.random.RandomState
        Random number generator

    Returns
    -------
    np.ndarray(N, D)
        The initial design data points
    """
    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))
    n_dims = lower.shape[0]
    # Default mean: center of the box; default std: 0.1 per dimension.
    if mean is None:
        mean = (upper + lower) * 0.5
    if std is None:
        std = np.ones([n_dims]) * 0.1
    # Sample one dimension at a time (same RNG call order as a per-dim loop),
    # clipping each column back into [lower, upper].
    design = np.empty((n_dims, n_points))
    for dim in range(n_dims):
        samples = rng.normal(mean[dim], std[dim], n_points)
        design[dim] = np.clip(samples, lower[dim], upper[dim])
    return design.T
|
{
"content_hash": "1f79fc71bd262cb9e88ce4519f302166",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 87,
"avg_line_length": 27.73170731707317,
"alnum_prop": 0.5980650835532102,
"repo_name": "numairmansur/RoBO",
"id": "56088503338aecaaccaf55cc530671139d51f5ab",
"size": "1138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "robo/initial_design/init_random_normal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1008348"
},
{
"name": "Python",
"bytes": "340324"
},
{
"name": "Shell",
"bytes": "1006"
}
],
"symlink_target": ""
}
|
'''
Created on 2013-01-22
@author: levi
'''
from util.parser import load_parser
from util.test_script_base import Test
class ATLTest(Test):
    """Test configuration for the extended ATL Families-to-Persons case study.

    Populates the transformation layer schedule, metamodel paths and contract
    lists consumed by the generic Test driver.
    """
    def __init__(self, args):
        Test.__init__(self)
        # ============TRANSFORMATION=================
        # Rule layers; rules inside one inner list belong to the same layer.
        self.full_transformation = [
            ['HCountry2Community'],
            ['HFather2Man'],
            ['HMother2Woman'],
            ['HDaughter2Woman'],
            ['HSon2Man'],
            ['HNeighborhood2District'],
            ['HCity2TownHall', 'HCityCompany2Association'],
            ['HcopersonsSolveRefCountryFamilyParentCommunityMan'],
            ['HcopersonsSolveRefCountryFamilyParentCommunityWoman'],
            ['HcopersonsSolveRefCountryFamilyChildCommunityMan'],
            ['HcopersonsSolveRefCountryFamilyChildCommunityWoman'],
            ['HcotownHallsSolveRefCountryCityCommunityTownHall',
             'HcoassociationsSolveRefCountryCityCompanyCommunityAssociation',
             'HacommitteeSolveRefCompanyCityAssociationCommittee'],
            ['HtworkersSolveRefCompanyParentCityTownHallPerson'],
            ['HtdistrictsSolveRefCityNeighborhoodTownHallDistrict'],
            ['HdfacilitiesSolveRefNeighborhoodSchoolServiceChildDistrictOrdinaryFacilityPerson'],
            ['HdfacilitiesSolveRefNeighborhoodSchoolServiceChildDistrictSpecialFacilityPerson']
        ]
        self.transformation_directory = "ExFamToPerson/transformation"
        #self.transformation_directory = "/home/dcx/Projects/SyVOLT/tmp/backend/transformation/"
        self.artifact_directory = "~/Projects/SyVOLT/"
        # =====METAMODELS===============
        self.inputMM = "~/Projects/SyVOLT/eclipse_integration/metamodels/Families_Extended.ecore"
        self.outputMM = "~/Projects/SyVOLT/eclipse_integration/metamodels/Persons_Extended.ecore"
        # ====CONTRACTS==================
        # Integration contracts by default; unit contracts when the CLI flag
        # integration_contracts is present and false.
        if not hasattr(args, "integration_contracts") or args.integration_contracts:
            self.contract_directory = "ExFamToPerson/contracts"
            self.atomic_contracts = [
                "Neg_CityCompany",
                "Neg_CountryCity",
                "Neg_SchoolOrdFac",
                "Neg_DaughterMother",
                "Pos_AssocCity",
                "Pos_ChildSchool",
                "Pos_FourMembers",
                "Pos_MotherFather",
                "Pos_ParentCompany",
                "Pos_TownHallComm",
            ]
        else:
            self.contract_directory = "ExFamToPerson/contracts/unit"
            self.atomic_contracts = [
                "UnitCountry2Community",
                "UnitDaughter2Woman",
                "UnitFather2Man",
                "UnitMother2Woman",
                "UnitSon2Man",
                "UnitCity2TownHall",
                "UnitN2D",
                "UnitConnectDaughter",
                "UnitConnectSon",
                "UnitConnectMother",
                "UnitConnectFather",
                "UnitConnectAssoc",
                # #"UnitConnectCommittee",
                "UnitConnectDistricts",
                "UnitConnectTownHall",
                "UnitConnectOrdSchool",
                "UnitConnectSpecSchool",
            ]
        # If-then contracts are currently all disabled.
        self.if_then_contracts = [
            #[["Neg_CountryCity"], ["Neg_CityCompany", "Neg_SchoolOrdFac", "AND"]],
            # [["EmptyContract"], ["Pos_FourMembers", "Pos_MotherFather", "AND"]],
        ]
        # =========PC SAVE LOCATION
        self.pc_save_filename = "pcs_atlTrans_extended.txt"
if __name__ == "__main__":
    # Parse CLI arguments, then run the correctness test for this case study.
    cli_args = load_parser().parse_args()
    ATLTest(cli_args).test_correct(cli_args)
|
{
"content_hash": "66ba81f96963e4300930672d74884a39",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 97,
"avg_line_length": 33.06306306306306,
"alnum_prop": 0.5743869209809265,
"repo_name": "levilucio/SyVOLT",
"id": "2328b958c63a49ad558855bdb2764bf2711976ac",
"size": "3670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_atlTrans_extended.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generate and process one artificial test series: 32 daily points with a
# polynomial trend, a 7-step cycle, an "Integration" transform, zero noise,
# 20 exogenous variables and no autoregressive component.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Integration", sigma = 0.0, exog_count = 20, ar_order = 0);
|
{
"content_hash": "45a0337bd5db2864334986b9bc7f1081",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 165,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.7056603773584905,
"repo_name": "antoinecarme/pyaf",
"id": "13c6bdd554e07985475bcc1366101d5e773429b0",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_PolyTrend/cycle_7/ar_/test_artificial_32_Integration_PolyTrend_7__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import sys
import re
import dns
import navcoin
import dnssec
from util import StoreDict, print_error
from i18n import _
class Contacts(StoreDict):
    """Persistent address book mapping names to addresses, with OpenAlias
    (DNS TXT record) resolution support."""

    def __init__(self, config):
        StoreDict.__init__(self, config, 'contacts')

    def resolve(self, k):
        """Resolve *k* to a dict describing an address, a stored contact,
        or an OpenAlias record; raises on failure."""
        if navcoin.is_address(k):
            return {
                'address': k,
                'type': 'address'
            }
        if k in self.keys():
            kind, addr = self[k]
            if kind == 'address':
                return {
                    'address': addr,
                    'type': 'contact'
                }
        resolved = self.resolve_openalias(k)
        if resolved:
            address, name, validated = resolved
            return {
                'address': address,
                'name': name,
                'type': 'openalias',
                'validated': validated
            }
        raise Exception("Invalid NavCoin address or alias", k)

    def resolve_openalias(self, url):
        """Look up an OpenAlias TXT record for *url*; returns
        (address, name, dnssec_validated) or None."""
        # support email-style addresses, per the OA standard
        url = url.replace('@', '.')
        records, validated = dnssec.query(url, dns.rdatatype.TXT)
        oa_prefix = 'oa1:' + 'btc'
        for record in records:
            txt = record.strings[0]
            if not txt.startswith(oa_prefix):
                continue
            address = self.find_regex(txt, r'recipient_address=([A-Za-z0-9]+)')
            name = self.find_regex(txt, r'recipient_name=([^;]+)') or address
            if not address:
                continue
            return address, name, validated

    def find_regex(self, haystack, needle):
        """Return the first capture group of *needle* in *haystack*, or None."""
        match = re.compile(needle).search(haystack)
        return match.groups()[0] if match else None
|
{
"content_hash": "2e36c3a8f046c4374c497072ed0c64a7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 86,
"avg_line_length": 29.580645161290324,
"alnum_prop": 0.4989094874591058,
"repo_name": "sherlockcoin/Electrum-NavCoin",
"id": "2ac128ff94749f9de3523225eca41cdf6a7ba64b",
"size": "2973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/contacts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3343"
},
{
"name": "Makefile",
"bytes": "835"
},
{
"name": "NSIS",
"bytes": "6912"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "1148604"
},
{
"name": "Shell",
"bytes": "6463"
}
],
"symlink_target": ""
}
|
import lldb
import commands
import optparse
import shlex
class StackArgument:
    """An argument passed on the stack, located at $rsp + index bytes."""

    def __init__(self, frame, index):
        self.index = index
        self.frame = frame

    def GetValueAs(self, type):
        """Read the stack slot, cast to *type*, and return its lldb value."""
        expr = '*(' + type + '*)($rsp+' + str(self.index) + ')'
        return self.frame.EvaluateExpression(expr).value
def getDescription(frame, expr):
    """Return the -description string of an Objective-C expression,
    or '(null)' when it has no summary."""
    summary = frame.EvaluateExpression(
        '(char*)[[' + str(expr) + ' description] UTF8String]').GetSummary()
    return '(null)' if summary is None else summary.strip('"')
def stripTypeQualifiers(str):
    """Drop leading Objective-C type-qualifier characters (r n N o O R V)
    from a type-encoding string."""
    qualifiers = 'rnNoORV'
    return str.lstrip(qualifiers)
def pmsg(debugger, command, result, internal_dict):
    """Print the Objective-C message send currently being dispatched.

    Intended to be run while stopped at objc_msgSend on x86-64: it decodes
    the receiver ($rdi), the selector ($rsi) and each argument according to
    the method signature, following the System V AMD64 calling convention
    (integer-class args in rdx/rcx/r8/r9 then the stack; float-class args in
    xmm0-xmm7 then the stack).  NOTE(review): Python 2 syntax (print
    statements) -- this script targets a Python 2 lldb.
    """
    frame = debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame()
    regs = frame.GetRegisters()['General Purpose Registers'][0]
    fregs = frame.GetRegisters()['Floating Point Registers'][0]
    obj = getDescription(frame, '$rdi')
    msg = '[' + obj
    # Fall back to NSObject when the receiver is nil so the signature lookup
    # itself does not message nil.
    methodsig = frame.EvaluateExpression('(id)[' + ('$rdi' if regs.GetChildMemberWithName('rdi').GetValueAsUnsigned() != 0 else '[NSObject class]') + ' methodSignatureForSelector: $rsi]')
    selector = frame.EvaluateExpression('(id)[NSStringFromSelector($rsi) componentsSeparatedByString: @":"]')
    argcount = frame.EvaluateExpression('(uint64_t)[' + str(methodsig.GetValueAsUnsigned()) + ' numberOfArguments]').GetValueAsUnsigned()
    if argcount == 2:
        # Only the implicit self and _cmd: the selector takes no arguments.
        msg += ' ' + getDescription(frame, '[' + str(selector.GetValueAsUnsigned()) + ' objectAtIndex: 0]')
    else:
        stackptr = 0   # byte offset of the next stack-passed argument
        ints = 2       # next integer-register slot (0/1 = rdi/rsi, used by self/_cmd)
        floats = 0     # next xmm register slot
        for i in range(2,argcount):
            msg += ' ' + getDescription(frame, '[' + str(selector.GetValueAsUnsigned()) + ' objectAtIndex: ' + str(i-2) + ']') + ':'
            argtype = stripTypeQualifiers(frame.EvaluateExpression('(char*)[' + str(methodsig.GetValueAsUnsigned()) + ' getArgumentTypeAtIndex: ' + str(i) + ']').GetSummary().strip('"'))
            # Candidate source if this argument turns out to be integer-class.
            if ints == 2:
                currentObjecti = regs.GetChildMemberWithName('rdx')
            elif ints == 3:
                currentObjecti = regs.GetChildMemberWithName('rcx')
            elif ints == 4:
                currentObjecti = regs.GetChildMemberWithName('r8')
            elif ints == 5:
                currentObjecti = regs.GetChildMemberWithName('r9')
            else:
                currentObjecti = StackArgument(frame, stackptr)
            # Candidate source if this argument turns out to be float-class.
            if floats == 0:
                currentObjectf = fregs.GetChildMemberWithName('xmm0')
            elif floats == 1:
                currentObjectf = fregs.GetChildMemberWithName('xmm1')
            elif floats == 2:
                currentObjectf = fregs.GetChildMemberWithName('xmm2')
            elif floats == 3:
                currentObjectf = fregs.GetChildMemberWithName('xmm3')
            elif floats == 4:
                currentObjectf = fregs.GetChildMemberWithName('xmm4')
            elif floats == 5:
                currentObjectf = fregs.GetChildMemberWithName('xmm5')
            elif floats == 6:
                currentObjectf = fregs.GetChildMemberWithName('xmm6')
            elif floats == 7:
                currentObjectf = fregs.GetChildMemberWithName('xmm7')
            else:
                currentObjectf = StackArgument(frame, stackptr)
            if currentObjectf == None or currentObjecti == None: continue
            # Decode by the first character of the ObjC type encoding; only
            # the consumed register class advances, and stack arguments bump
            # stackptr by 8 bytes instead.
            if argtype[0] == '@':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    obj = str(currentObjecti.GetValueAs('id'))
                else:
                    ints += 1
                    obj = str(currentObjecti.GetValueAsUnsigned())
                desc = getDescription(frame, obj)
                # NSStrings get quoted; everything else prints its description.
                if frame.EvaluateExpression('(int)[' + obj + ' isKindOfClass: [NSString class]]').GetValueAsUnsigned() == 1:
                    msg += '@"' + desc + '"'
                else:
                    msg += desc
            elif argtype[0] == '^':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += hex(int(currentObjecti.GetValueAs('void*'), 16))
                else:
                    ints += 1
                    msg += hex(currentObjecti.GetValueAsUnsigned())
            elif argtype[0] == '*':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    ptr = str(currentObjecti.GetValueAs('char*'))
                else:
                    ints += 1
                    ptr = str(currentObjecti.GetValueAsUnsigned())
                s = frame.EvaluateExpression('(char*)' + ptr).GetSummary()
                msg += s if s is not None else '(null)'
            elif argtype[0] == 'i':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('int'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsSigned())
            elif argtype[0] == 'c':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('char'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsSigned())
            elif argtype[0] == 's':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('short'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsSigned())
            elif argtype[0] == 'l':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('long'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsSigned())
            elif argtype[0] == 'q':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('long long'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsSigned())
            elif argtype[0] == 'C':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('unsigned char'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsUnsigned())
            elif argtype[0] == 'I':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('unsigned int'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsUnsigned())
            elif argtype[0] == 'S':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('unsigned short'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsUnsigned())
            elif argtype[0] == 'L':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('unsigned long'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsUnsigned())
            elif argtype[0] == 'Q':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('unsigned long long'))
                else:
                    ints += 1
                    msg += str(currentObjecti.GetValueAsUnsigned())
            elif argtype[0] == 'B':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjecti.GetValueAs('bool'))
                else:
                    ints += 1
                    msg += 'false' if currentObjecti.GetValueAsUnsigned() == 0 else 'true'
            elif argtype[0] == '#':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    cls = str(currentObjecti.GetValueAs('Class'))
                else:
                    ints += 1
                    cls = str(currentObjecti.GetValueAsUnsigned())
                msg += getDescription(frame, cls)
            elif argtype[0] == ':':
                if currentObjecti.__class__ == StackArgument:
                    stackptr += 8
                    ptr = str(currentObjecti.GetValueAs('SEL'))
                else:
                    ints += 1
                    ptr = str(currentObjecti.GetValueAsUnsigned())
                s = frame.EvaluateExpression('(char*)sel_getName(' + ptr + ')').GetSummary()
                msg += ('@selector(' + s.strip('"') + ')') if s is not None else '(null)'
            elif argtype[0] == 'f':
                if currentObjectf.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjectf.GetValueAs('float'))
                else:
                    floats += 1
                    # Reinterpret the xmm register as a float vector and take lane 0.
                    msg += getDescription(frame, '[NSNumber numberWithFloat: ((float __attribute__((ext_vector_type(4))))$' + currentObjectf.GetName() + ')[0]]')
            elif argtype[0] == 'd':
                if currentObjectf.__class__ == StackArgument:
                    stackptr += 8
                    msg += str(currentObjectf.GetValueAs('double'))
                else:
                    floats += 1
                    # Reinterpret the xmm register as a double vector and take lane 0.
                    msg += getDescription(frame, '[NSNumber numberWithDouble: ((double __attribute__((ext_vector_type(2))))$' + currentObjectf.GetName() + ')[0]]')
            else:
                # Just assume it would be an integer: consume an integer slot
                # so later arguments stay aligned, and warn the user.
                if currentObjecti.__class__ == StackArgument: stackptr += 8
                else: ints += 1
                print '(' + str(i-2) + ') Unsupported type:', argtype
    print >>result, msg + ']'
# And the initialization code to add your commands
def __lldb_init_module(debugger, internal_dict):
    """Called by lldb on 'command script import'; registers the pmsg command."""
    debugger.HandleCommand('command script add -f pmsg_lldb.pmsg pmsg')
    print 'The "pmsg" python command has been installed and is ready for use.'
|
{
"content_hash": "514cb1bd9fe334220c2623d17bdd7cdf",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 187,
"avg_line_length": 45.136752136752136,
"alnum_prop": 0.49981064192387803,
"repo_name": "ScrimpyCat/pmsg",
"id": "e3dd2d9e9b1f24d349387f9273e7eb2cce8434c1",
"size": "10598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmsg_lldb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
from rest_framework.generics import ListCreateAPIView
from apis.betterself.v1.mood.filters import UserMoodLogFilter
from apis.betterself.v1.mood.serializers import MoodReadOnlySerializer, MoodCreateUpdateSerializer
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
from config.pagination import ModifiedPageNumberPagination
from events.models import UserMoodLog
class UserMoodViewSet(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
    """List/create endpoint for mood logs, with UUID-based update and delete
    provided by the mixins.  Reads and writes use different serializers.
    """
    model = UserMoodLog
    pagination_class = ModifiedPageNumberPagination
    read_serializer_class = MoodReadOnlySerializer
    write_serializer_class = MoodCreateUpdateSerializer
    update_serializer_class = MoodCreateUpdateSerializer
    filter_class = UserMoodLogFilter
    def get_serializer_class(self):
        # Delegate to the mixin: read serializer for GETs, write for mutations.
        return self._get_read_or_write_serializer_class()
    def get_queryset(self):
        # Users may only see their own mood logs.
        return self.model.objects.filter(user=self.request.user)
|
{
"content_hash": "3d106e2adbd9b0b27f954873ba272e46",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 105,
"avg_line_length": 45.13636363636363,
"alnum_prop": 0.8227593152064451,
"repo_name": "jeffshek/betterself",
"id": "641afa5733a23a2115a8c78fd585f830e540c8d1",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apis/betterself/v1/mood/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "430527"
},
{
"name": "HTML",
"bytes": "26382"
},
{
"name": "JavaScript",
"bytes": "232349"
},
{
"name": "Python",
"bytes": "525014"
},
{
"name": "Shell",
"bytes": "6298"
}
],
"symlink_target": ""
}
|
"""
Analyze docstrings to detect errors.
If no argument is provided, it does a quick check of docstrings and returns
a csv with all API functions and results of basic checks.
If a function or method is provided in the form "pandas.function",
"pandas.module.class.method", etc. a list of all errors in the docstring for
the specified function or method.
Usage::
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""
from __future__ import annotations
import argparse
import doctest
import importlib
import io
import json
import os
import pathlib
import subprocess
import sys
import tempfile
import matplotlib
import matplotlib.pyplot as plt
import numpy
from numpydoc.docscrape import get_doc_object
from numpydoc.validate import (
Validator,
validate,
)
import pandas
# With template backend, matplotlib plots nothing
matplotlib.use("template")

# Styler methods are Jinja2 objects whose docstrings we don't own.
IGNORE_VALIDATION = {
    "Styler.env",
    "Styler.template_html",
    "Styler.template_html_style",
    "Styler.template_html_table",
    "Styler.template_latex",
    "Styler.template_string",
    "Styler.loader",
}
# Internal base classes that must never appear in public docstrings (GL04).
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
# pandas-specific error codes and message templates; formatted by pandas_error.
ERROR_MSGS = {
    "GL04": "Private classes ({mentioned_private_classes}) should not be "
    "mentioned in public docstrings",
    "GL05": "Use 'array-like' rather than 'array_like' in docstrings.",
    "SA05": "{reference_name} in `See Also` section does not need `pandas` "
    "prefix, use {right_reference} instead.",
    "EX02": "Examples do not pass tests:\n{doctest_log}",
    "EX03": "flake8 error: {error_code} {error_message}{times_happening}",
    "EX04": "Do not import {imported_library}, as it is imported "
    "automatically for the examples (numpy as np, pandas as pd)",
}
def pandas_error(code, **kwargs):
    """
    Copy of the numpydoc error function, since ERROR_MSGS can't be updated
    with our custom errors yet.
    """
    message = ERROR_MSGS[code].format(**kwargs)
    return (code, message)
def get_api_items(api_doc_fd):
    """
    Yield information about all public API items.

    Parse api.rst file from the documentation, and extract all the functions,
    methods, classes, attributes... This should include all pandas public API.

    Parameters
    ----------
    api_doc_fd : file descriptor
        A file descriptor of the API documentation page, containing the table
        of contents with all the public API.

    Yields
    ------
    name : str
        The name of the object (e.g. 'pandas.Series.str.upper).
    func : function
        The object itself. In most cases this will be a function or method,
        but it can also be classes, properties, cython objects...
    section : str
        The name of the section in the API page where the object item is
        located.
    subsection : str
        The name of the subsection in the API page where the object item is
        located.
    """
    current_module = "pandas"
    previous_line = current_section = current_subsection = ""
    position = None  # None -> ".. autosummary::" seen -> inside its item list
    for line in api_doc_fd:
        line = line.strip()
        # rst section/subsection titles are underlined with a row of '-'/'~'
        # of exactly the title's length; the title is the previous line.
        if len(line) == len(previous_line):
            if set(line) == set("-"):
                current_section = previous_line
                continue
            if set(line) == set("~"):
                current_subsection = previous_line
                continue
        if line.startswith(".. currentmodule::"):
            current_module = line.replace(".. currentmodule::", "").strip()
            continue
        if line == ".. autosummary::":
            position = "autosummary"
            continue
        if position == "autosummary":
            # A blank line separates the autosummary options from its items.
            if line == "":
                position = "items"
                continue
        if position == "items":
            # A blank line ends the item list.
            if line == "":
                position = None
                continue
            item = line.strip()
            if item in IGNORE_VALIDATION:
                continue
            # Resolve the dotted item name starting from the current module.
            func = importlib.import_module(current_module)
            for part in item.split("."):
                func = getattr(func, part)
            yield (
                ".".join([current_module, item]),
                func,
                current_section,
                current_subsection,
            )
        previous_line = line
class PandasDocstring(Validator):
    """numpydoc Validator specialized for pandas docstrings."""

    def __init__(self, func_name: str, doc_obj=None) -> None:
        self.func_name = func_name
        if doc_obj is None:
            doc_obj = get_doc_object(Validator._load_obj(func_name))
        super().__init__(doc_obj)

    @property
    def name(self):
        # The Validator identifies objects by name; use the qualified one.
        return self.func_name

    @property
    def mentioned_private_classes(self):
        # Private class names that leak into the raw docstring text (GL04).
        return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]

    @property
    def examples_errors(self):
        """Run the docstring examples as doctests and return the failure log."""
        flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL
        finder = doctest.DocTestFinder()
        runner = doctest.DocTestRunner(optionflags=flags)
        context = {"np": numpy, "pd": pandas}
        error_msgs = ""
        current_dir = set(os.listdir())
        for test in finder.find(self.raw_doc, self.name, globs=context):
            f = io.StringIO()
            runner.run(test, out=f.write)
            error_msgs += f.getvalue()
        # Any file/dir created by the examples is cleaned up and reported so
        # the offending example gets a `# doctest: +SKIP`.
        leftovers = set(os.listdir()).difference(current_dir)
        if leftovers:
            for leftover in leftovers:
                path = pathlib.Path(leftover).resolve()
                if path.is_dir():
                    path.rmdir()
                elif path.is_file():
                    path.unlink(missing_ok=True)
            raise Exception(
                f"The following files were leftover from the doctest: "
                f"{leftovers}. Please use # doctest: +SKIP"
            )
        return error_msgs

    @property
    def examples_source_code(self):
        # Raw source of every doctest example, one string per example.
        lines = doctest.DocTestParser().get_examples(self.raw_doc)
        return [line.source for line in lines]

    def validate_pep8(self):
        """Yield (error_code, message, count) for flake8 issues in examples."""
        if not self.examples:
            return
        # F401 is needed to not generate flake8 errors in examples
        # that do not use numpy or pandas
        content = "".join(
            (
                "import numpy as np # noqa: F401\n",
                "import pandas as pd # noqa: F401\n",
                *self.examples_source_code,
            )
        )
        error_messages = []
        with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file:
            file.write(content)
            file.flush()
            cmd = ["python", "-m", "flake8", "--quiet", "--statistics", file.name]
            response = subprocess.run(cmd, capture_output=True, check=False, text=True)
            stdout = response.stdout
            # Strip the temp file name so messages are stable and readable.
            stdout = stdout.replace(file.name, "")
            messages = stdout.strip("\n")
            if messages:
                error_messages.append(messages)
        for error_message in error_messages:
            # flake8 --statistics lines look like: "<count> <code> <message>".
            error_count, error_code, message = error_message.split(maxsplit=2)
            yield error_code, message, int(error_count)

    def non_hyphenated_array_like(self):
        # GL05: docstrings should say "array-like", not "array_like".
        return "array_like" in self.raw_doc
def pandas_validate(func_name: str):
    """
    Call the numpydoc validation, and add the errors specific to pandas.

    Parameters
    ----------
    func_name : str
        Name of the object of the docstring to validate.

    Returns
    -------
    dict
        Information about the docstring and the errors found.
    """
    func_obj = Validator._load_obj(func_name)
    # Some objects are instances, e.g. IndexSlice, which numpydoc can't validate
    doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__)
    doc = PandasDocstring(func_name, doc_obj)
    result = validate(doc_obj)
    errs = result["errors"]  # alias: appended-to in place below

    private_mentions = doc.mentioned_private_classes
    if private_mentions:
        errs.append(
            pandas_error(
                "GL04", mentioned_private_classes=", ".join(private_mentions)
            )
        )

    if doc.see_also:
        for rel_name in doc.see_also:
            if rel_name.startswith("pandas."):
                errs.append(
                    pandas_error(
                        "SA05",
                        reference_name=rel_name,
                        right_reference=rel_name[len("pandas.") :],
                    )
                )

    result["examples_errs"] = ""
    if doc.examples:
        result["examples_errs"] = doc.examples_errors
        if result["examples_errs"]:
            errs.append(pandas_error("EX02", doctest_log=result["examples_errs"]))

        for error_code, error_message, error_count in doc.validate_pep8():
            times_happening = f" ({error_count} times)" if error_count > 1 else ""
            errs.append(
                pandas_error(
                    "EX03",
                    error_code=error_code,
                    error_message=error_message,
                    times_happening=times_happening,
                )
            )
        examples_source_code = "".join(doc.examples_source_code)
        for wrong_import in ("numpy", "pandas"):
            if f"import {wrong_import}" in examples_source_code:
                errs.append(pandas_error("EX04", imported_library=wrong_import))

    if doc.non_hyphenated_array_like():
        errs.append(pandas_error("GL05"))

    # Examples may have produced figures; close them all.
    plt.close("all")
    return result
def validate_all(prefix, ignore_deprecated=False):
    """
    Execute the validation of all docstrings, and return a dict with the
    results.

    Parameters
    ----------
    prefix : str or None
        If provided, only the docstrings that start with this pattern will be
        validated. If None, all docstrings will be validated.
    ignore_deprecated: bool, default False
        If True, deprecated objects are ignored when validating docstrings.

    Returns
    -------
    dict
        A dictionary with an item for every function/method... containing
        all the validation information.
    """
    results = {}
    first_seen_at = {}

    # Collect every documented API item listed in the .rst reference pages.
    repo_root = pathlib.Path(__file__).parent.parent
    reference_dir = pathlib.Path(repo_root, "doc", "source", "reference")
    api_items = []
    for rst_path in reference_dir.glob("*.rst"):
        with open(rst_path) as fd:
            api_items.extend(get_api_items(fd))

    for name, _, section, subsection in api_items:
        if prefix and not name.startswith(prefix):
            continue
        info = pandas_validate(name)
        if ignore_deprecated and info["deprecated"]:
            continue
        results[name] = info
        # Objects defined at the same file/line share a docstring; remember
        # the first name seen so later duplicates can point back to it.
        location = info["file"], info["file_line"]
        info.update(
            {
                "in_api": True,
                "section": section,
                "subsection": subsection,
                "shared_code_with": first_seen_at.get(location, ""),
            }
        )
        first_seen_at[location] = name
    return results
def print_validate_all_results(
    prefix: str,
    errors: list[str] | None,
    output_format: str,
    ignore_deprecated: bool,
):
    """Validate every docstring matching *prefix* and print one line per error.

    Returns 0 for JSON output, otherwise the number of error lines written
    (used by main() as the process exit status).
    """
    if output_format not in ("default", "json", "actions"):
        raise ValueError(f'Unknown output_format "{output_format}"')

    validation = validate_all(prefix, ignore_deprecated)

    if output_format == "json":
        sys.stdout.write(json.dumps(validation))
        return 0

    # GitHub Actions recognizes the "##[error]" marker and annotates the PR.
    line_prefix = "##[error]" if output_format == "actions" else ""
    error_count = 0
    for name, info in validation.items():
        for err_code, err_desc in info["errors"]:
            if errors and err_code not in errors:
                continue
            sys.stdout.write(
                f'{line_prefix}{info["file"]}:{info["file_line"]}:'
                f"{err_code}:{name}:{err_desc}\n"
            )
            error_count += 1
    return error_count
def print_validate_one_results(func_name: str):
    """Validate a single docstring and pretty-print the report to stderr."""

    def banner(title, width=80, char="#"):
        # Center *title* inside a three-line rule of *char* characters.
        rule = char * width
        pad = (width - len(title) - 2) // 2
        filler = "" if len(title) % 2 == 0 else " "
        middle = f"{char * pad} {title}{filler} {char * pad}"
        return f"\n{rule}\n{middle}\n{rule}\n\n"

    result = pandas_validate(func_name)

    sys.stderr.write(banner(f"Docstring ({func_name})"))
    sys.stderr.write(f"{result['docstring']}\n")

    sys.stderr.write(banner("Validation"))
    if not result["errors"]:
        sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
    else:
        sys.stderr.write(f'{len(result["errors"])} Errors found:\n')
        for err_code, err_desc in result["errors"]:
            if err_code == "EX02":  # Failing examples are printed at the end
                sys.stderr.write("\tExamples do not pass tests\n")
            else:
                sys.stderr.write(f"\t{err_desc}\n")

    if result["examples_errs"]:
        sys.stderr.write(banner("Doctests"))
        sys.stderr.write(result["examples_errs"])
def main(func_name, prefix, errors, output_format, ignore_deprecated):
    """
    Main entry point. Call the validation for one or for all docstrings.
    """
    # A concrete function name validates just that object; otherwise the
    # whole API surface is validated.
    if func_name is not None:
        print_validate_one_results(func_name)
        return 0
    return print_validate_all_results(prefix, errors, output_format, ignore_deprecated)
if __name__ == "__main__":
    format_opts = "default", "json", "actions"
    func_help = (
        "function or method to validate (e.g. pandas.DataFrame.head) "
        "if not provided, all docstrings are validated and returned "
        "as JSON"
    )
    argparser = argparse.ArgumentParser(description="validate pandas docstrings")
    argparser.add_argument("function", nargs="?", default=None, help=func_help)
    argparser.add_argument(
        "--format",
        default="default",
        choices=format_opts,
        help="format of the output when validating "
        "multiple docstrings (ignored when validating one). "
        # BUG FIX: this fragment was a plain string, so --help printed the
        # literal "{str(format_opts)[1:-1]}" instead of the actual choices.
        f"It can be {str(format_opts)[1:-1]}",
    )
    argparser.add_argument(
        "--prefix",
        default=None,
        help="pattern for the "
        "docstring names, in order to decide which ones "
        'will be validated. A prefix "pandas.Series.str."'
        "will make the script validate all the docstrings "
        "of methods starting by this pattern. It is "
        "ignored if parameter function is provided",
    )
    argparser.add_argument(
        "--errors",
        default=None,
        help="comma separated "
        "list of error codes to validate. By default it "
        "validates all errors (ignored when validating "
        "a single docstring)",
    )
    argparser.add_argument(
        "--ignore_deprecated",
        default=False,
        action="store_true",
        help="if this flag is set, "
        "deprecated objects are ignored when validating "
        "all docstrings",
    )
    args = argparser.parse_args()
    # Exit with the number of errors found (0 == success).
    sys.exit(
        main(
            args.function,
            args.prefix,
            args.errors.split(",") if args.errors else None,
            args.format,
            args.ignore_deprecated,
        )
    )
|
{
"content_hash": "dc596f0d1683f9407b672a524ec74f34",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 87,
"avg_line_length": 32.3235294117647,
"alnum_prop": 0.5834524892759652,
"repo_name": "pandas-dev/pandas",
"id": "a86630eba7d5dc07bc5a17b84fee2f03cdf5ce5c",
"size": "15409",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/validate_docstrings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
''' CoordsConf: Configure QGIS coordinate display
Store this script as .qgis2/python/startup.py
Redoute, 19.10.2013 '''
from __future__ import unicode_literals, division
import qgis
QgsPoint = qgis.core.QgsPoint
CoordinateReferenceSystem = qgis.core.QgsCoordinateReferenceSystem
CoordinateTransform = qgis.core.QgsCoordinateTransform
ProjectionSelector = qgis.gui.QgsProjectionSelector
import PyQt4
qActionsContextMenu = PyQt4.QtCore.Qt.ActionsContextMenu
qAlignCenter = PyQt4.QtCore.Qt.AlignCenter
qAlignHCenter = PyQt4.QtCore.Qt.AlignHCenter
qAlignRight = PyQt4.QtCore.Qt.AlignRight
qAlignVCenter = PyQt4.QtCore.Qt.AlignVCenter
qHorizontal = PyQt4.QtCore.Qt.Horizontal
qToolButtonTextOnly = PyQt4.QtCore.Qt.ToolButtonTextOnly
settings = PyQt4.QtCore.QSettings()
Action = PyQt4.QtGui.QAction
ButtonGroup = PyQt4.QtGui.QButtonGroup
Dialog = PyQt4.QtGui.QDialog
DialogButtonBox = PyQt4.QtGui.QDialogButtonBox
GroupBox = PyQt4.QtGui.QGroupBox
Label = PyQt4.QtGui.QLabel
LineEdit = PyQt4.QtGui.QLineEdit
msgBox = PyQt4.QtGui.QMessageBox.information
PushButton = PyQt4.QtGui.QPushButton
RadioButton = PyQt4.QtGui.QRadioButton
TabWidget = PyQt4.QtGui.QTabWidget
ToolButton = PyQt4.QtGui.QToolButton
VBoxLayout = PyQt4.QtGui.QVBoxLayout
Widget = PyQt4.QtGui.QWidget
class Config(object):
    """Holds the user-configurable options, initialised from QSettings."""
    # read Options
    # Destination CRS for the coordinate display; defaults to WGS84 lat/lon.
    dstAuthId = settings.value('CoordsConf/dstAuthId', 'EPSG:4326')
    dstCrs = CoordinateReferenceSystem(dstAuthId)
    # rule1..rule4 select what showCoords() displays in the four situations
    # (on-the-fly reprojection on/off x active-layer CRS available or not).
    rule1 = settings.value('CoordsConf/rule1', 0, type=int)
    rule2 = settings.value('CoordsConf/rule2', 0, type=int)
    rule3 = settings.value('CoordsConf/rule3', 0, type=int)
    rule4 = settings.value('CoordsConf/rule4', 0, type=int)
# Shortcuts to the QGIS interface objects used throughout this script.
iface = qgis.utils.iface
mw = iface.mainWindow()  # main window (parent for the config dialog)
sb = mw.statusBar()      # status bar that will host the coordinate widgets
mc = iface.mapCanvas()   # map canvas emitting the xyCoordinates signal
mr = mc.mapRenderer()    # renderer; provides destination CRS and OTF state
def makeGroup(parent, odict):
    """Fill *parent* with one radio button per (id, label) entry of *odict*
    and return a QButtonGroup mapping every button to its id."""
    layout = VBoxLayout(parent)
    group = ButtonGroup(parent)
    for button_id, caption in odict.iteritems():
        button = RadioButton(caption, parent)
        layout.addWidget(button)
        group.addButton(button, button_id)
    return group
# actions of coord display
class Dlg(Dialog):
    """Configuration dialog with two tabs: a CRS selector and the four
    display rules evaluated by showCoords()."""

    def __init__(self):
        # BUG FIX: was super(Dialog, self).__init__(mw). Passing the *base*
        # class to super() skips QDialog.__init__ in Dlg's MRO; the subclass
        # itself must be named here.
        super(Dlg, self).__init__(mw)
        self.setWindowTitle('Configure Coords Display')
        layout = VBoxLayout(self)
        tabs = TabWidget(self)
        # --- tab 1: CRS selector -----------------------------------------
        tabCrs = Widget()
        lTabCrs = VBoxLayout(tabCrs)
        self.ps = ProjectionSelector(tabCrs)
        self.ps.setSelectedAuthId(Config.dstAuthId)
        lTabCrs.addWidget(self.ps)
        tabs.addTab(tabCrs, '&CRS for coords display')
        # --- tab 2: display rules ----------------------------------------
        # Each group box covers one combination of "on the fly" reprojection
        # state and active-layer-CRS availability; the button ids are the
        # rule numbers consumed by showCoords().
        tabOptions = Widget()
        lTabOptions = VBoxLayout(tabOptions)
        g1 = GroupBox('&1) When \'on the fly\' CRS transformation is enabled and active layer CRS is available:', tabOptions)
        self.o1 = makeGroup(g1,
                            {0: 'Show display CRS',
                             2: 'Show project CRS',
                             3: 'Show active layer CRS',
                             5: 'Show screen coords'})
        lTabOptions.addWidget(g1)
        g2 = GroupBox('&2) When \'on the fly\' CRS transformation is enabled and active layer CRS is not available:', tabOptions)
        self.o2 = makeGroup(g2,
                            {0: 'Show display CRS',
                             2: 'Show project CRS',
                             5: 'Show screen coords'})
        lTabOptions.addWidget(g2)
        g3 = GroupBox('&3) When \'on the fly\' CRS transformation is disabled and active layer CRS is available:', tabOptions)
        self.o3 = makeGroup(g3,
                            {1: 'Show display CRS, reprojected via active layer CRS',
                             0: 'Show display CRS, reprojected via project CRS',
                             4: 'Show active layer CRS',
                             2: 'Show project CRS',
                             5: 'Show screen Coords'})
        lTabOptions.addWidget(g3)
        g4 = GroupBox('&4) When \'on the fly\' CRS transformation is disabled and active layer CRS is not available:', tabOptions)
        self.o4 = makeGroup(g4,
                            {0: 'Show display CRS, reprojected via project CRS',
                             2: 'Show project CRS',
                             5: 'Show screen Coords'})
        lTabOptions.addWidget(g4)
        lTabOptions.addStretch(1)
        tabs.addTab(tabOptions, '&Options')
        layout.addWidget(tabs)
        # Standard Ok/Cancel row wired to accept/reject.
        bbox = DialogButtonBox(self)
        bbox.setOrientation(qHorizontal)
        bbox.setStandardButtons(DialogButtonBox.Ok | DialogButtonBox.Cancel)
        bbox.accepted.connect(self.accept)
        bbox.rejected.connect(self.reject)
        layout.addWidget(bbox)
        self.adjustSize()
        # Preselect the persisted CRS and the four stored rules.
        self.ps.setSelectedAuthId(Config.dstAuthId)
        self.o1.button(Config.rule1).setChecked(True)
        self.o2.button(Config.rule2).setChecked(True)
        self.o3.button(Config.rule3).setChecked(True)
        self.o4.button(Config.rule4).setChecked(True)
# The dialog is created once and re-shown on every click of the CRS button.
dlg = Dlg()

def configure():
    """Show the settings dialog and, on accept, persist the new values to
    both the Config class and QSettings."""
    if not dlg.exec_():
        return
    Config.dstAuthId = dlg.ps.selectedAuthId()
    Config.dstCrs = CoordinateReferenceSystem(Config.dstAuthId)
    settings.setValue('CoordsConf/dstAuthId', Config.dstAuthId)
    # Store the checked rule of each of the four option groups.
    for number, group in ((1, dlg.o1), (2, dlg.o2), (3, dlg.o3), (4, dlg.o4)):
        rule = group.checkedId()
        setattr(Config, 'rule%d' % number, rule)
        settings.setValue('CoordsConf/rule%d' % number, rule)
# Remove the builtin statusbar widgets.
# Check extents view button first (reduces calculations in
# QgisApp::showMouseCoordinate).
w = sb.findChild(object, 'mToggleExtentsViewButton')
w.setChecked(True)
sb.removeWidget(w)
sb.removeWidget(sb.findChild(object, 'mCoordsLabel'))
sb.removeWidget(sb.findChild(object, 'mCoordsEdit'))
# Create new label widgets for the statusbar.
# Button showing the current CRS auth id; clicking it opens the dialog.
crsButton = ToolButton(sb)
crsButton.setToolButtonStyle(qToolButtonTextOnly)
crsButton.setMinimumWidth(20)
crsButton.setMaximumHeight(20)
crsButton.setAutoRaise(True)
crsButton.clicked.connect(configure)
sb.insertPermanentWidget(1, crsButton)
# Button showing the current cursor coordinates (updated by showCoords).
xyButton = ToolButton(sb)
xyButton.setToolButtonStyle(qToolButtonTextOnly)
xyButton.setMinimumWidth(200)
xyButton.setMaximumHeight(20)
xyButton.setAutoRaise(True)
sb.insertPermanentWidget(2, xyButton)
# bind signal
def showCoords(xy):
# Which rule is applicable?
c1 = mr.hasCrsTransformEnabled()
l = iface.activeLayer()
c2 = bool(l and l.crs())
if c1 and c2:
rule = Config.rule1
elif c1:
rule = Config.rule2
elif c2:
rule = Config.rule3
else:
rule = Config.rule4
# transform xy and get auth id according to rule
if rule == 0:
xyCrs = mr.destinationCrs()
xy = CoordinateTransform(xyCrs, Config.dstCrs).transform(xy)
authId = Config.dstAuthId
elif rule == 1:
xyCrs = iface.activeLayer().crs()
xy = CoordinateTransform(xyCrs, Config.dstCrs).transform(xy)
authId = Config.dstAuthId
elif rule == 2:
authId = mr.destinationCrs().authid()
elif rule == 3:
xyCrs = mr.destinationCrs()
layerCrs = iface.activeLayer().crs()
xy = CoordinateTransform(xyCrs, layerCrs).transform(xy)
authId = layerCrs.authid()
elif rule == 4:
authId = iface.activeLayer().crs().authid()
elif rule == 5:
xy = mc.mouseLastXY()
xy = QgsPoint(xy.x(), xy.y())
authId = 'Screen'
# label buttons
crsButton.setText(authId)
xyButton.setText(xy.toString(5))
# init: set the labels once, then update them on every cursor move.
showCoords(qgis.core.QgsPoint(0, 0))
mc.xyCoordinates.connect(showCoords)
|
{
"content_hash": "a8499a44d4369623e594c640dccf8f80",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 130,
"avg_line_length": 34.60648148148148,
"alnum_prop": 0.6816053511705685,
"repo_name": "ActiveState/code",
"id": "9249fafa82a1f25102a84180a92501f204f6143a",
"size": "7501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578692_QGstartscript_Change_display/recipe-578692.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
"""
CeilometerConf - file ``/etc/ceilometer/ceilometer.conf``
=========================================================
"""
from insights.core import IniConfigFile
from insights.core.plugins import parser
from insights.specs import Specs
@parser(Specs.ceilometer_conf)
class CeilometerConf(IniConfigFile):
    """
    The ``/etc/ceilometer/ceilometer.conf`` file is in a standard '.ini' format,
    and this parser uses the IniConfigFile base class to read this.

    Given a file containing the following test data::

        [DEFAULT]
        #
        # From ceilometer
        http_timeout = 600
        debug = False
        verbose = False
        log_dir = /var/log/ceilometer
        meter_dispatcher=database
        event_dispatcher=database
        [alarm]
        evaluation_interval = 60
        evaluation_service=ceilometer.alarm.service.SingletonAlarmService
        partition_rpc_topic=alarm_partition_coordination
        [api]
        port = 8777
        host = 192.0.2.10
        [central]
        [collector]
        udp_address = 0.0.0.0
        udp_port = 4952
        [compute]
        [coordination]
        backend_url = redis://:chDWmHdH8dyjsmpCWfCEpJR87@192.0.2.7:6379/

    Examples:
        >>> type(config)
        <class 'insights.parsers.ceilometer_conf.CeilometerConf'>
        >>> config.sections()
        ['alarm', 'api', 'central', 'collector', 'compute', 'coordination']
        >>> config.has_option('alarm', 'evaluation_interval')
        True
        >>> config.get('coordination', 'backend_url')
        'redis://:chDWmHdH8dyjsmpCWfCEpJR87@192.0.2.7:6379/'
        >>> config.getint('collector', 'udp_port')
        4952
        >>> config.getboolean('DEFAULT', 'debug')
        False
    """
    # All parsing behaviour is inherited from IniConfigFile; this subclass
    # exists only to bind that parser to the ceilometer_conf spec.
    pass
|
{
"content_hash": "edf671efd179af917c4694f57bb8d601",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 31.196428571428573,
"alnum_prop": 0.5941614195764167,
"repo_name": "RedHatInsights/insights-core",
"id": "189c44caeaf9e9cbd78ac5fed7360e9b381a8485",
"size": "1747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/ceilometer_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
}
|
"""Provides a class for controlling the TI ADS105."""
from time import sleep
import RPi.GPIO as GPIO
# NOTE: the SMBus protocol assumes that values longer than one byte
# (16-bit words) are transmitted LSB first. The ADS1015, however, sends
# the MSB first, so a byte-swap conversion is necessary.
def _swap_bytes_16bit(value):
return ((value & 0xFF) << 8) | ((value & 0xFF00) >> 8)
def _set_bits(config, mask, bits):
# clear the bits before setting them
config &= ~mask
config |= (bits & mask)
return config
class ADS1015:
    """
    Implements the I2C protocol for the TI ADS1015 chip.
    Provides methods for configuring and reading A/D conversion values via an
    I2C bus from the Texas Instruments ADS1015 chip.
    """
    # BCM number of the GPIO pin wired to the chip's ALERT/RDY line; polled
    # in read_channel() to wait for a finished conversion.
    alrt=22
    # Valid single-ended input channel indices (inputs A0..A3).
    _CHANNELS = [0, 1, 2, 3]
    # Pointer-register addresses for the two registers used here.
    __CONFIG_REGISTER = 0b01
    __CONVERSION_REGISTER = 0b00
    # bits [15] start the conversion
    __CONFIG_CONVERSION_START = 1 << 15
    # bits [14:12] are for configuring the mux
    __CONFIG_MUX_BITS = 0b111 << 12
    # Mux settings for measuring voltages relative to ground, one entry per
    # single-ended input A0..A3.
    __CONFIG_MUX_ABSOLUTE = [
        0b100 << 12,
        0b101 << 12,
        0b110 << 12,
        0b111 << 12]
    # bits [11:9] configure the programmable gain amplifier
    __CONFIG_PGA_BITS = 0b111 << 9
    __CONFIG_PGA_4_VOLT = 0b001 << 9
    # bit [8] selects continuous vs. single-shot conversion mode.
    __CONFIG_MODE_BITS = 1 << 8
    __CONFIG_MODE_CONTINUOUS = 0 << 8
    __CONFIG_MODE_SINGLE = 1 << 8

    def __init__(self, bus, bus_address=0x49):
        """Create a new object for communicating on a given bus and address."""
        self.__bus = bus
        self.__bus_address = bus_address
        # Initial configuration: input A0, the 4-volt PGA setting, and
        # single-shot conversion mode.
        config = self.read_config_register()
        config=_set_bits(config, self.__CONFIG_MUX_BITS, self.__CONFIG_MUX_ABSOLUTE[0])
        config=_set_bits(config, self.__CONFIG_PGA_BITS, self.__CONFIG_PGA_4_VOLT)
        config=_set_bits(config, self.__CONFIG_MODE_BITS, self.__CONFIG_MODE_SINGLE)
        self.write_config_register(config)
        # Configure the ALERT/RDY pin as a pulled-up GPIO input.
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.alrt, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    def bus(self):
        """Get the bus that is currently used for communication."""
        return self.__bus

    def bus_address(self):
        """Get the bus address of the chip used for communication."""
        return self.__bus_address

    def read_config_register(self):
        """Retrieve the two-byte configuration register from the ADC."""
        config = self.__bus.read_word_data(
            self.__bus_address, self.__CONFIG_REGISTER)
        # SMBus delivers the word LSB-first but the chip sends MSB-first,
        # so the bytes must be swapped (see module-level note).
        return _swap_bytes_16bit(config)

    def write_config_register(self, configuration):
        """
        Set the 2-byte ADC configuration register.

        :type configuration: 16-bit word
        """
        self.__bus.write_word_data(
            self.__bus_address,
            self.__CONFIG_REGISTER,
            _swap_bytes_16bit(configuration))

    def read_conversion_value(self):
        """Read the contents of the conversion register on the ADC."""
        sensor_value = self.__bus.read_word_data(
            self.__bus_address, self.__CONVERSION_REGISTER)
        sensor_value = _swap_bytes_16bit(sensor_value)
        # The lowest 4 bits are always set to 0 and can therefore be
        # ignored (see datasheet, register description, table 8, page 15).
        return sensor_value >> 4

    def set_mux_absolute(self, channel):
        """
        Configure the ADC to read absolute values on the given channel.

        Absolute voltage values are measured with reference to the ground
        potential. Invalid channels are silently ignored (returns None).
        """
        if channel not in self._CHANNELS:
            return
        else:
            config = self.read_config_register()
            config=_set_bits(config,
                             self.__CONFIG_MUX_BITS,
                             self.__CONFIG_MUX_ABSOLUTE[channel])
            # Also set the start bit to trigger a single-shot conversion.
            config=_set_bits(config, self.__CONFIG_CONVERSION_START, self.__CONFIG_CONVERSION_START)
            self.write_config_register(config)

    def read_channel(self, channel):
        """
        Read one value from the given ADC channel.

        :param channel: the index of the channel to be read - can be one of
        [0, 1, 2, 3]
        :returns the converted channel value as an integer, or None for an
        invalid channel
        """
        if channel not in self._CHANNELS:
            return
        else:
            self.set_mux_absolute(channel)
            # Gives the ADC time to perform a new measurement with the new
            # configuration: busy-wait until the ALERT/RDY pin reads high.
            # NOTE(review): this polls with no timeout and would spin forever
            # if the conversion never completes — confirm acceptable.
            while not GPIO.input(self.alrt):
                a=1
            return self.read_conversion_value()
|
{
"content_hash": "ddaf5ee02a8b9026e2d65afd08606629",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 100,
"avg_line_length": 33,
"alnum_prop": 0.6125841750841751,
"repo_name": "alex-Symbroson/BotScript",
"id": "171a871525d07a0af3b5fb0a944b52941524300e",
"size": "4775",
"binary": false,
"copies": "1",
"ref": "refs/heads/stack",
"path": "BotScript/res/raspibot/ADC.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "85358"
},
{
"name": "Makefile",
"bytes": "2122"
},
{
"name": "Python",
"bytes": "29608"
}
],
"symlink_target": ""
}
|
import lib.record_rate
import lib.commonclient
import lib.getconfig
import lib.puylogger
import lib.basecheck
import json
# Marathon metrics endpoint URL, read from the agent configuration.
marathon_url = lib.getconfig.getparam('Mesos-Marathon', 'stats')
# Tag attached to every metric emitted by this check.
check_type = 'marathon'
class Check(lib.basecheck.CheckBase):
    """Agent check that scrapes the Marathon metrics endpoint and records
    selected gauges plus per-collector JVM GC statistics."""

    def precheck(self):
        """Fetch the stats JSON and append one metric dict per value to
        ``self.local_vars``; failures are logged and swallowed (best effort)."""
        try:
            marathon_stats = lib.commonclient.httpget(__name__, marathon_url)
            stats_json = json.loads(marathon_stats)
            # Gauges forwarded (after renaming) straight from the endpoint.
            gauges = ('marathon.apps.active.gauge', 'marathon.deployments.active.gauge', 'marathon.instances.running.gauge', 'marathon.instances.launch-overdue.gauge',
                      'marathon.pods.active.gauge', 'marathon.http.requests.active.gauge', 'marathon.groups.active.gauge',
                      'marathon.http.event-streams.active.gauge', 'marathon.instances.launch-overdue.gauge', 'marathon.instances.staged.gauge',
                      'marathon.jvm.memory.heap.committed.gauge.bytes', 'marathon.jvm.memory.heap.used.gauge.bytes',
                      'marathon.jvm.memory.non-heap.committed.gauge.bytes', 'marathon.jvm.memory.non-heap.used.gauge.bytes')
            for k in stats_json['gauges'].keys():
                if 'marathon.jvm.gc' in k:
                    gcgc = k.split('.')
                    # BUG FIX: these checks used "len(gcgc) is 6" / "is 8" —
                    # identity comparison against int literals is
                    # implementation-dependent (and a SyntaxWarning on
                    # Python >= 3.8); numeric equality must use ==.
                    if len(gcgc) == 6:
                        self.local_vars.append({'name': 'mesos_marathon_gc_collections', 'timestamp': self.timestamp, 'value': float(stats_json['gauges'][k]['value']), 'check_type': check_type, 'extra_tag': {'gctype': gcgc[3]}})
                    if len(gcgc) == 8:
                        self.local_vars.append({'name': 'mesos_marathon_gc_duration', 'timestamp': self.timestamp, 'value': float(stats_json['gauges'][k]['value']), 'check_type': check_type, 'extra_tag': {'gctype': gcgc[3]}})
            for gauge in gauges:
                # Derive the output metric suffix, e.g.
                # marathon.jvm.memory.heap.used.gauge.bytes -> heap_used.
                fsto = gauge.replace('marathon.', '').\
                    replace('.gauge', '').\
                    replace('jvm.memory.heap', 'heap'). \
                    replace('jvm.memory.non-heap', 'nonheap'). \
                    replace('.bytes', '').\
                    replace('.', '_').replace('-', '_')
                self.local_vars.append({'name': 'mesos_marathon_' + fsto, 'timestamp': self.timestamp, 'value': stats_json['gauges'][gauge]['value'], 'check_type': check_type})
        except Exception as e:
            # Best effort: log and continue so one failing check does not
            # kill the agent loop.
            lib.puylogger.print_message(__name__ + ' Error : ' + str(e))
            pass
|
{
"content_hash": "6c94a506acd9d27717d6b4a4c9974848",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 228,
"avg_line_length": 52.67391304347826,
"alnum_prop": 0.5815105241436236,
"repo_name": "oddeyeco/oe-agent2",
"id": "8a2367731d07c5b550a79dd2d2c11b2333068fe6",
"size": "2423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checks_available/check_mesos_marathon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "260"
},
{
"name": "Python",
"bytes": "288903"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``layout.polar.radialaxis.nticks`` property."""

    def __init__(
        self, plotly_name="nticks", parent_name="layout.polar.radialaxis", **kwargs
    ):
        # Fill in the defaults only when the caller did not override them;
        # everything is then forwarded to the IntegerValidator base class.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(NticksValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "f9e7b6de3eeabf6ddf27a6f3996941e8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 33.86666666666667,
"alnum_prop": 0.5846456692913385,
"repo_name": "plotly/python-api",
"id": "6134ec43f3a3d8670dc5e806fccc1c6f337b1db3",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/polar/radialaxis/_nticks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from functools import partial
import pyspark.context
from pyspark_cassandra.rdd import CassandraRDD
def monkey_patch_sc(sc):
    """Retype an existing SparkContext as a CassandraSparkContext and attach
    a bound ``cassandraTable`` callable (with docstring) to the instance."""
    sc.__class__ = CassandraSparkContext
    bound = partial(CassandraSparkContext.cassandraTable, sc)
    bound.__doc__ = CassandraSparkContext.cassandraTable.__doc__
    sc.__dict__["cassandraTable"] = bound
class CassandraSparkContext(pyspark.context.SparkContext):
    """Wraps a SparkContext which allows reading CQL rows from Cassandra"""

    def cassandraTable(self, keyspace, table, row_format=None, read_conf=None):
        """Returns a CassandraRDD for the given keyspace and table"""
        # Delegates to CassandraRDD, passing this context as the SparkContext.
        return CassandraRDD(keyspace, table, self, row_format, read_conf)
|
{
"content_hash": "ad348a5d7222b1f55baec39cc8d88496",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 37.72222222222222,
"alnum_prop": 0.7717231222385862,
"repo_name": "omriiluz/pyspark-cassandra",
"id": "aefe6470a942163d0c83f5012d5cd922dd9ed4ad",
"size": "1226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark_cassandra/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "99414"
},
{
"name": "Makefile",
"bytes": "2448"
},
{
"name": "Python",
"bytes": "30992"
},
{
"name": "Scala",
"bytes": "804"
}
],
"symlink_target": ""
}
|
"""
This module, views.py, is where all the backend and frontend server requests are handled and returned to the user.
"""
# Needed to send back a rendered HTML page.
from django.shortcuts import render
# Needed for sending a simple HttpResponse such as a string response.
from django.http import HttpResponse
# Needed for allowing POST requests without requiring a CSRF token.
from django.views.decorators.csrf import csrf_exempt
# Needed to perform HttpRequests to run Locust.
import requests
# Needed for making JsonResponses.
from django.http import JsonResponse
# Needed to check data on Users.
from django.contrib.auth.models import User
from .models import PROJECT_NAME_QA
from .models import PROJECT_NAME_SK
from .models import SeleniumTestSuite
from .models import SeleniumTest
from .models import SeleniumTestResults
from quality_assurance.selenium_scripts import selenium_base
import threading
import time
import datetime
import sys
#sys.path.remove('/home/dev_usr/urbtek/nexus_django')
sys.path.append('/home/dev_usr/urbtek')
# Needed for debugging.
from universal_code import debugging as dbg
from web_utilities import slack_notification
from quality_assurance.selenium_scripts.qa import qa_selenium_scripts as qss
from quality_assurance.selenium_scripts.sk import sk_selenium_scripts as sss
from quality_assurance.selenium_scripts import test_parser
from universal_code import anything_time_related as atr
''' __ __ __ ___ ___ __ __ ___ __ ___ __ ___ __ __
/ _` | / \ |__) /\ | | | | | | | | \ / \ / /\ |__) | /\ |__) | |__ /__` _|_ |__ | | |\ | / ` | | / \ |\ | /__`
\__> |___ \__/ |__) /~~\ |___ \__/ | | |___ | | | \/ /~~\ | \ | /~~\ |__) |___ |___ .__/ | | \__/ | \| \__, | | \__/ | \| .__/
'''
# Parsed selenium test-function registries, one per project.
ALL_QA_SELENIUM_SCRIPTS = test_parser.get_test_functions_from_file(test_parser.QA_PATH)
ALL_SK_SELENIUM_SCRIPTS = test_parser.get_test_functions_from_file(test_parser.SK_PATH)
# Endpoint of the central (nexus) server that aggregates selenium results.
NEXUS_URL = 'http://192.237.253.249:1337/'
SEND_SELENIUM_RESULT_URL = NEXUS_URL + 'selenium_result'
# POST parameter names.
PROJECT_NAME = 'project_name'
PEON_URL = 'peon_url'
UNIQUE_TEST_ID = 'unique_id'
TEST_SUITE_RESULTS = 'test_suite_results'
# Field names for work hours logging.
START_OR_STOP_TIME = 'start_or_stop_time'
IS_START_TIME = 'is_start_time'
# Server response messages.
# NOTE(review): these HttpResponse objects are module-level singletons that
# get returned for many requests; Django responses are normally built
# per-request — confirm reusing them is safe under this deployment.
SERVER_REPLY_INVALID_POST_DATA_ERROR = HttpResponse('Invalid POST data!')
SERVER_REPLY_INVALID_NUMBER_OF_POST_ARGUMENTS_ERROR = HttpResponse('Invalid number of POST arguments!')
SERVER_REPLY_GENERIC_NO = HttpResponse('n')
SERVER_REPLY_GENERIC_YES = HttpResponse('y')
SERVER_REPLY_GENERIC_LOGIC_ERROR = HttpResponse('l')
SERVER_REPLY_GENERIC_SERVER_ERROR = HttpResponse('s')
class PeonServer:
    """
    Just an object representation of this server and some useful state information.
    """
    def __init__(self):
        # Busy flag; the view functions in this module read it directly.
        self.is_busy = False
        # Id of the test suite currently being executed, if any.
        self.unique_test_id = None
    def set_unique_test_id(self, unique_id):
        self.unique_test_id = unique_id
    def set_to_busy(self):
        self.is_busy = True
    def set_to_not_busy(self):
        self.is_busy = False
    # NOTE(review): this method is shadowed by the instance attribute of the
    # same name assigned in __init__, so it is unreachable on instances; all
    # call sites in this module read the attribute directly.
    def is_busy(self):
        return self.is_busy

# Module-level singleton consulted by the view functions below.
peon_server = PeonServer()
peon_server.set_to_not_busy()
def check_POST_arguments(arguments, request):
    """Just a utility function to raise an exception if there is an in-correct match on POST arguments.
    :param arguments: The arguments to check for.
    :param request: Contains information regarding the request sent in.
    :return: None when the POST data matches, otherwise an error HttpResponse.
    """
    expected = len(arguments)
    received = len(request.POST)
    if received != expected:
        dbg.raise_exception(dbg.MyException, 'Got ' + str(received) + ' number of arguments instead of ' + str(expected))
        return SERVER_REPLY_INVALID_NUMBER_OF_POST_ARGUMENTS_ERROR
    missing = [arg for arg in arguments if arg not in request.POST]
    if missing:
        dbg.raise_exception(dbg.MyException, 'Argument not passed in : ' + str(missing[0]) + '.')
        return SERVER_REPLY_INVALID_POST_DATA_ERROR
    return None
@csrf_exempt
def is_peon_busy_POST(request):
    """This handles the POST request to determine if the peon is busy or not.
    :param request: Contains information regarding the request sent in.
    :return: A string response indicating if the server is busy or not.
    """
    return SERVER_REPLY_GENERIC_YES if peon_server.is_busy else SERVER_REPLY_GENERIC_NO
@csrf_exempt
def get_test_suite_number_POST(request):
    """This handles the POST request to get a test suite id number.
    :param request: Contains information regarding the request sent in.
    :return: A string response containing the test suite number.
    """
    # BUG FIX: check_POST_arguments was previously called twice (once for the
    # test and once for the return value), duplicating its raise/log side
    # effects. Call it once and reuse the result.
    error_response = check_POST_arguments([PROJECT_NAME], request)
    if error_response is not None:
        return error_response
    test_suite = SeleniumTestSuite(project_name=request.POST[PROJECT_NAME], ran_at=datetime.datetime.now())
    test_suite.save()
    return HttpResponse(test_suite.unique_id)
def _run_selenium_test_in_background(unique_test_id, project_name):
    """An internal method to launch the selenium tests in a background thread.
    :param unique_test_id: The unique test ID that this test is ran under.
    :param project_name: Which project's selenium scripts to run.
    :return: Void.
    """
    peon_server.set_to_busy()
    try:
        if project_name == PROJECT_NAME_QA:
            scripts = qss.QASeleniumScripts()
            all_selenium_scripts_to_use = ALL_QA_SELENIUM_SCRIPTS
        elif project_name == PROJECT_NAME_SK:
            scripts = sss.SKSeleniumScripts()
            all_selenium_scripts_to_use = ALL_SK_SELENIUM_SCRIPTS
        else:
            dbg.terminate('Invalid project name!')
            # Guard in case dbg.terminate does not actually exit the process.
            return
        for browser in selenium_base.SUPPORTED_BROWSERS:
            scripts.run_pre_test(browser)
            for t in all_selenium_scripts_to_use:
                # Time each test, push the result to nexus, and persist it.
                easy_timer = atr.EasyTimer()
                run_function = getattr(scripts, t.full_function_name)
                passed, notes = run_function()
                run_time = easy_timer.get_run_time()
                t.send_results_to_nexus(unique_test_id, run_time, passed, notes, browser)
                selenium_test_results = SeleniumTestResults(test_name=t.name, run_time=run_time, passed=passed, notes=notes, test_suite_id=SeleniumTestSuite.objects.filter(unique_id=unique_test_id)[0], browser_used=browser)
                selenium_test_results.save()
            # Tear down the driver opened by run_pre_test for this browser.
            # TODO confirm: the original (indentation lost in review view)
            # may have called terminate() only once after all browsers.
            scripts.terminate()
    finally:
        # BUG FIX: without the finally, an exception raised by any test left
        # the server flagged busy forever, blocking all future launches.
        peon_server.set_to_not_busy()
@csrf_exempt
def launch_selenium_suite_POST(request):
    """This handles the POST request to launch QA Selenium tests.
    :param request: Contains information regarding the request sent in.
    :return: A String response indicating if the server is busy or not.
    """
    # BUG FIX: avoid calling check_POST_arguments twice (duplicate
    # raise/log side effects); call once and reuse the result.
    error_response = check_POST_arguments([UNIQUE_TEST_ID, PROJECT_NAME], request)
    if error_response is not None:
        return error_response
    if peon_server.is_busy:
        return HttpResponse('The peon server is currently busy.')
    received_unique_test_id = request.POST[UNIQUE_TEST_ID]
    received_project_name = request.POST[PROJECT_NAME]
    # TODO : check if the unique test id is valid.
    # Run the suite in a daemon thread so the request returns immediately.
    thread = threading.Thread(target=_run_selenium_test_in_background, args=(received_unique_test_id, received_project_name))
    thread.daemon = True
    thread.start()
    return SERVER_REPLY_GENERIC_YES
@csrf_exempt
def get_selenium_project_data_POST(request):
    """This handles the POST request to get selenium project data.
    :param request: Contains information regarding the request sent in.
    :return: A JSON response mapping test-suite id -> [passed_count, failed_count].
    """
    # BUG FIX: avoid calling check_POST_arguments twice (duplicate
    # raise/log side effects); call once and reuse the result.
    error_response = check_POST_arguments([PROJECT_NAME], request)
    if error_response is not None:
        return error_response
    received_project_name = request.POST[PROJECT_NAME]
    # TODO : push the project filter into the queryset instead of Python.
    passed_by_suite = {}
    failed_by_suite = {}
    for data in SeleniumTestResults.objects.all():
        if data.test_suite_id.project_name != received_project_name:
            continue
        suite = str(data.test_suite_id.unique_id)
        passed_by_suite.setdefault(suite, 0)
        failed_by_suite.setdefault(suite, 0)
        if data.passed:
            passed_by_suite[suite] += 1
        else:
            failed_by_suite[suite] += 1
    combined_tests = {
        suite: [passed_by_suite[suite], failed_by_suite[suite]]
        for suite in passed_by_suite
    }
    return JsonResponse(combined_tests, safe=False)
@csrf_exempt
def send_data_to_flax_POST(request):
    """This handles the POST request to send selenium project data to flax.

    :param request: Contains information regarding the request sent in.
    :return: A HTTPResponse.
    """
    # Validate once and reuse the result (the original called the validator twice).
    error_response = check_POST_arguments(['qa_data'], request)
    if error_response is not None:
        return error_response
    print('Printing qa_data')
    # Retained for the pending work below: the raw payload still needs its
    # invalid keys cleaned out before it can be forwarded verbatim.
    data = str(request.POST['qa_data'])
    # TODO: forward `data` (with '{', '}' stripped and newlines escaped) once
    # the invalid keys are cleaned out; a plain string is sent in the meantime.
    slack_notification.send_message_to_slack_qa_channel('Data being sent has some invalid keys to be cleaned out, testing simple string for now.')
    return SERVER_REPLY_GENERIC_YES
|
{
"content_hash": "d382f107dc4a4d71325be7303e24cafc",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 253,
"avg_line_length": 36.20912547528517,
"alnum_prop": 0.695999159928594,
"repo_name": "utarsuno/urbtek",
"id": "d4eb5a8f1b8a99fdca5f523b995fb0380a93241b",
"size": "9539",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "peon/back_end/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "390447"
},
{
"name": "HTML",
"bytes": "80203"
},
{
"name": "JavaScript",
"bytes": "298511"
},
{
"name": "Python",
"bytes": "880231"
},
{
"name": "Shell",
"bytes": "22758"
}
],
"symlink_target": ""
}
|
""" OAuth 1.0 Authorization.
Uses python-oauth2 library to perform 3-way handshake.
1. Create a new instance OAuth 1.0
2. Call the generateAuthorizationURL method to create
the authorization URL
3. Once the user grants access
4. Call the authorize method to upgrade to an access
token.
"""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import oauth2
import urllib
# Google-hosted OAuth 1.0 endpoints plus the Fusion Tables API scope used by
# the 3-legged handshake implemented below.
OAUTH_SETTINGS = {
    'scope' : "https://www.googleapis.com/auth/fusiontables",
    'request_token_url':"https://www.google.com/accounts/OAuthGetRequestToken",
    'authorize_url':'https://www.google.com/accounts/OAuthAuthorizeToken',
    'access_token_url':'https://www.google.com/accounts/OAuthGetAccessToken',
}
class OAuth():
    """Helper implementing the 3-legged OAuth 1.0 handshake via python-oauth2."""

    def generateAuthorizationURL(self, consumer_key, consumer_secret, domain, callback_url=None):
        """Fetch a request token and build the user-facing authorization URL.

        Returns:
            the Authorization URL
        """
        consumer = oauth2.Consumer(consumer_key, consumer_secret)
        client = oauth2.Client(consumer)
        request_url = "%s?scope=%s" % (OAUTH_SETTINGS['request_token_url'],
                                       OAUTH_SETTINGS['scope'])
        resp, content = client.request(request_url, "GET")
        if resp['status'] != '200':
            raise Exception("Invalid response %s." % resp['status'])

        # The body is form-encoded: oauth_token=...&oauth_token_secret=...
        pieces = content.split("&")
        oauth_token = urllib.unquote_plus(pieces[0].split("=")[1])
        oauth_token_secret = urllib.unquote_plus(pieces[1].split("=")[1])

        if callback_url:
            auth_url = "%s?oauth_token=%s&scope=%s&domain=%s&oauth_callback=%s" % (
                OAUTH_SETTINGS['authorize_url'],
                oauth_token,
                OAUTH_SETTINGS['scope'],
                domain,
                callback_url)
        else:
            auth_url = "%s?oauth_token=%s&scope=%s&domain=%s" % (
                OAUTH_SETTINGS['authorize_url'],
                oauth_token,
                OAUTH_SETTINGS['scope'],
                domain)
        return auth_url, oauth_token, oauth_token_secret

    def authorize(self, consumer_key, consumer_secret, oauth_token, oauth_token_secret):
        """Upgrade the authorized request token to an access token.

        Returns:
            the oauth token
            the token secret
        """
        consumer = oauth2.Consumer(consumer_key, consumer_secret)
        request_token = oauth2.Token(oauth_token, oauth_token_secret)
        client = oauth2.Client(consumer, request_token)
        resp, content = client.request(OAUTH_SETTINGS['access_token_url'], "POST")
        # Same form-encoded body layout as the request-token response.
        pieces = content.split("&")
        access_token = urllib.unquote_plus(pieces[0].split("=")[1])
        access_token_secret = urllib.unquote_plus(pieces[1].split("=")[1])
        return access_token, access_token_secret
|
{
"content_hash": "42de71c79feb226017f06ff784319d2f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 121,
"avg_line_length": 38.71052631578947,
"alnum_prop": 0.5781781101291639,
"repo_name": "MapofLife/MOL",
"id": "4abcfa90c2f7119f87dada29b56fec18a7e1089e",
"size": "2996",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "earthengine/gft/src/authorization/oauth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83354"
},
{
"name": "CSS",
"bytes": "245523"
},
{
"name": "JavaScript",
"bytes": "1302309"
},
{
"name": "PHP",
"bytes": "613"
},
{
"name": "Perl",
"bytes": "2100"
},
{
"name": "Python",
"bytes": "1953387"
},
{
"name": "R",
"bytes": "52"
},
{
"name": "SQL",
"bytes": "21299"
},
{
"name": "Shell",
"bytes": "3146"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``treemap.marker.cmax`` property."""

    def __init__(self, plotly_name="cmax", parent_name="treemap.marker", **kwargs):
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below may be overridden via kwargs by generated callers.
            edit_type=kwargs.pop("edit_type", "plot"),
            # Setting cmax explicitly implies automatic color scaling is off.
            implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
            **kwargs,
        )
|
{
"content_hash": "463a5bb1acaceb6479246ed5d0ae508e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 39,
"alnum_prop": 0.6132478632478633,
"repo_name": "plotly/plotly.py",
"id": "691f37e2e353cdaeac6d639c008b4754b81db3ee",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/treemap/marker/_cmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
    """Drop leftover spatial-index names from earlier zoning schemas."""

    dependencies = [
        ('zoning', '0005_auto_20201126_0706'),
    ]

    # 10_zoning — every historical name each geometry index may still carry.
    operations = []
    for _index_name in (
        'couche_communes_geom_idx',
        'l_commune_geom_idx',
        'zoning_city_geom_idx',
        'couche_secteurs_geom_idx',
        'l_secteur_geom_idx',
        'zoning_district_geom_idx',
        'couche_zonage_reglementaire_geom_idx',
        'l_zonage_reglementaire_geom_idx',
        'zoning_restrictedarea_geom_idx',
    ):
        operations.append(
            migrations.RunSQL('DROP INDEX IF EXISTS {};'.format(_index_name)))
    # Keep the class namespace clean of the loop variable.
    del _index_name
|
{
"content_hash": "f467bb34ff06a3eedeae3bb5a5a85c21",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 39.08695652173913,
"alnum_prop": 0.67853170189099,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "b2bb240f6c631bb64a90a8e9f926c771e0761708",
"size": "899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/zoning/migrations/0006_clean_spatial_index.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
}
|
import demistomock as demisto
def test_main(mocker):
    """Exercise RegexExtractAll.main via mocked demisto args/results.

    The original repeated the same mock/run/assert stanza three times; the
    shared machinery lives in the two local helpers below.
    """
    from RegexExtractAll import main

    def run_regex(value, regex, ignore_case='false', unpack_matches='false'):
        # Run main() once with the given arguments and return its results list.
        mocker.patch.object(demisto, 'args', return_value={
            'value': value,
            'regex': regex,
            'multi_line': 'false',
            'ignore_case': ignore_case,
            'period_matches_newline': 'false',
            'error_if_no_match': 'false',
            'unpack_matches': unpack_matches,
        })
        mocker.patch.object(demisto, 'results')
        main()
        # The script must report exactly one entry.
        assert demisto.results.call_count == 1
        return demisto.results.call_args[0][0]

    def assert_results(results, expected):
        # Compare length and each element, mirroring the original asserts.
        assert len(results) == len(expected)
        for index, value in enumerate(expected):
            assert results[index] == value

    with open('TestData/data.txt', 'r') as f:
        test_data = f.read()

    # test basic functionality
    assert_results(
        run_regex(test_data,
                  r'\b[A-Za-z0-9._%=+\p{L}-]+@[A-Za-z0-9\p{L}.-]+\.[A-Za-z]{2,}\b'),
        ['test@test.com', 'testtrainee@test.com', 'testtrainee@test.com'])

    # test case insensitive
    assert_results(
        run_regex(test_data, r'\bTEST[A-Za-z@.]+\b', ignore_case='true'),
        ['test@test.com', 'testtrainee@test.com', 'testtrainee@test.com'])

    # test unpack matches (capture groups are flattened into the output)
    assert_results(
        run_regex(test_data, r'([A-Za-z@.]+@([A-Za-z@.]+))',
                  ignore_case='true', unpack_matches='true'),
        ['test@test.com', 'test.com', 'testtrainee@test.com', 'test.com',
         'testtrainee@test.com', 'test.com'])
|
{
"content_hash": "ca9ec44b21e72f0809b0ea1d2b5b8945",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 82,
"avg_line_length": 33.39705882352941,
"alnum_prop": 0.5830030823425804,
"repo_name": "demisto/content",
"id": "6590ca656921e86fafb30dac24fe913f86234f70",
"size": "2271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/CommonScripts/Scripts/RegexExtractAll/RegexExtractAll_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from django.contrib.sites.models import SiteManager
from threaded_multihost import sites
import logging
log = logging.getLogger('threaded_multihost')
def site_get_current(self):
    """Overridden version of get_current, which is multihost aware."""
    return sites.by_host()

# Monkey-patch Django's SiteManager so Site lookups resolve against the
# requesting host (via threaded_multihost) instead of a fixed SITE_ID.
SiteManager.get_current = site_get_current
# Marker other code can check to see whether the patch has been applied.
SiteManager.MULTIHOST = True
log.debug('Patched Django for multihost awareness.')
|
{
"content_hash": "bb9fbf4b83954759d6de927397c8807a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 31.076923076923077,
"alnum_prop": 0.7797029702970297,
"repo_name": "diver-in-sky/django-threaded-multihost",
"id": "845c9bd059bb2cb673e2622d0f0274e332cfe20e",
"size": "404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "threaded_multihost/multihost_patch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34550"
}
],
"symlink_target": ""
}
|
"""
Written by Luckylau
Github: https://github.com/Luckylau
Email: laujunbupt0913@163.com
# Note: Example code For testing purposes only
"""
from pyVim.connect import SmartConnect, Disconnect
import atexit
from pyVmomi import vim
import sys
import argparse
import getpass
import ssl
def add_nic(vm, mac, port):
    """Attach a new E1000 NIC to *vm*, backed by the given DVS port.

    :param vm: the vim.VirtualMachine to reconfigure.
    :param mac: MAC address to assign to the new card.
    :param port: DV port object providing portgroupKey, dvsUuid and key.
    """
    spec = vim.vm.ConfigSpec()
    nic_changes = []
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    nic_spec.device = vim.vm.device.VirtualE1000()
    nic_spec.device.deviceInfo = vim.Description()
    nic_spec.device.deviceInfo.summary = 'vCenter API'
    # Back the NIC with a specific port on a distributed virtual switch.
    nic_spec.device.backing = \
        vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
    nic_spec.device.backing.port = vim.dvs.PortConnection()
    nic_spec.device.backing.port.portgroupKey = port.portgroupKey
    nic_spec.device.backing.port.switchUuid = port.dvsUuid
    nic_spec.device.backing.port.portKey = port.key
    # Connect on next power-on but leave the link down for now ('untried').
    nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic_spec.device.connectable.startConnected = True
    nic_spec.device.connectable.allowGuestControl = True
    nic_spec.device.connectable.connected = False
    nic_spec.device.connectable.status = 'untried'
    nic_spec.device.wakeOnLanEnabled = True
    # 'assigned' lets us supply an explicit MAC instead of a generated one.
    nic_spec.device.addressType = 'assigned'
    nic_spec.device.macAddress = mac
    nic_changes.append(nic_spec)
    spec.deviceChange = nic_changes
    e = vm.ReconfigVM_Task(spec=spec)
    # NOTE(review): the task handle `e` is never awaited, so success is printed
    # before the reconfigure actually completes (or fails) — confirm upstream.
    print("Nic card added success ...")
def get_obj(content, vimtype, name):
    """Return the first managed object of *vimtype* whose name equals *name*.

    Searches recursively from the root folder; returns None when nothing matches.
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True
    )
    for candidate in container.view:
        if candidate.name == name:
            return candidate
    return None
def search_port(dvs, portgroupkey):
    """Return the key of the first unused port inside the given port group.

    Queries the DVS for ports that are inside the group and not connected;
    raises IndexError when the group has no free port.
    """
    criteria = vim.dvs.PortCriteria()
    criteria.connected = False
    criteria.inside = True
    criteria.portgroupKey = portgroupkey
    available_keys = [port.key for port in dvs.FetchDVPorts(criteria)]
    print(available_keys)
    return available_keys[0]
def get_args():
    """Parse the command-line options used to talk to vCenter.

    Prompts for the password interactively when -p/--password is omitted.
    """
    parser = argparse.ArgumentParser(
        description='Arguments for talking to vCenter')
    parser.add_argument('-s', '--host', required=True, action='store',
                        help='vSphere service to connect to')
    parser.add_argument('-o', '--port', type=int, default=443, action='store',
                        help='Port to connect on')
    parser.add_argument('-u', '--user', required=True, action='store',
                        help='User name to use')
    parser.add_argument('-p', '--password', required=False, action='store',
                        help='Password to use')
    parser.add_argument('-v', '--vm-name', required=True, action='store',
                        help='Name of the vm')
    parser.add_argument('-pg', '--portgroup', required=True, action='store',
                        help='Port group to connect on')
    parser.add_argument('-mac', '--macaddress', required=True, action='store',
                        help='Macadress of vm')
    parsed = parser.parse_args()
    if not parsed.password:
        parsed.password = getpass.getpass(prompt='Enter password')
    return parsed
def port_find(dvs, key):
    """Return the DV port on *dvs* whose key equals *key*, else None.

    Mirrors the original loop, which kept scanning and therefore yields the
    LAST matching port when keys are duplicated.
    """
    matches = [port for port in dvs.FetchDVPorts() if port.key == key]
    return matches[-1] if matches else None
def main():
    """Connect to vCenter and attach a new E1000 NIC to the requested VM.

    Steps: parse CLI args, connect (skipping TLS verification when the
    runtime supports it), locate the port group, pick a free DVS port,
    locate the VM, then reconfigure it.
    """
    args = get_args()

    # Self-signed lab certificates: disable verification when available.
    context = None
    if hasattr(ssl, "_create_unverified_context"):
        context = ssl._create_unverified_context()

    serviceInstance = SmartConnect(host=args.host,
                                   user=args.user,
                                   pwd=args.password,
                                   port=args.port,
                                   sslContext=context)
    # Always disconnect cleanly, whatever exit path is taken.
    atexit.register(Disconnect, serviceInstance)
    content = serviceInstance.RetrieveContent()

    print("Search VDS PortGroup by Name ...")
    # (Dead `portgroup = None` / `vm = None` pre-assignments removed: both
    # names were immediately reassigned.)
    portgroup = get_obj(content,
                        [vim.dvs.DistributedVirtualPortgroup], args.portgroup)
    if portgroup is None:
        print("Portgroup not Found in DVS ...")
        # NOTE(review): exits with status 0 even though this is a failure —
        # kept as-is to preserve existing behavior.
        exit(0)

    print("Search Available(Unused) port for VM...")
    dvs = portgroup.config.distributedVirtualSwitch
    portKey = search_port(dvs, portgroup.key)
    port = port_find(dvs, portKey)

    print("Search VM by Name ...")
    vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
    if vm:
        print("Find Vm , Add Nic Card ...")
        add_nic(vm, args.macaddress, port)
    else:
        print("Vm not Found ...")
if __name__ == '__main__':
    # main() returns None, so this exits with status 0 after a normal run.
    sys.exit(main())
|
{
"content_hash": "bd5a5332820ce487f3b8eed60a94554f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 30.36094674556213,
"alnum_prop": 0.583122198401871,
"repo_name": "jm66/pyvmomi-community-samples",
"id": "bb6d4c793cb7d476386eb3d8b886ce2eeac04af8",
"size": "5153",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "samples/add_vm_nic_to_dvs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1631"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.core.urlresolvers import reverse
from django.db import models
from docfish.settings import MEDIA_ROOT
from itertools import chain
import collections
import operator
import os
#######################################################################################################
# Supporting Functions and Variables ##################################################################
#######################################################################################################
# Create a token for the user when the user is created (with oAuth2)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: issue a DRF auth Token for each newly created user.

    Fires on every save of AUTH_USER_MODEL but only acts when the row was
    just created.
    """
    if created:
        Token.objects.create(user=instance)
# Get path to where images are stored for teams
def get_image_path(instance, filename):
    """Return the upload path (relative to MEDIA_ROOT) for a team image.

    Ensures the MEDIA_ROOT/teams directory exists before Django writes to it.

    :param instance: the model instance being saved (unused, required by Django).
    :param filename: the original name of the uploaded file.
    """
    team_folder = os.path.join(MEDIA_ROOT, 'teams')
    # makedirs(exist_ok=True) fixes two problems with the original os.mkdir:
    # it creates missing parent directories, and it does not raise when two
    # concurrent requests race to create the folder.
    os.makedirs(team_folder, exist_ok=True)
    return os.path.join('teams', filename)
# Choices for Team.permission: governs how new members may join a team.
TEAM_TYPES = (('invite', 'Invite only. The user must be invited by the team administrator.'),
              ('institution', 'Institution only. Any user with the same institution as the creator can join'),
              ('open','Open. Anyone can join the team without asking.'))

# Choices for MembershipRequest.status: lifecycle states of a join request.
REQUEST_CHOICES = (("denied", 'Request has not been granted.'),
                   ("pending", 'Request is pending.'),
                   ("granted", 'Request has been granted'),)
#######################################################################################################
# Teams ###############################################################################################
#######################################################################################################
class Team(models.Model):
    '''A user team is a group of individuals that are annotating reports together. They can be reports across collections, or
    institutions, however each user is only allowed to join one team.
    '''
    name = models.CharField(max_length=250, null=False, blank=False, verbose_name="Team Name")
    owner = models.ForeignKey(User, blank=True, verbose_name="Team owner and adminstrator.")
    created_at = models.DateTimeField('date of creation', auto_now_add=True)
    updated_at = models.DateTimeField('date of last update', auto_now=True)
    # IDs of the Collections this team annotates. `default=list` (a callable)
    # gives every row its own fresh list; the original `default=[]` shared a
    # single mutable list object across all Team instances.
    collection_ids = JSONField(default=list)
    team_image = models.ImageField(upload_to=get_image_path, blank=True, null=True)
    metrics_updated_at = models.DateTimeField('date of last calculation of rank and annotations', blank=True, null=True)
    ranking = models.PositiveIntegerField(blank=True, null=True,
                                          verbose_name="team ranking based on total number of annotations, calculated once daily.")
    annotation_count = models.IntegerField(blank=False, null=False,
                                           verbose_name="team annotation count, calculated once daily.",
                                           default=0)
    permission = models.CharField(choices=TEAM_TYPES,
                                  default='open',
                                  max_length=100,
                                  verbose_name="Permission level for joining the team.")
    members = models.ManyToManyField(User,
                                     related_name="team_members",
                                     related_query_name="team_members", blank=True,
                                     help_text="Members of the team. By default, creator is made member.")

    # would more ideally be implemented with User model, but this will work
    # we will constrain each user to joining one team on view side

    def collections(self):
        """Return the Collection objects referenced by collection_ids."""
        from docfish.apps.main.models import Collection
        return Collection.objects.filter(id__in=self.collection_ids)

    def __str__(self):
        return "%s:%s" % (self.id, self.name)

    def __unicode__(self):
        return "%s:%s" % (self.id, self.name)

    def get_absolute_url(self):
        return reverse('team_details', args=[str(self.id)])

    def contender_collections(self):
        """Collections the team could annotate: the owner's plus public ones."""
        from docfish.apps.main.models import Collection
        owner_collections = Collection.objects.filter(owner=self.owner)
        public_collections = Collection.objects.exclude(owner=self.owner, private=False)
        return list(chain(owner_collections, public_collections))

    def add_collection(self, cid):
        """Track a collection id (deduplicated).

        NOTE(review): unlike remove_collection, this does not call save();
        callers appear responsible for persisting — confirm before changing.
        """
        if cid not in self.collection_ids:
            self.collection_ids.append(cid)

    def remove_collection(self, cid):
        """Stop tracking a collection id and persist the change."""
        self.collection_ids = [x for x in self.collection_ids if x != cid]
        self.save()

    def has_collections(self):
        """Return True when at least one collection id is tracked."""
        if len(self.collection_ids) > 0:
            return True
        return False

    def get_label(self):
        return "users"

    class Meta:
        app_label = 'users'
class MembershipInvite(models.Model):
    '''An invitation to join a team.
    '''
    # Secret code the invitee must present; unique per team (see Meta).
    code = models.CharField(max_length=200, null=False, blank=False)
    team = models.ForeignKey(Team)

    def __str__(self):
        return "<%s:%s>" %(self.id,self.team.name)

    def __unicode__(self):
        return "<%s:%s>" %(self.id,self.team.name)

    def get_label(self):
        """App label used to group this model under 'users'."""
        return "users"

    class Meta:
        app_label = 'users'
        unique_together = (("code", "team"),)
class MembershipRequest(models.Model):
    '''A request for membership is tied to a team.
    A user is granted access if the owner grants him/her permission.
    '''
    user = models.ForeignKey(User)
    team = models.ForeignKey(Team)
    created_at = models.DateTimeField('date of request', auto_now_add=True)
    # Lifecycle: pending -> granted/denied (see REQUEST_CHOICES above).
    status = models.CharField(max_length=200, null=False,
                              verbose_name="Status of request",
                              default="pending",choices=REQUEST_CHOICES)

    def __str__(self):
        return "<%s:%s>" %(self.user,self.team.name)

    def __unicode__(self):
        return "<%s:%s>" %(self.user,self.team.name)

    def get_label(self):
        """App label used to group this model under 'users'."""
        return "users"

    class Meta:
        app_label = 'users'
        # At most one open request per (user, team) pair.
        unique_together = (("user", "team"),)
|
{
"content_hash": "958eba130f16bc240d991186d3485d03",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 131,
"avg_line_length": 41.20967741935484,
"alnum_prop": 0.6044357469015004,
"repo_name": "vsoch/docfish",
"id": "0000342b1d618c2364aae93fb20732e00e14a2b7",
"size": "7665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docfish/apps/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1203987"
},
{
"name": "HTML",
"bytes": "679372"
},
{
"name": "JavaScript",
"bytes": "3447989"
},
{
"name": "Nginx",
"bytes": "1783"
},
{
"name": "Python",
"bytes": "322003"
},
{
"name": "Shell",
"bytes": "365"
}
],
"symlink_target": ""
}
|
import attr
import datetime
from ._abc import ThreadABC
from .._common import log, attrs_default
from .. import _util, _session, _models
from typing import Optional
# Maps Facebook's gender codes — integers from standard requests, strings from
# GraphQL — onto a single canonical set of names used throughout fbchat.
GENDERS = {
    # For standard requests
    0: "unknown",
    1: "female_singular",
    2: "male_singular",
    3: "female_singular_guess",
    4: "male_singular_guess",
    5: "mixed",
    6: "neuter_singular",
    7: "unknown_singular",
    8: "female_plural",
    9: "male_plural",
    10: "neuter_plural",
    11: "unknown_plural",
    # For graphql requests
    "UNKNOWN": "unknown",
    "FEMALE": "female_singular",
    "MALE": "male_singular",
    # '': 'female_singular_guess',
    # '': 'male_singular_guess',
    # '': 'mixed',
    "NEUTER": "neuter_singular",
    # '': 'unknown_singular',
    # '': 'female_plural',
    # '': 'male_plural',
    # '': 'neuter_plural',
    # '': 'unknown_plural',
}
@attrs_default
class User(ThreadABC):
    """Represents a Facebook user. Implements `ThreadABC`.
    Example:
        >>> user = fbchat.User(session=session, id="1234")
    """

    #: The session to use when making requests.
    session = attr.ib(type=_session.Session)
    #: The user's unique identifier.
    id = attr.ib(converter=str, type=str)

    def _to_send_data(self):
        # Request-payload fragment that addresses this user as the recipient.
        return {
            "other_user_fbid": self.id,
            # The entry below is to support .wave
            "specific_to_list[0]": "fbid:{}".format(self.id),
        }

    def _copy(self) -> "User":
        # Fresh instance carrying only the identifying state (session + id).
        return User(session=self.session, id=self.id)

    def confirm_friend_request(self):
        """Confirm a friend request, adding the user to your friend list.
        Example:
            >>> user.confirm_friend_request()
        """
        data = {"to_friend": self.id, "action": "confirm"}
        # NOTE(review): the parsed payload `j` is discarded; presumably failures
        # surface as exceptions inside _payload_post — confirm.
        j = self.session._payload_post("/ajax/add_friend/action.php?dpr=1", data)

    def remove_friend(self):
        """Remove the user from the client's friend list.
        Example:
            >>> user.remove_friend()
        """
        data = {"uid": self.id}
        j = self.session._payload_post("/ajax/profile/removefriendconfirm.php", data)

    def block(self):
        """Block messages from the user.
        Example:
            >>> user.block()
        """
        data = {"fbid": self.id}
        j = self.session._payload_post("/messaging/block_messages/?dpr=1", data)

    def unblock(self):
        """Unblock a previously blocked user.
        Example:
            >>> user.unblock()
        """
        data = {"fbid": self.id}
        j = self.session._payload_post("/messaging/unblock_messages/?dpr=1", data)
@attrs_default
class UserData(User):
    """Represents data about a Facebook user.
    Inherits `User`, and implements `ThreadABC`.
    """

    #: The user's picture
    photo = attr.ib(type=_models.Image)
    #: The name of the user
    name = attr.ib(type=str)
    #: Whether the user and the client are friends
    is_friend = attr.ib(type=bool)
    #: The users first name
    first_name = attr.ib(type=str)
    #: The users last name
    last_name = attr.ib(None, type=Optional[str])
    #: When the thread was last active / when the last message was sent
    last_active = attr.ib(None, type=Optional[datetime.datetime])
    #: Number of messages in the thread
    message_count = attr.ib(None, type=Optional[int])
    #: Set `Plan`
    plan = attr.ib(None, type=Optional[_models.PlanData])
    #: The profile URL. ``None`` for Messenger-only users
    url = attr.ib(None, type=Optional[str])
    #: The user's gender
    gender = attr.ib(None, type=Optional[str])
    #: From 0 to 1. How close the client is to the user
    affinity = attr.ib(None, type=Optional[float])
    #: The user's nickname
    nickname = attr.ib(None, type=Optional[str])
    #: The clients nickname, as seen by the user
    own_nickname = attr.ib(None, type=Optional[str])
    #: The message color
    color = attr.ib(None, type=Optional[str])
    #: The default emoji
    emoji = attr.ib(None, type=Optional[str])

    @staticmethod
    def _get_other_user(data):
        """Pick, out of all thread participants, the node whose id matches
        the thread's ``other_user_id`` (the non-client participant).

        The one-element unpack raises ValueError if zero or several match.
        """
        (user,) = (
            node["messaging_actor"]
            for node in data["all_participants"]["nodes"]
            if node["messaging_actor"]["id"] == data["thread_key"]["other_user_id"]
        )
        return user

    @classmethod
    def _from_graphql(cls, session, data):
        """Build a `UserData` from a GraphQL user payload."""
        c_info = cls._parse_customization_info(data)
        # The first event reminder, when present, becomes the thread's plan.
        plan = None
        if data.get("event_reminders") and data["event_reminders"].get("nodes"):
            plan = _models.PlanData._from_graphql(
                session, data["event_reminders"]["nodes"][0]
            )
        return cls(
            session=session,
            id=data["id"],
            url=data["url"],
            first_name=data["first_name"],
            last_name=data.get("last_name"),
            is_friend=data["is_viewer_friend"],
            gender=GENDERS.get(data["gender"]),
            affinity=data.get("viewer_affinity"),
            nickname=c_info.get("nickname"),
            color=c_info["color"],
            emoji=c_info["emoji"],
            own_nickname=c_info.get("own_nickname"),
            photo=_models.Image._from_uri(data["profile_picture"]),
            name=data["name"],
            message_count=data.get("messages_count"),
            plan=plan,
        )

    @classmethod
    def _from_thread_fetch(cls, session, data):
        """Build a `UserData` from a thread-fetch payload.

        Returns None (with a warning) when the other participant is not a
        plain User (e.g. a Page).
        """
        user = cls._get_other_user(data)
        if user["__typename"] != "User":
            # TODO: Add Page._from_thread_fetch, and parse it there
            log.warning("Tried to parse %s as a user.", user["__typename"])
            return None
        c_info = cls._parse_customization_info(data)
        plan = None
        if data["event_reminders"]["nodes"]:
            plan = _models.PlanData._from_graphql(
                session, data["event_reminders"]["nodes"][0]
            )
        return cls(
            session=session,
            id=user["id"],
            url=user["url"],
            name=user["name"],
            first_name=user["short_name"],
            is_friend=user["is_viewer_friend"],
            gender=GENDERS.get(user["gender"]),
            nickname=c_info.get("nickname"),
            color=c_info["color"],
            emoji=c_info["emoji"],
            own_nickname=c_info.get("own_nickname"),
            photo=_models.Image._from_uri(user["big_image_src"]),
            message_count=data["messages_count"],
            last_active=_util.millis_to_datetime(int(data["updated_time_precise"])),
            plan=plan,
        )

    @classmethod
    def _from_all_fetch(cls, session, data):
        """Build a `UserData` from the all-users fetch payload.

        Note this payload carries fewer fields (camelCase keys) than GraphQL.
        """
        return cls(
            session=session,
            id=data["id"],
            first_name=data["firstName"],
            url=data["uri"],
            photo=_models.Image(url=data["thumbSrc"]),
            name=data["name"],
            is_friend=data["is_friend"],
            gender=GENDERS.get(data["gender"]),
        )
|
{
"content_hash": "1aa308d75f918271dfdb1a1a72cecdf9",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 85,
"avg_line_length": 31.506787330316744,
"alnum_prop": 0.5619704150509838,
"repo_name": "carpedm20/fbchat",
"id": "00bbeab791bcfb420151e59ee23f026541ac888b",
"size": "6963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fbchat/_threads/_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "332266"
}
],
"symlink_target": ""
}
|
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from .base import SelectorMixin
from ..metrics.scorer import check_scoring
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
    """Feature ranking with recursive feature elimination.

    Given an external estimator that assigns weights to features (e.g., the
    coefficients of a linear model), the goal of recursive feature elimination
    (RFE) is to select features by recursively considering smaller and smaller
    sets of features. First, the estimator is trained on the initial set of
    features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set features.
    That procedure is recursively repeated on the pruned set until the desired
    number of features to select is eventually reached.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.

    n_features_to_select : int or None (default=None)
        The number of features to select. If `None`, half of the features
        are selected.

    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.

    estimator_params : dict or None, optional (default=None)
        Parameters for the external estimator.
        Useful for doing grid searches when an `RFE` object is passed as an
        argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
        `None` is treated as an empty dict.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        The number of selected features.

    support_ : array of shape [n_features]
        The mask of selected features.

    ranking_ : array of shape [n_features]
        The feature ranking, such that `ranking_[i]` corresponds to the \
        ranking position of the i-th feature. Selected (i.e., estimated \
        best) features are assigned rank 1.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    Examples
    --------
    The following example shows how to retrieve the 5 right informative
    features in the Friedman #1 dataset.

    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFE
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFE(estimator, 5, step=1)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, n_features_to_select=None, step=1,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        # NOTE: the default used to be a shared mutable ``{}``; ``None``
        # avoids the mutable-default-argument pitfall and is treated as
        # "no extra parameters" everywhere it is used.
        self.estimator_params = estimator_params
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the RFE model and then the underlying estimator on the selected
           features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples]
            The target values.

        Returns
        -------
        self
        """
        X, y = check_X_y(X, y, "csc")
        # Initialization
        n_features = X.shape[1]
        if self.n_features_to_select is None:
            # Floor division: under Python 3, ``/`` would produce a float,
            # which later breaks the integer slice ``[:threshold]``.
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select

        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")

        # ``None`` default means no extra estimator parameters.
        estimator_params = self.estimator_params or {}

        # ``np.bool``/``np.int`` aliases were removed from NumPy; the
        # builtin types are the supported spellings.
        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)
        # Elimination
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]

            # Rank the remaining features
            estimator = clone(self.estimator)
            estimator.set_params(**estimator_params)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))
            estimator.fit(X[:, features], y)

            # Multi-class/multi-output coefficients are reduced by summing
            # squared weights over outputs before ranking.
            if estimator.coef_.ndim > 1:
                ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(estimator.coef_))

            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)

            # Eliminate the worst features, but never drop below the target.
            threshold = min(step, np.sum(support_) - n_features_to_select)
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1

        # Set final attributes
        self.estimator_ = clone(self.estimator)
        self.estimator_.set_params(**estimator_params)
        self.estimator_.fit(X[:, support_], y)

        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self

    def predict(self, X):
        """Reduce X to the selected features and then predict using the
           underlying estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        return self.estimator_.predict(self.transform(X))

    def score(self, X, y):
        """Reduce X to the selected features and then return the score of the
           underlying estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        y : array of shape [n_samples]
            The target values.
        """
        return self.estimator_.score(self.transform(X), y)

    def _get_support_mask(self):
        # Boolean mask of the selected features (SelectorMixin hook).
        return self.support_

    def decision_function(self, X):
        """Reduce X to the selected features and call the estimator's
        ``decision_function``."""
        return self.estimator_.decision_function(self.transform(X))

    def predict_proba(self, X):
        """Reduce X to the selected features and call the estimator's
        ``predict_proba``."""
        return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
    """Feature ranking with recursive feature elimination and cross-validated
       selection of the best number of features.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important features
        must correspond to high absolute values in the `coef_` array.
        For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.

    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the (integer)
        number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.

    cv : int or cross-validation generator, optional (default=None)
        If int, it is the number of folds.
        If None, 3-fold cross-validation is performed by default.
        Specific cross-validation objects can also be passed, see
        `sklearn.cross_validation module` for details.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    estimator_params : dict or None, optional (default=None)
        Parameters for the external estimator.
        Useful for doing grid searches when an `RFE` object is passed as an
        argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
        `None` is treated as an empty dict.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        The number of selected features with cross-validation.
    support_ : array of shape [n_features]
        The mask of selected features.

    ranking_ : array of shape [n_features]
        The feature ranking, such that `ranking_[i]`
        corresponds to the ranking
        position of the i-th feature.
        Selected (i.e., estimated best)
        features are assigned rank 1.

    grid_scores_ : array of shape [n_subsets_of_features]
        The cross-validation scores such that
        `grid_scores_[i]` corresponds to
        the CV score of the i-th subset of features.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    Examples
    --------
    The following example shows how to retrieve the a-priori not known 5
    informative features in the Friedman #1 dataset.

    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.feature_selection import RFECV
    >>> from sklearn.svm import SVR
    >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    >>> estimator = SVR(kernel="linear")
    >>> selector = RFECV(estimator, step=1, cv=5)
    >>> selector = selector.fit(X, y)
    >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True,  True,  True,
            False, False, False, False, False], dtype=bool)
    >>> selector.ranking_
    array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, step=1, cv=None, scoring=None,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.step = step
        self.cv = cv
        self.scoring = scoring
        # NOTE: ``None`` instead of a shared mutable ``{}`` default; it is
        # interpreted as "no extra parameters" wherever it is used.
        self.estimator_params = estimator_params
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the RFE model and automatically tune the number of selected
           features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the total number of features.

        y : array-like, shape = [n_samples]
            Target values (integers for classification, real numbers for
            regression).

        Returns
        -------
        self
        """
        X, y = check_X_y(X, y, "csr")
        # Normalize the ``None`` default so downstream ``set_params(**...)``
        # and the inner RFE always receive a dict.
        estimator_params = self.estimator_params or {}

        # Initialization: rank every feature down to a single survivor.
        rfe = RFE(estimator=self.estimator, n_features_to_select=1,
                  step=self.step, estimator_params=estimator_params,
                  verbose=self.verbose - 1)

        cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        scores = np.zeros(X.shape[1])

        # Cross-validation
        for n, (train, test) in enumerate(cv):
            X_train, y_train = _safe_split(self.estimator, X, y, train)
            X_test, y_test = _safe_split(self.estimator, X, y, test, train)

            # Compute a full ranking of the features
            ranking_ = rfe.fit(X_train, y_train).ranking_
            # Score each subset of features
            n_ranks = max(ranking_)  # hoisted loop invariant
            for k in range(0, n_ranks):
                mask = np.where(ranking_ <= k + 1)[0]
                estimator = clone(self.estimator)
                estimator.fit(X_train[:, mask], y_train)
                score = _score(estimator, X_test[:, mask], y_test, scorer)

                if self.verbose > 0:
                    print("Finished fold with %d / %d feature ranks, score=%f"
                          % (k + 1, n_ranks, score))
                scores[k] += score

        # Pick the best number of features on average
        k = np.argmax(scores)

        # Re-execute an elimination with best_k over the whole set
        rfe = RFE(estimator=self.estimator,
                  n_features_to_select=k+1,
                  step=self.step, estimator_params=estimator_params)

        rfe.fit(X, y)

        # Set final attributes
        self.support_ = rfe.support_
        self.n_features_ = rfe.n_features_
        self.ranking_ = rfe.ranking_
        self.estimator_ = clone(self.estimator)
        self.estimator_.set_params(**estimator_params)
        self.estimator_.fit(self.transform(X), y)

        # Fixing a normalization error, n is equal to len(cv) - 1
        # here, the scores are normalized by len(cv)
        self.grid_scores_ = scores / len(cv)
        return self
|
{
"content_hash": "bd85029f0dc62ccdf6089a014e89108e",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 79,
"avg_line_length": 37.68292682926829,
"alnum_prop": 0.6137360661632506,
"repo_name": "thilbern/scikit-learn",
"id": "a0589fd0c3d946bda615eb3afa01be4e0e6c11ae",
"size": "14090",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/feature_selection/rfe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385102"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1418"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "4850479"
},
{
"name": "Shell",
"bytes": "3861"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import QuestionSet
# Register your models here.
# Expose QuestionSet in the Django admin site with the default ModelAdmin.
admin.site.register(QuestionSet)
|
{
"content_hash": "8c58b75eff11b1642877d670196441e2",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 25.4,
"alnum_prop": 0.8267716535433071,
"repo_name": "jolahde/rak",
"id": "74a129a901c5f235efd497e21e1a4e20c6622820",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rmt/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1406"
},
{
"name": "HTML",
"bytes": "3372"
},
{
"name": "Python",
"bytes": "13208"
}
],
"symlink_target": ""
}
|
"""Tests for Cloud Spanner Operations api."""
from tests.gcloud import emulator
class GCloudOperationsTest(emulator.TestCase):
  """gcloud CLI tests for Cloud Spanner long-running operations (LROs)."""

  def testDescribeOperation(self):
    # Creating an instance kicks off a long-running operation.
    operation_uri = self.CreateInstance('test-instance')
    # The describe output includes start & end timestamps that differ on
    # every call, so only check that the instance name appears in it.
    describe_output = self.RunGCloud(
        'spanner', 'operations', 'describe', operation_uri,
        '--instance=test-instance')
    name_pos = describe_output.find(
        ' name: projects/test-project/instances/test-instance')
    self.assertGreater(name_pos, 0)

  def testListOperation(self):
    # Creating an instance starts a LRO; the create command prints nothing.
    create_output = self.RunGCloud(
        'spanner', 'instances', 'create', 'test-instance',
        '--config=emulator-config',
        '--description=Test Instance', '--nodes', '3')
    self.assertEqual(create_output, self.JoinLines(''))
    # The new operation must show up when listing for that instance.
    list_output = self.RunGCloud('spanner', 'operations', 'list',
                                 '--instance=test-instance')
    self.assertEqual(
        list_output,
        self.JoinLines(
            'OPERATION_ID STATEMENTS DONE @TYPE',
            '_auto0 True CreateInstanceMetadata'))
# Run the suite directly against the emulator when invoked as a script.
if __name__ == '__main__':
  emulator.RunTests()
|
{
"content_hash": "8e754efeed5eb3980dc2b23f3812ac1d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 34.87179487179487,
"alnum_prop": 0.6125,
"repo_name": "GoogleCloudPlatform/cloud-spanner-emulator",
"id": "099a4885a477e4312bf511f957c8abef47a6f3f3",
"size": "1939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gcloud/operation_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "930"
},
{
"name": "C",
"bytes": "1191"
},
{
"name": "C++",
"bytes": "2784986"
},
{
"name": "Dockerfile",
"bytes": "1074"
},
{
"name": "Go",
"bytes": "11426"
},
{
"name": "Python",
"bytes": "27832"
},
{
"name": "Shell",
"bytes": "18701"
},
{
"name": "Starlark",
"bytes": "171188"
}
],
"symlink_target": ""
}
|
from netkit.line_box import LineBox
class KolaBox(LineBox):
    """A LineBox whose payload is a JSON object carrying ``endpoint``/``sn``."""

    def __init__(self, init_data=None):
        # Do not let the base class initialize with init_data directly;
        # route it through set_json so the payload is stored as JSON.
        super(KolaBox, self).__init__(None)
        if init_data:
            self.set_json(init_data)

    @property
    def values(self):
        """Parsed JSON payload, or None when the body cannot be decoded."""
        if not getattr(self, '_values', None):
            # Parse once and cache, to avoid repeated JSON decoding cost.
            try:
                self._values = self.get_json()
            except Exception:
                # BUGFIX: narrowed from a bare ``except:`` so that
                # KeyboardInterrupt/SystemExit still propagate.
                self._values = None
        return self._values

    @property
    def cmd(self):
        # NOTE(review): raises AttributeError if values is None — presumably
        # callers only access this on well-formed boxes; confirm upstream.
        return self.values.get('endpoint', None)

    @property
    def sn(self):
        return self.values.get('sn', 0)

    def map(self, map_data):
        """Build a reply box that keeps this box's endpoint and sn."""
        init_data = dict(
            endpoint=self.cmd,
            sn=self.sn,
        )
        init_data.update(map_data)
        return self.__class__(init_data)
|
{
"content_hash": "0cdc6fc92e69dc23f5279bfcd85b7733",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 48,
"avg_line_length": 21.225,
"alnum_prop": 0.5217903415783275,
"repo_name": "yangdw/PyRepo",
"id": "d64a3c2ba9ad20bc1795e58252614423879c6284",
"size": "912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/annotation/haven/examples/benchmark_test/kola_box.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "30989"
},
{
"name": "HTML",
"bytes": "2746"
},
{
"name": "Makefile",
"bytes": "32699"
},
{
"name": "Nginx",
"bytes": "635"
},
{
"name": "Protocol Buffer",
"bytes": "11054"
},
{
"name": "Python",
"bytes": "2086749"
},
{
"name": "SQLPL",
"bytes": "3364"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
}
|
"""
FLI
Object-oriented interface for handling Finger Lakes Instrumentation devices
author: Craig Wm. Versek, Yankee Environmental Systems
author_email: cwv@yesinc.com
"""
from camera import USBCamera
from filter_wheel import USBFilterWheel
from focuser import USBFocuser
|
{
"content_hash": "0b5dc21aec3a57762a8b4f64b2502021",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 26,
"alnum_prop": 0.7867132867132867,
"repo_name": "tzuchiangshen/python-FLI",
"id": "f14b5898464a59ac1b71a6242602544d7f4e5ac5",
"size": "286",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/FLI/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2235"
},
{
"name": "Python",
"bytes": "48223"
}
],
"symlink_target": ""
}
|
"""Tests for classroom models."""
from __future__ import annotations
import types
from core.platform import models
from core.tests import test_utils
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import classroom_models
(base_models, classroom_models) = models.Registry.import_models([
models.Names.BASE_MODEL, models.Names.CLASSROOM
])
class ClassroomModelUnitTest(test_utils.GenericTestBase):
    """Test the ClassroomModel class."""
    def setUp(self) -> None:
        # Persist a single classroom ('math', id='id') used by the lookup
        # tests below.
        super().setUp()
        self.classroom_model = classroom_models.ClassroomModel(
            id='id',
            name='math',
            url_fragment='math',
            course_details='Curated math foundations course.',
            topic_list_intro='Start from the basics with our first topic.',
            topic_id_to_prerequisite_topic_ids={}
        )
        self.classroom_model.update_timestamps()
        self.classroom_model.put()
    def test_create_new_model(self) -> None:
        # create() should store every field it is given on the new model.
        classroom_id = (
            classroom_models.ClassroomModel.generate_new_classroom_id())
        classroom_model_instance = (classroom_models.ClassroomModel.create(
            classroom_id, 'physics', 'physics', 'Curated physics course.',
            'Start from the basic physics.', {}))
        self.assertEqual(classroom_model_instance.name, 'physics')
        self.assertEqual(classroom_model_instance.url_fragment, 'physics')
        self.assertEqual(
            classroom_model_instance.course_details, 'Curated physics course.')
        self.assertEqual(
            classroom_model_instance.topic_list_intro,
            'Start from the basic physics.')
    def test_get_export_policy_not_applicable(self) -> None:
        # No classroom field holds user data, so every field is
        # NOT_APPLICABLE for Takeout export.
        self.assertEqual(
            classroom_models.ClassroomModel.get_export_policy(),
            {
                'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'course_details': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'topic_list_intro': base_models.EXPORT_POLICY.NOT_APPLICABLE,
                'topic_id_to_prerequisite_topic_ids': (
                    base_models.EXPORT_POLICY.NOT_APPLICABLE)
            }
        )
    def test_get_model_association_to_user_not_corresponding_to_user(
        self
    ) -> None:
        # Classroom models are not tied to any user account.
        self.assertEqual(
            classroom_models.ClassroomModel.get_model_association_to_user(),
            base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER)
    def test_get_deletion_policy_not_applicable(self) -> None:
        # Nothing to delete on user wipeout.
        self.assertEqual(
            classroom_models.ClassroomModel.get_deletion_policy(),
            base_models.DELETION_POLICY.NOT_APPLICABLE)
    def test_get_model_by_url_fragment(self) -> None:
        # Lookup by the fragment stored in setUp succeeds; unknown
        # fragments yield None.
        self.assertEqual(
            classroom_models.ClassroomModel.get_by_url_fragment('math'),
            self.classroom_model)
        self.assertEqual(
            classroom_models.ClassroomModel.get_by_url_fragment(
                'incorrect_url_fragment'), None)
    def test_get_model_by_name(self) -> None:
        # Lookup by name mirrors the url_fragment behavior above.
        self.assertEqual(
            classroom_models.ClassroomModel.get_by_name('math'),
            self.classroom_model)
        self.assertEqual(
            classroom_models.ClassroomModel.get_by_name('incorrect_name'),
            None)
    def test_get_model_by_id(self) -> None:
        self.assertEqual(
            classroom_models.ClassroomModel.get_by_id('id'),
            self.classroom_model)
        self.assertEqual(
            classroom_models.ClassroomModel.get_by_id('incorrect_id'),
            None)
    def test_raise_exception_by_mocking_collision(self) -> None:
        """Tests create and generate_new_classroom_id methods for raising
        exception.
        """
        classroom_model_cls = classroom_models.ClassroomModel
        # Test create method.
        with self.assertRaisesRegex(
            Exception,
            'A classroom with the given classroom ID already exists.'
        ):
            # Swap dependent method get_by_id to simulate collision every time.
            with self.swap(
                classroom_model_cls, 'get_by_id',
                types.MethodType(
                    lambda x, y: True,
                    classroom_model_cls
                )
            ):
                classroom_model_cls.create(
                    'classroom_id', 'math', 'math',
                    'Curated math foundations course.',
                    'Start from the basic math.', {}
                )
        # Test generate_new_classroom_id method.
        with self.assertRaisesRegex(
            Exception,
            'New classroom id generator is producing too many collisions.'
        ):
            # Swap dependent method get_by_id to simulate collision every time.
            with self.swap(
                classroom_model_cls, 'get_by_id',
                types.MethodType(
                    lambda x, y: True,
                    classroom_model_cls
                )
            ):
                classroom_model_cls.generate_new_classroom_id()
|
{
"content_hash": "c1935865847e8f07229bd0803359963f",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 38.36879432624114,
"alnum_prop": 0.5975970425138633,
"repo_name": "oppia/oppia",
"id": "539028fbbac9200a99592a026d949533d67e9575",
"size": "6033",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/storage/classroom/gae_models_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
}
|
from typing import Iterator
from google.cloud import bigquery
import pytest
from conftest import prefixer
import update_with_dml
@pytest.fixture
def table_id(
    bigquery_client: bigquery.Client, project_id: str, dataset_id: str
) -> Iterator[str]:
    """Yield a unique table id; drop the table (if created) on teardown."""
    name = f"{prefixer.create_prefix()}_update_with_dml"
    yield name
    # Teardown: remove whatever table the test may have created.
    qualified_name = f"{project_id}.{dataset_id}.{name}"
    bigquery_client.delete_table(qualified_name, not_found_ok=True)
def test_update_with_dml(
    bigquery_client_patch: None, dataset_id: str, table_id: str
) -> None:
    """run_sample should report at least one updated row."""
    num_rows = update_with_dml.run_sample(
        override_values={"dataset_id": dataset_id, "table_id": table_id}
    )
    assert num_rows > 0
|
{
"content_hash": "690ee07d129a2f6501c35ebb1b53c3c4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 27.035714285714285,
"alnum_prop": 0.6895640686922061,
"repo_name": "googleapis/python-bigquery",
"id": "ef5ec196ac8423a17ce508b5b8a81ed715136048",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/test_update_with_dml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2520564"
},
{
"name": "Shell",
"bytes": "31939"
}
],
"symlink_target": ""
}
|
from ....compiler import CompiledProgram
from ....data_feeder import DataFeeder
from .... import executor
from .graph_wrapper import GraphWrapper
__all__ = ['SlimGraphExecutor']
class SlimGraphExecutor(object):
    """
    Wrapper of executor used to run GraphWrapper.
    """

    def __init__(self, place):
        self.exe = executor.Executor(place)
        self.place = place

    def run(self, graph, scope, data=None):
        """
        Run a graph with a batch of data.
        Args:
            graph(GraphWrapper): The graph to be executed.
            scope(fluid.core.Scope): The scope to be used.
            data(list<tuple>): A batch of data. Each tuple in this list is a sample.
                               It will feed the items of tuple to the in_nodes of graph.
        Returns:
            results(list): A list of result with the same order indicated by graph.out_nodes.
        """
        assert isinstance(graph, GraphWrapper)
        if data is None:
            feed = None
        elif isinstance(data[0], dict):
            # Already a feed dict (return list = False); pass it through.
            feed = data
        else:
            # Convert raw sample tuples into a feed dict keyed by in_nodes.
            feeder = DataFeeder(
                feed_list=list(graph.in_nodes.values()),
                place=self.place,
                program=graph.program)
            feed = feeder.feed(data)
        fetch_targets = list(graph.out_nodes.values())
        run_program = (graph.compiled_graph
                       if graph.compiled_graph else graph.program)
        return self.exe.run(run_program,
                            scope=scope,
                            fetch_list=fetch_targets,
                            feed=feed)
|
{
"content_hash": "39cdb0d5cbf4da638fa0bdfc73e7f9cc",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 93,
"avg_line_length": 35.46808510638298,
"alnum_prop": 0.563887222555489,
"repo_name": "chengduoZH/Paddle",
"id": "1573d3aa1ce5d28c58bd8dbeaf0bfda622b998e5",
"size": "2279",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/contrib/slim/graph/executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10146609"
},
{
"name": "CMake",
"bytes": "291349"
},
{
"name": "Cuda",
"bytes": "1192566"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7124331"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
"""
@file comm_ethernet.py
"""
##
# @addtogroup ethernet ethernet
# @brief This is ethernet component
# @{
# @addtogroup comm_ethernet comm_ethernet
# @brief This is comm_ethernet module
# @{
##
import time
import os
import string
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import shell_cmd_timeout
from oeqa.utils.decorators import tag
@tag(TestType="EFT")
class CommEthernet(oeRuntimeTest):
"""
@class CommEthernet
"""
def get_ipv6(self):
"""
@fn get_ipv6
@param self
@return
"""
time.sleep(1)
# Check ip address by ifconfig command
interface = "nothing"
(status, interface) = self.target.run("ifconfig | grep '^enp' | awk '{print $1}'")
(status, output) = self.target.run("ifconfig %s | grep 'inet6 addr:' | awk '{print $3}'" % interface)
if output.split('%')[0] == '':
assertEqual(status, 0, msg="Target ipv6 address get fail: %s" % output)
else:
return output.split('%')[0]
def get_ipv4(self):
"""
@fn get_ipv4
@param self
@return
"""
time.sleep(1)
# Check ip address by ifconfig command
interface = "nothing"
(status, interface) = self.target.run("ifconfig | grep '^enp' | awk '{print $1}'")
(status, output) = self.target.run("ifconfig %s | grep 'inet addr:' | awk '{print $2}'" % interface)
if output.split(':')[1] == '':
assertEqual(status, 0, msg="Target ipv4 address get fail: %s" % output)
else:
return output.split(':')[1]
def get_interface(self):
"""
@fn get_interface
@param self
@return
"""
# if user takes -s option, it is host's IP, directly use it
if '192.168.7.1' == self.target.server_ip:
# Get target ip address prefix of LAN, for example, 192.168.8.100 is 192.168.8
ipv4 = self.get_ipv4().split('.')
prefix = "%s.%s.%s" % (ipv4[0], ipv4[1], ipv4[2])
else:
prefix = self.target.server_ip
# Use this prefix to get corresponding interface of the host
(status, ifconfig) = shell_cmd_timeout('ifconfig')
for line in ifconfig.splitlines():
if type(line) is bytes:
linetemp = line.decode('ascii')
if "inet addr:%s" % prefix in linetemp:
index = ifconfig.splitlines().index(line)
interface = ifconfig.splitlines()[index - 1]
if type(interface) is bytes:
interface = interface.decode('ascii')
return interface.split()[0]
# if above return is not OK, there might be error, return Blank
self.assertEqual(1, 0, msg="Host interface with %s is not found" % prefix)
@tag(FeatureID="IOTOS-489")
def test_ethernet_ipv6_ping(self):
'''Ping other device via ipv6 address of the ethernet
@fn test_ethernet_ipv6_ping
@param self
@return
'''
# Get target ipv6 address
ip6_address = self.get_ipv6()
# ping6 needs host's ethernet interface by -I,
# because default gateway is only for ipv4
host_eth = self.get_interface()
if type(host_eth) is bytes:
host_eth = host_eth.decode("ascii")
cmd = "ping6 -I %s %s -c 1" % (host_eth, ip6_address)
status, output = shell_cmd_timeout(cmd, timeout=60)
##
# TESTPOINT: #1, test_ethernet_ipv6_ping
#
self.assertEqual(status, 0, msg="Error messages: %s" % output)
@tag(FeatureID="IOTOS-489")
def test_ethernet_ipv6_ssh(self):
'''SSH other device via ipv6 address of the ethernet
@fn test_ethernet_ipv6_ssh
@param self
@return
'''
# Get target ipv6 address
ip6_address = self.get_ipv6()
# Same as ping6, ssh with ipv6 also need host's ethernet interface
# ssh root@<ipv6 address>%<eth>
host_eth = self.get_interface()
if type(host_eth) is bytes:
host_eth = host_eth.decode("ascii")
exp = os.path.join(os.path.dirname(__file__), "files/ipv6_ssh.exp")
cmd = "expect %s %s %s %s" % (exp, ip6_address, "ostro", host_eth)
status, output = shell_cmd_timeout(cmd, timeout=60)
if type(output) is bytes:
output = output.decode("ascii")
# In expect, it will input yes and password while login. And do 'ls /'
# If see /home folder, it will return 2 as successful status.
##
# TESTPOINT: #1, test_ethernet_ipv6_ssh
#
self.assertEqual(status, 2, msg="Error messages: %s" % output)
##
# @}
# @}
##
|
{
"content_hash": "9e7aaca2847ad698eed839bc16c52798",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 109,
"avg_line_length": 34.63503649635037,
"alnum_prop": 0.5656480505795575,
"repo_name": "ostroproject/meta-iotqa",
"id": "e4d3a79b4388790ef845077e612bbad62f677302",
"size": "4745",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/oeqa/runtime/ethernet/comm_ethernet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "BitBake",
"bytes": "6776"
},
{
"name": "C",
"bytes": "5633"
},
{
"name": "Java",
"bytes": "504"
},
{
"name": "JavaScript",
"bytes": "25975"
},
{
"name": "M4",
"bytes": "5945"
},
{
"name": "Makefile",
"bytes": "392"
},
{
"name": "Python",
"bytes": "552176"
},
{
"name": "Shell",
"bytes": "6714"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os.path
import time
import json
from six.moves import input
def print_shafts(draft, connected):
    """
    Print the shaft lift state, as for a table loom.

    Shafts in ``connected`` render as blanks in the top band and '#' in the
    bottom band; unconnected shafts are the opposite.
    """
    lifted = [shaft in connected for shaft in draft.shafts]
    top_row = ' '.join('    ' if is_up else '####' for is_up in lifted)
    bottom_row = ' '.join('####' if is_up else '    ' for is_up in lifted)
    print()
    print('\n'.join([top_row] * 5))
    print('\n'.join([bottom_row] * 5))
def describe_interval(secs):
    """
    Return a string describing the supplied number of seconds in human-readable
    time, e.g. "107 hours, 42 minutes".
    """
    if secs <= 0:
        return 'no time at all'
    hours, remainder = divmod(secs, 3600)
    minutes = remainder // 60
    pieces = []
    if hours:
        pieces.append('1 hour' if hours == 1 else '%d hours' % hours)
    if minutes:
        pieces.append('1 minute' if minutes == 1 else '%d minutes' % minutes)
    if not pieces:
        # Positive but under a minute.
        pieces.append('less than a minute')
    return ', '.join(pieces)
class StatCounter(object):
    """Track pick timestamps and report weaving-rate statistics."""

    def __init__(self, total_picks, average_over=10):
        # Timestamps (time.time()) of completed picks, oldest first.
        self.pick_times = []
        self.total_picks = total_picks
        # How many recent picks feed the rolling rate estimate.
        self.average_over = average_over

    def start(self):
        """Record the session start time."""
        self.start_time = time.time()

    def pick(self):
        """Record that one pick was just completed."""
        self.pick_times.append(time.time())

    def print_pick_stats(self):
        """Print the rolling pick rate and estimated time remaining."""
        window = self.pick_times[-self.average_over:]
        if len(window) >= self.average_over:
            # Full window: measure the rate over the window itself.
            span = window[-1] - window[0]
        else:
            # Too few picks so far: measure since the session started.
            span = window[-1] - self.start_time
        per_second = len(window) / span
        per_minute = per_second * 60.
        remaining = self.total_picks - len(self.pick_times)
        eta_secs = remaining / per_second
        print("Weaving %0.2f picks/min, %d picks left, est remaining: %s" %
              (per_minute, remaining, describe_interval(eta_secs)))

    def print_session_stats(self):
        """Print the whole-session pick count and average rate."""
        total_secs = self.pick_times[-1] - self.start_time
        done = len(self.pick_times)
        per_second = done / total_secs
        per_minute = per_second * 60.
        print("%d picks total, average %0.2f picks/min." %
              (done, per_minute))
def wait_for_key():
    """Block until the user presses Enter (any typed line is accepted)."""
    input('... ')
def load_save_file(save_filename):
    """Read saved weaving progress (a JSON object) from save_filename."""
    with open(save_filename) as handle:
        return json.load(handle)
def write_save_file(save_filename, obj):
    """Serialize obj as JSON into save_filename, replacing any old contents."""
    with open(save_filename, 'w') as handle:
        handle.write(json.dumps(obj))
def weaving(draft, repeats, start_repeat, start_pick, save_filename=None):
    """
    Print weaving instructions. Liftplan only for now.
    current_pick, start_repeat, and start_pick are 1-indexed.

    Walks every pick of every repeat, prompting for a keypress between
    picks, and optionally checkpoints progress to a JSON save file so an
    interrupted session can be resumed later.
    """
    print("\n---- WEAVING INSTRUCTIONS ----\n")
    picks_per_repeat = len(draft.weft)
    # Resume from the save file when one exists; otherwise start where the
    # caller asked (both values are 1-indexed).
    if save_filename and os.path.exists(save_filename):
        print("Resuming progress from %s." % save_filename)
        state = load_save_file(save_filename)
        current_repeat = state['current_repeat']
        current_pick = state['current_pick']
    else:
        current_repeat = start_repeat
        current_pick = start_pick
    # Remaining picks: whole repeats still to come, plus the rest of the
    # current repeat (+1 keeps the current pick itself in the count).
    total_picks = (((repeats - current_repeat) * picks_per_repeat) +
                   (picks_per_repeat - current_pick)) + 1
    stats = StatCounter(total_picks)
    stats.start()
    if save_filename:
        if not os.path.exists(save_filename):
            print("Saving progress to %s." % save_filename)
        else:
            print("Progress will be saved.")
    else:
        print("Not saving progress.")
    print("NOTE: Assumes that the lowest-numbered thread is on the right -->.")
    while True:
        if (current_pick - 1) == len(draft.weft):
            if current_repeat == repeats:
                break
            # Restart pattern
            print("-" * 79)
            print("REPEAT %d COMPLETE" % current_repeat)
            print("Restarting pattern...")
            print("-" * 79)
            current_repeat += 1
            current_pick = 1
        # Alternate shuttle direction every pick; the XOR flips the base
        # direction when the draft starts at the lowest thread.
        from_right = draft.start_at_lowest_thread ^ ((current_pick - 1) % 2)
        weft_thread = draft.weft[current_pick - 1]
        weft_color = weft_thread.color
        # NOTE(review): on the first pick this reads draft.weft[-1] (the
        # last thread) -- presumably intentional wrap-around so color
        # changes are announced across repeats; confirm.
        last_color = draft.weft[current_pick - 2].color
        if weft_color != last_color:
            print("COLOR CHANGE! %s -> %s" % (last_color, weft_color))
        print("\nREPEAT %d, PICK %d\n" % (current_repeat, current_pick))
        if from_right:
            print((" " * 40) + "<--- SHUTTLE %s" % weft_color)
        else:
            print("%s SHUTTLE --->" % weft_color)
        print_shafts(draft, weft_thread.connected_shafts)
        # Checkpoint before waiting, so an interrupted session resumes at
        # this exact pick.
        if save_filename:
            write_save_file(save_filename, {
                'current_repeat': current_repeat,
                'current_pick': current_pick,
            })
        try:
            wait_for_key()
        except EOFError:
            # Ctrl-D ends the session early: report stats and return.
            stats.print_session_stats()
            print("Ending session.")
            return
        current_pick += 1
        stats.pick()
        stats.print_pick_stats()
    print("DONE!")
default_colors = [
    'red',
    'yellow',
    'blue',
    'white',
]
# Map shaft indices 0-63 onto the palette, cycling through the colors.
default_color_table = {
    index: default_colors[index % len(default_colors)]
    for index in range(64)
}
def threading(draft, repeats=1, color_table=default_color_table):
    """
    Print threading instructions.

    First prints a per-shaft heddle count summary, then walks every warp
    thread (once per repeat), pausing for a keypress between threads.
    """
    # NOTE: this function shadows the stdlib `threading` module name; kept
    # as-is since callers import it by this name.
    print("\n---- THREADING INSTRUCTIONS ----\n")
    grand_total = 0
    for shaft_no, shaft in enumerate(draft.shafts, start=1):
        heddles = repeats * sum(1 for thread in draft.warp
                                if thread.shaft == shaft)
        grand_total += heddles
        print("Heddles on shaft %d: %d\t\t%s" %
              (shaft_no, heddles, color_table[shaft_no - 1]))
    print("Total heddles required: %d" % grand_total)
    for _repeat in range(repeats):
        for thread_no, warp_thread in enumerate(draft.warp, start=1):
            shaft_no = draft.shafts.index(warp_thread.shaft) + 1
            print("\nWarp thread %d: shaft %d\tthread: %s\theddle: %s" % (
                thread_no, shaft_no, warp_thread.color.rgb,
                color_table[shaft_no - 1]))
            wait_for_key()
    print("DONE!")
def tieup(draft):
    """
    Print tie-up instructions.

    Not yet implemented.
    """
    raise NotImplementedError
|
{
"content_hash": "e2290c673365eb2d90c46090e49e488a",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 30.12,
"alnum_prop": 0.5680979784565442,
"repo_name": "storborg/pyweaving",
"id": "de56604d28531b6af4cbcb0dfd0ce1cd545cb74a",
"size": "6777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyweaving/instructions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83129"
}
],
"symlink_target": ""
}
|
from sqlalchemy.test.testing import eq_
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import testing
from test.orm import _fixtures
from test.orm._base import MappedTest, ComparableEntity
from sqlalchemy.test.schema import Table, Column
class SingleInheritanceTest(MappedTest):
    """Exercise single-table inheritance: Employee, Manager, Engineer and
    JuniorEngineer are all mapped to the one 'employees' table,
    discriminated by the 'type' column."""
    @classmethod
    def define_tables(cls, metadata):
        # One table holds every employee subtype; 'type' is the
        # polymorphic discriminator column.
        Table('employees', metadata,
            Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('manager_data', String(50)),
            Column('engineer_info', String(50)),
            Column('type', String(20)))
        Table('reports', metadata,
              Column('report_id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('employee_id', ForeignKey('employees.employee_id')),
              Column('name', String(50)),
        )
    @classmethod
    def setup_classes(cls):
        class Employee(ComparableEntity):
            pass
        class Manager(Employee):
            pass
        class Engineer(Employee):
            pass
        class JuniorEngineer(Engineer):
            pass
    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        # Subclasses share Employee's table; each contributes only a
        # polymorphic_identity value stored in employees.type.
        mapper(Employee, employees, polymorphic_on=employees.c.type)
        mapper(Manager, inherits=Employee, polymorphic_identity='manager')
        mapper(Engineer, inherits=Employee, polymorphic_identity='engineer')
        mapper(JuniorEngineer, inherits=Engineer, polymorphic_identity='juniorengineer')
    @testing.resolve_artifact_names
    def test_single_inheritance(self):
        # Round trip: querying a subclass filters on the discriminator
        # (JuniorEngineer rows also count as Engineer).
        session = create_session()
        m1 = Manager(name='Tom', manager_data='knows how to manage things')
        e1 = Engineer(name='Kurt', engineer_info='knows how to hack')
        e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed')
        session.add_all([m1, e1, e2])
        session.flush()
        assert session.query(Employee).all() == [m1, e1, e2]
        assert session.query(Engineer).all() == [e1, e2]
        assert session.query(Manager).all() == [m1]
        assert session.query(JuniorEngineer).all() == [e2]
        m1 = session.query(Manager).one()
        # Expired attributes must reload correctly for the subclass.
        session.expire(m1, ['manager_data'])
        eq_(m1.manager_data, "knows how to manage things")
        row = session.query(Engineer.name, Engineer.employee_id).filter(Engineer.name=='Kurt').first()
        assert row.name == 'Kurt'
        assert row.employee_id == e1.employee_id
    @testing.resolve_artifact_names
    def test_multi_qualification(self):
        # Several single-table entities (including an alias) in one query
        # must each receive their own discriminator criterion.
        session = create_session()
        m1 = Manager(name='Tom', manager_data='knows how to manage things')
        e1 = Engineer(name='Kurt', engineer_info='knows how to hack')
        e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed')
        session.add_all([m1, e1, e2])
        session.flush()
        ealias = aliased(Engineer)
        eq_(
            session.query(Manager, ealias).all(),
            [(m1, e1), (m1, e2)]
        )
        eq_(
            session.query(Manager.name).all(),
            [("Tom",)]
        )
        eq_(
            session.query(Manager.name, ealias.name).all(),
            [("Tom", "Kurt"), ("Tom", "Ed")]
        )
        eq_(
            session.query(func.upper(Manager.name), func.upper(ealias.name)).all(),
            [("TOM", "KURT"), ("TOM", "ED")]
        )
        eq_(
            session.query(Manager).add_entity(ealias).all(),
            [(m1, e1), (m1, e2)]
        )
        eq_(
            session.query(Manager.name).add_column(ealias.name).all(),
            [("Tom", "Kurt"), ("Tom", "Ed")]
        )
        # TODO: I think raise error on this for now
        # self.assertEquals(
        #     session.query(Employee.name, Manager.manager_data, Engineer.engineer_info).all(),
        #     []
        # )
    @testing.resolve_artifact_names
    def test_select_from(self):
        # select_from() a limited subquery must still apply Manager's
        # discriminator filter.
        sess = create_session()
        m1 = Manager(name='Tom', manager_data='data1')
        m2 = Manager(name='Tom2', manager_data='data2')
        e1 = Engineer(name='Kurt', engineer_info='knows how to hack')
        e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed')
        sess.add_all([m1, m2, e1, e2])
        sess.flush()
        eq_(
            sess.query(Manager).select_from(employees.select().limit(10)).all(),
            [m1, m2]
        )
    @testing.resolve_artifact_names
    def test_count(self):
        # count() must honor the discriminator for each queried class.
        sess = create_session()
        m1 = Manager(name='Tom', manager_data='data1')
        m2 = Manager(name='Tom2', manager_data='data2')
        e1 = Engineer(name='Kurt', engineer_info='data3')
        e2 = JuniorEngineer(name='marvin', engineer_info='data4')
        sess.add_all([m1, m2, e1, e2])
        sess.flush()
        eq_(sess.query(Manager).count(), 2)
        eq_(sess.query(Engineer).count(), 2)
        eq_(sess.query(Employee).count(), 4)
        eq_(sess.query(Manager).filter(Manager.name.like('%m%')).count(), 2)
        eq_(sess.query(Employee).filter(Employee.name.like('%m%')).count(), 3)
    @testing.resolve_artifact_names
    def test_type_filtering(self):
        # of_type() inside has() restricts the relationship target subtype.
        class Report(ComparableEntity): pass
        mapper(Report, reports, properties={
            'employee': relationship(Employee, backref='reports')})
        sess = create_session()
        m1 = Manager(name='Tom', manager_data='data1')
        r1 = Report(employee=m1)
        sess.add_all([m1, r1])
        sess.flush()
        rq = sess.query(Report)
        assert len(rq.filter(Report.employee.of_type(Manager).has()).all()) == 1
        assert len(rq.filter(Report.employee.of_type(Engineer).has()).all()) == 0
    @testing.resolve_artifact_names
    def test_type_joins(self):
        # Same subtype restriction as above, but via join() instead of has().
        class Report(ComparableEntity): pass
        mapper(Report, reports, properties={
            'employee': relationship(Employee, backref='reports')})
        sess = create_session()
        m1 = Manager(name='Tom', manager_data='data1')
        r1 = Report(employee=m1)
        sess.add_all([m1, r1])
        sess.flush()
        rq = sess.query(Report)
        assert len(rq.join(Report.employee.of_type(Manager)).all()) == 1
        assert len(rq.join(Report.employee.of_type(Engineer)).all()) == 0
class RelationshipToSingleTest(MappedTest):
    """Relationships that point at single-table-inheritance subclasses:
    of_type() traversal and a relationship targeting Engineer directly."""
    @classmethod
    def define_tables(cls, metadata):
        Table('employees', metadata,
            Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('manager_data', String(50)),
            Column('engineer_info', String(50)),
            Column('type', String(20)),
            Column('company_id', Integer, ForeignKey('companies.company_id'))
        )
        Table('companies', metadata,
            Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(50)),
            )
    @classmethod
    def setup_classes(cls):
        class Company(ComparableEntity):
            pass
        class Employee(ComparableEntity):
            pass
        class Manager(Employee):
            pass
        class Engineer(Employee):
            pass
        class JuniorEngineer(Engineer):
            pass
    @testing.resolve_artifact_names
    def test_of_type(self):
        # Mappers are built per-test here (unlike SingleInheritanceTest)
        # because each test wires up different relationships.
        mapper(Company, companies, properties={
            'employees':relationship(Employee, backref='company')
        })
        mapper(Employee, employees, polymorphic_on=employees.c.type)
        mapper(Manager, inherits=Employee, polymorphic_identity='manager')
        mapper(Engineer, inherits=Employee, polymorphic_identity='engineer')
        mapper(JuniorEngineer, inherits=Engineer, polymorphic_identity='juniorengineer')
        sess = sessionmaker()()
        c1 = Company(name='c1')
        c2 = Company(name='c2')
        m1 = Manager(name='Tom', manager_data='data1', company=c1)
        m2 = Manager(name='Tom2', manager_data='data2', company=c2)
        e1 = Engineer(name='Kurt', engineer_info='knows how to hack', company=c2)
        e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed', company=c1)
        sess.add_all([c1, c2, m1, m2, e1, e2])
        sess.commit()
        sess.expunge_all()
        # any()/join() through of_type(JuniorEngineer) must exclude the
        # plain Engineer attached to c2.
        eq_(
            sess.query(Company).filter(Company.employees.of_type(JuniorEngineer).any()).all(),
            [
                Company(name='c1'),
            ]
        )
        eq_(
            sess.query(Company).join(Company.employees.of_type(JuniorEngineer)).all(),
            [
                Company(name='c1'),
            ]
        )
    @testing.resolve_artifact_names
    def test_relationship_to_subclass(self):
        # Company.engineers targets the Engineer subclass directly, so its
        # join criterion must carry Engineer's discriminator.
        mapper(Company, companies, properties={
            'engineers':relationship(Engineer)
        })
        mapper(Employee, employees, polymorphic_on=employees.c.type, properties={
            'company':relationship(Company)
        })
        mapper(Manager, inherits=Employee, polymorphic_identity='manager')
        mapper(Engineer, inherits=Employee, polymorphic_identity='engineer')
        mapper(JuniorEngineer, inherits=Engineer, polymorphic_identity='juniorengineer')
        sess = sessionmaker()()
        c1 = Company(name='c1')
        c2 = Company(name='c2')
        m1 = Manager(name='Tom', manager_data='data1', company=c1)
        m2 = Manager(name='Tom2', manager_data='data2', company=c2)
        e1 = Engineer(name='Kurt', engineer_info='knows how to hack', company=c2)
        e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed', company=c1)
        sess.add_all([c1, c2, m1, m2, e1, e2])
        sess.commit()
        # Managers must never appear in Company.engineers.
        eq_(c1.engineers, [e2])
        eq_(c2.engineers, [e1])
        sess.expunge_all()
        eq_(sess.query(Company).order_by(Company.name).all(),
            [
                Company(name='c1', engineers=[JuniorEngineer(name='Ed')]),
                Company(name='c2', engineers=[Engineer(name='Kurt')])
            ]
        )
        # eager load join should limit to only "Engineer"
        sess.expunge_all()
        eq_(sess.query(Company).options(joinedload('engineers')).order_by(Company.name).all(),
            [
                Company(name='c1', engineers=[JuniorEngineer(name='Ed')]),
                Company(name='c2', engineers=[Engineer(name='Kurt')])
            ]
        )
        # join() to Company.engineers, Employee as the requested entity
        sess.expunge_all()
        eq_(sess.query(Company, Employee).join(Company.engineers).order_by(Company.name).all(),
            [
                (Company(name='c1'), JuniorEngineer(name='Ed')),
                (Company(name='c2'), Engineer(name='Kurt'))
            ]
        )
        # join() to Company.engineers, Engineer as the requested entity.
        # this actually applies the IN criterion twice which is less than ideal.
        sess.expunge_all()
        eq_(sess.query(Company, Engineer).join(Company.engineers).order_by(Company.name).all(),
            [
                (Company(name='c1'), JuniorEngineer(name='Ed')),
                (Company(name='c2'), Engineer(name='Kurt'))
            ]
        )
        # join() to Company.engineers without any Employee/Engineer entity
        sess.expunge_all()
        eq_(sess.query(Company).join(Company.engineers).filter(Engineer.name.in_(['Tom', 'Kurt'])).all(),
            [
                Company(name='c2')
            ]
        )
        # this however fails as it does not limit the subtypes to just "Engineer".
        # with joins constructed by filter(), we seem to be following a policy where
        # we don't try to make decisions on how to join to the target class, whereas when using join() we
        # seem to have a lot more capabilities.
        # we might want to document "advantages of join() vs. straight filtering", or add a large
        # section to "inheritance" laying out all the various behaviors Query has.
        @testing.fails_on_everything_except()
        def go():
            sess.expunge_all()
            eq_(sess.query(Company).\
                filter(Company.company_id==Engineer.company_id).filter(Engineer.name.in_(['Tom', 'Kurt'])).all(),
                [
                    Company(name='c2')
                ]
            )
        go()
class SingleOnJoinedTest(MappedTest):
    """Single-table inheritance (Manager) layered on a joined-table base
    (Person -> Employee): Manager adds no table of its own."""
    @classmethod
    def define_tables(cls, metadata):
        # NOTE(review): tables are stashed in module globals here rather
        # than resolved via resolve_artifact_names as in the other suites.
        global persons_table, employees_table
        persons_table = Table('persons', metadata,
            Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(20), nullable=False)
        )
        employees_table = Table('employees', metadata,
            Column('person_id', Integer, ForeignKey('persons.person_id'),primary_key=True),
            Column('employee_data', String(50)),
            Column('manager_data', String(50)),
        )
    def test_single_on_joined(self):
        class Person(_fixtures.Base):
            pass
        class Employee(Person):
            pass
        class Manager(Employee):
            pass
        # Employee joins to its own table; Manager is single-inheritance on
        # top of Employee's joined table.
        mapper(Person, persons_table, polymorphic_on=persons_table.c.type, polymorphic_identity='person')
        mapper(Employee, employees_table, inherits=Person,polymorphic_identity='engineer')
        mapper(Manager, inherits=Employee,polymorphic_identity='manager')
        sess = create_session()
        sess.add(Person(name='p1'))
        sess.add(Employee(name='e1', employee_data='ed1'))
        sess.add(Manager(name='m1', employee_data='ed2', manager_data='md1'))
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).order_by(Person.person_id).all(), [
            Person(name='p1'),
            Employee(name='e1', employee_data='ed1'),
            Manager(name='m1', employee_data='ed2', manager_data='md1')
        ])
        sess.expunge_all()
        eq_(sess.query(Employee).order_by(Person.person_id).all(), [
            Employee(name='e1', employee_data='ed1'),
            Manager(name='m1', employee_data='ed2', manager_data='md1')
        ])
        sess.expunge_all()
        eq_(sess.query(Manager).order_by(Person.person_id).all(), [
            Manager(name='m1', employee_data='ed2', manager_data='md1')
        ])
        sess.expunge_all()
        # with_polymorphic('*') should load all subtypes in one SELECT.
        def go():
            eq_(sess.query(Person).with_polymorphic('*').order_by(Person.person_id).all(), [
                Person(name='p1'),
                Employee(name='e1', employee_data='ed1'),
                Manager(name='m1', employee_data='ed2', manager_data='md1')
            ])
        self.assert_sql_count(testing.db, go, 1)
|
{
"content_hash": "74e71f8bb3d674437e6fec1526ca22b3",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 113,
"avg_line_length": 37.17955112219451,
"alnum_prop": 0.57233885572473,
"repo_name": "dbbhattacharya/kitsune",
"id": "4b7078eb51d96cf6c0b507cdcc59a12c409d075d",
"size": "14909",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "vendor/packages/sqlalchemy/test/orm/inheritance/test_single.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
# Django-style choices: (stored value, human-readable label) pairs, with
# the label mirroring the stored value for every category.
# NOTE(review): 'Accesory' is misspelled ('Accessory'); left unchanged
# because the stored value may already exist in persisted data.
NAME_CHOICES = tuple(
    (category, category)
    for category in (
        'Apparel/Accesory',
        'Entertainment',
        'Food/Beverage',
        'Skin care/Cosmetics',
        'Computer/Mobile',
        'Books/Newspapers',
        'Other',
    )
)
|
{
"content_hash": "c448c3b770d979ce4293c1dce64fdaeb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6632302405498282,
"repo_name": "sSwiergosz/FinancialOrganiser",
"id": "09ee3eebecb15ef0965bd6120fa750ef7eef5425",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "organizer_project/organizer/choices.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4620"
},
{
"name": "HTML",
"bytes": "21061"
},
{
"name": "JavaScript",
"bytes": "319330"
},
{
"name": "Python",
"bytes": "31837"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from datetime import timedelta
from numpy import random
from django.core.urlresolvers import reverse
from django.views.generic.base import TemplateView
from braces.views import JSONResponseMixin
from corehq.apps.reports_core.exceptions import FilterException
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import json_request
from no_exceptions.exceptions import Http403
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.api import ReportDataSource
from corehq.apps.reports_core.filters import DatespanFilter
# Lightweight column spec: returned by TestReportData.columns() and turned
# into DataTablesColumn headers by TestReport.headers.
Column = namedtuple("Column", ["slug", "display_name", "sortable"])
class TestReportData(ReportDataSource):
    """Demo report data source yielding a random 'people tested' count per
    day of the configured datespan."""
    title = "Test Report"
    slug = "test_report"
    filters = [
        DatespanFilter(name='datespan', required=False),
    ]
    def columns(self):
        # Static column metadata consumed by TestReport.headers.
        return [
            Column(slug="date", display_name="Date", sortable=True),
            Column(slug="people_tested", display_name="People Tested", sortable=False),
        ]
    @property
    def total_days(self):
        # Length of the configured datespan in whole days.
        return int((self.config['datespan'].enddate - self.config['datespan'].startdate).days)
    def daterange(self):
        # Yield only the page of dates selected by the pagination config,
        # walking back from the end date when sorted descending on 'date'.
        p = self.config['pagination']
        days = range(self.total_days)[p.start:p.offset]
        order = self.config['ordering'].get('date', None)
        desc = order and order.desc
        for n in days:
            if desc:
                yield self.config['datespan'].enddate - timedelta(n)
            else:
                yield self.config['datespan'].startdate + timedelta(n)
    def get_data(self):
        # replace this with a generic has_parameter method
        # NOTE(review): this checks for a 'datespan' *attribute* while the
        # filter value is placed in self.config -- confirm configure()
        # also sets the attribute, otherwise no rows are ever yielded.
        if hasattr(self, 'datespan'):
            for date in self.daterange():
                yield {
                    'date': date,
                    'people_tested': random.randint(0, 50)
                }
    def get_total_records(self):
        return self.total_days
class TestReport(JSONResponseMixin, TemplateView):
    """Django view serving the demo report: HTML template on a normal GET,
    DataTables-style JSON on AJAX or ?format=json requests."""
    template_name = 'reports_core/base_template_new.html'
    data_model = TestReportData
    def dispatch(self, request, domain=None, **kwargs):
        user = request.couch_user
        if self.has_permissions(domain, user):
            # AJAX / explicit JSON requests bypass the template entirely.
            if request.is_ajax() or request.GET.get('format', None) == 'json':
                return self.get_ajax(request, domain, **kwargs)
            self.content_type = None
            return super(TestReport, self).dispatch(request, domain, **kwargs)
        else:
            raise Http403()
    def has_permissions(self, domain, user):
        # Demo view: everyone is allowed.
        return True
    def get_context_data(self, **kwargs):
        # get filter context namespaced by slug
        # NOTE(review): filter_params is keyed by filter.name but indexed
        # here by filter.css_id -- this only works if the two values are
        # equal; confirm.
        filter_context = {}
        for filter in self.data_model.filters:
            filter_context[filter.name] = filter.context(self.filter_params[filter.css_id])
        return {
            'project': self.domain,
            'report': self.data_model,
            'filter_context': filter_context,
            'url': self.reverse(self.domain),
            'headers': self.headers,
        }
    @property
    def headers(self):
        # Build DataTables header objects from the data source's columns.
        data = self.data_model()
        def make_column(col):
            return DataTablesColumn(col.display_name, data_slug=col.slug, sortable=col.sortable)
        columns = map(make_column, data.columns())
        return DataTablesHeader(*columns)
    @property
    def domain(self):
        return getattr(self.request, 'domain', None)
    @property
    @memoized
    def request_dict(self):
        # GET parameters plus the domain, decoded once per request.
        params = json_request(self.request.GET)
        params['domain'] = self.domain
        return params
    @property
    @memoized
    def filter_params(self):
        # Filter values extracted from the request, keyed by filter name.
        request_dict = self.request_dict
        return {
            filter.name: filter.get_value(request_dict)
            for filter in self.data_model.filters
        }
    def get_ajax(self, request, domain=None, **kwargs):
        # DataTables server-side protocol: echo sEcho, report totals, and
        # return the requested page of rows as aaData.
        try:
            data = self.data_model()
            params = self.filter_params
            params['ordering'] = datatables_ordering(self.request_dict, data.columns())
            params['pagination'] = datatables_paging(self.request_dict)
            data.configure(params)
        except FilterException as e:
            return {
                'error': e.message
            }
        total_records = data.get_total_records()
        return self.render_json_response({
            'data_keys': data.slugs(),
            'aaData': list(data.get_data()),
            "sEcho": self.request_dict.get('sEcho', 0),
            "iTotalRecords": total_records,
            "iTotalDisplayRecords": total_records,
        })
    def _get_initial(self, request, **kwargs):
        pass
    @classmethod
    def reverse(cls, domain):
        return reverse(cls.data_model.slug, args=[domain])
    @classmethod
    def url_pattern(cls):
        # URL named after the data source's slug, e.g. /test_report/.
        from django.conf.urls import url
        pattern = r'^{slug}/$'.format(slug=cls.data_model.slug)
        return url(pattern, cls.as_view(), name=cls.data_model.slug)
# Parsed forms of DataTables request parameters: a per-column sort order
# and the paging window (offset is the exclusive end index, start + limit).
OrderedColumn = namedtuple("OrderedColumn", ["slug", "desc"])
PaginationSpec = namedtuple("PaginationSpec", ["start", "limit", "offset"])
def datatables_ordering(request_dict, columns):
    """
    Parse DataTables sorting parameters (iSortingCols, iSortCol_N,
    sSortDir_N) into a dict mapping column slug -> OrderedColumn.

    Malformed or missing numeric parameters fall back to 0 rather than
    raising, since these values come straight from the client request.
    """
    try:
        i_sorting_cols = int(request_dict.get('iSortingCols', 0))
    except ValueError:
        i_sorting_cols = 0
    ordering = {}
    for i in range(i_sorting_cols):
        try:
            # Bug fix: the original called int(request_dict.get(...)) with
            # no default, so a missing iSortCol_N raised TypeError from
            # int(None), which the ValueError handler did not catch.
            i_sort_col = int(request_dict.get('iSortCol_%s' % i, 0))
        except (TypeError, ValueError):
            i_sort_col = 0
        # sorting order: anything other than 'desc' sorts ascending
        s_sort_dir = request_dict.get('sSortDir_%s' % i)
        desc = s_sort_dir == 'desc'
        slug = columns[i_sort_col].slug
        ordering[slug] = OrderedColumn(slug, desc=desc)
    return ordering
def datatables_paging(request_dict):
    """Build a PaginationSpec from DataTables paging parameters
    (iDisplayStart/iDisplayLength); offset is the exclusive end index."""
    page_size = int(request_dict.get('iDisplayLength', 10))
    first_row = int(request_dict.get('iDisplayStart', 0))
    return PaginationSpec(start=first_row, limit=page_size,
                          offset=first_row + page_size)
|
{
"content_hash": "7d186f0d2b734cb644aa57d0e002c71d",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 96,
"avg_line_length": 31.625,
"alnum_prop": 0.6174242424242424,
"repo_name": "puttarajubr/commcare-hq",
"id": "5b37da68fd9541ffbd9d45246aa2028ec8626477",
"size": "6072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/example_reports/testreport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
r"""Convert old ("regex") regular expressions to new syntax ("re").
When imported as a module, there are two functions, with their own
strings:
convert(s, syntax=None) -- convert a regex regular expression to re syntax
quote(s) -- return a quoted string literal
When used as a script, read a Python string literal (or any other
expression evaluating to a string) from stdin, and write the
translated expression to stdout as a string literal. Unless stdout is
a tty, no trailing \n is written to stdout. This is done so that it
can be used with Emacs C-U M-| (shell-command-on-region with argument
which filters the region through the shell command).
No attempt has been made at coding for performance.
Translation table...
\( ( (unless RE_NO_BK_PARENS set)
\) ) (unless RE_NO_BK_PARENS set)
\| | (unless RE_NO_BK_VBAR set)
\< \b (not quite the same, but alla...)
\> \b (not quite the same, but alla...)
\` \A
\' \Z
Not translated...
.
^
$
*
+ (unless RE_BK_PLUS_QM set, then to \+)
? (unless RE_BK_PLUS_QM set, then to \?)
\
\b
\B
\w
\W
\1 ... \9
Special cases...
Non-printable characters are always replaced by their 3-digit
escape code (except \t, \n, \r, which use mnemonic escapes)
Newline is turned into | when RE_NEWLINE_OR is set
XXX To be done...
[...] (different treatment of backslashed items?)
[^...] (different treatment of backslashed items?)
^ $ * + ? (in some error contexts these are probably treated differently)
\vDD \DD (in the regex docs but only works when RE_ANSI_HEX set)
"""
import regex
from regex_syntax import * # RE_*
__all__ = ["convert","quote"]
# Default translation table
# Maps old "regex"-module tokens to their "re"-module equivalents; convert()
# copies this and then removes/overrides entries according to the syntax
# flag bits before translating the pattern.
mastertable = {
    r'\<': r'\b',
    r'\>': r'\b',
    r'\`': r'\A',
    r'\'': r'\Z',
    r'\(': '(',
    r'\)': ')',
    r'\|': '|',
    '(': r'\(',
    ')': r'\)',
    '|': r'\|',
    '\t': r'\t',
    '\n': r'\n',
    '\r': r'\r',
}
def convert(s, syntax=None):
    """Convert a regex regular expression to re syntax.

    The first argument is the regular expression, as a string object,
    just like it would be passed to regex.compile() (i.e. the actual
    string object -- quotes removed and escapes already processed,
    e.g. by eval()).

    The optional second argument is the regex syntax variant: an integer
    mask as passed to regex.set_syntax(), with flag bits defined in
    regex_syntax. When None, the current regex syntax mask (from
    regex.get_syntax(), 0 by default) is used.

    Returns a regular expression string suitable for re.compile() --
    no string quotes are added; use quote() below, or repr().

    The conversion is not always guaranteed to be correct; borderline
    cases such as 'x*?' are not translated correctly.
    """
    table = mastertable.copy()
    if syntax is None:
        syntax = regex.get_syntax()
    # Prune/override table entries according to the active syntax flags.
    if syntax & RE_NO_BK_PARENS:
        del table[r'\('], table[r'\)']
        del table['('], table[')']
    if syntax & RE_NO_BK_VBAR:
        del table[r'\|']
        del table['|']
    if syntax & RE_BK_PLUS_QM:
        table['+'] = r'\+'
        table['?'] = r'\?'
        table[r'\+'] = '+'
        table[r'\?'] = '?'
    if syntax & RE_NEWLINE_OR:
        table['\n'] = '|'
    # Walk the pattern, translating one (possibly backslashed) token at a
    # time; untranslated tokens pass through unchanged.
    pieces = []
    i, end = 0, len(s)
    while i < end:
        ch = s[i]
        i += 1
        if ch == '\\':
            token = '\\' + s[i]
            i += 1
            pieces.append(table.get(token, token))
        else:
            pieces.append(table.get(ch, ch))
    return ''.join(pieces)
def quote(s, quote=None):
    """Convert a string object to a quoted string literal.

    This is similar to repr() but will return a "raw" string (r'...'
    or r"...") when the string contains backslashes, instead of
    doubling all backslashes.  The resulting string does *not* always
    evaluate to the same string as the original; however it will do
    just the right thing when passed into re.compile().

    The optional second argument forces the string quote; it must be
    a single character which is a valid Python string quote.
    """
    if quote is None:
        q = "'"
        # Bug fix: altq was "'" (identical to q), so the alternate-quote
        # branch below was dead code and the r"..." form promised by the
        # docstring could never be produced.  The alternate quote must be
        # the double quote.
        altq = '"'
        # Prefer the alternate quote when it avoids escaping.
        if q in s and altq not in s:
            q = altq
    else:
        assert quote in ('"', "'")
        q = quote
    res = q
    for c in s:
        if c == q: c = '\\' + c
        elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
        res = res + c
    res = res + q
    # Any backslash in the literal forces the raw-string form.
    if '\\' in res:
        res = 'r' + res
    return res
def main():
    """Main program -- called when run as a script.

    Reads a Python expression evaluating to a string from stdin and
    writes the converted pattern to stdout as a quoted literal.
    """
    import sys
    # NOTE(review): eval() of stdin executes arbitrary code; only safe for
    # trusted input (the module docstring expects a string literal).
    s = eval(sys.stdin.read())
    sys.stdout.write(quote(convert(s)))
    # Only add a trailing newline on a tty, so shell pipelines (e.g. the
    # Emacs shell-command-on-region use case) stay clean.
    if sys.stdout.isatty():
        sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
{
"content_hash": "9b5e296b88fccce73eda5099cd02110f",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 77,
"avg_line_length": 27.903225806451612,
"alnum_prop": 0.5676300578034682,
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"id": "59708dac9f04a9d7eb9bfef5411a776706b40baf",
"size": "5217",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/reconvert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "HTML",
"bytes": "2243"
},
{
"name": "Java",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2973691"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import colino.digraph
# `import colino.digraph` only binds the name `colino`; bind the submodule
# name `digraph` that the classes below actually reference.
from colino import digraph
class Condition(object):
    """Abstract base class for rule conditions."""
    def evaluate(self, context, event):
        """Return True or False for the given context and event.

        Subclasses must override this method.
        """
        # Bug fix: `raise NotImplemented` raises a TypeError at runtime
        # (NotImplemented is a sentinel value, not an exception class);
        # NotImplementedError is the intended abstract-method signal.
        raise NotImplementedError
class RuleNode(digraph.Node):
    """Graph node that tracks how many times the same edge has been taken
    consecutively, to support repetition rules."""
    def __init__(self, time_limit):
        # Bug fix: the original called super(Node, self) but no name `Node`
        # exists in this module -- that raised NameError on construction;
        # the enclosing class is RuleNode.
        super(RuleNode, self).__init__()
        self.time_limit = time_limit
        # last edge status used to implement repetition rules
        self.last_edge = None
        self.last_edge_count = 0
    def update_last_edge(self, edge):
        """Record a traversal of `edge`, counting consecutive repeats."""
        if self.last_edge == edge:
            self.last_edge_count += 1
        else:
            self.last_edge = edge
            self.last_edge_count = 1
class StartRuleNode(digraph.Node):
    """Marker node type: the entry point of a rule graph."""
    pass
class AcceptRuleNode(digraph.Node):
    """Marker node type: reaching this node accepts the rule."""
    pass
class RejectRuleNode(digraph.Node):
    """Marker node type: reaching this node rejects the rule."""
    pass
class RuleEdge(digraph.Edge):
    """Edge carrying a Condition and a firing threshold."""
    def __init__(self, condition, threshold):
        # NOTE(review): does not call super().__init__(); confirm that
        # digraph.Edge requires no initialization of its own.
        self.condition = condition
        self.threshold = threshold
class RuleGraph(digraph.DiGraph):
    """Directed graph of rule nodes with distinguished start and accept
    nodes.

    NOTE(review): the original __init__ was syntactically incomplete
    (`self.start =` and `self.accept_rule_node =` with no right-hand
    sides -- a SyntaxError); the assignments below are a reconstruction
    using the marker node types defined above.  Confirm against the
    intended design.
    """
    def __init__(self):
        super(RuleGraph, self).__init__()
        self.start = StartRuleNode()
        self.accept_rule_node = AcceptRuleNode()
|
{
"content_hash": "f7da3fa9f3e37eaf4f1ada09fbe3a90c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 66,
"avg_line_length": 21.339285714285715,
"alnum_prop": 0.5790794979079498,
"repo_name": "gmambro/colino",
"id": "52d3a29576eaa1204329e0985fb170f93737f80f",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colino/rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28755"
}
],
"symlink_target": ""
}
|
class LocalFTP(object):
    """
    Wrap an ftplib.FTP connection and add a recursive nlst() (rnlst()).

    The underlying ftplib.FTP instance lives in the `connection`
    attribute and is also exposed through __getattr__, so this object can
    be used wherever a plain FTP object would be.

    Recursive listing walks every subdirectory, so it is best suited to
    fast local-network servers or small remote directories.

    Directory listings are parsed by an externally supplied callable
    bound to the `dirparser` attribute (pass it to the constructor via
    the `dirparser` keyword, or assign it afterwards).  The callable
    receives the raw lines produced by ftplib.FTP.dir() and must return
    a dict where:

    -- 'trycwds' maps to a list of booleans,
    -- 'names' maps to a list of filenames from the listing,
    -- the two lists have equal length, and a True in 'trycwds' marks the
       corresponding entry in 'names' as a directory,
    -- key names follow the ftpparse structure fields,
    -- extra keys are permitted but ignored by rnlst(),
    -- an empty dict() means the listing had nothing to report.

    This module supplies two parsers that seem to work ok (parse_windows
    for Windows ftp servers, parse_unix for UNIX ones); writing another
    for a different server format should be straightforward.
    """
    def __init__(self, host='', user='', passwd='', acct='',
                 dirparser=None):
        self.connection = ftplib.FTP(host, user, passwd, acct)
        self.remotepathsep = '/'
        self.dirparser = dirparser
    def __getattr__(self, name):
        """Delegate unknown attribute lookups to the wrapped FTP object."""
        return getattr(self.connection, name)
    def _dir(self, path):
        """Return the raw dir() listing for `path` as a list of lines.

        Permission errors are reported as warnings and yield an empty
        (or partial) listing rather than raising.
        """
        lines = []
        try:
            self.connection.dir(path, lines.append)
        except ftplib.error_perm:
            warnings.warn('Access denied for path %s'%path)
        return lines
    def parsedir(self, path=''):
        """Parse the dir() listing for `path` via the dirparser callable.

        Raises TypeError if no dirparser has been set.
        """
        if self.dirparser is None:
            raise TypeError('Must set dirparser attribute to a callable '
                            'before calling this method')
        return self.dirparser(self._dir(path))
    def _cleanpath(self, path):
        """Collapse repeated separators in `path` and strip a trailing one."""
        sep = self.remotepathsep
        while sep * 2 in path:
            path = path.replace(sep * 2, sep)
        if path.endswith(sep):
            path = path[:-1]
        return path
    def _rnlst(self, path, filelist):
        """Recursively accumulate file paths under `path` into filelist."""
        path = self._cleanpath(path)
        listing = self.parsedir(path)
        join = self.remotepathsep.join
        for is_dir, name in zip(listing.get('trycwds', []),
                                listing.get('names', [])):
            if is_dir:
                # Directory entry: descend into it.
                self._rnlst(join([path, name]), filelist)
            else:
                filelist.append(join([path, name]))
        return filelist
    def rnlst(self, path=''):
        """
        Recursive nlst(). Return a list of filenames under path.
        """
        return self._rnlst(path, [])
# Naive ftplib.FTP.dir() parsing functions, which may or may not work. (These
# happen to work for servers I connect to.) Create your own functions (perhaps
# using ftpparse) for more robust solutions.
def parse_windows(dirlines):
    """
    Parse the lines returned by ftplib.FTP.dir() on a Windows ftp
    server into a dict of parallel columns.

    Returns a dict mapping 'dates', 'times', 'trycwds', 'sizes' and
    'names' to tuples with one entry per listing line; 'trycwds'
    holds booleans (True for directories).  Returns an empty dict for
    an empty listing.  The column positions are fixed-width guesses
    that work for common Windows servers but are not guaranteed
    universally.
    """
    typemap = {'<DIR>': True}
    if not dirlines:
        return dict()
    maxlen = max(len(line) for line in dirlines)
    # Fixed-width column layout of a typical Windows dir listing:
    # date, time, <DIR> marker, size, name.
    columns = [slice(0, 9), slice(9, 17), slice(17, 29), slice(29, 38),
               slice(38, maxlen + 1)]
    fields = 'dates times trycwds sizes names'.split()
    values = []
    for line in dirlines:
        vals = [line[slc].strip() for slc in columns]
        # '<DIR>' marks a directory; anything else is a plain file.
        vals[2] = typemap.get(vals[2], False)
        values.append(vals)
    # BUGFIX: on Python 3 zip() returns a lazy iterator, so the
    # original len(zip(...)) raised TypeError; materialize it first.
    lists = list(zip(*values))
    assert len(lists) == len(fields)
    return dict(zip(fields, lists))
def parse_unix(dirlines, startindex=1):
    """
    Parse the lines returned by ftplib.FTP.dir() on a UNIX ftp server
    into a dict of parallel columns.

    The first *startindex* lines are skipped (the usual 'total N'
    header).  Returns a dict mapping 'trycwds', 'tryretrs', 'inodes',
    'users', 'groups', 'sizes', 'dates' and 'names' to per-line
    sequences; 'trycwds' holds booleans (True for directories).
    Returns an empty dict when there is nothing to parse.
    """
    dirlines = dirlines[startindex:]
    if not dirlines:
        return dict()
    # BUGFIX: use raw string literals -- '\s' inside a plain string is
    # an invalid escape sequence (SyntaxWarning on modern Python).
    pattern = re.compile(r'(.)(.*?)\s+(.*?)\s+(.*?)\s+(.*?)\s+'
                         r'(.*?)\s+(.*?\s+.*?\s+.*?)\s+(.*)')
    fields = 'trycwds tryretrs inodes users groups sizes dates names'.split()
    matches = [line for line in dirlines if pattern.search(line)]
    # BUGFIX: original code crashed with IndexError when no line
    # matched; treat that like an empty listing instead.
    if not matches:
        return dict()
    # One tuple of captured groups per line, transposed into columns.
    # BUGFIX: on Python 3 zip() is a lazy iterator, so the original
    # item assignment and len() call both raised TypeError; build a
    # real list first.
    lists = list(zip(*(pattern.findall(line)[0] for line in matches)))
    # The first capture is the file-type flag: 'd' marks a directory.
    lists[0] = ['d' == s for s in lists[0]]
    assert len(lists) == len(fields)
    return dict(zip(fields, lists))
|
{
"content_hash": "0ba99a40a28af82203543daa585e2d94",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.423645320197046,
"alnum_prop": 0.599969614099058,
"repo_name": "ActiveState/code",
"id": "f413263e6010c402888becace041b616a4adfa8c",
"size": "6582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/275594_Recursive_nlst_wFTP_Directory_Tree/recipe-275594.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from backend.models import Festival
from backend.tests.helpers import login, create_user, create_festival
from backend.tests.helpers import create_client
class UpdateFestivalInfoTests(TestCase):
    """Tests for the /backend/u/fest/ festival-update endpoint."""

    def test_no_client_name_provided(self):
        """
        A POST without a client name must be answered with
        "Client name not provided".
        """
        login(self.client)
        resp = self.client.post('/backend/u/fest/', {'id': 3})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual('Client name not provided',
                         resp.content.decode('utf-8'))

    def test_no_permissions(self):
        """
        A client without write access must be answered with
        "Permission not granted".
        """
        login(self.client)
        api_client = create_client('test')
        api_client.write_access = False
        api_client.save()
        resp = self.client.post('/backend/u/fest/',
                                {'client': 'test', 'id': 3})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual('Permission not granted',
                         resp.content.decode('utf-8'))

    def test_not_owner(self):
        """
        A user other than the festival owner must be answered with
        "Permission not granted".
        """
        creator = create_user()
        creator.save()
        fest = create_festival('test', creator)
        fest.save()
        login(self.client)
        api_client = create_client('test')
        api_client.delete_access = True
        api_client.save()
        resp = self.client.post('/backend/u/fest/',
                                {'client': 'test', 'id': 3})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual('Permission not granted',
                         resp.content.decode('utf-8'))

    def test_no_matching_festivals(self):
        """
        An unknown festival id must be answered with
        "Invalid Festival ID"; no festival is updated.
        """
        owner = login(self.client)
        api_client = create_client('test')
        api_client.write_access = True
        api_client.save()
        # Several festivals exist, but none with the requested id.
        for fest_name in ('test', 'testest', 'testestest'):
            create_festival(fest_name, owner).save()
        resp = self.client.post('/backend/u/fest/',
                                {'client': 'test', 'id': 15})
        self.assertEqual('Invalid Festival ID',
                         resp.content.decode('utf-8'))

    def test_invalid_fields(self):
        """
        Malformed field values must be answered with "Incorrect input"
        and leave the festival unchanged.
        """
        owner = login(self.client)
        api_client = create_client('test')
        api_client.write_access = True
        api_client.save()
        fest = create_festival('test', owner)
        fest.save()
        # 'country' is far too long and 'prices' is not numeric.
        payload = {'client': 'test',
                   'id': fest.pk,
                   'name': 'test',
                   'description': 'test',
                   'country':
                       'impossiblylongandabsurdnameforacountrythatreallyshouldntexist',
                   'city': 'test',
                   'address': 'test',
                   'genre': 'test',
                   'prices': '0e',
                   'owner': 'test',
                   'official': False}
        resp = self.client.post('/backend/u/fest/', payload)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual('Incorrect input', resp.content.decode('utf-8'))
        fest = Festival.objects.get(pk=fest.pk)
        self.assertEqual('test', fest.country)

    def test_correct_input(self):
        """
        A valid update must answer with the list of modified fields and
        actually modify the festival.
        """
        owner = login(self.client)
        api_client = create_client('test')
        api_client.write_access = True
        api_client.save()
        fest = create_festival('test', owner)
        fest.save()
        payload = {'client': 'test',
                   'id': fest.pk,
                   'city': 'testest',
                   'description': 'testestest'}
        resp = self.client.post('/backend/u/fest/', payload)
        self.assertEqual(resp.status_code, 200)
        body = resp.content.decode('utf-8')
        self.assertTrue('city:testest\n' in body)
        self.assertTrue('description:testestest\n' in body)
        # Two "field:value" lines plus a trailing newline.
        self.assertEqual(3, len(body.split('\n')))
        fest = Festival.objects.get(pk=fest.pk)
        self.assertEqual('testest', fest.city)
|
{
"content_hash": "8f131df54271d8b036368a4d66834a43",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 105,
"avg_line_length": 37.54814814814815,
"alnum_prop": 0.5480370881830736,
"repo_name": "amentis/FestPal-Server",
"id": "eae90e9c2cc7d0a6932ffdeffed4e712ea2b5762",
"size": "5069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/tests/test_update_festival_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "290"
},
{
"name": "Python",
"bytes": "99612"
}
],
"symlink_target": ""
}
|
import unittest
from django.test.client import Client
from lifeflow.models import *
import datetime
import pygments.lexers as lexers
#response = self.client.get('/api/case/retrieve/', {})
#self.assertEquals(response.content, 'etc')
class commentTest(unittest.TestCase):
    """Tests for Entry.organize_comments() threading/ordering."""

    def setUp(self):
        self.client = Client()

    def test_organize_comments(self):
        "models.py: test organize_comments method for Entry"
        entry = Entry(title="My Entry",
                      pub_date=datetime.datetime.now(),
                      summary="A summary",
                      body="Some text")
        entry.save()
        first = Comment(entry=entry, body="Some comment one.")
        first.save()
        # A single comment sits alone at depth 0.
        self.assertEquals([[first, 0]], entry.organize_comments())
        second = Comment(entry=entry, name="Two", body="Some comment two.")
        second.save()
        # The newest top-level comment is listed first.
        self.assertEquals([[second, 0], [first, 0]],
                          entry.organize_comments())
        reply_to_first = Comment(entry=entry, name="Three", parent=first,
                                 body="Three")
        reply_to_first.save()
        # Replies follow their parent at depth 1.
        self.assertEquals([[second, 0], [first, 0], [reply_to_first, 1]],
                          entry.organize_comments())
        reply_to_second = Comment(entry=entry, name="Four", parent=second,
                                  body="Four")
        reply_to_second.save()
        self.assertEquals([[second, 0], [reply_to_second, 1],
                           [first, 0], [reply_to_first, 1]],
                          entry.organize_comments())
class codeMarkupTest(unittest.TestCase):
    """
    Tests for dbc_markup(): plain markdown rendering, @@-delimited
    code blocks (highlighted via pygments) and LifeFlow-specific
    [text][tag ...] link markup.
    """
    def test_markup(self):
        "markup/markdown.py: test markdown"
        # Plain text becomes a single markdown paragraph.
        txt = "this is some text"
        expected = u"<p>this is some text\n</p>"
        rendered = dbc_markup(txt).strip("\n")
        self.assertEqual(expected, rendered)
    def test_code_markup(self):
        "markup/code.py: test code markup"
        # Indented text renders as a literal code block.
        txt = u"    some code in a code block\n    is nice\n"
        expected = u'<pre><code>some code in a code block\nis nice\n</code></pre>'
        self.assertEqual(expected, dbc_markup(txt))
        # Already pre-formatted HTML passes through untouched.
        txt = u"<pre>this is some stuff\nthat I am concerned about</pre>"
        self.assertEqual(txt, dbc_markup(txt))
        # "@@ <language> ... @@" blocks are highlighted by pygments.
        txt = u"@@ python\nx = 10 * 5\n@@\n"
        expected = u'<div class="highlight"><pre><span class="n">x</span> <span class="o">=</span> <span class="mi">10</span> <span class="o">*</span> <span class="mi">5</span>\n</pre></div>'
        self.assertEqual(expected, dbc_markup(txt))
        txt = u"@@ python\ndef test(a,b):\n    return x + y\n@@\n"
        expected = u'<div class="highlight"><pre><span class="k">def</span> <span class="nf">test</span><span class="p">(</span><span class="n">a</span><span class="p">,</span><span class="n">b</span><span class="p">):</span>\n    <span class="k">return</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span>\n</pre></div>'
        self.assertEqual(expected, dbc_markup(txt))
    def test_using_non_existant_language(self):
        "markup/code.py: test improperly formed code markup"
        # A missing or unknown language name should surface pygments'
        # ClassNotFound rather than being silently swallowed.
        cases = (
            u"@@\ndef test(a,b):\n@@\n",
            u"@@ fake-language\n(+ 1 2 3)\n@@\n",
        )
        for case in cases:
            self.assertRaises(lexers.ClassNotFound,
                              lambda : dbc_markup(case))
    def test_lfmu(self):
        "markup/lifeflowmarkdown.py: test lifeflow markup"
        # Build the fixture objects that [tag]/[author]/[project]
        # references resolve against.
        e = Entry(title="My Entry",
                  pub_date=datetime.datetime.now(),
                  summary="A summary",
                  body="Some text")
        e.save()
        a = Author(name="Will Larson",
                   slug="will-larson",
                   link="a")
        a.save()
        # e2 has an author attached, so [author] resolves to a slug.
        e2= Entry(title="My Entry",
                  pub_date=datetime.datetime.now(),
                  summary="A summary",
                  body="Some text",
                  )
        e2.save()
        e2.authors.add(a)
        e2.save()
        t = Tag(title="LifeFlow", slug="lifeflow")
        t.save()
        c1 = Comment(entry=e, body="Some comment one.")
        c1.save()
        p = Project(title="Lifeflow",
                    slug="lifeflow",
                    summary="A summary",
                    body="Some text")
        p.save()
        self.assertEqual(dbc_markup("[trying out a tag][tag lifeflow]", e),
                         u'<p><a href="/tags/lifeflow/">trying out a tag</a>\n</p>')
        # An entry with no authors links to the generic author page.
        self.assertEqual(dbc_markup("[and the author][author]", e),
                         u'<p><a href="/author/">and the author</a>\n</p>')
        self.assertEqual(dbc_markup("[about will][author]", e2),
                         u'<p><a href="/author/will-larson/">about will</a>\n</p>')
        #self.assertEqual(dbc_markup("[the first comment][comment 1]", e),
        #                 u'<p><a href="/entry/2008/jan/12//#comment_1">the first comment</a>\n</p>')
        self.assertEqual(dbc_markup("[lf proj][project lifeflow]", e),
                         u'<p><a href="/projects/lifeflow/">lf proj</a>\n</p>')
        # test for [file name]
        # test for [f name]
|
{
"content_hash": "a451f3bb8a5ac90612359feec71366fb",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 350,
"avg_line_length": 38.77165354330709,
"alnum_prop": 0.5306661251015434,
"repo_name": "rajeev/lifeflow",
"id": "051e7ae1e4744bac9d5a8c29b21cd464cb189c46",
"size": "4924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3940"
},
{
"name": "Python",
"bytes": "181896"
}
],
"symlink_target": ""
}
|
from setuptools import setup

# Packaging metadata for the librpc IDL linter command-line tool;
# installs the 'rpclint' console entry point.
setup(
    name='librpc_lint',
    version='1.0',
    description='librpc IDL file linter',
    author='Jakub Klama',
    author_email='jakub.klama@twoporeguys.com',
    url='https://github.com/twoporeguys/librpc',
    include_package_data=True,
    packages=['librpc_lint'],
    entry_points={'console_scripts': ['rpclint=librpc_lint:main']},
)
|
{
"content_hash": "1473a6ad3ba5393cabfe4b9de40a56e0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 48,
"avg_line_length": 22.88888888888889,
"alnum_prop": 0.616504854368932,
"repo_name": "twoporeguys/librpc",
"id": "f5237a5957ff87c289ffbd253b339e86e412620f",
"size": "1708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/rpclint/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1245567"
},
{
"name": "C++",
"bytes": "19309"
},
{
"name": "CMake",
"bytes": "22252"
},
{
"name": "CSS",
"bytes": "148"
},
{
"name": "JavaScript",
"bytes": "1031"
},
{
"name": "Makefile",
"bytes": "7865"
},
{
"name": "Mako",
"bytes": "6378"
},
{
"name": "Objective-C",
"bytes": "69216"
},
{
"name": "Python",
"bytes": "160124"
},
{
"name": "Rust",
"bytes": "16491"
},
{
"name": "Shell",
"bytes": "2134"
},
{
"name": "Swift",
"bytes": "1923"
},
{
"name": "TypeScript",
"bytes": "38272"
},
{
"name": "XSLT",
"bytes": "3538"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.