| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 class) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def test_readTruncated(self) -> None:
'\n If the JSON text for a record is truncated, skip it.\n '
with StringIO('\x1e{"x": 1\x1e{"y": 2}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to read truncated JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": 1')
| 279,952,981,227,097,440
|
If the JSON text for a record is truncated, skip it.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readTruncated
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readTruncated(self) -> None:
'\n \n '
with StringIO('\x1e{"x": 1\x1e{"y": 2}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to read truncated JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": 1')
|
def test_readUnicode(self) -> None:
'\n If the file being read from vends L{str}, strings decode from JSON\n as-is.\n '
with StringIO('\x1e{"currency": "€"}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'currency': '€'})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 0)
| 8,238,753,036,658,650,000
|
If the file being read from vends L{str}, strings decode from JSON
as-is.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readUnicode
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readUnicode(self) -> None:
'\n If the file being read from vends L{str}, strings decode from JSON\n as-is.\n '
with StringIO('\x1e{"currency": "€"}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'currency': '€'})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 0)
|
def test_readUTF8Bytes(self) -> None:
'\n If the file being read from vends L{bytes}, strings decode from JSON as\n UTF-8.\n '
with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'currency': '€'})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 0)
| 1,131,651,271,710,283,900
|
If the file being read from vends L{bytes}, strings decode from JSON as
UTF-8.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readUTF8Bytes
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readUTF8Bytes(self) -> None:
'\n If the file being read from vends L{bytes}, strings decode from JSON as\n UTF-8.\n '
with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'currency': '€'})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 0)
|
def test_readTruncatedUTF8Bytes(self) -> None:
"\n If the JSON text for a record is truncated in the middle of a two-byte\n Unicode codepoint, we don't want to see a codec exception and the\n stream is read properly when the additional data arrives.\n "
with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle, bufferSize=8))
self.assertEqual(next(events), {'x': '€'})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 0)
| 5,986,431,497,529,183,000
|
If the JSON text for a record is truncated in the middle of a two-byte
Unicode codepoint, we don't want to see a codec exception and the
stream is read properly when the additional data arrives.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readTruncatedUTF8Bytes
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readTruncatedUTF8Bytes(self) -> None:
"\n If the JSON text for a record is truncated in the middle of a two-byte\n Unicode codepoint, we don't want to see a codec exception and the\n stream is read properly when the additional data arrives.\n "
with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle, bufferSize=8))
self.assertEqual(next(events), {'x': '€'})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 0)
|
def test_readInvalidUTF8Bytes(self) -> None:
'\n If the JSON text for a record contains invalid UTF-8 text, ignore that\n record.\n '
with BytesIO(b'\x1e{"x": "\xe2\xac"}\n\x1e{"y": 2}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to decode UTF-8 for JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": "\xe2\xac"}\n')
| 1,947,517,661,667,441,000
|
If the JSON text for a record contains invalid UTF-8 text, ignore that
record.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readInvalidUTF8Bytes
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readInvalidUTF8Bytes(self) -> None:
'\n If the JSON text for a record contains invalid UTF-8 text, ignore that\n record.\n '
with BytesIO(b'\x1e{"x": "\xe2\xac"}\n\x1e{"y": 2}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to decode UTF-8 for JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": "\xe2\xac"}\n')
|
def test_readInvalidJSON(self) -> None:
'\n If the JSON text for a record is invalid, skip it.\n '
with StringIO('\x1e{"x": }\n\x1e{"y": 2}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to read JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": }\n')
| 5,854,437,429,673,046,000
|
If the JSON text for a record is invalid, skip it.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readInvalidJSON
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readInvalidJSON(self) -> None:
'\n \n '
with StringIO('\x1e{"x": }\n\x1e{"y": 2}\n') as fileHandle:
events = iter(eventsFromJSONLogFile(fileHandle))
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to read JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": }\n')
|
def test_readUnseparated(self) -> None:
'\n Multiple events without a record separator are skipped.\n '
with StringIO('\x1e{"x": 1}\n{"y": 2}\n') as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to read JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": 1}\n{"y": 2}\n')
| 8,033,836,477,703,699,000
|
Multiple events without a record separator are skipped.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readUnseparated
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readUnseparated(self) -> None:
'\n \n '
with StringIO('\x1e{"x": 1}\n{"y": 2}\n') as fileHandle:
events = eventsFromJSONLogFile(fileHandle)
self.assertRaises(StopIteration, next, events)
self.assertEqual(len(self.errorEvents), 1)
self.assertEqual(self.errorEvents[0]['log_format'], 'Unable to read JSON record: {record!r}')
self.assertEqual(self.errorEvents[0]['record'], b'{"x": 1}\n{"y": 2}\n')
|
def test_roundTrip(self) -> None:
'\n Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver}\n and read by L{eventsFromJSONLogFile} is reconstructed properly.\n '
event = dict(x=1)
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle)
observer(event)
fileHandle.seek(0)
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(tuple(events), (event,))
self.assertEqual(len(self.errorEvents), 0)
| -3,193,581,932,947,256,000
|
Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver}
and read by L{eventsFromJSONLogFile} is reconstructed properly.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_roundTrip
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_roundTrip(self) -> None:
'\n Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver}\n and read by L{eventsFromJSONLogFile} is reconstructed properly.\n '
event = dict(x=1)
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle)
observer(event)
fileHandle.seek(0)
events = eventsFromJSONLogFile(fileHandle)
self.assertEqual(tuple(events), (event,))
self.assertEqual(len(self.errorEvents), 0)
|
def __init__(self, temboo_session):
'\n Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(RetrieveCoupon, self).__init__(temboo_session, '/Library/Stripe/Coupons/RetrieveCoupon')
| -1,358,842,146,945,783,800
|
Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
|
temboo/Library/Stripe/Coupons/RetrieveCoupon.py
|
__init__
|
jordanemedlock/psychtruths
|
python
|
def __init__(self, temboo_session):
'\n Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n '
super(RetrieveCoupon, self).__init__(temboo_session, '/Library/Stripe/Coupons/RetrieveCoupon')
|
def set_APIKey(self, value):
'\n Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe)\n '
super(RetrieveCouponInputSet, self)._set_input('APIKey', value)
| -9,215,145,329,280,337,000
|
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe)
|
temboo/Library/Stripe/Coupons/RetrieveCoupon.py
|
set_APIKey
|
jordanemedlock/psychtruths
|
python
|
def set_APIKey(self, value):
'\n \n '
super(RetrieveCouponInputSet, self)._set_input('APIKey', value)
|
def set_CouponID(self, value):
'\n Set the value of the CouponID input for this Choreo. ((required, string) The unique identifier of the coupon you want to retrieve)\n '
super(RetrieveCouponInputSet, self)._set_input('CouponID', value)
| 7,054,623,783,622,366,000
|
Set the value of the CouponID input for this Choreo. ((required, string) The unique identifier of the coupon you want to retrieve)
|
temboo/Library/Stripe/Coupons/RetrieveCoupon.py
|
set_CouponID
|
jordanemedlock/psychtruths
|
python
|
def set_CouponID(self, value):
'\n \n '
super(RetrieveCouponInputSet, self)._set_input('CouponID', value)
|
def get_Response(self):
'\n Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe)\n '
return self._output.get('Response', None)
| -1,092,654,301,331,646,000
|
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe)
|
temboo/Library/Stripe/Coupons/RetrieveCoupon.py
|
get_Response
|
jordanemedlock/psychtruths
|
python
|
def get_Response(self):
'\n \n '
return self._output.get('Response', None)
|
def to_dict(self):
'Returns the model properties as a dict'
result = {self.name: self.value}
return result
| -8,500,420,641,616,171,000
|
Returns the model properties as a dict
|
ask-smapi-model/ask_smapi_model/v1/skill/status.py
|
to_dict
|
alexa-labs/alexa-apis-for-python
|
python
|
def to_dict(self):
result = {self.name: self.value}
return result
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.value)
| 5,645,736,252,210,486,000
|
Returns the string representation of the model
|
ask-smapi-model/ask_smapi_model/v1/skill/status.py
|
to_str
|
alexa-labs/alexa-apis-for-python
|
python
|
def to_str(self):
return pprint.pformat(self.value)
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
ask-smapi-model/ask_smapi_model/v1/skill/status.py
|
__repr__
|
alexa-labs/alexa-apis-for-python
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, Status)):
return False
return (self.__dict__ == other.__dict__)
| 3,802,768,383,922,608,000
|
Returns true if both objects are equal
|
ask-smapi-model/ask_smapi_model/v1/skill/status.py
|
__eq__
|
alexa-labs/alexa-apis-for-python
|
python
|
def __eq__(self, other):
if (not isinstance(other, Status)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
ask-smapi-model/ask_smapi_model/v1/skill/status.py
|
__ne__
|
alexa-labs/alexa-apis-for-python
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def build_adjacency_map(regs):
'Build mapping from node IDs to child records\n :param regs: List of `Regulation` records\n '
ret = collections.defaultdict(list)
for reg in regs:
if (reg.parent_id is not None):
ret[reg.parent_id].append(reg)
return ret
| -6,706,548,376,030,048,000
|
Build mapping from node IDs to child records
:param regs: List of `Regulation` records
|
regcore/migrations/0012_migrate_documents.py
|
build_adjacency_map
|
18F/regulations-core
|
python
|
def build_adjacency_map(regs):
'Build mapping from node IDs to child records\n :param regs: List of `Regulation` records\n '
ret = collections.defaultdict(list)
for reg in regs:
if (reg.parent_id is not None):
ret[reg.parent_id].append(reg)
return ret
|
def treeify(node, tree_id, pos=1, level=0):
'Set tree properties in memory.\n '
node['tree_id'] = tree_id
node['level'] = level
node['left'] = pos
for child in node.get('children', []):
pos = treeify(child, tree_id, pos=(pos + 1), level=(level + 1))
pos = (pos + 1)
node['right'] = pos
return pos
| 4,334,308,389,043,390,000
|
Set tree properties in memory.
|
regcore/migrations/0012_migrate_documents.py
|
treeify
|
18F/regulations-core
|
python
|
def treeify(node, tree_id, pos=1, level=0):
'\n '
node['tree_id'] = tree_id
node['level'] = level
node['left'] = pos
for child in node.get('children', []):
pos = treeify(child, tree_id, pos=(pos + 1), level=(level + 1))
pos = (pos + 1)
node['right'] = pos
return pos
|
def _transform(self, reg, doc_type, version=None):
'Create the Django object'
return self.Document(id=build_id(reg, version), doc_type=doc_type, version=version, parent_id=(build_id(reg['parent'], version) if reg.get('parent') else None), tree_id=reg['tree_id'], level=reg['level'], lft=reg['left'], rght=reg['right'], label_string='-'.join(reg['label']), text=reg['text'], title=reg.get('title', ''), node_type=reg['node_type'], root=(len(reg['label']) == 1))
| -6,060,643,226,040,560,000
|
Create the Django object
|
regcore/migrations/0012_migrate_documents.py
|
_transform
|
18F/regulations-core
|
python
|
def _transform(self, reg, doc_type, version=None):
    return self.Document(id=build_id(reg, version), doc_type=doc_type, version=version, parent_id=(build_id(reg['parent'], version) if reg.get('parent') else None), tree_id=reg['tree_id'], level=reg['level'], lft=reg['left'], rght=reg['right'], label_string='-'.join(reg['label']), text=reg['text'], title=reg.get('title', ''), node_type=reg['node_type'], root=(len(reg['label']) == 1))
|
def get_workspace_connection(connection_name: Optional[str]=None, resource_group_name: Optional[str]=None, workspace_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetWorkspaceConnectionResult:
'\n Workspace connection.\n\n\n :param str connection_name: Friendly name of the workspace connection\n :param str resource_group_name: Name of the resource group in which workspace is located.\n :param str workspace_name: Name of Azure Machine Learning workspace.\n '
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200601:getWorkspaceConnection', __args__, opts=opts, typ=GetWorkspaceConnectionResult).value
return AwaitableGetWorkspaceConnectionResult(auth_type=__ret__.auth_type, category=__ret__.category, id=__ret__.id, name=__ret__.name, target=__ret__.target, type=__ret__.type, value=__ret__.value)
| 7,841,102,505,399,053,000
|
Workspace connection.
:param str connection_name: Friendly name of the workspace connection
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
get_workspace_connection
|
pulumi/pulumi-azure-nextgen
|
python
|
def get_workspace_connection(connection_name: Optional[str]=None, resource_group_name: Optional[str]=None, workspace_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetWorkspaceConnectionResult:
'\n Workspace connection.\n\n\n :param str connection_name: Friendly name of the workspace connection\n :param str resource_group_name: Name of the resource group in which workspace is located.\n :param str workspace_name: Name of Azure Machine Learning workspace.\n '
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200601:getWorkspaceConnection', __args__, opts=opts, typ=GetWorkspaceConnectionResult).value
return AwaitableGetWorkspaceConnectionResult(auth_type=__ret__.auth_type, category=__ret__.category, id=__ret__.id, name=__ret__.name, target=__ret__.target, type=__ret__.type, value=__ret__.value)
|
@property
@pulumi.getter(name='authType')
def auth_type(self) -> Optional[str]:
'\n Authorization type of the workspace connection.\n '
return pulumi.get(self, 'auth_type')
| -7,419,004,781,169,553,000
|
Authorization type of the workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
auth_type
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter(name='authType')
def auth_type(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'auth_type')
|
@property
@pulumi.getter
def category(self) -> Optional[str]:
'\n Category of the workspace connection.\n '
return pulumi.get(self, 'category')
| -7,120,483,721,124,468,000
|
Category of the workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
category
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def category(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'category')
|
@property
@pulumi.getter
def id(self) -> str:
'\n ResourceId of the workspace connection.\n '
return pulumi.get(self, 'id')
| 7,130,237,717,283,418,000
|
ResourceId of the workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
id
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')
|
@property
@pulumi.getter
def name(self) -> str:
'\n Friendly name of the workspace connection.\n '
return pulumi.get(self, 'name')
| -7,912,215,057,246,779,000
|
Friendly name of the workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
name
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name')
|
@property
@pulumi.getter
def target(self) -> Optional[str]:
'\n Target of the workspace connection.\n '
return pulumi.get(self, 'target')
| -8,350,977,006,872,855,000
|
Target of the workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
target
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def target(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'target')
|
@property
@pulumi.getter
def type(self) -> str:
'\n Resource type of workspace connection.\n '
return pulumi.get(self, 'type')
| -6,533,051,044,412,921,000
|
Resource type of workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
type
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def type(self) -> str:
'\n \n '
return pulumi.get(self, 'type')
|
@property
@pulumi.getter
def value(self) -> Optional[str]:
'\n Value details of the workspace connection.\n '
return pulumi.get(self, 'value')
| -6,315,767,598,209,378,000
|
Value details of the workspace connection.
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
|
value
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def value(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'value')
|
def profile(self, recs: Iterator[dict], index: bool=False, deep: bool=False) -> TaggedProfilerSummary:
"Provides the most useful summary counts you'll likely want from the incoming record sequence.\n Optional :index and :deep flags allow us to return special indexing and cachinc structs which we'll describe later."
labels = list(self.tagmap.keys())
temp_cache: Dict[(int, Any)] = {}
temp_index: Dict[(str, Any)] = {k: defaultdict(int) for k in labels}
for status in self.evaluate(recs, deep):
temp_cache[status.offset] = (status.r if deep else 1)
temp_index[status.tag][status.offset] += 1
_total = len(temp_cache)
_histo: Dict[(str, int)] = {k: len(v) for (k, v) in temp_index.items()}
_index: Optional[Dict[(str, list)]] = None
_cache: Optional[Dict[(int, Any)]] = None
if temp_index:
_index = {k: list(v.keys()) for (k, v) in temp_index.items()}
if deep:
_cache = temp_cache
return TaggedProfilerSummary(_total, _histo, _index, _cache)
| -5,629,771,845,425,316,000
|
Provides the most useful summary counts you'll likely want from the incoming record sequence.
Optional :index and :deep flags allow us to return special indexing and caching structs which we'll describe later.
|
caixa/profile/tagged.py
|
profile
|
wstlabs/caixa
|
python
|
def profile(self, recs: Iterator[dict], index: bool=False, deep: bool=False) -> TaggedProfilerSummary:
"Provides the most useful summary counts you'll likely want from the incoming record sequence.\n Optional :index and :deep flags allow us to return special indexing and cachinc structs which we'll describe later."
labels = list(self.tagmap.keys())
temp_cache: Dict[(int, Any)] = {}
temp_index: Dict[(str, Any)] = {k: defaultdict(int) for k in labels}
for status in self.evaluate(recs, deep):
temp_cache[status.offset] = (status.r if deep else 1)
temp_index[status.tag][status.offset] += 1
_total = len(temp_cache)
_histo: Dict[(str, int)] = {k: len(v) for (k, v) in temp_index.items()}
_index: Optional[Dict[(str, list)]] = None
_cache: Optional[Dict[(int, Any)]] = None
if temp_index:
_index = {k: list(v.keys()) for (k, v) in temp_index.items()}
if deep:
_cache = temp_cache
return TaggedProfilerSummary(_total, _histo, _index, _cache)
|
def lower_items(self):
'Like iteritems(), but with all lowercase keys.'
return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
| -7,711,003,964,588,617,000
|
Like iteritems(), but with all lowercase keys.
|
anillo/utils/structures.py
|
lower_items
|
jespino/anillo
|
python
|
def lower_items(self):
return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
|
def __init__(self, schema, nbits=32, initial=0):
'\n :param schema:\n A list of FlagField objects representing the values to be packed\n into this object, in order from LSB to MSB of the underlying int\n\n :param nbits:\n An integer representing the total number of bits used for flags\n\n :param initial:\n The initial integer value of the flags field\n '
self._nbits = nbits
self._value = initial
self._schema = OrderedDict()
tot_bits = sum([item.nbits for item in schema])
if (tot_bits > nbits):
raise TypeError('Too many fields for {nbits}-bit field (schema defines {tot} bits)'.format(nbits=nbits, tot=tot_bits))
bitn = 0
for item in schema:
if (not isinstance(item, FlagField)):
raise TypeError('Schema must be composed of FlagField objects')
if (not issubclass(item.ftype, FlagBase)):
raise TypeError('Expected FlagBase, got {}'.format(item.ftype))
self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)
bitn += item.nbits
| -7,730,676,815,879,763,000
|
:param schema:
A list of FlagField objects representing the values to be packed
into this object, in order from LSB to MSB of the underlying int
:param nbits:
An integer representing the total number of bits used for flags
:param initial:
The initial integer value of the flags field
|
pcapng/flags.py
|
__init__
|
Boolean263/python-pcapng
|
python
|
def __init__(self, schema, nbits=32, initial=0):
'\n :param schema:\n A list of FlagField objects representing the values to be packed\n into this object, in order from LSB to MSB of the underlying int\n\n :param nbits:\n An integer representing the total number of bits used for flags\n\n :param initial:\n The initial integer value of the flags field\n '
self._nbits = nbits
self._value = initial
self._schema = OrderedDict()
tot_bits = sum([item.nbits for item in schema])
if (tot_bits > nbits):
raise TypeError('Too many fields for {nbits}-bit field (schema defines {tot} bits)'.format(nbits=nbits, tot=tot_bits))
bitn = 0
for item in schema:
if (not isinstance(item, FlagField)):
raise TypeError('Schema must be composed of FlagField objects')
if (not issubclass(item.ftype, FlagBase)):
raise TypeError('Expected FlagBase, got {}'.format(item.ftype))
self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)
bitn += item.nbits
|
def test_tweet_tokenizer(self):
'\n Test TweetTokenizer using words with special and accented characters.\n '
tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
s9 = "@myke: Let's test these words: resumé España München français"
tokens = tokenizer.tokenize(s9)
expected = [':', "Let's", 'test', 'these', 'words', ':', 'resumé', 'España', 'München', 'français']
assert (tokens == expected)
| 1,870,422,657,282,939,400
|
Test TweetTokenizer using words with special and accented characters.
|
nltk/test/unit/test_tokenize.py
|
test_tweet_tokenizer
|
Geolem/nltk
|
python
|
def test_tweet_tokenizer(self):
'\n \n '
tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
s9 = "@myke: Let's test these words: resumé España München français"
tokens = tokenizer.tokenize(s9)
expected = [':', "Let's", 'test', 'these', 'words', ':', 'resumé', 'España', 'München', 'français']
assert (tokens == expected)
|
def test_sonority_sequencing_syllable_tokenizer(self):
'\n Test SyllableTokenizer tokenizer.\n '
tokenizer = SyllableTokenizer()
tokens = tokenizer.tokenize('justification')
assert (tokens == ['jus', 'ti', 'fi', 'ca', 'tion'])
| 4,697,742,003,575,938,000
|
Test SyllableTokenizer tokenizer.
|
nltk/test/unit/test_tokenize.py
|
test_sonority_sequencing_syllable_tokenizer
|
Geolem/nltk
|
python
|
def test_sonority_sequencing_syllable_tokenizer(self):
'\n \n '
tokenizer = SyllableTokenizer()
tokens = tokenizer.tokenize('justification')
assert (tokens == ['jus', 'ti', 'fi', 'ca', 'tion'])
|
def test_legality_principle_syllable_tokenizer(self):
'\n Test LegalitySyllableTokenizer tokenizer.\n '
from nltk.corpus import words
test_word = 'wonderful'
tokenizer = LegalitySyllableTokenizer(words.words())
tokens = tokenizer.tokenize(test_word)
assert (tokens == ['won', 'der', 'ful'])
| 7,104,131,110,027,529,000
|
Test LegalitySyllableTokenizer tokenizer.
|
nltk/test/unit/test_tokenize.py
|
test_legality_principle_syllable_tokenizer
|
Geolem/nltk
|
python
|
def test_legality_principle_syllable_tokenizer(self):
'\n \n '
from nltk.corpus import words
test_word = 'wonderful'
tokenizer = LegalitySyllableTokenizer(words.words())
tokens = tokenizer.tokenize(test_word)
assert (tokens == ['won', 'der', 'ful'])
|
def test_stanford_segmenter_arabic(self):
'\n Test the Stanford Word Segmenter for Arabic (default config)\n '
try:
seg = StanfordSegmenter()
seg.default_config('ar')
sent = u'يبحث علم الحاسوب استخدام الحوسبة بجميع اشكالها لحل المشكلات'
segmented_sent = seg.segment(sent.split())
assert (segmented_sent.split() == ['يبحث', 'علم', 'الحاسوب', 'استخدام', 'الحوسبة', 'ب', 'جميع', 'اشكال', 'ها', 'ل', 'حل', 'المشكلات'])
except LookupError as e:
pytest.skip(str(e))
| 5,769,637,606,936,761,000
|
Test the Stanford Word Segmenter for Arabic (default config)
|
nltk/test/unit/test_tokenize.py
|
test_stanford_segmenter_arabic
|
Geolem/nltk
|
python
|
def test_stanford_segmenter_arabic(self):
'\n \n '
try:
seg = StanfordSegmenter()
seg.default_config('ar')
sent = u'يبحث علم الحاسوب استخدام الحوسبة بجميع اشكالها لحل المشكلات'
segmented_sent = seg.segment(sent.split())
assert (segmented_sent.split() == ['يبحث', 'علم', 'الحاسوب', 'استخدام', 'الحوسبة', 'ب', 'جميع', 'اشكال', 'ها', 'ل', 'حل', 'المشكلات'])
except LookupError as e:
pytest.skip(str(e))
|
def test_stanford_segmenter_chinese(self):
'\n Test the Stanford Word Segmenter for Chinese (default config)\n '
try:
seg = StanfordSegmenter()
seg.default_config('zh')
sent = u'这是斯坦福中文分词器测试'
segmented_sent = seg.segment(sent.split())
assert (segmented_sent.split() == ['这', '是', '斯坦福', '中文', '分词器', '测试'])
except LookupError as e:
pytest.skip(str(e))
| -5,476,380,497,024,454,000
|
Test the Stanford Word Segmenter for Chinese (default config)
|
nltk/test/unit/test_tokenize.py
|
test_stanford_segmenter_chinese
|
Geolem/nltk
|
python
|
def test_stanford_segmenter_chinese(self):
'\n \n '
try:
seg = StanfordSegmenter()
seg.default_config('zh')
sent = u'这是斯坦福中文分词器测试'
segmented_sent = seg.segment(sent.split())
assert (segmented_sent.split() == ['这', '是', '斯坦福', '中文', '分词器', '测试'])
except LookupError as e:
pytest.skip(str(e))
|
def test_phone_tokenizer(self):
'\n Test a string that resembles a phone number but contains a newline\n '
tokenizer = TweetTokenizer()
test1 = '(393) 928 -3010'
expected = ['(393) 928 -3010']
result = tokenizer.tokenize(test1)
assert (result == expected)
test2 = '(393)\n928 -3010'
expected = ['(', '393', ')', '928 -3010']
result = tokenizer.tokenize(test2)
assert (result == expected)
| -2,268,609,590,015,661,000
|
Test a string that resembles a phone number but contains a newline
|
nltk/test/unit/test_tokenize.py
|
test_phone_tokenizer
|
Geolem/nltk
|
python
|
def test_phone_tokenizer(self):
'\n \n '
tokenizer = TweetTokenizer()
test1 = '(393) 928 -3010'
expected = ['(393) 928 -3010']
result = tokenizer.tokenize(test1)
assert (result == expected)
test2 = '(393)\n928 -3010'
expected = ['(', '393', ')', '928 -3010']
result = tokenizer.tokenize(test2)
assert (result == expected)
|
def test_pad_asterisk(self):
'\n Test padding of asterisk for word tokenization.\n '
text = 'This is a, *weird sentence with *asterisks in it.'
expected = ['This', 'is', 'a', ',', '*', 'weird', 'sentence', 'with', '*', 'asterisks', 'in', 'it', '.']
assert (word_tokenize(text) == expected)
| 7,259,830,196,329,563,000
|
Test padding of asterisk for word tokenization.
|
nltk/test/unit/test_tokenize.py
|
test_pad_asterisk
|
Geolem/nltk
|
python
|
def test_pad_asterisk(self):
'\n \n '
text = 'This is a, *weird sentence with *asterisks in it.'
expected = ['This', 'is', 'a', ',', '*', 'weird', 'sentence', 'with', '*', 'asterisks', 'in', 'it', '.']
assert (word_tokenize(text) == expected)
|
def test_pad_dotdot(self):
'\n Test padding of dotdot* for word tokenization.\n '
text = 'Why did dotdot.. not get tokenized but dotdotdot... did? How about manydots.....'
expected = ['Why', 'did', 'dotdot', '..', 'not', 'get', 'tokenized', 'but', 'dotdotdot', '...', 'did', '?', 'How', 'about', 'manydots', '.....']
assert (word_tokenize(text) == expected)
| -2,890,376,719,830,129,000
|
Test padding of dotdot* for word tokenization.
|
nltk/test/unit/test_tokenize.py
|
test_pad_dotdot
|
Geolem/nltk
|
python
|
def test_pad_dotdot(self):
'\n \n '
text = 'Why did dotdot.. not get tokenized but dotdotdot... did? How about manydots.....'
expected = ['Why', 'did', 'dotdot', '..', 'not', 'get', 'tokenized', 'but', 'dotdotdot', '...', 'did', '?', 'How', 'about', 'manydots', '.....']
assert (word_tokenize(text) == expected)
|
def test_remove_handle(self):
'\n Test remove_handle() from casual.py with specially crafted edge cases\n '
tokenizer = TweetTokenizer(strip_handles=True)
test1 = '@twitter hello @twi_tter_. hi @12345 @123news'
expected = ['hello', '.', 'hi']
result = tokenizer.tokenize(test1)
assert (result == expected)
test2 = '@n`@n~@n(@n)@n-@n=@n+@n\\@n|@n[@n]@n{@n}@n;@n:@n\'@n"@n/@n?@n.@n,@n<@n>@n @n\n@n ñ@n.ü@n.ç@n.'
expected = ['`', '~', '(', ')', '-', '=', '+', '\\', '|', '[', ']', '{', '}', ';', ':', "'", '"', '/', '?', '.', ',', '<', '>', 'ñ', '.', 'ü', '.', 'ç', '.']
result = tokenizer.tokenize(test2)
assert (result == expected)
test3 = 'a@n j@n z@n A@n L@n Z@n 1@n 4@n 7@n 9@n 0@n _@n !@n @@n #@n $@n %@n &@n *@n'
expected = ['a', '@n', 'j', '@n', 'z', '@n', 'A', '@n', 'L', '@n', 'Z', '@n', '1', '@n', '4', '@n', '7', '@n', '9', '@n', '0', '@n', '_', '@n', '!', '@n', '@', '@n', '#', '@n', '$', '@n', '%', '@n', '&', '@n', '*', '@n']
result = tokenizer.tokenize(test3)
assert (result == expected)
test4 = '@n!a @n#a @n$a @n%a @n&a @n*a'
expected = ['!', 'a', '#', 'a', '$', 'a', '%', 'a', '&', 'a', '*', 'a']
result = tokenizer.tokenize(test4)
assert (result == expected)
test5 = '@n!@n @n#@n @n$@n @n%@n @n&@n @n*@n @n@n @@n @n@@n @n_@n @n7@n @nj@n'
expected = ['!', '@n', '#', '@n', '$', '@n', '%', '@n', '&', '@n', '*', '@n', '@n', '@n', '@', '@n', '@n', '@', '@n', '@n_', '@n', '@n7', '@n', '@nj', '@n']
result = tokenizer.tokenize(test5)
assert (result == expected)
test6 = '@abcdefghijklmnopqrstuvwxyz @abcdefghijklmnopqrst1234 @abcdefghijklmnopqrst_ @abcdefghijklmnopqrstendofhandle'
expected = ['uvwxyz', '1234', '_', 'endofhandle']
result = tokenizer.tokenize(test6)
assert (result == expected)
test7 = '@abcdefghijklmnopqrstu@abcde @abcdefghijklmnopqrst@abcde @abcdefghijklmnopqrst_@abcde @abcdefghijklmnopqrst5@abcde'
expected = ['u', '@abcde', '@abcdefghijklmnopqrst', '@abcde', '_', '@abcde', '5', '@abcde']
result = tokenizer.tokenize(test7)
assert (result == expected)
| 2,887,204,716,913,255,000
|
Test remove_handle() from casual.py with specially crafted edge cases
|
nltk/test/unit/test_tokenize.py
|
test_remove_handle
|
Geolem/nltk
|
python
|
def test_remove_handle(self):
'\n \n '
tokenizer = TweetTokenizer(strip_handles=True)
test1 = '@twitter hello @twi_tter_. hi @12345 @123news'
expected = ['hello', '.', 'hi']
result = tokenizer.tokenize(test1)
assert (result == expected)
test2 = '@n`@n~@n(@n)@n-@n=@n+@n\\@n|@n[@n]@n{@n}@n;@n:@n\'@n"@n/@n?@n.@n,@n<@n>@n @n\n@n ñ@n.ü@n.ç@n.'
expected = ['`', '~', '(', ')', '-', '=', '+', '\\', '|', '[', ']', '{', '}', ';', ':', "'", '"', '/', '?', '.', ',', '<', '>', 'ñ', '.', 'ü', '.', 'ç', '.']
result = tokenizer.tokenize(test2)
assert (result == expected)
test3 = 'a@n j@n z@n A@n L@n Z@n 1@n 4@n 7@n 9@n 0@n _@n !@n @@n #@n $@n %@n &@n *@n'
expected = ['a', '@n', 'j', '@n', 'z', '@n', 'A', '@n', 'L', '@n', 'Z', '@n', '1', '@n', '4', '@n', '7', '@n', '9', '@n', '0', '@n', '_', '@n', '!', '@n', '@', '@n', '#', '@n', '$', '@n', '%', '@n', '&', '@n', '*', '@n']
result = tokenizer.tokenize(test3)
assert (result == expected)
test4 = '@n!a @n#a @n$a @n%a @n&a @n*a'
expected = ['!', 'a', '#', 'a', '$', 'a', '%', 'a', '&', 'a', '*', 'a']
result = tokenizer.tokenize(test4)
assert (result == expected)
test5 = '@n!@n @n#@n @n$@n @n%@n @n&@n @n*@n @n@n @@n @n@@n @n_@n @n7@n @nj@n'
expected = ['!', '@n', '#', '@n', '$', '@n', '%', '@n', '&', '@n', '*', '@n', '@n', '@n', '@', '@n', '@n', '@', '@n', '@n_', '@n', '@n7', '@n', '@nj', '@n']
result = tokenizer.tokenize(test5)
assert (result == expected)
test6 = '@abcdefghijklmnopqrstuvwxyz @abcdefghijklmnopqrst1234 @abcdefghijklmnopqrst_ @abcdefghijklmnopqrstendofhandle'
expected = ['uvwxyz', '1234', '_', 'endofhandle']
result = tokenizer.tokenize(test6)
assert (result == expected)
test7 = '@abcdefghijklmnopqrstu@abcde @abcdefghijklmnopqrst@abcde @abcdefghijklmnopqrst_@abcde @abcdefghijklmnopqrst5@abcde'
expected = ['u', '@abcde', '@abcdefghijklmnopqrst', '@abcde', '_', '@abcde', '5', '@abcde']
result = tokenizer.tokenize(test7)
assert (result == expected)
|
def test_treebank_span_tokenizer(self):
'\n Test TreebankWordTokenizer.span_tokenize function\n '
tokenizer = TreebankWordTokenizer()
test1 = 'Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).'
expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)]
result = list(tokenizer.span_tokenize(test1))
assert (result == expected)
test2 = 'The DUP is similar to the "religious right" in the United States and takes a hardline stance on social issues'
expected = [(0, 3), (4, 7), (8, 10), (11, 18), (19, 21), (22, 25), (26, 27), (27, 36), (37, 42), (42, 43), (44, 46), (47, 50), (51, 57), (58, 64), (65, 68), (69, 74), (75, 76), (77, 85), (86, 92), (93, 95), (96, 102), (103, 109)]
result = list(tokenizer.span_tokenize(test2))
assert (result == expected)
test3 = 'The DUP is similar to the "religious right" in the United States and takes a ``hardline\'\' stance on social issues'
expected = [(0, 3), (4, 7), (8, 10), (11, 18), (19, 21), (22, 25), (26, 27), (27, 36), (37, 42), (42, 43), (44, 46), (47, 50), (51, 57), (58, 64), (65, 68), (69, 74), (75, 76), (77, 79), (79, 87), (87, 89), (90, 96), (97, 99), (100, 106), (107, 113)]
result = list(tokenizer.span_tokenize(test3))
assert (result == expected)
| -6,737,549,003,913,649,000
|
Test TreebankWordTokenizer.span_tokenize function
|
nltk/test/unit/test_tokenize.py
|
test_treebank_span_tokenizer
|
Geolem/nltk
|
python
|
def test_treebank_span_tokenizer(self):
'\n \n '
tokenizer = TreebankWordTokenizer()
test1 = 'Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).'
expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)]
result = list(tokenizer.span_tokenize(test1))
assert (result == expected)
test2 = 'The DUP is similar to the "religious right" in the United States and takes a hardline stance on social issues'
expected = [(0, 3), (4, 7), (8, 10), (11, 18), (19, 21), (22, 25), (26, 27), (27, 36), (37, 42), (42, 43), (44, 46), (47, 50), (51, 57), (58, 64), (65, 68), (69, 74), (75, 76), (77, 85), (86, 92), (93, 95), (96, 102), (103, 109)]
result = list(tokenizer.span_tokenize(test2))
assert (result == expected)
test3 = 'The DUP is similar to the "religious right" in the United States and takes a ``hardline\'\' stance on social issues'
expected = [(0, 3), (4, 7), (8, 10), (11, 18), (19, 21), (22, 25), (26, 27), (27, 36), (37, 42), (42, 43), (44, 46), (47, 50), (51, 57), (58, 64), (65, 68), (69, 74), (75, 76), (77, 79), (79, 87), (87, 89), (90, 96), (97, 99), (100, 106), (107, 113)]
result = list(tokenizer.span_tokenize(test3))
assert (result == expected)
|
def test_word_tokenize(self):
'\n Test word_tokenize function\n '
sentence = "The 'v', I've been fooled but I'll seek revenge."
expected = ['The', "'", 'v', "'", ',', 'I', "'ve", 'been', 'fooled', 'but', 'I', "'ll", 'seek', 'revenge', '.']
assert (word_tokenize(sentence) == expected)
sentence = "'v' 're'"
expected = ["'", 'v', "'", "'re", "'"]
assert (word_tokenize(sentence) == expected)
| -6,346,903,539,932,258,000
|
Test word_tokenize function
|
nltk/test/unit/test_tokenize.py
|
test_word_tokenize
|
Geolem/nltk
|
python
|
def test_word_tokenize(self):
'\n \n '
sentence = "The 'v', I've been fooled but I'll seek revenge."
expected = ['The', "'", 'v', "'", ',', 'I', "'ve", 'been', 'fooled', 'but', 'I', "'ll", 'seek', 'revenge', '.']
assert (word_tokenize(sentence) == expected)
sentence = "'v' 're'"
expected = ["'", 'v', "'", "'re", "'"]
assert (word_tokenize(sentence) == expected)
|
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR R50 with 6 encoder and 6 decoder layers.\n\n Achieves 42/62.4 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet50', dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
| -1,557,097,367,251,757,800
|
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 42/62.4 AP/AP50 on COCO val5k.
|
detr/hubconf.py
|
detr_resnet50
|
justinkay/detr
|
python
|
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR R50 with 6 encoder and 6 decoder layers.\n\n Achieves 42/62.4 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet50', dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
|
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR-DC5 R50 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-50 has dilation to increase\n output resolution.\n Achieves 43.3/63.1 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet50', dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
| -611,423,504,267,708,000
|
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 43.3/63.1 AP/AP50 on COCO val5k.
|
detr/hubconf.py
|
detr_resnet50_dc5
|
justinkay/detr
|
python
|
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR-DC5 R50 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-50 has dilation to increase\n output resolution.\n Achieves 43.3/63.1 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet50', dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
|
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n Achieves 43.5/63.8 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet101', dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
| 3,371,921,880,259,945,000
|
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
Achieves 43.5/63.8 AP/AP50 on COCO val5k.
|
detr/hubconf.py
|
detr_resnet101
|
justinkay/detr
|
python
|
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n Achieves 43.5/63.8 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet101', dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
|
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-101 has dilation to increase\n output resolution.\n Achieves 44.9/64.7 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet101', dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
| 518,860,349,525,525,250
|
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
The last block of ResNet-101 has dilation to increase
output resolution.
Achieves 44.9/64.7 AP/AP50 on COCO val5k.
|
detr/hubconf.py
|
detr_resnet101_dc5
|
justinkay/detr
|
python
|
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
'\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-101 has dilation to increase\n output resolution.\n Achieves 44.9/64.7 AP/AP50 on COCO val5k.\n '
model = _make_detr('resnet101', dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcess())
return model
|
def detr_resnet50_panoptic(pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False):
'\n DETR R50 with 6 encoder and 6 decoder layers.\n Achieves 43.4 PQ on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n '
model = _make_detr('resnet50', dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: (i <= 90) for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcessPanoptic(is_thing_map, threshold=threshold))
return model
| -1,169,564,157,377,301,200
|
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 43.4 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
|
detr/hubconf.py
|
detr_resnet50_panoptic
|
justinkay/detr
|
python
|
def detr_resnet50_panoptic(pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False):
'\n DETR R50 with 6 encoder and 6 decoder layers.\n Achieves 43.4 PQ on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n '
model = _make_detr('resnet50', dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: (i <= 90) for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcessPanoptic(is_thing_map, threshold=threshold))
return model
|
def detr_resnet50_dc5_panoptic(pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False):
'\n DETR-DC5 R50 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-50 has dilation to increase\n output resolution.\n Achieves 44.6 on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n '
model = _make_detr('resnet50', dilation=True, num_classes=num_classes, mask=True)
is_thing_map = {i: (i <= 90) for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcessPanoptic(is_thing_map, threshold=threshold))
return model
| -9,020,519,896,936,210,000
|
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 44.6 on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
|
detr/hubconf.py
|
detr_resnet50_dc5_panoptic
|
justinkay/detr
|
python
|
def detr_resnet50_dc5_panoptic(pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False):
'\n DETR-DC5 R50 with 6 encoder and 6 decoder layers.\n\n The last block of ResNet-50 has dilation to increase\n output resolution.\n Achieves 44.6 on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n '
model = _make_detr('resnet50', dilation=True, num_classes=num_classes, mask=True)
is_thing_map = {i: (i <= 90) for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcessPanoptic(is_thing_map, threshold=threshold))
return model
|
def detr_resnet101_panoptic(pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False):
'\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n Achieves 45.1 PQ on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n '
model = _make_detr('resnet101', dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: (i <= 90) for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcessPanoptic(is_thing_map, threshold=threshold))
return model
| 6,549,886,152,426,739,000
|
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
Achieves 45.1 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
|
detr/hubconf.py
|
detr_resnet101_panoptic
|
justinkay/detr
|
python
|
def detr_resnet101_panoptic(pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False):
'\n DETR-DC5 R101 with 6 encoder and 6 decoder layers.\n\n Achieves 45.1 PQ on COCO val5k.\n\n threshold is the minimum confidence required for keeping segments in the prediction\n '
model = _make_detr('resnet101', dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: (i <= 90) for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth', map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint['model'])
if return_postprocessor:
return (model, PostProcessPanoptic(is_thing_map, threshold=threshold))
return model
|
def set_args(name, subparsers):
' add arguments, and their options '
parser = subparsers.add_parser(name)
arg = parser.add_argument
arg('name', help='Storage Name')
arg('--type', help='Storage Type', choices=['Replica1', 'Replica3', 'External', 'Replica2'], default=None)
arg('--device', help='Storage device in <node>:<device> format, Example: --device kube1.example.com:/dev/vdc', default=[], action='append')
arg('--path', help='Storage path in <node>:<path> format, Example: --path kube1.example.com:/exports/data', default=[], action='append')
arg('--pvc', help='Storage from pvc, Example: --pvc local-pvc-1', default=[], action='append')
arg('--external', help='Storage from external gluster, Example: --external gluster-node:/gluster-volname', default=None)
arg('--tiebreaker', help="If type is 'Replica2', one can have a tiebreaker node along with it. like '--tiebreaker tie-breaker-node-name:/data/tiebreaker'", default=None)
utils.add_global_flags(parser)
| 5,327,776,813,386,171,000
|
add arguments, and their options
|
cli/kubectl_kadalu/storage_add.py
|
set_args
|
Joibel/kadalu
|
python
|
def set_args(name, subparsers):
' '
parser = subparsers.add_parser(name)
arg = parser.add_argument
arg('name', help='Storage Name')
arg('--type', help='Storage Type', choices=['Replica1', 'Replica3', 'External', 'Replica2'], default=None)
arg('--device', help='Storage device in <node>:<device> format, Example: --device kube1.example.com:/dev/vdc', default=[], action='append')
arg('--path', help='Storage path in <node>:<path> format, Example: --path kube1.example.com:/exports/data', default=[], action='append')
arg('--pvc', help='Storage from pvc, Example: --pvc local-pvc-1', default=[], action='append')
arg('--external', help='Storage from external gluster, Example: --external gluster-node:/gluster-volname', default=None)
arg('--tiebreaker', help="If type is 'Replica2', one can have a tiebreaker node along with it. like '--tiebreaker tie-breaker-node-name:/data/tiebreaker'", default=None)
utils.add_global_flags(parser)
|
def validate(args):
' validate arguments '
if (args.external is not None):
if (args.type and (args.type != 'External')):
print("'--external' option is used only with '--type External'", file=sys.stderr)
sys.exit(1)
if (':' not in args.external):
print('Invalid external storage details. Please specify details in the format <node>:/<volname>', file=sys.stderr)
sys.exit(1)
args.type = 'External'
if args.tiebreaker:
if (args.type != 'Replica2'):
print("'--tiebreaker' option should be used only with type 'Replica2'", file=sys.stderr)
sys.exit(1)
if (':' not in args.tiebreaker):
print('Invalid tiebreaker details. Please specify details in the format <node>:/<path>', file=sys.stderr)
sys.exit(1)
else:
args.tiebreaker = 'tie-breaker.kadalu.io:/mnt'
if (not args.type):
args.type = 'Replica1'
num_storages = (((len(args.device) + len(args.path)) + len(args.pvc)) or (1 if (args.external is not None) else 0))
if (num_storages == 0):
print('Please specify at least one storage', file=sys.stderr)
sys.exit(1)
if (((args.type == 'Replica1') and (num_storages != 1)) or ((args.type == 'Replica2') and (num_storages != 2)) or ((args.type == 'Replica3') and (num_storages != 3))):
print(('Number of storages not matching for type=%s' % args.type), file=sys.stderr)
sys.exit(1)
kube_nodes = get_kube_nodes(args)
for dev in args.device:
if (':' not in dev):
print('Invalid storage device details. Please specify device details in the format <node>:<device>', file=sys.stderr)
sys.exit(1)
if ((not args.dry_run) and (dev.split(':')[0] not in kube_nodes)):
print(('Node name does not appear to be valid: ' + dev))
sys.exit(1)
for path in args.path:
if (':' not in path):
print('Invalid storage path details. Please specify path details in the format <node>:<path>', file=sys.stderr)
sys.exit(1)
if ((not args.dry_run) and (path.split(':')[0] not in kube_nodes)):
print(('Node name does not appear to be valid: ' + path))
sys.exit(1)
| 5,852,478,568,212,052,000
|
validate arguments
|
cli/kubectl_kadalu/storage_add.py
|
validate
|
Joibel/kadalu
|
python
|
def validate(args):
' '
if (args.external is not None):
if (args.type and (args.type != 'External')):
print("'--external' option is used only with '--type External'", file=sys.stderr)
sys.exit(1)
if (':' not in args.external):
print('Invalid external storage details. Please specify details in the format <node>:/<volname>', file=sys.stderr)
sys.exit(1)
args.type = 'External'
if args.tiebreaker:
if (args.type != 'Replica2'):
print("'--tiebreaker' option should be used only with type 'Replica2'", file=sys.stderr)
sys.exit(1)
if (':' not in args.tiebreaker):
print('Invalid tiebreaker details. Please specify details in the format <node>:/<path>', file=sys.stderr)
sys.exit(1)
else:
args.tiebreaker = 'tie-breaker.kadalu.io:/mnt'
if (not args.type):
args.type = 'Replica1'
num_storages = (((len(args.device) + len(args.path)) + len(args.pvc)) or (1 if (args.external is not None) else 0))
if (num_storages == 0):
print('Please specify at least one storage', file=sys.stderr)
sys.exit(1)
if (((args.type == 'Replica1') and (num_storages != 1)) or ((args.type == 'Replica2') and (num_storages != 2)) or ((args.type == 'Replica3') and (num_storages != 3))):
print(('Number of storages not matching for type=%s' % args.type), file=sys.stderr)
sys.exit(1)
kube_nodes = get_kube_nodes(args)
for dev in args.device:
if (':' not in dev):
print('Invalid storage device details. Please specify device details in the format <node>:<device>', file=sys.stderr)
sys.exit(1)
if ((not args.dry_run) and (dev.split(':')[0] not in kube_nodes)):
print(('Node name does not appear to be valid: ' + dev))
sys.exit(1)
for path in args.path:
if (':' not in path):
print('Invalid storage path details. Please specify path details in the format <node>:<path>', file=sys.stderr)
sys.exit(1)
if ((not args.dry_run) and (path.split(':')[0] not in kube_nodes)):
print(('Node name does not appear to be valid: ' + path))
sys.exit(1)
|
def get_kube_nodes(args):
' gets all nodes '
if args.dry_run:
return []
cmd = (utils.kubectl_cmd(args) + ['get', 'nodes', '-ojson'])
try:
resp = utils.execute(cmd)
data = json.loads(resp.stdout)
nodes = []
for nodedata in data['items']:
nodes.append(nodedata['metadata']['name'])
print(('The following nodes are available:\n %s' % ', '.join(nodes)))
print()
return nodes
except utils.CommandError as err:
utils.command_error(cmd, err.stderr)
except FileNotFoundError:
utils.kubectl_cmd_help(args.kubectl_cmd)
| -4,585,207,712,262,678,000
|
gets all nodes
|
cli/kubectl_kadalu/storage_add.py
|
get_kube_nodes
|
Joibel/kadalu
|
python
|
def get_kube_nodes(args):
' '
if args.dry_run:
return []
cmd = (utils.kubectl_cmd(args) + ['get', 'nodes', '-ojson'])
try:
resp = utils.execute(cmd)
data = json.loads(resp.stdout)
nodes = []
for nodedata in data['items']:
nodes.append(nodedata['metadata']['name'])
print(('The following nodes are available:\n %s' % ', '.join(nodes)))
print()
return nodes
except utils.CommandError as err:
utils.command_error(cmd, err.stderr)
except FileNotFoundError:
utils.kubectl_cmd_help(args.kubectl_cmd)
|
def storage_add_data(args):
' Build the config file '
content = {'apiVersion': 'kadalu-operator.storage/v1alpha1', 'kind': 'KadaluStorage', 'metadata': {'name': args.name}, 'spec': {'type': args.type, 'storage': []}}
if args.external:
(node, vol) = args.external.split(':')
content['spec']['details'] = [{'gluster_host': node, 'gluster_volname': vol.strip('/')}]
return content
if args.device:
for devdata in args.device:
(node, dev) = devdata.split(':')
content['spec']['storage'].append({'node': node, 'device': dev})
if args.path:
for pathdata in args.path:
(node, path) = pathdata.split(':')
content['spec']['storage'].append({'node': node, 'path': path})
if args.pvc:
for pvc in args.pvc:
content['spec']['storage'].append({'pvc': pvc})
if (args.type == 'Replica2'):
(node, path) = args.tiebreaker.split(':')
content['spec']['tiebreaker'] = {'node': node, 'path': path, 'port': 24007}
return content
| -2,107,132,921,841,534,200
|
Build the config file
|
cli/kubectl_kadalu/storage_add.py
|
storage_add_data
|
Joibel/kadalu
|
python
|
def storage_add_data(args):
' '
content = {'apiVersion': 'kadalu-operator.storage/v1alpha1', 'kind': 'KadaluStorage', 'metadata': {'name': args.name}, 'spec': {'type': args.type, 'storage': []}}
if args.external:
(node, vol) = args.external.split(':')
content['spec']['details'] = [{'gluster_host': node, 'gluster_volname': vol.strip('/')}]
return content
if args.device:
for devdata in args.device:
(node, dev) = devdata.split(':')
content['spec']['storage'].append({'node': node, 'device': dev})
if args.path:
for pathdata in args.path:
(node, path) = pathdata.split(':')
content['spec']['storage'].append({'node': node, 'path': path})
if args.pvc:
for pvc in args.pvc:
content['spec']['storage'].append({'pvc': pvc})
if (args.type == 'Replica2'):
(node, path) = args.tiebreaker.split(':')
content['spec']['tiebreaker'] = {'node': node, 'path': path, 'port': 24007}
return content
|
def run(args):
' Adds the subcommand arguments back to main CLI tool '
data = storage_add_data(args)
yaml_content = to_storage_yaml(data)
print('Storage Yaml file for your reference:\n')
print(yaml_content)
if args.dry_run:
return
if (not args.script_mode):
answer = ''
valid_answers = ['yes', 'no', 'n', 'y']
while (answer not in valid_answers):
answer = input('Is this correct?(Yes/No): ')
answer = answer.strip().lower()
if (answer in ['n', 'no']):
return
(config, tempfile_path) = tempfile.mkstemp(prefix='kadalu')
try:
with os.fdopen(config, 'w') as tmp:
tmp.write(yaml_content)
cmd = (utils.kubectl_cmd(args) + ['create', '-f', tempfile_path])
resp = utils.execute(cmd)
print('Storage add request sent successfully')
print(resp.stdout)
print()
except utils.CommandError as err:
os.remove(tempfile_path)
utils.command_error(cmd, err.stderr)
except FileNotFoundError:
os.remove(tempfile_path)
utils.kubectl_cmd_help(args.kubectl_cmd)
finally:
if os.path.exists(tempfile_path):
os.remove(tempfile_path)
| 8,097,305,963,234,970,000
|
Adds the subcommand arguments back to main CLI tool
|
cli/kubectl_kadalu/storage_add.py
|
run
|
Joibel/kadalu
|
python
|
def run(args):
' '
data = storage_add_data(args)
yaml_content = to_storage_yaml(data)
print('Storage Yaml file for your reference:\n')
print(yaml_content)
if args.dry_run:
return
if (not args.script_mode):
answer = ''
valid_answers = ['yes', 'no', 'n', 'y']
while (answer not in valid_answers):
answer = input('Is this correct?(Yes/No): ')
answer = answer.strip().lower()
if (answer in ['n', 'no']):
return
(config, tempfile_path) = tempfile.mkstemp(prefix='kadalu')
try:
with os.fdopen(config, 'w') as tmp:
tmp.write(yaml_content)
cmd = (utils.kubectl_cmd(args) + ['create', '-f', tempfile_path])
resp = utils.execute(cmd)
print('Storage add request sent successfully')
print(resp.stdout)
print()
except utils.CommandError as err:
os.remove(tempfile_path)
utils.command_error(cmd, err.stderr)
except FileNotFoundError:
os.remove(tempfile_path)
utils.kubectl_cmd_help(args.kubectl_cmd)
finally:
if os.path.exists(tempfile_path):
os.remove(tempfile_path)
|
def classproperty(func):
'The class property decorator'
if (not isinstance(func, (classmethod, staticmethod))):
func = classmethod(func)
return ClassPropertyDescriptor(func)
| 6,119,274,325,189,224,000
|
The class property decorator
|
pennylane/operation.py
|
classproperty
|
DanielPolatajko/pennylane
|
python
|
def classproperty(func):
if (not isinstance(func, (classmethod, staticmethod))):
func = classmethod(func)
return ClassPropertyDescriptor(func)
|
def operation_derivative(operation) -> np.ndarray:
'Calculate the derivative of an operation.\n\n For an operation :math:`e^{i \\hat{H} \\phi t}`, this function returns the matrix representation\n in the standard basis of its derivative with respect to :math:`t`, i.e.,\n\n .. math:: \\frac{d \\, e^{i \\hat{H} \\phi t}}{dt} = i \\phi \\hat{H} e^{i \\hat{H} \\phi t},\n\n where :math:`\\phi` is a real constant.\n\n Args:\n operation (.Operation): The operation to be differentiated.\n\n Returns:\n array: the derivative of the operation as a matrix in the standard basis\n\n Raises:\n ValueError: if the operation does not have a generator or is not composed of a single\n trainable parameter\n '
(generator, prefactor) = operation.generator
if (generator is None):
raise ValueError(f'Operation {operation.name} does not have a generator')
if (operation.num_params != 1):
raise ValueError(f'Operation {operation.name} is not written in terms of a single parameter')
if (not isinstance(generator, np.ndarray)):
generator = generator.matrix
if operation.inverse:
prefactor *= (- 1)
generator = generator.conj().T
return (((1j * prefactor) * generator) @ operation.matrix)
| -8,600,986,080,301,175,000
|
Calculate the derivative of an operation.
For an operation :math:`e^{i \hat{H} \phi t}`, this function returns the matrix representation
in the standard basis of its derivative with respect to :math:`t`, i.e.,
.. math:: \frac{d \, e^{i \hat{H} \phi t}}{dt} = i \phi \hat{H} e^{i \hat{H} \phi t},
where :math:`\phi` is a real constant.
Args:
operation (.Operation): The operation to be differentiated.
Returns:
array: the derivative of the operation as a matrix in the standard basis
Raises:
ValueError: if the operation does not have a generator or is not composed of a single
trainable parameter
|
pennylane/operation.py
|
operation_derivative
|
DanielPolatajko/pennylane
|
python
|
def operation_derivative(operation) -> np.ndarray:
'Calculate the derivative of an operation.\n\n For an operation :math:`e^{i \\hat{H} \\phi t}`, this function returns the matrix representation\n in the standard basis of its derivative with respect to :math:`t`, i.e.,\n\n .. math:: \\frac{d \\, e^{i \\hat{H} \\phi t}}{dt} = i \\phi \\hat{H} e^{i \\hat{H} \\phi t},\n\n where :math:`\\phi` is a real constant.\n\n Args:\n operation (.Operation): The operation to be differentiated.\n\n Returns:\n array: the derivative of the operation as a matrix in the standard basis\n\n Raises:\n ValueError: if the operation does not have a generator or is not composed of a single\n trainable parameter\n '
(generator, prefactor) = operation.generator
if (generator is None):
raise ValueError(f'Operation {operation.name} does not have a generator')
if (operation.num_params != 1):
raise ValueError(f'Operation {operation.name} is not written in terms of a single parameter')
if (not isinstance(generator, np.ndarray)):
generator = generator.matrix
if operation.inverse:
prefactor *= (- 1)
generator = generator.conj().T
return (((1j * prefactor) * generator) @ operation.matrix)
|
def __repr__(self):
'String representation of the return types.'
return str(self.value)
| -4,615,006,713,613,078,000
|
String representation of the return types.
|
pennylane/operation.py
|
__repr__
|
DanielPolatajko/pennylane
|
python
|
def __repr__(self):
return str(self.value)
|
def setter(self, func):
'Set the function as a class method, and store as an attribute.'
if (not isinstance(func, (classmethod, staticmethod))):
func = classmethod(func)
self.fset = func
return self
| -3,963,284,430,144,288,300
|
Set the function as a class method, and store as an attribute.
|
pennylane/operation.py
|
setter
|
DanielPolatajko/pennylane
|
python
|
def setter(self, func):
if (not isinstance(func, (classmethod, staticmethod))):
func = classmethod(func)
self.fset = func
return self
|
@classmethod
def _matrix(cls, *params):
'Matrix representation of the operator\n in the computational basis.\n\n This is a *class method* that should be defined for all\n new operations and observables, that returns the matrix representing\n the operator in the computational basis.\n\n This private method allows matrices to be computed\n directly without instantiating the operators first.\n\n To return the matrices of *instantiated* operators,\n please use the :attr:`~.Operator.matrix` property instead.\n\n **Example:**\n\n >>> qml.RY._matrix(0.5)\n >>> array([[ 0.96891242+0.j, -0.24740396+0.j],\n [ 0.24740396+0.j, 0.96891242+0.j]])\n\n Returns:\n array: matrix representation\n '
raise NotImplementedError
| -5,043,636,409,798,113,000
|
Matrix representation of the operator
in the computational basis.
This is a *class method* that should be defined for all
new operations and observables, that returns the matrix representing
the operator in the computational basis.
This private method allows matrices to be computed
directly without instantiating the operators first.
To return the matrices of *instantiated* operators,
please use the :attr:`~.Operator.matrix` property instead.
**Example:**
>>> qml.RY._matrix(0.5)
>>> array([[ 0.96891242+0.j, -0.24740396+0.j],
[ 0.24740396+0.j, 0.96891242+0.j]])
Returns:
array: matrix representation
|
pennylane/operation.py
|
_matrix
|
DanielPolatajko/pennylane
|
python
|
@classmethod
def _matrix(cls, *params):
'Matrix representation of the operator\n in the computational basis.\n\n This is a *class method* that should be defined for all\n new operations and observables, that returns the matrix representing\n the operator in the computational basis.\n\n This private method allows matrices to be computed\n directly without instantiating the operators first.\n\n To return the matrices of *instantiated* operators,\n please use the :attr:`~.Operator.matrix` property instead.\n\n **Example:**\n\n >>> qml.RY._matrix(0.5)\n >>> array([[ 0.96891242+0.j, -0.24740396+0.j],\n [ 0.24740396+0.j, 0.96891242+0.j]])\n\n Returns:\n array: matrix representation\n '
raise NotImplementedError
|
@property
def matrix(self):
'Matrix representation of an instantiated operator\n in the computational basis.\n\n **Example:**\n\n >>> U = qml.RY(0.5, wires=1)\n >>> U.matrix\n >>> array([[ 0.96891242+0.j, -0.24740396+0.j],\n [ 0.24740396+0.j, 0.96891242+0.j]])\n\n Returns:\n array: matrix representation\n '
return self._matrix(*self.parameters)
| 2,358,462,507,991,877,600
|
Matrix representation of an instantiated operator
in the computational basis.
**Example:**
>>> U = qml.RY(0.5, wires=1)
>>> U.matrix
>>> array([[ 0.96891242+0.j, -0.24740396+0.j],
[ 0.24740396+0.j, 0.96891242+0.j]])
Returns:
array: matrix representation
|
pennylane/operation.py
|
matrix
|
DanielPolatajko/pennylane
|
python
|
@property
def matrix(self):
'Matrix representation of an instantiated operator\n in the computational basis.\n\n **Example:**\n\n >>> U = qml.RY(0.5, wires=1)\n >>> U.matrix\n >>> array([[ 0.96891242+0.j, -0.24740396+0.j],\n [ 0.24740396+0.j, 0.96891242+0.j]])\n\n Returns:\n array: matrix representation\n '
return self._matrix(*self.parameters)
|
@classmethod
def _eigvals(cls, *params):
'Eigenvalues of the operator.\n\n This is a *class method* that should be defined for all\n new operations and observables that returns the eigenvalues\n of the operator. Note that the eigenvalues are not guaranteed\n to be in any particular order.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n The default implementation relies on the presence of the\n :attr:`_matrix` method.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.RZ._eigvals(0.5)\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigenvalue representation\n '
return np.linalg.eigvals(cls._matrix(*params))
| 7,738,007,183,040,574,000
|
Eigenvalues of the operator.
This is a *class method* that should be defined for all
new operations and observables that returns the eigenvalues
of the operator. Note that the eigenvalues are not guaranteed
to be in any particular order.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
The default implementation relies on the presence of the
:attr:`_matrix` method.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
|
pennylane/operation.py
|
_eigvals
|
DanielPolatajko/pennylane
|
python
|
@classmethod
def _eigvals(cls, *params):
'Eigenvalues of the operator.\n\n This is a *class method* that should be defined for all\n new operations and observables that returns the eigenvalues\n of the operator. Note that the eigenvalues are not guaranteed\n to be in any particular order.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n The default implementation relies on the presence of the\n :attr:`_matrix` method.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.RZ._eigvals(0.5)\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigenvalue representation\n '
return np.linalg.eigvals(cls._matrix(*params))
|
@property
def eigvals(self):
'Eigenvalues of an instantiated operator.\n\n Note that the eigenvalues are not guaranteed to be in any\n particular order.\n\n **Example:**\n\n >>> U = qml.RZ(0.5, wires=1)\n >>> U.eigvals\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigvals representation\n '
return self._eigvals(*self.parameters)
| 651,473,486,978,879,200
|
Eigenvalues of an instantiated operator.
Note that the eigenvalues are not guaranteed to be in any
particular order.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
|
pennylane/operation.py
|
eigvals
|
DanielPolatajko/pennylane
|
python
|
@property
def eigvals(self):
'Eigenvalues of an instantiated operator.\n\n Note that the eigenvalues are not guaranteed to be in any\n particular order.\n\n **Example:**\n\n >>> U = qml.RZ(0.5, wires=1)\n >>> U.eigvals\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigvals representation\n '
return self._eigvals(*self.parameters)
|
@property
@abc.abstractmethod
def num_params(self):
'Number of parameters the operator takes.'
| 6,256,602,671,239,270,000
|
Number of parameters the operator takes.
|
pennylane/operation.py
|
num_params
|
DanielPolatajko/pennylane
|
python
|
@property
@abc.abstractmethod
def num_params(self):
|
@property
@abc.abstractmethod
def num_wires(self):
'Number of wires the operator acts on.'
| 8,770,263,230,962,755,000
|
Number of wires the operator acts on.
|
pennylane/operation.py
|
num_wires
|
DanielPolatajko/pennylane
|
python
|
@property
@abc.abstractmethod
def num_wires(self):
|
@property
@abc.abstractmethod
def par_domain(self):
"Domain of the gate parameters.\n\n * ``'N'``: natural numbers (including zero).\n * ``'R'``: floats.\n * ``'A'``: arrays of real or complex values.\n * ``'L'``: list of arrays of real or complex values.\n * ``None``: if there are no parameters.\n "
| -4,584,944,648,093,374,500
|
Domain of the gate parameters.
* ``'N'``: natural numbers (including zero).
* ``'R'``: floats.
* ``'A'``: arrays of real or complex values.
* ``'L'``: list of arrays of real or complex values.
* ``None``: if there are no parameters.
|
pennylane/operation.py
|
par_domain
|
DanielPolatajko/pennylane
|
python
|
@property
@abc.abstractmethod
def par_domain(self):
"Domain of the gate parameters.\n\n * ``'N'``: natural numbers (including zero).\n * ``'R'``: floats.\n * ``'A'``: arrays of real or complex values.\n * ``'L'``: list of arrays of real or complex values.\n * ``None``: if there are no parameters.\n "
|
@property
def name(self):
'String for the name of the operator.'
return self._name
| -1,300,107,690,590,741,200
|
String for the name of the operator.
|
pennylane/operation.py
|
name
|
DanielPolatajko/pennylane
|
python
|
@property
def name(self):
return self._name
|
def __repr__(self):
'Constructor-call-like representation.'
if self.parameters:
params = ', '.join([repr(p) for p in self.parameters])
return '{}({}, wires={})'.format(self.name, params, self.wires.tolist())
return '{}(wires={})'.format(self.name, self.wires.tolist())
| 3,506,241,360,823,687,000
|
Constructor-call-like representation.
|
pennylane/operation.py
|
__repr__
|
DanielPolatajko/pennylane
|
python
|
def __repr__(self):
if self.parameters:
params = ', '.join([repr(p) for p in self.parameters])
return '{}({}, wires={})'.format(self.name, params, self.wires.tolist())
return '{}(wires={})'.format(self.name, self.wires.tolist())
|
def check_domain(self, p, flattened=False):
"Check the validity of a parameter.\n\n :class:`.Variable` instances can represent any real scalars (but not arrays).\n\n Args:\n p (Number, array, Variable): parameter to check\n flattened (bool): True means p is an element of a flattened parameter\n sequence (affects the handling of 'A' parameters)\n Raises:\n TypeError: parameter is not an element of the expected domain\n ValueError: parameter is an element of an unknown domain\n Returns:\n Number, array, Variable: p\n "
if (isinstance(p, np.ndarray) and (p.ndim == 0)):
p = p.item()
if isinstance(p, Variable):
if (self.par_domain == 'A'):
raise TypeError('{}: Array parameter expected, got a Variable, which can only represent real scalars.'.format(self.name))
return p
if (self.par_domain == 'A'):
if flattened:
if isinstance(p, np.ndarray):
raise TypeError('{}: Flattened array parameter expected, got {}.'.format(self.name, type(p)))
elif (not isinstance(p, np.ndarray)):
raise TypeError('{}: Array parameter expected, got {}.'.format(self.name, type(p)))
elif (self.par_domain in ('R', 'N')):
if (not isinstance(p, numbers.Real)):
raise TypeError('{}: Real scalar parameter expected, got {}.'.format(self.name, type(p)))
if (self.par_domain == 'N'):
if (not isinstance(p, numbers.Integral)):
raise TypeError('{}: Natural number parameter expected, got {}.'.format(self.name, type(p)))
if (p < 0):
raise TypeError('{}: Natural number parameter expected, got {}.'.format(self.name, p))
elif (self.par_domain == 'L'):
if (not isinstance(p, list)):
raise TypeError('{}: List parameter expected, got {}.'.format(self.name, type(p)))
if (not all((isinstance(elem, np.ndarray) for elem in p))):
raise TypeError('List elements must be Numpy arrays.')
else:
raise ValueError("{}: Unknown parameter domain '{}'.".format(self.name, self.par_domain))
return p
| -5,429,508,503,588,694,000
|
Check the validity of a parameter.
:class:`.Variable` instances can represent any real scalars (but not arrays).
Args:
p (Number, array, Variable): parameter to check
flattened (bool): True means p is an element of a flattened parameter
sequence (affects the handling of 'A' parameters)
Raises:
TypeError: parameter is not an element of the expected domain
ValueError: parameter is an element of an unknown domain
Returns:
Number, array, Variable: p
|
pennylane/operation.py
|
check_domain
|
DanielPolatajko/pennylane
|
python
|
def check_domain(self, p, flattened=False):
"Check the validity of a parameter.\n\n :class:`.Variable` instances can represent any real scalars (but not arrays).\n\n Args:\n p (Number, array, Variable): parameter to check\n flattened (bool): True means p is an element of a flattened parameter\n sequence (affects the handling of 'A' parameters)\n Raises:\n TypeError: parameter is not an element of the expected domain\n ValueError: parameter is an element of an unknown domain\n Returns:\n Number, array, Variable: p\n "
if (isinstance(p, np.ndarray) and (p.ndim == 0)):
p = p.item()
if isinstance(p, Variable):
if (self.par_domain == 'A'):
raise TypeError('{}: Array parameter expected, got a Variable, which can only represent real scalars.'.format(self.name))
return p
if (self.par_domain == 'A'):
if flattened:
if isinstance(p, np.ndarray):
raise TypeError('{}: Flattened array parameter expected, got {}.'.format(self.name, type(p)))
elif (not isinstance(p, np.ndarray)):
raise TypeError('{}: Array parameter expected, got {}.'.format(self.name, type(p)))
elif (self.par_domain in ('R', 'N')):
if (not isinstance(p, numbers.Real)):
raise TypeError('{}: Real scalar parameter expected, got {}.'.format(self.name, type(p)))
if (self.par_domain == 'N'):
if (not isinstance(p, numbers.Integral)):
raise TypeError('{}: Natural number parameter expected, got {}.'.format(self.name, type(p)))
if (p < 0):
raise TypeError('{}: Natural number parameter expected, got {}.'.format(self.name, p))
elif (self.par_domain == 'L'):
if (not isinstance(p, list)):
raise TypeError('{}: List parameter expected, got {}.'.format(self.name, type(p)))
if (not all((isinstance(elem, np.ndarray) for elem in p))):
raise TypeError('List elements must be Numpy arrays.')
else:
raise ValueError("{}: Unknown parameter domain '{}'.".format(self.name, self.par_domain))
return p
|
@property
def wires(self):
'Wires of this operator.\n\n Returns:\n Wires: wires\n '
return self._wires
| -6,546,364,515,445,172,000
|
Wires of this operator.
Returns:
Wires: wires
|
pennylane/operation.py
|
wires
|
DanielPolatajko/pennylane
|
python
|
@property
def wires(self):
'Wires of this operator.\n\n Returns:\n Wires: wires\n '
return self._wires
|
@property
def parameters(self):
'Current parameter values.\n\n Fixed parameters are returned as is, free parameters represented by\n :class:`.Variable` instances are replaced by their\n current numerical value.\n\n Returns:\n list[Any]: parameter values\n '
def evaluate(p):
'Evaluate a single parameter.'
if isinstance(p, np.ndarray):
if (p.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in p.flat])
return temp.reshape(p.shape)
return p
if isinstance(p, list):
evaled_list = []
for arr in p:
if (arr.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in arr.flat])
evaled_list.append(temp.reshape(arr.shape))
return evaled_list
return p
if isinstance(p, Variable):
p = self.check_domain(p.val)
return p
return [evaluate(p) for p in self.data]
| 2,319,366,755,312,764,000
|
Current parameter values.
Fixed parameters are returned as is, free parameters represented by
:class:`.Variable` instances are replaced by their
current numerical value.
Returns:
list[Any]: parameter values
|
pennylane/operation.py
|
parameters
|
DanielPolatajko/pennylane
|
python
|
@property
def parameters(self):
'Current parameter values.\n\n Fixed parameters are returned as is, free parameters represented by\n :class:`.Variable` instances are replaced by their\n current numerical value.\n\n Returns:\n list[Any]: parameter values\n '
def evaluate(p):
'Evaluate a single parameter.'
if isinstance(p, np.ndarray):
if (p.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in p.flat])
return temp.reshape(p.shape)
return p
if isinstance(p, list):
evaled_list = []
for arr in p:
if (arr.dtype == object):
temp = np.array([(x.val if isinstance(x, Variable) else x) for x in arr.flat])
evaled_list.append(temp.reshape(arr.shape))
return evaled_list
return p
if isinstance(p, Variable):
p = self.check_domain(p.val)
return p
return [evaluate(p) for p in self.data]
|
def queue(self):
'Append the operator to the Operator queue.'
qml.QueuingContext.append(self)
return self
| -5,874,889,856,372,081,000
|
Append the operator to the Operator queue.
|
pennylane/operation.py
|
queue
|
DanielPolatajko/pennylane
|
python
|
def queue(self):
qml.QueuingContext.append(self)
return self
|
@property
def grad_method(self):
"Gradient computation method.\n\n * ``'A'``: analytic differentiation using the parameter-shift method.\n * ``'F'``: finite difference numerical differentiation.\n * ``None``: the operation may not be differentiated.\n\n Default is ``'F'``, or ``None`` if the Operation has zero parameters.\n "
return (None if (self.num_params == 0) else 'F')
| 1,339,282,163,917,399,600
|
Gradient computation method.
* ``'A'``: analytic differentiation using the parameter-shift method.
* ``'F'``: finite difference numerical differentiation.
* ``None``: the operation may not be differentiated.
Default is ``'F'``, or ``None`` if the Operation has zero parameters.
|
pennylane/operation.py
|
grad_method
|
DanielPolatajko/pennylane
|
python
|
@property
def grad_method(self):
"Gradient computation method.\n\n * ``'A'``: analytic differentiation using the parameter-shift method.\n * ``'F'``: finite difference numerical differentiation.\n * ``None``: the operation may not be differentiated.\n\n Default is ``'F'``, or ``None`` if the Operation has zero parameters.\n "
return (None if (self.num_params == 0) else 'F')
|
def get_parameter_shift(self, idx, shift=(np.pi / 2)):
'Multiplier and shift for the given parameter, based on its gradient recipe.\n\n Args:\n idx (int): parameter index\n\n Returns:\n float, float: multiplier, shift\n '
recipe = self.grad_recipe[idx]
multiplier = (0.5 / np.sin(shift))
a = 1
default_param_shift = [[multiplier, a, shift], [(- multiplier), a, (- shift)]]
param_shift = (default_param_shift if (recipe is None) else recipe)
if hasattr(self.data[idx], 'mult'):
var_mult = self.data[idx].mult
for elem in param_shift:
elem[0] *= var_mult
if (var_mult != 0):
elem[2] /= var_mult
return param_shift
| 1,544,099,430,586,102,000
|
Multiplier and shift for the given parameter, based on its gradient recipe.
Args:
idx (int): parameter index
Returns:
float, float: multiplier, shift
|
pennylane/operation.py
|
get_parameter_shift
|
DanielPolatajko/pennylane
|
python
|
def get_parameter_shift(self, idx, shift=(np.pi / 2)):
'Multiplier and shift for the given parameter, based on its gradient recipe.\n\n Args:\n idx (int): parameter index\n\n Returns:\n float, float: multiplier, shift\n '
recipe = self.grad_recipe[idx]
multiplier = (0.5 / np.sin(shift))
a = 1
default_param_shift = [[multiplier, a, shift], [(- multiplier), a, (- shift)]]
param_shift = (default_param_shift if (recipe is None) else recipe)
if hasattr(self.data[idx], 'mult'):
var_mult = self.data[idx].mult
for elem in param_shift:
elem[0] *= var_mult
if (var_mult != 0):
elem[2] /= var_mult
return param_shift
|
@property
def generator(self):
'Generator of the operation.\n\n A length-2 list ``[generator, scaling_factor]``, where\n\n * ``generator`` is an existing PennyLane\n operation class or :math:`2\\times 2` Hermitian array\n that acts as the generator of the current operation\n\n * ``scaling_factor`` represents a scaling factor applied\n to the generator operation\n\n For example, if :math:`U(\\theta)=e^{i0.7\\theta \\sigma_x}`, then\n :math:`\\sigma_x`, with scaling factor :math:`s`, is the generator\n of operator :math:`U(\\theta)`:\n\n .. code-block:: python\n\n generator = [PauliX, 0.7]\n\n Default is ``[None, 1]``, indicating the operation has no generator.\n '
return [None, 1]
| 7,035,912,216,515,094,000
|
Generator of the operation.
A length-2 list ``[generator, scaling_factor]``, where
* ``generator`` is an existing PennyLane
operation class or :math:`2\times 2` Hermitian array
that acts as the generator of the current operation
* ``scaling_factor`` represents a scaling factor applied
to the generator operation
For example, if :math:`U(\theta)=e^{i0.7\theta \sigma_x}`, then
:math:`\sigma_x`, with scaling factor :math:`s`, is the generator
of operator :math:`U(\theta)`:
.. code-block:: python
generator = [PauliX, 0.7]
Default is ``[None, 1]``, indicating the operation has no generator.
|
pennylane/operation.py
|
generator
|
DanielPolatajko/pennylane
|
python
|
@property
def generator(self):
'Generator of the operation.\n\n A length-2 list ``[generator, scaling_factor]``, where\n\n * ``generator`` is an existing PennyLane\n operation class or :math:`2\\times 2` Hermitian array\n that acts as the generator of the current operation\n\n * ``scaling_factor`` represents a scaling factor applied\n to the generator operation\n\n For example, if :math:`U(\\theta)=e^{i0.7\\theta \\sigma_x}`, then\n :math:`\\sigma_x`, with scaling factor :math:`s`, is the generator\n of operator :math:`U(\\theta)`:\n\n .. code-block:: python\n\n generator = [PauliX, 0.7]\n\n Default is ``[None, 1]``, indicating the operation has no generator.\n '
return [None, 1]
|
@property
def inverse(self):
'Boolean determining if the inverse of the operation was requested.'
return self._inverse
| 5,439,164,993,912,595,000
|
Boolean determining if the inverse of the operation was requested.
|
pennylane/operation.py
|
inverse
|
DanielPolatajko/pennylane
|
python
|
@property
def inverse(self):
return self._inverse
|
@staticmethod
def decomposition(*params, wires):
'Returns a template decomposing the operation into other\n quantum operations.'
raise NotImplementedError
| -1,747,484,373,177,490,700
|
Returns a template decomposing the operation into other
quantum operations.
|
pennylane/operation.py
|
decomposition
|
DanielPolatajko/pennylane
|
python
|
@staticmethod
def decomposition(*params, wires):
'Returns a template decomposing the operation into other\n quantum operations.'
raise NotImplementedError
|
def inv(self):
'Inverts the operation, such that the inverse will\n be used for the computations by the specific device.\n\n This method concatenates a string to the name of the operation,\n to indicate that the inverse will be used for computations.\n\n Any subsequent call of this method will toggle between the original\n operation and the inverse of the operation.\n\n Returns:\n :class:`Operator`: operation to be inverted\n '
self.inverse = (not self._inverse)
return self
| 6,227,083,401,545,439,000
|
Inverts the operation, such that the inverse will
be used for the computations by the specific device.
This method concatenates a string to the name of the operation,
to indicate that the inverse will be used for computations.
Any subsequent call of this method will toggle between the original
operation and the inverse of the operation.
Returns:
:class:`Operator`: operation to be inverted
|
pennylane/operation.py
|
inv
|
DanielPolatajko/pennylane
|
python
|
def inv(self):
'Inverts the operation, such that the inverse will\n be used for the computations by the specific device.\n\n This method concatenates a string to the name of the operation,\n to indicate that the inverse will be used for computations.\n\n Any subsequent call of this method will toggle between the original\n operation and the inverse of the operation.\n\n Returns:\n :class:`Operator`: operation to be inverted\n '
self.inverse = (not self._inverse)
return self
|
@property
def base_name(self):
'Get base name of the operator.'
return self.__class__.__name__
| -7,614,776,445,865,225,000
|
Get base name of the operator.
|
pennylane/operation.py
|
base_name
|
DanielPolatajko/pennylane
|
python
|
@property
def base_name(self):
return self.__class__.__name__
|
@property
def name(self):
'Get and set the name of the operator.'
return ((self._name + Operation.string_for_inverse) if self.inverse else self._name)
| 2,276,986,341,692,210,000
|
Get and set the name of the operator.
|
pennylane/operation.py
|
name
|
DanielPolatajko/pennylane
|
python
|
@property
def name(self):
return ((self._name + Operation.string_for_inverse) if self.inverse else self._name)
|
@classmethod
def _eigvals(cls, *params):
'Eigenvalues of the operator.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors.\n\n This is a *class method* that must be defined for all\n new diagonal operations, that returns the eigenvalues\n of the operator in the computational basis.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.RZ._eigvals(0.5)\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigenvalue representation\n '
raise NotImplementedError
| -2,015,392,278,655,215,600
|
Eigenvalues of the operator.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
|
pennylane/operation.py
|
_eigvals
|
DanielPolatajko/pennylane
|
python
|
@classmethod
def _eigvals(cls, *params):
'Eigenvalues of the operator.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors.\n\n This is a *class method* that must be defined for all\n new diagonal operations, that returns the eigenvalues\n of the operator in the computational basis.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.RZ._eigvals(0.5)\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigenvalue representation\n '
raise NotImplementedError
|
@property
def eigvals(self):
'Eigenvalues of an instantiated diagonal operation.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors.\n\n **Example:**\n\n >>> U = qml.RZ(0.5, wires=1)\n >>> U.eigvals\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigvals representation\n '
return super().eigvals
| -7,338,995,670,297,597,000
|
Eigenvalues of an instantiated diagonal operation.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
|
pennylane/operation.py
|
eigvals
|
DanielPolatajko/pennylane
|
python
|
@property
def eigvals(self):
'Eigenvalues of an instantiated diagonal operation.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors.\n\n **Example:**\n\n >>> U = qml.RZ(0.5, wires=1)\n >>> U.eigvals\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigvals representation\n '
return super().eigvals
|
@classmethod
@abc.abstractmethod
def _kraus_matrices(cls, *params):
'Kraus matrices representing a quantum channel, specified in\n the computational basis.\n\n This is a class method that should be defined for all\n new channels. It returns the Kraus matrices representing\n the channel in the computational basis.\n\n This private method allows matrices to be computed\n directly without instantiating the channel first.\n\n **Example**\n\n >>> qml.AmplitudeDamping._kraus_matrices(0.1)\n >>> [array([[1. , 0. ],\n [0. , 0.9486833]]), array([[0. , 0.31622777],\n [0. , 0. ]])]\n\n To return the Kraus matrices of an *instantiated* channel,\n please use the :attr:`~.Operator.kraus_matrices` property instead.\n\n Returns:\n list(array): list of Kraus matrices\n '
raise NotImplementedError
| 3,718,151,499,255,414,300
|
Kraus matrices representing a quantum channel, specified in
the computational basis.
This is a class method that should be defined for all
new channels. It returns the Kraus matrices representing
the channel in the computational basis.
This private method allows matrices to be computed
directly without instantiating the channel first.
**Example**
>>> qml.AmplitudeDamping._kraus_matrices(0.1)
>>> [array([[1. , 0. ],
[0. , 0.9486833]]), array([[0. , 0.31622777],
[0. , 0. ]])]
To return the Kraus matrices of an *instantiated* channel,
please use the :attr:`~.Operator.kraus_matrices` property instead.
Returns:
list(array): list of Kraus matrices
|
pennylane/operation.py
|
_kraus_matrices
|
DanielPolatajko/pennylane
|
python
|
@classmethod
@abc.abstractmethod
def _kraus_matrices(cls, *params):
'Kraus matrices representing a quantum channel, specified in\n the computational basis.\n\n This is a class method that should be defined for all\n new channels. It returns the Kraus matrices representing\n the channel in the computational basis.\n\n This private method allows matrices to be computed\n directly without instantiating the channel first.\n\n **Example**\n\n >>> qml.AmplitudeDamping._kraus_matrices(0.1)\n >>> [array([[1. , 0. ],\n [0. , 0.9486833]]), array([[0. , 0.31622777],\n [0. , 0. ]])]\n\n To return the Kraus matrices of an *instantiated* channel,\n please use the :attr:`~.Operator.kraus_matrices` property instead.\n\n Returns:\n list(array): list of Kraus matrices\n '
raise NotImplementedError
|
@property
def kraus_matrices(self):
'Kraus matrices of an instantiated channel\n in the computational basis.\n\n ** Example**\n\n >>> U = qml.AmplitudeDamping(0.1, wires=1)\n >>> U.kraus_matrices\n >>> [array([[1. , 0. ],\n [0. , 0.9486833]]), array([[0. , 0.31622777],\n [0. , 0. ]])]\n\n Returns:\n list(array): list of Kraus matrices\n '
return self._kraus_matrices(*self.parameters)
| -1,815,043,535,912,530,200
|
Kraus matrices of an instantiated channel
in the computational basis.
** Example**
>>> U = qml.AmplitudeDamping(0.1, wires=1)
>>> U.kraus_matrices
>>> [array([[1. , 0. ],
[0. , 0.9486833]]), array([[0. , 0.31622777],
[0. , 0. ]])]
Returns:
list(array): list of Kraus matrices
|
pennylane/operation.py
|
kraus_matrices
|
DanielPolatajko/pennylane
|
python
|
@property
def kraus_matrices(self):
'Kraus matrices of an instantiated channel\n in the computational basis.\n\n ** Example**\n\n >>> U = qml.AmplitudeDamping(0.1, wires=1)\n >>> U.kraus_matrices\n >>> [array([[1. , 0. ],\n [0. , 0.9486833]]), array([[0. , 0.31622777],\n [0. , 0. ]])]\n\n Returns:\n list(array): list of Kraus matrices\n '
return self._kraus_matrices(*self.parameters)
|
@classmethod
def _eigvals(cls, *params):
'Eigenvalues of the observable.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors when the observable is\n diagonalized using :attr:`diagonalizing_gates`.\n\n This is a *class method* that must be defined for all\n new diagonal operations, that returns the eigenvalues\n of the operator in the computational basis.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.PauliZ._eigvals()\n >>> array([1, -1])\n\n Returns:\n array: eigenvalue representation\n '
raise NotImplementedError
| 6,488,598,606,783,885,000
|
Eigenvalues of the observable.
The order of the eigenvalues needs to match the order of
the computational basis vectors when the observable is
diagonalized using :attr:`diagonalizing_gates`.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.PauliZ._eigvals()
>>> array([1, -1])
Returns:
array: eigenvalue representation
|
pennylane/operation.py
|
_eigvals
|
DanielPolatajko/pennylane
|
python
|
@classmethod
def _eigvals(cls, *params):
'Eigenvalues of the observable.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors when the observable is\n diagonalized using :attr:`diagonalizing_gates`.\n\n This is a *class method* that must be defined for all\n new diagonal operations, that returns the eigenvalues\n of the operator in the computational basis.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.PauliZ._eigvals()\n >>> array([1, -1])\n\n Returns:\n array: eigenvalue representation\n '
raise NotImplementedError
|
@property
def eigvals(self):
'Eigenvalues of an instantiated observable.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors when the observable is\n diagonalized using :attr:`diagonalizing_gates`. This is a\n requirement for using qubit observables in quantum functions.\n\n **Example:**\n\n >>> U = qml.PauliZ(wires=1)\n >>> U.eigvals\n >>> array([1, -1])\n\n Returns:\n array: eigvals representation\n '
return super().eigvals
| -246,418,316,113,690,080
|
Eigenvalues of an instantiated observable.
The order of the eigenvalues needs to match the order of
the computational basis vectors when the observable is
diagonalized using :attr:`diagonalizing_gates`. This is a
requirement for using qubit observables in quantum functions.
**Example:**
>>> U = qml.PauliZ(wires=1)
>>> U.eigvals
>>> array([1, -1])
Returns:
array: eigvals representation
|
pennylane/operation.py
|
eigvals
|
DanielPolatajko/pennylane
|
python
|
@property
def eigvals(self):
'Eigenvalues of an instantiated observable.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors when the observable is\n diagonalized using :attr:`diagonalizing_gates`. This is a\n requirement for using qubit observables in quantum functions.\n\n **Example:**\n\n >>> U = qml.PauliZ(wires=1)\n >>> U.eigvals\n >>> array([1, -1])\n\n Returns:\n array: eigvals representation\n '
return super().eigvals
|
def __repr__(self):
'Constructor-call-like representation.'
temp = super().__repr__()
if (self.return_type is None):
return temp
if (self.return_type is Probability):
return (repr(self.return_type) + '(wires={})'.format(self.wires.tolist()))
return (((repr(self.return_type) + '(') + temp) + ')')
| -2,273,162,458,762,781,000
|
Constructor-call-like representation.
|
pennylane/operation.py
|
__repr__
|
DanielPolatajko/pennylane
|
python
|
def __repr__(self):
temp = super().__repr__()
if (self.return_type is None):
return temp
if (self.return_type is Probability):
return (repr(self.return_type) + '(wires={})'.format(self.wires.tolist()))
return (((repr(self.return_type) + '(') + temp) + ')')
|
def _obs_data(self):
'Extracts the data from a Observable or Tensor and serializes it in an order-independent fashion.\n\n This allows for comparison between observables that are equivalent, but are expressed\n in different orders. For example, `qml.PauliX(0) @ qml.PauliZ(1)` and\n `qml.PauliZ(1) @ qml.PauliX(0)` are equivalent observables with different orderings.\n\n **Example**\n\n >>> tensor = qml.PauliX(0) @ qml.PauliZ(1)\n >>> print(tensor._obs_data())\n {("PauliZ", <Wires = [1]>, ()), ("PauliX", <Wires = [0]>, ())}\n '
obs = Tensor(self).non_identity_obs
tensor = set()
for ob in obs:
parameters = tuple((param.tostring() for param in ob.parameters))
tensor.add((ob.name, ob.wires, parameters))
return tensor
| -8,014,557,137,972,118,000
|
Extracts the data from a Observable or Tensor and serializes it in an order-independent fashion.
This allows for comparison between observables that are equivalent, but are expressed
in different orders. For example, `qml.PauliX(0) @ qml.PauliZ(1)` and
`qml.PauliZ(1) @ qml.PauliX(0)` are equivalent observables with different orderings.
**Example**
>>> tensor = qml.PauliX(0) @ qml.PauliZ(1)
>>> print(tensor._obs_data())
{("PauliZ", <Wires = [1]>, ()), ("PauliX", <Wires = [0]>, ())}
|
pennylane/operation.py
|
_obs_data
|
DanielPolatajko/pennylane
|
python
|
def _obs_data(self):
'Extracts the data from a Observable or Tensor and serializes it in an order-independent fashion.\n\n This allows for comparison between observables that are equivalent, but are expressed\n in different orders. For example, `qml.PauliX(0) @ qml.PauliZ(1)` and\n `qml.PauliZ(1) @ qml.PauliX(0)` are equivalent observables with different orderings.\n\n **Example**\n\n >>> tensor = qml.PauliX(0) @ qml.PauliZ(1)\n >>> print(tensor._obs_data())\n {("PauliZ", <Wires = [1]>, ()), ("PauliX", <Wires = [0]>, ())}\n '
obs = Tensor(self).non_identity_obs
tensor = set()
for ob in obs:
parameters = tuple((param.tostring() for param in ob.parameters))
tensor.add((ob.name, ob.wires, parameters))
return tensor
|
def compare(self, other):
'Compares with another :class:`~.Hamiltonian`, :class:`~Tensor`, or :class:`~Observable`,\n to determine if they are equivalent.\n\n Observables/Hamiltonians are equivalent if they represent the same operator\n (their matrix representations are equal), and they are defined on the same wires.\n\n .. Warning::\n\n The compare method does **not** check if the matrix representation\n of a :class:`~.Hermitian` observable is equal to an equivalent\n observable expressed in terms of Pauli matrices.\n To do so would require the matrix form of Hamiltonians and Tensors\n be calculated, which would drastically increase runtime.\n\n Returns:\n (bool): True if equivalent.\n\n **Examples**\n\n >>> ob1 = qml.PauliX(0) @ qml.Identity(1)\n >>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)])\n >>> ob1.compare(ob2)\n True\n >>> ob1 = qml.PauliX(0)\n >>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0)\n >>> ob1.compare(ob2)\n False\n '
if isinstance(other, (Tensor, Observable)):
return (other._obs_data() == self._obs_data())
if isinstance(other, qml.Hamiltonian):
return other.compare(self)
raise ValueError('Can only compare an Observable/Tensor, and a Hamiltonian/Observable/Tensor.')
| -5,943,546,845,245,784,000
|
Compares with another :class:`~.Hamiltonian`, :class:`~Tensor`, or :class:`~Observable`,
to determine if they are equivalent.
Observables/Hamiltonians are equivalent if they represent the same operator
(their matrix representations are equal), and they are defined on the same wires.
.. Warning::
The compare method does **not** check if the matrix representation
of a :class:`~.Hermitian` observable is equal to an equivalent
observable expressed in terms of Pauli matrices.
To do so would require the matrix form of Hamiltonians and Tensors
be calculated, which would drastically increase runtime.
Returns:
(bool): True if equivalent.
**Examples**
>>> ob1 = qml.PauliX(0) @ qml.Identity(1)
>>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)])
>>> ob1.compare(ob2)
True
>>> ob1 = qml.PauliX(0)
>>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0)
>>> ob1.compare(ob2)
False
|
pennylane/operation.py
|
compare
|
DanielPolatajko/pennylane
|
python
|
def compare(self, other):
'Compares with another :class:`~.Hamiltonian`, :class:`~Tensor`, or :class:`~Observable`,\n to determine if they are equivalent.\n\n Observables/Hamiltonians are equivalent if they represent the same operator\n (their matrix representations are equal), and they are defined on the same wires.\n\n .. Warning::\n\n The compare method does **not** check if the matrix representation\n of a :class:`~.Hermitian` observable is equal to an equivalent\n observable expressed in terms of Pauli matrices.\n To do so would require the matrix form of Hamiltonians and Tensors\n be calculated, which would drastically increase runtime.\n\n Returns:\n (bool): True if equivalent.\n\n **Examples**\n\n >>> ob1 = qml.PauliX(0) @ qml.Identity(1)\n >>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)])\n >>> ob1.compare(ob2)\n True\n >>> ob1 = qml.PauliX(0)\n >>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0)\n >>> ob1.compare(ob2)\n False\n '
if isinstance(other, (Tensor, Observable)):
return (other._obs_data() == self._obs_data())
if isinstance(other, qml.Hamiltonian):
return other.compare(self)
raise ValueError('Can only compare an Observable/Tensor, and a Hamiltonian/Observable/Tensor.')
|
def __add__(self, other):
'The addition operation between Observables/Tensors/qml.Hamiltonian objects.'
if isinstance(other, (Observable, Tensor)):
return qml.Hamiltonian([1, 1], [self, other], simplify=True)
if isinstance(other, qml.Hamiltonian):
return (other + self)
raise ValueError(f'Cannot add Observable and {type(other)}')
| -6,280,897,605,118,391,000
|
The addition operation between Observables/Tensors/qml.Hamiltonian objects.
|
pennylane/operation.py
|
__add__
|
DanielPolatajko/pennylane
|
python
|
def __add__(self, other):
if isinstance(other, (Observable, Tensor)):
return qml.Hamiltonian([1, 1], [self, other], simplify=True)
if isinstance(other, qml.Hamiltonian):
return (other + self)
raise ValueError(f'Cannot add Observable and {type(other)}')
|
def __mul__(self, a):
'The scalar multiplication operation between a scalar and an Observable/Tensor.'
if isinstance(a, (int, float)):
return qml.Hamiltonian([a], [self], simplify=True)
raise ValueError(f'Cannot multiply Observable by {type(a)}')
| -4,077,980,623,302,247,000
|
The scalar multiplication operation between a scalar and an Observable/Tensor.
|
pennylane/operation.py
|
__mul__
|
DanielPolatajko/pennylane
|
python
|
def __mul__(self, a):
if isinstance(a, (int, float)):
return qml.Hamiltonian([a], [self], simplify=True)
raise ValueError(f'Cannot multiply Observable by {type(a)}')
|
def __sub__(self, other):
'The subtraction operation between Observables/Tensors/qml.Hamiltonian objects.'
if isinstance(other, (Observable, Tensor, qml.Hamiltonian)):
return self.__add__(other.__mul__((- 1)))
raise ValueError(f'Cannot subtract {type(other)} from Observable')
| -3,128,460,909,550,183,000
|
The subtraction operation between Observables/Tensors/qml.Hamiltonian objects.
|
pennylane/operation.py
|
__sub__
|
DanielPolatajko/pennylane
|
python
|
def __sub__(self, other):
if isinstance(other, (Observable, Tensor, qml.Hamiltonian)):
return self.__add__(other.__mul__((- 1)))
raise ValueError(f'Cannot subtract {type(other)} from Observable')
|
def diagonalizing_gates(self):
'Returns the list of operations such that they\n diagonalize the observable in the computational basis.\n\n Returns:\n list(qml.Operation): A list of gates that diagonalize\n the observable in the computational basis.\n '
raise NotImplementedError
| 8,970,220,112,764,559,000
|
Returns the list of operations such that they
diagonalize the observable in the computational basis.
Returns:
list(qml.Operation): A list of gates that diagonalize
the observable in the computational basis.
|
pennylane/operation.py
|
diagonalizing_gates
|
DanielPolatajko/pennylane
|
python
|
def diagonalizing_gates(self):
'Returns the list of operations such that they\n diagonalize the observable in the computational basis.\n\n Returns:\n list(qml.Operation): A list of gates that diagonalize\n the observable in the computational basis.\n '
raise NotImplementedError
|
def __repr__(self):
'Constructor-call-like representation.'
s = ' @ '.join([repr(o) for o in self.obs])
if (self.return_type is None):
return s
if (self.return_type is Probability):
return (repr(self.return_type) + '(wires={})'.format(self.wires.tolist()))
return (((repr(self.return_type) + '(') + s) + ')')
| 5,895,522,948,243,490,000
|
Constructor-call-like representation.
|
pennylane/operation.py
|
__repr__
|
DanielPolatajko/pennylane
|
python
|
def __repr__(self):
s = ' @ '.join([repr(o) for o in self.obs])
if (self.return_type is None):
return s
if (self.return_type is Probability):
return (repr(self.return_type) + '(wires={})'.format(self.wires.tolist()))
return (((repr(self.return_type) + '(') + s) + ')')
|
@property
def name(self):
'All constituent observable names making up the tensor product.\n\n Returns:\n list[str]: list containing all observable names\n '
return [o.name for o in self.obs]
| 3,576,635,201,162,277,000
|
All constituent observable names making up the tensor product.
Returns:
list[str]: list containing all observable names
|
pennylane/operation.py
|
name
|
DanielPolatajko/pennylane
|
python
|
@property
def name(self):
'All constituent observable names making up the tensor product.\n\n Returns:\n list[str]: list containing all observable names\n '
return [o.name for o in self.obs]
|
@property
def num_wires(self):
'Number of wires the tensor product acts on.\n\n Returns:\n int: number of wires\n '
return len(self.wires)
| -8,860,691,384,198,224,000
|
Number of wires the tensor product acts on.
Returns:
int: number of wires
|
pennylane/operation.py
|
num_wires
|
DanielPolatajko/pennylane
|
python
|
@property
def num_wires(self):
'Number of wires the tensor product acts on.\n\n Returns:\n int: number of wires\n '
return len(self.wires)
|
@property
def wires(self):
'All wires in the system the tensor product acts on.\n\n Returns:\n Wires: wires addressed by the observables in the tensor product\n '
return Wires.all_wires([o.wires for o in self.obs])
| -1,003,641,241,214,833,300
|
All wires in the system the tensor product acts on.
Returns:
Wires: wires addressed by the observables in the tensor product
|
pennylane/operation.py
|
wires
|
DanielPolatajko/pennylane
|
python
|
@property
def wires(self):
'All wires in the system the tensor product acts on.\n\n Returns:\n Wires: wires addressed by the observables in the tensor product\n '
return Wires.all_wires([o.wires for o in self.obs])
|
@property
def data(self):
'Raw parameters of all constituent observables in the tensor product.\n\n Returns:\n list[Any]: flattened list containing all dependent parameters\n '
return [p for sublist in [o.data for o in self.obs] for p in sublist]
| 3,251,900,921,311,749,600
|
Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
|
pennylane/operation.py
|
data
|
DanielPolatajko/pennylane
|
python
|
@property
def data(self):
'Raw parameters of all constituent observables in the tensor product.\n\n Returns:\n list[Any]: flattened list containing all dependent parameters\n '
return [p for sublist in [o.data for o in self.obs] for p in sublist]
|
@property
def num_params(self):
'Raw parameters of all constituent observables in the tensor product.\n\n Returns:\n list[Any]: flattened list containing all dependent parameters\n '
return len(self.data)
| 5,563,449,933,144,988,000
|
Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
|
pennylane/operation.py
|
num_params
|
DanielPolatajko/pennylane
|
python
|
@property
def num_params(self):
'Raw parameters of all constituent observables in the tensor product.\n\n Returns:\n list[Any]: flattened list containing all dependent parameters\n '
return len(self.data)
|
@property
def parameters(self):
'Evaluated parameter values of all constituent observables in the tensor product.\n\n Returns:\n list[list[Any]]: nested list containing the parameters per observable\n in the tensor product\n '
return [o.parameters for o in self.obs]
| -6,726,044,344,866,530,000
|
Evaluated parameter values of all constituent observables in the tensor product.
Returns:
list[list[Any]]: nested list containing the parameters per observable
in the tensor product
|
pennylane/operation.py
|
parameters
|
DanielPolatajko/pennylane
|
python
|
@property
def parameters(self):
'Evaluated parameter values of all constituent observables in the tensor product.\n\n Returns:\n list[list[Any]]: nested list containing the parameters per observable\n in the tensor product\n '
return [o.parameters for o in self.obs]
|
@property
def non_identity_obs(self):
'Returns the non-identity observables contained in the tensor product.\n\n Returns:\n list[:class:`~.Observable`]: list containing the non-identity observables\n in the tensor product\n '
return [obs for obs in self.obs if (not isinstance(obs, qml.Identity))]
| -3,727,300,812,888,661,500
|
Returns the non-identity observables contained in the tensor product.
Returns:
list[:class:`~.Observable`]: list containing the non-identity observables
in the tensor product
|
pennylane/operation.py
|
non_identity_obs
|
DanielPolatajko/pennylane
|
python
|
@property
def non_identity_obs(self):
'Returns the non-identity observables contained in the tensor product.\n\n Returns:\n list[:class:`~.Observable`]: list containing the non-identity observables\n in the tensor product\n '
return [obs for obs in self.obs if (not isinstance(obs, qml.Identity))]
|