body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
330ebf10e3c534b919d758e426d284e9dc74971abd08f2352443c1765526271b | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, OneOfSolidResultControlSolutionFields)):
return False
return (self.to_dict() == other.to_dict()) | Returns true if both objects are equal | simscale_sdk/models/one_of_solid_result_control_solution_fields.py | __eq__ | slainesimscale/simscale-python-sdk | 8 | python | def __eq__(self, other):
if (not isinstance(other, OneOfSolidResultControlSolutionFields)):
return False
return (self.to_dict() == other.to_dict()) | def __eq__(self, other):
if (not isinstance(other, OneOfSolidResultControlSolutionFields)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|> |
2b188ffaa13d5c0a8e66657a5d5ed5998eb5b8fe51edb1ad614d409175abaaaf | def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, OneOfSolidResultControlSolutionFields)):
return True
return (self.to_dict() != other.to_dict()) | Returns true if both objects are not equal | simscale_sdk/models/one_of_solid_result_control_solution_fields.py | __ne__ | slainesimscale/simscale-python-sdk | 8 | python | def __ne__(self, other):
if (not isinstance(other, OneOfSolidResultControlSolutionFields)):
return True
return (self.to_dict() != other.to_dict()) | def __ne__(self, other):
if (not isinstance(other, OneOfSolidResultControlSolutionFields)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|> |
f0e5d61e03c5ce617604516c7e3ac78b7408f209e21f0e2064b60c7e1ba6fea0 | def inask(question: str) -> str:
'\n this fnc made for test because we need user input but intest it cant typing itself os it need to call patch module\n but it need to create a sperate function and call it in function that we want to use\n Args:\n question (str): any string\n Returns:\n str: user input\n '
answer = input(question)
return answer | this fnc made for test because we need user input but intest it cant typing itself os it need to call patch module
but it need to create a sperate function and call it in function that we want to use
Args:
question (str): any string
Returns:
str: user input | src/main/student_data.py | inask | ta-assistant/Admin-CLI | 1 | python | def inask(question: str) -> str:
'\n this fnc made for test because we need user input but intest it cant typing itself os it need to call patch module\n but it need to create a sperate function and call it in function that we want to use\n Args:\n question (str): any string\n Returns:\n str: user input\n '
answer = input(question)
return answer | def inask(question: str) -> str:
'\n this fnc made for test because we need user input but intest it cant typing itself os it need to call patch module\n but it need to create a sperate function and call it in function that we want to use\n Args:\n question (str): any string\n Returns:\n str: user input\n '
answer = input(question)
return answer<|docstring|>this fnc made for test because we need user input but intest it cant typing itself os it need to call patch module
but it need to create a sperate function and call it in function that we want to use
Args:
question (str): any string
Returns:
str: user input<|endoftext|> |
3d1f6aaffd8f223279409ee196c36d5c10f52e8f671e962973b52d7b43303348 | def __init__(self, path: str, filename: str, draft: dict) -> None:
"\n init draft \n draft_file (str) \n draft_work (list)\n if user did not have draft.json it will return None\n Args:\n path (str): path of work directory\n filename (str): name of student's directory of file\n "
self.draft_file = draft['fileDraft']
self.draft_out = draft['outputDraft']
self.pre_data = None
self.filename = filename | init draft
draft_file (str)
draft_work (list)
if user did not have draft.json it will return None
Args:
path (str): path of work directory
filename (str): name of student's directory of file | src/main/student_data.py | __init__ | ta-assistant/Admin-CLI | 1 | python | def __init__(self, path: str, filename: str, draft: dict) -> None:
"\n init draft \n draft_file (str) \n draft_work (list)\n if user did not have draft.json it will return None\n Args:\n path (str): path of work directory\n filename (str): name of student's directory of file\n "
self.draft_file = draft['fileDraft']
self.draft_out = draft['outputDraft']
self.pre_data = None
self.filename = filename | def __init__(self, path: str, filename: str, draft: dict) -> None:
"\n init draft \n draft_file (str) \n draft_work (list)\n if user did not have draft.json it will return None\n Args:\n path (str): path of work directory\n filename (str): name of student's directory of file\n "
self.draft_file = draft['fileDraft']
self.draft_out = draft['outputDraft']
self.pre_data = None
self.filename = filename<|docstring|>init draft
draft_file (str)
draft_work (list)
if user did not have draft.json it will return None
Args:
path (str): path of work directory
filename (str): name of student's directory of file<|endoftext|> |
9ad8554b8c3962d5b9f7249bb4d989f75831d58478c3297138cb4473b36b0897 | def _filename_pre_data(self) -> dict:
'prepare filename to dict\n pseudo code:\n -get key word form file draft and store it in key\n -split filename with "_" so we will got list of student name, id, ex, etc.\n -we will zip it together and store into prework(dict) that keep student data and key word \n example: {"student_id": "1234567890", "name": "Alex", "ex": "ex1}\n Returns:\n dict: student data form file name\n '
key = []
remainder = ''
prework = {}
for i in self.draft_file:
if (i == '{'):
remainder = ''
elif (i == '}'):
key.append(remainder)
else:
remainder += i
list_filename = self.filename.split('_')
for (key, value) in zip(key, list_filename):
prework[key] = value
self.pre_data = prework | prepare filename to dict
pseudo code:
-get key word form file draft and store it in key
-split filename with "_" so we will got list of student name, id, ex, etc.
-we will zip it together and store into prework(dict) that keep student data and key word
example: {"student_id": "1234567890", "name": "Alex", "ex": "ex1}
Returns:
dict: student data form file name | src/main/student_data.py | _filename_pre_data | ta-assistant/Admin-CLI | 1 | python | def _filename_pre_data(self) -> dict:
'prepare filename to dict\n pseudo code:\n -get key word form file draft and store it in key\n -split filename with "_" so we will got list of student name, id, ex, etc.\n -we will zip it together and store into prework(dict) that keep student data and key word \n example: {"student_id": "1234567890", "name": "Alex", "ex": "ex1}\n Returns:\n dict: student data form file name\n '
key = []
remainder =
prework = {}
for i in self.draft_file:
if (i == '{'):
remainder =
elif (i == '}'):
key.append(remainder)
else:
remainder += i
list_filename = self.filename.split('_')
for (key, value) in zip(key, list_filename):
prework[key] = value
self.pre_data = prework | def _filename_pre_data(self) -> dict:
'prepare filename to dict\n pseudo code:\n -get key word form file draft and store it in key\n -split filename with "_" so we will got list of student name, id, ex, etc.\n -we will zip it together and store into prework(dict) that keep student data and key word \n example: {"student_id": "1234567890", "name": "Alex", "ex": "ex1}\n Returns:\n dict: student data form file name\n '
key = []
remainder =
prework = {}
for i in self.draft_file:
if (i == '{'):
remainder =
elif (i == '}'):
key.append(remainder)
else:
remainder += i
list_filename = self.filename.split('_')
for (key, value) in zip(key, list_filename):
prework[key] = value
self.pre_data = prework<|docstring|>prepare filename to dict
pseudo code:
-get key word form file draft and store it in key
-split filename with "_" so we will got list of student name, id, ex, etc.
-we will zip it together and store into prework(dict) that keep student data and key word
example: {"student_id": "1234567890", "name": "Alex", "ex": "ex1}
Returns:
dict: student data form file name<|endoftext|> |
3b99c88e867d24274c964fdc634d6506d7d6e06d645504d336a8f27f391c076d | def prepare_student_data(self) -> dict:
'make that studect_data(dict) ready for the next step by get the output draft \n and set it into student_data and have its value is "N/"A\n Returns:\n dict: empty student data that have only data from file name but another is "N/A"\n '
self._filename_pre_data()
empty_student = {}
empty_student['scoreTimestamp'] = 'N/A'
for i in self.draft_out:
empty_student[i] = 'N/A'
for i in self.pre_data:
empty_student[i] = self.pre_data[i]
self.pre_data = empty_student | make that studect_data(dict) ready for the next step by get the output draft
and set it into student_data and have its value is "N/"A
Returns:
dict: empty student data that have only data from file name but another is "N/A" | src/main/student_data.py | prepare_student_data | ta-assistant/Admin-CLI | 1 | python | def prepare_student_data(self) -> dict:
'make that studect_data(dict) ready for the next step by get the output draft \n and set it into student_data and have its value is "N/"A\n Returns:\n dict: empty student data that have only data from file name but another is "N/A"\n '
self._filename_pre_data()
empty_student = {}
empty_student['scoreTimestamp'] = 'N/A'
for i in self.draft_out:
empty_student[i] = 'N/A'
for i in self.pre_data:
empty_student[i] = self.pre_data[i]
self.pre_data = empty_student | def prepare_student_data(self) -> dict:
'make that studect_data(dict) ready for the next step by get the output draft \n and set it into student_data and have its value is "N/"A\n Returns:\n dict: empty student data that have only data from file name but another is "N/A"\n '
self._filename_pre_data()
empty_student = {}
empty_student['scoreTimestamp'] = 'N/A'
for i in self.draft_out:
empty_student[i] = 'N/A'
for i in self.pre_data:
empty_student[i] = self.pre_data[i]
self.pre_data = empty_student<|docstring|>make that studect_data(dict) ready for the next step by get the output draft
and set it into student_data and have its value is "N/"A
Returns:
dict: empty student data that have only data from file name but another is "N/A"<|endoftext|> |
9b4245110c243b05d3f7ffdda45fd0094c4792c7951a06b4deb045d2aa614245 | def data_input(self, post_student_data: dict) -> dict:
'get data form user and set into student data(dict)\n pseudo code:\n for loop post_student_data and if its "N/A" ask user for information\n and store it. But if the input was -99 it will skip that question to next one\n and when its finish it will return post_student_data\n example:\n {\'student_id\': \'6310546066\', \'name\': \'vitvara\', \'ex\': \'ex1\', \'score1\': \'10\', \'score2\': \'20\', \'comment\': \'nice work\'}\n Args:\n post_student_data (dict): empty_student_data\n Returns:\n dict: student data that ready to write\n '
for i in post_student_data:
if (post_student_data[i] == 'N/A'):
while True:
if (i == 'scoreTimestamp'):
post_student_data[i] = int(round((time.time() * 1000)))
break
data_input = input(f'Enter {i}: ')
if (data_input == '-99'):
break
if (i == 'score'):
try:
data_input = float(data_input)
except ValueError:
print('Value Error: please enter a numeric score.')
continue
post_student_data[i] = data_input
break
return post_student_data | get data form user and set into student data(dict)
pseudo code:
for loop post_student_data and if its "N/A" ask user for information
and store it. But if the input was -99 it will skip that question to next one
and when its finish it will return post_student_data
example:
{'student_id': '6310546066', 'name': 'vitvara', 'ex': 'ex1', 'score1': '10', 'score2': '20', 'comment': 'nice work'}
Args:
post_student_data (dict): empty_student_data
Returns:
dict: student data that ready to write | src/main/student_data.py | data_input | ta-assistant/Admin-CLI | 1 | python | def data_input(self, post_student_data: dict) -> dict:
'get data form user and set into student data(dict)\n pseudo code:\n for loop post_student_data and if its "N/A" ask user for information\n and store it. But if the input was -99 it will skip that question to next one\n and when its finish it will return post_student_data\n example:\n {\'student_id\': \'6310546066\', \'name\': \'vitvara\', \'ex\': \'ex1\', \'score1\': \'10\', \'score2\': \'20\', \'comment\': \'nice work\'}\n Args:\n post_student_data (dict): empty_student_data\n Returns:\n dict: student data that ready to write\n '
for i in post_student_data:
if (post_student_data[i] == 'N/A'):
while True:
if (i == 'scoreTimestamp'):
post_student_data[i] = int(round((time.time() * 1000)))
break
data_input = input(f'Enter {i}: ')
if (data_input == '-99'):
break
if (i == 'score'):
try:
data_input = float(data_input)
except ValueError:
print('Value Error: please enter a numeric score.')
continue
post_student_data[i] = data_input
break
return post_student_data | def data_input(self, post_student_data: dict) -> dict:
'get data form user and set into student data(dict)\n pseudo code:\n for loop post_student_data and if its "N/A" ask user for information\n and store it. But if the input was -99 it will skip that question to next one\n and when its finish it will return post_student_data\n example:\n {\'student_id\': \'6310546066\', \'name\': \'vitvara\', \'ex\': \'ex1\', \'score1\': \'10\', \'score2\': \'20\', \'comment\': \'nice work\'}\n Args:\n post_student_data (dict): empty_student_data\n Returns:\n dict: student data that ready to write\n '
for i in post_student_data:
if (post_student_data[i] == 'N/A'):
while True:
if (i == 'scoreTimestamp'):
post_student_data[i] = int(round((time.time() * 1000)))
break
data_input = input(f'Enter {i}: ')
if (data_input == '-99'):
break
if (i == 'score'):
try:
data_input = float(data_input)
except ValueError:
print('Value Error: please enter a numeric score.')
continue
post_student_data[i] = data_input
break
return post_student_data<|docstring|>get data form user and set into student data(dict)
pseudo code:
for loop post_student_data and if its "N/A" ask user for information
and store it. But if the input was -99 it will skip that question to next one
and when its finish it will return post_student_data
example:
{'student_id': '6310546066', 'name': 'vitvara', 'ex': 'ex1', 'score1': '10', 'score2': '20', 'comment': 'nice work'}
Args:
post_student_data (dict): empty_student_data
Returns:
dict: student data that ready to write<|endoftext|> |
837bdcf710f1bf4d15022ec4b0913500703c420aa0c084193588dcb9e5ac3730 | def ask(self) -> data_input:
'ask user for student data\n pseudo code:\n loop empty_student_data if its not "N/A" it will print out its key and value\n then it will call data_input\n Returns:\n data_input: return student data that ready to write\n '
print('===========================')
post_student_data = self.pre_data
for i in post_student_data:
if (post_student_data[i] != 'N/A'):
print(f'{i}: {post_student_data[i]}')
print('===========================')
post_data = self.data_input(post_student_data)
return post_data | ask user for student data
pseudo code:
loop empty_student_data if its not "N/A" it will print out its key and value
then it will call data_input
Returns:
data_input: return student data that ready to write | src/main/student_data.py | ask | ta-assistant/Admin-CLI | 1 | python | def ask(self) -> data_input:
'ask user for student data\n pseudo code:\n loop empty_student_data if its not "N/A" it will print out its key and value\n then it will call data_input\n Returns:\n data_input: return student data that ready to write\n '
print('===========================')
post_student_data = self.pre_data
for i in post_student_data:
if (post_student_data[i] != 'N/A'):
print(f'{i}: {post_student_data[i]}')
print('===========================')
post_data = self.data_input(post_student_data)
return post_data | def ask(self) -> data_input:
'ask user for student data\n pseudo code:\n loop empty_student_data if its not "N/A" it will print out its key and value\n then it will call data_input\n Returns:\n data_input: return student data that ready to write\n '
print('===========================')
post_student_data = self.pre_data
for i in post_student_data:
if (post_student_data[i] != 'N/A'):
print(f'{i}: {post_student_data[i]}')
print('===========================')
post_data = self.data_input(post_student_data)
return post_data<|docstring|>ask user for student data
pseudo code:
loop empty_student_data if its not "N/A" it will print out its key and value
then it will call data_input
Returns:
data_input: return student data that ready to write<|endoftext|> |
8517c6e645bdd989877eab2189a1d308fcc7de47497ee099a608d658e858c355 | def scan(self, port: int):
'Scans whether the given port is open.'
try:
logging.debug(f'Trying to connect to {self.host}:{port}.')
self.socket.connect((self.host, port))
self.open_ports.append(port)
logging.debug(f'Port {self.host}:{port} is open.')
except ConnectionRefusedError:
logging.debug(f'Port {self.host}:{port} is closed.')
except OSError as os_error:
logging.debug(os_error)
else:
self.socket.shutdown(socket.SHUT_RDWR)
logging.debug(f'Connection to port {self.host}:{port} has been shut down.') | Scans whether the given port is open. | multiport/scanner.py | scan | shimst3r/multiport | 0 | python | def scan(self, port: int):
try:
logging.debug(f'Trying to connect to {self.host}:{port}.')
self.socket.connect((self.host, port))
self.open_ports.append(port)
logging.debug(f'Port {self.host}:{port} is open.')
except ConnectionRefusedError:
logging.debug(f'Port {self.host}:{port} is closed.')
except OSError as os_error:
logging.debug(os_error)
else:
self.socket.shutdown(socket.SHUT_RDWR)
logging.debug(f'Connection to port {self.host}:{port} has been shut down.') | def scan(self, port: int):
try:
logging.debug(f'Trying to connect to {self.host}:{port}.')
self.socket.connect((self.host, port))
self.open_ports.append(port)
logging.debug(f'Port {self.host}:{port} is open.')
except ConnectionRefusedError:
logging.debug(f'Port {self.host}:{port} is closed.')
except OSError as os_error:
logging.debug(os_error)
else:
self.socket.shutdown(socket.SHUT_RDWR)
logging.debug(f'Connection to port {self.host}:{port} has been shut down.')<|docstring|>Scans whether the given port is open.<|endoftext|> |
1d393410f732b9d4b62a24ede49a51398a74f18836e2edcec15edcf74b33be1a | def ga4ghImportGlue():
'\n Call this method before importing a ga4gh module in the scripts dir.\n Otherwise, you will be using the installed package instead of\n the development package.\n Assumes a certain directory structure.\n '
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path) | Call this method before importing a ga4gh module in the scripts dir.
Otherwise, you will be using the installed package instead of
the development package.
Assumes a certain directory structure. | scripts/glue.py | ga4ghImportGlue | dkucsc/mavas | 0 | python | def ga4ghImportGlue():
'\n Call this method before importing a ga4gh module in the scripts dir.\n Otherwise, you will be using the installed package instead of\n the development package.\n Assumes a certain directory structure.\n '
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path) | def ga4ghImportGlue():
'\n Call this method before importing a ga4gh module in the scripts dir.\n Otherwise, you will be using the installed package instead of\n the development package.\n Assumes a certain directory structure.\n '
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path)<|docstring|>Call this method before importing a ga4gh module in the scripts dir.
Otherwise, you will be using the installed package instead of
the development package.
Assumes a certain directory structure.<|endoftext|> |
268e8c9d928c607ff6908d8031417b96b72059036efdb7f382a2189675f71d13 | def make_path(in_folder: Union[(str, Path)], *subnames: str, pth_obj: bool=False) -> Union[(str, Path)]:
'Dynamically set a path (e.g., for iteratively referencing\n year-specific geodatabases)\n Args:\n in_folder (str): String or Path\n subnames (list/tuple): A list of arguments to join in making the full path\n `{in_folder}/{subname_1}/.../{subname_n}\n Returns:\n Path\n '
pth = Path(in_folder, *subnames)
if pth_obj:
return pth
else:
return str(pth) | Dynamically set a path (e.g., for iteratively referencing
year-specific geodatabases)
Args:
in_folder (str): String or Path
subnames (list/tuple): A list of arguments to join in making the full path
`{in_folder}/{subname_1}/.../{subname_n}
Returns:
Path | src/d4_accessiblity/utils.py | make_path | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def make_path(in_folder: Union[(str, Path)], *subnames: str, pth_obj: bool=False) -> Union[(str, Path)]:
'Dynamically set a path (e.g., for iteratively referencing\n year-specific geodatabases)\n Args:\n in_folder (str): String or Path\n subnames (list/tuple): A list of arguments to join in making the full path\n `{in_folder}/{subname_1}/.../{subname_n}\n Returns:\n Path\n '
pth = Path(in_folder, *subnames)
if pth_obj:
return pth
else:
return str(pth) | def make_path(in_folder: Union[(str, Path)], *subnames: str, pth_obj: bool=False) -> Union[(str, Path)]:
'Dynamically set a path (e.g., for iteratively referencing\n year-specific geodatabases)\n Args:\n in_folder (str): String or Path\n subnames (list/tuple): A list of arguments to join in making the full path\n `{in_folder}/{subname_1}/.../{subname_n}\n Returns:\n Path\n '
pth = Path(in_folder, *subnames)
if pth_obj:
return pth
else:
return str(pth)<|docstring|>Dynamically set a path (e.g., for iteratively referencing
year-specific geodatabases)
Args:
in_folder (str): String or Path
subnames (list/tuple): A list of arguments to join in making the full path
`{in_folder}/{subname_1}/.../{subname_n}
Returns:
Path<|endoftext|> |
9535f4c4ad7217835c53640f1f8ce2fd2b1a2f9b5c28e92e688c9951e82f3355 | def validate_directory(directory: Union[(str, Path)], pth_obj: bool=False) -> Union[(str, Path)]:
'checks if a directory exists and creates if not\n\n Args:\n directory (str): path to a directory\n pth_obj (bool): if true return a Path object\n Returns:\n directory (str/Path): validated path to provided directory\n '
dir = Path(directory)
if dir.is_dir():
if pth_obj:
return dir
else:
return directory
else:
try:
dir.mkdir()
if pth_obj:
return dir
else:
return directory
except:
raise ValueError('could not create directory') | checks if a directory exists and creates if not
Args:
directory (str): path to a directory
pth_obj (bool): if true return a Path object
Returns:
directory (str/Path): validated path to provided directory | src/d4_accessiblity/utils.py | validate_directory | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def validate_directory(directory: Union[(str, Path)], pth_obj: bool=False) -> Union[(str, Path)]:
'checks if a directory exists and creates if not\n\n Args:\n directory (str): path to a directory\n pth_obj (bool): if true return a Path object\n Returns:\n directory (str/Path): validated path to provided directory\n '
dir = Path(directory)
if dir.is_dir():
if pth_obj:
return dir
else:
return directory
else:
try:
dir.mkdir()
if pth_obj:
return dir
else:
return directory
except:
raise ValueError('could not create directory') | def validate_directory(directory: Union[(str, Path)], pth_obj: bool=False) -> Union[(str, Path)]:
'checks if a directory exists and creates if not\n\n Args:\n directory (str): path to a directory\n pth_obj (bool): if true return a Path object\n Returns:\n directory (str/Path): validated path to provided directory\n '
dir = Path(directory)
if dir.is_dir():
if pth_obj:
return dir
else:
return directory
else:
try:
dir.mkdir()
if pth_obj:
return dir
else:
return directory
except:
raise ValueError('could not create directory')<|docstring|>checks if a directory exists and creates if not
Args:
directory (str): path to a directory
pth_obj (bool): if true return a Path object
Returns:
directory (str/Path): validated path to provided directory<|endoftext|> |
7b78faa5475f5f01c1349ef4a729a5a5c4e71177f33b10d7cc13d2ba755fb157 | def check_overwrite_path(file_path: Union[(str, Path)], overwrite: bool=False) -> None:
'evaluates an output object and deletes if overwrite is True\n Args:\n file_path (str): path to file object\n overwrite (bool):\n\n Returns:\n None\n '
if Path.exists(Path(file_path)):
if overwrite:
print(f'--- --- deleting existing file {file_path}')
Path(file_path).unlink()
else:
raise RuntimeError(f'Output file {file_path} already exists') | evaluates an output object and deletes if overwrite is True
Args:
file_path (str): path to file object
overwrite (bool):
Returns:
None | src/d4_accessiblity/utils.py | check_overwrite_path | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def check_overwrite_path(file_path: Union[(str, Path)], overwrite: bool=False) -> None:
'evaluates an output object and deletes if overwrite is True\n Args:\n file_path (str): path to file object\n overwrite (bool):\n\n Returns:\n None\n '
if Path.exists(Path(file_path)):
if overwrite:
print(f'--- --- deleting existing file {file_path}')
Path(file_path).unlink()
else:
raise RuntimeError(f'Output file {file_path} already exists') | def check_overwrite_path(file_path: Union[(str, Path)], overwrite: bool=False) -> None:
'evaluates an output object and deletes if overwrite is True\n Args:\n file_path (str): path to file object\n overwrite (bool):\n\n Returns:\n None\n '
if Path.exists(Path(file_path)):
if overwrite:
print(f'--- --- deleting existing file {file_path}')
Path(file_path).unlink()
else:
raise RuntimeError(f'Output file {file_path} already exists')<|docstring|>evaluates an output object and deletes if overwrite is True
Args:
file_path (str): path to file object
overwrite (bool):
Returns:
None<|endoftext|> |
7335c84e6f37e983724bf4c0f7c157e63814be51000806140f601dbc6e2d058e | def check_overwrite_path(output, overwrite=True):
'Non-arcpy version of check_overwrite_output'
output = Path(output)
if output.exists():
if overwrite:
if output.is_file():
print(f'--- --- deleting existing file {output.name}')
output.unlink()
if output.is_dir():
print(f'--- --- deleting existing folder {output.name}')
shutil.rmtree(output)
else:
print(f'Output file/folder {output} already exists') | Non-arcpy version of check_overwrite_output | src/d4_accessiblity/utils.py | check_overwrite_path | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def check_overwrite_path(output, overwrite=True):
output = Path(output)
if output.exists():
if overwrite:
if output.is_file():
print(f'--- --- deleting existing file {output.name}')
output.unlink()
if output.is_dir():
print(f'--- --- deleting existing folder {output.name}')
shutil.rmtree(output)
else:
print(f'Output file/folder {output} already exists') | def check_overwrite_path(output, overwrite=True):
output = Path(output)
if output.exists():
if overwrite:
if output.is_file():
print(f'--- --- deleting existing file {output.name}')
output.unlink()
if output.is_dir():
print(f'--- --- deleting existing folder {output.name}')
shutil.rmtree(output)
else:
print(f'Output file/folder {output} already exists')<|docstring|>Non-arcpy version of check_overwrite_output<|endoftext|> |
15a0b71d2409776fc947b14098128b3bcc79b2dcb4ba6c2a96ae820a89e844a7 | def shp_to_df(shp_path):
'Read a shapefile into a Pandas dataframe dropping geometry'
import shapefile
if isinstance(shp_path, pathlib.PurePath):
shp_path = str(shp_path)
sf = shapefile.Reader(shp_path)
fields = [x[0] for x in sf.fields if (x != 'geometry')][1:]
records = [list(i) for i in sf.records()]
return pd.DataFrame(columns=fields, data=records) | Read a shapefile into a Pandas dataframe dropping geometry | src/d4_accessiblity/utils.py | shp_to_df | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def shp_to_df(shp_path):
import shapefile
if isinstance(shp_path, pathlib.PurePath):
shp_path = str(shp_path)
sf = shapefile.Reader(shp_path)
fields = [x[0] for x in sf.fields if (x != 'geometry')][1:]
records = [list(i) for i in sf.records()]
return pd.DataFrame(columns=fields, data=records) | def shp_to_df(shp_path):
import shapefile
if isinstance(shp_path, pathlib.PurePath):
shp_path = str(shp_path)
sf = shapefile.Reader(shp_path)
fields = [x[0] for x in sf.fields if (x != 'geometry')][1:]
records = [list(i) for i in sf.records()]
return pd.DataFrame(columns=fields, data=records)<|docstring|>Read a shapefile into a Pandas dataframe dropping geometry<|endoftext|> |
b1f6bcaa4236dd68e12b6ca6cc767f1cba67b6f5d878071873d709cb0eacd55e | def copy_shapefiles(in_file, out_folder):
'Consistent method for copying shapefile data'
name = in_file.name
with fiona.open(in_file, 'r') as src:
meta = src.meta
out_file = Path(out_folder, name)
if out_file.exists():
prefix = in_file.parent.name
out_file = Path(out_folder, f'{prefix}_{name}')
print(f'...{in_file.name} already exists, makeing new copy with {prefix}')
with fiona.open(out_file, 'w', **meta) as dst:
for feature in src:
dst.write(feature) | Consistent method for copying shapefile data | src/d4_accessiblity/utils.py | copy_shapefiles | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def copy_shapefiles(in_file, out_folder):
name = in_file.name
with fiona.open(in_file, 'r') as src:
meta = src.meta
out_file = Path(out_folder, name)
if out_file.exists():
prefix = in_file.parent.name
out_file = Path(out_folder, f'{prefix}_{name}')
print(f'...{in_file.name} already exists, makeing new copy with {prefix}')
with fiona.open(out_file, 'w', **meta) as dst:
for feature in src:
dst.write(feature) | def copy_shapefiles(in_file, out_folder):
name = in_file.name
with fiona.open(in_file, 'r') as src:
meta = src.meta
out_file = Path(out_folder, name)
if out_file.exists():
prefix = in_file.parent.name
out_file = Path(out_folder, f'{prefix}_{name}')
print(f'...{in_file.name} already exists, makeing new copy with {prefix}')
with fiona.open(out_file, 'w', **meta) as dst:
for feature in src:
dst.write(feature)<|docstring|>Consistent method for copying shapefile data<|endoftext|> |
7f7580ac232ac5bfc56983fab87bae584d6667fb648691952ad3658fb8172e3b | def _not_none_and_len(string: str) -> bool:
'helper to figure out if not none and string is populated'
is_str = isinstance(string, str)
has_len = (False if (re.match('\\S{5,}', '') is None) else True)
status = (True if (has_len and is_str) else False)
return status | helper to figure out if not none and string is populated | src/d4_accessiblity/utils.py | _not_none_and_len | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def _not_none_and_len(string: str) -> bool:
is_str = isinstance(string, str)
has_len = (False if (re.match('\\S{5,}', ) is None) else True)
status = (True if (has_len and is_str) else False)
return status | def _not_none_and_len(string: str) -> bool:
is_str = isinstance(string, str)
has_len = (False if (re.match('\\S{5,}', ) is None) else True)
status = (True if (has_len and is_str) else False)
return status<|docstring|>helper to figure out if not none and string is populated<|endoftext|> |
37c58bfe10e160fe9813932fc558cbbc4bca80a520423c5ca844bcdd974aee76 | def get_gis():
'Try to get a GIS object first from an active_gis and then trying to create from the .env file.'
if isinstance(active_gis, GIS):
gis = active_gis
else:
url = os.getenv('ESRI_GIS_URL')
usr = os.getenv('ESRI_GIS_USERNAME')
pswd = os.getenv('ESRI_GIS_PASSWORD')
if ((url is not None) and (usr is not None) and (pswd is not None)):
gis = GIS(url, username=usr, password=pswd)
elif ((usr is not None) and (pswd is not None)):
gis = GIS(username=usr, password=pswd)
else:
gis = None
return gis | Try to get a GIS object first from an active_gis and then trying to create from the .env file. | src/d4_accessiblity/utils.py | get_gis | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def get_gis():
if isinstance(active_gis, GIS):
gis = active_gis
else:
url = os.getenv('ESRI_GIS_URL')
usr = os.getenv('ESRI_GIS_USERNAME')
pswd = os.getenv('ESRI_GIS_PASSWORD')
if ((url is not None) and (usr is not None) and (pswd is not None)):
gis = GIS(url, username=usr, password=pswd)
elif ((usr is not None) and (pswd is not None)):
gis = GIS(username=usr, password=pswd)
else:
gis = None
return gis | def get_gis():
if isinstance(active_gis, GIS):
gis = active_gis
else:
url = os.getenv('ESRI_GIS_URL')
usr = os.getenv('ESRI_GIS_USERNAME')
pswd = os.getenv('ESRI_GIS_PASSWORD')
if ((url is not None) and (usr is not None) and (pswd is not None)):
gis = GIS(url, username=usr, password=pswd)
elif ((usr is not None) and (pswd is not None)):
gis = GIS(username=usr, password=pswd)
else:
gis = None
return gis<|docstring|>Try to get a GIS object first from an active_gis and then trying to create from the .env file.<|endoftext|> |
aa66f65df5def2f5315b786074bc432489777c9f79f1bfcb88c645da573fc78a | def add_group(gis: GIS=None, group_name: str=None) -> Group:
'\n Add a group to the GIS for the project for saving resources.\n\n Args:\n gis: Optional\n arcgis.gis.GIS object instance.\n group_name: Optional\n Group to be added to the cloud GIS for storing project resources. Default\n is to load from the .env file. If a group name is not provided, and one is\n not located in the .env file, an exception will be raised.\n\n Returns: Group\n '
if (group_name is None):
group_name = os.getenv('ESRI_GIS_GROUP')
err_msg = 'A group name must either be defined in the .env file or explicitly provided.'
assert isinstance(group_name, str), err_msg
assert len(group_name), err_msg
gmgr = gis.groups
grp_srch = [g for g in gmgr.search() if (g.title.lower() == group_name.lower())]
if (len(grp_srch) == 0):
grp = gmgr.create(group_name)
assert isinstance(grp, Group), 'Failed to create the group in the Cloud GIS.'
else:
grp = grp_srch[0]
return grp | Add a group to the GIS for the project for saving resources.
Args:
gis: Optional
arcgis.gis.GIS object instance.
group_name: Optional
Group to be added to the cloud GIS for storing project resources. Default
is to load from the .env file. If a group name is not provided, and one is
not located in the .env file, an exception will be raised.
Returns: Group | src/d4_accessiblity/utils.py | add_group | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def add_group(gis: GIS=None, group_name: str=None) -> Group:
'\n Add a group to the GIS for the project for saving resources.\n\n Args:\n gis: Optional\n arcgis.gis.GIS object instance.\n group_name: Optional\n Group to be added to the cloud GIS for storing project resources. Default\n is to load from the .env file. If a group name is not provided, and one is\n not located in the .env file, an exception will be raised.\n\n Returns: Group\n '
if (group_name is None):
group_name = os.getenv('ESRI_GIS_GROUP')
err_msg = 'A group name must either be defined in the .env file or explicitly provided.'
assert isinstance(group_name, str), err_msg
assert len(group_name), err_msg
gmgr = gis.groups
grp_srch = [g for g in gmgr.search() if (g.title.lower() == group_name.lower())]
if (len(grp_srch) == 0):
grp = gmgr.create(group_name)
assert isinstance(grp, Group), 'Failed to create the group in the Cloud GIS.'
else:
grp = grp_srch[0]
return grp | def add_group(gis: GIS=None, group_name: str=None) -> Group:
'\n Add a group to the GIS for the project for saving resources.\n\n Args:\n gis: Optional\n arcgis.gis.GIS object instance.\n group_name: Optional\n Group to be added to the cloud GIS for storing project resources. Default\n is to load from the .env file. If a group name is not provided, and one is\n not located in the .env file, an exception will be raised.\n\n Returns: Group\n '
if (group_name is None):
group_name = os.getenv('ESRI_GIS_GROUP')
err_msg = 'A group name must either be defined in the .env file or explicitly provided.'
assert isinstance(group_name, str), err_msg
assert len(group_name), err_msg
gmgr = gis.groups
grp_srch = [g for g in gmgr.search() if (g.title.lower() == group_name.lower())]
if (len(grp_srch) == 0):
grp = gmgr.create(group_name)
assert isinstance(grp, Group), 'Failed to create the group in the Cloud GIS.'
else:
grp = grp_srch[0]
return grp<|docstring|>Add a group to the GIS for the project for saving resources.
Args:
gis: Optional
arcgis.gis.GIS object instance.
group_name: Optional
Group to be added to the cloud GIS for storing project resources. Default
is to load from the .env file. If a group name is not provided, and one is
not located in the .env file, an exception will be raised.
Returns: Group<|endoftext|> |
5df8cbcdc72aa3ce5bf416ec101189ff6c25dbdd90af3f85b4f0229651e6a314 | def add_directory_to_gis(dir_name: str=None, gis: GIS=None):
"Add a directory in a GIS user's content."
if (dir_name is None):
dir_name = os.getenv('PROJECT_NAME')
assert isinstance(dir_name, str), 'A name for the directory must be provided explicitly in the "dir_name" parameter if there is not a PROJECT_NAME specified in the .env file.'
if (gis is None):
gis = get_gis()
assert isinstance(gis, GIS), 'A GIS instance, either an active_gis in the session, credentials in the .env file, or an active GIS instance explicitly passed into the "gis" parameter.'
res = gis.content.create_folder(dir_name)
if (res is None):
status = True
else:
status = ('title' in res.keys())
return status | Add a directory in a GIS user's content. | src/d4_accessiblity/utils.py | add_directory_to_gis | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def add_directory_to_gis(dir_name: str=None, gis: GIS=None):
if (dir_name is None):
dir_name = os.getenv('PROJECT_NAME')
assert isinstance(dir_name, str), 'A name for the directory must be provided explicitly in the "dir_name" parameter if there is not a PROJECT_NAME specified in the .env file.'
if (gis is None):
gis = get_gis()
assert isinstance(gis, GIS), 'A GIS instance, either an active_gis in the session, credentials in the .env file, or an active GIS instance explicitly passed into the "gis" parameter.'
res = gis.content.create_folder(dir_name)
if (res is None):
status = True
else:
status = ('title' in res.keys())
return status | def add_directory_to_gis(dir_name: str=None, gis: GIS=None):
if (dir_name is None):
dir_name = os.getenv('PROJECT_NAME')
assert isinstance(dir_name, str), 'A name for the directory must be provided explicitly in the "dir_name" parameter if there is not a PROJECT_NAME specified in the .env file.'
if (gis is None):
gis = get_gis()
assert isinstance(gis, GIS), 'A GIS instance, either an active_gis in the session, credentials in the .env file, or an active GIS instance explicitly passed into the "gis" parameter.'
res = gis.content.create_folder(dir_name)
if (res is None):
status = True
else:
status = ('title' in res.keys())
return status<|docstring|>Add a directory in a GIS user's content.<|endoftext|> |
eddf2d23d998366185a912bfdd60d71dce1382e24c1a3c34774d6e0483530870 | def create_local_data_resources(data_pth: Path=None, mobile_geodatabases=False) -> Path:
'create all the data resources for the available environment'
if (data_pth is None):
data_pth = (Path(__file__).parent.parent.parent / 'data')
data_pth = (Path(data_pth) if isinstance(data_pth, str) else data_pth)
for data_name in ['interim', 'raw', 'processed', 'external']:
dir_pth = (data_pth / data_name)
if (not dir_pth.exists()):
dir_pth.mkdir(parents=True)
if has_arcpy:
fgdb_pth = (dir_pth / f'{data_name}.gdb')
if fgdb_pth.exists():
shutil.rmtree(fgdb_pth)
arcpy.management.CreateFileGDB(str(dir_pth), f'{data_name}.gdb')
if mobile_geodatabases:
gdb_pth = (dir_pth / f'{data_name}.geodatabase')
if gdb_pth.exists():
gdb_pth.unlink()
arcpy.management.CreateMobileGDB(str(dir_pth), f'{data_name}.geodatabase')
return data_pth | create all the data resources for the available environment | src/d4_accessiblity/utils.py | create_local_data_resources | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def create_local_data_resources(data_pth: Path=None, mobile_geodatabases=False) -> Path:
if (data_pth is None):
data_pth = (Path(__file__).parent.parent.parent / 'data')
data_pth = (Path(data_pth) if isinstance(data_pth, str) else data_pth)
for data_name in ['interim', 'raw', 'processed', 'external']:
dir_pth = (data_pth / data_name)
if (not dir_pth.exists()):
dir_pth.mkdir(parents=True)
if has_arcpy:
fgdb_pth = (dir_pth / f'{data_name}.gdb')
if fgdb_pth.exists():
shutil.rmtree(fgdb_pth)
arcpy.management.CreateFileGDB(str(dir_pth), f'{data_name}.gdb')
if mobile_geodatabases:
gdb_pth = (dir_pth / f'{data_name}.geodatabase')
if gdb_pth.exists():
gdb_pth.unlink()
arcpy.management.CreateMobileGDB(str(dir_pth), f'{data_name}.geodatabase')
return data_pth | def create_local_data_resources(data_pth: Path=None, mobile_geodatabases=False) -> Path:
if (data_pth is None):
data_pth = (Path(__file__).parent.parent.parent / 'data')
data_pth = (Path(data_pth) if isinstance(data_pth, str) else data_pth)
for data_name in ['interim', 'raw', 'processed', 'external']:
dir_pth = (data_pth / data_name)
if (not dir_pth.exists()):
dir_pth.mkdir(parents=True)
if has_arcpy:
fgdb_pth = (dir_pth / f'{data_name}.gdb')
if fgdb_pth.exists():
shutil.rmtree(fgdb_pth)
arcpy.management.CreateFileGDB(str(dir_pth), f'{data_name}.gdb')
if mobile_geodatabases:
gdb_pth = (dir_pth / f'{data_name}.geodatabase')
if gdb_pth.exists():
gdb_pth.unlink()
arcpy.management.CreateMobileGDB(str(dir_pth), f'{data_name}.geodatabase')
return data_pth<|docstring|>create all the data resources for the available environment<|endoftext|> |
64ea9c3f4dfe6f6f68b5a8927f61876d6b001418c70a954e501188377b9fcf41 | def create_aoi_mask_layer(aoi_feature_layer, output_feature_class, style_layer=None):
'Create a visibility mask to focus on an Area of Interest in a map.'
assert has_arcpy, 'ArcPy is required (environment with arcpy referencing ArcGIS Pro functionality) to create an AOI mask.'
styl_lyr = ((Paths.dir_arcgis_lyrs / 'aoi_mask.lyrx') if (style_layer is None) else style_layer)
geom_typ = arcpy.Describe(aoi_feature_layer).shapeType
assert (geom_typ == 'Polygon'), 'The area of interest must be a polygon.'
if (int(arcpy.management.GetCount(aoi_feature_layer)[0]) > 1):
aoi_feature_layer = arcpy.analysis.PairwiseDissolve(aoi_feature_layer, arcpy.Geometry())
desc = arcpy.Describe(aoi_feature_layer)
tol_val = (((desc.extent.width + desc.extent.height) / 2) * 0.01)
smpl_feat = arcpy.cartography.SimplifyPolygon(aoi_feature_layer, out_feature_class=arcpy.Geometry(), algorithm='POINT_REMOVE', tolerance=tol_val, collapsed_point_option='NO_KEEP').split(';')[0]
coord_lst = [[(- 180.0), (- 90.0)], [(- 180.0), 90.0], [180.0, 90.0], [180.0, (- 90.0)], [(- 180.0), (- 90.0)]]
coord_arr = arcpy.Array((arcpy.Point(x, y) for (x, y) in coord_lst))
mask_geom = [arcpy.Polygon(coord_arr, arcpy.SpatialReference(4326))]
mask_fc = arcpy.analysis.Erase(mask_geom, smpl_feat, output_feature_class)
strt_lyr = arcpy.management.MakeFeatureLayer(mask_fc)[0]
styl_lyr = (str(styl_lyr) if isinstance(styl_lyr, Path) else styl_lyr)
lyr = arcpy.management.ApplySymbologyFromLayer(strt_lyr, styl_lyr)[0]
return lyr | Create a visibility mask to focus on an Area of Interest in a map. | src/d4_accessiblity/utils.py | create_aoi_mask_layer | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def create_aoi_mask_layer(aoi_feature_layer, output_feature_class, style_layer=None):
assert has_arcpy, 'ArcPy is required (environment with arcpy referencing ArcGIS Pro functionality) to create an AOI mask.'
styl_lyr = ((Paths.dir_arcgis_lyrs / 'aoi_mask.lyrx') if (style_layer is None) else style_layer)
geom_typ = arcpy.Describe(aoi_feature_layer).shapeType
assert (geom_typ == 'Polygon'), 'The area of interest must be a polygon.'
if (int(arcpy.management.GetCount(aoi_feature_layer)[0]) > 1):
aoi_feature_layer = arcpy.analysis.PairwiseDissolve(aoi_feature_layer, arcpy.Geometry())
desc = arcpy.Describe(aoi_feature_layer)
tol_val = (((desc.extent.width + desc.extent.height) / 2) * 0.01)
smpl_feat = arcpy.cartography.SimplifyPolygon(aoi_feature_layer, out_feature_class=arcpy.Geometry(), algorithm='POINT_REMOVE', tolerance=tol_val, collapsed_point_option='NO_KEEP').split(';')[0]
coord_lst = [[(- 180.0), (- 90.0)], [(- 180.0), 90.0], [180.0, 90.0], [180.0, (- 90.0)], [(- 180.0), (- 90.0)]]
coord_arr = arcpy.Array((arcpy.Point(x, y) for (x, y) in coord_lst))
mask_geom = [arcpy.Polygon(coord_arr, arcpy.SpatialReference(4326))]
mask_fc = arcpy.analysis.Erase(mask_geom, smpl_feat, output_feature_class)
strt_lyr = arcpy.management.MakeFeatureLayer(mask_fc)[0]
styl_lyr = (str(styl_lyr) if isinstance(styl_lyr, Path) else styl_lyr)
lyr = arcpy.management.ApplySymbologyFromLayer(strt_lyr, styl_lyr)[0]
return lyr | def create_aoi_mask_layer(aoi_feature_layer, output_feature_class, style_layer=None):
assert has_arcpy, 'ArcPy is required (environment with arcpy referencing ArcGIS Pro functionality) to create an AOI mask.'
styl_lyr = ((Paths.dir_arcgis_lyrs / 'aoi_mask.lyrx') if (style_layer is None) else style_layer)
geom_typ = arcpy.Describe(aoi_feature_layer).shapeType
assert (geom_typ == 'Polygon'), 'The area of interest must be a polygon.'
if (int(arcpy.management.GetCount(aoi_feature_layer)[0]) > 1):
aoi_feature_layer = arcpy.analysis.PairwiseDissolve(aoi_feature_layer, arcpy.Geometry())
desc = arcpy.Describe(aoi_feature_layer)
tol_val = (((desc.extent.width + desc.extent.height) / 2) * 0.01)
smpl_feat = arcpy.cartography.SimplifyPolygon(aoi_feature_layer, out_feature_class=arcpy.Geometry(), algorithm='POINT_REMOVE', tolerance=tol_val, collapsed_point_option='NO_KEEP').split(';')[0]
coord_lst = [[(- 180.0), (- 90.0)], [(- 180.0), 90.0], [180.0, 90.0], [180.0, (- 90.0)], [(- 180.0), (- 90.0)]]
coord_arr = arcpy.Array((arcpy.Point(x, y) for (x, y) in coord_lst))
mask_geom = [arcpy.Polygon(coord_arr, arcpy.SpatialReference(4326))]
mask_fc = arcpy.analysis.Erase(mask_geom, smpl_feat, output_feature_class)
strt_lyr = arcpy.management.MakeFeatureLayer(mask_fc)[0]
styl_lyr = (str(styl_lyr) if isinstance(styl_lyr, Path) else styl_lyr)
lyr = arcpy.management.ApplySymbologyFromLayer(strt_lyr, styl_lyr)[0]
return lyr<|docstring|>Create a visibility mask to focus on an Area of Interest in a map.<|endoftext|> |
2e0186daf1c162258db5e99e70a054ea95d6de90506755456d1059cbb4a25255 | @property
def abspath(self):
'Absolute path to the local storage'
return self.path.absolute() | Absolute path to the local storage | src/d4_accessiblity/utils.py | abspath | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | @property
def abspath(self):
return self.path.absolute() | @property
def abspath(self):
return self.path.absolute()<|docstring|>Absolute path to the local storage<|endoftext|> |
37d91e4f3404a4590b0fa191a9cc9992663f82ac65c1f165f25d571a767d1731 | @property
def registry_files(self):
'List of file names in the registry cache'
return [f for f in self.data_dir.glob('**/*') if f.is_file()] | List of file names in the registry cache | src/d4_accessiblity/utils.py | registry_files | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | @property
def registry_files(self):
return [f for f in self.data_dir.glob('**/*') if f.is_file()] | @property
def registry_files(self):
return [f for f in self.data_dir.glob('**/*') if f.is_file()]<|docstring|>List of file names in the registry cache<|endoftext|> |
7d80255f6805cefccad5587b6317e9fcd1541e1a74d0b176f7a87fdfada8de55 | @property
def local_file_paths(self):
'List of file paths for registry items based on tag'
local_paths = {}
for record in self.filenames:
local_paths[record['tag']] = Path(self.data_dir, record['name'])
return local_paths | List of file paths for registry items based on tag | src/d4_accessiblity/utils.py | local_file_paths | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | @property
def local_file_paths(self):
local_paths = {}
for record in self.filenames:
local_paths[record['tag']] = Path(self.data_dir, record['name'])
return local_paths | @property
def local_file_paths(self):
local_paths = {}
for record in self.filenames:
local_paths[record['tag']] = Path(self.data_dir, record['name'])
return local_paths<|docstring|>List of file paths for registry items based on tag<|endoftext|> |
ed005e21de32dc4e0285b0bbbf55511345ab3400bca0b393dc5ae76c1e49662f | @property
def tags(self):
'List of tags in registry file'
return self.reg_df.tag.unique().tolist() | List of tags in registry file | src/d4_accessiblity/utils.py | tags | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | @property
def tags(self):
return self.reg_df.tag.unique().tolist() | @property
def tags(self):
return self.reg_df.tag.unique().tolist()<|docstring|>List of tags in registry file<|endoftext|> |
1978ffe4c4f2b65de1ea42df3d7ed45b91853345c1cc957852fd90dbdc43f2c3 | def copy_file(self, in_file, out_dir=None):
'Copies data from on location to another'
if isinstance(in_file, str):
in_file = Path(in_file)
if (out_dir is None):
out_dir = self.data_dir
if (os.path.splitext(in_file) == '.shp'):
copy_shapefiles(in_file, out_dir)
else:
name = in_file.name
out_file = Path(out_dir, name)
if out_file.exists():
prefix = random_prefix(7)
out_file = Path(out_dir, f'{prefix}_{name}')
shutil.copyfile(src=in_file, dst=out_file)
return out_file | Copies data from on location to another | src/d4_accessiblity/utils.py | copy_file | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def copy_file(self, in_file, out_dir=None):
if isinstance(in_file, str):
in_file = Path(in_file)
if (out_dir is None):
out_dir = self.data_dir
if (os.path.splitext(in_file) == '.shp'):
copy_shapefiles(in_file, out_dir)
else:
name = in_file.name
out_file = Path(out_dir, name)
if out_file.exists():
prefix = random_prefix(7)
out_file = Path(out_dir, f'{prefix}_{name}')
shutil.copyfile(src=in_file, dst=out_file)
return out_file | def copy_file(self, in_file, out_dir=None):
if isinstance(in_file, str):
in_file = Path(in_file)
if (out_dir is None):
out_dir = self.data_dir
if (os.path.splitext(in_file) == '.shp'):
copy_shapefiles(in_file, out_dir)
else:
name = in_file.name
out_file = Path(out_dir, name)
if out_file.exists():
prefix = random_prefix(7)
out_file = Path(out_dir, f'{prefix}_{name}')
shutil.copyfile(src=in_file, dst=out_file)
return out_file<|docstring|>Copies data from on location to another<|endoftext|> |
e10fc159cd42bd1133b4dfe6787609d64eeaa21188f440b5dee8e8f6024d8766 | @staticmethod
def _create_resource(pth: Path) -> Path:
'Internal function to create resources.'
is_gdb = ((pth.suffix == '.gdb') or (pth.suffix == '.geodatabase'))
pth_dir = (pth.parent if is_gdb else pth)
if (not pth_dir.exists()):
pth_dir.mkdir(parents=True)
if is_gdb:
gdb_exists = arcpy.Exists(str(pth))
if ((pth.suffix == '.gdb') and (not gdb_exists)):
arcpy.management.CreateFileGDB(pth_dir, pth.stem)
if ((pth.suffix == '.geodatabase') and (not gdb_exists)):
arcpy.management.CreateMobileGDB(pth_dir, pth.stem)
return pth | Internal function to create resources. | src/d4_accessiblity/utils.py | _create_resource | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | @staticmethod
def _create_resource(pth: Path) -> Path:
is_gdb = ((pth.suffix == '.gdb') or (pth.suffix == '.geodatabase'))
pth_dir = (pth.parent if is_gdb else pth)
if (not pth_dir.exists()):
pth_dir.mkdir(parents=True)
if is_gdb:
gdb_exists = arcpy.Exists(str(pth))
if ((pth.suffix == '.gdb') and (not gdb_exists)):
arcpy.management.CreateFileGDB(pth_dir, pth.stem)
if ((pth.suffix == '.geodatabase') and (not gdb_exists)):
arcpy.management.CreateMobileGDB(pth_dir, pth.stem)
return pth | @staticmethod
def _create_resource(pth: Path) -> Path:
is_gdb = ((pth.suffix == '.gdb') or (pth.suffix == '.geodatabase'))
pth_dir = (pth.parent if is_gdb else pth)
if (not pth_dir.exists()):
pth_dir.mkdir(parents=True)
if is_gdb:
gdb_exists = arcpy.Exists(str(pth))
if ((pth.suffix == '.gdb') and (not gdb_exists)):
arcpy.management.CreateFileGDB(pth_dir, pth.stem)
if ((pth.suffix == '.geodatabase') and (not gdb_exists)):
arcpy.management.CreateMobileGDB(pth_dir, pth.stem)
return pth<|docstring|>Internal function to create resources.<|endoftext|> |
a5c9a06a952855100f8cfd0c69487481b57c57a7b4af807060197bf8a234cf2c | def create_resources(self):
'Create data storage resources if they do not already exist.'
pth_lst = [p for p in dir(self) if isinstance(p, Path)]
for pth in pth_lst:
self._create_resource(pth)
return | Create data storage resources if they do not already exist. | src/d4_accessiblity/utils.py | create_resources | renaissanceplanning/PROJECT_D4_Accessibility | 0 | python | def create_resources(self):
pth_lst = [p for p in dir(self) if isinstance(p, Path)]
for pth in pth_lst:
self._create_resource(pth)
return | def create_resources(self):
pth_lst = [p for p in dir(self) if isinstance(p, Path)]
for pth in pth_lst:
self._create_resource(pth)
return<|docstring|>Create data storage resources if they do not already exist.<|endoftext|> |
40a6091f62717a87b451b4d8c59177e409d7a87dcb41b2dacd4b695313b00fa6 | def __init__(self, myname: str, ws: serversocketbase.base_server_socket) -> None:
'\n\n Args:\n myname: the name of the main program\n ws: the websocket instance used to communicate to the server.\n '
super().__init__(myname)
self.numlst: typing.List[int] = []
ws.addObserver(self, base.MSGD_SERVER_MSG)
self.wcstatus: typing.Optional[wcstatus.WCstatus] = None
self.init_view(ws) | Args:
myname: the name of the main program
ws: the websocket instance used to communicate to the server. | stocky-devel/stocky/webclient/wccontroller.py | __init__ | cfe-lab/stocky | 1 | python | def __init__(self, myname: str, ws: serversocketbase.base_server_socket) -> None:
'\n\n Args:\n myname: the name of the main program\n ws: the websocket instance used to communicate to the server.\n '
super().__init__(myname)
self.numlst: typing.List[int] = []
ws.addObserver(self, base.MSGD_SERVER_MSG)
self.wcstatus: typing.Optional[wcstatus.WCstatus] = None
self.init_view(ws) | def __init__(self, myname: str, ws: serversocketbase.base_server_socket) -> None:
'\n\n Args:\n myname: the name of the main program\n ws: the websocket instance used to communicate to the server.\n '
super().__init__(myname)
self.numlst: typing.List[int] = []
ws.addObserver(self, base.MSGD_SERVER_MSG)
self.wcstatus: typing.Optional[wcstatus.WCstatus] = None
self.init_view(ws)<|docstring|>Args:
myname: the name of the main program
ws: the websocket instance used to communicate to the server.<|endoftext|> |
d25c5eeaec4d32fc7c32b14c1de7ee00f96f94b655ad5a379d73ebd65106da63 | def init_view(self, ws: serversocketbase.base_server_socket) -> None:
'Initialise the servers html elements as required for the application.\n\n This method must initialise self.wcstatus, passing it the server websocket.\n '
pass | Initialise the servers html elements as required for the application.
This method must initialise self.wcstatus, passing it the server websocket. | stocky-devel/stocky/webclient/wccontroller.py | init_view | cfe-lab/stocky | 1 | python | def init_view(self, ws: serversocketbase.base_server_socket) -> None:
'Initialise the servers html elements as required for the application.\n\n This method must initialise self.wcstatus, passing it the server websocket.\n '
pass | def init_view(self, ws: serversocketbase.base_server_socket) -> None:
'Initialise the servers html elements as required for the application.\n\n This method must initialise self.wcstatus, passing it the server websocket.\n '
pass<|docstring|>Initialise the servers html elements as required for the application.
This method must initialise self.wcstatus, passing it the server websocket.<|endoftext|> |
ab07f7ead0781f12760f954f11577d6fb408bf88989b27595007a4585cc18f23 | def setradardata(self, radarinfo: typing.List[typing.Tuple[(str, int, float)]]):
'This is a list of string tuples. (epc code, RI) '
radar_view = self.switch.getView(RADAR_VIEW_NAME)
radar_view.set_radardata(radarinfo) | This is a list of string tuples. (epc code, RI) | stocky-devel/stocky/webclient/wccontroller.py | setradardata | cfe-lab/stocky | 1 | python | def setradardata(self, radarinfo: typing.List[typing.Tuple[(str, int, float)]]):
' '
radar_view = self.switch.getView(RADAR_VIEW_NAME)
radar_view.set_radardata(radarinfo) | def setradardata(self, radarinfo: typing.List[typing.Tuple[(str, int, float)]]):
' '
radar_view = self.switch.getView(RADAR_VIEW_NAME)
radar_view.set_radardata(radarinfo)<|docstring|>This is a list of string tuples. (epc code, RI)<|endoftext|> |
2ae15b1339df6f180ae102a7e396443edf81acdb249c34b81758637a5b0fd747 | def set_login_status(self, resdct: dict) -> None:
'Display the login status in the window'
self.loginform.set_login_response(resdct)
if (self.wcstatus is not None):
self.wcstatus.set_login_response(resdct) | Display the login status in the window | stocky-devel/stocky/webclient/wccontroller.py | set_login_status | cfe-lab/stocky | 1 | python | def set_login_status(self, resdct: dict) -> None:
self.loginform.set_login_response(resdct)
if (self.wcstatus is not None):
self.wcstatus.set_login_response(resdct) | def set_login_status(self, resdct: dict) -> None:
self.loginform.set_login_response(resdct)
if (self.wcstatus is not None):
self.wcstatus.set_login_response(resdct)<|docstring|>Display the login status in the window<|endoftext|> |
3c69e291e5eba351723649aca75560a774db32f982a54810b5a22ac894455b83 | def start_QAI_download(self):
'Tell server to start download of QAI data...'
self.send_WS_msg(CommonMSG(CommonMSG.MSG_WC_STOCK_INFO_REQ, dict(do_update=True))) | Tell server to start download of QAI data... | stocky-devel/stocky/webclient/wccontroller.py | start_QAI_download | cfe-lab/stocky | 1 | python | def start_QAI_download(self):
self.send_WS_msg(CommonMSG(CommonMSG.MSG_WC_STOCK_INFO_REQ, dict(do_update=True))) | def start_QAI_download(self):
self.send_WS_msg(CommonMSG(CommonMSG.MSG_WC_STOCK_INFO_REQ, dict(do_update=True)))<|docstring|>Tell server to start download of QAI data...<|endoftext|> |
bf37f1f49e7768039d5f75fa8406deebab0220f0bd4f5c3f7177c068a839bdc1 | def addnewstock(self, url: str):
'Redirect to a new window with the given URL to allow the user to\n add stock.'
vv = self.switch.getView(ADDSTOCK_VIEW_NAME)
vv.redirect(url) | Redirect to a new window with the given URL to allow the user to
add stock. | stocky-devel/stocky/webclient/wccontroller.py | addnewstock | cfe-lab/stocky | 1 | python | def addnewstock(self, url: str):
'Redirect to a new window with the given URL to allow the user to\n add stock.'
vv = self.switch.getView(ADDSTOCK_VIEW_NAME)
vv.redirect(url) | def addnewstock(self, url: str):
'Redirect to a new window with the given URL to allow the user to\n add stock.'
vv = self.switch.getView(ADDSTOCK_VIEW_NAME)
vv.redirect(url)<|docstring|>Redirect to a new window with the given URL to allow the user to
add stock.<|endoftext|> |
95cbaae55b74a34529c81a807c8e4deaca92f91b1be8a27c33e80def3e59cbc1 | def set_qai_update(self, resdct: dict) -> None:
'The server has told us about a new QAI update.\n ==> send the new data to wcstatus\n ==? also tell the download view.\n '
if (self.wcstatus is not None):
self.wcstatus.set_QAIupdate_state(resdct)
dnl_view = self.switch.getView(QAI_DOWNLOAD_VIEW_NAME)
dnl_view.stop_download(resdct) | The server has told us about a new QAI update.
==> send the new data to wcstatus
==? also tell the download view. | stocky-devel/stocky/webclient/wccontroller.py | set_qai_update | cfe-lab/stocky | 1 | python | def set_qai_update(self, resdct: dict) -> None:
'The server has told us about a new QAI update.\n ==> send the new data to wcstatus\n ==? also tell the download view.\n '
if (self.wcstatus is not None):
self.wcstatus.set_QAIupdate_state(resdct)
dnl_view = self.switch.getView(QAI_DOWNLOAD_VIEW_NAME)
dnl_view.stop_download(resdct) | def set_qai_update(self, resdct: dict) -> None:
'The server has told us about a new QAI update.\n ==> send the new data to wcstatus\n ==? also tell the download view.\n '
if (self.wcstatus is not None):
self.wcstatus.set_QAIupdate_state(resdct)
dnl_view = self.switch.getView(QAI_DOWNLOAD_VIEW_NAME)
dnl_view.stop_download(resdct)<|docstring|>The server has told us about a new QAI update.
==> send the new data to wcstatus
==? also tell the download view.<|endoftext|> |
3273af2870e30bc26d22d7a45fe13d29e98f905264e7875d791542f2debeac07 | def set_locmut_update(self, rdct: dict, newhash: str) -> None:
'The server has told us about a new locmut dict,\n or the server has told us there have been no changes since we last\n polled (rdct will be None in this case).\n ==> send the data to wcstatus if required.\n ==> also tell the locmut view to change its busy status.\n '
if ((rdct is not None) and (self.wcstatus is not None)):
self.wcstatus.set_locmut_dct(rdct, newhash)
locmut_view = self.switch.getView(LOCMUT_UPLOAD_VIEW_NAME)
locmut_view.stop_locmut_download() | The server has told us about a new locmut dict,
or the server has told us there have been no changes since we last
polled (rdct will be None in this case).
==> send the data to wcstatus if required.
==> also tell the locmut view to change its busy status. | stocky-devel/stocky/webclient/wccontroller.py | set_locmut_update | cfe-lab/stocky | 1 | python | def set_locmut_update(self, rdct: dict, newhash: str) -> None:
'The server has told us about a new locmut dict,\n or the server has told us there have been no changes since we last\n polled (rdct will be None in this case).\n ==> send the data to wcstatus if required.\n ==> also tell the locmut view to change its busy status.\n '
if ((rdct is not None) and (self.wcstatus is not None)):
self.wcstatus.set_locmut_dct(rdct, newhash)
locmut_view = self.switch.getView(LOCMUT_UPLOAD_VIEW_NAME)
locmut_view.stop_locmut_download() | def set_locmut_update(self, rdct: dict, newhash: str) -> None:
'The server has told us about a new locmut dict,\n or the server has told us there have been no changes since we last\n polled (rdct will be None in this case).\n ==> send the data to wcstatus if required.\n ==> also tell the locmut view to change its busy status.\n '
if ((rdct is not None) and (self.wcstatus is not None)):
self.wcstatus.set_locmut_dct(rdct, newhash)
locmut_view = self.switch.getView(LOCMUT_UPLOAD_VIEW_NAME)
locmut_view.stop_locmut_download()<|docstring|>The server has told us about a new locmut dict,
or the server has told us there have been no changes since we last
polled (rdct will be None in this case).
==> send the data to wcstatus if required.
==> also tell the locmut view to change its busy status.<|endoftext|> |
4aad993a870c79190533d76015d8885a0f01015d33f9958c9eda78f9cdbff5c8 | def add_literal(self, v):
' For use in the pseudo instruction LDR r0, =SOMESYM '
assert (type(v) is str)
self.lit_counter += 1
label_name = '_lit_{}'.format(self.lit_counter)
self.lit_pool.append(Label(label_name))
self.lit_pool.append(dcd(v))
return label_name | For use in the pseudo instruction LDR r0, =SOMESYM | ppci/arch/riscv/arch.py | add_literal | kl4w3i/ppci | 161 | python | def add_literal(self, v):
' '
assert (type(v) is str)
self.lit_counter += 1
label_name = '_lit_{}'.format(self.lit_counter)
self.lit_pool.append(Label(label_name))
self.lit_pool.append(dcd(v))
return label_name | def add_literal(self, v):
' '
assert (type(v) is str)
self.lit_counter += 1
label_name = '_lit_{}'.format(self.lit_counter)
self.lit_pool.append(Label(label_name))
self.lit_pool.append(dcd(v))
return label_name<|docstring|>For use in the pseudo instruction LDR r0, =SOMESYM<|endoftext|> |
1b0ae6c33f2a2b5eb2ab377e1a964d7da4d241e8034a88c4c6bc43949a4f9334 | def get_runtime(self):
' Implement compiler runtime functions '
from ...api import asm
asm_src = '\n __sdiv:\n ; Divide x12 by x13\n ; x14 is a work register.\n ; x10 is the quotient\n\n mv x10, x0 ; Initialize the result\n li x14, 1 ; mov divisor into temporary register.\n\n ; Blow up part: blow up divisor until it is larger than the divident.\n __shiftl:\n bge x13, x12, __cont1\n slli x13, x13, 1\n slli x14, x14, 1\n j __shiftl\n\n ; Repeatedly substract shifted versions of divisor\n __cont1:\n beq x14, x0, __exit\n blt x12, x13, __skip\n sub x12, x12, x13\n or x10, x10, x14\n __skip:\n srli x13, x13, 1\n srli x14, x14, 1\n j __cont1\n\n __exit:\n jalr x0,ra,0\n '
return asm(io.StringIO(asm_src), self) | Implement compiler runtime functions | ppci/arch/riscv/arch.py | get_runtime | kl4w3i/ppci | 161 | python | def get_runtime(self):
' '
from ...api import asm
asm_src = '\n __sdiv:\n ; Divide x12 by x13\n ; x14 is a work register.\n ; x10 is the quotient\n\n mv x10, x0 ; Initialize the result\n li x14, 1 ; mov divisor into temporary register.\n\n ; Blow up part: blow up divisor until it is larger than the divident.\n __shiftl:\n bge x13, x12, __cont1\n slli x13, x13, 1\n slli x14, x14, 1\n j __shiftl\n\n ; Repeatedly substract shifted versions of divisor\n __cont1:\n beq x14, x0, __exit\n blt x12, x13, __skip\n sub x12, x12, x13\n or x10, x10, x14\n __skip:\n srli x13, x13, 1\n srli x14, x14, 1\n j __cont1\n\n __exit:\n jalr x0,ra,0\n '
return asm(io.StringIO(asm_src), self) | def get_runtime(self):
' '
from ...api import asm
asm_src = '\n __sdiv:\n ; Divide x12 by x13\n ; x14 is a work register.\n ; x10 is the quotient\n\n mv x10, x0 ; Initialize the result\n li x14, 1 ; mov divisor into temporary register.\n\n ; Blow up part: blow up divisor until it is larger than the divident.\n __shiftl:\n bge x13, x12, __cont1\n slli x13, x13, 1\n slli x14, x14, 1\n j __shiftl\n\n ; Repeatedly substract shifted versions of divisor\n __cont1:\n beq x14, x0, __exit\n blt x12, x13, __skip\n sub x12, x12, x13\n or x10, x10, x14\n __skip:\n srli x13, x13, 1\n srli x14, x14, 1\n j __cont1\n\n __exit:\n jalr x0,ra,0\n '
return asm(io.StringIO(asm_src), self)<|docstring|>Implement compiler runtime functions<|endoftext|> |
d7a81402ba52bd24df05923b3fb95a72a4ce545f594a3bbd85dbee90b4b4ecb0 | def move(self, dst, src):
' Generate a move from src to dst '
if self.has_option('rvc'):
return CMovr(dst, src, ismove=True)
elif (isinstance(dst, RiscvFRegister) and isinstance(src, RiscvFRegister) and self.has_option('rvf')):
return movf(dst, src)
else:
return Movr(dst, src, ismove=True) | Generate a move from src to dst | ppci/arch/riscv/arch.py | move | kl4w3i/ppci | 161 | python | def move(self, dst, src):
' '
if self.has_option('rvc'):
return CMovr(dst, src, ismove=True)
elif (isinstance(dst, RiscvFRegister) and isinstance(src, RiscvFRegister) and self.has_option('rvf')):
return movf(dst, src)
else:
return Movr(dst, src, ismove=True) | def move(self, dst, src):
' '
if self.has_option('rvc'):
return CMovr(dst, src, ismove=True)
elif (isinstance(dst, RiscvFRegister) and isinstance(src, RiscvFRegister) and self.has_option('rvf')):
return movf(dst, src)
else:
return Movr(dst, src, ismove=True)<|docstring|>Generate a move from src to dst<|endoftext|> |
4c2e1dc906eb3794aa4e5b7bddf127f161d8111b25b47041f23e8682b1b14f16 | def gen_call(self, frame, label, args, rv):
' Implement actual call and save / restore live registers '
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
stack_size = 0
for (arg_loc, arg2) in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, (RiscvRegister, RiscvFRegister)):
(yield self.move(arg_loc, arg))
elif isinstance(arg_loc, StackLocation):
stack_size += arg_loc.size
if isinstance(arg, RiscvRegister):
(yield Sw(arg, arg_loc.offset, SP))
elif isinstance(arg, StackLocation):
p1 = frame.new_reg(RiscvRegister)
p2 = frame.new_reg(RiscvRegister)
v3 = frame.new_reg(RiscvRegister)
(yield instructions.Addi(p1, SP, arg_loc.offset))
(yield instructions.Addi(p2, self.fp, ((arg.offset + round_up((frame.stacksize + 8))) - 8)))
for instruction in self.gen_riscv_memcpy(p1, p2, v3, arg.size):
(yield instruction)
else:
raise NotImplementedError('Parameters in memory not impl')
frame.add_out_call(stack_size)
arg_regs = set((arg_loc for arg_loc in arg_locs if isinstance(arg_loc, Register)))
(yield RegisterUseDef(uses=arg_regs))
(yield self.branch(LR, label))
if rv:
retval_loc = self.determine_rv_location(rv[0])
(yield RegisterUseDef(defs=(retval_loc,)))
(yield self.move(rv[1], retval_loc)) | Implement actual call and save / restore live registers | ppci/arch/riscv/arch.py | gen_call | kl4w3i/ppci | 161 | python | def gen_call(self, frame, label, args, rv):
' '
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
stack_size = 0
for (arg_loc, arg2) in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, (RiscvRegister, RiscvFRegister)):
(yield self.move(arg_loc, arg))
elif isinstance(arg_loc, StackLocation):
stack_size += arg_loc.size
if isinstance(arg, RiscvRegister):
(yield Sw(arg, arg_loc.offset, SP))
elif isinstance(arg, StackLocation):
p1 = frame.new_reg(RiscvRegister)
p2 = frame.new_reg(RiscvRegister)
v3 = frame.new_reg(RiscvRegister)
(yield instructions.Addi(p1, SP, arg_loc.offset))
(yield instructions.Addi(p2, self.fp, ((arg.offset + round_up((frame.stacksize + 8))) - 8)))
for instruction in self.gen_riscv_memcpy(p1, p2, v3, arg.size):
(yield instruction)
else:
raise NotImplementedError('Parameters in memory not impl')
frame.add_out_call(stack_size)
arg_regs = set((arg_loc for arg_loc in arg_locs if isinstance(arg_loc, Register)))
(yield RegisterUseDef(uses=arg_regs))
(yield self.branch(LR, label))
if rv:
retval_loc = self.determine_rv_location(rv[0])
(yield RegisterUseDef(defs=(retval_loc,)))
(yield self.move(rv[1], retval_loc)) | def gen_call(self, frame, label, args, rv):
' '
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
stack_size = 0
for (arg_loc, arg2) in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, (RiscvRegister, RiscvFRegister)):
(yield self.move(arg_loc, arg))
elif isinstance(arg_loc, StackLocation):
stack_size += arg_loc.size
if isinstance(arg, RiscvRegister):
(yield Sw(arg, arg_loc.offset, SP))
elif isinstance(arg, StackLocation):
p1 = frame.new_reg(RiscvRegister)
p2 = frame.new_reg(RiscvRegister)
v3 = frame.new_reg(RiscvRegister)
(yield instructions.Addi(p1, SP, arg_loc.offset))
(yield instructions.Addi(p2, self.fp, ((arg.offset + round_up((frame.stacksize + 8))) - 8)))
for instruction in self.gen_riscv_memcpy(p1, p2, v3, arg.size):
(yield instruction)
else:
raise NotImplementedError('Parameters in memory not impl')
frame.add_out_call(stack_size)
arg_regs = set((arg_loc for arg_loc in arg_locs if isinstance(arg_loc, Register)))
(yield RegisterUseDef(uses=arg_regs))
(yield self.branch(LR, label))
if rv:
retval_loc = self.determine_rv_location(rv[0])
(yield RegisterUseDef(defs=(retval_loc,)))
(yield self.move(rv[1], retval_loc))<|docstring|>Implement actual call and save / restore live registers<|endoftext|> |
0bcce3cafc32e74e39bba44869ddaf8927908fa51d23ea06dcdaa9bf9ecaa880 | def determine_arg_locations(self, arg_types):
'\n Given a set of argument types, determine location for argument\n ABI:\n pass args in R12-R17\n return values in R10\n '
locations = []
regs = [R12, R13, R14, R15, R16, R17]
fregs = [F12, F13, F14, F15, F16, F17]
offset = 0
for a in arg_types:
if a.is_blob:
r = StackLocation(offset, a.size)
offset += a.size
elif ((a in [ir.f32, ir.f64]) and self.has_option('rvf')):
if fregs:
r = fregs.pop(0)
else:
arg_size = self.info.get_size(a)
r = StackLocation(offset, a.size)
offset += arg_size
elif regs:
r = regs.pop(0)
else:
arg_size = self.info.get_size(a)
r = StackLocation(offset, arg_size)
offset += arg_size
locations.append(r)
return locations | Given a set of argument types, determine location for argument
ABI:
pass args in R12-R17
return values in R10 | ppci/arch/riscv/arch.py | determine_arg_locations | kl4w3i/ppci | 161 | python | def determine_arg_locations(self, arg_types):
'\n Given a set of argument types, determine location for argument\n ABI:\n pass args in R12-R17\n return values in R10\n '
locations = []
regs = [R12, R13, R14, R15, R16, R17]
fregs = [F12, F13, F14, F15, F16, F17]
offset = 0
for a in arg_types:
if a.is_blob:
r = StackLocation(offset, a.size)
offset += a.size
elif ((a in [ir.f32, ir.f64]) and self.has_option('rvf')):
if fregs:
r = fregs.pop(0)
else:
arg_size = self.info.get_size(a)
r = StackLocation(offset, a.size)
offset += arg_size
elif regs:
r = regs.pop(0)
else:
arg_size = self.info.get_size(a)
r = StackLocation(offset, arg_size)
offset += arg_size
locations.append(r)
return locations | def determine_arg_locations(self, arg_types):
'\n Given a set of argument types, determine location for argument\n ABI:\n pass args in R12-R17\n return values in R10\n '
locations = []
regs = [R12, R13, R14, R15, R16, R17]
fregs = [F12, F13, F14, F15, F16, F17]
offset = 0
for a in arg_types:
if a.is_blob:
r = StackLocation(offset, a.size)
offset += a.size
elif ((a in [ir.f32, ir.f64]) and self.has_option('rvf')):
if fregs:
r = fregs.pop(0)
else:
arg_size = self.info.get_size(a)
r = StackLocation(offset, a.size)
offset += arg_size
elif regs:
r = regs.pop(0)
else:
arg_size = self.info.get_size(a)
r = StackLocation(offset, arg_size)
offset += arg_size
locations.append(r)
return locations<|docstring|>Given a set of argument types, determine location for argument
ABI:
pass args in R12-R17
return values in R10<|endoftext|> |
bd336114ff3da06adc5739d134c43f04436f5bf870036c458f2da998a6e42f5b | def gen_prologue(self, frame):
' Returns prologue instruction sequence '
(yield Label(frame.name))
ssize = round_up((frame.stacksize + 8))
if (self.has_option('rvc') and isinsrange(10, (- ssize))):
(yield CAddi16sp((- ssize)))
else:
(yield Addi(SP, SP, (- ssize)))
if self.has_option('rvc'):
(yield CSwsp(LR, 4))
(yield CSwsp(FP, 0))
else:
(yield Sw(LR, 4, SP))
(yield Sw(FP, 0, SP))
if self.has_option('rvc'):
(yield CAddi4spn(FP, 8))
else:
(yield Addi(FP, SP, 8))
saved_registers = self.get_callee_saved(frame)
rsize = (4 * len(saved_registers))
rsize = round_up(rsize)
if (self.has_option('rvc') and isinsrange(10, rsize)):
(yield CAddi16sp((- rsize)))
else:
(yield Addi(SP, SP, (- rsize)))
i = 0
for register in saved_registers:
i -= 4
if self.has_option('rvc'):
(yield CSwsp(register, (i + rsize)))
else:
(yield Sw(register, (i + rsize), SP))
extras = (max(frame.out_calls) if frame.out_calls else 0)
if extras:
ssize = round_up(extras)
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp((- ssize)))
else:
(yield Addi(SP, SP, (- ssize))) | Returns prologue instruction sequence | ppci/arch/riscv/arch.py | gen_prologue | kl4w3i/ppci | 161 | python | def gen_prologue(self, frame):
' '
(yield Label(frame.name))
ssize = round_up((frame.stacksize + 8))
if (self.has_option('rvc') and isinsrange(10, (- ssize))):
(yield CAddi16sp((- ssize)))
else:
(yield Addi(SP, SP, (- ssize)))
if self.has_option('rvc'):
(yield CSwsp(LR, 4))
(yield CSwsp(FP, 0))
else:
(yield Sw(LR, 4, SP))
(yield Sw(FP, 0, SP))
if self.has_option('rvc'):
(yield CAddi4spn(FP, 8))
else:
(yield Addi(FP, SP, 8))
saved_registers = self.get_callee_saved(frame)
rsize = (4 * len(saved_registers))
rsize = round_up(rsize)
if (self.has_option('rvc') and isinsrange(10, rsize)):
(yield CAddi16sp((- rsize)))
else:
(yield Addi(SP, SP, (- rsize)))
i = 0
for register in saved_registers:
i -= 4
if self.has_option('rvc'):
(yield CSwsp(register, (i + rsize)))
else:
(yield Sw(register, (i + rsize), SP))
extras = (max(frame.out_calls) if frame.out_calls else 0)
if extras:
ssize = round_up(extras)
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp((- ssize)))
else:
(yield Addi(SP, SP, (- ssize))) | def gen_prologue(self, frame):
' '
(yield Label(frame.name))
ssize = round_up((frame.stacksize + 8))
if (self.has_option('rvc') and isinsrange(10, (- ssize))):
(yield CAddi16sp((- ssize)))
else:
(yield Addi(SP, SP, (- ssize)))
if self.has_option('rvc'):
(yield CSwsp(LR, 4))
(yield CSwsp(FP, 0))
else:
(yield Sw(LR, 4, SP))
(yield Sw(FP, 0, SP))
if self.has_option('rvc'):
(yield CAddi4spn(FP, 8))
else:
(yield Addi(FP, SP, 8))
saved_registers = self.get_callee_saved(frame)
rsize = (4 * len(saved_registers))
rsize = round_up(rsize)
if (self.has_option('rvc') and isinsrange(10, rsize)):
(yield CAddi16sp((- rsize)))
else:
(yield Addi(SP, SP, (- rsize)))
i = 0
for register in saved_registers:
i -= 4
if self.has_option('rvc'):
(yield CSwsp(register, (i + rsize)))
else:
(yield Sw(register, (i + rsize), SP))
extras = (max(frame.out_calls) if frame.out_calls else 0)
if extras:
ssize = round_up(extras)
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp((- ssize)))
else:
(yield Addi(SP, SP, (- ssize)))<|docstring|>Returns prologue instruction sequence<|endoftext|> |
ca4121dc85014979861de233cba55a9eb8120868a85938974956631f0ab44bb9 | def litpool(self, frame):
' Generate instruction for the current literals '
(yield Section('data'))
if frame.constants:
(yield Align(4))
while frame.constants:
(label, value) = frame.constants.pop(0)
(yield Label(label))
if isinstance(value, (int, str)):
(yield dcd(value))
elif isinstance(value, bytes):
for byte in value:
(yield DByte(byte))
(yield Align(4))
else:
raise NotImplementedError('Constant of type {}'.format(value))
(yield Section('code')) | Generate instruction for the current literals | ppci/arch/riscv/arch.py | litpool | kl4w3i/ppci | 161 | python | def litpool(self, frame):
' '
(yield Section('data'))
if frame.constants:
(yield Align(4))
while frame.constants:
(label, value) = frame.constants.pop(0)
(yield Label(label))
if isinstance(value, (int, str)):
(yield dcd(value))
elif isinstance(value, bytes):
for byte in value:
(yield DByte(byte))
(yield Align(4))
else:
raise NotImplementedError('Constant of type {}'.format(value))
(yield Section('code')) | def litpool(self, frame):
' '
(yield Section('data'))
if frame.constants:
(yield Align(4))
while frame.constants:
(label, value) = frame.constants.pop(0)
(yield Label(label))
if isinstance(value, (int, str)):
(yield dcd(value))
elif isinstance(value, bytes):
for byte in value:
(yield DByte(byte))
(yield Align(4))
else:
raise NotImplementedError('Constant of type {}'.format(value))
(yield Section('code'))<|docstring|>Generate instruction for the current literals<|endoftext|> |
6d659860d1b2f88bb1f7846c6f8f06e942fd26001c1d0fbee0b1e70377a53cd4 | def gen_epilogue(self, frame):
'Return epilogue sequence for a frame. Adjust frame pointer\n and add constant pool\n '
extras = (max(frame.out_calls) if frame.out_calls else 0)
if extras:
ssize = round_up(extras)
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp(ssize))
else:
(yield Addi(SP, SP, ssize))
saved_registers = self.get_callee_saved(frame)
rsize = (4 * len(saved_registers))
rsize = round_up(rsize)
i = 0
for register in saved_registers:
i -= 4
if self.has_option('rvc'):
(yield CLwsp(register, (i + rsize)))
else:
(yield Lw(register, (i + rsize), SP))
if (self.has_option('rvc') and isinsrange(10, rsize)):
(yield CAddi16sp(rsize))
else:
(yield Addi(SP, SP, rsize))
if self.has_option('rvc'):
(yield CLwsp(LR, 4))
(yield CLwsp(FP, 0))
else:
(yield Lw(LR, 4, SP))
(yield Lw(FP, 0, SP))
ssize = round_up((frame.stacksize + 8))
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp(ssize))
else:
(yield Addi(SP, SP, ssize))
if self.has_option('rvc'):
(yield CJr(LR))
else:
(yield Blr(R0, LR, 0))
for instruction in self.litpool(frame):
(yield instruction)
(yield Align(4)) | Return epilogue sequence for a frame. Adjust frame pointer
and add constant pool | ppci/arch/riscv/arch.py | gen_epilogue | kl4w3i/ppci | 161 | python | def gen_epilogue(self, frame):
'Return epilogue sequence for a frame. Adjust frame pointer\n and add constant pool\n '
extras = (max(frame.out_calls) if frame.out_calls else 0)
if extras:
ssize = round_up(extras)
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp(ssize))
else:
(yield Addi(SP, SP, ssize))
saved_registers = self.get_callee_saved(frame)
rsize = (4 * len(saved_registers))
rsize = round_up(rsize)
i = 0
for register in saved_registers:
i -= 4
if self.has_option('rvc'):
(yield CLwsp(register, (i + rsize)))
else:
(yield Lw(register, (i + rsize), SP))
if (self.has_option('rvc') and isinsrange(10, rsize)):
(yield CAddi16sp(rsize))
else:
(yield Addi(SP, SP, rsize))
if self.has_option('rvc'):
(yield CLwsp(LR, 4))
(yield CLwsp(FP, 0))
else:
(yield Lw(LR, 4, SP))
(yield Lw(FP, 0, SP))
ssize = round_up((frame.stacksize + 8))
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp(ssize))
else:
(yield Addi(SP, SP, ssize))
if self.has_option('rvc'):
(yield CJr(LR))
else:
(yield Blr(R0, LR, 0))
for instruction in self.litpool(frame):
(yield instruction)
(yield Align(4)) | def gen_epilogue(self, frame):
'Return epilogue sequence for a frame. Adjust frame pointer\n and add constant pool\n '
extras = (max(frame.out_calls) if frame.out_calls else 0)
if extras:
ssize = round_up(extras)
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp(ssize))
else:
(yield Addi(SP, SP, ssize))
saved_registers = self.get_callee_saved(frame)
rsize = (4 * len(saved_registers))
rsize = round_up(rsize)
i = 0
for register in saved_registers:
i -= 4
if self.has_option('rvc'):
(yield CLwsp(register, (i + rsize)))
else:
(yield Lw(register, (i + rsize), SP))
if (self.has_option('rvc') and isinsrange(10, rsize)):
(yield CAddi16sp(rsize))
else:
(yield Addi(SP, SP, rsize))
if self.has_option('rvc'):
(yield CLwsp(LR, 4))
(yield CLwsp(FP, 0))
else:
(yield Lw(LR, 4, SP))
(yield Lw(FP, 0, SP))
ssize = round_up((frame.stacksize + 8))
if (self.has_option('rvc') and isinsrange(10, ssize)):
(yield CAddi16sp(ssize))
else:
(yield Addi(SP, SP, ssize))
if self.has_option('rvc'):
(yield CJr(LR))
else:
(yield Blr(R0, LR, 0))
for instruction in self.litpool(frame):
(yield instruction)
(yield Align(4))<|docstring|>Return epilogue sequence for a frame. Adjust frame pointer
and add constant pool<|endoftext|> |
cea8cfce375a43e62f7dccb80394042a750e1d6fa1f9d259723e3e3397bae412 | def qnode(device, interface='autograd', diff_method='best', caching=0, **diff_options):
'Decorator for creating QNodes.\n\n This decorator is used to indicate to PennyLane that the decorated function contains a\n :ref:`quantum variational circuit <glossary_variational_circuit>` that should be bound to a\n compatible device.\n\n The QNode calls the quantum function to construct a :class:`~.JacobianTape` instance representing\n the quantum circuit.\n\n .. note::\n\n The quantum tape is an *experimental* feature. QNodes that use the quantum\n tape have access to advanced features, such as in-QNode classical processing,\n but do not yet have feature parity with the standard PennyLane QNode.\n\n This quantum tape-comaptible QNode can either be created directly,\n\n >>> import pennylane as qml\n >>> @qml.tape.qnode(dev)\n\n or enabled globally via :func:`~.enable_tape` without changing your PennyLane code:\n\n >>> qml.enable_tape()\n\n For more details, see :mod:`pennylane.tape`.\n\n Args:\n func (callable): a quantum function\n device (~.Device): a PennyLane-compatible device\n interface (str): The interface that will be used for classical backpropagation.\n This affects the types of objects that can be passed to/returned from the QNode:\n\n * ``interface=\'autograd\'``: Allows autograd to backpropogate\n through the QNode. The QNode accepts default Python types\n (floats, ints, lists) as well as NumPy array arguments,\n and returns NumPy arrays.\n\n * ``interface=\'torch\'``: Allows PyTorch to backpropogate\n through the QNode. The QNode accepts and returns Torch tensors.\n\n * ``interface=\'tf\'``: Allows TensorFlow in eager mode to backpropogate\n through the QNode. The QNode accepts and returns\n TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.\n\n * ``None``: The QNode accepts default Python types\n (floats, ints, lists) as well as NumPy array arguments,\n and returns NumPy arrays. 
It does not connect to any\n machine learning library automatically for backpropagation.\n\n diff_method (str, None): the method of differentiation to use in the created QNode.\n\n * ``"best"``: Best available method. Uses classical backpropagation or the\n device directly to compute the gradient if supported, otherwise will use\n the analytic parameter-shift rule where possible with finite-difference as a fallback.\n\n * ``"backprop"``: Use classical backpropagation. Only allowed on simulator\n devices that are classically end-to-end differentiable, for example\n :class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned\n QNode can only be used with the machine-learning framework supported\n by the device; a separate ``interface`` argument should not be passed.\n\n * ``"reversible"``: Uses a reversible method for computing the gradient.\n This method is similar to ``"backprop"``, but trades off increased\n runtime with significantly lower memory usage. Compared to the\n parameter-shift rule, the reversible method can be faster or slower,\n depending on the density and location of parametrized gates in a circuit.\n Only allowed on (simulator) devices with the "reversible" capability,\n for example :class:`default.qubit <~.DefaultQubit>`.\n\n * ``"device"``: Queries the device directly for the gradient.\n Only allowed on devices that provide their own gradient rules.\n\n * ``"parameter-shift"``: Use the analytic parameter-shift\n rule for all supported quantum operation arguments, with finite-difference\n as a fallback.\n\n * ``"finite-diff"``: Uses numerical finite-differences for all quantum\n operation arguments.\n\n caching (int): Number of device executions to store in a cache to speed up subsequent\n executions. A value of ``0`` indicates that no caching will take place. 
Once filled,\n older elements of the cache are removed and replaced with the most recent device\n executions to keep the cache up to date.\n\n Keyword Args:\n h=1e-7 (float): Step size for the finite difference method.\n order=1 (int): The order of the finite difference method to use. ``1`` corresponds\n to forward finite differences, ``2`` to centered finite differences.\n\n **Example**\n\n >>> qml.enable_tape()\n >>> dev = qml.device("default.qubit", wires=1)\n >>> @qml.qnode(dev)\n >>> def circuit(x):\n >>> qml.RX(x, wires=0)\n >>> return expval(qml.PauliZ(0))\n '
@lru_cache()
def qfunc_decorator(func):
'The actual decorator'
qn = QNode(func, device, interface=interface, diff_method=diff_method, caching=caching, **diff_options)
return update_wrapper(qn, func)
return qfunc_decorator | Decorator for creating QNodes.
This decorator is used to indicate to PennyLane that the decorated function contains a
:ref:`quantum variational circuit <glossary_variational_circuit>` that should be bound to a
compatible device.
The QNode calls the quantum function to construct a :class:`~.JacobianTape` instance representing
the quantum circuit.
.. note::
The quantum tape is an *experimental* feature. QNodes that use the quantum
tape have access to advanced features, such as in-QNode classical processing,
but do not yet have feature parity with the standard PennyLane QNode.
This quantum tape-comaptible QNode can either be created directly,
>>> import pennylane as qml
>>> @qml.tape.qnode(dev)
or enabled globally via :func:`~.enable_tape` without changing your PennyLane code:
>>> qml.enable_tape()
For more details, see :mod:`pennylane.tape`.
Args:
func (callable): a quantum function
device (~.Device): a PennyLane-compatible device
interface (str): The interface that will be used for classical backpropagation.
This affects the types of objects that can be passed to/returned from the QNode:
* ``interface='autograd'``: Allows autograd to backpropogate
through the QNode. The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays.
* ``interface='torch'``: Allows PyTorch to backpropogate
through the QNode. The QNode accepts and returns Torch tensors.
* ``interface='tf'``: Allows TensorFlow in eager mode to backpropogate
through the QNode. The QNode accepts and returns
TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.
* ``None``: The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays. It does not connect to any
machine learning library automatically for backpropagation.
diff_method (str, None): the method of differentiation to use in the created QNode.
* ``"best"``: Best available method. Uses classical backpropagation or the
device directly to compute the gradient if supported, otherwise will use
the analytic parameter-shift rule where possible with finite-difference as a fallback.
* ``"backprop"``: Use classical backpropagation. Only allowed on simulator
devices that are classically end-to-end differentiable, for example
:class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned
QNode can only be used with the machine-learning framework supported
by the device; a separate ``interface`` argument should not be passed.
* ``"reversible"``: Uses a reversible method for computing the gradient.
This method is similar to ``"backprop"``, but trades off increased
runtime with significantly lower memory usage. Compared to the
parameter-shift rule, the reversible method can be faster or slower,
depending on the density and location of parametrized gates in a circuit.
Only allowed on (simulator) devices with the "reversible" capability,
for example :class:`default.qubit <~.DefaultQubit>`.
* ``"device"``: Queries the device directly for the gradient.
Only allowed on devices that provide their own gradient rules.
* ``"parameter-shift"``: Use the analytic parameter-shift
rule for all supported quantum operation arguments, with finite-difference
as a fallback.
* ``"finite-diff"``: Uses numerical finite-differences for all quantum
operation arguments.
caching (int): Number of device executions to store in a cache to speed up subsequent
executions. A value of ``0`` indicates that no caching will take place. Once filled,
older elements of the cache are removed and replaced with the most recent device
executions to keep the cache up to date.
Keyword Args:
h=1e-7 (float): Step size for the finite difference method.
order=1 (int): The order of the finite difference method to use. ``1`` corresponds
to forward finite differences, ``2`` to centered finite differences.
**Example**
>>> qml.enable_tape()
>>> dev = qml.device("default.qubit", wires=1)
>>> @qml.qnode(dev)
>>> def circuit(x):
>>> qml.RX(x, wires=0)
>>> return expval(qml.PauliZ(0)) | pennylane/tape/qnode.py | qnode | anthayes92/pennylane | 1 | python | def qnode(device, interface='autograd', diff_method='best', caching=0, **diff_options):
'Decorator for creating QNodes.\n\n This decorator is used to indicate to PennyLane that the decorated function contains a\n :ref:`quantum variational circuit <glossary_variational_circuit>` that should be bound to a\n compatible device.\n\n The QNode calls the quantum function to construct a :class:`~.JacobianTape` instance representing\n the quantum circuit.\n\n .. note::\n\n The quantum tape is an *experimental* feature. QNodes that use the quantum\n tape have access to advanced features, such as in-QNode classical processing,\n but do not yet have feature parity with the standard PennyLane QNode.\n\n This quantum tape-comaptible QNode can either be created directly,\n\n >>> import pennylane as qml\n >>> @qml.tape.qnode(dev)\n\n or enabled globally via :func:`~.enable_tape` without changing your PennyLane code:\n\n >>> qml.enable_tape()\n\n For more details, see :mod:`pennylane.tape`.\n\n Args:\n func (callable): a quantum function\n device (~.Device): a PennyLane-compatible device\n interface (str): The interface that will be used for classical backpropagation.\n This affects the types of objects that can be passed to/returned from the QNode:\n\n * ``interface=\'autograd\'``: Allows autograd to backpropogate\n through the QNode. The QNode accepts default Python types\n (floats, ints, lists) as well as NumPy array arguments,\n and returns NumPy arrays.\n\n * ``interface=\'torch\'``: Allows PyTorch to backpropogate\n through the QNode. The QNode accepts and returns Torch tensors.\n\n * ``interface=\'tf\'``: Allows TensorFlow in eager mode to backpropogate\n through the QNode. The QNode accepts and returns\n TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.\n\n * ``None``: The QNode accepts default Python types\n (floats, ints, lists) as well as NumPy array arguments,\n and returns NumPy arrays. 
It does not connect to any\n machine learning library automatically for backpropagation.\n\n diff_method (str, None): the method of differentiation to use in the created QNode.\n\n * ``"best"``: Best available method. Uses classical backpropagation or the\n device directly to compute the gradient if supported, otherwise will use\n the analytic parameter-shift rule where possible with finite-difference as a fallback.\n\n * ``"backprop"``: Use classical backpropagation. Only allowed on simulator\n devices that are classically end-to-end differentiable, for example\n :class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned\n QNode can only be used with the machine-learning framework supported\n by the device; a separate ``interface`` argument should not be passed.\n\n * ``"reversible"``: Uses a reversible method for computing the gradient.\n This method is similar to ``"backprop"``, but trades off increased\n runtime with significantly lower memory usage. Compared to the\n parameter-shift rule, the reversible method can be faster or slower,\n depending on the density and location of parametrized gates in a circuit.\n Only allowed on (simulator) devices with the "reversible" capability,\n for example :class:`default.qubit <~.DefaultQubit>`.\n\n * ``"device"``: Queries the device directly for the gradient.\n Only allowed on devices that provide their own gradient rules.\n\n * ``"parameter-shift"``: Use the analytic parameter-shift\n rule for all supported quantum operation arguments, with finite-difference\n as a fallback.\n\n * ``"finite-diff"``: Uses numerical finite-differences for all quantum\n operation arguments.\n\n caching (int): Number of device executions to store in a cache to speed up subsequent\n executions. A value of ``0`` indicates that no caching will take place. 
Once filled,\n older elements of the cache are removed and replaced with the most recent device\n executions to keep the cache up to date.\n\n Keyword Args:\n h=1e-7 (float): Step size for the finite difference method.\n order=1 (int): The order of the finite difference method to use. ``1`` corresponds\n to forward finite differences, ``2`` to centered finite differences.\n\n **Example**\n\n >>> qml.enable_tape()\n >>> dev = qml.device("default.qubit", wires=1)\n >>> @qml.qnode(dev)\n >>> def circuit(x):\n >>> qml.RX(x, wires=0)\n >>> return expval(qml.PauliZ(0))\n '
@lru_cache()
def qfunc_decorator(func):
'The actual decorator'
qn = QNode(func, device, interface=interface, diff_method=diff_method, caching=caching, **diff_options)
return update_wrapper(qn, func)
return qfunc_decorator | def qnode(device, interface='autograd', diff_method='best', caching=0, **diff_options):
'Decorator for creating QNodes.\n\n This decorator is used to indicate to PennyLane that the decorated function contains a\n :ref:`quantum variational circuit <glossary_variational_circuit>` that should be bound to a\n compatible device.\n\n The QNode calls the quantum function to construct a :class:`~.JacobianTape` instance representing\n the quantum circuit.\n\n .. note::\n\n The quantum tape is an *experimental* feature. QNodes that use the quantum\n tape have access to advanced features, such as in-QNode classical processing,\n but do not yet have feature parity with the standard PennyLane QNode.\n\n This quantum tape-comaptible QNode can either be created directly,\n\n >>> import pennylane as qml\n >>> @qml.tape.qnode(dev)\n\n or enabled globally via :func:`~.enable_tape` without changing your PennyLane code:\n\n >>> qml.enable_tape()\n\n For more details, see :mod:`pennylane.tape`.\n\n Args:\n func (callable): a quantum function\n device (~.Device): a PennyLane-compatible device\n interface (str): The interface that will be used for classical backpropagation.\n This affects the types of objects that can be passed to/returned from the QNode:\n\n * ``interface=\'autograd\'``: Allows autograd to backpropogate\n through the QNode. The QNode accepts default Python types\n (floats, ints, lists) as well as NumPy array arguments,\n and returns NumPy arrays.\n\n * ``interface=\'torch\'``: Allows PyTorch to backpropogate\n through the QNode. The QNode accepts and returns Torch tensors.\n\n * ``interface=\'tf\'``: Allows TensorFlow in eager mode to backpropogate\n through the QNode. The QNode accepts and returns\n TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.\n\n * ``None``: The QNode accepts default Python types\n (floats, ints, lists) as well as NumPy array arguments,\n and returns NumPy arrays. 
It does not connect to any\n machine learning library automatically for backpropagation.\n\n diff_method (str, None): the method of differentiation to use in the created QNode.\n\n * ``"best"``: Best available method. Uses classical backpropagation or the\n device directly to compute the gradient if supported, otherwise will use\n the analytic parameter-shift rule where possible with finite-difference as a fallback.\n\n * ``"backprop"``: Use classical backpropagation. Only allowed on simulator\n devices that are classically end-to-end differentiable, for example\n :class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned\n QNode can only be used with the machine-learning framework supported\n by the device; a separate ``interface`` argument should not be passed.\n\n * ``"reversible"``: Uses a reversible method for computing the gradient.\n This method is similar to ``"backprop"``, but trades off increased\n runtime with significantly lower memory usage. Compared to the\n parameter-shift rule, the reversible method can be faster or slower,\n depending on the density and location of parametrized gates in a circuit.\n Only allowed on (simulator) devices with the "reversible" capability,\n for example :class:`default.qubit <~.DefaultQubit>`.\n\n * ``"device"``: Queries the device directly for the gradient.\n Only allowed on devices that provide their own gradient rules.\n\n * ``"parameter-shift"``: Use the analytic parameter-shift\n rule for all supported quantum operation arguments, with finite-difference\n as a fallback.\n\n * ``"finite-diff"``: Uses numerical finite-differences for all quantum\n operation arguments.\n\n caching (int): Number of device executions to store in a cache to speed up subsequent\n executions. A value of ``0`` indicates that no caching will take place. 
Once filled,\n older elements of the cache are removed and replaced with the most recent device\n executions to keep the cache up to date.\n\n Keyword Args:\n h=1e-7 (float): Step size for the finite difference method.\n order=1 (int): The order of the finite difference method to use. ``1`` corresponds\n to forward finite differences, ``2`` to centered finite differences.\n\n **Example**\n\n >>> qml.enable_tape()\n >>> dev = qml.device("default.qubit", wires=1)\n >>> @qml.qnode(dev)\n >>> def circuit(x):\n >>> qml.RX(x, wires=0)\n >>> return expval(qml.PauliZ(0))\n '
@lru_cache()
def qfunc_decorator(func):
'The actual decorator'
qn = QNode(func, device, interface=interface, diff_method=diff_method, caching=caching, **diff_options)
return update_wrapper(qn, func)
return qfunc_decorator<|docstring|>Decorator for creating QNodes.
This decorator is used to indicate to PennyLane that the decorated function contains a
:ref:`quantum variational circuit <glossary_variational_circuit>` that should be bound to a
compatible device.
The QNode calls the quantum function to construct a :class:`~.JacobianTape` instance representing
the quantum circuit.
.. note::
The quantum tape is an *experimental* feature. QNodes that use the quantum
tape have access to advanced features, such as in-QNode classical processing,
but do not yet have feature parity with the standard PennyLane QNode.
This quantum tape-compatible QNode can either be created directly,
>>> import pennylane as qml
>>> @qml.tape.qnode(dev)
or enabled globally via :func:`~.enable_tape` without changing your PennyLane code:
>>> qml.enable_tape()
For more details, see :mod:`pennylane.tape`.
Args:
func (callable): a quantum function
device (~.Device): a PennyLane-compatible device
interface (str): The interface that will be used for classical backpropagation.
This affects the types of objects that can be passed to/returned from the QNode:
* ``interface='autograd'``: Allows autograd to backpropagate
through the QNode. The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays.
* ``interface='torch'``: Allows PyTorch to backpropagate
through the QNode. The QNode accepts and returns Torch tensors.
* ``interface='tf'``: Allows TensorFlow in eager mode to backpropagate
through the QNode. The QNode accepts and returns
TensorFlow ``tf.Variable`` and ``tf.tensor`` objects.
* ``None``: The QNode accepts default Python types
(floats, ints, lists) as well as NumPy array arguments,
and returns NumPy arrays. It does not connect to any
machine learning library automatically for backpropagation.
diff_method (str, None): the method of differentiation to use in the created QNode.
* ``"best"``: Best available method. Uses classical backpropagation or the
device directly to compute the gradient if supported, otherwise will use
the analytic parameter-shift rule where possible with finite-difference as a fallback.
* ``"backprop"``: Use classical backpropagation. Only allowed on simulator
devices that are classically end-to-end differentiable, for example
:class:`default.tensor.tf <~.DefaultTensorTF>`. Note that the returned
QNode can only be used with the machine-learning framework supported
by the device; a separate ``interface`` argument should not be passed.
* ``"reversible"``: Uses a reversible method for computing the gradient.
This method is similar to ``"backprop"``, but trades off increased
runtime with significantly lower memory usage. Compared to the
parameter-shift rule, the reversible method can be faster or slower,
depending on the density and location of parametrized gates in a circuit.
Only allowed on (simulator) devices with the "reversible" capability,
for example :class:`default.qubit <~.DefaultQubit>`.
* ``"device"``: Queries the device directly for the gradient.
Only allowed on devices that provide their own gradient rules.
* ``"parameter-shift"``: Use the analytic parameter-shift
rule for all supported quantum operation arguments, with finite-difference
as a fallback.
* ``"finite-diff"``: Uses numerical finite-differences for all quantum
operation arguments.
caching (int): Number of device executions to store in a cache to speed up subsequent
executions. A value of ``0`` indicates that no caching will take place. Once filled,
older elements of the cache are removed and replaced with the most recent device
executions to keep the cache up to date.
Keyword Args:
h=1e-7 (float): Step size for the finite difference method.
order=1 (int): The order of the finite difference method to use. ``1`` corresponds
to forward finite differences, ``2`` to centered finite differences.
**Example**
>>> qml.enable_tape()
>>> dev = qml.device("default.qubit", wires=1)
>>> @qml.qnode(dev)
>>> def circuit(x):
>>> qml.RX(x, wires=0)
>>> return expval(qml.PauliZ(0))<|endoftext|> |
3626d7e2344e3e31600e9a350aa43f2cae099a9134310b59d1c73305e6606f67 | @staticmethod
def get_tape(device, interface, diff_method='best'):
'Determine the best JacobianTape, differentiation method, and interface\n for a requested device, interface, and diff method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n diff_method (str): The requested method of differentiation. One of\n ``"best"``, ``"backprop"``, ``"reversible"``, ``"device"``,\n ``"parameter-shift"``, or ``"finite-diff"``.\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n '
if (diff_method == 'best'):
return QNode.get_best_method(device, interface)
if (diff_method == 'backprop'):
return QNode._validate_backprop_method(device, interface)
if (diff_method == 'reversible'):
return QNode._validate_reversible_method(device, interface)
if (diff_method == 'device'):
return QNode._validate_device_method(device, interface)
if (diff_method == 'parameter-shift'):
return (QNode._get_parameter_shift_tape(device), interface, 'analytic')
if (diff_method == 'finite-diff'):
return (JacobianTape, interface, 'numeric')
raise qml.QuantumFunctionError(f"Differentiation method {diff_method} not recognized. Allowed options are ('best', 'parameter-shift', 'backprop', 'finite-diff', 'device', 'reversible').") | Determine the best JacobianTape, differentiation method, and interface
for a requested device, interface, and diff method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
diff_method (str): The requested method of differentiation. One of
``"best"``, ``"backprop"``, ``"reversible"``, ``"device"``,
``"parameter-shift"``, or ``"finite-diff"``.
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method | pennylane/tape/qnode.py | get_tape | anthayes92/pennylane | 1 | python | @staticmethod
@staticmethod
def get_tape(device, interface, diff_method='best'):
    """Resolve which JacobianTape subclass, interface, and jacobian method
    should be used for a requested device/interface/differentiation-method
    combination.

    Args:
        device (.Device): PennyLane device
        interface (str): name of the requested interface
        diff_method (str): the requested differentiation method; one of
            ``"best"``, ``"backprop"``, ``"reversible"``, ``"device"``,
            ``"parameter-shift"``, or ``"finite-diff"``

    Returns:
        tuple[.JacobianTape, str, str]: tuple containing the compatible
        JacobianTape, the interface to apply, and the method argument
        to pass to the ``JacobianTape.jacobian`` method

    Raises:
        qml.QuantumFunctionError: if ``diff_method`` is not a recognized option
    """
    # Map each recognized method onto a zero-argument callable so that
    # selection is a single dictionary lookup. The lambdas keep each
    # branch lazy: only the requested validation actually runs.
    dispatch = {
        'best': lambda: QNode.get_best_method(device, interface),
        'backprop': lambda: QNode._validate_backprop_method(device, interface),
        'reversible': lambda: QNode._validate_reversible_method(device, interface),
        'device': lambda: QNode._validate_device_method(device, interface),
        'parameter-shift': lambda: (
            QNode._get_parameter_shift_tape(device),
            interface,
            'analytic',
        ),
        'finite-diff': lambda: (JacobianTape, interface, 'numeric'),
    }

    selected = dispatch.get(diff_method)

    if selected is None:
        raise qml.QuantumFunctionError(
            f"Differentiation method {diff_method} not recognized. Allowed "
            "options are ('best', 'parameter-shift', 'backprop', 'finite-diff', 'device', 'reversible')."
        )

    return selected()
def get_tape(device, interface, diff_method='best'):
'Determine the best JacobianTape, differentiation method, and interface\n for a requested device, interface, and diff method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n diff_method (str): The requested method of differentiation. One of\n ``"best"``, ``"backprop"``, ``"reversible"``, ``"device"``,\n ``"parameter-shift"``, or ``"finite-diff"``.\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n '
if (diff_method == 'best'):
return QNode.get_best_method(device, interface)
if (diff_method == 'backprop'):
return QNode._validate_backprop_method(device, interface)
if (diff_method == 'reversible'):
return QNode._validate_reversible_method(device, interface)
if (diff_method == 'device'):
return QNode._validate_device_method(device, interface)
if (diff_method == 'parameter-shift'):
return (QNode._get_parameter_shift_tape(device), interface, 'analytic')
if (diff_method == 'finite-diff'):
return (JacobianTape, interface, 'numeric')
raise qml.QuantumFunctionError(f"Differentiation method {diff_method} not recognized. Allowed options are ('best', 'parameter-shift', 'backprop', 'finite-diff', 'device', 'reversible').")<|docstring|>Determine the best JacobianTape, differentiation method, and interface
for a requested device, interface, and diff method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
diff_method (str): The requested method of differentiation. One of
``"best"``, ``"backprop"``, ``"reversible"``, ``"device"``,
``"parameter-shift"``, or ``"finite-diff"``.
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method<|endoftext|> |
d1aea3a60a85edc40a29270ffbd414e65cb31593a762dc28ae75b927b40f6da4 | @staticmethod
def get_best_method(device, interface):
'Returns the \'best\' JacobianTape and differentiation method\n for a particular device and interface combination.\n\n This method attempts to determine support for differentiation\n methods using the following order:\n\n * ``"backprop"``\n * ``"device"``\n * ``"parameter-shift"``\n * ``"finite-diff"``\n\n The first differentiation method that is supported (going from\n top to bottom) will be returned.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n '
try:
return QNode._validate_backprop_method(device, interface)
except qml.QuantumFunctionError:
try:
return QNode._validate_device_method(device, interface)
except qml.QuantumFunctionError:
try:
return (QNode._get_parameter_shift_tape(device), interface, 'best')
except qml.QuantumFunctionError:
return (JacobianTape, interface, 'numeric') | Returns the 'best' JacobianTape and differentiation method
for a particular device and interface combination.
This method attempts to determine support for differentiation
methods using the following order:
* ``"backprop"``
* ``"device"``
* ``"parameter-shift"``
* ``"finite-diff"``
The first differentiation method that is supported (going from
top to bottom) will be returned.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method | pennylane/tape/qnode.py | get_best_method | anthayes92/pennylane | 1 | python | @staticmethod
@staticmethod
def get_best_method(device, interface):
    """Pick the 'best' available JacobianTape and differentiation method
    for the given device and interface combination.

    Support is probed in a fixed priority order:

    * ``"backprop"``
    * ``"device"``
    * ``"parameter-shift"``
    * ``"finite-diff"``

    and the first supported option (top to bottom) is returned.

    Args:
        device (.Device): PennyLane device
        interface (str): name of the requested interface

    Returns:
        tuple[.JacobianTape, str, str]: tuple containing the compatible
        JacobianTape, the interface to apply, and the method argument
        to pass to the ``JacobianTape.jacobian`` method
    """
    # Each validator signals "unsupported" by raising
    # qml.QuantumFunctionError; probe the candidates in priority order.
    try:
        return QNode._validate_backprop_method(device, interface)
    except qml.QuantumFunctionError:
        pass

    try:
        return QNode._validate_device_method(device, interface)
    except qml.QuantumFunctionError:
        pass

    try:
        return QNode._get_parameter_shift_tape(device), interface, 'best'
    except qml.QuantumFunctionError:
        # Numeric finite differences are the universal fallback.
        return JacobianTape, interface, 'numeric'
def get_best_method(device, interface):
'Returns the \'best\' JacobianTape and differentiation method\n for a particular device and interface combination.\n\n This method attempts to determine support for differentiation\n methods using the following order:\n\n * ``"backprop"``\n * ``"device"``\n * ``"parameter-shift"``\n * ``"finite-diff"``\n\n The first differentiation method that is supported (going from\n top to bottom) will be returned.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n '
try:
return QNode._validate_backprop_method(device, interface)
except qml.QuantumFunctionError:
try:
return QNode._validate_device_method(device, interface)
except qml.QuantumFunctionError:
try:
return (QNode._get_parameter_shift_tape(device), interface, 'best')
except qml.QuantumFunctionError:
return (JacobianTape, interface, 'numeric')<|docstring|>Returns the 'best' JacobianTape and differentiation method
for a particular device and interface combination.
This method attempts to determine support for differentiation
methods using the following order:
* ``"backprop"``
* ``"device"``
* ``"parameter-shift"``
* ``"finite-diff"``
The first differentiation method that is supported (going from
top to bottom) will be returned.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method<|endoftext|> |
bdc351f5a85c48382ca9965058ce9d5c3a2664a4adf983c4afe5ec0857745240 | @staticmethod
def _validate_backprop_method(device, interface):
'Validates whether a particular device and JacobianTape interface\n supports the ``"backprop"`` differentiation method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n\n Raises:\n qml.QuantumFunctionError: if the device does not support backpropagation, or the\n interface provided is not compatible with the device\n '
backprop_interface = device.capabilities().get('passthru_interface', None)
if (backprop_interface is not None):
if (interface == backprop_interface):
return (JacobianTape, None, 'backprop')
raise qml.QuantumFunctionError(f"Device {device.short_name} only supports diff_method='backprop' when using the {backprop_interface} interface.")
raise qml.QuantumFunctionError(f'The {device.short_name} device does not support native computations with autodifferentiation frameworks.') | Validates whether a particular device and JacobianTape interface
supports the ``"backprop"`` differentiation method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method
Raises:
qml.QuantumFunctionError: if the device does not support backpropagation, or the
interface provided is not compatible with the device | pennylane/tape/qnode.py | _validate_backprop_method | anthayes92/pennylane | 1 | python | @staticmethod
@staticmethod
def _validate_backprop_method(device, interface):
    """Check that ``device`` supports the ``"backprop"`` differentiation
    method together with the requested interface.

    Args:
        device (.Device): PennyLane device
        interface (str): name of the requested interface

    Returns:
        tuple[.JacobianTape, str, str]: tuple containing the compatible
        JacobianTape, the interface to apply, and the method argument
        to pass to the ``JacobianTape.jacobian`` method

    Raises:
        qml.QuantumFunctionError: if the device does not support backpropagation,
            or the interface provided is not compatible with the device
    """
    # A device advertises backprop support by naming the ML framework it
    # is natively differentiable in.
    backprop_interface = device.capabilities().get('passthru_interface', None)

    if backprop_interface is None:
        raise qml.QuantumFunctionError(
            f'The {device.short_name} device does not support native computations with autodifferentiation frameworks.'
        )

    if interface != backprop_interface:
        raise qml.QuantumFunctionError(
            f"Device {device.short_name} only supports diff_method='backprop' when using the {backprop_interface} interface."
        )

    # The interface entry is None: the device itself is end-to-end
    # differentiable, so no extra interface conversion is applied.
    return JacobianTape, None, 'backprop'
def _validate_backprop_method(device, interface):
'Validates whether a particular device and JacobianTape interface\n supports the ``"backprop"`` differentiation method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n\n Raises:\n qml.QuantumFunctionError: if the device does not support backpropagation, or the\n interface provided is not compatible with the device\n '
backprop_interface = device.capabilities().get('passthru_interface', None)
if (backprop_interface is not None):
if (interface == backprop_interface):
return (JacobianTape, None, 'backprop')
raise qml.QuantumFunctionError(f"Device {device.short_name} only supports diff_method='backprop' when using the {backprop_interface} interface.")
raise qml.QuantumFunctionError(f'The {device.short_name} device does not support native computations with autodifferentiation frameworks.')<|docstring|>Validates whether a particular device and JacobianTape interface
supports the ``"backprop"`` differentiation method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method
Raises:
qml.QuantumFunctionError: if the device does not support backpropagation, or the
interface provided is not compatible with the device<|endoftext|> |
3335a3ec20efe14a880377456be443c9a21f5725c05cb798ab3d482175c59b0d | @staticmethod
def _validate_reversible_method(device, interface):
'Validates whether a particular device and JacobianTape interface\n supports the ``"reversible"`` differentiation method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n\n Raises:\n qml.QuantumFunctionError: if the device does not support reversible backprop\n '
supports_reverse = device.capabilities().get('supports_reversible_diff', False)
supports_reverse = (supports_reverse or device.capabilities().get('reversible_diff', False))
if (not supports_reverse):
raise ValueError(f'The {device.short_name} device does not support reversible differentiation.')
return (ReversibleTape, interface, 'analytic') | Validates whether a particular device and JacobianTape interface
supports the ``"reversible"`` differentiation method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method
Raises:
qml.QuantumFunctionError: if the device does not support reversible backprop | pennylane/tape/qnode.py | _validate_reversible_method | anthayes92/pennylane | 1 | python | @staticmethod
@staticmethod
def _validate_reversible_method(device, interface):
    """Validates whether a particular device and JacobianTape interface
    supports the ``"reversible"`` differentiation method.

    Args:
        device (.Device): PennyLane device
        interface (str): name of the requested interface

    Returns:
        tuple[.JacobianTape, str, str]: tuple containing the compatible
        JacobianTape, the interface to apply, and the method argument
        to pass to the ``JacobianTape.jacobian`` method

    Raises:
        qml.QuantumFunctionError: if the device does not support reversible backprop
    """
    # Devices may advertise this capability under either the current
    # "supports_reversible_diff" key or the legacy "reversible_diff" key.
    supports_reverse = device.capabilities().get('supports_reversible_diff', False)
    supports_reverse = supports_reverse or device.capabilities().get('reversible_diff', False)

    if not supports_reverse:
        # Fix: raise qml.QuantumFunctionError (previously ValueError) so the
        # exception type matches the docstring above and the other
        # ``_validate_*_method`` helpers, whose errors callers catch.
        raise qml.QuantumFunctionError(
            f'The {device.short_name} device does not support reversible differentiation.'
        )

    return ReversibleTape, interface, 'analytic'
def _validate_reversible_method(device, interface):
'Validates whether a particular device and JacobianTape interface\n supports the ``"reversible"`` differentiation method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n\n Raises:\n qml.QuantumFunctionError: if the device does not support reversible backprop\n '
supports_reverse = device.capabilities().get('supports_reversible_diff', False)
supports_reverse = (supports_reverse or device.capabilities().get('reversible_diff', False))
if (not supports_reverse):
raise ValueError(f'The {device.short_name} device does not support reversible differentiation.')
return (ReversibleTape, interface, 'analytic')<|docstring|>Validates whether a particular device and JacobianTape interface
supports the ``"reversible"`` differentiation method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method
Raises:
qml.QuantumFunctionError: if the device does not support reversible backprop<|endoftext|> |
648458fe4826ecf2304a97c2624031ece0cdcd634159f08e31b75ee6131a1d7d | @staticmethod
def _validate_device_method(device, interface):
'Validates whether a particular device and JacobianTape interface\n supports the ``"device"`` differentiation method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n\n Raises:\n qml.QuantumFunctionError: if the device does not provide a native method for computing\n the Jacobian\n '
provides_jacobian = device.capabilities().get('provides_jacobian', False)
if (not provides_jacobian):
raise qml.QuantumFunctionError(f'The {device.short_name} device does not provide a native method for computing the jacobian.')
return (JacobianTape, interface, 'device') | Validates whether a particular device and JacobianTape interface
supports the ``"device"`` differentiation method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method
Raises:
qml.QuantumFunctionError: if the device does not provide a native method for computing
the Jacobian | pennylane/tape/qnode.py | _validate_device_method | anthayes92/pennylane | 1 | python | @staticmethod
@staticmethod
def _validate_device_method(device, interface):
    """Check that ``device`` provides its own rule for computing the jacobian.

    Args:
        device (.Device): PennyLane device
        interface (str): name of the requested interface

    Returns:
        tuple[.JacobianTape, str, str]: tuple containing the compatible
        JacobianTape, the interface to apply, and the method argument
        to pass to the ``JacobianTape.jacobian`` method

    Raises:
        qml.QuantumFunctionError: if the device does not provide a native
            method for computing the Jacobian
    """
    if device.capabilities().get('provides_jacobian', False):
        return JacobianTape, interface, 'device'

    raise qml.QuantumFunctionError(
        f'The {device.short_name} device does not provide a native method for computing the jacobian.'
    )
def _validate_device_method(device, interface):
'Validates whether a particular device and JacobianTape interface\n supports the ``"device"`` differentiation method.\n\n Args:\n device (.Device): PennyLane device\n interface (str): name of the requested interface\n\n Returns:\n tuple[.JacobianTape, str, str]: tuple containing the compatible\n JacobianTape, the interface to apply, and the method argument\n to pass to the ``JacobianTape.jacobian`` method\n\n Raises:\n qml.QuantumFunctionError: if the device does not provide a native method for computing\n the Jacobian\n '
provides_jacobian = device.capabilities().get('provides_jacobian', False)
if (not provides_jacobian):
raise qml.QuantumFunctionError(f'The {device.short_name} device does not provide a native method for computing the jacobian.')
return (JacobianTape, interface, 'device')<|docstring|>Validates whether a particular device and JacobianTape interface
supports the ``"device"`` differentiation method.
Args:
device (.Device): PennyLane device
interface (str): name of the requested interface
Returns:
tuple[.JacobianTape, str, str]: tuple containing the compatible
JacobianTape, the interface to apply, and the method argument
to pass to the ``JacobianTape.jacobian`` method
Raises:
qml.QuantumFunctionError: if the device does not provide a native method for computing
the Jacobian<|endoftext|> |
7d3fb97b879fd46ce8703538a2f17413e57191d7064e76c9eadfc344d17e2a1d | @staticmethod
def _get_parameter_shift_tape(device):
'Validates whether a particular device\n supports the parameter-shift differentiation method, and returns\n the correct tape.\n\n Args:\n device (.Device): PennyLane device\n\n Returns:\n .JacobianTape: the compatible JacobianTape\n\n Raises:\n qml.QuantumFunctionError: if the device model does not have a corresponding\n parameter-shift rule\n '
model = device.capabilities().get('model', None)
if (model == 'qubit'):
return QubitParamShiftTape
if (model == 'cv'):
return CVParamShiftTape
raise qml.QuantumFunctionError(f"Device {device.short_name} uses an unknown model ('{model}') that does not support the parameter-shift rule.") | Validates whether a particular device
supports the parameter-shift differentiation method, and returns
the correct tape.
Args:
device (.Device): PennyLane device
Returns:
.JacobianTape: the compatible JacobianTape
Raises:
qml.QuantumFunctionError: if the device model does not have a corresponding
parameter-shift rule | pennylane/tape/qnode.py | _get_parameter_shift_tape | anthayes92/pennylane | 1 | python | @staticmethod
@staticmethod
def _get_parameter_shift_tape(device):
    """Return the parameter-shift-capable tape class matching the device model.

    Args:
        device (.Device): PennyLane device

    Returns:
        .JacobianTape: the compatible JacobianTape

    Raises:
        qml.QuantumFunctionError: if the device model does not have a
            corresponding parameter-shift rule
    """
    model = device.capabilities().get('model', None)

    # Each supported device model has its own parameter-shift tape.
    shift_tapes = {'qubit': QubitParamShiftTape, 'cv': CVParamShiftTape}

    if model not in shift_tapes:
        raise qml.QuantumFunctionError(
            f"Device {device.short_name} uses an unknown model ('{model}') that does not support the parameter-shift rule."
        )

    return shift_tapes[model]
def _get_parameter_shift_tape(device):
'Validates whether a particular device\n supports the parameter-shift differentiation method, and returns\n the correct tape.\n\n Args:\n device (.Device): PennyLane device\n\n Returns:\n .JacobianTape: the compatible JacobianTape\n\n Raises:\n qml.QuantumFunctionError: if the device model does not have a corresponding\n parameter-shift rule\n '
model = device.capabilities().get('model', None)
if (model == 'qubit'):
return QubitParamShiftTape
if (model == 'cv'):
return CVParamShiftTape
raise qml.QuantumFunctionError(f"Device {device.short_name} uses an unknown model ('{model}') that does not support the parameter-shift rule.")<|docstring|>Validates whether a particular device
supports the parameter-shift differentiation method, and returns
the correct tape.
Args:
device (.Device): PennyLane device
Returns:
.JacobianTape: the compatible JacobianTape
Raises:
qml.QuantumFunctionError: if the device model does not have a corresponding
parameter-shift rule<|endoftext|> |
8e3b2b63bc2e23f84e0fd252d0393bd4e5b9f2b8951457819b143d3cbd610a50 | def construct(self, args, kwargs):
'Call the quantum function with a tape context, ensuring the operations get queued.'
self.qtape = self._tape(caching=self._caching)
with self.qtape:
measurement_processes = self.func(*args, **kwargs)
if (not isinstance(measurement_processes, Sequence)):
measurement_processes = (measurement_processes,)
if (not all((isinstance(m, qml.tape.MeasurementProcess) for m in measurement_processes))):
raise qml.QuantumFunctionError('A quantum function must return either a single measurement, or a nonempty sequence of measurements.')
state_returns = any([(m.return_type is State) for m in measurement_processes])
if (self.interface is not None):
if (state_returns and (self.interface in ['torch', 'tf'])):
self.INTERFACE_MAP[self.interface](self, dtype=np.complex128)
else:
self.INTERFACE_MAP[self.interface](self)
if (not all(((ret == m) for (ret, m) in zip(measurement_processes, self.qtape.measurements)))):
raise qml.QuantumFunctionError('All measurements must be returned in the order they are measured.')
self.qtape.jacobian_options = self.diff_options
stop_at = self.device.operations
if isinstance(self.qtape, QubitParamShiftTape):
stop_at = (set(self.device.operations) - {'CRX', 'CRZ', 'CRY', 'CRot'})
if (not {op.name for op in self.qtape.operations}.issubset(stop_at)):
self.qtape = self.qtape.expand(depth=self.max_expansion, stop_at=(lambda obj: (obj.name in stop_at))) | Call the quantum function with a tape context, ensuring the operations get queued. | pennylane/tape/qnode.py | construct | anthayes92/pennylane | 1 | python | def construct(self, args, kwargs):
self.qtape = self._tape(caching=self._caching)
with self.qtape:
measurement_processes = self.func(*args, **kwargs)
if (not isinstance(measurement_processes, Sequence)):
measurement_processes = (measurement_processes,)
if (not all((isinstance(m, qml.tape.MeasurementProcess) for m in measurement_processes))):
raise qml.QuantumFunctionError('A quantum function must return either a single measurement, or a nonempty sequence of measurements.')
state_returns = any([(m.return_type is State) for m in measurement_processes])
if (self.interface is not None):
if (state_returns and (self.interface in ['torch', 'tf'])):
self.INTERFACE_MAP[self.interface](self, dtype=np.complex128)
else:
self.INTERFACE_MAP[self.interface](self)
if (not all(((ret == m) for (ret, m) in zip(measurement_processes, self.qtape.measurements)))):
raise qml.QuantumFunctionError('All measurements must be returned in the order they are measured.')
self.qtape.jacobian_options = self.diff_options
stop_at = self.device.operations
if isinstance(self.qtape, QubitParamShiftTape):
stop_at = (set(self.device.operations) - {'CRX', 'CRZ', 'CRY', 'CRot'})
if (not {op.name for op in self.qtape.operations}.issubset(stop_at)):
self.qtape = self.qtape.expand(depth=self.max_expansion, stop_at=(lambda obj: (obj.name in stop_at))) | def construct(self, args, kwargs):
self.qtape = self._tape(caching=self._caching)
with self.qtape:
measurement_processes = self.func(*args, **kwargs)
if (not isinstance(measurement_processes, Sequence)):
measurement_processes = (measurement_processes,)
if (not all((isinstance(m, qml.tape.MeasurementProcess) for m in measurement_processes))):
raise qml.QuantumFunctionError('A quantum function must return either a single measurement, or a nonempty sequence of measurements.')
state_returns = any([(m.return_type is State) for m in measurement_processes])
if (self.interface is not None):
if (state_returns and (self.interface in ['torch', 'tf'])):
self.INTERFACE_MAP[self.interface](self, dtype=np.complex128)
else:
self.INTERFACE_MAP[self.interface](self)
if (not all(((ret == m) for (ret, m) in zip(measurement_processes, self.qtape.measurements)))):
raise qml.QuantumFunctionError('All measurements must be returned in the order they are measured.')
self.qtape.jacobian_options = self.diff_options
stop_at = self.device.operations
if isinstance(self.qtape, QubitParamShiftTape):
stop_at = (set(self.device.operations) - {'CRX', 'CRZ', 'CRY', 'CRot'})
if (not {op.name for op in self.qtape.operations}.issubset(stop_at)):
self.qtape = self.qtape.expand(depth=self.max_expansion, stop_at=(lambda obj: (obj.name in stop_at)))<|docstring|>Call the quantum function with a tape context, ensuring the operations get queued.<|endoftext|> |
def draw(self, charset='unicode'):
    """Return a circuit-diagram string representation of the QNode's tape.

    The QNode must have been executed (so that its quantum tape exists)
    before it can be drawn.

    **Example**

    .. code-block:: python3

        @qml.qnode(dev)
        def circuit(a, w):
            qml.Hadamard(0)
            qml.CRX(a, wires=[0, 1])
            qml.Rot(*w, wires=[1])
            qml.CRX(-a, wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

    >>> result = circuit(2.3, [1.2, 3.2, 0.7])
    >>> print(circuit.draw())
    0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩
    1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩
    >>> print(circuit.draw(charset="ascii"))
    0: --H--+C----------------------------+C---------+| <Z @ Z>
    1: -----+RX(2.3)--Rot(1.2, 3.2, 0.7)--+RX(-2.3)--+| <Z @ Z>

    Args:
        charset (str, optional): drawing charset; currently ``"unicode"``
            and ``"ascii"`` are supported

    Returns:
        str: the circuit representation of the tape

    Raises:
        ValueError: if the given charset is not supported
        .QuantumFunctionError: drawing is impossible because the underlying
            quantum tape has not yet been constructed
    """
    tape = self.qtape

    if tape is None:
        raise qml.QuantumFunctionError(
            'The QNode can only be drawn after its quantum tape has been constructed.'
        )

    return tape.draw(charset=charset)
Consider the following circuit as an example:
.. code-block:: python3
@qml.qnode(dev)
def circuit(a, w):
qml.Hadamard(0)
qml.CRX(a, wires=[0, 1])
qml.Rot(*w, wires=[1])
qml.CRX(-a, wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
We can draw the QNode after execution:
>>> result = circuit(2.3, [1.2, 3.2, 0.7])
>>> print(circuit.draw())
0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩
1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩
>>> print(circuit.draw(charset="ascii"))
0: --H--+C----------------------------+C---------+| <Z @ Z>
1: -----+RX(2.3)--Rot(1.2, 3.2, 0.7)--+RX(-2.3)--+| <Z @ Z>
Args:
charset (str, optional): The charset that should be used. Currently, "unicode" and
"ascii" are supported.
Raises:
ValueError: if the given charset is not supported
.QuantumFunctionError: drawing is impossible because the underlying
quantum tape has not yet been constructed
Returns:
str: the circuit representation of the tape | pennylane/tape/qnode.py | draw | anthayes92/pennylane | 1 | python | def draw(self, charset='unicode'):
'Draw the quantum tape as a circuit diagram.\n\n Consider the following circuit as an example:\n\n .. code-block:: python3\n\n @qml.qnode(dev)\n def circuit(a, w):\n qml.Hadamard(0)\n qml.CRX(a, wires=[0, 1])\n qml.Rot(*w, wires=[1])\n qml.CRX(-a, wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n We can draw the QNode after execution:\n\n >>> result = circuit(2.3, [1.2, 3.2, 0.7])\n >>> print(circuit.draw())\n 0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩\n 1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩\n >>> print(circuit.draw(charset="ascii"))\n 0: --H--+C----------------------------+C---------+| <Z @ Z>\n 1: -----+RX(2.3)--Rot(1.2, 3.2, 0.7)--+RX(-2.3)--+| <Z @ Z>\n\n Args:\n charset (str, optional): The charset that should be used. Currently, "unicode" and\n "ascii" are supported.\n\n Raises:\n ValueError: if the given charset is not supported\n .QuantumFunctionError: drawing is impossible because the underlying\n quantum tape has not yet been constructed\n\n Returns:\n str: the circuit representation of the tape\n\n '
if (self.qtape is None):
raise qml.QuantumFunctionError('The QNode can only be drawn after its quantum tape has been constructed.')
return self.qtape.draw(charset=charset) | def draw(self, charset='unicode'):
'Draw the quantum tape as a circuit diagram.\n\n Consider the following circuit as an example:\n\n .. code-block:: python3\n\n @qml.qnode(dev)\n def circuit(a, w):\n qml.Hadamard(0)\n qml.CRX(a, wires=[0, 1])\n qml.Rot(*w, wires=[1])\n qml.CRX(-a, wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n We can draw the QNode after execution:\n\n >>> result = circuit(2.3, [1.2, 3.2, 0.7])\n >>> print(circuit.draw())\n 0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩\n 1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩\n >>> print(circuit.draw(charset="ascii"))\n 0: --H--+C----------------------------+C---------+| <Z @ Z>\n 1: -----+RX(2.3)--Rot(1.2, 3.2, 0.7)--+RX(-2.3)--+| <Z @ Z>\n\n Args:\n charset (str, optional): The charset that should be used. Currently, "unicode" and\n "ascii" are supported.\n\n Raises:\n ValueError: if the given charset is not supported\n .QuantumFunctionError: drawing is impossible because the underlying\n quantum tape has not yet been constructed\n\n Returns:\n str: the circuit representation of the tape\n\n '
if (self.qtape is None):
raise qml.QuantumFunctionError('The QNode can only be drawn after its quantum tape has been constructed.')
return self.qtape.draw(charset=charset)<|docstring|>Draw the quantum tape as a circuit diagram.
Consider the following circuit as an example:
.. code-block:: python3
@qml.qnode(dev)
def circuit(a, w):
qml.Hadamard(0)
qml.CRX(a, wires=[0, 1])
qml.Rot(*w, wires=[1])
qml.CRX(-a, wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
We can draw the QNode after execution:
>>> result = circuit(2.3, [1.2, 3.2, 0.7])
>>> print(circuit.draw())
0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩
1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩
>>> print(circuit.draw(charset="ascii"))
0: --H--+C----------------------------+C---------+| <Z @ Z>
1: -----+RX(2.3)--Rot(1.2, 3.2, 0.7)--+RX(-2.3)--+| <Z @ Z>
Args:
charset (str, optional): The charset that should be used. Currently, "unicode" and
"ascii" are supported.
Raises:
ValueError: if the given charset is not supported
.QuantumFunctionError: drawing is impossible because the underlying
quantum tape has not yet been constructed
Returns:
str: the circuit representation of the tape<|endoftext|> |
8456a682947ff6cc72f5dcb94e1a87282d16702875762b601d19a744de056fad | def to_tf(self, dtype=None):
'Apply the TensorFlow interface to the internal quantum tape.\n\n Args:\n dtype (tf.dtype): The dtype that the TensorFlow QNode should\n output. If not provided, the default is ``tf.float64``.\n\n Raises:\n .QuantumFunctionError: if TensorFlow >= 2.1 is not installed\n '
try:
import tensorflow as tf
from pennylane.tape.interfaces.tf import TFInterface
self.interface = 'tf'
if (not isinstance(self.dtype, tf.DType)):
self.dtype = None
self.dtype = (dtype or self.dtype or TFInterface.dtype)
if (self.qtape is not None):
TFInterface.apply(self.qtape, dtype=tf.as_dtype(self.dtype))
except ImportError as e:
raise qml.QuantumFunctionError("TensorFlow not found. Please install the latest version of TensorFlow to enable the 'tf' interface.") from e | Apply the TensorFlow interface to the internal quantum tape.
Args:
dtype (tf.dtype): The dtype that the TensorFlow QNode should
output. If not provided, the default is ``tf.float64``.
Raises:
.QuantumFunctionError: if TensorFlow >= 2.1 is not installed | pennylane/tape/qnode.py | to_tf | anthayes92/pennylane | 1 | python | def to_tf(self, dtype=None):
'Apply the TensorFlow interface to the internal quantum tape.\n\n Args:\n dtype (tf.dtype): The dtype that the TensorFlow QNode should\n output. If not provided, the default is ``tf.float64``.\n\n Raises:\n .QuantumFunctionError: if TensorFlow >= 2.1 is not installed\n '
try:
import tensorflow as tf
from pennylane.tape.interfaces.tf import TFInterface
self.interface = 'tf'
if (not isinstance(self.dtype, tf.DType)):
self.dtype = None
self.dtype = (dtype or self.dtype or TFInterface.dtype)
if (self.qtape is not None):
TFInterface.apply(self.qtape, dtype=tf.as_dtype(self.dtype))
except ImportError as e:
raise qml.QuantumFunctionError("TensorFlow not found. Please install the latest version of TensorFlow to enable the 'tf' interface.") from e | def to_tf(self, dtype=None):
'Apply the TensorFlow interface to the internal quantum tape.\n\n Args:\n dtype (tf.dtype): The dtype that the TensorFlow QNode should\n output. If not provided, the default is ``tf.float64``.\n\n Raises:\n .QuantumFunctionError: if TensorFlow >= 2.1 is not installed\n '
try:
import tensorflow as tf
from pennylane.tape.interfaces.tf import TFInterface
self.interface = 'tf'
if (not isinstance(self.dtype, tf.DType)):
self.dtype = None
self.dtype = (dtype or self.dtype or TFInterface.dtype)
if (self.qtape is not None):
TFInterface.apply(self.qtape, dtype=tf.as_dtype(self.dtype))
except ImportError as e:
raise qml.QuantumFunctionError("TensorFlow not found. Please install the latest version of TensorFlow to enable the 'tf' interface.") from e<|docstring|>Apply the TensorFlow interface to the internal quantum tape.
Args:
dtype (tf.dtype): The dtype that the TensorFlow QNode should
output. If not provided, the default is ``tf.float64``.
Raises:
.QuantumFunctionError: if TensorFlow >= 2.1 is not installed<|endoftext|> |
ee9f8dd94fd9ae8a43edab42ab94bdd382985d51885c9c6e247bb630a0f2b60d | def to_torch(self, dtype=None):
'Apply the Torch interface to the internal quantum tape.\n\n Args:\n dtype (tf.dtype): The dtype that the Torch QNode should\n output. If not provided, the default is ``torch.float64``.\n\n Raises:\n .QuantumFunctionError: if PyTorch >= 1.3 is not installed\n '
try:
import torch
from pennylane.tape.interfaces.torch import TorchInterface
self.interface = 'torch'
if (not isinstance(self.dtype, torch.dtype)):
self.dtype = None
self.dtype = (dtype or self.dtype or TorchInterface.dtype)
if (self.dtype is np.complex128):
self.dtype = torch.complex128
if (self.qtape is not None):
TorchInterface.apply(self.qtape, dtype=self.dtype)
except ImportError as e:
raise qml.QuantumFunctionError("PyTorch not found. Please install the latest version of PyTorch to enable the 'torch' interface.") from e | Apply the Torch interface to the internal quantum tape.
Args:
dtype (tf.dtype): The dtype that the Torch QNode should
output. If not provided, the default is ``torch.float64``.
Raises:
.QuantumFunctionError: if PyTorch >= 1.3 is not installed | pennylane/tape/qnode.py | to_torch | anthayes92/pennylane | 1 | python | def to_torch(self, dtype=None):
'Apply the Torch interface to the internal quantum tape.\n\n Args:\n dtype (tf.dtype): The dtype that the Torch QNode should\n output. If not provided, the default is ``torch.float64``.\n\n Raises:\n .QuantumFunctionError: if PyTorch >= 1.3 is not installed\n '
try:
import torch
from pennylane.tape.interfaces.torch import TorchInterface
self.interface = 'torch'
if (not isinstance(self.dtype, torch.dtype)):
self.dtype = None
self.dtype = (dtype or self.dtype or TorchInterface.dtype)
if (self.dtype is np.complex128):
self.dtype = torch.complex128
if (self.qtape is not None):
TorchInterface.apply(self.qtape, dtype=self.dtype)
except ImportError as e:
raise qml.QuantumFunctionError("PyTorch not found. Please install the latest version of PyTorch to enable the 'torch' interface.") from e | def to_torch(self, dtype=None):
'Apply the Torch interface to the internal quantum tape.\n\n Args:\n dtype (tf.dtype): The dtype that the Torch QNode should\n output. If not provided, the default is ``torch.float64``.\n\n Raises:\n .QuantumFunctionError: if PyTorch >= 1.3 is not installed\n '
try:
import torch
from pennylane.tape.interfaces.torch import TorchInterface
self.interface = 'torch'
if (not isinstance(self.dtype, torch.dtype)):
self.dtype = None
self.dtype = (dtype or self.dtype or TorchInterface.dtype)
if (self.dtype is np.complex128):
self.dtype = torch.complex128
if (self.qtape is not None):
TorchInterface.apply(self.qtape, dtype=self.dtype)
except ImportError as e:
raise qml.QuantumFunctionError("PyTorch not found. Please install the latest version of PyTorch to enable the 'torch' interface.") from e<|docstring|>Apply the Torch interface to the internal quantum tape.
Args:
dtype (tf.dtype): The dtype that the Torch QNode should
output. If not provided, the default is ``torch.float64``.
Raises:
.QuantumFunctionError: if PyTorch >= 1.3 is not installed<|endoftext|> |
3e29056219e98687dcf37319d421bc013f838c1b859bc66120b90c975d2ddd26 | def to_autograd(self):
'Apply the Autograd interface to the internal quantum tape.'
self.interface = 'autograd'
self.dtype = AutogradInterface.dtype
if (self.qtape is not None):
AutogradInterface.apply(self.qtape) | Apply the Autograd interface to the internal quantum tape. | pennylane/tape/qnode.py | to_autograd | anthayes92/pennylane | 1 | python | def to_autograd(self):
self.interface = 'autograd'
self.dtype = AutogradInterface.dtype
if (self.qtape is not None):
AutogradInterface.apply(self.qtape) | def to_autograd(self):
self.interface = 'autograd'
self.dtype = AutogradInterface.dtype
if (self.qtape is not None):
AutogradInterface.apply(self.qtape)<|docstring|>Apply the Autograd interface to the internal quantum tape.<|endoftext|> |
c0398c28a7de8580731b06b533bc256f90d0356d590579f004bf45e86df3bfee | @property
def caching(self):
'float: number of device executions to store in a cache to speed up subsequent\n executions. If set to zero, no caching occurs.'
return self._caching | float: number of device executions to store in a cache to speed up subsequent
executions. If set to zero, no caching occurs. | pennylane/tape/qnode.py | caching | anthayes92/pennylane | 1 | python | @property
def caching(self):
'float: number of device executions to store in a cache to speed up subsequent\n executions. If set to zero, no caching occurs.'
return self._caching | @property
def caching(self):
'float: number of device executions to store in a cache to speed up subsequent\n executions. If set to zero, no caching occurs.'
return self._caching<|docstring|>float: number of device executions to store in a cache to speed up subsequent
executions. If set to zero, no caching occurs.<|endoftext|> |
d4947426eba1b07c385c24c72766298d8c7bfd41ee321edf368ab106de62241a | @lru_cache()
def qfunc_decorator(func):
'The actual decorator'
qn = QNode(func, device, interface=interface, diff_method=diff_method, caching=caching, **diff_options)
return update_wrapper(qn, func) | The actual decorator | pennylane/tape/qnode.py | qfunc_decorator | anthayes92/pennylane | 1 | python | @lru_cache()
def qfunc_decorator(func):
qn = QNode(func, device, interface=interface, diff_method=diff_method, caching=caching, **diff_options)
return update_wrapper(qn, func) | @lru_cache()
def qfunc_decorator(func):
qn = QNode(func, device, interface=interface, diff_method=diff_method, caching=caching, **diff_options)
return update_wrapper(qn, func)<|docstring|>The actual decorator<|endoftext|> |
df2514de2a324ce0f4a04c81b546eebb55a43fdff2d7e6b8bad47a7ef8d48884 | def get_lammps_relax(cell: tuple, pair_style: str, pair_coeff: str=None, pot_file: str=None, is_relax_lattice: bool=True, is_relax_z: bool=False, run_lammps: bool=True) -> LammpsStatic:
"\n Get lammpkits before run_lammps is called.\n\n Args:\n cell: Input cell.\n pair_style: Key 'pair_style' setting for lammps input.\n pair_coeff: Key 'pair_coeff' setting for lammps input.\n pot_file: Potential file path from potentials directory.\n This setting becomes activated only when pair_coeff is None.\n is_relax_lattice: If True, lattice is relaxed.\n run_lammps: If True, run lamms.\n "
lmp_stc = LammpsStatic()
lmp_stc.add_structure(cell=cell)
if pair_coeff:
lmp_stc.add_potential_from_string(pair_style=pair_style, pair_coeff=pair_coeff)
else:
lmp_stc.add_potential_from_database(pair_style=pair_style, pot_file=pot_file)
lmp_stc.add_thermo(thermo=10)
lmp_stc.add_variables(add_energy=True, add_stress=True)
lmp_stc.add_relax_settings(is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
if run_lammps:
lmp_stc.run_lammps()
return lmp_stc | Get lammpkits before run_lammps is called.
Args:
cell: Input cell.
pair_style: Key 'pair_style' setting for lammps input.
pair_coeff: Key 'pair_coeff' setting for lammps input.
pot_file: Potential file path from potentials directory.
This setting becomes activated only when pair_coeff is None.
is_relax_lattice: If True, lattice is relaxed.
run_lammps: If True, run lamms. | twinpy/interfaces/lammps.py | get_lammps_relax | kei0822kei/twinpy | 0 | python | def get_lammps_relax(cell: tuple, pair_style: str, pair_coeff: str=None, pot_file: str=None, is_relax_lattice: bool=True, is_relax_z: bool=False, run_lammps: bool=True) -> LammpsStatic:
"\n Get lammpkits before run_lammps is called.\n\n Args:\n cell: Input cell.\n pair_style: Key 'pair_style' setting for lammps input.\n pair_coeff: Key 'pair_coeff' setting for lammps input.\n pot_file: Potential file path from potentials directory.\n This setting becomes activated only when pair_coeff is None.\n is_relax_lattice: If True, lattice is relaxed.\n run_lammps: If True, run lamms.\n "
lmp_stc = LammpsStatic()
lmp_stc.add_structure(cell=cell)
if pair_coeff:
lmp_stc.add_potential_from_string(pair_style=pair_style, pair_coeff=pair_coeff)
else:
lmp_stc.add_potential_from_database(pair_style=pair_style, pot_file=pot_file)
lmp_stc.add_thermo(thermo=10)
lmp_stc.add_variables(add_energy=True, add_stress=True)
lmp_stc.add_relax_settings(is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
if run_lammps:
lmp_stc.run_lammps()
return lmp_stc | def get_lammps_relax(cell: tuple, pair_style: str, pair_coeff: str=None, pot_file: str=None, is_relax_lattice: bool=True, is_relax_z: bool=False, run_lammps: bool=True) -> LammpsStatic:
"\n Get lammpkits before run_lammps is called.\n\n Args:\n cell: Input cell.\n pair_style: Key 'pair_style' setting for lammps input.\n pair_coeff: Key 'pair_coeff' setting for lammps input.\n pot_file: Potential file path from potentials directory.\n This setting becomes activated only when pair_coeff is None.\n is_relax_lattice: If True, lattice is relaxed.\n run_lammps: If True, run lamms.\n "
lmp_stc = LammpsStatic()
lmp_stc.add_structure(cell=cell)
if pair_coeff:
lmp_stc.add_potential_from_string(pair_style=pair_style, pair_coeff=pair_coeff)
else:
lmp_stc.add_potential_from_database(pair_style=pair_style, pot_file=pot_file)
lmp_stc.add_thermo(thermo=10)
lmp_stc.add_variables(add_energy=True, add_stress=True)
lmp_stc.add_relax_settings(is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
if run_lammps:
lmp_stc.run_lammps()
return lmp_stc<|docstring|>Get lammpkits before run_lammps is called.
Args:
cell: Input cell.
pair_style: Key 'pair_style' setting for lammps input.
pair_coeff: Key 'pair_coeff' setting for lammps input.
pot_file: Potential file path from potentials directory.
This setting becomes activated only when pair_coeff is None.
is_relax_lattice: If True, lattice is relaxed.
run_lammps: If True, run lamms.<|endoftext|> |
bab26bc71dd63b1c7e243e11b27145c4e8bed0e66542ecc6028f17274723af44 | def get_relax_analyzer_from_lammps_static(lammps_static: LammpsStatic, original_cell: tuple=None, no_standardize: bool=False, move_atoms_into_unitcell: bool=True):
"\n Get relax analyzer from lammps.\n\n Args:\n lammps_static: LammpsStatic class object.\n original_cell: Original cell.\n no_standardize: See docstring in RelaxAnalyzer.\n If no_standardize is True, input 'original_cell' is\n ignored and original_cell and input_cell becomes\n identical.\n "
if (not lammps_static.is_run_finished):
lammps_static.run_lammps()
initial_cell = lammps_static.get_initial_cell()
(lattice, frac_coords, symbols) = lammps_static.get_final_cell()
if move_atoms_into_unitcell:
frac_coords = np.round(frac_coords, decimals=6)
frac_coords %= 1.0
final_cell = (lattice, frac_coords, symbols)
forces = lammps_static.get_forces()
energy = lammps_static.get_energy()
rlx_analyzer = RelaxAnalyzer(initial_cell=initial_cell, final_cell=final_cell, original_cell=original_cell, forces=forces, energy=energy, no_standardize=no_standardize)
return rlx_analyzer | Get relax analyzer from lammps.
Args:
lammps_static: LammpsStatic class object.
original_cell: Original cell.
no_standardize: See docstring in RelaxAnalyzer.
If no_standardize is True, input 'original_cell' is
ignored and original_cell and input_cell becomes
identical. | twinpy/interfaces/lammps.py | get_relax_analyzer_from_lammps_static | kei0822kei/twinpy | 0 | python | def get_relax_analyzer_from_lammps_static(lammps_static: LammpsStatic, original_cell: tuple=None, no_standardize: bool=False, move_atoms_into_unitcell: bool=True):
"\n Get relax analyzer from lammps.\n\n Args:\n lammps_static: LammpsStatic class object.\n original_cell: Original cell.\n no_standardize: See docstring in RelaxAnalyzer.\n If no_standardize is True, input 'original_cell' is\n ignored and original_cell and input_cell becomes\n identical.\n "
if (not lammps_static.is_run_finished):
lammps_static.run_lammps()
initial_cell = lammps_static.get_initial_cell()
(lattice, frac_coords, symbols) = lammps_static.get_final_cell()
if move_atoms_into_unitcell:
frac_coords = np.round(frac_coords, decimals=6)
frac_coords %= 1.0
final_cell = (lattice, frac_coords, symbols)
forces = lammps_static.get_forces()
energy = lammps_static.get_energy()
rlx_analyzer = RelaxAnalyzer(initial_cell=initial_cell, final_cell=final_cell, original_cell=original_cell, forces=forces, energy=energy, no_standardize=no_standardize)
return rlx_analyzer | def get_relax_analyzer_from_lammps_static(lammps_static: LammpsStatic, original_cell: tuple=None, no_standardize: bool=False, move_atoms_into_unitcell: bool=True):
"\n Get relax analyzer from lammps.\n\n Args:\n lammps_static: LammpsStatic class object.\n original_cell: Original cell.\n no_standardize: See docstring in RelaxAnalyzer.\n If no_standardize is True, input 'original_cell' is\n ignored and original_cell and input_cell becomes\n identical.\n "
if (not lammps_static.is_run_finished):
lammps_static.run_lammps()
initial_cell = lammps_static.get_initial_cell()
(lattice, frac_coords, symbols) = lammps_static.get_final_cell()
if move_atoms_into_unitcell:
frac_coords = np.round(frac_coords, decimals=6)
frac_coords %= 1.0
final_cell = (lattice, frac_coords, symbols)
forces = lammps_static.get_forces()
energy = lammps_static.get_energy()
rlx_analyzer = RelaxAnalyzer(initial_cell=initial_cell, final_cell=final_cell, original_cell=original_cell, forces=forces, energy=energy, no_standardize=no_standardize)
return rlx_analyzer<|docstring|>Get relax analyzer from lammps.
Args:
lammps_static: LammpsStatic class object.
original_cell: Original cell.
no_standardize: See docstring in RelaxAnalyzer.
If no_standardize is True, input 'original_cell' is
ignored and original_cell and input_cell becomes
identical.<|endoftext|> |
20bec2cd5b0005f1462f6b72ca8b2820806fc2d2862aad3416f76be48db71827 | def get_phonon_analyzer_from_lammps_static(lammps_static: LammpsStatic, supercell_matrix: np.array, primitive_matrix: np.array=np.identity(3), original_cell: tuple=None, no_standardize: bool=False):
"\n Get phonon analyzer from lammps.\n\n Args:\n lammps_static: LammpsStatic class object.\n original_cell: Original cell.\n no_standardize: See docstring in RelaxAnalyzer.\n If no_standardize is True, input 'original_cell' is\n ignored and original_cell and input_cell becomes\n identical.\n "
rlx_analyzer = get_relax_analyzer_from_lammps_static(lammps_static=lammps_static, original_cell=original_cell, no_standardize=no_standardize)
ph_lammps_input = lammps_static.get_lammps_input_for_phonolammps()
ph_lmp = Phonolammps(lammps_input=ph_lammps_input, supercell_matrix=supercell_matrix, primitive_matrix=primitive_matrix)
phonon = get_phonon_from_phonolammps(ph_lmp)
ph_analyzer = PhononAnalyzer(phonon=phonon, relax_analyzer=rlx_analyzer)
return ph_analyzer | Get phonon analyzer from lammps.
Args:
lammps_static: LammpsStatic class object.
original_cell: Original cell.
no_standardize: See docstring in RelaxAnalyzer.
If no_standardize is True, input 'original_cell' is
ignored and original_cell and input_cell becomes
identical. | twinpy/interfaces/lammps.py | get_phonon_analyzer_from_lammps_static | kei0822kei/twinpy | 0 | python | def get_phonon_analyzer_from_lammps_static(lammps_static: LammpsStatic, supercell_matrix: np.array, primitive_matrix: np.array=np.identity(3), original_cell: tuple=None, no_standardize: bool=False):
"\n Get phonon analyzer from lammps.\n\n Args:\n lammps_static: LammpsStatic class object.\n original_cell: Original cell.\n no_standardize: See docstring in RelaxAnalyzer.\n If no_standardize is True, input 'original_cell' is\n ignored and original_cell and input_cell becomes\n identical.\n "
rlx_analyzer = get_relax_analyzer_from_lammps_static(lammps_static=lammps_static, original_cell=original_cell, no_standardize=no_standardize)
ph_lammps_input = lammps_static.get_lammps_input_for_phonolammps()
ph_lmp = Phonolammps(lammps_input=ph_lammps_input, supercell_matrix=supercell_matrix, primitive_matrix=primitive_matrix)
phonon = get_phonon_from_phonolammps(ph_lmp)
ph_analyzer = PhononAnalyzer(phonon=phonon, relax_analyzer=rlx_analyzer)
return ph_analyzer | def get_phonon_analyzer_from_lammps_static(lammps_static: LammpsStatic, supercell_matrix: np.array, primitive_matrix: np.array=np.identity(3), original_cell: tuple=None, no_standardize: bool=False):
"\n Get phonon analyzer from lammps.\n\n Args:\n lammps_static: LammpsStatic class object.\n original_cell: Original cell.\n no_standardize: See docstring in RelaxAnalyzer.\n If no_standardize is True, input 'original_cell' is\n ignored and original_cell and input_cell becomes\n identical.\n "
rlx_analyzer = get_relax_analyzer_from_lammps_static(lammps_static=lammps_static, original_cell=original_cell, no_standardize=no_standardize)
ph_lammps_input = lammps_static.get_lammps_input_for_phonolammps()
ph_lmp = Phonolammps(lammps_input=ph_lammps_input, supercell_matrix=supercell_matrix, primitive_matrix=primitive_matrix)
phonon = get_phonon_from_phonolammps(ph_lmp)
ph_analyzer = PhononAnalyzer(phonon=phonon, relax_analyzer=rlx_analyzer)
return ph_analyzer<|docstring|>Get phonon analyzer from lammps.
Args:
lammps_static: LammpsStatic class object.
original_cell: Original cell.
no_standardize: See docstring in RelaxAnalyzer.
If no_standardize is True, input 'original_cell' is
ignored and original_cell and input_cell becomes
identical.<|endoftext|> |
088d0e3bfe2ce1a349076947335ba950ca3d9c812d67318886f5ee61ff21fc26 | def get_twinboundary_analyzer_from_lammps(twinboundary_structure, pair_style: str, pair_coeff: str=None, pot_file: str=None, is_relax_lattice: bool=False, is_relax_z: bool=False, move_atoms_into_unitcell: bool=True, no_standardize: bool=True, is_run_phonon: bool=True, supercell_matrix: np.array=np.eye(3), hexagonal_relax_analyzer: RelaxAnalyzer=None, hexagonal_phonon_analyzer: PhononAnalyzer=None):
'\n Set twinboundary_analyzer from lammps.\n '
if (not no_standardize):
raise RuntimeError('Currently no_standardize=False is not supported.')
cell = twinboundary_structure.get_cell_for_export(get_lattice=False, move_atoms_into_unitcell=move_atoms_into_unitcell)
if is_run_phonon:
rlx_analyzer = None
ph_analyzer = get_phonon_analyzer_from_lammps(cell=cell, pair_style=pair_style, supercell_matrix=supercell_matrix, pair_coeff=pair_coeff, pot_file=pot_file, is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
else:
lmp_stc = get_lammps_relax(cell=cell, pair_style=pair_style, pair_coeff=pair_coeff, pot_file=pot_file, is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
rlx_analyzer = get_relax_analyzer_from_lammps_static(lammps_static=lmp_stc, original_cell=None, no_standardize=no_standardize)
ph_analyzer = None
tb_analyzer = TwinBoundaryAnalyzer(twinboundary_structure=twinboundary_structure, twinboundary_relax_analyzer=rlx_analyzer, twinboundary_phonon_analyzer=ph_analyzer, hexagonal_relax_analyzer=hexagonal_relax_analyzer, hexagonal_phonon_analyzer=hexagonal_phonon_analyzer)
return tb_analyzer | Set twinboundary_analyzer from lammps. | twinpy/interfaces/lammps.py | get_twinboundary_analyzer_from_lammps | kei0822kei/twinpy | 0 | python | def get_twinboundary_analyzer_from_lammps(twinboundary_structure, pair_style: str, pair_coeff: str=None, pot_file: str=None, is_relax_lattice: bool=False, is_relax_z: bool=False, move_atoms_into_unitcell: bool=True, no_standardize: bool=True, is_run_phonon: bool=True, supercell_matrix: np.array=np.eye(3), hexagonal_relax_analyzer: RelaxAnalyzer=None, hexagonal_phonon_analyzer: PhononAnalyzer=None):
'\n \n '
if (not no_standardize):
raise RuntimeError('Currently no_standardize=False is not supported.')
cell = twinboundary_structure.get_cell_for_export(get_lattice=False, move_atoms_into_unitcell=move_atoms_into_unitcell)
if is_run_phonon:
rlx_analyzer = None
ph_analyzer = get_phonon_analyzer_from_lammps(cell=cell, pair_style=pair_style, supercell_matrix=supercell_matrix, pair_coeff=pair_coeff, pot_file=pot_file, is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
else:
lmp_stc = get_lammps_relax(cell=cell, pair_style=pair_style, pair_coeff=pair_coeff, pot_file=pot_file, is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
rlx_analyzer = get_relax_analyzer_from_lammps_static(lammps_static=lmp_stc, original_cell=None, no_standardize=no_standardize)
ph_analyzer = None
tb_analyzer = TwinBoundaryAnalyzer(twinboundary_structure=twinboundary_structure, twinboundary_relax_analyzer=rlx_analyzer, twinboundary_phonon_analyzer=ph_analyzer, hexagonal_relax_analyzer=hexagonal_relax_analyzer, hexagonal_phonon_analyzer=hexagonal_phonon_analyzer)
return tb_analyzer | def get_twinboundary_analyzer_from_lammps(twinboundary_structure, pair_style: str, pair_coeff: str=None, pot_file: str=None, is_relax_lattice: bool=False, is_relax_z: bool=False, move_atoms_into_unitcell: bool=True, no_standardize: bool=True, is_run_phonon: bool=True, supercell_matrix: np.array=np.eye(3), hexagonal_relax_analyzer: RelaxAnalyzer=None, hexagonal_phonon_analyzer: PhononAnalyzer=None):
'\n \n '
if (not no_standardize):
raise RuntimeError('Currently no_standardize=False is not supported.')
cell = twinboundary_structure.get_cell_for_export(get_lattice=False, move_atoms_into_unitcell=move_atoms_into_unitcell)
if is_run_phonon:
rlx_analyzer = None
ph_analyzer = get_phonon_analyzer_from_lammps(cell=cell, pair_style=pair_style, supercell_matrix=supercell_matrix, pair_coeff=pair_coeff, pot_file=pot_file, is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
else:
lmp_stc = get_lammps_relax(cell=cell, pair_style=pair_style, pair_coeff=pair_coeff, pot_file=pot_file, is_relax_lattice=is_relax_lattice, is_relax_z=is_relax_z)
rlx_analyzer = get_relax_analyzer_from_lammps_static(lammps_static=lmp_stc, original_cell=None, no_standardize=no_standardize)
ph_analyzer = None
tb_analyzer = TwinBoundaryAnalyzer(twinboundary_structure=twinboundary_structure, twinboundary_relax_analyzer=rlx_analyzer, twinboundary_phonon_analyzer=ph_analyzer, hexagonal_relax_analyzer=hexagonal_relax_analyzer, hexagonal_phonon_analyzer=hexagonal_phonon_analyzer)
return tb_analyzer<|docstring|>Set twinboundary_analyzer from lammps.<|endoftext|> |
a35e09afab6e5a7fc77ee0eb1d81d6457c5199e101d185c23e300fb1f39caeb5 | def get_phonon_from_phonolammps(phonolammps) -> Phonopy:
'\n Get Phonopy class object from PhonoLammps.\n\n Args:\n phonolammps: Phonlammps class object.\n '
unitcell = phonolammps.get_unitcell()
force_constants = phonolammps.get_force_constants()
supercell_matrix = phonolammps.get_supercell_matrix()
primitive_matrix = phonolammps.get_primitve_matrix()
phonon = Phonopy(unitcell=unitcell, primitive_matrix=primitive_matrix, supercell_matrix=supercell_matrix)
dataset = phonolammps.get_force_constants(include_data_set=True)[1]
phonon.dataset = dataset
phonon.produce_force_constants()
return phonon | Get Phonopy class object from PhonoLammps.
Args:
phonolammps: Phonlammps class object. | twinpy/interfaces/lammps.py | get_phonon_from_phonolammps | kei0822kei/twinpy | 0 | python | def get_phonon_from_phonolammps(phonolammps) -> Phonopy:
'\n Get Phonopy class object from PhonoLammps.\n\n Args:\n phonolammps: Phonlammps class object.\n '
unitcell = phonolammps.get_unitcell()
force_constants = phonolammps.get_force_constants()
supercell_matrix = phonolammps.get_supercell_matrix()
primitive_matrix = phonolammps.get_primitve_matrix()
phonon = Phonopy(unitcell=unitcell, primitive_matrix=primitive_matrix, supercell_matrix=supercell_matrix)
dataset = phonolammps.get_force_constants(include_data_set=True)[1]
phonon.dataset = dataset
phonon.produce_force_constants()
return phonon | def get_phonon_from_phonolammps(phonolammps) -> Phonopy:
'\n Get Phonopy class object from PhonoLammps.\n\n Args:\n phonolammps: Phonlammps class object.\n '
unitcell = phonolammps.get_unitcell()
force_constants = phonolammps.get_force_constants()
supercell_matrix = phonolammps.get_supercell_matrix()
primitive_matrix = phonolammps.get_primitve_matrix()
phonon = Phonopy(unitcell=unitcell, primitive_matrix=primitive_matrix, supercell_matrix=supercell_matrix)
dataset = phonolammps.get_force_constants(include_data_set=True)[1]
phonon.dataset = dataset
phonon.produce_force_constants()
return phonon<|docstring|>Get Phonopy class object from PhonoLammps.
Args:
phonolammps: Phonlammps class object.<|endoftext|> |
f96120395b270f006cfc7816223ed8306ca2f01427597fdc33262f1f4cdb26f3 | @require(network=True)
@plugin('translate')
def translate(jarvis, s):
'\n translates from one language to another.\n '
jarvis.say('\nEnter source language ')
srcs = jarvis.input().lower().strip()
while ((srcs not in LANGUAGES) and (srcs not in SPECIAL_CASES) and (srcs not in LANGCODES)):
if (srcs in SPECIAL_CASES):
srcs = SPECIAL_CASES[srcs]
elif (srcs in LANGCODES):
srcs = LANGCODES[srcs]
else:
jarvis.say('\nInvalid source language\nEnter again')
srcs = jarvis.input().lower()
jarvis.say('\nEnter destination language ')
des = jarvis.input().lower().strip()
while ((des not in LANGUAGES) and (des not in SPECIAL_CASES) and (des not in LANGCODES)):
if (des in SPECIAL_CASES):
des = SPECIAL_CASES[des]
elif (des in LANGCODES):
des = LANGCODES[des]
else:
jarvis.say('\nInvalid destination language\nEnter again')
des = jarvis.input().lower()
jarvis.say('\nEnter text ')
tex = jarvis.input()
translator = Translator()
result = translator.translate(tex, dest=des, src=srcs)
result = u'\n[{src}] {original}\n ->\n[{dest}] {text}\n[pron.] {pronunciation}\n '.strip().format(src=result.src, dest=result.dest, original=result.origin, text=result.text, pronunciation=result.pronunciation)
print(result) | translates from one language to another. | jarviscli/plugins/translate.py | translate | singhpriya2498/Jarvis | 2 | python | @require(network=True)
@plugin('translate')
def translate(jarvis, s):
'\n \n '
jarvis.say('\nEnter source language ')
srcs = jarvis.input().lower().strip()
while ((srcs not in LANGUAGES) and (srcs not in SPECIAL_CASES) and (srcs not in LANGCODES)):
if (srcs in SPECIAL_CASES):
srcs = SPECIAL_CASES[srcs]
elif (srcs in LANGCODES):
srcs = LANGCODES[srcs]
else:
jarvis.say('\nInvalid source language\nEnter again')
srcs = jarvis.input().lower()
jarvis.say('\nEnter destination language ')
des = jarvis.input().lower().strip()
while ((des not in LANGUAGES) and (des not in SPECIAL_CASES) and (des not in LANGCODES)):
if (des in SPECIAL_CASES):
des = SPECIAL_CASES[des]
elif (des in LANGCODES):
des = LANGCODES[des]
else:
jarvis.say('\nInvalid destination language\nEnter again')
des = jarvis.input().lower()
jarvis.say('\nEnter text ')
tex = jarvis.input()
translator = Translator()
result = translator.translate(tex, dest=des, src=srcs)
result = u'\n[{src}] {original}\n ->\n[{dest}] {text}\n[pron.] {pronunciation}\n '.strip().format(src=result.src, dest=result.dest, original=result.origin, text=result.text, pronunciation=result.pronunciation)
print(result) | @require(network=True)
@plugin('translate')
def translate(jarvis, s):
'\n \n '
jarvis.say('\nEnter source language ')
srcs = jarvis.input().lower().strip()
while ((srcs not in LANGUAGES) and (srcs not in SPECIAL_CASES) and (srcs not in LANGCODES)):
if (srcs in SPECIAL_CASES):
srcs = SPECIAL_CASES[srcs]
elif (srcs in LANGCODES):
srcs = LANGCODES[srcs]
else:
jarvis.say('\nInvalid source language\nEnter again')
srcs = jarvis.input().lower()
jarvis.say('\nEnter destination language ')
des = jarvis.input().lower().strip()
while ((des not in LANGUAGES) and (des not in SPECIAL_CASES) and (des not in LANGCODES)):
if (des in SPECIAL_CASES):
des = SPECIAL_CASES[des]
elif (des in LANGCODES):
des = LANGCODES[des]
else:
jarvis.say('\nInvalid destination language\nEnter again')
des = jarvis.input().lower()
jarvis.say('\nEnter text ')
tex = jarvis.input()
translator = Translator()
result = translator.translate(tex, dest=des, src=srcs)
result = u'\n[{src}] {original}\n ->\n[{dest}] {text}\n[pron.] {pronunciation}\n '.strip().format(src=result.src, dest=result.dest, original=result.origin, text=result.text, pronunciation=result.pronunciation)
print(result)<|docstring|>translates from one language to another.<|endoftext|> |
699cd30c4ecd488c5903eaaf63406f91e415974b9f106aca0dba65614a61ba0c | @abstractclassmethod
def find(cls, frame):
' Find an object in the frame\n\n Args:\n frame: an openCV frame in BGR\n\n Returns:\n (x, y, size, frames)\n the location of the object relative to center(in pixel) and frames for debug\n ' | Find an object in the frame
Args:
frame: an openCV frame in BGR
Returns:
(x, y, size, frames)
the location of the object relative to center(in pixel) and frames for debug | src/tracking/cores.py | find | heyuhang0/SAUVC2019 | 1 | python | @abstractclassmethod
def find(cls, frame):
' Find an object in the frame\n\n Args:\n frame: an openCV frame in BGR\n\n Returns:\n (x, y, size, frames)\n the location of the object relative to center(in pixel) and frames for debug\n ' | @abstractclassmethod
def find(cls, frame):
' Find an object in the frame\n\n Args:\n frame: an openCV frame in BGR\n\n Returns:\n (x, y, size, frames)\n the location of the object relative to center(in pixel) and frames for debug\n '<|docstring|>Find an object in the frame
Args:
frame: an openCV frame in BGR
Returns:
(x, y, size, frames)
the location of the object relative to center(in pixel) and frames for debug<|endoftext|> |
57daa94e87907e89b2c42093505674b67a69af4aca510d8663d936e415289105 | def ClearSearchTables(self):
'Clear search tables stub.'
self.search_tables_ = [] | Clear search tables stub. | earth_enterprise/src/fusion/portableglobe/servers/stub_search.py | ClearSearchTables | augustocamaral2003/earthenterprise | 2,661 | python | def ClearSearchTables(self):
self.search_tables_ = [] | def ClearSearchTables(self):
self.search_tables_ = []<|docstring|>Clear search tables stub.<|endoftext|> |
1886f99558d9b73b6cf7d02a04d8634eb0c50d6a6540668796d4892c61c57110 | def LoadSearchTable(self, table_name, content):
'Load data for search stub.'
self.search_tables_.append(table_name) | Load data for search stub. | earth_enterprise/src/fusion/portableglobe/servers/stub_search.py | LoadSearchTable | augustocamaral2003/earthenterprise | 2,661 | python | def LoadSearchTable(self, table_name, content):
self.search_tables_.append(table_name) | def LoadSearchTable(self, table_name, content):
self.search_tables_.append(table_name)<|docstring|>Load data for search stub.<|endoftext|> |
8755ff79d19de268428f6b99ec848189943dd0c9086db1a9fd5beee21877e64a | def KmlSearch(self, search_term):
'Search stub for search that returns results as kml.'
return '' | Search stub for search that returns results as kml. | earth_enterprise/src/fusion/portableglobe/servers/stub_search.py | KmlSearch | augustocamaral2003/earthenterprise | 2,661 | python | def KmlSearch(self, search_term):
return | def KmlSearch(self, search_term):
return <|docstring|>Search stub for search that returns results as kml.<|endoftext|> |
0a2ebce2e8a599410a69e660609c08994a390467569dcd83ac1485eb04862aa8 | def JsonSearch(self, search_term):
'Search stub for search that returns results as json.'
return '' | Search stub for search that returns results as json. | earth_enterprise/src/fusion/portableglobe/servers/stub_search.py | JsonSearch | augustocamaral2003/earthenterprise | 2,661 | python | def JsonSearch(self, search_term):
return | def JsonSearch(self, search_term):
return <|docstring|>Search stub for search that returns results as json.<|endoftext|> |
533a95e9202597e6e45db429330bbf4cb3d0d4a12ce929bc363eeab89d50183f | def get_file_name(self) -> str:
'\n :return: Associate filename\n '
return bytearray(self.source.FileName.string[:(- 2)]).decode('utf-16le') | :return: Associate filename | etl/parsers/kernel/file.py | get_file_name | IMULMUL/etl-parser | 104 | python | def get_file_name(self) -> str:
'\n \n '
return bytearray(self.source.FileName.string[:(- 2)]).decode('utf-16le') | def get_file_name(self) -> str:
'\n \n '
return bytearray(self.source.FileName.string[:(- 2)]).decode('utf-16le')<|docstring|>:return: Associate filename<|endoftext|> |
7f21bf99d0cebe2f149b71705470cbee4c3b71fe3366ad0f34cf462eb51d7297 | @command('')
def cli(args):
'\n Top level base command dummy function\n ' | Top level base command dummy function | leapp/cli/__init__.py | cli | dhodovsk/leapp | 29 | python | @command()
def cli(args):
'\n \n ' | @command()
def cli(args):
'\n \n '<|docstring|>Top level base command dummy function<|endoftext|> |
c03c0aba533d39a937c4faf9cdb56b9531936849a32ac459bcddd4bf5330f36e | def main():
'\n leapp entry point\n '
os.environ['LEAPP_HOSTNAME'] = socket.getfqdn()
_load_commands(cli.command)
cli.command.execute('leapp version {}'.format(VERSION)) | leapp entry point | leapp/cli/__init__.py | main | dhodovsk/leapp | 29 | python | def main():
'\n \n '
os.environ['LEAPP_HOSTNAME'] = socket.getfqdn()
_load_commands(cli.command)
cli.command.execute('leapp version {}'.format(VERSION)) | def main():
'\n \n '
os.environ['LEAPP_HOSTNAME'] = socket.getfqdn()
_load_commands(cli.command)
cli.command.execute('leapp version {}'.format(VERSION))<|docstring|>leapp entry point<|endoftext|> |
a71f84e955963163c8a3b8cf2eeb0ff48430c0eeb11900de0a4162189bdafdfe | @post_dump
def remove_none(self, data, **kwargs):
'Prevents from dumping attributes that are None,\n thus making the dump more compact.\n '
return OrderedDict(((key, value) for (key, value) in data.items() if (value is not None))) | Prevents from dumping attributes that are None,
thus making the dump more compact. | sdk/ml/azure-ai-ml/azure/ai/ml/_schema/core/schema_meta.py | remove_none | jalauzon-msft/azure-sdk-for-python | 1 | python | @post_dump
def remove_none(self, data, **kwargs):
'Prevents from dumping attributes that are None,\n thus making the dump more compact.\n '
return OrderedDict(((key, value) for (key, value) in data.items() if (value is not None))) | @post_dump
def remove_none(self, data, **kwargs):
'Prevents from dumping attributes that are None,\n thus making the dump more compact.\n '
return OrderedDict(((key, value) for (key, value) in data.items() if (value is not None)))<|docstring|>Prevents from dumping attributes that are None,
thus making the dump more compact.<|endoftext|> |
d587a6faff2dd39d1b8b6bc04d1e3483a8d92cd26ca72c55a74274b8b5c7f78e | @requires_firmware_version('1.1.2018091003')
def stream_buffered_data(self, length_of_time_in_seconds, sample_rate_in_ms):
'Yield a generator object for the buffered field data.\n Useful for getting the data in real time when doing a lengthy acquisition.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to stream the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n Returns:\n A generator object that returns the data as datapoint tuples\n '
self.command(('SENSE:AVERAGE:COUNT ' + str((sample_rate_in_ms / 10))))
length_of_time_in_seconds = round(length_of_time_in_seconds, 2)
total_number_of_samples = int(round(((length_of_time_in_seconds * 1000) / sample_rate_in_ms), 0))
number_of_samples = 0
self.query('FETC:BUFF:DC?', check_errors=False)
while (number_of_samples < total_number_of_samples):
response = self.query('FETC:BUFF:DC?', check_errors=False).strip('"')
if (';' in response):
data_points = response.rstrip(';').split(';')
for point in data_points:
point_data = point.split(',')
for (count, _) in enumerate(point_data):
if (count == 0):
point_data[count] = iso8601.parse_date(point_data[count])
elif (count == (len(point_data) - 1)):
point_data[count] = int(point_data[count])
else:
point_data[count] = float(point_data[count])
if (len(point_data) == 6):
input_state = point_data.pop()
point_data.append(0.0)
point_data.append(input_state)
number_of_samples += 1
elapsed_time_in_seconds = ((sample_rate_in_ms * number_of_samples) / 1000)
if (number_of_samples > total_number_of_samples):
break
new_point = DataPoint(elapsed_time_in_seconds, *point_data)
(yield new_point) | Yield a generator object for the buffered field data.
Useful for getting the data in real time when doing a lengthy acquisition.
Args:
length_of_time_in_seconds (float):
The period of time over which to stream the data.
sample_rate_in_ms (int):
The averaging window (sampling period) of the instrument.
Returns:
A generator object that returns the data as datapoint tuples | lakeshore/teslameter.py | stream_buffered_data | lakeshorecryotronics/python-driver | 6 | python | @requires_firmware_version('1.1.2018091003')
def stream_buffered_data(self, length_of_time_in_seconds, sample_rate_in_ms):
'Yield a generator object for the buffered field data.\n Useful for getting the data in real time when doing a lengthy acquisition.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to stream the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n Returns:\n A generator object that returns the data as datapoint tuples\n '
self.command(('SENSE:AVERAGE:COUNT ' + str((sample_rate_in_ms / 10))))
length_of_time_in_seconds = round(length_of_time_in_seconds, 2)
total_number_of_samples = int(round(((length_of_time_in_seconds * 1000) / sample_rate_in_ms), 0))
number_of_samples = 0
self.query('FETC:BUFF:DC?', check_errors=False)
while (number_of_samples < total_number_of_samples):
response = self.query('FETC:BUFF:DC?', check_errors=False).strip('"')
if (';' in response):
data_points = response.rstrip(';').split(';')
for point in data_points:
point_data = point.split(',')
for (count, _) in enumerate(point_data):
if (count == 0):
point_data[count] = iso8601.parse_date(point_data[count])
elif (count == (len(point_data) - 1)):
point_data[count] = int(point_data[count])
else:
point_data[count] = float(point_data[count])
if (len(point_data) == 6):
input_state = point_data.pop()
point_data.append(0.0)
point_data.append(input_state)
number_of_samples += 1
elapsed_time_in_seconds = ((sample_rate_in_ms * number_of_samples) / 1000)
if (number_of_samples > total_number_of_samples):
break
new_point = DataPoint(elapsed_time_in_seconds, *point_data)
(yield new_point) | @requires_firmware_version('1.1.2018091003')
def stream_buffered_data(self, length_of_time_in_seconds, sample_rate_in_ms):
'Yield a generator object for the buffered field data.\n Useful for getting the data in real time when doing a lengthy acquisition.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to stream the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n Returns:\n A generator object that returns the data as datapoint tuples\n '
self.command(('SENSE:AVERAGE:COUNT ' + str((sample_rate_in_ms / 10))))
length_of_time_in_seconds = round(length_of_time_in_seconds, 2)
total_number_of_samples = int(round(((length_of_time_in_seconds * 1000) / sample_rate_in_ms), 0))
number_of_samples = 0
self.query('FETC:BUFF:DC?', check_errors=False)
while (number_of_samples < total_number_of_samples):
response = self.query('FETC:BUFF:DC?', check_errors=False).strip('"')
if (';' in response):
data_points = response.rstrip(';').split(';')
for point in data_points:
point_data = point.split(',')
for (count, _) in enumerate(point_data):
if (count == 0):
point_data[count] = iso8601.parse_date(point_data[count])
elif (count == (len(point_data) - 1)):
point_data[count] = int(point_data[count])
else:
point_data[count] = float(point_data[count])
if (len(point_data) == 6):
input_state = point_data.pop()
point_data.append(0.0)
point_data.append(input_state)
number_of_samples += 1
elapsed_time_in_seconds = ((sample_rate_in_ms * number_of_samples) / 1000)
if (number_of_samples > total_number_of_samples):
break
new_point = DataPoint(elapsed_time_in_seconds, *point_data)
(yield new_point)<|docstring|>Yield a generator object for the buffered field data.
Useful for getting the data in real time when doing a lengthy acquisition.
Args:
length_of_time_in_seconds (float):
The period of time over which to stream the data.
sample_rate_in_ms (int):
The averaging window (sampling period) of the instrument.
Returns:
A generator object that returns the data as datapoint tuples<|endoftext|> |
b3bafe3a78e22c8172824c8c13d5c5a66d065456236c39ee937c2d9f750f5e37 | @requires_firmware_version('1.1.2018091003')
def get_buffered_data_points(self, length_of_time_in_seconds, sample_rate_in_ms):
'Returns a list of namedtuples that contain the buffered data.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to collect the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n Returns:\n The data as a list of datapoint tuples\n '
return list(self.stream_buffered_data(length_of_time_in_seconds, sample_rate_in_ms)) | Returns a list of namedtuples that contain the buffered data.
Args:
length_of_time_in_seconds (float):
The period of time over which to collect the data.
sample_rate_in_ms (int):
The averaging window (sampling period) of the instrument.
Returns:
The data as a list of datapoint tuples | lakeshore/teslameter.py | get_buffered_data_points | lakeshorecryotronics/python-driver | 6 | python | @requires_firmware_version('1.1.2018091003')
def get_buffered_data_points(self, length_of_time_in_seconds, sample_rate_in_ms):
'Returns a list of namedtuples that contain the buffered data.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to collect the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n Returns:\n The data as a list of datapoint tuples\n '
return list(self.stream_buffered_data(length_of_time_in_seconds, sample_rate_in_ms)) | @requires_firmware_version('1.1.2018091003')
def get_buffered_data_points(self, length_of_time_in_seconds, sample_rate_in_ms):
'Returns a list of namedtuples that contain the buffered data.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to collect the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n Returns:\n The data as a list of datapoint tuples\n '
return list(self.stream_buffered_data(length_of_time_in_seconds, sample_rate_in_ms))<|docstring|>Returns a list of namedtuples that contain the buffered data.
Args:
length_of_time_in_seconds (float):
The period of time over which to collect the data.
sample_rate_in_ms (int):
The averaging window (sampling period) of the instrument.
Returns:
The data as a list of datapoint tuples<|endoftext|> |
e3025a3c199aff48d5a0aa0ec2ef96ebdfad271054bce0a28ed58a764e0f914c | @requires_firmware_version('1.1.2018091003')
def log_buffered_data_to_file(self, length_of_time_in_seconds, sample_rate_in_ms, file):
'Creates or appends a CSV file with the buffered data and excel-friendly timestamps.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to collect the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n file (file_object):\n Field measurement data will be written to this file object in a CSV format.\n '
file.write(('time elapsed,date,time,' + 'magnitude,x,y,z,field control set point,input state\n'))
data_stream_generator = self.stream_buffered_data(length_of_time_in_seconds, sample_rate_in_ms)
for point in data_stream_generator:
column_values = []
for (count, data) in enumerate(point):
if (count != 1):
column_values.append(str(data))
else:
column_values.append(datetime.strftime(data, '%m/%d/%Y'))
column_values.append(datetime.strftime(data, '%H:%M:%S.%f'))
file.write((','.join(column_values) + '\n')) | Creates or appends a CSV file with the buffered data and excel-friendly timestamps.
Args:
length_of_time_in_seconds (float):
The period of time over which to collect the data.
sample_rate_in_ms (int):
The averaging window (sampling period) of the instrument.
file (file_object):
Field measurement data will be written to this file object in a CSV format. | lakeshore/teslameter.py | log_buffered_data_to_file | lakeshorecryotronics/python-driver | 6 | python | @requires_firmware_version('1.1.2018091003')
def log_buffered_data_to_file(self, length_of_time_in_seconds, sample_rate_in_ms, file):
'Creates or appends a CSV file with the buffered data and excel-friendly timestamps.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to collect the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n file (file_object):\n Field measurement data will be written to this file object in a CSV format.\n '
file.write(('time elapsed,date,time,' + 'magnitude,x,y,z,field control set point,input state\n'))
data_stream_generator = self.stream_buffered_data(length_of_time_in_seconds, sample_rate_in_ms)
for point in data_stream_generator:
column_values = []
for (count, data) in enumerate(point):
if (count != 1):
column_values.append(str(data))
else:
column_values.append(datetime.strftime(data, '%m/%d/%Y'))
column_values.append(datetime.strftime(data, '%H:%M:%S.%f'))
file.write((','.join(column_values) + '\n')) | @requires_firmware_version('1.1.2018091003')
def log_buffered_data_to_file(self, length_of_time_in_seconds, sample_rate_in_ms, file):
'Creates or appends a CSV file with the buffered data and excel-friendly timestamps.\n\n Args:\n length_of_time_in_seconds (float):\n The period of time over which to collect the data.\n\n sample_rate_in_ms (int):\n The averaging window (sampling period) of the instrument.\n\n file (file_object):\n Field measurement data will be written to this file object in a CSV format.\n '
file.write(('time elapsed,date,time,' + 'magnitude,x,y,z,field control set point,input state\n'))
data_stream_generator = self.stream_buffered_data(length_of_time_in_seconds, sample_rate_in_ms)
for point in data_stream_generator:
column_values = []
for (count, data) in enumerate(point):
if (count != 1):
column_values.append(str(data))
else:
column_values.append(datetime.strftime(data, '%m/%d/%Y'))
column_values.append(datetime.strftime(data, '%H:%M:%S.%f'))
file.write((','.join(column_values) + '\n'))<|docstring|>Creates or appends a CSV file with the buffered data and excel-friendly timestamps.
Args:
length_of_time_in_seconds (float):
The period of time over which to collect the data.
sample_rate_in_ms (int):
The averaging window (sampling period) of the instrument.
file (file_object):
Field measurement data will be written to this file object in a CSV format.<|endoftext|> |
e69100cfd3593ead9d8f1b3b10286be69e7002f2a80cce60836e44c99023b084 | def get_dc_field(self):
'Returns the DC field reading.'
return float(self.query('FETCH:DC?')) | Returns the DC field reading. | lakeshore/teslameter.py | get_dc_field | lakeshorecryotronics/python-driver | 6 | python | def get_dc_field(self):
return float(self.query('FETCH:DC?')) | def get_dc_field(self):
return float(self.query('FETCH:DC?'))<|docstring|>Returns the DC field reading.<|endoftext|> |
dcc66b51cfd15669229d5d88729baabcf2c5ed8884fc6a0a6f4339705ae135a9 | def get_dc_field_xyz(self):
'Returns the DC field reading.'
response = self.query('FETCH:DC? ALL')
xyz_values = [float(channel_value) for channel_value in response.split(',')]
return tuple(xyz_values) | Returns the DC field reading. | lakeshore/teslameter.py | get_dc_field_xyz | lakeshorecryotronics/python-driver | 6 | python | def get_dc_field_xyz(self):
response = self.query('FETCH:DC? ALL')
xyz_values = [float(channel_value) for channel_value in response.split(',')]
return tuple(xyz_values) | def get_dc_field_xyz(self):
response = self.query('FETCH:DC? ALL')
xyz_values = [float(channel_value) for channel_value in response.split(',')]
return tuple(xyz_values)<|docstring|>Returns the DC field reading.<|endoftext|> |
cb305ad020c077dcc6c9f3e72d754bea649aad175d5aec18e65010d9cba558e2 | def get_rms_field(self):
'Returns the RMS field reading.'
return float(self.query('FETCH:RMS?')) | Returns the RMS field reading. | lakeshore/teslameter.py | get_rms_field | lakeshorecryotronics/python-driver | 6 | python | def get_rms_field(self):
return float(self.query('FETCH:RMS?')) | def get_rms_field(self):
return float(self.query('FETCH:RMS?'))<|docstring|>Returns the RMS field reading.<|endoftext|> |
7cd45895b12175cea617160788f46bded42b78e274b8f18a4a8304953e12b024 | def get_rms_field_xyz(self):
'Returns the RMS field reading.'
response = self.query('FETCH:RMS? ALL')
xyz_values = [float(channel_value) for channel_value in response.split(',')]
return tuple(xyz_values) | Returns the RMS field reading. | lakeshore/teslameter.py | get_rms_field_xyz | lakeshorecryotronics/python-driver | 6 | python | def get_rms_field_xyz(self):
response = self.query('FETCH:RMS? ALL')
xyz_values = [float(channel_value) for channel_value in response.split(',')]
return tuple(xyz_values) | def get_rms_field_xyz(self):
response = self.query('FETCH:RMS? ALL')
xyz_values = [float(channel_value) for channel_value in response.split(',')]
return tuple(xyz_values)<|docstring|>Returns the RMS field reading.<|endoftext|> |
119d20a547aca8ebd9a7f92c851872e665ce59b1ea8a96f6073d392082a9b052 | def get_frequency(self):
'Returns the field frequency reading.'
return float(self.query('FETCH:FREQ?')) | Returns the field frequency reading. | lakeshore/teslameter.py | get_frequency | lakeshorecryotronics/python-driver | 6 | python | def get_frequency(self):
return float(self.query('FETCH:FREQ?')) | def get_frequency(self):
return float(self.query('FETCH:FREQ?'))<|docstring|>Returns the field frequency reading.<|endoftext|> |
cf984036475d450856b151d987fc98556f027da07126d49d5d43095c92bc7fb2 | def get_max_min(self):
'Returns the maximum and minimum field readings respectively.'
response = self.query('FETCH:MAX?', 'FETCH:MIN?')
separated_response = response.split(';')
return (float(separated_response[0]), float(separated_response[1])) | Returns the maximum and minimum field readings respectively. | lakeshore/teslameter.py | get_max_min | lakeshorecryotronics/python-driver | 6 | python | def get_max_min(self):
response = self.query('FETCH:MAX?', 'FETCH:MIN?')
separated_response = response.split(';')
return (float(separated_response[0]), float(separated_response[1])) | def get_max_min(self):
response = self.query('FETCH:MAX?', 'FETCH:MIN?')
separated_response = response.split(';')
return (float(separated_response[0]), float(separated_response[1]))<|docstring|>Returns the maximum and minimum field readings respectively.<|endoftext|> |
c41ba718497600a65eba7ab2749b4afa3bad42f6e39c9abcd991ee52361078ea | def get_max_min_peaks(self):
'Returns the maximum and minimum peak field readings respectively.'
response = self.query('FETCH:MAXP?', 'FETCH:MINP?')
separated_response = response.split(';')
return (float(separated_response[0]), float(separated_response[1])) | Returns the maximum and minimum peak field readings respectively. | lakeshore/teslameter.py | get_max_min_peaks | lakeshorecryotronics/python-driver | 6 | python | def get_max_min_peaks(self):
response = self.query('FETCH:MAXP?', 'FETCH:MINP?')
separated_response = response.split(';')
return (float(separated_response[0]), float(separated_response[1])) | def get_max_min_peaks(self):
response = self.query('FETCH:MAXP?', 'FETCH:MINP?')
separated_response = response.split(';')
return (float(separated_response[0]), float(separated_response[1]))<|docstring|>Returns the maximum and minimum peak field readings respectively.<|endoftext|> |
a7f8fc3c544ab1b67905628dd9046f95af411470f783c63ae04c2c64deb30c78 | def reset_max_min(self):
'Resets the maximum and minimum field readings to the present field reading.'
self.command('SENS:MRESET') | Resets the maximum and minimum field readings to the present field reading. | lakeshore/teslameter.py | reset_max_min | lakeshorecryotronics/python-driver | 6 | python | def reset_max_min(self):
self.command('SENS:MRESET') | def reset_max_min(self):
self.command('SENS:MRESET')<|docstring|>Resets the maximum and minimum field readings to the present field reading.<|endoftext|> |
6f80161da1460a71443b67a410fd1ccd8931e5f18b313729e729c7528e93a537 | def get_temperature(self):
'Returns the temperature reading.'
return float(self.query('FETCH:TEMP?')) | Returns the temperature reading. | lakeshore/teslameter.py | get_temperature | lakeshorecryotronics/python-driver | 6 | python | def get_temperature(self):
return float(self.query('FETCH:TEMP?')) | def get_temperature(self):
return float(self.query('FETCH:TEMP?'))<|docstring|>Returns the temperature reading.<|endoftext|> |
62c81882f65f7a111d113f1f1e945216c82d69712cfb19632332626740d655de | def get_probe_information(self):
'Returns a dictionary of probe data.'
probe_data = {'model_number': self.query('PROBE:MODEL?'), 'serial_number': self.query('PROBE:SNUM?'), 'probe_type': self.query('PROBE:PTYPE?'), 'sensor_type': self.query('PROBE:STYPE?'), 'sensor_orientation': self.query('PROBE:SOR?'), 'number_of_axes': self.query('PROBE:AXES?'), 'calibration_date': self.query('PROBE:CALDATE?')}
return probe_data | Returns a dictionary of probe data. | lakeshore/teslameter.py | get_probe_information | lakeshorecryotronics/python-driver | 6 | python | def get_probe_information(self):
probe_data = {'model_number': self.query('PROBE:MODEL?'), 'serial_number': self.query('PROBE:SNUM?'), 'probe_type': self.query('PROBE:PTYPE?'), 'sensor_type': self.query('PROBE:STYPE?'), 'sensor_orientation': self.query('PROBE:SOR?'), 'number_of_axes': self.query('PROBE:AXES?'), 'calibration_date': self.query('PROBE:CALDATE?')}
return probe_data | def get_probe_information(self):
probe_data = {'model_number': self.query('PROBE:MODEL?'), 'serial_number': self.query('PROBE:SNUM?'), 'probe_type': self.query('PROBE:PTYPE?'), 'sensor_type': self.query('PROBE:STYPE?'), 'sensor_orientation': self.query('PROBE:SOR?'), 'number_of_axes': self.query('PROBE:AXES?'), 'calibration_date': self.query('PROBE:CALDATE?')}
return probe_data<|docstring|>Returns a dictionary of probe data.<|endoftext|> |
97eaf0d6bb5ee0cca42f1c0192012445dc4ccdf5b196177292b51c83d3a2edcf | def get_relative_field(self):
'Returns the relative field value.'
return float(self.query('FETCH:RELATIVE?')) | Returns the relative field value. | lakeshore/teslameter.py | get_relative_field | lakeshorecryotronics/python-driver | 6 | python | def get_relative_field(self):
return float(self.query('FETCH:RELATIVE?')) | def get_relative_field(self):
return float(self.query('FETCH:RELATIVE?'))<|docstring|>Returns the relative field value.<|endoftext|> |
313a2b1f6309b988839e622ce76f561f43e092c548ca160f18280d181b09521d | def tare_relative_field(self):
'Copies the current field reading to the relative baseline value.'
self.command('SENS:RELATIVE:TARE') | Copies the current field reading to the relative baseline value. | lakeshore/teslameter.py | tare_relative_field | lakeshorecryotronics/python-driver | 6 | python | def tare_relative_field(self):
self.command('SENS:RELATIVE:TARE') | def tare_relative_field(self):
self.command('SENS:RELATIVE:TARE')<|docstring|>Copies the current field reading to the relative baseline value.<|endoftext|> |
cec49488708dc4c852727b47f4294a4e71ee40cebf437fb5fa0a7479083cf503 | def get_relative_field_baseline(self):
'Returns the relative field baseline value.'
return float(self.query('SENS:RELATIVE:BASELINE?')) | Returns the relative field baseline value. | lakeshore/teslameter.py | get_relative_field_baseline | lakeshorecryotronics/python-driver | 6 | python | def get_relative_field_baseline(self):
return float(self.query('SENS:RELATIVE:BASELINE?')) | def get_relative_field_baseline(self):
return float(self.query('SENS:RELATIVE:BASELINE?'))<|docstring|>Returns the relative field baseline value.<|endoftext|> |
f8996af074d3acd0a1f0c965b81075b80a8b5bd4281b9622af3758486ee5175a | def set_relative_field_baseline(self, baseline_field):
'Configures the relative baseline value.\n\n Args:\n baseline_field (float):\n A field units value that will act as the zero field for the relative measurement.\n '
self.command(('SENS:RELATIVE:BASELINE ' + str(baseline_field))) | Configures the relative baseline value.
Args:
baseline_field (float):
A field units value that will act as the zero field for the relative measurement. | lakeshore/teslameter.py | set_relative_field_baseline | lakeshorecryotronics/python-driver | 6 | python | def set_relative_field_baseline(self, baseline_field):
'Configures the relative baseline value.\n\n Args:\n baseline_field (float):\n A field units value that will act as the zero field for the relative measurement.\n '
self.command(('SENS:RELATIVE:BASELINE ' + str(baseline_field))) | def set_relative_field_baseline(self, baseline_field):
'Configures the relative baseline value.\n\n Args:\n baseline_field (float):\n A field units value that will act as the zero field for the relative measurement.\n '
self.command(('SENS:RELATIVE:BASELINE ' + str(baseline_field)))<|docstring|>Configures the relative baseline value.
Args:
baseline_field (float):
A field units value that will act as the zero field for the relative measurement.<|endoftext|> |
311e8b901b24ff74a587eeec1c71c53b46a2fa2d2411de5dbcf2351b325b8802 | def configure_field_measurement_setup(self, mode='DC', autorange=True, expected_field=None, averaging_samples=20):
'Configures the field measurement settings.\n\n Args:\n mode (str):\n * Modes are as follows:\n * "DC"\n * "AC" (0.1 - 500 Hz)\n * "HIFR" (50 Hz - 100 kHz)\n\n autorange (bool):\n Chooses whether the instrument automatically selects the best range for the measured value\n\n expected_field (float):\n When autorange is False, the expected_field is the largest field expected to be measured.\n It sets the lowest instrument field range capable of measuring the value.\n\n averaging_samples (int):\n The number of field samples to average. Each sample is 10 milliseconds of field information.\n\n '
self.command(('SENS:MODE ' + mode))
self.command(('SENS:RANGE:AUTO ' + str(int(autorange))))
if (expected_field is not None):
self.command(('SENS:RANGE ' + str(expected_field)))
self.command(('SENS:AVERAGE:COUNT ' + str(averaging_samples))) | Configures the field measurement settings.
Args:
mode (str):
* Modes are as follows:
* "DC"
* "AC" (0.1 - 500 Hz)
* "HIFR" (50 Hz - 100 kHz)
autorange (bool):
Chooses whether the instrument automatically selects the best range for the measured value
expected_field (float):
When autorange is False, the expected_field is the largest field expected to be measured.
It sets the lowest instrument field range capable of measuring the value.
averaging_samples (int):
The number of field samples to average. Each sample is 10 milliseconds of field information. | lakeshore/teslameter.py | configure_field_measurement_setup | lakeshorecryotronics/python-driver | 6 | python | def configure_field_measurement_setup(self, mode='DC', autorange=True, expected_field=None, averaging_samples=20):
'Configures the field measurement settings.\n\n Args:\n mode (str):\n * Modes are as follows:\n * "DC"\n * "AC" (0.1 - 500 Hz)\n * "HIFR" (50 Hz - 100 kHz)\n\n autorange (bool):\n Chooses whether the instrument automatically selects the best range for the measured value\n\n expected_field (float):\n When autorange is False, the expected_field is the largest field expected to be measured.\n It sets the lowest instrument field range capable of measuring the value.\n\n averaging_samples (int):\n The number of field samples to average. Each sample is 10 milliseconds of field information.\n\n '
self.command(('SENS:MODE ' + mode))
self.command(('SENS:RANGE:AUTO ' + str(int(autorange))))
if (expected_field is not None):
self.command(('SENS:RANGE ' + str(expected_field)))
self.command(('SENS:AVERAGE:COUNT ' + str(averaging_samples))) | def configure_field_measurement_setup(self, mode='DC', autorange=True, expected_field=None, averaging_samples=20):
'Configures the field measurement settings.\n\n Args:\n mode (str):\n * Modes are as follows:\n * "DC"\n * "AC" (0.1 - 500 Hz)\n * "HIFR" (50 Hz - 100 kHz)\n\n autorange (bool):\n Chooses whether the instrument automatically selects the best range for the measured value\n\n expected_field (float):\n When autorange is False, the expected_field is the largest field expected to be measured.\n It sets the lowest instrument field range capable of measuring the value.\n\n averaging_samples (int):\n The number of field samples to average. Each sample is 10 milliseconds of field information.\n\n '
self.command(('SENS:MODE ' + mode))
self.command(('SENS:RANGE:AUTO ' + str(int(autorange))))
if (expected_field is not None):
self.command(('SENS:RANGE ' + str(expected_field)))
self.command(('SENS:AVERAGE:COUNT ' + str(averaging_samples)))<|docstring|>Configures the field measurement settings.
Args:
mode (str):
* Modes are as follows:
* "DC"
* "AC" (0.1 - 500 Hz)
* "HIFR" (50 Hz - 100 kHz)
autorange (bool):
Chooses whether the instrument automatically selects the best range for the measured value
expected_field (float):
When autorange is False, the expected_field is the largest field expected to be measured.
It sets the lowest instrument field range capable of measuring the value.
averaging_samples (int):
The number of field samples to average. Each sample is 10 milliseconds of field information.<|endoftext|> |
ac14cac3a21826af111c7405bb27d870d116c954fff1e8d8964d6a57763ffeb3 | def get_field_measurement_setup(self):
'Returns the mode, autoranging state, range, and number of averaging samples as a dictionary.'
measurement_setup = {'mode': self.query('SENS:MODE?'), 'autorange': bool(int(self.query('SENS:RANGE:AUTO?'))), 'expected_field': float(self.query('SENS:RANGE?')), 'averaging_samples': int(self.query('SENS:AVERAGE:COUNT?'))}
return measurement_setup | Returns the mode, autoranging state, range, and number of averaging samples as a dictionary. | lakeshore/teslameter.py | get_field_measurement_setup | lakeshorecryotronics/python-driver | 6 | python | def get_field_measurement_setup(self):
measurement_setup = {'mode': self.query('SENS:MODE?'), 'autorange': bool(int(self.query('SENS:RANGE:AUTO?'))), 'expected_field': float(self.query('SENS:RANGE?')), 'averaging_samples': int(self.query('SENS:AVERAGE:COUNT?'))}
return measurement_setup | def get_field_measurement_setup(self):
measurement_setup = {'mode': self.query('SENS:MODE?'), 'autorange': bool(int(self.query('SENS:RANGE:AUTO?'))), 'expected_field': float(self.query('SENS:RANGE?')), 'averaging_samples': int(self.query('SENS:AVERAGE:COUNT?'))}
return measurement_setup<|docstring|>Returns the mode, autoranging state, range, and number of averaging samples as a dictionary.<|endoftext|> |
4228d30ae4ff974487d3067302e5914d715de31186e2892aed2df7a3a56d0e23 | def configure_temperature_compensation(self, temperature_source='PROBE', manual_temperature=None):
'Configures how temperature compensation is applied to the field readings.\n\n Args:\n temperature_source (str):\n * Determines where the temperature measurement is drawn from. Options are:\n * "PROBE" (Compensation is based on measurement of a thermistor in the probe)\n * "MTEM" (Compensation is based on a manual temperature value provided by the user)\n * "NONE" (Temperature compensation is not applied)\n\n manual_temperature (float):\n Sets the temperature provided by the user for MTEMP (manual temperature) source in Celsius.\n\n '
self.command(('SENS:TCOM:TSOURCE ' + temperature_source))
if (manual_temperature is not None):
self.command(('SENS:TCOM:MTEM ' + str(manual_temperature))) | Configures how temperature compensation is applied to the field readings.
Args:
temperature_source (str):
* Determines where the temperature measurement is drawn from. Options are:
* "PROBE" (Compensation is based on measurement of a thermistor in the probe)
* "MTEM" (Compensation is based on a manual temperature value provided by the user)
* "NONE" (Temperature compensation is not applied)
manual_temperature (float):
Sets the temperature provided by the user for MTEMP (manual temperature) source in Celsius. | lakeshore/teslameter.py | configure_temperature_compensation | lakeshorecryotronics/python-driver | 6 | python | def configure_temperature_compensation(self, temperature_source='PROBE', manual_temperature=None):
'Configures how temperature compensation is applied to the field readings.\n\n Args:\n temperature_source (str):\n * Determines where the temperature measurement is drawn from. Options are:\n * "PROBE" (Compensation is based on measurement of a thermistor in the probe)\n * "MTEM" (Compensation is based on a manual temperature value provided by the user)\n * "NONE" (Temperature compensation is not applied)\n\n manual_temperature (float):\n Sets the temperature provided by the user for MTEMP (manual temperature) source in Celsius.\n\n '
self.command(('SENS:TCOM:TSOURCE ' + temperature_source))
if (manual_temperature is not None):
self.command(('SENS:TCOM:MTEM ' + str(manual_temperature))) | def configure_temperature_compensation(self, temperature_source='PROBE', manual_temperature=None):
'Configures how temperature compensation is applied to the field readings.\n\n Args:\n temperature_source (str):\n * Determines where the temperature measurement is drawn from. Options are:\n * "PROBE" (Compensation is based on measurement of a thermistor in the probe)\n * "MTEM" (Compensation is based on a manual temperature value provided by the user)\n * "NONE" (Temperature compensation is not applied)\n\n manual_temperature (float):\n Sets the temperature provided by the user for MTEMP (manual temperature) source in Celsius.\n\n '
self.command(('SENS:TCOM:TSOURCE ' + temperature_source))
if (manual_temperature is not None):
self.command(('SENS:TCOM:MTEM ' + str(manual_temperature)))<|docstring|>Configures how temperature compensation is applied to the field readings.
Args:
temperature_source (str):
* Determines where the temperature measurement is drawn from. Options are:
* "PROBE" (Compensation is based on measurement of a thermistor in the probe)
* "MTEM" (Compensation is based on a manual temperature value provided by the user)
* "NONE" (Temperature compensation is not applied)
manual_temperature (float):
Sets the temperature provided by the user for MTEMP (manual temperature) source in Celsius.<|endoftext|> |
8fdbda9cd4ca679d80a2b60ac160257494fdd5f37580b15b0306e6734aed3499 | def get_temperature_compensation_source(self):
'Returns the source of temperature measurement for field compensation.'
return self.query('SENS:TCOM:TSOURCE?') | Returns the source of temperature measurement for field compensation. | lakeshore/teslameter.py | get_temperature_compensation_source | lakeshorecryotronics/python-driver | 6 | python | def get_temperature_compensation_source(self):
return self.query('SENS:TCOM:TSOURCE?') | def get_temperature_compensation_source(self):
return self.query('SENS:TCOM:TSOURCE?')<|docstring|>Returns the source of temperature measurement for field compensation.<|endoftext|> |
8d689018842d569ad8dc06f9e647d7f115c466118736434c0b91be7605787f05 | def get_temperature_compensation_manual_temp(self):
'Returns the manual temperature setting value in Celsius.'
return float(self.query('SENS:TCOM:MTEM?')) | Returns the manual temperature setting value in Celsius. | lakeshore/teslameter.py | get_temperature_compensation_manual_temp | lakeshorecryotronics/python-driver | 6 | python | def get_temperature_compensation_manual_temp(self):
return float(self.query('SENS:TCOM:MTEM?')) | def get_temperature_compensation_manual_temp(self):
return float(self.query('SENS:TCOM:MTEM?'))<|docstring|>Returns the manual temperature setting value in Celsius.<|endoftext|> |
73eeb25aeca1fc99cd025b03eea14c67700e655d07892e0da180691150eb25cf | def configure_field_units(self, units='TESLA'):
'Configures the field measurement units of the instrument.\n\n Args:\n units (str):\n * A unit of magnetic field. Options are:\n * "TESLA"\n * "GAUSS"\n\n '
self.command(('UNIT:FIELD ' + units)) | Configures the field measurement units of the instrument.
Args:
units (str):
* A unit of magnetic field. Options are:
* "TESLA"
* "GAUSS" | lakeshore/teslameter.py | configure_field_units | lakeshorecryotronics/python-driver | 6 | python | def configure_field_units(self, units='TESLA'):
'Configures the field measurement units of the instrument.\n\n Args:\n units (str):\n * A unit of magnetic field. Options are:\n * "TESLA"\n * "GAUSS"\n\n '
self.command(('UNIT:FIELD ' + units)) | def configure_field_units(self, units='TESLA'):
'Configures the field measurement units of the instrument.\n\n Args:\n units (str):\n * A unit of magnetic field. Options are:\n * "TESLA"\n * "GAUSS"\n\n '
self.command(('UNIT:FIELD ' + units))<|docstring|>Configures the field measurement units of the instrument.
Args:
units (str):
* A unit of magnetic field. Options are:
* "TESLA"
* "GAUSS"<|endoftext|> |
fa9e5853c7d0f190e2d781cdcd9468328b507b5b17acd63ec0079104a042c29c | def get_field_units(self):
'Returns the magnetic field units of the instrument.'
return self.query('UNIT:FIELD?') | Returns the magnetic field units of the instrument. | lakeshore/teslameter.py | get_field_units | lakeshorecryotronics/python-driver | 6 | python | def get_field_units(self):
return self.query('UNIT:FIELD?') | def get_field_units(self):
return self.query('UNIT:FIELD?')<|docstring|>Returns the magnetic field units of the instrument.<|endoftext|> |
79736d8eb683705d1923763d96eb566d57bb155a19db3adb9be9503cb34e4f49 | @requires_firmware_version('1.1.2018091003')
def configure_field_control_limits(self, voltage_limit=10.0, slew_rate_limit=10.0):
'Configures the limits of the field control output.\n\n Args:\n voltage_limit (float):\n The maximum voltage permitted at the field control output. Must be between 0 and 10V.\n\n slew_rate_limit (float):\n The maximum rate of change of the field control output voltage in volts per second.\n\n '
self.command(('SOURCE:FIELD:VLIMIT ' + str(voltage_limit)))
self.command(('SOURCE:FIELD:SLEW ' + str(slew_rate_limit))) | Configures the limits of the field control output.
Args:
voltage_limit (float):
The maximum voltage permitted at the field control output. Must be between 0 and 10V.
slew_rate_limit (float):
The maximum rate of change of the field control output voltage in volts per second. | lakeshore/teslameter.py | configure_field_control_limits | lakeshorecryotronics/python-driver | 6 | python | @requires_firmware_version('1.1.2018091003')
def configure_field_control_limits(self, voltage_limit=10.0, slew_rate_limit=10.0):
'Configures the limits of the field control output.\n\n Args:\n voltage_limit (float):\n The maximum voltage permitted at the field control output. Must be between 0 and 10V.\n\n slew_rate_limit (float):\n The maximum rate of change of the field control output voltage in volts per second.\n\n '
self.command(('SOURCE:FIELD:VLIMIT ' + str(voltage_limit)))
self.command(('SOURCE:FIELD:SLEW ' + str(slew_rate_limit))) | @requires_firmware_version('1.1.2018091003')
def configure_field_control_limits(self, voltage_limit=10.0, slew_rate_limit=10.0):
'Configures the limits of the field control output.\n\n Args:\n voltage_limit (float):\n The maximum voltage permitted at the field control output. Must be between 0 and 10V.\n\n slew_rate_limit (float):\n The maximum rate of change of the field control output voltage in volts per second.\n\n '
self.command(('SOURCE:FIELD:VLIMIT ' + str(voltage_limit)))
self.command(('SOURCE:FIELD:SLEW ' + str(slew_rate_limit)))<|docstring|>Configures the limits of the field control output.
Args:
voltage_limit (float):
The maximum voltage permitted at the field control output. Must be between 0 and 10V.
slew_rate_limit (float):
The maximum rate of change of the field control output voltage in volts per second.<|endoftext|> |
5e579168c57f223e508d21f2ef5e6eef41e8f52107407a9a04ffac07b3778585 | @requires_firmware_version('1.1.2018091003')
def get_field_control_limits(self):
'Returns the field control output voltage limit and slew rate limit.'
limits = {'voltage_limit': float(self.query('SOURCE:FIELD:VLIMIT?')), 'slew_rate_limit': float(self.query('SOURCE:FIELD:SLEW?'))}
return limits | Returns the field control output voltage limit and slew rate limit. | lakeshore/teslameter.py | get_field_control_limits | lakeshorecryotronics/python-driver | 6 | python | @requires_firmware_version('1.1.2018091003')
def get_field_control_limits(self):
limits = {'voltage_limit': float(self.query('SOURCE:FIELD:VLIMIT?')), 'slew_rate_limit': float(self.query('SOURCE:FIELD:SLEW?'))}
return limits | @requires_firmware_version('1.1.2018091003')
def get_field_control_limits(self):
limits = {'voltage_limit': float(self.query('SOURCE:FIELD:VLIMIT?')), 'slew_rate_limit': float(self.query('SOURCE:FIELD:SLEW?'))}
return limits<|docstring|>Returns the field control output voltage limit and slew rate limit.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.