docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Authenticate to Google with the provided credentials.
Args:
email (str): The account to use.
password (str): The account password.
android_id (str): An identifier for this client.
Raises:
LoginException: If there was a problem logging in.
|
def login(self, email, password, android_id):
    """Authenticate to Google with the provided credentials.

    Args:
        email (str): The account to use.
        password (str): The account password.
        android_id (str): An identifier for this client.

    Raises:
        LoginException: If there was a problem logging in.
    """
    self._email = email
    self._android_id = android_id
    result = gpsoauth.perform_master_login(email, password, android_id)
    master_token = result.get('Token')
    if master_token is None:
        raise exception.LoginException(result.get('Error'), result.get('ErrorDetail'))
    self._master_token = master_token
    self.refresh()
    return True
| 323,046
|
Authenticate to Google with the provided master token.
Args:
email (str): The account to use.
master_token (str): The master token.
android_id (str): An identifier for this client.
Raises:
LoginException: If there was a problem logging in.
|
def load(self, email, master_token, android_id):
    """Authenticate to Google with a previously obtained master token.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        android_id (str): An identifier for this client.

    Raises:
        LoginException: If there was a problem logging in.
    """
    # Store credentials, then mint a fresh access token.
    self._android_id = android_id
    self._master_token = master_token
    self._email = email
    self.refresh()
    return True
| 323,047
|
Send an authenticated request to a Google API.
Automatically retries if the access token has expired.
Args:
**req_kwargs: Arbitrary keyword arguments to pass to Requests.
Return:
dict: The parsed JSON response.
Raises:
APIException: If the server returns an error.
LoginException: If :py:meth:`login` has not been called.
|
def send(self, **req_kwargs):
    """Send an authenticated request to a Google API, retrying on a 401.

    Args:
        **req_kwargs: Arbitrary keyword arguments to pass to Requests.

    Returns:
        dict: The parsed JSON response.

    Raises:
        APIException: If the server returns an error.
        LoginException: If :py:meth:`login` has not been called.
    """
    # One initial attempt plus up to RETRY_CNT retries after token refreshes.
    for attempt in range(self.RETRY_CNT + 1):
        response = self._send(**req_kwargs).json()
        if 'error' not in response:
            return response
        error = response['error']
        # Anything other than an auth failure — or exhausting retries — is fatal.
        if error['code'] != 401 or attempt >= self.RETRY_CNT:
            raise exception.APIException(error['code'], error)
        logger.info('Refreshing access token')
        self._auth.refresh()
| 323,051
|
Send an authenticated request to a Google API.
Args:
**req_kwargs: Arbitrary keyword arguments to pass to Requests.
Return:
requests.Response: The raw response.
Raises:
LoginException: If :py:meth:`login` has not been called.
|
def _send(self, **req_kwargs):
    """Send an authenticated request to a Google API (no retries).

    Args:
        **req_kwargs: Arbitrary keyword arguments to pass to Requests.

    Returns:
        requests.Response: The raw response.

    Raises:
        LoginException: If :py:meth:`login` has not been called.
    """
    token = self._auth.getAuthToken()
    if token is None:
        raise exception.LoginException('Not logged in')
    # Only set the header if the caller didn't provide their own.
    req_kwargs.setdefault('headers', {'Authorization': 'OAuth ' + token})
    return self._session.request(**req_kwargs)
| 323,052
|
Sync up (and down) all changes.
Args:
target_version (str): The local change version.
nodes (List[dict]): A list of nodes to sync up to the server.
labels (List[dict]): A list of labels to sync up to the server.
Return:
dict: Description of all changes.
Raises:
APIException: If the server returns an error.
|
def changes(self, target_version=None, nodes=None, labels=None):
    """Sync up (and down) all changes.

    Args:
        target_version (str): The local change version.
        nodes (List[dict]): A list of nodes to sync up to the server.
        labels (List[dict]): A list of labels to sync up to the server.

    Returns:
        dict: Description of all changes.

    Raises:
        APIException: If the server returns an error.
    """
    if nodes is None:
        nodes = []
    if labels is None:
        labels = []
    current_time = time.time()
    params = {
        'nodes': nodes,
        'clientTimestamp': _node.NodeTimestamps.int_to_str(current_time),
        'requestHeader': {
            'clientSessionId': self._session_id,
            'clientPlatform': 'ANDROID',
            # Version numbers are presumably placeholders high enough to
            # unlock all capabilities — confirm against the server protocol.
            'clientVersion': {
                'major': '9',
                'minor': '9',
                'build': '9',
                'revision': '9'
            },
            'capabilities': [
                {'type': 'NC'}, # Color support (Send note color)
                {'type': 'PI'}, # Pinned support (Send note pinned)
                {'type': 'LB'}, # Labels support (Send note labels)
                {'type': 'AN'}, # Annotations support (Send annotations)
                {'type': 'SH'}, # Sharing support
                {'type': 'DR'}, # Drawing support
                {'type': 'TR'}, # Trash support (Stop setting the delete timestamp)
                {'type': 'IN'}, # Indentation support (Send listitem parent)
                {'type': 'SNB'}, # Allows modification of shared notes?
                {'type': 'MI'}, # Concise blob info?
                {'type': 'CO'}, # VSS_SUCCEEDED when off?
                # TODO: Figure out what these do:
                # {'type': 'EC'}, # ???
                # {'type': 'RB'}, # Rollback?
                # {'type': 'EX'}, # ???
            ]
        },
    }
    # Omitting targetVersion requests a full (initial) sync.
    if target_version is not None:
        params['targetVersion'] = target_version
    # Labels ride along inside the userInfo envelope.
    if labels:
        params['userInfo'] = {
            'labels': labels
        }
    logger.debug('Syncing %d labels and %d nodes', len(labels), len(nodes))
    return self.send(
        url=self._base_url + 'changes',
        method='POST',
        json=params
    )
| 323,054
|
Get the canonical link to a media blob.
Args:
blob (gkeepapi.node.Blob): The blob.
Returns:
str: A link to the media.
|
def get(self, blob):
    """Get the canonical link to a media blob.

    Args:
        blob (gkeepapi.node.Blob): The blob.

    Returns:
        str: A link to the media.
    """
    url = '{}{}/{}?s=0'.format(self._base_url, blob.parent.server_id, blob.server_id)
    # The server answers with a redirect; the Location header is the link.
    response = self._send(url=url, method='GET', allow_redirects=False)
    return response.headers.get('Location')
| 323,056
|
Authenticate to Google with the provided credentials & sync.
Args:
email (str): The account to use.
password (str): The account password.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
|
def login(self, username, password, state=None, sync=True):
    """Authenticate to Google with the provided credentials & sync.

    Args:
        username (str): The account to use.
        password (str): The account password.
        state (dict): Serialized state to load.
        sync (bool): Whether to sync after loading.

    Raises:
        LoginException: If there was a problem logging in.
    """
    auth = APIAuth(self.OAUTH_SCOPES)
    success = auth.login(username, password, get_mac())
    if success:
        self.load(auth, state, sync)
    return success
| 323,064
|
Authenticate to Google with the provided master token & sync.
Args:
email (str): The account to use.
master_token (str): The master token.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
|
def resume(self, email, master_token, state=None, sync=True):
    """Authenticate to Google with the provided master token & sync.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        state (dict): Serialized state to load.
        sync (bool): Whether to sync after loading.

    Raises:
        LoginException: If there was a problem logging in.
    """
    auth = APIAuth(self.OAUTH_SCOPES)
    success = auth.load(email, master_token, android_id=get_mac())
    if success:
        self.load(auth, state, sync)
    return success
| 323,065
|
Authenticate to Google with a prepared authentication object & sync.
Args:
auth (APIAuth): Authentication object.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
|
def load(self, auth, state=None, sync=True):
    """Authenticate to Google with a prepared authentication object & sync.

    Args:
        auth (APIAuth): Authentication object.
        state (dict): Serialized state to load.
        sync (bool): Whether to sync after loading.

    Raises:
        LoginException: If there was a problem logging in.
    """
    # Propagate credentials to every API client.
    for api in (self._keep_api, self._reminders_api, self._media_api):
        api.setAuth(auth)
    if state is not None:
        self.restore(state)
    if sync:
        self.sync(True)
| 323,066
|
Serialize note data.
Args:
state (dict): Serialized state to load.
|
def dump(self):
    """Serialize note data.

    Returns:
        dict: Serialized state.
    """
    # Walk children manually: the Keep object isn't aware of new ListItems
    # until they've been synced to the server.
    nodes = []
    for top in self.all():
        nodes.append(top)
        nodes.extend(top.children)
    return {
        'keep_version': self._keep_version,
        'labels': [label.save(False) for label in self.labels()],
        'nodes': [node.save(False) for node in nodes],
    }
| 323,067
|
Unserialize saved note data.
Args:
state (dict): Serialized state to load.
|
def restore(self, state):
    """Unserialize saved note data.

    Args:
        state (dict): Serialized state to load.
    """
    # Drop all existing local state before loading the snapshot.
    self._clear()
    self._parseUserInfo({'labels': state['labels']})
    self._parseNodes(state['nodes'])
    self._keep_version = state['keep_version']
| 323,068
|
Get a note with the given ID.
Args:
node_id (str): The note ID.
Returns:
gkeepapi.node.TopLevelNode: The Note or None if not found.
|
def get(self, node_id):
    """Get a note with the given ID.

    Args:
        node_id (str): The note ID.

    Returns:
        gkeepapi.node.TopLevelNode: The Note or None if not found.
    """
    # Try the local id first, then fall back to the server-id mapping.
    root = self._nodes[_node.Root.ID]
    return root.get(node_id) or root.get(self._sid_map.get(node_id))
| 323,069
|
Register a top level node (and its children) for syncing up to the server. There's no need to call this for nodes created by
:py:meth:`createNote` or :py:meth:`createList` as they are automatically added.
LoginException: If :py:meth:`login` has not been called.
Args:
node (gkeepapi.node.Node): The node to sync.
Raises:
Invalid: If the parent node is not found.
|
def add(self, node):
    """Register a top level node (and its children) for syncing up to the
    server. Nodes created by :py:meth:`createNote` / :py:meth:`createList`
    are added automatically.

    Args:
        node (gkeepapi.node.Node): The node to sync.

    Raises:
        InvalidException: If the node is not a top level node.
    """
    if node.parent_id != _node.Root.ID:
        raise exception.InvalidException('Not a top level node')
    self._nodes[node.id] = node
    root = self._nodes[node.parent_id]
    root.append(node, False)
| 323,070
|
Create a new managed note. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the note.
text (str): The text of the note.
Returns:
gkeepapi.node.List: The new note.
|
def createNote(self, title=None, text=None):
    """Create a new managed note. Changes are uploaded on :py:meth:`sync`.

    Args:
        title (str): The title of the note.
        text (str): The text of the note.

    Returns:
        gkeepapi.node.Note: The new note.
    """
    note = _node.Note()
    if title is not None:
        note.title = title
    if text is not None:
        note.text = text
    self.add(note)
    return note
| 323,072
|
Create a new list and populate it. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the list.
items (List[(str, bool)]): A list of tuples. Each tuple represents the text and checked status of the listitem.
Returns:
gkeepapi.node.List: The new list.
|
def createList(self, title=None, items=None):
    """Create a new list and populate it. Changes are uploaded on
    :py:meth:`sync`.

    Args:
        title (str): The title of the list.
        items (List[(str, bool)]): (text, checked) tuples for the items.

    Returns:
        gkeepapi.node.List: The new list.
    """
    new_list = _node.List()
    if title is not None:
        new_list.title = title
    for text, checked in (items or []):
        new_list.add(text, checked)
    self.add(new_list)
    return new_list
| 323,073
|
Create a new label.
Args:
name (str): Label name.
Returns:
gkeepapi.node.Label: The new label.
Raises:
LabelException: If the label exists.
|
def createLabel(self, name):
    """Create a new label.

    Args:
        name (str): Label name.

    Returns:
        gkeepapi.node.Label: The new label.

    Raises:
        LabelException: If the label exists.
    """
    if self.findLabel(name):
        raise exception.LabelException('Label exists')
    label = _node.Label()
    label.name = name
    self._labels[label.id] = label  # pylint: disable=protected-access
    return label
| 323,074
|
Find a label with the given name.
Args:
name (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name.
create (bool): Whether to create the label if it doesn't exist (only if name is a str).
Returns:
Union[gkeepapi.node.Label, None]: The label.
|
def findLabel(self, query, create=False):
    """Find a label with the given name.

    Args:
        query (Union[Pattern, str]): A str or compiled regular expression
            to match against the label name.
        create (bool): Whether to create the label if it doesn't exist
            (only if query is a str).

    Returns:
        Union[gkeepapi.node.Label, None]: The label, or None.
    """
    is_str = isinstance(query, six.string_types)
    if is_str:
        query = query.lower()
    for label in self._labels.values():
        if is_str:
            if query == label.name.lower():
                return label
        elif isinstance(query, Pattern) and query.search(label.name):
            return label
    if create and is_str:
        return self.createLabel(query)
    return None
| 323,075
|
Deletes a label.
Args:
label_id (str): Label id.
|
def deleteLabel(self, label_id):
    """Delete a label and detach it from all notes.

    Args:
        label_id (str): Label id.
    """
    label = self._labels.get(label_id)
    if label is None:
        return
    label.delete()
    for node in self.all():
        node.labels.remove(label)
| 323,076
|
Sync the local Keep tree with the server. If resyncing, local changes will be destroyed. Otherwise, local changes to notes, labels and reminders will be detected and synced up.
Args:
resync (bool): Whether to resync data.
Raises:
SyncException: If there is a consistency issue.
|
def sync(self, resync=False):
    """Sync the local Keep tree with the server. If resyncing, local changes
    will be destroyed. Otherwise, local changes to notes, labels and
    reminders will be detected and synced up.

    Args:
        resync (bool): Whether to resync data.

    Raises:
        ResyncRequiredException: If the server requests a full resync.
        UpgradeRecommendedException: If the server recommends a client upgrade.
        SyncException: If there is a consistency issue.
    """
    if resync:
        self._clear()
    # Reminder sync loop: pull until the server reports no newer version.
    while True:
        logger.debug('Starting reminder sync: %s', self._reminder_version)
        changes = self._reminders_api.list()
        if 'task' in changes:
            self._parseTasks(changes['task'])
        self._reminder_version = changes['storageVersion']
        logger.debug('Finishing sync: %s', self._reminder_version)
        history = self._reminders_api.history(self._reminder_version)
        if self._reminder_version == history['highestStorageVersion']:
            break
    # Keep sync loop: push dirty nodes/labels and pull server changes,
    # continuing while the server reports a truncated change set.
    while True:
        logger.debug('Starting keep sync: %s', self._keep_version)
        # Labels are only sent if at least one of them is dirty.
        labels_updated = any((i.dirty for i in self._labels.values()))
        changes = self._keep_api.changes(
            target_version=self._keep_version,
            nodes=[i.save() for i in self._findDirtyNodes()],
            labels=[i.save() for i in self._labels.values()] if labels_updated else None,
        )
        if changes.get('forceFullResync'):
            raise exception.ResyncRequiredException('Full resync required')
        if changes.get('upgradeRecommended'):
            raise exception.UpgradeRecommendedException('Upgrade recommended')
        if 'userInfo' in changes:
            self._parseUserInfo(changes['userInfo'])
        if 'nodes' in changes:
            self._parseNodes(changes['nodes'])
        self._keep_version = changes['toVersion']
        logger.debug('Finishing sync: %s', self._keep_version)
        if not changes['truncated']:
            break
    # In debug builds, verify internal consistency after the sync.
    if _node.DEBUG:
        self._clean()
| 323,077
|
Helper to construct a node from a dict.
Args:
raw (dict): Raw node representation.
Returns:
Node: A Node object or None.
|
def from_json(raw):
    """Helper to construct a node from a dict.

    Args:
        raw (dict): Raw node representation.

    Returns:
        Node: A Node object or None if the type is unknown.
    """
    _type = raw.get('type')
    try:
        ncls = _type_map[NodeType(_type)]
    except (KeyError, ValueError) as e:
        # Unrecognized node type: warn and drop (or raise in debug builds).
        logger.warning('Unknown node type: %s', _type)
        if DEBUG:
            raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e)
        return None
    node = ncls()
    node.load(raw)
    return node
| 323,083
|
Unserialize from raw representation. (Wrapper)
Args:
raw (dict): Raw.
Raises:
ParseException: If there was an error parsing data.
|
def load(self, raw):
    """Unserialize from raw representation. (Wrapper)

    Args:
        raw (dict): Raw.

    Raises:
        ParseException: If there was an error parsing data.
    """
    try:
        self._load(raw)
    except (KeyError, ValueError) as e:
        # Wrap low-level parse failures with the node type and raw payload.
        raise_from(exception.ParseException('Parse error in %s' % (type(self)), raw), e)
| 323,085
|
Serialize into raw representation. Clears the dirty bit by default.
Args:
clean (bool): Whether to clear the dirty bit.
Returns:
dict: Raw.
|
def save(self, clean=True):
    """Serialize into raw representation. Clears the dirty bit by default.

    Args:
        clean (bool): Whether to clear the dirty bit.

    Returns:
        dict: Raw.
    """
    raw = {}
    if clean:
        self._dirty = False
    else:
        # Preserve the dirty bit in the serialized form instead of clearing it.
        raw['_dirty'] = self._dirty
    return raw
| 323,086
|
Helper to construct an annotation from a dict.
Args:
raw (dict): Raw annotation representation.
Returns:
Node: An Annotation object or None.
|
def from_json(cls, raw):
    """Helper to construct an annotation from a dict.

    The concrete class is selected by which discriminating key is present
    in the raw dict; order matters if multiple keys are present.

    Args:
        raw (dict): Raw annotation representation.

    Returns:
        Union[Annotation, None]: An Annotation object, or None if the shape
            is not recognized.
    """
    bcls = None
    if 'webLink' in raw:
        bcls = WebLink
    elif 'topicCategory' in raw:
        bcls = Category
    elif 'taskAssist' in raw:
        bcls = TaskAssist
    elif 'context' in raw:
        bcls = Context
    if bcls is None:
        # Unknown annotation shape: warn and drop.
        logger.warning('Unknown annotation type: %s', raw.keys())
        return None
    annotation = bcls()
    annotation.load(raw)
    return annotation
| 323,101
|
Add an annotation.
Args:
annotation (gkeepapi.node.Annotation): An Annotation object.
Returns:
gkeepapi.node.Annotation: The Annotation.
|
def append(self, annotation):
    """Add an annotation.

    Args:
        annotation (gkeepapi.node.Annotation): An Annotation object.

    Returns:
        gkeepapi.node.Annotation: The Annotation.
    """
    self._dirty = True
    self._annotations[annotation.id] = annotation
    return annotation
| 323,107
|
Removes an annotation.
Args:
annotation (gkeepapi.node.Annotation): An Annotation object.
Returns:
gkeepapi.node.Annotation: The Annotation.
|
def remove(self, annotation):
    """Remove an annotation.

    Args:
        annotation (gkeepapi.node.Annotation): An Annotation object.
    """
    # pop() avoids a separate membership check; missing ids are a no-op.
    self._annotations.pop(annotation.id, None)
    self._dirty = True
| 323,108
|
Add a collaborator.
Args:
str : Collaborator email address.
|
def add(self, email):
    """Add a collaborator.

    Args:
        email (str): Collaborator email address.
    """
    # setdefault only inserts when the email isn't already present.
    self._collaborators.setdefault(email, ShareRequestValue.Add)
    self._dirty = True
| 323,118
|
Remove a Collaborator.
Args:
str : Collaborator email address.
|
def remove(self, email):
    """Remove a collaborator.

    Args:
        email (str): Collaborator email address.
    """
    if email in self._collaborators:
        # A pending Add is simply cancelled; an existing collaborator is
        # marked for removal on the server.
        if self._collaborators[email] == ShareRequestValue.Add:
            del self._collaborators[email]
        else:
            self._collaborators[email] = ShareRequestValue.Remove
    self._dirty = True
| 323,119
|
Add a label.
Args:
label (gkeepapi.node.Label): The Label object.
|
def add(self, label):
    """Add a label.

    Args:
        label (gkeepapi.node.Label): The Label object.
    """
    self._dirty = True
    self._labels[label.id] = label
| 323,123
|
Remove a label.
Args:
label (gkeepapi.node.Label): The Label object.
|
def remove(self, label):
    """Remove a label.

    Args:
        label (gkeepapi.node.Label): The Label object.
    """
    if label.id in self._labels:
        # The entry is nulled rather than deleted, preserving the key.
        self._labels[label.id] = None
    self._dirty = True
| 323,124
|
Mark the node as dirty.
Args:
edited (bool): Whether to set the edited time.
|
def touch(self, edited=False):
    """Mark the node as dirty and bump its timestamps.

    Args:
        edited (bool): Whether to set the edited time as well.
    """
    now = datetime.datetime.utcnow()
    self._dirty = True
    self.timestamps.updated = now
    if edited:
        self.timestamps.edited = now
| 323,125
|
Set the text value.
Args:
value (str): Text value.
|
def text(self, value):
    """Set the text value and mark the node dirty.

    Args:
        value (str): Text value.
    """
    self._text = value
    # NOTE(review): touch(True) below appears to also update the edited
    # timestamp, which would make this explicit set redundant — confirm
    # against the touch implementation before simplifying.
    self.timestamps.edited = datetime.datetime.utcnow()
    self.touch(True)
| 323,131
|
Add a new child node.
Args:
node (gkeepapi.Node): Node to add.
dirty (bool): Whether this node should be marked dirty.
|
def append(self, node, dirty=True):
    """Add a new child node.

    Args:
        node (gkeepapi.Node): Node to add.
        dirty (bool): Whether this node should be marked dirty.

    Returns:
        gkeepapi.Node: The added node.
    """
    node.parent = self
    self._children[node.id] = node
    if dirty:
        self.touch()
    return node
| 323,133
|
Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
|
def remove(self, node, dirty=True):
    """Remove the given child node.

    Args:
        node (gkeepapi.Node): Node to remove.
        dirty (bool): Whether this node should be marked dirty.
    """
    child = self._children.pop(node.id, None)
    if child is not None:
        child.parent = None
    if dirty:
        self.touch()
| 323,134
|
Add a new item to the list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting.
|
def add(self, text, checked=False, sort=None):
    """Add a new item to the list.

    Args:
        text (str): The text.
        checked (bool): Whether this item is checked.
        sort (int): Item id for sorting.

    Returns:
        ListItem: The new item.
    """
    item = ListItem(parent_id=self.id, parent_server_id=self.server_id)
    item.checked = checked
    item.text = text
    if sort is not None:
        item.sort = sort
    self.append(item, True)
    self.touch(True)
    return item
| 323,145
|
Sort list items, taking into account parent items.
Args:
items (list[gkeepapi.node.ListItem]): Items to sort.
Returns:
list[gkeepapi.node.ListItem]: Sorted items.
|
def items_sort(cls, items):
    """Sort list items, taking into account parent items.

    Items are ordered descending by sort key; indented items are keyed by
    (parent sort, own sort) so they remain grouped under their parents.

    Args:
        items (list[gkeepapi.node.ListItem]): Items to sort.

    Returns:
        list[gkeepapi.node.ListItem]: Sorted items.
    """
    class t(tuple):
        """Tuple with None-aware comparison.

        zip_longest pads the shorter key with None; a None element sorts
        after any integer, which plain tuple comparison cannot express.
        """
        def __cmp__(self, other):
            for a, b in six.moves.zip_longest(self, other):
                if a != b:
                    if a is None:
                        return 1
                    if b is None:
                        return -1
                    return a - b
            return 0
        def __lt__(self, other):
            return self.__cmp__(other) < 0
        # BUG FIX: __gt__ and __ge__ were misspelled __gt_ / __ge_, so they
        # were never invoked as comparison operators.
        def __gt__(self, other):
            return self.__cmp__(other) > 0
        def __le__(self, other):
            return self.__cmp__(other) <= 0
        def __ge__(self, other):
            return self.__cmp__(other) >= 0
        def __eq__(self, other):
            return self.__cmp__(other) == 0
        def __ne__(self, other):
            return self.__cmp__(other) != 0
    def key_func(x):
        # Indented items inherit their parent's sort key as the primary key.
        if x.indented:
            return t((int(x.parent_item.sort), int(x.sort)))
        return t((int(x.sort), ))
    return sorted(items, key=key_func, reverse=True)
| 323,146
|
Add a new sub item to the list. This item must already be attached to a list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting.
|
def add(self, text, checked=False, sort=None):
    """Add a new sub item to the list. This item must already be attached
    to a list.

    Args:
        text (str): The text.
        checked (bool): Whether this item is checked.
        sort (int): Item id for sorting.

    Returns:
        The new item.

    Raises:
        InvalidException: If the item has no parent.
    """
    if self.parent is None:
        raise exception.InvalidException('Item has no parent')
    # Create the item on the parent list, then indent it under this item.
    item = self.parent.add(text, checked, sort)
    self.indent(item)
    return item
| 323,152
|
Indent an item. Does nothing if the target has subitems.
Args:
node (gkeepapi.node.ListItem): Item to indent.
dirty (bool): Whether this node should be marked dirty.
|
def indent(self, node, dirty=True):
    """Indent an item. Does nothing if the target has subitems.

    Args:
        node (gkeepapi.node.ListItem): Item to indent.
        dirty (bool): Whether this node should be marked dirty.
    """
    if node.subitems:
        return
    node.parent_item = self
    node.super_list_item_id = self.id
    self._subitems[node.id] = node
    if dirty:
        node.touch(True)
| 323,153
|
Dedent an item. Does nothing if the target is not indented under this item.
Args:
node (gkeepapi.node.ListItem): Item to dedent.
dirty (bool): Whether this node should be marked dirty.
|
def dedent(self, node, dirty=True):
    """Dedent an item. Does nothing if the target is not indented under
    this item.

    Args:
        node (gkeepapi.node.ListItem): Item to dedent.
        dirty (bool): Whether this node should be marked dirty.
    """
    if node.id not in self._subitems:
        return
    self._subitems.pop(node.id)
    node.parent_item = None
    node.super_list_item_id = None
    if dirty:
        node.touch(True)
| 323,154
|
Helper to construct a blob from a dict.
Args:
raw (dict): Raw blob representation.
Returns:
NodeBlob: A NodeBlob object or None.
|
def from_json(cls, raw):
    """Helper to construct a blob from a dict.

    Args:
        raw (dict): Raw blob representation, or None.

    Returns:
        NodeBlob: A NodeBlob object or None.
    """
    if raw is None:
        return None
    _type = raw.get('type')
    try:
        bcls = cls._blob_type_map[BlobType(_type)]
    except (KeyError, ValueError) as e:
        # Unrecognized blob type: warn and drop (or raise in debug builds).
        logger.warning('Unknown blob type: %s', _type)
        if DEBUG:
            raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e)
        return None
    blob = bcls()
    blob.load(raw)
    return blob
| 323,172
|
Plot a scatter plot from the given data.
Arguments:
f -- comma delimited file w/ x,y coordinates
xs -- if f not specified this is a file w/ x coordinates
ys -- if f not specified this is a file w/ y coordinates
size -- size of the plot
pch -- shape of the points (any character)
colour -- colour of the points
title -- title of the plot
|
def plot_scatter(f, xs, ys, size, pch, colour, title):
    """Plot a scatter plot from the given data.

    Arguments:
        f -- comma delimited file (path or file object) w/ x,y coordinates
        xs -- if f not specified, a file w/ x coordinates
        ys -- if f not specified, a file w/ y coordinates
        size -- size of the plot
        pch -- shape of the points (any character)
        colour -- colour of the points
        title -- title of the plot
    """
    cs = None
    if f:
        # Accept either a path or an already-open file object.
        if isinstance(f, str):
            with open(f) as handle:
                rows = [tuple(line.strip().split(',')) for line in handle]
        else:
            rows = [tuple(line.strip().split(',')) for line in f]
        xs = [float(r[0]) for r in rows]
        ys = [float(r[1]) for r in rows]
        # An optional third column carries per-point categories.
        if len(rows[0]) > 2:
            cs = [r[2].strip() for r in rows]
    elif isinstance(xs, list) and isinstance(ys, list):
        # Coordinates already provided as lists; nothing to load.
        pass
    else:
        with open(xs) as handle:
            xs = [float(str(row).strip()) for row in handle]
        with open(ys) as handle:
            ys = [float(str(row).strip()) for row in handle]
    _plot_scatter(xs, ys, size, pch, colour, title, cs)
| 323,886
|
Computes Levenshtein Distance between two words
Args:
:param w1: str
:param w2: str
:return: int
Examples:
>>> Levenshtein.Levenshtein_Distance('noctis', 'noctem')
2
>>> Levenshtein.Levenshtein_Distance('nox', 'nochem')
4
>>> Levenshtein.Levenshtein_Distance('orbis', 'robis')
2
|
def Levenshtein_Distance(w1, w2):
    """Compute the Levenshtein distance between two words.

    Uses the two-row dynamic-programming formulation: prev holds distances
    for the previous character of w1; curr is filled in and the rows are
    swapped after each character.

    Args:
        w1 (str): first word.
        w2 (str): second word.

    Returns:
        int: the minimum number of single-character edits.

    Examples:
        >>> Levenshtein_Distance('noctis', 'noctem')
        2
        >>> Levenshtein_Distance('orbis', 'robis')
        2
    """
    n = len(w2)
    prev = list(range(n + 1))
    curr = [0] * (n + 1)
    for i, ch1 in enumerate(w1):
        curr[0] = i + 1
        for j, ch2 in enumerate(w2):
            deletion = prev[j + 1] + 1
            insertion = curr[j] + 1
            substitution = prev[j] + (ch1 != ch2)
            curr[j + 1] = min(deletion, insertion, substitution)
        prev, curr = curr, prev
    return prev[-1]
| 323,903
|
Set up a unicode character.
Arguments:
unicodeHexValue -- an integer that should correspond to a
Unicode code point.
block -- the CharacterBlock this character belongs to.
Raises:
ValueError -- if unicodeHexValue is not a valid code point.
|
def __init__(self, unicodeHexValue, block):
    """Set up a unicode character.

    Arguments:
        unicodeHexValue -- an integer that should correspond to a
            Unicode code point.
        block -- the CharacterBlock this character belongs to.

    Raises:
        ValueError -- if unicodeHexValue is not a valid code point.
    """
    # 0x10FFFF is the maximum valid Unicode code point.
    if unicodeHexValue < 0 or unicodeHexValue > 0x10FFFF:
        raise ValueError("numeric value outside Unicode range")
    self.unicodeHexValue = unicodeHexValue
    # py23char: presumably a py2/py3-compatible chr() — confirm its contract.
    self.unichr = py23char(self.unicodeHexValue)
    # unicodedata.name raises ValueError for unnamed code points.
    self.name = unicodedata.name(self.unichr)
    # Maps of equivalent character forms; populated elsewhere.
    self.equivalents = {}
    self._block = block
| 324,415
|
Initializes the data collection wrapper.
Args:
env: The environment to monitor.
directory: Where to store collected data.
collect_freq: How often to save simulation state, in terms of environment steps.
flush_freq: How frequently to dump data to disk, in terms of environment steps.
|
def __init__(self, env, directory, collect_freq=1, flush_freq=100):
    """Initializes the data collection wrapper.

    Args:
        env: The environment to monitor.
        directory: Where to store collected data.
        collect_freq: How often to save simulation state, in environment steps.
        flush_freq: How frequently to dump data to disk, in environment steps.
    """
    super().__init__(env)
    # the base directory for all logging
    self.directory = directory
    # in-memory cache for simulation states and action info
    self.states = []
    self.action_infos = []  # stores information about actions taken
    # how often to save simulation state, in terms of environment steps
    self.collect_freq = collect_freq
    # how frequently to dump data to disk, in terms of environment steps
    self.flush_freq = flush_freq
    if not os.path.exists(directory):
        print("DataCollectionWrapper: making new directory at {}".format(directory))
        os.makedirs(directory)
    # store logging directory for current episode
    self.ep_directory = None
    # remember whether any environment interaction has occurred
    self.has_interaction = False
| 324,561
|
Loads a mujoco xml from file.
Args:
fname (str): path to the MJCF xml file.
|
def __init__(self, fname):
    """Loads a mujoco xml from file.

    Args:
        fname (str): path to the MJCF xml file.
    """
    self.file = fname
    self.folder = os.path.dirname(fname)
    self.tree = ET.parse(fname)
    self.root = self.tree.getroot()
    self.name = self.root.get("model")
    # Ensure each standard MJCF section exists, creating empty ones if needed.
    for section in ("worldbody", "actuator", "asset", "equality", "contact", "default"):
        setattr(self, section, self.create_default_element(section))
    self.resolve_asset_dependency()
| 324,566
|
Default merge method.
Args:
other: another MujocoXML instance
raises XML error if @other is not a MujocoXML instance.
merges <worldbody/>, <actuator/> and <asset/> of @other into @self
merge_body: True if merging child bodies of @other. Defaults to True.
|
def merge(self, other, merge_body=True):
    """Merge another MujocoXML instance into this one.

    Merges <worldbody/> (optionally), <asset/>, <actuator/>, <equality/>,
    <contact/> and <default/> of @other into @self.

    Args:
        other: another MujocoXML instance.
        merge_body: True if merging child bodies of @other.

    Raises:
        XMLError: if @other is not a MujocoXML instance.
    """
    if not isinstance(other, MujocoXML):
        raise XMLError("{} is not a MujocoXML instance.".format(type(other)))
    if merge_body:
        for body in other.worldbody:
            self.worldbody.append(body)
    self.merge_asset(other)
    # Copy the remaining sections element by element, preserving order.
    sections = (
        (other.actuator, self.actuator),
        (other.equality, self.equality),
        (other.contact, self.contact),
        (other.default, self.default),
    )
    for src, dst in sections:
        for element in src:
            dst.append(element)
| 324,569
|
Saves the xml to file.
Args:
fname: output file location
pretty: attempts!! to pretty print the output
|
def save_model(self, fname, pretty=False):
    """Saves the xml to file.

    Args:
        fname: output file location.
        pretty: attempts!! to pretty print the output.
    """
    with open(fname, "w") as out:
        serialized = ET.tostring(self.root, encoding="unicode")
        if pretty:
            # TODO: get a better pretty print library
            document = xml.dom.minidom.parseString(serialized)
            serialized = document.toprettyxml(newl="")
        out.write(serialized)
| 324,572
|
Initializes the Gym wrapper.
Args:
env (MujocoEnv instance): The environment to wrap.
keys (list of strings): If provided, each observation will
consist of concatenated keys from the wrapped environment's
observation dictionary. Defaults to robot-state and object-state.
|
def __init__(self, env, keys=None):
    """Initializes the Gym wrapper.

    Args:
        env (MujocoEnv instance): The environment to wrap.
        keys (list of strings): If provided, each observation will
            consist of concatenated keys from the wrapped environment's
            observation dictionary. Defaults to robot-state and object-state.
    """
    self.env = env
    if keys is None:
        assert self.env.use_object_obs, "Object observations need to be enabled."
        keys = ["robot-state", "object-state"]
    self.keys = keys
    # set up observation and action spaces by probing one reset observation
    flat_ob = self._flatten_obs(self.env.reset(), verbose=True)
    self.obs_dim = flat_ob.size
    # unbounded observation space matching the flattened observation size
    high = np.inf * np.ones(self.obs_dim)
    low = -high
    self.observation_space = spaces.Box(low=low, high=high)
    # action bounds come straight from the wrapped environment
    low, high = self.env.action_spec
    self.action_space = spaces.Box(low=low, high=high)
| 324,574
|
Filters keys of interest out and concatenate the information.
Args:
obs_dict: ordered dictionary of observations
|
def _flatten_obs(self, obs_dict, verbose=False):
ob_lst = []
for key in obs_dict:
if key in self.keys:
if verbose:
print("adding key: {}".format(key))
ob_lst.append(obs_dict[key])
return np.concatenate(ob_lst)
| 324,575
|
Reward function for the task.
The dense reward has three components.
Reaching: in [0, 1], to encourage the arm to reach the cube
Grasping: in {0, 0.25}, non-zero if arm is grasping the cube
Lifting: in {0, 1}, non-zero if arm has lifted the cube
The sparse reward only consists of the lifting component.
Args:
action (np array): unused for this task
Returns:
reward (float): the reward
|
def reward(self, action=None):
    """Reward function for the task.

    The dense reward has three components:
        Reaching: in [0, 1], to encourage the arm to reach the cube.
        Grasping: in {0, 0.25}, non-zero if the arm is grasping the cube.
        Lifting: in {0, 1}, non-zero if the arm has lifted the cube.
    The sparse reward only consists of the lifting component.

    Args:
        action (np array): unused for this task.

    Returns:
        reward (float): the reward.
    """
    reward = 0.
    # sparse completion reward
    if self._check_success():
        reward = 1.0
    # use a shaping reward
    if self.reward_shaping:
        # reaching reward: 1 at zero distance, decaying with tanh
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        dist = np.linalg.norm(gripper_site_pos - cube_pos)
        reaching_reward = 1 - np.tanh(10.0 * dist)
        reward += reaching_reward
        # grasping reward: both fingers must be in contact with the cube.
        # Contact pairs are unordered, so each pairing is checked both ways.
        touch_left_finger = False
        touch_right_finger = False
        for i in range(self.sim.data.ncon):
            c = self.sim.data.contact[i]
            if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id:
                touch_left_finger = True
            if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids:
                touch_left_finger = True
            if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id:
                touch_right_finger = True
            if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids:
                touch_right_finger = True
        if touch_left_finger and touch_right_finger:
            reward += 0.25
    return reward
| 324,596
|
Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration.
The rollout trajectory is saved to files in npz format.
Modify the DataCollectionWrapper wrapper to add new fields or change data formats.
Args:
env: environment to control
device (instance of Device class): to receive controls from the device
|
def collect_human_trajectory(env, device):
    """Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration.

    The rollout trajectory is saved to files in npz format.
    Modify the DataCollectionWrapper wrapper to add new fields or change data
    formats.

    Args:
        env: environment to control.
        device (instance of Device class): to receive controls from the device.
    """
    obs = env.reset()
    # rotate the gripper so we can see it easily
    env.set_robot_joint_positions([0, -1.18, 0.00, 2.18, 0.00, 0.57, 1.5708])
    env.viewer.set_camera(camera_id=2)
    env.render()
    is_first = True
    # episode terminates on a spacenav reset input or if task is completed
    reset = False
    task_completion_hold_count = -1  # counter to collect 10 timesteps after reaching goal
    device.start_control()
    while not reset:
        state = device.get_controller_state()
        dpos, rotation, grasp, reset = (
            state["dpos"],
            state["rotation"],
            state["grasp"],
            state["reset"],
        )
        # convert into a suitable end effector action for the environment
        current = env._right_hand_orn
        drotation = current.T.dot(rotation)  # relative rotation of desired from current
        dquat = T.mat2quat(drotation)
        grasp = grasp - 1.  # map 0 to -1 (open) and 1 to 0 (closed halfway)
        action = np.concatenate([dpos, dquat, [grasp]])
        obs, reward, done, info = env.step(action)
        if is_first:
            is_first = False
            # We grab the initial model xml and state and reload from those so that
            # we can support deterministic playback of actions from our demonstrations.
            # This is necessary due to rounding issues with the model xml and with
            # env.sim.forward(). We also have to do this after the first action is
            # applied because the data collector wrapper only starts recording
            # after the first action has been played.
            initial_mjstate = env.sim.get_state().flatten()
            xml_str = env.model.get_xml()
            env.reset_from_xml_string(xml_str)
            env.sim.reset()
            env.sim.set_state_from_flattened(initial_mjstate)
            env.sim.forward()
            env.viewer.set_camera(camera_id=2)
            env.render()
        if task_completion_hold_count == 0:
            break
        # state machine to check for having a success for 10 consecutive timesteps
        if env._check_success():
            if task_completion_hold_count > 0:
                task_completion_hold_count -= 1  # latched state, decrement count
            else:
                task_completion_hold_count = 10  # reset count on first success timestep
        else:
            task_completion_hold_count = -1  # null the counter if there's no success
    # cleanup for end of data collection episodes
    env.close()
| 324,599
|
Initialize a SpaceMouse handler.
Args:
vendor_id: HID device vendor id
product_id: HID device product id
Note:
Use hid.enumerate() to view all USB human interface devices (HID).
Make sure SpaceMouse is detected before running the script.
You can look up its vendor/product id from this method.
|
def __init__(self, vendor_id=9583, product_id=50735):
    """Initialize a SpaceMouse handler.

    Args:
        vendor_id: HID device vendor id.
        product_id: HID device product id.

    Note:
        Use hid.enumerate() to view all USB human interface devices (HID).
        Make sure SpaceMouse is detected before running the script.
        You can look up its vendor/product id from this method.
    """
    print("Opening SpaceMouse device")
    self.device = hid.device()
    self.device.open(vendor_id, product_id)  # SpaceMouse
    print("Manufacturer: %s" % self.device.get_manufacturer_string())
    print("Product: %s" % self.device.get_product_string())
    self._display_controls()
    # whether the single click-and-hold gesture is currently active
    self.single_click_and_hold = False
    # 6-DOF control state: [x, y, z, roll, pitch, yaw]
    self._control = [0., 0., 0., 0., 0., 0.]
    self._reset_state = 0
    # fixed rotation applied to align device axes with the environment
    self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
    self._enabled = False
    # launch a new listener thread to listen to SpaceMouse
    self.thread = threading.Thread(target=self.run)
    self.thread.daemon = True
    self.thread.start()
| 324,626
|
Mounts gripper to arm.
Throws error if robot already has a gripper or gripper type is incorrect.
Args:
arm_name (str): name of arm mount
gripper (MujocoGripper instance): gripper MJCF model
|
def add_gripper(self, arm_name, gripper):
    """Mounts a gripper to an arm.

    Throws an error if the arm already has a gripper or the gripper model
    is malformed.

    Args:
        arm_name (str): name of arm mount.
        gripper (MujocoGripper instance): gripper MJCF model.

    Raises:
        ValueError: If a gripper is already mounted on this arm.
        XMLError: If a gripper actuator is unnamed or badly named.
    """
    if arm_name in self.grippers:
        raise ValueError("Attempts to add multiple grippers to one body")
    arm_subtree = self.worldbody.find(".//body[@name='{}']".format(arm_name))
    # Validate actuator naming before touching the tree.
    for actuator in gripper.actuator:
        actuator_name = actuator.get("name")
        if actuator_name is None:
            raise XMLError("Actuator has no name")
        if not actuator_name.startswith("gripper"):
            raise XMLError(
                "Actuator name {} does not have prefix 'gripper'".format(actuator_name)
            )
    for body in gripper.worldbody:
        arm_subtree.append(body)
    self.merge(gripper, merge_body=False)
    self.grippers[arm_name] = gripper
| 324,629
|
Generator for grippers
Creates a Gripper instance with the provided name.
Args:
name: the name of the gripper class
Returns:
gripper: Gripper instance
Raises:
ValueError: If the gripper name is not recognized.
|
def gripper_factory(name):
    """Generator for grippers.

    Creates a Gripper instance with the provided name.

    Args:
        name (str): the name of the gripper class.

    Returns:
        gripper: Gripper instance.

    Raises:
        ValueError: If the gripper name is not recognized.
    """
    if name == "TwoFingerGripper":
        return TwoFingerGripper()
    if name == "LeftTwoFingerGripper":
        return LeftTwoFingerGripper()
    if name == "PR2Gripper":
        return PR2Gripper()
    if name == "RobotiqGripper":
        return RobotiqGripper()
    if name == "PushingGripper":
        return PushingGripper()
    if name == "RobotiqThreeFingerGripper":
        return RobotiqThreeFingerGripper()
    # Fixed typo in the error message ("Unkown" -> "Unknown").
    raise ValueError("Unknown gripper name {}".format(name))
| 324,663
|
Creates an actuator tag with attributes specified by @**kwargs.
Args:
joint: type of actuator transmission.
see all types here: http://mujoco.org/book/modeling.html#actuator
act_type (str): actuator type. Defaults to "actuator"
|
def new_actuator(joint, act_type="actuator", **kwargs):
    """Creates an actuator tag with attributes specified by @**kwargs.

    Args:
        joint: type of actuator transmission.
            see all types here: http://mujoco.org/book/modeling.html#actuator
        act_type (str): actuator type. Defaults to "actuator".

    Returns:
        xml.etree.ElementTree.Element: the new actuator element.
    """
    attributes = dict(kwargs)
    actuator = ET.Element(act_type, attrib=attributes)
    actuator.set("joint", joint)
    return actuator
| 324,676
|
Creates a site element with attributes specified by @**kwargs.
Args:
name (str): site name.
rgba: color and transparency. Defaults to solid red.
pos: 3d position of the site.
size ([float]): site size (sites are spherical by default).
|
def new_site(name, rgba=RED, pos=(0, 0, 0), size=(0.005,), **kwargs):
    """Creates a site element with attributes specified by @**kwargs.

    Args:
        name (str): site name.
        rgba: color and transparency. Defaults to solid red.
        pos: 3d position of the site.
        size ([float]): site size (sites are spherical by default).

    Returns:
        xml.etree.ElementTree.Element: the new site element.
    """
    kwargs["name"] = name
    kwargs["rgba"] = array_to_string(rgba)
    kwargs["pos"] = array_to_string(pos)
    kwargs["size"] = array_to_string(size)
    return ET.Element("site", attrib=kwargs)
| 324,677
|
Creates a geom element with attributes specified by @**kwargs.
Args:
geom_type (str): type of the geom.
see all types here: http://mujoco.org/book/modeling.html#geom
size: geom size parameters.
pos: 3d position of the geom frame.
rgba: color and transparency. Defaults to solid red.
group: the integer group that the geom belongs to. useful for
separating visual and physical elements.
|
def new_geom(geom_type, size, pos=(0, 0, 0), rgba=RED, group=0, **kwargs):
    """Creates a geom element with attributes specified by @**kwargs.

    Args:
        geom_type (str): type of the geom.
            see all types here: http://mujoco.org/book/modeling.html#geom
        size: geom size parameters.
        pos: 3d position of the geom frame.
        rgba: color and transparency. Defaults to solid red.
        group: the integer group that the geom belongs to. useful for
            separating visual and physical elements.

    Returns:
        xml.etree.ElementTree.Element: the new geom element.
    """
    kwargs["type"] = str(geom_type)
    kwargs["group"] = str(group)
    kwargs["size"] = array_to_string(size)
    kwargs["pos"] = array_to_string(pos)
    kwargs["rgba"] = array_to_string(rgba)
    return ET.Element("geom", attrib=kwargs)
| 324,678
|
Creates a body element with attributes specified by @**kwargs.
Args:
name (str): body name.
pos: 3d position of the body frame.
|
def new_body(name=None, pos=None, **kwargs):
    """
    Creates a body element with attributes specified by @**kwargs.

    Args:
        name (str): body name; omitted from the element when None.
        pos: 3d position of the body frame; omitted when None.

    Returns:
        the newly created body XML element.
    """
    attrs = dict(kwargs)
    if name is not None:
        attrs["name"] = name
    if pos is not None:
        attrs["pos"] = array_to_string(pos)
    return ET.Element("body", attrib=attrs)
| 324,679
|
Creates an inertial element with attributes specified by @**kwargs.
Args:
mass: The mass of inertial
|
def new_inertial(name=None, pos=(0, 0, 0), mass=None, **kwargs):
    """
    Creates an inertial element with attributes specified by @**kwargs.

    Args:
        name: accepted for signature symmetry with the other builders but
            currently unused. NOTE(review): MJCF inertial elements carry no
            name attribute — confirm this parameter is intentional.
        pos: 3d position of the inertial frame.
        mass: the mass of inertial; written only when provided.

    Returns:
        the newly created inertial XML element.
    """
    attrs = dict(kwargs)
    if mass is not None:
        attrs["mass"] = str(mass)
    attrs["pos"] = array_to_string(pos)
    return ET.Element("inertial", attrib=attrs)
| 324,680
|
Force the internal robot model to match the provided joint angles.
Args:
joint_positions (list): a list or flat numpy array of joint positions.
simulate (bool): If True, actually use physics simulation, else
write to physics state directly.
sync_last (bool): If False, don't sync the last joint angle. This
is useful for directly controlling the roll at the end effector.
|
def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):
    """
    Forces the internal (pybullet) robot model to match the given joint angles.

    Args:
        joint_positions (list): a list or flat numpy array of joint positions.
        simulate (bool): If True, drive the joints through physics
            simulation; otherwise write the joint state directly.
        sync_last (bool): If False, don't sync the last joint angle. This
            is useful for directly controlling the roll at the end effector.
    """
    num_joints = len(joint_positions) if sync_last else len(joint_positions) - 1
    for idx in range(num_joints):
        # self.actual maps our joint ordering onto pybullet joint indices,
        # so always index through it rather than using idx directly.
        bullet_joint = self.actual[idx]
        if simulate:
            p.setJointMotorControl2(
                self.ik_robot,
                bullet_joint,
                p.POSITION_CONTROL,
                targetVelocity=0,
                targetPosition=joint_positions[idx],
                force=500,
                positionGain=0.5,
                velocityGain=1.,
            )
        else:
            p.resetJointState(self.ik_robot, bullet_joint, joint_positions[idx])
| 324,686
|
Convert a pose in the base frame to a pose in the world frame.
Args:
pose_in_base: a (pos, orn) tuple.
Returns:
pose_in world: a (pos, orn) tuple.
|
def bullet_base_pose_to_world_pose(self, pose_in_base):
    """
    Converts a pose in the base frame to a pose in the world frame.

    Args:
        pose_in_base: a (pos, orn) tuple.

    Returns:
        pose_in_world: a (pos, orn) tuple.
    """
    # Query the base pose from pybullet once and lift it to a 4x4 matrix.
    base_pos, base_orn = p.getBasePositionAndOrientation(self.ik_robot)
    base_pose_in_world = T.pose2mat((np.array(base_pos), np.array(base_orn)))
    world_pose_mat = T.pose_in_A_to_pose_in_B(
        pose_A=T.pose2mat(pose_in_base), pose_A_in_B=base_pose_in_world
    )
    return T.mat2pose(world_pose_mat)
| 324,688
|
Converts given rotation matrix to quaternion.
Args:
rmat: 3x3 rotation matrix
        precise: If True, the input matrix is assumed to be a precise
rotation matrix and a faster algorithm is used.
Returns:
vec4 float quaternion angles
|
def mat2quat(rmat, precise=False):
    """
    Converts given rotation matrix to quaternion.

    Args:
        rmat: 3x3 rotation matrix (any array-like convertible to float32).
        precise: If True, the input matrix is assumed to be a precise
            rotation matrix and a faster, branch-based algorithm is used.
            Otherwise a robust eigenvalue decomposition is used.

    Returns:
        vec4 float quaternion in (x, y, z, w) order.
    """
    # np.asarray (instead of np.array(..., copy=False)) stays compatible with
    # NumPy 2.0, where copy=False raises when a copy is actually required.
    M = np.asarray(rmat, dtype=np.float32)[:3, :3]
    if precise:
        # Shepperd-style extraction. The classic formulation operates on the
        # 4x4 homogeneous matrix and reads M[3, 3]; the previous code kept
        # those M[3, 3] references after slicing M down to 3x3, which raised
        # IndexError whenever precise=True. For a rotation matrix the
        # homogeneous M[3, 3] is exactly 1.0, so it is substituted here.
        q = np.empty((4,))
        t = np.trace(M) + 1.0  # trace of the equivalent 4x4 homogeneous matrix
        if t > 1.0:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # Choose the largest diagonal element to keep the sqrt below
            # well conditioned.
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + 1.0
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            # Reorder from (x, y, z, w) to (w, x, y, z) to match the branch
            # above before normalization.
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t)
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array(
            [
                [m00 - m11 - m22, 0.0, 0.0, 0.0],
                [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
            ]
        )
        K /= 3.0
        # quaternion is Eigen vector of K that corresponds to largest
        # eigenvalue; reorder to (w, x, y, z) for the sign fix below.
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
        if q[0] < 0.0:
            np.negative(q, q)
    return q[[1, 2, 3, 0]]
| 324,703
|
Converts given rotation matrix to euler angles in radian.
Args:
rmat: 3x3 rotation matrix
axes: One of 24 axis sequences as string or encoded tuple
Returns:
converted euler angles in radian vec3 float
|
def mat2euler(rmat, axes="sxyz"):
    """
    Converts given rotation matrix to euler angles in radian.

    Args:
        rmat: 3x3 rotation matrix.
        axes: One of 24 axis sequences as string (e.g. "sxyz") or an
            already-encoded (firstaxis, parity, repetition, frame) tuple.

    Returns:
        converted euler angles in radian vec3 float
    """
    # Decode the axis convention; anything that is not a known string is
    # assumed to be an encoded 4-tuple already.
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        firstaxis, parity, repetition, frame = axes
    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]
    M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
    if repetition:
        # Repeated first/last axis (e.g. "zxz"); singular when sy ~ 0.
        sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k])
        if sy > EPS:
            ax = math.atan2(M[i, j], M[i, k])
            ay = math.atan2(sy, M[i, i])
            az = math.atan2(M[j, i], -M[k, i])
        else:
            # Gimbal lock: the third angle is folded into the first.
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(sy, M[i, i])
            az = 0.0
    else:
        # Three distinct axes (e.g. "xyz"); singular when cy ~ 0.
        cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i])
        if cy > EPS:
            ax = math.atan2(M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2(M[j, i], M[i, i])
        else:
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return vec((ax, ay, az))
| 324,704
|
Converts pose to homogeneous matrix.
Args:
pose: a (pos, orn) tuple where pos is vec3 float cartesian, and
orn is vec4 float quaternion.
Returns:
4x4 homogeneous matrix
|
def pose2mat(pose):
    """
    Converts a pose to a homogeneous matrix.

    Args:
        pose: a (pos, orn) tuple where pos is vec3 float cartesian, and
            orn is vec4 float quaternion.

    Returns:
        4x4 homogeneous matrix (float32).
    """
    pos, orn = pose
    homo = np.zeros((4, 4), dtype=np.float32)
    homo[:3, :3] = quat2mat(orn)
    homo[:3, 3] = np.array(pos, dtype=np.float32)
    homo[3, 3] = 1.
    return homo
| 324,705
|
Converts given quaternion (x, y, z, w) to matrix.
Args:
quaternion: vec4 float angles
Returns:
3x3 rotation matrix
|
def quat2mat(quaternion):
    """
    Converts given quaternion (x, y, z, w) to a rotation matrix.

    Args:
        quaternion: vec4 float angles in (x, y, z, w) order.

    Returns:
        3x3 rotation matrix (identity for a near-zero quaternion).
    """
    # Reorder into (w, x, y, z) on a private copy so the input is untouched.
    q = np.array(quaternion, dtype=np.float32, copy=True)[[3, 0, 1, 2]]
    norm_sq = np.dot(q, q)
    if norm_sq < EPS:
        # Degenerate quaternion: fall back to the identity rotation.
        return np.identity(3)
    q *= math.sqrt(2.0 / norm_sq)
    outer = np.outer(q, q)
    return np.array(
        [
            [1.0 - outer[2, 2] - outer[3, 3], outer[1, 2] - outer[3, 0], outer[1, 3] + outer[2, 0]],
            [outer[1, 2] + outer[3, 0], 1.0 - outer[1, 1] - outer[3, 3], outer[2, 3] - outer[1, 0]],
            [outer[1, 3] - outer[2, 0], outer[2, 3] + outer[1, 0], 1.0 - outer[1, 1] - outer[2, 2]],
        ]
    )
| 324,706
|
Computes the inverse of a homogenous matrix corresponding to the pose of some
frame B in frame A. The inverse is the pose of frame A in frame B.
Args:
pose: numpy array of shape (4,4) for the pose to inverse
Returns:
numpy array of shape (4,4) for the inverse pose
|
def pose_inv(pose):
    """
    Computes the inverse of a homogeneous pose matrix.

    If ``pose`` is the pose of frame B in frame A, the result is the pose
    of frame A in frame B.

    Args:
        pose: numpy array of shape (4, 4) for the pose to invert.

    Returns:
        numpy array of shape (4, 4) for the inverse pose.
    """
    # For T = [R t; 0 1] the inverse is [R.T  -R.T @ t; 0 1]: transposing
    # undoes the rotation, and the translation must be mapped back into the
    # rotated frame with the opposite sign.
    rot_transposed = pose[:3, :3].T
    inv = np.zeros((4, 4))
    inv[:3, :3] = rot_transposed
    inv[:3, 3] = -rot_transposed.dot(pose[:3, 3])
    inv[3, 3] = 1.0
    return inv
| 324,707
|
Converts linear and angular velocity of a point in frame A to the equivalent in frame B.
Args:
vel_A: 3-dim iterable for linear velocity in A
ang_vel_A: 3-dim iterable for angular velocity in A
pose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B
Returns:
vel_B, ang_vel_B: two numpy arrays of shape (3,) for the velocities in B
|
def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B):
    """
    Converts linear and angular velocity of a point in frame A to frame B.

    Args:
        vel_A: 3-dim iterable for linear velocity in A.
        ang_vel_A: 3-dim iterable for angular velocity in A.
        pose_A_in_B: numpy array of shape (4, 4), the pose of A in frame B.

    Returns:
        vel_B, ang_vel_B: two numpy arrays of shape (3,) for the
        velocities in B.
    """
    rot = pose_A_in_B[:3, :3]
    # Cross-product (skew-symmetric) matrix of the translation couples the
    # angular velocity into the linear term.
    translation_cross = _skew_symmetric_translation(pose_A_in_B[:3, 3])
    ang_vel_B = rot.dot(ang_vel_A)
    vel_B = rot.dot(vel_A) + translation_cross.dot(ang_vel_B)
    return vel_B, ang_vel_B
| 324,709
|
Converts linear and rotational force at a point in frame A to the equivalent in frame B.
Args:
force_A: 3-dim iterable for linear force in A
torque_A: 3-dim iterable for rotational force (moment) in A
pose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B
Returns:
force_B, torque_B: two numpy arrays of shape (3,) for the forces in B
|
def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):
    """
    Converts linear and rotational force at a point in frame A to frame B.

    Args:
        force_A: 3-dim iterable for linear force in A.
        torque_A: 3-dim iterable for rotational force (moment) in A.
        pose_A_in_B: numpy array of shape (4, 4), the pose of A in frame B.

    Returns:
        force_B, torque_B: two numpy arrays of shape (3,) for the forces
        in B.
    """
    rot_transposed = pose_A_in_B[:3, :3].T
    translation_cross = _skew_symmetric_translation(pose_A_in_B[:3, 3])
    force_B = rot_transposed.dot(force_A)
    # The moment picks up a lever-arm contribution from the translation.
    torque_B = -rot_transposed.dot(translation_cross.dot(force_A)) + rot_transposed.dot(torque_A)
    return force_B, torque_B
| 324,710
|
Makes a homogenous pose matrix from a translation vector and a rotation matrix.
Args:
translation: a 3-dim iterable
rotation: a 3x3 matrix
Returns:
pose: a 4x4 homogenous matrix
|
def make_pose(translation, rotation):
    """
    Makes a homogeneous pose matrix from a translation vector and a
    rotation matrix.

    Args:
        translation: a 3-dim iterable.
        rotation: a 3x3 matrix.

    Returns:
        pose: a 4x4 homogeneous matrix.
    """
    # Start from the identity so the bottom row is already [0, 0, 0, 1];
    # the top-left 3x3 and top-right column are then overwritten.
    pose = np.eye(4)
    pose[:3, :3] = rotation
    pose[:3, 3] = translation
    return pose
| 324,712
|
Returns the difference between two quaternion orientations as a 3 DOF numpy array.
For use in an impedance controller / task-space PD controller.
Args:
target_orn: 4-dim iterable, desired orientation as a (x, y, z, w) quaternion
current_orn: 4-dim iterable, current orientation as a (x, y, z, w) quaternion
Returns:
orn_error: 3-dim numpy array for current orientation error, corresponds to
(target_orn - current_orn)
|
def get_orientation_error(target_orn, current_orn):
    """
    Returns the difference between two quaternion orientations as a 3-DOF
    numpy array, for use in an impedance / task-space PD controller.

    Args:
        target_orn: 4-dim iterable, desired orientation as an (x, y, z, w)
            quaternion.
        current_orn: 4-dim iterable, current orientation as an (x, y, z, w)
            quaternion.

    Returns:
        orn_error: 3-dim numpy array for the current orientation error,
        corresponds to (target_orn - current_orn).
    """
    # Reorder both quaternions from (x, y, z, w) to (w, x, y, z).
    cw, cx, cy, cz = current_orn[3], current_orn[0], current_orn[1], current_orn[2]
    target_wxyz = np.array(
        [target_orn[3], target_orn[0], target_orn[1], target_orn[2]])
    # Jacobian-like mapping from the quaternion difference to a 3-DOF error.
    pinv = np.array([
        [-cx, cw, -cz, cy],
        [-cy, cz, cw, -cx],
        [-cz, -cy, cx, cw],
    ])
    return 2.0 * pinv.dot(target_wxyz)
| 324,713
|
Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
target_pose: a 4x4 homogenous matrix for the target pose
current_pose: a 4x4 homogenous matrix for the current pose
Returns:
A 6-dim numpy array for the pose error.
|
def get_pose_error(target_pose, current_pose):
    """
    Computes the error (target pose - current pose) as a 6-dim vector.

    The first 3 components are the translational error and the last 3 are
    the rotational error.

    Args:
        target_pose: a 4x4 homogeneous matrix for the target pose.
        current_pose: a 4x4 homogeneous matrix for the current pose.

    Returns:
        A 6-dim numpy array for the pose error.
    """
    # Translational part: straight difference of the position columns.
    pos_err = target_pose[:3, 3] - current_pose[:3, 3]
    # Rotational part: half the sum of cross products between the
    # corresponding rotation-matrix columns (small-angle orientation error).
    rot_err = np.zeros(3)
    for axis in range(3):
        rot_err += np.cross(current_pose[:3, axis], target_pose[:3, axis])
    rot_err *= 0.5
    return np.concatenate([pos_err, rot_err])
| 324,714
|
Finds contact between two geom groups.
Args:
geoms_1: a list of geom names (string)
geoms_2: another list of geom names (string)
Returns:
iterator of all contacts between @geoms_1 and @geoms_2
|
def find_contacts(self, geoms_1, geoms_2):
    """
    Finds contacts between two geom groups.

    Args:
        geoms_1: a list of geom names (string).
        geoms_2: another list of geom names (string).

    Returns:
        iterator over all active contacts where one geom belongs to
        @geoms_1 and the other to @geoms_2 (in either order).
    """
    id2name = self.sim.model.geom_id2name
    for contact in self.sim.data.contact[: self.sim.data.ncon]:
        name_1 = id2name(contact.geom1)
        name_2 = id2name(contact.geom2)
        # Accept the pair regardless of which side each geom appears on.
        forward = name_1 in geoms_1 and name_2 in geoms_2
        flipped = name_2 in geoms_1 and name_1 in geoms_2
        if forward or flipped:
            yield contact
| 324,724
|
Overrides the superclass method to actuate the robot with the
passed joint velocities and gripper control.
Args:
action (numpy array): The control to apply to the robot. The first
@self.mujoco_robot.dof dimensions should be the desired
normalized joint velocities and if the robot has
a gripper, the next @self.gripper.dof dimensions should be
actuation controls for the gripper.
|
def _pre_action(self, action):
    """
    Actuates the robot with the passed joint velocities and gripper control.

    Args:
        action (numpy array): The control to apply to the robot. The first
            @self.mujoco_robot.dof dimensions are the desired normalized
            joint velocities; if the robot has a gripper, the next
            @self.gripper.dof dimensions are actuation controls for the
            gripper.
    """
    assert len(action) == self.dof, "environment got invalid action dimension"
    # Clip into the valid normalized range before any rescaling.
    low, high = self.action_spec
    action = np.clip(action, low, high)

    if self.has_gripper:
        # Split the action into arm and gripper parts and let the gripper
        # translate its slice into actual actuation values.
        arm_dof = self.mujoco_robot.dof
        arm_action = action[:arm_dof]
        gripper_action = self.gripper.format_action(
            action[arm_dof: arm_dof + self.gripper.dof]
        )
        action = np.concatenate([arm_action, gripper_action])

    # Map normalized commands into each actuator's control range.
    ctrl_range = self.sim.model.actuator_ctrlrange
    bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
    weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
    self.sim.data.ctrl[:] = bias + weight * action

    # Gravity compensation on the arm joints so the velocity commands do
    # not fight the robot's own weight.
    vel_idx = self._ref_joint_vel_indexes
    self.sim.data.qfrc_applied[vel_idx] = self.sim.data.qfrc_bias[vel_idx]

    if self.use_indicator_object:
        # Same gravity compensation for the free-floating indicator object.
        ind = slice(self._ref_indicator_vel_low, self._ref_indicator_vel_high)
        self.sim.data.qfrc_applied[ind] = self.sim.data.qfrc_bias[ind]
| 324,735
|
Playback data from an episode.
Args:
ep_dir: The path to the directory containing data for an episode.
|
def playback_trajectory(env, ep_dir):
    """
    Plays back data from an episode.

    Args:
        env: the environment used to replay and render the recorded states.
        ep_dir: The path to the directory containing data for an episode.
    """
    # Restore the exact model the episode was recorded with.
    with open(os.path.join(ep_dir, "model.xml"), "r") as f:
        env.reset_from_xml_string(f.read())

    # Replay every recorded state file in order, rendering each state.
    step = 0
    for state_file in sorted(glob(os.path.join(ep_dir, "state_*.npz"))):
        print(state_file)
        for state in np.load(state_file)["states"]:
            env.sim.set_state_from_flattened(state)
            env.sim.forward()
            env.render()
            step += 1
            if step % 100 == 0:
                print(step)
| 324,739
|
Loads client secrets from the given filename.
Args:
filename: The name of the file containing the JSON secret key.
Returns:
A 2-tuple, the first item containing the client id, and the second
item containing a client secret.
|
def _load_client_secrets(filename):
    """
    Loads client secrets from the given filename.

    Args:
        filename: The name of the file containing the JSON secret key.

    Returns:
        A 2-tuple, the first item containing the client id, and the second
        item containing a client secret.

    Raises:
        ValueError: If the secrets file describes anything other than a
            web flow.
    """
    loaded_type, loaded_info = clientsecrets.loadfile(filename)
    # Only the web-application flow is supported by this helper.
    if loaded_type != clientsecrets.TYPE_WEB:
        raise ValueError(
            'The flow specified in {} is not supported, only the WEB flow '
            'type is supported.'.format(loaded_type))
    return loaded_info['client_id'], loaded_info['client_secret']
| 325,477
|
Initializes client id and client secret based on the settings.
Args:
settings_instance: An instance of ``django.conf.settings``.
Returns:
A 2-tuple, the first item is the client id and the second
item is the client secret.
|
def _get_oauth2_client_id_and_secret(settings_instance):
secret_json = getattr(settings_instance,
'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)
if secret_json is not None:
return _load_client_secrets(secret_json)
else:
client_id = getattr(settings_instance, "GOOGLE_OAUTH2_CLIENT_ID",
None)
client_secret = getattr(settings_instance,
"GOOGLE_OAUTH2_CLIENT_SECRET", None)
if client_id is not None and client_secret is not None:
return client_id, client_secret
else:
raise exceptions.ImproperlyConfigured(
"Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or "
"both GOOGLE_OAUTH2_CLIENT_ID and "
"GOOGLE_OAUTH2_CLIENT_SECRET in settings.py")
| 325,478
|
Gets a Credentials storage object provided by the Django OAuth2 Helper
object.
Args:
request: Reference to the current request object.
Returns:
An :class:`oauth2.client.Storage` object.
|
def get_storage(request):
    """
    Gets a Credentials storage object for the current request.

    Uses the Django model configured in ``oauth2_settings.storage_model``
    when one is set; otherwise falls back to session-backed storage.

    Args:
        request: Reference to the current request object.

    Returns:
        An :class:`oauth2.client.Storage` object.
    """
    model_path = oauth2_settings.storage_model
    user_property = oauth2_settings.storage_model_user_property
    credentials_property = oauth2_settings.storage_model_credentials_property

    if not model_path:
        # No model configured: keep credentials in the session.
        return dictionary_storage.DictionaryStorage(
            request.session, key=_CREDENTIALS_KEY)

    # Resolve the dotted path "package.module.ClassName" to the model class.
    module_name, class_name = model_path.rsplit('.', 1)
    model_class = getattr(importlib.import_module(module_name), class_name)
    return storage.DjangoORMStorage(model_class,
                                    user_property,
                                    request.user,
                                    credentials_property)
| 325,480
|
Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
kwargs: the query string param and their values to build.
Returns:
A properly formatted redirect string.
|
def _redirect_with_params(url_name, *args, **kwargs):
    """
    Helper method to create a redirect response with URL params.

    Builds a redirect string that converts kwargs into a query string.

    Args:
        url_name: The name of the url to redirect to.
        kwargs: the query string params and their values to build.

    Returns:
        A properly formatted redirect string.
    """
    base_url = urlresolvers.reverse(url_name, args=args)
    # doseq=True so list-valued params expand into repeated keys.
    query = parse.urlencode(kwargs, True)
    return "{0}?{1}".format(base_url, query)
| 325,481
|
Initialize the Oauth2 Object.
Args:
request: Django request object.
scopes: Scopes desired for this OAuth2 flow.
return_url: The url to return to after the OAuth flow is complete,
defaults to the request's current URL path.
|
def __init__(self, request, scopes=None, return_url=None):
    """
    Initializes the OAuth2 helper object.

    Args:
        request: Django request object.
        scopes: Scopes desired for this OAuth2 flow.
        return_url: The url to return to after the OAuth flow is complete;
            defaults to the request's current URL path.
    """
    self.request = request
    self.return_url = return_url or request.get_full_path()
    # Always include the globally configured scopes; any extra scopes
    # requested for this flow are unioned in.
    self._scopes = set(oauth2_settings.scopes)
    if scopes:
        self._scopes |= set(scopes)
| 325,484
|
Write a credentials to the SQLAlchemy datastore.
Args:
credentials: :class:`oauth2client.Credentials`
|
def locked_put(self, credentials):
    """
    Writes credentials to the SQLAlchemy datastore.

    The storage lock must already be held by the caller.

    Args:
        credentials: :class:`oauth2client.Credentials`
    """
    filters = {self.key_name: self.key_value}
    # Reuse the existing row when present; otherwise create a fresh one
    # keyed by the same filter values.
    entity = (self.session.query(self.model_class)
              .filter_by(**filters)
              .first())
    if not entity:
        entity = self.model_class(**filters)
    setattr(entity, self.property_name, credentials)
    self.session.add(entity)
| 325,489
|
Deserialize a JSON-serialized instance.
Inverse to :meth:`to_json`.
Args:
json_data: dict or string, Serialized JSON (as a string or an
already parsed dictionary) representing a credential.
Returns:
ServiceAccountCredentials from the serialized data.
|
def from_json(cls, json_data):
    """
    Deserialize a JSON-serialized instance.

    Inverse to :meth:`to_json`.

    Args:
        json_data: dict or string, Serialized JSON (as a string or an
            already parsed dictionary) representing a credential.

    Returns:
        ServiceAccountCredentials from the serialized data.
    """
    if not isinstance(json_data, dict):
        json_data = json.loads(_helpers._from_bytes(json_data))
    private_key_pkcs8_pem = None
    # The payload carries either a PKCS#12 blob or a PKCS#8 PEM key;
    # build the signer from whichever is present.
    pkcs12_val = json_data.get(_PKCS12_KEY)
    password = None
    if pkcs12_val is None:
        private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']
        signer = crypt.Signer.from_string(private_key_pkcs8_pem)
    else:
        # NOTE: This assumes that private_key_pkcs8_pem is not also
        # in the serialized data. This would be very incorrect
        # state.
        pkcs12_val = base64.b64decode(pkcs12_val)
        password = json_data['_private_key_password']
        signer = crypt.Signer.from_string(pkcs12_val, password)
    credentials = cls(
        json_data['_service_account_email'],
        signer,
        scopes=json_data['_scopes'],
        private_key_id=json_data['_private_key_id'],
        client_id=json_data['client_id'],
        user_agent=json_data['_user_agent'],
        **json_data['_kwargs']
    )
    # Restore raw key material and token state that the constructor does
    # not accept directly.
    if private_key_pkcs8_pem is not None:
        credentials._private_key_pkcs8_pem = private_key_pkcs8_pem
    if pkcs12_val is not None:
        credentials._private_key_pkcs12 = pkcs12_val
    if password is not None:
        credentials._private_key_password = password
    credentials.invalid = json_data['invalid']
    credentials.access_token = json_data['access_token']
    credentials.token_uri = json_data['token_uri']
    credentials.revoke_uri = json_data['revoke_uri']
    token_expiry = json_data.get('token_expiry', None)
    if token_expiry is not None:
        credentials.token_expiry = datetime.datetime.strptime(
            token_expiry, client.EXPIRY_FORMAT)
    return credentials
| 325,504
|
Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens.
|
def create_with_claims(self, claims):
    """
    Create credentials that specify additional claims.

    Args:
        claims: dict, key-value pairs for claims to merge into the payload
            used when obtaining access tokens.

    Returns:
        ServiceAccountCredentials, a copy of the current service account
        credentials with updated claims.
    """
    merged_kwargs = dict(self._kwargs)
    merged_kwargs.update(claims)
    duplicate = self.__class__(self._service_account_email,
                               self._signer,
                               scopes=self._scopes,
                               private_key_id=self._private_key_id,
                               client_id=self.client_id,
                               user_agent=self._user_agent,
                               **merged_kwargs)
    # Carry over endpoint configuration and raw key material, which the
    # constructor does not accept directly.
    duplicate.token_uri = self.token_uri
    duplicate.revoke_uri = self.revoke_uri
    duplicate._private_key_pkcs8_pem = self._private_key_pkcs8_pem
    duplicate._private_key_pkcs12 = self._private_key_pkcs12
    duplicate._private_key_password = self._private_key_password
    return duplicate
| 325,506
|
Create a signed jwt.
Args:
http: unused
additional_claims: dict, additional claims to add to
the payload of the JWT.
Returns:
An AccessTokenInfo with the signed jwt
|
def get_access_token(self, http=None, additional_claims=None):
    """
    Create a signed jwt.

    Args:
        http: unused.
        additional_claims: dict, additional claims to add to the payload
            of the JWT.

    Returns:
        An AccessTokenInfo with the signed jwt.
    """
    if additional_claims is not None:
        # One-off token: sign immediately with the extra claims.
        token, _ = self._create_token(additional_claims)
        return client.AccessTokenInfo(
            access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)

    # No extra claims: serve the cached token, refreshing it lazily.
    if self.access_token is None or self.access_token_expired:
        self.refresh(None)
    return client.AccessTokenInfo(
        access_token=self.access_token, expires_in=self._expires_in())
| 325,508
|
Saves a file with read-write permissions on for the owner.
Args:
filename: String. Absolute path to file.
json_contents: JSON serializable object to be saved.
|
def _save_private_file(filename, json_contents):
temp_filename = tempfile.mktemp()
file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, 0o600)
with os.fdopen(file_desc, 'w') as file_handle:
json.dump(json_contents, file_handle, sort_keys=True,
indent=2, separators=(',', ': '))
shutil.move(temp_filename, filename)
| 325,515
|
Save the provided GoogleCredentials to the well known file.
Args:
credentials: the credentials to be saved to the well known file;
it should be an instance of GoogleCredentials
well_known_file: the name of the file where the credentials are to be
saved; this parameter is supposed to be used for
testing only
|
def save_to_well_known_file(credentials, well_known_file=None):
    """
    Save the provided GoogleCredentials to the well known file.

    Args:
        credentials: the credentials to be saved to the well known file;
            it should be an instance of GoogleCredentials.
        well_known_file: the name of the file where the credentials are to
            be saved; this parameter is supposed to be used for testing only.

    Raises:
        OSError: If the config directory for the file does not exist.
    """
    # TODO(orestica): move this method to tools.py
    # once the argparse import gets fixed (it is not present in Python 2.6)
    target = well_known_file if well_known_file is not None else _get_well_known_file()
    config_dir = os.path.dirname(target)
    if not os.path.isdir(config_dir):
        raise OSError(
            'Config directory does not exist: {0}'.format(config_dir))
    _save_private_file(target, credentials.serialization_data)
| 325,516
|
Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string or bytestring, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
|
def _extract_id_token(id_token):
    """
    Extract the JSON payload from a JWT.

    Does the extraction w/o checking the signature.

    Args:
        id_token: string or bytestring, OAuth 2.0 id_token.

    Returns:
        object, The deserialized JSON payload.

    Raises:
        VerifyJwtTokenError: If the token does not have exactly three
            dot-separated segments.
    """
    # isinstance (rather than the original type() equality check) is the
    # idiomatic test and also accepts bytes subclasses.
    if isinstance(id_token, bytes):
        segments = id_token.split(b'.')
    else:
        segments = id_token.split(u'.')
    if len(segments) != 3:
        raise VerifyJwtTokenError(
            'Wrong number of segments in token: {0}'.format(id_token))
    # The payload is the middle segment, base64url-encoded.
    return json.loads(
        _helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1])))
| 325,521
|
Parses response of an exchange token request.
Most providers return JSON but some (e.g. Facebook) return a
url-encoded string.
Args:
content: The body of a response
Returns:
Content as a dictionary object. Note that the dict could be empty,
i.e. {}. That basically indicates a failure.
|
def _parse_exchange_token_response(content):
    """
    Parses response of an exchange token request.

    Most providers return JSON but some (e.g. Facebook) return a
    url-encoded string.

    Args:
        content: The body of a response.

    Returns:
        Content as a dictionary object. Note that the dict could be empty,
        i.e. {}. That basically indicates a failure.
    """
    content = _helpers._from_bytes(content)
    try:
        parsed = json.loads(content)
    except Exception:
        # different JSON libs raise different exceptions,
        # so we just do a catch-all here
        parsed = _helpers.parse_unique_urlencoded(content)
    # some providers respond with 'expires', others with 'expires_in'
    if parsed and 'expires' in parsed:
        parsed['expires_in'] = parsed.pop('expires')
    return parsed
| 325,522
|
Utility class method to instantiate a Credentials subclass from JSON.
Expects the JSON string to have been produced by to_json().
Args:
json_data: string or bytes, JSON from to_json().
Returns:
An instance of the subclass of Credentials that was serialized with
to_json().
|
def new_from_json(cls, json_data):
    """
    Utility class method to instantiate a Credentials subclass from JSON.

    Expects the JSON string to have been produced by to_json().

    Args:
        json_data: string or bytes, JSON from to_json().

    Returns:
        An instance of the subclass of Credentials that was serialized with
        to_json().
    """
    json_data_as_unicode = _helpers._from_bytes(json_data)
    data = json.loads(json_data_as_unicode)
    # Find and call the right classmethod from_json() to restore
    # the object.
    module_name = data['_module']
    try:
        module_obj = __import__(module_name)
    except ImportError:
        # In case there's an object from the old package structure,
        # update it
        module_name = module_name.replace('.googleapiclient', '')
        module_obj = __import__(module_name)
    # The first __import__ above only proves the module is importable
    # (it returns the top-level package); this second call with fromlist
    # returns the submodule itself.
    module_obj = __import__(module_name,
                            fromlist=module_name.split('.')[:-1])
    kls = getattr(module_obj, data['_class'])
    return kls.from_json(json_data_as_unicode)
| 325,529
|
Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
|
def put(self, credentials):
    """
    Write a credential, holding the Storage lock for the duration.

    Args:
        credentials: Credentials, the credentials to store.
    """
    self.acquire_lock()
    try:
        self.locked_put(credentials)
    finally:
        # Always release, even if locked_put raised.
        self.release_lock()
| 325,530
|
Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
json_data: string or bytes, JSON to deserialize.
Returns:
An instance of a Credentials subclass.
|
def from_json(cls, json_data):
    """
    Instantiate a Credentials object from a JSON description of it.

    The JSON should have been produced by calling .to_json() on the object.

    Args:
        json_data: string or bytes, JSON to deserialize.

    Returns:
        An instance of a Credentials subclass.
    """
    data = json.loads(_helpers._from_bytes(json_data))
    # token_expiry is serialized as a formatted string; parse it back,
    # treating unparseable values as "no expiry known".
    expiry = data.get('token_expiry')
    if expiry and not isinstance(expiry, datetime.datetime):
        try:
            expiry = datetime.datetime.strptime(expiry, EXPIRY_FORMAT)
        except ValueError:
            expiry = None
        data['token_expiry'] = expiry
    restored = cls(
        data['access_token'],
        data['client_id'],
        data['client_secret'],
        data['refresh_token'],
        data['token_expiry'],
        data['token_uri'],
        data['user_agent'],
        revoke_uri=data.get('revoke_uri', None),
        id_token=data.get('id_token', None),
        id_token_jwt=data.get('id_token_jwt', None),
        token_response=data.get('token_response', None),
        scopes=data.get('scopes', None),
        token_info_uri=data.get('token_info_uri', None))
    restored.invalid = data['invalid']
    return restored
| 325,533
|
Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http: an object to be used to make HTTP requests.
Raises:
HttpAccessTokenRefreshError: When the refresh fails.
|
def _refresh(self, http):
if not self.store:
self._do_refresh_request(http)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and not new_cred.invalid and
new_cred.access_token != self.access_token and
not new_cred.access_token_expired):
logger.info('Updated access_token read from Storage')
self._updateFromCredential(new_cred)
else:
self._do_refresh_request(http)
finally:
self.store.release_lock()
| 325,538
|
Refresh the access_token using the refresh_token.
Args:
http: an object to be used to make HTTP requests.
Raises:
HttpAccessTokenRefreshError: When the refresh fails.
|
def _do_refresh_request(self, http):
    """
    Refresh the access_token using the refresh_token.

    Args:
        http: an object to be used to make HTTP requests.

    Raises:
        HttpAccessTokenRefreshError: When the refresh fails.
    """
    body = self._generate_refresh_request_body()
    headers = self._generate_refresh_request_headers()
    logger.info('Refreshing access_token')
    resp, content = transport.request(
        http, self.token_uri, method='POST',
        body=body, headers=headers)
    content = _helpers._from_bytes(content)
    if resp.status == http_client.OK:
        # Success: adopt the new token, and optionally a rotated refresh
        # token, expiry, and id_token from the response payload.
        d = json.loads(content)
        self.token_response = d
        self.access_token = d['access_token']
        self.refresh_token = d.get('refresh_token', self.refresh_token)
        if 'expires_in' in d:
            delta = datetime.timedelta(seconds=int(d['expires_in']))
            self.token_expiry = delta + _UTCNOW()
        else:
            self.token_expiry = None
        if 'id_token' in d:
            self.id_token = _extract_id_token(d['id_token'])
            self.id_token_jwt = d['id_token']
        else:
            self.id_token = None
            self.id_token_jwt = None
        # On temporary refresh errors, the user does not actually have to
        # re-authorize, so we unflag here.
        self.invalid = False
        if self.store:
            self.store.locked_put(self)
    else:
        # An {'error':...} response body means the token is expired or
        # revoked, so we flag the credentials as such.
        logger.info('Failed to retrieve access token: %s', content)
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            d = json.loads(content)
            if 'error' in d:
                error_msg = d['error']
                if 'error_description' in d:
                    error_msg += ': ' + d['error_description']
                self.invalid = True
                if self.store is not None:
                    self.store.locked_put(self)
        except (TypeError, ValueError):
            # Non-JSON error body: keep the generic status-code message.
            pass
        raise HttpAccessTokenRefreshError(error_msg, status=resp.status)
| 325,539
|
Revokes this credential and deletes the stored copy (if it exists).
Args:
http: an object to be used to make HTTP requests.
|
def _revoke(self, http):
self._do_revoke(http, self.refresh_token or self.access_token)
| 325,540
|
Revokes this credential and deletes the stored copy (if it exists).
Args:
http: an object to be used to make HTTP requests.
token: A string used as the token to be revoked. Can be either an
access_token or refresh_token.
Raises:
TokenRevokeError: If the revoke request does not return with a
200 OK.
|
def _do_revoke(self, http, token):
    """
    Revokes this credential and deletes the stored copy (if it exists).

    Args:
        http: an object to be used to make HTTP requests.
        token: A string used as the token to be revoked. Can be either an
            access_token or refresh_token.

    Raises:
        TokenRevokeError: If the revoke request does not return with a
            200 OK.
    """
    logger.info('Revoking token')
    query_params = {'token': token}
    token_revoke_uri = _helpers.update_query_params(
        self.revoke_uri, query_params)
    resp, content = transport.request(http, token_revoke_uri)
    # A 405 means the endpoint wants a POST rather than a GET; retry with
    # the token in the request body.
    if resp.status == http_client.METHOD_NOT_ALLOWED:
        body = urllib.parse.urlencode(query_params)
        resp, content = transport.request(http, token_revoke_uri,
                                          method='POST', body=body)
    if resp.status == http_client.OK:
        self.invalid = True
    else:
        # Prefer the provider's error message when the body is JSON.
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            d = json.loads(_helpers._from_bytes(content))
            if 'error' in d:
                error_msg = d['error']
        except (TypeError, ValueError):
            pass
        raise TokenRevokeError(error_msg)
    if self.store:
        self.store.delete()
| 325,541
|
Retrieves the list of authorized scopes from the OAuth2 provider.
Args:
http: an object to be used to make HTTP requests.
token: A string used as the token to identify the credentials to
the provider.
Raises:
        Error: When refresh fails, indicating that the access token is
invalid.
|
def _do_retrieve_scopes(self, http, token):
    """
    Retrieves the list of authorized scopes from the OAuth2 provider.

    Args:
        http: an object to be used to make HTTP requests.
        token: A string used as the token to identify the credentials to
            the provider.

    Raises:
        Error: When the request fails, indicating that the access token
            is invalid.
    """
    logger.info('Refreshing scopes')
    query_params = {'access_token': token, 'fields': 'scope'}
    token_info_uri = _helpers.update_query_params(
        self.token_info_uri, query_params)
    resp, content = transport.request(http, token_info_uri)
    content = _helpers._from_bytes(content)
    if resp.status == http_client.OK:
        # The provider returns scopes as a single space-delimited string.
        d = json.loads(content)
        self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))
    else:
        # Prefer the provider's error description when the body is JSON.
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            d = json.loads(content)
            if 'error_description' in d:
                error_msg = d['error_description']
        except (TypeError, ValueError):
            pass
        raise Error(error_msg)
| 325,542
|
Create an instance of OAuth2Credentials
This is one of the few types of Credentials that you should construct,
Credentials objects are usually instantiated by a Flow.
Args:
access_token: string, access token.
user_agent: string, The HTTP User-Agent to provide for this
application.
revoke_uri: string, URI for revoke endpoint. Defaults to None; a
token can't be revoked if this is None.
|
def __init__(self, access_token, user_agent, revoke_uri=None):
    """
    Create an instance of AccessTokenCredentials.

    This is one of the few types of Credentials that you should construct
    directly; Credentials objects are usually instantiated by a Flow.

    Args:
        access_token: string, access token.
        user_agent: string, The HTTP User-Agent to provide for this
            application.
        revoke_uri: string, URI for revoke endpoint. Defaults to None; a
            token can't be revoked if this is None.
    """
    # No client id/secret, refresh token, expiry or token endpoint: a bare
    # access token cannot be refreshed, only used and (optionally) revoked.
    super(AccessTokenCredentials, self).__init__(
        access_token,
        None,
        None,
        None,
        None,
        None,
        user_agent,
        revoke_uri=revoke_uri)
| 325,543
|
Create a Credentials object by reading information from a file.
It returns an object of type GoogleCredentials.
Args:
credential_filename: the path to the file from where the
credentials are to be read
Raises:
ApplicationDefaultCredentialsError: raised when the credentials
fail to be retrieved.
|
def from_stream(credential_filename):
    """
    Create a Credentials object by reading information from a file.

    It returns an object of type GoogleCredentials.

    Args:
        credential_filename: the path to the file from where the
            credentials are to be read.

    Raises:
        ApplicationDefaultCredentialsError: raised when the credentials
            fail to be retrieved.
    """
    # Guard clause: the argument must name an existing file.
    if not credential_filename or not os.path.isfile(credential_filename):
        raise ApplicationDefaultCredentialsError(
            'The parameter passed to the from_stream() '
            'method should point to a file.')
    try:
        return _get_application_default_credential_from_file(
            credential_filename)
    except (ApplicationDefaultCredentialsError, ValueError) as error:
        # Re-raise with context pointing back at this entry point.
        extra_help = (' (provided as parameter to the '
                      'from_stream() method)')
        _raise_exception_for_reading_json(credential_filename,
                                          extra_help,
                                          error)
| 325,549
|
Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
|
def verify(self, message, signature):
    """
    Verifies a message against a signature.

    Args:
        message: string or bytes, The message to verify. If string, will be
            encoded to bytes as utf-8.
        signature: string or bytes, The signature on the message. If
            string, will be encoded to bytes as utf-8.

    Returns:
        True if message was signed by the private key associated with the
        public key that this object was constructed with.
    """
    encoded = _helpers._to_bytes(message, encoding='utf-8')
    try:
        # rsa raises on a bad signature rather than returning False.
        return rsa.pkcs1.verify(encoded, signature, self._pubkey)
    except (ValueError, rsa.pkcs1.VerificationError):
        return False
| 325,557
|
Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
|
def sign(self, message):
    """
    Signs a message with this signer's private key.

    Args:
        message: bytes, Message to be signed.

    Returns:
        string, The SHA-256 PKCS#1 signature of the message.
    """
    encoded = _helpers._to_bytes(message, encoding='utf-8')
    return rsa.pkcs1.sign(encoded, self._key, 'SHA-256')
| 325,559
|
Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
|
def from_string(cls, key, password='notasecret'):
    """
    Construct an RsaSigner instance from a string.

    Args:
        key: string, private key in PEM format.
        password: string, password for private key file. Unused for PEM
            files.

    Returns:
        RsaSigner instance.

    Raises:
        ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
        PEM format.
    """
    key = _helpers._from_bytes(key)  # pem expects str in Py3
    marker_id, key_bytes = pem.readPemBlocksFromFile(
        six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
    if marker_id == 0:
        # PKCS#1: the DER blob is the RSA key itself.
        pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
                                             format='DER')
    elif marker_id == 1:
        # PKCS#8: unwrap the PrivateKeyInfo envelope first, then parse
        # the embedded PKCS#1 key.
        key_info, remaining = decoder.decode(
            key_bytes, asn1Spec=_PKCS8_SPEC)
        if remaining != b'':
            raise ValueError('Unused bytes', remaining)
        pkey_info = key_info.getComponentByName('privateKey')
        pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
                                             format='DER')
    else:
        raise ValueError('No key could be detected.')
    return cls(pkey)
| 325,560
|
Writes credentials to a file.
Refer to :func:`_load_credentials_file` for the format.
Args:
credentials_file: An open file handle, must be read/write.
credentials: A dictionary mapping user-defined keys to an instance of
:class:`oauth2client.client.Credentials`.
|
def _write_credentials_file(credentials_file, credentials):
    """
    Writes credentials to a file.

    Refer to :func:`_load_credentials_file` for the format.

    Args:
        credentials_file: An open file handle, must be read/write.
        credentials: A dictionary mapping user-defined keys to an instance
            of :class:`oauth2client.client.Credentials`.
    """
    serialized = {}
    for key, credential in iteritems(credentials):
        # Credentials are stored base64-encoded to avoid JSON-in-JSON
        # escaping issues in the file format.
        raw = _helpers._to_bytes(credential.to_json())
        serialized[key] = _helpers._from_bytes(base64.b64encode(raw))
    data = {'file_version': 2, 'credentials': serialized}

    # Overwrite the file contents in place.
    credentials_file.seek(0)
    json.dump(data, credentials_file)
    credentials_file.truncate()
| 325,563
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.