Search is not available for this dataset
text
stringlengths 75
104k
|
|---|
def extract_objects(self, fname, type_filter=None):
    '''Extract objects from a source file, caching results per file.
    Args:
      fname (str): Name of file to read from
      type_filter (class, optional): Object class to filter results
    Returns:
      List of objects extracted from the file.
    '''
    try:
        objects = self.object_cache[fname]
    except KeyError:
        # first time we see this file: parse it and remember the result
        with io.open(fname, 'rt', encoding='utf-8') as fh:
            objects = parse_verilog(fh.read())
        self.object_cache[fname] = objects
    if type_filter:
        return [obj for obj in objects if isinstance(obj, type_filter)]
    return objects
|
def extract_objects_from_source(self, text, type_filter=None):
    '''Extract object declarations from a text buffer.
    Args:
      text (str): Source code to parse
      type_filter (class, optional): Object class to filter results
    Returns:
      List of parsed objects.
    '''
    found = parse_verilog(text)
    if not type_filter:
        return found
    return [obj for obj in found if isinstance(obj, type_filter)]
|
def load_json_from_file(file_path):
    """Load schema from a JSON file.

    Raises ValueError (with the file name) if the file does not
    contain valid JSON.
    """
    with open(file_path) as f:
        try:
            return json.load(f)
        except ValueError as e:
            raise ValueError('Given file {} is not a valid JSON file: {}'.format(file_path, e))
|
def load_json_from_string(string):
    """Load schema from JSON string.

    Raises ValueError if the string is not valid JSON.
    """
    try:
        return json.loads(string)
    except ValueError as e:
        raise ValueError('Given string is not valid JSON: {}'.format(e))
|
def _generate_one_fake(self, schema):
    """
    Recursively walk a schema dict and replace each leaf value with
    fake data.

    For every key/value pair:
    - dict value: recurse (nested record)
    - list value: recurse into each item (list of records)
    - anything else: treat the value as the name of a faker provider
      and call it (base case)
    """
    result = {}
    for key, spec in schema.items():
        if isinstance(spec, dict):
            value = self._generate_one_fake(spec)
        elif isinstance(spec, list):
            value = [self._generate_one_fake(entry) for entry in spec]
        else:
            value = getattr(self._faker, spec)()
        result[key] = value
    return result
|
def fetch(method, uri, params_prefix=None, **params):
    """Fetch the given uri and return the contents of the response.

    :param str method: HTTP verb to use (e.g. "GET", "POST", "PUT")
    :param str uri: path, appended to CHALLONGE_API_URL with a ".xml" suffix
    :param str params_prefix: optional prefix for parameter names
        (see ``_prepare_params``)
    :param params: request parameters, sent urlencoded in the body
    :raises ChallongeException: when the server answers 422 with an
        ``<errors>`` XML document (application-level validation errors)
    """
    params = urlencode(_prepare_params(params, params_prefix))
    binary_params = params.encode('ASCII')
    # build the HTTP request
    url = "https://%s/%s.xml" % (CHALLONGE_API_URL, uri)
    req = Request(url, binary_params)
    # Request infers GET/POST from the presence of a body; override so the
    # caller's verb (e.g. PUT, DELETE) is actually used.
    req.get_method = lambda: method
    # use basic authentication
    user, api_key = get_credentials()
    auth_handler = HTTPBasicAuthHandler()
    auth_handler.add_password(
        realm="Application",
        uri=req.get_full_url(),
        user=user,
        passwd=api_key
    )
    opener = build_opener(auth_handler)
    try:
        response = opener.open(req)
    except HTTPError as e:
        # 422 is how challonge.com reports validation errors; re-raise
        # any other HTTP failure untouched.
        if e.code != 422:
            raise
        # wrap up application-level errors
        doc = ElementTree.parse(e).getroot()
        if doc.tag != "errors":
            raise
        errors = [e.text for e in doc]
        raise ChallongeException(*errors)
    return response
|
def fetch_and_parse(method, uri, params_prefix=None, **params):
    """Fetch the given uri and return the parsed root of the XML response."""
    response = fetch(method, uri, params_prefix, **params)
    root = ElementTree.parse(response).getroot()
    return _parse(root)
|
def _parse(root):
    """Recursively convert an Element into python data types.

    Element ``type`` attributes drive the conversion; elements flagged
    ``nil`` become ``None``, untyped elements stay strings.
    """
    if root.tag == "nil-classes":
        return []
    if root.get("type") == "array":
        return [_parse(el) for el in root]
    parsed = {}
    for el in root:
        el_type = el.get("type") or "string"
        if el.get("nil"):
            value = None
        elif el_type == "boolean":
            value = el.text.lower() == "true"
        elif el_type == "dateTime":
            value = iso8601.parse_date(el.text)
        elif el_type == "decimal":
            value = decimal.Decimal(el.text)
        elif el_type == "integer":
            value = int(el.text)
        else:
            value = el.text
        parsed[el.tag] = value
    return parsed
|
def _prepare_params(dirty_params, prefix=None):
    """Prepares parameters to be sent to challonge.com.

    Datetime-like values are converted to ISO 8601 strings and booleans
    to lowercase "true"/"false" (all challonge.com accepts). When
    `prefix` is given, keys like "name" become "prefix[name]", which is
    how challonge.com expects parameters describing specific objects.
    """
    clean = {}
    for key, value in dirty_params.items():
        if hasattr(value, "isoformat"):
            value = value.isoformat()
        elif isinstance(value, bool):
            # challonge.com only accepts lowercase true/false
            value = str(value).lower()
        out_key = "%s[%s]" % (prefix, key) if prefix else key
        clean[out_key] = value
    return clean
|
def expandDescendants(self, branches):
    """
    Expand descendants from a list of branches.
    :param list branches: list of immediate children as TreeOfContents objs
    :return: list of all descendants (each branch's descendants, then the
        branches' own sources)
    """
    all_descendants = []
    for branch in branches:
        all_descendants.extend(branch.descendants())
    return all_descendants + [branch.source for branch in branches]
|
def parseBranches(self, descendants):
    """
    Parse top level of markdown.
    :param list descendants: list of source objects
    :return: list of filtered TreeOfContents objects, one level deeper
    """
    # cond drops branches whose text is empty or whitespace-only
    parsed, parent, cond = [], False, lambda b: (b.string or '').strip()
    for branch in filter(cond, descendants):
        if self.getHeadingLevel(branch) == self.depth:
            # a heading at the current depth starts a new subtree
            parsed.append({'root':branch.string, 'source':branch})
            parent = True
        elif not parent:
            # content seen before any heading becomes its own node
            parsed.append({'root':branch.string, 'source':branch})
        else:
            # everything else belongs under the most recent heading
            parsed[-1].setdefault('descendants', []).append(branch)
    return [TOC(depth=self.depth+1, **kwargs) for kwargs in parsed]
|
def fromMarkdown(md, *args, **kwargs):
    """
    Create a TreeOfContents from markdown text.
    :param str md: markdown source
    :return: TreeOfContents object
    """
    html = markdown(md, *args, **kwargs)
    return TOC.fromHTML(html)
|
def fromHTML(html, *args, **kwargs):
    """
    Create a TreeOfContents from an HTML string.
    :param str html: HTML
    :return: TreeOfContents object rooted at the whole document
    """
    soup = BeautifulSoup(html, 'html.parser', *args, **kwargs)
    return TOC('[document]', source=soup, descendants=soup.children)
|
def from_data(cls, type, **data):
    """Create an attachment from data.
    :param str type: attachment type
    :param kwargs data: additional attachment data
    :return: an attachment subclass object (or a generic attachment when
        the type is unknown)
    :rtype: `~groupy.api.attachments.Attachment`
    """
    try:
        subclass = cls._types[type]
    except KeyError:
        # unknown type: fall back to the generic attachment class
        return cls(type=type, **data)
    try:
        return subclass(**data)
    except TypeError as e:
        error = 'could not create {!r} attachment'.format(type)
        raise TypeError('{}: {}'.format(error, e.args[0]))
|
def from_file(self, fp):
    """Create a new image attachment from an image file.
    :param file fp: a file object containing binary image data
    :return: an image attachment
    :rtype: :class:`~groupy.api.attachments.Image`
    """
    urls = self.upload(fp)
    return Image(urls['url'], source_url=urls['picture_url'])
|
def upload(self, fp):
    """Upload image data to the image service.

    Call this, rather than :func:`from_file`, when you don't want to
    create an attachment of the image.
    :param file fp: a file object containing binary image data
    :return: the URLs for the uploaded image
    :rtype: dict
    """
    endpoint = utils.urljoin(self.url, 'pictures')
    response = self.session.post(endpoint, data=fp.read())
    return response.data
|
def download(self, image, url_field='url', suffix=None):
    """Download the binary data of an image attachment.
    :param image: an image attachment
    :type image: :class:`~groupy.api.attachments.Image`
    :param str url_field: the field of the image with the right URL
    :param str suffix: an optional URL suffix (e.g. "preview")
    :return: binary image data
    :rtype: bytes
    """
    url = getattr(image, url_field)
    if suffix is not None:
        # str.join takes a single iterable; the original passed (url, suffix)
        # as two arguments, raising TypeError whenever a suffix was given.
        url = '.'.join((url, suffix))
    response = self.session.get(url)
    return response.content
|
def download_preview(self, image, url_field='url'):
    """Download the binary data of an image attachment at preview size.
    :param str url_field: the field of the image with the right URL
    :return: binary image data
    :rtype: bytes
    """
    kwargs = {'url_field': url_field, 'suffix': 'preview'}
    return self.download(image, **kwargs)
|
def download_large(self, image, url_field='url'):
    """Download the binary data of an image attachment at large size.
    :param str url_field: the field of the image with the right URL
    :return: binary image data
    :rtype: bytes
    """
    kwargs = {'url_field': url_field, 'suffix': 'large'}
    return self.download(image, **kwargs)
|
def download_avatar(self, image, url_field='url'):
    """Download the binary data of an image attachment at avatar size.
    :param str url_field: the field of the image with the right URL
    :return: binary image data
    :rtype: bytes
    """
    kwargs = {'url_field': url_field, 'suffix': 'avatar'}
    return self.download(image, **kwargs)
|
def autopage(self):
    """Iterate through results from all pages.

    Keeps fetching the next page until an empty page is returned.
    :return: all results
    :rtype: generator
    """
    while self.items:
        for item in self.items:
            yield item
        self.items = self.fetch_next()
|
def detect_mode(cls, **params):
    """Detect which listing mode of the given params.
    :params kwargs params: the params
    :return: one of the available modes (or the default when none match)
    :rtype: str
    :raises ValueError: if multiple modes are detected
    """
    active = [mode for mode in cls.modes if params.get(mode) is not None]
    if len(active) > 1:
        error_message = 'ambiguous mode, must be one of {}'
        modes_csv = ', '.join(list(cls.modes))
        raise ValueError(error_message.format(modes_csv))
    return active[0] if active else cls.default_mode
|
def set_next_page_params(self):
    """Set the params so that the next page is fetched.

    No-op when the current page is empty (there is no next page).
    """
    if not self.items:
        return
    last_item = self.items[self.get_last_item_index()]
    self.params[self.mode] = self.get_next_page_param(last_item)
|
def list(self):
    """List the users you have blocked.
    :return: a list of :class:`~groupy.api.blocks.Block`'s
    :rtype: :class:`list`
    """
    response = self.session.get(self.url, params={'user': self.user_id})
    return [Block(self, **block) for block in response.data['blocks']]
|
def between(self, other_user_id):
    """Check if there is a block between you and the given user.
    :return: ``True`` if the given user has been blocked
    :rtype: bool
    """
    query = {'user': self.user_id, 'otherUser': other_user_id}
    response = self.session.get(self.url, params=query)
    return response.data['between']
|
def block(self, other_user_id):
    """Block the given user.
    :param str other_user_id: the ID of the user to block
    :return: the block created
    :rtype: :class:`~groupy.api.blocks.Block`
    """
    query = {'user': self.user_id, 'otherUser': other_user_id}
    response = self.session.post(self.url, params=query)
    return Block(self, **response.data['block'])
|
def unblock(self, other_user_id):
    """Unblock the given user.
    :param str other_user_id: the ID of the user to unblock
    :return: ``True`` if successful
    :rtype: bool
    """
    query = {'user': self.user_id, 'otherUser': other_user_id}
    return self.session.delete(self.url, params=query).ok
|
def list(self, page=1, per_page=10):
    """List a page of chats.
    :param int page: which page
    :param int per_page: how many chats per page
    :return: chats with other users
    :rtype: :class:`~groupy.pagers.ChatList`
    """
    paging = dict(per_page=per_page, page=page)
    return pagers.ChatList(self, self._raw_list, **paging)
|
def list(self, before_id=None, since_id=None, after_id=None, limit=20):
    """Return a page of group messages.

    The messages come in reversed order (newest first). Note you can only
    provide _one_ of ``before_id``, ``since_id``, or ``after_id``.
    :param str before_id: message ID for paging backwards
    :param str after_id: message ID for paging forwards
    :param str since_id: message ID for most recent messages since
    :param int limit: maximum number of messages per page
    :return: group messages
    :rtype: :class:`~groupy.pagers.MessageList`
    """
    paging = dict(before_id=before_id, after_id=after_id,
                  since_id=since_id, limit=limit)
    return pagers.MessageList(self, self._raw_list, **paging)
|
def list_since(self, message_id, limit=None):
    """Return a page of group messages created since a message.

    This is used to fetch the most recent messages after another. There
    may exist messages between the one given and the ones returned. Use
    :func:`list_after` to retrieve newer messages without skipping any.
    :param str message_id: the ID of a message
    :param int limit: maximum number of messages per page
    :return: group messages
    :rtype: :class:`~groupy.pagers.MessageList`
    """
    return self.list(limit=limit, since_id=message_id)
|
def list_after(self, message_id, limit=None):
    """Return a page of group messages created after a message.

    This is used to page forwards through messages.
    :param str message_id: the ID of a message
    :param int limit: maximum number of messages per page
    :return: group messages
    :rtype: :class:`~groupy.pagers.MessageList`
    """
    return self.list(limit=limit, after_id=message_id)
|
def list_all_before(self, message_id, limit=None):
    """Return all group messages created before a message.
    :param str message_id: the ID of a message
    :param int limit: maximum number of messages per page
    :return: group messages
    :rtype: generator
    """
    pager = self.list_before(message_id, limit=limit)
    return pager.autopage()
|
def list_all_after(self, message_id, limit=None):
    """Return all group messages created after a message.
    :param str message_id: the ID of a message
    :param int limit: maximum number of messages per page
    :return: group messages
    :rtype: generator
    """
    pager = self.list_after(message_id, limit=limit)
    return pager.autopage()
|
def create(self, text=None, attachments=None, source_guid=None):
    """Create a new message in the group.
    :param str text: the text of the message
    :param attachments: a list of attachments
    :type attachments: :class:`list`
    :param str source_guid: a unique identifier for the message
    :return: the created message
    :rtype: :class:`~groupy.api.messages.Message`
    """
    # fall back to a timestamp-based guid for deduplication
    message = {'source_guid': source_guid or str(time.time())}
    if text is not None:
        message['text'] = text
    if attachments is not None:
        message['attachments'] = [a.to_json() for a in attachments]
    response = self.session.post(self.url, json={'message': message})
    return Message(self, **response.data['message'])
|
def list(self, before_id=None, since_id=None, **kwargs):
    """Return a page of direct messages.

    The messages come in reversed order (newest first). Note you can only
    provide _one_ of ``before_id``, ``since_id``.
    :param str before_id: message ID for paging backwards
    :param str since_id: message ID for most recent messages since
    :return: direct messages
    :rtype: :class:`~groupy.pagers.MessageList`
    """
    return pagers.MessageList(self, self._raw_list,
                              before_id=before_id,
                              since_id=since_id,
                              **kwargs)
|
def list_all(self, before_id=None, since_id=None, **kwargs):
    """Return all direct messages.

    The messages come in reversed order (newest first). Note you can only
    provide _one_ of ``before_id``, ``since_id``.
    :param str before_id: message ID for paging backwards
    :param str since_id: message ID for most recent messages since
    :return: direct messages
    :rtype: generator
    """
    pager = self.list(before_id=before_id, since_id=since_id, **kwargs)
    return pager.autopage()
|
def add(self, nickname, email=None, phone_number=None, user_id=None):
    """Add a user to the group.

    You must provide either the email, phone number, or user_id that
    uniquely identifies a user.
    :param str nickname: new name for the user in the group
    :param str email: email address of the user
    :param str phone_number: phone number of the user
    :param str user_id: user_id of the user
    :return: a membership request
    :rtype: :class:`MembershipRequest`
    """
    member = dict(nickname=nickname, email=email,
                  phone_number=phone_number, user_id=user_id)
    return self.add_multiple(member)
|
def add_multiple(self, *users):
    """Add multiple users to the group at once.

    Each given user must be a dictionary containing a nickname and either
    an email, phone number, or user_id.
    :param args users: the users to add
    :return: a membership request
    :rtype: :class:`MembershipRequest`
    """
    base_guid = uuid.uuid4()
    # tag each request with a unique guid so results can be matched up later
    for index, user_ in enumerate(users):
        user_['guid'] = '{}-{}'.format(base_guid, index)
    url = utils.urljoin(self.url, 'add')
    response = self.session.post(url, json={'members': users})
    return MembershipRequest(self, *users, group_id=self.group_id,
                             **response.data)
|
def check(self, results_id):
    """Check for results of a membership request.
    :param str results_id: the ID of a membership request
    :return: successfully created memberships
    :rtype: :class:`list`
    :raises groupy.exceptions.ResultsNotReady: if the results are not ready
    :raises groupy.exceptions.ResultsExpired: if the results have expired
    """
    url = utils.urljoin(self.url, 'results/{}'.format(results_id))
    response = self.session.get(url)
    # the API signals "not yet" with 503 and "too late" with 404
    if response.status_code == 503:
        raise exceptions.ResultsNotReady(response)
    if response.status_code == 404:
        raise exceptions.ResultsExpired(response)
    return response.data['members']
|
def update(self, nickname=None, **kwargs):
    """Update your own membership.

    Note that this fails on former groups.
    :param str nickname: new nickname
    :return: updated membership
    :rtype: :class:`~groupy.api.memberships.Member`
    """
    # NOTE: kwargs deliberately update after nickname, so an explicit
    # 'nickname' in kwargs wins (matches original behavior)
    membership = {'nickname': nickname}
    membership.update(kwargs)
    response = self.session.post(self.url + 'hips/update',
                                 json={'membership': membership})
    return Member(self, self.group_id, **response.data)
|
def remove(self, membership_id):
    """Remove a member from the group.
    :param str membership_id: the ID of a member in this group
    :return: ``True`` if the member was successfully removed
    :rtype: bool
    """
    url = utils.urljoin(self.url, '{}/remove'.format(membership_id))
    response = self.session.post(url, json={'membership_id': membership_id})
    return response.ok
|
def post(self, text=None, attachments=None, source_guid=None):
    """Post a direct message to the user.
    :param str text: the message content
    :param attachments: message attachments
    :param str source_guid: a client-side unique ID for the message
    :return: the message sent
    :rtype: :class:`~groupy.api.messages.DirectMessage`
    """
    kwargs = dict(text=text, attachments=attachments,
                  source_guid=source_guid)
    return self.messages.create(**kwargs)
|
def add_to_group(self, group_id, nickname=None):
    """Add the member to another group.

    If a nickname is not provided the member's current nickname is used.
    :param str group_id: the group_id of a group
    :param str nickname: a new nickname
    :return: a membership request
    :rtype: :class:`MembershipRequest`
    """
    name = self.nickname if nickname is None else nickname
    memberships = Memberships(self.manager.session, group_id=group_id)
    return memberships.add(name, user_id=self.user_id)
|
def check_if_ready(self):
    """Check for and fetch the results if ready.

    Updates ``_is_ready`` and stashes any not-ready/expired exception so
    :func:`get` can re-raise it later.
    """
    try:
        results = self.manager.check(self.results_id)
    except exceptions.ResultsNotReady as e:
        # not ready yet: remember the exception for get()
        self._is_ready = False
        self._not_ready_exception = e
    except exceptions.ResultsExpired as e:
        # expired results are "ready" in the sense that waiting longer won't help
        self._is_ready = True
        self._expired_exception = e
    else:
        failures = self.get_failed_requests(results)
        members = self.get_new_members(results)
        self.results = self.__class__.Results(list(members), list(failures))
        self._is_ready = True
        self._not_ready_exception = None
|
def get_failed_requests(self, results):
    """Return the requests that failed.
    :param results: the results of a membership request check
    :type results: :class:`list`
    :return: the failed requests
    :rtype: generator
    """
    succeeded = {entry['guid'] for entry in results}
    for request in self.requests:
        if request['guid'] not in succeeded:
            yield request
|
def get_new_members(self, results):
    """Return the newly added members.
    :param results: the results of a membership request check
    :type results: :class:`list`
    :return: the successful requests, as :class:`~groupy.api.memberships.Member` objects
    :rtype: generator
    """
    for member in results:
        # 'guid' is request-matching metadata, not a Member field, so it is
        # removed before constructing the Member...
        guid = member.pop('guid')
        yield Member(self.manager, self.group_id, **member)
        # ...and restored afterwards so the caller's dicts stay intact.
        # NOTE(review): the restore runs only when iteration resumes past
        # the yield; the last consumed item keeps its guid removed if the
        # generator is abandoned early.
        member['guid'] = guid
|
def is_ready(self, check=True):
    """Return ``True`` if the results are ready.

    If you pass ``check=False``, no attempt is made to check again for
    results.
    :param bool check: whether to query for the results
    :return: ``True`` if the results are ready
    :rtype: bool
    """
    if check and not self._is_ready:
        self.check_if_ready()
    return self._is_ready
|
def poll(self, timeout=30, interval=2):
    """Return the results when they become ready.
    :param int timeout: the maximum time to wait for the results
    :param float interval: the number of seconds between checks
    :return: the membership request result
    :rtype: :class:`~groupy.api.memberships.MembershipResult.Results`
    """
    # give the server a head start before the first check
    time.sleep(interval)
    start = time.time()
    while not self.is_ready() and time.time() - start < timeout:
        time.sleep(interval)
    return self.get()
|
def get(self):
    """Return the results now.
    :return: the membership request results
    :rtype: :class:`~groupy.api.memberships.MembershipResult.Results`
    :raises groupy.exceptions.ResultsNotReady: if the results are not ready
    :raises groupy.exceptions.ResultsExpired: if the results have expired
    """
    # expired takes precedence over not-ready, matching check order
    for stashed in (self._expired_exception, self._not_ready_exception):
        if stashed:
            raise stashed
    return self.results
|
def list(self, page=1, per_page=10, omit=None):
    """List groups by page.

    The API allows certain fields to be excluded from the results so that
    very large groups can be fetched without exceeding the maximum
    response size. At the time of this writing, only 'memberships' is
    supported.
    :param int page: page number
    :param int per_page: number of groups per page
    :param int omit: a comma-separated list of fields to exclude
    :return: a list of groups
    :rtype: :class:`~groupy.pagers.GroupList`
    """
    paging = dict(page=page, per_page=per_page, omit=omit)
    return pagers.GroupList(self, self._raw_list, **paging)
|
def list_all(self, per_page=10, omit=None):
    """List all groups.

    Since the order of groups is determined by recent activity, this is the
    recommended way to obtain a list of all groups. See
    :func:`~groupy.api.groups.Groups.list` for details about ``omit``.
    :param int per_page: number of groups per page
    :param int omit: a comma-separated list of fields to exclude
    :return: a list of groups
    :rtype: :class:`~groupy.pagers.GroupList`
    """
    pager = self.list(per_page=per_page, omit=omit)
    return pager.autopage()
|
def list_former(self):
    """List all former groups.
    :return: a list of groups
    :rtype: :class:`list`
    """
    response = self.session.get(utils.urljoin(self.url, 'former'))
    return [Group(self, **group) for group in response.data]
|
def get(self, id):
    """Get a single group by ID.
    :param str id: a group ID
    :return: a group
    :rtype: :class:`~groupy.api.groups.Group`
    """
    response = self.session.get(utils.urljoin(self.url, id))
    return Group(self, **response.data)
|
def create(self, name, description=None, image_url=None, share=None, **kwargs):
    """Create a new group.

    Note that, although possible, there may be issues when not using an
    image URL from GroupMe's image service.
    :param str name: group name (140 characters maximum)
    :param str description: short description (255 characters maximum)
    :param str image_url: GroupMe image service URL
    :param bool share: whether to generate a share URL
    :return: a new group
    :rtype: :class:`~groupy.api.groups.Group`
    """
    payload = {'name': name, 'description': description,
               'image_url': image_url, 'share': share}
    # extra kwargs deliberately override the named fields
    payload.update(kwargs)
    response = self.session.post(self.url, json=payload)
    return Group(self, **response.data)
|
def update(self, id, name=None, description=None, image_url=None,
           office_mode=None, share=None, **kwargs):
    """Update the details of a group.

    .. note::
        There are significant bugs in this endpoint!
        1. not providing ``name`` produces 400: "Topic can't be blank"
        2. not providing ``office_mode`` produces 500: "sql: Scan error on
           column index 14: sql/driver: couldn't convert <nil> (<nil>) into
           type bool"
        Note that these issues are "handled" automatically when calling
        update on a :class:`~groupy.api.groups.Group` object.
    :param str id: group ID
    :param str name: group name (140 characters maximum)
    :param str description: short description (255 characters maximum)
    :param str image_url: GroupMe image service URL
    :param bool office_mode: (undocumented)
    :param bool share: whether to generate a share URL
    :return: an updated group
    :rtype: :class:`~groupy.api.groups.Group`
    """
    url = utils.urljoin(self.url, '{}/update'.format(id))
    payload = {'name': name, 'description': description,
               'image_url': image_url, 'office_mode': office_mode,
               'share': share}
    # extra kwargs deliberately override the named fields
    payload.update(kwargs)
    response = self.session.post(url, json=payload)
    return Group(self, **response.data)
|
def destroy(self, id):
    """Destroy a group.
    :param str id: a group ID
    :return: ``True`` if successful
    :rtype: bool
    """
    url = utils.urljoin(self.url, '{}/destroy'.format(id))
    return self.session.post(url).ok
|
def join(self, group_id, share_token):
    """Join a group using a share token.
    :param str group_id: the group_id of a group
    :param str share_token: the share token
    :return: the group
    :rtype: :class:`~groupy.api.groups.Group`
    """
    url = utils.urljoin(self.url, '{}/join/{}'.format(group_id, share_token))
    response = self.session.post(url)
    return Group(self, **response.data['group'])
|
def rejoin(self, group_id):
    """Rejoin a former group.
    :param str group_id: the group_id of a group
    :return: the group
    :rtype: :class:`~groupy.api.groups.Group`
    """
    response = self.session.post(utils.urljoin(self.url, 'join'),
                                 json={'group_id': group_id})
    return Group(self, **response.data)
|
def change_owners(self, group_id, owner_id):
    """Change the owner of a group.

    .. note:: you must be the owner to change owners
    :param str group_id: the group_id of a group
    :param str owner_id: the ID of the new owner
    :return: the result
    :rtype: :class:`~groupy.api.groups.ChangeOwnersResult`
    """
    url = utils.urljoin(self.url, 'change_owners')
    request = {'group_id': group_id, 'owner_id': owner_id}
    response = self.session.post(url, json={'requests': [request]})
    # the API returns one result per request; we sent exactly one
    result, = response.data['results']
    return ChangeOwnersResult(**result)
|
def update(self, name=None, description=None, image_url=None,
           office_mode=None, share=None, **kwargs):
    """Update the details of the group.
    :param str name: group name (140 characters maximum)
    :param str description: short description (255 characters maximum)
    :param str image_url: GroupMe image service URL
    :param bool office_mode: (undocumented)
    :param bool share: whether to generate a share URL
    :return: an updated group
    :rtype: :class:`~groupy.api.groups.Group`
    """
    # work around bugs in the group update endpoint: it requires name and
    # office_mode, so default both to the group's current values
    name = self.name if name is None else name
    office_mode = self.office_mode if office_mode is None else office_mode
    return self.manager.update(id=self.id, name=name,
                               description=description,
                               image_url=image_url,
                               office_mode=office_mode,
                               share=share, **kwargs)
|
def refresh_from_server(self):
    """Refresh the group from the server in place."""
    fresh = self.manager.get(id=self.id)
    # re-run __init__ with the fresh payload to update this instance in place
    self.__init__(self.manager, **fresh.data)
|
def create_bot(self, name, avatar_url=None, callback_url=None, dm_notification=None,
               **kwargs):
    """Create a new bot in a particular group.
    :param str name: bot name
    :param str avatar_url: the URL of an image to use as an avatar
    :param str callback_url: a POST-back URL for each new message
    :param bool dm_notification: whether to POST-back for direct messages?
    :return: the new bot
    :rtype: :class:`~groupy.api.bots.Bot`
    """
    # forward **kwargs (previously accepted but silently dropped) so this
    # wrapper matches the underlying Bots.create signature
    return self._bots.create(name=name, group_id=self.group_id,
                             avatar_url=avatar_url, callback_url=callback_url,
                             dm_notification=dm_notification, **kwargs)
|
def get_membership(self):
    """Get your membership.

    Note that your membership may not exist. For example, you do not have
    a membership in a former group. Also, the group returned by the API
    when rejoining a former group does not contain your membership. You
    must call :func:`refresh_from_server` to update the list of members.
    :return: your membership in the group
    :rtype: :class:`~groupy.api.memberships.Member`
    :raises groupy.exceptions.MissingMembershipError: if your membership is
        not in the group data
    """
    my_id = self._user.me['user_id']
    for member in self.members:
        if member.user_id == my_id:
            return member
    raise exceptions.MissingMembershipError(self.group_id, my_id)
|
def update_membership(self, nickname=None, **kwargs):
    """Update your own membership.

    Note that this fails on former groups.
    :param str nickname: new nickname
    :return: updated membership
    :rtype: :class:`~groupy.api.members.Member`
    """
    kwargs['nickname'] = nickname
    return self.memberships.update(**kwargs)
|
def urljoin(base, path=None):
    """Join a base url with a relative path."""
    if path is None:
        return base
    # ensure a trailing slash so the last path segment of base is kept:
    # /foo/bar + baz -> /foo/bar/baz (not /foo/baz)
    root = base if base.endswith('/') else base + '/'
    return urllib.parse.urljoin(root, str(path))
|
def parse_share_url(share_url):
    """Return the group_id and share_token in a group's share url.
    :param str share_url: the share url of a group
    """
    segments = share_url.rstrip('/').split('/')
    return segments[-2], segments[-1]
|
def get_rfc3339(when):
    """Return an RFC 3339 timestamp with 4 fractional digits.
    :param datetime.datetime when: a datetime in UTC
    :return: RFC 3339 timestamp
    :rtype: str
    """
    # microsecond is 0..999999; zero-pad to the full 6 digits BEFORE
    # truncating. The original '04d' padding misplaced the decimal for
    # values under 100000 µs (e.g. 500 µs rendered as ".0500" instead of
    # ".0005").
    microseconds = format(when.microsecond, '06d')[:4]
    rfc3339 = '%Y-%m-%dT%H:%M:%S.{}Z'
    return when.strftime(rfc3339.format(microseconds))
|
def make_filter(**tests):
    """Create a filter from keyword arguments."""
    attr_tests = [AttrTest(name, expected) for name, expected in tests.items()]
    return Filter(attr_tests)
|
def find(self, objects):
    """Find exactly one match in the list of objects.
    :param objects: objects to filter
    :type objects: :class:`list`
    :return: the one matching object
    :raises groupy.exceptions.NoMatchesError: if no objects match
    :raises groupy.exceptions.MultipleMatchesError: if multiple objects match
    """
    matches = list(self(objects))
    if len(matches) == 1:
        return matches[0]
    if not matches:
        raise exceptions.NoMatchesError(objects, self.tests)
    raise exceptions.MultipleMatchesError(objects, self.tests,
                                          matches=matches)
|
def list(self):
    """Return a list of bots.
    :return: all of your bots
    :rtype: :class:`list`
    """
    data = self.session.get(self.url).data
    return [Bot(self, **bot) for bot in data]
|
def create(self, name, group_id, avatar_url=None, callback_url=None,
           dm_notification=None, **kwargs):
    """Create a new bot in a particular group.
    :param str name: bot name
    :param str group_id: the group_id of a group
    :param str avatar_url: the URL of an image to use as an avatar
    :param str callback_url: a POST-back URL for each new message
    :param bool dm_notification: whether to POST-back for direct messages?
    :return: the new bot
    :rtype: :class:`~groupy.api.bots.Bot`
    """
    bot_details = {
        'name': name,
        'group_id': group_id,
        'avatar_url': avatar_url,
        'callback_url': callback_url,
        'dm_notification': dm_notification,
    }
    # extra kwargs deliberately override the named fields
    bot_details.update(kwargs)
    response = self.session.post(self.url, json={'bot': bot_details})
    return Bot(self, **response.data['bot'])
|
def post(self, bot_id, text, attachments=None):
    """Post a new message as a bot to its room.
    :param str bot_id: the ID of the bot
    :param str text: the text of the message
    :param attachments: a list of attachments
    :type attachments: :class:`list`
    :return: ``True`` if successful
    :rtype: bool
    """
    url = utils.urljoin(self.url, 'post')
    payload = {'bot_id': bot_id, 'text': text}
    if attachments:
        payload['attachments'] = [a.to_json() for a in attachments]
    return self.session.post(url, json=payload).ok
|
def destroy(self, bot_id):
    """Destroy a bot.
    :param str bot_id: the ID of the bot to destroy
    :return: ``True`` if successful
    :rtype: bool
    """
    url = utils.urljoin(self.url, 'destroy')
    return self.session.post(url, json={'bot_id': bot_id}).ok
|
def post(self, text, attachments=None):
    """Post a message as the bot.
    :param str text: the text of the message
    :param attachments: a list of attachments
    :type attachments: :class:`list`
    :return: ``True`` if successful
    :rtype: bool
    """
    args = (self.bot_id, text, attachments)
    return self.manager.post(*args)
|
def flatten_until(is_leaf, xs):
    """
    Flatten a nested sequence. A sequence could be a nested list of lists
    or tuples or a combination of both.
    :param is_leaf: Predicate. Determines whether an item in the
        iterable `xs` is a leaf node or not.
    :param xs: Iterable. Nested lists or tuples.
    :return: list of leaf items, in traversal order.
    """
    def _flatten_until(items):
        # BUG FIX: the original called isinstance(Iterable, items) with the
        # arguments reversed, which raised TypeError on every call.
        if isinstance(items, Iterable) and not is_leaf(items):
            for item in items:
                for i in _flatten_until(item):
                    yield i
        else:
            yield items
    return list(_flatten_until(xs))
|
def flip(f):
    """
    Calls the function f by flipping the first two positional
    arguments.

    Returns a curried wrapper (built via the module's curry helpers)
    whose behavior is identical to ``f`` except that the first two
    positional arguments are exchanged before the call.
    """
    def wrapped(*args, **kwargs):
        # Delegate to f with the first two positional args swapped;
        # keyword arguments pass through untouched.
        return f(*flip_first_two(args), **kwargs)
    # Derive the curry spec from the *original* function so the wrapper
    # advertises the same arity to the currying machinery.
    f_spec = make_func_curry_spec(f)
    return curry_by_spec(f_spec, wrapped)
|
def cachier(stale_after=None, next_time=False, pickle_reload=True,
            mongetter=None):
    """A persistent, stale-free memoization decorator.
    The positional and keyword arguments to the wrapped function must be
    hashable (i.e. Python's immutable built-in objects, not mutable
    containers). Also, notice that since objects which are instances of
    user-defined classes are hashable but all compare unequal (their hash
    value is their id), equal objects across different sessions will not yield
    identical keys.
    Arguments
    ---------
    stale_after (optional) : datetime.timedelta
        The time delta afterwhich a cached result is considered stale. Calls
        made after the result goes stale will trigger a recalculation of the
        result, but whether a stale or fresh result will be returned is
        determined by the optional next_time argument.
    next_time (optional) : bool
        If set to True, a stale result will be returned when finding one, not
        waiting for the calculation of the fresh result to return. Defaults to
        False.
    pickle_reload (optional) : bool
        If set to True, in-memory cache will be reloaded on each cache read,
        enabling different threads to share cache. Should be set to False for
        faster reads in single-thread programs. Defaults to True.
    mongetter (optional) : callable
        A callable that takes no arguments and returns a pymongo.Collection
        object with writing permissions. If unset a local pickle cache is used
        instead.
    """
    # print('Inside the wrapper maker')
    # print('mongetter={}'.format(mongetter))
    # print('stale_after={}'.format(stale_after))
    # print('next_time={}'.format(next_time))
    # Select the persistence backend once, at decoration time: MongoDB when
    # a collection getter is supplied, a local pickle file otherwise.
    if mongetter:
        core = _MongoCore(mongetter, stale_after, next_time)
    else:
        core = _PickleCore(  # pylint: disable=R0204
            stale_after, next_time, pickle_reload)
    def _cachier_decorator(func):
        core.set_func(func)
        @wraps(func)
        def func_wrapper(*args, **kwds):  # pylint: disable=C0111,R0911
            # print('Inside general wrapper for {}.'.format(func.__name__))
            # Per-call control flags are popped so they never reach the
            # wrapped function (and never become part of the cache key).
            ignore_cache = kwds.pop('ignore_cache', False)
            overwrite_cache = kwds.pop('overwrite_cache', False)
            verbose_cache = kwds.pop('verbose_cache', False)
            _print = lambda x: None
            if verbose_cache:
                _print = print
            if ignore_cache:
                # Bypass the cache entirely: call through, don't store.
                return func(*args, **kwds)
            key, entry = core.get_entry(args, kwds)
            if overwrite_cache:
                # Recompute unconditionally and overwrite any cached value.
                return _calc_entry(core, key, func, args, kwds)
            if entry is not None:  # pylint: disable=R0101
                _print('Entry found.')
                if entry.get('value', None) is not None:
                    _print('Cached result found.')
                    if stale_after:
                        now = datetime.datetime.now()
                        if now - entry['time'] > stale_after:
                            _print('But it is stale... :(')
                            if entry['being_calculated']:
                                if next_time:
                                    _print('Returning stale.')
                                    return entry['value']  # return stale val
                                _print('Already calc. Waiting on change.')
                                try:
                                    return core.wait_on_entry_calc(key)
                                except RecalculationNeeded:
                                    return _calc_entry(core, key, func, args, kwds)
                            if next_time:
                                # Kick off the refresh in the background and
                                # hand back the stale value immediately.
                                _print('Async calc and return stale')
                                try:
                                    core.mark_entry_being_calculated(key)
                                    _get_executor().submit(
                                        _function_thread, core, key, func,
                                        args, kwds)
                                finally:
                                    # NOTE(review): this un-marks the entry as
                                    # soon as the task is *submitted*, not when
                                    # it finishes — confirm this is intended.
                                    core.mark_entry_not_calculated(key)
                                return entry['value']
                            _print('Calling decorated function and waiting')
                            return _calc_entry(core, key, func, args, kwds)
                    _print('And it is fresh!')
                    return entry['value']
                if entry['being_calculated']:
                    # Another thread/process is computing this key: block on
                    # it instead of duplicating the work.
                    _print('No value but being calculated. Waiting.')
                    try:
                        return core.wait_on_entry_calc(key)
                    except RecalculationNeeded:
                        return _calc_entry(core, key, func, args, kwds)
            _print('No entry found. No current calc. Calling like a boss.')
            return _calc_entry(core, key, func, args, kwds)
        def clear_cache():
            """Clear the cache."""
            core.clear_cache()
        def clear_being_calculated():
            """Marks all entries in this cache as not being calculated."""
            core.clear_being_calculated()
        # Expose cache-management helpers on the wrapped function itself.
        func_wrapper.clear_cache = clear_cache
        func_wrapper.clear_being_calculated = clear_being_calculated
        return func_wrapper
    return _cachier_decorator
|
def defineID(defid):
    """Search for UD's definition ID and return list of UrbanDefinition objects.

    Keyword arguments:
    defid -- definition ID to search for (int or str)
    """
    # Renamed the local from `json` to avoid shadowing the stdlib json
    # module name within this function's scope.
    response_json = _get_urban_json(UD_DEFID_URL + urlquote(str(defid)))
    return _parse_urban_json(response_json)
|
def has_external_dependency(name):
    """Check that a non-Python dependency is installed.

    :param name: file name of the executable/tool to look for
    :return: True if a file with that name exists in any PATH directory.
    """
    # Bug fix: the original split PATH on ':', which is wrong on Windows
    # (';'); os.pathsep is the portable separator. os.environ.get also
    # avoids a KeyError when PATH is unset.
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        if os.path.exists(os.path.join(directory, name)):
            return True
    return False
|
def with_vtk(plot=True):
    """ Tests VTK interface and mesh repair of Stanford Bunny Mesh """
    meshfix = pymeshfix.MeshFix(vtki.PolyData(bunny_scan))
    if plot:
        print('Plotting input mesh')
        meshfix.plot()
    meshfix.repair()
    if plot:
        print('Plotting repaired mesh')
        meshfix.plot()
    return meshfix.mesh
|
def load_arrays(self, v, f):
    """Loads triangular mesh from vertex and face numpy arrays.

    Both vertex and face arrays should be 2D arrays with each
    vertex containing XYZ data and each face containing three
    point indices.

    Parameters
    ----------
    v : np.ndarray
        n x 3 vertex array.
    f : np.ndarray
        n x 3 face array.

    Raises
    ------
    Exception
        If an input cannot be converted to a numpy array or does not
        have shape (n, 3).
    """
    if not isinstance(v, np.ndarray):
        try:
            # np.float was removed in NumPy 1.24; the builtin ``float``
            # gives the same (float64) dtype.
            v = np.asarray(v, dtype=float)
        except BaseException:
            raise Exception(
                'Unable to convert vertex input to valid numpy array')
    # Validate shape for both converted and pre-made arrays. The original
    # used `and` where `or` was intended (so a bad shape never raised) and
    # skipped the check entirely for ndarray input.
    if v.ndim != 2 or v.shape[1] != 3:
        raise Exception('Invalid vertex format. Shape ' +
                        'should be (npoints, 3)')
    if not isinstance(f, np.ndarray):
        try:
            f = np.asarray(f, dtype=ctypes.c_int)
        except BaseException:
            raise Exception('Unable to convert face input to valid' +
                            ' numpy array')
    if f.ndim != 2 or f.shape[1] != 3:
        raise Exception('Invalid face format. ' +
                        'Shape should be (nfaces, 3)')
    self.v = v
    self.f = f
|
def mesh(self):
    """Return the surface mesh"""
    n_faces = self.f.shape[0]
    # VTK face layout: [n_points, i0, i1, i2] per triangle.
    faces = np.empty((n_faces, 4))
    faces[:, 0] = 3
    faces[:, 1:] = self.f
    return vtki.PolyData(self.v, faces, deep=False)
|
def plot(self, show_holes=True):
    """
    Plot the mesh.

    Parameters
    ----------
    show_holes : bool, optional
        Shows boundaries. Default True
    """
    if not show_holes:
        self.mesh.plot(show_edges=True)
        return
    # Highlight boundary edges (holes) in red alongside the mesh.
    edges = self.mesh.extract_edges(boundary_edges=True,
                                    feature_edges=False,
                                    manifold_edges=False)
    plotter = vtki.Plotter()
    plotter.add_mesh(self.mesh, label='mesh')
    plotter.add_mesh(edges, 'r', label='edges')
    plotter.plot()
|
def repair(self, verbose=False, joincomp=False,
           remove_smallest_components=True):
    """Performs mesh repair using MeshFix's default repair
    process.
    Parameters
    ----------
    verbose : bool, optional
        Enables or disables debug printing. Disabled by default.
    joincomp : bool, optional
        Attempts to join nearby open components.
    remove_smallest_components : bool, optional
        Remove all but the largest isolated component from the
        mesh before beginning the repair process. Default True
    Notes
    -----
    Vertex and face arrays are updated inplace. Access them with:
    meshfix.v
    meshfix.f
    """
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # callers relying on these checks should not run optimized.
    assert self.f.shape[1] == 3, 'Face array must contain three columns'
    assert self.f.ndim == 2, 'Face array must be 2D'
    # Delegate to the compiled MeshFix extension; it returns the cleaned
    # vertex/face arrays, which replace the originals in place.
    self.v, self.f = _meshfix.clean_from_arrays(self.v, self.f,
                                                verbose, joincomp,
                                                remove_smallest_components)
|
def write(self, filename, binary=True):
    """Writes a surface mesh to disk.
    Written file may be an ASCII or binary ply, stl, or vtk mesh
    file.
    Parameters
    ----------
    filename : str
        Filename of mesh to be written. Filetype is inferred from
        the extension of the filename. Can be one of the following
        types (.ply, .stl, .vtk)
    binary : bool, optional
        Write the file in binary format when True (default), ASCII
        otherwise.
    Notes
    -----
    Binary files write much faster than ASCII.
    """
    # Thin delegation to the underlying vtki/pyvista mesh writer.
    self.mesh.write(filename, binary)
|
def scrape(url, params=None, user_agent=None):
    '''
    Scrape a URL optionally with parameters.
    This is effectively a wrapper around urllib2.urlopen.

    :param url: URL to fetch
    :param params: optional mapping of parameters; when given, the
        request becomes a POST with the urlencoded body
    :param user_agent: optional User-Agent header value
    :return: the raw response body
    '''
    headers = {}
    if user_agent:
        headers['User-Agent'] = user_agent
    # Replaced the fragile `x and y or None` idiom with an explicit
    # conditional expression (same result, clearer intent).
    data = six.moves.urllib.parse.urlencode(params) if params else None
    req = six.moves.urllib.request.Request(url, data=data, headers=headers)
    f = six.moves.urllib.request.urlopen(req)
    try:
        return f.read()
    finally:
        # Bug fix: the original leaked the response object when read()
        # raised; always close it.
        f.close()
|
def pdftoxml(pdfdata, options=""):
    """converts pdf file to xml file

    Writes the raw PDF bytes to a temp file, shells out to the external
    `pdftohtml` tool, and returns the generated XML as a unicode string.

    :param pdfdata: raw PDF content (bytes)
    :param options: extra command-line flags spliced into the pdftohtml
        invocation.
        NOTE(review): `options` is interpolated into a shell command
        unquoted — do not pass untrusted input here.
    :return: the XML output decoded as UTF-8
    """
    pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
    pdffout.write(pdfdata)
    pdffout.flush()
    xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
    tmpxml = xmlin.name  # "temph.xml"
    # pdftohtml appends ".xml" itself, hence the splitext on the target.
    cmd = 'pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes %s "%s" "%s"' % (
        options, pdffout.name, os.path.splitext(tmpxml)[0])
    # can't turn off output, so throw away even stderr yeuch
    cmd = cmd + " >/dev/null 2>&1"
    os.system(cmd)
    pdffout.close()
    #xmlfin = open(tmpxml)
    xmldata = xmlin.read()
    xmlin.close()
    return xmldata.decode('utf-8')
|
def execute(query, data=None):
    """
    Execute an arbitrary SQL query given by query, returning any
    results as a list of OrderedDicts. A list of values can be supplied as an,
    additional argument, which will be substituted into question marks in the
    query.
    """
    connection = _State.connection()
    _State.new_transaction()
    result = connection.execute(query, [] if data is None else data)
    # Invalidate cached table/metadata state; the query may have changed
    # the schema.
    _State.table = None
    _State.metadata = None
    if hasattr(_State, 'table_pending'):
        del _State.table_pending
    if not result.returns_rows:
        return {u'data': [], u'keys': []}
    return {u'data': result.fetchall(), u'keys': list(result.keys())}
|
def select(query, data=None):
    """
    Perform a sql select statement with the given query (without 'select') and
    return any results as a list of OrderedDicts.
    """
    connection = _State.connection()
    _State.new_transaction()
    if data is None:
        data = []
    result = connection.execute('select ' + query, data)
    # Materialize each result row as a plain dict.
    return [dict(row.items()) for row in result]
|
def save(unique_keys, data, table_name='swdata'):
    """
    Save the given data to the table specified by `table_name`
    (which defaults to 'swdata'). The data must be a mapping
    or an iterable of mappings. Unique keys is a list of keys that exist
    for all rows and for which a unique index will be created.
    """
    _set_table(table_name)
    connection = _State.connection()
    if isinstance(data, Mapping):
        # Normalize a single mapping into a one-element list of rows.
        data = [data]
    elif not isinstance(data, Iterable):
        raise TypeError("Data must be a single mapping or an iterable "
                        "of mappings")
    insert_stmt = _State.table.insert(prefixes=['OR REPLACE'])
    for record in data:
        if not isinstance(record, Mapping):
            raise TypeError("Elements of data must be mappings, got {}".format(
                type(record)))
        # Grow the table schema if this record has unseen columns.
        fit_row(connection, record, unique_keys)
        connection.execute(insert_stmt.values(record))
    _State.check_last_committed()
|
def _set_table(table_name):
    """
    Specify the table to work on.
    """
    _State.connection()
    _State.reflect_metadata()
    _State.table = sqlalchemy.Table(table_name, _State.metadata,
                                    extend_existing=True)
    # A table with no reflected columns does not exist yet and is
    # pending creation on the first save.
    _State.table_pending = not list(_State.table.columns.keys())
|
def show_tables():
    """
    Return the names of the tables currently in the database.

    :return: dict mapping each table name to its CREATE TABLE SQL.
    """
    _State.connection()
    _State.reflect_metadata()
    # Removed the unused local `metadata = _State.metadata`.
    response = select('name, sql from sqlite_master where type="table"')
    return {row['name']: row['sql'] for row in response}
|
def save_var(name, value):
    """
    Save a variable to the table specified by _State.vars_table_name. Key is
    the name of the variable, and value is the value.
    """
    connection = _State.connection()
    _State.reflect_metadata()
    vars_table = sqlalchemy.Table(
        _State.vars_table_name, _State.metadata,
        sqlalchemy.Column('name', sqlalchemy.types.Text, primary_key=True),
        sqlalchemy.Column('value_blob', sqlalchemy.types.LargeBinary),
        sqlalchemy.Column('type', sqlalchemy.types.Text),
        keep_existing=True
    )
    vars_table.create(bind=connection, checkfirst=True)
    column_type = get_column_type(value)
    if column_type == sqlalchemy.types.LargeBinary:
        # Binary values are stored as-is.
        value_blob = value
    else:
        # Bug fix: `unicode` only exists on Python 2 and raised NameError
        # on Python 3. The u''.format form yields unicode on py2 and str
        # on py3, matching the utf-8 decode performed by get_var.
        value_blob = u'{}'.format(value).encode('utf-8')
    values = dict(name=name,
                  value_blob=value_blob,
                  type=column_type.__visit_name__.lower())
    vars_table.insert(prefixes=['OR REPLACE']).values(**values).execute()
|
def get_var(name, default=None):
    """
    Returns the variable with the provided key from the
    table specified by _State.vars_table_name.

    :param name: variable name to look up
    :param default: value returned when the variable (or the vars table)
        does not exist. The original accepted this parameter but ignored
        it, always returning None.
    """
    # Converters from the stored utf-8 blob back to the saved Python type.
    alchemytypes = {"text": lambda x: x.decode('utf-8'),
                    "big_integer": lambda x: int(x),
                    "date": lambda x: x.decode('utf-8'),
                    "datetime": lambda x: x.decode('utf-8'),
                    "float": lambda x: float(x),
                    "large_binary": lambda x: x,
                    "boolean": lambda x: x == b'True'}
    connection = _State.connection()
    _State.new_transaction()
    if _State.vars_table_name not in list(_State.metadata.tables.keys()):
        return default
    table = sqlalchemy.Table(_State.vars_table_name, _State.metadata)
    s = sqlalchemy.select([table.c.value_blob, table.c.type])
    s = s.where(table.c.name == name)
    result = connection.execute(s).fetchone()
    if not result:
        return default
    # Removed the unreachable legacy code that followed this return in the
    # original (a temp-table based conversion path that could never run).
    return alchemytypes[result[1]](result[0])
|
def create_index(column_names, unique=False):
    """
    Create a new index of the columns in column_names, where column_names is
    a list of strings. If unique is True, it will be a
    unique index.
    """
    _State.connection()
    _State.reflect_metadata()
    table = _State.table
    # Index name: sanitized table name plus the sanitized column names.
    sanitize = lambda s: re.sub(r'[^a-zA-Z0-9]', '', s)
    index_name = sanitize(table.name) + '_'
    index_name += '_'.join(sanitize(name) for name in column_names)
    if unique:
        index_name += '_unique'
    columns = [table.columns[name] for name in column_names]
    existing_names = {ix.name for ix in table.indexes}
    index = sqlalchemy.schema.Index(index_name, *columns, unique=unique)
    if index.name not in existing_names:
        index.create(bind=_State.engine)
|
def fit_row(connection, row, unique_keys):
    """
    Takes a row and checks to make sure it fits in the columns of the
    current table. If it does not fit, adds the required columns.
    """
    pending = []
    for name, value in list(row.items()):
        candidate = sqlalchemy.Column(name, get_column_type(value))
        if name not in list(_State.table.columns.keys()):
            pending.append(candidate)
            _State.table.append_column(candidate)
    if _State.table_pending:
        # Table does not exist yet: creating it covers all columns at once.
        create_table(unique_keys)
        return
    for candidate in pending:
        add_column(connection, candidate)
|
def create_table(unique_keys):
    """
    Save the table currently waiting to be created.
    """
    _State.new_transaction()
    _State.table.create(bind=_State.engine, checkfirst=True)
    if unique_keys != []:
        # Enforce uniqueness over the caller-specified key columns.
        create_index(unique_keys, unique=True)
    _State.table_pending = False
    _State.reflect_metadata()
|
def add_column(connection, column):
    """
    Add a column to the current table.
    """
    # Emit an ALTER TABLE ... ADD COLUMN via alembic's DDL helper, then
    # refresh the reflected metadata so the new column is visible.
    ddl = alembic.ddl.base.AddColumn(_State.table.name, column)
    connection.execute(ddl)
    _State.reflect_metadata()
|
def drop():
    """
    Drop the current table if it exists
    """
    # Make sure the engine/connection is up before issuing DDL.
    _State.connection()
    _State.table.drop(checkfirst=True)
    _State.metadata.remove(_State.table)
    _State.table = None
    _State.new_transaction()
|
def attach_attrs_table(key, value, fmt, meta):
    """Extracts attributes and attaches them to element."""
    # We can't use attach_attrs_factory() because Table is a block-level element
    if key in ['Table']:
        assert len(value) == 5
        caption = value[0]  # caption, align, x, head, body
        # Find the index of the first caption chunk that opens an
        # attribute string (a 'Str' beginning with '{').
        n = 0
        for chunk in caption:
            if chunk['t'] == 'Str' and chunk['c'].startswith('{'):
                break
            n += 1
        try:
            attrs = extract_attrs(caption, n)
            value.insert(0, attrs)
        except (ValueError, IndexError):
            # No parsable attributes in the caption: leave the table as-is.
            pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.