text stringlengths 81 112k |
|---|
find all matching data files in search_path
search_path: path of directories to load from
codecs: allow to override from list of installed
returns array of tuples (codec_object, filename)
def find_datafile(name, search_path, codecs=None):
    """
    Find all matching data files in search_path.

    :param name: base name of the datafile to find.
    :param search_path: path of directories to load from.
    :param codecs: allow to override from list of installed;
        defaults to the currently installed codecs.
    :returns: array of tuples (codec_object, filename).
    """
    # Bug fix: the default was previously `codecs=get_codecs()`, which is
    # evaluated once at import time and freezes the codec list. Resolve the
    # default at call time so codecs registered later are still picked up.
    if codecs is None:
        codecs = get_codecs()
    return munge.find_datafile(name, search_path, codecs)
find datafile and load them from codec
TODO only does the first one
kwargs:
default = if passed will return that on failure instead of throwing
def load_datafile(name, search_path, codecs=None, **kwargs):
    """
    Find datafile and load them from codec.
    TODO only does the first one.

    :param name: base name of the datafile to load.
    :param search_path: path of directories to load from.
    :param codecs: allow to override from list of installed;
        defaults to the currently installed codecs.
    kwargs:
        default = if passed will return that on failure instead of throwing
    """
    # Bug fix: the default was previously `codecs=get_codecs()`, evaluated
    # once at import time. Resolve it at call time instead so codecs
    # registered after this module is imported are still considered.
    if codecs is None:
        codecs = get_codecs()
    return munge.load_datafile(name, search_path, codecs, **kwargs)
Initialize what requires credentials/secret files.
:param secrets_dir: dir to expect credentials in and store logs/history in.
:param log: logger to use for log output.
:param bot_name: name of this bot,
used for various kinds of labelling.
:returns: none.
def cred_init(
        self,
        *,
        secrets_dir: str,
        log: Logger,
        bot_name: str,
) -> None:
    """
    Initialize what requires credentials/secret files.

    :param secrets_dir: dir to expect credentials in and store logs/history in.
    :param log: logger to use for log output.
    :param bot_name: name of this bot, used for various kinds of labelling.
    :returns: none.
    """
    super().__init__(secrets_dir=secrets_dir, log=log, bot_name=bot_name)

    def read_secret(name: str) -> str:
        # Each credential lives in its own single-value file under
        # self.secrets_dir; strip the trailing newline from the contents.
        self.ldebug(f"Retrieving {name}...")
        with open(path.join(self.secrets_dir, name)) as f:
            return f.read().strip()

    # Previously this was four copies of the same open/read/strip
    # boilerplate with ALL_CAPS local names; factored into read_secret.
    consumer_key = read_secret("CONSUMER_KEY")
    consumer_secret = read_secret("CONSUMER_SECRET")
    access_token = read_secret("ACCESS_TOKEN")
    access_secret = read_secret("ACCESS_SECRET")

    # OWNER_HANDLE is optional; without it we cannot DM the owner on errors.
    self.ldebug("Looking for OWNER_HANDLE...")
    owner_handle_path = path.join(self.secrets_dir, "OWNER_HANDLE")
    if path.isfile(owner_handle_path):
        with open(owner_handle_path) as f:
            self.owner_handle = f.read().strip()
    else:
        self.ldebug("Couldn't find OWNER_HANDLE, unable to DM...")
        self.owner_handle = ""

    self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    self.auth.set_access_token(access_token, access_secret)
    self.api = tweepy.API(self.auth)
Send birdsite message.
:param text: text to send in post.
:returns: list of output records,
each corresponding to either a single post,
or an error.
def send(
        self,
        *,
        text: str,
) -> List[OutputRecord]:
    """
    Send birdsite message.

    :param text: text to send in post.
    :returns: list of output records, each corresponding to either a single
        post, or an error.
    """
    try:
        status = self.api.update_status(text)
        self.ldebug(f"Status object from tweet: {status}.")
    except tweepy.TweepError as e:
        # Wrap the failure in an error record instead of raising.
        failure_message = (f"Bot {self.bot_name} encountered an error when "
                           f"sending post {text} without media:\n{e}\n")
        return [self.handle_error(message=failure_message, error=e)]

    return [TweetRecord(record_data={"tweet_id": status._json["id"], "text": text})]
Upload media to birdsite,
and send status and media,
and captions if present.
:param text: tweet text.
:param files: list of files to upload with post.
:param captions: list of captions to include as alt-text with files.
:returns: list of output records,
each corresponding to either a single post,
or an error.
def send_with_media(
        self,
        *,
        text: str,
        files: List[str],
        captions: Optional[List[str]] = None,
) -> List[OutputRecord]:
    """
    Upload media to birdsite, and send status and media, and captions if
    present.

    :param text: tweet text.
    :param files: list of files to upload with post.
    :param captions: list of captions to include as alt-text with files.
    :returns: list of output records, each corresponding to either a single
        post, or an error.
    """
    # Bug fix: captions previously defaulted to a shared mutable [] that
    # was extended downstream (_handle_caption_upload), leaking default
    # captions between calls. Default to None and normalize here.
    if captions is None:
        captions = []

    # upload media
    media_ids = None
    try:
        self.ldebug(f"Uploading files {files}.")
        media_ids = [self.api.media_upload(file).media_id_string for file in files]
    except tweepy.TweepError as e:
        return [self.handle_error(
            message=f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n",
            error=e)]

    # apply captions, if present
    self._handle_caption_upload(media_ids=media_ids, captions=captions)

    # send status
    try:
        status = self.api.update_status(status=text, media_ids=media_ids)
        self.ldebug(f"Status object from tweet: {status}.")
        return [TweetRecord(record_data={
            "tweet_id": status._json["id"],
            "text": text,
            "media_ids": media_ids,
            "captions": captions,
            "files": files
        })]
    except tweepy.TweepError as e:
        return [self.handle_error(
            message=(f"Bot {self.bot_name} encountered an error when "
                     f"sending post {text} with media ids {media_ids}:\n{e}\n"),
            error=e)]
Performs batch reply on target account.
Looks up the recent messages of the target user,
applies the callback,
and replies with
what the callback generates.
:param callback: a callback taking a message id,
message contents,
and optional extra keys,
and returning a message string.
:param target: the id of the target account.
:param lookback_limit: a lookback limit of how many messages to consider.
:returns: list of output records,
each corresponding to either a single post,
or an error.
def perform_batch_reply(
        self,
        *,
        callback: Callable[..., str],
        lookback_limit: int,
        target_handle: str,
) -> List[OutputRecord]:
    """
    Performs batch reply on target account.
    Looks up the recent messages of the target user, applies the callback,
    and replies with what the callback generates.

    :param callback: a callback taking a message id, message contents,
        and optional extra keys, and returning a message string.
    :param target_handle: the handle of the target account.
    :param lookback_limit: a lookback limit of how many messages to consider.
    :returns: list of output records,
        each corresponding to either a single post, or an error.
    """
    self.log.info(f"Attempting to batch reply to birdsite user {target_handle}")

    # Accept handles with or without a leading @.
    if "@" in target_handle:
        base_target_handle = target_handle[1:]
    else:
        base_target_handle = target_handle

    records: List[OutputRecord] = []
    statuses = self.api.user_timeline(screen_name=base_target_handle, count=lookback_limit)
    self.log.debug(f"Retrieved {len(statuses)} statuses.")
    for i, status in enumerate(statuses):
        self.log.debug(f"Processing status {i} of {len(statuses)}")
        status_id = status.id

        # find possible replies we've made.
        # the 10 * lookback_limit is a guess,
        # might not be enough and I'm not sure we can guarantee it is.
        our_statuses = self.api.user_timeline(since_id=status_id,
                                              count=lookback_limit * 10)
        in_reply_to_ids = list(map(lambda x: x.in_reply_to_status_id, our_statuses))
        if status_id not in in_reply_to_ids:
            # the twitter API and tweepy will attempt to give us the truncated text of the
            # message if we don't do this roundabout thing.
            encoded_status_text = self.api.get_status(status_id,
                                                      tweet_mode="extended")._json["full_text"]

            # Unescape HTML entities (&amp; etc.) the API returns in text.
            status_text = html.unescape(encoded_status_text)
            message = callback(message_id=status_id, message=status_text, extra_keys={})
            full_message = f"@{base_target_handle} {message}"
            self.log.info(f"Trying to reply with {message} to status {status_id} "
                          f"from {target_handle}.")
            try:
                new_status = self.api.update_status(status=full_message,
                                                    in_reply_to_status_id=status_id)
                records.append(TweetRecord(record_data={
                    "tweet_id": new_status.id,
                    "in_reply_to": f"@{base_target_handle}",
                    "in_reply_to_id": status_id,
                    "text": full_message,
                }))
            except tweepy.TweepError as e:
                records.append(self.handle_error(
                    message=(f"Bot {self.bot_name} encountered an error when "
                             f"trying to reply to {status_id} with {message}:\n{e}\n"),
                    error=e))
        else:
            self.log.info(f"Not replying to status {status_id} from {target_handle} "
                          f"- we already replied.")

    return records
Send DM to owner if something happens.
:param message: message to send to owner.
:returns: None.
def send_dm_sos(self, message: str) -> None:
    """
    Send DM to owner if something happens.

    :param message: message to send to owner.
    :returns: None.
    """
    # Guard clause: without an owner handle there is nobody to DM.
    if not self.owner_handle:
        self.lerror("Can't send DM SOS, no owner handle.")
        return

    try:
        # twitter changed the DM API and tweepy (as of 2019-03-08)
        # has not adapted.
        # fixing with
        # https://github.com/tweepy/tweepy/issues/1081#issuecomment-423486837
        owner_id = self.api.get_user(screen_name=self.owner_handle).id
        event = {
            "event": {
                "type": "message_create",
                "message_create": {
                    "target": {
                        "recipient_id": f"{owner_id}",
                    },
                    "message_data": {
                        "text": message
                    }
                }
            }
        }
        self._send_direct_message_new(event)
    except tweepy.TweepError as de:
        self.lerror(f"Error trying to send DM about error!: {de}")
Handle error while trying to do something.
:param message: message to send in DM regarding error.
:param e: tweepy error object.
:returns: OutputRecord containing an error.
def handle_error(
        self,
        *,
        message: str,
        error: tweepy.TweepError,
) -> OutputRecord:
    """
    Handle error while trying to do something.

    :param message: message to send in DM regarding error.
    :param error: tweepy error object.
    :returns: OutputRecord containing an error.
    """
    self.lerror(f"Got an error! {error}")

    # Handle errors if we know how.
    try:
        # NOTE(review): assumes the TweepError is indexable, with the first
        # element carrying a "code" entry — TODO confirm against the tweepy
        # version in use.
        code = error[0]["code"]
        if code in self.handled_errors:
            # NOTE(review): this looks up the handler but never calls it —
            # looks like it should be invoked; left as-is pending
            # confirmation of the intended handler signature.
            self.handled_errors[code]
        else:
            self.send_dm_sos(message)
    except Exception:
        # Unknown error shape or handler failure: fall back to DMing owner.
        self.send_dm_sos(message)

    return TweetRecord(error=error)
Handle uploading all captions.
:param media_ids: media ids of uploads to attach captions to.
:param captions: captions to be attached to those media ids.
:returns: None.
def _handle_caption_upload(
        self,
        *,
        media_ids: List[str],
        captions: Optional[List[str]],
) -> None:
    """
    Handle uploading all captions.

    :param media_ids: media ids of uploads to attach captions to.
    :param captions: captions to be attached to those media ids.
    :returns: None.
    """
    if captions is None:
        captions = []

    # Bug fix: previously captions.extend(...) mutated the caller's list
    # (and, combined with a mutable [] default upstream, leaked captions
    # between calls). Pad a copy instead of mutating the argument.
    missing = max(0, len(media_ids) - len(captions))
    padded = list(captions) + [self.default_caption_message] * missing

    for media_id, caption in zip(media_ids, padded):
        self._upload_caption(media_id=media_id, caption=caption)
:reference: https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-event.html
def _send_direct_message_new(self, messageobject: Dict[str, Dict]) -> Any:
    """
    Send a direct message via the events-based DM endpoint.

    :param messageobject: full "message_create" event payload to POST.
    :returns: whatever the bound tweepy API call returns.
    :reference: https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-event.html
    """
    headers, post_data = _buildmessageobject(messageobject)
    newdm_path = "/direct_messages/events/new.json"

    # tweepy (at this version) has no built-in binding for the events-based
    # DM endpoint, so bind a one-off API call manually.
    return tweepy.binder.bind_api(
        api=self.api,
        path=newdm_path,
        method="POST",
        require_auth=True,
    )(post_data=post_data, headers=headers)
Takes crash data via stdin and generates a Socorro signature
def main():
    """Takes crash data via stdin and generates a Socorro signature"""
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        '-v', '--verbose', help='increase output verbosity', action='store_true'
    )
    args = parser.parse_args()

    # Read the whole crash payload from stdin, run signature generation,
    # and pretty-print the result as JSON.
    raw_payload = sys.stdin.read()
    generator = SignatureGenerator(debug=args.verbose)
    result = generator.generate(json.loads(raw_payload))
    print(json.dumps(result, indent=2))
Update internal configuration dict with config and recheck
def add_config(self, config):
    """
    Update internal configuration dict with config and recheck.

    :param config: dict of configuration options to merge in.
    :raises Exception: if a fixed attribute is set here, or if
        LD_LIBRARY_PATH is set in the environment.
    """
    for attr in self.__fixed_attrs:
        if attr in config:
            # Bug fix: the message was previously passed as
            # Exception("...%s...", attr) — a tuple, never interpolated.
            raise Exception("cannot set '%s' outside of init" % attr)

    # pre checkout
    stages = config.get('stages', None)
    if stages:
        self.stages = stages

    # maybe pre checkout
    # validate options
    self.__dry_run = config.get('dry_run', False)
    # normalized (lowercase) platform name, e.g. 'linux', 'windows'
    self.system = platform.system().lower()
    self.__start = config.get('start', None)
    self.__end = config.get('end', None)
    self.__only = config.get('only', None)
    self.__build_docs = config.get('build_docs', False)
    self.__chatty = config.get('chatty', False)
    self.__clean = config.get('clean', False)
    self.__devel = config.get('devel', False)
    self.__debug = config.get('debug', False)
    self.__skip_libcheck = config.get('skip_libcheck', False)
    self.__debuginfo = config.get('debuginfo', False)
    self.__release = config.get('release', False)
    self.__skip_unit = config.get('skip_unit', False)
    self.__static = config.get('static', False)
    self.__make_dash_j = int(config.get('j', 0))
    self.__target_only = config.get('target_only', None)

    # bit-width of the build; fall back to the host's width
    bits = config.get('bits', None)
    if bits:
        self.bits = int(bits)
    else:
        self.bits = self.sys_bits

    self.compiler = config.get('compiler', None)
    self.test_config = config.get('test_config', '-')
    if not self.test_config:
        self.test_config = '-'
    self.use_ccache = config.get('use_ccache', False)
    self.tmpl_engine = config.get('tmpl_engine', 'jinja2')
    self.__write_codec = config.get('write_codec', None)
    self.__codec = None

    # TODO move out of init
    if not config.get('skip_env_check', False):
        if "LD_LIBRARY_PATH" in os.environ:
            raise Exception("environment variable LD_LIBRARY_PATH is set")

    self.check_config()
called after config was modified to sanity check
raises on error
def check_config(self):
    """
    called after config was modified to sanity check
    raises on error
    """
    # sanity checks - no config access past here
    if not getattr(self, 'stages', None):
        raise NotImplementedError("member variable 'stages' must be defined")

    # start at stage
    if self.__start:
        self.__stage_start = self.find_stage(self.__start)
    else:
        self.__stage_start = 0

    # end at stage (inclusive, hence the +1 on the found index)
    if self.__end:
        self.__stage_end = self.find_stage(self.__end) + 1
        self.opt_end = self.__end
    else:
        self.__stage_end = len(self.stages)

    # only stage — mutually exclusive with start/end
    if self.__only:
        if self.__start or self.__end:
            raise Exception(
                "stage option 'only' cannot be used with start or end")
        self.__stage_start = self.find_stage(self.__only)
        self.__stage_end = self.__stage_start + 1

    if self.__devel:
        # NOTE(review): this assignment is a no-op (__devel is already
        # truthy here) — presumably a placeholder for devel-only setup.
        self.__devel = True
        # force deploy skip
        if self.__stage_end >= len(self.stages):
            self.status_msg("removing deploy stage for development build")
            # XXX self.__stage_end = self.__stage_end - 1

    if self.stage_start >= self.stage_end:
        raise Exception("start and end produce no stages")

    # bit-width sanity: only 32/64 supported, and 64 requires a 64-bit host
    if self.bits not in [32, 64]:
        raise Exception(
            "can't do a %d bit build: unknown build process" % self.bits)
    if self.bits == 64 and not self.is_64b:
        raise Exception(
            "this machine is not 64 bit, cannot perform 64 bit build")

    # pick a compiler tag per platform
    if self.system == 'windows':
        self.compilertag = 'vc10'
    elif self.system == 'linux':
        self.compilertag = 'gcc44'
    else:
        raise RuntimeError("can't decide compilertag on " + self.system)

    # On Windows, pick the MSVC runtime (MT=static, MD=dynamic; 'd' suffix
    # for debug) and derive the build suffix from it.
    self.build_suffix = ''
    if not self.is_unixy:
        if self.__static:
            runtime = 'MT'
        else:
            runtime = 'MD'
        if self.__release:
            self.configuration_name = 'Release'
        else:
            runtime += 'd'
            self.configuration_name = 'Debug'
        self.build_suffix = '-' + runtime
        self.runtime = runtime
    else:
        # sentinel values: these must never be used on unix-like systems
        self.configuration_name = 'CFNAME_INVALID_ON_LINUX'
        self.runtime = 'RUNTIME_INVALID_ON_LINUX'

    if self.test_config != '-':
        self.test_config = os.path.abspath(self.test_config)

    # split version into major.minor.patch[.build]
    if self.version:
        ver = self.version.split('.')
        self.version_major = int(ver[0])
        self.version_minor = int(ver[1])
        self.version_patch = int(ver[2])
        if(len(ver) == 4):
            self.version_build = int(ver[3])
called after Definition was loaded to sanity check
raises on error
def check_definition(self):
    """
    called after Definition was loaded to sanity check
    raises on error
    """
    # Default the write codec to the loaded definition's data extension.
    if not self.write_codec:
        self.__write_codec = self.defined.data_ext

    # TODO need to add back a class scope target limited for subprojects with sub target sets
    targets = self.get_defined_targets()
    if self.__target_only:
        # Restrict the build to a single, known target.
        if self.__target_only not in targets:
            raise RuntimeError("invalid target '%s'" % self.__target_only)
        self.targets = [self.__target_only]
    else:
        self.targets = targets
find all matching data files in search_path
returns array of tuples (codec_object, filename)
def find_datafile(self, name, search_path=None):
    """
    Find all matching data files on the search path.

    :param name: base name of the datafile to find.
    :param search_path: directories to search; defaults to self.define_dir.
    :returns: array of tuples (codec_object, filename).
    """
    # Fall back to the project's definition directory when no path given.
    search_path = search_path or self.define_dir
    return codec.find_datafile(name, search_path)
find datafile and load them from codec
def load_datafile(self, name, search_path=None, **kwargs):
    """
    Find a datafile on the search path and load it via its codec.

    :param name: base name of the datafile to load.
    :param search_path: directories to search; defaults to self.define_dir.
    :param kwargs: passed through to codec.load_datafile.
    """
    # Fall back to the project's definition directory when no path given.
    search_path = search_path or self.define_dir
    self.debug_msg('loading datafile %s from %s' % (name, str(search_path)))
    return codec.load_datafile(name, search_path, **kwargs)
run all configured stages
def run(self):
    """ run all configured stages """
    self.sanity_check()
    # TODO - check for devel
    # if not self.version:
    # raise Exception("no version")
    # XXX check attr exist
    if not self.release_environment:
        raise Exception("no instance name")

    time_start = time.time()
    cwd = os.getcwd()
    who = getpass.getuser()
    self._make_outdirs()

    # Build a human-readable description of this run for announcements.
    append_notices = ""
    if hasattr(self, 'opt_end'):
        append_notices = ". shortened push, only to %s stage" % self.opt_end
    if self.is_devel:
        append_notices += ". devel build"
    if hasattr(self, 'append_notices'):
        append_notices += self.append_notices

    line = "%s %s %s by %s%s" % (
        sys.argv[0], self.version, self.release_environment, who, append_notices)
    b = 'deploy begin %s' % line
    e = 'deploy done %s' % line

    if self.chatty:
        self.alact(b)

    # Run each selected stage in order; an exception aborts the run and the
    # finally block announces how far we got before re-raising.
    ok = False
    stage_passed = None
    try:
        for stage in self.stages[self.stage_start:self.stage_end]:
            self.debug_msg("stage %s starting" % (stage,))
            # stages are methods on self, looked up by name
            getattr(self, stage)()
            # each stage may chdir; restore the original working directory
            self.chdir(cwd)
            stage_passed = stage
            self.debug_msg("stage %s complete" % (stage,))
        ok = True
    finally:
        if not ok:
            if self.chatty:
                if not stage_passed:
                    self.alact(
                        'deploy failed %s. completed no stages' % line)
                else:
                    self.alact('deploy failed %s. completed %s' %
                               (line, stage_passed))

    self.status_msg('[OK]')
    if self.chatty:
        self.alact('%s in %0.3f sec' % (e, time.time() - time_start))
    return 0
Return POSIX timestamp as float.
>>> timestamp(datetime.datetime.now()) > 1494638812
True
>>> timestamp(datetime.datetime.now()) % 1 > 0
True
def timestamp(dt):
    """
    Return POSIX timestamp as float.

    >>> timestamp(datetime.datetime.now()) > 1494638812
    True
    >>> timestamp(datetime.datetime.now()) % 1 > 0
    True
    """
    if dt.tzinfo is not None:
        # Aware datetimes: subtract the (aware) epoch directly.
        return (dt - _EPOCH).total_seconds()

    # Naive datetimes: interpret in local time via mktime. The trailing
    # -1 fields let the C library fill in weekday/yearday/DST, and the
    # microseconds are re-added as a fractional part.
    fields = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              -1, -1, -1)
    return time.mktime(fields) + dt.microsecond / 1e6
Rate limit a function.
def rate_limited(max_per_hour: int, *args: Any) -> Callable[..., Any]:
    """
    Rate limit a function.

    :param max_per_hour: maximum number of calls allowed per hour.
    :param args: passed through to util.rate_limited unchanged.
    :returns: the rate-limited callable produced by util.rate_limited.
    """
    return util.rate_limited(max_per_hour, *args)
Repair a corrupted IterationRecord with a specific known issue.
def _repair(record: Dict[str, Any]) -> Dict[str, Any]:
    """Repair a corrupted IterationRecord with a specific known issue."""
    # Only IterationRecords with output_records can carry the corruption.
    if record.get("_type", None) != "IterationRecord":
        return record
    output_records = record.get("output_records")
    if output_records is None:
        return record

    entry = output_records.get("birdsite")
    if not (isinstance(entry, dict) and entry.get("_type") == "IterationRecord"):
        return record

    # The bug nested IterationRecords inside each other; walk down to the
    # innermost record, bailing out (unchanged) if the chain is malformed.
    while entry.get("_type") == "IterationRecord":
        nested = entry.get("output_records")
        if nested is None:
            return record
        entry = nested.get("birdsite")
        if entry is None:
            return record

    # Tag the innermost record with its proper type.
    entry["_type"] = TweetRecord.__name__

    # Lift extra keys to the top-level record, just in case; existing
    # top-level keys win.
    if "extra_keys" in entry:
        merged = record.get("extra_keys", {})
        for key, value in entry["extra_keys"].items():
            merged.setdefault(key, value)
        record["extra_keys"] = merged
        del entry["extra_keys"]

    # Pull the corrected record up to the top level, fixing the corruption.
    output_records["birdsite"] = entry
    record["output_records"] = output_records

    return record
Get object back from dict.
def from_dict(cls, obj_dict: Dict[str, Any]) -> "IterationRecord":
    """Get object back from dict."""
    instance = cls()
    # Restore every serialized attribute directly onto the instance.
    instance.__dict__.update(obj_dict)
    return instance
Post text-only to all outputs.
:param args: positional arguments.
expected: text to send as message in post.
keyword text argument is preferred over this.
:param text: text to send as message in post.
:returns: new record of iteration
def send(
        self,
        *args: str,
        text: Optional[str] = None,
) -> IterationRecord:
    """
    Post text-only to all outputs.

    :param args: positional arguments.
        expected: text to send as message in post.
        keyword text argument is preferred over this.
    :param text: text to send as message in post.
    :returns: new record of iteration
    """
    # Prefer the keyword argument; fall back to the first positional.
    if text is not None:
        final_text = text
    else:
        if len(args) == 0:
            raise BotSkeletonException(("Please provide text either as a positional arg or "
                                        "as a keyword arg (text=TEXT)"))
        else:
            final_text = args[0]

    # TODO there could be some annotation stuff here.
    record = IterationRecord(extra_keys=self.extra_keys)
    # Fan the message out to each active output; inactive ones are skipped.
    for key, output in self.outputs.items():
        if output["active"]:
            self.log.info(f"Output {key} is active, calling send on it.")
            entry: Any = output["obj"]
            output_result = entry.send(text=final_text)
            record.output_records[key] = output_result
        else:
            self.log.info(f"Output {key} is inactive. Not sending.")

    self.history.append(record)
    self.update_history()
    return record
Post with one media item to all outputs.
Provide filename so outputs can handle their own uploads.
:param args: positional arguments.
expected:
text to send as message in post.
file to be uploaded.
caption to be paired with file.
keyword arguments preferred over positional ones.
:param text: text to send as message in post.
:param file: file to be uploaded in post.
:param caption: caption to be uploaded alongside file.
:returns: new record of iteration
def send_with_one_media(
        self,
        *args: str,
        text: Optional[str] = None,
        file: Optional[str] = None,
        caption: Optional[str] = None,
) -> IterationRecord:
    """
    Post with one media item to all outputs.
    Provide filename so outputs can handle their own uploads.

    :param args: positional arguments.
        expected: text to send as message in post, file to be uploaded,
        caption to be paired with file.
        keyword arguments preferred over positional ones.
    :param text: text to send as message in post.
    :param file: file to be uploaded in post.
    :param caption: caption to be uploaded alongside file.
    :returns: new record of iteration
    """
    # text: keyword preferred, else first positional.
    final_text = text
    if final_text is None:
        if len(args) < 1:
            raise TypeError(("Please provide either positional argument "
                             "TEXT, or keyword argument text=TEXT"))
        else:
            final_text = args[0]

    # file: keyword preferred, else second positional.
    final_file = file
    if final_file is None:
        if len(args) < 2:
            raise TypeError(("Please provide either positional argument "
                             "FILE, or keyword argument file=FILE"))
        else:
            final_file = args[1]

    # this arg is ACTUALLY optional,
    # so the pattern is changed.
    final_caption = caption
    if final_caption is None:
        if len(args) >= 3:
            final_caption = args[2]

    # TODO more error checking like this.
    if final_caption is None or final_caption == "":
        captions: List[str] = []
    else:
        captions = [final_caption]

    record = IterationRecord(extra_keys=self.extra_keys)
    # Fan out to each active output; inactive ones are skipped.
    for key, output in self.outputs.items():
        if output["active"]:
            self.log.info(f"Output {key} is active, calling media send on it.")
            entry: Any = output["obj"]
            output_result = entry.send_with_media(text=final_text,
                                                  files=[final_file],
                                                  captions=captions)
            record.output_records[key] = output_result
        else:
            self.log.info(f"Output {key} is inactive. Not sending with media.")

    self.history.append(record)
    self.update_history()
    return record
Post with several media.
Provide filenames so outputs can handle their own uploads.
:param args: positional arguments.
expected:
text to send as message in post.
files to be uploaded.
captions to be paired with files.
keyword arguments preferred over positional ones.
:param text: text to send as message in post.
:param files: files to be uploaded in post.
:param captions: captions to be uploaded alongside files.
:returns: new record of iteration
def send_with_many_media(
        self,
        *args: str,
        text: Optional[str] = None,
        files: Optional[List[str]] = None,
        captions: Optional[List[str]] = None,
) -> IterationRecord:
    """
    Post with several media.
    Provide filenames so outputs can handle their own uploads.

    :param args: positional arguments.
        expected: text to send as message in post, then files to be uploaded.
        keyword arguments preferred over positional ones.
    :param text: text to send as message in post.
    :param files: files to be uploaded in post.
    :param captions: captions to be uploaded alongside files.
    :returns: new record of iteration
    """
    # Bug fix: captions previously defaulted to a shared mutable [] that
    # downstream outputs mutate (padding with default captions), leaking
    # captions between calls. Default to None and normalize here.
    if captions is None:
        captions = []

    if text is None:
        if len(args) < 1:
            raise TypeError(("Please provide either required positional argument "
                             "TEXT, or keyword argument text=TEXT"))
        else:
            final_text = args[0]
    else:
        final_text = text

    if files is None:
        if len(args) < 2:
            raise TypeError(("Please provide either positional argument "
                             "FILES, or keyword argument files=FILES"))
        else:
            final_files = list(args[1:])
    else:
        final_files = files

    # captions have never been permitted to be provided as positional args
    # (kind of backed myself into that)
    # so they just get defaulted and it's fine.
    record = IterationRecord(extra_keys=self.extra_keys)
    for key, output in self.outputs.items():
        if output["active"]:
            self.log.info(f"Output {key} is active, calling media send on it.")
            entry: Any = output["obj"]
            output_result = entry.send_with_media(text=final_text,
                                                  files=final_files,
                                                  captions=captions)
            record.output_records[key] = output_result
        else:
            self.log.info(f"Output {key} is inactive. Not sending with media.")

    self.history.append(record)
    self.update_history()
    return record
Performs batch reply on target accounts.
Looks up the recent messages of the target user,
applies the callback,
and replies with
what the callback generates.
:param callback: a callback taking a message id,
message contents,
and optional extra keys,
and returning a message string.
:param targets: a dictionary of service names to target handles
(currently only one per service).
:param lookback_limit: a lookback limit of how many messages to consider (optional).
:param per_service_lookback: and a dictionary of service names to per-service
lookback limits.
takes preference over lookback_limit (optional).
:returns: new record of iteration
:raises BotSkeletonException: raises BotSkeletonException if batch reply fails or cannot be
performed
def perform_batch_reply(
        self,
        *,
        callback: Callable[..., str] = None,
        target_handles: Dict[str, str] = None,
        lookback_limit: int = 20,
        per_service_lookback_limit: Dict[str, int] = None,
) -> IterationRecord:
    """
    Performs batch reply on target accounts.
    Looks up the recent messages of the target user, applies the callback,
    and replies with what the callback generates.

    :param callback: a callback taking a message id, message contents,
        and optional extra keys, and returning a message string.
    :param target_handles: a dictionary of service names to target handles
        (currently only one per service).
    :param lookback_limit: a lookback limit of how many messages to consider (optional).
    :param per_service_lookback_limit: a dictionary of service names to
        per-service lookback limits. takes preference over lookback_limit (optional).
    :returns: new record of iteration
    :raises BotSkeletonException: raises BotSkeletonException if batch reply fails or cannot be
        performed
    """
    # Cleanup: the raise statements previously ended in a stray adjacent ""
    # literal ("...provided.""" ) — harmless concatenation, now removed.
    if callback is None:
        raise BotSkeletonException("Callback must be provided.")
    if target_handles is None:
        raise BotSkeletonException("Targets must be provided.")
    if lookback_limit > self.lookback_limit:
        raise BotSkeletonException(
            f"Lookback_limit cannot exceed {self.lookback_limit}, " +
            f"but it was {lookback_limit}"
        )

    # use per-service lookback dict for convenience in a moment.
    # if necessary, use lookback_limit to fill it out.
    lookback_dict = per_service_lookback_limit
    if lookback_dict is None:
        lookback_dict = {}

    record = IterationRecord(extra_keys=self.extra_keys)
    for key, output in self.outputs.items():
        if key not in lookback_dict:
            lookback_dict[key] = lookback_limit

        if target_handles.get(key, None) is None:
            self.log.info(f"No target for output {key}, skipping this output.")
        elif not output.get("active", False):
            self.log.info(f"Output {key} is inactive. Not calling batch reply.")
        else:
            # Cleanup: this branch was `elif output["active"]:`, which is
            # always true once the branch above rules out inactive outputs.
            self.log.info(f"Output {key} is active, calling batch reply on it.")
            entry: Any = output["obj"]
            output_result = entry.perform_batch_reply(callback=callback,
                                                      target_handle=target_handles[key],
                                                      lookback_limit=lookback_dict[key],
                                                      )
            record.output_records[key] = output_result

    self.history.append(record)
    self.update_history()
    return record
Go to sleep for the duration of self.delay.
:returns: None
def nap(self) -> None:
    """
    Go to sleep for the duration of self.delay.

    :returns: None
    """
    self.log.info(f"Sleeping for {self.delay} seconds.")
    # Sleep in one-second increments so the progress bar can tick.
    for _ in progress.bar(range(self.delay)):
        time.sleep(1)
Store some extra value in the messaging storage.
:param key: key of dictionary entry to add.
:param value: value of dictionary entry to add.
:returns: None
def store_extra_info(self, key: str, value: Any) -> None:
    """
    Store some extra value in the messaging storage.

    :param key: key of dictionary entry to add.
    :param value: value of dictionary entry to add.
    :returns: None
    """
    # extra_keys is attached to each IterationRecord when posts are sent.
    self.extra_keys[key] = value
Store several extra values in the messaging storage.
:param d: dictionary entry to merge with current self.extra_keys.
:returns: None
def store_extra_keys(self, d: Dict[str, Any]) -> None:
    """
    Store several extra values in the messaging storage.

    :param d: dictionary entry to merge with current self.extra_keys.
    :returns: None
    """
    # Merge with values from d overriding existing keys, rebinding a fresh
    # dict so no caller-held reference is shared.
    self.extra_keys = {**self.extra_keys, **d}
Update messaging history on disk.
:returns: None
def update_history(self) -> None:
    """
    Update messaging history on disk.

    :returns: None
    """
    self.log.debug(f"Saving history. History is: \n{self.history}")
    jsons = []
    for item in self.history:
        json_item = item.__dict__
        # Convert sub-entries into JSON as well.
        json_item["output_records"] = self._parse_output_records(item)
        jsons.append(json_item)

    # Touch the file first if it doesn't exist.
    # NOTE(review): opening with "w" below creates the file anyway, so this
    # touch looks redundant — confirm before removing.
    if not path.isfile(self.history_filename):
        open(self.history_filename, "a+").close()

    with open(self.history_filename, "w") as f:
        # default= lets nested record objects serialize via their __dict__.
        json.dump(jsons, f, default=lambda x: x.__dict__.copy(), sort_keys=True, indent=4)
        f.write("\n")
Load messaging history from disk to self.
:returns: List of iteration records comprising history.
def load_history(self) -> List["IterationRecord"]:
    """
    Load messaging history from disk to self.

    :returns: List of iteration records comprising history.
    """
    if path.isfile(self.history_filename):
        with open(self.history_filename, "r") as f:
            try:
                dicts = json.load(f)
            except json.decoder.JSONDecodeError as e:
                # Corrupt JSON: keep a .bak copy for forensics, start fresh.
                self.log.error(f"Got error \n{e}\n decoding JSON history, overwriting it.\n"
                               f"Former history available in {self.history_filename}.bak")
                copyfile(self.history_filename, f"{self.history_filename}.bak")
                return []

            history: List[IterationRecord] = []
            for hdict_pre in dicts:
                if "_type" in hdict_pre and hdict_pre["_type"] == IterationRecord.__name__:
                    # repair any corrupted entries
                    hdict = _repair(hdict_pre)
                    record = IterationRecord.from_dict(hdict)
                    history.append(record)

                # Be sure to handle legacy tweetrecord-only histories.
                # Assume anything without our new _type (which should have been there from the
                # start, whoops) is a legacy history.
                else:
                    item = IterationRecord()

                    # Lift extra keys up to upper record (if they exist).
                    extra_keys = hdict_pre.pop("extra_keys", {})
                    item.extra_keys = extra_keys

                    hdict_obj = TweetRecord.from_dict(hdict_pre)

                    # Lift timestamp up to upper record.
                    item.timestamp = hdict_obj.timestamp

                    item.output_records["birdsite"] = hdict_obj
                    history.append(item)

            self.log.debug(f"Loaded history:\n {history}")
            return history
    else:
        return []
Set up all output methods. Provide them credentials and anything else they need.
def _setup_all_outputs(self) -> None:
    """Set up all output methods. Provide them credentials and anything else they need."""
    # An output is considered configured iff a credentials_<name> directory
    # exists under our secrets dir.
    for name, skeleton in self.outputs.items():
        cred_dir = path.join(self.secrets_dir, f"credentials_{name}")

        # special-case birdsite for historical reasons.
        legacy_birdsite = (
            name == "birdsite"
            and not path.isdir(cred_dir)
            and path.isfile(path.join(self.secrets_dir, "CONSUMER_KEY"))
        )
        if legacy_birdsite:
            cred_dir = self.secrets_dir

        if not path.isdir(cred_dir):
            continue

        skeleton["active"] = True
        handler: Any = skeleton["obj"]
        handler.cred_init(secrets_dir=cred_dir, log=self.log, bot_name=self.bot_name)
        skeleton["obj"] = handler
        self.outputs[name] = skeleton
Parse output records into dicts ready for JSON.
def _parse_output_records(self, item: IterationRecord) -> Dict[str, Any]:
    """Parse output records into dicts ready for JSON."""
    parsed: Dict[str, Any] = {}
    for name, record in item.output_records.items():
        # dicts and lists are already JSON-serializable; anything else is
        # assumed to be a record object whose __dict__ carries the payload.
        parsed[name] = record if isinstance(record, (dict, list)) else record.__dict__
    return parsed
Create the directory of a fully qualified file name if it does not exist.
:param fname: File name
:type fname: string
Equivalent to these Bash shell commands:
.. code-block:: bash
$ fname="${HOME}/mydir/myfile.txt"
$ dir=$(dirname "${fname}")
$ mkdir -p "${dir}"
:param fname: Fully qualified file name
:type fname: string
def make_dir(fname):
    """
    Create the directory of a fully qualified file name if it does not exist.

    Equivalent to these Bash shell commands:

    .. code-block:: bash

        $ fname="${HOME}/mydir/myfile.txt"
        $ dir=$(dirname "${fname}")
        $ mkdir -p "${dir}"

    :param fname: Fully qualified file name
    :type fname: string
    """
    file_path, _ = os.path.split(os.path.abspath(fname))
    # exist_ok=True avoids the check-then-create race of the previous
    # exists()/makedirs() pair and makes the call idempotent (mirrors mkdir -p).
    os.makedirs(file_path, exist_ok=True)
r"""
Fix potential problems with a Microsoft Windows file name.
Superfluous backslashes are removed and unintended escape sequences are
converted to their equivalent (presumably correct and intended)
representation, for example :code:`r'\\\\x07pps'` is transformed to
:code:`r'\\\\\\\\apps'`. A file name is considered network shares if the
file does not include a drive letter and they start with a double backslash
(:code:`'\\\\\\\\'`)
:param fname: File name
:type fname: string
:rtype: string
def normalize_windows_fname(fname, _force=False):
    r"""
    Fix potential problems with a Microsoft Windows file name.

    Superfluous backslashes are removed and unintended escape sequences are
    converted to their equivalent (presumably correct and intended)
    representation, for example :code:`r'\\\\x07pps'` is transformed to
    :code:`r'\\\\\\\\apps'`. A file name is considered network shares if the
    file does not include a drive letter and they start with a double backslash
    (:code:`'\\\\\\\\'`)

    :param fname: File name
    :type fname: string
    :rtype: string
    """
    # No-op on non-Windows platforms unless forced (the _force flag exists for tests).
    if (platform.system().lower() != "windows") and (not _force):  # pragma: no cover
        return fname
    # Map control characters produced by unintended escape sequences
    # (e.g. the "\a" inside "C:\appdata") back to escaped backslash form.
    escape_map = {
        "\x07": r"\\a",
        "\x08": r"\\b",
        "\x0C": r"\\f",
        "\x0A": r"\\n",
        "\x0D": r"\\r",
        "\x09": r"\\t",
        "\x0B": r"\\v",
    }
    is_network_share = fname.startswith(r"\\")
    ret = "".join(escape_map.get(char, char) for char in os.path.normpath(fname))
    # Collapse runs of doubled backslashes until the string stabilizes.
    prev = None
    while prev != ret:
        prev, ret = ret, ret.replace(r"\\\\", r"\\")
    ret = ret.replace(r"\\\\", r"\\")
    # Restore the leading double backslash of a network share.
    if is_network_share:
        ret = r"\\" + ret.lstrip(r"\\")
    return ret
Enforce line separators to be the right one depending on platform.
def _homogenize_linesep(line):
    """Enforce line separators to be the right one depending on platform."""
    # Hide the platform separator behind a unique sentinel, strip every stray
    # CR/LF that remains, then restore the protected separators.
    sentinel = str(uuid.uuid4())
    protected = line.replace(os.linesep, sentinel)
    stripped = protected.replace("\n", "").replace("\r", "")
    return stripped.replace(sentinel, os.linesep)
Process line range tokens.
def _proc_token(spec, mlines):
    """
    Process line range tokens.

    :param spec: comma-separated line numbers and ranges, e.g. ``"1,3-5,7-"``.
        An open-ended range ("N-") extends to *mlines*.
    :param mlines: total number of lines (upper bound for open-ended ranges).
    :returns: expanded, ascending list of 1-based line numbers.
    :raises RuntimeError: if the specification is malformed or the resulting
        line numbers are not in ascending order.
    """
    spec = spec.strip().replace(" ", "")
    # Any character outside digits, '-' and ',' invalidates the spec.
    regexp = re.compile(r".*[^0123456789\-,]+.*")
    tokens = spec.split(",")
    empty_token = any(not item for item in tokens)
    if ("--" in spec) or ("-," in spec) or (",-" in spec) or empty_token or regexp.match(spec):
        raise RuntimeError("Argument `lrange` is not valid")
    lines = []
    for token in tokens:
        if token.count("-") > 1:
            raise RuntimeError("Argument `lrange` is not valid")
        if "-" in token:
            subtokens = token.split("-")
            # Bug fix: a spec like "-5" used to crash with ValueError from
            # int(""). A range must have an explicit lower bound; only the
            # upper bound may be omitted ("5-" means "to end of file").
            if not subtokens[0]:
                raise RuntimeError("Argument `lrange` is not valid")
            lmin = int(subtokens[0])
            lmax = int(subtokens[1]) if subtokens[1] else mlines
            lines.extend(range(lmin, lmax + 1))
        else:
            lines.append(int(token))
    # Line numbers must be supplied in ascending order.
    if lines != sorted(lines):
        raise RuntimeError("Argument `lrange` is not valid")
    return lines
r"""
Return a Python source file formatted in reStructuredText.
.. role:: bash(code)
:language: bash
:param fname: File name, relative to environment variable
:bash:`PKG_DOC_DIR`
:type fname: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
other functions can be used for debugging
:type fpointer: function object
:param lrange: Line range to include, similar to Sphinx
`literalinclude <http://www.sphinx-doc.org/en/master/usage
/restructuredtext/directives.html
#directive-literalinclude>`_ directive
:type lrange: string
:param sdir: Source file directory. If None the :bash:`PKG_DOC_DIR`
environment variable is used if it is defined, otherwise
the directory where the module is located is used
:type sdir: string
For example:
.. code-block:: python
def func():
\"\"\"
This is a docstring. This file shows how to use it:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('func_example.py', cog.out)
.. =]=
.. code-block:: python
# func_example.py
if __name__ == '__main__':
func()
.. =[=end=]=
\"\"\"
return 'This is func output'
def incfile(fname, fpointer, lrange=None, sdir=None):
    r"""
    Return a Python source file formatted in reStructuredText.

    .. role:: bash(code)
        :language: bash

    :param fname: File name, relative to environment variable
                  :bash:`PKG_DOC_DIR`
    :type fname: string

    :param fpointer: Output function pointer. Normally is :code:`cog.out` but
                     other functions can be used for debugging
    :type fpointer: function object

    :param lrange: Line range to include, similar to Sphinx
                   `literalinclude <http://www.sphinx-doc.org/en/master/usage
                   /restructuredtext/directives.html
                   #directive-literalinclude>`_ directive
    :type lrange: string

    :param sdir: Source file directory. If None the :bash:`PKG_DOC_DIR`
                 environment variable is used if it is defined, otherwise
                 the directory where the module is located is used
    :type sdir: string

    For example:

    .. code-block:: python

        def func():
            \"\"\"
            This is a docstring. This file shows how to use it:

            .. =[=cog
            .. import docs.support.incfile
            .. docs.support.incfile.incfile('func_example.py', cog.out)
            .. =]=
            .. code-block:: python

                # func_example.py
                if __name__ == '__main__':
                    func()

            .. =[=end=]=
            \"\"\"
            return 'This is func output'
    """
    # pylint: disable=R0914
    # Read file
    # Directory resolution order: explicit sdir argument, then the
    # PKG_DOC_DIR environment variable, then this module's own directory.
    file_dir = (
        sdir
        if sdir
        else os.environ.get("PKG_DOC_DIR", os.path.abspath(os.path.dirname(__file__)))
    )
    fname = os.path.join(file_dir, fname)
    with open(fname, "r") as fobj:
        lines = fobj.readlines()
    # Eliminate spurious carriage returns in Microsoft Windows
    lines = [_homogenize_linesep(line) for line in lines]
    # Parse line specification
    # lrange uses 1-based line numbers; default is to include every line.
    inc_lines = (
        _proc_token(lrange, len(lines)) if lrange else list(range(1, len(lines) + 1))
    )
    # Produce output
    fpointer(".. code-block:: python" + os.linesep)
    fpointer(os.linesep)
    for num, line in enumerate(lines):
        if num + 1 in inc_lines:
            # Non-blank lines are indented into the code block with tabs
            # expanded; blank lines are emitted as bare separators.
            fpointer(
                " " + line.replace("\t", " ").rstrip() + os.linesep
                if line.strip()
                else os.linesep
            )
    fpointer(os.linesep)
Print STDOUT of a shell command formatted in reStructuredText.
This is a simplified version of :py:func:`pmisc.term_echo`.
:param command: Shell command (relative to **mdir** if **env** is not given)
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param mdir: Module directory, used if **env** is not given
:type mdir: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
:param env: Environment dictionary. If not provided, the environment
dictionary is the key "PKG_BIN_DIR" with the value of the
**mdir**
:type env: dictionary
For example::
.. This is a reStructuredText file snippet
.. [[[cog
.. import os, sys
.. from docs.support.term_echo import term_echo
.. file_name = sys.modules['docs.support.term_echo'].__file__
.. mdir = os.path.realpath(
.. os.path.dirname(
.. os.path.dirname(os.path.dirname(file_name))
.. )
.. )
.. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]
.. code-block:: console
$ ${PKG_BIN_DIR}/build_docs.py -h
usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]
...
$
.. ]]]
def ste(command, nindent, mdir, fpointer, env=None):
    """
    Print STDOUT of a shell command formatted in reStructuredText.

    This is a simplified version of :py:func:`pmisc.term_echo`.

    :param command: Shell command (relative to **mdir** if **env** is not given)
    :type command: string
    :param nindent: Indentation level
    :type nindent: integer
    :param mdir: Module directory, used if **env** is not given
    :type mdir: string
    :param fpointer: Output function pointer. Normally is :code:`cog.out` but
                     :code:`print` or other functions can be used for debugging
    :type fpointer: function object
    :param env: Environment dictionary. If not provided, the environment
                dictionary is the key "PKG_BIN_DIR" with the value of the
                **mdir**
    :type env: dictionary
    """
    bin_dir_token = LDELIM + "PKG_BIN_DIR" + RDELIM
    if env is None:
        # Prefix the command with the ${PKG_BIN_DIR} placeholder and supply
        # an environment mapping that placeholder to mdir.
        command = bin_dir_token + "{sep}{cmd}".format(sep=os.path.sep, cmd=command)
        env = {"PKG_BIN_DIR": mdir}
    term_echo(command, nindent, env, fpointer)
Print STDOUT of a shell command formatted in reStructuredText.
.. role:: bash(code)
:language: bash
:param command: Shell command
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param env: Environment variable replacement dictionary. The
command is pre-processed and any environment variable
represented in the full notation (:bash:`${...}` in Linux and
OS X or :bash:`%...%` in Windows) is replaced. The dictionary
key is the environment variable name and the dictionary value
is the replacement value. For example, if **command** is
:code:`'${PYTHON_CMD} -m "x=5"'` and **env** is
:code:`{'PYTHON_CMD':'python3'}` the actual command issued
is :code:`'python3 -m "x=5"'`
:type env: dictionary
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
:param cols: Number of columns of output
:type cols: integer
def term_echo(command, nindent=0, env=None, fpointer=None, cols=60):
    """
    Print STDOUT of a shell command formatted in reStructuredText.

    .. role:: bash(code)
        :language: bash

    :param command: Shell command
    :type command: string
    :param nindent: Indentation level
    :type nindent: integer
    :param env: Environment variable replacement dictionary. The
                command is pre-processed and any environment variable
                represented in the full notation (:bash:`${...}` in Linux and
                OS X or :bash:`%...%` in Windows) is replaced. The dictionary
                key is the environment variable name and the dictionary value
                is the replacement value. For example, if **command** is
                :code:`'${PYTHON_CMD} -m "x=5"'` and **env** is
                :code:`{'PYTHON_CMD':'python3'}` the actual command issued
                is :code:`'python3 -m "x=5"'`
    :type env: dictionary
    :param fpointer: Output function pointer. Normally is :code:`cog.out` but
                     :code:`print` or other functions can be used for
                     debugging
    :type fpointer: function object
    :param cols: Number of columns of output
    :type cols: integer
    """
    # pylint: disable=R0204
    # Set argparse width so that output does not need horizontal scroll
    # bar in narrow windows or displays
    os.environ["COLUMNS"] = str(cols)
    command_int = command
    if env:
        for var, repl in env.items():
            # Replace both quoted and bare ${VAR}/%VAR% occurrences.
            command_int = command_int.replace('"' + LDELIM + var + RDELIM + '"', repl)
            command_int = command_int.replace(LDELIM + var + RDELIM, repl)
    # NOTE(review): naive split breaks arguments containing spaces (even
    # quoted ones) — shlex.split would be safer; confirm callers never pass
    # such arguments.
    tokens = command_int.split(" ")
    # Add Python interpreter executable for Python scripts on Windows since
    # the shebang does not work
    if (platform.system().lower() == "windows") and (
        tokens[0].endswith(".py")
    ):  # pragma: no cover
        tokens = [sys.executable] + tokens
    proc = subprocess.Popen(tokens, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = proc.communicate()[0]
    # Legacy Python 2/3 compatibility shim: bytes -> str on Python 3.
    if sys.hexversion >= 0x03000000:  # pragma: no cover
        stdout = stdout.decode("utf-8")
    stdout = stdout.split("\n")
    indent = nindent * " "
    # Emit the command and its captured output as an indented console block.
    fpointer(os.linesep)
    fpointer("{0}.. code-block:: console{1}".format(indent, os.linesep))
    fpointer(os.linesep)
    fpointer("{0} $ {1}{2}".format(indent, command, os.linesep))
    for line in stdout:
        line = _homogenize_linesep(line)
        if line.strip():
            fpointer(indent + " " + line.replace("\t", " ") + os.linesep)
        else:
            fpointer(os.linesep)
Return the string given by param formatted with the callers locals.
def flo(string):
    '''Return the string given by param formatted with the callers locals.'''
    frame = inspect.currentframe()
    try:
        # One frame up is the caller whose local namespace we format with.
        caller_locals = frame.f_back.f_locals
    finally:
        # Break the reference cycle frame objects participate in.
        del frame
    return string.format(**caller_locals)
Color wrapper.
Example:
>>> blue = _wrap_with('34')
>>> print(blue('text'))
\033[34mtext\033[0m
def _wrap_with(color_code):
    '''Color wrapper.

    Example:
        >>> blue = _wrap_with('34')
        >>> print(blue('text'))
        \033[34mtext\033[0m
    '''
    def inner(text, bold=False):
        '''Inner color function.'''
        # NOTE: flo() formats using THIS function's locals via frame
        # introspection, so the names `code` and `text` must not be renamed.
        code = color_code
        if bold:
            # Prepend the ANSI bold attribute, yielding "1;<code>".
            code = flo("1;{code}")
        return flo('\033[{code}m{text}\033[0m')
    return inner
Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
def clean(deltox=False):
    '''Delete temporary files not under version control.

    Args:
        deltox: If True, delete virtual environments used by tox
    '''
    basedir = dirname(__file__)

    print(cyan('delete temp files and dirs for packaging'))
    local(flo(
        'rm -rf '
        '{basedir}/.eggs/ '
        '{basedir}/utlz.egg-info/ '
        '{basedir}/dist '
        '{basedir}/README '
        '{basedir}/build/ '
    ))

    print(cyan('\ndelete temp files and dirs for editing'))
    local(flo(
        'rm -rf '
        '{basedir}/.cache '
        '{basedir}/.ropeproject '
    ))

    print(cyan('\ndelete bytecode compiled versions of the python src'))
    # cf. http://stackoverflow.com/a/30659970
    # Only the first part goes through flo(); the find-expression tail
    # contains literal braces ("{}") that must not be treated as format fields.
    local(flo('find {basedir}/utlz {basedir}/tests ') +
          '\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
          '-o -name \*.so -o -name \*.o -o -name \*.c \) '
          '-prune '
          '-exec rm -rf {} +')
    if deltox:
        # Typo fix: "virual" -> "virtual" in the user-facing message.
        print(cyan('\ndelete tox virtual environments'))
        local(flo('cd {basedir} && rm -rf .tox/'))
Install latest pythons with pyenv.
The python versions will be activated in the project's base dir.
Will skip already installed latest python versions.
def pythons():
    '''Install latest pythons with pyenv.

    The python version will be activated in the projects base dir.
    Will skip already installed latest python versions.
    '''
    # Bail out (non-zero fabric task result) when pyenv is unavailable.
    if not _pyenv_exists():
        print('\npyenv is not installed. You can install it with fabsetup '
              '(https://github.com/theno/fabsetup):\n\n ' +
              cyan('mkdir ~/repos && cd ~/repos\n '
                   'git clone https://github.com/theno/fabsetup.git\n '
                   'cd fabsetup && fab setup.pyenv -H localhost'))
        return 1
    latest_pythons = _determine_latest_pythons()
    print(cyan('\n## install latest python versions'))
    for version in latest_pythons:
        # NOTE: `version` is read by flo() via this function's locals.
        local(flo('pyenv install --skip-existing {version}'))
    print(cyan('\n## activate pythons'))
    basedir = dirname(__file__)
    latest_pythons_str = ' '.join(latest_pythons)
    local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
    # The highest (last listed) version hosts the testing/packaging tooling.
    highest_python = latest_pythons[-1]
    print(cyan(flo(
        '\n## prepare Python-{highest_python} for testing and packaging')))
    packages_for_testing = 'pytest tox'
    packages_for_packaging = 'pypandoc twine'
    local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
              'pip {packages_for_testing} {packages_for_packaging}'))
Run tox.
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r'
def tox(args=''):
    '''Run tox.

    Build package and run unit tests against several pythons.

    Args:
        args: Optional arguments passed to tox.
            Example:  fab tox:'-e py36 -r'
    '''
    basedir = dirname(__file__)
    latest_pythons = _determine_latest_pythons()
    # e.g. highest_minor_python: '3.6'
    highest_minor_python = _highest_minor(latest_pythons)
    # NOTE: `basedir`, `highest_minor_python` and `args` are read by flo()
    # via this function's locals; do not rename them.
    _local_needs_pythons(flo('cd {basedir} && '
                             'python{highest_minor_python} -m tox {args}'))
Build package and upload to pypi.
def pypi():
    '''Build package and upload to pypi.'''
    # Guard against uploading with a stale version number in setup.py.
    if query_yes_no('version updated in setup.py?'):
        print(cyan('\n## clean-up\n'))
        execute(clean)
        basedir = dirname(__file__)
        latest_pythons = _determine_latest_pythons()
        # e.g. highest_minor: '3.6'
        highest_minor = _highest_minor(latest_pythons)
        # `python`/`basedir` are read by flo() via this function's locals.
        python = flo('python{highest_minor}')
        print(cyan('\n## build package'))
        _local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
        print(cyan('\n## upload package'))
        local(flo('cd {basedir} && {python} -m twine upload dist/*'))
Check that none of the input column numbers is out of range.
(Instead of defining this function, we could depend on Python's built-in
IndexError exception for this issue, but the IndexError exception wouldn't
include line number information, which is helpful for users to find exactly
which line is the culprit.)
def chk_col_numbers(line_num, num_cols, tax_id_col, id_col, symbol_col):
    """
    Check that none of the input column numbers is out of range.

    (Instead of defining this function, we could depend on Python's built-in
    IndexError exception for this issue, but the IndexError exception wouldn't
    include line number information, which is helpful for users to find exactly
    which line is the culprit.)
    """
    # Check columns in a fixed order and report the first offender only,
    # using the user-facing option name for each column.
    checks = (
        ('tax_id_col', tax_id_col),
        ('discontinued_id_col', id_col),
        ('discontinued_symbol_col', symbol_col),
    )
    for col_name, col_index in checks:
        if col_index >= num_cols:
            raise Exception(
                'Input file line #%d: column number of %s is out of range' %
                (line_num, col_name))
Read input gene history file into the database.
Note that the arguments tax_id_col, id_col and symbol_col have been
converted into 0-based column indexes.
def import_gene_history(file_handle, tax_id, tax_id_col, id_col, symbol_col):
    """
    Read input gene history file into the database.

    Note that the arguments tax_id_col, id_col and symbol_col have been
    converted into 0-based column indexes.

    :param file_handle: iterable of tab-separated lines; '#' lines are skipped.
    :param tax_id: taxonomy id; must already exist in the Organism table.
    :param tax_id_col: 0-based column index of the taxonomy id.
    :param id_col: 0-based column index of the discontinued entrez id.
    :param symbol_col: 0-based column index of the discontinued symbol.
    :raises Exception: on blank/unknown tax_id, negative column indexes, or
        out-of-range column numbers on any line.
    """
    # Make sure that tax_id is not "" or " "
    if not tax_id or tax_id.isspace():
        raise Exception("Input tax_id is blank")
    # Make sure that tax_id exists in Organism table in the database.
    try:
        organism = Organism.objects.get(taxonomy_id=tax_id)
    except Organism.DoesNotExist:
        raise Exception('Input tax_id %s does NOT exist in Organism table. '
                        'Please add it into Organism table first.' % tax_id)
    if tax_id_col < 0 or id_col < 0 or symbol_col < 0:
        # Message fix: 0 is a valid 0-based index, so the requirement is
        # "non-negative" (the old text incorrectly said "positive").
        raise Exception(
            'tax_id_col, id_col and symbol_col must be non-negative integers')
    for line_index, line in enumerate(file_handle):
        if line.startswith('#'):  # Skip comment lines.
            continue
        fields = line.rstrip().split('\t')
        # Check input column numbers (1-based line number for error messages).
        chk_col_numbers(line_index + 1, len(fields), tax_id_col, id_col,
                        symbol_col)
        # Skip lines whose tax_id's do not match input tax_id.
        if tax_id != fields[tax_id_col]:
            continue
        entrez_id = fields[id_col]
        # If the gene already exists in database, set its "obsolete" attribute
        # to True; otherwise create a new obsolete Gene record in database.
        try:
            gene = Gene.objects.get(entrezid=entrez_id)
            if not gene.obsolete:
                gene.obsolete = True
                gene.save()
        except Gene.DoesNotExist:
            Gene.objects.create(entrezid=entrez_id, organism=organism,
                                systematic_name=fields[symbol_col],
                                obsolete=True)
Initialize what requires credentials/secret files.
def cred_init(
        self,
        *,
        secrets_dir: str,
        log: Logger,
        bot_name: str="",
) -> None:
    """
    Initialize what requires credentials/secret files.

    :param secrets_dir: dir containing ACCESS_TOKEN and, optionally,
        INSTANCE_BASE_URL.
    :param log: logger to use for log output.
    :param bot_name: name of this bot, used for labelling.
    """
    super().__init__(secrets_dir=secrets_dir, log=log, bot_name=bot_name)

    self.ldebug("Retrieving ACCESS_TOKEN ...")
    with open(path.join(self.secrets_dir, "ACCESS_TOKEN")) as token_file:
        access_token = token_file.read().strip()

    # Instance base url optional.
    self.ldebug("Looking for INSTANCE_BASE_URL ...")
    base_url_file = path.join(self.secrets_dir, "INSTANCE_BASE_URL")
    if not path.isfile(base_url_file):
        self.ldebug("Couldn't find INSTANCE_BASE_URL, defaulting to mastodon.social.")
        self.instance_base_url = "https://mastodon.social"
    else:
        with open(base_url_file) as url_file:
            self.instance_base_url = url_file.read().strip()

    self.api = mastodon.Mastodon(access_token=access_token,
                                 api_base_url=self.instance_base_url)

    # Used later to strip HTML tags out of status content.
    self.html_re = re.compile("<.*?>")
Send mastodon message.
:param text: text to send in post.
:returns: list of output records,
each corresponding to either a single post,
or an error.
def send(
        self,
        *,
        text: str,
) -> List[OutputRecord]:
    """
    Send mastodon message.

    :param text: text to send in post.
    :returns: list of output records,
        each corresponding to either a single post,
        or an error.
    """
    try:
        status = self.api.status_post(status=text)
    except mastodon.MastodonError as e:
        # Wrap API failures in an error record instead of raising.
        return [self.handle_error((f"Bot {self.bot_name} encountered an error when "
                                   f"sending post {text} without media:\n{e}\n"),
                                  e)]
    return [TootRecord(record_data={
        "toot_id": status["id"],
        "text": text
    })]
Upload media to mastodon,
and send status and media,
and captions if present.
:param text: post text.
:param files: list of files to upload with post.
:param captions: list of captions to include as alt-text with files.
:returns: list of output records,
each corresponding to either a single post,
or an error.
def send_with_media(
        self,
        *,
        text: str,
        files: List[str],
        captions: List[str]=[],
) -> List[OutputRecord]:
    """
    Upload media to mastodon,
    and send status and media,
    and captions if present.

    :param text: post text.
    :param files: list of files to upload with post.
    :param captions: list of captions to include as alt-text with files.
    :returns: list of output records,
        each corresponding to either a single post,
        or an error.
    """
    try:
        self.ldebug(f"Uploading files {files}.")
        # Bug fix: work on a copy. The old code extended `captions` in place,
        # which mutated both the caller's list and the shared default [].
        captions = list(captions) if captions is not None else []
        if len(files) > len(captions):
            # Pad with the default caption so every file has alt-text.
            captions.extend([self.default_caption_message] * (len(files) - len(captions)))
        media_dicts = []
        for media_file, caption in zip(files, captions):
            media_dicts.append(self.api.media_post(media_file, description=caption))
        self.ldebug(f"Media ids {media_dicts}")
    except mastodon.MastodonError as e:
        return [self.handle_error(
            f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n", e
        )]

    try:
        status = self.api.status_post(status=text, media_ids=media_dicts)
        self.ldebug(f"Status object from toot: {status}.")
        return [TootRecord(record_data={
            "toot_id": status["id"],
            "text": text,
            "media_ids": media_dicts,
            "captions": captions
        })]
    except mastodon.MastodonError as e:
        return [self.handle_error((f"Bot {self.bot_name} encountered an error when "
                                   f"sending post {text} with media dicts {media_dicts}:"
                                   f"\n{e}\n"),
                                  e)]
Performs batch reply on target account.
Looks up the recent messages of the target user,
applies the callback,
and replies with
what the callback generates.
:param callback: a callback taking a message id,
message contents,
and optional extra keys,
and returning a message string.
:param target_handle: the handle of the target account.
:param lookback_limit: a lookback limit of how many messages to consider.
:returns: list of output records,
each corresponding to either a single post,
or an error.
def perform_batch_reply(
        self,
        *,
        callback: Callable[..., str],
        lookback_limit: int,
        target_handle: str,
) -> List[OutputRecord]:
    """
    Performs batch reply on target account.

    Looks up the recent messages of the target user,
    applies the callback,
    and replies with what the callback generates.

    :param callback: a callback taking a message id,
        message contents,
        and optional extra keys,
        and returning a message string.
    :param target_handle: the handle of the target account.
    :param lookback_limit: a lookback limit of how many messages to consider.
    :returns: list of output records,
        each corresponding to either a single post,
        or an error.
    """
    self.log.info(f"Attempting to batch reply to mastodon user {target_handle}")

    # target handle should be able to be provided either as @user or @user@domain
    # note that this produces an empty first chunk
    # NOTE(review): this assumes a leading "@"; a bare "user@domain" would put
    # the domain (not the username) in chunk [1] — confirm caller contract.
    handle_chunks = target_handle.split("@")
    target_base_handle = handle_chunks[1]

    records: List[OutputRecord] = []
    our_id = self.api.account_verify_credentials()["id"]

    # be careful here - we're using a search to do this,
    # and if we're not careful we'll pull up people just mentioning the target.
    possible_accounts = self.api.account_search(target_handle, following=True)
    their_id = None
    for account in possible_accounts:
        if account["username"] == target_base_handle:
            their_id = account["id"]
            break
    if their_id is None:
        return [self.handle_error(f"Could not find target handle {target_handle}!", None)]

    statuses = self.api.account_statuses(their_id, limit=lookback_limit)
    for status in statuses:
        status_id = status.id

        # find possible replies we've made.
        our_statuses = self.api.account_statuses(our_id, since_id=status_id)
        in_reply_to_ids = list(map(lambda x: x.in_reply_to_id, our_statuses))
        if status_id not in in_reply_to_ids:
            # Strip HTML tags and unescape entities to recover plain text.
            encoded_status_text = re.sub(self.html_re, "", status.content)
            status_text = html.unescape(encoded_status_text)
            message = callback(message_id=status_id, message=status_text, extra_keys={})
            self.log.info(f"Replying {message} to status {status_id} from {target_handle}.")
            try:
                new_status = self.api.status_post(status=message, in_reply_to_id=status_id)
                records.append(TootRecord(record_data={
                    "toot_id": new_status.id,
                    "in_reply_to": target_handle,
                    "in_reply_to_id": status_id,
                    "text": message,
                }))
            except mastodon.MastodonError as e:
                records.append(
                    self.handle_error((f"Bot {self.bot_name} encountered an error when "
                                       f"sending post {message} during a batch reply "
                                       f":\n{e}\n"),
                                      e))
        else:
            self.log.info(f"Not replying to status {status_id} from {target_handle} "
                          f"- we already replied.")
    return records
Handle error while trying to do something.
def handle_error(self, message: str, e: mastodon.MastodonError) -> OutputRecord:
    """
    Handle error while trying to do something.

    :param message: human-readable description of the failed operation
        (currently unused by this method — callers pre-log it).
    :param e: the mastodon error that occurred.
    :returns: a TootRecord carrying the error.
    """
    self.lerror(f"Got an error! {e}")
    # Handle errors if we know how.
    try:
        code = e[0]["code"]
        if code in self.handled_errors:
            # NOTE(review): this looks up the handler but never invokes it —
            # probably should be self.handled_errors[code](...); confirm intent.
            self.handled_errors[code]
        else:
            pass
    except Exception:
        # Best-effort: malformed errors fall through to the generic record.
        pass
    return TootRecord(error=e)
Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count |
def _read_header(self):
    '''
    Little-endian
    |... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
    |        frames count        |      dimensions count      |
    '''
    # The header is the first 8 bytes of the file: two little-endian uint32s.
    self._fh.seek(0)
    frame_count, dim_count = struct.unpack("<II", self._fh.read(8))
    return frame_count, dim_count
Request a callback for value modification.
Parameters
----------
you : object
An instance having ``__call__`` attribute.
def listen(self, you):
    """
    Request a callback for value modification.

    Parameters
    ----------
    you : object
        An instance having ``__call__`` attribute.
    """
    # Register locally so this wrapper can notify the listener itself ...
    self._listeners.append(you)
    # ... and forward the registration to the underlying raw value.
    self.raw.talk_to(you)
Simple utility function to load a term.
def _get_term_by_id(self, id):
    '''Simple utility function to load a term.'''
    # Build the per-term JSON endpoint and fetch it.
    template = self.url + '/%s.json'
    response = self.session.get(template % id)
    return response.json()
Returns all concepts or collections that form the top-level of a display
hierarchy.
As opposed to the :meth:`get_top_concepts`, this method can possibly
return both concepts and collections.
:rtype: Returns a list of concepts and collections. For each an
id is present and a label. The label is determined by looking at
the `**kwargs` parameter, the default language of the provider
and falls back to `en` if nothing is present.
def get_top_display(self, **kwargs):
    '''
    Returns all concepts or collections that form the top-level of a display
    hierarchy.

    As opposed to the :meth:`get_top_concepts`, this method can possibly
    return both concepts and collections.

    :rtype: Returns a list of concepts and collections. For each an
        id is present and a label. The label is determined by looking at
        the `**kwargs` parameter, the default language of the provider
        and falls back to `en` if nothing is present.
    '''
    language = self._get_language(**kwargs)
    # Fetch the top-level list ('HR' = hierarchy roots) and resolve its
    # first entry, which is the root collection of the display hierarchy.
    response = self.session.get(self.url + '/lijst.json', params={'type[]': ['HR']})
    root = self.get_by_id(response.json()[0]['id'])
    # Expand the root collection's members into {id, label} entries.
    return [
        {'id': member.id, 'label': member.label(language)}
        for member in (self.get_by_id(mid) for mid in root.members)
    ]
Return a list of concepts or collections that should be displayed
under this concept or collection.
:param id: A concept or collection id.
:rtype: A list of concepts and collections. For each an
id is present and a label. The label is determined by looking at
the `**kwargs` parameter, the default language of the provider
and falls back to `en` if nothing is present. If the id does not
exist, return `False`.
def get_children_display(self, id, **kwargs):
    '''
    Return a list of concepts or collections that should be displayed
    under this concept or collection.

    :param id: A concept or collection id.
    :rtype: A list of concepts and collections. For each an
        id is present and a label. The label is determined by looking at
        the `**kwargs` parameter, the default language of the provider
        and falls back to `en` if nothing is present. If the id does not
        exist, return `False`.
    '''
    language = self._get_language(**kwargs)
    item = self.get_by_id(id)
    # Collections display their members; concepts display narrower concepts.
    child_ids = item.members if isinstance(item, Collection) else item.narrower
    return [
        {'id': child.id, 'label': child.label(language)}
        for child in (self.get_by_id(cid) for cid in child_ids)
    ]
Pass a list of identifiers (id_list), the name of the database ('Entrez',
'Symbol', 'Standard name', 'Systematic name' or a loaded crossreference
database) that you wish to translate from, and the name of the database
that you wish to translate to.
def translate_genes(id_list=None, from_id=None, to_id=None, organism=None):
    """
    Translate gene identifiers from one database to another.

    Pass a list of identifiers (id_list), the name of the database ('Entrez',
    'Symbol', 'Standard name', 'Systematic name' or a loaded crossreference
    database) that you wish to translate from, and the name of the database
    that you wish to translate to.

    :returns: dict mapping each input identifier to a list of translated
        identifiers; the special key 'not_found' lists the inputs that
        could not be translated.
    """
    ids = set(id_list)
    # Set of identifiers not found by this translate_genes method.
    not_found = set()

    # Restrict the gene queryset to a single organism when requested.
    if organism is not None:
        gene_objects_manager = Gene.objects.filter(
            organism__scientific_name=organism)
    else:
        gene_objects_manager = Gene.objects

    # Build the map of "from" identifiers to gene primary keys.
    from_ids = None
    if from_id == 'Entrez':
        # Entrez ids are integers; anything unparsable is "not found".
        int_list = []
        for x in ids:
            try:
                int_list.append(int(x))
            except ValueError:
                not_found.add(x)
        ids = set(int_list)
        from_ids = gene_objects_manager.filter(entrezid__in=ids).values_list(
            'entrezid', 'id')
    elif from_id == 'Systematic name':
        from_ids = gene_objects_manager.filter(
            systematic_name__in=ids).values_list('systematic_name', 'id')
    elif from_id == 'Standard name':
        from_ids = gene_objects_manager.filter(
            standard_name__in=ids).values_list('standard_name', 'id')
    elif from_id == 'Symbol':
        # If standard_name exists, symbol will be standard_name; otherwise
        # symbol will be systematic_name.
        from_ids = gene_objects_manager.annotate(
            symbol=Coalesce('standard_name', 'systematic_name')).filter(
            symbol__in=ids).values_list('symbol', 'id')
    else:  # a crossreference db?
        xrdb = CrossRefDB.objects.get(name=from_id)
        from_ids = CrossRef.objects.filter(crossrefdb=xrdb).values_list(
            'xrid', 'gene__id')

    # Dictionary that maps the user-supplied ID type to gene__id.
    from_id_map = {}
    gene_ids = []
    for user_id, gene_pk in from_ids:
        from_id_map[user_id] = gene_pk
        gene_ids.append(gene_pk)

    # Now figure out what we need to translate to.
    to_ids = None
    if to_id == 'Entrez':
        to_ids = Gene.objects.filter(id__in=gene_ids).values_list(
            'id', 'entrezid')
    elif to_id == 'Systematic name':
        to_ids = Gene.objects.filter(id__in=gene_ids).values_list(
            'id', 'systematic_name')
    elif to_id == 'Standard name':
        to_ids = Gene.objects.filter(id__in=gene_ids).values_list(
            'id', 'standard_name')
    elif to_id == 'Symbol':
        # If standard_name exists, symbol will be standard_name; otherwise
        # symbol will be systematic_name.
        to_ids = Gene.objects.annotate(
            symbol=Coalesce('standard_name', 'systematic_name')).filter(
            id__in=gene_ids).values_list('id', 'symbol')
    else:  # A crossreference db?
        xrdb = CrossRefDB.objects.get(name=to_id)
        to_ids = CrossRef.objects.filter(crossrefdb=xrdb).values_list(
            'gene__id', 'xrid')

    # Group target identifiers by gene primary key.
    to_id_map = {}
    for gene_pk, target_id in to_ids:
        to_id_map.setdefault(gene_pk, []).append(target_id)

    from_to = {}
    for item in ids:
        try:
            gene_id = from_id_map[item]
        except KeyError:
            not_found.add(item)
            continue
        try:
            # NOTE: use a fresh name here; do not rebind the to_id parameter.
            from_to[item] = to_id_map[gene_id]
        except KeyError:
            # The gene exists but has no identifier in the target database;
            # report it as not found instead of raising KeyError.
            not_found.add(item)
    from_to['not_found'] = list(not_found)
    return from_to
must be applied to all inner functions that return contexts.
Wraps all instances of pygame.Surface in the input in Surface
def _inner_func_anot(func):
    """Decorator that must be applied to all inner functions returning
    contexts; wraps every pygame.Surface among the arguments in Surface."""
    @wraps(func)
    def wrapped(*args):
        wrapped_args = _lmap(_wrap_surface, args)
        return func(*wrapped_args)
    return wrapped
Draws a cross centered in the target area
:param width: width of the lines of the cross in pixels
:type width: int
:param color: color of the lines of the cross
:type color: pygame.Color
def Cross(width=3, color=0):
    """Draws a cross centered in the target area

    :param width: width of the lines of the cross in pixels
    :type width: int
    :param color: color of the lines of the cross
    :type color: pygame.Color
    """
    horizontal = Line("h", width, color)
    vertical = Line("v", width, color)
    return Overlay(horizontal, vertical)
Top level function to create a surface.
:param target: the pygame.Surface to blit on. Or a (width, height) tuple
in which case a new surface will be created
:type target: -
def compose(target, root=None):
    """Top level function to create a surface.

    :param target: the pygame.Surface to blit on. Or a (width, height) tuple
        in which case a new surface will be created
    :type target: -
    """
    # A bare Surface cannot act as the layout root; it must be wrapped.
    if isinstance(root, Surface):
        raise ValueError("A Surface may not be used as root, please add "
                         "it as a single child i.e. compose(...)(Surface(...))")

    @_inner_func_anot
    def inner_compose(*children):
        if root:
            root_context = root(*children)
        else:
            # Without an explicit root, exactly one child becomes the root.
            assert len(children) == 1
            root_context = children[0]
        if isinstance(target, pygame.Surface):
            surface = target
            size = target.get_size()
        else:
            size = target
            surface = pygame.Surface(size)
        root_context._draw(surface, pygame.Rect(0, 0, *size))
        return surface
    return inner_compose
Unifies loading of fonts.
:param name: name of system-font or filepath, if None is passed the default
system-font is loaded
:type name: str
:param source: "sys" for system font, or "file" to load a file
:type source: str
def Font(name=None, source="sys", italic=False, bold=False, size=20):
    """Unifies loading of fonts.

    :param name: name of system-font or filepath, if None is passed the default
        system-font is loaded
    :type name: str
    :param source: "sys" for system font, or "file" to load a file
    :type source: str
    """
    assert source in ["sys", "file"]
    # No name given: fall back to the default system font regardless of source.
    if not name:
        return pygame.font.SysFont(pygame.font.get_default_font(),
                                   size, bold=bold, italic=italic)
    if source == "sys":
        return pygame.font.SysFont(name, size, bold=bold, italic=italic)
    font = pygame.font.Font(name, size)
    font.set_italic(italic)
    font.set_bold(bold)
    return font
Renders a text. Supports multiline text, the background will be transparent.
:param align: text-alignment must be "center", "left", or "right"
:type align: str
:return: a surface with the rendered text
:rtype: pygame.Surface
def Text(text, font, color=pygame.Color(0, 0, 0), antialias=False, align="center"):
    """Renders a text. Supports multiline text, the background will be transparent.

    :param align: text-alignment must be "center", "left", or "right"
    :type align: str
    :return: a surface with the rendered text
    :rtype: pygame.Surface
    """
    assert align in ["center", "left", "right"]
    # A zero margin on one side pushes the text flush against it.
    margin_l = 0 if align == "left" else 1
    margin_r = 0 if align == "right" else 1
    margin = Margin(margin_l, margin_r)
    # Pick a colorkey guaranteed to differ from the text color.
    color_key = pygame.Color(0, 0, 1) if pygame.Color(0, 0, 1) != color else 0x000002
    stripped_lines = [part.strip() for part in text.split("\n")]
    text_surfaces = [_text(part, font=font, color=color, antialias=antialias)
                     for part in stripped_lines]
    w = max(surf.get_rect().w for surf in text_surfaces)
    h = sum(surf.get_rect().h for surf in text_surfaces)
    surf = compose((w, h), Fill(color_key))(LinLayout("v")(
        *[Surface(margin)(s) for s in text_surfaces]))
    surf.set_colorkey(color_key)
    return surf.convert_alpha()
Creates a padding by the remaining space after scaling the content.
E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25) and
Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
because the content would not be scaled (since scale_h=1) and therefore
there would be no vertical padding.
If scale_h is not specified scale_h=scale_w is used as default
:param scale_w: horizontal scaling factors
:type scale_w: float
:param scale_h: vertical scaling factor
:type scale_h: float
def from_scale(scale_w, scale_h=None):
    """Creates a padding by the remaining space after scaling the content.

    E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25) and
    Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
    because the content would not be scaled (since scale_h=1) and therefore
    there would be no vertical padding.
    If scale_h is not specified scale_h=scale_w is used as default

    :param scale_w: horizontal scaling factors
    :type scale_w: float
    :param scale_h: vertical scaling factor
    :type scale_h: float
    """
    # Explicit None check: an intentional scale_h=0 (content scaled away
    # entirely) must not be silently replaced by scale_w.
    if scale_h is None:
        scale_h = scale_w
    w_padding = [(1 - scale_w) * 0.5] * 2
    h_padding = [(1 - scale_h) * 0.5] * 2
    return Padding(*w_padding, *h_padding)
:param levels string: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
http://www.wanikani.com/api/v1.2#radicals-list
def radicals(self, levels=None):
    """
    Yield Radical items from the WaniKani radicals endpoint.

    :param levels string: An optional argument of declaring a single or
        comma-delimited list of levels is available, as seen in the example
        as 1. An example of a comma-delimited list of levels is 1,2,5,9.

    http://www.wanikani.com/api/v1.2#radicals-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'radicals')
    if levels:
        url = '{0}/{1}'.format(url, levels)
    data = self.get(url)
    for item in data['requested_information']:
        yield Radical(item)
:param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#kanji-list
def kanji(self, levels=None):
    """
    Yield Kanji items from the WaniKani kanji endpoint.

    :param levels: An optional argument of declaring a single or
        comma-delimited list of levels is available, as seen in the example
        as 1. An example of a comma-delimited list of levels is 1,2,5,9.
    :type levels: str or None

    http://www.wanikani.com/api/v1.2#kanji-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'kanji')
    if levels:
        url = '{0}/{1}'.format(url, levels)
    data = self.get(url)
    for item in data['requested_information']:
        yield Kanji(item)
:param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#vocabulary-list
def vocabulary(self, levels=None):
    """
    Yield Vocabulary items from the WaniKani vocabulary endpoint.

    :param levels: An optional argument of declaring a single or
        comma-delimited list of levels is available, as seen in the example
        as 1. An example of a comma-delimited list of levels is 1,2,5,9.
    :type levels: str or None

    http://www.wanikani.com/api/v1.2#vocabulary-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
    if levels:
        url += '/{0}'.format(levels)
    data = self.get(url)
    info = data['requested_information']
    # This endpoint nests its items under 'general' in some responses.
    items = info['general'] if 'general' in info else info
    for item in items:
        yield Vocabulary(item)
Test if the argument is a string representing a valid hexadecimal digit.
:param obj: Object
:type obj: any
:rtype: boolean
def ishex(obj):
    """
    Test if the argument is a string representing a valid hexadecimal digit.

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    if not isinstance(obj, str):
        return False
    return len(obj) == 1 and obj in string.hexdigits
Test if the argument is a number (complex, float or integer).
:param obj: Object
:type obj: any
:rtype: boolean
def isnumber(obj):
    """
    Test if the argument is a number (complex, float or integer).

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    # bool is a subclass of int but is deliberately excluded.
    if obj is None or isinstance(obj, bool):
        return False
    return isinstance(obj, (int, float, complex))
Test if the argument is a real number (float or integer).
:param obj: Object
:type obj: any
:rtype: boolean
def isreal(obj):
    """
    Test if the argument is a real number (float or integer).

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    # bool is a subclass of int but is deliberately excluded.
    if obj is None or isinstance(obj, bool):
        return False
    return isinstance(obj, (int, float))
Create and return an API context
def create_api_context(self, cls):
    """Create and return an API context"""
    context = {
        "name": cls.name,
        "cls": cls,
        "inst": [],
        "conf": self.conf.get_api_service(cls.name),
        "calls": self.conf.get_api_calls(),
        "shared": {},  # Used per-API to monitor state
        "log_level": self.conf.get_log_level(),
        "callback": self.receive,
    }
    return self.api_context_schema().load(context)
Pass an API result down the pipeline
def receive(self, data, api_context):
    """Pass an API result down the pipeline"""
    self.log.debug(f"Putting data on the pipeline: {data}")
    context = {
        "api_contexts": self.api_contexts,
        "api_context": api_context,
        "strategy": {},  # Shared strategy data
        "result": data,
        "log_level": api_context["log_level"],
    }
    loaded = self.strategy_context_schema().load(context)
    self.strat.execute(loaded.data)
Shut it down
def shutdown(self, signum, frame):  # pylint: disable=unused-argument
    """Shut it down"""
    # Guard clause: only shut down once.
    if self.exit:
        return
    self.exit = True
    self.log.debug(f"SIGTRAP!{signum};{frame}")
    self.api.shutdown()
    self.strat.shutdown()
Course this node belongs to
def course(self):
    """
    Course this node belongs to (the root of the parent chain).
    """
    node = self.parent
    # Climb until a node with no parent is reached.
    while node.parent:
        node = node.parent
    return node
Path of this node on Studip. Looks like Courses/folder/folder/document. Respects the renaming policies defined in the namemap
def path(self):
    """
    Path of this node on Studip, e.g. Courses/folder/folder/document.
    Respects the renaming policies defined in the namemap.
    """
    parent = self.parent
    # Root nodes are their own path component.
    if parent is None:
        return self.title
    return join(parent.path, self.title)
get title of this node. If an entry for this course is found in the configuration namemap it is used, otherwise the default
value from stud.ip is used.
def title(self):
    """
    get title of this node. If an entry for this course is found in the
    configuration namemap it is used, otherwise the default value from
    stud.ip is used.
    """
    # Look the name up only once instead of calling namemap_lookup twice.
    name = c.namemap_lookup(self.id)
    if name is None:
        name = self._title
    return secure_filename(name)
list of all documents found in the subtrees of this node
def deep_documents(self):
    """
    list of all documents found in the subtrees of this node
    """
    collected = []
    for entry in self.contents:
        if isinstance(entry, Document):
            collected.append(entry)
        else:
            # Non-documents are containers; recurse into them.
            collected.extend(entry.deep_documents)
    return collected
The title of the course. If no entry in the namemap of the configuration is found a new entry is created with name=$STUD.IP_NAME + $SEMESTER_NAME
def title(self):
    """
    The title of the course. If no entry in the namemap of the configuration
    is found a new entry is created with name=$STUD.IP_NAME + $SEMESTER_NAME
    """
    name = c.namemap_lookup(self.id)
    if name is None:
        # First sighting: derive a name and persist it in the namemap.
        name = self._title + " " + client.get_semester_title(self)
        c.namemap_set(self.id, name)
    return secure_filename(name)
run a get request against an url. Returns the response which can optionally be streamed
def _get(self, route, stream=False):
    """
    run a GET request against a route. Returns the response which can
    optionally be streamed
    """
    log.debug("Running GET request against %s" % route)
    url = self._url(route)
    return r.get(url, auth=c.auth, stream=stream)
List all contents of a folder. Returns a list of all Documents and Folders (in this order) in the folder.
def get_contents(self, folder: Folder):
    """
    List all contents of a folder. Returns a list of all Documents and
    Folders (in this order) in the folder.
    """
    log.debug("Listing Contents of %s/%s" % (folder.course.id, folder.id))
    # Courses use the folder-less endpoint; real folders need their own id.
    if isinstance(folder, Course):
        route = '/api/documents/%s/folder' % folder.course.id
    else:
        route = '/api/documents/%s/folder/%s' % (folder.course.id, folder.id)
    response = json.loads(self._get(route).text)
    log.debug("Got response: %s" % response)
    documents = [Document.from_response(entry, folder)
                 for entry in response["documents"]]
    folders = [Folder.from_response(entry, folder)
               for entry in response["folders"]]
    return documents + folders
Download a document to the given path. If no path is provided the path is constructed from the configured base path + the stud.ip path + filename.
If overwrite is set the local version will be overwritten if the file was changed on studip since the last check
def download_document(self, document: Document, overwrite=True, path=None):
    """
    Download a document to the given path. If no path is provided the path is
    constructed from the expanded base_path configuration value + the
    document's stud.ip path (the filename is appended separately).
    If overwrite is set the local version will be overwritten if the file was
    changed on studip since the last check
    """
    if not path:
        path = os.path.join(os.path.expanduser(c["base_path"]), document.path)
    # Download when the remote copy changed (and overwriting is allowed)
    # or when there is no local copy yet.
    if (self.modified(document) and overwrite) or not os.path.exists(join(path, document.title)):
        log.info("Downloading %s" % join(path, document.title))
        # Stream the response so large files are not held in memory.
        file = self._get('/api/documents/%s/download' % document.id, stream=True)
        os.makedirs(path, exist_ok=True)
        with open(join(path, document.title), 'wb') as f:
            shutil.copyfileobj(file.raw, f)
get the semester of a node
def get_semester_title(self, node: BaseNode):
    """
    get the semester title of a node's course
    """
    course = node.course
    log.debug("Getting Semester Title for %s" % course.id)
    return self._get_semester_from_id(course.semester)
use the base_url and auth data from the configuration to list all courses the user is subscribed to
def get_courses(self):
    """
    use the base_url and auth data from the configuration to list all courses
    the user is subscribed to
    """
    log.info("Listing Courses...")
    raw_courses = json.loads(self._get('/api/courses').text)["courses"]
    courses = [Course.from_response(entry) for entry in raw_courses]
    log.debug("Courses: %s" % [str(entry) for entry in courses])
    return courses
Minimize a scalar function using Brent's method.
Parameters
----------
verbose : bool
``True`` for verbose output; ``False`` otherwise.
def _minimize_scalar(
    self, desc="Progress", rtol=1.4902e-08, atol=1.4902e-08, verbose=True
):
    """
    Minimize a scalar function using Brent's method.

    Requires exactly one free (non-fixed) variable; its value is set to
    the minimizer found within the variable's bounds.

    Parameters
    ----------
    desc : str
        Label shown on the progress bar.
    rtol : float
        Relative tolerance passed to the Brent search.
    atol : float
        Absolute tolerance passed to the Brent search.
    verbose : bool
        ``True`` for verbose output; ``False`` otherwise.

    Raises
    ------
    ValueError
        If the number of free variables is not exactly one.
    """
    from tqdm import tqdm
    from numpy import asarray
    from brent_search import minimize as brent_minimize
    variables = self._variables.select(fixed=False)
    if len(variables) != 1:
        raise ValueError("The number of variables must be equal to one.")
    var = variables[variables.names()[0]]
    progress = tqdm(desc=desc, disable=not verbose)
    def func(x):
        # Each evaluation advances the progress bar by one step.
        progress.update(1)
        var.value = x
        # __sign flips the objective; presumably allows maximization to
        # reuse this minimizer -- TODO confirm against the class.
        return self.__sign * self.value()
    # brent_minimize returns (best_x, best_fval, ...); only best_x is used.
    r = asarray(
        brent_minimize(func, a=var.bounds[0], b=var.bounds[1], rtol=rtol, atol=atol)
    )
    var.value = r[0]
    progress.close()
return a url-safe hash of the string, optionally (and by default) base64-encoded
alg='sha256' = the hash algorithm, must be in hashlib
b64=True = whether to base64-encode the output
strip=True = whether to strip trailing '=' from the base64 output
Using the default arguments returns a url-safe base64-encoded SHA-256 hash of the string.
Length of the digest with different algorithms, using b64=True and strip=True:
* SHA224 = 38
* SHA256 = 43 (DEFAULT)
* SHA384 = 64
* SHA512 = 86
def digest(self, alg='sha256', b64=True, strip=True):
"""return a url-safe hash of the string, optionally (and by default) base64-encoded
alg='sha256' = the hash algorithm, must be in hashlib
b64=True = whether to base64-encode the output
strip=True = whether to strip trailing '=' from the base64 output
Using the default arguments returns a url-safe base64-encoded SHA-256 hash of the string.
Length of the digest with different algorithms, using b64=True and strip=True:
* SHA224 = 38
* SHA256 = 43 (DEFAULT)
* SHA384 = 64
* SHA512 = 86
"""
import base64, hashlib
h = hashlib.new(alg)
h.update(str(self).encode('utf-8'))
if b64 == True:
# this returns a string with a predictable amount of = padding at the end
b = base64.urlsafe_b64encode(h.digest()).decode('ascii')
if strip == True:
b = b.rstrip('=')
return b
else:
return h.hexdigest() |
turn a string to CamelCase, omitting non-word characters
def camelify(self):
    """turn a string to CamelCase, omitting non-word characters"""
    result = self.titleify(allwords=True)
    result = re.sub(r"&[^;]+;", " ", result)  # drop XML/HTML entities
    result = re.sub(r"\W+", "", result)       # drop remaining non-word chars
    return String(result)
takes a string and makes a title from it
def titleify(self, lang='en', allwords=False, lastword=True):
    """takes a string and makes a title from it"""
    # Words that stay lowercase in titles for the given language.
    if lang in LOWERCASE_WORDS:
        lc_words = LOWERCASE_WORDS[lang]
    else:
        lc_words = []
    text = str(self).strip()
    # Split on separators but keep them so the string can be reassembled.
    parts = re.split(r"([_\W]+)", text)
    for i, word in enumerate(parts):
        word = word.lower()
        capitalize = (
            allwords
            or i == 0
            or (lastword and i == len(parts) - 1)
            or word not in lc_words
        )
        if capitalize:
            if len(word) > 1:
                word = word[0].upper() + word[1:]
            else:
                word = word.upper()
        parts[i] = word
    return String("".join(parts))
return a python identifier from the string (underscore separators)
def identifier(self, camelsplit=False, ascii=True):
    """return a python identifier from the string (underscore separators)

    :param camelsplit: if True, split CamelCase words before converting
    :param ascii: if True, restrict the result to ASCII characters
    """
    # Delegates to nameify(), swapping the default hyphen separator for '_'.
    return self.nameify(camelsplit=camelsplit, ascii=ascii, sep='_')
return an XML name (hyphen-separated by default, initial underscore if non-letter)
def nameify(self, camelsplit=False, ascii=True, sep='-'):
    """return an XML name (hyphen-separated by default, initial underscore if non-letter)"""
    result = String(str(self))  # work on an immutable copy
    if camelsplit:
        result = result.camelsplit()
    result = result.hyphenify(ascii=ascii).replace('-', sep)
    # XML names must start with a letter or underscore.
    if not result or re.match("[A-Za-z_]", result[0]) is None:
        result = "_" + result
    return String(result)
Turn non-word characters (incl. underscore) into single hyphens.
If ascii=True, return ASCII-only.
If also lossless=True, use the UTF-8 codes for the non-ASCII characters.
def hyphenify(self, ascii=False):
    """Turn non-word characters (incl. underscore) into single hyphens.
    If ascii=True, return ASCII-only, encoding non-ASCII characters as
    '.<code>-' tokens via XML character references.
    NOTE(review): the docstring previously mentioned a ``lossless``
    parameter, but no such parameter exists in the signature.
    """
    s = str(self)
    s = re.sub("""['"\u2018\u2019\u201c\u201d]""", '', s) # straight and curly quotes are dropped
    s = re.sub(r'(?:\s|%20)+', '-', s) # whitespace (incl. URL-encoded spaces) becomes hyphens
    if ascii == True: # ASCII-only
        s = s.encode('ascii', 'xmlcharrefreplace').decode('ascii') # use entities
        s = re.sub("&?([^;]*?);", r'.\1-', s) # entities rewritten as '.code-'
        s = s.replace('#', 'u') # numeric references become 'u<code>'
    s = re.sub(r"\W+", '-', s).strip(' -') # squeeze non-word runs to single hyphens
    return String(s)
Turn a CamelCase string into a string with spaces
def camelsplit(self):
    """Turn a CamelCase string into a string with spaces"""
    text = str(self)
    # Walk backwards so inserting spaces does not disturb unvisited indices.
    for i in range(len(text) - 1, 0, -1):
        prev, cur = text[i - 1], text[i]
        # A boundary is a lower-to-upper transition or a letter-to-digit one.
        boundary = (
            (cur.isupper() and prev.isalnum() and not prev.isupper())
            or (cur.isnumeric() and prev.isalpha())
        )
        if boundary:
            text = text[:i] + ' ' + text[i:]
    return String(text.strip())
Pyramid pluggable and discoverable function.
def includeme(config):
    """Pyramid pluggable and discoverable function."""
    global_settings = config.registry.settings
    settings = local_settings(global_settings, PREFIX)
    try:
        file = settings['file']
    except KeyError:
        raise KeyError("Must supply '{}.file' configuration value "
                       "in order to configure logging via '{}'."
                       .format(PREFIX, PROJECT))
    with open(file, 'r') as f:
        # safe_load avoids constructing arbitrary Python objects from YAML
        # tags; plain yaml.load without an explicit Loader is deprecated.
        logging_config = yaml.safe_load(f)
    dictConfig(logging_config)
    # Enable transit logging?
    if asbool(settings.get('transit_logging.enabled?', False)):
        config.add_tween('pyramid_sawing.main.TransitLogger')
Executed on startup of application
def run(self):
    """Executed on startup of application"""
    self.api = self.context.get("cls")(self.context)
    self.context["inst"].append(self)  # Adapters used by strategies
    for call, calldata in self.context.get("calls", {}).items():
        # Bind the loop variables as defaults so each scheduled loop keeps
        # its own call/calldata instead of sharing the last iteration's
        # values (late-binding closure pitfall).
        def loop(call=call, calldata=calldata):
            """Loop on event scheduler, calling calls"""
            while not self.stopped.wait(calldata.get("delay", None)):
                self.call(call, calldata.get("arguments", None))
        self.thread[call] = Process(target=loop)
        self.thread[call].start()
Executed on each scheduled iteration
def call(self, callname, arguments=None):
    """Executed on each scheduled iteration

    :param callname: name of the API call to perform
    :param arguments: optional arguments forwarded to the call
    """
    # See if a method override exists
    action = getattr(self.api, callname, None)
    if action is None:
        try:
            action = self.api.ENDPOINT_OVERRIDES.get(callname, None)
        except AttributeError:
            # API class declares no ENDPOINT_OVERRIDES mapping; fall back
            # to the plain call name.
            action = callname
    if not callable(action):
        # NOTE(review): this request is not used on either return path
        # below -- likely dead code unless _generate_request has side
        # effects; confirm before removing.
        request = self._generate_request(action, arguments)
        if action is None:
            return self._generate_result(
                callname, self.api.call(*call_args(callname, arguments)))
        return self._generate_result(
            callname, self.api.call(*call_args(action, arguments)))
    request = self._generate_request(callname, arguments)
    return self._generate_result(callname, action(request))
Generate a request object for delivery to the API
def _generate_request(self, callname, request):
    """Generate a request object for delivery to the API"""
    schema = self.api.request_schema()
    # The schema resolves the endpoint path from the call name.
    schema.context['callname'] = callname
    dumped = schema.dump(request)
    return dumped.data.get("payload")
Generate a results object for delivery to the context object
def _generate_result(self, callname, result):
    """Generate a results object for delivery to the context object"""
    schema = self.api.result_schema()
    # The schema resolves the endpoint path from the call name.
    schema.context['callname'] = callname
    loaded = schema.load(result)
    self.callback(loaded, self.context)
create a key for index by converting index into a base-26 number, using A-Z as the characters.
def excel_key(index):
    """create a key for index by converting index into a base-26 number, using A-Z as the characters.

    :param index: zero-based column index (0 -> 'A', 25 -> 'Z', 26 -> 'AA')
    :returns: the spreadsheet-style column label for index
    """
    # Iterative replacement for the original obscure recursive lambda
    # (X = lambda n: ~n and ... or ''): bijective base-26 with digits A-Z.
    index = int(index)
    letters = []
    while index >= 0:
        letters.append(chr(65 + index % 26))
        index = index // 26 - 1
    return ''.join(reversed(letters))
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
def convert_to_crash_data(raw_crash, processed_crash):
    """
    Takes a raw crash and a processed crash (these are Socorro-centric
    data structures) and converts them to a crash data structure used
    by signature generation.

    :arg raw_crash: raw crash data from Socorro
    :arg processed_crash: processed crash data from Socorro

    :returns: crash data structure that conforms to the schema

    .. note:: this mutates ``processed_crash`` in place by removing the
       cached ``normalized`` field from every stack frame.
    """
    # We want to generate fresh signatures, so we remove the "normalized" field
    # from stack frames from the processed crash because this is essentially
    # cached data from previous processing
    for thread in glom(processed_crash, 'json_dump.threads', default=[]):
        for frame in thread.get('frames', []):
            if 'normalized' in frame:
                del frame['normalized']
    crash_data = {
        # JavaStackTrace or None
        'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
        # int or None
        'crashing_thread': glom(
            processed_crash, 'json_dump.crash_info.crashing_thread', default=None
        ),
        # list of CStackTrace or None
        'threads': glom(processed_crash, 'json_dump.threads', default=None),
        # int or None
        'hang_type': glom(processed_crash, 'hang_type', default=None),
        # text or None
        'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
        # int or None
        'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
        # text or None
        'abort_message': glom(raw_crash, 'AbortMessage', default=None),
        # text or None
        'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
        # text json with "phase", "conditions" (complicated--see code) or None
        'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
        # text or None
        'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
        # text or None
        'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
        # text or None
        'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
        # text
        'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
        # text; comma-delimited e.g. "browser,flash1,flash2"
        'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
        # pull out the original signature if there was one
        'original_signature': glom(processed_crash, 'signature', default='')
    }
    return crash_data
Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
def drop_bad_characters(text):
    """Takes a text and drops all non-printable and non-ascii characters and
    also any whitespace characters that aren't space.

    :arg str text: the text to fix

    :returns: text with all bad characters dropped
    """
    # Keep only characters on the whitelist; everything else is dropped.
    return ''.join(c for c in text if c in ALLOWED_CHARS)
Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
def parse_source_file(source_file):
    """Parses a source file thing and returns the file name

    Example:

    >>> parse_source_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
    'js/src/jit/MIR.h'

    :arg str source_file: the source file ("file") from a stack frame

    :returns: the filename or ``None`` if it couldn't determine one
    """
    if not source_file:
        return None
    parts = source_file.split(':')
    if len(parts) == 4:
        # vcs-qualified path: vcstype:root:path:revision (hg, git, s3, ...)
        return parts[2]
    if len(parts) == 2:
        # Windows drive path: the "vcstype" part is a drive letter ("c:", "d:")
        return parts[1]
    if source_file.startswith('/'):
        # Absolute path on OSX or Linux
        return source_file
    # We have no idea what this is, so return None
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.