Dataset schema (each column's type and observed value-length range):

    nwo                  stringlengths    5 .. 106
    sha                  stringlengths   40 .. 40
    path                 stringlengths    4 .. 174
    language             stringclasses    1 value
    identifier           stringlengths    1 .. 140
    parameters           stringlengths    0 .. 87.7k
    argument_list        stringclasses    1 value
    return_statement     stringlengths    0 .. 426k
    docstring            stringlengths    0 .. 64.3k
    docstring_summary    stringlengths    0 .. 26.3k
    docstring_tokens     list
    function             stringlengths   18 .. 4.83M
    function_tokens      list
    url                  stringlengths   83 .. 304
hughw19/NOCS_CVPR2019
14dbce775c3c7c45bb7b19269bd53d68efb8f73f
utils.py
python
compute_coords_aps
(final_results, synset_names, iou_thresholds, coord_thresholds)
Compute Average Precision at a set IoU threshold (default 0.5). Returns: mAP: Mean Average Precision precisions: List of precisions at different class score thresholds. recalls: List of recall values at different class score thresholds. overlaps: [pred_boxes, gt_boxes] IoU overlaps.
Compute Average Precision at a set IoU threshold (default 0.5). Returns: mAP: Mean Average Precision precisions: List of precisions at different class score thresholds. recalls: List of recall values at different class score thresholds. overlaps: [pred_boxes, gt_boxes] IoU overlaps.
[ "Compute", "Average", "Precision", "at", "a", "set", "IoU", "threshold", "(", "default", "0", ".", "5", ")", ".", "Returns", ":", "mAP", ":", "Mean", "Average", "Precision", "precisions", ":", "List", "of", "precisions", "at", "different", "class", "score"...
def compute_coords_aps(final_results, synset_names, iou_thresholds, coord_thresholds):
    """Compute Average Precision at a set IoU threshold (default 0.5).

    Returns:
    mAP: Mean Average Precision
    precisions: List of precisions at different class score thresholds.
    recalls: List of recall values at different class score thresholds.
    overlaps: [pred_boxes, gt_boxes] IoU overlaps.
    """
    num_classes = len(synset_names)
    num_iou_thres = len(iou_thresholds)
    num_coord_thres = len(coord_thresholds)

    mean_coord_dist_cls = {}
    # pred_matches_all = {}
    # pred_scores_all = {}
    # gt_matches_all = {}
    for cls_id in range(1, num_classes):
        mean_coord_dist_cls[cls_id] = []
        # pred_matches_all[cls_id] = [[[] for _ in range(num_shift_thres)] for _ in range(num_degree_thres)]
        # gt_matches_all[cls_id] = [[[] for _ in range(num_shift_thres)] for _ in range(num_degree_thres)]
        # pred_scores_all[cls_id] = [[[] for _ in range(num_shift_thres)] for _ in range(num_degree_thres)]

    progress = 0
    for progress, result in enumerate(final_results):
        print(progress, len(final_results))
        gt_class_ids = result['gt_class_ids'].astype(np.int32)
        gt_bboxes = np.array(result['gt_bboxes'])
        gt_masks = np.array(result['gt_masks'])
        gt_coords = np.array(result['gt_coords'])
        #print(gt_class_ids.shape, gt_bboxes.shape, gt_masks.shape, gt_coords.shape)
        #gt_scales = result['gt_scale']

        pred_class_ids = result['pred_class_ids'].astype(np.int32)
        pred_bboxes = np.array(result['pred_bboxes'])
        pred_masks = np.array(result['pred_masks'])
        pred_coords = np.array(result['pred_coords'])
        pred_scores = result['pred_scores']
        #print(pred_class_ids.shape, pred_bboxes.shape, pred_masks.shape, pred_coords.shape)

        for cls_id in range(1, num_classes):
            # get gt and predictions in this class
            if len(gt_class_ids):
                cls_gt_class_ids = gt_class_ids[gt_class_ids==cls_id]
                cls_gt_bboxes = gt_bboxes[gt_class_ids==cls_id]
                cls_gt_masks = gt_masks[..., gt_class_ids==cls_id]
                cls_gt_coords = gt_coords[..., gt_class_ids==cls_id, :]
            else:
                cls_gt_class_ids = []
                cls_gt_bboxes = []
                cls_gt_masks = []
                cls_gt_coords = []

            if len(pred_class_ids):
                cls_pred_class_ids = pred_class_ids[pred_class_ids==cls_id]
                cls_pred_bboxes = pred_bboxes[pred_class_ids==cls_id]
                cls_pred_scores = pred_scores[pred_class_ids==cls_id]
                cls_pred_masks = pred_masks[:, :, pred_class_ids==cls_id]
                cls_pred_coords = pred_coords[:, :, pred_class_ids==cls_id, :]
            else:
                cls_pred_class_ids = []
                cls_pred_bboxes = []
                cls_pred_scores = []
                cls_pred_masks = []
                cls_pred_coords = []

            # calculate the overlap between each gt instance and pred instance
            gt_match, pred_match, overlaps, pred_indices = compute_matches(
                cls_gt_bboxes, cls_gt_class_ids, cls_gt_masks,
                cls_pred_bboxes, cls_pred_class_ids, cls_pred_scores, cls_pred_masks,
                0.5)

            if len(gt_match) and len(pred_match):
                cls_pred_masks_sorted = cls_pred_masks[..., pred_indices]
                cls_pred_coords_sorted = cls_pred_coords[..., pred_indices, :]

                for i in range(len(pred_match)):
                    if pred_match[i] > -1:
                        j = int(pred_match[i])
                        mean_coord_dist = compute_mean_l1_coord_diff(
                            cls_pred_masks_sorted[..., i], cls_gt_masks[..., j],
                            cls_pred_coords_sorted[..., i, :], cls_gt_coords[..., j, :],
                            synset_names, cls_id)
                        #print(mean_coord_dist)
                        mean_coord_dist_cls[cls_id].append(mean_coord_dist)
                        #print(mean_coord_dist_cls[cls_id])

    for cls_id in range(1, num_classes):
        mean_coord_dist_cls[cls_id] = np.array(mean_coord_dist_cls[cls_id])
        print('mean coord dist of {} class: {}'.format(synset_names[cls_id], np.mean(mean_coord_dist_cls[cls_id])))
[ "def", "compute_coords_aps", "(", "final_results", ",", "synset_names", ",", "iou_thresholds", ",", "coord_thresholds", ")", ":", "num_classes", "=", "len", "(", "synset_names", ")", "num_iou_thres", "=", "len", "(", "iou_thresholds", ")", "num_coord_thres", "=", ...
https://github.com/hughw19/NOCS_CVPR2019/blob/14dbce775c3c7c45bb7b19269bd53d68efb8f73f/utils.py#L2285-L2388
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/_pyio.py
python
FileIO.seek
(self, pos, whence=SEEK_SET)
return os.lseek(self._fd, pos, whence)
Move to new file position. Argument offset is a byte count. Optional argument whence defaults to SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values are SEEK_CUR or 1 (move relative to current position, positive or negative), and SEEK_END or 2 (move relative to end of file, usually negative, although many platforms allow seeking beyond the end of a file). Note that not all file objects are seekable.
Move to new file position.
[ "Move", "to", "new", "file", "position", "." ]
def seek(self, pos, whence=SEEK_SET):
    """Move to new file position.

    Argument offset is a byte count.  Optional argument whence defaults to
    SEEK_SET or 0 (offset from start of file, offset should be >= 0); other
    values are SEEK_CUR or 1 (move relative to current position, positive or
    negative), and SEEK_END or 2 (move relative to end of file, usually
    negative, although many platforms allow seeking beyond the end of a file).

    Note that not all file objects are seekable.
    """
    if isinstance(pos, float):
        raise TypeError('an integer is required')
    self._checkClosed()
    return os.lseek(self._fd, pos, whence)
[ "def", "seek", "(", "self", ",", "pos", ",", "whence", "=", "SEEK_SET", ")", ":", "if", "isinstance", "(", "pos", ",", "float", ")", ":", "raise", "TypeError", "(", "'an integer is required'", ")", "self", ".", "_checkClosed", "(", ")", "return", "os", ...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/_pyio.py#L1649-L1663
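A minimal usage sketch of the seek semantics documented in the record above, using only the standard library (the file name demo.bin is illustrative):

import io
import os

with io.open("demo.bin", "wb") as f:
    f.write(b"0123456789")

with io.open("demo.bin", "rb") as f:
    f.seek(4)                    # SEEK_SET (0): absolute offset from start
    assert f.read(1) == b"4"
    f.seek(2, os.SEEK_CUR)       # SEEK_CUR (1): relative to current position
    assert f.read(1) == b"7"
    f.seek(-1, os.SEEK_END)      # SEEK_END (2): relative to end, usually negative
    assert f.read(1) == b"9"

os.remove("demo.bin")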
MaurizioFD/RecSys2019_DeepLearning_Evaluation
0fb6b7f5c396f8525316ed66cf9c9fdb03a5fa9b
Base/BaseRecommender.py
python
BaseRecommender._compute_item_score
(self, user_id_array, items_to_compute = None)
:param user_id_array: array containing the user indices whose recommendations need to be computed :param items_to_compute: array containing the items whose scores are to be computed. If None, all items are computed, otherwise discarded items will have as score -np.inf :return: array (len(user_id_array), n_items) with the score.
[]
def _compute_item_score(self, user_id_array, items_to_compute=None):
    """
    :param user_id_array:    array containing the user indices whose recommendations need to be computed
    :param items_to_compute: array containing the items whose scores are to be computed.
                             If None, all items are computed, otherwise discarded items will have as score -np.inf
    :return: array (len(user_id_array), n_items) with the score.
    """
    raise NotImplementedError("BaseRecommender: compute_item_score not assigned for current recommender, unable to compute prediction scores")
[ "def", "_compute_item_score", "(", "self", ",", "user_id_array", ",", "items_to_compute", "=", "None", ")", ":", "raise", "NotImplementedError", "(", "\"BaseRecommender: compute_item_score not assigned for current recommender, unable to compute prediction scores\"", ")" ]
https://github.com/MaurizioFD/RecSys2019_DeepLearning_Evaluation/blob/0fb6b7f5c396f8525316ed66cf9c9fdb03a5fa9b/Base/BaseRecommender.py#L120-L128
broadinstitute/viral-ngs
e144969e4c57060d53f38a4c3a270e8227feace1
util/misc.py
python
load_yaml_or_json
(fname)
Load a dictionary from either a yaml or a json file
Load a dictionary from either a yaml or a json file
[ "Load", "a", "dictionary", "from", "either", "a", "yaml", "or", "a", "json", "file" ]
def load_yaml_or_json(fname):
    '''Load a dictionary from either a yaml or a json file'''
    with open(fname) as f:
        if fname.upper().endswith('.YAML'):
            return yaml.safe_load(f) or {}
        if fname.upper().endswith('.JSON'):
            return json.load(f) or {}
        raise TypeError('Unsupported dict file format: ' + fname)
[ "def", "load_yaml_or_json", "(", "fname", ")", ":", "with", "open", "(", "fname", ")", "as", "f", ":", "if", "fname", ".", "upper", "(", ")", ".", "endswith", "(", "'.YAML'", ")", ":", "return", "yaml", ".", "safe_load", "(", "f", ")", "or", "{", ...
https://github.com/broadinstitute/viral-ngs/blob/e144969e4c57060d53f38a4c3a270e8227feace1/util/misc.py#L493-L498
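The `or {}` in load_yaml_or_json guards against empty documents: PyYAML's safe_load returns None for empty input, and the loader promises a dict. A small demonstration, assuming PyYAML is installed:

import yaml  # PyYAML, as imported by util/misc.py

assert yaml.safe_load("") is None           # empty document parses to None
assert (yaml.safe_load("") or {}) == {}     # the "or {}" normalizes it
assert yaml.safe_load("a: 1") == {"a": 1}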
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/gevent/greenlet.py
python
Greenlet.start
(self)
Schedule the greenlet to run in this loop iteration
Schedule the greenlet to run in this loop iteration
[ "Schedule", "the", "greenlet", "to", "run", "in", "this", "loop", "iteration" ]
def start(self):
    """Schedule the greenlet to run in this loop iteration"""
    if self._start_event is None:
        self._start_event = self.parent.loop.run_callback(self.switch)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "_start_event", "is", "None", ":", "self", ".", "_start_event", "=", "self", ".", "parent", ".", "loop", ".", "run_callback", "(", "self", ".", "switch", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/gevent/greenlet.py#L184-L187
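A short usage sketch of the scheduling behavior described above, assuming a standard gevent installation: start() only registers a loop callback (run_callback(self.switch)), so nothing runs until the loop is driven, e.g. by join():

import gevent

# gevent.spawn() is construction plus start() in one call; done
# separately here to make the two steps visible.
g = gevent.Greenlet(print, "hello from a greenlet")
g.start()   # schedules the switch; the greenlet has not run yet
g.join()    # drives the event loop until the greenlet finishes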
leancloud/satori
701caccbd4fe45765001ca60435c0cb499477c03
satori-rules/plugin/libs/gevent/greenlet.py
python
Greenlet.__start_cancelled_by_kill
(self)
return self._start_event is _cancelled_start_event
[]
def __start_cancelled_by_kill(self):
    return self._start_event is _cancelled_start_event
[ "def", "__start_cancelled_by_kill", "(", "self", ")", ":", "return", "self", ".", "_start_event", "is", "_cancelled_start_event" ]
https://github.com/leancloud/satori/blob/701caccbd4fe45765001ca60435c0cb499477c03/satori-rules/plugin/libs/gevent/greenlet.py#L209-L210
cbrgm/telegram-robot-rss
58fe98de427121fdc152c8df0721f1891174e6c9
util/database.py
python
DatabaseHandler.add_user
(self, telegram_id, username, firstname, lastname, language_code, is_bot, is_active)
Adds a user to sqlite database Args: param1 (int): The telegram_id of a user. param2 (str): The username of a user. param3 (str): The firstname of a user. param4 (str): The lastname of a user. param5 (str): The language_code of a user. param6 (str): The is_bot flag of a user.
Adds a user to sqlite database
[ "Adds", "a", "user", "to", "sqlite", "database" ]
def add_user(self, telegram_id, username, firstname, lastname, language_code, is_bot, is_active):
    """Adds a user to sqlite database

    Args:
        param1 (int): The telegram_id of a user.
        param2 (str): The username of a user.
        param3 (str): The firstname of a user.
        param4 (str): The lastname of a user.
        param5 (str): The language_code of a user.
        param6 (str): The is_bot flag of a user.
    """
    conn = sqlite3.connect(self.database_path)
    cursor = conn.cursor()
    cursor.execute("INSERT OR IGNORE INTO user VALUES (?,?,?,?,?,?,?)",
                   (telegram_id, username, firstname, lastname, language_code, is_bot, is_active))
    conn.commit()
    conn.close()
[ "def", "add_user", "(", "self", ",", "telegram_id", ",", "username", ",", "firstname", ",", "lastname", ",", "language_code", ",", "is_bot", ",", "is_active", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "database_path", ")", "cursor...
https://github.com/cbrgm/telegram-robot-rss/blob/58fe98de427121fdc152c8df0721f1891174e6c9/util/database.py#L22-L41
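A self-contained sketch of the INSERT OR IGNORE pattern used above; the in-memory database and two-column table are illustrative stand-ins for the real schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE user (telegram_id INTEGER PRIMARY KEY, username TEXT)")

# Parameterized "?" placeholders avoid SQL injection; OR IGNORE turns the
# insert into a no-op when the primary key already exists.
for _ in range(2):
    conn.execute("INSERT OR IGNORE INTO user VALUES (?,?)", (42, "alice"))
conn.commit()

assert conn.execute("SELECT COUNT(*) FROM user").fetchone()[0] == 1
conn.close()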
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/idlelib/PyShell.py
python
PyShell.open_stack_viewer
(self, event=None)
[]
def open_stack_viewer(self, event=None):
    if self.interp.rpcclt:
        return self.interp.remote_stack_viewer()
    try:
        sys.last_traceback
    except:
        tkMessageBox.showerror("No stack trace",
                               "There is no stack trace yet.\n"
                               "(sys.last_traceback is not defined)",
                               parent=self.text)
        return
    from idlelib.StackViewer import StackBrowser
    StackBrowser(self.root, self.flist)
[ "def", "open_stack_viewer", "(", "self", ",", "event", "=", "None", ")", ":", "if", "self", ".", "interp", ".", "rpcclt", ":", "return", "self", ".", "interp", ".", "remote_stack_viewer", "(", ")", "try", ":", "sys", ".", "last_traceback", "except", ":",...
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/idlelib/PyShell.py#L1250-L1262
tijme/angularjs-csti-scanner
dcf5bf3d36a0a6ee3a9db1340e68de95b33e4615
acstis/actions/TraverseUrlAction.py
python
TraverseUrlAction.get_action_items_derived
(self)
return items
Get new queue items based on this action. Returns: list(:class:`nyawc.QueueItem`): A list of possibly vulnerable queue items.
Get new queue items based on this action.
[ "Get", "new", "queue", "items", "based", "on", "this", "action", "." ]
def get_action_items_derived(self):
    """Get new queue items based on this action.

    Returns:
        list(:class:`nyawc.QueueItem`): A list of possibly vulnerable queue items.
    """
    items = []

    path = self.get_parsed_url().path
    filename = self.get_filename()

    if filename:
        # strip the filename so only the directory part of the path remains
        # (the original evaluated this slice without assigning it back)
        path = path[0:-len(filename)]

    parts = list(filter(None, path.split("/")))

    for index in range(0, len(parts)):
        for payload in self.__payloads:
            queue_item = self.get_item_copy()
            verify_item = self.get_item_copy()

            path = "/".join(parts[0:index])
            path_with_affix = ("/" if path else "") + path + "/" + payload["value"]
            parsed = self.get_parsed_url(queue_item.request.url)
            parsed = parsed._replace(path=path_with_affix, query="")
            queue_item.request.url = parsed.geturl()
            queue_item.payload = payload

            path_with_affix = ("/" if path else "") + path + "/" + Payloads.get_verify_payload(payload)["value"]
            parsed = self.get_parsed_url(verify_item.request.url)
            parsed = parsed._replace(path=path_with_affix, query="")
            verify_item.request.url = parsed.geturl()
            verify_item.payload = Payloads.get_verify_payload(payload)

            queue_item.verify_item = verify_item
            items.append(queue_item)

    return items
[ "def", "get_action_items_derived", "(", "self", ")", ":", "items", "=", "[", "]", "path", "=", "self", ".", "get_parsed_url", "(", ")", ".", "path", "filename", "=", "self", ".", "get_filename", "(", ")", "if", "filename", ":", "path", "[", "0", ":", ...
https://github.com/tijme/angularjs-csti-scanner/blob/dcf5bf3d36a0a6ee3a9db1340e68de95b33e4615/acstis/actions/TraverseUrlAction.py#L47-L86
nwcell/psycopg2-windows
5698844286001962f3eeeab58164301898ef48e9
psycopg2/_range.py
python
RangeCaster._from_db
(self, name, pyrange, conn_or_curs)
return RangeCaster(name, pyrange, oid=type, subtype_oid=subtype, array_oid=array)
Return a `RangeCaster` instance for the type *pgrange*. Raise `ProgrammingError` if the type is not found.
Return a `RangeCaster` instance for the type *pgrange*.
[ "Return", "a", "RangeCaster", "instance", "for", "the", "type", "*", "pgrange", "*", "." ]
def _from_db(self, name, pyrange, conn_or_curs):
    """Return a `RangeCaster` instance for the type *pgrange*.

    Raise `ProgrammingError` if the type is not found.
    """
    from psycopg2.extensions import STATUS_IN_TRANSACTION
    from psycopg2.extras import _solve_conn_curs
    conn, curs = _solve_conn_curs(conn_or_curs)

    if conn.server_version < 90200:
        raise ProgrammingError("range types not available in version %s"
                               % conn.server_version)

    # Store the transaction status of the connection to revert it after use
    conn_status = conn.status

    # Use the correct schema
    if '.' in name:
        schema, tname = name.split('.', 1)
    else:
        tname = name
        schema = 'public'

    # get the type oid and attributes
    try:
        curs.execute("""\
select rngtypid, rngsubtype,
    (select typarray from pg_type where oid = rngtypid)
from pg_range r
join pg_type t on t.oid = rngtypid
join pg_namespace ns on ns.oid = typnamespace
where typname = %s and ns.nspname = %s;
""", (tname, schema))

    except ProgrammingError:
        if not conn.autocommit:
            conn.rollback()
        raise
    else:
        rec = curs.fetchone()

        # revert the status of the connection as before the command
        if (conn_status != STATUS_IN_TRANSACTION
                and not conn.autocommit):
            conn.rollback()

    if not rec:
        raise ProgrammingError(
            "PostgreSQL type '%s' not found" % name)

    type, subtype, array = rec

    return RangeCaster(name, pyrange,
                       oid=type, subtype_oid=subtype, array_oid=array)
[ "def", "_from_db", "(", "self", ",", "name", ",", "pyrange", ",", "conn_or_curs", ")", ":", "from", "psycopg2", ".", "extensions", "import", "STATUS_IN_TRANSACTION", "from", "psycopg2", ".", "extras", "import", "_solve_conn_curs", "conn", ",", "curs", "=", "_s...
https://github.com/nwcell/psycopg2-windows/blob/5698844286001962f3eeeab58164301898ef48e9/psycopg2/_range.py#L310-L363
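This private helper is normally reached through the public psycopg2.extras.register_range() API. A hedged sketch of that entry point (the DSN and range type name are hypothetical, and the exact signature should be checked against the installed psycopg2 version):

import psycopg2
from psycopg2.extras import register_range

conn = psycopg2.connect("dbname=test")  # hypothetical DSN

# register_range() looks up the range type's OIDs (via the pg_range/pg_type
# query shown in _from_db above) and installs a caster for this connection.
caster = register_range("schedule", "Period", conn)  # hypothetical type name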
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/Python3/calendar.py
python
Calendar.yeardayscalendar
(self, year, width=3)
return [months[i:i+width] for i in range(0, len(months), width) ]
Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero.
Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero.
[ "Return", "the", "data", "for", "the", "specified", "year", "ready", "for", "formatting", "(", "similar", "to", "yeardatescalendar", "()", ")", ".", "Entries", "in", "the", "week", "lists", "are", "day", "numbers", ".", "Day", "numbers", "outside", "this", ...
def yeardayscalendar(self, year, width=3):
    """
    Return the data for the specified year ready for formatting (similar to
    yeardatescalendar()). Entries in the week lists are day numbers.
    Day numbers outside this month are zero.
    """
    months = [
        self.monthdayscalendar(year, i)
        for i in range(January, January+12)
    ]
    return [months[i:i+width] for i in range(0, len(months), width)]
[ "def", "yeardayscalendar", "(", "self", ",", "year", ",", "width", "=", "3", ")", ":", "months", "=", "[", "self", ".", "monthdayscalendar", "(", "year", ",", "i", ")", "for", "i", "in", "range", "(", "January", ",", "January", "+", "12", ")", "]",...
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/Python3/calendar.py#L247-L257
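A usage sketch against the standard-library calendar module, which this vendored copy mirrors:

import calendar

cal = calendar.Calendar()
# Twelve months grouped into rows of `width` (default 3): 4 rows of 3.
year = cal.yeardayscalendar(2021, width=3)
assert len(year) == 4 and len(year[0]) == 3

january = year[0][0]          # a list of weeks; each week is 7 day numbers
assert len(january[0]) == 7
assert 0 in january[0]        # leading days outside January are zero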
Floobits/flootty
731fb4516da3ad34c724440787e57c97229838e8
flootty/floo/common/lib/diff_match_patch.py
python
diff_match_patch.diff_halfMatch
(self, text1, text2)
return (text1_a, text1_b, text2_a, text2_b, mid_common)
Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match.
Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs.
[ "Do", "the", "two", "texts", "share", "a", "substring", "which", "is", "at", "least", "half", "the", "length", "of", "the", "longer", "text?", "This", "speedup", "can", "produce", "non", "-", "minimal", "diffs", "." ]
def diff_halfMatch(self, text1, text2):
    """Do the two texts share a substring which is at least half the
    length of the longer text?
    This speedup can produce non-minimal diffs.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Five element Array, containing the prefix of text1, the suffix of text1,
      the prefix of text2, the suffix of text2 and the common middle.  Or None
      if there was no match.
    """
    if self.Diff_Timeout <= 0:
        # Don't risk returning a non-optimal diff if we have unlimited time.
        return None
    if len(text1) > len(text2):
        (longtext, shorttext) = (text1, text2)
    else:
        (shorttext, longtext) = (text1, text2)
    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
        return None  # Pointless.

    def diff_halfMatchI(longtext, shorttext, i):
        """Does a substring of shorttext exist within longtext such that the
        substring is at least half the length of longtext?
        Closure, but does not reference any external variables.

        Args:
          longtext: Longer string.
          shorttext: Shorter string.
          i: Start index of quarter length substring within longtext.

        Returns:
          Five element Array, containing the prefix of longtext, the suffix of
          longtext, the prefix of shorttext, the suffix of shorttext and the
          common middle.  Or None if there was no match.
        """
        seed = longtext[i:i + len(longtext) // 4]
        best_common = ''
        j = shorttext.find(seed)
        while j != -1:
            prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
            suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
            if len(best_common) < suffixLength + prefixLength:
                best_common = (shorttext[j - suffixLength:j] +
                               shorttext[j:j + prefixLength])
                best_longtext_a = longtext[:i - suffixLength]
                best_longtext_b = longtext[i + prefixLength:]
                best_shorttext_a = shorttext[:j - suffixLength]
                best_shorttext_b = shorttext[j + prefixLength:]
            j = shorttext.find(seed, j + 1)

        if len(best_common) * 2 >= len(longtext):
            return (best_longtext_a, best_longtext_b,
                    best_shorttext_a, best_shorttext_b, best_common)
        else:
            return None

    # First check if the second quarter is the seed for a half-match.
    hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
    # Check again based on the third quarter.
    hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
    if not hm1 and not hm2:
        return None
    elif not hm2:
        hm = hm1
    elif not hm1:
        hm = hm2
    else:
        # Both matched.  Select the longest.
        if len(hm1[4]) > len(hm2[4]):
            hm = hm1
        else:
            hm = hm2

    # A half-match was found, sort out the return data.
    if len(text1) > len(text2):
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    else:
        (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
    return (text1_a, text1_b, text2_a, text2_b, mid_common)
[ "def", "diff_halfMatch", "(", "self", ",", "text1", ",", "text2", ")", ":", "if", "self", ".", "Diff_Timeout", "<=", "0", ":", "# Don't risk returning a non-optimal diff if we have unlimited time.", "return", "None", "if", "len", "(", "text1", ")", ">", "len", "...
https://github.com/Floobits/flootty/blob/731fb4516da3ad34c724440787e57c97229838e8/flootty/floo/common/lib/diff_match_patch.py#L565-L646
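diff_halfMatch() is an internal speedup used by diff_main() only when a time limit is set (Diff_Timeout <= 0 bails out immediately, as the first lines above show). A hedged usage sketch assuming the pip diff-match-patch package layout; the commented output is approximate:

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.Diff_Timeout = 1.0   # half-match shortcuts are only attempted under a timeout
diffs = dmp.diff_main("The quick brown fox", "The slow brown fox")
# roughly: [(0, 'The '), (-1, 'quick'), (1, 'slow'), (0, ' brown fox')]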
gramps-project/gramps
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
gramps/plugins/textreport/detancestralreport.py
python
DetAncestorReport.write_marriage
(self, person)
Output marriage sentence.
Output marriage sentence.
[ "Output", "marriage", "sentence", "." ]
def write_marriage(self, person):
    """
    Output marriage sentence.
    """
    is_first = True
    for family_handle in person.get_family_handle_list():
        family = self._db.get_family_from_handle(family_handle)
        spouse_handle = utils.find_spouse(person, family)
        if spouse_handle:
            spouse = self._db.get_person_from_handle(spouse_handle)
            spouse_mark = utils.get_person_mark(self._db, spouse)
        else:
            spouse_mark = None
        text = self.__narrator.get_married_string(family, is_first, self._nd)
        if text:
            self.doc.write_text_citation(text, spouse_mark)
            if self.want_ids:
                self.doc.write_text(' (%s)' % family.get_gramps_id())
            is_first = False
[ "def", "write_marriage", "(", "self", ",", "person", ")", ":", "is_first", "=", "True", "for", "family_handle", "in", "person", ".", "get_family_handle_list", "(", ")", ":", "family", "=", "self", ".", "_db", ".", "get_family_from_handle", "(", "family_handle"...
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/plugins/textreport/detancestralreport.py#L551-L572
snakemake/snakemake
987282dde8a2db5174414988c134a39ae8836a61
snakemake/benchmark.py
python
BenchmarkTimer._update_record
(self)
Perform the actual measurement
Perform the actual measurement
[ "Perform", "the", "actual", "measurement" ]
def _update_record(self):
    """Perform the actual measurement"""
    import psutil

    # Memory measurements
    rss, vms, uss, pss = 0, 0, 0, 0
    # I/O measurements
    io_in, io_out = 0, 0
    check_io = True
    # CPU seconds
    cpu_usages = 0
    # CPU usage time
    cpu_time = 0
    # Iterate over process and all children
    try:
        this_time = time.time()
        for proc in chain((self.main,), self.main.children(recursive=True)):
            proc = self.procs.setdefault(proc.pid, proc)
            with proc.oneshot():
                if self.bench_record.prev_time:
                    cpu_usages += proc.cpu_percent() * (
                        this_time - self.bench_record.prev_time
                    )
                meminfo = proc.memory_full_info()
                rss += meminfo.rss
                vms += meminfo.vms
                uss += meminfo.uss
                pss += meminfo.pss
                if check_io:
                    try:
                        ioinfo = proc.io_counters()
                        io_in += ioinfo.read_bytes
                        io_out += ioinfo.write_bytes
                    except NotImplementedError as nie:
                        # OS doesn't track IO
                        check_io = False
                cpu_times = proc.cpu_times()
                cpu_time += cpu_times.user + cpu_times.system
        self.bench_record.prev_time = this_time
        if not self.bench_record.first_time:
            self.bench_record.prev_time = this_time
        rss /= 1024 * 1024
        vms /= 1024 * 1024
        uss /= 1024 * 1024
        pss /= 1024 * 1024
        if check_io:
            io_in /= 1024 * 1024
            io_out /= 1024 * 1024
        else:
            io_in = None
            io_out = None
    except psutil.Error as e:
        return
    # Update benchmark record's RSS and VMS
    self.bench_record.max_rss = max(self.bench_record.max_rss or 0, rss)
    self.bench_record.max_vms = max(self.bench_record.max_vms or 0, vms)
    self.bench_record.max_uss = max(self.bench_record.max_uss or 0, uss)
    self.bench_record.max_pss = max(self.bench_record.max_pss or 0, pss)
    self.bench_record.io_in = io_in
    self.bench_record.io_out = io_out
    self.bench_record.cpu_usages += cpu_usages
    self.bench_record.cpu_time = cpu_time
[ "def", "_update_record", "(", "self", ")", ":", "import", "psutil", "# Memory measurements", "rss", ",", "vms", ",", "uss", ",", "pss", "=", "0", ",", "0", ",", "0", ",", "0", "# I/O measurements", "io_in", ",", "io_out", "=", "0", ",", "0", "check_io"...
https://github.com/snakemake/snakemake/blob/987282dde8a2db5174414988c134a39ae8836a61/snakemake/benchmark.py#L215-L287
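A self-contained sketch of the process-tree walk used above, assuming psutil is installed. It uses memory_info() rather than memory_full_info(), since the latter can require elevated privileges; the helper name is hypothetical:

import os
from itertools import chain

import psutil

def sample_tree_rss_mib(pid):
    """Sum RSS (in MiB) over a process and all of its descendants,
    mirroring the chain((main,), main.children(recursive=True)) walk above."""
    main = psutil.Process(pid)
    rss = 0
    for proc in chain((main,), main.children(recursive=True)):
        try:
            with proc.oneshot():           # batch reads to reduce syscalls
                rss += proc.memory_info().rss
        except psutil.Error:               # a child may exit mid-walk
            continue
    return rss / (1024 * 1024)

print(sample_tree_rss_mib(os.getpid()))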
frappe/frappe
b64cab6867dfd860f10ccaf41a4ec04bc890b583
frappe/sessions.py
python
Session.start
(self)
start a new session
start a new session
[ "start", "a", "new", "session" ]
def start(self):
    """start a new session"""
    # generate sid
    if self.user=='Guest':
        sid = 'Guest'
    else:
        sid = frappe.generate_hash()

    self.data.user = self.user
    self.data.sid = sid
    self.data.data.user = self.user
    self.data.data.session_ip = frappe.local.request_ip
    if self.user != "Guest":
        self.data.data.update({
            "last_updated": frappe.utils.now(),
            "session_expiry": get_expiry_period(self.device),
            "full_name": self.full_name,
            "user_type": self.user_type,
            "device": self.device,
            "session_country": get_geo_ip_country(frappe.local.request_ip) if frappe.local.request_ip else None,
        })

    # insert session
    if self.user!="Guest":
        self.insert_session_record()

        # update user
        user = frappe.get_doc("User", self.data['user'])
        frappe.db.sql("""UPDATE `tabUser`
            SET last_login = %(now)s, last_ip = %(ip)s, last_active = %(now)s
            WHERE name=%(name)s""", {
                'now': frappe.utils.now(),
                'ip': frappe.local.request_ip,
                'name': self.data['user']
            })
        user.run_notifications("before_change")
        user.run_notifications("on_update")
        frappe.db.commit()
[ "def", "start", "(", "self", ")", ":", "# generate sid", "if", "self", ".", "user", "==", "'Guest'", ":", "sid", "=", "'Guest'", "else", ":", "sid", "=", "frappe", ".", "generate_hash", "(", ")", "self", ".", "data", ".", "user", "=", "self", ".", ...
https://github.com/frappe/frappe/blob/b64cab6867dfd860f10ccaf41a4ec04bc890b583/frappe/sessions.py#L214-L254
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
dist/lib/python2.7/SimpleXMLRPCServer.py
python
SimpleXMLRPCDispatcher.register_instance
(self, instance, allow_dotted_names=False)
Registers an instance to respond to XML-RPC requests. Only one instance can be installed at a time. If the registered instance has a _dispatch method then that method will be called with the name of the XML-RPC method and its parameters as a tuple e.g. instance._dispatch('add',(2,3)) If the registered instance does not have a _dispatch method then the instance will be searched to find a matching method and, if found, will be called. Methods beginning with an '_' are considered private and will not be called by SimpleXMLRPCServer. If a registered function matches a XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the instance does not have a _dispatch method, method names containing dots are supported and resolved, as long as none of the name segments start with an '_'. *** SECURITY WARNING: *** Enabling the allow_dotted_names options allows intruders to access your module's global variables and may allow intruders to execute arbitrary code on your machine. Only use this option on a secure, closed network.
Registers an instance to respond to XML-RPC requests.
[ "Registers", "an", "instance", "to", "respond", "to", "XML", "-", "RPC", "requests", "." ]
def register_instance(self, instance, allow_dotted_names=False):
    """Registers an instance to respond to XML-RPC requests.

    Only one instance can be installed at a time.

    If the registered instance has a _dispatch method then that
    method will be called with the name of the XML-RPC method and
    its parameters as a tuple
    e.g. instance._dispatch('add',(2,3))

    If the registered instance does not have a _dispatch method
    then the instance will be searched to find a matching method
    and, if found, will be called. Methods beginning with an '_'
    are considered private and will not be called by
    SimpleXMLRPCServer.

    If a registered function matches a XML-RPC request, then it
    will be called instead of the registered instance.

    If the optional allow_dotted_names argument is true and the
    instance does not have a _dispatch method, method names
    containing dots are supported and resolved, as long as none of
    the name segments start with an '_'.

        *** SECURITY WARNING: ***

        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine.  Only
        use this option on a secure, closed network.
    """
    self.instance = instance
    self.allow_dotted_names = allow_dotted_names
[ "def", "register_instance", "(", "self", ",", "instance", ",", "allow_dotted_names", "=", "False", ")", ":", "self", ".", "instance", "=", "instance", "self", ".", "allow_dotted_names", "=", "allow_dotted_names" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/SimpleXMLRPCServer.py#L175-L209
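A minimal usage sketch with the Python 2 SimpleXMLRPCServer this file ships (the service class and port are illustrative):

from SimpleXMLRPCServer import SimpleXMLRPCServer  # Python 2, as in this record

class MathService:
    def add(self, x, y):       # public: callable over XML-RPC as "add"
        return x + y
    def _secret(self):         # leading underscore: never exposed
        return "hidden"

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_instance(MathService())  # allow_dotted_names=False by default
# server.serve_forever()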
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/library/oo_iam_kms.py
python
AwsIamKms.get_all_kms_info
(self)
return aliases
fetch all kms info and return them list_keys doesn't have information regarding aliases list_aliases doesn't have the full kms arn fetch both and join them on the targetKeyId
fetch all kms info and return them
[ "fetch", "all", "kms", "info", "and", "return", "them" ]
def get_all_kms_info(self):
    '''fetch all kms info and return them
       list_keys doesn't have information regarding aliases
       list_aliases doesn't have the full kms arn

       fetch both and join them on the targetKeyId
    '''
    aliases = self.kms_client.list_aliases()['Aliases']
    keys = self.kms_client.list_keys()['Keys']

    for alias in aliases:
        for key in keys:
            if 'TargetKeyId' in alias and 'KeyId' in key:
                if alias['TargetKeyId'] == key['KeyId']:
                    alias.update(key)

    return aliases
[ "def", "get_all_kms_info", "(", "self", ")", ":", "aliases", "=", "self", ".", "kms_client", ".", "list_aliases", "(", ")", "[", "'Aliases'", "]", "keys", "=", "self", ".", "kms_client", ".", "list_keys", "(", ")", "[", "'Keys'", "]", "for", "alias", "...
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/library/oo_iam_kms.py#L55-L72
LexPredict/openedgar
1d1b8bc8faa3c59e05d9883e53039b6328b7d831
lexpredict_openedgar/openedgar/tasks.py
python
create_filing_error
(row, filing_path: str)
return True
Create a Filing error record from an index row. :param row: :param filing_path: :return:
Create a Filing error record from an index row. :param row: :param filing_path: :return:
[ "Create", "a", "Filing", "error", "record", "from", "an", "index", "row", ".", ":", "param", "row", ":", ":", "param", "filing_path", ":", ":", "return", ":" ]
def create_filing_error(row, filing_path: str):
    """
    Create a Filing error record from an index row.
    :param row:
    :param filing_path:
    :return:
    """
    # Get vars
    cik = row["CIK"]
    company_name = row["Company Name"]
    form_type = row["Form Type"]
    try:
        date_filed = dateutil.parser.parse(str(row["Date Filed"])).date()
    except ValueError:
        date_filed = None
    except IndexError:
        date_filed = None

    # Create empty error filing record
    filing = Filing()
    filing.form_type = form_type
    filing.date_filed = date_filed
    filing.s3_path = filing_path
    filing.is_error = True
    filing.is_processed = False

    # Get company info
    try:
        company = Company.objects.get(cik=cik)

        try:
            _ = CompanyInfo.objects.get(company=company, date=date_filed)
        except CompanyInfo.DoesNotExist:
            # Create company info record
            company_info = CompanyInfo()
            company_info.company = company
            company_info.name = company_name
            company_info.sic = None
            company_info.state_incorporation = None
            company_info.state_location = None
            company_info.date = date_filed
            company_info.save()
    except Company.DoesNotExist:
        # Create company
        company = Company()
        company.cik = cik

        try:
            company.save()
        except django.db.utils.IntegrityError:
            return create_filing_error(row, filing_path)

        # Create company info record
        company_info = CompanyInfo()
        company_info.company = company
        company_info.name = company_name
        company_info.sic = None
        company_info.state_incorporation = None
        company_info.state_location = None
        company_info.date = date_filed
        company_info.save()

    # Finally update company and save
    filing.company = company
    filing.save()
    return True
[ "def", "create_filing_error", "(", "row", ",", "filing_path", ":", "str", ")", ":", "# Get vars", "cik", "=", "row", "[", "\"CIK\"", "]", "company_name", "=", "row", "[", "\"Company Name\"", "]", "form_type", "=", "row", "[", "\"Form Type\"", "]", "try", "...
https://github.com/LexPredict/openedgar/blob/1d1b8bc8faa3c59e05d9883e53039b6328b7d831/lexpredict_openedgar/openedgar/tasks.py#L119-L185
adamcaudill/EquationGroupLeak
52fa871c89008566c27159bd48f2a8641260c984
Firewall/EXPLOITS/EPBA/EPICBANANA/pexpect.py
python
spawn.sendintr
(self)
This sends a SIGINT to the child. It does not require the SIGINT to be the first character on a line.
This sends a SIGINT to the child. It does not require the SIGINT to be the first character on a line.
[ "This", "sends", "a", "SIGINT", "to", "the", "child", ".", "It", "does", "not", "require", "the", "SIGINT", "to", "be", "the", "first", "character", "on", "a", "line", "." ]
def sendintr(self):
    """This sends a SIGINT to the child. It does not require
    the SIGINT to be the first character on a line. """

    if hasattr(termios, 'VINTR'):
        char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
    else:
        # platform does not define VINTR so assume CTRL-C
        char = chr(3)
    self.send(char)
[ "def", "sendintr", "(", "self", ")", ":", "if", "hasattr", "(", "termios", ",", "'VINTR'", ")", ":", "char", "=", "termios", ".", "tcgetattr", "(", "self", ".", "child_fd", ")", "[", "6", "]", "[", "termios", ".", "VINTR", "]", "else", ":", "# plat...
https://github.com/adamcaudill/EquationGroupLeak/blob/52fa871c89008566c27159bd48f2a8641260c984/Firewall/EXPLOITS/EPBA/EPICBANANA/pexpect.py#L1025-L1035
ynhacler/RedKindle
7c970920dc840f869e38cbda480d630cc2e7b200
web/template.py
python
TemplateResult._prepare_body
(self)
Prepare value of __body__ by joining parts.
Prepare value of __body__ by joining parts.
[ "Prepare", "value", "of", "__body__", "by", "joining", "parts", "." ]
def _prepare_body(self):
    """Prepare value of __body__ by joining parts.
    """
    if self._parts:
        value = u"".join(self._parts)
        self._parts[:] = []
        body = self._d.get('__body__')
        if body:
            self._d['__body__'] = body + value
        else:
            self._d['__body__'] = value
[ "def", "_prepare_body", "(", "self", ")", ":", "if", "self", ".", "_parts", ":", "value", "=", "u\"\"", ".", "join", "(", "self", ".", "_parts", ")", "self", ".", "_parts", "[", ":", "]", "=", "[", "]", "body", "=", "self", ".", "_d", ".", "get...
https://github.com/ynhacler/RedKindle/blob/7c970920dc840f869e38cbda480d630cc2e7b200/web/template.py#L1250-L1260
tf-encrypted/tf-encrypted
8b7cfb32c426e9a6f56769a1b47626bd1be03a66
tf_encrypted/protocol/pond/pond.py
python
PondTensor.add
(self, other)
return self.prot.add(self, other)
Add `other` to this PondTensor. This can be another tensor with the same backing or a primitive. This function returns a new PondTensor and does not modify this one. :param PondTensor other: a or primitive (e.g. a float) :return: A new PondTensor with `other` added. :rtype: PondTensor
Add `other` to this PondTensor. This can be another tensor with the same backing or a primitive.
[ "Add", "other", "to", "this", "PondTensor", ".", "This", "can", "be", "another", "tensor", "with", "the", "same", "backing", "or", "a", "primitive", "." ]
def add(self, other):
    """
    Add `other` to this PondTensor.  This can be another tensor with the same
    backing or a primitive.

    This function returns a new PondTensor and does not modify this one.

    :param PondTensor other: a PondTensor or primitive (e.g. a float)
    :return: A new PondTensor with `other` added.
    :rtype: PondTensor
    """
    return self.prot.add(self, other)
[ "def", "add", "(", "self", ",", "other", ")", ":", "return", "self", ".", "prot", ".", "add", "(", "self", ",", "other", ")" ]
https://github.com/tf-encrypted/tf-encrypted/blob/8b7cfb32c426e9a6f56769a1b47626bd1be03a66/tf_encrypted/protocol/pond/pond.py#L1658-L1669
niftools/blender_niftools_addon
fc28f567e1fa431ec6633cb2a138898136090b29
io_scene_niftools/modules/nif_export/animation/transform.py
python
TransformAnimation.export_transforms
(self, parent_block, b_obj, b_action, bone=None)
If bone == None, object level animation is exported. If a bone is given, skeletal animation is exported.
If bone == None, object level animation is exported. If a bone is given, skeletal animation is exported.
[ "If", "bone", "==", "None", "object", "level", "animation", "is", "exported", ".", "If", "a", "bone", "is", "given", "skeletal", "animation", "is", "exported", "." ]
def export_transforms(self, parent_block, b_obj, b_action, bone=None):
    """
    If bone == None, object level animation is exported.
    If a bone is given, skeletal animation is exported.
    """

    # b_action may be None, then nothing is done.
    if not b_action:
        return

    # blender object must exist
    assert b_obj
    # if a bone is given, b_obj must be an armature
    if bone:
        assert type(b_obj.data) == bpy.types.Armature

    # just for more detailed error reporting later on
    bonestr = ""

    # skeletal animation - with bone correction & coordinate corrections
    if bone and bone.name in b_action.groups:
        # get bind matrix for bone
        bind_matrix = math.get_object_bind(bone)
        exp_fcurves = b_action.groups[bone.name].channels

        # just for more detailed error reporting later on
        bonestr = f" in bone {bone.name}"
        target_name = block_store.get_full_name(bone)
        priority = bone.niftools.priority

    # object level animation - no coordinate corrections
    elif not bone:

        # raise error on any objects parented to bones
        if b_obj.parent and b_obj.parent_type == "BONE":
            raise NifError(
                f"{b_obj.name} is parented to a bone AND has animations. The nif format does not support this!")

        target_name = block_store.get_full_name(b_obj)
        priority = 0

        # we have either a root object (Scene Root), in which case we take the coordinates without modification
        # or a generic object parented to an empty = node
        # objects may have an offset from their parent that is not apparent in the user input (ie. UI values and keyframes)
        # we want to export matrix_local, and the keyframes are in matrix_basis, so do:
        # matrix_local = matrix_parent_inverse * matrix_basis
        bind_matrix = b_obj.matrix_parent_inverse
        exp_fcurves = [fcu for fcu in b_action.fcurves
                       if fcu.data_path in ("rotation_quaternion", "rotation_euler", "location", "scale")]

    else:
        # bone isn't keyframed in this action, nothing to do here
        return

    # decompose the bind matrix
    bind_scale, bind_rot, bind_trans = math.decompose_srt(bind_matrix)
    n_kfc, n_kfi = self.create_controller(parent_block, target_name, priority)

    # fill in the non-trivial values
    start_frame, stop_frame = b_action.frame_range
    self.set_flags_and_timing(n_kfc, exp_fcurves, start_frame, stop_frame)

    # get the desired fcurves for each data type from exp_fcurves
    quaternions = [fcu for fcu in exp_fcurves if fcu.data_path.endswith("quaternion")]
    translations = [fcu for fcu in exp_fcurves if fcu.data_path.endswith("location")]
    eulers = [fcu for fcu in exp_fcurves if fcu.data_path.endswith("euler")]
    scales = [fcu for fcu in exp_fcurves if fcu.data_path.endswith("scale")]

    # ensure that those groups that are present have all their fcurves
    for fcus, num_fcus in ((quaternions, 4), (eulers, 3), (translations, 3), (scales, 3)):
        if fcus and len(fcus) != num_fcus:
            raise NifError(
                f"Incomplete key set {bonestr} for action {b_action.name}."
                f"Ensure that if a bone is keyframed for a property, all channels are keyframed.")

    # go over all fcurves collected above and transform and store all their keys
    quat_curve = []
    euler_curve = []
    trans_curve = []
    scale_curve = []
    for frame, quat in self.iter_frame_key(quaternions, mathutils.Quaternion):
        quat = math.export_keymat(bind_rot, quat.to_matrix().to_4x4(), bone).to_quaternion()
        quat_curve.append((frame, quat))

    for frame, euler in self.iter_frame_key(eulers, mathutils.Euler):
        keymat = math.export_keymat(bind_rot, euler.to_matrix().to_4x4(), bone)
        euler = keymat.to_euler("XYZ", euler)
        euler_curve.append((frame, euler))

    for frame, trans in self.iter_frame_key(translations, mathutils.Vector):
        keymat = math.export_keymat(bind_rot, mathutils.Matrix.Translation(trans), bone)
        trans = keymat.to_translation() + bind_trans
        trans_curve.append((frame, trans))

    for frame, scale in self.iter_frame_key(scales, mathutils.Vector):
        # just use the first scale curve and assume even scale over all curves
        scale_curve.append((frame, scale[0]))

    if n_kfi:
        # set the default transforms of the interpolator as the bone's bind pose
        n_kfi.translation.x, n_kfi.translation.y, n_kfi.translation.z = bind_trans
        n_kfi.rotation.w, n_kfi.rotation.x, n_kfi.rotation.y, n_kfi.rotation.z = bind_rot.to_quaternion()
        n_kfi.scale = bind_scale

        if max(len(c) for c in (quat_curve, euler_curve, trans_curve, scale_curve)) > 0:
            # number of frames is > 0, so add transform data
            n_kfd = block_store.create_block("NiTransformData", exp_fcurves)
            n_kfi.data = n_kfd
        else:
            # no need to add any keys, done
            return
    else:
        # add the keyframe data
        n_kfd = block_store.create_block("NiKeyframeData", exp_fcurves)
        n_kfc.data = n_kfd

    # TODO [animation] support other interpolation modes, get interpolation from blender?
    # probably requires additional data like tangents and stuff

    # finally we can export the data calculated above
    if euler_curve:
        n_kfd.rotation_type = NifFormat.KeyType.XYZ_ROTATION_KEY
        n_kfd.num_rotation_keys = 1  # *NOT* len(frames) this crashes the engine!
        for i, coord in enumerate(n_kfd.xyz_rotations):
            coord.num_keys = len(euler_curve)
            coord.interpolation = NifFormat.KeyType.LINEAR_KEY
            coord.keys.update_size()
            for key, (frame, euler) in zip(coord.keys, euler_curve):
                key.time = frame / self.fps
                key.value = euler[i]
    elif quat_curve:
        n_kfd.rotation_type = NifFormat.KeyType.QUADRATIC_KEY
        n_kfd.num_rotation_keys = len(quat_curve)
        n_kfd.quaternion_keys.update_size()
        for key, (frame, quat) in zip(n_kfd.quaternion_keys, quat_curve):
            key.time = frame / self.fps
            key.value.w = quat.w
            key.value.x = quat.x
            key.value.y = quat.y
            key.value.z = quat.z

    n_kfd.translations.interpolation = NifFormat.KeyType.LINEAR_KEY
    n_kfd.translations.num_keys = len(trans_curve)
    n_kfd.translations.keys.update_size()
    for key, (frame, trans) in zip(n_kfd.translations.keys, trans_curve):
        key.time = frame / self.fps
        key.value.x, key.value.y, key.value.z = trans

    n_kfd.scales.interpolation = NifFormat.KeyType.LINEAR_KEY
    n_kfd.scales.num_keys = len(scale_curve)
    n_kfd.scales.keys.update_size()
    for key, (frame, scale) in zip(n_kfd.scales.keys, scale_curve):
        key.time = frame / self.fps
        key.value = scale
[ "def", "export_transforms", "(", "self", ",", "parent_block", ",", "b_obj", ",", "b_action", ",", "bone", "=", "None", ")", ":", "# b_action may be None, then nothing is done.", "if", "not", "b_action", ":", "return", "# blender object must exist", "assert", "b_obj", ...
https://github.com/niftools/blender_niftools_addon/blob/fc28f567e1fa431ec6633cb2a138898136090b29/io_scene_niftools/modules/nif_export/animation/transform.py#L122-L275
statsmodels/statsmodels
debbe7ea6ba28fe5bdb78f09f8cac694bef98722
statsmodels/tsa/statespace/mlemodel.py
python
MLEModel.clone
(self, endog, exog=None, **kwargs)
Clone state space model with new data and optionally new specification Parameters ---------- endog : array_like The observed time-series process :math:`y` k_states : int The dimension of the unobserved state process. exog : array_like, optional Array of exogenous regressors, shaped nobs x k. Default is no exogenous regressors. kwargs Keyword arguments to pass to the new model class to change the model specification. Returns ------- model : MLEModel subclass Notes ----- This method must be implemented
Clone state space model with new data and optionally new specification
[ "Clone", "state", "space", "model", "with", "new", "data", "and", "optionally", "new", "specification" ]
def clone(self, endog, exog=None, **kwargs):
    """
    Clone state space model with new data and optionally new specification

    Parameters
    ----------
    endog : array_like
        The observed time-series process :math:`y`
    k_states : int
        The dimension of the unobserved state process.
    exog : array_like, optional
        Array of exogenous regressors, shaped nobs x k. Default is no
        exogenous regressors.
    kwargs
        Keyword arguments to pass to the new model class to change the
        model specification.

    Returns
    -------
    model : MLEModel subclass

    Notes
    -----
    This method must be implemented
    """
    raise NotImplementedError('This method is not implemented in the base'
                              ' class and must be set up by each specific'
                              ' model.')
[ "def", "clone", "(", "self", ",", "endog", ",", "exog", "=", "None", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "'This method is not implemented in the base'", "' class and must be set up by each specific'", "' model.'", ")" ]
https://github.com/statsmodels/statsmodels/blob/debbe7ea6ba28fe5bdb78f09f8cac694bef98722/statsmodels/tsa/statespace/mlemodel.py#L254-L281
awslabs/aws-config-rules
8dfeacf9d9e5e5f0fbb1b8545ff702dea700ea7a
python/EMR_KERBEROS_ENABLED/EMR_KERBEROS_ENABLED.py
python
get_client
(service, event)
return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'] )
Return the service boto client. It should be used instead of directly calling the client. Keyword arguments: service -- the service name used for calling the boto.client() event -- the event variable given in the lambda handler
Return the service boto client. It should be used instead of directly calling the client.
[ "Return", "the", "service", "boto", "client", ".", "It", "should", "be", "used", "instead", "of", "directly", "calling", "the", "client", "." ]
def get_client(service, event):
    """Return the service boto client. It should be used instead of directly calling the client.

    Keyword arguments:
    service -- the service name used for calling the boto.client()
    event -- the event variable given in the lambda handler
    """
    if not ASSUME_ROLE_MODE:
        return boto3.client(service)
    credentials = get_assume_role_credentials(event["executionRoleArn"])
    return boto3.client(service,
                        aws_access_key_id=credentials['AccessKeyId'],
                        aws_secret_access_key=credentials['SecretAccessKey'],
                        aws_session_token=credentials['SessionToken'])
[ "def", "get_client", "(", "service", ",", "event", ")", ":", "if", "not", "ASSUME_ROLE_MODE", ":", "return", "boto3", ".", "client", "(", "service", ")", "credentials", "=", "get_assume_role_credentials", "(", "event", "[", "\"executionRoleArn\"", "]", ")", "r...
https://github.com/awslabs/aws-config-rules/blob/8dfeacf9d9e5e5f0fbb1b8545ff702dea700ea7a/python/EMR_KERBEROS_ENABLED/EMR_KERBEROS_ENABLED.py#L269-L282
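get_assume_role_credentials() is defined elsewhere in the rule file. A hedged sketch of the shape such a helper typically has with boto3 STS (the helper name and session name here are hypothetical, not the rule's actual code):

import boto3

def get_assume_role_credentials_sketch(role_arn):
    """Trade the Lambda's execution role for temporary credentials via STS."""
    sts = boto3.client("sts")
    response = sts.assume_role(RoleArn=role_arn,
                               RoleSessionName="configLambdaExecution")
    # Contains AccessKeyId, SecretAccessKey, SessionToken as used above.
    return response["Credentials"]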
pycrypto/pycrypto
7acba5f3a6ff10f1424c309d0d34d2b713233019
lib/Crypto/Hash/HMAC.py
python
HMAC.digest
(self)
return h.digest()
Return the **binary** (non-printable) MAC of the message that has been authenticated so far. This method does not change the state of the MAC object. You can continue updating the object after calling this function. :Return: A byte string of `digest_size` bytes. It may contain non-ASCII characters, including null bytes.
Return the **binary** (non-printable) MAC of the message that has been authenticated so far.
[ "Return", "the", "**", "binary", "**", "(", "non", "-", "printable", ")", "MAC", "of", "the", "message", "that", "has", "been", "authenticated", "so", "far", "." ]
def digest(self):
    """Return the **binary** (non-printable) MAC of the message that has
    been authenticated so far.

    This method does not change the state of the MAC object.
    You can continue updating the object after calling this function.

    :Return: A byte string of `digest_size` bytes. It may contain non-ASCII
     characters, including null bytes.
    """
    h = self.outer.copy()
    h.update(self.inner.digest())
    return h.digest()
[ "def", "digest", "(", "self", ")", ":", "h", "=", "self", ".", "outer", ".", "copy", "(", ")", "h", ".", "update", "(", "self", ".", "inner", ".", "digest", "(", ")", ")", "return", "h", ".", "digest", "(", ")" ]
https://github.com/pycrypto/pycrypto/blob/7acba5f3a6ff10f1424c309d0d34d2b713233019/lib/Crypto/Hash/HMAC.py#L184-L197
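The copy-before-finalize trick above is what makes digest() non-destructive. The standard-library hmac module (not PyCrypto) has the same property; a small demonstration:

import hashlib
import hmac

mac = hmac.new(b"key", b"hello", hashlib.sha256)
first = mac.digest()
mac.update(b" world")            # the object is still usable after digest()
second = mac.digest()
assert first != second
assert second == hmac.new(b"key", b"hello world", hashlib.sha256).digest()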
wucng/TensorExpand
4ea58f64f5c5082b278229b799c9f679536510b7
TensorExpand/Object detection/faster rcnn/CharlesShang-TFFRCNN-master/lib/datasets/pascal_voc2.py
python
pascal_voc._load_rpn_roidb
(self, gt_roidb, model)
return self.create_roidb_from_box_list(box_list, gt_roidb)
[]
def _load_rpn_roidb(self, gt_roidb, model):
    # set the prefix
    if self._image_set == 'test':
        prefix = model + '/testing'
    else:
        prefix = model + '/training'

    box_list = []
    for index in self.image_index:
        filename = os.path.join(self._pascal_path, 'region_proposals', prefix, index + '.txt')
        assert os.path.exists(filename), \
            'RPN data not found at: {}'.format(filename)
        raw_data = np.loadtxt(filename, dtype=float)
        if len(raw_data.shape) == 1:
            if raw_data.size == 0:
                raw_data = raw_data.reshape((0, 5))
            else:
                raw_data = raw_data.reshape((1, 5))

        x1 = raw_data[:, 0]
        y1 = raw_data[:, 1]
        x2 = raw_data[:, 2]
        y2 = raw_data[:, 3]
        score = raw_data[:, 4]
        inds = np.where((x2 > x1) & (y2 > y1))[0]
        raw_data = raw_data[inds, :4]
        self._num_boxes_proposal += raw_data.shape[0]
        box_list.append(raw_data)

    return self.create_roidb_from_box_list(box_list, gt_roidb)
[ "def", "_load_rpn_roidb", "(", "self", ",", "gt_roidb", ",", "model", ")", ":", "# set the prefix", "if", "self", ".", "_image_set", "==", "'test'", ":", "prefix", "=", "model", "+", "'/testing'", "else", ":", "prefix", "=", "model", "+", "'/training'", "b...
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/faster rcnn/CharlesShang-TFFRCNN-master/lib/datasets/pascal_voc2.py#L478-L507
skyfielders/python-skyfield
0e68757a5c1081f784c58fd7a76635c6deb98451
skyfield/timelib.py
python
Time.gmst
(self)
return sidereal_time(self)
Greenwich Mean Sidereal Time (GMST) in hours.
Greenwich Mean Sidereal Time (GMST) in hours.
[ "Greenwich", "Mean", "Sidereal", "Time", "(", "GMST", ")", "in", "hours", "." ]
def gmst(self):
    """Greenwich Mean Sidereal Time (GMST) in hours."""
    return sidereal_time(self)
[ "def", "gmst", "(", "self", ")", ":", "return", "sidereal_time", "(", "self", ")" ]
https://github.com/skyfielders/python-skyfield/blob/0e68757a5c1081f784c58fd7a76635c6deb98451/skyfield/timelib.py#L824-L826
4shadoww/hakkuframework
409a11fc3819d251f86faa3473439f8c19066a21
lib/dns/rrset.py
python
RRset.match
(self, *args, **kwargs)
Does this rrset match the specified attributes? Behaves as :py:func:`full_match()` if the first argument is a ``dns.name.Name``, and as :py:func:`dns.rdataset.Rdataset.match()` otherwise. (This behavior fixes a design mistake where the signature of this method became incompatible with that of its superclass. The fix makes RRsets matchable as Rdatasets while preserving backwards compatibility.)
Does this rrset match the specified attributes?
[ "Does", "this", "rrset", "match", "the", "specified", "attributes?" ]
def match(self, *args, **kwargs):
    """Does this rrset match the specified attributes?

    Behaves as :py:func:`full_match()` if the first argument is a
    ``dns.name.Name``, and as :py:func:`dns.rdataset.Rdataset.match()`
    otherwise.

    (This behavior fixes a design mistake where the signature of this
    method became incompatible with that of its superclass.  The fix
    makes RRsets matchable as Rdatasets while preserving backwards
    compatibility.)
    """
    if isinstance(args[0], dns.name.Name):
        return self.full_match(*args, **kwargs)
    else:
        return super().match(*args, **kwargs)
[ "def", "match", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "dns", ".", "name", ".", "Name", ")", ":", "return", "self", ".", "full_match", "(", "*", "args", ",", "*", ...
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/dns/rrset.py#L79-L94
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/python27/1.0/lib/email/message.py
python
Message.get_charset
(self)
return self._charset
Return the Charset instance associated with the message's payload.
Return the Charset instance associated with the message's payload.
[ "Return", "the", "Charset", "instance", "associated", "with", "the", "message", "s", "payload", "." ]
def get_charset(self):
    """Return the Charset instance associated with the message's payload.
    """
    return self._charset
[ "def", "get_charset", "(", "self", ")", ":", "return", "self", ".", "_charset" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/email/message.py#L273-L276
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/processor_diagnostics_entity.py
python
ProcessorDiagnosticsEntity.uri
(self, uri)
Sets the uri of this ProcessorDiagnosticsEntity. The URI for futures requests to the component. :param uri: The uri of this ProcessorDiagnosticsEntity. :type: str
Sets the uri of this ProcessorDiagnosticsEntity. The URI for futures requests to the component.
[ "Sets", "the", "uri", "of", "this", "ProcessorDiagnosticsEntity", ".", "The", "URI", "for", "futures", "requests", "to", "the", "component", "." ]
def uri(self, uri):
    """
    Sets the uri of this ProcessorDiagnosticsEntity.
    The URI for futures requests to the component.

    :param uri: The uri of this ProcessorDiagnosticsEntity.
    :type: str
    """
    self._uri = uri
[ "def", "uri", "(", "self", ",", "uri", ")", ":", "self", ".", "_uri", "=", "uri" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/processor_diagnostics_entity.py#L144-L153
python-diamond/Diamond
7000e16cfdf4508ed9291fc4b3800592557b2431
src/collectors/iodrivesnmp/iodrivesnmp.py
python
IODriveSNMPCollector.get_bytes
(self, s)
return struct.unpack('%sB' % len(s), s)
Turns a string into a list of byte values
Turns a string into a list of byte values
[ "Turns", "a", "string", "into", "a", "list", "of", "byte", "values" ]
def get_bytes(self, s):
    """Turns a string into a list of byte values"""
    return struct.unpack('%sB' % len(s), s)
[ "def", "get_bytes", "(", "self", ",", "s", ")", ":", "return", "struct", ".", "unpack", "(", "'%sB'", "%", "len", "(", "s", ")", ",", "s", ")" ]
https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/collectors/iodrivesnmp/iodrivesnmp.py#L94-L96
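For context, struct.unpack with a '%dB' format unpacks every byte as an unsigned integer; a minimal standalone sketch (note that on Python 3 the argument must be bytes, where list(s) would be equivalent):

import struct

def get_bytes(s):
    """Turn a byte string into a tuple of integer byte values."""
    return struct.unpack('%sB' % len(s), s)

print(get_bytes(b'\x01\x02\xff'))  # (1, 2, 255), same as tuple(b'\x01\x02\xff')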
glitchdotcom/WebPutty
4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7
ziplibs/wtforms/ext/appengine/db.py
python
convert_EmailProperty
(model, prop, kwargs)
return get_TextField(kwargs)
Returns a form field for a ``db.EmailProperty``.
Returns a form field for a ``db.EmailProperty``.
[ "Returns", "a", "form", "field", "for", "a", "db", ".", "EmailProperty", "." ]
def convert_EmailProperty(model, prop, kwargs): """Returns a form field for a ``db.EmailProperty``.""" kwargs['validators'].append(validators.email()) return get_TextField(kwargs)
[ "def", "convert_EmailProperty", "(", "model", ",", "prop", ",", "kwargs", ")", ":", "kwargs", "[", "'validators'", "]", ".", "append", "(", "validators", ".", "email", "(", ")", ")", "return", "get_TextField", "(", "kwargs", ")" ]
https://github.com/glitchdotcom/WebPutty/blob/4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7/ziplibs/wtforms/ext/appengine/db.py#L217-L220
Azure/azure-devops-cli-extension
11334cd55806bef0b99c3bee5a438eed71e44037
azure-devops/azext_devops/dev/team/user.py
python
update_user_entitlement
(user, license_type, organization=None, detect=None)
return user_entitlement_update.user_entitlement
Update license type for a user. :param user: Email ID or ID of the user. :type user: str :param license_type: License type for the user. :type license_type: str :rtype: UserEntitlementsPatchResponse
Update license type for a user. :param user: Email ID or ID of the user. :type user: str :param license_type: License type for the user. :type license_type: str :rtype: UserEntitlementsPatchResponse
[ "Update", "license", "type", "for", "a", "user", ".", ":", "param", "user", ":", "Email", "ID", "or", "ID", "of", "the", "user", ".", ":", "type", "user", ":", "str", ":", "param", "license_type", ":", "License", "type", "for", "the", "user", ".", ...
def update_user_entitlement(user, license_type, organization=None, detect=None): """Update license type for a user. :param user: Email ID or ID of the user. :type user: str :param license_type: License type for the user. :type license_type: str :rtype: UserEntitlementsPatchResponse """ patch_document = [] value = {} value['accountLicenseType'] = license_type patch_document.append(_create_patch_operation('replace', '/accessLevel', value)) organization = resolve_instance(detect=detect, organization=organization) if '@' in user: user = resolve_identity_as_id(user, organization) client = get_member_entitlement_management_client(organization) user_entitlement_update = client.update_user_entitlement(document=patch_document, user_id=user) if user_entitlement_update.is_success is False and \ user_entitlement_update.operation_results[0].errors[0] is not None: raise CLIError(user_entitlement_update.operation_results[0].errors[0]['value']) return user_entitlement_update.user_entitlement
[ "def", "update_user_entitlement", "(", "user", ",", "license_type", ",", "organization", "=", "None", ",", "detect", "=", "None", ")", ":", "patch_document", "=", "[", "]", "value", "=", "{", "}", "value", "[", "'accountLicenseType'", "]", "=", "license_type...
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/dev/team/user.py#L54-L74
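The function assembles a JSON Patch document before calling the entitlement client; below is a sketch of the document shape it presumably builds. The exact dict emitted by _create_patch_operation is an assumption, and 'express' is a placeholder license type:

# Hypothetical shape of the patch document; not taken from azure-devops source.
value = {'accountLicenseType': 'express'}  # placeholder license type
patch_document = [{
    'op': 'replace',         # JSON Patch operation name
    'path': '/accessLevel',  # the field being replaced
    'value': value,
}]
print(patch_document)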
isce-framework/isce2
0e5114a8bede3caf1d533d98e44dfe4b983e3f48
contrib/geo_autoRIFT/geogrid/GeogridOptical.py
python
GeogridOptical.finalize
(self)
Clean up all the C pointers.
Clean up all the C pointers.
[ "Clean", "up", "all", "the", "C", "pointers", "." ]
def finalize(self): ''' Clean up all the C pointers. ''' from . import geogridOptical geogridOptical.destroyGeoGridOptical_Py(self._geogridOptical) self._geogridOptical = None
[ "def", "finalize", "(", "self", ")", ":", "from", ".", "import", "geogridOptical", "geogridOptical", ".", "destroyGeoGridOptical_Py", "(", "self", ".", "_geogridOptical", ")", "self", ".", "_geogridOptical", "=", "None" ]
https://github.com/isce-framework/isce2/blob/0e5114a8bede3caf1d533d98e44dfe4b983e3f48/contrib/geo_autoRIFT/geogrid/GeogridOptical.py#L259-L267
natashamjaques/neural_chat
ddb977bb4602a67c460d02231e7bbf7b2cb49a97
ParlAI/parlai/mturk/core/legacy_2018/agents.py
python
MTurkAgent.wait_completion_timeout
(self, iterations)
return
Suspends the thread waiting for hit completion for some number of iterations on the THREAD_MTURK_POLLING_SLEEP time
Suspends the thread waiting for hit completion for some number of iterations on the THREAD_MTURK_POLLING_SLEEP time
[ "Suspends", "the", "thread", "waiting", "for", "hit", "completion", "for", "some", "number", "of", "iterations", "on", "the", "THREAD_MTURK_POLLING_SLEEP", "time" ]
def wait_completion_timeout(self, iterations): """Suspends the thread waiting for hit completion for some number of iterations on the THREAD_MTURK_POLLING_SLEEP time""" # Determine number of sleep iterations for the amount of time # we want to wait before syncing with MTurk. Start with 10 seconds # of waiting iters = ( shared_utils.THREAD_MTURK_POLLING_SLEEP / shared_utils.THREAD_MEDIUM_SLEEP ) i = 0 # Wait for the desired number of MTURK_POLLING_SLEEP iterations while not self.hit_is_complete and i < iters * iterations: time.sleep(shared_utils.THREAD_SHORT_SLEEP) i += 1 return
[ "def", "wait_completion_timeout", "(", "self", ",", "iterations", ")", ":", "# Determine number of sleep iterations for the amount of time", "# we want to wait before syncing with MTurk. Start with 10 seconds", "# of waiting", "iters", "=", "(", "shared_utils", ".", "THREAD_MTURK_POL...
https://github.com/natashamjaques/neural_chat/blob/ddb977bb4602a67c460d02231e7bbf7b2cb49a97/ParlAI/parlai/mturk/core/legacy_2018/agents.py#L569-L584
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/xlrd-2.0.1/xlrd/__init__.py
python
dump
(filename, outfile=sys.stdout, unnumbered=False)
For debugging: dump an XLS file's BIFF records in char & hex. :param filename: The path to the file to be dumped. :param outfile: An open file, to which the dump is written. :param unnumbered: If true, omit offsets (for meaningful diffs).
For debugging: dump an XLS file's BIFF records in char & hex.
[ "For", "debugging", ":", "dump", "an", "XLS", "file", "s", "BIFF", "records", "in", "char", "&", "hex", "." ]
def dump(filename, outfile=sys.stdout, unnumbered=False): """ For debugging: dump an XLS file's BIFF records in char & hex. :param filename: The path to the file to be dumped. :param outfile: An open file, to which the dump is written. :param unnumbered: If true, omit offsets (for meaningful diffs). """ from .biffh import biff_dump bk = Book() bk.biff2_8_load(filename=filename, logfile=outfile, ) biff_dump(bk.mem, bk.base, bk.stream_len, 0, outfile, unnumbered)
[ "def", "dump", "(", "filename", ",", "outfile", "=", "sys", ".", "stdout", ",", "unnumbered", "=", "False", ")", ":", "from", ".", "biffh", "import", "biff_dump", "bk", "=", "Book", "(", ")", "bk", ".", "biff2_8_load", "(", "filename", "=", "filename",...
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/xlrd-2.0.1/xlrd/__init__.py#L188-L199
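A minimal usage sketch, assuming a workbook at a placeholder path:

import sys
from xlrd import dump

# Write the BIFF record dump to stdout; unnumbered=True omits offsets so two
# dumps diff cleanly. 'book.xls' is a placeholder path.
dump('book.xls', outfile=sys.stdout, unnumbered=True)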
OCA/l10n-brazil
6faefc04c7b0de3de3810a7ab137493d933fb579
l10n_br_stock/hooks.py
python
pre_init_hook
(cr)
Import XML data to change core data
Import XML data to change core data
[ "Import", "XML", "data", "to", "change", "core", "data" ]
def pre_init_hook(cr): """Import XML data to change core data""" if not tools.config["without_demo"]: _logger.info(_("Loading l10n_br_stock warehouse external ids...")) with api.Environment.manage(): env = api.Environment(cr, SUPERUSER_ID, {}) set_stock_warehouse_external_ids( env, "l10n_br_base.empresa_simples_nacional" ) set_stock_warehouse_external_ids( env, "l10n_br_base.empresa_lucro_presumido" )
[ "def", "pre_init_hook", "(", "cr", ")", ":", "if", "not", "tools", ".", "config", "[", "\"without_demo\"", "]", ":", "_logger", ".", "info", "(", "_", "(", "\"Loading l10n_br_stock warehouse external ids...\"", ")", ")", "with", "api", ".", "Environment", ".",...
https://github.com/OCA/l10n-brazil/blob/6faefc04c7b0de3de3810a7ab137493d933fb579/l10n_br_stock/hooks.py#L84-L96
keras-team/keras-applications
bc89834ed36935ab4a4994446e34ff81c0d8e1b7
keras_applications/mobilenet.py
python
preprocess_input
(x, **kwargs)
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
Preprocesses a numpy array encoding a batch of images. # Arguments x: a 4D numpy array consisting of RGB values within [0, 255]. # Returns Preprocessed array.
Preprocesses a numpy array encoding a batch of images.
[ "Preprocesses", "a", "numpy", "array", "encoding", "a", "batch", "of", "images", "." ]
def preprocess_input(x, **kwargs): """Preprocesses a numpy array encoding a batch of images. # Arguments x: a 4D numpy array consisting of RGB values within [0, 255]. # Returns Preprocessed array. """ return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
[ "def", "preprocess_input", "(", "x", ",", "*", "*", "kwargs", ")", ":", "return", "imagenet_utils", ".", "preprocess_input", "(", "x", ",", "mode", "=", "'tf'", ",", "*", "*", "kwargs", ")" ]
https://github.com/keras-team/keras-applications/blob/bc89834ed36935ab4a4994446e34ff81c0d8e1b7/keras_applications/mobilenet.py#L75-L84
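mode='tf' scaling in imagenet_utils maps [0, 255] inputs to [-1, 1] via x / 127.5 - 1; a numpy sketch of the equivalent arithmetic (not the library call itself):

import numpy as np

x = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
x_tf = x / 127.5 - 1.0  # the 'tf' preprocessing mode's scaling
print(x_tf.min() >= -1.0, x_tf.max() <= 1.0)  # True True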
sfu-db/dataprep
6dfb9c659e8bf73f07978ae195d0372495c6f118
dataprep/clean/clean_duplication_utils.py
python
Clusterer.set_cluster_params
(self, ngram: int, radius: int, block_size: int)
Set clustering parameters.
Set clustering parameters.
[ "Set", "clustering", "parameters", "." ]
def set_cluster_params(self, ngram: int, radius: int, block_size: int) -> None: """ Set clustering parameters. """ self._ngram = ngram self._radius = radius self._block_size = block_size
[ "def", "set_cluster_params", "(", "self", ",", "ngram", ":", "int", ",", "radius", ":", "int", ",", "block_size", ":", "int", ")", "->", "None", ":", "self", ".", "_ngram", "=", "ngram", "self", ".", "_radius", "=", "radius", "self", ".", "_block_size"...
https://github.com/sfu-db/dataprep/blob/6dfb9c659e8bf73f07978ae195d0372495c6f118/dataprep/clean/clean_duplication_utils.py#L315-L321
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/extensions_v1beta1_ingress_list.py
python
ExtensionsV1beta1IngressList.api_version
(self, api_version)
Sets the api_version of this ExtensionsV1beta1IngressList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this ExtensionsV1beta1IngressList. # noqa: E501 :type: str
Sets the api_version of this ExtensionsV1beta1IngressList.
[ "Sets", "the", "api_version", "of", "this", "ExtensionsV1beta1IngressList", "." ]
def api_version(self, api_version): """Sets the api_version of this ExtensionsV1beta1IngressList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this ExtensionsV1beta1IngressList. # noqa: E501 :type: str """ self._api_version = api_version
[ "def", "api_version", "(", "self", ",", "api_version", ")", ":", "self", ".", "_api_version", "=", "api_version" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/extensions_v1beta1_ingress_list.py#L81-L90
digidotcom/xbee-python
0757f4be0017530c205175fbee8f9f61be9614d1
digi/xbee/filesystem.py
python
FileSystemElement.path
(self)
return self._path.decode(encoding='utf8', errors='ignore')
Returns the file system element absolute path. Returns: String: File system element absolute path.
Returns the file system element absolute path.
[ "Returns", "the", "file", "system", "element", "absolute", "path", "." ]
def path(self): """ Returns the file system element absolute path. Returns: String: File system element absolute path. """ return self._path.decode(encoding='utf8', errors='ignore')
[ "def", "path", "(", "self", ")", ":", "return", "self", ".", "_path", ".", "decode", "(", "encoding", "=", "'utf8'", ",", "errors", "=", "'ignore'", ")" ]
https://github.com/digidotcom/xbee-python/blob/0757f4be0017530c205175fbee8f9f61be9614d1/digi/xbee/filesystem.py#L238-L245
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/asyncore.py
python
dispatcher.handle_error
(self)
[]
def handle_error(self): nil, t, v, tbinfo = compact_traceback() # sometimes a user repr method will crash. try: self_repr = repr(self) except: self_repr = '<__repr__(self) failed for object at %0x>' % id(self) self.log_info( 'uncaptured python exception, closing channel %s (%s:%s %s)' % ( self_repr, t, v, tbinfo ), 'error' ) self.handle_close()
[ "def", "handle_error", "(", "self", ")", ":", "nil", ",", "t", ",", "v", ",", "tbinfo", "=", "compact_traceback", "(", ")", "# sometimes a user repr method will crash.", "try", ":", "self_repr", "=", "repr", "(", "self", ")", "except", ":", "self_repr", "=",...
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/asyncore.py#L483-L501
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/setuptools/package_index.py
python
ContentChecker.is_valid
(self)
return True
Check the hash. Return False if validation fails.
Check the hash. Return False if validation fails.
[ "Check", "the", "hash", ".", "Return", "False", "if", "validation", "fails", "." ]
def is_valid(self): """ Check the hash. Return False if validation fails. """ return True
[ "def", "is_valid", "(", "self", ")", ":", "return", "True" ]
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/setuptools/package_index.py#L251-L255
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/xml/sax/xmlreader.py
python
InputSource.getSystemId
(self)
return self.__system_id
Returns the system identifier of this InputSource.
Returns the system identifier of this InputSource.
[ "Returns", "the", "system", "identifier", "of", "this", "InputSource", "." ]
def getSystemId(self): "Returns the system identifier of this InputSource." return self.__system_id
[ "def", "getSystemId", "(", "self", ")", ":", "return", "self", ".", "__system_id" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/xml/sax/xmlreader.py#L222-L224
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
examples/research_projects/rag/callbacks_rag.py
python
Seq2SeqLoggingCallback._write_logs
( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True )
[]
def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") metrics = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) # Log results od = Path(pl_module.hparams.output_dir) if type_path == "test": results_file = od / "test_results.txt" generations_file = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=True) generations_file.parent.mkdir(exist_ok=True) with open(results_file, "a+") as writer: for key in sorted(metrics): if key in ["log", "progress_bar", "preds"]: continue val = metrics[key] if isinstance(val, torch.Tensor): val = val.item() msg = f"{key}: {val:.6f}\n" writer.write(msg) if not save_generations: return if "preds" in metrics: content = "\n".join(metrics["preds"]) generations_file.open("w+").write(content)
[ "def", "_write_logs", "(", "self", ",", "trainer", ":", "pl", ".", "Trainer", ",", "pl_module", ":", "pl", ".", "LightningModule", ",", "type_path", ":", "str", ",", "save_generations", "=", "True", ")", "->", "None", ":", "logger", ".", "info", "(", "...
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/examples/research_projects/rag/callbacks_rag.py#L61-L94
fregu856/2D_detection
1f22a6d604d39f8f79fe916fcdbf40b5b668a39a
model.py
python
SqueezeDet_model.tensor_IOU
(self, box1, box2)
return IOU
[]
def tensor_IOU(self, box1, box2): # intersection: xmin = tf.maximum(box1[0], box2[0]) ymin = tf.maximum(box1[1], box2[1]) xmax = tf.minimum(box1[2], box2[2]) ymax = tf.minimum(box1[3], box2[3]) w = tf.maximum(0.0, xmax - xmin) h = tf.maximum(0.0, ymax - ymin) intersection_area = w*h # union: w1 = box1[2] - box1[0] h1 = box1[3] - box1[1] w2 = box2[2] - box2[0] h2 = box2[3] - box2[1] union_area = w1*h1 + w2*h2 - intersection_area IOU = intersection_area/(union_area + self.epsilon) # # (don't think self.epsilon is actually needed here) return IOU
[ "def", "tensor_IOU", "(", "self", ",", "box1", ",", "box2", ")", ":", "# intersection:", "xmin", "=", "tf", ".", "maximum", "(", "box1", "[", "0", "]", ",", "box2", "[", "0", "]", ")", "ymin", "=", "tf", ".", "maximum", "(", "box1", "[", "1", "...
https://github.com/fregu856/2D_detection/blob/1f22a6d604d39f8f79fe916fcdbf40b5b668a39a/model.py#L463-L483
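The same computation with plain floats, useful as a sanity check on the TF version (boxes are [xmin, ymin, xmax, ymax]):

def iou(box1, box2, eps=1e-8):
    """IoU of two [xmin, ymin, xmax, ymax] boxes, mirroring the TF graph above."""
    xmin, ymin = max(box1[0], box2[0]), max(box1[1], box2[1])
    xmax, ymax = min(box1[2], box2[2]), min(box1[3], box2[3])
    intersection = max(0.0, xmax - xmin) * max(0.0, ymax - ymin)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    return intersection / (area1 + area2 - intersection + eps)

print(iou([0, 0, 2, 2], [1, 1, 3, 3]))  # ~0.1429: overlap 1, union 7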
spectacles/CodeComplice
8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62
libs/koXMLTreeService.py
python
TreeService.getTreeForURI
(self, uri, content=None)
return tree
[]
def getTreeForURI(self, uri, content=None): if not uri and not content: return None tree = None if uri and uri in self.__treeMap: tree = self.__treeMap[uri] # if tree is not None: # print "tree cache hit for [%s]"%uri if not content: return tree if not tree: if not content: # get the content try: f = open(uri, 'r') content = f.read(-1) f.close() except IOError as e: # ignore file errors and return an empty tree content = "" if not content.startswith("<?xml"): tree = HTMLDocument() if not tree: tree = XMLDocument() # raise Exception("NOT IMPLEMENTED YET") if content: tree.parse(content) if uri: self.__treeMap[uri] = tree return tree
[ "def", "getTreeForURI", "(", "self", ",", "uri", ",", "content", "=", "None", ")", ":", "if", "not", "uri", "and", "not", "content", ":", "return", "None", "tree", "=", "None", "if", "uri", "and", "uri", "in", "self", ".", "__treeMap", ":", "tree", ...
https://github.com/spectacles/CodeComplice/blob/8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62/libs/koXMLTreeService.py#L523-L553
QuantumFractal/Data-Structure-Zoo
3884d55fac0cccf3b17d18550ec52a285f909c2c
0-Object-Oriented Programming/objects.py
python
Truck.vroom
(self)
return 'Vroooooom'
[]
def vroom(self): return 'Vroooooom'
[ "def", "vroom", "(", "self", ")", ":", "return", "'Vroooooom'" ]
https://github.com/QuantumFractal/Data-Structure-Zoo/blob/3884d55fac0cccf3b17d18550ec52a285f909c2c/0-Object-Oriented Programming/objects.py#L45-L46
JetBrains/python-skeletons
95ad24b666e475998e5d1cc02ed53a2188036167
builtins.py
python
str.swapcase
(self)
return ''
Return a copy of the string with uppercase characters converted to lowercase and vice versa. :rtype: str
Return a copy of the string with uppercase characters converted to lowercase and vice versa.
[ "Return", "a", "copy", "of", "the", "string", "with", "uppercase", "characters", "converted", "to", "lowercase", "and", "vice", "versa", "." ]
def swapcase(self): """Return a copy of the string with uppercase characters converted to lowercase and vice versa. :rtype: str """ return ''
[ "def", "swapcase", "(", "self", ")", ":", "return", "''" ]
https://github.com/JetBrains/python-skeletons/blob/95ad24b666e475998e5d1cc02ed53a2188036167/builtins.py#L1701-L1707
yantisj/netgrph
0ebc72efbd970e0ce76d44d91a27d167103633b5
nglib/query/__init__.py
python
display_mgmt_groups
()
Print all Management Groups to the Screen
Print all Management Groups to the Screen
[ "Print", "all", "Management", "Groups", "to", "the", "Screen" ]
def display_mgmt_groups(): """Print all Management Groups to the Screen""" mgmt = nglib.py2neo_ses.cypher.execute( 'MATCH (s:Switch) RETURN DISTINCT(s.mgmt) as name ORDER BY name') if len(mgmt) > 0: print("Available Groups:") for s in mgmt.records: print("> " + str(s.name)) else: print("No management groups found in DB")
[ "def", "display_mgmt_groups", "(", ")", ":", "mgmt", "=", "nglib", ".", "py2neo_ses", ".", "cypher", ".", "execute", "(", "'MATCH (s:Switch) RETURN DISTINCT(s.mgmt) as name ORDER BY name'", ")", "if", "len", "(", "mgmt", ")", ">", "0", ":", "print", "(", "\"Avai...
https://github.com/yantisj/netgrph/blob/0ebc72efbd970e0ce76d44d91a27d167103633b5/nglib/query/__init__.py#L68-L79
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/elasticsearch/client/indices.py
python
IndicesClient.clear_cache
(self, index=None, params=None)
return data
Clear either all caches or specific caches associated with one or more indices. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-clearcache.html>`_ :arg index: A comma-separated list of index names to limit the operation :arg field_data: Clear field data :arg fielddata: Clear field data :arg fields: A comma-separated list of fields to clear when using the `field_data` parameter (default: all) :arg filter: Clear filter caches :arg filter_cache: Clear filter caches :arg filter_keys: A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all) :arg id: Clear ID caches for parent/child :arg id_cache: Clear ID caches for parent/child :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :arg ignore_indices: When performed on multiple indices, allows to ignore `missing` ones (default: none) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg index: A comma-separated list of index names to limit the operation :arg recycler: Clear the recycler cache
Clear either all caches or specific caches associated with one or more indices. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-clearcache.html>`_
[ "Clear", "either", "all", "caches", "or", "specific", "caches", "associated", "with", "one", "or", "more", "indices", ".", "<http", ":", "//", "www", ".", "elasticsearch", ".", "org", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", ...
def clear_cache(self, index=None, params=None): """ Clear either all caches or specific caches associated with one or more indices. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-clearcache.html>`_ :arg index: A comma-separated list of index names to limit the operation :arg field_data: Clear field data :arg fielddata: Clear field data :arg fields: A comma-separated list of fields to clear when using the `field_data` parameter (default: all) :arg filter: Clear filter caches :arg filter_cache: Clear filter caches :arg filter_keys: A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all) :arg id: Clear ID caches for parent/child :arg id_cache: Clear ID caches for parent/child :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :arg ignore_indices: When performed on multiple indices, allows to ignore `missing` ones (default: none) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg index: A comma-separated list of index names to limit the operation :arg recycler: Clear the recycler cache """ _, data = self.transport.perform_request('POST', _make_path(index, '_cache', 'clear'), params=params) return data
[ "def", "clear_cache", "(", "self", ",", "index", "=", "None", ",", "params", "=", "None", ")", ":", "_", ",", "data", "=", "self", ".", "transport", ".", "perform_request", "(", "'POST'", ",", "_make_path", "(", "index", ",", "'_cache'", ",", "'clear'"...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/elasticsearch/client/indices.py#L772-L802
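A hedged usage sketch against a client of the same vintage; the host and index name are placeholders, and the params dict mirrors the documented arguments:

from elasticsearch import Elasticsearch

es = Elasticsearch()  # assumes a cluster on localhost:9200
# Clear only the fielddata cache of one index.
es.indices.clear_cache(index='my-index', params={'fielddata': 'true'})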
selfteaching/selfteaching-python-camp
9982ee964b984595e7d664b07c389cddaf158f1e
exercises/1901100108/d07/mymodule/stats_word.py
python
stats_text
(text)
return stats_text_en(text) + stats_text_cn(text)
Merge the results of English word frequency and Chinese character frequency
Merge the results of English word frequency and Chinese character frequency
[ "Merge", "the", "results", "of", "English", "word", "frequency", "and", "Chinese", "character", "frequency" ]
def stats_text(text): ''' Merge the results of English word frequency and Chinese character frequency ''' return stats_text_en(text) + stats_text_cn(text)
[ "def", "stats_text", "(", "text", ")", ":", "return", "stats_text_en", "(", "text", ")", "+", "stats_text_cn", "(", "text", ")" ]
https://github.com/selfteaching/selfteaching-python-camp/blob/9982ee964b984595e7d664b07c389cddaf158f1e/exercises/1901100108/d07/mymodule/stats_word.py#L36-L40
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py
python
SQLiteLockFile.__init__
(self, path, threaded=True, timeout=None)
>>> lock = SQLiteLockFile('somefile') >>> lock = SQLiteLockFile('somefile', threaded=False)
>>> lock = SQLiteLockFile('somefile') >>> lock = SQLiteLockFile('somefile', threaded=False)
[ ">>>", "lock", "=", "SQLiteLockFile", "(", "somefile", ")", ">>>", "lock", "=", "SQLiteLockFile", "(", "somefile", "threaded", "=", "False", ")" ]
def __init__(self, path, threaded=True, timeout=None): """ >>> lock = SQLiteLockFile('somefile') >>> lock = SQLiteLockFile('somefile', threaded=False) """ LockBase.__init__(self, path, threaded, timeout) self.lock_file = unicode(self.lock_file) self.unique_name = unicode(self.unique_name) if SQLiteLockFile.testdb is None: import tempfile _fd, testdb = tempfile.mkstemp() os.close(_fd) os.unlink(testdb) del _fd, tempfile SQLiteLockFile.testdb = testdb import sqlite3 self.connection = sqlite3.connect(SQLiteLockFile.testdb) c = self.connection.cursor() try: c.execute("create table locks" "(" " lock_file varchar(32)," " unique_name varchar(32)" ")") except sqlite3.OperationalError: pass else: self.connection.commit() import atexit atexit.register(os.unlink, SQLiteLockFile.testdb)
[ "def", "__init__", "(", "self", ",", "path", ",", "threaded", "=", "True", ",", "timeout", "=", "None", ")", ":", "LockBase", ".", "__init__", "(", "self", ",", "path", ",", "threaded", ",", "timeout", ")", "self", ".", "lock_file", "=", "unicode", "...
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py#L19-L51
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_vendor/rich/ansi.py
python
_ansi_tokenize
(ansi_text: str)
Tokenize a string into plain text and ANSI codes. Args: ansi_text (str): A string containing ANSI codes. Yields: AnsiToken: A named tuple of (plain, sgr, osc)
Tokenize a string into plain text and ANSI codes.
[ "Tokenize", "a", "string", "into", "plain", "text", "and", "ANSI", "codes", "." ]
def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: """Tokenize a string into plain text and ANSI codes. Args: ansi_text (str): A string containing ANSI codes. Yields: AnsiToken: A named tuple of (plain, sgr, osc) """ def remove_csi(ansi_text: str) -> str: """Remove unknown CSI sequences.""" return re_csi.sub("", ansi_text) position = 0 for match in re_ansi.finditer(ansi_text): start, end = match.span(0) sgr, osc = match.groups() if start > position: yield _AnsiToken(remove_csi(ansi_text[position:start])) yield _AnsiToken("", sgr, osc) position = end if position < len(ansi_text): yield _AnsiToken(remove_csi(ansi_text[position:]))
[ "def", "_ansi_tokenize", "(", "ansi_text", ":", "str", ")", "->", "Iterable", "[", "_AnsiToken", "]", ":", "def", "remove_csi", "(", "ansi_text", ":", "str", ")", "->", "str", ":", "\"\"\"Remove unknown CSI sequences.\"\"\"", "return", "re_csi", ".", "sub", "(...
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/rich/ansi.py#L21-L44
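A self-contained illustration of the tokenizing pattern; re_sgr below is a deliberately simplified stand-in for rich's internal re_ansi (it matches SGR sequences only and skips the CSI/OSC handling of the original):

import re
from typing import Iterable, NamedTuple, Optional

class AnsiToken(NamedTuple):
    plain: str = ""
    sgr: Optional[str] = None

# Simplified stand-in: SGR color/style sequences such as '\x1b[1m' only.
re_sgr = re.compile(r"\x1b\[([0-9;]*)m")

def ansi_tokenize(text: str) -> Iterable[AnsiToken]:
    position = 0
    for match in re_sgr.finditer(text):
        start, end = match.span(0)
        if start > position:
            yield AnsiToken(text[position:start])  # plain text before the code
        yield AnsiToken("", match.group(1))        # the SGR parameters
        position = end
    if position < len(text):
        yield AnsiToken(text[position:])           # trailing plain text

for token in ansi_tokenize("\x1b[1mbold\x1b[0m plain"):
    print(token)
# AnsiToken(plain='', sgr='1')
# AnsiToken(plain='bold', sgr=None)
# AnsiToken(plain='', sgr='0')
# AnsiToken(plain=' plain', sgr=None)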
apache/superset
3d829fc3c838358dd8c798ecaeefd34c502edca0
superset/connectors/sqla/views.py
python
TableModelView.edit
(self, pk: str)
return redirect("/superset/explore/table/{}/".format(pk))
Simple hack to redirect to explore view after saving
Simple hack to redirect to explore view after saving
[ "Simple", "hack", "to", "redirect", "to", "explore", "view", "after", "saving" ]
def edit(self, pk: str) -> FlaskResponse: """Simple hack to redirect to explore view after saving""" resp = super().edit(pk) if isinstance(resp, str): return resp return redirect("/superset/explore/table/{}/".format(pk))
[ "def", "edit", "(", "self", ",", "pk", ":", "str", ")", "->", "FlaskResponse", ":", "resp", "=", "super", "(", ")", ".", "edit", "(", "pk", ")", "if", "isinstance", "(", "resp", ",", "str", ")", ":", "return", "resp", "return", "redirect", "(", "...
https://github.com/apache/superset/blob/3d829fc3c838358dd8c798ecaeefd34c502edca0/superset/connectors/sqla/views.py#L557-L562
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/query/spans.py
python
Span.__init__
(self, start, end=None, startchar=None, endchar=None, boost=1.0)
[]
def __init__(self, start, end=None, startchar=None, endchar=None, boost=1.0): if end is None: end = start assert start <= end self.start = start self.end = end self.startchar = startchar self.endchar = endchar self.boost = boost
[ "def", "__init__", "(", "self", ",", "start", ",", "end", "=", "None", ",", "startchar", "=", "None", ",", "endchar", "=", "None", ",", "boost", "=", "1.0", ")", ":", "if", "end", "is", "None", ":", "end", "=", "start", "assert", "start", "<=", "...
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/query/spans.py#L56-L65
catalyst-team/catalyst
678dc06eda1848242df010b7f34adb572def2598
catalyst/contrib/losses/regression.py
python
_ce_with_logits
(logits, target)
return torch.sum(-target * torch.log_softmax(logits, -1), -1)
Returns cross entropy for the given logits
Returns cross entropy for the given logits
[ "Returns", "cross", "entropy", "for", "the", "given", "logits" ]
def _ce_with_logits(logits, target): """Returns cross entropy for the given logits""" return torch.sum(-target * torch.log_softmax(logits, -1), -1)
[ "def", "_ce_with_logits", "(", "logits", ",", "target", ")", ":", "return", "torch", ".", "sum", "(", "-", "target", "*", "torch", ".", "log_softmax", "(", "logits", ",", "-", "1", ")", ",", "-", "1", ")" ]
https://github.com/catalyst-team/catalyst/blob/678dc06eda1848242df010b7f34adb572def2598/catalyst/contrib/losses/regression.py#L6-L8
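This is the soft-target form of cross entropy; with a one-hot target it agrees numerically with torch.nn.functional.cross_entropy. A quick check:

import torch
import torch.nn.functional as F

def ce_with_logits(logits, target):
    return torch.sum(-target * torch.log_softmax(logits, -1), -1)

logits = torch.tensor([[2.0, 0.5, 0.1]])
one_hot = torch.tensor([[1.0, 0.0, 0.0]])  # hard label 0 expressed as a soft target
print(ce_with_logits(logits, one_hot))             # tensor([0.3168]) (approx.)
print(F.cross_entropy(logits, torch.tensor([0])))  # tensor(0.3168) (approx.)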
podgorniy/alfred-translate
752a7374b7b3a4ed1f01cbad096f36202e1df77f
src/translate.py
python
get_translation_suggestions
(input_string, spelling_suggestions, vocabulary_article)
return res
Returns a list of translation suggestions
Returns a list of translation suggestions
[ "Returns", "a", "list", "of", "translation", "suggestions" ]
def get_translation_suggestions(input_string, spelling_suggestions, vocabulary_article): """Returns a list of translation suggestions""" res = [] if len(spelling_suggestions) == 0 and len(vocabulary_article) == 0: return res if len(vocabulary_article['def']) != 0: for article in vocabulary_article['def']: for translation in article['tr']: if 'ts' in article.keys(): subtitle = article['ts'] elif 'ts' in translation.keys(): subtitle = translation['ts'] else: subtitle = '' res.append({ 'translation': translation['text'], 'transcription': subtitle, }) return res
[ "def", "get_translation_suggestions", "(", "input_string", ",", "spelling_suggestions", ",", "vocabulary_article", ")", ":", "res", "=", "[", "]", "if", "len", "(", "spelling_suggestions", ")", "==", "0", "and", "len", "(", "vocabulary_article", ")", "==", "0", ...
https://github.com/podgorniy/alfred-translate/blob/752a7374b7b3a4ed1f01cbad096f36202e1df77f/src/translate.py#L57-L77
yuzhoujr/leetcode
6a2ad1fc11225db18f68bfadd21a7419d2cb52a4
tree/EricD/111. Minimum Depth of Binary Tree - EricD.py
python
minDepth
(self, root)
:type root: TreeNode :rtype: int
:type root: TreeNode :rtype: int
[ ":", "type", "root", ":", "TreeNode", ":", "rtype", ":", "int" ]
def minDepth(self, root): """ :type root: TreeNode :rtype: int """ if root: if root.left is None and root.right is None: return 1 else: l = self.minDepth(root.left) if root.left else sys.maxint r = self.minDepth(root.right) if root.right else sys.maxint return min(l,r)+1 else: return 0
[ "def", "minDepth", "(", "self", ",", "root", ")", ":", "if", "root", ":", "if", "root", ".", "left", "is", "None", "and", "root", ".", "right", "is", "None", ":", "return", "1", "else", ":", "l", "=", "self", ".", "minDepth", "(", "root", ".", ...
https://github.com/yuzhoujr/leetcode/blob/6a2ad1fc11225db18f68bfadd21a7419d2cb52a4/tree/EricD/111. Minimum Depth of Binary Tree - EricD.py#L2-L16
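The solution relies on Python 2's sys.maxint; below is a Python 3 port with float('inf') as the sentinel, plus a tiny tree to exercise it. TreeNode is the usual LeetCode-style node, written out so the sketch is self-contained:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def min_depth(root):
    """Python 3 port: float('inf') replaces Python 2's sys.maxint sentinel."""
    if not root:
        return 0
    if root.left is None and root.right is None:
        return 1
    left = min_depth(root.left) if root.left else float('inf')
    right = min_depth(root.right) if root.right else float('inf')
    return min(left, right) + 1

#     1
#    / \
#   2   3
#  /
# 4
root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
print(min_depth(root))  # 2, via the leaf 3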
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/components/sciutils.py
python
genLinesBwd
(offset, styledText, position)
r"""Generate styled lines backwards from the given position. "offset" is the position offset from the start of the source SciMoz buffer at which "styledText" begins. I.e. the whole buffer is not necessarily passed in. "styledText" is the styled text from which to extract. "position" is the position (NOT offset) from which to begin pulling out lines. Yields 2-tuples of the following form: (<scimoz position at start of line>, <line/style data of the line>)
r"""Generate styled lines backwards from the given position. "offset" is the position offset from the start of the source SciMoz buffer at which "styledText" begins. I.e. the whole buffer is not necessarily passed in. "styledText" is the styled text from which to extract. "position" is the position (NOT offset) from which to begin pulling out lines. Yields 2-tuples of the following form: (<scimoz position at start of line>, <line/style data of the line>)
[ "r", "Generate", "styled", "lines", "backwards", "from", "the", "given", "position", ".", "offset", "is", "the", "position", "offset", "from", "the", "start", "of", "the", "source", "SciMoz", "buffer", "at", "which", "styledText", "begins", ".", "I", ".", ...
def genLinesBwd(offset, styledText, position): r"""Generate styled lines backwards from the given position. "offset" is the position offset from the start of the source SciMoz buffer at which "styledText" begins. I.e. the whole buffer is not necessarily passed in. "styledText" is the styled text from which to extract. "position" is the position (NOT offset) from which to begin pulling out lines. Yields 2-tuples of the following form: (<scimoz position at start of line>, <line/style data of the line>) """ DEBUG = 0 if DEBUG: print _printBanner("genLinesBwd(offset=%d, styledText, position=%d)"\ % (offset, position)) _printBufferContext(offset, styledText, position) index = position - offset EOLCHARS = tuple("\r\n") # The first line is slightly special in that there is no EOL to scan # first. (If "position" is at the start of a line, then we return the # empty string as the first line.) i = index*2 - 2 end = index*2 while i >= 0 and styledText[i] not in EOLCHARS: i -= 2 start = i + 2 line = styledText[start:end] if DEBUG: print "yield:%d-%d:%s" % (start/2+offset, end/2+offset, _data2text(line)) yield (start/2+offset, line) while i >= 0: end = start # Scan over the EOL. if styledText[i] == '\n' and i-2 > 0 and styledText[i-2] == '\r': i -= 4 else: i -= 2 # Scan to start of line. while i >= 0 and styledText[i] not in EOLCHARS: i -= 2 start = i + 2 line = styledText[start:end] if DEBUG: print "yield:%d-%d:%s" % (start/2+offset, end/2+offset, _data2text(line)) yield (start/2+offset, line) if DEBUG: _printBanner("done")
[ "def", "genLinesBwd", "(", "offset", ",", "styledText", ",", "position", ")", ":", "DEBUG", "=", "0", "if", "DEBUG", ":", "print", "_printBanner", "(", "\"genLinesBwd(offset=%d, styledText, position=%d)\"", "%", "(", "offset", ",", "position", ")", ")", "_printB...
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/components/sciutils.py#L330-L381
geekan/scrapy-examples
edb1cb116bd6def65a6ef01f953b58eb43e54305
alexa_topsites/alexa_topsites/spiders/spider.py
python
alexa_topsitesSpider.parse_1
(self, response)
[]
def parse_1(self, response): info('Parse '+response.url) x = self.parse_with_rules(response, self.list_css_rules, dict) # x = self.parse_with_rules(response, self.content_css_rules, dict) print(json.dumps(x, ensure_ascii=False, indent=2))
[ "def", "parse_1", "(", "self", ",", "response", ")", ":", "info", "(", "'Parse '", "+", "response", ".", "url", ")", "x", "=", "self", ".", "parse_with_rules", "(", "response", ",", "self", ".", "list_css_rules", ",", "dict", ")", "# x = self.parse_with_ru...
https://github.com/geekan/scrapy-examples/blob/edb1cb116bd6def65a6ef01f953b58eb43e54305/alexa_topsites/alexa_topsites/spiders/spider.py#L47-L51
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/commands/__init__.py
python
_sort_commands
(cmddict, order)
return sorted(cmddict.items(), key=keyfn)
[]
def _sort_commands(cmddict, order): def keyfn(key): try: return order.index(key[1]) except ValueError: # unordered items should come last return 0xff return sorted(cmddict.items(), key=keyfn)
[ "def", "_sort_commands", "(", "cmddict", ",", "order", ")", ":", "def", "keyfn", "(", "key", ")", ":", "try", ":", "return", "order", ".", "index", "(", "key", "[", "1", "]", ")", "except", "ValueError", ":", "# unordered items should come last", "return",...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/commands/__init__.py#L78-L86
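The key function maps known commands to their position in `order` and everything else to 0xff, so unknown commands sort last while the curated ones keep their curated order; a small demo:

def _sort_commands(cmddict, order):
    def keyfn(key):
        try:
            return order.index(key[1])
        except ValueError:
            return 0xff  # unknown commands sort after all curated ones
    return sorted(cmddict.items(), key=keyfn)

order = ['install', 'download', 'freeze']  # placeholder curated order
cmddict = {'a': 'freeze', 'b': 'some-plugin', 'c': 'install'}
print(_sort_commands(cmddict, order))
# [('c', 'install'), ('a', 'freeze'), ('b', 'some-plugin')]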
hudson-and-thames/mlfinlab
79dcc7120ec84110578f75b025a75850eb72fc73
mlfinlab/codependence/information.py
python
get_optimal_number_of_bins
(num_obs: int, corr_coef: float = None)
Calculates optimal number of bins for discretization based on number of observations and correlation coefficient (univariate case). Algorithms used in this function were originally proposed in the works of Hacine-Gharbi et al. (2012) and Hacine-Gharbi and Ravier (2018). They are described in the Cornell lecture notes: https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3512994&download=yes (p.26) :param num_obs: (int) Number of observations. :param corr_coef: (float) Correlation coefficient, used to estimate the number of bins for univariate case. :return: (int) Optimal number of bins.
Calculates optimal number of bins for discretization based on number of observations and correlation coefficient (univariate case).
[ "Calculates", "optimal", "number", "of", "bins", "for", "discretization", "based", "on", "number", "of", "observations", "and", "correlation", "coefficient", "(", "univariate", "case", ")", "." ]
def get_optimal_number_of_bins(num_obs: int, corr_coef: float = None) -> int: """ Calculates optimal number of bins for discretization based on number of observations and correlation coefficient (univariate case). Algorithms used in this function were originally proposed in the works of Hacine-Gharbi et al. (2012) and Hacine-Gharbi and Ravier (2018). They are described in the Cornell lecture notes: https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3512994&download=yes (p.26) :param num_obs: (int) Number of observations. :param corr_coef: (float) Correlation coefficient, used to estimate the number of bins for univariate case. :return: (int) Optimal number of bins. """ pass
[ "def", "get_optimal_number_of_bins", "(", "num_obs", ":", "int", ",", "corr_coef", ":", "float", "=", "None", ")", "->", "int", ":", "pass" ]
https://github.com/hudson-and-thames/mlfinlab/blob/79dcc7120ec84110578f75b025a75850eb72fc73/mlfinlab/codependence/information.py#L12-L26
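The body above is a stub (pass). Below is a sketch of what the cited formulas compute, following Hacine-Gharbi et al. (2012) for the univariate case and Hacine-Gharbi and Ravier (2018) for the bivariate case as summarized in the referenced lecture notes; treat it as an assumption about the intended implementation, not mlfinlab's actual code:

import numpy as np

def optimal_number_of_bins(num_obs, corr_coef=None):
    # Univariate case, or a degenerate correlation of +/-1.
    if corr_coef is None or abs(abs(corr_coef) - 1.0) < 1e-5:
        z = (8 + 324 * num_obs + 12 * np.sqrt(36 * num_obs + 729 * num_obs ** 2)) ** (1 / 3)
        bins = round(z / 6 + 2 / (3 * z) + 1 / 3)
    else:
        # Bivariate case: bins grow with num_obs and with |corr_coef|.
        bins = round(2 ** -0.5 * np.sqrt(1 + np.sqrt(1 + 24 * num_obs / (1 - corr_coef ** 2))))
    return int(bins)

print(optimal_number_of_bins(100))                 # univariate
print(optimal_number_of_bins(100, corr_coef=0.5))  # bivariate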
i3visio/osrframework
e02a6e9b1346ab5a01244c0d19bcec8232bf1a37
osrframework/domainfy.py
python
create_domains
(tlds, nicks=None, nicks_file=None)
return domain_candidates
Method that generates the list of domains to be checked Args: tlds (list): List of tlds. nicks (list): List of aliases. nicks_file (str): The filepath to the aliases file. Returns: list: The list of domains to be checked.
Method that generates the list of domains to be checked
[ "Method", "that", "generates", "the", "list", "of", "domains", "to", "be", "checked" ]
def create_domains(tlds, nicks=None, nicks_file=None): """Method that generates the list of domains to be checked Args: tlds (list): List of tlds. nicks (list): List of aliases. nicks_file (str): The filepath to the aliases file. Returns: list: The list of domains to be checked. """ domain_candidates = [] if nicks is not None: for nick in nicks: for tld in tlds: tmp = { "domain" : nick + tld["tld"], "type" : tld["type"], "tld": tld["tld"] } domain_candidates.append(tmp) elif nicks_file is not None: with open(nicks_file, "r") as file: nicks = file.read().splitlines() for nick in nicks: for tld in tlds: tmp = { "domain" : nick + tld["tld"], "type" : tld["type"], "tld": tld["tld"] } domain_candidates.append(tmp) return domain_candidates
[ "def", "create_domains", "(", "tlds", ",", "nicks", "=", "None", ",", "nicks_file", "=", "None", ")", ":", "domain_candidates", "=", "[", "]", "if", "nicks", "is", "not", "None", ":", "for", "nick", "in", "nicks", ":", "for", "tld", "in", "tlds", ":"...
https://github.com/i3visio/osrframework/blob/e02a6e9b1346ab5a01244c0d19bcec8232bf1a37/osrframework/domainfy.py#L207-L239
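A small grounded example of the nicks path, assuming the function above is in scope; the TLD dicts follow the shape the function indexes into:

tlds = [
    {'tld': '.com', 'type': 'global'},
    {'tld': '.org', 'type': 'global'},
]
print(create_domains(tlds, nicks=['example']))
# [{'domain': 'example.com', 'type': 'global', 'tld': '.com'},
#  {'domain': 'example.org', 'type': 'global', 'tld': '.org'}]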
eliben/pyelftools
8f7a0becaface09435c4374947548b7851e3d1a2
elftools/dwarf/callframe.py
python
CFIEntry._decode_CFI_table
(self)
return DecodedCallFrameTable(table=table, reg_order=reg_order)
Decode the instructions contained in the given CFI entry and return a DecodedCallFrameTable.
Decode the instructions contained in the given CFI entry and return a DecodedCallFrameTable.
[ "Decode", "the", "instructions", "contained", "in", "the", "given", "CFI", "entry", "and", "return", "a", "DecodedCallFrameTable", "." ]
def _decode_CFI_table(self): """ Decode the instructions contained in the given CFI entry and return a DecodedCallFrameTable. """ if isinstance(self, CIE): # For a CIE, initialize cur_line to an "empty" line cie = self cur_line = dict(pc=0, cfa=CFARule(reg=None, offset=0)) reg_order = [] else: # FDE # For a FDE, we need to decode the attached CIE first, because its # decoded table is needed. Its "initial instructions" describe a # line that serves as the base (first) line in the FDE's table. cie = self.cie cie_decoded_table = cie.get_decoded() if len(cie_decoded_table.table) > 0: last_line_in_CIE = copy.copy(cie_decoded_table.table[-1]) cur_line = copy.copy(last_line_in_CIE) else: cur_line = dict(cfa=CFARule(reg=None, offset=0)) cur_line['pc'] = self['initial_location'] reg_order = copy.copy(cie_decoded_table.reg_order) table = [] # Keeps a stack for the use of DW_CFA_{remember|restore}_state # instructions. line_stack = [] def _add_to_order(regnum): # DW_CFA_restore and others remove registers from cur_line, # but they stay in reg_order. Avoid duplicates. if regnum not in reg_order: reg_order.append(regnum) for instr in self.instructions: # Throughout this loop, cur_line is the current line. Some # instructions add it to the table, but most instructions just # update it without adding it to the table. name = instruction_name(instr.opcode) if name == 'DW_CFA_set_loc': table.append(copy.copy(cur_line)) cur_line['pc'] = instr.args[0] elif name in ( 'DW_CFA_advance_loc1', 'DW_CFA_advance_loc2', 'DW_CFA_advance_loc4', 'DW_CFA_advance_loc'): table.append(copy.copy(cur_line)) cur_line['pc'] += instr.args[0] * cie['code_alignment_factor'] elif name == 'DW_CFA_def_cfa': cur_line['cfa'] = CFARule( reg=instr.args[0], offset=instr.args[1]) elif name == 'DW_CFA_def_cfa_sf': cur_line['cfa'] = CFARule( reg=instr.args[0], offset=instr.args[1] * cie['code_alignment_factor']) elif name == 'DW_CFA_def_cfa_register': cur_line['cfa'] = CFARule( reg=instr.args[0], offset=cur_line['cfa'].offset) elif name == 'DW_CFA_def_cfa_offset': cur_line['cfa'] = CFARule( reg=cur_line['cfa'].reg, offset=instr.args[0]) elif name == 'DW_CFA_def_cfa_expression': cur_line['cfa'] = CFARule(expr=instr.args[0]) elif name == 'DW_CFA_undefined': _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule(RegisterRule.UNDEFINED) elif name == 'DW_CFA_same_value': _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule(RegisterRule.SAME_VALUE) elif name in ( 'DW_CFA_offset', 'DW_CFA_offset_extended', 'DW_CFA_offset_extended_sf'): _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule( RegisterRule.OFFSET, instr.args[1] * cie['data_alignment_factor']) elif name in ('DW_CFA_val_offset', 'DW_CFA_val_offset_sf'): _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule( RegisterRule.VAL_OFFSET, instr.args[1] * cie['data_alignment_factor']) elif name == 'DW_CFA_register': _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule( RegisterRule.REGISTER, instr.args[1]) elif name == 'DW_CFA_expression': _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule( RegisterRule.EXPRESSION, instr.args[1]) elif name == 'DW_CFA_val_expression': _add_to_order(instr.args[0]) cur_line[instr.args[0]] = RegisterRule( RegisterRule.VAL_EXPRESSION, instr.args[1]) elif name in ('DW_CFA_restore', 'DW_CFA_restore_extended'): _add_to_order(instr.args[0]) dwarf_assert( isinstance(self, FDE), '%s instruction must be in a FDE' % name) if instr.args[0] in last_line_in_CIE: cur_line[instr.args[0]] = last_line_in_CIE[instr.args[0]] else: cur_line.pop(instr.args[0], None) elif name == 'DW_CFA_remember_state': line_stack.append(copy.deepcopy(cur_line)) elif name == 'DW_CFA_restore_state': pc = cur_line['pc'] cur_line = line_stack.pop() cur_line['pc'] = pc # The current line is appended to the table after all instructions # have ended, if there were instructions. if cur_line['cfa'].reg is not None or len(cur_line) > 2: table.append(cur_line) return DecodedCallFrameTable(table=table, reg_order=reg_order)
[ "def", "_decode_CFI_table", "(", "self", ")", ":", "if", "isinstance", "(", "self", ",", "CIE", ")", ":", "# For a CIE, initialize cur_line to an \"empty\" line", "cie", "=", "self", "cur_line", "=", "dict", "(", "pc", "=", "0", ",", "cfa", "=", "CFARule", "...
https://github.com/eliben/pyelftools/blob/8f7a0becaface09435c4374947548b7851e3d1a2/elftools/dwarf/callframe.py#L505-L625
hyperledger/aries-cloudagent-python
2f36776e99f6053ae92eed8123b5b1b2e891c02a
aries_cloudagent/messaging/models/base.py
python
BaseModel.validate
(self, unknown: str = None)
return self
Validate a constructed model.
Validate a constructed model.
[ "Validate", "a", "constructed", "model", "." ]
def validate(self, unknown: str = None): """Validate a constructed model.""" schema = self.Schema(unknown=unknown) errors = schema.validate(self.serialize()) if errors: raise ValidationError(errors) return self
[ "def", "validate", "(", "self", ",", "unknown", ":", "str", "=", "None", ")", ":", "schema", "=", "self", ".", "Schema", "(", "unknown", "=", "unknown", ")", "errors", "=", "schema", ".", "validate", "(", "self", ".", "serialize", "(", ")", ")", "i...
https://github.com/hyperledger/aries-cloudagent-python/blob/2f36776e99f6053ae92eed8123b5b1b2e891c02a/aries_cloudagent/messaging/models/base.py#L182-L188
skyfielders/python-skyfield
0e68757a5c1081f784c58fd7a76635c6deb98451
skyfield/toposlib.py
python
GeographicPosition.lst_hours_at
(self, t)
return (t.gast + self.longitude._hours + sprime / 54000.0) % 24.0
Return the Local Apparent Sidereal Time, in hours, at time ``t``. This location’s Local Apparent Sidereal Time (LAST) is the right ascension of the zenith at the time ``t``, as measured against the “true” Earth equator and equinox (rather than the fictional “mean” equator and equinox, which ignore the Earth’s nutation).
Return the Local Apparent Sidereal Time, in hours, at time ``t``.
[ "Return", "the", "Local", "Apparent", "Sidereal", "Time", "in", "hours", "at", "time", "t", "." ]
def lst_hours_at(self, t): """Return the Local Apparent Sidereal Time, in hours, at time ``t``. This location’s Local Apparent Sidereal Time (LAST) is the right ascension of the zenith at the time ``t``, as measured against the “true” Earth equator and equinox (rather than the fictional “mean” equator and equinox, which ignore the Earth’s nutation). """ sprime = -47.0e-6 * (t.whole - T0 + t.tdb_fraction) / 36525.0 return (t.gast + self.longitude._hours + sprime / 54000.0) % 24.0
[ "def", "lst_hours_at", "(", "self", ",", "t", ")", ":", "sprime", "=", "-", "47.0e-6", "*", "(", "t", ".", "whole", "-", "T0", "+", "t", ".", "tdb_fraction", ")", "/", "36525.0", "return", "(", "t", ".", "gast", "+", "self", ".", "longitude", "."...
https://github.com/skyfielders/python-skyfield/blob/0e68757a5c1081f784c58fd7a76635c6deb98451/skyfield/toposlib.py#L87-L97
maas/maas
db2f89970c640758a51247c59bf1ec6f60cf4ab5
src/maasserver/models/filesystem.py
python
Filesystem.get_physical_block_devices
(self)
return devices
Return PhysicalBlockDevices backing the filesystem.
Return PhysicalBlockDevices backing the filesystem.
[ "Return", "PhysicalBlockDevices", "backing", "the", "filesystem", "." ]
def get_physical_block_devices(self): """Return PhysicalBlockDevices backing the filesystem.""" from maasserver.models.virtualblockdevice import VirtualBlockDevice devices = [] parent = self.get_parent() if isinstance(parent, PhysicalBlockDevice): devices.append(parent) elif isinstance(parent, VirtualBlockDevice): for grandparent in parent.get_parents(): if isinstance(grandparent, Partition): grandparent = grandparent.partition_table.block_device device = grandparent.actual_instance if isinstance(device, PhysicalBlockDevice): devices.append(device) return devices
[ "def", "get_physical_block_devices", "(", "self", ")", ":", "from", "maasserver", ".", "models", ".", "virtualblockdevice", "import", "VirtualBlockDevice", "devices", "=", "[", "]", "parent", "=", "self", ".", "get_parent", "(", ")", "if", "isinstance", "(", "...
https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/maasserver/models/filesystem.py#L172-L187
materialsproject/pymatgen
8128f3062a334a2edd240e4062b5b9bdd1ae6f58
pymatgen/analysis/phase_diagram.py
python
PhaseDiagram.get_all_chempots
(self, comp)
return chempots
Get chemical potentials at a given composition. Args: comp (Composition): Composition Returns: Chemical potentials.
Get chemical potentials at a given composition.
[ "Get", "chemical", "potentials", "at", "a", "given", "composition", "." ]
def get_all_chempots(self, comp): """ Get chemical potentials at a given composition. Args: comp (Composition): Composition Returns: Chemical potentials. """ all_facets = self._get_all_facets_and_simplexes(comp) chempots = {} for facet in all_facets: facet_name = "-".join([self.qhull_entries[j].name for j in facet]) chempots[facet_name] = self._get_facet_chempots(facet) return chempots
[ "def", "get_all_chempots", "(", "self", ",", "comp", ")", ":", "all_facets", "=", "self", ".", "_get_all_facets_and_simplexes", "(", "comp", ")", "chempots", "=", "{", "}", "for", "facet", "in", "all_facets", ":", "facet_name", "=", "\"-\"", ".", "join", "...
https://github.com/materialsproject/pymatgen/blob/8128f3062a334a2edd240e4062b5b9bdd1ae6f58/pymatgen/analysis/phase_diagram.py#L897-L914
asciidisco/plugin.video.netflix
ceb2638a9676f5839250dadfd079b9e4e4bdd759
resources/lib/NetflixSession.py
python
NetflixSession.parse_video_list_ids_entry
(self, id, entry)
return { id: { 'id': id, 'index': entry['index'], 'name': entry['context'], 'displayName': entry['displayName'], 'size': entry['length'] } }
Parse a video id entry e.g. rip out the parts we need Parameters ---------- response_data : :obj:`dict` of :obj:`str` Dictionary entry from the ´fetch_video_list_ids´ call Returns ------- id : :obj:`str` Unique id of the video list entry : :obj:`dict` of :obj:`str` Video list entry in the format: "3589e2c6-ca3b-48b4-a72d-34f2c09ffbf4_11568382": { "displayName": "Passend zu Family Guy", "id": "3589e2c6-ca3b-48b4-a72d-34f2c09ffbf4_11568382", "index": 18, "name": "similars", "size": 33 }
Parse a video id entry e.g. rip out the parts we need
[ "Parse", "a", "video", "id", "entry", "e", ".", "g", ".", "rip", "out", "the", "parts", "we", "need" ]
def parse_video_list_ids_entry(self, id, entry): """Parse a video id entry e.g. rip out the parts we need Parameters ---------- response_data : :obj:`dict` of :obj:`str` Dictionary entry from the ´fetch_video_list_ids´ call Returns ------- id : :obj:`str` Unique id of the video list entry : :obj:`dict` of :obj:`str` Video list entry in the format: "3589e2c6-ca3b-48b4-a72d-34f2c09ffbf4_11568382": { "displayName": "Passend zu Family Guy", "id": "3589e2c6-ca3b-48b4-a72d-34f2c09ffbf4_11568382", "index": 18, "name": "similars", "size": 33 } """ return { id: { 'id': id, 'index': entry['index'], 'name': entry['context'], 'displayName': entry['displayName'], 'size': entry['length'] } }
[ "def", "parse_video_list_ids_entry", "(", "self", ",", "id", ",", "entry", ")", ":", "return", "{", "id", ":", "{", "'id'", ":", "id", ",", "'index'", ":", "entry", "[", "'index'", "]", ",", "'name'", ":", "entry", "[", "'context'", "]", ",", "'displ...
https://github.com/asciidisco/plugin.video.netflix/blob/ceb2638a9676f5839250dadfd079b9e4e4bdd759/resources/lib/NetflixSession.py#L580-L612
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/lib-tk/Tix.py
python
Tk.destroy
(self)
[]
def destroy(self): # For safety, remove any delete_window binding before destroy self.protocol("WM_DELETE_WINDOW", "") Tkinter.Tk.destroy(self)
[ "def", "destroy", "(", "self", ")", ":", "# For safety, remove any delete_window binding before destroy", "self", ".", "protocol", "(", "\"WM_DELETE_WINDOW\"", ",", "\"\"", ")", "Tkinter", ".", "Tk", ".", "destroy", "(", "self", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/lib-tk/Tix.py#L223-L226
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-build/python-libs/gdata/src/gdata/spreadsheet/service.py
python
SpreadsheetsService.GetListFeed
(self, key, wksht_id='default', row_id=None, query=None, visibility='private', projection='full')
Gets a list feed or a specific entry if a row_id is defined Args: key: string The spreadsheet key defined in /ccc?key= wksht_id: string The id for a specific worksheet entry row_id: string (optional) The row_id of a row in the list query: DocumentQuery (optional) Query parameters Returns: If there is no row_id, then a SpreadsheetsListFeed. If there is a row_id, then a SpreadsheetsList.
Gets a list feed or a specific entry if a row_id is defined Args: key: string The spreadsheet key defined in /ccc?key= wksht_id: string The id for a specific worksheet entry row_id: string (optional) The row_id of a row in the list query: DocumentQuery (optional) Query parameters Returns: If there is no row_id, then a SpreadsheetsListFeed. If there is a row_id, then a SpreadsheetsList.
[ "Gets", "a", "list", "feed", "or", "a", "specific", "entry", "if", "a", "row_id", "is", "defined", "Args", ":", "key", ":", "string", "The", "spreadsheet", "key", "defined", "in", "/", "ccc?key", "=", "wksht_id", ":", "string", "The", "id", "for", "a",...
def GetListFeed(self, key, wksht_id='default', row_id=None, query=None, visibility='private', projection='full'): """Gets a list feed or a specific entry if a row_id is defined Args: key: string The spreadsheet key defined in /ccc?key= wksht_id: string The id for a specific worksheet entry row_id: string (optional) The row_id of a row in the list query: DocumentQuery (optional) Query parameters Returns: If there is no row_id, then a SpreadsheetsListFeed. If there is a row_id, then a SpreadsheetsList. """ uri = ('http://%s/feeds/list/%s/%s/%s/%s' % (self.server, key, wksht_id, visibility, projection)) if row_id is not None: uri = '%s/%s' % (uri, row_id) if query is not None: query.feed = uri uri = query.ToUri() if row_id: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsListFromString) else: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
[ "def", "GetListFeed", "(", "self", ",", "key", ",", "wksht_id", "=", "'default'", ",", "row_id", "=", "None", ",", "query", "=", "None", ",", "visibility", "=", "'private'", ",", "projection", "=", "'full'", ")", ":", "uri", "=", "(", "'http://%s/feeds/l...
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/gdata/src/gdata/spreadsheet/service.py#L220-L249
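The core of this method is the feed-URI construction; a standalone sketch of just that string building (the server and key values below are invented):

```python
def build_list_feed_uri(server, key, wksht_id='default', row_id=None,
                        visibility='private', projection='full'):
    # Mirrors the URI construction inside GetListFeed above.
    uri = 'http://%s/feeds/list/%s/%s/%s/%s' % (
        server, key, wksht_id, visibility, projection)
    if row_id is not None:
        uri = '%s/%s' % (uri, row_id)
    return uri

# Feed of all rows vs. a single row:
print(build_list_feed_uri('spreadsheets.google.com', 'abc123'))
print(build_list_feed_uri('spreadsheets.google.com', 'abc123', row_id='cokwr'))
```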
brightmart/xlnet_zh
75e39630ec0856509cde45aa0505195e32564466
src/run_classifier.py
python
ImdbProcessor.get_dev_examples
(self, data_dir)
return self._create_examples(os.path.join(data_dir, "test"))
[]
def get_dev_examples(self, data_dir): return self._create_examples(os.path.join(data_dir, "test"))
[ "def", "get_dev_examples", "(", "self", ",", "data_dir", ")", ":", "return", "self", ".", "_create_examples", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"test\"", ")", ")" ]
https://github.com/brightmart/xlnet_zh/blob/75e39630ec0856509cde45aa0505195e32564466/src/run_classifier.py#L307-L308
elfi-dev/elfi
07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c
elfi/methods/inference/romc.py
python
ROMC._filter_solutions
(self, eps_filter)
Filter out the solutions over eps threshold. Parameters ---------- eps_filter: float the threshold for filtering out solutions
Filter out the solutions over eps threshold.
[ "Filter", "out", "the", "solutions", "over", "eps", "threshold", "." ]
def _filter_solutions(self, eps_filter): """Filter out the solutions over eps threshold. Parameters ---------- eps_filter: float the threshold for filtering out solutions """ # checks assert self.inference_state["_has_solved_problems"] # getters n1 = self.inference_args["N1"] solved = self.inference_state["solved"] optim_problems = self.optim_problems accepted = [] for i in range(n1): if solved[i] and (optim_problems[i].result.f_min < eps_filter): accepted.append(True) else: accepted.append(False) # update status self.inference_args["eps_filter"] = eps_filter self.inference_state["accepted"] = accepted self.inference_state["_has_filtered_solutions"] = True
[ "def", "_filter_solutions", "(", "self", ",", "eps_filter", ")", ":", "# checks", "assert", "self", ".", "inference_state", "[", "\"_has_solved_problems\"", "]", "# getters", "n1", "=", "self", ".", "inference_args", "[", "\"N1\"", "]", "solved", "=", "self", ...
https://github.com/elfi-dev/elfi/blob/07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c/elfi/methods/inference/romc.py#L726-L753
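A toy rendering of the same filtering rule on plain lists; the attribute names used by the class are replaced by made-up stand-ins:

```python
# Stand-ins for the solved flags and per-problem optima used above.
solved = [True, True, False, True]
f_min = [0.02, 0.35, None, 0.08]

eps_filter = 0.1
accepted = [s and (f is not None and f < eps_filter)
            for s, f in zip(solved, f_min)]
print(accepted)  # [True, False, False, True]
```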
google-research/tensorflow_constrained_optimization
723d63f8567aaa988c4ce4761152beee2b462e1d
tensorflow_constrained_optimization/python/rates/loss.py
python
HingeLoss.__init__
(self, margin=1.0)
Creates a new HingeLoss object with the given margin. The margin determines how far a prediction must be from the decision boundary in order for it to be penalized. When the margin is zero, this threshold is exactly the decision boundary. When the margin is at least one, the hinge loss upper bounds the zero-one loss. Args: margin: non-negative float, the margin of the hinge loss. Defaults to 1. Raises: ValueError: if the margin is negative.
Creates a new HingeLoss object with the given margin.
[ "Creates", "a", "new", "HingeLoss", "object", "with", "the", "given", "margin", "." ]
def __init__(self, margin=1.0): """Creates a new HingeLoss object with the given margin. The margin determines how far a prediction must be from the decision boundary in order for it to be penalized. When the margin is zero, this threshold is exactly the decision boundary. When the margin is at least one, the hinge loss upper bounds the zero-one loss. Args: margin: non-negative float, the margin of the hinge loss. Defaults to 1. Raises: ValueError: if the margin is negative. """ super(HingeLoss, self).__init__() self._margin = float(margin) if margin < 0.0: raise ValueError("margin must be non-negative")
[ "def", "__init__", "(", "self", ",", "margin", "=", "1.0", ")", ":", "super", "(", "HingeLoss", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "_margin", "=", "float", "(", "margin", ")", "if", "margin", "<", "0.0", ":", "raise", "ValueE...
https://github.com/google-research/tensorflow_constrained_optimization/blob/723d63f8567aaa988c4ce4761152beee2b462e1d/tensorflow_constrained_optimization/python/rates/loss.py#L358-L376
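For reference, the textbook hinge loss this margin parameterizes can be sketched in NumPy; this illustrates the formula, not the library's internal reduction:

```python
import numpy as np

def hinge_loss(labels, predictions, margin=1.0):
    # Penalize predictions within `margin` of the decision boundary.
    # With labels in {-1, +1} and margin >= 1, this upper-bounds the
    # zero-one loss, as the docstring above notes.
    return np.maximum(0.0, margin - labels * predictions)

labels = np.array([1.0, 1.0, -1.0])
predictions = np.array([2.0, 0.3, -0.1])
print(hinge_loss(labels, predictions))       # [0.  0.7 0.9]
print(hinge_loss(labels, predictions, 0.0))  # [0. 0. 0.]  zero margin = decision boundary
```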
Epistimio/orion
732e739d99561020dbe620760acf062ade746006
src/orion/algo/asha.py
python
ASHABracket.is_filled
(self)
return False
ASHA's first rung can always sample new trials
ASHA's first rung can always sample new trials
[ "ASHA", "s", "first", "rung", "can", "always", "sample", "new", "trials" ]
def is_filled(self): """ASHA's first rung can always sample new trials""" return False
[ "def", "is_filled", "(", "self", ")", ":", "return", "False" ]
https://github.com/Epistimio/orion/blob/732e739d99561020dbe620760acf062ade746006/src/orion/algo/asha.py#L256-L258
minerllabs/minerl
0123527c334c96ebb3f0cf313df1552fa4302691
minerl/herobraine/hero/handlers/agent/observations/location_stats.py
python
_ZPositionObservation.__init__
(self)
[]
def __init__(self): super().__init__(key_list=['zpos'], space=spaces.Box(low=-640000.0, high=640000.0, shape=(), dtype=np.float), default_if_missing=0.0)
[ "def", "__init__", "(", "self", ")", ":", "super", "(", ")", ".", "__init__", "(", "key_list", "=", "[", "'zpos'", "]", ",", "space", "=", "spaces", ".", "Box", "(", "low", "=", "-", "640000.0", ",", "high", "=", "640000.0", ",", "shape", "=", "(...
https://github.com/minerllabs/minerl/blob/0123527c334c96ebb3f0cf313df1552fa4302691/minerl/herobraine/hero/handlers/agent/observations/location_stats.py#L98-L100
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/combinat/finite_state_machine.py
python
FiniteStateMachine.prepone_output
(self)
For all paths, shift the output of the path from one transition to the earliest possible preceding transition of the path. INPUT: Nothing. OUTPUT: Nothing. Apply the following to each state `s` (except initial states) of the finite state machine as often as possible: If the letter `a` is a prefix of the output label of all transitions from `s` (including the final output of `s`), then remove it from all these labels and append it to all output labels of all transitions leading to `s`. We assume that the states have no output labels, but final outputs are allowed. EXAMPLES:: sage: A = Transducer([('A', 'B', 1, 1), ....: ('B', 'B', 0, 0), ....: ('B', 'C', 1, 0)], ....: initial_states=['A'], ....: final_states=['C']) sage: A.prepone_output() sage: A.transitions() [Transition from 'A' to 'B': 1|1,0, Transition from 'B' to 'B': 0|0, Transition from 'B' to 'C': 1|-] :: sage: B = Transducer([('A', 'B', 0, 1), ....: ('B', 'C', 1, [1, 1]), ....: ('B', 'C', 0, 1)], ....: initial_states=['A'], ....: final_states=['C']) sage: B.prepone_output() sage: B.transitions() [Transition from 'A' to 'B': 0|1,1, Transition from 'B' to 'C': 1|1, Transition from 'B' to 'C': 0|-] If initial states are not labeled as such, unexpected results may be obtained:: sage: C = Transducer([(0,1,0,0)]) sage: C.prepone_output() verbose 0 (...: finite_state_machine.py, prepone_output) All transitions leaving state 0 have an output label with prefix 0. However, there is no inbound transition and it is not an initial state. This routine (possibly called by simplification) therefore erased this prefix from all outbound transitions. sage: C.transitions() [Transition from 0 to 1: 0|-] Also the final output of final states can be changed:: sage: T = Transducer([('A', 'B', 0, 1), ....: ('B', 'C', 1, [1, 1]), ....: ('B', 'C', 0, 1)], ....: initial_states=['A'], ....: final_states=['B']) sage: T.state('B').final_word_out = [1] sage: T.prepone_output() sage: T.transitions() [Transition from 'A' to 'B': 0|1,1, Transition from 'B' to 'C': 1|1, Transition from 'B' to 'C': 0|-] sage: T.state('B').final_word_out [] :: sage: S = Transducer([('A', 'B', 0, 1), ....: ('B', 'C', 1, [1, 1]), ....: ('B', 'C', 0, 1)], ....: initial_states=['A'], ....: final_states=['B']) sage: S.state('B').final_word_out = [0] sage: S.prepone_output() sage: S.transitions() [Transition from 'A' to 'B': 0|1, Transition from 'B' to 'C': 1|1,1, Transition from 'B' to 'C': 0|1] sage: S.state('B').final_word_out [0] Output labels do not have to be hashable:: sage: C = Transducer([(0, 1, 0, []), ....: (1, 0, 0, [vector([0, 0]), 0]), ....: (1, 1, 1, [vector([0, 0]), 1]), ....: (0, 0, 1, 0)], ....: determine_alphabets=False, ....: initial_states=[0]) sage: C.prepone_output() sage: sorted(C.transitions()) [Transition from 0 to 1: 0|(0, 0), Transition from 0 to 0: 1|0, Transition from 1 to 0: 0|0, Transition from 1 to 1: 1|1,(0, 0)]
For all paths, shift the output of the path from one transition to the earliest possible preceding transition of the path.
[ "For", "all", "paths", "shift", "the", "output", "of", "the", "path", "from", "one", "transition", "to", "the", "earliest", "possible", "preceding", "transition", "of", "the", "path", "." ]
def prepone_output(self): """ For all paths, shift the output of the path from one transition to the earliest possible preceding transition of the path. INPUT: Nothing. OUTPUT: Nothing. Apply the following to each state `s` (except initial states) of the finite state machine as often as possible: If the letter `a` is a prefix of the output label of all transitions from `s` (including the final output of `s`), then remove it from all these labels and append it to all output labels of all transitions leading to `s`. We assume that the states have no output labels, but final outputs are allowed. EXAMPLES:: sage: A = Transducer([('A', 'B', 1, 1), ....: ('B', 'B', 0, 0), ....: ('B', 'C', 1, 0)], ....: initial_states=['A'], ....: final_states=['C']) sage: A.prepone_output() sage: A.transitions() [Transition from 'A' to 'B': 1|1,0, Transition from 'B' to 'B': 0|0, Transition from 'B' to 'C': 1|-] :: sage: B = Transducer([('A', 'B', 0, 1), ....: ('B', 'C', 1, [1, 1]), ....: ('B', 'C', 0, 1)], ....: initial_states=['A'], ....: final_states=['C']) sage: B.prepone_output() sage: B.transitions() [Transition from 'A' to 'B': 0|1,1, Transition from 'B' to 'C': 1|1, Transition from 'B' to 'C': 0|-] If initial states are not labeled as such, unexpected results may be obtained:: sage: C = Transducer([(0,1,0,0)]) sage: C.prepone_output() verbose 0 (...: finite_state_machine.py, prepone_output) All transitions leaving state 0 have an output label with prefix 0. However, there is no inbound transition and it is not an initial state. This routine (possibly called by simplification) therefore erased this prefix from all outbound transitions. sage: C.transitions() [Transition from 0 to 1: 0|-] Also the final output of final states can be changed:: sage: T = Transducer([('A', 'B', 0, 1), ....: ('B', 'C', 1, [1, 1]), ....: ('B', 'C', 0, 1)], ....: initial_states=['A'], ....: final_states=['B']) sage: T.state('B').final_word_out = [1] sage: T.prepone_output() sage: T.transitions() [Transition from 'A' to 'B': 0|1,1, Transition from 'B' to 'C': 1|1, Transition from 'B' to 'C': 0|-] sage: T.state('B').final_word_out [] :: sage: S = Transducer([('A', 'B', 0, 1), ....: ('B', 'C', 1, [1, 1]), ....: ('B', 'C', 0, 1)], ....: initial_states=['A'], ....: final_states=['B']) sage: S.state('B').final_word_out = [0] sage: S.prepone_output() sage: S.transitions() [Transition from 'A' to 'B': 0|1, Transition from 'B' to 'C': 1|1,1, Transition from 'B' to 'C': 0|1] sage: S.state('B').final_word_out [0] Output labels do not have to be hashable:: sage: C = Transducer([(0, 1, 0, []), ....: (1, 0, 0, [vector([0, 0]), 0]), ....: (1, 1, 1, [vector([0, 0]), 1]), ....: (0, 0, 1, 0)], ....: determine_alphabets=False, ....: initial_states=[0]) sage: C.prepone_output() sage: sorted(C.transitions()) [Transition from 0 to 1: 0|(0, 0), Transition from 0 to 0: 1|0, Transition from 1 to 0: 0|0, Transition from 1 to 1: 1|1,(0, 0)] """ def find_common_output(state): if (any(transition for transition in self.transitions(state) if not transition.word_out) or state.is_final and not state.final_word_out): return tuple() first_letters = [transition.word_out[0] for transition in self.transitions(state)] if state.is_final: first_letters = first_letters + [state.final_word_out[0]] if not first_letters: return tuple() first_item = first_letters.pop() if all(item == first_item for item in first_letters): return (first_item,) return tuple() changed = 1 iteration = 0 while changed > 0: changed = 0 iteration += 1 for state in self.iter_states(): if state.is_initial: continue if state.word_out: raise NotImplementedError( "prepone_output assumes that all states have " "empty output word, but state %s has output " "word %s" % (state, state.word_out)) common_output = find_common_output(state) if common_output: changed += 1 if state.is_final: assert state.final_word_out[0] == common_output[0] state.final_word_out = state.final_word_out[1:] for transition in self.transitions(state): assert transition.word_out[0] == common_output[0] transition.word_out = transition.word_out[1:] found_inbound_transition = False for transition in self.iter_transitions(): if transition.to_state == state: transition.word_out = transition.word_out \ + [common_output[0]] found_inbound_transition = True if not found_inbound_transition: verbose( "All transitions leaving state %s have an " "output label with prefix %s. However, " "there is no inbound transition and it is " "not an initial state. This routine " "(possibly called by simplification) " "therefore erased this prefix from all " "outbound transitions." % (state, common_output[0]), level=0)
[ "def", "prepone_output", "(", "self", ")", ":", "def", "find_common_output", "(", "state", ")", ":", "if", "(", "any", "(", "transition", "for", "transition", "in", "self", ".", "transitions", "(", "state", ")", "if", "not", "transition", ".", "word_out", ...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/finite_state_machine.py#L8845-L9011
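A sketch of the core move on plain dicts, assuming a single state `s` whose outgoing output words all start with the same letter; the state names and data below are invented:

```python
# Outgoing output words from a state `s`, plus incoming words into `s`.
outgoing = {('s', 'B'): [1, 0], ('s', 'C'): [1]}
incoming = {('A', 's'): [0]}

firsts = {w[0] for w in outgoing.values() if w}
if len(firsts) == 1 and all(outgoing.values()):
    letter = firsts.pop()
    for k in outgoing:              # strip the shared prefix letter...
        outgoing[k] = outgoing[k][1:]
    for k in incoming:              # ...and append it to inbound labels
        incoming[k] = incoming[k] + [letter]

print(outgoing)  # {('s', 'B'): [0], ('s', 'C'): []}
print(incoming)  # {('A', 's'): [0, 1]}
```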
lazylibrarian/LazyLibrarian
ae3c14e9db9328ce81765e094ab2a14ed7155624
lib/pythontwitter/__init__.py
python
User.GetStatus
(self)
return self._status
Get the latest twitter.Status of this user. Returns: The latest twitter.Status of this user
Get the latest twitter.Status of this user.
[ "Get", "the", "latest", "twitter", ".", "Status", "of", "this", "user", "." ]
def GetStatus(self): '''Get the latest twitter.Status of this user. Returns: The latest twitter.Status of this user ''' return self._status
[ "def", "GetStatus", "(", "self", ")", ":", "return", "self", ".", "_status" ]
https://github.com/lazylibrarian/LazyLibrarian/blob/ae3c14e9db9328ce81765e094ab2a14ed7155624/lib/pythontwitter/__init__.py#L1127-L1133
ustayready/CredKing
68b612e4cdf01d2b65b14ab2869bb8a5531056ee
plugins/gmail/lxml/html/diff.py
python
cleanup_html
(html)
return html
This 'cleans' the HTML, meaning that any page structure is removed (only the contents of <body> are used, if there is any <body>). Also <ins> and <del> tags are removed.
This 'cleans' the HTML, meaning that any page structure is removed (only the contents of <body> are used, if there is any <body>). Also <ins> and <del> tags are removed.
[ "This", "cleans", "the", "HTML", "meaning", "that", "any", "page", "structure", "is", "removed", "(", "only", "the", "contents", "of", "<body", ">", "are", "used", "if", "there", "is", "any", "<body", ">", ")", ".", "Also", "<ins", ">", "and", "<del", ">", ...
def cleanup_html(html): """ This 'cleans' the HTML, meaning that any page structure is removed (only the contents of <body> are used, if there is any <body>). Also <ins> and <del> tags are removed. """ match = _body_re.search(html) if match: html = html[match.end():] match = _end_body_re.search(html) if match: html = html[:match.start()] html = _ins_del_re.sub('', html) return html
[ "def", "cleanup_html", "(", "html", ")", ":", "match", "=", "_body_re", ".", "search", "(", "html", ")", "if", "match", ":", "html", "=", "html", "[", "match", ".", "end", "(", ")", ":", "]", "match", "=", "_end_body_re", ".", "search", "(", "html"...
https://github.com/ustayready/CredKing/blob/68b612e4cdf01d2b65b14ab2869bb8a5531056ee/plugins/gmail/lxml/html/diff.py#L557-L568
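The module-level regexes (`_body_re`, `_end_body_re`, `_ins_del_re`) are not shown above; here is a runnable sketch with plausible stand-in patterns:

```python
import re

# These regexes are assumptions; the real lxml module defines its own.
_body_re = re.compile(r'<body.*?>', re.I | re.S)
_end_body_re = re.compile(r'</body.*?>', re.I | re.S)
_ins_del_re = re.compile(r'</?(ins|del).*?>', re.I | re.S)

def cleanup_html(html):
    # Keep only what follows <body> and precedes </body>, then drop ins/del.
    match = _body_re.search(html)
    if match:
        html = html[match.end():]
    match = _end_body_re.search(html)
    if match:
        html = html[:match.start()]
    return _ins_del_re.sub('', html)

page = '<html><body class="x"><p>Hi <ins>there</ins></p></body></html>'
print(cleanup_html(page))  # <p>Hi there</p>
```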
Ulauncher/Ulauncher
44735d8d4c2b4b8a8d28f95ab6a6c6d577c859e5
ulauncher/utils/desktop/reader.py
python
filter_app
(app, disable_desktop_filters=False)
return app and app.get_string('Name') and app.get_string('Type') == 'Application' \ and (app.get_show_in() or disable_desktop_filters) and not app.get_nodisplay() and not app.get_is_hidden()
:param Gio.DesktopAppInfo app: :returns: True if app can be added to the database
:param Gio.DesktopAppInfo app: :returns: True if app can be added to the database
[ ":", "param", "Gio", ".", "DesktopAppInfo", "app", ":", ":", "returns", ":", "True", "if", "app", "can", "be", "added", "to", "the", "database" ]
def filter_app(app, disable_desktop_filters=False): """ :param Gio.DesktopAppInfo app: :returns: True if app can be added to the database """ return app and app.get_string('Name') and app.get_string('Type') == 'Application' \ and (app.get_show_in() or disable_desktop_filters) and not app.get_nodisplay() and not app.get_is_hidden()
[ "def", "filter_app", "(", "app", ",", "disable_desktop_filters", "=", "False", ")", ":", "return", "app", "and", "app", ".", "get_string", "(", "'Name'", ")", "and", "app", ".", "get_string", "(", "'Type'", ")", "==", "'Application'", "and", "(", "app", ...
https://github.com/Ulauncher/Ulauncher/blob/44735d8d4c2b4b8a8d28f95ab6a6c6d577c859e5/ulauncher/utils/desktop/reader.py#L57-L63
aianaconda/TensorFlow_Engineering_Implementation
cb787e359da9ac5a08d00cd2458fecb4cb5a3a31
tf2code/Chapter8/code8.4/Code8-9/backend.py
python
moving_average_update
(x, value, momentum)
return moving_averages.assign_moving_average( x, value, momentum, zero_debias=zero_debias)
Compute the moving average of a variable. Arguments: x: A Variable. value: A tensor with the same shape as `variable`. momentum: The moving average momentum. Returns: An Operation to update the variable.
Compute the moving average of a variable.
[ "Compute", "the", "moving", "average", "of", "a", "variable", "." ]
def moving_average_update(x, value, momentum): """Compute the moving average of a variable. Arguments: x: A Variable. value: A tensor with the same shape as `variable`. momentum: The moving average momentum. Returns: An Operation to update the variable. """ zero_debias = not tf2.enabled() return moving_averages.assign_moving_average( x, value, momentum, zero_debias=zero_debias)
[ "def", "moving_average_update", "(", "x", ",", "value", ",", "momentum", ")", ":", "zero_debias", "=", "not", "tf2", ".", "enabled", "(", ")", "return", "moving_averages", ".", "assign_moving_average", "(", "x", ",", "value", ",", "momentum", ",", "zero_debi...
https://github.com/aianaconda/TensorFlow_Engineering_Implementation/blob/cb787e359da9ac5a08d00cd2458fecb4cb5a3a31/tf2code/Chapter8/code8.4/Code8-9/backend.py#L1595-L1608
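Without zero-debiasing, `assign_moving_average` reduces to a plain exponential moving average, x <- momentum * x + (1 - momentum) * value; a NumPy illustration of that recurrence:

```python
import numpy as np

# Plain-NumPy rendering of the update semantics (no zero-debias correction).
x = np.array(0.0)
momentum = 0.9
for value in [1.0, 1.0, 1.0]:
    x = momentum * x + (1 - momentum) * value
    print(x)   # 0.1, 0.19, 0.271 -- slowly approaches 1.0
```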
asyml/texar-pytorch
b83d3ec17e19da08fc5f81996d02f91176e55e54
texar/torch/core/attention_mechanism.py
python
monotonic_attention
(p_choose_i: torch.Tensor, previous_attention: torch.Tensor, mode: str)
return attention
r"""Compute monotonic attention distribution from choosing probabilities. Monotonic attention implies that the input sequence is processed in an explicitly left-to-right manner when generating the output sequence. In addition, once an input sequence element is attended to at a given output time step, elements occurring before it cannot be attended to at subsequent output time steps. This function generates attention distributions according to these assumptions. For more information, see `Online and Linear-Time Attention by Enforcing Monotonic Alignments`. Args: p_choose_i: Probability of choosing input sequence/memory element i. Should be of shape (batch_size, input_sequence_length), and should all be in the range [0, 1]. previous_attention: The attention distribution from the previous output time step. Should be of shape (batch_size, input_sequence_length). For the first output time step, `previous_attention[n]` should be `[1, 0, 0, ..., 0] for all n in [0, ... batch_size - 1]`. mode: How to compute the attention distribution. Must be one of ``"recursive"``, ``"parallel"``, or ``"hard"``: - ``"recursive"`` recursively computes the distribution. This is slowest but is exact, general, and does not suffer from numerical instabilities. - ``"parallel"`` uses parallelized cumulative-sum and cumulative-product operations to compute a closed-form solution to the recurrence relation defining the attention distribution. This makes it more efficient than ``"recursive"``, but it requires numerical checks which make the distribution non-exact. This can be a problem in particular when input sequence is long and/or :attr:`p_choose_i` has entries very close to 0 or 1. - ``"hard"`` requires that the probabilities in :attr:`p_choose_i` are all either 0 or 1, and subsequently uses a more efficient and exact solution. Returns: A tensor of shape (batch_size, input_sequence_length) representing the attention distributions for each sequence in the batch. Raises: ValueError: mode is not one of ``"recursive"``, ``"parallel"``, ``"hard"``.
r"""Compute monotonic attention distribution from choosing probabilities. Monotonic attention implies that the input sequence is processed in an explicitly left-to-right manner when generating the output sequence. In addition, once an input sequence element is attended to at a given output time step, elements occurring before it cannot be attended to at subsequent output time steps. This function generates attention distributions according to these assumptions. For more information, see `Online and Linear-Time Attention by Enforcing Monotonic Alignments`.
[ "r", "Compute", "monotonic", "attention", "distribution", "from", "choosing", "probabilities", ".", "Monotonic", "attention", "implies", "that", "the", "input", "sequence", "is", "processed", "in", "an", "explicitly", "left", "-", "to", "-", "right", "manner", "...
def monotonic_attention(p_choose_i: torch.Tensor, previous_attention: torch.Tensor, mode: str) -> torch.Tensor: r"""Compute monotonic attention distribution from choosing probabilities. Monotonic attention implies that the input sequence is processed in an explicitly left-to-right manner when generating the output sequence. In addition, once an input sequence element is attended to at a given output time step, elements occurring before it cannot be attended to at subsequent output time steps. This function generates attention distributions according to these assumptions. For more information, see `Online and Linear-Time Attention by Enforcing Monotonic Alignments`. Args: p_choose_i: Probability of choosing input sequence/memory element i. Should be of shape (batch_size, input_sequence_length), and should all be in the range [0, 1]. previous_attention: The attention distribution from the previous output time step. Should be of shape (batch_size, input_sequence_length). For the first output time step, `previous_attention[n]` should be `[1, 0, 0, ..., 0] for all n in [0, ... batch_size - 1]`. mode: How to compute the attention distribution. Must be one of ``"recursive"``, ``"parallel"``, or ``"hard"``: - ``"recursive"`` recursively computes the distribution. This is slowest but is exact, general, and does not suffer from numerical instabilities. - ``"parallel"`` uses parallelized cumulative-sum and cumulative-product operations to compute a closed-form solution to the recurrence relation defining the attention distribution. This makes it more efficient than ``"recursive"``, but it requires numerical checks which make the distribution non-exact. This can be a problem in particular when input sequence is long and/or :attr:`p_choose_i` has entries very close to 0 or 1. - ``"hard"`` requires that the probabilities in :attr:`p_choose_i` are all either 0 or 1, and subsequently uses a more efficient and exact solution. Returns: A tensor of shape (batch_size, input_sequence_length) representing the attention distributions for each sequence in the batch. Raises: ValueError: mode is not one of ``"recursive"``, ``"parallel"``, ``"hard"``. """ # Force things to be tensors if not isinstance(p_choose_i, torch.Tensor): p_choose_i = torch.tensor(p_choose_i) if not isinstance(previous_attention, torch.Tensor): previous_attention = torch.tensor(previous_attention) if mode == "recursive": # Use .shape[0] when it's not None, or fall back on symbolic shape batch_size = p_choose_i.shape[0] # Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., # 1 - p_choose_i[-2]] shifted_1mp_choose_i = torch.cat((p_choose_i.new_ones(batch_size, 1), 1 - p_choose_i[:, :-1]), 1) # Compute attention distribution recursively as # q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i] # attention[i] = p_choose_i[i]*q[i] def f(x, yz): return torch.reshape(yz[0] * x + yz[1], (batch_size,)) x_tmp = f(torch.zeros((batch_size,)), torch.transpose( shifted_1mp_choose_i, 0, 1)) x_tmp = f(x_tmp, torch.transpose(previous_attention, 0, 1)) attention = p_choose_i * torch.transpose(x_tmp, 0, 1) elif mode == "parallel": batch_size = p_choose_i.shape[0] shifted_1mp_choose_i = torch.cat((p_choose_i.new_ones(batch_size, 1), 1 - p_choose_i[:, :-1]), 1) # safe_cumprod computes cumprod in logspace with numeric checks cumprod_1mp_choose_i = safe_cumprod(shifted_1mp_choose_i, dim=1) # Compute recurrence relation solution attention = p_choose_i * cumprod_1mp_choose_i * torch.cumsum( previous_attention / cumprod_1mp_choose_i.clamp(min=1e-10, max=1.), dim=1) elif mode == "hard": # Remove any probabilities before the index chosen last time step p_choose_i *= torch.cumsum(previous_attention, dim=1) # Now, use exclusive cumprod to remove probabilities after the first # chosen index, like so: # p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1] # cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0] # Product of above: [0, 0, 0, 1, 0, 0, 0, 0] batch_size = p_choose_i.shape[0] shifted_1mp_choose_i = torch.cat((p_choose_i.new_ones(batch_size, 1), 1 - p_choose_i[:, :-1]), 1) attention = p_choose_i * torch.cumprod(shifted_1mp_choose_i, dim=1) else: raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.") return attention
[ "def", "monotonic_attention", "(", "p_choose_i", ":", "torch", ".", "Tensor", ",", "previous_attention", ":", "torch", ".", "Tensor", ",", "mode", ":", "str", ")", "->", "torch", ".", "Tensor", ":", "# Force things to be tensors", "if", "not", "isinstance", "(...
https://github.com/asyml/texar-pytorch/blob/b83d3ec17e19da08fc5f81996d02f91176e55e54/texar/torch/core/attention_mechanism.py#L498-L594
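A small sketch of the ``"hard"`` branch above, with 0/1 choosing probabilities and a one-hot previous attention; the tensors are invented test values:

```python
import torch

# "hard" mode: attend to the first chosen index at or after the previous one.
p_choose_i = torch.tensor([[0., 0., 1., 1., 0.]])
previous_attention = torch.tensor([[0., 1., 0., 0., 0.]])  # attended index 1

p = p_choose_i * torch.cumsum(previous_attention, dim=1)   # mask earlier indices
shifted = torch.cat((p.new_ones(1, 1), 1 - p[:, :-1]), 1)  # exclusive cumprod input
attention = p * torch.cumprod(shifted, dim=1)
print(attention)  # tensor([[0., 0., 1., 0., 0.]])
```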
andyzsf/TuShare
92787ad0cd492614bdb6389b71a19c80d1c8c9ae
tushare/datayes/fundamental.py
python
Fundamental.FdmtCFInsu
(self, reportType='', secID='', ticker='', beginDate='', endDate='', publishDateBegin='', publishDateEnd='', field='')
return _ret_data(code, result)
1. Based on the insurance-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies); 2. Only consolidated statement data is collected, covering both the current and the prior period; 3. Corrections and adjustments that listed companies make to their published financial statements are also collected and displayed; 4. Amounts in this table are in RMB yuan; 5. Updated quarterly.
1. Based on the insurance-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies); 2. Only consolidated statement data is collected, covering both the current and the prior period; 3. Corrections and adjustments that listed companies make to their published financial statements are also collected and displayed; 4. Amounts in this table are in RMB yuan; 5. Updated quarterly.
[ "1. Based on the insurance-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies);", "2. Only consolidated statement data is collected, covering both the current and the prior period;", "3. Corrections and adjustments that listed companies make to their published financial statements are also collected and displayed;", "4. Amounts in this table are in RMB yuan;", "5. Updated quarterly." ]
def FdmtCFInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='', publishDateBegin='', publishDateEnd='', field=''): """ 1. Based on the insurance-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies); 2. Only consolidated statement data is collected, covering both the current and the prior period; 3. Corrections and adjustments that listed companies make to their published financial statements are also collected and displayed; 4. Amounts in this table are in RMB yuan; 5. Updated quarterly. """ code, result = self.client.getData(vs.FDMTCFINSU%(reportType, secID, ticker, beginDate, endDate, publishDateBegin, publishDateEnd, field)) return _ret_data(code, result)
[ "def", "FdmtCFInsu", "(", "self", ",", "reportType", "=", "''", ",", "secID", "=", "''", ",", "ticker", "=", "''", ",", "beginDate", "=", "''", ",", "endDate", "=", "''", ",", "publishDateBegin", "=", "''", ",", "publishDateEnd", "=", "''", ",", "fie...
https://github.com/andyzsf/TuShare/blob/92787ad0cd492614bdb6389b71a19c80d1c8c9ae/tushare/datayes/fundamental.py#L157-L169
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
python
_get_binary
(data, position, obj_end, opts, dummy1)
return value, end
Decode a BSON binary to bson.binary.Binary or python UUID.
Decode a BSON binary to bson.binary.Binary or python UUID.
[ "Decode", "a", "BSON", "binary", "to", "bson", ".", "binary", ".", "Binary", "or", "python", "UUID", "." ]
def _get_binary(data, position, obj_end, opts, dummy1): """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE(data[position:position + 5]) position += 5 if subtype == 2: length2 = _UNPACK_INT(data[position:position + 4])[0] position += 4 if length2 != length - 4: raise InvalidBSON("invalid binary (st 2) - lengths don't match!") length = length2 end = position + length if length < 0 or end > obj_end: raise InvalidBSON('bad binary object length') if subtype in (3, 4): # Java Legacy uuid_representation = opts.uuid_representation if uuid_representation == JAVA_LEGACY: java = data[position:end] value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) # C# legacy elif uuid_representation == CSHARP_LEGACY: value = uuid.UUID(bytes_le=data[position:end]) # Python else: value = uuid.UUID(bytes=data[position:end]) return value, end # Python3 special case. Decode subtype 0 to 'bytes'. if PY3 and subtype == 0: value = data[position:end] else: value = Binary(data[position:end], subtype) return value, end
[ "def", "_get_binary", "(", "data", ",", "position", ",", "obj_end", ",", "opts", ",", "dummy1", ")", ":", "length", ",", "subtype", "=", "_UNPACK_LENGTH_SUBTYPE", "(", "data", "[", "position", ":", "position", "+", "5", "]", ")", "position", "+=", "5", ...
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L190-L221
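The three UUID interpretations above differ only in byte order; a standalone illustration with invented bytes (no actual BSON parsing):

```python
import uuid

raw = bytes(range(16))  # stand-in for the 16 binary bytes from the document

# Standard (RFC 4122) interpretation:
print(uuid.UUID(bytes=raw))
# Java legacy: each 8-byte half is stored reversed:
print(uuid.UUID(bytes=raw[0:8][::-1] + raw[8:16][::-1]))
# C# legacy: little-endian layout of the first three fields:
print(uuid.UUID(bytes_le=raw))
```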
mrlesmithjr/Ansible
d44f0dc0d942bdf3bf7334b307e6048f0ee16e36
roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py
python
TreeBuilder.getDocument
(self)
return self.document
Return the final tree
Return the final tree
[ "Return", "the", "final", "tree" ]
def getDocument(self): "Return the final tree" return self.document
[ "def", "getDocument", "(", "self", ")", ":", "return", "self", ".", "document" ]
https://github.com/mrlesmithjr/Ansible/blob/d44f0dc0d942bdf3bf7334b307e6048f0ee16e36/roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py#L369-L371
wulczer/txpostgres
bd56a3648574bfd0c24543e4d9e4a611458c2da1
txpostgres/retrying.py
python
RetryingCall.start
(self, backoffIterator=None, failureTester=None)
return self._deferred
Start the call and retry it until it succeeds or fails. :param backoffIterator: A zero-argument callable that should return an iterator yielding reconnection delay periods. If :class:`None` then :func:`.simpleBackoffIterator` will be used. :type backoffIterator: callable :param failureTester: A one-argument callable that will be called with a :tm:`Failure <python.failure.Failure>` instance each time the function being retried fails. It should return :class:`None` if the call should be retried or a :tm:`Failure <python.failure.Failure>` if the retrying process should be stopped. If :class:`None` is used for this parameter, retrying will never stop until the backoff iterator is exhausted. :type failureTester: callable
Start the call and retry it until it succeeds or fails.
[ "Start", "the", "call", "and", "retry", "it", "until", "it", "succeeds", "or", "fails", "." ]
def start(self, backoffIterator=None, failureTester=None): """ Start the call and retry it until it succeeds or fails. :param backoffIterator: A zero-argument callable that should return an iterator yielding reconnection delay periods. If :class:`None` then :func:`.simpleBackoffIterator` will be used. :type backoffIterator: callable :param failureTester: A one-argument callable that will be called with a :tm:`Failure <python.failure.Failure>` instance each time the function being retried fails. It should return :class:`None` if the call should be retried or a :tm:`Failure <python.failure.Failure>` if the retrying process should be stopped. If :class:`None` is used for this parameter, retrying will never stop until the backoff iterator is exhausted. :type failureTester: callable """ self.resetBackoff(backoffIterator) if failureTester is None: failureTester = lambda _: None self._failureTester = failureTester self._deferred = defer.Deferred(self._cancel) self._inProgress = None self.failure = None self.cancelled = False self._call() return self._deferred
[ "def", "start", "(", "self", ",", "backoffIterator", "=", "None", ",", "failureTester", "=", "None", ")", ":", "self", ".", "resetBackoff", "(", "backoffIterator", ")", "if", "failureTester", "is", "None", ":", "failureTester", "=", "lambda", "_", ":", "No...
https://github.com/wulczer/txpostgres/blob/bd56a3648574bfd0c24543e4d9e4a611458c2da1/txpostgres/retrying.py#L135-L166
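A blocking, Twisted-free sketch of the same retry-with-backoff idea; `simple_backoff_iterator` here is a hypothetical stand-in for :func:`.simpleBackoffIterator`, and the real class schedules attempts on the reactor and returns a Deferred instead:

```python
def simple_backoff_iterator(initial=1.0, factor=2.0, count=3):
    # Hypothetical stand-in: yields exponentially growing delays.
    delay = initial
    for _ in range(count):
        yield delay
        delay *= factor

def retry(func, backoff_iterator=None, failure_tester=None):
    # One attempt per yielded delay; a tester can declare a failure fatal.
    last_exc = None
    for delay in backoff_iterator or simple_backoff_iterator():
        try:
            return func()
        except Exception as exc:
            last_exc = exc
            if failure_tester is not None and failure_tester(exc):
                raise  # tester says: stop retrying
            # here the real code would wait `delay` seconds before retrying
    raise last_exc

results = [Exception("down"), Exception("down"), "ok"]
def flaky():
    r = results.pop(0)
    if isinstance(r, Exception):
        raise r
    return r

print(retry(flaky))  # "ok" on the third attempt
```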
bubbliiiing/Semantic-Segmentation
4cc89a22ffc9018d2b44e69e85672c7bdd1ab706
deeplab_Mobile/nets/deeplab.py
python
SepConv_BN
(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3)
return x
[]
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3): if stride == 1: depth_padding = 'same' else: kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg x = ZeroPadding2D((pad_beg, pad_end))(x) depth_padding = 'valid' if not depth_activation: x = Activation('relu')(x) # First apply the 3x3 depthwise separable convolution x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate), padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x) x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x) if depth_activation: x = Activation('relu')(x) # Use a 1x1 convolution to adjust the channel count x = Conv2D(filters, (1, 1), padding='same', use_bias=False, name=prefix + '_pointwise')(x) x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x) if depth_activation: x = Activation('relu')(x) return x
[ "def", "SepConv_BN", "(", "x", ",", "filters", ",", "prefix", ",", "stride", "=", "1", ",", "kernel_size", "=", "3", ",", "rate", "=", "1", ",", "depth_activation", "=", "False", ",", "epsilon", "=", "1e-3", ")", ":", "if", "stride", "==", "1", ":"...
https://github.com/bubbliiiing/Semantic-Segmentation/blob/4cc89a22ffc9018d2b44e69e85672c7bdd1ab706/deeplab_Mobile/nets/deeplab.py#L16-L43
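The same depthwise-then-pointwise pattern as a self-contained Keras sketch (stride-1 branch only; the layer names and input shape are illustrative, not taken from the repo):

```python
import tensorflow as tf
from tensorflow.keras import layers

def sep_conv_bn(x, filters, prefix, stride=1, rate=1):
    # Depthwise 3x3 then pointwise 1x1, each followed by batch norm,
    # mirroring the 'same'-padding branch of SepConv_BN above.
    x = layers.DepthwiseConv2D(3, strides=stride, dilation_rate=rate,
                               padding='same', use_bias=False,
                               name=prefix + '_depthwise')(x)
    x = layers.BatchNormalization(name=prefix + '_depthwise_BN')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(filters, 1, padding='same', use_bias=False,
                      name=prefix + '_pointwise')(x)
    x = layers.BatchNormalization(name=prefix + '_pointwise_BN')(x)
    return layers.Activation('relu')(x)

inputs = tf.keras.Input(shape=(64, 64, 32))
outputs = sep_conv_bn(inputs, filters=64, prefix='block1')
print(tf.keras.Model(inputs, outputs).count_params())
```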
google-research/language
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
language/conpono/evals/classifier_utils.py
python
LCQMCPairClassificationProcessor.get_labels
(self)
return ["0", "1"]
See base class.
See base class.
[ "See", "base", "class", "." ]
def get_labels(self): """See base class.""" return ["0", "1"]
[ "def", "get_labels", "(", "self", ")", ":", "return", "[", "\"0\"", ",", "\"1\"", "]" ]
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/conpono/evals/classifier_utils.py#L572-L574
PokemonGoF/PokemonGo-Bot-Desktop
4bfa94f0183406c6a86f93645eff7abd3ad4ced8
build/pywin/Lib/email/charset.py
python
Charset.to_splittable
(self, s)
Convert a possibly multibyte string to a safely splittable format. Uses the input_codec to try and convert the string to Unicode, so it can be safely split on character boundaries (even for multibyte characters). Returns the string as-is if it isn't known how to convert it to Unicode with the input_charset. Characters that could not be converted to Unicode will be replaced with the Unicode replacement character U+FFFD.
Convert a possibly multibyte string to a safely splittable format.
[ "Convert", "a", "possibly", "multibyte", "string", "to", "a", "safely", "splittable", "format", "." ]
def to_splittable(self, s): """Convert a possibly multibyte string to a safely splittable format. Uses the input_codec to try and convert the string to Unicode, so it can be safely split on character boundaries (even for multibyte characters). Returns the string as-is if it isn't known how to convert it to Unicode with the input_charset. Characters that could not be converted to Unicode will be replaced with the Unicode replacement character U+FFFD. """ if isinstance(s, unicode) or self.input_codec is None: return s try: return unicode(s, self.input_codec, 'replace') except LookupError: # Input codec not installed on system, so return the original # string unchanged. return s
[ "def", "to_splittable", "(", "self", ",", "s", ")", ":", "if", "isinstance", "(", "s", ",", "unicode", ")", "or", "self", ".", "input_codec", "is", "None", ":", "return", "s", "try", ":", "return", "unicode", "(", "s", ",", "self", ".", "input_codec"...
https://github.com/PokemonGoF/PokemonGo-Bot-Desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/email/charset.py#L277-L297
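This is Python 2 code; the Python 3 analogue of "convert with replacement characters" looks like the following (the codec and payload below are invented for the demo):

```python
# Decode bytes to str, substituting U+FFFD for anything the input
# codec cannot represent, and keep the original if the codec is missing.
payload = '日本語'.encode('euc-jp') + b'\xff\xfe'   # valid text plus junk bytes

try:
    text = payload.decode('euc-jp', 'replace')
except LookupError:        # input codec not installed on this system
    text = payload

print(text)  # 日本語 followed by replacement characters
```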
pydata/patsy
5fc881104b749b720b08e393a5505d6e69d72f95
patsy/missing.py
python
NAAction.is_numerical_NA
(self, arr)
return mask
Returns a 1-d mask array indicating which rows in an array of numerical values contain at least one NA value. Note that here `arr` is a numpy array or pandas DataFrame.
Returns a 1-d mask array indicating which rows in an array of numerical values contain at least one NA value.
[ "Returns", "a", "1", "-", "d", "mask", "array", "indicating", "which", "rows", "in", "an", "array", "of", "numerical", "values", "contain", "at", "least", "one", "NA", "value", "." ]
def is_numerical_NA(self, arr): """Returns a 1-d mask array indicating which rows in an array of numerical values contain at least one NA value. Note that here `arr` is a numpy array or pandas DataFrame.""" mask = np.zeros(arr.shape, dtype=bool) if "NaN" in self.NA_types: mask |= np.isnan(arr) if mask.ndim > 1: mask = np.any(mask, axis=1) return mask
[ "def", "is_numerical_NA", "(", "self", ",", "arr", ")", ":", "mask", "=", "np", ".", "zeros", "(", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "if", "\"NaN\"", "in", "self", ".", "NA_types", ":", "mask", "|=", "np", ".", "isnan", "(", "a...
https://github.com/pydata/patsy/blob/5fc881104b749b720b08e393a5505d6e69d72f95/patsy/missing.py#L129-L139
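A NumPy illustration of the masking logic, assuming "NaN" is in `NA_types`:

```python
import numpy as np

arr = np.array([[1.0, 2.0],
                [np.nan, 3.0],
                [4.0, 5.0]])

mask = np.zeros(arr.shape, dtype=bool)
mask |= np.isnan(arr)          # flag NaN cells
if mask.ndim > 1:
    mask = np.any(mask, axis=1)  # collapse to one flag per row
print(mask)  # [False  True False] -- row 1 contains an NA
```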
jhetherly/EnglishSpeechUpsampler
1265fa3ec68fa1e662b5d7f9c61ca730f9beee99
models.py
python
build_downsampling_block
(input_tensor, filter_size, stride, layer_number, act=tf.nn.relu, is_training=True, depth=None, padding='VALID', tensorboard_output=False, name=None)
return l
[]
def build_downsampling_block(input_tensor, filter_size, stride, layer_number, act=tf.nn.relu, is_training=True, depth=None, padding='VALID', tensorboard_output=False, name=None): # assume this layer is twice the depth of the previous layer if no depth # information is given if depth is None: depth = 2*input_tensor.get_shape().as_list()[-1] with tf.name_scope('{}_layer_weights'.format(layer_number)): W = weight_variable([filter_size, input_tensor.get_shape().as_list()[-1], depth]) if tensorboard_output: histogram_variable_summaries(W) with tf.name_scope('{}_layer_biases'.format(layer_number)): b = bias_variable([depth]) if tensorboard_output: histogram_variable_summaries(b) with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)): l = tf.nn.conv1d(input_tensor, W, stride=stride, padding=padding, name=name) + b if tensorboard_output: histogram_variable_summaries(l) with tf.name_scope('{}_layer_batch_norm'.format(layer_number)) as scope: # l = tf.nn.dropout(l, keep_prob=0.25) l = batch_norm(l, is_training, scope) with tf.name_scope('{}_layer_conv_activation'.format(layer_number)): l = act(l, name=name) if tensorboard_output: histogram_variable_summaries(l) return l
[ "def", "build_downsampling_block", "(", "input_tensor", ",", "filter_size", ",", "stride", ",", "layer_number", ",", "act", "=", "tf", ".", "nn", ".", "relu", ",", "is_training", "=", "True", ",", "depth", "=", "None", ",", "padding", "=", "'VALID'", ",", ...
https://github.com/jhetherly/EnglishSpeechUpsampler/blob/1265fa3ec68fa1e662b5d7f9c61ca730f9beee99/models.py#L176-L214
saturday06/VRM_Addon_for_Blender
0fc59703bb203dca760501221d34ecc4a566e64f
io_scene_vrm/editor/vrm0/migration.py
python
migrate_vrm0_first_person
( first_person_props: bpy.types.PropertyGroup, first_person_dict: Any, )
[]
def migrate_vrm0_first_person( first_person_props: bpy.types.PropertyGroup, first_person_dict: Any, ) -> None: if not isinstance(first_person_dict, dict): return first_person_bone = first_person_dict.get("firstPersonBone") if isinstance(first_person_bone, str): first_person_props.first_person_bone.value = first_person_bone first_person_bone_offset = convert.vrm_json_vector3_to_tuple( first_person_dict.get("firstPersonBoneOffset") ) if first_person_bone_offset is not None: # Axis confusing (x, y, z) = first_person_bone_offset first_person_props.first_person_bone_offset = (x, z, y) mesh_annotations = first_person_dict.get("meshAnnotations") if isinstance(mesh_annotations, collections.Iterable): for mesh_annotation_dict in mesh_annotations: mesh_annotation_props = first_person_props.mesh_annotations.add() if not isinstance(mesh_annotation_dict, dict): continue mesh = mesh_annotation_dict.get("mesh") if isinstance(mesh, str) and mesh in bpy.data.meshes: mesh_annotation_props.mesh.value = bpy.data.meshes[mesh].name first_person_flag = mesh_annotation_dict.get("firstPersonFlag") if isinstance(first_person_flag, str): mesh_annotation_props.first_person_flag = first_person_flag look_at_type_name = first_person_dict.get("lookAtTypeName") if look_at_type_name in ["Bone", "BlendShape"]: first_person_props.look_at_type_name = look_at_type_name for (look_at_props, look_at_dict) in [ ( first_person_props.look_at_horizontal_inner, first_person_dict.get("lookAtHorizontalInner"), ), ( first_person_props.look_at_horizontal_outer, first_person_dict.get("lookAtHorizontalOuter"), ), ( first_person_props.look_at_vertical_down, first_person_dict.get("lookAtVerticalDown"), ), ( first_person_props.look_at_vertical_up, first_person_dict.get("lookAtVerticalUp"), ), ]: if not isinstance(look_at_dict, dict): continue curve = convert.vrm_json_curve_to_list(look_at_dict.get("curve")) if curve is not None: look_at_props.curve = curve x_range = look_at_dict.get("xRange") if isinstance(x_range, (float, int)): look_at_props.x_range = x_range y_range = look_at_dict.get("yRange") if isinstance(y_range, (float, int)): look_at_props.y_range = y_range
[ "def", "migrate_vrm0_first_person", "(", "first_person_props", ":", "bpy", ".", "types", ".", "PropertyGroup", ",", "first_person_dict", ":", "Any", ",", ")", "->", "None", ":", "if", "not", "isinstance", "(", "first_person_dict", ",", "dict", ")", ":", "retur...
https://github.com/saturday06/VRM_Addon_for_Blender/blob/0fc59703bb203dca760501221d34ecc4a566e64f/io_scene_vrm/editor/vrm0/migration.py#L125-L195
bshillingford/python-torchfile
fbd434a5b5562c88b91a95e6476e11dbb7735436
torchfile.py
python
TorchObject.__dir__
(self)
return keys
[]
def __dir__(self): keys = list(self._obj.keys()) keys.append('torch_typename') return keys
[ "def", "__dir__", "(", "self", ")", ":", "keys", "=", "list", "(", "self", ".", "_obj", ".", "keys", "(", ")", ")", "keys", ".", "append", "(", "'torch_typename'", ")", "return", "keys" ]
https://github.com/bshillingford/python-torchfile/blob/fbd434a5b5562c88b91a95e6476e11dbb7735436/torchfile.py#L120-L123
pencil1/ApiTestManage
851a54d5629456b7e967e15186244409ddf783cc
app/models.py
python
User.init_user
()
[]
def init_user(): user = User.query.filter_by(name='管理员').first() if user: print('The administrator account already exists') print('--' * 30) return else: user = User(name=u'管理员', account='admin', password='123456', status=1, role_id=2) db.session.add(user) db.session.commit() print('Administrator account created successfully') print('--' * 30)
[ "def", "init_user", "(", ")", ":", "user", "=", "User", ".", "query", ".", "filter_by", "(", "name", "=", "'管理员').firs", "t", "(", ")", "", "", "if", "user", ":", "print", "(", "'The administrator account already exists'", ")", "print", "(", "'--'", "*",...
https://github.com/pencil1/ApiTestManage/blob/851a54d5629456b7e967e15186244409ddf783cc/app/models.py#L126-L137
nucleic/enaml
65c2a2a2d765e88f2e1103046680571894bb41ed
enaml/qt/qt_color_dialog.py
python
QtColorDialog.set_custom_color
(index, color)
Set the custom color for the given index.
Set the custom color for the given index.
[ "Set", "the", "custom", "color", "for", "the", "given", "index", "." ]
def set_custom_color(index, color): """ Set the custom color for the given index. """ QColorDialog.setCustomColor(index, color.argb)
[ "def", "set_custom_color", "(", "index", ",", "color", ")", ":", "QColorDialog", ".", "setCustomColor", "(", "index", ",", "color", ".", "argb", ")" ]
https://github.com/nucleic/enaml/blob/65c2a2a2d765e88f2e1103046680571894bb41ed/enaml/qt/qt_color_dialog.py#L145-L149
PyHDI/veriloggen
2382d200deabf59cfcfd741f5eba371010aaf2bb
veriloggen/seq/seq.py
python
Seq.implement
(self)
[]
def implement(self): if self.as_module: self.make_module() return self.make_always()
[ "def", "implement", "(", "self", ")", ":", "if", "self", ".", "as_module", ":", "self", ".", "make_module", "(", ")", "return", "self", ".", "make_always", "(", ")" ]
https://github.com/PyHDI/veriloggen/blob/2382d200deabf59cfcfd741f5eba371010aaf2bb/veriloggen/seq/seq.py#L363-L368
openstack/neutron
fb229fb527ac8b95526412f7762d90826ac41428
neutron/db/l3_db.py
python
L3_NAT_db_mixin.disassociate_floatingips
(self, context, port_id, do_notify=True)
return router_ids
Disassociate all floating IPs linked to specific port. @param port_id: ID of the port to disassociate floating IPs. @param do_notify: whether we should notify routers right away. @return: set of router-ids that require notification updates if do_notify is False, otherwise None.
Disassociate all floating IPs linked to specific port.
[ "Disassociate", "all", "floating", "IPs", "linked", "to", "specific", "port", "." ]
def disassociate_floatingips(self, context, port_id, do_notify=True): """Disassociate all floating IPs linked to specific port. @param port_id: ID of the port to disassociate floating IPs. @param do_notify: whether we should notify routers right away. @return: set of router-ids that require notification updates if do_notify is False, otherwise None. """ router_ids = super(L3_NAT_db_mixin, self).disassociate_floatingips( context, port_id, do_notify) if do_notify: self.notify_routers_updated(context, router_ids) # since caller assumes that we handled notifications on its # behalf, return nothing return return router_ids
[ "def", "disassociate_floatingips", "(", "self", ",", "context", ",", "port_id", ",", "do_notify", "=", "True", ")", ":", "router_ids", "=", "super", "(", "L3_NAT_db_mixin", ",", "self", ")", ".", "disassociate_floatingips", "(", "context", ",", "port_id", ",",...
https://github.com/openstack/neutron/blob/fb229fb527ac8b95526412f7762d90826ac41428/neutron/db/l3_db.py#L2155-L2171
pandaproject/panda
133baa47882a289773a30c9656e2ea4efe569387
panda/models/dataset.py
python
Dataset.import_data
(self, user, upload, external_id_field_index=None)
Import data into this ``Dataset`` from a given ``DataUpload``.
Import data into this ``Dataset`` from a given ``DataUpload``.
[ "Import", "data", "into", "this", "Dataset", "from", "a", "given", "DataUpload", "." ]
def import_data(self, user, upload, external_id_field_index=None): """ Import data into this ``Dataset`` from a given ``DataUpload``. """ self.lock() try: if upload.imported: raise DataImportError(_('This file has already been imported.')) task_type = get_import_task_type_for_upload(upload) if not task_type: # This is normally caught on the client. raise DataImportError(_('This file type is not supported for data import.')) if self.column_schema: # This is normally caught on the client. if upload.columns != [c['name'] for c in self.column_schema]: raise DataImportError(_('The columns in this file do not match those in the dataset.')) else: self.column_schema = make_column_schema(upload.columns, types=upload.guessed_types) if self.sample_data is None: self.sample_data = upload.sample_data # If this is the first import and the API hasn't been used, save that information if self.initial_upload is None and self.row_count is None: self.initial_upload = upload self.current_task = TaskStatus.objects.create( task_name=task_type.name, task_description=_('Import data from %(filename)s into %(slug)s.') \ % {'filename': upload.filename, 'slug': self.slug}, creator=user ) self.save() task_type.apply_async( args=[self.slug, upload.id], kwargs={ 'external_id_field_index': external_id_field_index }, task_id=self.current_task.id ) except: self.unlock() raise
[ "def", "import_data", "(", "self", ",", "user", ",", "upload", ",", "external_id_field_index", "=", "None", ")", ":", "self", ".", "lock", "(", ")", "try", ":", "if", "upload", ".", "imported", ":", "raise", "DataImportError", "(", "_", "(", "'This file ...
https://github.com/pandaproject/panda/blob/133baa47882a289773a30c9656e2ea4efe569387/panda/models/dataset.py#L180-L225
dgilland/pydash
24ad0e43b51b367d00447c45baa68c9c03ad1a52
src/pydash/helpers.py
python
parse_iteratee
(iteratee_keyword, *args, **kwargs)
return iteratee, args
Try to find iteratee function passed in either as a keyword argument or as the last positional argument in `args`.
Try to find iteratee function passed in either as a keyword argument or as the last positional argument in `args`.
[ "Try", "to", "find", "iteratee", "function", "passed", "in", "either", "as", "a", "keyword", "argument", "or", "as", "the", "last", "positional", "argument", "in", "args", "." ]
def parse_iteratee(iteratee_keyword, *args, **kwargs): """Try to find iteratee function passed in either as a keyword argument or as the last positional argument in `args`.""" iteratee = kwargs.get(iteratee_keyword) last_arg = args[-1] if iteratee is None and ( callable(last_arg) or isinstance(last_arg, str) or isinstance(last_arg, dict) or last_arg is None ): iteratee = last_arg args = args[:-1] return iteratee, args
[ "def", "parse_iteratee", "(", "iteratee_keyword", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "iteratee", "=", "kwargs", ".", "get", "(", "iteratee_keyword", ")", "last_arg", "=", "args", "[", "-", "1", "]", "if", "iteratee", "is", "None", "an...
https://github.com/dgilland/pydash/blob/24ad0e43b51b367d00447c45baa68c9c03ad1a52/src/pydash/helpers.py#L236-L251
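A standalone demo; the helper is copied inline (lightly condensed) so the snippet runs on its own:

```python
def parse_iteratee(iteratee_keyword, *args, **kwargs):
    # Condensed copy of the helper above, for demonstration only.
    iteratee = kwargs.get(iteratee_keyword)
    last_arg = args[-1]
    if iteratee is None and (
        callable(last_arg) or isinstance(last_arg, (str, dict)) or last_arg is None
    ):
        iteratee = last_arg
        args = args[:-1]
    return iteratee, args

# Iteratee passed positionally (last argument)...
print(parse_iteratee('iteratee', [1, 2, 3], len))
# ...or as a keyword, leaving the positional args untouched.
print(parse_iteratee('iteratee', [1, 2, 3], iteratee=len))
```

Both calls return `(len, ([1, 2, 3],))`: the keyword form wins when present, otherwise the last positional argument is consumed as the iteratee.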