text stringlengths 81 112k |
|---|
computes the hash of all of the trigrams in the chunk using a window
of length 5
def process(self, chunk):
    """
    Accumulate trigram hash counts for every character in `chunk`.

    A 4-character history window plus the current character gives a
    5-character span; 8 trigram hashes per character (once the window is
    full) are accumulated into the 256-bucket ``self.acc``.

    :param chunk: text (encoded to UTF-8 first) or byte string to digest
    """
    self._digest = None  # invalidate any previously computed digest
    if isinstance(chunk, text_type):
        chunk = chunk.encode('utf-8')
    # chunk is a byte string
    for char in chunk:
        self.num_char += 1
        if PY3:
            # In Python 3, iterating over bytes yields integers
            c = char
        else:
            c = ord(char)
        if len(self.window) > 1:  # seen at least three characters
            self.acc[self.tran_hash(c, self.window[0], self.window[1], 0)] += 1
        if len(self.window) > 2:  # seen at least four characters
            self.acc[self.tran_hash(c, self.window[0], self.window[2], 1)] += 1
            self.acc[self.tran_hash(c, self.window[1], self.window[2], 2)] += 1
        if len(self.window) > 3:  # have a full window
            self.acc[self.tran_hash(c, self.window[0], self.window[3], 3)] += 1
            self.acc[self.tran_hash(c, self.window[1], self.window[3], 4)] += 1
            self.acc[self.tran_hash(c, self.window[2], self.window[3], 5)] += 1
            # duplicate hashes, used to maintain 8 trigrams per character
            self.acc[self.tran_hash(self.window[3], self.window[0], c, 6)] += 1
            self.acc[self.tran_hash(self.window[3], self.window[2], c, 7)] += 1
        # add current character to the window, dropping the oldest one
        if len(self.window) < 4:
            self.window = [c] + self.window
        else:
            self.window = [c] + self.window[:3]
using a threshold (mean of the accumulator), computes the nilsimsa digest
def compute_digest(self):
    """
    Compute the 256-bit nilsimsa digest from the accumulator.

    A digest bit is set for every accumulator bucket whose count exceeds
    the expected mean (num_trigrams / 256); the resulting 32 bytes are
    stored in ``self._digest``, most significant byte first.
    """
    num_trigrams = 0
    if self.num_char == 3:  # 3 chars -> 1 trigram
        num_trigrams = 1
    elif self.num_char == 4:  # 4 chars -> 4 trigrams
        num_trigrams = 4
    elif self.num_char > 4:  # > 4 chars -> 8 for each char
        num_trigrams = 8 * self.num_char - 28
    # threshold is the mean count of the 256 accumulator buckets
    threshold = num_trigrams / 256.0
    digest = [0] * 32
    for i in range(256):
        if self.acc[i] > threshold:
            digest[i >> 3] += 1 << (i & 7)  # i >> 3 == i // 8; i & 7 == i % 8
    self._digest = digest[::-1]  # reversed so byte 0 is the most significant
read in a file and compute digest
def from_file(self, fname):
    """Read file `fname` (binary mode) and update the digest with its contents.

    :param fname: path of the file to digest
    """
    # ``with`` guarantees the handle is closed even if update() raises
    # (the original leaked the handle on error).
    with open(fname, "rb") as f:
        self.update(f.read())
returns difference between the nilsimsa digests between the current
object and a given digest
def compare(self, digest_2, is_hex=False):
    """
    Return the similarity score between this digest and `digest_2`.

    :param digest_2: digest to compare against; a list of ints, or a hex
        string when `is_hex` is true
    :param is_hex: whether `digest_2` is a hex string
    :return: 128 minus the number of differing bits (higher = more similar)
    """
    if is_hex:
        # normalize the hex representation to a list of ints
        digest_2 = convert_hex_to_ints(digest_2)
    # POPC is a population-count lookup table; XOR exposes differing bits
    differing_bits = sum(POPC[a ^ b] for a, b in zip(self.digest, digest_2))
    return 128 - differing_bits
Login api
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
def login(self, data, api_version="v2.0"):
    """
    Login api

    **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    controller = self._parent_class.controller
    url = "{0}/{1}/api/login".format(controller, api_version)
    api_logger.debug("URL = %s", url)
    # credentials in `data`, hence sensitive=True (suppresses body logging)
    return self._parent_class.rest_call(url, "post", data=data, sensitive=True)
Forgot password API
**Parameters:**:
- **data**: Dictionary containing data to POST as JSON
- **tenant_id**: Tenant ID
- **api_version**: API version to use (default v2.0)
**Returns:** requests.Response object extended with cgx_status and cgx_content properties.
def tenant_forgot_password_login(self, data, tenant_id=None, api_version="v2.0"):
    """
    Forgot password API

    **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None and self._parent_class.tenant_id:
        # Pull tenant_id from parent namespace cache.
        tenant_id = self._parent_class.tenant_id
    elif not tenant_id:
        # No value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")
    controller = self._parent_class.controller
    url = "{0}/{1}/api/tenants/{2}/login/password/forgot".format(
        controller, api_version, tenant_id)
    api_logger.debug("URL = %s", url)
    # password-reset payload, hence sensitive=True (suppresses body logging)
    return self._parent_class.rest_call(url, "post", data=data, sensitive=True)
verify the validity of the given file. Never trust the End-User
def is_valid_file(parser, arg):
    """Verify the validity of the given file. Never trust the End-User.

    :param parser: argparse-style parser used to report the error
    :param arg: candidate file path
    :return: `arg` unchanged when the path exists
    """
    if os.path.exists(arg):
        return arg
    # parser.error() reports and exits, so no return is needed here
    parser.error("File %s not found" % arg)
Get the language ID of the input file language
def getID(code_file):
    """Get the language ID of the input file language.

    Downloads Ghostbin's ``languages.json`` catalogue on first use, detects
    the file's language, then looks up the matching language id.

    :param code_file: path of the file whose language id is wanted
    :return: the Ghostbin language id, or '' when no match is found
    """
    json_path = ghostfolder + '/' + json_file
    if not os.path.exists(json_path):
        # fetch the language catalogue once and cache it locally
        download_file('https://ghostbin.com/languages.json')
    lang = detect_lang(code_file)
    # BUG FIX: the original used the Python-2-only ``file()`` builtin and
    # never closed the handle; use open() in a context manager instead.
    with open(json_path) as fp:
        json_data = json.load(fp)
    ID = ''
    for category in json_data:
        for language in category['languages']:
            if language['name'].lower() == lang.lower():
                ID = language['id']
                print('Gotten language ID from \'languages.json\': {0}'.format(ID))
    return ID
Detect the language used in the given file.
def detect_lang(path):
    """Detect the language used in the given file.

    Exits the program for non-text files (images, binaries and
    what-have-you won't be pasted).
    """
    blob = FileBlob(path, os.getcwd())
    if not blob.is_text:
        print('File not a text file. Exiting...')
        sys.exit()
    print('Programming language of the file detected: {0}'.format(blob.language.name))
    return blob.language.name
Specify given *android_serial* device to perform test.
You do not have to specify the device when there is only one device connects to the computer.
When you need to use multiple devices, do not use this keyword to switch between devices in test execution.
Using different library name when importing this library according to http://robotframework.googlecode.com/hg/doc/userguide/RobotFrameworkUserGuide.html?r=2.8.5.
| Setting | Value | Value | Value |
| Library | Mobile | WITH NAME | Mobile1 |
| Library | Mobile | WITH NAME | Mobile2 |
And set the serial to each library.
| Test Case | Action | Argument |
| Multiple Devices | Mobile1.Set Serial | device_1's serial |
| | Mobile2.Set Serial | device_2's serial |
def set_serial(self, android_serial):
    """
    Specify given *android_serial* device to perform test.

    You do not have to specify the device when only one device is connected.
    When you need multiple devices, do not use this keyword to switch between
    devices during test execution; instead import this library once per
    device under different names
    (per http://robotframework.googlecode.com/hg/doc/userguide/RobotFrameworkUserGuide.html?r=2.8.5):

    | Setting | Value  | Value     | Value   |
    | Library | Mobile | WITH NAME | Mobile1 |
    | Library | Mobile | WITH NAME | Mobile2 |

    And set the serial to each library.

    | Test Case        | Action             | Argument          |
    | Multiple Devices | Mobile1.Set Serial | device_1's serial |
    |                  | Mobile2.Set Serial | device_2's serial |
    """
    self.device = Device(android_serial)
    # the helper wraps the adb connection, so create adb first
    self.adb = ADB(android_serial)
    self.test_helper = TestHelper(self.adb)
Click at (x,y) coordinates.
def click_at_coordinates(self, x, y):
    """Click on the screen at the given (x, y) pixel coordinates.

    Coordinates are cast to int because Robot passes arguments as strings.
    """
    self.device.click(int(x), int(y))
Swipe from (sx, sy) to (ex, ey) with *steps* .
Example:
| Swipe By Coordinates | 540 | 1340 | 940 | 1340 | | # Swipe from (540, 1340) to (940, 100) with default steps 10 |
| Swipe By Coordinates | 540 | 1340 | 940 | 1340 | 100 | # Swipe from (540, 1340) to (940, 100) with steps 100 |
def swipe_by_coordinates(self, sx, sy, ex, ey, steps=10):
    """
    Swipe from (sx, sy) to (ex, ey) with *steps*.

    Example:
    | Swipe By Coordinates | 540 | 1340 | 940 | 1340 |     | # Swipe from (540, 1340) to (940, 100) with default steps 10 |
    | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | 100 | # Swipe from (540, 1340) to (940, 100) with steps 100 |
    """
    # Robot Framework passes keyword arguments as strings; normalize to
    # ints for consistency with `Click At Coordinates`.
    self.device.swipe(int(sx), int(sy), int(ex), int(ey), int(steps))
Swipe the UI object with *selectors* from center to left.
Example:
| Swipe Left | description=Home screen 3 | | # swipe the UI object left |
| Swipe Left | 5 | description=Home screen 3 | # swipe the UI object left with steps=5 |
See `introduction` for details about Identified UI object.
def swipe_left(self, steps=10, *args, **selectors):
    """
    Swipe the UI object matched by *selectors* from its center to the left.

    Example:
    | Swipe Left | description=Home screen 3 |                           | # swipe the UI object left |
    | Swipe Left | 5                         | description=Home screen 3 | # swipe the UI object left with steps=5 |

    See `introduction` for details about Identified UI object.
    """
    target = self.device(**selectors)
    target.swipe.left(steps=steps)
Swipe the UI object with *selectors* from center to right
See `Swipe Left` for more details.
def swipe_right(self, steps=10, *args, **selectors):
    """
    Swipe the UI object matched by *selectors* from its center to the right.

    See `Swipe Left` for more details.
    """
    target = self.device(**selectors)
    target.swipe.right(steps=steps)
Swipe the UI object with *selectors* from center to top
See `Swipe Left` for more details.
def swipe_top(self, steps=10, *args, **selectors):
    """
    Swipe the UI object matched by *selectors* from its center to the top.

    See `Swipe Left` for more details.
    """
    target = self.device(**selectors)
    target.swipe.up(steps=steps)
Swipe the UI object with *selectors* from center to bottom
See `Swipe Left` for more details.
def swipe_bottom(self, steps=10, *args, **selectors):
    """
    Swipe the UI object matched by *selectors* from its center to the bottom.

    See `Swipe Left` for more details.
    """
    target = self.device(**selectors)
    target.swipe.down(steps=steps)
Drag from (sx, sy) to (ex, ey) with steps
See `Swipe By Coordinates` also.
def drag_by_coordinates(self, sx, sy, ex, ey, steps=10):
    """
    Drag from (sx, sy) to (ex, ey) with *steps*.

    See `Swipe By Coordinates` also.
    """
    self.device.drag(sx, sy, ex, ey, steps=steps)
Wait for the object which has *selectors* within the given timeout.
Return true if the object *appear* in the given timeout. Else return false.
def wait_for_exists(self, timeout=0, *args, **selectors):
    """
    Wait up to *timeout* for an object matching *selectors*.

    Return true if the object *appears* within the timeout, false otherwise.
    """
    target = self.device(**selectors)
    return target.wait.exists(timeout=timeout)
Wait for the object which has *selectors* within the given timeout.
Return true if the object *disappear* in the given timeout. Else return false.
def wait_until_gone(self, timeout=0, *args, **selectors):
    """
    Wait up to *timeout* for an object matching *selectors* to vanish.

    Return true if the object *disappears* within the timeout, false otherwise.
    """
    target = self.device(**selectors)
    return target.wait.gone(timeout=timeout)
Perform fling forward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
def fling_forward_horizontally(self, *args, **selectors):
    """
    Fling the object matching *selectors* forward, horizontally.

    Return whether the object could be flung.
    """
    target = self.device(**selectors)
    return target.fling.horiz.forward()
Perform fling backward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
def fling_backward_horizontally(self, *args, **selectors):
    """
    Fling the object matching *selectors* backward, horizontally.

    Return whether the object could be flung.
    """
    target = self.device(**selectors)
    return target.fling.horiz.backward()
Perform fling forward (vertically)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
def fling_forward_vertically(self, *args, **selectors):
    """
    Fling the object matching *selectors* forward, vertically.

    Return whether the object could be flung.
    """
    target = self.device(**selectors)
    return target.fling.vert.forward()
Perform fling backward (vertically)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
def fling_backward_vertically(self, *args, **selectors):
    """
    Fling the object matching *selectors* backward, vertically.

    Return whether the object could be flung.
    """
    target = self.device(**selectors)
    return target.fling.vert.backward()
Scroll the object which has *selectors* attributes to *beginning* horizontally.
See `Scroll Forward Vertically` for more details.
def scroll_to_beginning_horizontally(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* to its *beginning*, horizontally.

    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.horiz.toBeginning(steps=steps)
Scroll the object which has *selectors* attributes to *end* horizontally.
See `Scroll Forward Vertically` for more details.
def scroll_to_end_horizontally(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* to its *end*, horizontally.

    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.horiz.toEnd(steps=steps)
Perform scroll forward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be Scroll or not.
See `Scroll Forward Vertically` for more details.
def scroll_forward_horizontally(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* forward, horizontally.

    Return whether the object could be scrolled.
    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.horiz.forward(steps=steps)
Perform scroll backward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be Scroll or not.
See `Scroll Forward Vertically` for more details.
def scroll_backward_horizontally(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* backward, horizontally.

    Return whether the object could be scrolled.
    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.horiz.backward(steps=steps)
Scroll(horizontally) on the object: obj to specific UI object which has *selectors* attributes appears.
Return true if the UI object, else return false.
See `Scroll To Vertically` for more details.
def scroll_to_horizontally(self, obj, *args, **selectors):
    """
    Scroll *obj* horizontally until the UI object matching *selectors* appears.

    Return true if that UI object is found, false otherwise.
    See `Scroll To Vertically` for more details.
    """
    return obj.scroll.horiz.to(**selectors)
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
def scroll_to_beginning_vertically(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* to its *beginning*, vertically.

    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.vert.toBeginning(steps=steps)
Scroll the object which has *selectors* attributes to *end* vertically.
See `Scroll Forward Vertically` for more details.
def scroll_to_end_vertically(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* to its *end*, vertically.

    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.vert.toEnd(steps=steps)
Perform scroll forward (vertically)action on the object which has *selectors* attributes.
Return whether the object can be Scroll or not.
Example:
| ${can_be_scroll} | Scroll Forward Vertically | className=android.widget.ListView | | # Scroll forward the UI object with class name |
| ${can_be_scroll} | Scroll Forward Vertically | 100 | className=android.widget.ListView | # Scroll with steps |
def scroll_forward_vertically(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* forward, vertically.

    Return whether the object could be scrolled.

    Example:
    | ${can_be_scroll} | Scroll Forward Vertically | className=android.widget.ListView |                                   | # Scroll forward the UI object with class name |
    | ${can_be_scroll} | Scroll Forward Vertically | 100                               | className=android.widget.ListView | # Scroll with steps |
    """
    target = self.device(**selectors)
    return target.scroll.vert.forward(steps=steps)
Perform scroll backward (vertically)action on the object which has *selectors* attributes.
Return whether the object can be Scroll or not.
See `Scroll Forward Vertically` for more details.
def scroll_backward_vertically(self, steps=10, *args, **selectors):
    """
    Scroll the object matching *selectors* backward, vertically.

    Return whether the object could be scrolled.
    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.vert.backward(steps=steps)
Scroll(vertically) on the object: obj to specific UI object which has *selectors* attributes appears.
Return true if the UI object, else return false.
Example:
| ${list} | Get Object | className=android.widget.ListView | | # Get the list object |
| ${is_web_view} | Scroll To Vertically | ${list} | text=WebView | # Scroll to text:WebView. |
def scroll_to_vertically(self, obj, *args, **selectors):
    """
    Scroll *obj* vertically until the UI object matching *selectors* appears.

    Return true if that UI object is found, false otherwise.

    Example:
    | ${list}        | Get Object           | className=android.widget.ListView |              | # Get the list object |
    | ${is_web_view} | Scroll To Vertically | ${list}                           | text=WebView | # Scroll to text:WebView. |
    """
    return obj.scroll.vert.to(**selectors)
Take a screenshot of device and log in the report with timestamp, scale for screenshot size and quality for screenshot quality
default scale=1.0 quality=100
def screenshot(self, scale=None, quality=None):
    """
    Take a device screenshot and embed it in the log with a timestamp.

    `scale` controls the screenshot size (default 1.0) and `quality` the
    image quality (default 100).
    """
    output_dir = BuiltIn().get_variable_value('${OUTPUTDIR}')
    # timestamped file name so successive screenshots never collide
    stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')
    screenshot_path = '%s%s%s.png' % (output_dir, os.sep, stamp)
    self.device.screenshot(screenshot_path, scale, quality)
    logger.info('\n<a href="%s">%s</a><br><img src="%s">' % (screenshot_path, stamp, screenshot_path), html=True)
The watcher click on the object which has the *selectors* when conditions match.
def register_click_watcher(self, watcher_name, selectors, *condition_list):
    """
    Register a watcher that clicks the object matching *selectors*
    whenever all the given conditions match.
    """
    new_watcher = self.device.watcher(watcher_name)
    for condition in condition_list:
        new_watcher.when(**self.__unicode_to_dict(condition))
    new_watcher.click(**self.__unicode_to_dict(selectors))
    self.device.watchers.run()
The watcher perform *press_keys* action sequentially when conditions match.
def register_press_watcher(self, watcher_name, press_keys, *condition_list):
    """
    Register a watcher that performs the *press_keys* actions sequentially
    whenever all the given conditions match.

    :param press_keys: comma-separated key names, e.g. ``back,home``
    """
    # The original hand-rolled comma scanner applied str() only to the
    # final fragment, leaving earlier fragments as unicode; split() plus a
    # uniform str() conversion is simpler and consistent.
    keys = [str(key) for key in press_keys.split(',')]
    watcher = self.device.watcher(watcher_name)
    for condition in condition_list:
        watcher.when(**self.__unicode_to_dict(condition))
    watcher.press(*keys)
    self.device.watchers.run()
Remove watcher with *watcher_name* or remove all watchers.
def remove_watchers(self, watcher_name=None):
    """
    Remove the watcher named *watcher_name*, or all watchers when no name
    is given.
    """
    # idiom fix: compare against None with ``is``, not ``==``
    if watcher_name is None:
        self.device.watchers.remove()
    else:
        self.device.watchers.remove(watcher_name)
Return the count of UI object with *selectors*
Example:
| ${count} | Get Count | text=Accessibility | # Get the count of UI object text=Accessibility |
| ${accessibility_text} | Get Object | text=Accessibility | # These two keywords combination |
| ${count} | Get Count Of Object | ${accessibility_text} | # do the same thing. |
def get_count(self, *args, **selectors):
    """
    Return the count of UI objects matching *selectors*.

    Example:
    | ${count}              | Get Count           | text=Accessibility    | # Get the count of UI object text=Accessibility |
    | ${accessibility_text} | Get Object          | text=Accessibility    | # These two keywords combination |
    | ${count}              | Get Count Of Object | ${accessibility_text} | # do the same thing. |
    """
    return self.get_count_of_object(self.get_object(**selectors))
return info dictionary of the *obj*
The info example:
{
u'contentDescription': u'',
u'checked': False,
u'scrollable': True,
u'text': u'',
u'packageName': u'com.android.launcher',
u'selected': False,
u'enabled': True,
u'bounds':
{
u'top': 231,
u'left': 0,
u'right': 1080,
u'bottom': 1776
},
u'className': u'android.view.View',
u'focusable': False,
u'focused': False,
u'clickable': False,
u'checkable': False,
u'chileCount': 1,
u'longClickable': False,
u'visibleBounds':
{
u'top': 231,
u'left': 0,
u'right': 1080,
u'bottom': 1776
}
}
def get_info_of_object(self, obj, selector=None):
    """
    Return the info dictionary of *obj*, or a single entry when *selector*
    is given.

    The info dictionary contains entries such as 'contentDescription',
    'checked', 'scrollable', 'text', 'packageName', 'selected', 'enabled',
    'bounds', 'className', 'focusable', 'focused', 'clickable', 'checkable',
    'chileCount', 'longClickable' and 'visibleBounds' (bounds values being
    nested dicts with 'top'/'left'/'right'/'bottom').
    """
    info = obj.info
    return info.get(selector) if selector else info
This keyword can use object method from original python uiautomator
See more details from https://github.com/xiaocong/uiautomator
Example:
| ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
| Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |
def call(self, obj, method, *args, **selectors):
    """
    Invoke *method* on a UI *obj* from the original python uiautomator.

    See more details from https://github.com/xiaocong/uiautomator

    Example:
    | ${accessibility_text} | Get Object            | text=Accessibility | # Get the UI object |
    | Call                  | ${accessibility_text} | click              | # Call the method of the UI object 'click' |
    """
    return getattr(obj, method)(**selectors)
Set *input_text* to the UI object with *selectors*
def set_text(self, input_text, *args, **selectors):
    """Set *input_text* on the UI object matching *selectors*."""
    target = self.device(**selectors)
    target.set_text(input_text)
Clear text of the UI object with *selectors*
def clear_text(self, *args, **selectors):
    """
    Clear text of the UI object with *selectors*

    Calls clear_text() repeatedly because one call may only clear the
    visible part of a long text; stops once the field is empty or a call
    makes no further progress.
    """
    while True:
        target = self.device(**selectors)
        text = target.info['text']
        target.clear_text()
        remain_text = target.info['text']
        # done when empty, or bail out when clear_text() changed nothing
        if text == '' or remain_text == text:
            break
Open notification
Built in support for Android 4.3 (API level 18)
Using swipe action as a workaround for API level lower than 18
def open_notification(self):
    """
    Open the notification shade.

    Built-in support exists from Android 4.3 (API level 18); older API
    levels fall back to a top-to-bottom swipe as a workaround.
    """
    if self.device.info['sdkInt'] < 18:
        # drag from the very top of the screen down to its bottom edge
        height = self.device.info['displayHeight']
        self.device.swipe(1, 1, 1, height - 1, 1)
    else:
        self.device.open.notification()
Sleep(no action) for *time* (in millisecond)
def sleep(self, time):
    """
    Sleep (no action) for *time* milliseconds.

    Implemented as a wait for a UI object that never exists, so the call
    simply consumes the requested timeout.
    """
    never_there = 'wait for %s' % str(time)
    self.device(text=never_there).wait.exists(timeout=time)
[Test Agent]
Connect to *ssid* with *password*
def connect_to_wifi(self, ssid, password=None):
    """
    [Test Agent]
    Connect to *ssid* with *password*.
    """
    # broadcast to the on-device test agent, which performs the connect
    self.adb.shell_cmd(
        'am broadcast -a testagent -e action CONNECT_TO_WIFI -e ssid %s -e password %s'
        % (ssid, password))
Merge two precomputed similarity lists, truncating the result to `clip` most similar items.
def merge_sims(oldsims, newsims, clip=None):
    """Merge two precomputed similarity lists, truncating the result to `clip` most similar items."""
    if oldsims is None:
        merged = newsims if newsims is not None else []
    elif newsims is None:
        merged = oldsims
    else:
        # highest similarity first
        merged = sorted(oldsims + newsims, key=lambda item: -item[1])
    return merged if clip is None else merged[:clip]
Delete all files created by this index, invalidating `self`. Use with care.
def terminate(self):
    """Delete all files created by this index, invalidating `self`. Use with care.

    Best effort: individual failures are logged (or ignored) rather than
    propagated, since the object is being torn down anyway.
    """
    try:
        self.id2sims.terminate()
    except Exception:
        pass
    import glob
    for fname in glob.glob(self.fname + '*'):
        try:
            os.remove(fname)
            logger.info("deleted %s" % fname)
        except Exception as e:  # fixed Python-2-only `except Exception, e` syntax
            logger.warning("failed to delete %s: %s" % (fname, e))
    # snapshot the keys: delattr mutates __dict__ while we iterate
    for val in list(self.__dict__.keys()):
        try:
            delattr(self, val)
        except Exception:
            pass
Update fresh index with new documents (potentially replacing old ones with
the same id). `fresh_docs` is a dictionary-like object (=dict, sqlitedict, shelve etc)
that maps document_id->document.
def index_documents(self, fresh_docs, model):
    """
    Update fresh index with new documents (potentially replacing old ones with
    the same id). `fresh_docs` is a dictionary-like object (=dict, sqlitedict,
    shelve etc) that maps document_id->document.
    """
    docids = fresh_docs.keys()
    logger.info("adding %i documents to %s" % (len(docids), self))
    # lazily convert the documents; docs2vecs consumes the generator
    vectors = model.docs2vecs(fresh_docs[docid] for docid in docids)
    self.qindex.add_documents(vectors)
    self.qindex.save()
    self.update_ids(docids)
Update id->pos mapping with new document ids.
def update_ids(self, docids):
    """Update id->pos mapping with new document ids.

    Each incoming docid is assigned the next free position (self.length).
    When a docid was already indexed, its old position is unmapped and any
    cached similarity results for it are dropped, so the new document
    effectively replaces the old one.
    """
    logger.info("updating %i id mappings" % len(docids))
    for docid in docids:
        if docid is not None:
            pos = self.id2pos.get(docid, None)
            if pos is not None:
                # docid already indexed: forget its old position
                logger.info("replacing existing document %r in %s" % (docid, self))
                del self.pos2id[pos]
            self.id2pos[docid] = self.length
            # drop stale precomputed similarities for the replaced document
            try:
                del self.id2sims[docid]
            except:
                pass
        # positions advance for every document, even unnamed (None) ones
        self.length += 1
    self.id2sims.sync()
    self.update_mappings()
Synchronize id<->position mappings.
def update_mappings(self):
    """Synchronize id<->position mappings (rebuild pos2id from id2pos)."""
    self.pos2id = dict((position, docid) for docid, position in self.id2pos.iteritems())
    assert len(self.pos2id) == len(self.id2pos), "duplicate ids or positions detected"
Delete documents (specified by their ids) from the index.
def delete(self, docids):
    """Delete documents (specified by their ids) from the index.

    Unknown docids are silently skipped; positions are resynchronized at
    the end via `update_mappings()`.
    """
    logger.debug("deleting %i documents from %s" % (len(docids), self))
    deleted = 0
    for docid in docids:
        try:
            del self.id2pos[docid]
            deleted += 1
        except KeyError:  # narrowed from a bare except: docid not indexed
            continue
        try:
            del self.id2sims[docid]
        except KeyError:  # no cached similarities for this docid
            pass
    self.id2sims.sync()
    if deleted:
        logger.info("deleted %i documents from %s" % (deleted, self))
    self.update_mappings()
Convert raw similarity vector to a list of (docid, similarity) results.
def sims2scores(self, sims, eps=1e-7):
    """Convert raw similarity vector to a list of (docid, similarity) results.

    Accepts either a dense numpy vector (one score per position) or a
    sparse list of (position, score) pairs; returns at most `self.topsims`
    results, skipping deleted/rewritten positions and near-zero scores.
    """
    result = []
    if isinstance(sims, numpy.ndarray):
        # dense case: walk positions from highest to lowest |similarity|
        sims = abs(sims)  # TODO or maybe clip? are opposite vectors "similar" or "dissimilar"?!
        for pos in numpy.argsort(sims)[::-1]:
            if pos in self.pos2id and sims[pos] > eps:  # ignore deleted/rewritten documents
                result.append((self.pos2id[pos], sims[pos]))
                if len(result) == self.topsims:
                    break
    else:
        # sparse case: already (position, score) pairs
        for pos, score in sims:
            if pos in self.pos2id and abs(score) > eps:  # ignore deleted/rewritten documents
                result.append((self.pos2id[pos], abs(score)))
                if len(result) == self.topsims:
                    break
    return result
Return indexed vector corresponding to document `docid`.
def vec_by_id(self, docid):
    """Return indexed vector corresponding to document `docid`."""
    return self.qindex.vector_by_id(self.id2pos[docid])
Find the most similar documents to the (already indexed) document with `docid`.
def sims_by_id(self, docid):
    """Find the most similar documents to the (already indexed) document with `docid`.

    Uses the precomputed similarities in `self.id2sims` when available;
    otherwise queries the index directly.
    """
    cached = self.id2sims.get(docid, None)
    if cached is not None:
        return cached
    self.qindex.num_best = self.topsims
    raw = self.qindex.similarity_by_id(self.id2pos[docid])
    return self.sims2scores(raw)
Find the most similar documents to a given vector (=already processed document).
def sims_by_vec(self, vec, normalize=None):
    """
    Find the most similar documents to a given vector (=already processed document).

    Temporarily overrides the index's `normalize` flag; the old value is
    now restored even if the query raises (the original skipped the
    restore on exception, corrupting later queries).
    """
    if normalize is None:
        normalize = self.qindex.normalize
    norm = self.qindex.normalize  # store old value
    self.qindex.normalize = normalize
    try:
        self.qindex.num_best = self.topsims
        sims = self.qindex[vec]
    finally:
        self.qindex.normalize = norm  # restore old value of qindex.normalize
    return self.sims2scores(sims)
Merge documents from the other index. Update precomputed similarities
in the process.
def merge(self, other):
    """Merge documents from the other index. Update precomputed similarities
    in the process.

    Three passes: (1) query `other` with this index's documents to refresh
    the cached top-N lists of existing documents; (2) copy `other`'s live
    vectors into this index; (3) query this (now merged) index with the
    fresh documents to precompute their top-N lists.

    NOTE(review): `other.qindex`'s normalize/num_best settings are
    clobbered here and never restored — confirm `other` is discarded after
    a merge.
    """
    other.qindex.normalize, other.qindex.num_best = False, self.topsims
    # update precomputed "most similar" for old documents (in case some of
    # the new docs make it to the top-N for some of the old documents)
    logger.info("updating old precomputed values")
    pos, lenself = 0, len(self.qindex)
    for chunk in self.qindex.iter_chunks():
        for sims in other.qindex[chunk]:
            if pos in self.pos2id:
                # ignore masked entries (deleted, overwritten documents)
                docid = self.pos2id[pos]
                sims = self.sims2scores(sims)
                self.id2sims[docid] = merge_sims(self.id2sims[docid], sims, self.topsims)
            pos += 1
            if pos % 10000 == 0:
                logger.info("PROGRESS: updated doc #%i/%i" % (pos, lenself))
    self.id2sims.sync()
    logger.info("merging fresh index into optimized one")
    pos, docids = 0, []
    for chunk in other.qindex.iter_chunks():
        for vec in chunk:
            if pos in other.pos2id:  # don't copy deleted documents
                self.qindex.add_documents([vec])
                docids.append(other.pos2id[pos])
            pos += 1
    self.qindex.save()
    self.update_ids(docids)
    logger.info("precomputing most similar for the fresh index")
    pos, lenother = 0, len(other.qindex)
    # temporarily override this index's query settings; restored below
    norm, self.qindex.normalize = self.qindex.normalize, False
    topsims, self.qindex.num_best = self.qindex.num_best, self.topsims
    for chunk in other.qindex.iter_chunks():
        for sims in self.qindex[chunk]:
            if pos in other.pos2id:
                # ignore masked entries (deleted, overwritten documents)
                docid = other.pos2id[pos]
                self.id2sims[docid] = self.sims2scores(sims)
            pos += 1
            if pos % 10000 == 0:
                logger.info("PROGRESS: precomputed doc #%i/%i" % (pos, lenother))
    self.qindex.normalize, self.qindex.num_best = norm, topsims
    self.id2sims.sync()
Convert a single SimilarityDocument to vector.
def doc2vec(self, doc):
    """Convert a single SimilarityDocument to vector.

    :param doc: dict with a 'tokens' field (list of tokens)
    :return: vector in the space selected by `self.method`
    :raises ValueError: for an unknown `self.method` (the original fell
        through and silently returned None)
    """
    bow = self.dictionary.doc2bow(doc['tokens'])
    if self.method == 'lsi':
        return self.lsi[self.tfidf[bow]]
    elif self.method == 'lda':
        return self.lda[bow]
    elif self.method == 'lda_tfidf':
        return self.lda[self.tfidf[bow]]
    elif self.method == 'logentropy':
        return self.logent[bow]
    raise ValueError("unknown semantic method %r" % self.method)
Convert multiple SimilarityDocuments to vectors (batch version of doc2vec).
def docs2vecs(self, docs):
    """Convert multiple SimilarityDocuments to vectors (batch version of doc2vec).

    :param docs: iterable of dicts, each with a 'tokens' field
    :return: corpus of vectors in the space selected by `self.method`
    :raises ValueError: for an unknown `self.method` (the original fell
        through and silently returned None)
    """
    bows = (self.dictionary.doc2bow(doc['tokens']) for doc in docs)
    if self.method == 'lsi':
        return self.lsi[self.tfidf[bows]]
    elif self.method == 'lda':
        return self.lda[bows]
    elif self.method == 'lda_tfidf':
        return self.lda[self.tfidf[bows]]
    elif self.method == 'logentropy':
        return self.logent[bows]
    raise ValueError("unknown semantic method %r" % self.method)
Commit all changes, clear all caches.
def flush(self, save_index=False, save_model=False, clear_buffer=False):
    """
    Commit all pending changes, clear all caches.

    `save_index`: persist the fresh and optimized indexes to disk.
    `save_model`: persist the model to disk.
    `clear_buffer`: discard all buffered (not yet processed) documents and
    start a new, empty buffer.
    """
    if save_index:
        if self.fresh_index is not None:
            self.fresh_index.save(self.location('index_fresh'))
        if self.opt_index is not None:
            self.opt_index.save(self.location('index_opt'))
    if save_model:
        if self.model is not None:
            self.model.save(self.location('model'))
    self.payload.commit()
    if clear_buffer:
        if hasattr(self, 'fresh_docs'):
            try:
                self.fresh_docs.terminate()  # erase all buffered documents + file on disk
            except Exception:
                # FIX: narrowed from a bare `except` so KeyboardInterrupt /
                # SystemExit propagate; termination is best-effort anyway,
                # a fresh buffer is created below regardless
                pass
        self.fresh_docs = SqliteDict(journal_mode=JOURNAL_MODE)  # buffer defaults to a random location in temp
    self.fresh_docs.sync()
Explicitly close open file handles, databases etc.
def close(self):
    """
    Explicitly close open file handles, databases etc.

    Closing is best-effort: attributes may be missing or already closed,
    so each failure is ignored independently.
    """
    # FIX: the original repeated four bare try/`except:` blocks; a bare
    # `except` also swallows KeyboardInterrupt/SystemExit. Close each
    # resource independently so one failure does not stop the others.
    for close_resource in (
        lambda: self.payload.close(),
        lambda: self.model.close(),
        lambda: self.fresh_index.close(),
        lambda: self.opt_index.close(),
        lambda: self.fresh_docs.terminate(),
    ):
        try:
            close_resource()
        except Exception:
            pass
Add a sequence of documents to be processed (indexed or trained on).
Here, the documents are simply collected; real processing is done later,
during the `self.index` or `self.train` calls.
`buffer` can be called repeatedly; the result is the same as if it was
called once, with a concatenation of all the partial document batches.
The point is to save memory when sending large corpora over network: the
entire `documents` must be serialized into RAM. See `utils.upload_chunked()`.
A call to `flush()` clears this documents-to-be-processed buffer (`flush`
is also implicitly called when you call `index()` and `train()`).
def buffer(self, documents):
    """
    Queue a batch of documents for later processing (indexing or training).

    Documents are only collected here; the real work happens inside
    `self.index` or `self.train`. `buffer` may be called any number of
    times -- the batches simply accumulate, as if passed in a single call.
    This keeps memory bounded when streaming large corpora over the
    network (see `utils.upload_chunked()`).

    The buffer is emptied by `flush()`, which `index()` and `train()`
    invoke implicitly.
    """
    logger.info("adding documents to temporary buffer of %s" % (self))
    buffered = self.fresh_docs
    for doc in documents:
        docid = doc['id']
        if docid in buffered:
            logger.warning("asked to re-add id %r; rewriting old value" % docid)
        buffered[docid] = doc
    buffered.sync()
Create an indexing model. Will overwrite the model if it already exists.
All indexes become invalid, because documents in them use a now-obsolete
representation.
The model is trained on documents previously entered via `buffer`,
or directly on `corpus`, if specified.
def train(self, corpus=None, method='auto', clear_buffer=True, params=None):
    """
    Create an indexing model. Will overwrite the model if it already exists.
    All indexes become invalid, because documents in them use a now-obsolete
    representation.

    The model is trained on documents previously entered via `buffer`,
    or directly on `corpus`, if specified. With `method='auto'`, a simple
    log-entropy model is chosen for fewer than 1000 training documents,
    latent semantic indexing ('lsi') otherwise.

    Raises ValueError when no training documents are available.
    """
    if corpus is not None:
        # use the supplied corpus only (erase existing buffer, if any)
        self.flush(clear_buffer=True)
        self.buffer(corpus)
    if not self.fresh_docs:
        msg = "train called but no training corpus specified for %s" % self
        logger.error(msg)
        raise ValueError(msg)
    if method == 'auto':
        numdocs = len(self.fresh_docs)
        if numdocs < 1000:
            # FIX: use the module-level `logger` (was `logging.warning`,
            # which bypasses this module's logger configuration and is
            # inconsistent with every other log call in this class)
            logger.warning("too few training documents; using simple log-entropy model instead of latent semantic indexing")
            method = 'logentropy'
        else:
            method = 'lsi'
    if params is None:
        params = {}
    self.model = SimModel(self.fresh_docs, method=method, params=params)
    self.flush(save_model=True, clear_buffer=clear_buffer)
Permanently index all documents previously added via `buffer`, or
directly index documents from `corpus`, if specified.
The indexing model must already exist (see `train`) before this function
is called.
def index(self, corpus=None, clear_buffer=True):
    """
    Permanently index all documents previously added via `buffer`, or
    directly index documents from `corpus`, if specified.

    The indexing model must already exist (see `train`) before this
    function is called; otherwise AttributeError is raised.
    """
    if not self.model:
        message = 'must initialize model for %s before indexing documents' % self.basename
        logger.error(message)
        raise AttributeError(message)
    if corpus is not None:
        self.flush(clear_buffer=True)  # drop any previously buffered docs...
        self.buffer(corpus)            # ...and use the supplied corpus only
    if not self.fresh_docs:
        message = "index called but no indexing corpus specified for %s" % self
        logger.error(message)
        raise ValueError(message)
    # note: truthiness check (not an `is None` test) kept on purpose
    if not self.fresh_index:
        logger.info("starting a new fresh index for %s" % self)
        self.fresh_index = SimIndex(self.location('index_fresh'), self.model.num_features)
    self.fresh_index.index_documents(self.fresh_docs, self.model)
    if self.opt_index is not None:
        # the new versions supersede any copies in the optimized index
        self.opt_index.delete(self.fresh_docs.keys())
    logger.info("storing document payloads")
    for docid in self.fresh_docs:
        doc_payload = self.fresh_docs[docid].get('payload', None)
        if doc_payload is None:
            # HACK: exit on first doc without a payload (=assume all docs have payload, or none does)
            break
        self.payload[docid] = doc_payload
    self.flush(save_index=True, clear_buffer=clear_buffer)
Precompute top similarities for all indexed documents. This speeds up
`find_similar` queries by id (but not queries by fulltext).
Internally, documents are moved from a fresh index (=no precomputed similarities)
to an optimized index (precomputed similarities). Similarity queries always
query both indexes, so this split is transparent to clients.
If you add documents later via `index`, they go to the fresh index again.
To precompute top similarities for these new documents too, simply call
`optimize` again.
def optimize(self):
    """
    Precompute top similarities for all indexed documents. This speeds up
    `find_similar` queries by id (but not queries by fulltext).

    Internally, documents are moved from a fresh index (=no precomputed
    similarities) to an optimized index (precomputed similarities).
    Similarity queries always query both indexes, so this split is
    transparent to clients. Documents added later via `index` land in the
    fresh index again; call `optimize` again to precompute those too.
    """
    if self.fresh_index is None:
        # nothing was added since the last optimize -- no work to do
        logger.warning("optimize called but there are no new documents")
        return
    if self.opt_index is None:
        logger.info("starting a new optimized index for %s" % self)
        self.opt_index = SimIndex(self.location('index_opt'), self.model.num_features)
    self.opt_index.merge(self.fresh_index)
    self.fresh_index.terminate()  # delete old files
    self.fresh_index = None
    self.flush(save_index=True)
Drop all indexed documents. If `keep_model` is False, also drop the model.
def drop_index(self, keep_model=True):
    """Drop all indexed documents. If `keep_model` is False, also drop the model."""
    modelstr = "" if keep_model else "and model "
    logger.info("deleting similarity index " + modelstr + "from %s" % self.basename)

    def _remove_file(fname):
        # Best-effort removal of an on-disk artifact; failure is only logged.
        # FIX: `except Exception, e` is Python-2-only syntax, invalid on
        # Python 3; use the `as` form (valid on 2.6+ and 3.x).
        try:
            if os.path.exists(fname):
                os.remove(fname)
                logger.info("deleted %s" % fname)
        except Exception as e:
            logger.warning("failed to delete %s" % fname)

    # delete indexes
    for index in [self.fresh_index, self.opt_index]:
        if index is not None:
            index.terminate()
    self.fresh_index, self.opt_index = None, None
    # delete payload, then recreate an empty payload database
    if self.payload is not None:
        self.payload.close()
        _remove_file(self.location('payload'))
    self.payload = SqliteDict(self.location('payload'), autocommit=True, journal_mode=JOURNAL_MODE)
    # optionally, delete the model as well
    if not keep_model and self.model is not None:
        self.model.close()
        _remove_file(self.location('model'))
        self.model = None
    self.flush(save_index=True, save_model=True, clear_buffer=True)
Delete specified documents from the index.
def delete(self, docids):
    """Remove the given document ids from whichever indexes exist."""
    logger.info("asked to drop %i documents" % len(docids))
    for index in (self.opt_index, self.fresh_index):
        if index is not None:
            index.delete(docids)
    self.flush(save_index=True)
Find `max_results` most similar articles in the index, each having similarity
score of at least `min_score`. The resulting list may be shorter than `max_results`,
in case there are not enough matching documents.
`doc` is either a string (=document id, previously indexed) or a
dict containing a 'tokens' key. These tokens are processed to produce a
vector, which is then used as a query against the index.
The similar documents are returned in decreasing similarity order, as
`(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
is identical to what was supplied for this document during indexing.
def find_similar(self, doc, min_score=0.0, max_results=100):
    """
    Find `max_results` most similar articles in the index, each having similarity
    score of at least `min_score`. The resulting list may be shorter than `max_results`,
    in case there are not enough matching documents.

    `doc` is either a string (=document id, previously indexed) or a
    dict containing a 'tokens' key. These tokens are processed to produce a
    vector, which is then used as a query against the index.

    The similar documents are returned in decreasing similarity order, as
    `(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned
    is identical to what was supplied for this document during indexing.

    Raises RuntimeError while the server is locked for updates, and
    ValueError when `doc` is an id not present in any index.
    """
    logger.debug("received query call with %r" % doc)
    if self.is_locked():
        msg = "cannot query while the server is being updated"
        logger.error(msg)
        raise RuntimeError(msg)
    sims_opt, sims_fresh = None, None
    # cap how many results each index will compute for this query
    for index in [self.fresh_index, self.opt_index]:
        if index is not None:
            index.topsims = max_results
    if isinstance(doc, basestring):  # NOTE: `basestring` => Python 2 codebase
        # query by direct document id
        docid = doc
        if self.opt_index is not None and docid in self.opt_index:
            # optimized index has precomputed sims for this id
            sims_opt = self.opt_index.sims_by_id(docid)
            if self.fresh_index is not None:
                # also query the fresh index, reusing the stored vector
                vec = self.opt_index.vec_by_id(docid)
                sims_fresh = self.fresh_index.sims_by_vec(vec, normalize=False)
        elif self.fresh_index is not None and docid in self.fresh_index:
            sims_fresh = self.fresh_index.sims_by_id(docid)
            if self.opt_index is not None:
                vec = self.fresh_index.vec_by_id(docid)
                sims_opt = self.opt_index.sims_by_vec(vec, normalize=False)
        else:
            raise ValueError("document %r not in index" % docid)
    else:
        if 'topics' in doc:
            # user supplied vector directly => use that
            vec = gensim.matutils.any2sparse(doc['topics'])
        else:
            # query by an arbitrary text (=tokens) inside doc['tokens']
            vec = self.model.doc2vec(doc)  # convert document (text) to vector
        if self.opt_index is not None:
            sims_opt = self.opt_index.sims_by_vec(vec)
        if self.fresh_index is not None:
            sims_fresh = self.fresh_index.sims_by_vec(vec)
    merged = merge_sims(sims_opt, sims_fresh)
    logger.debug("got %s raw similars, pruning with max_results=%s, min_score=%s" %
        (len(merged), max_results, min_score))
    result = []
    for docid, score in merged:
        # merged results arrive sorted by decreasing score, so stop at the
        # first low score or once max_results entries are collected
        # (max_results <= 0 means "no limit")
        if score < min_score or 0 < max_results <= len(result):
            break
        result.append((docid, float(score), self.payload.get(docid, None)))
    return result
Return ids of all indexed documents.
def keys(self):
    """Return the ids of all indexed documents (fresh index first, then optimized)."""
    ids = []
    for index in (self.fresh_index, self.opt_index):
        if index is not None:
            ids += index.keys()
    return ids
Make sure a session is open.
If it's not and autosession is turned on, create a new session automatically.
If it's not and autosession is off, raise an exception.
def check_session(self):
    """
    Ensure a modification session is open.

    With autosession on, a missing session is opened transparently;
    with autosession off, a missing session raises RuntimeError.
    """
    if self.session is not None:
        return
    if not self.autosession:
        raise RuntimeError("must open a session before modifying %s" % self)
    self.open_session()
Open a new session to modify this server.
You can either call this fnc directly, or turn on autosession which will
open/commit sessions for you transparently.
def open_session(self):
    """
    Open a new session to modify this server.

    You can either call this fnc directly, or turn on autosession which will
    open/commit sessions for you transparently.

    Raises RuntimeError if a session is already open.
    """
    if self.session is not None:
        msg = "session already open; commit it or rollback before opening another one in %s" % self
        logger.error(msg)
        raise RuntimeError(msg)
    logger.info("opening a new session")
    logger.info("removing %s" % self.loc_session)
    try:
        shutil.rmtree(self.loc_session)
    except Exception:
        # FIX: narrowed from a bare `except` (which would also swallow
        # KeyboardInterrupt/SystemExit); the directory may simply not
        # exist yet, so failure here is expected and non-fatal
        logger.info("failed to delete %s" % self.loc_session)
    # work on a copy of the stable state; `commit` swaps it in later
    logger.info("cloning server from %s to %s" %
                (self.loc_stable, self.loc_session))
    shutil.copytree(self.loc_stable, self.loc_session)
    self.session = SimServer(self.loc_session, use_locks=self.use_locks)
    self.lock_update.acquire()
Buffer documents, in the current session
def buffer(self, *args, **kwargs):
    """Buffer documents within the currently open session."""
    self.check_session()
    return self.session.buffer(*args, **kwargs)
Index documents, in the current session
def index(self, *args, **kwargs):
    """Index documents within the current session; auto-commit when autosession is on."""
    self.check_session()
    outcome = self.session.index(*args, **kwargs)
    if self.autosession:
        self.commit()
    return outcome
Drop all indexed documents from the session. Optionally, drop model too.
def drop_index(self, keep_model=True):
    """Drop all indexed documents from the session; optionally drop the model too."""
    self.check_session()
    outcome = self.session.drop_index(keep_model)
    if self.autosession:
        self.commit()
    return outcome
Delete documents from the current session.
def delete(self, docids):
    """Delete documents within the current session; auto-commit when autosession is on."""
    self.check_session()
    outcome = self.session.delete(docids)
    if self.autosession:
        self.commit()
    return outcome
Optimize index for faster by-document-id queries.
def optimize(self):
    """Optimize the index for faster by-document-id queries; auto-commit when autosession is on."""
    self.check_session()
    outcome = self.session.optimize()
    if self.autosession:
        self.commit()
    return outcome
Commit changes made by the latest session.
def commit(self):
    """Commit changes made by the latest session, promoting it to the stable state."""
    if self.session is not None:
        logger.info("committing transaction in %s" % self)
        tmp = self.stable
        # promote the session server to stable; keep the old stable object
        # in `tmp` so its resources can be released below
        self.stable, self.session = self.session, None
        self.istable = 1 - self.istable  # flip the stable/session slot marker
        self.write_istable()  # persist which slot is stable before unlocking
        tmp.close()  # don't wait for gc, release resources manually
        self.lock_update.release()
    else:
        logger.warning("commit called but there's no open session in %s" % self)
Ignore all changes made in the latest session (terminate the session).
def rollback(self):
    """Discard all changes made in the latest session and terminate it."""
    if self.session is None:
        logger.warning("rollback called but there's no open session in %s" % self)
        return
    logger.info("rolling back transaction in %s" % self)
    self.session.close()
    self.session = None
    self.lock_update.release()
Turn autosession (automatic committing after each modification call) on/off.
If value is None, only query the current value (don't change anything).
def set_autosession(self, value=None):
    """
    Turn autosession (automatic committing after each modification call) on/off.

    When `value` is None, the current setting is only queried, not changed.
    Changing the setting rolls back any open session first.
    """
    if value is None:
        return self.autosession
    self.rollback()
    self.autosession = value
    return self.autosession
Delete all files created by this server, invalidating `self`. Use with care.
def terminate(self):
    """Delete all files created by this server, invalidating `self`. Use with care."""
    logger.info("deleting entire server %s" % self)
    self.close()
    try:
        shutil.rmtree(self.basedir)
        logger.info("deleted server under %s" % self.basedir)
        # delete everything from self, so that using this object afterwards
        # results in an error as quickly as possible
        # FIX: snapshot the attribute names first -- on Python 3,
        # `self.__dict__.keys()` is a live view and deleting attributes
        # while iterating it raises RuntimeError
        for val in list(self.__dict__):
            try:
                delattr(self, val)
            except Exception:
                pass
    except Exception as e:
        # FIX: `except Exception, e` is Python-2-only syntax; the `as`
        # form is valid on Python 2.6+ and 3.x
        logger.warning("failed to delete SessionServer: %s" % (e))
Find similar articles.
With autosession off, use the index state *before* current session started,
so that changes made in the session will not be visible here. With autosession
on, close the current session first (so that session changes *are* committed
and visible).
def find_similar(self, *args, **kwargs):
    """
    Find similar articles.

    With autosession off, the query runs against the index state from
    *before* the current session started, so uncommitted session changes
    stay invisible. With autosession on, any pending session is committed
    first, so its changes *are* visible to the query.
    """
    if self.autosession and self.session is not None:
        # commit the pending transaction so the query sees fresh data
        self.commit()
    return self.stable.find_similar(*args, **kwargs)
Fetch a profile.
async def profile(self, ctx, platform, name):
    '''Fetch a profile.'''
    player = await self.client.get_player(platform, name)
    solos = await player.get_solos()
    kills = solos.kills.value
    await ctx.send("# of kills in solos for {}: {}".format(name, kills))
Begins watching source files for changes.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
def start(io_loop=None, check_time=2):
    """Begin watching source files for changes.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    io_loop = io_loop or asyncio.get_event_loop()
    if io_loop in _io_loops:
        return  # this loop is already being watched
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        logger.warning("aiohttp_autoreload started more than once in the same process")
    modify_times = {}
    periodic_check = functools.partial(_reload_on_update, modify_times)
    logger.debug("Starting periodic checks for code changes")
    call_periodic(check_time, periodic_check, loop=io_loop)
Yield 'chunk_size' items from 'data' at a time.
def generate_chunks(data, chunk_size=DEFAULT_CHUNK_SIZE):
    """Yield successive lists of up to `chunk_size` items from `data`."""
    values = iter(repeated.getvalues(data))
    # iter(callable, sentinel) stops once islice drains the iterator and
    # the slice comes back empty
    for batch in iter(lambda: list(itertools.islice(values, chunk_size)), []):
        yield batch
Repeatedly call fold and merge on data and then finalize.
Arguments:
data: Input for the fold function.
reducer: The IReducer to use.
chunk_size: How many items should be passed to fold at a time?
Returns:
Return value of finalize.
def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE):
    """Repeatedly call fold and merge on data and then finalize.

    Arguments:
        reducer: The IReducer to use.
        data: Input for the fold function.
        chunk_size: How many items should be passed to fold at a time?
            A falsy value disables chunking entirely.

    Returns:
        Return value of finalize.
    """
    if not chunk_size:
        # chunking disabled: fold everything in a single pass
        return finalize(reducer, fold(reducer, data))
    # Chunking lets us reduce e.g. a large file without holding it all in
    # memory, while staying much faster than folding element by element.
    chunks = generate_chunks(data, chunk_size)
    accumulated = fold(reducer, next(chunks))
    for batch in chunks:
        accumulated = merge(reducer, accumulated, fold(reducer, batch))
    return finalize(reducer, accumulated)
The if-else pairs.
def conditions(self):
    """The if-else pairs."""
    kids = self.children
    # pair each even-slot child with the odd-slot child that follows it;
    # a trailing unpaired child is not yielded here
    for pair in zip(kids[0::2], kids[1::2]):
        yield pair
**Purpose**: Substitute placeholders in staging attributes of a Task with actual paths to the corresponding tasks.
:arguments:
:path: string describing the staging paths, possibly containing a placeholder
:placeholder_dict: dictionary holding the values for placeholders
def resolve_placeholders(path, placeholder_dict):
    """
    **Purpose**: Substitute placeholders in staging attributes of a Task with actual paths to the corresponding tasks.

    :arguments:
        :path: string describing the staging paths, possibly containing a placeholder
        :placeholder_dict: dictionary holding the values for placeholders
    """
    try:
        # NOTE: Python 2 codebase -- `unicode` does not exist on Python 3
        if isinstance(path, unicode):
            path = str(path)
        if not isinstance(path, str):
            # project-specific TypeError variant taking keyword details
            raise TypeError(expected_type=str, actual_type=type(path))
        if '$' not in path:
            # nothing to resolve
            return path
        # Extract placeholder from path
        if len(path.split('>')) == 1:
            placeholder = path.split('/')[0]
        else:
            if path.split('>')[0].strip().startswith('$'):
                placeholder = path.split('>')[0].strip().split('/')[0]
            else:
                placeholder = path.split('>')[1].strip().split('/')[0]
        # SHARED
        if placeholder == "$SHARED":
            return path.replace(placeholder, 'pilot://')
        # Expected placeholder format:
        # $Pipeline_{pipeline.uid}_Stage_{stage.uid}_Task_{task.uid}
        broken_placeholder = placeholder.split('/')[0].split('_')
        if not len(broken_placeholder) == 6:
            raise ValueError(
                obj='placeholder',
                attribute='task',
                expected_value='$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED',
                actual_value=broken_placeholder)
        pipeline_name = broken_placeholder[1]
        stage_name = broken_placeholder[3]
        task_name = broken_placeholder[5]
        resolved_placeholder = None
        if pipeline_name in placeholder_dict.keys():
            if stage_name in placeholder_dict[pipeline_name].keys():
                if task_name in placeholder_dict[pipeline_name][stage_name].keys():
                    resolved_placeholder = path.replace(placeholder, placeholder_dict[
                        pipeline_name][stage_name][task_name]['path'])
                else:
                    logger.warning('%s not assigned to any task in Stage %s Pipeline %s' %
                                   (task_name, stage_name, pipeline_name))
            else:
                logger.warning('%s not assigned to any Stage in Pipeline %s' % (
                    stage_name, pipeline_name))
        else:
            logger.warning('%s not assigned to any Pipeline' % (pipeline_name))
        if not resolved_placeholder:
            # FIX: the original passed no arguments for the three %s
            # placeholders, so they were logged literally; interpolate the
            # actual names instead
            logger.warning('No placeholder could be found for task name %s \
                stage name %s and pipeline name %s. Please be sure to \
                use object names and not uids in your references,i.e, \
                $Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name)' %
                (task_name, stage_name, pipeline_name))
            raise ValueError(
                obj='placeholder',
                attribute='task',
                expected_value='$Pipeline_(pipeline_name)_Stage_(stage_name)_Task_(task_name) or $SHARED',
                actual_value=broken_placeholder)
        return resolved_placeholder
    except Exception as ex:
        # FIX: `except Exception, ex` is Python-2-only syntax; the `as`
        # form works on 2.6+ and 3.x
        logger.exception('Failed to resolve placeholder %s, error: %s' % (path, ex))
        raise
Purpose: Parse a Task object to extract the files to be staged as the input.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be linked/uploaded/copied/moved.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged in
def get_input_list_from_task(task, placeholder_dict):
    """
    Purpose: Parse a Task object to extract the files to be staged as the input.

    Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
    is to be linked/uploaded/copied/moved.

    :arguments:
        :task: EnTK Task object
        :placeholder_dict: dictionary holding the values for placeholders

    :return: list of RP directives for the files that need to be staged in
    """
    # NOTE: docstring fixed -- the original said "output"/"staged out" for
    # this *input* staging helper.
    try:
        if not isinstance(task, Task):
            raise TypeError(expected_type=Task, actual_type=type(task))

        def _directive(path, action=None):
            # 'source > target' picks an explicit target; otherwise the
            # target defaults to the source's basename. `action` is omitted
            # entirely for plain uploads (matches the original directives).
            parts = path.split('>')
            source = parts[0].strip()
            if len(parts) > 1:
                target = parts[1].strip()
            else:
                target = os.path.basename(source)
            directive = {'source': source, 'target': target}
            if action is not None:
                directive['action'] = action
            return directive

        input_data = []
        # (attribute, RP action) pairs, in the original processing order;
        # None => upload directive without an 'action' key
        for paths, action in ((task.link_input_data, rp.LINK),
                              (task.upload_input_data, None),
                              (task.copy_input_data, rp.COPY),
                              (task.move_input_data, rp.MOVE)):
            if paths:
                for path in paths:
                    path = resolve_placeholders(path, placeholder_dict)
                    input_data.append(_directive(path, action))
        return input_data
    except Exception as ex:
        # FIX: `except Exception, ex` is Python-2-only syntax
        logger.exception('Failed to get input list of files from task, error: %s' % ex)
        raise
Purpose: Parse a Task object to extract the files to be staged as the output.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged out
def get_output_list_from_task(task, placeholder_dict):
    """
    Purpose: Parse a Task object to extract the files to be staged as the output.

    Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
    is to be copied/downloaded/moved.

    :arguments:
        :task: EnTK Task object
        :placeholder_dict: dictionary holding the values for placeholders

    :return: list of RP directives for the files that need to be staged out
    """
    try:
        if not isinstance(task, Task):
            raise TypeError(expected_type=Task, actual_type=type(task))

        def _directive(path, action=None):
            # 'source > target' picks an explicit target; otherwise the
            # target defaults to the source's basename. `action` is omitted
            # entirely for plain downloads (matches the original directives).
            parts = path.split('>')
            source = parts[0].strip()
            if len(parts) > 1:
                target = parts[1].strip()
            else:
                target = os.path.basename(source)
            directive = {'source': source, 'target': target}
            if action is not None:
                directive['action'] = action
            return directive

        output_data = []
        # (attribute, RP action) pairs, in the original processing order;
        # None => download directive without an 'action' key
        for paths, action in ((task.copy_output_data, rp.COPY),
                              (task.download_output_data, None),
                              (task.move_output_data, rp.MOVE)):
            if paths:
                for path in paths:
                    path = resolve_placeholders(path, placeholder_dict)
                    output_data.append(_directive(path, action))
        return output_data
    except Exception as ex:
        # FIX: `except Exception, ex` is Python-2-only syntax
        logger.exception('Failed to get output list of files from task, error: %s' % ex)
        raise
Purpose: Create a Compute Unit description based on the defined Task.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: ComputeUnitDescription
def create_cud_from_task(task, placeholder_dict, prof=None):
    """
    Purpose: Create a Compute Unit description based on the defined Task.

    :arguments:
        :task: EnTK Task object
        :placeholder_dict: dictionary holding the values for placeholders
        :prof: optional profiler; when given, create/done events are recorded

    :return: ComputeUnitDescription
    """
    try:
        logger.debug('Creating CU from Task %s' % (task.uid))
        if prof:
            prof.prof('cud from task - create', uid=task.uid)
        cud = rp.ComputeUnitDescription()
        # pack task/stage/pipeline identity into the CU name, so that
        # create_task_from_cu() can reconstruct the Task later
        cud.name = '%s,%s,%s,%s,%s,%s' % (task.uid, task.name,
                task.parent_stage['uid'], task.parent_stage['name'],
                task.parent_pipeline['uid'], task.parent_pipeline['name'])
        cud.pre_exec = task.pre_exec
        cud.executable = task.executable
        cud.arguments = resolve_arguments(task.arguments, placeholder_dict)
        cud.post_exec = task.post_exec
        if task.tag:
            # tags are only resolvable when the pipeline has a name
            if task.parent_pipeline['name']:
                cud.tag = resolve_tags(tag=task.tag,
                                       parent_pipeline_name=task.parent_pipeline['name'],
                                       placeholder_dict=placeholder_dict)
        cud.cpu_processes = task.cpu_reqs['processes']
        cud.cpu_threads = task.cpu_reqs['threads_per_process']
        cud.cpu_process_type = task.cpu_reqs['process_type']
        cud.cpu_thread_type = task.cpu_reqs['thread_type']
        cud.gpu_processes = task.gpu_reqs['processes']
        cud.gpu_threads = task.gpu_reqs['threads_per_process']
        cud.gpu_process_type = task.gpu_reqs['process_type']
        cud.gpu_thread_type = task.gpu_reqs['thread_type']
        if task.lfs_per_process:
            cud.lfs_per_process = task.lfs_per_process
        if task.stdout:
            cud.stdout = task.stdout
        if task.stderr:
            cud.stderr = task.stderr
        cud.input_staging = get_input_list_from_task(task, placeholder_dict)
        cud.output_staging = get_output_list_from_task(task, placeholder_dict)
        if prof:
            prof.prof('cud from task - done', uid=task.uid)
        logger.debug('CU %s created from Task %s' % (cud.name, task.uid))
        return cud
    except Exception as ex:
        # FIX: `except Exception, ex` is Python-2-only syntax
        logger.exception('CU creation failed, error: %s' % ex)
        raise
Purpose: Create a Task based on the Compute Unit.
Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was
converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD.
Also, this is not required for the most part.
TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU
:arguments:
:cu: RP Compute Unit
:return: Task
def create_task_from_cu(cu, prof=None):
    """
    Purpose: Create a Task based on the Compute Unit.

    Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was
    converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD.
    Also, this is not required for the most part.

    TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU

    :arguments:
        :cu: RP Compute Unit
        :prof: optional profiler; when given, create/done events are recorded

    :return: Task
    """
    try:
        logger.debug('Create Task from CU %s' % cu.name)
        # cu.name packs six comma-separated fields (see create_cud_from_task):
        # task uid, task name, stage uid, stage name, pipeline uid, pipeline name.
        # FIX: split once instead of re-splitting the name seven times.
        fields = [part.strip() for part in cu.name.split(',')]
        if prof:
            prof.prof('task from cu - create', uid=fields[0])
        task = Task()
        task.uid = fields[0]
        task.name = fields[1]
        task.parent_stage['uid'] = fields[2]
        task.parent_stage['name'] = fields[3]
        task.parent_pipeline['uid'] = fields[4]
        task.parent_pipeline['name'] = fields[5]
        task.rts_uid = cu.uid
        # any non-DONE final state is collapsed to exit code 1
        task.exit_code = 0 if cu.state == rp.DONE else 1
        task.path = ru.Url(cu.sandbox).path
        if prof:
            prof.prof('task from cu - done', uid=fields[0])
        logger.debug('Task %s created from CU %s' % (task.uid, cu.name))
        return task
    except Exception as ex:
        # FIX: `except Exception, ex` is Python-2-only syntax
        logger.exception('Task creation from CU failed, error: %s' % ex)
        raise
Send Report E-mails.
def handle_noargs(self, **options):
    """Render and send the daily Redis metrics report e-mail to the site admins."""
    r = get_r()
    since = datetime.utcnow() - timedelta(days=1)
    # Collect the last day's metric history, grouped by category:
    # { Category: [('foo', [('m:foo:2012-07-18', 1), ...]), ...], ... }
    metrics = {}
    for category_name, slug_list in r.metric_slugs_by_category().items():
        metrics[category_name] = [
            (slug, r.get_metric_history(slug, since=since))
            for slug in slug_list
        ]
    data = {
        'today': since,
        'metrics': metrics,
    }
    template = "redis_metrics/email/report.{fmt}"
    body_txt = render_to_string(template.format(fmt='txt'), data)
    body_html = render_to_string(template.format(fmt='html'), data)
    msg = EmailMultiAlternatives(
        subject="Redis Metrics Report",
        body=body_txt,
        from_email=settings.DEFAULT_FROM_EMAIL,
        to=[email for name, email in settings.ADMINS],
    )
    msg.attach_alternative(body_html, "text/html")
    msg.send()
Unique ID of the current stage (fully qualified).
example:
>>> stage.luid
pipe.0001.stage.0004
:getter: Returns the fully qualified uid of the current stage
:type: String
def luid(self):
    """
    Unique ID of the current stage (fully qualified).

    example:
        >>> stage.luid
        pipe.0001.stage.0004

    :getter: Returns the fully qualified uid of the current stage
    :type: String
    """
    # Prefer human-readable names; fall back to uids when a name is unset.
    pipeline_part = self.parent_pipeline.get('name') or self.parent_pipeline['uid']
    stage_part = self.name or self.uid
    return '{0}.{1}'.format(pipeline_part, stage_part)
Adds tasks to the existing set of tasks of the Stage
:argument: set of tasks
def add_tasks(self, value):
    """
    Adds tasks to the existing set of tasks of the Stage

    :argument: set of tasks
    """
    validated_tasks = self._validate_entities(value)
    # In-place union keeps previously added tasks.
    self._tasks |= validated_tasks
    self._task_count = len(self._tasks)
Convert current Stage into a dictionary
:return: python dictionary
def to_dict(self):
    """
    Convert current Stage into a dictionary

    :return: python dictionary
    """
    return {
        'uid': self._uid,
        'name': self._name,
        'state': self._state,
        'state_history': self._state_history,
        'parent_pipeline': self._p_pipeline,
    }
Create a Stage from a dictionary. The change is in place.
:argument: python dictionary
:return: None
def from_dict(self, d):
    """
    Create a Stage from a dictionary. The change is in place.

    :argument: python dictionary
    :return: None
    """
    # Only overwrite uid/name with truthy values, so partial
    # dictionaries leave existing attributes untouched.
    if 'uid' in d:
        if d['uid']:
            self._uid = d['uid']

    if 'name' in d:
        if d['name']:
            self._name = d['name']

    if 'state' in d:
        # Accept byte and unicode strings (Python 2 code base).
        if isinstance(d['state'], str) or isinstance(d['state'], unicode):
            if d['state'] in states._stage_state_values.keys():
                self._state = d['state']
            else:
                # Bug fix: this previously passed the undefined name
                # `value`, raising a NameError instead of the intended
                # EnTK ValueError describing the invalid state.
                raise ValueError(obj=self._uid,
                                 attribute='state',
                                 expected_value=states._stage_state_values.keys(),
                                 actual_value=d['state'])
        else:
            raise TypeError(entity='state', expected_type=str,
                            actual_type=type(d['state']))
    else:
        # No state supplied: fall back to the initial stage state.
        self._state = states.INITIAL

    if 'state_history' in d:
        if isinstance(d['state_history'], list):
            self._state_history = d['state_history']
        else:
            raise TypeError(entity='state_history', expected_type=list,
                            actual_type=type(d['state_history']))

    if 'parent_pipeline' in d:
        if isinstance(d['parent_pipeline'], dict):
            self._p_pipeline = d['parent_pipeline']
        else:
            raise TypeError(entity='parent_pipeline', expected_type=dict,
                            actual_type=type(d['parent_pipeline']))
Purpose: Set state of all tasks of the current stage.
:arguments: String
def _set_tasks_state(self, value):
"""
Purpose: Set state of all tasks of the current stage.
:arguments: String
"""
if value not in states.state_numbers.keys():
raise ValueError(obj=self._uid,
attribute='set_tasks_state',
expected_value=states.state_numbers.keys(),
actual_value=value)
for task in self._tasks:
task.state = value |
Purpose: Check if all tasks of the current stage have completed, i.e., are in either DONE or FAILED state.
def _check_stage_complete(self):
"""
Purpose: Check if all tasks of the current stage have completed, i.e., are in either DONE or FAILED state.
"""
try:
for task in self._tasks:
if task.state not in [states.DONE, states.FAILED]:
return False
return True
except Exception, ex:
raise EnTKError(ex) |
Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task.
def _validate_entities(self, tasks):
"""
Purpose: Validate whether the 'tasks' is of type set. Validate the description of each Task.
"""
if not tasks:
raise TypeError(expected_type=Task, actual_type=type(tasks))
if not isinstance(tasks, set):
if not isinstance(tasks, list):
tasks = set([tasks])
else:
tasks = set(tasks)
for t in tasks:
if not isinstance(t, Task):
raise TypeError(expected_type=Task, actual_type=type(t))
return tasks |
Purpose: Assign a uid to the current object based on the sid passed. Pass the current uid to children of
current object
def _assign_uid(self, sid):
"""
Purpose: Assign a uid to the current object based on the sid passed. Pass the current uid to children of
current object
"""
self._uid = ru.generate_id('stage.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
for task in self._tasks:
task._assign_uid(sid)
self._pass_uid() |
Purpose: Assign the parent Stage and the parent Pipeline to all the tasks of the current stage.
:arguments: None
:return: None (tasks are updated in place)
def _pass_uid(self):
"""
Purpose: Assign the parent Stage and the parent Pipeline to all the tasks of the current stage.
:arguments: set of Tasks (optional)
:return: list of updated Tasks
"""
for task in self._tasks:
task.parent_stage['uid'] = self._uid
task.parent_stage['name'] = self._name
task.parent_pipeline['uid'] = self._p_pipeline['uid']
task.parent_pipeline['name'] = self._p_pipeline['name'] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.