index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
23,409
|
rodrigourban/llanerosales
|
refs/heads/master
|
/history/urls.py
|
# URL configuration for the sales-history app.
# BUG FIX: imported from django.urls.conf, which is an internal module;
# the documented public API is django.urls (same `path` object).
from django.urls import path

from .views import sell_list, sell_item, cancel_sell

app_name = 'history'

urlpatterns = [
    path('', sell_list, name="index"),
    path('sell_item/<int:pk>', sell_item, name="sell-item"),
    path('cancel_sell/<int:pk>', cancel_sell, name="cancel-sell"),
]
|
{"/orders/forms.py": ["/orders/models.py"], "/history/views.py": ["/history/models.py", "/history/forms.py", "/inventory/models.py"], "/inventory/urls.py": ["/inventory/views.py"], "/history/models.py": ["/inventory/models.py"], "/orders/urls.py": ["/orders/views.py"], "/orders/models.py": ["/inventory/models.py"], "/inventory/views.py": ["/inventory/forms.py", "/inventory/models.py"], "/inventory/forms.py": ["/inventory/models.py"], "/orders/views.py": ["/orders/models.py", "/orders/forms.py"], "/inventory/admin.py": ["/inventory/models.py"], "/history/urls.py": ["/history/views.py"], "/adminpanel/urls.py": ["/adminpanel/views.py"], "/history/forms.py": ["/history/models.py"]}
|
23,410
|
rodrigourban/llanerosales
|
refs/heads/master
|
/adminpanel/urls.py
|
# URL configuration for the admin-panel app.
# BUG FIX: imported from django.urls.conf, which is an internal module;
# the documented public API is django.urls (same `path` object).
from django.urls import path

from .views import (
    admin_panel,
    create_user,
    delete_user,
)

app_name = 'admin-panel'

urlpatterns = [
    path('', admin_panel, name="index"),
    path('create/', create_user, name="create-user"),
    path('delete/<int:pk>', delete_user, name="delete-user")
]
|
{"/orders/forms.py": ["/orders/models.py"], "/history/views.py": ["/history/models.py", "/history/forms.py", "/inventory/models.py"], "/inventory/urls.py": ["/inventory/views.py"], "/history/models.py": ["/inventory/models.py"], "/orders/urls.py": ["/orders/views.py"], "/orders/models.py": ["/inventory/models.py"], "/inventory/views.py": ["/inventory/forms.py", "/inventory/models.py"], "/inventory/forms.py": ["/inventory/models.py"], "/orders/views.py": ["/orders/models.py", "/orders/forms.py"], "/inventory/admin.py": ["/inventory/models.py"], "/history/urls.py": ["/history/views.py"], "/adminpanel/urls.py": ["/adminpanel/views.py"], "/history/forms.py": ["/history/models.py"]}
|
23,411
|
rodrigourban/llanerosales
|
refs/heads/master
|
/history/forms.py
|
from django import forms
from .models import Sell


class SellForm(forms.ModelForm):
    """ModelForm for creating/editing a Sell record (amount and sale price only)."""

    class Meta:
        model = Sell
        fields = ['amount', 'sell_price']


class CancelForm(forms.Form):
    """Confirmation form shown when cancelling a sale."""

    # NOTE(review): required=True on a BooleanField means the box MUST be
    # checked for the form to validate — if "do not restock" is meant to be
    # a valid choice, this probably wants required=False. TODO confirm.
    retrieve = forms.BooleanField(label="Reponer stock?", required=True, initial=False)
|
{"/orders/forms.py": ["/orders/models.py"], "/history/views.py": ["/history/models.py", "/history/forms.py", "/inventory/models.py"], "/inventory/urls.py": ["/inventory/views.py"], "/history/models.py": ["/inventory/models.py"], "/orders/urls.py": ["/orders/views.py"], "/orders/models.py": ["/inventory/models.py"], "/inventory/views.py": ["/inventory/forms.py", "/inventory/models.py"], "/inventory/forms.py": ["/inventory/models.py"], "/orders/views.py": ["/orders/models.py", "/orders/forms.py"], "/inventory/admin.py": ["/inventory/models.py"], "/history/urls.py": ["/history/views.py"], "/adminpanel/urls.py": ["/adminpanel/views.py"], "/history/forms.py": ["/history/models.py"]}
|
23,412
|
rodrigourban/llanerosales
|
refs/heads/master
|
/inventory/migrations/0002_auto_20190928_1406.py
|
# Generated by Django 2.1.5 on 2019-09-28 17:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Add a nullable ``created_by`` user FK to Item and Stock.

    ``SET_NULL`` keeps inventory rows alive when the creating user is
    deleted, which is why ``null=True`` is required on both fields.
    """

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('inventory', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='item_user', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='stock',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='stock_user', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/orders/forms.py": ["/orders/models.py"], "/history/views.py": ["/history/models.py", "/history/forms.py", "/inventory/models.py"], "/inventory/urls.py": ["/inventory/views.py"], "/history/models.py": ["/inventory/models.py"], "/orders/urls.py": ["/orders/views.py"], "/orders/models.py": ["/inventory/models.py"], "/inventory/views.py": ["/inventory/forms.py", "/inventory/models.py"], "/inventory/forms.py": ["/inventory/models.py"], "/orders/views.py": ["/orders/models.py", "/orders/forms.py"], "/inventory/admin.py": ["/inventory/models.py"], "/history/urls.py": ["/history/views.py"], "/adminpanel/urls.py": ["/adminpanel/views.py"], "/history/forms.py": ["/history/models.py"]}
|
23,420
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py
|
from SeleniumLibrary.base import keyword, LibraryComponent
from SeleniumLibrary.keywords import ElementKeywords as SeleniumElementKeywords
from selenium.webdriver.common.keys import Keys
class ElementKeywords(LibraryComponent):
    """Extra element-level keywords layered on top of SeleniumLibrary."""

    def __init__(self, ctx):
        LibraryComponent.__init__(self, ctx)
        # Delegate to SeleniumLibrary's stock element keywords.
        self.element_management = SeleniumElementKeywords(ctx)

    @keyword
    def clear_textfield_value(self, locator):
        """Empty a text field by sending one BACKSPACE+DELETE pair per character."""
        current_value = self.element_management.get_value(locator)
        for _ in range(len(current_value)):
            self.element_management.press_key(locator, Keys.BACK_SPACE)
            self.element_management.press_key(locator, Keys.DELETE)

    @keyword
    def scroll_to_element(self, locator):
        """Scroll the page until the element at *locator* is in view."""
        target = self.find_element(locator)
        self.driver.execute_script("arguments[0].scrollIntoView();", target)

    def _scroll_to_left_of_webElement(self, element):
        # Reset the element's own scroll position to its top-left corner.
        self.driver.execute_script("arguments[0].scrollTo(0,0);", element)

    @keyword
    def js_click(self, element):
        """Click *element* through JavaScript (bypasses pointer-event checks)."""
        self.driver.execute_script("arguments[0].click();", element)

    @keyword
    def get_child_element_by_tag_and_attribute(self, element, tag, attribute_name, attribute_value):
        """Return the first child of *element* with tag *tag* whose attribute
        *attribute_name* (stripped) equals *attribute_value*.

        Raises AssertionError when no child matches.
        """
        for candidate in element.find_elements_by_tag_name(tag):
            if candidate.get_attribute(attribute_name).strip() == attribute_value:
                return candidate
        message = "Child element '%s = %s' not found!" % (attribute_name, attribute_value)
        raise AssertionError(message)
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,421
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/MyProject/Libs/MyCustomizedLibrary/utilities/utilities.py
|
import re
class Utilities():
    """Small color-string helpers used by the generated keyword libraries."""

    def _convert_rgb_to_hex(self, rgb_string):
        """Convert an ``rgb(r, g, b)`` / ``rgba(r, g, b, a)`` string to ``#rrggbb``.

        The alpha channel, when present, is ignored.
        """
        color_tuple = rgb_string.replace("rgb(", "").replace("rgba(", "").replace(")", "").replace(" ", "")
        color_tuple = color_tuple.split(",")
        rgb = (int(color_tuple[0]), int(color_tuple[1]), int(color_tuple[2]))
        # BUG FIX: the original called an undefined helper ``rgb_to_hex``
        # (NameError at runtime); format the three channels directly.
        return "#%02x%02x%02x" % rgb

    def _is_rgb_color(self, color_str):
        """Return a truthy re.Match when *color_str* looks like rgb()/rgba(), else None/False."""
        rgb_pattern = r"rgb\((\d+),\s*(\d+),\s*(\d+)\)"
        # NOTE(review): the alpha component only matches integers, so values
        # like ``rgba(0, 0, 0, 0.5)`` are rejected — confirm that is intended.
        rgba_pattern = r"rgba\((\d+),\s*(\d+),\s*(\d+),\s*(\d+)\)"
        return re.match(rgb_pattern, color_str) or re.match(rgba_pattern, color_str)
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,422
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/templates/Libs/utilities/__init__.py
|
from .utilities import Utilities
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,423
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/generator.py
|
import optparse
import os
import shutil
from os import path
from os import walk
import sys
import enum
import re
CUSTOMIZED_LIB_DEFAULT_NAME = "MyCustomizedLibrary"
DIRECTORIES = ['Libs', 'Pages', 'Tests/Data', 'Tests', "Utilities", 'Webdrivers']
DIRECTORY_PATHS = {}
DEMO_URL = "http://automationpractice.com"
def create_options_parser():
    """Build the command-line parser for the generator.

    Options are split into a test-related group and a common group;
    default values are interpolated into help text by optparse.
    """
    desc = """This tool generates a template project of Robotframework for WebUI Testing."""
    parser = MyParser(description=desc)
    group1 = optparse.OptionGroup(parser, 'Test related options')
    group2 = optparse.OptionGroup(parser, 'Common options')
    # BUG FIX: the help strings used "%d", which optparse leaves verbatim;
    # "%default" is the marker optparse expands to the option's default.
    group1.add_option("--libs", dest="libs", help="Name of customized library [default: %default]",
                      default=CUSTOMIZED_LIB_DEFAULT_NAME)
    group2.add_option("-i", "--initialize", dest="init",
                      help="Initialize a blank new RobotFramework Project [default: %default]", action="store_true",
                      default=False)
    group2.add_option("--dir", dest="dir", help="Target directory for the test project [default: %default]",
                      default=os.path.join(".", "MyProject"))
    group2.add_option("--url", dest="url", help="Web App URL [default: %default]", default=DEMO_URL)
    group2.add_option("--browser", dest="browser", help="Web browser is used for testing [default: %default]",
                      default="chrome")
    group2.add_option("-e", "--headless", dest="headless", help="Start browser with HEADLESS mode [default: %default]",
                      action="store_true", default=False)
    parser.add_option_group(group1)
    parser.add_option_group(group2)
    return parser
class MyParser(optparse.OptionParser):
    """OptionParser variant that emits the epilog verbatim (no re-wrapping)
    and omits the epilog section from the generated help text."""

    def format_epilog(self, formatter):
        # Return the epilog untouched instead of letting optparse re-wrap it.
        return self.epilog

    def format_help(self, formatter=None):
        """Assemble usage + description + option help (epilog deliberately skipped)."""
        if formatter is None:
            formatter = self.formatter
        pieces = []
        if self.usage:
            pieces.append(self.get_usage() + "\n")
        if self.description:
            pieces.append(self.format_description(formatter) + "\n")
        pieces.append(self.format_option_help(formatter))
        return "".join(pieces)
def copy_template(src_file, des_file):
    """Copy the text content of *src_file* into *des_file* (overwriting it)."""
    with open(src_file, "r") as src:
        data = src.read()
    # BUG FIX: the destination handle was opened without ever being closed;
    # a context manager guarantees the write is flushed and the fd released.
    with open(des_file, "w") as des:
        des.write(data)
def get_all_files_in_dir(dir, includes):
    """Recursively collect file paths under *dir*.

    When *includes* is a non-empty list of extensions (e.g. ['.py']),
    only files with one of those extensions are kept; an empty list
    means every file is returned.
    """
    collected = []
    for dirpath, _dirnames, filenames in walk(dir):
        for name in filenames:
            matches = includes == [] or os.path.splitext(name)[1] in includes
            if matches:
                collected.append(os.path.join(dirpath, name))
    return collected
def write_on_template(filename, search_string, replace_string):
    """Replace every occurrence of *search_string* in *filename* in place.

    The file is rewritten only when the marker is actually present.
    """
    with open(filename) as handle:
        contents = handle.read()
    if search_string not in contents:
        return
    updated = contents.replace(search_string, replace_string)
    with open(filename, 'w') as handle:
        handle.write(updated)
def _clone_blank_files(project_dir):
    """Copy the bundled ``blank/`` skeleton (relative to the CWD) into *project_dir*."""
    print("Generate blank project....")
    # copytree requires that project_dir does not already exist.
    shutil.copytree("blank/", project_dir)
    print("DONE!")
def _clone_template_files(project_dir, lib_name):
    """Copy the ``templates/`` skeleton into *project_dir* and specialize it.

    Everything under ``Libs`` is moved one level down into
    ``Libs/<lib_name>``, then every .py/.robot file has its template
    markers rewritten: the library class name, the keywords-path
    placeholder, and the lowercase module-name marker.
    """
    print("Cloning from template....")
    shutil.copytree("templates/", project_dir)
    libs_path = os.path.join(project_dir, "Libs")
    mylib_path = os.path.join(libs_path, lib_name)
    print(os.listdir(libs_path))
    # Snapshot the children BEFORE creating the target dir so the new
    # <lib_name> folder is not moved into itself.
    children = os.listdir(libs_path)
    if not os.path.exists(mylib_path):
        os.makedirs(mylib_path)
    for child in children:
        old_path = os.path.join(libs_path, child)
        shutil.move(old_path, mylib_path)
    keywords_path = os.path.join(mylib_path, "keywords")
    files = get_all_files_in_dir(project_dir, ['.py', '.robot'])
    for file in files:
        # Rewrite the three template markers in place; the keywords path is
        # made project-relative by stripping the project_dir prefix.
        write_on_template(file, "MyCustomizedLibrary", lib_name)
        write_on_template(file, "LIBRARY_PATH_TO_DEFINE", keywords_path.replace(project_dir, ""))
        write_on_template(file, "CustomizedLibraryFile", lib_name.lower())
        print("Cloning: {} ... DONE!".format(file))
def _init_library(dir, project_name, lib_name):
print("Initalizing library ... ", end="")
mylib_path = os.path.join(dir, lib_name)
keywords_path = os.path.join(mylib_path, "keywords")
old_file = os.path.join(keywords_path, "mycustomizedlibrarywords.py")
new_file = os.path.join(keywords_path, lib_name.lower() + "keywords.py")
os.rename(old_file, new_file)
print("DONE!")
def _init_setting(dir, url, browser, headless):
    """Fill in the settings.yaml placeholders: demo URL, browser, headless flag."""
    print("Configuring setting .... ", end="")
    setting_file_path = os.path.join(dir, "settings.yaml")
    replacements = (
        (DEMO_URL, url),
        ("<BROWSER>", browser),
        ("<HEADLESS>", headless),
    )
    for marker, value in replacements:
        write_on_template(setting_file_path, marker, value)
    print("DONE!")
def _generate_blank_project(project_name):
    """Create a bare project skeleton by cloning the ``blank/`` template."""
    _clone_blank_files(project_name)
def main(options=None):
    """Entry point: parse CLI options and generate either a full template
    project or a blank skeleton.

    The *options* parameter is ignored — options are always re-parsed
    from sys.argv.  NOTE(review): the target directory is removed with
    ``shutil.rmtree`` before generation, so any existing project at that
    path is wiped without confirmation.
    """
    parser = create_options_parser()
    (options, args) = parser.parse_args()
    # 1. setup options
    is_blank_project = options.init or False
    # sys.exit only fires when --dir is explicitly empty; the option has a default.
    project_name = options.dir or sys.exit("Error: No path was defined")
    shutil.rmtree(project_name, ignore_errors=True)
    if not is_blank_project:
        lib_name = options.libs or CUSTOMIZED_LIB_DEFAULT_NAME
        url = options.url or DEMO_URL
        browser = options.browser or "chrome"
        is_headless = options.headless or False
        # 2. setup paths
        for dir in DIRECTORIES:
            DIRECTORY_PATHS.update({dir: os.path.join(project_name, dir)})
        # 3. Clone template files
        _clone_template_files(project_name, lib_name)
        # 4. Initialize template files
        _init_library(DIRECTORY_PATHS['Libs'], project_name, lib_name)
        # settings.yaml expects string flags, hence "True"/"False".
        _init_setting(DIRECTORY_PATHS['Tests/Data'], url, browser, "True" if is_headless else "False")
    # 2. Generate a blank project
    else:
        _generate_blank_project(project_name)


if __name__ == '__main__':
    main()
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,424
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/blank/Libs/MyCustomizedLibrary/keywords/__init__.py
|
from .<file_name1> import <Module1>
from .<file_name2> import <Module2>
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,425
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/templates/Libs/__init__.py
|
from SeleniumLibrary import SeleniumLibrary
from .keywords import *
from .extendedkeywords import *

__version__ = '1.0.0'


class MyCustomizedLibrary(SeleniumLibrary):
    """SeleniumLibrary subclass that registers the project's custom keyword
    components; the component classes come from the star-imports above."""

    # One shared library instance for the whole Robot Framework run.
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        # 30 is passed as SeleniumLibrary's first init argument — presumably
        # the default timeout in seconds; confirm against the installed version.
        SeleniumLibrary.__init__(self, 30)
        self.add_library_components([BrowserKeywords(self), ElementKeywords(self), MyCustomizedLibraryKeywords(self)])
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,426
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/templates/Libs/keywords/__init__.py
|
from .browserkeywords import BrowserKeywords
from .elementkeywords import ElementKeywords
from .CustomizedLibraryFilekeywords import MyCustomizedLibraryKeywords
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,427
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/blank/Libs/MyCustomizedLibrary/extendedkeywords/anykeywords.py
|
from <package> import <MODULE>
def extend_keyword1(self):
<Implementation goes here>
def extend_keyword2(self):
<Implementation goes here>
<MODULE>.extend_keyword1 = extend_keyword1
<MODULE>.extend_keyword2 = extend_keyword2
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,428
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/templates/Libs/extendedkeywords/webelement.py
|
from selenium.webdriver.remote.webelement import WebElement


def js_click(self):
    """Scroll this element into view, then click it via JavaScript."""
    self.scroll_to_webelement()
    # self.parent is the owning WebDriver instance.
    self.parent.execute_script("arguments[0].click();", self)


def scroll_to_webelement(self):
    """Scroll the page until this element is in view."""
    self.parent.execute_script("arguments[0].scrollIntoView();", self)


def get_textContent(self):
    """Return this element's textContent attribute, stripped."""
    return self.get_attribute("textContent").strip()


# Monkey-patch the helpers onto selenium's WebElement so every element
# instance gains them once this module is imported.
WebElement.js_click = js_click
WebElement.scroll_to_webelement = scroll_to_webelement
WebElement.get_textContent = get_textContent
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,429
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/blank/Libs/MyCustomizedLibrary/keywords/file_name2.py
|
from SeleniumLibrary.base import keyword, LibraryComponent
__version__ = '1.0.0'
class Module2(LibraryComponent):
def __init__(self, ctx):
LibraryComponent.__init__(self, ctx)
<Init-function-goes-here>
<Keywords go here>
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,430
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/templates/Libs/keywords/mycustomizedlibrarywords.py
|
from SeleniumLibrary.base import keyword, LibraryComponent
from SeleniumLibrary.keywords import WaitingKeywords
from .elementkeywords import ElementKeywords
__version__ = '1.0.0'
class MyCustomizedLibraryKeywords(LibraryComponent):
    """Site-specific keywords for the demo shop: product-tab navigation
    and danger-alert inspection."""

    def __init__(self, ctx):
        LibraryComponent.__init__(self, ctx)
        self.elementkeys = ElementKeywords(ctx)
        self.waiting_management = WaitingKeywords(ctx)

    def get_all_product_in_tab(self, locator):
        """Return the <li> product entries inside the tab at *locator*."""
        return self.find_element(locator).find_elements_by_tag_name("li")

    def click(self, element):
        # JS click avoids flaky "element not interactable" failures.
        self.elementkeys.js_click(element)

    @keyword
    def click_on_product_item(self, locator, product_name, product_price):
        """Click the product that matches both *product_name* and *product_price*.

        :locator: Locator of the tab container
        :product_name: Displayed name of product
        :product_price: Displayed price of product
        Raises AssertionError when no product in the tab matches.
        """
        items = self.get_all_product_in_tab(locator)
        for i in items:
            if self.get_product_name(i) == product_name:
                if self.get_product_price(i) == product_price:
                    self.click(self.get_product_clickable_item(i))
                    return
        message = "Item %s - %s not found in %s!" % (product_name, product_price, locator)
        raise AssertionError(message)

    def get_product_url(self, element):
        """Return the product's href (stripped).

        BUG FIX: the original omitted the itemprop name, calling
        ``_get_child_element_by_property`` with a missing required
        argument (TypeError); "url" matches ``get_product_clickable_item``.
        """
        return self._get_child_element_by_property(element, "url").get_attribute("href").strip()

    def get_product_clickable_item(self, element):
        return self._get_child_element_by_property(element, "url")

    def get_product_name(self, element):
        return self._get_child_element_by_property(element, "name").get_attribute("textContent").strip()

    def get_product_price(self, element):
        # Price is displayed with a leading "$"; strip the currency sign.
        return self._get_child_element_by_property(element, "price").get_attribute("textContent").strip().replace("$","")

    def _get_child_element_by_property(self, locator, property):
        # Look up a descendant by its schema.org itemprop attribute.
        return self.find_element(locator).find_element_by_xpath(".//*[@itemprop='"+property+"']")

    @keyword
    def get_alert_div(self):
        """Return the first danger-alert div in the page's center column."""
        return self.driver.find_element_by_xpath("//*[@id='center_column']//*[@class='alert alert-danger']")  # get the first element

    @keyword
    def is_alert_visible(self):
        """True when a danger alert is present (i.e. the lookup does not raise)."""
        try:
            self.get_alert_div()
            return True
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; lookup failures mean "not visible".
        except Exception:
            return False

    @keyword
    def wait_until_alert_displayed(self, timeout=None, error=None):
        """Block until a danger alert appears (or the wait times out)."""
        self.waiting_management._wait_until(
            lambda: self.is_alert_visible(),
            "Alert was not appeared in <TIMEOUT>",
            timeout,
            error
        )

    @keyword
    def get_login_alert_messages(self):
        """Return the stripped text of each <li> inside the alert box."""
        alert = self.get_alert_div()
        return [li.get_attribute("textContent").strip() for li in alert.find_elements_by_tag_name("li")]

    @keyword
    def is_error_message(self, error_message):
        """True when *error_message* is among the currently displayed alert messages."""
        return error_message in self.get_login_alert_messages()
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,431
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/MyProject/Libs/MyCustomizedLibrary/__init__.py
|
from SeleniumLibrary import SeleniumLibrary
from .keywords import *
from .extendedkeywords import *
import sys

__version__ = '1.0.0'


class MyCustomizedLibrary(SeleniumLibrary):
    """Generated library entry point: SeleniumLibrary plus the project's
    custom keyword components (imported via the star-imports above)."""

    # One shared library instance for the whole Robot Framework run.
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        # 30 is SeleniumLibrary's first init argument — presumably the
        # default timeout in seconds; confirm against the installed version.
        SeleniumLibrary.__init__(self, 30)
        self.add_library_components([BrowserKeywords(self), ElementKeywords(self), MyCustomizedLibraryKeywords(self)])


####################################################################################
# Make sure pydevd installed: pip install pydevd
# AND Uncomment following codes to enable debug mode
# sys.path.append("pydevd-pycharm.egg")
# import pydevd
# pydevd.settrace('localhost', port=12345, stdoutToServer=True, stderrToServer=True)
####################################################################################
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,432
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/blank/Libs/MyCustomizedLibrary/utilities/utilities.py
|
import re
class Utilities():
<Utility functions go here>
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,433
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py
|
from .browserkeywords import BrowserKeywords
from .elementkeywords import ElementKeywords
from .mycustomizedlibrarykeywords import MyCustomizedLibraryKeywords
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,434
|
nyonnguyen/rbfgenerator
|
refs/heads/master
|
/blank/Libs/MyCustomizedLibrary/__init__.py
|
from SeleniumLibrary import SeleniumLibrary
from .keywords import *
from .extendedkeywords import *

__version__ = '1.0.0'


class MyCustomizedLibrary(SeleniumLibrary):
    """Blank-template library entry point.

    ``Module1``/``Module2``/``OtherModules`` are placeholder component
    names the generator expects the user to replace with real keyword
    classes exported by ``.keywords``.
    """

    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        # 30: SeleniumLibrary init argument (timeout — presumably seconds).
        SeleniumLibrary.__init__(self, 30)
        self.add_library_components([Module1(self), Module2(self), OtherModules(self)])
|
{"/templates/Libs/__init__.py": ["/templates/Libs/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py"], "/MyProject/Libs/MyCustomizedLibrary/keywords/__init__.py": ["/MyProject/Libs/MyCustomizedLibrary/keywords/elementkeywords.py"], "/blank/Libs/MyCustomizedLibrary/__init__.py": ["/blank/Libs/MyCustomizedLibrary/keywords/__init__.py"]}
|
23,479
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/models.py
|
from time import timezone
from django.db.models.signals import post_save
from slugify import slugify
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
User = get_user_model()
class CustomerSite(models.Model):
    """Customer profile for orders placed through the web site."""

    # Owning Django auth user (model resolved via get_user_model()).
    user = models.ForeignKey(User, verbose_name='Пользователь', on_delete=models.CASCADE)
    phone = models.CharField(max_length=20, verbose_name='Номер телефона', null=True, blank=True)
    address = models.CharField(max_length=255, verbose_name='Адрес', null=True, blank=True)
    orders = models.ManyToManyField('Order', verbose_name='Заказы покупателя', related_name='related_order_site')

    def __str__(self):
        # "Покупатель: <first> <last>" — keeps admin listings readable.
        return "Покупатель: {} {}".format(self.user.first_name, self.user.last_name)
class CustomerBot(models.Model):
    """Customer profile for orders placed through the Telegram bot."""

    # Telegram user id — the natural key for bot users.
    user_id = models.PositiveIntegerField(verbose_name='ID пользователя', unique=True)
    name = models.CharField(verbose_name='Имя пользователя', max_length=50)
    username = models.CharField(verbose_name='Никнейм пользователя', max_length=50)
    phone = models.CharField(max_length=20, verbose_name='Номер телефона', null=True, blank=True)
    address = models.CharField(max_length=255, verbose_name='Адрес', null=True, blank=True)
    orders = models.ManyToManyField('Order', verbose_name='Заказы покупателя', related_name='related_order_bot')
    # Grants access to the bot's admin commands.
    is_admin = models.BooleanField(verbose_name='Админ', default=False)

    def __str__(self):
        return f'{self.user_id} {self.name} {self.username} {self.is_admin}'

    class Meta:
        verbose_name = 'Профиль пользователя бота'
        verbose_name_plural = 'Профили пользователей бота'
class Category(models.Model):
    """Product category; the self-referential ``parent`` FK forms a tree."""

    title = models.CharField('Название категории', max_length=50, unique=True)
    # Auto-filled from title on every save (see save() below).
    slug = models.SlugField(blank=True)
    parent = models.ForeignKey('self', blank=True, null=True, related_name="children", on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.title}'

    def save(self, *args, **kwargs):
        # Regenerate the slug from the (possibly changed) title on each save.
        self.slug = slugify(self.title)
        return super(Category, self).save(*args, **kwargs)

    class Meta:
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'
class Product(models.Model):
    """Shop item belonging to a category."""

    title = models.CharField('Наименование товара', max_length=50)
    description = models.CharField(max_length=200, blank=True)
    image = models.ImageField('Картинка товара', upload_to='items/')
    # NOTE(review): limit_choices_to={'children': None} presumably restricts
    # admin choices to leaf categories (no children) — verify this lookup
    # filters as intended. TODO confirm.
    category = models.ForeignKey(Category, limit_choices_to={'children': None}, verbose_name='Категория', on_delete=models.CASCADE)
    price = models.DecimalField(verbose_name='Цена', max_digits=9, decimal_places=2)

    def __str__(self):
        return f'{self.title}'

    class Meta:
        verbose_name = 'Товар'
        verbose_name_plural = 'Товары'

    # def get_absolute_url(self):
    #     return reverse('product_detail', kwargs={'slug': self.slug})
class CartProduct(models.Model):
    """Cart line item: a product with a quantity and a cached line total."""

    user = models.ForeignKey(CustomerBot, verbose_name='Покупатель из бота', on_delete=models.CASCADE)
    cart = models.ForeignKey('Cart', verbose_name='Корзина', on_delete=models.CASCADE, related_name='related_products')
    product = models.ForeignKey(Product, verbose_name='Товар', on_delete=models.CASCADE)
    quantity = models.PositiveIntegerField(default=1)
    total_price = models.DecimalField(max_digits=9, decimal_places=2, verbose_name='Общая цена', default=0)

    def save(self, *args, **kwargs):
        # Keep the cached line total in sync with quantity * unit price.
        self.total_price = self.quantity * self.product.price
        super().save(*args, **kwargs)

    def __str__(self):
        return f'{self.product}'

    def delete(self, *args, **kwargs):
        # BUG FIX: the original recomputed total_price and called
        # super().save(), so "deleting" a line item actually saved it.
        super().delete(*args, **kwargs)

    class Meta:
        verbose_name = 'Объект корзины'
        verbose_name_plural = 'Объекты корзины'
class Cart(models.Model):
    """Shopping cart; totals are recomputed from its line items on save."""

    user = models.ForeignKey(CustomerBot, verbose_name='Владелец корзины', on_delete=models.CASCADE)
    product = models.ManyToManyField(CartProduct, blank=True, related_name='related_cart')
    total_products = models.PositiveIntegerField(default=0)
    total_price = models.DecimalField(max_digits=9, decimal_places=2, verbose_name='Общая цена товаров в корзине',
                                      default=0)
    # Set when the cart has been attached to an order.
    in_order = models.BooleanField(default=False)

    def get_products(self):
        # related_products is CartProduct's reverse FK accessor.
        products = self.related_products.all()
        return products

    def save(self, *args, **kwargs):
        # Re-derive the cached totals from the current line items.
        products = self.related_products.all()
        cart_total_price = 0
        for product in products:
            cart_total_price += product.total_price
        self.total_price = cart_total_price
        self.total_products = products.count()
        super().save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        print('override delete')
        super().delete(*args, **kwargs)

    def __str__(self):
        return f'{self.user.user_id}'

    class Meta:
        verbose_name = 'Корзина товаров'
        verbose_name_plural = 'Корзина товаров'
class Order(models.Model):
    """An order placed either from the Telegram bot or from the web site."""

    STATUS_NEW = 'new'
    STATUS_IN_PROGRESS = 'in_progress'
    STATUS_READY = 'is_ready'
    STATUS_COMPLETED = 'completed'

    BUYING_TYPE_SELF = 'self'
    BUYING_TYPE_DELIVERY = 'delivery'

    STATUS_CHOICES = (
        (STATUS_NEW, 'Новый заказ'),
        (STATUS_IN_PROGRESS, 'Заказ в обработке'),
        (STATUS_READY, 'Заказ готов'),
        (STATUS_COMPLETED, 'Заказ выполнен')
    )

    BUYING_TYPE_CHOICES = (
        (BUYING_TYPE_SELF, 'Самовывоз'),
        (BUYING_TYPE_DELIVERY, 'Доставка')
    )

    # NOTE(review): both customer FKs are blank=True but not null=True, so
    # the database still requires both; since an order presumably comes from
    # only one source, at least one likely needs null=True (schema change +
    # migration) — confirm before altering.
    customer_bot = models.ForeignKey(CustomerBot, verbose_name='Покупатель из бота', related_name='related_orders',
                                     blank=True, on_delete=models.CASCADE)
    customer_site = models.ForeignKey(CustomerSite, verbose_name='Покупатель с сайта', related_name='related_orders',
                                      blank=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=255, verbose_name='Имя')
    phone = models.CharField(max_length=20, verbose_name='Телефон')
    cart = models.ForeignKey(Cart, verbose_name='Корзина', on_delete=models.CASCADE, null=True, blank=True)
    address = models.CharField(max_length=1024, verbose_name='Адрес', null=True, blank=True)
    status = models.CharField(
        max_length=100,
        verbose_name='Статус заказ',
        choices=STATUS_CHOICES,
        default=STATUS_NEW
    )
    buying_type = models.CharField(
        max_length=100,
        verbose_name='Тип заказа',
        choices=BUYING_TYPE_CHOICES,
        default=BUYING_TYPE_SELF
    )
    comment = models.TextField(verbose_name='Комментарий к заказу', null=True, blank=True)
    # BUG FIX: auto_now=True updated the timestamp on EVERY save; for a
    # creation date ("Дата создания заказа") auto_now_add is correct —
    # set once on insert. Requires a migration.
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания заказа')

    def __str__(self):
        return str(self.id)
def cart_product_delete_post_save(sender, instance, created, **kwargs):
    """Debug hook wired to CartProduct post_save; only traces its arguments."""
    print('post_save signal')
    print(instance)
    print('sender', sender)
post_save.connect(cart_product_delete_post_save, sender=CartProduct)
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,480
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/management/commands/bot.py
|
from aiogram.utils.executor import start_polling
from django.core.management.base import BaseCommand
from ugc.loader import bot, storage
from ugc.handlers import dp
# import filters
# import middlewares
class Command(BaseCommand):
    """Django management command that runs the Telegram bot via long polling."""
    help = 'Телеграм-бот'

    def handle(self, *args, **options):
        # on_startup/on_shutdown are defined below at module level; names are
        # resolved at call time, so the forward reference is fine.
        start_polling(dp, on_startup=on_startup, on_shutdown=on_shutdown)
async def on_startup(dp):
    """Startup hook for start_polling; no setup is performed yet."""
    # filters.setup(dp)
    # middlewares.setup(dp)
    return None
async def on_shutdown(dp):
    """Shutdown hook: close the bot HTTP session and the FSM storage cleanly."""
    await bot.close()
    await storage.close()
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,481
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/loader.py
|
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from django.conf import settings
import logging
# Single Bot/Dispatcher pair shared by the handlers and the management command.
bot = Bot(token=settings.BOT_TOKEN, parse_mode=types.ParseMode.HTML)
storage = MemoryStorage()  # in-memory FSM storage: all dialog state is lost on restart
dp = Dispatcher(bot, storage=storage)
logging.basicConfig(level=logging.INFO)
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,482
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/tga/settings.py
|
import os
from dotenv import load_dotenv

load_dotenv()

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = os.getenv("SECRET_KEY", 'dfsdfsgdfgdsfsdgf')

# Fix: os.getenv returns a string, so DEBUG was truthy for ANY non-empty value,
# including "False" — a production foot-gun. Parse it explicitly.
DEBUG = os.getenv('DEBUG', 'False').lower() in ('true', '1', 'yes')

# Space-separated host list in the env var, e.g. "example.com www.example.com".
ALLOWED_HOSTS = os.getenv("ALLOWED_HOSTS", '*').split(" ")

CONN_MAX_AGE = 60

# Allow ORM calls from async (aiogram) code paths; see keepalive_kwargs note.
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
# 'ugc' is the project app (bot + REST API); DRF and django-filter back the API.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'ugc',
    'rest_framework',
    'django_filters'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'tga.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'tga.wsgi.application'

# PostgreSQL, fully configured from environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.getenv("DB_NAME"),
        'USER': os.getenv("DB_USER"),
        'PASSWORD': os.getenv("DB_PASSWORD"),
        'HOST': os.getenv("DB_HOST"),
        'PORT': os.getenv("DB_PORT"),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")
MEDIA_URL = '/media/'

# Bot settings: BOT_TOKEN for aiogram, ADMIN_ID receives order notifications,
# URL is the public base URL the bot uses to call this site's own REST API.
BOT_TOKEN = os.getenv('BOT_TOKEN')
ADMIN_ID = os.getenv('ADMIN_ID')
URL = os.getenv('URL')

REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    )
}
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,483
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/message_text.py
|
category_text = 'Выберите категорию'
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,484
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/views.py
|
from .models import Category, Product
from .serializers import ProductSerializer, CategorySerializer
from rest_framework.generics import ListAPIView
from rest_framework.viewsets import ReadOnlyModelViewSet
from django_filters.rest_framework import DjangoFilterBackend
from .service import ProductFilter, CategoryFilter
class CategoryView(ReadOnlyModelViewSet):
    """Read-only category endpoint (list + retrieve), filterable by parent/children."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_class = CategoryFilter
class ProductView(ListAPIView):
    """Product list endpoint, filterable by ?category=<id> (used by the bot)."""
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_class = ProductFilter
    # def get_queryset(self):
    #     query = self.request.data.get("query")
    #     print('query: ', query)
    #     print('queryset', self)
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,485
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/handlers/users.py
|
import re
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from aiogram.types import InputMediaPhoto
from ugc.loader import dp, bot
from ugc.keyboards import start_kb, category_kb, product_info_kb, cart_kb, delete_confirm, confirm_order, \
child_category_kb
from ugc.message_text import category_text
from ugc.service import add_new_user, get_products, get_product, add_cart, get_cart, get_category, make_order
from django.conf import settings
from aiogram.types import CallbackQuery
from aiogram.dispatcher import FSMContext
from aiogram.types import InlineQuery, InputTextMessageContent, InlineQueryResultArticle
from asyncio import sleep
from ugc.states import Order, DeleteProduct, Checkout
async def edit_cart(call, cart, i=0):
    """Re-render the existing cart message in place to show item *i*.

    Swaps the photo/caption via edit_media and rebuilds the pager keyboard
    for the current position.
    """
    cart_product = cart.get_products()
    kb = await cart_kb(cart, cart_product.count(), i)
    await call.message.edit_media(
        media=InputMediaPhoto(
            media=settings.URL + cart_product[i].product.image.url,
            caption=f'<b>{cart_product[i].product.title}\n'
                    f'{cart_product[i].product.description}.\n\n'
                    f'{cart_product[i].quantity} шт.\n\n'
                    f'{cart_product[i].product.price} руб.</b>'),
        reply_markup=kb)
async def send_cart(cart, user_id, i=0):
    """Send the cart as a new message, showing item *i*.

    If the cart has no products, a category keyboard is sent instead.
    """
    cart_product = cart.get_products()
    if not cart_product:
        kb = await category_kb()
        await bot.send_message(user_id, f'Ваша корзина пуста.\n\n'
                                        f'{category_text}',
                               reply_markup=kb)
    else:
        photo = settings.URL + cart_product[i].product.image.url
        # Fix: the pager position was hard-coded to 0 while the caption used
        # index i, so the "x / y" counter could disagree with the shown item.
        kb = await cart_kb(cart, cart_product.count(), i)
        await bot.send_photo(
            user_id,
            photo=photo,
            caption=f'<b>{cart_product[i].product.title}\n'
                    f'{cart_product[i].product.description}.\n\n'
                    f'{cart_product[i].quantity} шт.\n\n'
                    f'{cart_product[i].product.price} руб.</b>',
            reply_markup=kb
        )
@dp.message_handler(CommandStart(), state='*')
async def bot_start(message: types.Message):
    """/start: register the user (if new) and greet admin or customer."""
    # NOTE(review): the result is discarded — presumably warms up the category
    # API; verify whether this call is needed at all.
    await get_category()
    user = await add_new_user(message)
    if user.is_admin:
        await message.answer(
            f'Привет Админ, {message.from_user.full_name}! Сделать рассылку',
            reply_markup=start_kb
        )
    else:
        await message.answer(
            f'Привет, {message.from_user.full_name}!\nДобро пожаловать в магазин ХАТА',
            reply_markup=start_kb
        )
@dp.message_handler(text='Выбрать товары', state='*')
async def category_handler(message: types.Message, state: FSMContext):
    """Show the top-level category keyboard."""
    await message.answer(category_text, reply_markup=await category_kb())
@dp.message_handler(text='🛒Показать корзину', state='*')
async def show_cart(message: types.Message, state: FSMContext):
    """Show the user's open (in_order=False) cart, or report that it's empty."""
    cart = await get_cart(message.chat.id)
    if not cart:
        await message.answer('Ваша корзина пуста.')
    else:
        await send_cart(cart, message.chat.id)
        # Remember the cart and pager position for the left/right handlers.
        await state.update_data(cart=cart, i=0)
@dp.inline_handler(state='*')
async def inline_categories(inline_query: InlineQuery, state: FSMContext):
    """Answer inline queries: the query string is a category id; list its products.

    Choosing a result sends the product id as a plain message, which is then
    handled by hand_product (state Order.select).
    """
    category_id = inline_query.query
    result = []
    products = await get_products(category_id)
    print('products', products)  # NOTE(review): debug output — consider logging
    for product in products:
        try:
            result.append(InlineQueryResultArticle(
                id=product["id"],
                thumb_url=product["image"],
                title=product["title"],
                description=f'{product["price"]} руб.\n'
                            f'{product["description"]}',
                input_message_content=InputTextMessageContent(product["id"])
            ))
        except TypeError:
            # NOTE(review): on malformed API data this sleeps 5s and silently
            # drops the item — presumably a rate/ordering workaround; verify.
            await sleep(5)
    await bot.answer_inline_query(inline_query.id, results=result, cache_time=5)
    await Order.select.set()
@dp.message_handler(state=Order.select)
async def hand_product(message: types.Message, state: FSMContext):
    """Show a product card (photo, price, quantity keyboard) for a picked product.

    message.text is expected to be the product primary key, as sent by the
    InputTextMessageContent in inline_categories.
    """
    quantity = 1
    product = await get_product(message.text)
    photo = settings.URL + product.image.url
    kb = await product_info_kb(quantity)
    await message.answer_photo(
        photo=photo,
        caption=f'<b>{product.title}.\n\n'
                f'{product.price} руб.\n'
                f'{product.description}</b>',
        reply_markup=kb
    )
    await state.update_data(product=product, quantity=quantity)
@dp.callback_query_handler(lambda call: call.data in ['up', 'down'], state='*')
async def up_down_handlers(call: CallbackQuery, state: FSMContext):
    """Adjust the quantity counter on a product card (minimum 1)."""
    data = await state.get_data()
    # NOTE(review): None if FSM data was lost — assumes hand_product ran first;
    # TODO confirm the flow guarantees this.
    quantity = data.get('quantity')
    if call.data == 'up':
        await bot.answer_callback_query(call.id)
        quantity += 1
        kb = await product_info_kb(quantity)
        await call.message.edit_reply_markup(kb)
    if call.data == 'down':
        if quantity == 1:
            await bot.answer_callback_query(call.id, 'Невозможно выбрать меньшее количество', show_alert=False)
        else:
            await bot.answer_callback_query(call.id)
            quantity -= 1
            kb = await product_info_kb(quantity)
            await call.message.edit_reply_markup(kb)
    await state.update_data(quantity=quantity)
@dp.callback_query_handler(lambda call: call.data in ['left', 'right'], state='*')
async def left_right_handlers(call: CallbackQuery, state: FSMContext):
    """Page through cart items; position i is kept in FSM data and clamped to bounds."""
    data = await state.get_data()
    i = data.get('i', 0)
    cart = data.get('cart')
    cart_product = cart.get_products()
    if call.data == 'right':
        await bot.answer_callback_query(call.id)
        if i != cart_product.count() - 1:  # already at last item: no-op
            i = i + 1
            await edit_cart(call, cart, i)
    if call.data == 'left':
        await bot.answer_callback_query(call.id)
        if i != 0:  # already at first item: no-op
            i = i - 1
            await edit_cart(call, cart, i)
    await state.update_data(i=i)
@dp.callback_query_handler(lambda call: call.data in ['yes_del', 'no_del'], state=DeleteProduct.delete)
async def confirm_delete(call: CallbackQuery, state: FSMContext):
    """Handle the delete-item confirmation dialog inside the cart view."""
    await bot.answer_callback_query(call.id)
    data = await state.get_data()
    i = data.get('i')
    print('delete', i)  # NOTE(review): debug output
    cart = data.get('cart')
    cart_product = cart.get_products()
    if call.data == 'yes_del':
        await bot.delete_message(call.message.chat.id, call.message.message_id)
        cart_product[i].delete()
        # Reload so totals reflect the removal, then re-render from item 0.
        cart = await get_cart(call.message.chat.id)
        await send_cart(cart, call.message.chat.id, 0)
        await state.update_data(i=0, cart=cart)
    if call.data == 'no_del':
        # Restore the normal cart keyboard at the same position.
        kb = await cart_kb(cart, cart_product.count(), i)
        await call.message.edit_reply_markup(reply_markup=kb)
@dp.callback_query_handler(lambda call: call.data in ['confirm', 'back_cart'], state=Checkout.confirm)
async def confirm_handler(call: CallbackQuery, state: FSMContext):
    """Order-summary dialog: either go back to the cart or start checkout (ask name)."""
    await bot.answer_callback_query(call.id)
    data = await state.get_data()
    cart = data.get('cart')
    if call.data == 'back_cart':
        await send_cart(cart, call.message.chat.id, 0)
        await state.update_data(i=0)
    if call.data == 'confirm':
        await call.message.answer('Отлично! Введите ваше имя')
        await Checkout.name.set()
@dp.message_handler(state=Checkout.name)
async def name_handler(message: types.Message, state: FSMContext):
    """Checkout step 2: store the customer's name, then ask for the phone number."""
    await state.update_data(name=message.text)
    await message.answer('Введите ваш номер телефона')
    await Checkout.phone.set()
@dp.message_handler(state=Checkout.phone)
async def phone_handler(message: types.Message, state: FSMContext):
    """Checkout final step: validate the phone, persist the order, notify the admin.

    On an invalid phone the user is re-prompted (state stays Checkout.phone).
    """
    # Accepts common RU formats: optional 8/+country code, optional brackets/spaces/dashes.
    if re.search(r'^(8|\+?\d{1,3})?[ -]?\(?(\d{3})\)?[ -]?(\d{3})[ -]?(\d{2})[ -]?(\d{2})$', message.text):
        data = await state.get_data()
        name = data.get('name')
        cart = data.get('cart')
        cart_product = cart.get_products()
        text = 'Поступил заказ:\n\n'
        for product in cart_product:
            text += f'{product.product.title} - {product.quantity} шт. {product.total_price} руб.\n'
        text += f'\nИтоговая стоимость заказа: {cart.total_price} руб\n\n' \
                f'Покупатель:\n' \
                f'{name} - {message.text}'
        await message.answer('Ваш заказ принят. Ожидайте скоро свяжемся')
        await make_order(message.chat.id, cart, name, message.text)
        await bot.send_message(
            int(settings.ADMIN_ID),
            text=text
        )
    else:
        # Fix: user-facing message had a typo ("с вязаться") and missing spaces.
        await message.answer('Неправильный формат. Введите реальный номер, чтобы мы могли связаться с вами')
        await Checkout.phone.set()
@dp.callback_query_handler(state='*')
async def cart_handlers(call: CallbackQuery, state: FSMContext):
    """Catch-all callback dispatcher for category navigation and cart actions.

    Registered after the more specific callback handlers, so it only receives
    callbacks they did not match (category ids, add/back/cancel/delete/pay).
    """
    data = await state.get_data()
    await bot.answer_callback_query(call.id)
    # A top-level category button was pressed: drill into child categories.
    if call.data in [str(category['id']) for category in await get_category()]:
        print('category', call.data)  # NOTE(review): debug output
        kb = await child_category_kb(call.data)
        await call.message.edit_reply_markup(reply_markup=kb)
    if call.data == 'add':
        # Add the currently shown product (with chosen quantity) to the cart.
        product = data.get('product')
        quantity = data.get('quantity')
        await add_cart(call.message.chat.id, product, quantity)
        cart = await get_cart(call.message.chat.id)
        await send_cart(cart, call.message.chat.id)
        await state.update_data(cart=cart, i=0)
    if call.data == 'back':
        kb = await category_kb()
        await call.message.edit_reply_markup(reply_markup=kb)
    if call.data == 'cancel':
        # Discard the product card and return to category selection.
        await call.message.delete()
        kb = await category_kb()
        await call.message.answer(category_text, reply_markup=kb)
    if call.data == 'delete_cart':
        cart = data.get('cart')
        cart.delete()
        await bot.delete_message(call.message.chat.id, call.message.message_id)
        await call.message.answer('Ваша Корзина пуста')
        kb = await category_kb()
        await call.message.answer(category_text, reply_markup=kb)
    if call.data == 'delete_product':
        # Swap in the yes/no confirmation keyboard; resolved by confirm_delete.
        await bot.answer_callback_query(call.id)  # NOTE(review): already answered above
        await call.message.edit_reply_markup(reply_markup=delete_confirm)
        await DeleteProduct.delete.set()
    if call.data == 'pay':
        await bot.answer_callback_query(call.id)  # NOTE(review): already answered above
        cart = data.get('cart')
        cart_product = cart.get_products()
        text = 'Ваш заказ:\n\n'
        for product in cart_product:
            text += f'{product.product.title} - {product.quantity} шт. {product.total_price} руб.\n'
        text += f'\nИтоговая стоимость заказа: {cart.total_price} руб\n'
        await call.message.answer(
            text,
            reply_markup=confirm_order
        )
        await Checkout.confirm.set()
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,486
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/states.py
|
from aiogram.dispatcher.filters.state import StatesGroup, State
class Order(StatesGroup):
    """Product selection flow. NOTE: name shadows the Order model — rename candidate."""
    category = State()
    select = State()
class DeleteProduct(StatesGroup):
    """Waiting for the yes/no answer of the delete-item confirmation dialog."""
    delete = State()
class Checkout(StatesGroup):
    """Checkout flow: confirm order summary, then collect name and phone."""
    confirm = State()
    name = State()
    phone = State()
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,487
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/admin.py
|
from django.contrib import admin
from .models import CustomerBot, CustomerSite, Product, Category, CartProduct, Cart, Order
@admin.register(CustomerBot)
class UsersAdmin(admin.ModelAdmin):
    """Read-only admin listing of Telegram bot customers."""
    list_display = ('user_id', 'name', 'username')
    readonly_fields = ('user_id', 'name', 'username')
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Category admin: parent is editable on creation, frozen afterwards."""
    list_select_related = ('parent',)
    list_display = ('title', 'parent', )

    def get_readonly_fields(self, request, obj=None):
        # An existing category may not be re-parented.
        return ['parent'] if obj else []
# Remaining models use the default ModelAdmin.
admin.site.register(Product)
admin.site.register(Cart)
admin.site.register(CartProduct)
admin.site.register(Order)
admin.site.register(CustomerSite)
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,488
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/serializers.py
|
from rest_framework.serializers import ModelSerializer, SlugRelatedField
from .models import Category, Product
class ProductSerializer(ModelSerializer):
    """Serializes a Product for the bot-facing REST API (note: 'title', not 'name')."""
    # products = serializers.SlugRelatedField(slug_field='name', read_only=True)
    class Meta:
        model = Product
        fields = ("id", "title", "description", "image", "price", "category")
class CategorySerializer(ModelSerializer):
    """Category with its nested products and child-category ids.

    NOTE(review): product_set is not read_only — fine for the ReadOnlyModelViewSet
    it serves, but would break writes; confirm before reusing elsewhere.
    """
    product_set = ProductSerializer(many=True)

    class Meta:
        model = Category
        fields = ("id", "title", "parent", "product_set", "children")
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,489
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/service.py
|
import aiohttp
from ugc.models import CustomerBot, Category, Product, Cart, CartProduct, Order
from asgiref.sync import sync_to_async
from django.conf import settings
from django_filters import rest_framework as filters
class ProductFilter(filters.FilterSet):
    """Allows ?category=<id> filtering on the product list endpoint."""
    class Meta:
        model = Product
        fields = ['category']
class CategoryFilter(filters.FilterSet):
    """Allows ?parent=<id> / ?children=<id> filtering on the category endpoint."""
    class Meta:
        model = Category
        fields = ['parent', 'children']
@sync_to_async
def add_new_user(message):
    """Get or create the CustomerBot record for the message sender; return it.

    Name/username are only set on first contact (get_or_create defaults).
    """
    user, created = CustomerBot.objects.get_or_create(
        user_id=message.chat.id,
        defaults={
            'name': message.from_user.full_name,
            'username': message.from_user.username
        }
    )
    return user
# @sync_to_async
# def get_category():
# return Category.objects.filter(parent=None)
# @sync_to_async
# def get_child_category(category_id):
# return Category.objects.filter(parent__id=category_id)
# @sync_to_async
# def get_products(category_id):
# return Product.objects.all().filter(category__id=category_id)
@sync_to_async
def get_product(pk):
    """Fetch a single Product by primary key (raises DoesNotExist if absent)."""
    return Product.objects.get(pk=pk)
@sync_to_async
def add_cart(user_id, product, quantity):
    """Add *quantity* of *product* to the user's open cart, creating cart/row as needed.

    If the product is already in the cart its quantity is incremented;
    the final cart.save() recomputes the cached totals.
    """
    user = CustomerBot.objects.get(user_id=user_id)
    cart = Cart.objects.filter(user=user, in_order=False).first()
    if not cart:
        cart = Cart.objects.create(user=user)
    cart_product, created = CartProduct.objects.get_or_create(user=user, cart=cart, product=product)
    if not created:
        cart_product.quantity = quantity + cart_product.quantity
    else:
        cart_product.quantity = quantity
    cart_product.save()
    cart.save()
@sync_to_async
def get_cart(user_id):
    """Return the user's open cart (in_order=False), or None if there is none."""
    return Cart.objects.filter(user__user_id=user_id, in_order=False).first()
@sync_to_async
def make_order(user_id, cart, name, phone):
    """Create an Order for the bot customer identified by *user_id*.

    NOTE(review): the cart is attached as-is and NOT marked in_order here, so
    get_cart() will still return it afterwards — confirm that is intended.
    """
    customer_bot = CustomerBot.objects.get(user_id=user_id)
    # Fix: the default Django manager is `objects`; `Order.object` raised
    # AttributeError, so no order was ever persisted. Debug prints removed.
    Order.objects.create(customer_bot=customer_bot, name=name, phone=phone, cart=cart)
async def get_products(category_id):
    """Fetch products for *category_id* from the site's own REST API; return parsed JSON."""
    url = settings.URL + '/api' + '/products'
    async with aiohttp.ClientSession() as session:
        # Fix: use the response as a context manager so the connection is
        # released even if .json() raises; debug print removed.
        async with session.get(url, params={'category': category_id}) as response:
            return await response.json()
async def get_category():
    """Fetch all categories from the site's own REST API; return parsed JSON."""
    url = settings.URL + '/api' + '/category'
    async with aiohttp.ClientSession() as session:
        # Fix: response used as a context manager so the connection is always
        # released; debug print removed.
        async with session.get(url) as response:
            return await response.json()
async def get_child_category(category_id):
    """Fetch child categories of *category_id* from the REST API; return parsed JSON."""
    url = settings.URL + '/api' + '/category'
    async with aiohttp.ClientSession() as session:
        # Fix: response used as a context manager so the connection is always
        # released; debug print removed.
        async with session.get(url, params={'parent': category_id}) as response:
            return await response.json()
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,490
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/urls.py
|
from django.urls import path
from rest_framework.routers import DefaultRouter
from .views import CategoryView, ProductView
# DRF router exposes /category/ (list + retrieve via ReadOnlyModelViewSet).
router = DefaultRouter()
router.register('category', CategoryView)

app_name = "api"

urlpatterns = [
    path('products/', ProductView.as_view()),
]
urlpatterns += router.urls
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,491
|
vkhnychenko/hata-hookah-shop
|
refs/heads/master
|
/ugc/keyboards.py
|
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup, KeyboardButton
from ugc.service import get_category, get_products, get_child_category
# Reusable buttons for the cart view.
delete = InlineKeyboardButton('❌Удалить этот товар', callback_data='delete_product')
left = InlineKeyboardButton('⬅️', callback_data='left')
right = InlineKeyboardButton('➡️', callback_data='right')
delete_cart = InlineKeyboardButton('🗑Очистить корзину', callback_data='delete_cart')

# Yes/no keyboard for the delete-item confirmation dialog.
delete_confirm = InlineKeyboardMarkup(row_width=1)
delete_confirm.add(InlineKeyboardButton('Да.Точно удалить!', callback_data='yes_del'))
delete_confirm.add(InlineKeyboardButton('Отмена', callback_data='no_del'))

# Confirm/back keyboard for the order summary.
confirm_order = InlineKeyboardMarkup(row_width=1)
confirm_order.add(InlineKeyboardButton('Да.Заказ верен!', callback_data='confirm'))
confirm_order.add(InlineKeyboardButton('Вернуться в корзину', callback_data='back_cart'))

# Persistent reply keyboard with the two main entry points.
start_kb = ReplyKeyboardMarkup(
    row_width=1, resize_keyboard=True).add(
    KeyboardButton('Выбрать товары'), KeyboardButton('🛒Показать корзину')
)
async def category_kb():
    """Keyboard of top-level categories that have children (parent is None)."""
    kb = InlineKeyboardMarkup(row_width=1)
    categories = await get_category()
    for category in categories:
        # Only root categories with at least one child get a button.
        if category['children'] and not category['parent']:
            kb.insert(InlineKeyboardButton(category['title'], callback_data=category['id']))
        # else:
        #     kb.insert(InlineKeyboardButton(category['title'], switch_inline_query_current_chat=category['id']))
    return kb
async def child_category_kb(category_id):
    """Keyboard of child categories; each button opens an inline product query."""
    kb = InlineKeyboardMarkup(row_width=1)
    categories = await get_child_category(category_id)
    for category in categories:
        kb.insert(InlineKeyboardButton(category['title'], switch_inline_query_current_chat=category['id']))
    kb.add(InlineKeyboardButton('Назад◀️', callback_data='back'))
    return kb
async def products_kb(category_id):
    """Keyboard of products in a category; each button opens an inline query."""
    kb = InlineKeyboardMarkup(row_width=1)
    items = await get_products(category_id)
    for item in items:
        # Fix: the products API serializes the display name under "title"
        # (see ProductSerializer and the inline handler), not "name" —
        # this raised KeyError on every item.
        kb.insert(InlineKeyboardButton(item['title'], switch_inline_query_current_chat=item['id']))
    kb.add(InlineKeyboardButton('Назад◀️', callback_data='back'))
    return kb
async def product_info_kb(quantity):
    """Product-card keyboard: quantity stepper row plus 'add to cart' button."""
    kb = InlineKeyboardMarkup(row_width=2)
    quantity_btn = InlineKeyboardButton(f'{quantity} шт.', callback_data='None')
    kb.row(InlineKeyboardButton('⬇️', callback_data='down'), quantity_btn,
           InlineKeyboardButton('⬆️', callback_data='up'), InlineKeyboardButton('Отмена', callback_data='cancel'))
    kb.add(InlineKeyboardButton('🛒Добавить в корзину🛒', callback_data='add'))
    return kb
async def cart_kb(cart, cart_size, i):
    """Cart keyboard: pager (only when more than one item), delete buttons, checkout."""
    kb = InlineKeyboardMarkup(row_width=3)
    if cart_size > 1:
        kb.row(left, InlineKeyboardButton(text=f'{i + 1} / {cart_size}', callback_data='None'), right)
    kb.add(delete)
    kb.add(delete_cart)
    kb.add(InlineKeyboardButton(f'💰Оформить заказ({cart.total_price})', callback_data='pay'))
    return kb
|
{"/ugc/management/commands/bot.py": ["/ugc/loader.py"], "/ugc/views.py": ["/ugc/models.py", "/ugc/serializers.py", "/ugc/service.py"], "/ugc/handlers/users.py": ["/ugc/loader.py", "/ugc/keyboards.py", "/ugc/message_text.py", "/ugc/service.py", "/ugc/states.py"], "/ugc/admin.py": ["/ugc/models.py"], "/ugc/serializers.py": ["/ugc/models.py"], "/ugc/service.py": ["/ugc/models.py"], "/ugc/urls.py": ["/ugc/views.py"], "/ugc/keyboards.py": ["/ugc/service.py"]}
|
23,492
|
MilanDroid/Python-Profile-analyser
|
refs/heads/master
|
/apps/industries.py
|
# -*- coding: utf-8 -*-
from apps.registers import Registers
class Industries(object):
    """Loads industry weighting data from a file under <folder>/Industries/."""

    def __init__(self, folder, filename):
        self.register = Registers()
        self.filename = "{0}Industries/{1}".format(folder, filename)
        self.content = []

    def start(self):
        # Key column 'industry', value column index 3 — per Registers.get_data.
        self.content = self.register.get_data(self.filename, 'industry', 3)
|
{"/apps/industries.py": ["/apps/registers.py"], "/apps/analyser.py": ["/apps/people.py", "/apps/countries.py", "/apps/industries.py"], "/apps/people.py": ["/apps/registers.py"], "/main.py": ["/apps/analyser.py"], "/apps/countries.py": ["/apps/registers.py"]}
|
23,493
|
MilanDroid/Python-Profile-analyser
|
refs/heads/master
|
/apps/analyser.py
|
# -*- coding: utf-8 -*-
import os
import sys
import random
import operator
from apps.people import People
from apps.countries import Countries
from apps.roles import Roles
from apps.industries import Industries
# GLOBAL VARIABLES SECTION, USE ONLY FOR CONSTANT VALUES
DIR_ROOT = os.getcwd() + "/"
DIR_INPUTS = DIR_ROOT + "resources/files/inputs/"
DIR_OUTPUTS = DIR_ROOT + "resources/files/outputs/"
class Analyser(object):
def __init__(self, country, industry, rol, people):
    """Wire up the four input loaders and prompt for scoring weights.

    Parameters are input filenames relative to resources/files/inputs/.
    """
    self.people_out = []
    # Observed maxima, filled by extract_unique_values; used for normalization.
    self.MAX_RECOMMENDATIONS = 0
    self.MAX_CONNECTIONS = 0
    self.countries = Countries(DIR_INPUTS, country)
    self.industries = Industries(DIR_INPUTS, industry)
    self.roles = Roles(DIR_INPUTS, rol)
    self.people = People(DIR_INPUTS, people)
    # Importance of parameters. Between 0 - 100 (interactive prompt).
    self.get_weights()
    # This is only for analytics purpose on testing, etc...
    # can remove it from here
    self.list_roles = []
    self.list_countries = []
    self.list_industries = []
    self.list_people = self.people.content
    self.dict_stats = {}
def get_weights(self):
    """Set the five scoring weights, interactively or from defaults.

    Weights must sum to 100; on an invalid sum the method recurses to
    re-prompt (the innermost successful call sets the attributes on self).
    """
    message = "Change defaults weights (y/n):"
    default_weights = input(message)
    if default_weights.lower() == 'y':
        self.W_RECOMMENDATIOS = self.input_number('Recommendations weight (0-100):')
        self.W_CONNECTIONS = self.input_number('Connections weight (0-100):')
        self.W_INDUSTRIES = self.input_number('Industries weight (0-100):')
        self.W_COUNTRIES = self.input_number('Countries weight (0-100):')
        self.W_ROLES = self.input_number('Roles weight (0-100):')
        comprobation = self.W_RECOMMENDATIOS + self.W_CONNECTIONS
        comprobation += self.W_INDUSTRIES + self.W_COUNTRIES
        comprobation += self.W_ROLES
        if comprobation != 100:
            print("Sum of weights needs to be 100")
            self.get_weights()
    else:
        self.W_RECOMMENDATIOS = 45
        self.W_CONNECTIONS = 30
        self.W_INDUSTRIES = 10
        self.W_COUNTRIES = 8
        self.W_ROLES = 7
def input_number(self, message):
    """Prompt with *message* until the user enters a valid integer; return it.

    Fix: the original had an unreachable `break` after `return` and a
    redundant try/else split; behavior is unchanged.
    """
    while True:
        try:
            return int(input(message))
        except ValueError:
            print("Not an integer! Try again.")
def start_process(self):
    """Load all four input files, then scan people for maxima and unique values."""
    self.people.start()
    self.industries.start()
    self.roles.start()
    self.countries.start()
    self.explore_list()
def explore_list(self):
    """One pass over all people to collect maxima and unique role/country/industry lists."""
    for person in self.people.content:
        self.extract_unique_values(person)
    # This lines can be removed, are only for test and analitycs
    self.order_lists()
    self.stats()
def extract_unique_values(self, person):
    """Update running maxima and the unique-value lists from one person row.

    Row layout (by usage here and in get_evaluatization): 3=role, 4=country,
    5=industry, 6=recommendations, 7=connections.
    """
    connections = int(person[7])
    recommendations = int(person[6])
    if self.MAX_CONNECTIONS < connections:
        self.MAX_CONNECTIONS = connections
    if self.MAX_RECOMMENDATIONS < recommendations:
        self.MAX_RECOMMENDATIONS = recommendations
    # Testing and dev, can be removed
    # This lines helps to generate a list of roles, countries and industries
    if self.checking_list(person[3], self.list_roles):
        self.list_roles.append(person[3])
    if self.checking_list(person[4], self.list_countries):
        self.list_countries.append(person[4])
    if self.checking_list(person[5], self.list_industries):
        self.list_industries.append(person[5])
def checking_list(self, value, source_list):
    """Return True when *value* is not yet present in *source_list*."""
    for existing in source_list:
        if existing == value:
            return False
    return True
def get_evaluatization(self):
    """Score every person with the weighted factors, keep the top 100
    and write their ids to the output file."""
    for client in self.people.content:
        # Per-factor scores; list scores come from the *.in valorization files.
        p_rol = self.valorization(self.roles.content, client[3])
        p_countries = self.valorization(self.countries.content, client[4])
        p_industries = self.valorization(self.industries.content, client[5])
        # Normalized against the maxima discovered in explore_list().
        p_recommendations = int(client[6]) / self.MAX_RECOMMENDATIONS
        p_connections = int(client[7]) / self.MAX_CONNECTIONS
        # Apply the configured weights (they are validated to sum to 100).
        p_rol = p_rol * self.W_ROLES
        p_countries = p_countries * self.W_COUNTRIES
        p_industries = p_industries * self.W_INDUSTRIES
        p_recommendations = p_recommendations * self.W_RECOMMENDATIOS
        p_connections = p_connections * self.W_CONNECTIONS
        p_total = p_rol
        p_total += p_countries
        p_total += p_industries
        p_total += p_recommendations
        p_total += p_connections
        # Back to a 0-1 scale, since the weights sum to 100.
        p_total = p_total/100
        self.people_out.append([client[0], p_total])
    self.sort_out()
    # DIR_OUTPUTS is a module-level constant defined elsewhere in this file.
    self.people.file_out(self.people_out, DIR_OUTPUTS)
    self.print_out_menu()
def valorization(self, source_list, value):
    """Find *value* in column 1 of *source_list* and return its score
    (column 2) as a float; implicitly returns None when absent."""
    match = next((row for row in source_list if row[1] == value), None)
    if match is not None:
        return float(match[2])
def sort_out(self):
    """Order the scored people best-first and keep only the top 100."""
    ranked = sorted(self.people_out, key=lambda row: row[1])
    ranked.reverse()
    self.people_out = ranked[:100]
def print_out_menu(self):
    """Optionally print the full source row of every ranked person."""
    if input("Print out list with details (y/n):") == "y":
        for ranked in self.people_out:
            for record in self.people.content:
                if record[0] == ranked[0]:
                    print(record)
#
# Analytics and test helpers from here on; safe to remove in production.
#
"""
This function extracts all parameters from the people input and starts
a process to fill all the lists.
It is used to extract stats and to generate test data.
"""
def stats(self):
    """Record how many people / roles / countries / industries were seen
    and print the resulting summary dict."""
    sources = (
        ("people", self.people.content),
        ("roles", self.list_roles),
        ("countries", self.list_countries),
        ("industries", self.list_industries),
    )
    for key, values in sources:
        self.dict_stats[key] = {"quantity": len(values)}
    print(self.dict_stats)
def order_lists(self):
    """Sort the collected unique-value lists in place."""
    for bucket in (self.list_roles, self.list_countries, self.list_industries):
        bucket.sort()
"""
This section has all visualization methods
"""
def menu_print_lists(self):
    """Interactive helper: optionally rescan the data and print one of the
    collected lists (roles / countries / industries / people)."""
    selection = None
    dict_options = {
        1: "roles",
        2: "countries",
        3: "industries",
        4: "people"
    }
    show = input("Do you want see a list? (y/n): ")
    if show.lower() == 'y':
        self.explore_list()
        selection = int(input(
            "Enter number of list.\r\n" +
            "1. Roles\r\n" +
            "2. Countries\r\n" +
            "3. Industries\r\n" +
            "4. People\r\n" +
            "Enter your selection: "
        ))
        self.printing_selection(selection, dict_options)
    else:
        # Typo fix: the original printed "clompleted".
        print("Process completed...")
def printing_selection(self, selection, dict_options):
    """Print the list matching *selection* (a key of *dict_options*);
    warn on an invalid selection."""
    if selection in dict_options:
        # NOTE(review): option 4 maps to "list_people", which may not exist
        # as an attribute — the getattr default string covers that case.
        option = "list_" + dict_options[selection]
        printable_lst = getattr(self, option, "That list doesn't exists")
        self.print_list(printable_lst)
    else:
        print("That's not a valid option")
    # Typo fix: the original printed "clompleted".
    print("Process completed...")
def print_list(self, value):
    """Dump *value* to stdout (thin wrapper kept for test seams)."""
    print(value)
"""
Tests input generation area.
Fill a list with a random valorization
"""
def create_lists(self, filename, list_input):
    """Write one 'index|value|random_score' line per entry of *list_input*.

    The score is a uniform random float in [0, 1] rounded to 8 decimals.
    """
    # 'with' guarantees the file is flushed and closed (the original leaked it).
    with open(filename, "w") as f:
        for i, row in enumerate(list_input):
            value = round(random.uniform(0, 1), 8)
            f.write(str(i) + '|' + row + '|' + str(value) + "\n")
def init_lists(self):
    """Generate test valorization files for roles, countries and industries."""
    sources = (
        (self.roles, self.list_roles),
        (self.countries, self.list_countries),
        (self.industries, self.list_industries),
    )
    for holder, values in sources:
        self.create_lists(holder.filename, values)
# This function helps to verify if all countries are on countries.in
def check_countries(self):
list_test_countries = []
for country in self.countries.content:
list_test_countries.append(country[1])
tmp = country[2]
print(tmp)
for country in self.list_countries:
if country not in list_test_countries:
print(country)
print('This is not on list countries.in')
|
{"/apps/industries.py": ["/apps/registers.py"], "/apps/analyser.py": ["/apps/people.py", "/apps/countries.py", "/apps/industries.py"], "/apps/people.py": ["/apps/registers.py"], "/main.py": ["/apps/analyser.py"], "/apps/countries.py": ["/apps/registers.py"]}
|
23,494
|
MilanDroid/Python-Profile-analyser
|
refs/heads/master
|
/apps/people.py
|
# -*- coding: utf-8 -*-
from apps.registers import Registers
class People(object):
    """Loads the people input file and writes the ranked ids back out."""
    def __init__(self, folder, filename):
        # Registers does the generic parse/validate work.
        self.register = Registers()
        self.filename = folder + "People/" + filename
        self.content = []
    def start(self):
        """Parse the input file; each person row must have 8 columns."""
        self.content = self.register.get_data(self.filename, 'person', 8)
    def file_out(self, out_list, folder):
        """Write one person id (column 0 of each row) per line to
        People/people.out under *folder*."""
        filename = folder + "People/people.out"
        # 'with' guarantees the handle is flushed and closed (the original leaked it).
        with open(filename, "w") as f:
            for row in out_list:
                f.write(str(row[0]) + "\n")
|
{"/apps/industries.py": ["/apps/registers.py"], "/apps/analyser.py": ["/apps/people.py", "/apps/countries.py", "/apps/industries.py"], "/apps/people.py": ["/apps/registers.py"], "/main.py": ["/apps/analyser.py"], "/apps/countries.py": ["/apps/registers.py"]}
|
23,495
|
MilanDroid/Python-Profile-analyser
|
refs/heads/master
|
/apps/registers.py
|
# -*- coding: utf-8 -*-
import sys
class Registers(object):
    """Read, validate and split '|'-separated data files into lists."""
    def __str__(self):
        # BUG FIX: the original printed the message and returned None,
        # which makes str(instance) raise TypeError. __str__ must return str.
        return "Register class. Read, process and save files data in arrays"
    def get_data(self, dir_file, name, end):
        """
        Reads the file to extract the value by line,
        then the file data is saved in a list
        and splits the content of lines by '|'
        dir_file(string): File directory
        name(string): Type of data 'person', 'country', 'roles', etc...
        end(int): Columns number
        """
        self.list_response = []
        # 'with' closes the handle even if validation exits early.
        with open(dir_file, "r") as input_file:
            for line in input_file:
                content = line.split('|')
                self.verify_data_structure(content, name, end)
                # Because the end of line has a break '\n'
                content[end - 1] = self.clean_value(content[end - 1])
                self.list_response.append(content)
        return self.list_response
    def verify_data_structure(self, line, name, end):
        """Abort the program when *line* doesn't have exactly *end* columns."""
        quantity_values = len(line)
        if quantity_values != end:
            print("\nError:\nThis " + name + " hasn't a valid format: ")
            print(line, quantity_values)
            sys.exit()
    def clean_value(self, value):
        """Remove newline characters from *value*."""
        return value.replace('\n', '')
|
{"/apps/industries.py": ["/apps/registers.py"], "/apps/analyser.py": ["/apps/people.py", "/apps/countries.py", "/apps/industries.py"], "/apps/people.py": ["/apps/registers.py"], "/main.py": ["/apps/analyser.py"], "/apps/countries.py": ["/apps/registers.py"]}
|
23,496
|
MilanDroid/Python-Profile-analyser
|
refs/heads/master
|
/main.py
|
# -*- coding: utf-8 -*-
from apps.analyser import Analyser
# INIT PROCESS
ranking = Analyser("countries.in", "industries.in", 'roles.in', "people.in")
ranking.start_process()
ranking.get_evaluatization()
#
# Analitycs functions from here, can remove it
#
"""
This functions are used to extract a list of countries ,
roles and indurtries on input file and generate a test values
"""
# ranking.init_lists()
# ranking.menu_print_lists()
|
{"/apps/industries.py": ["/apps/registers.py"], "/apps/analyser.py": ["/apps/people.py", "/apps/countries.py", "/apps/industries.py"], "/apps/people.py": ["/apps/registers.py"], "/main.py": ["/apps/analyser.py"], "/apps/countries.py": ["/apps/registers.py"]}
|
23,497
|
MilanDroid/Python-Profile-analyser
|
refs/heads/master
|
/apps/countries.py
|
# -*- coding: utf-8 -*-
from apps.registers import Registers
class Countries(object):
    """Loads the countries valorization file."""
    def __init__(self, folder, filename):
        self.register = Registers()
        self.filename = folder + "Countries/" + filename
        self.content = []
    def start(self):
        """Parse the countries file; rows have 3 columns."""
        self.content = self.register.get_data(self.filename, "countries", 3)
    def clean(self):
        """Strip thousand separators and '.00' suffixes from the score column."""
        for row in self.content:
            row[2] = row[2].replace(',', '').replace('.00', '')
|
{"/apps/industries.py": ["/apps/registers.py"], "/apps/analyser.py": ["/apps/people.py", "/apps/countries.py", "/apps/industries.py"], "/apps/people.py": ["/apps/registers.py"], "/main.py": ["/apps/analyser.py"], "/apps/countries.py": ["/apps/registers.py"]}
|
23,547
|
henna-h/movie-rating-app
|
refs/heads/main
|
/users.py
|
from db import db
from flask import session
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import check_password_hash, generate_password_hash
def set_session(user):
    """Persist the logged-in user's id and username in the Flask session."""
    for field in ("id", "username"):
        session[field] = getattr(user, field)
def get_session_user_id():
    """Return the logged-in user's id (KeyError when nobody is logged in)."""
    return session['id']
def del_session():
    """Remove the login markers from the Flask session (logout)."""
    for field in ("id", "username"):
        del session[field]
def get_user(id):
    """Fetch (id, username) for the given user id; None when absent."""
    result = db.session.execute(
        "SELECT id, username FROM users WHERE id=:id", {"id": id}
    )
    return result.fetchone()
def get_user_by_username(username):
    """Fetch the full user record by username; None when absent."""
    result = db.session.execute(
        "SELECT id, username, description, password FROM users WHERE username=:username",
        {"username": username},
    )
    return result.fetchone()
def register(username, hash_value):
    """Create a user row with the given password hash and return the
    freshly inserted record."""
    sql = "INSERT INTO users (username, password) VALUES (:username, :password)"
    db.session.execute(sql, {"username": username, "password": hash_value})
    db.session.commit()
    # Read the row back; a SELECT needs no commit (the original committed twice).
    sql = "SELECT * FROM users WHERE username=:username"
    result = db.session.execute(sql, {"username": username})
    user = result.fetchone()
    return user
def add_description(username, description):
    """Update the profile description of *username*."""
    db.session.execute(
        "UPDATE users SET description=:description WHERE username=:username",
        {"description": description, "username": username},
    )
    db.session.commit()
|
{"/routes.py": ["/app.py", "/users.py", "/movies.py", "/reviews.py"], "/app.py": ["/routes.py"]}
|
23,548
|
henna-h/movie-rating-app
|
refs/heads/main
|
/movies.py
|
from operator import truediv
from db import db
def get_movie_list():
    """All movies, newest submission first."""
    return db.session.execute("SELECT * FROM movies ORDER BY submitted_at DESC").fetchall()
def get_movie(id):
    """Single movie row by primary key; None when missing."""
    return db.session.execute("SELECT * FROM movies WHERE id=:id", {"id": id}).fetchone()
def get_average_rating(movie_id):
    """Average star rating for a movie, rounded to one decimal
    (None when the movie has no reviews)."""
    row = db.session.execute(
        "SELECT ROUND(AVG(stars), 1) FROM reviews WHERE movie_id=:id", {"id": movie_id}
    ).fetchone()
    return row[0]
def add_movie(name, director, screenwriter, cast_members, year, description, user_id):
    """Insert a new movie row submitted by *user_id*."""
    params = {
        "name": name,
        "director": director,
        "screenwriter": screenwriter,
        "cast_members": cast_members,
        "year": year,
        "description": description,
        "user_id": user_id,
    }
    sql = "INSERT INTO movies (name, director, screenwriter, cast_members, year, description, user_id) VALUES (:name, :director, :screenwriter, :cast_members, :year, :description, :user_id)"
    db.session.execute(sql, params)
    db.session.commit()
def delete_movie(id):
    """Remove a movie and all of its reviews in a single transaction."""
    db.session.execute("DELETE FROM reviews WHERE movie_id=:id", {"id": id})
    db.session.execute("DELETE FROM movies WHERE id=:id", {"id": id})
    db.session.commit()
def mark_movie_as_seen(user_id, movie_id):
    """Record that *user_id* has seen *movie_id*."""
    params = {"user_id": user_id, "movie_id": movie_id}
    db.session.execute(
        "INSERT INTO movies_seen (user_id, movie_id) VALUES (:user_id, :movie_id)", params
    )
    db.session.commit()
def add_to_watch_later(user_id, movie_id):
    """Add *movie_id* to the user's watch-later list."""
    params = {"user_id": user_id, "movie_id": movie_id}
    db.session.execute(
        "INSERT INTO watch_later (user_id, movie_id) VALUES (:user_id, :movie_id)", params
    )
    db.session.commit()
def is_in_seen_list(user_id, movie_id):
    """True when *user_id* has marked *movie_id* as seen."""
    sql = "SELECT COUNT(*) FROM movies_seen WHERE user_id=:user_id AND movie_id=:movie_id"
    result = db.session.execute(sql, {"user_id": user_id, "movie_id": movie_id})
    count = result.fetchone()[0]
    # Idiomatic boolean instead of the if/else that returned True/False.
    return count > 0
def is_in_watch_later_list(user_id, movie_id):
    """True when *movie_id* is on the user's watch-later list."""
    sql = "SELECT COUNT(*) FROM watch_later WHERE user_id=:user_id AND movie_id=:movie_id"
    result = db.session.execute(sql, {"user_id": user_id, "movie_id": movie_id})
    count = result.fetchone()[0]
    # Idiomatic boolean instead of the if/else that returned True/False.
    return count > 0
def delete_from_seen_list(user_id, movie_id):
    """Un-mark *movie_id* as seen for *user_id*."""
    params = {"user_id": user_id, "movie_id": movie_id}
    db.session.execute(
        "DELETE FROM movies_seen WHERE user_id=:user_id AND movie_id=:movie_id", params
    )
    db.session.commit()
def delete_from_watch_later_list(user_id, movie_id):
    """Remove *movie_id* from the user's watch-later list."""
    params = {"user_id": user_id, "movie_id": movie_id}
    db.session.execute(
        "DELETE FROM watch_later WHERE user_id=:user_id AND movie_id=:movie_id", params
    )
    db.session.commit()
def get_users_seen_movies_list(user_id):
    """Movie ids the user has marked as seen."""
    sql = "SELECT movie_id FROM movies_seen WHERE user_id=:user_id"
    result = db.session.execute(sql, {"user_id": user_id})
    # A SELECT needs no commit; the leftover debug print is removed too.
    return result.fetchall()
def get_users_watch_later_list(user_id):
    """Movie ids on the user's watch-later list."""
    sql = "SELECT movie_id FROM watch_later WHERE user_id=:user_id"
    result = db.session.execute(sql, {"user_id": user_id})
    # A SELECT needs no commit.
    return result.fetchall()
def get_seen_movies_count(user_id):
    """Number of movies the user has marked as seen."""
    sql = "SELECT COUNT(*) FROM movies_seen WHERE user_id=:user_id"
    # The original executed the same query twice; once is enough.
    result = db.session.execute(sql, {"user_id": user_id})
    return result.fetchone()[0]
def get_watch_later_count(user_id):
    """Number of movies on the user's watch-later list."""
    sql = "SELECT COUNT(*) FROM watch_later WHERE user_id=:user_id"
    # The original executed the same query twice; once is enough.
    result = db.session.execute(sql, {"user_id": user_id})
    return result.fetchone()[0]
|
{"/routes.py": ["/app.py", "/users.py", "/movies.py", "/reviews.py"], "/app.py": ["/routes.py"]}
|
23,549
|
henna-h/movie-rating-app
|
refs/heads/main
|
/routes.py
|
from app import app
import users
import movies
import reviews
from flask import render_template, request, flash, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import check_password_hash, generate_password_hash
@app.route("/")
def index():
movieList= movies.get_movie_list()
get_average_rating = movies.get_average_rating
return render_template("index.html", movies = movieList, get_average_rating = get_average_rating)
@app.route("/login",methods=["POST"])
def login():
username = request.form["username"]
password = request.form["password"]
user = users.get_user_by_username(username)
if not user:
#invalid username
flash("Invalid password or username")
return redirect("/")
else:
hash_value = user.password
if check_password_hash(hash_value, password):
#correct username and password
users.set_session(user)
return redirect("/")
else:
#invalid password
flash("Invalid password or username")
return redirect("/")
@app.route("/logout")
def logout():
users.del_session()
return redirect("/")
@app.route("/register",methods=["GET","POST"])
def register():
if request.method == "GET":
return render_template("registration_form.html")
else:
username = request.form["username"]
password = request.form["password"]
password2 = request.form["password2"]
usernameExists = users.get_user_by_username(username)
if not username:
flash("Must include a username")
return redirect("/register")
elif len(username) > 50:
flash("Username must be under 50 characters")
return redirect("/register")
elif len(password) < 8:
flash("Password must be at least 8 characters long")
return redirect("/register")
elif not usernameExists:
if password == password2:
hash_value = generate_password_hash(password)
user = users.register(username, hash_value)
users.set_session(user)
return redirect("/")
else:
flash("Password fields must match")
return redirect("/register")
else:
flash("This username is taken")
return redirect("/register")
@app.route("/profile/<string:username>",methods=["GET"])
def profile(username):
user = users.get_user_by_username(username)
if not user:
return redirect("/")
else:
reviewList = reviews.get_reviews_by_user(user.id)
reviewCount = reviews.get_users_review_count(user.id)
moviesSeenList = movies.get_users_seen_movies_list(user.id)
watchLaterList = movies.get_users_watch_later_list(user.id)
moviesSeenCount = movies.get_seen_movies_count(user.id)
watchLaterCount = movies.get_watch_later_count(user.id)
profile =True
return render_template("profile.html", user = user, reviewCount=reviewCount, reviews = reviewList, get_movie=movies.get_movie, moviesSeenList = moviesSeenList, watchLaterList = watchLaterList, get_user=users.get_user, moviesSeenCount = moviesSeenCount, watchLaterCount = watchLaterCount, profile=profile)
@app.route("/add-description", methods=["POST"])
def add_description():
description = request.form['description']
username = request.form['username']
if description > 1000:
flash("description must be less than 1000 characters long")
return redirect(url_for("profile", username=username))
else:
users.add_description(username, description)
return redirect(url_for("profile", username=username))
@app.route("/movie/<int:id>",methods=["GET"])
def movie(id):
movie = movies.get_movie(id)
print("movie.user_id"+ str(movie.user_id))
reviewList = reviews.get_reviews_by_movie(id)
user = users.get_user(movie.user_id)
current_user_id = users.get_session_user_id()
has_been_seen = movies.is_in_seen_list(current_user_id, id)
is_in_watch_later_list = movies.is_in_watch_later_list(current_user_id, id)
reviewCount = reviews.get_movies_review_count(id)
return render_template("movie.html", movie = movie, reviews = reviewList, user = user, get_user=users.get_user, get_average_rating = movies.get_average_rating, has_been_seen = has_been_seen, is_in_watch_later_list = is_in_watch_later_list, reviewCount = reviewCount)
@app.route("/add-movie",methods=["GET", "POST"])
def add_movie():
if request.method == "GET":
return render_template("add_movie_form.html")
else:
name = request.form["name"]
director = request.form["director"]
screenwriter = request.form["screenwriter"]
cast_members = request.form["cast"]
year = request.form["year"]
description = request.form["description"]
user_id = users.get_session_user_id()
if len(name) > 300:
flash("Title of the movie must be less than 300 characters long")
return redirect("/add-movie")
elif not name:
flash("Must include a title")
return redirect("/add-movie")
elif len(director) > 300:
flash("Name of the director must be less than 300 characters long")
return redirect("/add-movie")
elif not director:
flash("Must include name of director")
return redirect("/add-movie")
elif len(screenwriter) > 300:
flash("Name of the screenwriter must be less than 300 characters long")
return redirect("/add-movie")
elif not screenwriter:
flash("Must include name of screenwriter")
return redirect("/add-movie")
elif len(cast_members) > 1000:
flash("List of cast members must be less than 1000 characters long")
return redirect("/add-movie")
elif not cast_members:
flash("Must include a list of cast members")
return redirect("/add-movie")
elif (int(year) < 0) or (int(year) > 3000):
flash("You must give a valid year")
return redirect("/add-movie")
elif len(description) > 1000:
flash("Synopsis must be less than 1000 characters long")
return redirect("/add-movie")
elif not description:
flash("Must include a synopsis")
return redirect("/add-movie")
else:
movies.add_movie(name, director, screenwriter, cast_members, year, description, user_id)
return redirect("/")
@app.route("/add-review/movie_id?<int:movie_id>",methods=["POST"])
def add_review(movie_id):
stars = request.form["stars"]
review = request.form["review"]
user_id = users.get_session_user_id()
if len(review) > 1000:
flash("Review must be less than 1000 characters long")
return redirect(url_for("movie", id=movie_id))
elif not review:
flash("Must include a review")
return redirect(url_for("movie", id=movie_id))
else:
reviews.add_review(stars, review, user_id, movie_id)
return redirect(url_for("movie", id=movie_id))
@app.route("/delete-movie/<int:id>",methods=["POST"])
def delete_movie(id):
movies.delete_movie(id)
flash("Movie deleted successfully")
return redirect("/")
@app.route("/delete-review/<int:id>",methods=["POST"])
def delete_review(id):
reviews.delete_review(id)
flash("Review deleted successfully")
return redirect("/")
@app.route("/movie-seen/<int:movie_id>",methods=["POST"])
def add_movie_to_seen_list(movie_id):
user_id = users.get_session_user_id()
movies.mark_movie_as_seen(user_id, movie_id)
flash("marked as seen")
return redirect(url_for("movie", id=movie_id))
@app.route("/add-to-watch-later/<int:movie_id>",methods=["POST"])
def add_to_watch_later(movie_id):
user_id = users.get_session_user_id()
movies.add_to_watch_later(user_id, movie_id)
flash("added to your watch later list")
return redirect(url_for("movie", id=movie_id))
@app.route("/unmark-movie-as-seen/<int:movie_id>",methods=["POST"])
def unmark_as_seen(movie_id):
user_id = users.get_session_user_id()
movies.delete_from_seen_list(user_id, movie_id)
flash("unmarked as seen")
return redirect(url_for("movie", id=movie_id))
@app.route("/remove-from-watch-later/<int:movie_id>",methods=["POST"])
def remove_from_watch_later(movie_id):
user_id = users.get_session_user_id()
movies.delete_from_watch_later_list(user_id, movie_id)
flash("removed from your watch later list")
return redirect(url_for("movie", id=movie_id))
|
{"/routes.py": ["/app.py", "/users.py", "/movies.py", "/reviews.py"], "/app.py": ["/routes.py"]}
|
23,550
|
henna-h/movie-rating-app
|
refs/heads/main
|
/reviews.py
|
from db import db
def get_reviews_by_user(id):
    """All reviews written by the user, newest first."""
    sql = "SELECT * FROM reviews WHERE user_id=:user_id ORDER BY submitted_at DESC"
    return db.session.execute(sql, {"user_id": id}).fetchall()
def get_reviews_by_movie(id):
    """All reviews for a movie, newest first."""
    sql = "SELECT * FROM reviews WHERE movie_id=:movie_id ORDER BY submitted_at DESC"
    return db.session.execute(sql, {"movie_id": id}).fetchall()
def get_users_review_count(id):
    """Number of reviews written by the user."""
    sql = "SELECT COUNT(*) FROM reviews WHERE user_id=:user_id"
    return db.session.execute(sql, {"user_id": id}).fetchone()[0]
def get_movies_review_count(id):
    """Number of reviews attached to the movie."""
    sql = "SELECT COUNT(*) FROM reviews WHERE movie_id=:movie_id"
    return db.session.execute(sql, {"movie_id": id}).fetchone()[0]
def add_review(stars, review, user_id, movie_id):
    """Insert a review row."""
    params = {"stars": stars, "review": review, "movie_id": movie_id, "user_id": user_id}
    db.session.execute(
        "INSERT INTO reviews (stars, review, movie_id, user_id) VALUES (:stars, :review, :movie_id, :user_id)",
        params,
    )
    db.session.commit()
def delete_review(id):
    """Delete a review row by primary key."""
    db.session.execute("DELETE FROM reviews WHERE id=:id", {"id": id})
    db.session.commit()
|
{"/routes.py": ["/app.py", "/users.py", "/movies.py", "/reviews.py"], "/app.py": ["/routes.py"]}
|
23,551
|
henna-h/movie-rating-app
|
refs/heads/main
|
/app.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import getenv
from flask_wtf.csrf import CSRFProtect
# Application setup: secret key from the environment,
# CSRF protection enabled for every POST form.
app = Flask(__name__)
app.secret_key = getenv("SECRET_KEY")
csrf = CSRFProtect()
csrf.init_app(app)
# Imported for side effects: registers all route handlers on `app`.
import routes
|
{"/routes.py": ["/app.py", "/users.py", "/movies.py", "/reviews.py"], "/app.py": ["/routes.py"]}
|
23,555
|
e-kneip/fibonacci
|
refs/heads/main
|
/fibonacci/__init__.py
|
from .fibonacci import fib
|
{"/tests/test_fibonacci.py": ["/fibonacci/__init__.py"]}
|
23,556
|
e-kneip/fibonacci
|
refs/heads/main
|
/tests/test_fibonacci.py
|
from fibonacci import fib
def test_fibonacci():
    """The first six Fibonacci numbers match the known sequence."""
    expected = [0, 1, 1, 2, 3, 5]
    assert [fib(i) for i in range(len(expected))] == expected
|
{"/tests/test_fibonacci.py": ["/fibonacci/__init__.py"]}
|
23,558
|
mohd353917/shahiproducts
|
refs/heads/master
|
/home/admin.py
|
from django.contrib import admin
from home.models import Location, Manufacturer, Inventory, Product, Shop, Order
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
    # Admin list view shows only the location name.
    list_display = ["name"]
@admin.register(Manufacturer)
class ManufacturerAdmin(admin.ModelAdmin):
    # Admin list view: manufacturer name and its location FK.
    list_display = ["name", "location"]
@admin.register(Inventory)
class InventoryAdmin(admin.ModelAdmin):
    # Admin list view: arrival timestamp, product FK and quantity.
    list_display = ["arrived_at", "product", "quantity"]
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    # Admin list view: name, manufacturer, both prices and current stock.
    list_display = ["name", "manufacturer", "stock_price", "retail_price", "stock"]
@admin.register(Shop)
class ShopAdmin(admin.ModelAdmin):
    # Admin list view: shop name, owner and location FK.
    list_display = ["name", "shopkeeper_name", "location"]
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    # Admin list view: shop, product, quantity ordered and total amount.
    list_display = ["shop", "product", "quantity", "amount"]
|
{"/home/admin.py": ["/home/models.py"]}
|
23,559
|
mohd353917/shahiproducts
|
refs/heads/master
|
/home/urls.py
|
from django.urls import path
from home import views
# Public site routes; all views live in home/views.py.
urlpatterns = [
    path("", views.index),
    path("about", views.about),
    path("contact-us", views.contact_us),
    # NOTE(review): view name is capitalized unlike its siblings — see views.Login.
    path("login", views.Login),
    path("faq", views.faqs),
    path("ShahiSociety", views.shahi_society),
    path("turmeric", views.turmeric),
]
|
{"/home/admin.py": ["/home/models.py"]}
|
23,560
|
mohd353917/shahiproducts
|
refs/heads/master
|
/home/models.py
|
from django.db import models
class Location(models.Model):
    """A geographic location used by manufacturers and shops."""
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class Manufacturer(models.Model):
    """A product manufacturer tied to a location."""
    added_on = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=30)
    location = models.ForeignKey("Location", on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Inventory(models.Model):
    """A stock arrival of some quantity of a product."""
    arrived_at = models.DateTimeField(auto_now_add=True)
    product = models.ForeignKey("Product", on_delete=models.CASCADE)
    quantity = models.IntegerField()
    def __str__(self):
        return f"{self.product} - {self.quantity}"
    def save(self, *args, **kwargs):
        # NOTE(review): stock is incremented only when self.pk is not None,
        # i.e. on UPDATES and never on the first save — this looks inverted,
        # and re-saving adds the quantity again. Confirm intended behavior.
        if self.pk is not None:
            self.product.stock += self.quantity
            self.product.save()
        super().save(*args, **kwargs)
class Product(models.Model):
    """A sellable product with purchase/retail prices and running stock."""
    added_on = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=50)
    manufacturer = models.ForeignKey("Manufacturer", on_delete=models.CASCADE)
    stock_price = models.FloatField()
    retail_price = models.FloatField()
    stock = models.IntegerField(default=0)
    def __str__(self):
        return self.name
class Shop(models.Model):
    """A retail shop run by a shopkeeper at a location."""
    added_on = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=50)
    shopkeeper_name = models.CharField(max_length=50)
    location = models.ForeignKey("Location", on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Order(models.Model):
    """An order of a product quantity placed by a shop."""
    created_on = models.DateTimeField(auto_now_add=True)
    shop = models.ForeignKey("Shop", on_delete=models.CASCADE)
    product = models.ForeignKey("Product", on_delete=models.CASCADE)
    quantity = models.IntegerField()
    amount = models.FloatField()
    def __str__(self):
        return f"{self.shop} - {self.product} - {self.quantity}"
    def save(self, *args, **kwargs):
        # NOTE(review): stock is decremented only when self.pk is not None,
        # i.e. on UPDATES and never on the first save — mirrors the suspect
        # logic in Inventory.save; confirm intended behavior.
        if self.pk is not None:
            self.product.stock -= self.quantity
            self.product.save()
        super().save(*args, **kwargs)
|
{"/home/admin.py": ["/home/models.py"]}
|
23,561
|
mohd353917/shahiproducts
|
refs/heads/master
|
/home/views.py
|
from django.shortcuts import render
def index(request):
    """Home page: the product catalog (all entries share one blurb)."""
    blurb = "Perfect combinaion of Fragrance and Taste/ 100% Natural Food Spices STAY HOME STAY SAFE."
    catalog = [
        ("Turmeric Powder", "/static/img/turmeric1.jpg"),
        ("Coriander Powder", "/static/img/coriander1.jpg"),
        ("Red Chilli Powder", "/static/img/red.jpg"),
        ("Garam Masala", "/static/img/red.jpg"),
    ]
    products = [
        {"title": title, "description": blurb, "image": image}
        for title, image in catalog
    ]
    return render(
        request, template_name="home/index.html", context={"products": products}
    )
def about(request):
    """Render the static about page."""
    return render(request, template_name="home/about.html")
def contact_us(request):
    """Render the static contact page."""
    return render(request, template_name="home/contact-us.html")
def Login(request):
    """Render the login page.

    NOTE(review): name is capitalized unlike sibling views; it is referenced
    as views.Login in urls.py, so renaming would break that mapping.
    """
    return render(request, template_name="home/login.html")
def faqs(request):
    """Render the static FAQ page."""
    return render(request, template_name="home/faq.html")
def shahi_society(request):
    """Render the static Shahi Society page."""
    return render(request, template_name="home/shahi-society.html")
def turmeric(request):
    """Render the static turmeric product page."""
    return render(request, template_name="home/turmeric.html")
|
{"/home/admin.py": ["/home/models.py"]}
|
23,562
|
mohd353917/shahiproducts
|
refs/heads/master
|
/home/migrations/0001_initial.py
|
# Generated by Django 3.1.2 on 2021-01-12 07:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_on', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=30)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.location')),
],
),
migrations.CreateModel(
name='Shop',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_on', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=50)),
('shopkeeper_name', models.CharField(max_length=50)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.location')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_on', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(max_length=50)),
('stock_price', models.FloatField()),
('retail_price', models.FloatField()),
('stock', models.IntegerField(default=0)),
('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.manufacturer')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('quantity', models.IntegerField()),
('amount', models.FloatField()),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.product')),
('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.shop')),
],
),
migrations.CreateModel(
name='Inventory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('arrived_at', models.DateTimeField(auto_now_add=True)),
('quantity', models.IntegerField()),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.product')),
],
),
]
|
{"/home/admin.py": ["/home/models.py"]}
|
23,573
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/trading_session.py
|
import asyncio
import logging
from signal import SIGTERM, SIGINT
from typing import List
from async_exchange.exchange import Exchange
from async_exchange.trader import Trader
default_logger = logging.getLogger(__name__)
CANCEL_SIGNALS = (SIGTERM, SIGINT)
class TradingSession:
    """Trading session generator.

    Activates all ``traders`` that interact with the given ``exchange``.
    """
    def __init__(
        self, traders: List[Trader], exchange: Exchange = None, logger=None
    ):
        self.traders = traders
        if exchange is not None:
            self.exchange = exchange
        else:
            self.exchange = Exchange(logger=logger)
        # BUG FIX: the original only assigned self.logger when logger was
        # None, leaving the attribute unset whenever a logger WAS supplied.
        self.logger = logger if logger is not None else default_logger
        for trader in self.traders:
            self.exchange.register_trader(trader)
    def run(self):
        """Blocking entry point: drive the async session to completion."""
        asyncio.run(self._runnable())
    async def _runnable(self):
        """Install cancel-signal handlers and run every trader's cycle concurrently."""
        loop = asyncio.get_running_loop()
        for signal in CANCEL_SIGNALS:
            loop.add_signal_handler(signal, self.shutdown, signal)
        tasks = [trader.cycle() for trader in self.traders]
        group = asyncio.gather(*tasks, return_exceptions=False)
        try:
            await group
        except asyncio.CancelledError:
            default_logger.info("Trading session cancelled.")
    def shutdown(self, signal: str):
        """Signal handler: cancel all tasks and shut the exchange down."""
        loop = asyncio.get_running_loop()
        for task in asyncio.all_tasks(loop=loop):
            task.cancel()
        default_logger.info(f"Got signal: {signal!s}, shutting down.")
        # Ignore further signals while tearing down.
        loop.remove_signal_handler(SIGTERM)
        loop.add_signal_handler(SIGINT, lambda: None)
        self.exchange.shutdown()
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,574
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/exchange.py
|
from collections import defaultdict, deque
import logging
from async_exchange.orders import BuyOrder, SellOrder, _Order
from async_exchange.trader import (
NotEnoughMoneyError,
NotEnoughStocksError,
Trader,
)
logger = logging.getLogger(__name__)
# The matching engine reports recoverable problems (a buyer who cannot
# afford a matched order, a seller short of stock) via ``logger.warning``.
# Setting the level to ERROR silenced every one of those warnings, so keep
# the logger at WARNING so they are actually emitted.
logger.setLevel(logging.WARNING)
class Level(deque):
    """FIFO queue of resting orders at a single price point of the book."""
class Exchange:
    """A limit order book that matches buy and sell orders by price.

    Orders rest in per-price ``Level`` deques, one ``defaultdict`` per
    book side.  Matching is price/time priority: an incoming order trades
    against the opposite side's best price, oldest resting order first.
    """
    def __init__(self, logger=None):
        # price -> Level (FIFO deque of orders) for each side of the book
        self.buy_levels = defaultdict(Level)
        self.sell_levels = defaultdict(Level)
        # Optional event sink exposing ``send_event`` (e.g. an
        # InfluxDBLogger); events are dropped when it is None.
        self._logger = logger
    def log_event(self, event_type, message):
        """Forward an event to the attached logger, if one is set."""
        if self._logger is not None:
            self._logger.send_event(record_type=event_type, message=message)
    def __repr__(self):
        # ASCII sketch of the book: sells on top (highest price first),
        # buys below; levels whose total amount is zero are filtered out.
        _repr = "\n______________\n"
        _repr += "Buy | Sell price\n |\n"
        _repr += "\n".join(
            f" |- {sum(order.amount for order in level):4} {price}"
            for price, level in sorted(self.sell_levels.items(), reverse=True)
            if sum(order.amount for order in level) > 0
        )
        _repr += "\n------+-------\n"
        _repr += "\n".join(
            f"{sum(order.amount for order in level):4} -| {price:8}"
            for price, level in sorted(self.buy_levels.items(), reverse=True)
            if sum(order.amount for order in level) > 0
        )
        _repr += "\n______________\n"
        return _repr
    @property
    def best_buy(self):
        """Highest buy price with at least one resting order, else None."""
        try:
            best_buy_price = max(
                price
                for price, orders in self.buy_levels.items()
                if len(orders) > 0
            )
        except ValueError:
            # max() over an empty generator: the buy side is empty.
            return None
        else:
            return best_buy_price
    @property
    def best_sell(self):
        """Lowest sell price with at least one resting order, else None."""
        try:
            best_sell_price = min(
                price
                for price, orders in self.sell_levels.items()
                if len(orders) > 0
            )
        except ValueError:
            # min() over an empty generator: the sell side is empty.
            return None
        else:
            return best_sell_price
    def process_order(self, order):
        """Dispatch ``order`` to the matching routine for its side.

        Zero-amount orders and unknown order types are silently ignored.
        """
        if order.amount == 0:
            return
        if isinstance(order, BuyOrder):
            self._process_buy_order(order)
        elif isinstance(order, SellOrder):
            self._process_sell_order(order)
    def _exchange_assets(
        self, buyer: Trader, seller: Trader, stocks: int, money: int
    ):
        """Move ``stocks`` from seller to buyer and ``money`` the other way.

        The traders' own checks raise NotEnoughMoneyError /
        NotEnoughStocksError *before* any balance is touched, so a failed
        exchange leaves both parties unchanged.
        """
        if stocks == 0:
            return
        buyer.has_enough_money(money)
        seller.has_enough_stocks(stocks)
        buyer.money -= money
        seller.money += money
        buyer.stocks += stocks
        seller.stocks -= stocks
        self.log_event(
            event_type="exchange",
            message={"price": money / stocks, "amount": stocks},
        )
    def _process_buy_order(self, order: BuyOrder):
        """Match ``order`` against the best sell level, recursively.

        If no sell crosses the order's price the order rests in the book.
        On insufficient buyer funds the incoming order is shrunk to what
        the buyer can afford; on insufficient seller stocks the resting
        sell is shrunk.  Either way matching is retried recursively.
        """
        if order.amount <= 0:
            return
        current_best_sell = self.best_sell
        if current_best_sell is None or order.price < current_best_sell:
            self.buy_levels[order.price].append(order)
            return
        # Oldest order at the best sell price has priority.
        matched_sell_order = self.sell_levels[current_best_sell][0]
        stocks_to_transfer = min(order.amount, matched_sell_order.amount)
        # The trade executes at the resting (sell) order's price.
        money_to_transfer = stocks_to_transfer * matched_sell_order.price
        try:
            self._exchange_assets(
                order.owner,
                matched_sell_order.owner,
                stocks_to_transfer,
                money_to_transfer,
            )
        except NotEnoughMoneyError:
            logger.warning(
                f"Could not complete exchange: buyer {order.owner} does not"
                " have enough money. Adjusting the buyer's order and retrying"
                " the exchange."
            )
            buyer_can_afford = int(
                order.owner.money / matched_sell_order.price
            )
            order.amount = buyer_can_afford
            self._process_buy_order(order)
        except NotEnoughStocksError:
            _seller = matched_sell_order.owner
            logger.warning(
                f"Could not complete exchange: seller {_seller}"
                " does not have enough stocks. Adjusting the seller's order "
                "and retrying the exchange."
            )
            matched_sell_order.amount = matched_sell_order.owner.stocks
            self._process_buy_order(order)
        else:
            order.amount -= stocks_to_transfer
            matched_sell_order.amount -= stocks_to_transfer
            if matched_sell_order.amount == 0:
                # Drop the filled resting order and, when that empties
                # the level, the level itself.
                self.sell_levels[current_best_sell].popleft()
                if len(self.sell_levels[current_best_sell]) == 0:
                    self.sell_levels.pop(current_best_sell)
            # Keep matching any remainder of the incoming order.
            self._process_buy_order(order)
    def _process_sell_order(self, order: SellOrder):
        """Mirror image of ``_process_buy_order`` for incoming sells."""
        if order.amount <= 0:
            return
        current_best_buy = self.best_buy
        if current_best_buy is None or order.price > current_best_buy:
            self.sell_levels[order.price].append(order)
            return
        matched_buy_order = self.buy_levels[current_best_buy][0]
        stocks_to_transfer = min(order.amount, matched_buy_order.amount)
        # The trade executes at the resting (buy) order's price.
        money_to_transfer = stocks_to_transfer * matched_buy_order.price
        try:
            self._exchange_assets(
                matched_buy_order.owner,
                order.owner,
                stocks_to_transfer,
                money_to_transfer,
            )
        except NotEnoughMoneyError:
            logger.warning(
                f"Could not complete exchange: buyer {matched_buy_order.owner}"
                " does not have enough money. Adjusting the buyer's order and"
                " retrying the exchange."
            )
            buyer_can_afford = int(
                matched_buy_order.owner.money / matched_buy_order.price
            )
            matched_buy_order.amount = buyer_can_afford
            self._process_sell_order(order)
        except NotEnoughStocksError:
            logger.warning(
                f"Could not complete exchange: seller {order.owner}"
                " does not have enough stocks. Adjusting the seller's order "
                "and retrying the exchange."
            )
            order.amount = order.owner.stocks
            self._process_sell_order(order)
        else:
            order.amount -= stocks_to_transfer
            matched_buy_order.amount -= stocks_to_transfer
            if matched_buy_order.amount == 0:
                self.buy_levels[current_best_buy].popleft()
                if len(self.buy_levels[current_best_buy]) == 0:
                    self.buy_levels.pop(current_best_buy)
            self._process_sell_order(order)
    def cancel_order(self, order: _Order) -> bool:
        """ Cancel the given ``order`` by removing it from the OrderBook.
        The order is identified by its ``id``.
        Returns True when the order was found and removed, False otherwise
        (unknown order type, or order not present in the book).
        Note: the existing implementation is far from optimal because the
        ``Level`` is essentially a deque with O(n) complexity for removal by
        index. Alternative approaches are:
        - set the volume of the ``Order`` to zero in the orderbook (such that
        it does not yield actual exchange)
        - rework the ``Level`` to be an ordered dict
        """
        if isinstance(order, BuyOrder):
            level = self.buy_levels[order.price]
        elif isinstance(order, SellOrder):
            level = self.sell_levels[order.price]
        else:
            return False
        index_in_orderbook = None
        for index, existing_order in enumerate(level):
            if existing_order.id == order.id:
                index_in_orderbook = index
                break
        if index_in_orderbook is None:
            return False
        del level[index_in_orderbook]
        return True
    def standing_orders(self, trader):
        """Return ``(buy_orders, sell_orders)`` tuples owned by ``trader``.

        Ownership is tested by identity (``is``), not equality.
        """
        buy_orders = tuple(
            order
            for level in self.buy_levels.values()
            for order in level
            if order.owner is trader
        )
        sell_orders = tuple(
            order
            for level in self.sell_levels.values()
            for order in level
            if order.owner is trader
        )
        return buy_orders, sell_orders
    def get_orderbook(self):
        """Return ``(buy_side, sell_side)`` as price -> total-amount dicts."""
        buy_orders = {
            price: sum(order.amount for order in level)
            for price, level in self.buy_levels.items()
        }
        sell_orders = {
            price: sum(order.amount for order in level)
            for price, level in self.sell_levels.items()
        }
        return buy_orders, sell_orders
    def register_trader(self, trader: Trader):
        """Register a ``trader`` in the Exchange.
        Provides the ``exchange_api`` to the ``trader``.
        """
        trader.exchange_api = self.api
    def shutdown(self):
        """Perform necessary actions to shutdown the ``Exchange`` instance."""
        pass
    @property
    def api(self):
        # A fresh restricted facade over this exchange for traders.
        return ExchangeAPI(exchange=self)
class ExchangeAPI:
    """Restricted facade handed to traders instead of the full Exchange.

    Caches the exchange's bound methods at construction time and exposes
    them through read-only properties, so traders can submit, inspect
    and cancel orders without holding a reference to the exchange.
    """
    def __init__(self, exchange: Exchange):
        # Bind once; the read-only properties below just return these.
        self._process_order = exchange.process_order
        self._standing_orders = exchange.standing_orders
        self._get_order_book = exchange.get_orderbook
        self._cancel_order = exchange.cancel_order
    @property
    def process_order(self):
        # Submit an order for matching (Exchange.process_order).
        return self._process_order
    @property
    def standing_orders(self):
        # Look up a trader's resting orders (Exchange.standing_orders).
        return self._standing_orders
    @property
    def get_orderbook(self):
        # Aggregated book snapshot (Exchange.get_orderbook).
        return self._get_order_book
    @property
    def cancel_order(self):
        # Remove a resting order by id (Exchange.cancel_order).
        return self._cancel_order
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,575
|
mikpim01/async_exchange
|
refs/heads/master
|
/tests/test_trader.py
|
import unittest
from async_exchange.exchange import Exchange
from async_exchange.trader import (
Trader,
NotEnoughMoneyError,
NotEnoughStocksError,
)
class TestTrader(unittest.TestCase):
    """Unit tests for ``Trader``: balance/stock validation, order
    submission through the exchange API, and order cancellation."""
    def setUp(self):
        # A fresh exchange plus one trader wired to it for every test.
        self.exchange = Exchange()
        api = self.exchange.api
        self.trader = Trader(exchange_api=api, money=100, stocks=10)
    def test_trader_money(self):
        # The money property accepts any non-negative value and rejects
        # negatives with NotEnoughMoneyError.
        self.assertEqual(self.trader.money, 100)
        self.trader.money = 1000
        self.assertEqual(self.trader.money, 1000)
        self.trader.money = 0
        self.assertEqual(self.trader.money, 0)
        with self.assertRaises(NotEnoughMoneyError):
            self.trader.money = -1
    def test_trader_has_enough_money(self):
        self.assertTrue(self.trader.has_enough_money(10))
        with self.assertRaises(NotEnoughMoneyError):
            self.trader.has_enough_money(10000)
    def test_trader_stocks(self):
        # Mirrors test_trader_money for the stock inventory.
        self.assertEqual(self.trader.stocks, 10)
        self.trader.stocks = 1000
        self.assertEqual(self.trader.stocks, 1000)
        self.trader.stocks = 0
        self.assertEqual(self.trader.stocks, 0)
        with self.assertRaises(NotEnoughStocksError):
            self.trader.stocks = -1
    def test_trader_has_enough_stocks(self):
        self.assertTrue(self.trader.has_enough_stocks(10))
        with self.assertRaises(NotEnoughStocksError):
            self.trader.has_enough_stocks(10000)
    def test_inspect_exchange(self):
        self.assertEqual(
            self.trader.inspect_exchange(), self.exchange.get_orderbook()
        )
    def test_sell(self):
        # A sell with no crossing buy rests in the sell side of the book.
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        self.trader.sell(amount=5, price=12)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        sell_order, = self.exchange.sell_levels[12]
        self.assertIs(self.trader, sell_order.owner)
    def test_buy(self):
        # A buy with no crossing sell rests in the buy side of the book.
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        self.trader.buy(amount=5, price=12)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_order, = self.exchange.buy_levels[12]
        self.assertIs(self.trader, buy_order.owner)
    def test_standing_orders(self):
        # standing_orders reflects every resting order the trader owns.
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 0)
        self.assertEqual(len(sell_orders), 0)
        self.trader.buy(amount=20, price=10)
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 1)
        buy_order, = buy_orders
        self.assertEqual(buy_order.price, 10)
        self.assertEqual(buy_order.amount, 20)
        self.assertEqual(len(sell_orders), 0)
        self.trader.sell(amount=30, price=40)
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 1)
        buy_order, = buy_orders
        self.assertEqual(buy_order.price, 10)
        self.assertEqual(buy_order.amount, 20)
        self.assertEqual(len(sell_orders), 1)
        sell_order, = sell_orders
        self.assertEqual(sell_order.price, 40)
        self.assertEqual(sell_order.amount, 30)
    def test_cancel_order_buy(self):
        # Cancelling a resting buy succeeds once, then reports False.
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 0)
        self.assertEqual(len(sell_orders), 0)
        self.trader.buy(amount=20, price=10)
        (buy_order,), _ = self.trader.standing_orders
        result = self.trader.cancel_order(buy_order)
        self.assertTrue(result)
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 0)
        self.assertEqual(len(sell_orders), 0)
        result = self.trader.cancel_order(buy_order)
        self.assertFalse(result)
    def test_cancel_order_sell(self):
        # Cancelling a resting sell succeeds once, then reports False.
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 0)
        self.assertEqual(len(sell_orders), 0)
        self.trader.sell(amount=20, price=10)
        _, (sell_order,) = self.trader.standing_orders
        result = self.trader.cancel_order(sell_order)
        self.assertTrue(result)
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 0)
        self.assertEqual(len(sell_orders), 0)
        result = self.trader.cancel_order(sell_order)
        self.assertFalse(result)
    def test_cancel_order_many_orders(self):
        # Cancellation removes only the targeted order and leaves the
        # other resting orders untouched.
        self.trader.sell(amount=10, price=40)
        self.trader.sell(amount=20, price=30)
        self.trader.buy(amount=30, price=20)
        self.trader.buy(amount=40, price=10)
        (buy_order_1, buy_order_2), (
            sell_order_1,
            sell_order_2,
        ) = self.trader.standing_orders
        result = self.trader.cancel_order(buy_order_2)
        self.assertTrue(result)
        (actual_buy_order_1,), (
            actual_sell_order_1,
            actual_sell_order_2,
        ) = self.trader.standing_orders
        self.assertIs(actual_buy_order_1, buy_order_1)
        self.assertIs(actual_sell_order_1, sell_order_1)
        self.assertIs(actual_sell_order_2, sell_order_2)
        result = self.trader.cancel_order(sell_order_1)
        self.assertTrue(result)
        (actual_buy_order_1,), (
            actual_sell_order_2,
        ) = self.trader.standing_orders
        self.assertIs(actual_buy_order_1, buy_order_1)
        self.assertIs(actual_sell_order_2, sell_order_2)
    def test_cancel_all_orders(self):
        self.trader.sell(amount=10, price=40)
        self.trader.sell(amount=20, price=30)
        self.trader.buy(amount=30, price=20)
        self.trader.buy(amount=40, price=10)
        (buy_order_1, buy_order_2), (
            sell_order_1,
            sell_order_2,
        ) = self.trader.standing_orders
        self.trader.cancel_all_orders()
        buy_orders, sell_orders = self.trader.standing_orders
        self.assertEqual(len(buy_orders), 0)
        self.assertEqual(len(sell_orders), 0)
if __name__ == "__main__":
    # Allow running this test module directly with ``python``.
    unittest.main()
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,576
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/logging/influxdb_logger.py
|
import logging
from influxdb_client import (
BucketRetentionRules,
InfluxDBClient,
Point,
WriteOptions
)
logger = logging.getLogger(__name__)
# Connection settings for a local InfluxDB 2.x instance.
INFLUXDB_HOSTNAME = "http://localhost"
INFLUXDB_PORT = 8086
INFLUXDB_URL = f"{INFLUXDB_HOSTNAME}:{INFLUXDB_PORT}"
# A default auth token for local development — override for real deployments.
INFLUXDB_TOKEN = "my-super-secret-auth-token"
ORGANIZATION = "myorg"
BUCKET = "exchange"
# Record-shape field names (InfluxDB line-protocol terminology).
TIME_FIELD = "time"
RECORD_TYPE_FIELD = "measurement"
FIELDS_TYPE_FIELD = "fields"
# Number of points buffered by the write API before a batch is flushed.
LOG_BATCH_SIZE = 1000
class InfluxDBLogger:
    """Event logger that persists exchange events into an InfluxDB bucket."""
    def __init__(
        self,
        bucket_name=BUCKET,
        batch_size=LOG_BATCH_SIZE,
        data_retention=3600,
    ):
        """Connect to InfluxDB and ensure the target bucket exists.

        ``data_retention`` is the bucket expiry in seconds; ``None``
        creates the bucket without a retention rule.  The bucket is only
        created when it is not already present.
        """
        self.organization = ORGANIZATION
        self.client = InfluxDBClient(
            url=INFLUXDB_URL, token=INFLUXDB_TOKEN, org=self.organization
        )
        self.batch_size = batch_size
        self.bucket_name = bucket_name
        # Batched writer: points are flushed in groups of ``batch_size``.
        self.write_api = self.client.write_api(
            write_options=WriteOptions(batch_size=self.batch_size)
        )
        self.query_api = self.client.query_api()
        self.buckets_api = self.client.buckets_api()
        bucket = self.buckets_api.find_bucket_by_name(self.bucket_name)
        if bucket is None:
            logger.warning(
                f"Bucket {self.bucket_name!r} not found. "
                f"Creating a bucket {self.bucket_name!r}."
            )
            retention_rules = None
            if data_retention is not None:
                retention_rules = BucketRetentionRules(
                    type="expire", every_seconds=data_retention
                )
            self.buckets_api.create_bucket(
                bucket_name=self.bucket_name,
                retention_rules=retention_rules,
                org=self.organization,
            )
    def send_event(self, record_type, message):
        """Write one event: ``record_type`` becomes the measurement name
        and each key/value pair of ``message`` becomes a point field."""
        point = Point(record_type)
        for key, value in message.items():
            point = point.field(key, value)
        self.write_api.write(bucket=self.bucket_name, record=point)
    def get_events(self, record_type):
        """Return events of ``record_type`` from the last five minutes.

        NOTE(review): only the *first* table of the query result is
        inspected; if the query ever returns multiple tables, later
        records are dropped — confirm this is intended.
        """
        query = '''
        from(bucket: currentBucket)
            |> range(start: -5m, stop: now())
            |> filter(fn: (r) => r._measurement == recordType)
            |> pivot(rowKey:["_time"], columnKey: ["_field"], \
valueColumn: "_value")
        '''
        # Bucket and measurement are passed as Flux query parameters.
        params = {"currentBucket": self.bucket_name, "recordType": record_type}
        tables = self.query_api.query(query=query, params=params)
        if len(tables) > 0:
            table, *_ = tables
            events = table.records
        else:
            events = []
        return events
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,577
|
mikpim01/async_exchange
|
refs/heads/master
|
/tests/test_exchange.py
|
import unittest
from async_exchange.exchange import Exchange, ExchangeAPI
from async_exchange.trader import Trader
from async_exchange.orders import BuyOrder, SellOrder
class TestExchange(unittest.TestCase):
    """Integration-style tests of the matching engine: resting orders,
    crossing orders, partial fills, shortfall handling, price/time
    priority and cancellation."""
    def setUp(self):
        # Five traders, each starting with 100 money and 10 stocks.
        self.exchange = Exchange()
        api = self.exchange.api
        self.trader_1 = Trader(exchange_api=api, money=100, stocks=10)
        self.trader_2 = Trader(exchange_api=api, money=100, stocks=10)
        self.trader_3 = Trader(exchange_api=api, money=100, stocks=10)
        self.trader_4 = Trader(exchange_api=api, money=100, stocks=10)
        self.trader_5 = Trader(exchange_api=api, money=100, stocks=10)
    def test_register_trader(self):
        trader = Trader()
        self.assertIsNone(trader.exchange_api)
        self.exchange.register_trader(trader)
        self.assertIsNotNone(trader.exchange_api)
        self.assertIsInstance(trader.exchange_api, ExchangeAPI)
    def test_submit_buy_order(self):
        # A lone buy rests unchanged at its price level.
        self.trader_1.buy(100, 10)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_level = self.exchange.buy_levels[10]
        (existing_order,) = buy_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 100)
        self.assertEqual(existing_order.price, 10)
        self.assertIs(existing_order.owner, self.trader_1)
    def test_submit_sell_order(self):
        # A lone sell rests unchanged at its price level.
        self.trader_1.sell(100, 10)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        sell_level = self.exchange.sell_levels[10]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, SellOrder)
        self.assertEqual(existing_order.amount, 100)
        self.assertEqual(existing_order.price, 10)
        self.assertIs(existing_order.owner, self.trader_1)
    def test_submit_nonmatching_sell_buy_orders(self):
        # Buy at 10 and sell at 20 do not cross; both rest in the book.
        self.trader_1.buy(100, 10)
        self.trader_2.sell(42, 20)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_level = self.exchange.buy_levels[10]
        (existing_order,) = buy_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 100)
        self.assertEqual(existing_order.price, 10)
        self.assertIs(existing_order.owner, self.trader_1)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        sell_level = self.exchange.sell_levels[20]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, SellOrder)
        self.assertEqual(existing_order.amount, 42)
        self.assertEqual(existing_order.price, 20)
        self.assertIs(existing_order.owner, self.trader_2)
    def test_submit_matching_sell_order(self):
        # A crossing sell partially fills the resting buy.
        self.trader_1.buy(100, 10)
        self.trader_2.sell(5, 10)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        # NOTE(review): variable name is a misnomer — this is the buy level.
        sell_level = self.exchange.buy_levels[10]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 95)
        self.assertEqual(existing_order.price, 10)
        self.assertIs(existing_order.owner, self.trader_1)
        self.assertEqual(self.trader_1.money, 100 - 10 * 5)
        self.assertEqual(self.trader_2.money, 100 + 10 * 5)
        self.assertEqual(self.trader_1.stocks, 10 + 5)
        self.assertEqual(self.trader_2.stocks, 10 - 5)
    def test_submit_matching_sell_order_not_enough_money(self):
        # Buyer can only afford 1 of 5 at price 80; the rest of the sell
        # keeps resting.
        self.trader_1.buy(5, 80)
        self.trader_2.sell(5, 80)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        sell_level = self.exchange.sell_levels[80]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, SellOrder)
        self.assertEqual(existing_order.amount, 4)
        self.assertEqual(existing_order.price, 80)
        self.assertIs(existing_order.owner, self.trader_2)
        self.assertEqual(self.trader_1.money, 20)
        self.assertEqual(self.trader_2.money, 180)
        self.assertEqual(self.trader_1.stocks, 11)
        self.assertEqual(self.trader_2.stocks, 9)
    def test_submit_matching_sell_order_not_enough_money_2(self):
        # Buyer cannot afford even one stock; nothing is exchanged and
        # the buy order disappears from the book.
        self.trader_1.buy(5, 180)
        self.trader_2.sell(5, 180)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        sell_level = self.exchange.sell_levels[180]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, SellOrder)
        self.assertEqual(existing_order.amount, 5)
        self.assertEqual(existing_order.price, 180)
        self.assertIs(existing_order.owner, self.trader_2)
        self.assertEqual(self.trader_1.money, 100)
        self.assertEqual(self.trader_2.money, 100)
        self.assertEqual(self.trader_1.stocks, 10)
        self.assertEqual(self.trader_2.stocks, 10)
    def test_submit_matching_sell_order_not_enough_stocks(self):
        # Seller owns only 1 stock; the sell shrinks to what is held.
        self.trader_2.stocks = 1
        self.trader_1.buy(10, 2)
        self.trader_2.sell(5, 2)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_level = self.exchange.buy_levels[2]
        (existing_order,) = buy_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 9)
        self.assertEqual(existing_order.price, 2)
        self.assertIs(existing_order.owner, self.trader_1)
        self.assertEqual(self.trader_1.money, 98)
        self.assertEqual(self.trader_2.money, 102)
        self.assertEqual(self.trader_1.stocks, 11)
        self.assertEqual(self.trader_2.stocks, 0)
    def test_submit_matching_sell_order_not_enough_stocks_2(self):
        # Seller owns nothing; no exchange takes place at all.
        self.trader_2.stocks = 0
        self.trader_1.buy(10, 2)
        self.trader_2.sell(5, 2)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_level = self.exchange.buy_levels[2]
        (existing_order,) = buy_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 10)
        self.assertEqual(existing_order.price, 2)
        self.assertIs(existing_order.owner, self.trader_1)
        self.assertEqual(self.trader_1.money, 100)
        self.assertEqual(self.trader_2.money, 100)
        self.assertEqual(self.trader_1.stocks, 10)
        self.assertEqual(self.trader_2.stocks, 0)
    def test_submit_matching_buy_order_not_enough_money(self):
        # Mirror of the sell-side shortfall test: incoming buy is shrunk.
        self.trader_1.sell(5, 80)
        self.trader_2.buy(5, 80)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        sell_level = self.exchange.sell_levels[80]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, SellOrder)
        self.assertEqual(existing_order.amount, 4)
        self.assertEqual(existing_order.price, 80)
        self.assertIs(existing_order.owner, self.trader_1)
        self.assertEqual(self.trader_1.money, 180)
        self.assertEqual(self.trader_2.money, 20)
        self.assertEqual(self.trader_1.stocks, 9)
        self.assertEqual(self.trader_2.stocks, 11)
    def test_submit_matching_buy_order_not_enough_money_2(self):
        self.trader_1.sell(5, 180)
        self.trader_2.buy(5, 180)
        self.assertEqual(len(self.exchange.sell_levels), 1)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        sell_level = self.exchange.sell_levels[180]
        (existing_order,) = sell_level
        self.assertIsInstance(existing_order, SellOrder)
        self.assertEqual(existing_order.amount, 5)
        self.assertEqual(existing_order.price, 180)
        self.assertIs(existing_order.owner, self.trader_1)
        self.assertEqual(self.trader_1.money, 100)
        self.assertEqual(self.trader_2.money, 100)
        self.assertEqual(self.trader_1.stocks, 10)
        self.assertEqual(self.trader_2.stocks, 10)
    def test_submit_matching_buy_order_not_enough_stocks(self):
        self.trader_1.stocks = 1
        self.trader_1.sell(10, 2)
        self.trader_2.buy(5, 2)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_level = self.exchange.buy_levels[2]
        (existing_order,) = buy_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 4)
        self.assertEqual(existing_order.price, 2)
        self.assertIs(existing_order.owner, self.trader_2)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(self.trader_1.money, 102)
        self.assertEqual(self.trader_2.money, 98)
        self.assertEqual(self.trader_1.stocks, 0)
        self.assertEqual(self.trader_2.stocks, 11)
    def test_submit_matching_buy_order_not_enough_stocks_2(self):
        self.trader_1.stocks = 0
        self.trader_1.sell(10, 2)
        self.trader_2.buy(5, 2)
        self.assertEqual(len(self.exchange.buy_levels), 1)
        buy_level = self.exchange.buy_levels[2]
        (existing_order,) = buy_level
        self.assertIsInstance(existing_order, BuyOrder)
        self.assertEqual(existing_order.amount, 5)
        self.assertEqual(existing_order.price, 2)
        self.assertIs(existing_order.owner, self.trader_2)
        self.assertEqual(len(self.exchange.sell_levels), 0)
        self.assertEqual(self.trader_1.money, 100)
        self.assertEqual(self.trader_2.money, 100)
        self.assertEqual(self.trader_1.stocks, 0)
        self.assertEqual(self.trader_2.stocks, 10)
    def test_sell_below_ask_price(self):
        # A sell priced below several resting buys sweeps the best buy
        # levels first (price priority), trading at the resting prices.
        self.trader_1.buy(1, 4)
        self.trader_2.buy(2, 3)
        self.trader_3.buy(3, 1)
        self.trader_4.sell(4, 2)
        sell_levels = self.exchange.sell_levels
        self.assertEqual(len(sell_levels), 1)
        (existing_sell_order,) = sell_levels[2]
        self.assertEqual(existing_sell_order.amount, 1)
        self.assertEqual(existing_sell_order.price, 2)
        self.assertIs(existing_sell_order.owner, self.trader_4)
        buy_levels = self.exchange.buy_levels
        self.assertEqual(len(buy_levels), 1)
        self.assertEqual(len(buy_levels[4]), 0)
        self.assertEqual(len(buy_levels[3]), 0)
        (existing_buy_order,) = buy_levels[1]
        self.assertEqual(existing_buy_order.amount, 3)
        self.assertEqual(existing_buy_order.price, 1)
        self.assertIs(existing_buy_order.owner, self.trader_3)
        self.assertEqual(self.trader_1.stocks, 11)
        self.assertEqual(self.trader_1.money, 96)
        self.assertEqual(self.trader_2.stocks, 12)
        self.assertEqual(self.trader_2.money, 94)
        self.assertEqual(self.trader_3.stocks, 10)
        self.assertEqual(self.trader_3.money, 100)
        self.assertEqual(self.trader_4.stocks, 7)
        self.assertEqual(self.trader_4.money, 110)
    def test_buy_above_sell_price(self):
        # A buy priced above several resting sells sweeps the cheapest
        # sell levels first, trading at the resting prices.
        self.trader_1.sell(1, 4)
        self.trader_2.sell(2, 5)
        self.trader_3.sell(3, 7)
        self.trader_4.buy(4, 6)
        buy_levels = self.exchange.buy_levels
        self.assertEqual(len(buy_levels), 1)
        (existing_buy_order,) = buy_levels[6]
        self.assertEqual(existing_buy_order.amount, 1)
        self.assertEqual(existing_buy_order.price, 6)
        self.assertIs(existing_buy_order.owner, self.trader_4)
        sell_levels = self.exchange.sell_levels
        self.assertEqual(len(sell_levels), 1)
        (existing_sell_order,) = sell_levels[7]
        self.assertEqual(existing_sell_order.amount, 3)
        self.assertEqual(existing_sell_order.price, 7)
        self.assertIs(existing_sell_order.owner, self.trader_3)
        self.assertEqual(self.trader_1.stocks, 9)
        self.assertEqual(self.trader_1.money, 104)
        self.assertEqual(self.trader_2.stocks, 8)
        self.assertEqual(self.trader_2.money, 110)
        self.assertEqual(self.trader_3.stocks, 10)
        self.assertEqual(self.trader_3.money, 100)
        self.assertEqual(self.trader_4.stocks, 13)
        self.assertEqual(self.trader_4.money, 86)
    def test_buy_orders_ordered(self):
        # Time priority: the older buy at the same price fills first.
        self.trader_1.buy(2, 1)
        self.trader_2.buy(3, 1)
        self.trader_3.sell(1, 1)
        self.assertEqual(self.trader_1.stocks, 11)
        self.assertEqual(self.trader_1.money, 99)
        self.assertEqual(self.trader_2.stocks, 10)
        self.assertEqual(self.trader_2.money, 100)
        self.assertEqual(self.trader_3.stocks, 9)
        self.assertEqual(self.trader_3.money, 101)
    def test_sell_orders_ordered(self):
        # Time priority: the older sell at the same price fills first.
        self.trader_1.sell(2, 1)
        self.trader_2.sell(3, 1)
        self.trader_3.buy(1, 1)
        self.assertEqual(self.trader_1.stocks, 9)
        self.assertEqual(self.trader_1.money, 101)
        self.assertEqual(self.trader_2.stocks, 10)
        self.assertEqual(self.trader_2.money, 100)
        self.assertEqual(self.trader_3.stocks, 11)
        self.assertEqual(self.trader_3.money, 99)
    def test_cancel_order(self):
        # Cancellation empties the level but leaves the (empty) level
        # in the defaultdict.
        order = BuyOrder(owner=self.trader_1, amount=10, price=10)
        self.exchange.process_order(order)
        self.assertEqual(len(self.exchange.buy_levels[10]), 1)
        result = self.exchange.cancel_order(order=order)
        self.assertTrue(result)
        self.assertEqual(len(self.exchange.buy_levels[10]), 0)
        self.assertEqual(len(self.exchange.buy_levels), 1)
    def test_cancel_order_two_traders(self):
        # Only the targeted order is removed from a shared level.
        order_1 = BuyOrder(owner=self.trader_1, amount=10, price=10)
        self.exchange.process_order(order_1)
        order_2 = BuyOrder(owner=self.trader_2, amount=10, price=10)
        self.exchange.process_order(order_2)
        result = self.exchange.cancel_order(order=order_1)
        self.assertTrue(result)
        self.assertEqual(len(self.exchange.buy_levels[10]), 1)
        buy_order, = self.exchange.buy_levels[10]
        self.assertIs(buy_order, order_2)
        self.assertEqual(len(self.exchange.buy_levels), 1)
    def test_cancel_order_does_not_exist(self):
        order = BuyOrder(owner=self.trader_1, amount=10, price=10)
        self.assertEqual(len(self.exchange.buy_levels), 0)
        result = self.exchange.cancel_order(order=order)
        self.assertFalse(result)
class TestExchangeAPI(unittest.TestCase):
def setUp(self):
self.exchange = Exchange()
self.api = self.exchange.api
def test_exchange_api(self):
self.assertIsInstance(self.api, ExchangeAPI)
self.assertEqual(self.api.process_order, self.exchange.process_order)
self.assertEqual(self.api.standing_orders, self.exchange.standing_orders)
self.assertEqual(self.api.get_orderbook, self.exchange.get_orderbook)
self.assertEqual(self.api.cancel_order, self.exchange.cancel_order)
if __name__ == "__main__":
    # Allow running this test module directly with ``python``.
    unittest.main()
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,578
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/orders.py
|
from uuid import uuid4
class _Order:
def __init__(self, owner, amount, price):
self.owner = owner
self.amount = amount
self.price = price
self._id = uuid4()
def __repr__(self):
return f"Trader {self.owner._id} {self.action} {self.amount}"
@property
def id(self):
return self._id
class BuyOrder(_Order):
    """Order to acquire stock at no more than its ``price``."""
    action = "buys"
class SellOrder(_Order):
    # Verb used by _Order.__repr__, e.g. "Trader 1 sells 5".
    action = "sells"
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,579
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/trader.py
|
from async_exchange.orders import BuyOrder, SellOrder, _Order
class NotEnoughMoneyError(ValueError):
    """Raised when an operation would leave a trader with negative money."""
    pass
class NotEnoughStocksError(ValueError):
    """Raised when an operation would leave a trader with negative stocks."""
    pass
class Trader:
    """A market participant holding cash and stocks.

    Orders are routed through ``exchange_api``; balances are guarded by
    property setters so they can never go negative.
    """

    # Class-level counter handing out sequential trader ids.
    _id = 1

    def __init__(self, exchange_api=None, money=100, stocks=10):
        self._money = None
        self._stocks = None
        self.exchange_api = exchange_api
        self.money = money
        self.stocks = stocks
        self._id = Trader._id
        Trader._id += 1

    @property
    def money(self):
        """Current cash balance."""
        return self._money

    @money.setter
    def money(self, value):
        # Reject any assignment that would make the balance negative.
        if value < 0:
            raise NotEnoughMoneyError
        self._money = value

    @property
    def stocks(self):
        """Current stock holdings."""
        return self._stocks

    @stocks.setter
    def stocks(self, value):
        if value < 0:
            raise NotEnoughStocksError
        self._stocks = value

    def sell(self, amount, price):
        """Submit a sell order to the exchange."""
        order = SellOrder(self, amount, price)
        self.exchange_api.process_order(order)

    def buy(self, amount, price):
        """Submit a buy order to the exchange."""
        order = BuyOrder(self, amount, price)
        self.exchange_api.process_order(order)

    def has_enough_money(self, money):
        """Return True if at least ``money`` is on hand; raise otherwise."""
        if self.money < money:
            raise NotEnoughMoneyError
        return True

    def has_enough_stocks(self, stocks):
        """Return True if at least ``stocks`` are held; raise otherwise."""
        if self.stocks < stocks:
            raise NotEnoughStocksError
        return True

    def inspect_exchange(self):
        """Return the exchange's current order book."""
        return self.exchange_api.get_orderbook()

    @property
    def standing_orders(self):
        """Two tuples of this trader's standing (buy, sell) orders."""
        return self.exchange_api.standing_orders(self)

    def cancel_order(self, order: _Order) -> bool:
        """Remove ``order`` from the exchange's order book.

        Returns ``True`` on success, ``False`` if it could not be removed.
        """
        return self.exchange_api.cancel_order(order)

    def cancel_all_orders(self) -> bool:
        """Cancel every standing order; True iff all were removed.

        Stops at the first failure, mirroring ``all()`` short-circuiting
        over the cancellation results.
        """
        buy_orders, sell_orders = self.standing_orders
        for pending in buy_orders + sell_orders:
            if not self.cancel_order(pending):
                return False
        return True

    def __str__(self):
        return f"Trader {self._id}: stocks {self.stocks}, cash {self.money}"

    async def cycle(self):
        # Concrete trading strategies must implement their own event loop.
        raise NotImplementedError
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,580
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/logging/tests/test_influxdb_logger.py
|
import unittest
from influxdb_client.client.write_api import SYNCHRONOUS
from async_exchange.logging.influxdb_logger import InfluxDBLogger
class TestInfluxDBLogger(unittest.TestCase):
    """Integration tests for InfluxDBLogger (need a reachable InfluxDB)."""
    def setUp(self):
        self.logger = InfluxDBLogger(bucket_name="test_bucket", batch_size=10)
        # Drop the test bucket after each test so runs stay isolated.
        def cleanup_bucket():
            bucket = self.logger.buckets_api.find_bucket_by_name("test_bucket")
            self.logger.buckets_api.delete_bucket(bucket=bucket)
        # NOTE(review): addCleanup callbacks run LIFO, so the client is closed
        # *before* cleanup_bucket executes -- confirm bucket deletion still
        # works on a closed client, or swap the registration order.
        self.addCleanup(cleanup_bucket)
        self.addCleanup(self.logger.client.close)
    def test_send_event_get_event(self):
        # Convert the write API to synchronous to avoid waiting for background
        # threads finishing writing
        self.logger.write_api = self.logger.client.write_api(
            write_options=SYNCHRONOUS
        )
        self.logger.send_event(
            record_type="type",
            message={"data": "my data", "another data": "something else"}
        )
        # Exactly one point is expected for the "type" measurement.
        point, = self.logger.get_events("type")
        self.assertEqual(point["_measurement"], "type")
        self.assertEqual(point["data"], "my data")
        self.assertEqual(point["another data"], "something else")
    def test_batch_set(self):
        # The batch_size given to the constructor must reach the write API.
        self.assertEqual(
            self.logger.write_api._write_options.batch_size, 10
        )
# Allow running this test module directly with ``python <file>``.
if __name__ == "__main__":
    unittest.main()
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,581
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/api.py
|
from .trader import Trader
from .exchange import Exchange
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,582
|
mikpim01/async_exchange
|
refs/heads/master
|
/demos/run_session.py
|
from async_exchange.algorithms.random_trader import RandomTrader
from async_exchange.trading_session import TradingSession
# Prefer structured logging via InfluxDB when the optional dependency is
# installed; otherwise fall back to verbose console traders.
try:
    from async_exchange.logging.influxdb_logger import InfluxDBLogger
except ImportError:
    logger = None
    RandomTrader.verbose = True
else:
    logger = InfluxDBLogger()
    RandomTrader.verbose = False

# Number of RandomTrader instances participating in the demo session.
NOF_TRADERS = 100

if __name__ == "__main__":
    traders = [RandomTrader(money=300, stocks=10) for _ in range(NOF_TRADERS)]
    session = TradingSession(traders=traders, logger=logger)
    session.run()
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,583
|
mikpim01/async_exchange
|
refs/heads/master
|
/tests/test_trading_session.py
|
import unittest
from unittest import TestCase
from async_exchange.exchange import Exchange
from async_exchange.trader import Trader
from async_exchange.trading_session import TradingSession
class TestTradingSession(TestCase):
    """Checks for TradingSession construction and trader wiring."""

    def setUp(self):
        self.trading_session = TradingSession(
            traders=[Trader()], exchange=Exchange()
        )

    def test_init(self):
        # A default logger is attached and traders get an exchange API.
        self.assertIsNotNone(self.trading_session.logger)
        (trader,) = self.trading_session.traders
        self.assertIsNotNone(trader.exchange_api)

    def test_init_default_exchange(self):
        # Passing exchange=None must create a fresh Exchange with no logger.
        session = TradingSession(traders=[], exchange=None, logger=None)
        self.assertIsInstance(session.exchange, Exchange)
        self.assertIsNone(session.exchange._logger)
# Allow running this test module directly with ``python <file>``.
if __name__ == "__main__":
    unittest.main()
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,584
|
mikpim01/async_exchange
|
refs/heads/master
|
/async_exchange/algorithms/random_trader.py
|
import asyncio
import logging
import random
from async_exchange.trader import Trader
# Module-level logger for order announcements.
logger = logging.getLogger(__name__)
# Coin-flip mapping used to pick the side of each random order.
BUY = "BUY"
SELL = "SELL"
RANDOM_CHOICE = {0: BUY, 1: SELL}
# Price used when the order book is empty and no reference price exists.
DEFAULT_PRICE = 10
class RandomTrader(Trader):
    """Trader that sleeps a random interval, then places a random order.

    Prices are drawn around the current market midpoint (or DEFAULT_PRICE
    when the book is empty); amounts are bounded by cash/stocks on hand.
    """

    # When True, every submitted order is logged at INFO level.
    verbose = False

    async def sleep(self):
        """Pause for a random duration in [0, 1) seconds."""
        sleep_time = random.random() * 1.0
        await asyncio.sleep(sleep_time)

    async def cycle(self):
        """Strategy loop: sleep, then try to place one random order."""
        while True:
            await self.sleep()
            self.place_random_order()

    def place_random_order(self):
        """Pick a side at random and submit a plausibly-priced order.

        Returns without ordering when the trader cannot afford a single
        stock (buy side) or holds none (sell side).
        """
        buy_or_sell = RANDOM_CHOICE[random.randint(0, 1)]
        existing_buy_orders, existing_sell_orders = self.inspect_exchange()
        if not existing_buy_orders and not existing_sell_orders:
            # Empty book: no reference price, fall back to defaults.
            return self.submit_order(buy_or_sell, price=None, amount=None)
        best_buy = max(existing_buy_orders) if existing_buy_orders else None
        best_sell = min(existing_sell_orders) if existing_sell_orders else None
        # If one side is empty, mirror the other side's best price.
        if best_buy is None:
            best_buy = best_sell
        if best_sell is None:
            best_sell = best_buy
        median_price = (best_buy + best_sell) // 2
        # Jitter the price by up to sqrt(median) in either direction.
        deviation = int(median_price ** 0.5)
        random_price = median_price + random.randint(-deviation, deviation)
        if buy_or_sell == BUY:
            max_stocks = self.money // max(1, random_price)
        else:
            max_stocks = self.stocks
        if max_stocks == 0:
            return
        random_stocks = random.randint(1, max_stocks)
        return self.submit_order(
            buy_or_sell, price=random_price, amount=random_stocks
        )

    def submit_order(self, buy_or_sell, price, amount):
        """Log (if verbose) and forward the order to ``buy()``/``sell()``.

        ``price``/``amount`` may be None, meaning "choose a default".
        """
        if price is None:
            price = DEFAULT_PRICE
        if amount is None:
            # BUG FIX: random.randint(1, 0) raised ValueError whenever the
            # trader could not afford a single stock at ``price``; skip the
            # order instead of crashing the strategy loop.
            max_affordable = self.money // price
            if max_affordable < 1:
                return
            amount = random.randint(1, max_affordable)
        if self.verbose:
            logger.info(
                f"Trader {self._id} wants to {buy_or_sell} "
                f"{amount} stocks at {price}."
            )
        operation = self.buy if buy_or_sell == BUY else self.sell
        operation(amount, price)
|
{"/async_exchange/trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/async_exchange/exchange.py": ["/async_exchange/orders.py", "/async_exchange/trader.py"], "/tests/test_trader.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py"], "/tests/test_exchange.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/orders.py"], "/async_exchange/trader.py": ["/async_exchange/orders.py"], "/async_exchange/logging/tests/test_influxdb_logger.py": ["/async_exchange/logging/influxdb_logger.py"], "/async_exchange/api.py": ["/async_exchange/trader.py", "/async_exchange/exchange.py"], "/demos/run_session.py": ["/async_exchange/algorithms/random_trader.py", "/async_exchange/trading_session.py", "/async_exchange/logging/influxdb_logger.py"], "/tests/test_trading_session.py": ["/async_exchange/exchange.py", "/async_exchange/trader.py", "/async_exchange/trading_session.py"], "/async_exchange/algorithms/random_trader.py": ["/async_exchange/trader.py"]}
|
23,585
|
arheinzen/recipes
|
refs/heads/master
|
/main/models.py
|
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from main.slug import unique_slugify
from django.db import models
# Create your models here. This is a test
class Ingredients(models.Model):
    """A single ingredient, shared across recipes via an M2M relation."""
    name = models.CharField(max_length=300)
    def __str__(self):
        return self.name
    class Meta:
        # Avoid the default auto-pluralized "Ingredientss" in the admin.
        verbose_name_plural="Ingredients"
    def get_absolute_url(self):
        # NOTE(review): string view paths are pre-Django 1.10 style -- confirm
        # the installed Django version still resolves them.
        return reverse ('main.views.recipelist')
class Recipe(models.Model):
    """A recipe with ingredients, directions and an optional source link."""
    name = models.CharField(max_length=300)
    ingredients = models.ManyToManyField("main.Ingredients")
    directions = models.TextField()
    slug = models.SlugField(unique=True)
    recipe_link = models.URLField(null=True, blank=True)
    image = models.ImageField(upload_to='media')

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # NOTE(review): pre-Django 1.10 string view path -- verify it resolves.
        return reverse('main.views.recipelist')

    def save(self, **kwargs):
        """Generate a unique slug from the name before saving.

        BUG FIX: forward ``**kwargs`` (e.g. ``using``, ``update_fields``)
        to the parent ``save`` -- they were previously silently dropped.
        """
        unique_slugify(self, '%s' % self.name)
        super(Recipe, self).save(**kwargs)
|
{"/main/forms.py": ["/main/models.py"], "/main/views.py": ["/main/forms.py", "/main/models.py"], "/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"]}
|
23,586
|
arheinzen/recipes
|
refs/heads/master
|
/scripts/get_recipes.py
|
import csv
import os
import sys
import requests
from PIL import Image
import urllib
import urllib2
from slugify import slugify
from django.core.files import File
# One-off import script (Python 2 syntax): wipes the Recipe/Ingredients
# tables, then repopulates them from the food2fork search API.
sys.path.append("..")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "recipes.settings")
import django
django.setup()
from main.models import Recipe, Ingredients

# Start from a clean slate -- destructive: deletes ALL existing rows.
Recipe.objects.all().delete()
Ingredients.objects.all().delete()

# SECURITY(review): API key committed in source -- move it to an environment
# variable or a settings file and rotate the key.
api_key = "f226968bd5f4291fea9ea9e0caad65b8"
for x in range(1,10):
    # Fetch one page of top-rated ('sort': 'r') recipes per iteration.
    param_dict = {'key': api_key, 'sort': 'r', 'page': x}
    response = requests.get('http://food2fork.com/api/search/recipes.json', params=param_dict)
    print response
    response = response.json()
    recipes = response['recipes']
    for recipe in recipes:
        print recipe['title']
        recipe_id = recipe['recipe_id']
        # Second request fetches the full ingredient list for this recipe.
        param_dict = {'key': api_key, 'rId':recipe_id}
        response = requests.get('http://food2fork.com/api/get', params=param_dict)
        response = response.json()
        print response['recipe']['ingredients']
        ingredients = response['recipe']['ingredients']
        new_recipe, created = Recipe.objects.get_or_create(name=recipe['title'])
        print recipe
        print created
        print '******'
        print Ingredients
        new_recipe.name = recipe['title']
        new_recipe.slug = slugify(recipe['title'])
        new_recipe.recipe_link = recipe['source_url']
        print new_recipe.name
        # Download the image to a temp file, then attach it to the model.
        image = urllib.urlretrieve(recipe['image_url'])
        new_recipe.image.save(os.path.basename(recipe['image_url']), File(open(image[0])))
        for ingredient in ingredients:
            print "*****"
            print ingredient
            new_ingredient, created = Ingredients.objects.get_or_create(name=ingredient)
            print "NEW INGREDIENT"
            print new_ingredient.name
            new_recipe.ingredients.add(new_ingredient)
            new_ingredient.save()
        new_recipe.save()
# new_ingredient, created = Ingredient.objects.get_or_create(name=ingredients['ingredients'])
# new_ingredient.save()
# new_image = urllib.urlretrieve(recipe['image_url'])
# new_recipe.image.save(os.path.basename(recipe['image_url']), File(open(new_image[0])))
|
{"/main/forms.py": ["/main/models.py"], "/main/views.py": ["/main/forms.py", "/main/models.py"], "/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"]}
|
23,587
|
arheinzen/recipes
|
refs/heads/master
|
/main/forms.py
|
from django import forms
from .models import Recipe, Ingredients
class RecipeForm(forms.ModelForm):
    """ModelForm for creating/editing a Recipe (slug is auto-generated)."""
    class Meta:
        model = Recipe
        fields = ['name', 'ingredients', 'directions', 'image', 'recipe_link']
class IngredientForm(forms.ModelForm):
    """ModelForm for creating a single Ingredients row."""
    class Meta:
        model = Ingredients
        fields = ['name']
|
{"/main/forms.py": ["/main/models.py"], "/main/views.py": ["/main/forms.py", "/main/models.py"], "/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"]}
|
23,588
|
arheinzen/recipes
|
refs/heads/master
|
/main/views.py
|
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.core import serializers
from django.views.generic import View, ListView, DetailView, TemplateView
from django.views.generic.edit import CreateView
from django.core.urlresolvers import reverse
from slugify import slugify
import re
from django.db.models import Q
from .forms import RecipeForm, IngredientForm
from .models import Recipe, Ingredients
# Create your views here.
def recipe_list_API_view(request):
    """Serialize every recipe to JSON for API consumers."""
    recipes = Recipe.objects.all()
    output = serializers.serialize('json', recipes, fields=('name','ingredients','directions'))
    return HttpResponse(output, content_type='application/json')
def home(request):
    """Render the home page with every recipe and ingredient."""
    context = {
        'recipe_list': Recipe.objects.all(),
        'ingredient_list': Ingredients.objects.all(),
    }
    return render(request, 'home.html', context)
def recipelist(request):
    """Render the recipe list page."""
    recipes = Recipe.objects.all()
    for recipe in recipes:
        # Templates expect a ``title`` attribute; alias it to ``name``.
        recipe.title = recipe.name
    return render(request, 'recipelist.html', {'recipe_list': recipes})
# LISTVIEW WILL NOT WORK WITH "POST" it will with GET but not POST
#class RecipeListView(ListView):
# model = Recipe
# template_name = "recipelist.html"
# context_object_name = "recipe_list"
class RecipeDetailView(DetailView):
    """Detail page for a single recipe, looked up by slug."""
    model = Recipe
    slug_field = "slug"
    template_name = "ingredients.html"
    context_object_name = "recipe"
class RecipeCreateView(CreateView):
    """Create a recipe; redirects via Recipe.get_absolute_url on success."""
    form_class = RecipeForm
    template_name = "recipecreate.html"
class IngredientCreateView(CreateView):
    """Create an ingredient via IngredientForm."""
    form_class = IngredientForm
    template_name = "ingredientcreate.html"
def normalize_query(query_string,
                    findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
                    normspace=re.compile(r'\s{2,}').sub):
    """Split a search string into individual keywords.

    Quoted phrases are kept together, runs of internal whitespace are
    collapsed to a single space, and surrounding whitespace is stripped.

    Example:
        >>> normalize_query(' some random words "with quotes " and spaces')
        ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
    """
    keywords = []
    for quoted, bare in findterms(query_string):
        term = (quoted or bare).strip()
        keywords.append(normspace(' ', term))
    return keywords
def get_query(query_string, search_fields):
    """Build a Q filter matching every keyword in ``query_string``.

    Each keyword must appear (case-insensitively) in at least one of
    ``search_fields``: keywords are AND-ed together, fields are OR-ed.
    Returns None for an empty query string.
    """
    query = None
    for term in normalize_query(query_string):
        or_query = None
        for field_name in search_fields:
            term_filter = Q(**{"%s__icontains" % field_name: term})
            or_query = term_filter if or_query is None else or_query | term_filter
        query = or_query if query is None else query & or_query
    return query
def search(request):
    """Search recipes by name or ingredient name (``?q=...``).

    BUG FIX: the previous version referenced undefined names
    (``entry_query``, ``Entry``, ``render_to_response``, ``RequestContext``)
    and crashed on every request. It now filters Recipe with the Q object
    built by ``get_query`` and renders via the imported ``render``.
    """
    query_string = ''
    found_entries = None
    if ('q' in request.GET) and request.GET['q'].strip():
        query_string = request.GET['q']
        entry_query = get_query(query_string, ['name', 'ingredients__name'])
        found_entries = Recipe.objects.filter(entry_query)
    return render(request, 'search/search_results.html',
                  {'query_string': query_string,
                   'found_entries': found_entries})
|
{"/main/forms.py": ["/main/models.py"], "/main/views.py": ["/main/forms.py", "/main/models.py"], "/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"]}
|
23,589
|
arheinzen/recipes
|
refs/heads/master
|
/main/admin.py
|
from django.contrib import admin
from main.models import Ingredients, Recipe
# Register your models here.
#class IngredientsAdmin(admin.ModelAdmin):
# list_display = ("name",)
# search_fields = ["name"]
#
#class RecipeAdmin(admin.ModelAdmin):
# list_display = ("name",)
# search_fields = ["name"]
# Register both models with default admin options.
admin.site.register(Ingredients)
admin.site.register(Recipe)
|
{"/main/forms.py": ["/main/models.py"], "/main/views.py": ["/main/forms.py", "/main/models.py"], "/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"]}
|
23,590
|
arheinzen/recipes
|
refs/heads/master
|
/main/urls.py
|
from django.conf import settings
from django.conf.urls import include, url
from django.views.generic import TemplateView

from .views import (
    IngredientCreateView,
    RecipeCreateView,
    RecipeDetailView,
    home,
    recipe_list_API_view,
    recipelist,
)
# Reference view callables directly: string view paths ('main.views.home')
# were deprecated in Django 1.8 and removed in 1.10.
urlpatterns = [
    url(r'^recipe_list_API/$', recipe_list_API_view),
    url(r'^recipelist/$', recipelist),
    url(r'^recipelist/(?P<slug>.+)/$', RecipeDetailView.as_view()),
    url(r'^recipecreate/$', RecipeCreateView.as_view()),
    url(r'^recipesuccess/$',
        TemplateView.as_view(template_name="recipe_success.html")),
    url(r'^home/$', home),
    url(r'^home/(?P<slug>.+)/$', RecipeDetailView.as_view()),
    url(r'^ingredientcreate/$', IngredientCreateView.as_view()),
]
#TemplateView.as_view(template_name="home.html")
|
{"/main/forms.py": ["/main/models.py"], "/main/views.py": ["/main/forms.py", "/main/models.py"], "/main/admin.py": ["/main/models.py"], "/main/urls.py": ["/main/views.py"]}
|
23,593
|
coleifer/walrus
|
refs/heads/master
|
/walrus/streams.py
|
import datetime
import operator
import time
from walrus.containers import ConsumerGroup
from walrus.containers import ConsumerGroupStream
from walrus.utils import basestring_type
from walrus.utils import decode
from walrus.utils import decode_dict
from walrus.utils import make_python_attr
def id_to_datetime(ts):
    """Convert a raw stream id (b"<millis>-<seq>") to (datetime, seq)."""
    millis, seq = ts.split(b'-', 1)
    timestamp = datetime.datetime.fromtimestamp(int(millis) / 1000.)
    return timestamp, int(seq)
def datetime_to_id(dt, seq=0):
    """Convert a datetime (plus sequence) to a "<millis>-<seq>" stream id."""
    millis = time.mktime(dt.timetuple()) * 1000
    return '%s-%s' % (int(millis + (dt.microsecond / 1000)), seq)
class Message(object):
    """
    A message stored in a Redis stream.

    Wraps the raw (message id, data) 2-tuple read from a stream: the id's
    timestamp is exposed as a datetime and the payload is UTF8-decoded.
    """
    __slots__ = ('stream', 'timestamp', 'sequence', 'data', 'message_id')

    def __init__(self, stream, message_id, data):
        self.stream = decode(stream)
        self.message_id = decode(message_id)
        self.data = decode_dict(data)
        # Parse the *raw* id, which may still be a bytestring.
        self.timestamp, self.sequence = id_to_datetime(message_id)

    def __repr__(self):
        return '<Message %s %s: %s>' % (
            self.stream, self.message_id, self.data)
def normalize_id(message_id):
    """Coerce the various accepted id forms into a plain stream id.

    Accepts a string id, a datetime, a (datetime, seq) tuple, or a
    Message; anything else is passed through unchanged.
    """
    if isinstance(message_id, basestring_type):
        return message_id
    if isinstance(message_id, datetime.datetime):
        return datetime_to_id(message_id)
    if isinstance(message_id, tuple):
        return datetime_to_id(*message_id)
    if isinstance(message_id, Message):
        return message_id.message_id
    return message_id
def normalize_ids(id_list):
    """Apply :py:func:`normalize_id` to every element of ``id_list``."""
    return list(map(normalize_id, id_list))
def xread_to_messages(resp):
    """Flatten an XREAD reply into Message objects (None passes through).

    When more than one stream is present, the combined list is sorted by
    message id (i.e. chronologically).
    """
    if resp is None:
        return
    messages = []
    for stream, stream_messages in resp:
        messages.extend(xrange_to_messages(stream, stream_messages))
    if len(resp) > 1:
        messages.sort(key=operator.attrgetter('message_id'))
    return messages
def xrange_to_messages(stream, resp):
    """Wrap each (id, data) pair of an XRANGE reply in a Message."""
    return [Message(stream, mid, payload) for mid, payload in resp]
class TimeSeriesStream(ConsumerGroupStream):
    """
    Stream wrapper used within a :py:class:`TimeSeries` consumer group.

    Instances are created automatically and exposed as attributes on the
    owning ``TimeSeries`` (one per stream key) -- do not construct directly::

        ts = db.time_series('events', ['stream-1', 'stream-2'])
        ts.stream_1  # TimeSeriesStream for "stream-1"
        ts.stream_2  # TimeSeriesStream for "stream-2"

    Differences from :py:class:`ConsumerGroupStream`:

    * Wherever an id (or list of ids) is accepted, a datetime, a
      (datetime, sequence) tuple, or a :py:class:`Message` may be passed
      as well as a plain bytestring id.
    * Read APIs return :py:class:`Message` objects instead of raw
      (message id, data) 2-tuples, with payloads UTF8-decoded.
    """
    __slots__ = ('database', 'group', 'key', '_consumer')

    def ack(self, *id_list):
        return super(TimeSeriesStream, self).ack(*normalize_ids(id_list))

    def add(self, data, id='*', maxlen=None, approximate=True):
        new_id = super(TimeSeriesStream, self).add(
            data, normalize_id(id), maxlen, approximate)
        return id_to_datetime(new_id)

    def claim(self, *id_list, **kwargs):
        claimed = super(TimeSeriesStream, self).claim(
            *normalize_ids(id_list), **kwargs)
        return xrange_to_messages(self.key, claimed)

    def delete(self, *id_list):
        return super(TimeSeriesStream, self).delete(*normalize_ids(id_list))

    def get(self, id):
        """Return the single message with the given id, or None."""
        normalized = normalize_id(id)
        found = self.range(normalized, normalized, 1)
        return found[0] if found else None

    def range(self, start='-', stop='+', count=None):
        raw = super(TimeSeriesStream, self).range(
            normalize_id(start), normalize_id(stop), count)
        return xrange_to_messages(self.key, raw)

    def pending(self, start='-', stop='+', count=1000, consumer=None,
                idle=None):
        """List pending messages as ((timestamp, seq), consumer,
        time-since-delivered, times-delivered) tuples."""
        raw = self.database.xpending_range(
            self.key, self.group, min=normalize_id(start),
            max=normalize_id(stop), count=count,
            consumername=consumer, idle=idle)
        return [(id_to_datetime(item['message_id']),
                 decode(item['consumer']),
                 item['time_since_delivered'],
                 item['times_delivered'])
                for item in raw]

    def read(self, count=None, block=None):
        raw = super(TimeSeriesStream, self).read(count, block)
        if raw is not None:
            return xrange_to_messages(self.key, raw)

    def set_id(self, id='$'):
        return super(TimeSeriesStream, self).set_id(normalize_id(id))
class TimeSeries(ConsumerGroup):
    """
    Consumer group with a datetime-oriented API.

    Message ids are read and written as datetimes, and results come back
    as lightweight :py:class:`Message` objects. Create instances through
    :py:meth:`Database.time_series` rather than directly.

    Each registered stream in the group is exposed as an attribute that
    provides stream-specific APIs (see :py:class:`TimeSeriesStream`)::

        ts = db.time_series('groupname', ['stream-1', 'stream-2'])
        ts.stream_1  # TimeSeriesStream for "stream-1"
        ts.stream_2  # TimeSeriesStream for "stream-2"

    :param Database database: Redis client
    :param group: name of consumer group
    :param keys: stream identifier(s) to monitor -- a single key, a list
        of keys, or a key-to-minimum-id mapping. The minimum id for each
        stream is an exclusive lower bound; '$' reads only values added
        *after* the command started blocking.
    :param consumer: name for consumer within group
    :returns: a :py:class:`TimeSeries` instance
    """
    stream_key_class = TimeSeriesStream

    def read(self, count=None, block=None):
        """
        Read unseen messages from all streams in the consumer group.
        Wrapper for :py:class:`Database.xreadgroup` method.

        :param int count: limit number of messages returned
        :param int block: milliseconds to block, 0 for indefinitely.
        :returns: a list of :py:class:`Message` objects
        """
        raw = super(TimeSeries, self).read(count, block)
        return xread_to_messages(raw)

    def set_id(self, id='$'):
        """Set the group's last-read id (datetime forms accepted)."""
        return super(TimeSeries, self).set_id(normalize_id(id))
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,594
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/models.py
|
import datetime
from walrus import *
from walrus.query import OP_AND
from walrus.query import OP_OR
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class BaseModel(Model):
__database__ = db
__namespace__ = 'test'
class User(BaseModel):
username = TextField(primary_key=True)
class Note(BaseModel):
user = TextField(index=True)
text = TextField()
timestamp = DateTimeField(default=datetime.datetime.now, index=True)
tags = JSONField()
class Message(BaseModel):
content = TextField(fts=True)
status = IntegerField(default=1, index=True)
class FTSOptions(BaseModel):
content = TextField(fts=True, stemmer=True)
metaphone = TextField(fts=True, stemmer=True, metaphone=True)
class Stat(BaseModel):
key = AutoIncrementField()
stat_type = ByteField(index=True)
value = IntegerField(index=True)
class DefaultOption(BaseModel):
default_empty = JSONField()
txt = TextField(default='')
num = IntegerField(default=0)
class TestModels(WalrusTestCase):
def create_objects(self):
for i in range(3):
u = User.create(username='u%s' % (i + 1))
for j in range(3):
Note.create(
user=u.username,
text='n%s-%s' % (i + 1, j + 1),
tags=['t%s' % (k + 1) for k in range(j)])
def test_textfield_whitespace(self):
h = User.create(username='huey cat')
z = User.create(username='zaizee cat')
h_db = User.load('huey cat')
self.assertEqual(h_db.username, 'huey cat')
z_db = User.load('zaizee cat')
self.assertEqual(z_db.username, 'zaizee cat')
query = User.query(User.username == 'huey cat')
self.assertEqual([u.username for u in query], ['huey cat'])
self.assertRaises(KeyError, User.load, 'mickey dog')
def test_store_none(self):
class Simple(BaseModel):
text = TextField()
number = IntegerField()
normalized = FloatField()
s = Simple.create(text=None, number=None, normalized=None)
s_db = Simple.load(s._id)
self.assertEqual(s_db.text, '')
self.assertEqual(s_db.number, 0)
self.assertEqual(s_db.normalized, 0.)
def test_create(self):
self.create_objects()
self.assertEqual(
sorted(user.username for user in User.all()),
['u1', 'u2', 'u3'])
notes = Note.query(Note.user == 'u1')
self.assertEqual(
sorted(note.text for note in notes),
['n1-1', 'n1-2', 'n1-3'])
notes = sorted(
Note.query(Note.user == 'u2'),
key = lambda note: note._id)
note = notes[2]
self.assertEqual(note.tags, ['t1', 't2'])
def test_exceptions(self):
self.assertRaises(KeyError, User.load, 'charlie')
User.create(username='charlie')
user = User.load('charlie')
self.assertEqual(user.username, 'charlie')
def test_query(self):
self.create_objects()
notes = Note.query(Note.user == 'u2')
self.assertEqual(
sorted(note.text for note in notes),
['n2-1', 'n2-2', 'n2-3'])
user = User.get(User.username == 'u3')
self.assertEqual(user._data, {'username': 'u3'})
self.assertRaises(ValueError, User.get, User.username == 'ux')
def test_query_with_update(self):
stat = Stat.create(stat_type='s1', value=1)
vq = list(Stat.query(Stat.value == 1))
self.assertEqual(len(vq), 1)
stat_db = vq[0]
self.assertEqual(stat_db.stat_type, b's1')
self.assertEqual(stat_db.value, 1)
stat.value = 2
stat.save()
def assertCount(expr, count):
self.assertEqual(len(list(Stat.query(expr))), count)
assertCount(Stat.value == 1, 0)
assertCount(Stat.value == 2, 1)
assertCount(Stat.stat_type == 's1', 1)
stat.stat_type = 's2'
stat.save()
assertCount(Stat.value == 1, 0)
assertCount(Stat.value == 2, 1)
assertCount(Stat.stat_type == 's1', 0)
assertCount(Stat.stat_type == 's2', 1)
def test_sorting(self):
self.create_objects()
all_notes = [
'n1-1', 'n1-2', 'n1-3', 'n2-1', 'n2-2', 'n2-3', 'n3-1', 'n3-2',
'n3-3']
notes = Note.query(order_by=Note.text)
self.assertEqual([note.text for note in notes], all_notes)
notes = Note.query(order_by=Note.text.desc())
self.assertEqual(
[note.text for note in notes],
all_notes[::-1])
notes = Note.query(Note.user == 'u2', Note.text)
self.assertEqual(
[note.text for note in notes],
['n2-1', 'n2-2', 'n2-3'])
notes = Note.query(Note.user == 'u2', Note.text.desc())
self.assertEqual(
[note.text for note in notes],
['n2-3', 'n2-2', 'n2-1'])
def test_complex_query(self):
usernames = ['charlie', 'huey', 'mickey', 'zaizee']
for username in usernames:
User.create(username=username)
def assertUsers(expr, expected):
users = User.query(expr)
self.assertEqual(
sorted(user.username for user in users),
sorted(expected))
assertUsers(User.username == 'charlie', ['charlie'])
assertUsers(User.username != 'huey', ['charlie', 'mickey', 'zaizee'])
assertUsers(
((User.username == 'charlie') | (User.username == 'mickey')),
['charlie', 'mickey'])
assertUsers(
(User.username == 'charlie') | (User.username != 'mickey'),
['charlie', 'huey', 'zaizee'])
expr = (
((User.username != 'huey') & (User.username != 'zaizee')) |
(User.username == 'charlie'))
assertUsers(expr, ['charlie', 'mickey'])
def test_scalar_query(self):
data = [
('t1', 1),
('t1', 2),
('t1', 3),
('t2', 10),
('t2', 11),
('t2', 12),
('t3', 0),
]
for stat_type, value in data:
Stat.create(stat_type=stat_type, value=value)
stat_objects = sorted(
(stat for stat in Stat.all()),
key=lambda stat: stat.key)
self.assertEqual([stat._data for stat in stat_objects], [
{'key': 1, 'stat_type': b't1', 'value': 1},
{'key': 2, 'stat_type': b't1', 'value': 2},
{'key': 3, 'stat_type': b't1', 'value': 3},
{'key': 4, 'stat_type': b't2', 'value': 10},
{'key': 5, 'stat_type': b't2', 'value': 11},
{'key': 6, 'stat_type': b't2', 'value': 12},
{'key': 7, 'stat_type': b't3', 'value': 0},
])
def assertStats(expr, expected):
stats = Stat.query(expr)
self.assertEqual(
sorted(stat.key for stat in stats),
sorted(expected))
assertStats(Stat.value <= 3, [1, 2, 3, 7])
assertStats(Stat.value >= 10, [4, 5, 6])
assertStats(Stat.value < 3, [1, 2, 7])
assertStats(Stat.value > 10, [5, 6])
assertStats(Stat.value == 3, [3])
assertStats(Stat.value >= 13, [])
assertStats(
(Stat.value <= 2) | (Stat.key >= 7),
[1, 2, 7])
assertStats(
((Stat.value <= 2) & (Stat.key >= 7)) | (Stat.value >= 11),
[5, 6, 7])
assertStats(
((Stat.value <= 2) | (Stat.key >= 7)) & (Stat.stat_type == 't1'),
[1, 2])
assertStats(Stat.value.between(2, 11), [2, 3, 4, 5])
assertStats(Stat.value.between(4, 12), [4, 5, 6])
def test_full_text_search(self):
phrases = [
('A faith is a necessity to a man. Woe to him who believes in '
'nothing.'),
('All who call on God in true faith, earnestly from the heart, '
'will certainly be heard, and will receive what they have asked '
'and desired.'),
('Be faithful in small things because it is in them that your '
'strength lies.'),
('Faith consists in believing when it is beyond the power of '
'reason to believe.'),
('Faith has to do with things that are not seen and hope with '
'things that are not at hand.')]
for idx, message in enumerate(phrases):
Message.create(content=message, status=1 + (idx % 2))
def assertMatches(query, indexes):
results = [message.content for message in query]
self.assertEqual(results, [phrases[i] for i in indexes])
def assertSearch(search, indexes):
assertMatches(
Message.query(Message.content.match(search)),
indexes)
assertSearch('faith', [1, 4, 3, 2, 0])
assertSearch('faith man', [0])
assertSearch('things', [2, 4])
assertSearch('blah', [])
query = Message.query(
Message.content.match('faith') & (Message.status == 1))
assertMatches(query, [4, 2, 0])
def test_full_text_combined(self):
phrases = [
'little bunny foo foo', # 0, s=1
'a little green owl', # 1, s=2
'the owl was named foo', # 2, s=1
'he had a nicotine patch on his wing', # 3, s=2
'he was trying to quit smoking', # 4, s=1
'the owl was little and green and sweet', # 5, s=2
'he dropped presents on my porch', # 6, s=1
]
index_to_phrase = {}
for idx, message in enumerate(phrases):
msg = Message.create(content=message, status=1 + (idx % 2))
index_to_phrase[idx] = message
def assertSearch(search, indexes):
self.assertEqual(
sorted(message.content for message in query),
sorted(index_to_phrase[idx] for idx in indexes))
query = Message.query(Message.content.match('little owl'))
assertSearch(query, [1, 5, 2]) # "little" is ignored (stop word).
query = Message.query(
Message.content.match('little owl') &
Message.content.match('foo'))
assertSearch(query, [2])
query = Message.query(
(Message.content.match('owl') & (Message.status == 1)) |
(Message.content.match('foo') & (Message.status == 2)))
assertSearch(query, [2])
query = Message.query(
(Message.content.match('green') & (Message.status == 2)) |
(Message.status == 1))
assertSearch(query, [0, 2, 4, 6, 1, 5])
query = Message.query(
((Message.status == 2) & Message.content.match('green')) |
(Message.status == 1))
assertSearch(query, [0, 2, 4, 6, 1, 5])
def test_full_text_options(self):
phrases = [
'building web applications with python and flask',
'modern web development with python',
'unit testing with python',
'writing better tests for your application',
'applications for the web',
]
for phrase in phrases:
FTSOptions.create(content=phrase, metaphone=phrase)
def assertMatches(search, indexes, use_metaphone=False):
if use_metaphone:
field = FTSOptions.metaphone
else:
field = FTSOptions.content
query = FTSOptions.query(field.match(search))
results = [message.content for message in query]
self.assertEqual(results, [phrases[i] for i in indexes])
assertMatches('web application', [0, 4])
assertMatches('web application', [0, 4], True)
assertMatches('python', [0, 1, 2])
assertMatches('python', [0, 1, 2], True)
assertMatches('testing', [3, 2])
assertMatches('testing', [3, 2], True)
# Test behavior of the metaphone algorithm.
assertMatches('python flasck', [0], True)
assertMatches('pithon devellepment', [], False)
assertMatches('pithon devellepment', [1], True)
assertMatches('younit tessts', [2], True)
def test_fts_query_parser(self):
messages = [
'foo green',
'bar green',
'baz blue',
'nug blue',
'nize yellow',
'huey greener',
'mickey greens',
'zaizee',
]
for message in messages:
Message.create(content=message)
def assertMatches(query, expected, default_conjunction=OP_AND):
expression = Message.content.search(query, default_conjunction)
messages = Message.query(expression, order_by=Message.content)
results = [msg.content for msg in messages]
self.assertEqual(results, expected)
assertMatches('foo', ['foo green'])
assertMatches('foo OR baz', ['baz blue', 'foo green'])
assertMatches('green OR blue', [
'bar green',
'baz blue',
'foo green',
'mickey greens',
'nug blue',
])
assertMatches('green AND (bar OR mickey OR nize)', [
'bar green',
'mickey greens',
])
assertMatches('zaizee OR (blue AND nug) OR (green AND bar)', [
'bar green',
'nug blue',
'zaizee',
])
assertMatches('(blue AND (baz OR (nug OR huey OR mickey))', [
'baz blue',
'nug blue',
])
assertMatches(
'(blue OR foo) AND (green OR (huey OR (baz AND mickey)))',
['foo green'])
assertMatches('(green AND nug) OR (blue AND bar)', [])
assertMatches('nuglet', [])
assertMatches('foobar', [])
assertMatches('', sorted(messages))
def test_load(self):
User.create(username='charlie')
u = User.load('charlie')
self.assertEqual(u._data, {'username': 'charlie'})
def test_save_delete(self):
charlie = User.create(username='charlie')
huey = User.create(username='huey')
note = Note.create(user='huey', text='n1')
note.text = 'n1-edited'
note.save()
self.assertEqual(
sorted(user.username for user in User.all()),
['charlie', 'huey'])
notes = Note.all()
self.assertEqual([note.text for note in notes], ['n1-edited'])
charlie.delete()
self.assertEqual([user.username for user in User.all()], ['huey'])
def test_delete_indexes(self):
self.assertEqual(set(db.keys()), set())
Message.create(content='charlie message', status=1)
Message.create(content='huey message', status=2)
keys = set(db.keys())
charlie = Message.load(1)
charlie.delete()
huey_keys = set(db.keys())
diff = keys - huey_keys
def make_key(*args):
return Message._query.make_key(*args).encode('utf-8')
self.assertEqual(diff, set([
make_key('_id', 'absolute', 1),
make_key('content', 'absolute', 'charlie message'),
make_key('content', 'fts', 'charli'),
make_key('id', 1),
make_key('status', 'absolute', 1),
]))
# Ensure we cannot query for Charlie, but that we can query for Huey.
expressions = [
(Message.status == 1),
(Message.status != 2),
(Message._id == 1),
(Message._id != 2),
(Message.content == 'charlie message'),
(Message.content != 'huey message'),
(Message.content.match('charlie')),
]
for expression in expressions:
self.assertRaises(ValueError, Message.get, expression)
expressions = [
(Message.status == 2),
(Message.status > 1),
(Message._id == 2),
(Message._id != 1),
(Message.content == 'huey message'),
(Message.content != 'charlie'),
(Message.content.match('huey')),
(Message.content.match('message')),
]
for expression in expressions:
obj = Message.get(expression)
self.assertEqual(obj._data, {
'_id': 2,
'content': 'huey message',
'status': 2,
})
after_filter_keys = set(db.keys())
symm_diff = huey_keys ^ after_filter_keys
self.assertTrue(all(key.startswith(b'temp') for key in symm_diff))
huey = Message.load(2)
huey.delete()
final_keys = set(key for key in db.keys()
if not key.startswith(b'temp'))
self.assertEqual(final_keys, set([make_key('_id', '_sequence')]))
def test_get_regression(self):
Message.create(content='huey', status=1)
Message.create(content='charlie', status=2)
def assertMessage(msg, data):
self.assertEqual(msg._data, data)
huey = {'_id': 1, 'content': 'huey', 'status': 1}
charlie = {'_id': 2, 'content': 'charlie', 'status': 2}
assertMessage(Message.load(1), huey)
assertMessage(Message.load(2), charlie)
for i in range(3):
assertMessage(Message.get(Message._id == 1), huey)
assertMessage(Message.get(Message._id == 2), charlie)
assertMessage(Message.get(Message.status == 1), huey)
assertMessage(Message.get(Message.status == 2), charlie)
assertMessage(Message.get(Message.status != 1), charlie)
assertMessage(Message.get(Message.status != 2), huey)
messages = list(Message.query(Message.status == 1))
self.assertEqual(len(messages), 1)
assertMessage(messages[0], huey)
messages = list(Message.query(Message.status != 1))
self.assertEqual(len(messages), 1)
assertMessage(messages[0], charlie)
def test_index_separator(self):
class CustomSeparator(BaseModel):
index_separator = '$'
name = TextField(primary_key=True)
data = IntegerField(index=True)
CustomSeparator.create(name='huey.zai', data=3)
CustomSeparator.create(name='michael.nuggie', data=5)
keys = sorted(db.keys())
self.assertEqual(keys, [
# namespace | model : $-delimited indexed data
b'test|customseparator:all',
b'test|customseparator:data$absolute$3',
b'test|customseparator:data$absolute$5',
b'test|customseparator:data$continuous',
b'test|customseparator:id$huey.zai',
b'test|customseparator:id$michael.nuggie',
b'test|customseparator:name$absolute$huey.zai',
b'test|customseparator:name$absolute$michael.nuggie'])
huey = CustomSeparator.get(CustomSeparator.data < 5)
self.assertEqual(huey.name, 'huey.zai')
mickey = CustomSeparator.load('michael.nuggie')
self.assertEqual(mickey.data, 5)
def test_incr(self):
for i in range(3):
Stat.create(stat_type='test', value=i)
s1 = Stat.get(Stat.value == 1)
res = s1.incr(Stat.value, 5)
self.assertEqual(res, 6)
self.assertEqual(s1.value, 6)
self.assertRaises(ValueError, Stat.get, Stat.value == 1)
s6 = Stat.get(Stat.value == 6)
self.assertEqual(s1.key, s6.key)
def test_count(self):
self.assertEqual(User.count(), 0)
for username in ['charlie', 'leslie', 'connor']:
User.create(username=username)
self.assertEqual(User.count(), 3)
def test_query_delete(self):
for i in range(5):
u = User.create(username='u%s' % (i + 1))
User.query_delete((User.username == 'u1') | (User.username == 'u4'))
usernames = [user.username for user in User.all()]
self.assertEqual(sorted(usernames), ['u2', 'u3', 'u5'])
User.query_delete()
self.assertEqual([user for user in User.all()], [])
def test_container_field_persistence(self):
class HashModel(BaseModel):
data = HashField()
name = TextField()
hm1 = HashModel.create(name='hm1')
hm1.data.update(k1='v1', k2='v2')
hm2 = HashModel.create(name='hm2')
hm2.data.update(k3='v3', k4='v4')
hm1.name = 'hm1-e'
hm1.save()
hm1_db = HashModel.load(hm1._id)
self.assertEqual(hm1_db.name, 'hm1-e')
self.assertEqual(hm1.data.as_dict(), {b'k1': b'v1', b'k2': b'v2'})
def test_delete_container_fields(self):
class HashModel(BaseModel):
data = HashField()
name = TextField()
hm1 = HashModel.create(name='hm1')
hm1.data.update(k1='v1', k2='v2')
hm2 = HashModel.create(name='hm2')
hm2.data.update(k3='v3', k4='v4')
hm1.delete()
self.assertEqual(hm1.data.as_dict(), {})
self.assertEqual(hm2.data.as_dict(), {b'k3': b'v3', b'k4': b'v4'})
def test_default_is_an_empty_dict(self):
instance = DefaultOption()
self.assertTrue(instance.default_empty is None)
self.assertEqual(instance.num, 0)
self.assertEqual(instance.txt, '')
def test_json_storage(self):
class APIResponse(BaseModel):
data = JSONField()
ar = APIResponse(data={'k1': 'v1', 'k2': 'v2'})
ar.save()
ar_db = APIResponse.load(ar._id)
self.assertEqual(ar_db.data, {'k1': 'v1', 'k2': 'v2'})
def test_pickled_storage(self):
class PythonData(BaseModel):
data = PickledField()
pd = PythonData(data={'k1': ['v1', None, 'v3']})
pd.save()
pd_db = PythonData.load(pd._id)
self.assertEqual(pd_db.data, {'k1': ['v1', None, 'v3']})
pd2 = PythonData.create(data=None)
pd2_db = PythonData.load(pd2._id)
self.assertTrue(pd2_db.data is None)
def test_boolean_field(self):
class Account(BaseModel):
name = TextField(primary_key=True)
active = BooleanField()
admin = BooleanField(default=False)
charlie = Account(name='charlie', active=True, admin=True)
huey = Account(name='huey', active=False)
charlie.save()
huey.save()
charlie_db = Account.get(Account.name == 'charlie')
self.assertTrue(charlie_db.active)
self.assertTrue(charlie_db.admin)
huey_db = Account.get(Account.name == 'huey')
self.assertFalse(huey_db.active)
self.assertFalse(huey_db.admin)
huey_db.active = True
huey_db.admin = True
huey_db.save()
huey_db2 = Account.get(Account.name == 'huey')
self.assertTrue(huey_db2.active)
self.assertTrue(huey_db2.admin)
def test_query_boolean(self):
class BT(BaseModel):
key = TextField(primary_key=True)
flag = BooleanField(default=False, index=True)
for i in range(4):
BT.create(key='k%s' % i, flag=True if i % 2 else False)
query = BT.query(BT.flag == True)
self.assertEqual(sorted([bt.key for bt in query]), ['k1', 'k3'])
query = BT.query(BT.flag == False)
self.assertEqual(sorted([bt.key for bt in query]), ['k0', 'k2'])
def test_uuid(self):
class Beacon(BaseModel):
name = TextField(primary_key=True)
data = UUIDField()
b1 = Beacon.create(name='alpha', data=uuid.uuid4())
b2 = Beacon.create(name='bravo', data=uuid.uuid4())
b3 = Beacon.create(name='charlie')
b3_db = Beacon.load('charlie')
b2_db = Beacon.load('bravo')
b1_db = Beacon.load('alpha')
self.assertEqual(b1.data, b1_db.data)
self.assertEqual(b2.data, b2_db.data)
self.assertTrue(b3.data is None)
def _test_date_field(self, field_class, dt_func):
class Event(BaseModel):
timestamp = field_class(index=True)
value = TextField()
events = [
Event.create(timestamp=dt_func(i), value='e%s' % i)
for i in range(1, 11)]
e_db = Event.get(Event._id == events[-1]._id)
self.assertEqual(e_db.timestamp, dt_func(10))
self.assertEqual(e_db.value, 'e10')
events = Event.query(
(Event.timestamp >= dt_func(3)) &
(Event.timestamp < dt_func(7)), Event.timestamp)
ts2value = [(e.timestamp, e.value) for e in events]
self.assertEqual(ts2value, [
(dt_func(3), 'e3'),
(dt_func(4), 'e4'),
(dt_func(5), 'e5'),
(dt_func(6), 'e6')])
e = Event.create(value='ex')
e_db = Event.load(e._id)
self.assertTrue(e_db.timestamp is None)
self.assertEqual(e_db.value, 'ex')
def test_datetime_field(self):
dt = lambda day: datetime.datetime(2018, 1, day, 3, 13, 37)
self._test_date_field(DateTimeField, dt)
def test_date_field(self):
dt = lambda day: datetime.date(2018, 1, day)
self._test_date_field(DateField, dt)
if __name__ == '__main__':
import unittest; unittest.main()
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,595
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tusks/vedisdb.py
|
import sys
import unittest
from vedis import Vedis
from walrus import *
class VedisList(List):
def extend(self, value):
return self.database.lmpush(self.key, value)
def pop(self):
return self.database.lpop(self.key)
class WalrusVedis(Vedis, Database):
def __init__(self, filename=':mem:'):
self._filename = filename
Vedis.__init__(self, filename)
def __repr__(self):
if self._filename in (':memory:', ':mem:'):
db_file = 'in-memory database'
else:
db_file = self._filename
return '<WalrusVedis: %s>' % db_file
def execute_command(self, *args, **options):
raise ValueError('"%s" is not supported by Vedis.' % args[0])
def parse_response(self, *args, **kwargs):
raise RuntimeError('Error, parse_response should not be called.')
def command(self, command_name):
return self.register(command_name)
# Compatibility with method names from redis-py.
def getset(self, key, value):
return self.get_set(key, value)
def incrby(self, name, amount=1):
return self.incr_by(name, amount)
def decrby(self, name, amount=1):
return self.decr_by(name, amount)
# Compatibility with method signatures.
def mset(self, **data):
return super(WalrusVedis, self).mset(data)
def mget(self, *keys):
return super(WalrusVedis, self).mget(list(keys))
def __getitem__(self, key):
try:
return super(WalrusVedis, self).__getitem__(key)
except KeyError:
pass
def sadd(self, key, *items):
return super(WalrusVedis, self).smadd(key, list(items))
# Override the container types since Vedis provides its own using the
# same method-names as Walrus, and we want the Walrus containers.
def Hash(self, key):
return Hash(self, key)
def Set(self, key):
return Set(self, key)
def List(self, key):
return VedisList(self, key)
def not_supported(name):
def decorator(self, *args, **kwargs):
raise ValueError('%s is not supported by Vedis.' % name)
return decorator
ZSet = not_supported('ZSet')
Array = not_supported('Array')
HyperLogLog = not_supported('HyperLogLog')
pipeline = not_supported('pipeline')
lock = not_supported('lock')
pubsub = not_supported('pubsub')
class TestWalrusVedis(unittest.TestCase):
def setUp(self):
self.db = WalrusVedis()
def test_basic(self):
self.db['foo'] = 'bar'
self.assertEqual(self.db['foo'], b'bar')
self.assertTrue('foo' in self.db)
self.assertFalse('xx' in self.db)
self.assertIsNone(self.db['xx'])
self.db.mset(k1='v1', k2='v2', k3='v3')
results = self.db.mget('k1', 'k2', 'k3', 'kx')
self.assertEqual(list(results), [b'v1', b'v2', b'v3', None])
self.db.append('foo', 'baz')
self.assertEqual(self.db.get('foo'), b'barbaz')
self.db.incr_by('counter', 1)
self.assertEqual(self.db.incr_by('counter', 5), 6)
self.assertEqual(self.db.decr_by('counter', 2), 4)
self.assertEqual(self.db.strlen('foo'), 6)
self.assertEqual(self.db.getset('foo', 'nug'), b'barbaz')
self.assertEqual(self.db['foo'], b'nug')
self.assertFalse(self.db.setnx('foo', 'xxx'))
self.assertTrue(self.db.setnx('bar', 'yyy'))
self.assertEqual(self.db['bar'], b'yyy')
del self.db['foo']
self.assertFalse('foo' in self.db)
def test_hash(self):
h = self.db.Hash('hash_obj')
h['k1'] = 'v1'
h.update({'k2': 'v2', 'k3': 'v3'})
self.assertEqual(h.as_dict(), {
b'k1': b'v1',
b'k2': b'v2',
b'k3': b'v3'})
self.assertEqual(h['k2'], b'v2')
self.assertIsNone(h['kx'])
self.assertTrue('k2' in h)
self.assertEqual(len(h), 3)
del h['k2']
del h['kxx']
self.assertEqual(sorted(h.keys()), [b'k1', b'k3'])
self.assertEqual(sorted(h.values()), [b'v1', b'v3'])
def test_list(self):
l = self.db.List('list_obj')
l.prepend('charlie')
l.extend(['mickey', 'huey', 'zaizee'])
self.assertEqual(l[0], b'charlie')
self.assertEqual(l[-1], b'zaizee')
self.assertEqual(len(l), 4)
self.assertEqual(l.pop(), b'charlie')
def test_set(self):
s = self.db.Set('set_obj')
s.add('charlie')
s.add('charlie', 'huey', 'mickey')
self.assertEqual(len(s), 3)
self.assertTrue('huey' in s)
self.assertFalse('xx' in s)
del s['huey']
self.assertFalse('huey' in s)
self.assertEqual(s.members(), set([b'charlie', b'mickey']))
s1 = self.db.Set('s1')
s2 = self.db.Set('s2')
s1.add(*map(str, range(5)))
s2.add(*map(str, range(3, 7)))
self.assertEqual(s1 - s2, set([b'0', b'1', b'2']))
self.assertEqual(s2 - s1, set([b'5', b'6']))
self.assertEqual(s1 & s2, set([b'3', b'4']))
def test_unsupported(self):
def assertUnsupported(cmd, *args):
method = getattr(self.db, cmd)
self.assertRaises(ValueError, method, *args)
# Just check a handful of methods.
assertUnsupported('zadd', 'zs', {'foo': 1})
assertUnsupported('ZSet', 'zs')
assertUnsupported('rpush', 'l_obj', 'val')
assertUnsupported('rpop', 'l_obj')
assertUnsupported('ltrim', 'l_obj', 0, 1)
assertUnsupported('lrem', 'l_obj', 3, 1)
def test_custom_commands(self):
@self.db.command('KTITLE')
def _ktitle_impl(context, key):
value = context[key]
if value:
context[key] = value.title()
return True
return False
self.db['n1'] = 'charlie'
self.db['n2'] = 'huey'
self.assertTrue(_ktitle_impl('n1'))
self.assertEqual(self.db['n1'], b'Charlie')
self.assertTrue(self.db.execute('KTITLE n2'))
self.assertEqual(self.db['n2'], b'Huey')
self.assertFalse(self.db.execute('KTITLE nx'))
self.assertIsNone(self.db['nx'])
if __name__ == '__main__':
unittest.main(argv=sys.argv)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,596
|
coleifer/walrus
|
refs/heads/master
|
/examples/twitter/app.py
|
import datetime
import operator
from flask import abort
from flask import flash
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
from functools import wraps
from hashlib import md5
from walrus import *
# Configure our app's settings.
DEBUG = True
SECRET_KEY = 'hin6bab8ge25*r=x&+5$0kn=-#log$pt^#@vrqjld!^2ci@g*b'
# Create a flask application - this `app` object will be used to handle
# inbound requests, routing them to the proper 'view' functions, etc.
app = Flask(__name__)
app.config.from_object(__name__)
# Create a walrus database instance - our models will use this database to
# persist information.
database = Database()
# Model definitions - the standard "pattern" is to define a base model class
# that specifies which database to use. Then, any subclasses will automatically
# use the correct storage.
class BaseModel(Model):
__database__ = database
__namespace__ = 'twitter'
# Model classes specify fields declaratively, like django.
class User(BaseModel):
    """A registered user. Follower/following relationships are stored as
    sorted sets of usernames (all with score 0, so they sort by name)."""
    username = TextField(primary_key=True)
    password = TextField(index=True)  # stored as an md5 hexdigest, not plaintext
    email = TextField()
    followers = ZSetField()
    following = ZSetField()
    def get_followers(self):
        """Return User objects for everyone following this user."""
        # Because all users are added to the `followers` sorted-set with the
        # same score, when retrieved they will be sorted by key (username).
        return [User.load(username) for username in self.followers]
    def get_following(self):
        """Return User objects for everyone this user follows."""
        # Because all users are added to the `following` sorted-set with the
        # same score, when retrieved they will be sorted by key (username).
        return [User.load(username) for username in self.following]
    def is_following(self, user):
        """True if this user follows the given *user*."""
        # We can use Pythonic operators when working with Walrus containers.
        return user.username in self.following
    def gravatar_url(self, size=80):
        """Build a gravatar avatar URL from the (normalized) email address."""
        return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
            (md5(self.email.strip().lower().encode('utf-8')).hexdigest(), size)
# Simple model with a one-to-many relationship: one user has 0..n messages.
# A user is associated with a message via the `username` field.
class Message(BaseModel):
    """A status update. Linked to its author via the indexed `username`
    field (one user -> many messages)."""
    username = TextField(index=True)
    content = TextField()
    timestamp = DateTimeField(default=datetime.datetime.now)
    def get_user(self):
        """Return the User who posted this message."""
        return User.load(self.username)
# Flask provides a `session` object, which allows us to store information
# across requests (stored by default in a secure cookie). This function allows
# us to mark a user as being logged-in by setting some values in the session:
def auth_user(user):
    """Record *user* as logged-in by stamping the Flask session."""
    session.update(logged_in=True, username=user.username)
    flash('You are logged in as %s' % (user.username))
# Get the currently logged-in user, or return `None`.
def get_current_user():
    """Return the logged-in User, or None when nobody is authenticated."""
    if not session.get('logged_in'):
        return None
    try:
        return User.load(session['username'])
    except KeyError:
        # Stale session: the user record no longer exists, so drop the flag.
        session.pop('logged_in')
# View decorator which indicates that the requesting user must be authenticated
# before they can access the wrapped view. The decorator checks the session to
# see if they're logged in, and if not redirects them to the login view.
def login_required(f):
    """View decorator: anonymous visitors are redirected to the login page."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if session.get('logged_in'):
            return f(*args, **kwargs)
        return redirect(url_for('login'))
    return wrapper
# Retrieve an object by primary key. If the object does not exist, return a
# 404 not found.
def get_object_or_404(model, pk):
    """Load a model instance by primary key, or abort with a 404.

    ``Model.load`` raises ``KeyError`` for a missing primary key (the
    ``join`` view relies on exactly that), so catching only ``ValueError``
    let missing objects escape as a 500 error. Catch both.
    """
    try:
        return model.load(pk)
    except (KeyError, ValueError):
        abort(404)
# Custom template filter: Flask allows you to define these functions and then
# they are accessible in the template. This one returns a boolean whether the
# given user is following another user.
@app.template_filter('is_following')
def is_following(from_user, to_user):
    """Template filter: True if *from_user* follows *to_user*."""
    return from_user.is_following(to_user)
# Views: these are the actual mappings of url to view function.
@app.route('/')
def homepage():
    """Show the private timeline to logged-in users, the public one otherwise."""
    if session.get('logged_in'):
        return private_timeline()
    return public_timeline()
@app.route('/private/')
def private_timeline():
    """Timeline of messages posted by users the current user follows.

    Builds a single query by OR-ing together one username-equality
    expression per followed user, then sorts matches newest-first.
    """
    # `reduce` is not a builtin on Python 3 and is not guaranteed to be
    # re-exported by `from walrus import *` -- import it explicitly.
    from functools import reduce
    user = get_current_user()
    if user.following:
        query = reduce(operator.or_, [
            Message.username == username
            for username, _ in user.following
        ])
        messages = Message.query(query, order_by=Message.timestamp.desc())
    else:
        messages = []
    return render_template('private_messages.html', message_list=messages)
@app.route('/public/')
def public_timeline():
    """Show every message in the system, newest first."""
    message_list = Message.query(order_by=Message.timestamp.desc())
    return render_template('public_messages.html', message_list=message_list)
@app.route('/join/', methods=['GET', 'POST'])
def join():
    """Sign-up view: create a new account unless the username is taken."""
    if request.method == 'POST' and request.form['username']:
        username = request.form['username']
        try:
            User.load(username)
        except KeyError:
            # md5() requires bytes on Python 3 -- encode the password
            # first (gravatar_url already does the same for the email).
            user = User.create(
                username=username,
                password=md5(
                    request.form['password'].encode('utf-8')).hexdigest(),
                email=request.form['email'])
            auth_user(user)
            return redirect(url_for('homepage'))
        else:
            flash('That username is already taken')
    return render_template('join.html')
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Log a user in by matching username + md5(password) hash."""
    if request.method == 'POST' and request.form['username']:
        try:
            # md5() requires bytes on Python 3 -- encode the submitted
            # password before hashing (matches the fix in `join`).
            user = User.get(
                (User.username == request.form['username']) &
                (User.password ==
                 md5(request.form['password'].encode('utf-8')).hexdigest()))
        except ValueError:
            flash('The password entered is incorrect')
        else:
            auth_user(user)
            return redirect(url_for('homepage'))
    return render_template('login.html')
@app.route('/logout/')
def logout():
    """Clear the login flag and return to the homepage."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('homepage'))
@app.route('/following/')
@login_required
def following():
    """List the users the current user is following."""
    user = get_current_user()
    # Every other render_template() call in this module includes the .html
    # extension; without it Jinja cannot locate the template file.
    # (TODO confirm the template's on-disk filename.)
    return render_template('user_following.html',
                           user_list=user.get_following())
@app.route('/followers/')
@login_required
def followers():
    """List the users following the current user."""
    user = get_current_user()
    # Reuses the same user-listing template as `following`; as elsewhere in
    # this module the name needs its .html extension for Jinja to find it.
    # (TODO confirm the template's on-disk filename.)
    return render_template('user_following.html',
                           user_list=user.get_followers())
@app.route('/users/')
def user_list():
    """Directory of all users, ordered by username."""
    return render_template(
        'user_list.html',
        user_list=User.query(order_by=User.username))
@app.route('/users/<username>/')
def user_detail(username):
    """Profile page: show one user's messages, newest first (404 if absent)."""
    user = get_object_or_404(User, username)
    message_list = Message.query(
        Message.username == user.username,
        order_by=Message.timestamp.desc())
    return render_template('user_detail.html',
                           user=user,
                           message_list=message_list)
@app.route('/users/<username>/follow/', methods=['POST'])
@login_required
def user_follow(username):
    """Add *username* to the current user's following set (and vice versa)."""
    me = get_current_user()
    target = get_object_or_404(User, username)
    # Everyone gets score 0, so the sorted sets order members by name.
    me.following.add(target.username, 0)
    target.followers.add(me.username, 0)
    flash('You are following %s' % target.username)
    return redirect(url_for('user_detail', username=target.username))
@app.route('/users/<username>/unfollow/', methods=['POST'])
@login_required
def user_unfollow(username):
    """Remove *username* from the current user's following set (both sides)."""
    me = get_current_user()
    target = get_object_or_404(User, username)
    me.following.remove(target.username)
    target.followers.remove(me.username)
    flash('You are no longer following %s' % target.username)
    return redirect(url_for('user_detail', username=target.username))
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
    """Post a new message as the logged-in user."""
    user = get_current_user()
    if request.method == 'POST' and request.form['content']:
        # The created instance was bound to an unused local; we only
        # flash and redirect afterwards, so drop the assignment.
        Message.create(
            username=user.username,
            content=request.form['content'])
        flash('Your message has been created')
        return redirect(url_for('user_detail', username=user.username))
    return render_template('create.html')
@app.context_processor
def _inject_user():
    """Make `current_user` available in every rendered template."""
    return {'current_user': get_current_user()}
# allow running from the command line
if __name__ == '__main__':
    # DEBUG = True above enables the werkzeug debugger/reloader; do not
    # run this configuration in production.
    app.run()
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,597
|
coleifer/walrus
|
refs/heads/master
|
/walrus/containers.py
|
import hashlib
import operator
import struct
try:
from functools import reduce
except ImportError:
pass
from functools import wraps
try:
from redis.exceptions import ResponseError
except ImportError:
ResponseError = Exception
from walrus.utils import basestring_type
from walrus.utils import decode as _decode
from walrus.utils import decode_dict
from walrus.utils import encode
from walrus.utils import exception_message
from walrus.utils import make_python_attr
from walrus.utils import safe_decode_list
def chainable_method(fn):
    """Decorator: call *fn* for its side effects, then return ``self``.

    Allows container mutators to be chained fluently, e.g.
    ``h.update(a=1).expire(60)``. The wrapped function's own return
    value (if any) is discarded.
    """
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        fn(self, *args, **kwargs)
        return self
    return wrapper
class Sortable(object):
    """Mix-in exposing the Redis SORT command for container wrappers."""
    def sort(self, pattern=None, limit=None, offset=None, get_pattern=None,
             ordering=None, alpha=True, store=None):
        """Sort the container's members via Redis ``SORT``.

        :param pattern: ``BY`` pattern for external-key weighting.
        :param limit: maximum number of results.
        :param offset: start index of the result window.
        :param get_pattern: ``GET`` pattern for retrieving external keys.
        :param ordering: ``'DESC'``/``'desc'`` for descending order.
        :param alpha: sort lexicographically rather than numerically.
        :param store: destination key to store results in, if given.
        """
        if limit or offset:
            # SORT's LIMIT clause needs an explicit start whenever a
            # window is requested, even if only a count was given.
            offset = offset or 0
        descending = ordering in ('DESC', 'desc')
        return self.database.sort(
            self.key,
            start=offset,
            num=limit,
            by=pattern,
            get=get_pattern,
            desc=descending,
            alpha=alpha,
            store=store)
class Container(object):
    """
    Base-class for rich Redis object wrappers.

    A container is a thin proxy: it holds only the ``database`` handle and
    the Redis ``key`` it operates on; all actual state lives in Redis.
    """
    def __init__(self, database, key):
        self.database = database
        self.key = key

    def expire(self, ttl=None):
        """
        Set a time-to-live on the key, in seconds.

        Passing ``None`` clears any existing expiry so the key persists.
        """
        if ttl is None:
            self.database.persist(self.key)
        else:
            self.database.expire(self.key, ttl)

    def pexpire(self, ttl=None):
        """
        Set a time-to-live on the key, in milliseconds.

        Passing ``None`` clears any existing expiry so the key persists.
        """
        if ttl is None:
            self.database.persist(self.key)
        else:
            self.database.pexpire(self.key, ttl)

    def dump(self):
        """Serialize the key's value using Redis' native DUMP format."""
        return self.database.dump(self.key)

    @chainable_method
    def clear(self):
        """Empty the container by deleting its key. Chainable."""
        self.database.delete(self.key)
class Hash(Container):
    """
    Redis Hash object wrapper. Supports a dictionary-like interface
    with some modifications.
    See `Hash commands <http://redis.io/commands#hash>`_ for more info.
    """
    def __repr__(self):
        l = len(self)
        if l > 5:
            # Large hash: show only a sample page. HSCAN returns a
            # (cursor, data) 2-tuple; the previous code interpolated the
            # whole tuple, cursor included, into the repr.
            _, data = self.database.hscan(self.key, count=5)
        else:
            data = self.as_dict()
        return '<Hash "%s": %s>' % (self.key, data)
    def __getitem__(self, item):
        """
        Retrieve the value at the given key. To retrieve multiple
        values at once, you can specify multiple keys as a tuple or
        list:
        .. code-block:: python
            hsh = db.Hash('my-hash')
            first, last = hsh['first_name', 'last_name']
        """
        if isinstance(item, (list, tuple)):
            return self.database.hmget(self.key, item)
        else:
            return self.database.hget(self.key, item)
    def get(self, key, fallback=None):
        """Return the value for ``key``, or ``fallback`` if absent."""
        val = self.database.hget(self.key, key)
        return val if val is not None else fallback
    def __setitem__(self, key, value):
        """Set the value of the given key."""
        return self.database.hset(self.key, key, value)
    def __delitem__(self, key):
        """Delete the key from the hash."""
        return self.database.hdel(self.key, key)
    def __contains__(self, key):
        """
        Return a boolean value indicating whether the given key
        exists.
        """
        return self.database.hexists(self.key, key)
    def __len__(self):
        """Return the number of keys in the hash."""
        return self.database.hlen(self.key)
    def _scan(self, *args, **kwargs):
        # Incremental HSCAN iterator over (field, value) pairs.
        return self.database.hscan_iter(self.key, *args, **kwargs)
    def __iter__(self):
        """Iterate over the items in the hash."""
        return iter(self._scan())
    def search(self, pattern, count=None):
        """
        Search the keys of the given hash using the specified pattern.
        :param str pattern: Pattern used to match keys.
        :param int count: Limit number of results returned.
        :returns: An iterator yielding matching key/value pairs.
        """
        return self._scan(match=pattern, count=count)
    def keys(self):
        """Return the keys of the hash."""
        return self.database.hkeys(self.key)
    def values(self):
        """Return the values stored in the hash."""
        return self.database.hvals(self.key)
    def items(self, lazy=False):
        """
        Like Python's ``dict.items()`` but supports an optional
        parameter ``lazy`` which will return a generator rather than
        a list.
        """
        if lazy:
            return self._scan()
        else:
            return list(self)
    def setnx(self, key, value):
        """
        Set ``key`` to ``value`` if ``key`` does not exist.
        :returns: True if successfully set or False if the key already existed.
        """
        return bool(self.database.hsetnx(self.key, key, value))
    @chainable_method
    def update(self, __data=None, **kwargs):
        """
        Update the hash using the given dictionary or key/value pairs.

        The caller's dictionary is copied before merging keyword
        arguments -- previously this method mutated the dict passed in.
        """
        if __data is None:
            __data = kwargs
        else:
            __data = dict(__data)
            __data.update(kwargs)
        return self.database.hset(self.key, mapping=__data)
    def incr(self, key, incr_by=1):
        """Increment the key by the given amount."""
        return self.database.hincrby(self.key, key, incr_by)
    def incr_float(self, key, incr_by=1.):
        """Increment the key by the given amount."""
        return self.database.hincrbyfloat(self.key, key, incr_by)
    def as_dict(self, decode=False):
        """
        Return a dictionary containing all the key/value pairs in the
        hash.
        """
        res = self.database.hgetall(self.key)
        return decode_dict(res) if decode else res
    @classmethod
    def from_dict(cls, database, key, data, clear=False):
        """
        Create and populate a Hash object from a data dictionary.
        """
        hsh = cls(database, key)
        if clear:
            hsh.clear()
        hsh.update(data)
        return hsh
class List(Sortable, Container):
    """
    Redis List object wrapper. Supports a list-like interface.
    See `List commands <http://redis.io/commands#list>`_ for more info.
    """
    def __repr__(self):
        # Preview at most the first ten elements; append "..." when truncated.
        l = len(self)
        n_items = min(l, 10)
        items = safe_decode_list(self[:n_items])
        return '<List "%s": %s%s>' % (
            self.key,
            ', '.join(items),
            n_items < l and '...' or '')
    def __getitem__(self, item):
        """
        Retrieve an item from the list by index. In addition to
        integer indexes, you can also pass a ``slice``.

        NOTE(review): slice semantics intentionally diverge from Python's
        list type -- a falsy stop (``None`` or 0) selects through the end
        of the list, and step is ignored.
        """
        if isinstance(item, slice):
            start = item.start or 0
            stop = item.stop
            if not stop:
                # No stop (or stop == 0): read through the tail (-1).
                stop = -1
            else:
                # LRANGE's stop is inclusive; shift to match Python slices.
                stop -= 1
            return self.database.lrange(self.key, start, stop)
        return self.database.lindex(self.key, item)
    def __setitem__(self, idx, value):
        """Set the value of the given index."""
        return self.database.lset(self.key, idx, value)
    def __delitem__(self, item):
        """
        By default Redis treats deletes as delete by value, as
        opposed to delete by index. If an integer is passed into the
        function, it will be treated as an index, otherwise it will
        be treated as a value.
        If a slice is passed, then the list will be trimmed so that it *ONLY*
        contains the range specified by the slice start and stop. Note that
        this differs from the default behavior of Python's `list` type.
        """
        if isinstance(item, slice):
            start = item.start or 0
            stop = item.stop or -1
            if stop > 0:
                # LTRIM's stop is inclusive; shift to match Python slices.
                stop -= 1
            return self.database.ltrim(self.key, start, stop)
        elif isinstance(item, int):
            # Integer: resolve the index to its value, then remove the
            # first occurrence of that value.
            item = self[item]
            if item is None:
                return
        return self.database.lrem(self.key, 1, item)
    def __len__(self):
        """Return the length of the list."""
        return self.database.llen(self.key)
    def __iter__(self):
        """Iterate over the items in the list."""
        return iter(self.database.lrange(self.key, 0, -1))
    def append(self, value):
        """Add the given value to the end of the list."""
        return self.database.rpush(self.key, value)
    def prepend(self, value):
        """Add the given value to the beginning of the list."""
        return self.database.lpush(self.key, value)
    def extend(self, value):
        """Extend the list by the given value."""
        return self.database.rpush(self.key, *value)
    def insert(self, value, pivot, where):
        """LINSERT: place *value* 'before'/'after' the first *pivot* element."""
        return self.database.linsert(self.key, where, pivot, value)
    def insert_before(self, value, key):
        """
        Insert the given value into the list before the index
        containing ``key``.
        """
        self.insert(value, key, 'before')
    def insert_after(self, value, key):
        """
        Insert the given value into the list after the index
        containing ``key``.
        """
        self.insert(value, key, 'after')
    def popleft(self):
        """Remove the first item from the list."""
        return self.database.lpop(self.key)
    def popright(self):
        """Remove the last item from the list."""
        return self.database.rpop(self.key)
    # Alias: pop() removes from the right end, like Python's list.pop().
    pop = popright
    def bpopleft(self, timeout=0):
        """
        Remove the first item from the list, blocking until an item becomes
        available or timeout is reached (0 for no timeout, default).
        """
        # BLPOP returns (key, value); strip the key. None on timeout.
        ret = self.database.blpop(self.key, timeout)
        if ret is not None:
            return ret[1]
    def bpopright(self, timeout=0):
        """
        Remove the last item from the list, blocking until an item becomes
        available or timeout is reached (0 for no timeout, default).
        """
        # BRPOP returns (key, value); strip the key. None on timeout.
        ret = self.database.brpop(self.key, timeout)
        if ret is not None:
            return ret[1]
    def move_tail(self, key):
        """RPOPLPUSH: atomically move this list's tail to the head of *key*."""
        return self.database.rpoplpush(self.key, key)
    def bmove_tail(self, key, timeout=0):
        """Blocking variant of :py:meth:`move_tail` (BRPOPLPUSH)."""
        return self.database.brpoplpush(self.key, key, timeout)
    def as_list(self, decode=False):
        """
        Return a list containing all the items in the list.
        """
        items = self.database.lrange(self.key, 0, -1)
        return [_decode(item) for item in items] if decode else items
    @classmethod
    def from_list(cls, database, key, data, clear=False):
        """
        Create and populate a List object from a data list.
        """
        lst = cls(database, key)
        if clear:
            lst.clear()
        lst.extend(data)
        return lst
class Set(Sortable, Container):
    """
    Redis Set object wrapper. Supports a set-like interface.
    See `Set commands <http://redis.io/commands#set>`_ for more info.
    """
    def __repr__(self):
        return '<Set "%s": %s items>' % (self.key, len(self))

    def add(self, *items):
        """Add the given items to the set."""
        return self.database.sadd(self.key, *items)

    def __delitem__(self, item):
        """Remove the given item from the set."""
        return self.remove(item)

    def remove(self, *items):
        """Remove the given item(s) from the set."""
        return self.database.srem(self.key, *items)

    def pop(self):
        """Remove and return an arbitrary element from the set."""
        return self.database.spop(self.key)

    def _first_or_any(self):
        # Sets are unordered, so "first" is just any random member.
        return self.random()

    def __contains__(self, item):
        """Return whether *item* is a member of the set."""
        return self.database.sismember(self.key, item)

    def __len__(self):
        """Return the cardinality of the set."""
        return self.database.scard(self.key)

    def _scan(self, *args, **kwargs):
        # Incremental SSCAN iterator over members.
        return self.database.sscan_iter(self.key, *args, **kwargs)

    def __iter__(self):
        """Return an iterator over the members of the set."""
        return iter(self._scan())

    def search(self, pattern, count=None):
        """
        Search the values of the given set using the specified pattern.
        :param str pattern: Pattern used to match keys.
        :param int count: Limit number of results returned.
        :returns: An iterator yielding matching values.
        """
        return self._scan(match=pattern, count=count)

    def members(self):
        """Return a ``set()`` containing the members of the set."""
        return self.database.smembers(self.key)

    def random(self, n=None):
        """Return a random member (or *n* random members) of the set."""
        return self.database.srandmember(self.key, n)

    def __sub__(self, other):
        """Set difference between this set and another :py:class:`Set`."""
        return self.database.sdiff(self.key, other.key)

    def __or__(self, other):
        """Set union between this set and another :py:class:`Set`."""
        return self.database.sunion(self.key, other.key)

    def __and__(self, other):
        """Set intersection between this set and another :py:class:`Set`."""
        return self.database.sinter(self.key, other.key)

    @chainable_method
    def __isub__(self, other):
        # In-place difference: overwrite this key with the result.
        self.diffstore(self.key, other)

    @chainable_method
    def __ior__(self, other):
        # In-place union: overwrite this key with the result.
        self.unionstore(self.key, other)

    @chainable_method
    def __iand__(self, other):
        # In-place intersection: overwrite this key with the result.
        self.interstore(self.key, other)

    def diffstore(self, dest, *others):
        """
        Store the set difference of the current set and one or more
        others in a new key.
        :param dest: the name of the key to store set difference
        :param others: One or more :py:class:`Set` instances
        :returns: A :py:class:`Set` referencing ``dest``.
        """
        source_keys = [self.key] + [o.key for o in others]
        self.database.sdiffstore(dest, source_keys)
        return self.database.Set(dest)

    def interstore(self, dest, *others):
        """
        Store the intersection of the current set and one or more
        others in a new key.
        :param dest: the name of the key to store intersection
        :param others: One or more :py:class:`Set` instances
        :returns: A :py:class:`Set` referencing ``dest``.
        """
        source_keys = [self.key] + [o.key for o in others]
        self.database.sinterstore(dest, source_keys)
        return self.database.Set(dest)

    def unionstore(self, dest, *others):
        """
        Store the union of the current set and one or more
        others in a new key.
        :param dest: the name of the key to store union
        :param others: One or more :py:class:`Set` instances
        :returns: A :py:class:`Set` referencing ``dest``.
        """
        source_keys = [self.key] + [o.key for o in others]
        self.database.sunionstore(dest, source_keys)
        return self.database.Set(dest)

    def as_set(self, decode=False):
        """
        Return a Python set containing all the items in the collection.
        """
        members = self.database.smembers(self.key)
        if decode:
            return {_decode(member) for member in members}
        return members

    @classmethod
    def from_set(cls, database, key, data, clear=False):
        """
        Create and populate a Set object from a data set.
        """
        obj = cls(database, key)
        if clear:
            obj.clear()
        obj.add(*data)
        return obj
class ZSet(Sortable, Container):
"""
Redis ZSet object wrapper. Acts like a set and a dictionary.
See `Sorted set commands <http://redis.io/commands#sorted_set>`_
for more info.
"""
def __repr__(self):
l = len(self)
n_items = min(l, 5)
items = safe_decode_list(self[:n_items, False])
return '<ZSet "%s": %s%s>' % (
self.key,
', '.join(items),
n_items < l and '...' or '')
def add(self, _mapping=None, **kwargs):
"""
Add the given item/score pairs to the ZSet. Arguments are
specified as a dictionary of item: score, or as keyword arguments.
"""
if _mapping is not None:
_mapping.update(kwargs)
mapping = _mapping
else:
mapping = _mapping
return self.database.zadd(self.key, mapping)
def _convert_slice(self, s):
def _slice_to_indexes(s):
start = s.start
stop = s.stop
if isinstance(start, int) or isinstance(stop, int):
return start, stop
if start:
start = self.database.zrank(self.key, start)
if start is None:
raise KeyError(s.start)
if stop:
stop = self.database.zrank(self.key, stop)
if stop is None:
raise KeyError(s.stop)
return start, stop
start, stop = _slice_to_indexes(s)
start = start or 0
if not stop:
stop = -1
else:
stop -= 1
return start, stop
def __getitem__(self, item):
"""
Retrieve the given values from the sorted set. Accepts a
variety of parameters for the input:
.. code-block:: python
zs = db.ZSet('my-zset')
# Return the first 10 elements with their scores.
zs[:10, True]
# Return the first 10 elements without scores.
zs[:10]
zs[:10, False]
# Return the range of values between 'k1' and 'k10' along
# with their scores.
zs['k1':'k10', True]
# Return the range of items preceding and including 'k5'
# without scores.
zs[:'k5', False]
"""
if isinstance(item, tuple) and len(item) == 2:
item, withscores = item
else:
withscores = False
if isinstance(item, slice):
start, stop = self._convert_slice(item)
else:
start = stop = item
return self.database.zrange(
self.key,
start,
stop,
withscores=withscores)
def __setitem__(self, item, score):
"""Add item to the set with the given score."""
return self.database.zadd(self.key, {item: score})
def __delitem__(self, item):
"""
Delete the given item(s) from the set. Like
:py:meth:`~ZSet.__getitem__`, this method supports a wide
variety of indexing and slicing options.
"""
if isinstance(item, slice):
start, stop = self._convert_slice(item)
return self.database.zremrangebyrank(self.key, start, stop)
else:
return self.remove(item)
def remove(self, *items):
"""Remove the given items from the ZSet."""
return self.database.zrem(self.key, *items)
def __contains__(self, item):
"""
Return a boolean indicating whether the given item is in the
sorted set.
"""
return not (self.rank(item) is None)
def __len__(self):
"""Return the number of items in the sorted set."""
return self.database.zcard(self.key)
def _scan(self, *args, **kwargs):
return self.database.zscan_iter(self.key, *args, **kwargs)
def __iter__(self):
"""
Return an iterator that will yield (item, score) tuples.
"""
return iter(self._scan())
def iterator(self, with_scores=False, reverse=False):
if with_scores and not reverse:
return self.search(None)
return self.range(
0,
-1,
with_scores=with_scores,
reverse=reverse)
def search(self, pattern, count=None):
"""
Search the set, returning items that match the given search
pattern.
:param str pattern: Search pattern using wildcards.
:param int count: Limit result set size.
:returns: Iterator that yields matching item/score tuples.
"""
return self._scan(match=pattern, count=count)
def score(self, item):
"""Return the score of the given item."""
return self.database.zscore(self.key, item)
def rank(self, item, reverse=False):
"""Return the rank of the given item."""
fn = reverse and self.database.zrevrank or self.database.zrank
return fn(self.key, item)
def count(self, low, high=None):
"""
Return the number of items between the given bounds.
"""
if high is None:
high = low
return self.database.zcount(self.key, low, high)
def lex_count(self, low, high):
"""
Count the number of members in a sorted set between a given
lexicographical range.
"""
return self.database.zlexcount(self.key, low, high)
def range(self, low, high, with_scores=False, desc=False, reverse=False):
"""
Return a range of items between ``low`` and ``high``. By
default scores will not be included, but this can be controlled
via the ``with_scores`` parameter.
:param low: Lower bound.
:param high: Upper bound.
:param bool with_scores: Whether the range should include the
scores along with the items.
:param bool desc: Whether to sort the results descendingly.
:param bool reverse: Whether to select the range in reverse.
"""
if reverse:
return self.database.zrevrange(self.key, low, high, with_scores)
else:
return self.database.zrange(self.key, low, high, desc, with_scores)
def range_by_score(self, low, high, start=None, num=None,
with_scores=False, reverse=False):
if reverse:
fn = self.database.zrevrangebyscore
low, high = high, low
else:
fn = self.database.zrangebyscore
return fn(self.key, low, high, start, num, with_scores)
def range_by_lex(self, low, high, start=None, num=None, reverse=False):
"""
Return a range of members in a sorted set, by lexicographical range.
"""
if reverse:
fn = self.database.zrevrangebylex
low, high = high, low
else:
fn = self.database.zrangebylex
return fn(self.key, low, high, start, num)
def remove_by_rank(self, low, high=None):
"""
Remove elements from the ZSet by their rank (relative position).
:param low: Lower bound.
:param high: Upper bound.
"""
if high is None:
high = low
return self.database.zremrangebyrank(self.key, low, high)
def remove_by_score(self, low, high=None):
"""
Remove elements from the ZSet by their score.
:param low: Lower bound.
:param high: Upper bound.
"""
if high is None:
high = low
return self.database.zremrangebyscore(self.key, low, high)
def remove_by_lex(self, low, high):
return self.database.zremrangebylex(self.key, low, high)
def incr(self, key, incr_by=1.):
"""
Increment the score of an item in the ZSet.
:param key: Item to increment.
:param incr_by: Amount to increment item's score.
"""
return self.database.zincrby(self.key, incr_by, key)
def _first_or_any(self):
item = self[0]
if item:
return item[0]
@chainable_method
def __ior__(self, other):
self.unionstore(self.key, other)
return self
@chainable_method
def __iand__(self, other):
self.interstore(self.key, other)
return self
def interstore(self, dest, *others, **kwargs):
"""
Store the intersection of the current zset and one or more
others in a new key.
:param dest: the name of the key to store intersection
:param others: One or more :py:class:`ZSet` instances
:returns: A :py:class:`ZSet` referencing ``dest``.
"""
keys = [self.key]
keys.extend([other.key for other in others])
self.database.zinterstore(dest, keys, **kwargs)
return self.database.ZSet(dest)
def unionstore(self, dest, *others, **kwargs):
"""
Store the union of the current set and one or more
others in a new key.
:param dest: the name of the key to store union
:param others: One or more :py:class:`ZSet` instances
:returns: A :py:class:`ZSet` referencing ``dest``.
"""
keys = [self.key]
keys.extend([other.key for other in others])
self.database.zunionstore(dest, keys, **kwargs)
return self.database.ZSet(dest)
def popmin(self, count=1):
"""
Atomically remove the lowest-scoring item(s) in the set.
:returns: a list of item, score tuples or ``None`` if the set is empty.
"""
return self.database.zpopmin(self.key, count)
def popmax(self, count=1):
"""
Atomically remove the highest-scoring item(s) in the set.
:returns: a list of item, score tuples or ``None`` if the set is empty.
"""
return self.database.zpopmax(self.key, count)
def bpopmin(self, timeout=0):
"""
Atomically remove the lowest-scoring item from the set, blocking until
an item becomes available or timeout is reached (0 for no timeout,
default).
Returns a 2-tuple of (item, score).
"""
res = self.database.bzpopmin(self.key, timeout)
if res is not None:
return (res[1], res[2])
def bpopmax(self, timeout=0):
"""
Atomically remove the highest-scoring item from the set, blocking until
an item becomes available or timeout is reached (0 for no timeout,
default).
Returns a 2-tuple of (item, score).
"""
res = self.database.bzpopmax(self.key, timeout)
if res is not None:
return (res[1], res[2])
def popmin_compat(self, count=1):
"""
Atomically remove the lowest-scoring item(s) in the set. Compatible
with Redis versions < 5.0.
:returns: a list of item, score tuples or ``None`` if the set is empty.
"""
pipe = self.database.pipeline()
r1, r2 = (pipe
.zrange(self.key, 0, count - 1, withscores=True)
.zremrangebyrank(self.key, 0, count - 1)
.execute())
return r1
def popmax_compat(self, count=1):
"""
Atomically remove the highest-scoring item(s) in the set. Compatible
with Redis versions < 5.0.
:returns: a list of item, score tuples or ``None`` if the set is empty.
"""
pipe = self.database.pipeline()
r1, r2 = (pipe
.zrange(self.key, 0 - count, -1, withscores=True)
.zremrangebyrank(self.key, 0 - count, -1)
.execute())
return r1[::-1]
def as_items(self, decode=False):
"""
Return a list of 2-tuples consisting of key/score.
"""
items = self.database.zrange(self.key, 0, -1, withscores=True)
if decode:
items = [(_decode(k), score) for k, score in items]
return items
@classmethod
def from_dict(cls, database, key, data, clear=False):
    """
    Create and populate a ZSet object from a dictionary mapping
    member -> score.

    :param database: a walrus ``Database`` instance.
    :param key: key at which to store the sorted set.
    :param dict data: mapping of member to score.
    :param clear: clear any existing data at the key first.
    :returns: a populated :py:class:`ZSet`.
    """
    instance = cls(database, key)
    if clear:
        instance.clear()
    instance.add(data)
    return instance
class HyperLogLog(Container):
    """
    Redis HyperLogLog object wrapper. Supports adding items, estimating
    cardinality via ``len()``, and merging with other HyperLogLogs.

    See `HyperLogLog commands <http://redis.io/commands#hyperloglog>`_
    for more info.
    """
    def add(self, *items):
        """
        Add one or more items to the HyperLogLog.

        :param items: items to add.
        """
        return self.database.pfadd(self.key, *items)

    def __len__(self):
        # Approximate cardinality as reported by PFCOUNT.
        return self.database.pfcount(self.key)

    def __ior__(self, other):
        # Support ``hll |= other`` (or a list/tuple of others), merging the
        # other HLL(s) into this one's key.
        others = other if isinstance(other, (list, tuple)) else [other]
        return self.merge(self.key, *others)

    def merge(self, dest, *others):
        """
        Merge one or more :py:class:`HyperLogLog` instances.

        :param dest: key at which to store the merged result.
        :param others: one or more ``HyperLogLog`` instances.
        :returns: a :py:class:`HyperLogLog` wrapping the destination key.
        """
        keys = [self.key] + [hll.key for hll in others]
        self.database.pfmerge(dest, *keys)
        return HyperLogLog(self.database, dest)
class Array(Container):
    """
    Custom container that emulates an array (as opposed to the
    linked-list implementation of :py:class:`List`). This gives:
    * O(1) append, get, len, pop last, set
    * O(n) remove from middle
    :py:class:`Array` is built on top of the hash data type and
    is implemented using lua scripts.
    """
    # Each index/value pair is stored as a hash field; all index bookkeeping
    # is delegated to the registered "array_*" lua scripts executed through
    # Database.run_script().
    def __getitem__(self, idx):
        """Get the value stored in the given index."""
        return self.database.run_script(
            'array_get',
            keys=[self.key],
            args=[idx])
    def __setitem__(self, idx, value):
        """Set the value at the given index."""
        return self.database.run_script(
            'array_set',
            keys=[self.key],
            args=[idx, value])
    def __delitem__(self, idx):
        """Delete the given index."""
        return self.pop(idx)
    def __len__(self):
        """Return the number of items in the array."""
        # One hash field per index, so HLEN equals the array length.
        return self.database.hlen(self.key)
    def append(self, value):
        """Append a new value to the end of the array."""
        self.database.run_script(
            'array_append',
            keys=[self.key],
            args=[value])
    def extend(self, values):
        """Extend the array, appending the given values."""
        self.database.run_script(
            'array_extend',
            keys=[self.key],
            args=values)
    def pop(self, idx=None):
        """
        Remove an item from the array. By default this will be the
        last item by index, but any index can be specified.

        :param idx: optional index of the item to remove.
        :returns: the result of the corresponding lua script.
        """
        if idx is not None:
            return self.database.run_script(
                'array_remove',
                keys=[self.key],
                args=[idx])
        else:
            return self.database.run_script(
                'array_pop',
                keys=[self.key],
                args=[])
    def __contains__(self, item):
        """
        Return a boolean indicating whether the given item is stored
        in the array. O(n).
        """
        # Values come back from Redis as bytes, so compare against the
        # encoded representation of the query item.
        item = encode(item)
        for value in self:
            if value == item:
                return True
        return False
    def __iter__(self):
        """Return an iterable that yields array items."""
        # NOTE(review): hscan_iter yields (field, value) pairs and sorted()
        # orders by raw field bytes; this matches numeric index order only
        # under the index encoding used by the lua scripts -- confirm.
        return iter(
            item[1] for item in sorted(self.database.hscan_iter(self.key)))
    def as_list(self, decode=False):
        """
        Return a list of items in the array.

        :param decode: decode the stored byte-values.
        """
        return [_decode(i) for i in self] if decode else list(self)
    @classmethod
    def from_list(cls, database, key, data, clear=False):
        """
        Create and populate an Array object from a data dictionary.

        :param database: a walrus ``Database`` instance.
        :param key: key at which to store the array.
        :param data: iterable of values to append.
        :param clear: clear any existing data at the key first.
        :returns: a populated :py:class:`Array`.
        """
        arr = cls(database, key)
        if clear:
            arr.clear()
        arr.extend(data)
        return arr
def _normalize_stream_keys(keys, default_id='0-0'):
    """
    Normalize the accepted "keys" argument formats into a dict mapping
    stream key -> minimum message id.
    """
    if isinstance(keys, basestring_type):
        # A single stream key.
        return {keys: default_id}
    elif isinstance(keys, (list, tuple)):
        # Multiple stream keys, each starting from the default id.
        return {key: default_id for key in keys}
    elif isinstance(keys, dict):
        # Already a key -> minimum-id mapping.
        return keys
    else:
        raise ValueError('keys must be either a stream key, a list of '
                         'stream keys, or a dictionary mapping key to '
                         'minimum message id.')
class Stream(Container):
    """
    Redis stream container.

    Thin wrapper around the X* stream commands; all operations are
    delegated to the underlying walrus ``Database`` client.
    """
    def add(self, data, id='*', maxlen=None, approximate=True):
        """
        Add data to a stream.
        :param dict data: data to add to stream
        :param id: identifier for message ('*' to automatically append)
        :param maxlen: maximum length for stream
        :param approximate: allow stream max length to be approximate
        :returns: the added message id.
        """
        return self.database.xadd(self.key, data, id, maxlen, approximate)
    def __getitem__(self, item):
        """
        Read a range of values from a stream.
        The index must be a message id or a slice. An empty slice will result
        in reading all values from the stream. Message ids provided as lower or
        upper bounds are inclusive.
        To specify a maximum number of messages, use the "step" parameter of
        the slice.
        """
        if isinstance(item, slice):
            # '-' / '+' are Redis sentinels for oldest / newest message.
            return self.range(item.start or '-', item.stop or '+', item.step)
        return self.get(item)
    def get(self, docid):
        """
        Get a message by id.
        :param docid: the message id to retrieve.
        :returns: a 2-tuple of (message id, data) or None if not found.
        """
        # A single-id inclusive range with count=1 fetches exactly that
        # message (or nothing).
        items = self[docid:docid:1]
        if items:
            return items[0]
    def range(self, start='-', stop='+', count=None):
        """
        Read a range of values from a stream.
        :param start: start key of range (inclusive) or '-' for oldest message
        :param stop: stop key of range (inclusive) or '+' for newest message
        :param count: limit number of messages returned
        """
        return self.database.xrange(self.key, start, stop, count)
    def revrange(self, start='+', stop='-', count=None):
        """
        Read a range of values from a stream in reverse.
        :param start: start key of range (inclusive) or '+' for newest message
        :param stop: stop key of range (inclusive) or '-' for oldest message
        :param count: limit number of messages returned
        """
        return self.database.xrevrange(self.key, start, stop, count)
    def __iter__(self):
        # Iterate over the entire stream, oldest to newest.
        return iter(self[:])
    def __delitem__(self, item):
        """
        Delete one or more messages by id. The index can be either a single
        message id or a list/tuple of multiple ids.
        """
        if not isinstance(item, (list, tuple)):
            item = (item,)
        self.delete(*item)
    def delete(self, *id_list):
        """
        Delete one or more message by id. The index can be either a single
        message id or a list/tuple of multiple ids.
        """
        return self.database.xdel(self.key, *id_list)
    def length(self):
        """
        Return the length of a stream.
        """
        return self.database.xlen(self.key)
    __len__ = length
    def read(self, count=None, block=None, last_id=None):
        """
        Monitor stream for new data.
        :param int count: limit number of messages returned
        :param int block: milliseconds to block, 0 for indefinitely
        :param last_id: Last id read (an exclusive lower-bound). If the '$'
            value is given, we will only read values added *after* our command
            started blocking.
        :returns: a list of (message id, data) 2-tuples.
        """
        # Default to '0-0', i.e. read from the very beginning of the stream.
        if last_id is None: last_id = '0-0'
        resp = self.database.xread({self.key: _decode(last_id)}, count, block)
        # resp is a 2-tuple of stream name -> message list.
        return resp[0][1] if resp else []
    def info(self):
        """
        Retrieve information about the stream. Wraps call to
        :py:meth:`~Database.xinfo_stream`.
        :returns: a dictionary containing stream metadata
        """
        return self.database.xinfo_stream(self.key)
    def groups_info(self):
        """
        Retrieve information about consumer groups for the stream. Wraps call
        to :py:meth:`~Database.xinfo_groups`.
        :returns: a dictionary containing consumer group metadata
        """
        return self.database.xinfo_groups(self.key)
    def consumers_info(self, group):
        """
        Retrieve information about consumers within the given consumer group
        operating on the stream. Calls :py:meth:`~Database.xinfo_consumers`.
        :param group: consumer group name
        :returns: a dictionary containing consumer metadata
        """
        return self.database.xinfo_consumers(self.key, group)
    def set_id(self, id):
        """
        Set the maximum message id for the stream.
        :param id: id of last-read message
        """
        return self.database.xsetid(self.key, id)
    def trim(self, count=None, approximate=True, minid=None, limit=None):
        """
        Trim the stream to the given "count" of messages, discarding the oldest
        messages first.
        :param count: maximum size of stream (maxlen)
        :param approximate: allow size to be approximate
        :param minid: evicts entries with IDs lower than the given min id.
        :param limit: maximum number of entries to evict.
        """
        return self.database.xtrim(self.key, maxlen=count,
                                   approximate=approximate, minid=minid,
                                   limit=limit)
class ConsumerGroupStream(Stream):
    """
    Helper for working with an individual stream within the context of a
    consumer group. This object is exposed as an attribute on a
    :py:class:`ConsumerGroup` object using the stream key for the attribute
    name.
    This class should not be created directly. It will automatically be added
    to the ``ConsumerGroup`` object.
    For example::
        cg = db.consumer_group('groupname', ['stream-1', 'stream-2'])
        cg.stream_1  # ConsumerGroupStream for "stream-1"
        cg.stream_2  # ConsumerGroupStream for "stream-2"
    """
    # __slots__ keeps instances lightweight; one is created per stream per
    # consumer group.
    __slots__ = ('database', 'group', 'key', '_consumer')
    def __init__(self, database, group, key, consumer):
        # :param database: walrus Database client
        # :param group: consumer group name
        # :param key: stream key
        # :param consumer: consumer name within the group
        self.database = database
        self.group = group
        self.key = key
        self._consumer = consumer
    def consumers_info(self):
        """
        Retrieve information about consumers within the given consumer group
        operating on the stream. Calls :py:meth:`~Database.xinfo_consumers`.
        :returns: a list of dictionaries containing consumer metadata
        """
        return self.database.xinfo_consumers(self.key, self.group)
    def ack(self, *id_list):
        """
        Acknowledge that the message(s) were been processed by the consumer
        associated with the parent :py:class:`ConsumerGroup`.
        :param id_list: one or more message ids to acknowledge
        :returns: number of messages marked acknowledged
        """
        return self.database.xack(self.key, self.group, *id_list)
    def claim(self, *id_list, **kwargs):
        """
        Claim pending - but unacknowledged - messages for this stream within
        the context of the parent :py:class:`ConsumerGroup`.
        :param id_list: one or more message ids to acknowledge
        :param min_idle_time: minimum idle time in milliseconds (keyword-arg).
        :returns: list of (message id, data) 2-tuples of messages that were
            successfully claimed
        """
        # min_idle_time is accepted only as a keyword argument; any other
        # keyword is rejected.
        min_idle_time = kwargs.pop('min_idle_time', None) or 0
        if kwargs: raise ValueError('incorrect arguments for claim()')
        return self.database.xclaim(self.key, self.group, self._consumer,
                                    min_idle_time, id_list)
    def pending(self, start='-', stop='+', count=1000, consumer=None,
                idle=None):
        """
        List pending messages within the consumer group for this stream.
        :param start: start id (or '-' for oldest pending)
        :param stop: stop id (or '+' for newest pending)
        :param count: limit number of messages returned
        :param consumer: restrict message list to the given consumer
        :param int idle: filter by idle-time in milliseconds (6.2)
        :returns: A list containing status for each pending message. Each
            pending message returns [id, consumer, idle time, deliveries].
        """
        return self.database.xpending_range(self.key, self.group, min=start,
                                            max=stop, count=count,
                                            consumername=consumer, idle=idle)
    def autoclaim(self, consumer, min_idle_time, start_id=0, count=None, justid=False):
        """
        Transfer ownership of pending stream entries that match the specified
        criteria. Similar to calling XPENDING and XCLAIM, but provides a more
        straightforward way to deal with message delivery failures.
        :param consumer: name of consumer that claims the message.
        :param min_idle_time: in milliseconds
        :param start_id: start id
        :param count: optional, upper limit of entries to claim. Default 100.
        :param justid: return just IDs of messages claimed.
        :returns: [next start id, [messages that were claimed]
        """
        return self.database.xautoclaim(self.key, self.group, consumer,
                                        min_idle_time, start_id, count, justid)
    def read(self, count=None, block=None, last_id=None):
        """
        Monitor the stream for new messages within the context of the parent
        :py:class:`ConsumerGroup`.
        :param int count: limit number of messages returned
        :param int block: milliseconds to block, 0 for indefinitely.
        :param str last_id: optional last ID, by default uses the special
            token ">", which reads the oldest unread message.
        :returns: a list of (message id, data) 2-tuples.
        """
        # '>' asks Redis for messages never delivered to any consumer in
        # this group.
        key = {self.key: '>' if last_id is None else last_id}
        resp = self.database.xreadgroup(self.group, self._consumer, key, count,
                                        block)
        # resp is a list of (stream, messages); we only read one stream.
        return resp[0][1] if resp else []
    def set_id(self, id='$'):
        """
        Set the last-read message id for the stream within the context of the
        parent :py:class:`ConsumerGroup`. By default this will be the special
        "$" identifier, meaning all messages are marked as having been read.
        :param id: id of last-read message (or "$").
        """
        return self.database.xgroup_setid(self.key, self.group, id)
    def delete_consumer(self, consumer=None):
        """
        Remove a specific consumer from a consumer group.
        :consumer: name of consumer to delete. If not provided, will be the
            default consumer for this stream.
        :returns: number of pending messages that the consumer had before
            being deleted.
        """
        if consumer is None: consumer = self._consumer
        return self.database.xgroup_delconsumer(self.key, self.group, consumer)
class ConsumerGroup(object):
    """
    Helper for working with Redis Streams consumer groups functionality. Each
    stream associated with the consumer group is exposed as a special attribute
    of the ``ConsumerGroup`` object, exposing stream-specific functionality
    within the context of the group.
    Rather than creating this class directly, use the
    :py:meth:`Database.consumer_group` method.
    Each registered stream within the group is exposed as a special attribute
    that provides stream-specific APIs within the context of the group. For
    more information see :py:class:`ConsumerGroupStream`.
    The streams managed by a consumer group must exist before the consumer
    group can be created. By default, calling :py:meth:`ConsumerGroup.create`
    will automatically create stream keys for any that do not exist.
    Example::
        cg = db.consumer_group('groupname', ['stream-1', 'stream-2'])
        cg.create()  # Create consumer group.
        cg.stream_1  # ConsumerGroupStream for "stream-1"
        cg.stream_2  # ConsumerGroupStream for "stream-2"
        # or, alternatively:
        cg.streams['stream-1']
    :param Database database: Redis client
    :param name: consumer group name
    :param keys: stream identifier(s) to monitor. May be a single stream
        key, a list of stream keys, or a key-to-minimum id mapping. The
        minimum id for each stream should be considered an exclusive
        lower-bound. The '$' value can also be used to only read values
        added *after* our command started blocking.
    :param consumer: name for consumer
    """
    # Subclasses may override this to customize per-stream behavior.
    stream_key_class = ConsumerGroupStream
    def __init__(self, database, name, keys, consumer=None):
        self.database = database
        self.name = name
        self.keys = _normalize_stream_keys(keys)
        # Separate mapping using '>' so group reads fetch undelivered
        # messages rather than re-reading from the stored minimum id.
        self._read_keys = _normalize_stream_keys(list(self.keys), '>')
        self._consumer = consumer or (self.name + '.c1')
        self.streams = {}  # Dict of key->ConsumerGroupStream.
        # Add attributes for each stream exposed as part of the group.
        for key in self.keys:
            attr = make_python_attr(key)
            stream = self.stream_key_class(database, name, key, self._consumer)
            setattr(self, attr, stream)
            self.streams[key] = stream
    def consumer(self, name):
        """
        Create a new consumer for the :py:class:`ConsumerGroup`.
        :param name: name of consumer
        :returns: a :py:class:`ConsumerGroup` using the given consumer name.
        """
        return type(self)(self.database, self.name, self.keys, name)
    def create(self, ensure_keys_exist=True, mkstream=False):
        """
        Create the consumer group and register it with the group's stream keys.
        :param ensure_keys_exist: Ensure that the streams exist before creating
            the consumer group. Streams that do not exist will be created.
        :param mkstream: Use the "MKSTREAM" option to ensure stream exists (may
            require unstable version of Redis).
        """
        if ensure_keys_exist:
            for key in self.keys:
                if not self.database.exists(key):
                    # Create the stream by adding-then-deleting a dummy
                    # message, leaving an empty stream behind.
                    msg_id = self.database.xadd(key, {'': ''}, id=b'0-1')
                    self.database.xdel(key, msg_id)
                elif self.database.type(key) != b'stream':
                    raise ValueError('Consumer group key "%s" exists and is '
                                     'not a stream. To prevent data-loss '
                                     'this key will not be deleted.' % key)
        resp = {}
        # Mapping of key -> last-read message ID.
        for key, value in self.keys.items():
            try:
                resp[key] = self.database.xgroup_create(key, self.name, value,
                                                        mkstream)
            except ResponseError as exc:
                # BUSYGROUP means the group already exists; report False
                # for that stream rather than failing.
                if exception_message(exc).startswith('BUSYGROUP'):
                    resp[key] = False
                else:
                    raise
        return resp
    def reset(self):
        """
        Reset the consumer group, clearing the last-read status for each
        stream so it will read from the beginning of each stream.
        """
        return self.set_id('0-0')
    def destroy(self):
        """
        Destroy the consumer group.
        :returns: a dict mapping stream key to the destroy-command result.
        """
        resp = {}
        for key in self.keys:
            resp[key] = self.database.xgroup_destroy(key, self.name)
        return resp
    def read(self, count=None, block=None, consumer=None):
        """
        Read unseen messages from all streams in the consumer group. Wrapper
        for :py:class:`Database.xreadgroup` method.
        :param int count: limit number of messages returned
        :param int block: milliseconds to block, 0 for indefinitely.
        :param consumer: consumer name
        :returns: a list of (stream key, messages) tuples, where messages is
            a list of (message id, data) 2-tuples.
        """
        if consumer is None: consumer = self._consumer
        return self.database.xreadgroup(self.name, consumer, self._read_keys,
                                        count, block)
    def set_id(self, id='$'):
        """
        Set the last-read message id for each stream in the consumer group. By
        default, this will be the special "$" identifier, meaning all messages
        are marked as having been read.
        :param id: id of last-read message (or "$").
        """
        accum = {}
        for key in self.keys:
            accum[key] = self.database.xgroup_setid(key, self.name, id)
        return accum
    def stream_info(self):
        """
        Retrieve information for each stream managed by the consumer group.
        Calls :py:meth:`~Database.xinfo_stream` for each stream.
        :returns: a dictionary mapping stream key to a dictionary of metadata
        """
        accum = {}
        for key in self.keys:
            accum[key] = self.database.xinfo_stream(key)
        return accum
class BitFieldOperation(object):
    """
    Command builder for BITFIELD commands.

    Sub-commands are accumulated via method-chaining and sent to the server
    as a single BITFIELD invocation when :py:meth:`execute` is called.
    """
    def __init__(self, database, key):
        self.database = database
        self.key = key
        self.operations = []
        self._last_overflow = None  # Server default is "WRAP".

    def incrby(self, fmt, offset, increment, overflow=None):
        """
        Increment a bitfield by a given amount.

        :param fmt: format-string for the bitfield being updated, e.g. u8 for
            an unsigned 8-bit integer.
        :param int offset: offset (in number of bits).
        :param int increment: value to increment the bitfield by.
        :param str overflow: overflow algorithm. Defaults to WRAP, but other
            acceptable values are SAT and FAIL. See the Redis docs for
            descriptions of these algorithms.
        :returns: a :py:class:`BitFieldOperation` instance.
        """
        # OVERFLOW is sticky on the server side, so only emit it when it
        # differs from the previously-requested algorithm.
        if overflow is not None and overflow != self._last_overflow:
            self._last_overflow = overflow
            self.operations.append(('OVERFLOW', overflow))
        self.operations.append(('INCRBY', fmt, offset, increment))
        return self

    def get(self, fmt, offset):
        """
        Get the value of a given bitfield.

        :param fmt: format-string for the bitfield being read, e.g. u8 for an
            unsigned 8-bit integer.
        :param int offset: offset (in number of bits).
        :returns: a :py:class:`BitFieldOperation` instance.
        """
        self.operations.append(('GET', fmt, offset))
        return self

    def set(self, fmt, offset, value):
        """
        Set the value of a given bitfield.

        :param fmt: format-string for the bitfield being written, e.g. u8 for
            an unsigned 8-bit integer.
        :param int offset: offset (in number of bits).
        :param int value: value to set at the given position.
        :returns: a :py:class:`BitFieldOperation` instance.
        """
        self.operations.append(('SET', fmt, offset, value))
        return self

    @property
    def command(self):
        # Flatten the accumulated sub-commands into one command tuple.
        cmd = ('BITFIELD', self.key)
        for op in self.operations:
            cmd += op
        return cmd

    def execute(self):
        """
        Execute the operation(s) in a single BITFIELD command. The return value
        is a list of values corresponding to each operation.
        """
        return self.database.execute_command(*self.command)

    def __iter__(self):
        """
        Implicit execution and iteration of the return values for a sequence of
        operations.
        """
        return iter(self.execute())
class BitField(Container):
    """
    Wrapper that provides a convenient API for constructing and executing Redis
    BITFIELD commands. The BITFIELD command can pack multiple operations into a
    single logical command, so the :py:class:`BitField` supports a method-
    chaining API that allows multiple operations to be performed atomically.
    Rather than instantiating this class directly, you should use the
    :py:meth:`Database.bit_field` method to obtain a :py:class:`BitField`.
    """
    def incrby(self, fmt, offset, increment, overflow=None):
        """
        Increment a bitfield by a given amount.
        :param fmt: format-string for the bitfield being updated, e.g. u8 for
            an unsigned 8-bit integer.
        :param int offset: offset (in number of bits).
        :param int increment: value to increment the bitfield by.
        :param str overflow: overflow algorithm. Defaults to WRAP, but other
            acceptable values are SAT and FAIL. See the Redis docs for
            descriptions of these algorithms.
        :returns: a :py:class:`BitFieldOperation` instance.
        """
        # Each public method starts a fresh chainable operation builder.
        bfo = BitFieldOperation(self.database, self.key)
        return bfo.incrby(fmt, offset, increment, overflow)
    def get(self, fmt, offset):
        """
        Get the value of a given bitfield.
        :param fmt: format-string for the bitfield being read, e.g. u8 for an
            unsigned 8-bit integer.
        :param int offset: offset (in number of bits).
        :returns: a :py:class:`BitFieldOperation` instance.
        """
        bfo = BitFieldOperation(self.database, self.key)
        return bfo.get(fmt, offset)
    def set(self, fmt, offset, value):
        """
        Set the value of a given bitfield.
        :param fmt: format-string for the bitfield being read, e.g. u8 for an
            unsigned 8-bit integer.
        :param int offset: offset (in number of bits).
        :param int value: value to set at the given position.
        :returns: a :py:class:`BitFieldOperation` instance.
        """
        bfo = BitFieldOperation(self.database, self.key)
        return bfo.set(fmt, offset, value)
    def _validate_slice(self, item):
        # Validate slice-style indexing used by __getitem__/__setitem__/
        # __delitem__ and return a (start, stop) bit range.
        # NOTE(review): a negative start (e.g. bf[-4:8]) is not rejected here
        # and would produce a nonsensical width/offset -- confirm intended.
        if not isinstance(item, slice):
            raise ValueError('Must use a slice.')
        if item.stop is None or item.stop < 0:
            raise ValueError('slice must have a non-negative upper-bound')
        start = item.start or 0
        if start > item.stop:
            raise ValueError('start of slice cannot exceed stop')
        return start, item.stop
    def __getitem__(self, item):
        """
        Short-hand for getting a range of bits in a bitfield. Note that the
        item **must** be a slice specifying the start and end of the range of
        bits to read.
        """
        start, stop = self._validate_slice(item)
        # Read (stop - start) bits as a single unsigned integer.
        return self.get('u%s' % (stop - start), start).execute()[0]
    def __setitem__(self, item, value):
        """
        Short-hand for setting a range of bits in a bitfield. Note that the
        item **must** be a slice specifying the start and end of the range of
        bits to read. If the value representation exceeds the number of bits
        implied by the slice range, a ``ValueError`` is raised.
        """
        start, stop = self._validate_slice(item)
        nbits = stop - start
        if value >= (1 << nbits):
            raise ValueError('value exceeds width specified by slice')
        self.set('u%s' % nbits, start, value).execute()
    def __delitem__(self, item):
        """
        Clear a range of bits in a bitfield. Note that the item **must** be a
        slice specifying the start and end of the range of bits to clear.
        """
        start, stop = self._validate_slice(item)
        # "Clearing" means writing zero across the slice's bit-width.
        self.set('u%s' % (stop - start), start, 0).execute()
    def get_raw(self):
        """
        Return the raw bytestring that comprises the bitfield. Equivalent to a
        normal GET command.
        """
        return self.database.get(self.key)
    def set_raw(self, value):
        """
        Set the raw bytestring that comprises the bitfield. Equivalent to a
        normal SET command.
        """
        return self.database.set(self.key, value)
    def bit_count(self, start=None, end=None):
        """
        Count the set bits in a string. Note that the `start` and `end`
        parameters are offsets in **bytes**.
        """
        return self.database.bitcount(self.key, start, end)
    def get_bit(self, offset):
        """
        Get the bit value at the given offset (in bits).
        :param int offset: bit offset
        :returns: value at bit offset, 1 or 0
        """
        return self.database.getbit(self.key, offset)
    def set_bit(self, offset, value):
        """
        Set the bit value at the given offset (in bits).
        :param int offset: bit offset
        :param int value: new value for bit, 1 or 0
        :returns: previous value at bit offset, 1 or 0
        """
        return self.database.setbit(self.key, offset, value)
class BloomFilter(Container):
    """
    Probabilistic set-membership data-structure. Answers "is X a member of
    set S?" with possible false positives but never false negatives: if the
    filter reports an item absent, it is definitely absent. Accuracy improves
    with filter size; the default is 64KB (524,288 bits).

    Rather than instantiate this class directly, use
    :py:meth:`Database.bloom_filter`.
    """
    def __init__(self, database, key, size=64 * 1024):
        super(BloomFilter, self).__init__(database, key)
        self.size = size
        self.bits = self.size * 8
        self._bf = BitField(self.database, self.key)

    def _get_seeds(self, data):
        # Hash the data into a 16-byte md5 digest, split it into four 32-bit
        # unsigned big-endian integers, and normalize each to a bit index
        # with modulo.
        words = struct.unpack('>IIII', hashlib.md5(encode(data)).digest())
        return [word % self.bits for word in words]

    def add(self, data):
        """
        Add an item to the bloomfilter.

        :param bytes data: a bytestring representing the item to add.
        """
        operation = BitFieldOperation(self.database, self.key)
        for bit_index in self._get_seeds(data):
            operation.set('u1', bit_index, 1)
        operation.execute()

    def contains(self, data):
        """
        Check if an item has been added to the bloomfilter.

        :param bytes data: a bytestring representing the item to check.
        :returns: a boolean indicating whether or not the item is present in
            the bloomfilter. False-positives are possible, but a negative
            return value is definitive.
        """
        operation = BitFieldOperation(self.database, self.key)
        for bit_index in self._get_seeds(data):
            operation.get('u1', bit_index)
        # The item is (probably) present only if every seed bit is set.
        return all(operation.execute())
    __contains__ = contains

    def __len__(self):
        # Size of the filter in bytes, not the number of items added.
        return self.size
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,598
|
coleifer/walrus
|
refs/heads/master
|
/runtests.py
|
#!/usr/bin/env python
import optparse
import os
import sys
import unittest
def runtests(verbose=False, failfast=False, names=None):
    """
    Build and run the walrus test suite.

    :param verbose: use verbose test-runner output.
    :param failfast: stop on the first failure or error.
    :param names: optional list of dotted test names; when empty, the full
        ``walrus.tests`` module is loaded.
    :returns: the ``unittest`` result object.
    """
    loader = unittest.TestLoader()
    # ``tests`` is the walrus.tests module, imported at module scope by the
    # __main__ block before this function is invoked.
    if names:
        suite = loader.loadTestsFromNames(names, tests)
    else:
        suite = loader.loadTestsFromModule(tests)
    verbosity = 2 if verbose else 1
    runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast)
    return runner.run(suite)
if __name__ == '__main__':
    # Fail fast with clear errors if the test prerequisites are missing.
    try:
        from redis import Redis
    except ImportError:
        raise RuntimeError('redis-py must be installed.')
    else:
        try:
            Redis().info()
        except Exception:
            # Previously a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; only genuine errors should be
            # reported as "server not running".
            raise RuntimeError('redis server does not appear to be running')
    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      dest='verbose', help='Verbose output.')
    parser.add_option('-f', '--failfast', action='store_true', default=False,
                      help='Stop on first failure or error.')
    parser.add_option('-s', '--stream', action='store_true', dest='stream',
                      help='Run stream command tests (default if server>=5.0)')
    parser.add_option('-z', '--zpop', action='store_true', dest='zpop',
                      help='Run ZPOP* tests (default if server>=5.0)')
    options, args = parser.parse_args()
    # Feature flags are communicated to the suite via environment variables,
    # which must be set *before* walrus.tests is imported below.
    if options.stream:
        os.environ['TEST_STREAM'] = '1'
    if options.zpop:
        os.environ['TEST_ZPOP'] = '1'
    from walrus import tests
    result = runtests(
        verbose=options.verbose,
        failfast=options.failfast,
        names=args)
    # Exit code: 1 for test failures, 2 for errors, 0 otherwise.
    if result.failures:
        sys.exit(1)
    elif result.errors:
        sys.exit(2)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,599
|
coleifer/walrus
|
refs/heads/master
|
/walrus/autocomplete.py
|
import json
import re
from walrus.utils import decode
from walrus.utils import encode
from walrus.utils import load_stopwords
from walrus.utils import PY3
class Autocomplete(object):
    """
    Autocompletion for ascii-encoded string data. Titles are stored,
    along with any corollary data in Redis. Substrings of the title
    are stored in sorted sets using a unique scoring algorithm. The
    scoring algorithm aims to return results in a sensible order,
    by looking at the entire title and the position of the matched
    substring within the title.

    Additionally, the autocomplete object supports boosting search
    results by object ID or object type.
    """
    def __init__(self, database, namespace='walrus', cache_timeout=600,
                 stopwords_file='stopwords.txt', use_json=True):
        """
        :param database: A :py:class:`Database` instance.
        :param namespace: Namespace to prefix keys used to store
            metadata.
        :param cache_timeout: Complex searches using boosts will be
            cached. Specify the amount of time these results are
            cached for.
        :param stopwords_file: Filename containing newline-separated
            stopwords. Set to `None` to disable stopwords filtering.
        :param bool use_json: Whether object data should be
            serialized as JSON.
        """
        self.database = database
        self.namespace = namespace
        self._cache_timeout = cache_timeout
        self._stopwords_file = stopwords_file
        self._use_json = use_json
        self._load_stopwords()

        # Hashes mapping the combined (id, type) object key to its stored
        # data, its title, and any boost multiplier.
        self._data = self.database.Hash('%s:d' % self.namespace)
        self._title_data = self.database.Hash('%s:t' % self.namespace)
        self._boosts = self.database.Hash('%s:b' % self.namespace)

        # Only the first ``_max_title`` characters of a token contribute to
        # its score. ``_offset`` exceeds any single-token score, so it can
        # be used to penalize words by their position within the title.
        self._max_title = 10
        self._offset = self.score_token('z' * self._max_title) + 1

    def _load_stopwords(self):
        # Populate self._stopwords from the configured file (or leave empty
        # when stopword filtering is disabled).
        if self._stopwords_file:
            stopwords = load_stopwords(self._stopwords_file)
            self._stopwords = set(stopwords.splitlines())
        else:
            self._stopwords = set()

    def tokenize_title(self, phrase, stopwords=True):
        """
        Lower-case ``phrase``, strip everything but ascii alphanumerics,
        underscore, hyphen and whitespace, and split into words. When
        ``stopwords`` is true, configured stopwords are filtered out.
        """
        if isinstance(phrase, bytes):
            phrase = decode(phrase)
        # Raw string avoids invalid-escape SyntaxWarning ("\-", "\s") on
        # modern Python; the pattern itself is unchanged.
        phrase = re.sub(r'[^a-z0-9_\-\s]', '', phrase.lower())
        if stopwords and self._stopwords:
            return [w for w in phrase.split() if w not in self._stopwords]
        else:
            return phrase.split()

    def score_token(self, token):
        """
        Map a token onto an integer score such that tokens sort
        lexicographically by score. Only the first ``_max_title``
        characters are significant; characters outside a-z map to 1
        (sorting before 'a').
        """
        length = len(token)
        a = ord('a') - 2
        score = 0
        for i in range(self._max_title):
            if i < length:
                c = ord(token[i]) - a
                if c < 2 or c > 27:
                    c = 1
            else:
                c = 1
            score += c * (27 ** (self._max_title - i - 1))
        return score

    def substrings(self, w):
        # Yield every prefix of ``w``, including ``w`` itself.
        for i in range(1, len(w)):
            yield w[:i]
        yield w

    def object_key(self, obj_id, obj_type):
        # Combined identifier; \x01 separates id from (possibly empty) type.
        return '%s\x01%s' % (obj_id, obj_type or '')

    def word_key(self, word):
        # Key of the sorted-set holding object ids for the given substring.
        return '%s:s:%s' % (self.namespace, word)

    def store(self, obj_id, title=None, data=None, obj_type=None):
        """
        Store data in the autocomplete index.

        :param obj_id: Either a unique identifier for the object
            being indexed or the word/phrase to be indexed.
        :param title: The word or phrase to be indexed. If not
            provided, the ``obj_id`` will be used as the title.
        :param data: Arbitrary data to index, which will be
            returned when searching for results. If not provided,
            this value will default to the title being indexed.
        :param obj_type: Optional object type. Since results can be
            boosted by type, you might find it useful to specify this
            when storing multiple types of objects.

        You have the option of storing several types of data as
        defined by the parameters. At the minimum, you can specify
        an ``obj_id``, which will be the word or phrase you wish to
        index. Alternatively, if for instance you were indexing blog
        posts, you might specify all parameters.
        """
        if title is None:
            title = obj_id
        if data is None:
            data = title
        obj_type = obj_type or ''
        if self._use_json:
            data = json.dumps(data)
        combined_id = self.object_key(obj_id, obj_type)

        if self.exists(obj_id, obj_type):
            stored_title = self._title_data[combined_id]
            if stored_title == title:
                # Same title: only the data payload needs updating.
                self._data[combined_id] = data
                return
            else:
                # Title changed: drop the old index entries before re-adding.
                self.remove(obj_id, obj_type)

        self._data[combined_id] = data
        self._title_data[combined_id] = title

        # Score = per-word score + positional penalty + whole-title score,
        # so results order sensibly by match position and full title.
        clean_title = ' '.join(self.tokenize_title(title))
        title_score = self.score_token(clean_title)

        for idx, word in enumerate(self.tokenize_title(title)):
            word_score = self.score_token(word)
            position_score = word_score + (self._offset * idx)
            key_score = position_score + title_score
            for substring in self.substrings(word):
                self.database.zadd(self.word_key(substring),
                                   {combined_id: key_score})
        return True

    def remove(self, obj_id, obj_type=None):
        """
        Remove an object identified by the given ``obj_id`` (and
        optionally ``obj_type``) from the search index.

        :param obj_id: The object's unique identifier.
        :param obj_type: The object's type.
        :raises KeyError: if the object is not present in the index.
        """
        if not self.exists(obj_id, obj_type):
            raise KeyError('Object not found.')

        combined_id = self.object_key(obj_id, obj_type)
        title = self._title_data[combined_id]
        for word in self.tokenize_title(title):
            for substring in self.substrings(word):
                key = self.word_key(substring)
                # If we are (at most) the only remaining member, drop the
                # whole key; otherwise just remove our entry.
                if not self.database.zrange(key, 1, 2):
                    self.database.delete(key)
                else:
                    self.database.zrem(key, combined_id)

        del self._data[combined_id]
        del self._title_data[combined_id]
        del self._boosts[combined_id]

    def exists(self, obj_id, obj_type=None):
        """
        Return whether the given object exists in the search index.

        :param obj_id: The object's unique identifier.
        :param obj_type: The object's type.
        """
        return self.object_key(obj_id, obj_type) in self._data

    def boost_object(self, obj_id=None, obj_type=None, multiplier=1.1,
                     relative=True):
        """
        Boost search results for the given object or type by the
        amount specified. When the ``multiplier`` is greater than
        1, the results will percolate to the top. Values between
        0 and 1 will percolate results to the bottom.

        Either an ``obj_id`` or ``obj_type`` (or both) must be
        specified.

        :param obj_id: An object's unique identifier (optional).
        :param obj_type: The object's type (optional).
        :param multiplier: A positive floating-point number.
        :param relative: If ``True``, then any pre-existing saved
            boost will be updated using the given multiplier.

        Examples:

        .. code-block:: python

            # Make all objects of type=photos percolate to top.
            ac.boost_object(obj_type='photo', multiplier=2.0)

            # Boost a particularly popular blog entry.
            ac.boost_object(
                popular_entry.id,
                'entry',
                multiplier=5.0,
                relative=False)
        """
        combined_id = self.object_key(obj_id or '', obj_type or '')
        if relative:
            current = float(self._boosts[combined_id] or 1.0)
            self._boosts[combined_id] = current * multiplier
        else:
            self._boosts[combined_id] = multiplier

    def _load_objects(self, obj_id_zset, limit, chunk_size=1000):
        # Resolve object ids (in score order) to their stored data,
        # fetching in chunks and honoring the optional result limit.
        ct = i = 0
        while True:
            id_chunk = obj_id_zset[i:i + chunk_size]
            if not id_chunk:
                return
            i += chunk_size
            for raw_data in self._data[id_chunk]:
                if not raw_data:
                    continue
                if self._use_json:
                    yield json.loads(decode(raw_data))
                else:
                    yield raw_data
                ct += 1
                if limit and ct == limit:
                    return

    def _load_saved_boosts(self):
        # Build a lookup of persisted boosts keyed by combined id,
        # object id, or object type, depending on what was stored.
        boosts = {}
        for combined_id, score in self._boosts:
            obj_id, obj_type = combined_id.split(encode('\x01'), 1)
            score = float(score)
            if obj_id and obj_type:
                boosts[combined_id] = score
            elif obj_id:
                boosts[obj_id] = score
            elif obj_type:
                boosts[obj_type] = score
        return boosts

    def search(self, phrase, limit=None, boosts=None, chunk_size=1000):
        """
        Perform a search for the given phrase. Objects whose title
        matches the search will be returned. The values returned
        will be whatever you specified as the ``data`` parameter
        when you called :py:meth:`~Autocomplete.store`.

        :param phrase: One or more words or substrings.
        :param int limit: Limit size of the result set.
        :param dict boosts: A mapping of object id/object type to
            floating point multipliers.
        :returns: A generator yielding the object data for objects
            matching the search phrase.
        """
        cleaned = self.tokenize_title(phrase, stopwords=False)
        # Remove stopwords except for the last token, which may be a partially
        # typed string that just happens to match a stopword.
        last_token = len(cleaned) - 1
        cleaned = [c for i, c in enumerate(cleaned)
                   if (c not in self._stopwords) or (i == last_token)]
        if not cleaned:
            return

        all_boosts = self._load_saved_boosts()
        if PY3 and boosts:
            # Saved boost keys are bytes under PY3, so encode user keys.
            for key in boosts:
                all_boosts[encode(key)] = boosts[key]
        elif boosts:
            all_boosts.update(boosts)

        if len(cleaned) == 1 and not all_boosts:
            # Single-term, unboosted search reads the substring zset directly.
            result_key = self.word_key(cleaned[0])
        else:
            # Multi-term / boosted search: intersect the per-term zsets into
            # a cached result key with a TTL.
            result_key = self.get_cache_key(cleaned, all_boosts)
            if result_key not in self.database:
                self.database.zinterstore(
                    result_key,
                    list(map(self.word_key, cleaned)))
                self.database.expire(result_key, self._cache_timeout)

        results = self.database.ZSet(result_key)
        if all_boosts:
            # Dividing the score by the multiplier percolates boosted
            # (multiplier > 1) entries toward the front of the zset.
            for raw_id, score in results[0:0, True]:
                orig_score = score
                for identifier in raw_id.split(encode('\x01'), 1):
                    if identifier and identifier in all_boosts:
                        score *= 1 / all_boosts[identifier]
                if orig_score != score:
                    results[raw_id] = score

        for result in self._load_objects(results, limit, chunk_size):
            yield result

    def get_cache_key(self, phrases, boosts):
        # Deterministic key for a (phrases, boosts) search combination.
        if boosts:
            boost_key = '|'.join(sorted(
                '%s:%s' % (k, v) for k, v in boosts.items()))
        else:
            boost_key = ''
        phrase_key = '|'.join(phrases)
        return '%s:c:%s:%s' % (self.namespace, phrase_key, boost_key)

    def list_data(self):
        """
        Return all the data stored in the autocomplete index. If the data was
        stored as serialized JSON, then it will be de-serialized before being
        returned.

        :rtype: iterator (``map`` object) over the stored values
        """
        fn = (lambda v: json.loads(decode(v))) if self._use_json else decode
        return map(fn, self._data.values())

    def list_titles(self):
        """
        Return the titles of all objects stored in the autocomplete index.

        :rtype: iterator (``map`` object) over the stored titles
        """
        return map(decode, self._title_data.values())

    def flush(self, batch_size=1000):
        """
        Delete all autocomplete indexes and metadata.
        """
        keys = self.database.keys(self.namespace + ':*')
        for i in range(0, len(keys), batch_size):
            self.database.delete(*keys[i:i + batch_size])
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,600
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/streams.py
|
import datetime
import unittest
from walrus.streams import TimeSeries
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
from walrus.tests.base import stream_test
class TestTimeSeries(WalrusTestCase):
    """
    Tests for the TimeSeries stream wrapper.

    Three streams ('sa', 'sb', 'sc') are populated round-robin with ten
    messages dated 2018-01-01 through 2018-01-10, so message ``i`` lives
    in stream ``('sa', 'sb', 'sc')[i % 3]`` with timestamp Jan ``i + 1``.
    """
    def setUp(self):
        super(TestTimeSeries, self).setUp()
        for key in ('sa', 'sb', 'sc'):
            # Bug fix: previously ``db.delete('key')`` deleted the literal
            # string 'key', leaving stale stream data between test runs.
            db.delete(key)
        self.ts = TimeSeries(db, 'cgabc', {'sa': '0', 'sb': '0', 'sc': '0'},
                             consumer='ts1')

    def _create_test_data(self):
        # Add 10 messages round-robin across sa/sb/sc, one per day starting
        # 2018-01-01. Returns the list of message ids in insert order.
        start = datetime.datetime(2018, 1, 1)
        id_list = []
        keys = ('sa', 'sb', 'sc')
        for i in range(0, 10):
            tskey = getattr(self.ts, keys[i % 3])
            ts = start + datetime.timedelta(days=i)
            id_list.append(tskey.add({'k': '%s-%s' % (keys[i % 3], i)}, id=ts))
        return id_list

    def assertMessages(self, results, expected_ids):
        # Compare (stream, timestamp, data) of each result against the values
        # implied by the message indexes listed in ``expected_ids``.
        rdata = [(r.stream, r.timestamp, r.data) for r in results]
        streams = ('sa', 'sb', 'sc')
        edata = [(streams[i % 3], datetime.datetime(2018, 1, i + 1),
                  {'k': '%s-%s' % (streams[i % 3], i)}) for i in expected_ids]
        self.assertEqual(rdata, edata)

    @stream_test
    def test_timeseries_ranges(self):
        """Range queries, slicing by id, point lookups, and trimming."""
        docids = self._create_test_data()
        self.ts.create()
        self.assertMessages(self.ts.sa.range(), [0, 3, 6, 9])
        self.assertMessages(self.ts.sb.range(), [1, 4, 7])
        self.assertMessages(self.ts.sc.range(), [2, 5, 8])
        self.assertMessages(self.ts.sc.range(count=2), [2, 5])
        self.assertMessages(self.ts.sa[:docids[4]], [0, 3])
        self.assertMessages(self.ts.sb[:docids[4]], [1, 4])
        self.assertMessages(self.ts.sa[docids[4]:], [6, 9])
        self.assertMessages(self.ts.sb[docids[4]:], [4, 7])
        self.assertMessages([self.ts.sa.get(docids[6])], [6])
        self.assertMessages([self.ts.sa.get(docids[9])], [9])
        self.assertMessages([self.ts.sc.get(docids[5])], [5])
        self.assertTrue(self.ts.sa.get(docids[5]) is None)
        self.assertTrue(self.ts.sb.get(docids[5]) is None)
        # Trim sa down to 2 items.
        self.assertEqual(self.ts.sa.trim(2, False), 2)
        self.assertMessages(self.ts.sa.range(), [6, 9])
        self.assertMessages(self.ts.sa.range(count=1), [6])
        self.assertMessages(self.ts.streams['sa'].range(count=1), [6])

    @stream_test
    def test_timeseries_read(self):
        """Consumer-group reads across all streams, reset and set_id."""
        self._create_test_data()
        self.ts.create()
        self.assertMessages(self.ts.read(count=1), [0, 1, 2])
        self.assertMessages(self.ts.read(count=1), [3, 4, 5])
        self.assertMessages(self.ts.read(count=2), [6, 7, 8, 9])
        self.assertEqual(self.ts.read(), [])
        # Trim the 0-th item off of sa and reset all streams.
        self.ts.sa.trim(3, False)
        self.ts.reset()
        self.assertMessages(self.ts.read(), list(range(1, 10)))
        # Trim the first two items from sc (2 and 5), then set the date so
        # we've read the first item from each queue. Next read will be 4.
        self.ts.sc.trim(1, False)
        self.ts.sb.delete(datetime.datetime(2018, 1, 8))  # Delete item 7.
        self.ts.set_id(datetime.datetime(2018, 1, 4))
        self.assertMessages(self.ts.read(), [4, 6, 8, 9])

    @stream_test
    def test_adding(self):
        """Adding records with explicit (timestamp, sequence) ids."""
        self._create_test_data()
        self.ts.create()
        resp = self.ts.set_id()
        self.assertEqual(resp, {'sa': True, 'sb': True, 'sc': True})
        # We can add another record to the max ts if we increment the seq.
        r = self.ts.sa.add({'k': 'sa-10'}, (datetime.datetime(2018, 1, 10), 1))
        self.assertEqual(r, (datetime.datetime(2018, 1, 10), 1))
        r = self.ts.sb.add({'k': 'sb-11'}, (datetime.datetime(2018, 1, 10), 2))
        self.assertEqual(r, (datetime.datetime(2018, 1, 10), 2))
        self.assertEqual(len(self.ts.sa), 5)
        self.assertEqual(len(self.ts.sb), 4)
        self.assertEqual(len(self.ts.sc), 3)
        # Read the newly-added records.
        r10, r11 = self.ts.read()
        self.assertEqual(r10.timestamp, datetime.datetime(2018, 1, 10))
        self.assertEqual(r10.sequence, 1)
        self.assertEqual(r10.stream, 'sa')
        self.assertEqual(r10.data, {'k': 'sa-10'})
        self.assertEqual(r11.timestamp, datetime.datetime(2018, 1, 10))
        self.assertEqual(r11.sequence, 2)
        self.assertEqual(r11.stream, 'sb')
        self.assertEqual(r11.data, {'k': 'sb-11'})

    @stream_test
    def test_timeseries_stream_read(self):
        """Per-stream reads interact correctly with the shared group."""
        self._create_test_data()
        self.ts.create()
        # Read two from sa, one from sc, then read 2x from all. Messages that
        # were read will not be re-read.
        self.assertMessages(self.ts.sa.read(count=2), [0, 3])
        self.assertMessages(self.ts.sc.read(count=1), [2])
        self.assertMessages(self.ts.read(count=2), [1, 4, 5, 6, 8, 9])
        self.assertMessages(self.ts.read(count=1), [7])
        for s in (self.ts.sa, self.ts.sb, self.ts.sc):
            self.assertEqual(s.read(), [])
        # Re-set the ID of stream b. Other streams are unaffected, so we just
        # re-read items from stream b.
        self.ts.sb.set_id(datetime.datetime(2018, 1, 4))
        self.assertMessages(self.ts.read(), [4, 7])
        # Re-set the ID of stream a and trim.
        self.ts.sa.set_id('0')
        self.ts.sa.trim(2, False)
        self.assertMessages(self.ts.read(), [6, 9])

    @stream_test
    def test_ack_claim_pending(self):
        """ACK, pending-entry inspection and claiming across consumers."""
        self._create_test_data()
        self.ts.create()
        ts1 = self.ts
        ts2 = ts1.consumer('ts2')
        # Read items 0, 1, 3, and 4.
        self.assertMessages(ts1.sa.read(1), [0])
        self.assertMessages(ts2.sb.read(2), [1, 4])
        self.assertMessages(ts2.sa.read(1), [3])

        def assertPending(resp, expected):
            # Normalize pending entries to (timestamp, consumer, deliveries).
            clean = [(r[0][0], r[1], r[3]) for r in resp]
            self.assertEqual(clean, expected)

        # Check pending status. sa was read by ts1 first, then ts2.
        assertPending(ts1.sa.pending(), [
            (datetime.datetime(2018, 1, 1), 'ts1', 1),
            (datetime.datetime(2018, 1, 4), 'ts2', 1)])
        assertPending(ts1.sa.pending(consumer='ts1'), [
            (datetime.datetime(2018, 1, 1), 'ts1', 1)])
        assertPending(ts1.sa.pending(consumer='ts2'), [
            (datetime.datetime(2018, 1, 4), 'ts2', 1)])
        # sb was read by ts2 only.
        assertPending(ts1.sb.pending(), [
            (datetime.datetime(2018, 1, 2), 'ts2', 1),
            (datetime.datetime(2018, 1, 5), 'ts2', 1)])
        # Acknowledge receipt. Although we read the Jan 4th item from "sa"
        # using ts2, we can still acknowledge it from ts1.
        self.assertEqual(ts1.sa.ack(datetime.datetime(2018, 1, 4)), 1)
        self.assertEqual(ts2.sb.ack(datetime.datetime(2018, 1, 2),
                                    datetime.datetime(2018, 1, 5)), 2)
        # Verify pending removed.
        assertPending(ts1.sa.pending(), [
            (datetime.datetime(2018, 1, 1), 'ts1', 1)])
        assertPending(ts2.sb.pending(), [])
        # Claim the first message for consumer ts2.
        resp = ts2.sa.claim(datetime.datetime(2018, 1, 1))
        self.assertMessages(resp, [0])
        # Pending is now marked for ts2, ack'd, and removed.
        assertPending(ts1.sa.pending(), [
            (datetime.datetime(2018, 1, 1), 'ts2', 2)])
        self.assertEqual(ts2.sa.ack(datetime.datetime(2018, 1, 1)), 1)
        assertPending(ts2.sa.pending(), [])
        # Read the rest from consumer ts2 and verify pending.
        self.assertMessages(ts2.read(), [2, 5, 6, 7, 8, 9])
        assertPending(ts2.sa.pending(), [
            (datetime.datetime(2018, 1, 7), 'ts2', 1),
            (datetime.datetime(2018, 1, 10), 'ts2', 1)])
        assertPending(ts2.sb.pending(), [
            (datetime.datetime(2018, 1, 8), 'ts2', 1)])
        assertPending(ts2.sc.pending(), [
            (datetime.datetime(2018, 1, 3), 'ts2', 1),
            (datetime.datetime(2018, 1, 6), 'ts2', 1),
            (datetime.datetime(2018, 1, 9), 'ts2', 1)])
        # Claim the records in sc for ts1.
        resp = ts1.sc.claim(
            datetime.datetime(2018, 1, 3),
            datetime.datetime(2018, 1, 6),
            datetime.datetime(2018, 1, 9))
        self.assertMessages(resp, [2, 5, 8])
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,601
|
coleifer/walrus
|
refs/heads/master
|
/walrus/fts.py
|
from walrus.query import Executor
from walrus.query import OP_MATCH
from walrus.query import parse
from walrus.utils import decode
from walrus.utils import decode_dict
from walrus.search import Tokenizer
class Index(object):
    """
    Full-text search index.
    Store documents, along with arbitrary metadata, and perform full-text
    search on the document content. Supports porter-stemming, stopword
    filtering, basic result ranking, and (optionally) double-metaphone for
    phonetic search.
    """
    def __init__(self, db, name, **tokenizer_settings):
        """
        :param Database db: a walrus database object.
        :param str name: name for the search index.
        :param bool stemmer: use porter stemmer (default True).
        :param bool metaphone: use double metaphone (default False).
        :param str stopwords_file: defaults to walrus stopwords.txt.
        :param int min_word_length: specify minimum word length.
        Create a search index for storing and searching documents.
        """
        self.db = db
        self.name = name
        self.tokenizer = Tokenizer(**tokenizer_settings)
        # Set of every document key currently present in the index.
        self.members = self.db.Set('fts.%s' % self.name)
    def get_key(self, word):
        # Sorted-set of document keys indexed under the given word.
        return self.db.ZSet('fts.%s.%s' % (self.name, word))
    def _get_hash(self, document_id):
        # Hash storing a document's content plus arbitrary metadata.
        return self.db.Hash('doc.%s.%s' % (self.name, decode(document_id)))
    def get_document(self, document_id):
        """
        :param document_id: Document unique identifier.
        :returns: a dictionary containing the document content and
                  any associated metadata.
        """
        key = 'doc.%s.%s' % (self.name, decode(document_id))
        return decode_dict(self.db.hgetall(key))
    def add(self, key, content, __metadata=None, **metadata):
        """
        :param key: Document unique identifier.
        :param str content: Content to store and index for search.
        :param metadata: Arbitrary key/value pairs to store for document.
        Add a document to the search index.
        """
        # NOTE: ``__metadata`` is name-mangled to ``_Index__metadata`` inside
        # the class; external callers supply metadata via **metadata, while
        # internal callers (update/replace) pass the dict positionally.
        if __metadata is None:
            __metadata = metadata
        elif metadata:
            __metadata.update(metadata)
        if not isinstance(key, str):
            key = str(key)
        self.members.add(key)
        document_hash = self._get_hash(key)
        document_hash.update(__metadata, content=content)
        for word, score in self.tokenizer.tokenize(content).items():
            word_key = self.get_key(word)
            # Score is stored negated — presumably so that higher-scoring
            # documents sort first when the zset is read in ascending order;
            # confirm against the query executor.
            word_key[key] = -score
    def remove(self, key, preserve_data=False):
        """
        :param key: Document unique identifier.
        :param preserve_data: If true, keep the stored document hash and
            only remove the index entries (used internally by update()).
        :raises KeyError: if the document is not in the index.
        Remove the document from the search index.
        """
        if not isinstance(key, str):
            key = str(key)
        if self.members.remove(key) != 1:
            raise KeyError('Document with key "%s" not found.' % key)
        document_hash = self._get_hash(key)
        # Content must be read before (optionally) clearing the hash, so the
        # document's tokens can be removed from the per-word zsets below.
        content = decode(document_hash['content'])
        if not preserve_data:
            document_hash.clear()
        for word in self.tokenizer.tokenize(content):
            word_key = self.get_key(word)
            del word_key[key]
            # Drop the per-word key entirely once no documents reference it.
            if len(word_key) == 0:
                word_key.clear()
    def update(self, key, content, __metadata=None, **metadata):
        """
        :param key: Document unique identifier.
        :param str content: Content to store and index for search.
        :param metadata: Arbitrary key/value pairs to store for document.
        Update the given document. Existing metadata will be preserved and,
        optionally, updated with the provided metadata.
        """
        self.remove(key, preserve_data=True)
        self.add(key, content, __metadata, **metadata)
    def replace(self, key, content, __metadata=None, **metadata):
        """
        :param key: Document unique identifier.
        :param str content: Content to store and index for search.
        :param metadata: Arbitrary key/value pairs to store for document.
        Re-index the given document. Existing metadata will be removed and
        replaced with the provided metadata.
        """
        self.remove(key)
        self.add(key, content, __metadata, **metadata)
    def get_index(self, op):
        # Hook used by the walrus query machinery; full-text indexes only
        # support the MATCH operation.
        assert op == OP_MATCH
        return self
    def db_value(self, value):
        # Hook used by the walrus query machinery; values pass through as-is.
        return value
    def _search(self, query):
        # Parse the query into an expression tree and execute it, returning
        # (key, score) pairs. An empty/stopword-only query matches everything
        # with a score of zero.
        expression = parse(query, self)
        if expression is None:
            return [(member, 0) for member in self.members]
        executor = Executor(self.db)
        return executor.execute(expression)
    def search(self, query):
        """
        :param str query: Search query. May contain boolean/set operations
            and parentheses.
        :returns: a list of document hashes corresponding to matching
            documents.
        Search the index. The return value is a list of dictionaries
        corresponding to the documents that matched. These dictionaries contain
        a ``content`` key with the original indexed content, along with any
        additional metadata that was specified.
        """
        return [self.get_document(key) for key, _ in self._search(query)]
    def search_items(self, query):
        """
        :param str query: Search query. May contain boolean/set operations
            and parentheses.
        :returns: a list of (key, document hashes) tuples corresponding to
            matching documents.
        Search the index. The return value is a list of (key, document dict)
        corresponding to the documents that matched. These dictionaries contain
        a ``content`` key with the original indexed content, along with any
        additional metadata that was specified.
        """
        return [(decode(key), self.get_document(key))
                for key, _ in self._search(query)]
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,602
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/graph.py
|
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestGraph(WalrusTestCase):
    def setUp(self):
        super(TestGraph, self).setUp()
        # Use a dedicated graph namespace for each test.
        self.g = db.graph('test-graph')
    def create_graph_data(self):
        """Bulk-load (subject, predicate, object) triples via store_many()."""
        data = (
            ('charlie', 'likes', 'huey'),
            ('charlie', 'likes', 'mickey'),
            ('charlie', 'likes', 'zaizee'),
            ('charlie', 'is', 'human'),
            ('connor', 'likes', 'huey'),
            ('connor', 'likes', 'mickey'),
            ('huey', 'eats', 'catfood'),
            ('huey', 'is', 'cat'),
            ('mickey', 'eats', 'anything'),
            ('mickey', 'is', 'dog'),
            ('zaizee', 'eats', 'catfood'),
            ('zaizee', 'is', 'cat'),
        )
        self.g.store_many(data)
    def create_friends(self):
        """Load 'friend' edges one at a time via store()."""
        data = (
            ('charlie', 'friend', 'huey'),
            ('huey', 'friend', 'charlie'),
            ('huey', 'friend', 'mickey'),
            ('zaizee', 'friend', 'charlie'),
            ('zaizee', 'friend', 'mickey'),
            ('mickey', 'friend', 'nuggie'),
        )
        for item in data:
            self.g.store(*item)
    def test_search_extended(self):
        """Multi-variable searches bind each variable to every consistent value."""
        self.create_graph_data()
        X = self.g.v.x
        Y = self.g.v.y
        Z = self.g.v.z
        # Who likes a cat, which cats are liked, and who (else) likes them?
        result = self.g.search(
            (X, 'likes', Y),
            (Y, 'is', 'cat'),
            (Z, 'likes', Y))
        self.assertEqual(result['x'], set(['charlie', 'connor']))
        self.assertEqual(result['y'], set(['huey', 'zaizee']))
        self.assertEqual(result['z'], set(['charlie', 'connor']))
        self.g.store_many((
            ('charlie', 'likes', 'connor'),
            ('connor', 'likes', 'charlie'),
            ('connor', 'is', 'baby'),
            ('connor', 'is', 'human'),
            ('nash', 'is', 'baby'),
            ('nash', 'is', 'human'),
            ('connor', 'lives', 'ks'),
            ('nash', 'lives', 'nv'),
            ('charlie', 'lives', 'ks')))
        # Constrain a single variable by several conditions at once.
        result = self.g.search(
            ('charlie', 'likes', X),
            (X, 'is', 'baby'),
            (X, 'lives', 'ks'))
        self.assertEqual(result, {'x': set(['connor'])})
        # Chained variables: X's match constrains the possible Y's.
        result = self.g.search(
            (X, 'is', 'baby'),
            (X, 'likes', Y),
            (Y, 'lives', 'ks'))
        self.assertEqual(result, {
            'x': set(['connor']),
            'y': set(['charlie']),
        })
def assertTriples(self, result, expected):
result = list(result)
self.assertEqual(len(result), len(expected))
for i1, i2 in zip(result, expected):
self.assertEqual(
(i1['s'], i1['p'], i1['o']), i2)
def test_query(self):
self.create_graph_data()
res = self.g.query()
self.assertTriples(res, (
('charlie', 'is', 'human'),
('charlie', 'likes', 'huey'),
('charlie', 'likes', 'mickey'),
('charlie', 'likes', 'zaizee'),
('connor', 'likes', 'huey'),
('connor', 'likes', 'mickey'),
('huey', 'eats', 'catfood'),
('huey', 'is', 'cat'),
('mickey', 'eats', 'anything'),
('mickey', 'is', 'dog'),
('zaizee', 'eats', 'catfood'),
('zaizee', 'is', 'cat'),
))
res = self.g.query('charlie', 'likes')
self.assertTriples(res, (
('charlie', 'likes', 'huey'),
('charlie', 'likes', 'mickey'),
('charlie', 'likes', 'zaizee'),
))
res = self.g.query(p='is', o='cat')
self.assertTriples(res, (
('huey', 'is', 'cat'),
('zaizee', 'is', 'cat'),
))
res = self.g.query(s='huey')
self.assertTriples(res, (
('huey', 'eats', 'catfood'),
('huey', 'is', 'cat'),
))
res = self.g.query(o='huey')
self.assertTriples(res, (
('charlie', 'likes', 'huey'),
('connor', 'likes', 'huey'),
))
res = self.g.query(s='does-not-exist')
self.assertTriples(res, [])
res = self.g.query(s='huey', p='is', o='x')
self.assertTriples(res, [])
def test_search(self):
self.create_graph_data()
X = self.g.v('x')
result = self.g.search(
{'s': 'charlie', 'p': 'likes', 'o': X},
{'s': X, 'p': 'eats', 'o': 'catfood'},
{'s': X, 'p': 'is', 'o': 'cat'})
self.assertEqual(result, {'x': set(['huey', 'zaizee'])})
def test_search_simple(self):
self.create_friends()
X = self.g.v('x')
result = self.g.search({'s': X, 'p': 'friend', 'o': 'charlie'})
self.assertEqual(result, {'x': set(['huey', 'zaizee'])})
def test_search_2var(self):
self.create_friends()
X = self.g.v('x')
Y = self.g.v('y')
result = self.g.search(
{'s': X, 'p': 'friend', 'o': 'charlie'},
{'s': Y, 'p': 'friend', 'o': X})
self.assertEqual(result, {
'x': set(['huey']),
'y': set(['charlie']),
})
result = self.g.search(
('charlie', 'friend', X),
(X, 'friend', Y),
(Y, 'friend', 'nuggie'))
self.assertEqual(result, {
'x': set(['huey']),
'y': set(['mickey']),
})
result = self.g.search(
('huey', 'friend', X),
(X, 'friend', Y))
self.assertEqual(result['y'], set(['huey', 'nuggie']))
def test_search_mutual(self):
self.create_friends()
X = self.g.v('x')
Y = self.g.v('y')
result = self.g.search(
{'s': X, 'p': 'friend', 'o': Y},
{'s': Y, 'p': 'friend', 'o': X})
self.assertEqual(result['y'], set(['charlie', 'huey']))
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,603
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/autocomplete.py
|
import random
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestAutocomplete(WalrusTestCase):
    """Tests for the sorted-set-backed autocomplete index (db.autocomplete())."""
    # Fixture rows of (object id, title) used by most tests.
    test_data = (
        (1, 'testing python'),
        (2, 'testing python code'),
        (3, 'web testing python code'),
        (4, 'unit tests with python'))
    def setUp(self):
        super(TestAutocomplete, self).setUp()
        self.ac = db.autocomplete()
    def store_test_data(self, id_to_store=None):
        # Store every fixture row, or only the row whose id matches
        # ``id_to_store`` when one is given.
        for obj_id, title in self.test_data:
            if id_to_store is None or obj_id == id_to_store:
                self.ac.store(obj_id, title, {
                    'obj_id': obj_id,
                    'title': title,
                    'value': obj_id % 2 == 0 and 'even' or 'odd'})
    def sort_results(self, results):
        # NOTE(review): this helper is not referenced by any test below.
        return sorted(results, key=lambda item: item['obj_id'])
    def assertResults(self, results, expected):
        # Compare only the ordered object ids of the search results.
        self.assertEqual([result['obj_id'] for result in results], expected)
    def test_search(self):
        self.store_test_data()
        results = self.ac.search('testing python')
        self.assertList(results, [
            {'obj_id': 1, 'title': 'testing python', 'value': 'odd'},
            {'obj_id': 2, 'title': 'testing python code', 'value': 'even'},
            {'obj_id': 3, 'title': 'web testing python code', 'value': 'odd'},
        ])
        results = self.ac.search('test')
        self.assertResults(results, [1, 2, 4, 3])
        results = self.ac.search('uni')
        self.assertResults(results, [4])
        # Empty queries, unknown words and stop-words produce no results.
        self.assertList(self.ac.search(''), [])
        self.assertList(self.ac.search('missing'), [])
        self.assertList(self.ac.search('with'), [])
    def test_boosting(self):
        letters = ('alpha', 'beta', 'gamma', 'delta')
        n = len(letters)
        # NOTE(review): this local accumulator is never used below.
        test_data = []
        for i in range(n * 3):
            obj_id = i + 1
            # Object types cycle t1/t2/t3. The %d conversion truncates, so
            # this is stable under both Py2 floor and Py3 true division.
            obj_type = 't%d' % ((i / n) + 1)
            title = 'test %s' % letters[i % n]
            self.ac.store(
                obj_id,
                title,
                {'obj_id': obj_id, 'title': title},
                obj_type)
        def assertBoosts(query, boosts, expected):
            # Run a search with per-type/per-object multipliers and check
            # the resulting ordering of object ids.
            results = self.ac.search(query, boosts=boosts)
            self.assertResults(results, expected)
        assertBoosts('alp', None, [1, 5, 9])
        assertBoosts('alp', {'t2': 1.1}, [5, 1, 9])
        assertBoosts('test', {'t3': 1.5, 't2': 1.1}, [
            9, 10, 12, 11, 5, 6, 8, 7, 1, 2, 4, 3])
        assertBoosts('alp', {'t1': 0.5}, [5, 9, 1])
        assertBoosts('alp', {'t1': 1.5, 't3': 1.6}, [9, 1, 5])
        assertBoosts('alp', {'t3': 1.5, '5': 1.6}, [5, 9, 1])
    def test_stored_boosts(self):
        """Boosts registered via boost_object() persist across searches."""
        id_to_type = {
            'aaa': 1,
            'aab': 2,
            'aac': 3,
            'aaab': 4,
            'bbbb': 4}
        for obj_id, obj_type in id_to_type.items():
            self.ac.store(obj_id, obj_type=obj_type)
        self.assertList(self.ac.search('aa'), ['aaa', 'aaab', 'aab', 'aac'])
        self.ac.boost_object(obj_type=2, multiplier=2)
        self.assertList(self.ac.search('aa'), ['aab', 'aaa', 'aaab', 'aac'])
        self.ac.boost_object('aac', multiplier=3)
        self.assertList(self.ac.search('aa'), ['aac', 'aab', 'aaa', 'aaab'])
        # Per-search boosts override the stored ones.
        results = self.ac.search('aa', boosts={'aac': 1.5})
        self.assertList(results, ['aab', 'aac', 'aaa', 'aaab'])
    def test_limit(self):
        self.store_test_data()
        results = self.ac.search('testing', limit=1)
        self.assertResults(results, [1])
        results = self.ac.search('testing', limit=2)
        self.assertResults(results, [1, 2])
    def test_search_empty(self):
        self.assertList(self.ac.search(''), [])
    def test_chunked(self):
        """Results can be consumed lazily in chunks via chunk_size."""
        for i in range(25):
            self.ac.store('foo %s' % (chr(i + ord('a')) * 2))
        ge = self.ac.search('foo', limit=21, chunk_size=5)
        results = list(ge)
        self.assertEqual(len(results), 21)
        self.assertEqual(results[0], 'foo aa')
        self.assertEqual(results[-1], 'foo uu')
    def test_scoring_proximity_to_front(self):
        """Titles whose match is nearer the front score higher."""
        self.ac.store('aa bb cc')
        self.ac.store('tt cc')
        self.assertList(self.ac.search('cc'), ['tt cc', 'aa bb cc'])
        self.ac.store('aa b cc')
        self.assertList(self.ac.search('cc'), ['tt cc', 'aa b cc', 'aa bb cc'])
    def test_simple(self):
        """store() with only a title uses the title as id and data."""
        for _, title in self.test_data:
            self.ac.store(title)
        self.assertList(self.ac.search('testing'), [
            'testing python',
            'testing python code',
            'web testing python code'])
        self.assertList(self.ac.search('code'), [
            'testing python code',
            'web testing python code'])
        self.ac.store('z python code')
        self.assertList(self.ac.search('cod'), [
            'testing python code',
            'z python code',
            'web testing python code'])
    def test_sorting(self):
        """Ties are broken alphabetically regardless of insertion order."""
        strings = []
        for i in range(26):
            strings.append('aaaa%s' % chr(i + ord('a')))
            if i > 0:
                strings.append('aaa%sa' % chr(i + ord('a')))
        random.shuffle(strings)
        for s in strings:
            self.ac.store(s)
        self.assertList(self.ac.search('aaa'), sorted(strings))
        self.assertList(self.ac.search('aaa', limit=30), sorted(strings)[:30])
    def test_removing_objects(self):
        self.store_test_data()
        self.ac.remove(1)
        self.assertResults(self.ac.search('testing'), [2, 3])
        # Restore item 1 and remove item 2.
        self.store_test_data(1)
        self.ac.remove(2)
        self.assertResults(self.ac.search('testing'), [1, 3])
        # Item with obj_id=2 has already been removed.
        with self.assertRaises(KeyError):
            self.ac.remove(2)
    def test_tokenize_title(self):
        self.assertEqual(
            self.ac.tokenize_title('abc def ghi'),
            ['abc', 'def', 'ghi'])
        # Stop-words are removed automatically.
        self.assertEqual(self.ac.tokenize_title('a A tHe an a'), [])
        # Empty string yields an empty list.
        self.assertEqual(self.ac.tokenize_title(''), [])
        # Stop-words, punctuation, capitalization, etc.
        self.assertEqual(self.ac.tokenize_title(
            'The Best of times, the blurst of times'),
            ['times', 'blurst', 'times'])
    def test_exists(self):
        self.assertFalse(self.ac.exists('test'))
        self.ac.store('test')
        self.assertTrue(self.ac.exists('test'))
    def test_key_leaks(self):
        """Removing stored items must delete every key they created."""
        initial_key_count = len(db.keys())
        # Store a single item.
        self.store_test_data(1)
        # See how many keys we have in the db - check again in a bit.
        key_len = len(db.keys())
        # Store a second item.
        self.store_test_data(2)
        key_len2 = len(db.keys())
        self.assertTrue(key_len != key_len2)
        self.ac.remove(2)
        # Back to the original amount of keys we had after one item.
        self.assertEqual(len(db.keys()), key_len)
        # Remove the first item, back to original count at start.
        self.ac.remove(1)
        self.assertEqual(len(db.keys()), initial_key_count)
    def test_updating(self):
        # store(obj_id, title=None, data=None, obj_type=None).
        self.ac.store('id1', 'title baze', 'd1', 't1')
        self.ac.store('id2', 'title nugget', 'd2', 't2')
        self.ac.store('id3', 'title foo', 'd3', 't3')
        self.assertList(self.ac.search('tit'), ['d1', 'd3', 'd2'])
        # Overwrite the data for id1.
        self.ac.store('id1', 'title foo', 'D1', 't1')
        self.assertList(self.ac.search('tit'), ['D1', 'd3', 'd2'])
        # Overwrite the data with a new title, will remove the title one refs.
        self.ac.store('id1', 'Herple', 'done', 't1')
        self.assertList(self.ac.search('tit'), ['d3', 'd2'])
        self.assertList(self.ac.search('herp'), ['done'])
        # Overwrite again, capitalizing the data and changing the title.
        self.ac.store('id1', 'title baze', 'Done', 't1')
        self.assertList(self.ac.search('tit'), ['Done', 'd3', 'd2'])
        # Verify that we clean up any crap when updating.
        self.assertList(self.ac.search('herp'), [])
    def test_word_position_ordering(self):
        """Earlier word positions win; duplicate titles are stored once."""
        self.ac.store('aaaa bbbb')
        self.ac.store('bbbb cccc')
        self.ac.store('bbbb aaaa')
        self.ac.store('aaaa bbbb')
        results = self.ac.search('bb')
        self.assertList(results, ['bbbb aaaa', 'bbbb cccc', 'aaaa bbbb'])
        self.assertList(self.ac.search('aa'), ['aaaa bbbb', 'bbbb aaaa'])
        self.ac.store('aabb bbbb')
        self.assertList(self.ac.search('bb'), [
            'bbbb aaaa',
            'bbbb cccc',
            'aaaa bbbb',
            'aabb bbbb'])
        self.assertList(self.ac.search('aa'), [
            'aaaa bbbb',
            'aabb bbbb',
            'bbbb aaaa'])
        # Verify issue 9 is fixed.
        self.ac.store('foo one')
        self.ac.store('bar foo one')
        self.assertList(self.ac.search('foo'), ['foo one', 'bar foo one'])
    def test_return_all_results(self):
        phrases = ('aa bb', 'aa cc', 'bb aa cc', 'bb cc', 'cc aa bb')
        for phrase in phrases:
            self.ac.store(phrase)
        self.assertList(sorted(self.ac.list_data()), list(phrases))
        self.assertEqual(sorted(self.ac.list_titles()), list(phrases))
    def test_multiword_phrases(self):
        """Multi-word queries intersect results regardless of word order."""
        self.ac.store('p1', 'alpha beta gamma delta')
        self.ac.store('p2', 'beta delta zeta')
        self.ac.store('p3', 'gamma zeta iota')
        self.assertList(self.ac.search('ga del'), ['alpha beta gamma delta'])
        self.assertList(self.ac.search('be de'), [
            'beta delta zeta',
            'alpha beta gamma delta'])
        self.assertList(self.ac.search('de be'), [
            'beta delta zeta',
            'alpha beta gamma delta'])
        self.assertList(self.ac.search('bet delt'), [
            'beta delta zeta',
            'alpha beta gamma delta'])
        self.assertList(self.ac.search('delt bet'), [
            'beta delta zeta',
            'alpha beta gamma delta'])
        self.assertList(self.ac.search('delt bet alpha'),
                        ['alpha beta gamma delta'])
    def test_multiword_stopword_handling(self):
        self.ac.store('p1', 'alpha beta delta')
        self.ac.store('p2', 'alpha delta gamma')
        self.ac.store('p3', 'beta gamma')
        self.assertList(self.ac.search('a'), [
            'alpha beta delta',
            'alpha delta gamma'])
        self.assertList(self.ac.search('be'), [
            'beta gamma',
            'alpha beta delta'])
        # Here since "a" is a stopword and is not the last token, we strip it.
        self.assertList(self.ac.search('a be'), [
            'beta gamma',
            'alpha beta delta'])
        # a & be are stripped, since they are stopwords and not last token.
        self.assertList(self.ac.search('a be de'), [
            'alpha delta gamma',
            'alpha beta delta'])
        self.assertList(self.ac.search('al bet de'), [
            'alpha beta delta'])
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,604
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tusks/ledisdb.py
|
import sys
import unittest
from ledis import Ledis
from ledis.client import Token
from walrus import *
from walrus.containers import chainable_method
from walrus.tusks.helpers import TestHelper
class Scannable(object):
    """Mixin providing cursor-based iteration over Ledis X*SCAN commands."""
    def _scan(self, cmd, match=None, count=None, ordering=None, limit=None):
        # Command arguments: key, initial (empty) cursor, then the optional
        # MATCH / COUNT / ASC|DESC modifiers.
        pieces = [self.key, '']
        if match:
            pieces.append(Token('MATCH'))
            pieces.append(match)
        if count:
            pieces.append(Token('COUNT'))
            pieces.append(count)
        if ordering:
            pieces.append(Token(ordering.upper()))
        return self._execute_scan(self.database, cmd, pieces, limit)
    def _execute_scan(self, database, cmd, parts, limit=None):
        # Generator that follows the server-supplied cursor, yielding rows
        # one at a time until the cursor is exhausted or ``limit`` rows
        # have been produced.
        row_count = 0
        while True:
            cursor, rows = database.execute_command(cmd, *parts)
            for row in rows:
                row_count += 1
                if limit and row_count > limit:
                    cursor = None
                    break
                yield row
            if not cursor:
                break
            # Resume the scan from the cursor returned by the server.
            parts[1] = cursor
class Sortable(object):
    """Mixin exposing the Ledis X*SORT commands."""
    def _sort(self, cmd, pattern=None, limit=None, offset=None,
              get_pattern=None, ordering=None, alpha=True, store=None):
        # Assemble the SORT invocation: BY / LIMIT / GET / ASC|DESC /
        # ALPHA / STORE, each appended only when requested.
        pieces = [self.key]
        if pattern is not None:
            pieces.extend([Token('BY'), pattern])
        if limit or offset:
            # Missing offset defaults to 0; missing limit means unbounded.
            pieces.extend([Token('LIMIT'), offset or 0, limit or 'Inf'])
        if get_pattern is not None:
            pieces.extend([Token('GET'), get_pattern])
        if ordering:
            pieces.append(Token(ordering))
        if alpha:
            pieces.append(Token('ALPHA'))
        if store is not None:
            pieces.extend([Token('STORE'), store])
        return self.database.execute_command(cmd, *pieces)
class LedisHash(Scannable, Hash):
    """Hash container using Ledis-native clear/expire and XHSCAN iteration."""
    @chainable_method
    def clear(self):
        # HCLEAR deletes the entire hash in one command.
        self.database.hclear(self.key)
    @chainable_method
    def expire(self, ttl=None):
        # A ttl of None removes any existing expiry instead of setting one.
        if ttl is None:
            self.database.hpersist(self.key)
        else:
            self.database.hexpire(self.key, ttl)
    def __iter__(self):
        return self._scan('XHSCAN')
    def scan(self, match=None, count=None, ordering=None, limit=None):
        # Each hash entry yields both a field and a value, so the caller's
        # entry limit translates to twice as many scan rows.
        if limit is not None:
            limit = limit * 2
        return self._scan('XHSCAN', match, count, ordering, limit)
class LedisList(Sortable, List):
    """List container backed by Ledis list commands; sortable via XLSORT."""
    @chainable_method
    def clear(self):
        self.database.lclear(self.key)
    def __setitem__(self, idx, value):
        # Ledis has no LSET-style command, so index assignment is an error.
        raise TypeError('Ledis does not support setting values by index.')
    @chainable_method
    def expire(self, ttl=None):
        # A ttl of None removes any existing expiry instead of setting one.
        if ttl is None:
            self.database.lpersist(self.key)
        else:
            self.database.lexpire(self.key, ttl)
    def sort(self, *args, **kwargs):
        return self._sort('XLSORT', *args, **kwargs)
class LedisSet(Scannable, Sortable, Set):
    """Set container using Ledis set commands plus XSSCAN/XSSORT."""
    @chainable_method
    def clear(self):
        self.database.sclear(self.key)
    @chainable_method
    def expire(self, ttl=None):
        # A ttl of None removes any existing expiry instead of setting one.
        if ttl is None:
            self.database.spersist(self.key)
        else:
            self.database.sexpire(self.key, ttl)
    def __iter__(self):
        return self._scan('XSSCAN')
    def scan(self, match=None, count=None, ordering=None, limit=None):
        return self._scan('XSSCAN', match, count, ordering, limit)
    def sort(self, *args, **kwargs):
        return self._sort('XSSORT', *args, **kwargs)
class LedisZSet(Scannable, Sortable, ZSet):
    """Sorted-set container using Ledis zset commands plus XZSCAN/XZSORT."""
    @chainable_method
    def clear(self):
        self.database.zclear(self.key)
    @chainable_method
    def expire(self, ttl=None):
        # A ttl of None removes any existing expiry instead of setting one.
        if ttl is None:
            self.database.zpersist(self.key)
        else:
            self.database.zexpire(self.key, ttl)
    def __iter__(self):
        return self._scan('XZSCAN')
    def scan(self, match=None, count=None, ordering=None, limit=None):
        # Scans yield member/score pairs, so double the requested row limit.
        if limit:
            limit = limit * 2
        return self._scan('XZSCAN', match, count, ordering, limit)
    def sort(self, *args, **kwargs):
        return self._sort('XZSORT', *args, **kwargs)
class LedisBitSet(Container):
    """Bit-string container implemented with GETBIT/SETBIT/BITPOS/BITOP."""
    def clear(self):
        self.database.delete(self.key)
    def __getitem__(self, idx):
        return self.database.execute_command('GETBIT', self.key, idx)
    def __setitem__(self, idx, value):
        return self.database.execute_command('SETBIT', self.key, idx, value)
    def pos(self, bit, start=None, end=None):
        """Return the position of the first bit equal to ``bit`` within the
        optional [start, end] byte range."""
        command = ['BITPOS', self.key, bit]
        if start or end:
            command.append(start or 0)
        if end:
            command.append(end)
        return self.database.execute_command(*command)
    def _bitop(self, operation, other):
        # Combine this bitset with ``other`` in place using BITOP, storing
        # the result back into this key.
        self.database.execute_command('BITOP', operation, self.key, self.key,
                                      other.key)
        return self
    def __iand__(self, other):
        return self._bitop('AND', other)
    def __ior__(self, other):
        return self._bitop('OR', other)
    def __ixor__(self, other):
        return self._bitop('XOR', other)
    def __str__(self):
        return self.database[self.key]
    __unicode__ = __str__
class WalrusLedis(Ledis, Scannable, Walrus):
    """Ledis client augmented with the walrus container/database API."""
    def __init__(self, *args, **kwargs):
        super(WalrusLedis, self).__init__(*args, **kwargs)
    def __setitem__(self, key, value):
        # Dict-style assignment maps directly onto SET.
        self.set(key, value)
    def setex(self, name, value, time):
        # Accept redis-py style (name, value, time) and translate to the
        # underlying (name, time, value) argument order.
        return super(WalrusLedis, self).setex(name, time, value)
    def zadd(self, key, *args, **kwargs):
        # Callers pass (member, score, member, score, ...) pairs; the
        # underlying client expects the score first, so swap each pair.
        # A leading numeric argument means the data is already ordered.
        if isinstance(args[0], (int, float)):
            swapped = args
        else:
            swapped = []
            for i in range(0, len(args), 2):
                swapped.extend((args[i + 1], args[i]))
        return super(WalrusLedis, self).zadd(key, *swapped, **kwargs)
    def hash_exists(self, key):
        return self.execute_command('HKEYEXISTS', key)
    def __iter__(self):
        return self.scan()
    def scan(self, *args, **kwargs):
        return self._scan('XSCAN', *args, **kwargs)
    def _scan(self, cmd, match=None, count=None, ordering=None, limit=None):
        # Database-level scans operate on the 'KV' keyspace rather than a
        # single container key.
        pieces = ['KV', '']
        if match:
            pieces.extend([Token('MATCH'), match])
        if count:
            pieces.extend([Token('COUNT'), count])
        if ordering:
            pieces.append(Token(ordering.upper()))
        return self._execute_scan(self, cmd, pieces, limit)
    def update(self, values):
        # Bulk-assign a dict of key/value pairs via MSET.
        return self.mset(values)
    def BitSet(self, key):
        return LedisBitSet(self, key)
    def Hash(self, key):
        return LedisHash(self, key)
    def List(self, key):
        return LedisList(self, key)
    def Set(self, key):
        return LedisSet(self, key)
    def ZSet(self, key):
        return LedisZSet(self, key)
class TestWalrusLedis(TestHelper, unittest.TestCase):
    """Integration tests for the Ledis-backed containers.
    NOTE(review): WalrusLedis() is constructed with no arguments, so these
    tests presumably require a ledis server on the default host/port —
    confirm before running.
    """
    def setUp(self):
        self.db = WalrusLedis()
        self.db.flushall()
    def test_scan(self):
        values = {
            'k1': 'v1',
            'k2': 'v2',
            'k3': 'v3',
            'charlie': 31,
            'mickey': 7,
            'huey': 5}
        self.db.update(values)
        # Scans return keys in sorted order.
        results = self.db.scan()
        expected = ['charlie', 'huey', 'k1', 'k2', 'k3', 'mickey']
        self.assertEqual(list(results), expected)
        self.assertEqual([item for item in self.db], expected)
    def test_hash_iter(self):
        h = self.db.Hash('h_obj')
        h.clear()
        h.update({'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
        # Hash scans interleave fields and values; limit counts entries.
        items = [item for item in h]
        self.assertEqual(items, ['k1', 'v1', 'k2', 'v2', 'k3', 'v3'])
        items = [item for item in h.scan(limit=2)]
        self.assertEqual(items, ['k1', 'v1', 'k2', 'v2'])
    def test_no_setitem_list(self):
        l = self.db.List('l_obj').clear()
        l.append('foo')
        self.assertRaises(TypeError, lambda: l.__setitem__(0, 'xx'))
    def test_set_iter(self):
        s = self.db.Set('s_obj').clear()
        s.add('charlie', 'huey', 'mickey')
        items = [item for item in s]
        self.assertEqual(sorted(items), ['charlie', 'huey', 'mickey'])
        items = [item for item in s.scan(limit=2, ordering='DESC')]
        self.assertEqual(items, ['mickey', 'huey'])
    def test_zset_iter(self):
        zs = self.db.ZSet('z_obj').clear()
        zs.add('zaizee', 3, 'mickey', 6, 'charlie', 31, 'huey', 3, 'nuggie', 0)
        # ZSet scans interleave members and scores, ordered by member name.
        items = [item for item in zs]
        self.assertEqual(items, [
            'charlie', '31',
            'huey', '3',
            'mickey', '6',
            'nuggie', '0',
            'zaizee', '3',
        ])
        items = [item for item in zs.scan(limit=3, ordering='DESC')]
        self.assertEqual(items, [
            'zaizee', '3',
            'nuggie', '0',
            'mickey', '6',
        ])
    def test_bit_set(self):
        # NOTE(review): the plain-str expected values ('\xd0', etc.) suggest
        # these assertions target Python 2 response decoding — confirm.
        b = self.db.BitSet('bitset_obj')
        b.clear()
        b[0] = 1
        b[1] = 1
        b[2] = 0
        b[3] = 1
        self.assertEqual(self.db[b.key], '\xd0')
        b[4] = 1
        self.assertEqual(self.db[b.key], '\xd8')
        self.assertEqual(b[0], 1)
        self.assertEqual(b[2], 0)
        self.db['b1'] = 'foobar'
        self.db['b2'] = 'abcdef'
        b = self.db.BitSet('b1')
        b2 = self.db.BitSet('b2')
        # In-place AND combines the two strings bitwise.
        b &= b2
        self.assertEqual(self.db[b.key], '`bc`ab')
        self.assertEqual(str(b), '`bc`ab')
        self.db['b1'] = '\x00\xff\xf0'
        self.assertEqual(b.pos(1, 0), 8)
        self.assertEqual(b.pos(1, 2), 16)
        self.db['b1'] = '\x00\x00\x00'
        self.assertEqual(b.pos(1), -1)
    def test_sorting(self):
        items = ['charlie', 'zaizee', 'mickey', 'huey']
        sorted_items = sorted(items)
        l = self.db.List('l_obj').clear()
        l.extend(items)
        results = l.sort()
        self.assertEqual(results, sorted_items)
        # STORE writes the sorted output into a destination list.
        dest = self.db.List('l_dest')
        l.sort(ordering='DESC', limit=3, store=dest.key)
        results = list(dest)
        self.assertEqual(results, ['zaizee', 'mickey', 'huey'])
        s = self.db.Set('s_obj').clear()
        s.add(*items)
        results = s.sort()
        self.assertEqual(results, sorted_items)
        results = s.sort(ordering='DESC', limit=3)
        self.assertEqual(results, ['zaizee', 'mickey', 'huey'])
        z = self.db.ZSet('z_obj').clear()
        z.add('charlie', 10, 'zaizee', 10, 'mickey', 3, 'huey', 4)
        # XZSORT sorts by member (ALPHA), not by score.
        results = z.sort()
        self.assertEqual(results, sorted_items)
        results = z.sort(ordering='DESC', limit=3)
        self.assertEqual(results, ['zaizee', 'mickey', 'huey'])
# Allow running this module's test case directly.
if __name__ == '__main__':
    unittest.main(argv=sys.argv)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,605
|
coleifer/walrus
|
refs/heads/master
|
/walrus/cache.py
|
from functools import wraps
import hashlib
import pickle
import sys
import threading
import time
try:
from Queue import Queue # Python 2
except ImportError:
from queue import Queue # Python 3
from walrus.utils import decode
from walrus.utils import encode
from walrus.utils import PY3
if PY3:
imap = map
else:
from itertools import imap
class Cache(object):
"""
Cache implementation with simple ``get``/``set`` operations,
and a decorator.
"""
def __init__(self, database, name='cache', default_timeout=None,
debug=False):
"""
:param database: :py:class:`Database` instance.
:param name: Namespace for this cache.
:param int default_timeout: Default cache timeout.
:param debug: Disable cache for debugging purposes. Cache will no-op.
"""
self.database = database
self.name = name
self.prefix_len = len(self.name) + 1
self.default_timeout = default_timeout
self.debug = debug
self.metrics = {'hits': 0, 'misses': 0, 'writes': 0}
def make_key(self, s):
return ':'.join((self.name, s))
    def unmake_key(self, k):
        """Strip the namespace prefix added by :py:meth:`make_key`."""
        return k[self.prefix_len:]
def get(self, key, default=None):
"""
Retreive a value from the cache. In the event the value
does not exist, return the ``default``.
"""
key = self.make_key(key)
if self.debug:
return default
try:
value = self.database[key]
except KeyError:
self.metrics['misses'] += 1
return default
else:
self.metrics['hits'] += 1
return pickle.loads(value)
def set(self, key, value, timeout=None):
"""
Cache the given ``value`` in the specified ``key``. If no
timeout is specified, the default timeout will be used.
"""
key = self.make_key(key)
if timeout is None:
timeout = self.default_timeout
if self.debug:
return True
pickled_value = pickle.dumps(value)
self.metrics['writes'] += 1
if timeout:
return self.database.setex(key, int(timeout), pickled_value)
else:
return self.database.set(key, pickled_value)
def delete(self, key):
"""Remove the given key from the cache."""
if self.debug: return 0
return self.database.delete(self.make_key(key))
def get_many(self, keys):
"""
Retrieve multiple values from the cache. Missing keys are not included
in the result dictionary.
:param list keys: list of keys to fetch.
:returns: dictionary mapping keys to cached values.
"""
accum = {}
if self.debug: return accum
prefixed = [self.make_key(key) for key in keys]
for key, value in zip(keys, self.database.mget(prefixed)):
if value is not None:
accum[key] = pickle.loads(value)
self.metrics['hits'] += 1
else:
self.metrics['misses'] += 1
return accum
def set_many(self, __data=None, timeout=None, **kwargs):
"""
Set multiple key/value pairs in one operation.
:param dict __data: provide data as dictionary of key/value pairs.
:param timeout: optional timeout for data.
:param kwargs: alternatively, provide data as keyword arguments.
:returns: True on success.
"""
if self.debug:
return True
timeout = timeout if timeout is not None else self.default_timeout
if __data is not None:
kwargs.update(__data)
accum = {}
for key, value in kwargs.items():
accum[self.make_key(key)] = pickle.dumps(value)
pipeline = self.database.pipeline()
pipeline.mset(accum)
if timeout:
for key in accum:
pipeline.expire(key, timeout)
self.metrics['writes'] += len(accum)
return pipeline.execute()[0]
def delete_many(self, keys):
"""
Delete multiple keys from the cache in one operation.
:param list keys: keys to delete.
:returns: number of keys removed.
"""
if self.debug: return
prefixed = [self.make_key(key) for key in keys]
return self.database.delete(*prefixed)
def keys(self):
"""
Return all keys for cached values.
"""
return imap(decode, self.database.keys(self.make_key('') + '*'))
def flush(self):
"""Remove all cached objects from the database."""
keys = list(self.keys())
if keys:
return self.database.delete(*keys)
    def incr(self, key, delta=1):
        """Atomically increment the integer at ``key`` by ``delta``."""
        return self.database.incr(self.make_key(key), delta)
def _key_fn(a, k):
return hashlib.md5(pickle.dumps((a, k))).hexdigest()
def cached(self, key_fn=_key_fn, timeout=None, metrics=False):
"""
Decorator that will transparently cache calls to the
wrapped function. By default, the cache key will be made
up of the arguments passed in (like memoize), but you can
override this by specifying a custom ``key_fn``.
:param key_fn: Function used to generate a key from the
given args and kwargs.
:param timeout: Time to cache return values.
:param metrics: Keep stats on cache utilization and timing.
:returns: Return the result of the decorated function
call with the given args and kwargs.
Usage::
cache = Cache(my_database)
@cache.cached(timeout=60)
def add_numbers(a, b):
return a + b
print add_numbers(3, 4) # Function is called.
print add_numbers(3, 4) # Not called, value is cached.
add_numbers.bust(3, 4) # Clear cache for (3, 4).
print add_numbers(3, 4) # Function is called.
The decorated function also gains a new attribute named
``bust`` which will clear the cache for the given args.
"""
def decorator(fn):
def make_key(args, kwargs):
return '%s:%s' % (fn.__name__, key_fn(args, kwargs))
def bust(*args, **kwargs):
return self.delete(make_key(args, kwargs))
_metrics = {
'hits': 0,
'misses': 0,
'avg_hit_time': 0,
'avg_miss_time': 0}
@wraps(fn)
def inner(*args, **kwargs):
start = time.time()
is_cache_hit = True
key = make_key(args, kwargs)
res = self.get(key, sentinel)
if res is sentinel:
res = fn(*args, **kwargs)
self.set(key, res, timeout)
is_cache_hit = False
if metrics:
dur = time.time() - start
if is_cache_hit:
_metrics['hits'] += 1
_metrics['avg_hit_time'] += (dur / _metrics['hits'])
else:
_metrics['misses'] += 1
_metrics['avg_miss_time'] += (dur / _metrics['misses'])
return res
inner.bust = bust
inner.make_key = make_key
if metrics:
inner.metrics = _metrics
return inner
return decorator
    def cached_property(self, key_fn=_key_fn, timeout=None):
        """
        Decorator that will transparently cache calls to the wrapped
        method. The method will be exposed as a property.

        :param key_fn: Function used to generate a cache key.
        :param timeout: Time to cache the computed value.

        Usage::

            cache = Cache(my_database)

            class Clock(object):
                @cache.cached_property()
                def now(self):
                    return datetime.datetime.now()

            clock = Clock()
            print clock.now
        """
        this = self  # Capture the Cache; ``self`` is rebound in the descriptor.
        class _cached_property(object):
            def __init__(self, fn):
                # Wrap the method with the normal caching decorator; the
                # instance becomes the sole (positional) cache-key input.
                self._fn = this.cached(key_fn, timeout)(fn)
            def __get__(self, instance, instance_type=None):
                if instance is None:
                    # Accessed on the class itself: return the descriptor.
                    return self
                return self._fn(instance)
            def __delete__(self, obj):
                # ``del obj.prop`` busts the cached value for this instance.
                self._fn.bust(obj)
            def __set__(self, instance, value):
                raise ValueError('Cannot set value of a cached property.')
        def decorator(fn):
            return _cached_property(fn)
        return decorator
    def cache_async(self, key_fn=_key_fn, timeout=3600):
        """
        Decorator that will execute the cached function in a separate
        thread. The function will immediately return, returning a
        callable to the user. This callable can be used to check for
        a return value.

        For details, see the :ref:`cache-async` section of the docs.

        :param key_fn: Function used to generate cache key.
        :param int timeout: Cache timeout in seconds.
        :returns: A new function which can be called to retrieve the
            return value of the decorated function.
        """
        def decorator(fn):
            # Apply normal caching first; the thread runs the cached call.
            wrapped = self.cached(key_fn, timeout)(fn)
            @wraps(fn)
            def inner(*args, **kwargs):
                q = Queue()
                def _sub_fn():
                    # Runs in the worker thread; hands the result back
                    # through the queue.
                    q.put(wrapped(*args, **kwargs))
                def _get_value(block=True, timeout=None):
                    # Memoize the first result on the function object so
                    # repeated calls do not block on an empty queue.
                    if not hasattr(_get_value, '_return_value'):
                        result = q.get(block=block, timeout=timeout)
                        _get_value._return_value = result
                    return _get_value._return_value
                thread = threading.Thread(target=_sub_fn)
                thread.start()
                return _get_value
            return inner
        return decorator
class sentinel(object):
    """Unique marker distinguishing "no cached value" from a cached None."""
    pass
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,606
|
coleifer/walrus
|
refs/heads/master
|
/walrus/utils.py
|
import os
import re
import sys
# Python 2/3 compatibility shims: text-type aliases plus a helper for
# pulling the human-readable message out of an exception instance.
PY3 = sys.version_info[0] == 3
if not PY3:
    unicode_type = unicode
    basestring_type = basestring
    def exception_message(exc):
        """Return the message carried by ``exc``."""
        return exc.message
else:
    unicode_type = str
    basestring_type = (str, bytes)
    def exception_message(exc):
        """Return the message carried by ``exc``."""
        return exc.args[0]
def encode(s):
    """UTF-8 encode ``s`` when it is a text string; pass through otherwise."""
    if isinstance(s, unicode_type):
        return s.encode('utf-8')
    return s
def decode(s):
    """UTF-8 decode ``s`` when it is a bytes object; pass through otherwise."""
    if isinstance(s, bytes):
        return s.decode('utf-8')
    return s
def decode_dict(d):
    """Return a copy of ``d`` with both keys and values decoded."""
    return dict((decode(key), decode(value)) for key, value in d.items())
def safe_decode_list(l):
    """Decode bytes items using raw_unicode_escape; others pass through."""
    decoded = []
    for item in l:
        if isinstance(item, bytes):
            item = item.decode('raw_unicode_escape')
        decoded.append(item)
    return decoded
def decode_dict_keys(d):
    """Return a copy of ``d`` with keys decoded and values untouched."""
    return dict((decode(key), d[key]) for key in d)
def make_python_attr(s):
    """
    Convert an arbitrary string (or bytes) into a usable Python
    identifier: runs of non-word characters become underscores, a
    leading digit gets an underscore prefix, and the result is
    lower-cased.

    :raises ValueError: if nothing identifier-like remains.
    """
    if isinstance(s, bytes):
        s = decode(s)
    # Raw string for the regex: the original '[^\w]+' relied on the
    # invalid escape '\w' in a plain string literal, which raises a
    # DeprecationWarning on modern Pythons (and will become an error).
    s = re.sub(r'[^\w]+', '_', s)
    if not s:
        raise ValueError('cannot construct python identifier from "%s"' % s)
    if s[0].isdigit():
        s = '_' + s
    return s.lower()
class memoize(dict):
    """
    Cache a function's return values keyed on its positional args.

    Subclasses ``dict`` so cached results live directly in the mapping;
    ``__missing__`` computes-and-stores on first access.
    """
    def __init__(self, fn):
        self._fn = fn

    def __call__(self, *args):
        return self[args]

    def __missing__(self, key):
        value = self._fn(*key)
        self[key] = value
        return value
@memoize
def load_stopwords(stopwords_file):
    """
    Read and return the contents of a stopwords file, memoized per
    filename. Bare filenames are resolved relative to this module's
    directory; missing files yield ``None``.
    """
    directory, name = os.path.split(stopwords_file)
    if not directory:
        directory = os.path.dirname(__file__)
    full_path = os.path.join(directory, name)
    if not os.path.exists(full_path):
        return
    with open(full_path) as fh:
        return fh.read()
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,607
|
coleifer/walrus
|
refs/heads/master
|
/examples/stocks.py
|
import urllib2
from walrus import Database
# Module-level walrus database handle and a namespaced autocomplete
# index, shared by load_data() and search() below.
db = Database()
autocomplete = db.autocomplete(namespace='stocks')
def load_data():
    """
    Download the NYSE ticker list (tab-separated, header row first) and
    index every ticker/company pair into the autocomplete store.
    """
    url = 'http://media.charlesleifer.com/blog/downloads/misc/NYSE.txt'
    contents = urllib2.urlopen(url).read()
    # Skip the header row, then store one entry per "ticker<TAB>company".
    for row in contents.splitlines()[1:]:
        ticker, company = row.split('\t')
        metadata = {'ticker': ticker, 'company': company}
        autocomplete.store(ticker, company, metadata)
def search(p, **kwargs):
    """Proxy to the autocomplete index's search for partial phrase ``p``."""
    return autocomplete.search(p, **kwargs)
if __name__ == '__main__':
    # Interactive demo loop. NOTE: this script is Python 2 only
    # (print statements, raw_input, urllib2).
    autocomplete.flush()
    print 'Loading data (may take a few seconds...)'
    load_data()
    print 'Search stock data by typing a partial phrase.'
    print 'Examples: "uni sta", "micro", "food", "auto"'
    print 'Type "q" at any time to quit'
    while 1:
        cmd = raw_input('? ')
        if cmd == 'q':
            break
        results = search(cmd)
        print 'Found %s matches' % len(results)
        for result in results:
            print '%s: %s' % (result['ticker'], result['company'])
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,608
|
coleifer/walrus
|
refs/heads/master
|
/walrus/query.py
|
import re
from collections import deque
from walrus.containers import Set
from walrus.containers import ZSet
# Operator tokens carried by Expression nodes.
OP_AND = 'and'
OP_OR = 'or'
OP_EQ = '=='
OP_NE = '!='
OP_LT = '<'
OP_LTE = '<='
OP_GT = '>'
OP_GTE = '>='
OP_BETWEEN = 'between'
OP_MATCH = 'match'
# Operator groupings by index type: absolute (set-based equality),
# continuous (sorted-set / range-based), and full-text search.
ABSOLUTE = set([OP_EQ, OP_NE])
CONTINUOUS = set([OP_LT, OP_LTE, OP_GT, OP_GTE, OP_BETWEEN])
FTS = set([OP_MATCH])
class Lexer(object):
    """
    Tokenize a free-form search query into (symbol, value) pairs,
    injecting the default conjunction between adjacent bare terms.
    """
    def __init__(self, query, default_conjunction='AND'):
        self.query = query
        self.default_conjunction = default_conjunction

        def symbol_handler(symbol_type):
            # Build a scanner callback yielding (symbol_type, token).
            def callback(scanner, token):
                return (symbol_type, token)
            return callback

        def quoted_string_handler(scanner, token):
            # Strip the surrounding quotes and normalize case.
            return ('STRING', token[1:-1].lower())

        def bare_string_handler(scanner, token):
            return ('STRING', token.lower())

        self.scanner = re.Scanner([
            (r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"', quoted_string_handler),
            (r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", quoted_string_handler),
            (r'\bAND\b', symbol_handler('AND')),
            (r'\bOR\b', symbol_handler('OR')),
            (r'[@_\-\w]+', bare_string_handler),
            (r'&', symbol_handler('AND')),
            (r'\|', symbol_handler('OR')),
            (r'\(', symbol_handler('LPAREN')),
            (r'\)', symbol_handler('RPAREN')),
            (r'\s+', None),
        ], re.U)

    def lex(self):
        tokens, _ = self.scanner.scan(self.query)
        previous = None
        for (symbol, value) in tokens:
            if previous == 'STRING' and symbol == 'STRING':
                # Two adjacent terms with no explicit operator: inject
                # the default conjunction between them.
                yield self.default_conjunction, None
            yield symbol, value
            previous = symbol
class BaseSymbol(object):
    """Base-class for a symbol in the AST."""
    __slots__ = []
    def code(self):
        # Subclasses return a callable (field, search_string) -> Expression.
        raise NotImplementedError
class Symbol(BaseSymbol):
    """An interior node of the AST, with left and optionally right children."""
    __slots__ = ['left', 'right']
    def __init__(self, left, right):
        # ``left`` and ``right`` are child AST nodes.
        self.left = left
        self.right = right
class Leaf(BaseSymbol):
    """Leaf node of the AST, with a value."""
    __slots__ = ['value']
    def __init__(self, value):
        self.value = value
    def code(self):
        # A bare search term compiles to a full-text MATCH expression
        # against the field being searched.
        return lambda f, s: Expression(f, OP_MATCH, self.value)
class And(Symbol):
    """Conjunction node: both children must match."""
    def code(self):
        # Recursively compile children, joining them with OP_AND.
        return lambda f, s: Expression(
            self.left.code()(f, s),
            OP_AND,
            self.right.code()(f, s))
class Or(Symbol):
    """Disjunction node: either child may match."""
    def code(self):
        # Recursively compile children, joining them with OP_OR.
        return lambda f, s: Expression(
            self.left.code()(f, s),
            OP_OR,
            self.right.code()(f, s))
class Parser(object):
    """
    Recursive-descent parser over the Lexer's token stream, producing
    an AST of And/Or/Leaf nodes. Grammar (AND binds tighter than OR):

        expression := term (OR term)*
        term       := factor (AND factor)*
        factor     := STRING | LPAREN expression RPAREN
    """
    def __init__(self, lexer):
        self.lexer = lexer
        self.symbol_stream = lexer.lex()
        self.root = None
        self.current = None  # Most recently read symbol type.
        self.finished = False
    def get_symbol(self):
        # Advance the token stream; on exhaustion, set ``finished`` and
        # leave ``current``/``sval`` at their last values.
        try:
            self.current, self.sval = next(self.symbol_stream)
        except StopIteration:
            self.finished = True
        return self.current
    def parse(self):
        """Parse the full query and return the AST root node."""
        self._expression()
        if not self.finished:
            # Leftover tokens mean the grammar did not consume the whole
            # query, e.g. unbalanced parentheses.
            raise ValueError('Malformed expression: %s.' % self.lexer.query)
        return self.root
    def _expression(self):
        self._term()
        while (self.current == 'OR'):
            left = self.root
            self._term()
            self.root = Or(left, self.root)
    def _term(self):
        self._factor()
        while (self.current == 'AND'):
            left = self.root
            self._factor()
            self.root = And(left, self.root)
    def _factor(self):
        symbol = self.get_symbol()
        if symbol == 'STRING':
            self.root = Leaf(self.sval)
            self.get_symbol()
        elif symbol == 'LPAREN':
            self._expression()
            self.get_symbol()
        else:
            raise ValueError('Malformed expression: %s.' % self.lexer.query)
def parse(s, field, default_conjunction='AND'):
    """
    Parse the search-query string ``s`` into an Expression tree applied
    to ``field``. Blank / whitespace-only input yields ``None``.
    """
    if not s.strip():
        return None
    tree = Parser(Lexer(s, default_conjunction=default_conjunction)).parse()
    return tree.code()(field, s)
class Node(object):
    """
    Base class supplying operator overloads that build Expression
    trees; model fields subclass this to support query construction.
    """
    def __init__(self):
        self._ordering = None
    def desc(self):
        # Wrap this node to indicate descending sort order.
        return Desc(self)
    def between(self, low, high):
        return Expression(self, OP_BETWEEN, (low, high))
    def match(self, term):
        return Expression(self, OP_MATCH, term)
    def search(self, search_query, default_conjunction=OP_AND):
        # Parse a free-form search string into an expression tree
        # rooted at this node/field.
        return parse(search_query, self, default_conjunction)
    def _e(op, inv=False):
        # Factory for the operator-overload methods below; ``inv``
        # swaps operand order for the reflected variants.
        def inner(self, rhs):
            if inv:
                return Expression(rhs, op, self)
            return Expression(self, op, rhs)
        return inner
    __and__ = _e(OP_AND)
    __or__ = _e(OP_OR)
    __rand__ = _e(OP_AND, inv=True)
    __ror__ = _e(OP_OR, inv=True)
    __eq__ = _e(OP_EQ)
    __ne__ = _e(OP_NE)
    __lt__ = _e(OP_LT)
    __le__ = _e(OP_LTE)
    __gt__ = _e(OP_GT)
    __ge__ = _e(OP_GTE)
class Desc(Node):
    """Marker wrapping a node to request descending ordering."""
    def __init__(self, node):
        self.node = node
class Expression(Node):
    """Binary expression ``lhs <op> rhs``; operands may themselves nest."""
    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.op = op
        self.rhs = rhs
    def __repr__(self):
        return '(%s %s %s)' % (self.lhs, self.op, self.rhs)
class Executor(object):
    """
    Given an arbitrarily complex expression, recursively execute
    it and return the resulting set (or sorted set). The set will
    correspond to the primary hash keys of matching objects.

    The executor works *only on fields with secondary indexes* or
    the global "all" index created for all models.
    """
    def __init__(self, database, temp_key_expire=15):
        self.database = database
        # TTL (seconds) applied to every intermediate temp key so that
        # abandoned query results clean themselves up.
        self.temp_key_expire = temp_key_expire
        # Dispatch table mapping operator tokens to handler methods.
        self._mapping = {
            OP_OR: self.execute_or,
            OP_AND: self.execute_and,
            OP_EQ: self.execute_eq,
            OP_NE: self.execute_ne,
            OP_GT: self.execute_gt,
            OP_GTE: self.execute_gte,
            OP_LT: self.execute_lt,
            OP_LTE: self.execute_lte,
            OP_BETWEEN: self.execute_between,
            OP_MATCH: self.execute_match,
        }
    def execute(self, expression):
        # Entry point: dispatch on the expression's operator.
        op = expression.op
        return self._mapping[op](expression.lhs, expression.rhs)
    def execute_eq(self, lhs, rhs):
        # Equality reads the absolute index directly: one set per value.
        index = lhs.get_index(OP_EQ)
        return index.get_key(lhs.db_value(rhs))
    def execute_ne(self, lhs, rhs):
        # Inequality = (all objects) minus (objects equal to rhs),
        # materialized into a short-lived temp set.
        all_set = lhs.model_class._query.all_index()
        index = lhs.get_index(OP_NE)
        exclude_set = index.get_key(lhs.db_value(rhs))
        tmp_set = all_set.diffstore(self.database.get_temp_key(), exclude_set)
        tmp_set.expire(self.temp_key_expire)
        return tmp_set
    def _zset_score_filter(self, zset, low, high):
        # Run the server-side 'zset_score_filter' script, which copies
        # members of ``zset`` whose score lies in [low, high] into a
        # temporary plain set.
        tmp_set = self.database.Set(self.database.get_temp_key())
        self.database.run_script(
            'zset_score_filter',
            keys=[zset.key, tmp_set.key],
            args=[low, high])
        tmp_set.expire(self.temp_key_expire)
        return tmp_set
    def execute_between(self, lhs, rhs):
        index = lhs.get_index(OP_BETWEEN)
        low, high = map(lhs.db_value, rhs)
        zset = index.get_key(None) # No value necessary.
        return self._zset_score_filter(zset, low, high)
    def execute_lte(self, lhs, rhs):
        index = lhs.get_index(OP_LTE)
        db_value = lhs.db_value(rhs)
        zset = index.get_key(db_value)
        return self._zset_score_filter(zset, float('-inf'), db_value)
    def execute_gte(self, lhs, rhs):
        index = lhs.get_index(OP_GTE)
        db_value = lhs.db_value(rhs)
        zset = index.get_key(db_value)
        return self._zset_score_filter(zset, db_value, float('inf'))
    def execute_lt(self, lhs, rhs):
        # NOTE(review): requests the OP_LTE index -- presumably all
        # continuous operators share a single sorted-set index; confirm
        # against the field's get_index() implementation.
        index = lhs.get_index(OP_LTE)
        db_value = lhs.db_value(rhs)
        zset = index.get_key(db_value)
        # The '(' prefix marks an exclusive bound (Redis interval syntax).
        return self._zset_score_filter(zset, float('-inf'), '(%s' % db_value)
    def execute_gt(self, lhs, rhs):
        # See execute_lt: same shared-index note, exclusive lower bound.
        index = lhs.get_index(OP_GTE)
        db_value = lhs.db_value(rhs)
        zset = index.get_key(db_value)
        return self._zset_score_filter(zset, '(%s' % db_value, float('inf'))
    def execute_match(self, lhs, rhs):
        # Tokenize the search term and intersect the per-word zsets;
        # an empty token list yields an empty result zset.
        index = lhs.get_index(OP_MATCH)
        db_value = lhs.db_value(rhs)
        words = index.tokenizer.tokenize(db_value)
        index_keys = []
        for word in words:
            index_keys.append(index.get_key(word).key)
        results = self.database.ZSet(self.database.get_temp_key())
        if index_keys:
            self.database.zinterstore(results.key, index_keys)
        results.expire(self.temp_key_expire)
        return results
    def _combine_sets(self, lhs, rhs, operation):
        # Recursively materialize operand expressions into sets, then
        # combine them with the store-variant of inter/union.
        if not isinstance(lhs, (Set, ZSet)):
            lhs = self.execute(lhs)
        if not isinstance(rhs, (Set, ZSet)):
            rhs = self.execute(rhs)
        source, dest = lhs, rhs
        if type(lhs) != type(rhs):
            # We'll perform the operation using the ZSet, as you can't call
            # SINTERSTORE or SUNIONSTORE with a ZSet.
            if isinstance(rhs, ZSet):
                source, dest = rhs, lhs
        if operation == 'AND':
            method = source.interstore
        elif operation == 'OR':
            method = source.unionstore
        else:
            raise ValueError('Unrecognized operation: "%s".' % operation)
        tmp_set = method(self.database.get_temp_key(), dest)
        tmp_set.expire(self.temp_key_expire)
        return tmp_set
    def execute_or(self, lhs, rhs):
        return self._combine_sets(lhs, rhs, 'OR')
    def execute_and(self, lhs, rhs):
        return self._combine_sets(lhs, rhs, 'AND')
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,609
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tusks/helpers.py
|
import datetime
from walrus import *
class TestHelper(object):
    """
    Mixin of backend-agnostic smoke tests exercising the common walrus
    API (plain strings, Hash, List, Set, ZSet, models, cache) against
    ``self.db``; the concrete "tusk" test cases supply the database.
    """
    def test_simple_string_ops(self):
        self.assertTrue(self.db.set('name', 'charlie'))
        self.assertEqual(self.db.get('name'), 'charlie')
        self.assertIsNone(self.db.get('not-exist'))
        self.assertFalse(self.db.setnx('name', 'huey'))
        self.db.setnx('friend', 'zaizee')
        self.assertEqual(self.db['name'], 'charlie')
        self.assertEqual(self.db['friend'], 'zaizee')
        self.assertTrue(self.db.mset({'k1': 'v1', 'k2': 'v2'}))
        res = self.db.mget('k1', 'k2')
        self.assertEqual(res, ['v1', 'v2'])
        self.db.append('k1', 'xx')
        self.assertEqual(self.db['k1'], 'v1xx')
        del self.db['counter']
        self.assertEqual(self.db.incr('counter'), 1)
        self.assertEqual(self.db.incr('counter', 5), 6)
        self.assertEqual(self.db.decr('counter', 2), 4)
        self.assertEqual(self.db.getrange('name', 3, 5), 'rli')
        self.assertEqual(self.db.getset('k2', 'baze'), 'v2')
        self.assertEqual(self.db['k2'], 'baze')
        self.assertEqual(self.db.strlen('name'), 7)
        # '\x07' is 0b111: three set bits.
        self.db['data'] = '\x07'
        self.assertEqual(self.db.bitcount('data'), 3)
        del self.db['name']
        self.assertIsNone(self.db.get('name'))
        self.assertRaises(KeyError, lambda: self.db['name'])
        self.assertFalse('name' in self.db)
        self.assertTrue('k1' in self.db)
    def test_simple_hash(self):
        h = self.db.Hash('hash_obj')
        h.clear()
        h['k1'] = 'v1'
        h.update({'k2': 'v2', 'k3': 'v3'})
        self.assertEqual(h.as_dict(), {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
        self.assertEqual(h['k2'], 'v2')
        self.assertIsNone(h['k4'])
        self.assertTrue('k2' in h)
        self.assertFalse('k4' in h)
        # Deleting a missing field is a silent no-op.
        del h['k2']
        del h['k4']
        self.assertEqual(sorted(h.keys()), ['k1', 'k3'])
        self.assertEqual(sorted(h.values()), ['v1', 'v3'])
        self.assertEqual(len(h), 2)
        # Multi-get returns None for missing fields.
        self.assertEqual(h['k1', 'k2', 'k3'], ['v1', None, 'v3'])
        self.assertEqual(h.incr('counter'), 1)
        self.assertEqual(h.incr('counter', 3), 4)
    def test_simple_list(self):
        l = self.db.List('list_obj')
        l.clear()
        l.append('charlie')
        l.extend(['mickey', 'huey', 'zaizee'])
        self.assertEqual(l[1], 'mickey')
        self.assertEqual(l[-1], 'zaizee')
        self.assertEqual(l[:], ['charlie', 'mickey', 'huey', 'zaizee'])
        self.assertEqual(l[1:-1], ['mickey', 'huey'])
        self.assertEqual(l[2:], ['huey', 'zaizee'])
        self.assertEqual(len(l), 4)
        l.prepend('nuggie')
        l.popright()
        l.popright()
        self.assertEqual([item for item in l], ['nuggie', 'charlie', 'mickey'])
        self.assertEqual(l.popleft(), 'nuggie')
        self.assertEqual(l.popright(), 'mickey')
        l.clear()
        self.assertEqual(list(l), [])
        self.assertIsNone(l.popleft())
    def test_simple_set(self):
        s = self.db.Set('set_obj')
        s.clear()
        self.assertTrue(s.add('charlie'))
        self.assertFalse(s.add('charlie'))
        s.add('huey', 'mickey')
        self.assertEqual(len(s), 3)
        self.assertTrue('huey' in s)
        self.assertFalse('xx' in s)
        self.assertEqual(s.members(), set(['charlie', 'huey', 'mickey']))
        del s['huey']
        del s['xx']
        self.assertEqual(s.members(), set(['charlie', 'mickey']))
        # Set algebra: difference, union, intersection and the *store
        # variants (results come back as strings).
        n1 = self.db.Set('n1')
        n2 = self.db.Set('n2')
        n1.add(*range(5))
        n2.add(*range(3, 7))
        self.assertEqual(n1 - n2, set(['0', '1', '2']))
        self.assertEqual(n2 - n1, set(['5', '6']))
        self.assertEqual(n1 | n2, set(map(str, range(7))))
        self.assertEqual(n1 & n2, set(['3', '4']))
        n1.diffstore('ndiff', n2)
        ndiff = self.db.Set('ndiff')
        self.assertEqual(ndiff.members(), set(['0', '1', '2']))
        n1.interstore('ninter', n2)
        ninter = self.db.Set('ninter')
        self.assertEqual(ninter.members(), set(['3', '4']))
    def test_zset(self):
        zs = self.db.ZSet('zset_obj')
        zs.clear()
        # add() takes alternating member, score pairs.
        zs.add('charlie', 31, 'huey', 3, 'mickey', 6, 'zaizee', 3, 'nuggie', 0)
        self.assertEqual(zs[1], ['huey'])
        self.assertEqual(zs[1, True], [('huey', 3)])
        self.assertEqual(
            zs[:],
            ['nuggie', 'huey', 'zaizee', 'mickey', 'charlie'])
        self.assertEqual(zs[:2], ['nuggie', 'huey'])
        self.assertEqual(zs[1:3, True], [('huey', 3), ('zaizee', 3)])
        self.assertEqual(zs['huey':'charlie'], ['huey', 'zaizee', 'mickey'])
        self.assertEqual(len(zs), 5)
        self.assertTrue('charlie' in zs)
        self.assertFalse('xx' in zs)
        self.assertEqual(zs.score('charlie'), 31.)
        self.assertIsNone(zs.score('xx'))
        self.assertEqual(zs.rank('mickey'), 3)
        self.assertIsNone(zs.rank('xx'))
        self.assertEqual(zs.count(0, 5), 3)
        self.assertEqual(zs.count(6, 31), 2)
        self.assertEqual(zs.count(6, 30), 1)
        zs.incr('mickey')
        self.assertEqual(zs.score('mickey'), 7.)
        self.assertEqual(zs.range_by_score(0, 5), ['nuggie', 'huey', 'zaizee'])
        zs.remove('nuggie')
        self.assertEqual(zs[:2], ['huey', 'zaizee'])
        del zs['mickey']
        self.assertEqual(zs[:], ['huey', 'zaizee', 'charlie'])
        self.assertEqual(len(zs), 3)
        zs.remove_by_score(2, 4)
        self.assertEqual(zs[:], ['charlie'])
        zs.add('huey', 4, 'zaizee', 3, 'beanie', 8)
        zs.remove_by_rank(2)
        self.assertEqual(zs[:], ['zaizee', 'huey', 'charlie'])
        # Slicing from a non-member raises KeyError.
        self.assertRaises(KeyError, lambda: zs['xx':])
        z1 = self.db.ZSet('z1')
        z2 = self.db.ZSet('z2')
        z1.add(1, 1, 2, 2, 3, 3)
        z2.add(3, 3, 4, 4, 5, 5)
        z3 = z1.unionstore('z3', z2)
        self.assertEqual(z3[:], ['1', '2', '4', '5', '3'])
        z3 = z1.interstore('z3', z2)
        self.assertEqual(z3[:], ['3'])
    def test_models(self):
        class User(Model):
            __database__ = self.db
            username = TextField(primary_key=True)
            value = IntegerField(index=True)
        for i, username in enumerate(('charlie', 'huey', 'zaizee', 'mickey')):
            User.create(username=username, value=i)
        charlie = User.load('charlie')
        self.assertEqual(charlie.username, 'charlie')
        self.assertEqual(charlie.value, 0)
        query = User.query(
            (User.username == 'charlie') |
            (User.username == 'huey'))
        users = [user.username for user in query]
        self.assertEqual(sorted(users), ['charlie', 'huey'])
    def test_cache(self):
        cache = self.db.cache(name='test-cache')
        @cache.cached(timeout=10)
        def now(seed=None):
            return datetime.datetime.now()
        # Same arguments hit the cache; different arguments miss.
        dt1 = now()
        self.assertEqual(now(), dt1)
        self.assertNotEqual(now(1), dt1)
        self.assertEqual(now(1), now(1))
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,610
|
coleifer/walrus
|
refs/heads/master
|
/setup.py
|
"""Packaging script for walrus."""
import io
import os

from setuptools import find_packages
from setuptools import setup

# Read the long description from README.md when present (i.e. in a source
# checkout); fall back to an empty string otherwise.
cur_dir = os.path.dirname(__file__)
readme = os.path.join(cur_dir, 'README.md')
if os.path.exists(readme):
    # io.open with an explicit encoding: the README may contain non-ASCII
    # characters and the platform default codec is not guaranteed to be
    # UTF-8 (io.open keeps this working on both Python 2.7 and 3.x, which
    # the classifiers below advertise).
    with io.open(readme, encoding='utf-8') as fh:
        long_description = fh.read()
else:
    long_description = ''

setup(
    name='walrus',
    # Single-source the version from the package itself.
    version=__import__('walrus').__version__,
    description='walrus',
    long_description=long_description,
    author='Charles Leifer',
    author_email='coleifer@gmail.com',
    url='http://github.com/coleifer/walrus/',
    install_requires=['redis>=3.0.0'],
    packages=find_packages(),
    package_data={
        'walrus': [
            'scripts/*',
            'stopwords.txt',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='walrus.tests',
)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,611
|
coleifer/walrus
|
refs/heads/master
|
/walrus/database.py
|
from functools import wraps
import glob
import os
import sys
import threading
import uuid
try:
from redis import Redis
from redis.exceptions import ConnectionError
from redis.exceptions import TimeoutError
except ImportError:
Redis = object
ConnectionError = TimeoutError = Exception
from walrus.autocomplete import Autocomplete
from walrus.cache import Cache
from walrus.containers import Array
from walrus.containers import BitField
from walrus.containers import BloomFilter
from walrus.containers import ConsumerGroup
from walrus.containers import Hash
from walrus.containers import HyperLogLog
from walrus.containers import List
from walrus.containers import Set
from walrus.containers import Stream
from walrus.containers import ZSet
from walrus.counter import Counter
from walrus.fts import Index
from walrus.graph import Graph
from walrus.lock import Lock
from walrus.rate_limit import RateLimit
from walrus.rate_limit import RateLimitLua
from walrus.streams import TimeSeries
class TransactionLocal(threading.local):
    """Thread-local stack of transaction pipelines.

    Because this subclasses ``threading.local``, every thread observes its
    own independent ``pipes`` stack, so concurrently-running transactions
    in different threads cannot interfere with one another.
    """
    def __init__(self, **kwargs):
        super(TransactionLocal, self).__init__(**kwargs)
        self.pipes = []  # Stack of active pipelines; innermost is last.

    @property
    def pipe(self):
        """Return the innermost active pipeline, or ``None`` when idle."""
        return self.pipes[-1] if self.pipes else None

    def commit(self):
        """Pop the innermost pipeline and execute its queued commands."""
        return self.pipes.pop().execute()

    def abort(self):
        """Pop the innermost pipeline, discarding its queued commands."""
        self.pipes.pop().reset()
class Database(Redis):
    """
    Redis-py client with some extras.
    """
    def __init__(self, *args, **kwargs):
        """
        :param args: Arbitrary positional arguments to pass to the
            base ``Redis`` instance.
        :param kwargs: Arbitrary keyword arguments to pass to the
            base ``Redis`` instance.
        :param str script_dir: Path to directory containing walrus
            scripts. Use "script_dir=False" to disable loading any scripts.
        """
        script_dir = kwargs.pop('script_dir', None)
        super(Database, self).__init__(*args, **kwargs)
        # Maps the raw TYPE reply (bytes) to the container factory that
        # wraps keys of that type; consulted by get_key().
        self.__mapping = {
            b'list': self.List,
            b'set': self.Set,
            b'zset': self.ZSet,
            b'hash': self.Hash}
        # Per-thread stack of transaction pipelines; the RLock guards the
        # push/pop bookkeeping in get_transaction()/commit/clear.
        self._transaction_local = TransactionLocal()
        self._transaction_lock = threading.RLock()
        # script_dir=False disables Lua script loading entirely; None means
        # "use the bundled scripts/ directory".
        if script_dir is not False:
            self.init_scripts(script_dir=script_dir)

    def __bool__(self):
        """A Database instance is always truthy."""
        return True  # Avoid falling back to __len__().

    def xsetid(self, name, id):
        """
        Set the last ID of the given stream.
        :param name: stream identifier
        :param id: new value for last ID
        """
        return self.execute_command('XSETID', name, id) == b'OK'

    def xpending_summary(self, key, group):
        """
        Pending message summary report.
        :param key: stream identifier
        :param group: consumer group name
        :returns: dictionary of information about pending messages
        """
        return self.xpending(key, group)

    def get_transaction(self):
        """
        Begin a new transaction for the current thread by pushing a fresh
        pipeline onto the thread-local stack.
        :returns: the newly-created Pipeline.
        """
        with self._transaction_lock:
            local = self._transaction_local
            local.pipes.append(self.pipeline())
            return local.pipe

    def commit_transaction(self):
        """
        Commit the currently active transaction (Pipeline). If no
        transaction is active in the current thread, an exception
        will be raised.
        :returns: The return value of executing the Pipeline.
        :raises: ``ValueError`` if no transaction is active.
        """
        with self._transaction_lock:
            local = self._transaction_local
            if not local.pipes:
                raise ValueError('No transaction is currently active.')
            return local.commit()

    def clear_transaction(self):
        """
        Clear the currently active transaction (if exists). If the
        transaction stack is not empty, then a new pipeline will
        be initialized.
        :returns: No return value.
        :raises: ``ValueError`` if no transaction is active.
        """
        with self._transaction_lock:
            local = self._transaction_local
            if not local.pipes:
                raise ValueError('No transaction is currently active.')
            local.abort()

    def atomic(self):
        """
        Return an ``_Atomic`` context-manager wrapping a transaction.
        """
        return _Atomic(self)

    def init_scripts(self, script_dir=None):
        """
        Load and register every ``*.lua`` script found in ``script_dir``
        (defaults to the bundled ``scripts/`` directory). Scripts are
        stored in ``self._scripts`` keyed by filename without extension.
        """
        self._scripts = {}
        if not script_dir:
            script_dir = os.path.join(os.path.dirname(__file__), 'scripts')
        for filename in glob.glob(os.path.join(script_dir, '*.lua')):
            with open(filename, 'r') as fh:
                script_obj = self.register_script(fh.read())
            script_name = os.path.splitext(os.path.basename(filename))[0]
            self._scripts[script_name] = script_obj

    def run_script(self, script_name, keys=None, args=None):
        """
        Execute a walrus script with the given arguments.
        :param script_name: The base name of the script to execute.
        :param list keys: Keys referenced by the script.
        :param list args: Arguments passed in to the script.
        :returns: Return value of script.
        .. note:: Redis scripts require two parameters, ``keys``
            and ``args``, which are referenced in lua as ``KEYS``
            and ``ARGV``.
        """
        return self._scripts[script_name](keys, args)

    def get_temp_key(self):
        """
        Generate a temporary random key using UUID4.
        """
        return 'temp.%s' % uuid.uuid4()

    def __iter__(self):
        """
        Iterate over the keys of the selected database.
        """
        return iter(self.scan_iter())

    def __len__(self):
        """Return the total number of keys in the database (DBSIZE)."""
        return self.dbsize()

    def search(self, pattern):
        """
        Search the keyspace of the selected database using the
        given search pattern.
        :param str pattern: Search pattern using wildcards.
        :returns: Iterator that yields matching keys.
        """
        return self.scan_iter(pattern)

    def get_key(self, key):
        """
        Return a rich object for the given key. For instance, if
        a hash key is requested, then a :py:class:`Hash` will be
        returned.
        Note: only works for Hash, List, Set and ZSet.
        :param str key: Key to retrieve.
        :returns: A hash, set, list, zset or array.
        """
        # Fall back to a plain GET (__getitem__) for unrecognized types.
        return self.__mapping.get(self.type(key), self.__getitem__)(key)

    def hash_exists(self, key):
        """Return whether the given key exists (delegates to EXISTS)."""
        return self.exists(key)

    def autocomplete(self, namespace='autocomplete', **kwargs):
        """
        Create an :py:class:`Autocomplete` helper using the given key
        namespace. Extra keyword arguments are passed to Autocomplete.
        """
        return Autocomplete(self, namespace, **kwargs)

    def cache(self, name='cache', default_timeout=3600):
        """
        Create a :py:class:`Cache` instance.
        :param str name: The name used to prefix keys used to
            store cached data.
        :param int default_timeout: The default key expiry.
        :returns: A :py:class:`Cache` instance.
        """
        return Cache(self, name=name, default_timeout=default_timeout)

    def counter(self, name):
        """
        Create a :py:class:`Counter` instance.
        :param str name: The name used to store the counter's value.
        :returns: A :py:class:`Counter` instance.
        """
        return Counter(self, name=name)

    def graph(self, name, *args, **kwargs):
        """
        Creates a :py:class:`Graph` instance.
        :param str name: The namespace for the graph metadata.
        :returns: a :py:class:`Graph` instance.
        """
        return Graph(self, name, *args, **kwargs)

    def lock(self, name, ttl=None, lock_id=None):
        """
        Create a named :py:class:`Lock` instance. The lock implements
        an API similar to the standard library's ``threading.Lock``,
        and can also be used as a context manager or decorator.
        :param str name: The name of the lock.
        :param int ttl: The time-to-live for the lock in milliseconds
            (optional). If the ttl is ``None`` then the lock will not
            expire.
        :param str lock_id: Optional identifier for the lock instance.
        """
        return Lock(self, name, ttl, lock_id)

    def rate_limit(self, name, limit=5, per=60, debug=False):
        """
        Rate limit implementation. Allows up to `limit` of events every `per`
        seconds.
        See :ref:`rate-limit` for more information.
        """
        return RateLimit(self, name, limit, per, debug)

    def rate_limit_lua(self, name, limit=5, per=60, debug=False):
        """
        Rate limit implementation. Allows up to `limit` of events every `per`
        seconds. Uses a Lua script for atomicity.
        See :ref:`rate-limit` for more information.
        """
        return RateLimitLua(self, name, limit, per, debug)

    def Index(self, name, **options):
        """
        Create a :py:class:`Index` full-text search index with the given
        name and options.
        """
        return Index(self, name, **options)

    def List(self, key):
        """
        Create a :py:class:`List` instance wrapping the given key.
        """
        return List(self, key)

    def Hash(self, key):
        """
        Create a :py:class:`Hash` instance wrapping the given key.
        """
        return Hash(self, key)

    def Set(self, key):
        """
        Create a :py:class:`Set` instance wrapping the given key.
        """
        return Set(self, key)

    def ZSet(self, key):
        """
        Create a :py:class:`ZSet` instance wrapping the given key.
        """
        return ZSet(self, key)

    def HyperLogLog(self, key):
        """
        Create a :py:class:`HyperLogLog` instance wrapping the given
        key.
        """
        return HyperLogLog(self, key)

    def Array(self, key):
        """
        Create a :py:class:`Array` instance wrapping the given key.
        """
        return Array(self, key)

    def Stream(self, key):
        """
        Create a :py:class:`Stream` instance wrapping the given key.
        """
        return Stream(self, key)

    def consumer_group(self, group, keys, consumer=None):
        """
        Create a named :py:class:`ConsumerGroup` instance for the given key(s).
        :param group: name of consumer group
        :param keys: stream identifier(s) to monitor. May be a single stream
            key, a list of stream keys, or a key-to-minimum id mapping. The
            minimum id for each stream should be considered an exclusive
            lower-bound. The '$' value can also be used to only read values
            added *after* our command started blocking.
        :param consumer: name for consumer within group
        :returns: a :py:class:`ConsumerGroup` instance
        """
        return ConsumerGroup(self, group, keys, consumer=consumer)

    def time_series(self, group, keys, consumer=None):
        """
        Create a named :py:class:`TimeSeries` consumer-group for the
        given key(s). TimeSeries objects are almost identical to
        :py:class:`ConsumerGroup` except they offer a higher level of
        abstraction and read/write message ids as datetimes.
        :param group: name of consumer group
        :param keys: stream identifier(s) to monitor. May be a single stream
            key, a list of stream keys, or a key-to-minimum id mapping. The
            minimum id for each stream should be considered an exclusive
            lower-bound. The '$' value can also be used to only read values
            added *after* our command started blocking.
        :param consumer: name for consumer within group
        :returns: a :py:class:`TimeSeries` instance
        """
        return TimeSeries(self, group, keys, consumer=consumer)

    def bit_field(self, key):
        """
        Container for working with the Redis BITFIELD command.
        :returns: a :py:class:`BitField` instance.
        """
        return BitField(self, key)

    def bloom_filter(self, key, size=64 * 1024):
        """
        Create a :py:class:`BloomFilter` container type.
        Bloom-filters are probabilistic data-structures that are used to answer
        the question: "is X a member of set S?" It is possible to receive a
        false positive, but impossible to receive a false negative (in other
        words, if the bloom filter contains a value, it will never erroneously
        report that it does *not* contain such a value). The accuracy of the
        bloom-filter and the likelihood of a false positive can be reduced by
        increasing the size of the bloomfilter. The default size is 64KB (or
        524,288 bits).
        """
        return BloomFilter(self, key, size)

    def cas(self, key, value, new_value):
        """
        Perform an atomic compare-and-set on the value in "key", using a prefix
        match on the provided value.
        """
        return self.run_script('cas', keys=[key], args=[value, new_value])

    def listener(self, channels=None, patterns=None, is_async=False):
        """
        Decorator for wrapping functions used to listen for Redis
        pub-sub messages.
        The listener will listen until the decorated function
        raises a ``StopIteration`` exception.
        :param list channels: Channels to listen on.
        :param list patterns: Patterns to match.
        :param bool is_async: Whether to start the listener in a
            separate thread.
        """
        def decorator(fn):
            _channels = channels or []
            _patterns = patterns or []
            @wraps(fn)
            def inner():
                pubsub = self.pubsub()
                def listen():
                    for channel in _channels:
                        pubsub.subscribe(channel)
                    for pattern in _patterns:
                        pubsub.psubscribe(pattern)
                    # Dispatch each pub-sub message to the wrapped function
                    # until it signals completion by raising StopIteration.
                    for data_dict in pubsub.listen():
                        try:
                            ret = fn(**data_dict)
                        except StopIteration:
                            pubsub.close()
                            break
                if is_async:
                    worker = threading.Thread(target=listen)
                    worker.start()
                    return worker
                else:
                    listen()
            return inner
        return decorator

    def stream_log(self, callback, connection_id='monitor'):
        """
        Stream Redis activity one line at a time to the given
        callback.
        :param callback: A function that accepts a single argument,
            the Redis command.
        """
        # Dedicated connection: MONITOR takes over the connection, so it
        # cannot come from the regular command pool usage pattern.
        conn = self.connection_pool.get_connection(connection_id, None)
        conn.send_command('monitor')
        # Keep streaming until the callback returns a falsey value.
        while callback(conn.read_response()):
            pass
class _Atomic(object):
def __init__(self, db):
self.db = db
@property
def pipe(self):
return self.db._transaction_local.pipe
def __enter__(self):
self.db.get_transaction()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.clear(False)
else:
self.commit(False)
def commit(self, begin_new=True):
ret = self.db.commit_transaction()
if begin_new:
self.db.get_transaction()
return ret
def clear(self, begin_new=True):
self.db.clear_transaction()
if begin_new:
self.db.get_transaction()
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,612
|
coleifer/walrus
|
refs/heads/master
|
/walrus/graph.py
|
# Hexastore.
import itertools
import json
from walrus.utils import decode
class _VariableGenerator(object):
    """Factory producing :py:class:`Variable` placeholders for searches.

    Exposed as ``Graph.v``, supporting both attribute access
    (``graph.v.X``) and call syntax (``graph.v('X')``).
    """
    def __getattr__(self, name):
        # graph.v.X -> Variable('X')
        return Variable(name)
    def __call__(self, name):
        # graph.v('X') -> Variable('X')
        return Variable(name)
class Graph(object):
    """
    Simple hexastore built using Redis ZSets. The basic idea is that we have
    a collection of relationships of the form subject-predicate-object. For
    example:
    * charlie -- friends -- huey
    * charlie -- lives -- Kansas
    * huey -- lives -- Kansas
    We might wish to ask questions of our data-store like "which of charlie's
    friends live in Kansas?" To do this we will store every permutation of
    the S-P-O triples, then we can do efficient queries using the parts of
    the relationship we know:
    * query the "object" portion of the "charlie -- friends" subject
      and predicate.
    * for each object returned, turn it into the subject of a second query
      whose predicate is "lives" and whose object is "Kansas"
    So we would return the subjects that satisfy the following expression::
        ("charlie -- friends") -- lives -- Kansas.
    To accomplish this in Python we could write:
    .. code-block:: python
        db = Database()
        graph = db.graph('people')
        # Store my friends.
        graph.store_many(
            ('charlie', 'friends', 'huey'),
            ('charlie', 'friends', 'zaizee'),
            ('charlie', 'friends', 'nuggie'))
        # Store where people live.
        graph.store_many(
            ('huey', 'lives', 'Kansas'),
            ('zaizee', 'lives', 'Missouri'),
            ('nuggie', 'lives', 'Kansas'),
            ('mickey', 'lives', 'Kansas'))
        # Perform our search. We will use a variable (X) to indicate the
        # value we're interested in.
        X = graph.v.X  # Create a variable placeholder.
        # In the first clause we indicate we are searching for my friends.
        # In the second clause, we only want those friends who also live in
        # Kansas.
        results = graph.search(
            {'s': 'charlie', 'p': 'friends', 'o': X},
            {'s': X, 'p': 'lives', 'o': 'Kansas'})
        print(results)
        # Prints: {'X': {'huey', 'nuggie'}}
    See: http://redis.io/topics/indexes#representing-and-querying-graphs-using-an-hexastore
    """
    def __init__(self, walrus, namespace):
        """
        :param walrus: a walrus ``Database`` instance.
        :param namespace: key under which the graph's ZSet is stored.
        """
        self.walrus = walrus
        self.namespace = namespace
        # NOTE: this instance attribute shadows the v() method defined
        # below; on instances, ``graph.v`` resolves to the generator.
        self.v = _VariableGenerator()
        self._z = self.walrus.ZSet(self.namespace)

    def store(self, s, p, o):
        """
        Store a subject-predicate-object triple in the database.
        """
        with self.walrus.atomic():
            for key in self.keys_for_values(s, p, o):
                self._z[key] = 0

    def store_many(self, items):
        """
        Store multiple subject-predicate-object triples in the database.
        :param items: A list of (subj, pred, obj) 3-tuples.
        """
        with self.walrus.atomic():
            for item in items:
                self.store(*item)

    def delete(self, s, p, o):
        """Remove the given subj-pred-obj triple from the database."""
        with self.walrus.atomic():
            for key in self.keys_for_values(s, p, o):
                del self._z[key]

    def keys_for_values(self, s, p, o):
        """
        Yield the three permuted index keys (spo, pos, osp) under which
        the triple is stored, e.g. ``spo::<s>::<p>::<o>``.
        """
        parts = [
            ('spo', s, p, o),
            ('pos', p, o, s),
            ('osp', o, s, p)]
        for part in parts:
            yield '::'.join(part)

    def keys_for_query(self, s=None, p=None, o=None):
        """
        Build the lexicographic key range covering all triples matching
        the known parts of the query.
        :returns: a ``(start, stop)`` pair of range bounds, or
            ``(key, None)`` when all three parts are known and an
            exact-membership test suffices.
        """
        parts = []
        key = lambda parts: '::'.join(parts)
        if s and p and o:
            parts.extend(('spo', s, p, o))
            return key(parts), None
        elif s and p:
            parts.extend(('spo', s, p))
        elif s and o:
            # Keys are stored as "osp::<o>::<s>::<p>" (see
            # keys_for_values), so the object must precede the subject in
            # the prefix. Bug fix: the subject was previously placed
            # first, which could never match any stored key.
            parts.extend(('osp', o, s))
        elif p and o:
            parts.extend(('pos', p, o))
        elif s:
            parts.extend(('spo', s))
        elif p:
            parts.extend(('pos', p))
        elif o:
            parts.extend(('osp', o))
        else:
            parts.extend(('spo',))
        # '\xff' sorts above any expected value byte, closing the range.
        return key(parts + ['']), key(parts + ['\xff'])

    def query(self, s=None, p=None, o=None):
        """
        Return all triples that satisfy the given expression. You may specify
        all or none of the fields (s, p, and o). For instance, if I wanted
        to query for all the people who live in Kansas, I might write:
        .. code-block:: python
            for triple in graph.query(p='lives', o='Kansas'):
                print(triple['s'], 'lives in Kansas!')
        """
        start, end = self.keys_for_query(s, p, o)
        if end is None:
            # Fully-specified triple: a simple membership test.
            if start in self._z:
                yield {'s': s, 'p': p, 'o': o}
        else:
            for key in self._z.range_by_lex('[' + start, '[' + end):
                # Key layout is "<perm>::<a>::<b>::<c>" where <perm> (e.g.
                # 'pos') names which field each of a/b/c corresponds to;
                # zipping over its characters rebuilds the s/p/o mapping.
                keys, p1, p2, p3 = decode(key).split('::')
                yield dict(zip(keys, (p1, p2, p3)))

    def v(self, name):
        """
        Create a named variable, used to construct multi-clause queries with
        the :py:meth:`Graph.search` method.
        """
        # NOTE: shadowed on instances by the _VariableGenerator assigned
        # in __init__, which provides equivalent behavior.
        return Variable(name)

    def search(self, *conditions):
        """
        Given a set of conditions, return all values that satisfy the
        conditions for a given set of variables.
        For example, suppose I wanted to find all of my friends who live in
        Kansas:
        .. code-block:: python
            X = graph.v.X
            results = graph.search(
                {'s': 'charlie', 'p': 'friends', 'o': X},
                {'s': X, 'p': 'lives', 'o': 'Kansas'})
        The return value consists of a dictionary keyed by variable, whose
        values are ``set`` objects containing the values that satisfy the
        query clauses, e.g.:
        .. code-block:: python
            print(results)
            # Result has one key, for our "X" variable. The value is the set
            # of my friends that live in Kansas.
            # {'X': {'huey', 'nuggie'}}
            # We can assume the following triples exist:
            # ('charlie', 'friends', 'huey')
            # ('charlie', 'friends', 'nuggie')
            # ('huey', 'lives', 'Kansas')
            # ('nuggie', 'lives', 'Kansas')
        """
        results = {}
        for condition in conditions:
            # Conditions may be 3-tuples or {'s': ..., 'p': ..., 'o': ...}
            # dicts; normalize to a dict we can mutate.
            if isinstance(condition, tuple):
                query = dict(zip('spo', condition))
            else:
                query = condition.copy()
            materialized = {}
            targets = []
            # Pull the Variable placeholders out of the query, leaving
            # only the concrete parts to match against the store.
            for part in ('s', 'p', 'o'):
                if isinstance(query[part], Variable):
                    variable = query.pop(part)
                    materialized[part] = set()
                    targets.append((variable, part))
            # Potentially rather than popping all the variables, we could use
            # the result values from a previous condition and do O(results)
            # loops looking for a single variable.
            for result in self.query(**query):
                # A candidate only counts if it is consistent with the
                # values already established for each variable.
                ok = True
                for var, part in targets:
                    if var in results and result[part] not in results[var]:
                        ok = False
                        break
                if ok:
                    for var, part in targets:
                        materialized[part].add(result[part])
            # Intersect with prior clauses so each variable's set only
            # contains values satisfying every condition seen so far.
            for var, part in targets:
                if var in results:
                    results[var] &= materialized[part]
                else:
                    results[var] = materialized[part]
        return dict((var.name, vals) for (var, vals) in results.items())
class Variable(object):
    """Named placeholder used in multi-clause :py:meth:`Graph.search`."""
    __slots__ = ['name']

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<Variable: %s>' % self.name
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,613
|
coleifer/walrus
|
refs/heads/master
|
/walrus/search/__init__.py
|
import re
from walrus.search.metaphone import dm as double_metaphone
from walrus.search.porter import PorterStemmer
from walrus.utils import decode
from walrus.utils import load_stopwords
class Tokenizer(object):
    """Split text into normalized tokens for full-text indexing/search.

    :param bool stemmer: apply the Porter stemmer to each token.
    :param bool metaphone: apply the double-metaphone algorithm to each
        token (tolerates misspellings at the cost of precision).
    :param str stopwords_file: name of a newline-delimited stopwords file.
    :param int min_word_length: discard tokens shorter than this length.
    """
    def __init__(self, stemmer=True, metaphone=False,
                 stopwords_file='stopwords.txt', min_word_length=None):
        self._use_stemmer = stemmer
        self._use_metaphone = metaphone
        self._min_word_length = min_word_length
        # Punctuation and symbols act as token separators (replaced with
        # spaces before splitting).
        self._symbols_re = re.compile(
            '[\.,;:"\'\\/!@#\$%\?\*\(\)\-=+\[\]\{\}_]')
        self._stopwords = self._load_stopwords(stopwords_file)

    def _load_stopwords(self, filename):
        # Returns a set of stopwords, or None when none could be loaded.
        # load_stopwords presumably returns the raw newline-delimited file
        # contents (or a falsey value when missing) -- TODO confirm
        # against walrus.utils.load_stopwords.
        stopwords = load_stopwords(filename)
        if stopwords:
            return set(stopwords.splitlines())

    def split_phrase(self, phrase):
        """Split the document or search query into tokens."""
        return self._symbols_re.sub(' ', phrase).split()

    def stem(self, words):
        """
        Use the porter stemmer to generate consistent forms of
        words, e.g.::
            from walrus.search.utils import PorterStemmer
            stemmer = PorterStemmer()
            for word in ['faith', 'faiths', 'faithful']:
                print s.stem(word, 0, len(word) - 1)
            # Prints:
            # faith
            # faith
            # faith
        """
        stemmer = PorterStemmer()
        _stem = stemmer.stem
        for word in words:
            # stem() takes start/end indices into the word buffer.
            yield _stem(word, 0, len(word) - 1)

    def metaphone(self, words):
        """
        Apply the double metaphone algorithm to the given words.
        Using metaphone allows the search index to tolerate
        misspellings and small typos.
        Example::
            >>> from walrus.search.metaphone import dm as metaphone
            >>> print metaphone('walrus')
            ('ALRS', 'FLRS')
            >>> print metaphone('python')
            ('P0N', 'PTN')
            >>> print metaphone('pithonn')
            ('P0N', 'PTN')
        """
        for word in words:
            # r counts the encodings emitted for this word; if the
            # algorithm produces none, fall back to the raw word so it is
            # still indexed.
            r = 0
            for w in double_metaphone(word):
                if w:
                    w = w.strip()
                    if w:
                        r += 1
                        yield w
            if not r:
                yield word

    def tokenize(self, value):
        """
        Split the incoming value into tokens and process each token,
        optionally stemming or running metaphone.
        :returns: A ``dict`` mapping token to score. The score is
            based on the relative frequency of the word in the
            document.
        """
        words = self.split_phrase(decode(value).lower())
        # Filter stopwords and (optionally) very short words before
        # computing frequencies.
        if self._stopwords:
            words = [w for w in words if w not in self._stopwords]
        if self._min_word_length:
            words = [w for w in words if len(w) >= self._min_word_length]
        fraction = 1. / (len(words) + 1)  # Prevent division by zero.
        # Apply optional transformations.
        if self._use_stemmer:
            words = self.stem(words)
        if self._use_metaphone:
            words = self.metaphone(words)
        # Accumulate a relative-frequency score per (possibly transformed)
        # token; repeated tokens sum their fractions.
        scores = {}
        for word in words:
            scores.setdefault(word, 0)
            scores[word] += fraction
        return scores
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,614
|
coleifer/walrus
|
refs/heads/master
|
/walrus/counter.py
|
class Counter(object):
    """
    Simple counter.
    """
    def __init__(self, database, name):
        """
        :param database: A walrus ``Database`` instance.
        :param str name: The name for the counter.
        """
        self.database = database
        self.name = name
        self.key = 'counter:%s' % self.name
        # Seed the backing key with zero the first time this counter
        # name is used.
        if self.key not in self.database:
            self.database[self.key] = 0
    def incr(self, incr_by=1):
        """Increment and return the new value."""
        return self.database.incr(self.key, incr_by)
    def decr(self, decr_by=1):
        """Decrement and return the new value."""
        return self.database.decr(self.key, decr_by)
    def value(self):
        """Return the current counter value as an int."""
        return int(self.database[self.key])
    def _op(self, method, other):
        # Operands may be plain ints or other Counter instances.
        if isinstance(other, Counter):
            other = other.value()
        if isinstance(other, int):
            method(other)
            return self
        raise TypeError('Cannot add %s, not an integer.' % other)
    def __iadd__(self, other):
        return self._op(self.incr, other)
    def __isub__(self, other):
        return self._op(self.decr, other)
    # Plain + and - behave like their in-place counterparts: they mutate
    # the stored value and evaluate to this counter.
    __add__ = __iadd__
    __sub__ = __isub__
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,615
|
coleifer/walrus
|
refs/heads/master
|
/examples/work_queue.py
|
"""
Simple multi-process task queue using Redis Streams.
http://charlesleifer.com/blog/multi-process-task-queue-using-redis-streams/
"""
from collections import namedtuple
from functools import wraps
import datetime
import multiprocessing
import pickle
import time
# At the time of writing, the standard redis-py client does not implement
# stream/consumer-group commands. We'll use "walrus", which extends the client
# from redis-py to provide stream support and high-level, Pythonic containers.
# More info: https://github.com/coleifer/walrus
from walrus import Walrus
# Lightweight wrapper for storing exceptions that occurred executing a task.
# TaskResultWrapper checks for this type when unwrapping a result in order
# to distinguish a failed task from an ordinary return value.
TaskError = namedtuple('TaskError', ('error',))
class TaskQueue(object):
    """A minimal task queue built on Redis streams and consumer groups.

    Register task functions with the :meth:`task` decorator. Calling the
    decorated function enqueues a message on the stream and returns a
    ``TaskResultWrapper`` which can be called to fetch the return value.
    """
    def __init__(self, client, stream_key='tasks'):
        """
        :param client: a walrus ``Database`` (Redis client with streams).
        :param str stream_key: key-name of the stream holding task messages.
        """
        self.client = client  # our Redis client.
        self.stream_key = stream_key
        # We'll also create a consumer group (whose name is derived from the
        # stream key). Consumer groups are needed to provide message delivery
        # tracking and to ensure that our messages are distributed among the
        # worker processes.
        self.name = stream_key + '-cg'
        self.consumer_group = self.client.consumer_group(self.name, stream_key)
        self.result_key = stream_key + '.results'  # Store results in a Hash.
        # Obtain a reference to the stream within the context of the
        # consumer group.
        self.stream = getattr(self.consumer_group, stream_key)
        self.signal = multiprocessing.Event()  # Used to signal shutdown.
        self.signal.set()  # Indicate the server is not running.
        # Create the stream and consumer group (if they do not exist).
        self.consumer_group.create()
        self._running = False
        self._tasks = {}  # Lookup table for mapping function name -> impl.
    def task(self, fn):
        """Decorator registering ``fn`` as a task function."""
        self._tasks[fn.__name__] = fn  # Store function in lookup table.
        @wraps(fn)
        def inner(*args, **kwargs):
            # When the decorated function is called, a message is added to the
            # stream and a wrapper class is returned, which provides access to
            # the task result.
            message = self.serialize_message(fn, args, kwargs)
            # Our message format is very simple -- just a "task" key and a blob
            # of pickled data. You could extend this to provide additional
            # data, such as the source of the event, etc, etc.
            task_id = self.stream.add({'task': message})
            return TaskResultWrapper(self, task_id)
        return inner
    def deserialize_message(self, message):
        """Unpickle ``message`` into ``(task_fn, args, kwargs)``.

        :raises Exception: if the named task was never registered.
        """
        task_name, args, kwargs = pickle.loads(message)
        if task_name not in self._tasks:
            # Bug fix: the task name was previously never interpolated
            # into the error message (missing "% task_name").
            raise Exception('task "%s" not registered with queue.' %
                            task_name)
        return self._tasks[task_name], args, kwargs
    def serialize_message(self, task, args=None, kwargs=None):
        """Pickle the invocation; the task is referenced by name only."""
        return pickle.dumps((task.__name__, args, kwargs))
    def store_result(self, task_id, result):
        # API for storing the return value from a task. This is called by the
        # workers after the execution of a task. Note that ``None`` results
        # are not stored.
        if result is not None:
            self.client.hset(self.result_key, task_id, pickle.dumps(result))
    def get_result(self, task_id):
        # Obtain the return value of a finished task. This API is used by the
        # TaskResultWrapper class. We'll use a pipeline to ensure that reading
        # and popping the result is an atomic operation.
        pipe = self.client.pipeline()
        pipe.hexists(self.result_key, task_id)
        pipe.hget(self.result_key, task_id)
        pipe.hdel(self.result_key, task_id)
        exists, val, n = pipe.execute()
        return pickle.loads(val) if exists else None
    def run(self, nworkers=1):
        """Start ``nworkers`` worker processes consuming the stream."""
        if not self.signal.is_set():
            raise Exception('workers are already running')
        # Start a pool of worker processes.
        self._pool = []
        self.signal.clear()
        for i in range(nworkers):
            worker = TaskWorker(self)
            worker_t = multiprocessing.Process(target=worker.run)
            worker_t.start()
            self._pool.append(worker_t)
    def shutdown(self):
        """Signal all workers to stop and wait for them to exit."""
        if self.signal.is_set():
            raise Exception('workers are not running')
        # Send the "shutdown" signal and wait for the worker processes
        # to exit.
        self.signal.set()
        for worker_t in self._pool:
            worker_t.join()
class TaskWorker(object):
    """Consumes task messages from the stream and executes them."""
    # Class-level counter used to derive a unique name per worker.
    _worker_idx = 0
    def __init__(self, queue):
        self.queue = queue
        self.consumer_group = queue.consumer_group
        # Bump the shared counter and derive a unique consumer name.
        TaskWorker._worker_idx += 1
        self.worker_name = 'worker-%s' % TaskWorker._worker_idx
    def run(self):
        """Poll the consumer group until the shutdown signal is set."""
        while not self.queue.signal.is_set():
            # Read at most one message, blocking up to 1000ms, identifying
            # ourselves by our unique consumer name.
            resp = self.consumer_group.read(1, 1000, self.worker_name)
            if resp is None:
                continue
            # resp is shaped: [(stream_key, [(message id, data), ...]), ...]
            for _stream_key, messages in resp:
                message_id, payload = messages[0]
                self.execute(message_id.decode('utf-8'), payload[b'task'])
    def execute(self, task_id, message):
        """Deserialize and run one task, then store the result."""
        task, args, kwargs = self.queue.deserialize_message(message)
        try:
            ret = task(*(args or ()), **(kwargs or {}))
        except Exception as exc:
            # Store a TaskError so the caller can tell the task failed
            # with an exception.
            self.queue.store_result(task_id, TaskError(str(exc)))
        else:
            # Store the result and acknowledge (ACK) the message.
            self.queue.store_result(task_id, ret)
            self.queue.stream.ack(task_id)
class TaskResultWrapper(object):
    """Handle for (lazily) retrieving the return value of a queued task."""
    def __init__(self, queue, task_id):
        self.queue = queue
        self.task_id = task_id
        self._result = None  # Cached result once fetched.
    def __call__(self, block=True, timeout=None):
        """Fetch (and cache) the task's return value.

        :param bool block: poll until the result becomes available.
        :param timeout: maximum number of seconds to poll for.
        :returns: the task's return value, or ``None`` if unavailable.
        :raises Exception: if the task itself raised while executing.
        """
        if self._result is None:
            # Bug fix: initialize ``result`` so a timeout <= 0 (loop body
            # never runs) no longer raises UnboundLocalError below.
            result = None
            if not block:
                result = self.queue.get_result(self.task_id)
            else:
                start = time.time()
                while timeout is None or (start + timeout) > time.time():
                    result = self.queue.get_result(self.task_id)
                    if result is None:
                        time.sleep(0.1)
                    else:
                        break
            if result is not None:
                self._result = result
        if self._result is not None and isinstance(self._result, TaskError):
            raise Exception('task failed: %s' % self._result.error)
        return self._result
if __name__ == '__main__':
    db = Walrus()  # roughly equivalent to db = Redis().
    queue = TaskQueue(db)
    @queue.task
    def sleep(n):
        print('going to sleep for %s seconds' % n)
        time.sleep(n)
        print('woke up after %s seconds' % n)
    @queue.task
    def fib(n):
        # Iteratively compute the (n+1)th Fibonacci number.
        a, b = 0, 1
        for _ in range(n):
            a, b = b, a + b
        return b
    # Start the queue with four worker processes.
    queue.run(4)
    # Send four "sleep" tasks.
    sleep(2)
    sleep(3)
    sleep(4)
    sleep(5)
    # Send four tasks to compute large fibonacci numbers. We will then print the
    # last 6 digits of each computed number (showing how result storage works):
    v100k = fib(100000)
    v200k = fib(200000)
    v300k = fib(300000)
    v400k = fib(400000)
    # Calling the result wrapper will block until its value becomes available.
    # (Message fix: these previously read "starts ends with".)
    print('100kth fib number ends with: %s' % str(v100k())[-6:])
    print('200kth fib number ends with: %s' % str(v200k())[-6:])
    print('300kth fib number ends with: %s' % str(v300k())[-6:])
    print('400kth fib number ends with: %s' % str(v400k())[-6:])
    # We can shutdown and restart the consumer.
    queue.shutdown()
    print('all workers have stopped.')
    queue.run(4)
    print('workers are running again.')
    # Enqueue another "sleep" task.
    sleep(2)
    # Calling shutdown now will block until the above sleep task
    # has finished, after which all workers will stop.
    queue.shutdown()
    print('done!')
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,616
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/cache.py
|
import datetime
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
# Cache instance shared by the decorated helpers and the tests below.
cache = db.cache(name='test.cache')
@cache.cached(timeout=60)
def now(seed=None):
    # ``seed`` only serves to vary the cache key -- each distinct seed
    # value gets its own cached timestamp (see test_cache_decorator).
    return datetime.datetime.now()
class Clock(object):
    """Helper exposing a cached-property timestamp for the tests below."""
    @cache.cached_property()
    def now(self):
        # Recomputed only after the cached value is invalidated via ``del``.
        return datetime.datetime.now()
class TestCache(WalrusTestCase):
    """Exercises the walrus ``Cache`` container and its decorators."""
    def test_cache_apis(self):
        # Nonexistant key returns None.
        self.assertTrue(cache.get('foo') is None)
        # Set key, value and expiration in seconds.
        self.assertEqual(cache.set('foo', 'bar', 60), True)
        self.assertEqual(cache.get('foo'), 'bar')
        self.assertEqual(cache.delete('foo'), 1)
        self.assertTrue(cache.get('foo') is None)
        # Deleting a missing key reports zero keys removed.
        self.assertEqual(cache.delete('foo'), 0)
    def test_cache_bulk_apis(self):
        # get_many on missing keys yields an empty dict.
        self.assertEqual(cache.get_many(['k1', 'k2']), {})
        data = {'k1': 'v1', 'k2': 'v2'}
        self.assertEqual(cache.set_many(data, 60), True)
        # Unknown keys ('kx') are silently omitted from the result.
        self.assertEqual(cache.get_many(['k1', 'kx', 'k2']), data)
        # delete_many returns the number of keys actually removed.
        self.assertEqual(cache.delete_many(['k1', 'kx', 'k2']), 2)
        self.assertEqual(cache.get_many(['k1', 'k2']), {})
        self.assertEqual(cache.delete_many(['k1', 'kx', 'k2']), 0)
    def test_cache_decorator(self):
        n0 = now()  # Each should have its own cache-key.
        n1 = now(1)
        n2 = now(2)
        self.assertTrue(n0 != n1 != n2)
        # Repeat calls with the same argument hit the cache.
        self.assertEqual(now(), n0)
        self.assertEqual(now(1), n1)
        self.assertEqual(now(2), n2)
        # bust() invalidates only the entry for the given argument(s).
        now.bust(1)
        self.assertNotEqual(now(1), n1)
        self.assertEqual(now(1), now(1))
    def test_cached_property(self):
        c = Clock()
        n1 = c.now
        n2 = c.now
        self.assertEqual(n1, n2)
        del c.now  # Property deleter busts the cache.
        n3 = c.now
        self.assertTrue(n1 != n3)
        self.assertEqual(c.now, n3)
    def test_cached_return_none(self):
        # A None return value is cached too: the counter shows the wrapped
        # function only runs once per distinct argument.
        S = {'count': 0}
        @cache.cached()
        def returns_none(arg):
            S['count'] += 1
        def assertMisses(arg, n):
            returns_none(arg)
            self.assertEqual(S['count'], n)
        for i in range(3):
            assertMisses('foo', 1)
        assertMisses('bar', 2)
        assertMisses('foo', 2)
    def test_cached_async(self):
        @cache.cache_async()
        def double_value(value):
            return value * 2
        # The decorated function returns a callable which yields the
        # (cached) result.
        res = double_value(3)
        self.assertEqual(res(), 6)
        self.assertEqual(res(), 6)
        self.assertEqual(double_value(3)(), 6)
        self.assertEqual(double_value(4)(), 8)
    def test_flush_empty_cache(self):
        cache.set('foo', 'bar', 10)
        # Keys are namespaced by the cache name.
        self.assertList(cache.keys(), ['test.cache:foo'])
        cache.flush()
        self.assertList(cache.keys(), [])
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,617
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/base.py
|
import os
import unittest
from distutils.version import StrictVersion
from walrus import Database
# Redis connection settings; overridable via environment variables.
HOST = os.environ.get('WALRUS_REDIS_HOST') or '127.0.0.1'
PORT = os.environ.get('WALRUS_REDIS_PORT') or 6379
# All tests share this connection; db 15 is flushed around each test.
db = Database(host=HOST, port=PORT, db=15)
# Cached server version string, populated lazily by requires_version().
REDIS_VERSION = None
def requires_version(min_version):
    """Skip the decorated test when the Redis server is older than
    ``min_version``. The server version is queried once and memoized."""
    def decorator(fn):
        global REDIS_VERSION
        if REDIS_VERSION is None:
            # Lazily fetch and cache the server's version string.
            REDIS_VERSION = db.info()['redis_version']
        is_too_old = StrictVersion(REDIS_VERSION) < StrictVersion(min_version)
        reason = 'redis too old, requires %s' % min_version
        return unittest.skipIf(is_too_old, reason)(fn)
    return decorator
def stream_test(fn):
    """Gate stream tests: when TEST_STREAM is set they run unconditionally,
    otherwise a stream-capable server version is required."""
    if os.environ.get('TEST_STREAM'):
        # Explicitly enabled -- the skip condition is always false.
        return unittest.skipIf(False, 'skipping stream tests')(fn)
    return requires_version('4.9.101')(fn)
def zpop_test(fn):
    """Gate ZPOP* tests: when TEST_ZPOP is set they run unconditionally,
    otherwise a zpop-capable server version is required."""
    if os.environ.get('TEST_ZPOP'):
        # Explicitly enabled -- the skip condition is always false.
        return unittest.skipIf(False, 'skipping zpop* tests')(fn)
    return requires_version('4.9.101')(fn)
class WalrusTestCase(unittest.TestCase):
    """Base test case: flushes the shared test database and clears any
    pending transaction pipelines around every test."""
    def _reset(self):
        # Shared setup/teardown logic.
        db.flushdb()
        db._transaction_local.pipes = []
    def setUp(self):
        self._reset()
    def tearDown(self):
        self._reset()
    def assertList(self, values, expected):
        """Assert that ``values`` (any iterable) matches ``expected``
        element-by-element and in length."""
        values = list(values)
        self.assertEqual(len(values), len(expected))
        for got, want in zip(values, expected):
            self.assertEqual(got, want)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,618
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tusks/rlite.py
|
import fnmatch
import sys
import unittest
from hirlite.hirlite import Rlite
from walrus import *
from walrus.tusks.helpers import TestHelper
class WalrusLite(Walrus):
    """Walrus database backed by rlite (an embedded Redis implementation).

    Certain commands' replies come back from rlite already in final form,
    so the corresponding redis-py response callbacks are removed.
    """
    _invalid_callbacks = ('SET', 'MSET', 'LSET')
    def __init__(self, filename=':memory:', encoding='utf-8'):
        self._filename = filename
        self._encoding = encoding
        self._db = Rlite(path=filename, encoding=encoding)
        # Copy the class-level callbacks, then drop those rlite handles
        # itself.
        self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
        for name in self._invalid_callbacks:
            del self.response_callbacks[name]
    def execute_command(self, *args, **options):
        # Route the command straight to the embedded rlite database.
        raw = self._db.command(*args)
        return self.parse_response(raw, args[0], **options)
    def parse_response(self, result, command_name, **options):
        # Apply the registered response callback if one exists; any
        # KeyError (including from within a callback) falls back to the
        # raw result, matching the original behavior.
        try:
            return self.response_callbacks[command_name.upper()](
                result, **options)
        except KeyError:
            return result
    def __repr__(self):
        label = ('in-memory database' if self._filename == ':memory:'
                 else self._filename)
        return '<WalrusLite: %s>' % label
    def _filtered_scan(self, results, match=None, count=None):
        # Emulate Redis SCAN MATCH/COUNT semantics client-side, since
        # rlite results are returned in full.
        if match is not None:
            results = fnmatch.filter(results, match)
        return results[:count] if count else results
    def hscan_iter(self, key, match=None, count=None):
        return self._filtered_scan(self.hgetall(key), match, count)
    def sscan_iter(self, key, match=None, count=None):
        return self._filtered_scan(self.smembers(key), match, count)
    def zscan_iter(self, key, match=None, count=None):
        return self._filtered_scan(self.zrange(key, 0, -1), match, count)
class TestWalrusLite(TestHelper, unittest.TestCase):
    """Runs the shared container test-suite (TestHelper) against an
    in-memory rlite database, plus some rlite-specific checks."""
    def setUp(self):
        # Fresh in-memory database for every test.
        self.db = WalrusLite()
    def test_list_set_delete_item(self):
        l = self.db.List('list_obj')
        l.clear()
        l.extend(['i1', 'i2', 'i3', 'i4'])
        # Item assignment works with negative and positive indices.
        l[-1] = 'ix'
        l[1] = 'iy'
        self.assertEqual(list(l), ['i1', 'iy', 'i3', 'ix'])
        l.prepend('nuggie')
        # Delete by index; the out-of-range index (9) leaves the list
        # unchanged, per the expected value below.
        for idx in [-1, 2, 9]:
            del l[idx]
        self.assertEqual([item for item in l], ['nuggie', 'i1', 'i3'])
    def test_set_random_and_pop(self):
        s = self.db.Set('s_obj')
        s.add('charlie', 'mickey')
        # Both random() and pop() return some member of the set.
        self.assertTrue(s.random() in ['charlie', 'mickey'])
        self.assertTrue(s.pop() in ['charlie', 'mickey'])
    def test_zset_iter(self):
        zs = self.db.ZSet('z_obj').clear()
        zs.add('zaizee', 3, 'mickey', 6, 'charlie', 31, 'huey', 3, 'nuggie', 0)
        # Iteration yields members ordered by score (huey/zaizee tie on 3
        # and come back in lexical order).
        items = [item for item in zs]
        self.assertEqual(
            items,
            ['nuggie', 'huey', 'zaizee', 'mickey', 'charlie'])
        # search() filters members using a glob-style pattern.
        self.assertEqual(zs.search('*ie'), ['nuggie', 'charlie'])
        self.assertEqual(zs.search('*u*'), ['nuggie', 'huey'])
if __name__ == '__main__':
    # Run this module's test-suite when executed directly.
    unittest.main(argv=sys.argv)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,619
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/lock.py
|
import threading
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestLock(WalrusTestCase):
    """Tests for the Redis-backed lock returned by ``db.lock()``."""
    def test_lock(self):
        """Acquire/release semantics, including a cross-thread hand-off."""
        lock_a = db.lock('lock-a')
        lock_b = db.lock('lock-b')
        self.assertTrue(lock_a.acquire())
        self.assertTrue(lock_b.acquire())
        # A second handle on the same name cannot acquire or release while
        # the first holds the lock, and gets its own distinct lock id.
        lock_a2 = db.lock('lock-a')
        self.assertFalse(lock_a2.acquire(block=False))
        self.assertFalse(lock_a2.release())
        self.assertNotEqual(lock_a._lock_id, lock_a2._lock_id)
        # Not reentrant: even the holding handle cannot re-acquire.
        self.assertFalse(lock_a.acquire(block=False))
        self.assertFalse(lock_b.acquire(block=False))
        t_waiting = threading.Event()
        t_acquired = threading.Event()
        t_acknowledged = threading.Event()
        def wait_for_lock():
            # Worker thread: signal readiness, block until the main thread
            # releases 'lock-a', then hold it until acknowledged.
            lock_a = db.lock('lock-a')
            t_waiting.set()
            lock_a.acquire()
            t_acquired.set()
            t_acknowledged.wait()
            lock_a.release()
        waiter_t = threading.Thread(target=wait_for_lock)
        waiter_t.start()
        t_waiting.wait() # Wait until the thread is up and running.
        lock_a.release()
        t_acquired.wait()
        # The worker now holds the lock, so the main thread cannot take it.
        self.assertFalse(lock_a.acquire(block=False))
        t_acknowledged.set()
        waiter_t.join()
        # Worker released the lock; the main thread can acquire once more.
        self.assertTrue(lock_a.acquire(block=False))
        lock_a.release()
    def test_lock_ctx_mgr(self):
        """The lock doubles as a context manager; exiting releases it."""
        lock_a = db.lock('lock-a')
        lock_a2 = db.lock('lock-a')
        with lock_a:
            self.assertFalse(lock_a2.acquire(block=False))
        self.assertTrue(lock_a2.acquire(block=False))
    def test_lock_decorator(self):
        """The lock doubles as a decorator wrapping calls in acquire/release."""
        lock = db.lock('lock-a')
        @lock
        def locked():
            lock2 = db.lock('lock-a')
            self.assertFalse(lock2.acquire(block=False))
        locked()
        @lock
        def raise_exception():
            raise ValueError()
        self.assertRaises(ValueError, raise_exception)
        # In the event of an exception, the lock will still be released.
        self.assertTrue(lock.acquire(block=False))
    def test_lock_cleanup(self):
        """Acquire/release leaves only the lock's event key behind."""
        self.assertEqual(len(db), 0)
        lock = db.lock('lock-a')
        self.assertTrue(lock.acquire())
        self.assertTrue(lock.release())
        self.assertEqual(len(db), 1) # We have the lock event key.
        self.assertEqual(db.lpop(lock.event), b'1')
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,620
|
coleifer/walrus
|
refs/heads/master
|
/walrus/search/metaphone.py
|
#!python
#coding= utf-8
#This script implements the Double Metaphone algorithm (c) 1998, 1999 by
#Lawrence Philips
#it was translated to Python from the C source written by Kevin Atkinson
#(http://aspell.net/metaphone/)
#By Andrew Collins - January 12, 2007 who claims no rights to this work
#http://www.atomodo.com/code/double-metaphone
#Tested with Python 2.4.3
#Updated Feb 14, 2007 - Found a typo in the 'gh' section
#Updated Dec 17, 2007 - Bugs fixed in 'S', 'Z', and 'J' sections.
#Thanks Chris Leong!
#Updated June 25, 2010 - several bugs fixed thanks to Nils Johnsson for a
# spectacular bug squashing effort. There were many cases where this
# function wouldn't give the same output
# as the original C source that were fixed by his careful attention and
# excellent communication.
# The script was also updated to use utf-8 rather than latin-1.
import sys
# Python 2/3 compatibility shim.  Under Python 2 the ``unicode`` builtin
# exists and byte strings must be decoded to unicode before processing;
# under Python 3 referencing ``unicode`` raises NameError and all strings
# are already text, so ``decode`` becomes a no-op.  Catch only NameError
# rather than a bare ``except:`` so real errors are not silently hidden.
try:
    NNNN = unicode('N')
    decode = lambda x: x.decode('utf-8', 'ignore')
except NameError:
    NNNN = 'N'
    decode = lambda x: x

# Latin capital C with cedilla; currently only referenced by the
# commented-out branch inside dm().
CCCC = 'Ç'

# Letters the algorithm treats as vowels.
VOWELS = frozenset(decode(x) for x in ('A', 'E', 'I', 'O', 'U', 'Y'))
# Silent leading digraphs skipped at the start of a word.
GNKN = frozenset(decode(x) for x in ('GN', 'KN', 'PN', 'WR', 'PS'))
def dm(st) :
    """dm(string) -> (string, string or None)

    Return the Double Metaphone codes for the given string - always a
    2-tuple.  The second element is None when the secondary code is
    identical to the primary.  There are no checks done on the input
    string, but it should be a single word or name.
    """
    st = decode(st)
    # st is short for string. I usually prefer descriptive over short,
    # but this var is used a lot!
    st = st.upper()
    is_slavo_germanic = (st.find('W') > -1 or st.find('K') > -1 or
        st.find('CZ') > -1 or st.find('WITZ') > -1)
    length = len(st)
    first = 2
    # so we can index beyond the beginning and end of the input string
    st = ('-') * first + st + (' ' * 5)
    last = first + length -1
    pos = first # pos is short for position
    pri = sec = '' # primary and secondary metaphone codes
    #skip these silent letters when at start of word
    if st[first:first+2] in GNKN:
        pos += 1
    # Initial 'X' is pronounced 'Z' e.g. 'Xavier'
    if st[first] == 'X' :
        pri = sec = 'S' #'Z' maps to 'S'
        pos += 1
    # main loop through chars in st
    while pos <= last :
        #print str(pos) + '\t' + st[pos]
        ch = st[pos] # ch is short for character
        # nxt (short for next characters in metaphone code) is set to a
        # tuple of the next characters in
        # the primary and secondary codes and how many characters to move
        # forward in the string.
        # the secondary code letter is given only when it is different than
        # the primary.
        # This is just a trick to make the code easier to write and read.
        #
        # default action is to add nothing and move to next char
        nxt = (None, 1)
        if ch in VOWELS :
            nxt = (None, 1)
            if pos == first : # all init VOWELS now map to 'A'
                nxt = ('A', 1)
        elif ch == 'B' :
            #"-mb", e.g", "dumb", already skipped over... see 'M' below
            if st[pos+1] == 'B' :
                nxt = ('P', 2)
            else :
                nxt = ('P', 1)
        elif ch == 'C' :
            # various germanic
            if (pos > (first + 1) and st[pos-2] not in VOWELS and
                    st[pos-1:pos+2] == 'ACH' and
                    (st[pos+2] not in ['I', 'E'] or
                     st[pos-2:pos+4] in ['BACHER', 'MACHER'])):
                nxt = ('K', 2)
            # special case 'CAESAR'
            elif pos == first and st[first:first+6] == 'CAESAR' :
                nxt = ('S', 2)
            elif st[pos:pos+4] == 'CHIA' : #italian 'chianti'
                nxt = ('K', 2)
            elif st[pos:pos+2] == 'CH' :
                # find 'michael'
                if pos > first and st[pos:pos+4] == 'CHAE' :
                    nxt = ('K', 'X', 2)
                elif pos == first and (st[pos+1:pos+6] in ['HARAC', 'HARIS'] or \
                        st[pos+1:pos+4] in ["HOR", "HYM", "HIA", "HEM"]) and st[first:first+5] != 'CHORE' :
                    nxt = ('K', 2)
                #germanic, greek, or otherwise 'ch' for 'kh' sound
                elif st[first:first+4] in ['VAN ', 'VON '] or st[first:first+3] == 'SCH' \
                        or st[pos-2:pos+4] in ["ORCHES", "ARCHIT", "ORCHID"] \
                        or st[pos+2] in ['T', 'S'] \
                        or ((st[pos-1] in ["A", "O", "U", "E"] or pos == first) \
                        and st[pos+2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W", " "]) :
                    nxt = ('K', 1)
                else :
                    if pos > first :
                        if st[first:first+2] == 'MC' :
                            nxt = ('K', 2)
                        else :
                            nxt = ('X', 'K', 2)
                    else :
                        nxt = ('X', 2)
            #e.g, 'czerny'
            elif st[pos:pos+2] == 'CZ' and st[pos-2:pos+2] != 'WICZ' :
                nxt = ('S', 'X', 2)
            #e.g., 'focaccia'
            elif st[pos+1:pos+4] == 'CIA' :
                nxt = ('X', 3)
            #double 'C', but not if e.g. 'McClellan'
            elif st[pos:pos+2] == 'CC' and not (pos == (first +1) and st[first] == 'M') :
                #'bellocchio' but not 'bacchus'
                if st[pos+2] in ["I", "E", "H"] and st[pos+2:pos+4] != 'HU' :
                    #'accident', 'accede' 'succeed'
                    if (pos == (first +1) and st[first] == 'A') or \
                            st[pos-1:pos+4] in ['UCCEE', 'UCCES'] :
                        nxt = ('KS', 3)
                    #'bacci', 'bertucci', other italian
                    else:
                        nxt = ('X', 3)
                else :
                    nxt = ('K', 2)
            elif st[pos:pos+2] in ["CK", "CG", "CQ"] :
                nxt = ('K', 'K', 2)
            elif st[pos:pos+2] in ["CI", "CE", "CY"] :
                #italian vs. english
                if st[pos:pos+3] in ["CIO", "CIE", "CIA"] :
                    nxt = ('S', 'X', 2)
                else :
                    nxt = ('S', 2)
            else :
                #name sent in 'mac caffrey', 'mac gregor'
                if st[pos+1:pos+3] in [" C", " Q", " G"] :
                    nxt = ('K', 3)
                else :
                    if st[pos+1] in ["C", "K", "Q"] and st[pos+1:pos+3] not in ["CE", "CI"] :
                        nxt = ('K', 2)
                    else : # default for 'C'
                        nxt = ('K', 1)
        #elif ch == CCCC:
        #    nxt = ('S', 1)
        elif ch == 'D' :
            if st[pos:pos+2] == 'DG' :
                if st[pos+2] in ['I', 'E', 'Y'] : #e.g. 'edge'
                    nxt = ('J', 3)
                else :
                    nxt = ('TK', 2)
            elif st[pos:pos+2] in ['DT', 'DD'] :
                nxt = ('T', 2)
            else :
                nxt = ('T', 1)
        elif ch == 'F' :
            if st[pos+1] == 'F' :
                nxt = ('F', 2)
            else :
                nxt = ('F', 1)
        elif ch == 'G' :
            if st[pos+1] == 'H' :
                if pos > first and st[pos-1] not in VOWELS :
                    nxt = ('K', 2)
                elif pos < (first + 3) :
                    if pos == first : #'ghislane', ghiradelli
                        if st[pos+2] == 'I' :
                            nxt = ('J', 2)
                        else :
                            nxt = ('K', 2)
                #Parker's rule (with some further refinements) - e.g., 'hugh'
                elif (pos > (first + 1) and st[pos-2] in ['B', 'H', 'D'] ) \
                        or (pos > (first + 2) and st[pos-3] in ['B', 'H', 'D'] ) \
                        or (pos > (first + 3) and st[pos-4] in ['B', 'H'] ) :
                    nxt = (None, 2)
                else :
                    # e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
                    if pos > (first + 2) and st[pos-1] == 'U' \
                            and st[pos-3] in ["C", "G", "L", "R", "T"] :
                        nxt = ('F', 2)
                    else :
                        if pos > first and st[pos-1] != 'I' :
                            nxt = ('K', 2)
            elif st[pos+1] == 'N' :
                if pos == (first +1) and st[first] in VOWELS and not is_slavo_germanic :
                    nxt = ('KN', 'N', 2)
                else :
                    # not e.g. 'cagney'
                    if st[pos+2:pos+4] != 'EY' and st[pos+1] != 'Y' and not is_slavo_germanic :
                        nxt = ('N', 'KN', 2)
                    else :
                        nxt = ('KN', 2)
            # 'tagliaro'
            elif st[pos+1:pos+3] == 'LI' and not is_slavo_germanic :
                nxt = ('KL', 'L', 2)
            # -ges-,-gep-,-gel-, -gie- at beginning
            elif pos == first and (st[pos+1] == 'Y' \
                    or st[pos+1:pos+3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]) :
                nxt = ('K', 'J', 2)
            # -ger-, -gy-
            # NOTE(review): st[pos+1:pos+2] is a one-character slice, so the
            # comparison with 'ER' can never be true; the reference C source
            # tests the two characters at pos+1, suggesting st[pos+1:pos+3]
            # was intended -- confirm against the original before changing.
            elif (st[pos+1:pos+2] == 'ER' or st[pos+1] == 'Y') \
                    and st[first:first+6] not in ["DANGER", "RANGER", "MANGER"] \
                    and st[pos-1] not in ['E', 'I'] and st[pos-1:pos+2] not in ['RGY', 'OGY'] :
                nxt = ('K', 'J', 2)
            # italian e.g, 'biaggi'
            elif st[pos+1] in ['E', 'I', 'Y'] or st[pos-1:pos+3] in ["AGGI", "OGGI"] :
                # obvious germanic
                if st[first:first+4] in ['VON ', 'VAN '] or st[first:first+3] == 'SCH' \
                        or st[pos+1:pos+3] == 'ET' :
                    nxt = ('K', 2)
                else :
                    # always soft if french ending
                    if st[pos+1:pos+5] == 'IER ' :
                        nxt = ('J', 2)
                    else :
                        nxt = ('J', 'K', 2)
            elif st[pos+1] == 'G' :
                nxt = ('K', 2)
            else :
                nxt = ('K', 1)
        elif ch == 'H' :
            # only keep if first & before vowel or btw. 2 VOWELS
            if (pos == first or st[pos-1] in VOWELS) and st[pos+1] in VOWELS :
                nxt = ('H', 2)
            else : # (also takes care of 'HH')
                nxt = (None, 1)
        elif ch == 'J' :
            # obvious spanish, 'jose', 'san jacinto'
            if st[pos:pos+4] == 'JOSE' or st[first:first+4] == 'SAN ' :
                if (pos == first and st[pos+4] == ' ') or st[first:first+4] == 'SAN ' :
                    nxt = ('H',)
                else :
                    nxt = ('J', 'H')
            elif pos == first and st[pos:pos+4] != 'JOSE' :
                nxt = ('J', 'A') # Yankelovich/Jankelowicz
            else :
                # spanish pron. of e.g. 'bajador'
                if st[pos-1] in VOWELS and not is_slavo_germanic \
                        and st[pos+1] in ['A', 'O'] :
                    nxt = ('J', 'H')
                else :
                    if pos == last :
                        nxt = ('J', ' ')
                    else :
                        if st[pos+1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
                                and st[pos-1] not in ["S", "K", "L"] :
                            nxt = ('J',)
                        else :
                            nxt = (None, )
            if st[pos+1] == 'J' :
                nxt = nxt + (2,)
            else :
                nxt = nxt + (1,)
        elif ch == 'K' :
            if st[pos+1] == 'K' :
                nxt = ('K', 2)
            else :
                nxt = ('K', 1)
        elif ch == 'L' :
            if st[pos+1] == 'L' :
                # spanish e.g. 'cabrillo', 'gallegos'
                if (pos == (last - 2) and st[pos-1:pos+3] in ["ILLO", "ILLA", "ALLE"]) \
                        or ((st[last-1:last+1] in ["AS", "OS"] or st[last] in ["A", "O"]) \
                        and st[pos-1:pos+3] == 'ALLE') :
                    nxt = ('L', '', 2)
                else :
                    nxt = ('L', 2)
            else :
                nxt = ('L', 1)
        elif ch == 'M' :
            if st[pos+1:pos+4] == 'UMB' \
                    and (pos + 1 == last or st[pos+2:pos+4] == 'ER') \
                    or st[pos+1] == 'M' :
                nxt = ('M', 2)
            else :
                nxt = ('M', 1)
        elif ch == 'N' :
            if st[pos+1] == 'N' :
                nxt = ('N', 2)
            else :
                nxt = ('N', 1)
        # NOTE(review): NNNN compares equal to 'N' (see the module-level
        # shim), which the branch above already handles, so this branch
        # appears unreachable; presumably it was meant to match an accented
        # N (e.g. N-tilde) -- verify before removing.
        elif ch == NNNN:
            nxt = ('N', 1)
        elif ch == 'P' :
            if st[pos+1] == 'H' :
                nxt = ('F', 2)
            elif st[pos+1] in ['P', 'B'] : # also account for "campbell", "raspberry"
                nxt = ('P', 2)
            else :
                nxt = ('P', 1)
        elif ch == 'Q' :
            if st[pos+1] == 'Q' :
                nxt = ('K', 2)
            else :
                nxt = ('K', 1)
        elif ch == 'R' :
            # french e.g. 'rogier', but exclude 'hochmeier'
            if pos == last and not is_slavo_germanic \
                    and st[pos-2:pos] == 'IE' and st[pos-4:pos-2] not in ['ME', 'MA'] :
                nxt = ('', 'R')
            else :
                nxt = ('R',)
            if st[pos+1] == 'R' :
                nxt = nxt + (2,)
            else :
                nxt = nxt + (1,)
        elif ch == 'S' :
            # special cases 'island', 'isle', 'carlisle', 'carlysle'
            if st[pos-1:pos+2] in ['ISL', 'YSL'] :
                nxt = (None, 1)
            # special case 'sugar-'
            elif pos == first and st[first:first+5] == 'SUGAR' :
                nxt =('X', 'S', 1)
            elif st[pos:pos+2] == 'SH' :
                # germanic
                if st[pos+1:pos+5] in ["HEIM", "HOEK", "HOLM", "HOLZ"] :
                    nxt = ('S', 2)
                else :
                    nxt = ('X', 2)
            # italian & armenian
            elif st[pos:pos+3] in ["SIO", "SIA"] or st[pos:pos+4] == 'SIAN' :
                if not is_slavo_germanic :
                    nxt = ('S', 'X', 3)
                else :
                    nxt = ('S', 3)
            # german & anglicisations, e.g. 'smith' match 'schmidt', 'snider'
            # match 'schneider'
            # also, -sz- in slavic language altho in hungarian it is
            # pronounced 's'
            elif (pos == first and st[pos+1] in ["M", "N", "L", "W"]) or st[pos+1] == 'Z' :
                nxt = ('S', 'X')
                if st[pos+1] == 'Z' :
                    nxt = nxt + (2,)
                else :
                    nxt = nxt + (1,)
            elif st[pos:pos+2] == 'SC' :
                # Schlesinger's rule
                if st[pos+2] == 'H' :
                    # dutch origin, e.g. 'school', 'schooner'
                    if st[pos+3:pos+5] in ["OO", "ER", "EN", "UY", "ED", "EM"] :
                        # 'schermerhorn', 'schenker'
                        if st[pos+3:pos+5] in ['ER', 'EN'] :
                            nxt = ('X', 'SK', 3)
                        else :
                            nxt = ('SK', 3)
                    else :
                        if pos == first and st[first+3] not in VOWELS and st[first+3] != 'W' :
                            nxt = ('X', 'S', 3)
                        else :
                            nxt = ('X', 3)
                elif st[pos+2] in ['I', 'E', 'Y'] :
                    nxt = ('S', 3)
                else :
                    nxt = ('SK', 3)
            # french e.g. 'resnais', 'artois'
            elif pos == last and st[pos-2:pos] in ['AI', 'OI'] :
                nxt = ('', 'S', 1)
            else :
                nxt = ('S',)
                if st[pos+1] in ['S', 'Z'] :
                    nxt = nxt + (2,)
                else :
                    nxt = nxt + (1,)
        elif ch == 'T' :
            if st[pos:pos+4] == 'TION' :
                nxt = ('X', 3)
            elif st[pos:pos+3] in ['TIA', 'TCH'] :
                nxt = ('X', 3)
            elif st[pos:pos+2] == 'TH' or st[pos:pos+3] == 'TTH' :
                # special case 'thomas', 'thames' or germanic
                if st[pos+2:pos+4] in ['OM', 'AM'] or st[first:first+4] in ['VON ', 'VAN '] \
                        or st[first:first+3] == 'SCH' :
                    nxt = ('T', 2)
                else :
                    nxt = ('0', 'T', 2)
            elif st[pos+1] in ['T', 'D'] :
                nxt = ('T', 2)
            else :
                nxt = ('T', 1)
        elif ch == 'V' :
            if st[pos+1] == 'V' :
                nxt = ('F', 2)
            else :
                nxt = ('F', 1)
        elif ch == 'W' :
            # can also be in middle of word
            if st[pos:pos+2] == 'WR' :
                nxt = ('R', 2)
            elif pos == first and (st[pos+1] in VOWELS or st[pos:pos+2] == 'WH') :
                # Wasserman should match Vasserman
                if st[pos+1] in VOWELS :
                    nxt = ('A', 'F', 1)
                else :
                    nxt = ('A', 1)
            # Arnow should match Arnoff
            elif (pos == last and st[pos-1] in VOWELS) \
                    or st[pos-1:pos+5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
                    or st[first:first+3] == 'SCH' :
                nxt = ('', 'F', 1)
            # polish e.g. 'filipowicz'
            elif st[pos:pos+4] in ["WICZ", "WITZ"] :
                nxt = ('TS', 'FX', 4)
            else : # default is to skip it
                nxt = (None, 1)
        elif ch == 'X' :
            # french e.g. breaux
            nxt = (None,)
            if not(pos == last and (st[pos-3:pos] in ["IAU", "EAU"] \
                    or st[pos-2:pos] in ['AU', 'OU'])):
                nxt = ('KS',)
            if st[pos+1] in ['C', 'X'] :
                nxt = nxt + (2,)
            else :
                nxt = nxt + (1,)
        elif ch == 'Z' :
            # chinese pinyin e.g. 'zhao'
            if st[pos+1] == 'H' :
                nxt = ('J',)
            elif st[pos+1:pos+3] in ["ZO", "ZI", "ZA"] \
                    or (is_slavo_germanic and pos > first and st[pos-1] != 'T') :
                nxt = ('S', 'TS')
            else :
                nxt = ('S',)
            if st[pos+1] == 'Z' :
                nxt = nxt + (2,)
            else :
                nxt = nxt + (1,)
        # ----------------------------------
        # --- end checking letters------
        # ----------------------------------
        #print str(nxt)
        # Apply nxt: 2-tuple = same letter(s) for both codes; 3-tuple =
        # distinct primary/secondary; last element is the advance count.
        if len(nxt) == 2 :
            if nxt[0] :
                pri += nxt[0]
                sec += nxt[0]
            pos += nxt[1]
        elif len(nxt) == 3 :
            if nxt[0] :
                pri += nxt[0]
            if nxt[1] :
                sec += nxt[1]
            pos += nxt[2]
    # When the secondary code adds no information, suppress it.
    if pri == sec :
        return (pri, None)
    else :
        return (pri, sec)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,621
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/containers.py
|
import unittest
from walrus.containers import *
from walrus.containers import _normalize_stream_keys
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
from walrus.tests.base import stream_test
from walrus.tests.base import zpop_test
from walrus.utils import decode
from walrus.utils import decode_dict
from walrus.utils import encode
class TestHash(WalrusTestCase):
    """Exercise the Redis-backed Hash container (``db.Hash``)."""
    def setUp(self):
        # Fresh wrapper per test; backing Redis key is 'my-hash'.
        super(TestHash, self).setUp()
        self.hsh = db.Hash('my-hash')
    def test_item_api(self):
        """__getitem__/__setitem__/__delitem__; values come back as bytes."""
        self.hsh['k1'] = 'v1'
        self.assertEqual(self.hsh['k1'], b'v1')
        self.assertTrue(self.hsh['kx'] is None)  # missing field -> None
        self.hsh['k2'] = 'v2'
        self.hsh['k3'] = 'v3'
        self.assertEqual(self.hsh.as_dict(), {
            b'k1': b'v1',
            b'k2': b'v2',
            b'k3': b'v3'})
        del self.hsh['k2']
        self.assertEqual(self.hsh.as_dict(), {b'k1': b'v1', b'k3': b'v3'})
    def test_dict_apis(self):
        """update()/items()/keys()/values()/len()/containment."""
        self.hsh.update({'k1': 'v1', 'k2': 'v2'})
        self.hsh.update(k3='v3', k4='v4')  # kwargs form also accepted
        self.assertEqual(sorted(self.hsh.items()), [
            (b'k1', b'v1'),
            (b'k2', b'v2'),
            (b'k3', b'v3'),
            (b'k4', b'v4')])
        self.assertEqual(sorted(self.hsh.keys()), [b'k1', b'k2', b'k3', b'k4'])
        self.assertEqual(sorted(self.hsh.values()),
                         [b'v1', b'v2', b'v3', b'v4'])
        self.assertEqual(len(self.hsh), 4)
        self.assertTrue('k1' in self.hsh)
        self.assertFalse('kx' in self.hsh)
    def test_search_iter(self):
        """Iteration yields (field, value) pairs; search() filters by glob."""
        self.hsh.update(foo='v1', bar='v2', baz='v3')
        self.assertEqual(sorted(self.hsh), [
            (b'bar', b'v2'),
            (b'baz', b'v3'),
            (b'foo', b'v1')])
        self.assertEqual(sorted(self.hsh.search('b*')), [
            (b'bar', b'v2'),
            (b'baz', b'v3')])
    def test_as_dict(self):
        """as_dict(True) decodes fields and values to str."""
        self.hsh.update(k1='v1', k2='v2')
        self.assertEqual(self.hsh.as_dict(True), {'k1': 'v1', 'k2': 'v2'})
        self.assertEqual(db.Hash('test').as_dict(), {})
    def test_from_dict(self):
        """Hash.from_dict() populates a hash from a Python dict."""
        data = dict(zip('abcdefghij', 'klmnopqrst'))
        hsh = Hash.from_dict(db, 'test', data)
        self.assertEqual(hsh.as_dict(True), data)
    def test_setnx(self):
        """setnx() sets only when the field does not yet exist."""
        key, value = "key_setnx", "value_setnx"
        self.assertTrue(self.hsh.setnx(key, value))
        self.assertFalse(self.hsh.setnx(key, value))
class TestSet(WalrusTestCase):
    """Exercise the Redis-backed Set container (``db.Set``)."""
    def setUp(self):
        # Fresh wrapper per test; backing Redis key is 'my-set'.
        super(TestSet, self).setUp()
        self.set = db.Set('my-set')
    def test_basic_apis(self):
        """add/remove, membership, iteration and item deletion."""
        self.set.add('i1', 'i2', 'i3', 'i2', 'i1')  # duplicates collapse
        self.assertEqual(sorted(self.set), [b'i1', b'i2', b'i3'])
        self.set.remove('i2')
        self.assertEqual(sorted(self.set), [b'i1', b'i3'])
        self.set.remove('ix')  # removing a missing member is a no-op
        self.assertEqual(sorted(self.set), [b'i1', b'i3'])
        # Test __contains__
        self.assertTrue('i1' in self.set)
        self.assertFalse('ix' in self.set)
        # Test __iter__.
        self.assertEqual(sorted(self.set), [b'i1', b'i3'])
        del self.set['i3']
        self.assertEqual(sorted(self.set), [b'i1'])
    def test_combining(self):
        """Operators |, & and - return plain Python sets of bytes."""
        self.set2 = db.Set('my-set2')
        self.set.add(1, 2, 3, 4)
        self.set2.add(3, 4, 5, 6)
        self.assertEqual(
            self.set | self.set2,
            set([b'1', b'2', b'3', b'4', b'5', b'6']))
        self.assertEqual(self.set & self.set2, set([b'3', b'4']))
        self.assertEqual(self.set - self.set2, set([b'1', b'2']))
        self.assertEqual(self.set2 - self.set, set([b'5', b'6']))
    def test_combine_store(self):
        """unionstore/interstore/diffstore write results to a new key."""
        self.set2 = db.Set('my-set2')
        self.set.add(1, 2, 3, 4)
        self.set2.add(3, 4, 5, 6)
        s3 = self.set.unionstore('my-set3', self.set2)
        self.assertEqual(s3.members(),
                         set([b'1', b'2', b'3', b'4', b'5', b'6']))
        s3 = self.set.interstore('my-set3', self.set2)
        self.assertEqual(s3.members(), set([b'3', b'4']))
        s3 = self.set.diffstore('my-set3', self.set2)
        self.assertEqual(s3.members(), set([b'1', b'2']))
        # In-place operators update the backing key.
        self.set |= self.set2
        self.assertEqual(sorted(self.set),
                         [b'1', b'2', b'3', b'4', b'5', b'6'])
        s4 = db.Set('my-set4')
        s4.add('1', '3')
        s3 &= s4
        self.assertEqual(s3.members(), set([b'1']))
    def test_search(self):
        """search() yields members matching a glob pattern."""
        self.set.add('foo', 'bar', 'baz', 'nug')
        self.assertEqual(sorted(self.set.search('b*')), [b'bar', b'baz'])
    def test_sort(self):
        """sort() returns ordered members; store= writes to a List key."""
        values = ['charlie', 'zaizee', 'mickey', 'huey']
        self.set.add(*values)
        self.assertEqual(self.set.sort(),
                         [b'charlie', b'huey', b'mickey', b'zaizee'])
        self.set.sort(ordering='DESC', limit=3, store='s_dest')
        self.assertList(db.List('s_dest'), [b'zaizee', b'mickey', b'huey'])
    def test_as_set(self):
        """as_set(True) decodes members to str."""
        self.set.add('foo', 'bar', 'baz')
        self.assertEqual(self.set.as_set(True), set(('foo', 'bar', 'baz')))
        self.assertEqual(db.Set('test').as_set(), set())
    def test_from_set(self):
        """Set.from_set() populates a Redis set from a Python set."""
        data = set('abcdefghij')
        s = Set.from_set(db, 'test', data)
        self.assertEqual(s.as_set(True), data)
class TestZSet(WalrusTestCase):
    """Exercise the Redis-backed sorted-set container (``db.ZSet``)."""
    def setUp(self):
        # Fresh wrapper per test; backing Redis key is 'my-zset'.
        super(TestZSet, self).setUp()
        self.zs = db.ZSet('my-zset')
    def assertZSet(self, expected):
        # Helper: compare the full (member, score) listing to expected.
        self.assertEqual(list(self.zs), expected)
    def test_basic_apis(self):
        """add/remove/score/rank/count/incr plus popmin/popmax compat."""
        self.zs.add({'i1': 1, 'i2': 2})
        self.assertZSet([(b'i1', 1), (b'i2', 2)])
        self.zs.add({'i0': 0})
        self.zs.add({'i3': 3})
        self.assertZSet([(b'i0', 0), (b'i1', 1), (b'i2', 2), (b'i3', 3)])
        self.zs.remove('i1')
        self.zs.remove_by_score(3)
        self.zs.add({'i2': -2})  # re-adding updates the score
        self.zs.add({'i9': 9})
        self.assertZSet([(b'i2', -2.), (b'i0', 0.), (b'i9', 9.)])
        # __len__
        self.assertEqual(len(self.zs), 3)
        # __contains__
        self.assertTrue('i0' in self.zs)
        self.assertFalse('i1' in self.zs)
        self.assertEqual(self.zs.score('i2'), -2)
        self.assertEqual(self.zs.score('ix'), None)  # missing member -> None
        self.assertEqual(self.zs.rank('i0'), 1)
        self.assertEqual(self.zs.rank('i1'), None)
        self.assertEqual(self.zs.count(0, 10), 2)
        self.assertEqual(self.zs.count(-3, 11), 3)
        self.zs.incr('i2')       # default increment of 1
        self.zs.incr('i0', -2)
        self.assertZSet([(b'i0', -2.), (b'i2', -1.), (b'i9', 9.)])
        self.assertEqual(self.zs.range_by_score(0, 9), [b'i9'])
        self.assertEqual(self.zs.range_by_score(-3, 0), [b'i0', b'i2'])
        self.assertEqual(self.zs.popmin_compat(), [(b'i0', -2.)])
        self.assertEqual(len(self.zs), 2)
        # Requesting more items than exist returns what is available.
        self.assertEqual(self.zs.popmax_compat(3),
                         [(b'i9', 9.), (b'i2', -1.)])
        self.assertEqual(self.zs.popmin_compat(), [])
        self.assertEqual(self.zs.popmax_compat(), [])
        self.assertEqual(len(self.zs), 0)
    @zpop_test
    def test_popmin_popmax(self):
        """Native ZPOPMIN/ZPOPMAX and their blocking variants."""
        for i in range(10):
            self.zs.add({'i%s' % i: i})
        # a list of item/score tuples is returned.
        self.assertEqual(self.zs.popmin(2), [(b'i0', 0.), (b'i1', 1.)])
        self.assertEqual(self.zs.popmax(2), [(b'i9', 9.), (b'i8', 8.)])
        # when called with no args, a list is still returned.
        self.assertEqual(self.zs.popmin(), [(b'i2', 2.)])
        self.assertEqual(self.zs.popmax(), [(b'i7', 7.)])
        # blocking pop returns single item.
        self.assertEqual(self.zs.bpopmin(), (b'i3', 3.))
        self.assertEqual(self.zs.bpopmax(), (b'i6', 6.))
        # blocking-pop with timeout.
        self.assertEqual(self.zs.bpopmin(2), (b'i4', 4.))
        self.assertEqual(self.zs.bpopmax(2), (b'i5', 5.))
        # empty list is returned when zset is empty.
        self.assertEqual(self.zs.popmin(), [])
        self.assertEqual(self.zs.popmax(), [])
    def test_item_apis(self):
        """Indexed access returns [member] or [(member, score)] lists."""
        self.zs['i1'] = 1
        self.zs['i0'] = 0
        self.zs['i3'] = 3
        self.zs['i2'] = 2
        # Second tuple element selects whether scores are included.
        self.assertEqual(self.zs[0, False], [b'i0'])
        self.assertEqual(self.zs[0, True], [(b'i0', 0)])
        self.assertEqual(self.zs[2, False], [b'i2'])
        self.assertEqual(self.zs[2, True], [(b'i2', 2)])
        self.assertEqual(self.zs[-1, True], [(b'i3', 3)])
        self.assertEqual(self.zs[9, True], [])  # out-of-range -> empty list
        self.assertEqual(self.zs[0], [b'i0'])
        self.assertEqual(self.zs[2], [b'i2'])
        self.assertEqual(self.zs[9], [])
        del self.zs['i1']
        del self.zs['i3']
        self.zs['i2'] = -2
        self.zs['i9'] = 9
        self.assertZSet([(b'i2', -2.), (b'i0', 0.), (b'i9', 9.)])
    def test_slicing(self):
        """Slices accept rank indices or member names; del trims ranges."""
        self.zs.add({'i1': 1, 'i2': 2, 'i3': 3, 'i0': 0})
        self.assertEqual(self.zs[:1, True], [(b'i0', 0)])
        self.assertEqual(self.zs[1:3, False], [b'i1', b'i2'])
        self.assertEqual(self.zs[1:-1, True], [(b'i1', 1), (b'i2', 2)])
        # Member-name bounds; the stop member is excluded.
        self.assertEqual(self.zs['i1':, False], [b'i1', b'i2', b'i3'])
        self.assertEqual(self.zs[:'i2', False], [b'i0', b'i1'])
        self.assertEqual(
            self.zs['i0':'i3', True],
            [(b'i0', 0), (b'i1', 1), (b'i2', 2)])
        # Slicing by an unknown member raises KeyError.
        self.assertRaises(KeyError, self.zs.__getitem__, (slice('i9'), False))
        self.assertEqual(self.zs[99:, False], [])
        del self.zs[:'i2']
        self.assertZSet([(b'i2', 2.), (b'i3', 3.)])
        del self.zs[1:]
        self.assertZSet([(b'i2', 2.)])
    def test_combine_store(self):
        """unionstore/interstore sum the scores of common members."""
        zs2 = db.ZSet('my-zset2')
        self.zs.add({1: 1, 2: 2, 3: 3})
        zs2.add({3: 3, 4: 4, 5: 5})
        zs3 = self.zs.unionstore('my-zset3', zs2)
        self.assertEqual(
            list(zs3),
            [(b'1', 1.), (b'2', 2.), (b'4', 4.), (b'5', 5.), (b'3', 6.)])
        zs3 = self.zs.interstore('my-zset3', zs2)
        self.assertEqual(list(zs3), [(b'3', 6.)])
        self.zs |= zs2
        self.assertZSet([
            (b'1', 1.), (b'2', 2.), (b'4', 4.), (b'5', 5.), (b'3', 6.)])
        zs3 &= zs2
        self.assertEqual(list(zs3), [(b'3', 9.)])
    def test_search(self):
        """search() yields (member, score) pairs matching a glob pattern."""
        self.zs.add({'foo': 1, 'bar': 2, 'baz': 1, 'nug': 3})
        self.assertEqual(
            list(self.zs.search('b*')),
            [(b'baz', 1.), (b'bar', 2.)])
    def test_sort(self):
        """sort() orders by member value; store= writes to a List key."""
        values = ['charlie', 3, 'zaizee', 2, 'mickey', 6, 'huey', 3]
        self.zs.add(dict(zip(values[::2], values[1::2])))
        self.assertEqual(
            self.zs.sort(),
            [b'charlie', b'huey', b'mickey', b'zaizee'])
        self.zs.sort(ordering='DESC', limit=3, store='z_dest')
        res = db.List('z_dest')
        self.assertEqual(list(res), [b'zaizee', b'mickey', b'huey'])
    def test_as_items(self):
        """as_items(True) decodes members and returns sorted pairs."""
        self.zs.add({'foo': 3, 'bar': 1, 'baz': 2})
        self.assertEqual(self.zs.as_items(True),
                         [('bar', 1.), ('baz', 2.), ('foo', 3.)])
        self.assertEqual(db.ZSet('test').as_items(), [])
    def test_from_dict(self):
        """ZSet.from_dict() populates member->score from a mapping."""
        data = dict(zip('abcdefghij', [float(i) for i in range(10)]))
        zs = ZSet.from_dict(db, 'test', data)
        self.assertEqual(zs.as_items(True), sorted(data.items()))
class TestList(WalrusTestCase):
    """Exercise the Redis-backed List container (``db.List``)."""
    def setUp(self):
        # Fresh wrapper per test; backing Redis key is 'my-list'.
        super(TestList, self).setUp()
        self.lst = db.List('my-list')
    def test_basic_apis(self):
        """append/extend/prepend/insert and pop from either end."""
        self.lst.append('i1')
        self.lst.extend(['i2', 'i3'])
        self.lst.prepend('ix')
        self.assertList(self.lst, [b'ix', b'i1', b'i2', b'i3'])
        # insert() places the value relative to an existing pivot value.
        self.lst.insert('iy', 'i2', 'before')
        self.lst.insert('iz', 'i2', 'after')
        self.assertList(self.lst, [b'ix', b'i1', b'iy', b'i2', b'iz', b'i3'])
        self.assertEqual(self.lst.pop(), b'i3')
        self.assertEqual(self.lst.popleft(), b'ix')
        self.assertEqual(len(self.lst), 4)
    def test_item_apis(self):
        """Index get/set and deletion by value or by index."""
        self.lst.append('i0')
        self.assertEqual(self.lst[0], b'i0')
        self.lst.extend(['i1', 'i2'])
        del self.lst['i1']  # delete by value
        self.assertList(self.lst, [b'i0', b'i2'])
        self.lst[1] = 'i2x'
        self.assertList(self.lst, [b'i0', b'i2x'])
        del self.lst[0]
        self.assertList(self.lst, [b'i2x'])
        del self.lst[99]  # out-of-range index delete is a no-op
        self.assertList(self.lst, [b'i2x'])
        del self.lst['ixxx']  # missing-value delete is a no-op
        self.assertList(self.lst, [b'i2x'])
    def test_slicing(self):
        """Read slices; ``del`` uses LTRIM semantics to trim the list."""
        self.lst.extend(['i1', 'i2', 'i3', 'i4'])
        self.assertEqual(self.lst[:1], [b'i1'])
        self.assertEqual(self.lst[:2], [b'i1', b'i2'])
        self.assertEqual(self.lst[:-1], [b'i1', b'i2', b'i3'])
        self.assertEqual(self.lst[1:2], [b'i2'])
        self.assertEqual(self.lst[1:], [b'i2', b'i3', b'i4'])
        l = db.List('l1')
        l.extend(range(10))
        # LTRIM, preserve the 1st to last (removes the 0th element).
        del l[1:-1]
        self.assertEqual([int(decode(i)) for i in l],
                         [1, 2, 3, 4, 5, 6, 7, 8, 9])
        # Trim the list so that it contains only the values within the
        # specified range.
        del l[:3]
        self.assertEqual([int(decode(i)) for i in l], [1, 2, 3])
    def test_sort(self):
        """sort() returns ordered values; store= writes to another List."""
        values = ['charlie', 'zaizee', 'mickey', 'huey']
        self.lst.extend(values)
        self.assertEqual(self.lst.sort(),
                         [b'charlie', b'huey', b'mickey', b'zaizee'])
        self.lst.sort(ordering='DESC', limit=3, store='l_dest')
        self.assertList(db.List('l_dest'), [b'zaizee', b'mickey', b'huey'])
    def test_as_list(self):
        """as_list(True) decodes values to str."""
        self.lst.extend(['foo', 'bar'])
        self.assertEqual(self.lst.as_list(True), ['foo', 'bar'])
        self.assertEqual(db.List('test').as_list(), [])
    def test_from_list(self):
        """List.from_list() populates a Redis list from a Python list."""
        data = list('abcdefghij')
        lst = List.from_list(db, 'test', data)
        self.assertEqual(lst.as_list(True), data)
class TestArray(WalrusTestCase):
    """Exercise the Array container (``db.Array``)."""
    def setUp(self):
        # Fresh wrapper per test; backing Redis key is 'my-arr'.
        super(TestArray, self).setUp()
        self.arr = db.Array('my-arr')
    def test_basic_apis(self):
        """append/pop, index get+set, negative indexing and containment."""
        self.arr.append('i1')
        self.arr.append('i2')
        self.arr.append('i3')
        self.arr.append('i4')
        self.assertEqual(len(self.arr), 4)
        # Indexing works. Invalid indices return None.
        self.assertEqual(self.arr[0], b'i1')
        self.assertEqual(self.arr[3], b'i4')
        self.assertTrue(self.arr[4] is None)
        # Negative indexing works and includes bounds-checking.
        self.assertEqual(self.arr[-1], b'i4')
        self.assertEqual(self.arr[-4], b'i1')
        self.assertTrue(self.arr[-5] is None)
        # pop(i) removes by index; pop() removes the last element.
        self.assertEqual(self.arr.pop(1), b'i2')
        self.assertList(self.arr, [b'i1', b'i3', b'i4'])
        self.assertEqual(self.arr.pop(), b'i4')
        self.assertList(self.arr, [b'i1', b'i3'])
        self.arr[-1] = 'iy'
        self.arr[0] = 'ix'
        self.assertList(self.arr, [b'ix', b'iy'])
        self.assertTrue('iy' in self.arr)
        self.assertFalse('i1' in self.arr)
        self.arr.extend(['foo', 'bar', 'baz'])
        self.assertList(self.arr, [b'ix', b'iy', b'foo', b'bar', b'baz'])
    def test_as_list(self):
        """as_list(True) decodes values to str."""
        self.arr.extend(['foo', 'bar'])
        self.assertEqual(self.arr.as_list(True), ['foo', 'bar'])
        self.assertEqual(db.Array('test').as_list(), [])
    def test_from_list(self):
        """Array.from_list() populates an Array from a Python list."""
        data = list('abcdefghij')
        arr = Array.from_list(db, 'test', data)
        self.assertEqual(arr.as_list(True), data)
class TestStream(WalrusTestCase):
    """Tests for Stream and ConsumerGroup containers.

    All stream tests are gated by the ``stream_test`` decorator (the
    target Redis server must support the streams commands).
    """
    def setUp(self):
        super(TestStream, self).setUp()
        # Remove any stream keys left over from earlier tests.
        db.delete('my-stream')
        db.delete('sa')
        db.delete('sb')
    def _create_test_data(self):
        # Interleave five messages across two streams with explicit,
        # monotonically increasing ids 1..5. Returns the message ids.
        return (db.xadd('sa', {'k': 'a1'}, b'1'),
                db.xadd('sb', {'k': 'b1'}, b'2'),
                db.xadd('sa', {'k': 'a2'}, b'3'),
                db.xadd('sb', {'k': 'b2'}, b'4'),
                db.xadd('sb', {'k': 'b3'}, b'5'))
    @stream_test
    def test_stream_group_info(self):
        """Verify XINFO-style metadata for streams and their groups."""
        sa = db.Stream('sa')
        ra1 = sa.add({'k': 'a1'})
        ra2 = sa.add({'k': 'a2'})
        ra3 = sa.add({'k': 'a3'})
        sb = db.Stream('sb')
        rb1 = sb.add({'k': 'b1'})
        sa_info = sa.info()
        self.assertEqual(sa_info['groups'], 0)
        self.assertEqual(sa_info['length'], 3)
        self.assertEqual(sa_info['first-entry'][0], ra1)
        self.assertEqual(sa_info['last-entry'][0], ra3)
        sb_info = sb.info()
        self.assertEqual(sb_info['groups'], 0)
        self.assertEqual(sb_info['length'], 1)
        self.assertEqual(sb_info['first-entry'][0], rb1)
        self.assertEqual(sb_info['last-entry'][0], rb1)
        self.assertEqual(sa.groups_info(), [])
        self.assertEqual(sb.groups_info(), [])
        # Create consumer groups.
        cga = db.consumer_group('cga', ['sa'])
        cga.create()
        cgab = db.consumer_group('cgab', ['sa', 'sb'])
        cgab.create()
        self.assertEqual(sa.info()['groups'], 2)
        self.assertEqual(sb.info()['groups'], 1)
        sa_groups = sa.groups_info()
        self.assertEqual(len(sa_groups), 2)
        self.assertEqual(sorted(g['name'] for g in sa_groups),
                         [b'cga', b'cgab'])
        sb_groups = sb.groups_info()
        self.assertEqual(len(sb_groups), 1)
        self.assertEqual(sb_groups[0]['name'], b'cgab')
        # Verify we can get stream info from the consumer group.
        stream_info = cgab.stream_info()
        self.assertEqual(sorted(stream_info), ['sa', 'sb'])
        # Destroy consumer group?
        cgab.destroy()
        self.assertEqual(len(sa.groups_info()), 1)
        self.assertEqual(len(sb.groups_info()), 0)
    @stream_test
    def test_consumer_group_create(self):
        """create() reports per-key whether the group was newly created."""
        cg = db.consumer_group('cg', ['sa'])
        self.assertEqual(cg.create(), {'sa': True})
        # Creating the consumer group again will report that it was not created
        # for the given key(s).
        self.assertEqual(cg.create(), {'sa': False})
        # We can register the consumer group with another key.
        cg = db.consumer_group('cg', ['sa', 'sb'])
        self.assertEqual(cg.create(), {'sa': False, 'sb': True})
    @stream_test
    def test_consumer_group_stream_creation(self):
        """create() auto-creates missing (empty) stream keys."""
        cg = db.consumer_group('cg1', ['stream-a', 'stream-b'])
        self.assertFalse(db.exists('stream-a'))
        self.assertFalse(db.exists('stream-b'))
        cg.create()
        # The streams were created (by adding and then deleting a message).
        self.assertTrue(db.exists('stream-a'))
        self.assertTrue(db.exists('stream-b'))
        # The streams that were automatically created will not have any data.
        self.assertEqual(db.xlen('stream-a'), 0)
        self.assertEqual(db.xlen('stream-b'), 0)
        # If a stream already exists that's OK.
        db.xadd('stream-c', {'data': 'dummy'}, id=b'1')
        cg = db.consumer_group('cg2', ['stream-c', 'stream-d'])
        self.assertTrue(db.exists('stream-c'))
        self.assertEqual(db.type('stream-c'), b'stream')
        self.assertFalse(db.exists('stream-d'))
        cg.create()
        self.assertTrue(db.exists('stream-d'))
        self.assertEqual(db.type('stream-c'), b'stream')
        self.assertEqual(db.type('stream-d'), b'stream')
        self.assertEqual(db.xlen('stream-c'), 1)
        self.assertEqual(db.xlen('stream-d'), 0)
        # If a stream key already exists and is a different type, fail.
        db.lpush('l1', 'item-1')
        db.hset('h1', 'key', 'data')
        db.sadd('s1', 'item-1')
        db.set('k1', 'v1')
        db.zadd('z1', {'item-1': 1.0})
        for key in ('l1', 'h1', 's1', 'k1', 'z1'):
            cg = db.consumer_group('cg-%s' % key, keys=[key])
            self.assertRaises(ValueError, cg.create)
    @stream_test
    def test_consumer_group_streams(self):
        """Per-stream item access, slicing and deletion through a group."""
        ra1, rb1, ra2, rb2, rb3 = self._create_test_data()
        cg = db.consumer_group('g1', ['sa', 'sb'])
        # Streams are exposed as attributes on the consumer group.
        self.assertEqual(cg.sa[ra1], (ra1, {b'k': b'a1'}))
        self.assertEqual(cg.sb[rb3], (rb3, {b'k': b'b3'}))
        def assertMessages(resp, expected):
            self.assertEqual([mid for mid, _ in resp], expected)
        assertMessages(cg.sa[ra1:], [ra1, ra2])
        assertMessages(cg.sa[:ra1], [ra1])
        assertMessages(cg.sa[ra2:], [ra2])
        assertMessages(cg.sa[:ra2], [ra1, ra2])
        assertMessages(cg.sa[rb3:], [])
        assertMessages(cg.sa[:b'0-1'], [])
        assertMessages(list(cg.sa), [ra1, ra2])
        assertMessages(cg.sb[rb1:], [rb1, rb2, rb3])
        assertMessages(cg.sb[rb1::2], [rb1, rb2])
        assertMessages(cg.sb[:rb1], [rb1])
        assertMessages(cg.sb[rb3:], [rb3])
        assertMessages(cg.sb[:rb3], [rb1, rb2, rb3])
        assertMessages(list(cg.sb), [rb1, rb2, rb3])
        self.assertEqual(len(cg.sa), 2)
        self.assertEqual(len(cg.sb), 3)
        # Deletion accepts one or more message ids.
        del cg.sa[ra1]
        del cg.sb[rb1, rb3]
        self.assertEqual(len(cg.sa), 1)
        self.assertEqual(len(cg.sb), 1)
        assertMessages(list(cg.sa), [ra2])
        assertMessages(list(cg.sb), [rb2])
    @stream_test
    def test_consumer_group_container(self):
        """Reading, acking and pending-inspection via the group object."""
        ra1, rb1, ra2, rb2, rb3 = self._create_test_data()
        # cg1 starts past id 1 on "sa"; cg2 starts past id 2 on "sb".
        cg1 = db.consumer_group('g1', {'sa': '1', 'sb': '0'})
        cg2 = db.consumer_group('g2', {'sb': '2'})
        self.assertEqual(cg1.create(), {'sa': True, 'sb': True})
        self.assertEqual(cg2.create(), {'sb': True})
        self.assertEqual(dict(cg1.read(count=2)), {
            b'sa': [(ra2, {b'k': b'a2'})],
            b'sb': [(rb1, {b'k': b'b1'}), (rb2, {b'k': b'b2'})]})
        self.assertEqual(cg1.sa.read(), [])
        self.assertEqual(cg1.sb.read(), [(rb3, {b'k': b'b3'})])
        # ack() returns the number of messages acknowledged.
        self.assertEqual(cg1.sa.ack(ra2), 1)
        self.assertEqual(cg1.sb.ack(rb1, rb3), 2)
        p1, = cg1.sb.pending()
        self.assertEqual(p1['message_id'], rb2)
        self.assertEqual(p1['consumer'], b'g1.c1')
        self.assertEqual(cg2.read(count=1), [
            [b'sb', [(rb2, {b'k': b'b2'})]]])
        self.assertEqual(cg2.sb.read(), [(rb3, {b'k': b'b3'})])
        self.assertEqual(cg1.destroy(), {'sa': 1, 'sb': 1})
        self.assertEqual(cg2.destroy(), {'sb': 1})
    @stream_test
    def test_consumer_group_consumers(self):
        """Pending entries track which named consumer got each message."""
        ra1, rb1, ra2, rb2, rb3 = self._create_test_data()
        cg11 = db.consumer_group('g1', {'sa': '0', 'sb': '0'}, consumer='cg11')
        cg11.create()
        cg12 = cg11.consumer('cg12')
        self.assertEqual(dict(cg11.read(count=1)), {
            b'sa': [(ra1, {b'k': b'a1'})],
            b'sb': [(rb1, {b'k': b'b1'})]})
        self.assertEqual(dict(cg12.read(count=1, block=1)), {
            b'sa': [(ra2, {b'k': b'a2'})],
            b'sb': [(rb2, {b'k': b'b2'})]})
        pa1, pa2 = cg11.sa.pending()
        self.assertEqual(pa1['message_id'], ra1)
        self.assertEqual(pa1['consumer'], b'cg11')
        self.assertEqual(pa2['message_id'], ra2)
        self.assertEqual(pa2['consumer'], b'cg12')
        pb1, pb2 = cg11.sb.pending()
        self.assertEqual(pb1['message_id'], rb1)
        self.assertEqual(pb1['consumer'], b'cg11')
        self.assertEqual(pb2['message_id'], rb2)
        self.assertEqual(pb2['consumer'], b'cg12')
    @stream_test
    def test_read_api(self):
        """Exercise Stream.read() / db.xread() count, last_id and block."""
        sa = db.Stream('a')
        sb = db.Stream('b')
        sc = db.Stream('c')
        streams = [sa, sb, sc]
        docids = []
        # Round-robin 20 messages across the three streams, ids 1..20.
        for i in range(20):
            stream = streams[i % 3]
            docids.append(stream.add({'k': 'v%s' % i}, id=i + 1))
        def assertData(ret, idxs, is_multi=False):
            # Build the expected payload from the message indexes; in
            # multi-stream mode group expectations per stream name.
            if is_multi:
                ret = dict(ret)
                accum = {}
                for idx in idxs:
                    sname = encode('abc'[idx % 3])
                    accum.setdefault(sname, [])
                    accum[sname].append((
                        docids[idx], {b'k': encode('v%s' % idx)}))
            else:
                accum = []
                for idx in idxs:
                    accum.append((docids[idx], {b'k': encode('v%s' % idx)}))
            self.assertEqual(ret, accum)
        assertData(sa.read(), [0, 3, 6, 9, 12, 15, 18])
        assertData(sc.read(), [2, 5, 8, 11, 14, 17])
        # We can specify a maximum number of records via "count".
        assertData(sa.read(3), [0, 3, 6])
        assertData(sb.read(2), [1, 4])
        assertData(sc.read(4), [2, 5, 8, 11])
        # We get the same values we read earlier.
        assertData(sa.read(2), [0, 3])
        # We can pass a minimum ID and will get newer data -- even if the ID
        # does not exist in the stream. We can also pass an exact ID and unlike
        # the range function, it is not inclusive.
        assertData(sa.read(2, last_id=docids[3]), [6, 9])
        assertData(sa.read(2, last_id=docids[4]), [6, 9])
        # If the last ID exceeds the highest ID (indicating no data), None is
        # returned. This is the same whether or not "count" is specified.
        self.assertEqual(sa.read(last_id=docids[18]), [])
        self.assertEqual(sa.read(2, last_id=docids[18]), [])
        # The count is a maximum, so up-to 2 items are return -- but since only
        # one item in the stream exceeds the given ID, we only get one result.
        assertData(sa.read(2, last_id=docids[17]), [18])
        # If a timeout is set and any stream can return a value, then that
        # value is returned immediately.
        assertData(sa.read(2, block=1, last_id=docids[17]), [18])
        assertData(sb.read(2, block=1, last_id=docids[18]), [19])
        # If no items are available and we timed-out, None is returned.
        self.assertEqual(sc.read(block=1, last_id=docids[19]), [])
        self.assertEqual(sc.read(2, block=1, last_id=docids[19]), [])
        # When multiple keys are given, up-to "count" items per stream
        # are returned.
        normalized = _normalize_stream_keys(['a', 'b', 'c'])
        res = db.xread(normalized, count=2)
        assertData(res, [0, 1, 2, 3, 4, 5], True)
        # Specify max-ids for each stream. The max value in "c" is 17, so
        # nothing will be returned for "c".
        uids = [decode(docid) for docid in docids]
        res = db.xread({'a': uids[15], 'b': uids[16], 'c': uids[17]},
                       count=3)
        assertData(res, [18, 19], True)
        # Now we limit ourselves to being able to pull only a single item from
        # stream "c".
        res = db.xread({'a': uids[18], 'b': uids[19], 'c': uids[16]})
        assertData(res, [17], True)
        # None is returned when no results are present and timeout is None or
        # if we reach the timeout.
        res = db.xread({'a': uids[18], 'b': uids[19], 'c': uids[17]})
        self.assertEqual(res, [])
        res = db.xread({'a': uids[18], 'b': uids[19], 'c': uids[17]},
                       count=1, block=1)
        self.assertEqual(res, [])
    @stream_test
    def test_set_id_stream(self):
        """set_id() advances the last-id; older ids are then rejected."""
        stream = db.Stream('my-stream')
        stream.add({'k': 'v1'}, id='3')
        self.assertTrue(stream.set_id('5'))
        self.assertRaises(Exception, stream.add, {'k': 'v2'}, id='4')
        stream.add({'k': 'v3'}, id='6')
        self.assertEqual(stream.read(), [
            (b'3-0', {b'k': b'v1'}),
            (b'6-0', {b'k': b'v3'})])
    @stream_test
    def test_basic_apis(self):
        """Slicing, deletion, id ordering, trim and get on a Stream."""
        stream = db.Stream('my-stream')
        # Item ids will be 1-0, 11-0, ...91-0.
        item_ids = [stream.add({'k': 'v%s' % i}, id='%s1' % i)
                    for i in range(10)]
        self.assertEqual(len(stream), 10)
        # Redis automatically adds the sequence number.
        self.assertEqual(item_ids[:3], [b'1-0', b'11-0', b'21-0'])
        self.assertEqual(item_ids[7:], [b'71-0', b'81-0', b'91-0'])
        def assertData(items, expected):
            self.assertEqual(items, [(item_ids[e], {b'k': encode('v%s' % e)})
                                     for e in expected])
        # The sequence number is optional if it's zero.
        assertData(stream[:'1'], [0])
        assertData(stream[:'1-0'], [0])
        assertData(stream['91':], [9])
        assertData(stream['91-0':], [9])
        assertData(stream['91-1':], [])
        # We can slice up to a value. If the sequence number is omitted it will
        # be treated as zero.
        assertData(stream[:'31'], [0, 1, 2, 3])
        assertData(stream[:'31-0'], [0, 1, 2, 3])
        assertData(stream[:'31-1'], [0, 1, 2, 3])
        # We can slice up from a value as well.
        assertData(stream['71':], [7, 8, 9])
        assertData(stream['71-0':], [7, 8, 9])
        assertData(stream['71-1':], [8, 9])
        # We can also slice between values.
        assertData(stream['21':'41'], [2, 3, 4])
        assertData(stream['21-0':'41'], [2, 3, 4])
        assertData(stream['21':'41-0'], [2, 3, 4])
        assertData(stream['21-1':'41'], [3, 4])
        assertData(stream['21-1':'41-1'], [3, 4])
        # The "step" parameter, the third part of the slice, indicates count.
        assertData(stream['41'::3], [4, 5, 6])
        assertData(stream[:'41':3], [0, 1, 2])
        assertData(stream['81'::3], [8, 9])
        # Test using in-between values. The endpoints of the slice are
        # inclusive.
        assertData(stream[:'5'], [0])
        assertData(stream[:'5-1'], [0])
        assertData(stream[:'25'], [0, 1, 2])
        assertData(stream[:'25-1'], [0, 1, 2])
        assertData(stream['25':'55'], [3, 4, 5])
        assertData(stream['55':'92'], [6, 7, 8, 9])
        assertData(stream['91':'92'], [9])
        # If we go above or below, it returns an empty list.
        assertData(stream['92':], [])
        assertData(stream[:'0'], [])
        # We can also provide a count when indexing in-between.
        assertData(stream['25':'55':2], [3, 4])
        assertData(stream['55':'92':1], [6])
        # Use "del" to remove items by ID. The sequence number will be treated
        # as zero if not provided.
        del stream['21', '41-0', '61']
        del stream['51-1']  # Has no effect since we only have 51-0.
        assertData(stream['5':'65'], [1, 3, 5])
        self.assertEqual(len(stream), 7)
        del stream['21']  # Can delete non-existent items.
        # Cannot add lower than maximum ID.
        self.assertRaises(Exception, stream.add, {'k': 'v2'}, id='90-1')
        self.assertRaises(Exception, stream.add, {'k': 'v2'}, id='91-0')
        # Adding a "1" to the sequence works:
        new_id = stream.add({'k': 'v10'}, id='91-1')
        self.assertEqual(new_id, b'91-1')
        # Length reflects the latest addition.
        self.assertEqual(len(stream), 8)
        # Range starting at 91-0 yields 91-0 and 91-1.
        data = stream['91-0':]
        self.assertEqual(len(data), 2)
        self.assertEqual([obj_id for obj_id, _ in data], [b'91-0', b'91-1'])
        # Remove the two 91-x items.
        del stream['91', '91-1']
        # Sanity check that the data was really remove.
        self.assertEqual(len(stream), 6)
        assertData(stream['61':], [7, 8])
        # Can we add an item with an id lower than 91? We've deleted it so the
        # last value is 81, but this still doesn't work (?).
        for docid in ('90', '91', '91-1'):
            self.assertRaises(Exception, stream.add, {'k': 'v9'}, id='90')
        new_id = stream.add({'k': 'v9'}, id='91-2')
        self.assertEqual(new_id, b'91-2')
        self.assertEqual(stream['91':], [(b'91-2', {b'k': b'v9'})])
        del stream['91-2']
        # Trim (exactly) to the 4 most-recent items.
        nremoved = stream.trim(4, approximate=False)
        self.assertEqual(nremoved, 2)
        assertData(stream[:], [3, 5, 7, 8])
        # Trimming again returns 0, no items removed.
        self.assertEqual(stream.trim(4, approximate=False), 0)
        # Verify we can iterate over the stream.
        assertData(list(stream), [3, 5, 7, 8])
        # Verify we can get items by id.
        d5 = stream.get('51-0')
        self.assertEqual(d5, (b'51-0', {b'k': b'v5'}))
        # Nonexistant values return None.
        self.assertTrue(stream.get('61-0') is None)
class TestBitField(WalrusTestCase):
    """Tests for the BitField container (BITFIELD command wrapper)."""
    def setUp(self):
        super(TestBitField, self).setUp()
        self.bf = db.bit_field('bf')
    def test_simple_operations(self):
        """Chained set/get/incrby operations and overflow handling."""
        resp = (self.bf
                .set('u8', 8, 255)
                .get('u8', 0)
                .get('u4', 8)  # 1111
                .get('u4', 12)  # 1111
                .get('u4', 13)  # 1110
                .execute())
        self.assertEqual(resp, [0, 0, 15, 15, 14])
        # Iterating the operation chain implicitly executes it.
        resp = (self.bf
                .set('u8', 4, 1)  # 00ff -> 001f (returns old val, 0x0f)
                .get('u16', 0)  # 001f (00011111)
                .set('u16', 0, 0))  # 001f -> 0000
        self.assertEqual(list(resp), [15, 31, 31])
        resp = (self.bf
                .incrby('u8', 8, 254)
                .get('u16', 0))
        self.assertEqual(list(resp), [254, 254])
        # Verify overflow protection works:
        resp = (self.bf
                .incrby('u8', 8, 2, 'FAIL')
                .incrby('u8', 8, 1)
                .incrby('u8', 8, 1)  # Still "FAIL".
                .get('u16', 0))
        self.assertEqual(list(resp), [None, 255, None, 255])
        self.assertEqual(self.bf.get_raw(), b'\x00\xff')
    def test_slicing(self):
        """Bit-range access via Python slice syntax."""
        resp = self.bf.set('u8', 0, 166).execute()  # 10100110
        self.assertEqual(self.bf[:8], 166)
        self.assertEqual(self.bf[:4], 10)  # 1010
        self.assertEqual(self.bf[4:8], 6)  # 0110
        self.assertEqual(self.bf[2:6], 9)  # 1001
        self.assertEqual(self.bf[6:10], 8)  # 10?? -> 1000
        self.assertEqual(self.bf[8:16], 0)  # Undefined, defaults to zero.
        # Plain indexes, open-ended stops and inverted ranges all raise.
        self.assertRaises(ValueError, lambda: self.bf[1])
        self.assertRaises(ValueError, lambda: self.bf[1:])
        self.assertRaises(ValueError, lambda: self.bf[4:1])
        self.bf[:8] = 89  # 01011001
        self.assertEqual(self.bf[:8], 89)
        # Assigning a value too large for the bit-width raises.
        def overflow():
            self.bf[:8] = 256
        self.assertRaises(ValueError, overflow)
        self.bf[:8] = 255
        self.assertEqual(self.bf[:8], 255)
        # Deleting a slice zeroes-out those bits.
        del self.bf[2:6]
        self.assertEqual(self.bf[:8], 195)  # 11000011
class TestBloomFilter(WalrusTestCase):
    """Tests for the BloomFilter container (probabilistic membership)."""
    def setUp(self):
        super(TestBloomFilter, self).setUp()
        self.bf = db.bloom_filter('bf')
    def test_bloom_filter(self):
        data = ('foo', 'bar', 'baz', 'nugget', 'this is a test', 'testing',
                'alpha', 'beta', 'delta', 'gamma')
        # Verify that the bloom-filter does not contain any of our items.
        for item in data:
            self.assertFalse(item in self.bf)
        # Add all the items to the bloom filter.
        for item in data:
            self.bf.add(item)
        # Verify that all of our items are now present.
        for item in data:
            self.assertTrue(item in self.bf)
        # Making some small modifications we can verify that all these other
        # items are not present, however.
        for item in data:
            self.assertFalse(item.upper() in self.bf)
            self.assertFalse(item.title() in self.bf)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,622
|
coleifer/walrus
|
refs/heads/master
|
/walrus/models.py
|
from copy import deepcopy
import datetime
import json
import pickle
import re
import sys
import time
import uuid
from warnings import warn
from walrus.containers import Array
from walrus.containers import Hash
from walrus.containers import HyperLogLog
from walrus.containers import List
from walrus.containers import Set
from walrus.containers import ZSet
from walrus.query import ABSOLUTE
from walrus.query import CONTINUOUS
from walrus.query import Desc
from walrus.query import Executor
from walrus.query import FTS
from walrus.query import Node
from walrus.search import Tokenizer
from walrus.utils import basestring_type
from walrus.utils import decode
from walrus.utils import decode_dict_keys
from walrus.utils import encode
from walrus.utils import PY3
from walrus.utils import unicode_type
class Field(Node):
    """
    Named attribute on a model that will hold a value of the given
    type. Fields are declared as attributes on a model class.

    Example::

        walrus_db = Database()

        class User(Model):
            __database__ = walrus_db
            __namespace__ = 'my-app'

            # Use the user's email address as the primary key.
            # All primary key fields will also get a secondary
            # index, so there's no need to specify index=True.
            email = TextField(primary_key=True)

            # Store the user's interests in a free-form text
            # field. Also create a secondary full-text search
            # index on this field.
            interests = TextField(
                fts=True,
                stemmer=True,
                min_word_length=3)

        class Note(Model):
            __database__ = walrus_db
            __namespace__ = 'my-app'

            # A note is associated with a user. We will create a
            # secondary index on this field so we can efficiently
            # retrieve all notes created by a specific user.
            user_email = TextField(index=True)

            # Store the note content in a searchable text field. Use
            # the double-metaphone algorithm to index the content.
            content = TextField(
                fts=True,
                stemmer=True,
                metaphone=True)

            # Store the timestamp the note was created automatically.
            # Note that we do not call `now()`, but rather pass the
            # function itself.
            timestamp = DateTimeField(default=datetime.datetime.now)
    """
    # Optional callable used to coerce values to/from storage form.
    _coerce = None

    def __init__(self, index=False, primary_key=False, default=None):
        """
        :param bool index: Use this field as an index. Indexed
            fields will support :py:meth:`Model.get` lookups.
        :param bool primary_key: Use this field as the primary key.
        :param default: Default value (or callable producing one)
            used when no value is provided.
        """
        # Primary-key fields are always indexed.
        self._index = index or primary_key
        self._primary_key = primary_key
        self._default = default

    def _generate_key(self):
        # Overridden by fields that can auto-generate key values
        # (e.g. AutoIncrementField, UUIDField).
        raise NotImplementedError

    def db_value(self, value):
        # Convert a Python value to its storage representation.
        if self._coerce:
            return self._coerce(value)
        return value

    def python_value(self, value):
        # Convert a stored value back to its Python representation.
        if self._coerce:
            return self._coerce(value)
        return value

    def add_to_class(self, model_class, name):
        # Bind this field to its model class; invoked by the metaclass.
        self.model_class = model_class
        self.name = name
        setattr(model_class, name, self)

    def __get__(self, instance, instance_type=None):
        # Descriptor protocol: instance access reads the per-instance
        # data dict; class-level access returns the field itself.
        if instance is not None:
            return instance._data.get(self.name)
        return self

    def __set__(self, instance, value):
        instance._data[self.name] = value

    def get_index(self, op):
        """Return the first index supporting *op*, or raise ValueError."""
        indexes = self.get_indexes()
        for index in indexes:
            if op in index.operations:
                return index
        raise ValueError('Operation %s is not supported by an index.' % op)

    def get_indexes(self):
        """
        Return a list of secondary indexes to create for the
        field. For instance, a TextField might have a full-text
        search index, whereas an IntegerField would have a scalar
        index that supported range queries.
        """
        return [AbsoluteIndex(self)]
class _ScalarField(Field):
    """Field with an orderable value; adds a continuous (range) index."""
    def get_indexes(self):
        return [AbsoluteIndex(self), ContinuousIndex(self)]
class IntegerField(_ScalarField):
    """Store integer values."""
    _coerce = int

    def db_value(self, value):
        # Missing values are stored as zero; everything else is
        # coerced to int.
        if value is None:
            return 0
        return int(value)
class AutoIncrementField(IntegerField):
    """Auto-incrementing integer primary key.

    New key values are generated by INCR-ing a per-model sequence key
    in Redis, so ids are unique across processes sharing the database.
    """
    def __init__(self, *args, **kwargs):
        # Force primary_key=True regardless of caller arguments.
        kwargs['primary_key'] = True
        # Fixed: the original wrote ``return super(...).__init__(...)``;
        # __init__ must return None, so the misleading ``return`` was
        # dropped (behavior is unchanged -- __init__ returns None).
        super(AutoIncrementField, self).__init__(*args, **kwargs)

    def _generate_key(self):
        # INCR a dedicated "<field-name>._sequence" key to obtain the
        # next id atomically.
        query_helper = self.model_class._query
        key = query_helper.make_key(self.name, '_sequence')
        return self.model_class.__database__.incr(key)
class FloatField(_ScalarField):
    """Store floating point values."""
    _coerce = float

    def db_value(self, value):
        # Missing values are stored as 0.0; everything else is
        # coerced to float.
        if value is None:
            return 0.
        return float(value)
class ByteField(Field):
    """Store arbitrary bytes."""
    def db_value(self, value):
        # Unicode text is UTF-8 encoded; None is stored as b''.
        if isinstance(value, unicode_type):
            value = value.encode('utf-8')
        elif value is None:
            value = b''
        return value
class TextField(Field):
    """
    Store unicode strings, encoded as UTF-8. :py:class:`TextField`
    also supports full-text search through the optional ``fts``
    parameter.

    .. note:: If full-text search is enabled for the field, then
        the ``index`` argument is implied.

    :param bool fts: Enable simple full-text search.
    :param bool stemmer: Use porter stemmer to process words.
    :param bool metaphone: Use the double metaphone algorithm to
        process words.
    :param str stopwords_file: File containing stopwords, one per
        line. If not specified, the default stopwords will be used.
    :param int min_word_length: Minimum length (inclusive) of word
        to be included in search index.
    """
    def __init__(self, fts=False, stemmer=True, metaphone=False,
                 stopwords_file=None, min_word_length=None, *args, **kwargs):
        super(TextField, self).__init__(*args, **kwargs)
        self._fts = fts
        self._stemmer = stemmer
        self._metaphone = metaphone
        self._stopwords_file = stopwords_file
        self._min_word_length = min_word_length
        # Enabling full-text search implies a secondary index.
        self._index = self._index or self._fts

    def db_value(self, value):
        # Text is stored UTF-8 encoded; None is stored as b''.
        return b'' if value is None else encode(value)

    def python_value(self, value):
        return decode(value)

    def get_indexes(self):
        # In addition to the base indexes, attach a full-text index
        # when fts was requested.
        indexes = super(TextField, self).get_indexes()
        if self._fts:
            indexes.append(FullTextIndex(
                self,
                self._stemmer,
                self._metaphone,
                self._stopwords_file,
                self._min_word_length))
        return indexes
class BooleanField(Field):
    """Store boolean values as the strings '1' (true) / '0' (false)."""

    def db_value(self, value):
        # Any truthy value is stored as '1', anything falsy as '0'.
        if value:
            return '1'
        return '0'

    def python_value(self, value):
        # Only the stored string '1' round-trips back to True.
        return '1' == decode(value)
class UUIDField(Field):
    """Store unique IDs. Can be used as primary key."""
    def __init__(self, **kwargs):
        # UUID fields are always indexed so they can be looked up.
        kwargs['index'] = True
        super(UUIDField, self).__init__(**kwargs)
    def db_value(self, value):
        # Store the 32-character hex form; None is stored as ''.
        return encode(value.hex if value is not None else '')
    def python_value(self, value):
        return uuid.UUID(decode(value)) if value else None
    def _generate_key(self):
        # New keys are random (version 4) UUIDs.
        return uuid.uuid4()
class DateTimeField(_ScalarField):
    """Store Python datetime objects (as a float unix timestamp)."""
    def db_value(self, value):
        if value is None:
            return 0.
        # NOTE(review): time.mktime interprets the timetuple in local
        # time and any tzinfo is dropped -- presumably naive/local
        # datetimes are the intended contract; confirm.
        timestamp = time.mktime(value.timetuple())
        micro = value.microsecond * (10 ** -6)
        return timestamp + micro
    def python_value(self, value):
        if not value:
            return None
        elif isinstance(value, (basestring_type, int, float)):
            return datetime.datetime.fromtimestamp(float(value))
        else:
            # Already a datetime -- pass through unchanged.
            return value
class DateField(DateTimeField):
    """Store Python date objects (as a float unix timestamp)."""
    def db_value(self, value):
        if value is None:
            return 0.
        # Unlike DateTimeField, no sub-second component is stored.
        return time.mktime(value.timetuple())
    def python_value(self, value):
        if not value:
            return None
        elif isinstance(value, (basestring_type, int, float)):
            # Truncate the converted timestamp to a date.
            return datetime.datetime.fromtimestamp(float(value)).date()
        else:
            return value
class JSONField(Field):
    """Store arbitrary JSON data."""
    def db_value(self, value):
        # Serialize to a UTF-8-encoded JSON string.
        return encode(json.dumps(value))
    def python_value(self, value):
        return json.loads(decode(value))
class PickledField(Field):
    """Store arbitrary Python objects (serialized with pickle).

    .. warning:: unpickling can execute arbitrary code -- only load
        values written by a trusted party.
    """
    def db_value(self, value):
        return pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    def python_value(self, value):
        return pickle.loads(value)
class _ContainerField(Field):
    """Base class for fields backed by a Redis container type.

    Container data lives in its own Redis key (derived from the
    instance's hash id) rather than on the model hash, so these
    fields expose a live container object instead of a plain value.
    """
    # Subclasses set this to the container type (Hash, List, ...).
    container_class = None

    def __init__(self, *args, **kwargs):
        super(_ContainerField, self).__init__(*args, **kwargs)
        # Container values are not stored on the model hash, so
        # primary-key and secondary-index semantics do not apply.
        if self._primary_key:
            raise ValueError('Container fields cannot be primary keys.')
        if self._index:
            raise ValueError('Container fields cannot be indexed.')

    def _get_container(self, instance):
        return self.container_class(
            self.model_class.__database__,
            self.__key__(instance))

    def __key__(self, instance):
        # Key is "container.<field-name>.<instance hash id>".
        return self.model_class._query.make_key(
            'container',
            self.name,
            instance.get_hash_id())

    def __get__(self, instance, instance_type=None):
        if instance is not None:
            # The container key embeds the primary key, so one must
            # exist before the container can be used.
            if not instance.get_id():
                raise ValueError('Model must have a primary key before '
                                 'container attributes can be accessed.')
            return self._get_container(instance)
        return self

    def __set__(self, instance, instance_type=None):
        raise ValueError('Cannot set the value of a container field.')

    def _delete(self, instance):
        self._get_container(instance).clear()
class HashField(_ContainerField):
    """Store values in a Redis hash (exposes a live ``Hash`` object)."""
    container_class = Hash
class ListField(_ContainerField):
    """Store values in a Redis list (exposes a live ``List`` object)."""
    container_class = List
class SetField(_ContainerField):
    """Store values in a Redis set (exposes a live ``Set`` object)."""
    container_class = Set
class ZSetField(_ContainerField):
    """Store values in a Redis sorted set (exposes a live ``ZSet``)."""
    container_class = ZSet
class Query(object):
    """Helper that builds namespaced Redis keys for a model class."""
    def __init__(self, model_class):
        self.model_class = model_class

    @property
    def _base_key(self):
        # Keys are prefixed "namespace|modelname:" (or just
        # "modelname:" when no namespace is configured).
        model_name = self.model_class.__name__.lower()
        if self.model_class.__namespace__:
            return '%s|%s:' % (self.model_class.__namespace__, model_name)
        return '%s:' % model_name

    def make_key(self, *parts):
        """Generate a namespaced key for the given path."""
        # Parts are joined using the model's index_separator ('.'
        # unless overridden on the model class).
        separator = getattr(self.model_class, 'index_separator', '.')
        parts = map(decode, parts)
        return '%s%s' % (self._base_key, separator.join(map(str, parts)))

    def get_primary_hash_key(self, primary_key):
        # Key of the hash holding the instance with this primary key.
        pk_field = self.model_class._fields[self.model_class._primary_key]
        return self.make_key('id', pk_field.db_value(primary_key))

    def all_index(self):
        # Set containing the hash ids of every stored instance.
        return self.model_class.__database__.Set(self.make_key('all'))
class BaseIndex(object):
    """Abstract secondary index over a single model field.

    Subclasses declare the query ``operations`` they support and
    implement key construction plus per-instance store/delete.
    """
    operations = None

    def __init__(self, field):
        self.field = field
        self.__database__ = self.field.model_class.__database__
        self.query_helper = self.field.model_class._query

    def field_value(self, instance):
        # Storage representation of the field value for *instance*.
        return self.field.db_value(getattr(instance, self.field.name))

    def get_key(self, value):
        raise NotImplementedError

    def store_instance(self, key, instance, value):
        raise NotImplementedError

    def delete_instance(self, key, instance, value):
        raise NotImplementedError

    def save(self, instance):
        # Index the instance under its current field value.
        value = self.field_value(instance)
        key = self.get_key(value)
        self.store_instance(key, instance, value)

    def remove(self, instance):
        # Remove the instance from the index for its current value.
        value = self.field_value(instance)
        key = self.get_key(value)
        self.delete_instance(key, instance, value)
class AbsoluteIndex(BaseIndex):
    """Exact-match index: one Redis set of hash ids per distinct value."""
    operations = ABSOLUTE

    def get_key(self, value):
        key = self.query_helper.make_key(
            self.field.name,
            'absolute',
            value)
        return self.__database__.Set(key)

    def store_instance(self, key, instance, value):
        key.add(instance.get_hash_id())

    def delete_instance(self, key, instance, value):
        key.remove(instance.get_hash_id())
        # Drop the set key entirely once the last member is removed.
        if len(key) == 0:
            key.clear()
class ContinuousIndex(BaseIndex):
    """Range index: a single ZSet mapping hash id -> field value."""
    operations = CONTINUOUS

    def get_key(self, value):
        # A single shared key per field; the value is the ZSet score.
        key = self.query_helper.make_key(
            self.field.name,
            'continuous')
        return self.__database__.ZSet(key)

    def store_instance(self, key, instance, value):
        key[instance.get_hash_id()] = value

    def delete_instance(self, key, instance, value):
        del key[instance.get_hash_id()]
        # Drop the zset key entirely once the last member is removed.
        if len(key) == 0:
            key.clear()
class FullTextIndex(BaseIndex):
    """Full-text search index: one ZSet of hash ids per indexed word."""
    operations = FTS

    def __init__(self, field, stemmer=True, metaphone=False,
                 stopwords_file=None, min_word_length=None):
        super(FullTextIndex, self).__init__(field)
        self.tokenizer = Tokenizer(
            stemmer=stemmer,
            metaphone=metaphone,
            stopwords_file=stopwords_file or 'stopwords.txt',
            min_word_length=min_word_length)

    def get_key(self, value):
        key = self.query_helper.make_key(
            self.field.name,
            'fts',
            value)
        return self.__database__.ZSet(key)

    def store_instance(self, key, instance, value):
        # NOTE: the ``key`` argument is unused here -- a per-word key
        # is computed for each token of the field value instead.
        hash_id = instance.get_hash_id()
        for word, score in self.tokenizer.tokenize(value).items():
            key = self.get_key(word)
            # Scores are stored negated (presumably so higher scores
            # sort first in the ascending zset -- confirm).
            key[hash_id] = -score

    def delete_instance(self, key, instance, value):
        # Re-tokenize the value and remove this instance from each
        # per-word key, dropping keys that become empty.
        hash_id = instance.get_hash_id()
        for word in self.tokenizer.tokenize(value):
            key = self.get_key(word)
            del key[hash_id]
            if len(key) == 0:
                key.clear()
class BaseModel(type):
    """Metaclass that wires up fields, indexes and the primary key."""
    def __new__(cls, name, bases, attrs):
        if not bases:
            # The implicit "NewBase" root class; nothing to process.
            return super(BaseModel, cls).__new__(cls, name, bases, attrs)
        # Support the deprecated attribute spellings.
        if 'database' in attrs:
            warn('"database" has been deprecated in favor of "__database__" '
                 'for Walrus models.', DeprecationWarning)
            attrs['__database__'] = attrs.pop('database')
        if 'namespace' in attrs:
            warn('"namespace" has been deprecated in favor of "__namespace__" '
                 'for Walrus models.', DeprecationWarning)
            attrs['__namespace__'] = attrs.pop('namespace')
        # Declarative base juju.
        ignore = set()
        primary_key = None
        # Find a primary key declared directly on this class.
        for key, value in attrs.items():
            if isinstance(value, Field) and value._primary_key:
                primary_key = (key, value)
        # Copy inherited fields; skip an inherited primary key when the
        # subclass declares its own.
        for base in bases:
            for key, value in base.__dict__.items():
                if key in attrs:
                    continue
                if isinstance(value, Field):
                    if value._primary_key and primary_key:
                        ignore.add(key)
                    else:
                        if value._primary_key:
                            primary_key = (key, value)
                        attrs[key] = deepcopy(value)
        if not primary_key:
            # Fall back to an auto-incrementing "_id" primary key.
            attrs['_id'] = AutoIncrementField()
            primary_key = ('_id', attrs['_id'])
        model_class = super(BaseModel, cls).__new__(cls, name, bases, attrs)
        model_class._data = None
        # Collect defaults, field map and indexed fields.
        defaults = {}
        fields = {}
        indexes = []
        for key, value in model_class.__dict__.items():
            if isinstance(value, Field) and key not in ignore:
                value.add_to_class(model_class, key)
                if value._index:
                    indexes.append(value)
                fields[key] = value
                if value._default is not None:
                    defaults[key] = value._default
        model_class._defaults = defaults
        model_class._fields = fields
        model_class._indexes = indexes
        model_class._primary_key = primary_key[0]
        model_class._query = Query(model_class)
        return model_class
def _with_metaclass(meta, base=object):
return meta("NewBase", (base,), {'__database__': None,
'__namespace__': None})
class Model(_with_metaclass(BaseModel)):
    """
    A collection of fields to be stored in the database. Walrus
    stores model instance data in hashes keyed by a combination of
    model name and primary key value. Instance attributes are
    automatically converted to values suitable for storage in Redis
    (i.e., datetime becomes timestamp), and vice-versa.

    Additionally, model fields can be ``indexed``, which allows
    filtering. There are three types of indexes:

    * Absolute
    * Scalar
    * Full-text search

    Absolute indexes are used for values like strings or UUIDs and
    support only equality and inequality checks.

    Scalar indexes are for numeric values as well as datetimes,
    and support equality, inequality, and greater or less-than.

    The final type of index, FullText, can only be used with the
    :py:class:`TextField`. FullText indexes allow search using
    the ``match()`` method. For more info, see :ref:`fts`.
    """
    #: **Required**: the :py:class:`Database` instance to use to
    #: persist model data.
    __database__ = None

    #: **Optional**: namespace to use for model data.
    __namespace__ = None

    #: **Required**: character to use as a delimiter for indexes, default "."
    index_separator = '.'

    def __init__(self, *args, **kwargs):
        # Load field defaults first so keyword arguments override them.
        self._data = {}
        self._load_default_dict()
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, self.get_id())

    def _load_default_dict(self):
        # Apply each field's declared default; callables are invoked so
        # mutable defaults are not shared across instances.
        for field_name, default in self._defaults.items():
            if callable(default):
                default = default()
            setattr(self, field_name, default)

    def incr(self, field, incr_by=1):
        """
        Increment the value stored in the given field by the specified
        amount. Any indexes will be updated at the time ``incr()`` is
        called.

        :param Field field: A field instance.
        :param incr_by: An ``int`` or ``float``.

        Example:

        .. code-block:: python

            # Retrieve a page counter object for the given URL.
            page_count = PageCounter.get(PageCounter.url == url)

            # Update the hit count, persisting to the database and
            # updating secondary indexes in one go.
            page_count.incr(PageCounter.hits)
        """
        model_hash = self.to_hash()

        # Remove the value from the index.
        for index in field.get_indexes():
            index.remove(self)

        # Use the atomic Redis increment appropriate for the value type.
        if isinstance(incr_by, int):
            new_val = model_hash.incr(field.name, incr_by)
        else:
            new_val = model_hash.incr_float(field.name, incr_by)
        setattr(self, field.name, new_val)

        # Re-index the new value.
        for index in field.get_indexes():
            index.save(self)

        return new_val

    def get_id(self):
        """
        Return the primary key for the model instance. If the
        model is unsaved, then this value will be ``None``.
        """
        try:
            return getattr(self, self._primary_key)
        except KeyError:
            return None

    def get_hash_id(self):
        # Redis key of the hash holding this instance's data.
        return self._query.get_primary_hash_key(self.get_id())

    def _get_data_dict(self):
        # Serialize populated fields into their storage (db) values.
        data = {}
        for name, field in self._fields.items():
            if name in self._data:
                data[name] = field.db_value(self._data[name])
        return data

    def to_hash(self):
        """
        Return a :py:class:`Hash` instance corresponding to the
        raw model data.
        """
        return self.__database__.Hash(self.get_hash_id())

    @classmethod
    def create(cls, **kwargs):
        """
        Create a new model instance and save it to the database.
        Values are passed in as keyword arguments.

        Example::

            user = User.create(first_name='Charlie', last_name='Leifer')
        """
        instance = cls(**kwargs)
        instance.save(_is_create=True)
        return instance

    @classmethod
    def all(cls):
        """
        Return an iterator that successively yields saved model
        instances. Models are saved in an unordered :py:class:`Set`,
        so the iterator will return them in arbitrary order.

        Example::

            for note in Note.all():
                print note.content

        To return models in sorted order, see :py:meth:`Model.query`.
        Example returning all records, sorted newest to oldest::

            for note in Note.query(order_by=Note.timestamp.desc()):
                print note.timestamp, note.content
        """
        for result in cls._query.all_index():
            yield cls.load(result, convert_key=False)

    @classmethod
    def query(cls, expression=None, order_by=None):
        """
        Return model instances matching the given expression (if
        specified). Additionally, matching instances can be returned
        sorted by field value.

        Example::

            # Get administrators sorted by username.
            admin_users = User.query(
                (User.admin == True),
                order_by=User.username)

            # List blog entries newest to oldest.
            entries = Entry.query(order_by=Entry.timestamp.desc())

            # Perform a complex filter.
            values = StatData.query(
                (StatData.timestamp < datetime.date.today()) &
                ((StatData.type == 'pv') | (StatData.type == 'cv')))

        :param expression: A boolean expression to filter by.
        :param order_by: A field whose value should be used to
            sort returned instances.
        """
        if expression is not None:
            executor = Executor(cls.__database__)
            result = executor.execute(expression)
        else:
            result = cls._query.all_index()

        if order_by is not None:
            desc = False
            if isinstance(order_by, Desc):
                desc = True
                order_by = order_by.node

            # Non-scalar fields must be sorted lexicographically (alpha).
            alpha = not isinstance(order_by, _ScalarField)
            result = cls.__database__.sort(
                result.key,
                by='*->%s' % order_by.name,
                alpha=alpha,
                desc=desc)
        elif isinstance(result, ZSet):
            # ZSet results iterate high-to-low by default here.
            result = result.iterator(reverse=True)

        for hash_id in result:
            yield cls.load(hash_id, convert_key=False)

    @classmethod
    def query_delete(cls, expression=None):
        """
        Delete model instances matching the given expression (if
        specified). If no expression is provided, then all model instances
        will be deleted.

        :param expression: A boolean expression to filter by.
        """
        if expression is not None:
            executor = Executor(cls.__database__)
            result = executor.execute(expression)
        else:
            result = cls._query.all_index()

        for hash_id in result:
            cls.load(hash_id, convert_key=False).delete()

    @classmethod
    def get(cls, expression):
        """
        Retrieve the model instance matching the given expression.
        If the number of matching results is not equal to one, then
        a ``ValueError`` will be raised.

        :param expression: A boolean expression to filter by.
        :returns: The matching :py:class:`Model` instance.
        :raises: ``ValueError`` if result set size is not 1.
        """
        executor = Executor(cls.__database__)
        result = executor.execute(expression)
        if len(result) != 1:
            raise ValueError('Got %s results, expected 1.' % len(result))
        return cls.load(result._first_or_any(), convert_key=False)

    @classmethod
    def load(cls, primary_key, convert_key=True):
        """
        Retrieve a model instance by primary key.

        :param primary_key: The primary key of the model instance.
        :param convert_key: When True, ``primary_key`` is a raw primary
            key value that must first be converted to a hash key; when
            False it is already a hash key.
        :returns: Corresponding :py:class:`Model` instance.
        :raises: ``KeyError`` if object with given primary key does
            not exist.
        """
        if convert_key:
            primary_key = cls._query.get_primary_hash_key(primary_key)
        if not cls.__database__.hash_exists(primary_key):
            raise KeyError('Object not found.')
        raw_data = cls.__database__.hgetall(primary_key)
        if PY3:
            # Redis returns bytes keys under Python 3.
            raw_data = decode_dict_keys(raw_data)

        data = {}
        for name, field in cls._fields.items():
            if isinstance(field, _ContainerField):
                # Container fields live in their own Redis keys, not in
                # the model hash.
                continue
            elif name in raw_data:
                data[name] = field.python_value(raw_data[name])
            else:
                data[name] = None

        return cls(**data)

    @classmethod
    def count(cls):
        """
        Return the number of objects in the given collection.
        """
        return len(cls._query.all_index())

    def delete(self, for_update=False):
        """
        Delete the given model instance.

        :param for_update: Internal flag set by :py:meth:`save` when the
            delete precedes a re-save; container-field data is preserved
            in that case.
        """
        hash_key = self.get_hash_id()
        try:
            # Load the persisted copy so indexes are removed using the
            # stored values, not unsaved in-memory changes.
            original_instance = self.load(hash_key, convert_key=False)
        except KeyError:
            return

        # Remove from the `all` index.
        all_index = self._query.all_index()
        all_index.remove(hash_key)

        # Remove from the secondary indexes.
        for field in self._indexes:
            for index in field.get_indexes():
                index.remove(original_instance)

        if not for_update:
            for field in self._fields.values():
                if isinstance(field, _ContainerField):
                    field._delete(self)

        # Remove the object itself.
        self.__database__.delete(hash_key)

    def save(self, _is_create=False):
        """
        Save the given model instance. If the model does not have
        a primary key value, Walrus will call the primary key field's
        ``generate_key()`` method to attempt to generate a suitable
        value.
        """
        pk_field = self._fields[self._primary_key]
        if not self._data.get(self._primary_key):
            setattr(self, self._primary_key, pk_field._generate_key())
            require_delete = False
        else:
            # Existing instances are deleted first so stale index
            # entries are cleaned up before re-indexing.
            require_delete = not _is_create

        if require_delete:
            self.delete(for_update=True)

        data = self._get_data_dict()
        hash_obj = self.to_hash()
        hash_obj.clear()
        hash_obj.update(data)

        all_index = self._query.all_index()
        all_index.add(self.get_hash_id())

        for field in self._indexes:
            for index in field.get_indexes():
                index.save(self)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,623
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/counter.py
|
from walrus.tests.base import WalrusTestCase
from walrus.tests.base import db
class TestCounter(WalrusTestCase):
    """Exercise basic Counter arithmetic against the test database."""

    def test_counter(self):
        first = db.counter('counter-a')
        second = db.counter('counter-b')

        # Fresh counters start at zero; incr() returns the new value.
        self.assertEqual(first.value(), 0)
        self.assertEqual(first.incr(), 1)
        self.assertEqual(first.incr(3), 4)
        self.assertEqual(first.value(), 4)

        # In-place and binary arithmetic operators.
        self.assertEqual(second.value(), 0)
        second += 3
        self.assertEqual(second.value(), 3)
        second = second + first
        self.assertEqual(second.value(), 7)
        second = second - 5
        self.assertEqual(second.value(), 2)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
23,624
|
coleifer/walrus
|
refs/heads/master
|
/walrus/tests/__init__.py
|
import sys
import unittest
from walrus.tests.autocomplete import *
from walrus.tests.cache import *
from walrus.tests.containers import *
from walrus.tests.counter import *
from walrus.tests.database import *
from walrus.tests.fts import *
from walrus.tests.graph import *
from walrus.tests.lock import *
from walrus.tests.models import *
from walrus.tests.rate_limit import *
from walrus.tests.streams import *
# Optional "tusk" backends (alternative storage engines). Each import is
# skipped silently when the backing library is not installed, so the core
# test suite still runs without them.
try:
    from walrus.tusks.ledisdb import TestWalrusLedis
except ImportError:
    pass

try:
    from walrus.tusks.rlite import TestWalrusLite
except ImportError:
    pass

try:
    from walrus.tusks.vedisdb import TestWalrusVedis
except ImportError:
    pass

if __name__ == '__main__':
    unittest.main(argv=sys.argv)
|
{"/walrus/streams.py": ["/walrus/containers.py", "/walrus/utils.py"], "/walrus/tests/models.py": ["/walrus/__init__.py", "/walrus/query.py", "/walrus/tests/base.py"], "/walrus/tusks/vedisdb.py": ["/walrus/__init__.py"], "/examples/twitter/app.py": ["/walrus/__init__.py"], "/walrus/containers.py": ["/walrus/utils.py"], "/runtests.py": ["/walrus/__init__.py"], "/walrus/autocomplete.py": ["/walrus/utils.py"], "/walrus/tests/streams.py": ["/walrus/streams.py", "/walrus/tests/base.py"], "/walrus/fts.py": ["/walrus/query.py", "/walrus/utils.py", "/walrus/search/__init__.py"], "/walrus/tests/graph.py": ["/walrus/tests/base.py"], "/walrus/tests/autocomplete.py": ["/walrus/tests/base.py"], "/walrus/tusks/ledisdb.py": ["/walrus/__init__.py", "/walrus/containers.py", "/walrus/tusks/helpers.py"], "/walrus/cache.py": ["/walrus/utils.py"], "/walrus/query.py": ["/walrus/containers.py"], "/walrus/tusks/helpers.py": ["/walrus/__init__.py"], "/walrus/database.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/rate_limit.py", "/walrus/streams.py"], "/walrus/graph.py": ["/walrus/utils.py"], "/walrus/search/__init__.py": ["/walrus/search/metaphone.py", "/walrus/utils.py"], "/examples/work_queue.py": ["/walrus/__init__.py"], "/walrus/tests/cache.py": ["/walrus/tests/base.py"], "/walrus/tests/base.py": ["/walrus/__init__.py"], "/walrus/tusks/rlite.py": ["/walrus/__init__.py", "/walrus/tusks/helpers.py"], "/walrus/tests/lock.py": ["/walrus/tests/base.py"], "/walrus/tests/containers.py": ["/walrus/containers.py", "/walrus/tests/base.py", "/walrus/utils.py"], "/walrus/models.py": ["/walrus/containers.py", "/walrus/query.py", "/walrus/search/__init__.py", "/walrus/utils.py"], "/walrus/tests/counter.py": ["/walrus/tests/base.py"], "/walrus/tests/__init__.py": ["/walrus/tests/autocomplete.py", "/walrus/tests/cache.py", "/walrus/tests/containers.py", "/walrus/tests/counter.py", 
"/walrus/tests/database.py", "/walrus/tests/fts.py", "/walrus/tests/graph.py", "/walrus/tests/lock.py", "/walrus/tests/models.py", "/walrus/tests/rate_limit.py", "/walrus/tests/streams.py", "/walrus/tusks/ledisdb.py", "/walrus/tusks/rlite.py", "/walrus/tusks/vedisdb.py"], "/walrus/tests/database.py": ["/walrus/containers.py", "/walrus/tests/base.py"], "/walrus/tests/rate_limit.py": ["/walrus/rate_limit.py", "/walrus/tests/base.py"], "/walrus/tests/fts.py": ["/walrus/tests/base.py"], "/examples/diary.py": ["/walrus/__init__.py"], "/walrus/__init__.py": ["/walrus/autocomplete.py", "/walrus/cache.py", "/walrus/containers.py", "/walrus/counter.py", "/walrus/database.py", "/walrus/fts.py", "/walrus/graph.py", "/walrus/lock.py", "/walrus/models.py", "/walrus/rate_limit.py", "/walrus/streams.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.