prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Here is some code:\n```python\nprov_cap = {\n \"Hebei\": \"Shijiazhuang\",\n \"Shanxi\": \"Taiyuan\",\n \"Liaoning\": \"Shenyang\",\n \"Jilin\": \"Changchun\",\n \"Heilongjiang\": \"Harbin\",\n \"Jiangsu\": \"Nanjing\",\n \"Zhejiang\": \"Hangzhou\",\n \"Anhui\": \"Hefei\",\... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nprov_cap = {\n \"Hebei\": \"Shijiazhuang\",\n \"Shanxi\": \"Taiyuan\",\n \"Liaoning\": \"Shenyang\",\n \"Jilin\": \"Changchun\",\n \"Heilongjiang\": \"Harbin\",\n \"Jiangsu\": \"Nanjing\",\n \"Zhejiang\": \"Hangzhou\",\n \"Anhu... | ```python
prov_cap = {
"Hebei": "Shijiazhuang",
"Shanxi": "Taiyuan",
"Liaoning": "Shenyang",
"Jilin": "Changchun",
"Heilongjiang": "Harbin",
"Jiangsu": "Nanjing",
"Zhejiang": "Hangzhou",
"Anhui": "Hefei",
"Fujian": "Fuzhou",
"Jiangxi": "Nanchang",
"Shandong": "Jinan",
"Henan": "Zhengzhou",
"Hubei": "Wuhan",
"Hunan": "Changsha",
"Guangdong": "Guangzhou",
"Hainan": "Haikou",
"Sichuan": "Chengdu",
"Guizhou": "Guiyang",
"Yunnan": "Kunming",
"Shaanxi": "Xi'an",
"Gansu": "Lanzhou",
"Qinghai": "Xining",
"Taiwan": "Taipei"
}
autregion = {
"Inner Mongolia": "NM",
"Guangxi Zhuang": "GX",
"Tibet": "XZ",
"Ningxia Hui": "NX",
"Xinjiang Uyghur": "XJ"
}
autregion_capitals = {
"Inner Mongolia": "Hohhot",
"Guangxi Zhuang": "Nanning",
"Tibet": "Lhasa",
"Ningxia Hui": "Yinchuan",
"Xinjiang Uyghur": "Urumqi"
}
admregion = {
"Hong Kong": "HK",
"Macau": "MC"
}
admregion_capitals = {
"Hong Kong": "Hong Kong",
"Macau": "Macau"
}
municipality = {
"Beijing": "BJ",
"Tianjin": "TJ",
"Shanghai": "SH",
"Chongqing": "CQ"
}
mun_capitals = {
"Beijing": "Beijing",
"Tianjin": "Tianjin",
"Shanghai": "Shanghai",
"Chongqing": "Chongqing"
}
``` |
[
{
"content": "```python\nimport py\nfrom util import assert_outcomes\n\npytest_plugins = 'pytester'\n\n\ndef test_shared_behavior(testdir):\n a_dir = testdir.mkpydir('a_dir')\n a_dir.join('test_something.py').write(py.code.Source(\"\"\"\n from pytest import fixture\n from pytest_describe imp... | [
{
"content": "<|memory_start|>```python\nimport py\nfrom util import assert_outcomes\n\npytest_plugins = 'pytester'\n\n\ndef test_shared_behavior(testdir):\n a_dir = testdir.mkpydir('a_dir')\n a_dir.join('test_something.py').write(py.code.Source(\"\"\"\n from pytest import fixture\n from pyt... | ```python
import py
from util import assert_outcomes
pytest_plugins = 'pytester'
def test_shared_behavior(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
def a_duck():
def it_quacks(sound):
assert sound == "quack"
@behaves_like(a_duck)
def describe_something_that_quacks():
@fixture
def sound():
return "quack"
@behaves_like(a_duck)
def describe_something_that_barks():
@fixture
def sound():
return "bark"
"""))
result = testdir.runpytest()
assert_outcomes(result, failed=1, passed=1)
def test_multiple_shared_behaviors(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
def a_duck():
def it_quacks(sound):
assert sound == "quack"
def a_bird():
def it_flies(medium):
assert medium == "air"
def describe_birds():
@fixture
def medium():
return "air"
@behaves_like(a_duck, a_bird)
def describe_something_that_quacks():
@fixture
def sound():
return "quack"
@behaves_like(a_duck, a_bird)
def describe_something_that_barks():
@fixture
def sound():
return "bark"
"""))
result = testdir.runpytest()
assert_outcomes(result, failed=1, passed=3)
def test_fixture(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
def a_duck():
@fixture
def sound():
return "quack"
def it_quacks(sound):
assert sound == "quack"
@behaves_like(a_duck)
def describe_a_normal_duck():
pass
"""))
result = testdir.runpytest('-v')
assert_outcomes(result, passed=1)
def test_override_fixture(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
def a_duck():
@fixture
def sound():
return "quack"
def it_quacks(sound):
assert sound == "quack"
@behaves_like(a_duck)
def describe_something_that_barks():
@fixture
def sound():
return "bark"
"""))
result = testdir.runpytest('-v')
assert_outcomes(result, failed=1)
def test_name_mangling(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
def thing():
foo = 42
def it_does_something():
assert foo == 42
@behaves_like(thing)
def describe_something():
foo = 4242
def it_does_something():
assert foo == 4242
"""))
result = testdir.runpytest('-v')
assert_outcomes(result, passed=2)
def test_nested_name_mangling(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
def thing():
def it_does_something():
pass
def describe_thing():
def it_does_something():
pass
def describe_thing():
def it_does_something():
pass
@behaves_like(thing)
def describe_thing():
def it_does_something():
pass
def describe_thing():
def it_does_something():
pass
"""))
result = testdir.runpytest('-v')
assert_outcomes(result, passed=5)
def test_evaluated_once(testdir):
a_dir = testdir.mkpydir('a_dir')
a_dir.join('test_something.py').write(py.code.Source("""
from pytest import fixture
from pytest_describe import behaves_like
count = 0
def thing():
global count
count += 1
def is_evaluated_once():
assert count == 1
@behaves_like(thing)
def describe_something():
pass
@behaves_like(thing)
def describe_something_else():
pass
"""))
result = testdir.runpytest('-v')
assert_outcomes(result, passed=2)
``` |
[
{
"content": "```python\nfrom django.db import migrations\nimport utilities.fields\nimport utilities.ordering\n\n\ndef _update_model_names(model):\n # Update each unique field value in bulk\n for name in model.objects.values_list('name', flat=True).order_by('name').distinct():\n model.objects.filte... | [
{
"content": "<|memory_start|>```python\nfrom django.db import migrations\nimport utilities.fields\nimport utilities.ordering\n\n\ndef _update_model_names(model):\n # Update each unique field value in bulk\n for name in model.objects.values_list('name', flat=True).order_by('name').distinct():\n mod... | ```python
from django.db import migrations
import utilities.fields
import utilities.ordering
def _update_model_names(model):
# Update each unique field value in bulk
for name in model.objects.values_list('name', flat=True).order_by('name').distinct():
model.objects.filter(name=name).update(_name=utilities.ordering.naturalize(name, max_length=100))
def naturalize_consoleports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'ConsolePort'))
def naturalize_consoleserverports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'ConsoleServerPort'))
def naturalize_powerports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'PowerPort'))
def naturalize_poweroutlets(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'PowerOutlet'))
def naturalize_frontports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'FrontPort'))
def naturalize_rearports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'RearPort'))
def naturalize_devicebays(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'DeviceBay'))
class Migration(migrations.Migration):
dependencies = [
('dcim', '0092_fix_rack_outer_unit'),
]
operations = [
migrations.AlterModelOptions(
name='consoleport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='consoleserverport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='devicebay',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='frontport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='inventoryitem',
options={'ordering': ('device__id', 'parent__id', '_name')},
),
migrations.AlterModelOptions(
name='poweroutlet',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='powerport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='rearport',
options={'ordering': ('device', '_name')},
),
migrations.AddField(
model_name='consoleport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='consoleserverport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='devicebay',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='frontport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='inventoryitem',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='poweroutlet',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='powerport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='rearport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.RunPython(
code=naturalize_consoleports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_consoleserverports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_powerports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_poweroutlets,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_frontports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_rearports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_devicebays,
reverse_code=migrations.RunPython.noop
),
]
``` |
[
{
"content": "Here is the code block:\n```python\nfrom ipv8.keyvault.crypto import default_eccrypto\n\nfrom pony.orm import db_session\n\nfrom tribler_core.modules.metadata_store.orm_bindings.channel_node import COMMITTED, TODELETE, UPDATED\nfrom tribler_core.modules.metadata_store.restapi.metadata_endpoint imp... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nfrom ipv8.keyvault.crypto import default_eccrypto\n\nfrom pony.orm import db_session\n\nfrom tribler_core.modules.metadata_store.orm_bindings.channel_node import COMMITTED, TODELETE, UPDATED\nfrom tribler_core.modules.metadata_store.restapi.metad... | ```python
from ipv8.keyvault.crypto import default_eccrypto
from pony.orm import db_session
from tribler_core.modules.metadata_store.orm_bindings.channel_node import COMMITTED, TODELETE, UPDATED
from tribler_core.modules.metadata_store.restapi.metadata_endpoint import TORRENT_CHECK_TIMEOUT
from tribler_core.modules.torrent_checker.torrent_checker import TorrentChecker
from tribler_core.restapi.base_api_test import AbstractApiTest
from tribler_core.tests.tools.base_test import MockObject
from tribler_core.tests.tools.tools import timeout
from tribler_core.tests.tools.tracker.udp_tracker import UDPTracker
from tribler_core.utilities.random_utils import random_infohash
from tribler_core.utilities.unicode import hexlify
from tribler_core.utilities.utilities import has_bep33_support, succeed
class BaseTestMetadataEndpoint(AbstractApiTest):
async def setUp(self):
await super(BaseTestMetadataEndpoint, self).setUp()
self.infohashes = []
torrents_per_channel = 5
# Add a few channels
with db_session:
for ind in range(10):
self.ext_key = default_eccrypto.generate_key('curve25519')
channel = self.session.mds.ChannelMetadata(
title='channel%d' % ind,
subscribed=(ind % 2 == 0),
num_entries=torrents_per_channel,
infohash=random_infohash(),
id_=123,
sign_with=self.ext_key,
)
for torrent_ind in range(torrents_per_channel):
rand_infohash = random_infohash()
self.infohashes.append(rand_infohash)
self.session.mds.TorrentMetadata(
origin_id=channel.id_,
title='torrent%d' % torrent_ind,
infohash=rand_infohash,
sign_with=self.ext_key,
)
def setUpPreSession(self):
super(BaseTestMetadataEndpoint, self).setUpPreSession()
self.config.set_chant_enabled(True)
class TestMetadataEndpoint(AbstractApiTest):
def setUpPreSession(self):
super(AbstractApiTest, self).setUpPreSession()
self.config.set_chant_enabled(True)
@timeout(10)
async def test_update_multiple_metadata_entries(self):
"""
Test updating attributes of several metadata entities at once with a PATCH request to REST API
"""
# Test handling the wrong/empty JSON gracefully
await self.do_request('metadata', expected_code=400, request_type='PATCH', post_data='')
# Test trying update a non-existing entry
await self.do_request(
'metadata',
post_data=[{'public_key': hexlify(b'1' * 64), 'id': 111}],
expected_code=404,
request_type='PATCH',
)
with db_session:
md1 = self.session.mds.TorrentMetadata(title='old1', infohash=random_infohash())
md2 = self.session.mds.ChannelMetadata(title='old2', infohash=random_infohash(), subscribed=False)
NEW_NAME1 = "updated1"
NEW_NAME2 = "updated2"
patch_data = [
{'public_key': hexlify(md1.public_key), 'id': md1.id_, 'title': NEW_NAME1},
{'public_key': hexlify(md2.public_key), 'id': md2.id_, 'title': NEW_NAME2, 'subscribed': 1},
]
await self.do_request('metadata', post_data=patch_data, expected_code=200, request_type='PATCH')
with db_session:
entry1 = self.session.mds.ChannelNode.get(rowid=md1.rowid)
self.assertEqual(NEW_NAME1, entry1.title)
self.assertEqual(UPDATED, entry1.status)
entry2 = self.session.mds.ChannelNode.get(rowid=md2.rowid)
self.assertEqual(NEW_NAME2, entry2.title)
self.assertEqual(UPDATED, entry2.status)
self.assertTrue(entry2.subscribed)
@timeout(10)
async def test_delete_multiple_metadata_entries(self):
"""
Test deleting multiple entries with JSON REST API
"""
with db_session:
md1 = self.session.mds.TorrentMetadata(title='old1', infohash=random_infohash())
md2 = self.session.mds.TorrentMetadata(title='old2', infohash=random_infohash())
patch_data = [
{'public_key': hexlify(md1.public_key), 'id': md1.id_},
{'public_key': hexlify(md2.public_key), 'id': md2.id_},
]
await self.do_request('metadata', post_data=patch_data, expected_code=200, request_type='DELETE')
with db_session:
self.assertFalse(self.session.mds.ChannelNode.select().count())
class TestSpecificMetadataEndpoint(AbstractApiTest):
def setUpPreSession(self):
super(AbstractApiTest, self).setUpPreSession()
self.config.set_chant_enabled(True)
async def test_update_entry_missing_json(self):
"""
Test whether an error is returned if we try to change entry with the REST API and missing JSON data
"""
channel_pk = hexlify(self.session.mds.ChannelNode._my_key.pub().key_to_bin()[10:])
await self.do_request('metadata/%s/123' % channel_pk, expected_code=400, request_type='PATCH', post_data='')
async def test_update_entry_not_found(self):
"""
Test whether an error is returned if we try to change some metadata entry that is not there
"""
patch_params = {'subscribed': '1'}
await self.do_request('metadata/aa/123', expected_code=404, request_type='PATCH', post_data=patch_params)
@timeout(10)
async def test_update_entry_status_and_name(self):
"""
Test whether an error is returned if try to modify both the status and name of a torrent
"""
with db_session:
chan = self.session.mds.ChannelMetadata.create_channel(title="bla")
patch_params = {'status': TODELETE, 'title': 'test'}
await self.do_request(
'metadata/%s/%i' % (hexlify(chan.public_key), chan.id_),
request_type='PATCH',
post_data=patch_params,
expected_code=400,
)
@timeout(10)
async def test_update_entry(self):
"""
Test updating a metadata entry with REST API
"""
new_title = 'bla2'
new_tags = "Compressed"
with db_session:
chan = self.session.mds.ChannelMetadata.create_channel(title="bla")
chan.status = COMMITTED
patch_params = {'title': new_title, 'tags': new_tags}
result = await self.do_request(
'metadata/%s/%i' % (hexlify(chan.public_key), chan.id_),
request_type='PATCH',
post_data=patch_params,
expected_code=200,
)
self.assertEqual(new_title, result['name'])
self.assertEqual(new_tags, result['category'])
with db_session:
chan = self.session.mds.ChannelMetadata.get_my_channels().first()
self.assertEqual(chan.status, UPDATED)
self.assertEqual(chan.tags, new_tags)
self.assertEqual(chan.title, new_title)
@timeout(10)
async def test_get_entry(self):
"""
Test getting an entry with REST API GET request
"""
with db_session:
chan = self.session.mds.TorrentMetadata(
title="bla", infohash=random_infohash(), tracker_info="http://sometracker.local/announce"
)
chan.status = COMMITTED
await self.do_request(
'metadata/%s/%i' % (hexlify(chan.public_key), chan.id_),
expected_json=chan.to_simple_dict(include_trackers=True),
)
@timeout(10)
async def test_get_entry_not_found(self):
"""
Test trying to get a non-existing entry with the REST API GET request
"""
await self.do_request('metadata/%s/%i' % (hexlify(b"0" * 64), 123), expected_code=404)
class TestTorrentHealthEndpoint(AbstractApiTest):
def setUpPreSession(self):
super(TestTorrentHealthEndpoint, self).setUpPreSession()
self.config.set_chant_enabled(True)
async def setUp(self):
await super(TestTorrentHealthEndpoint, self).setUp()
self.udp_port = self.get_port()
self.udp_tracker = UDPTracker(self.udp_port)
async def tearDown(self):
self.session.dlmgr = None
if self.udp_tracker:
await self.udp_tracker.stop()
await super(TestTorrentHealthEndpoint, self).tearDown()
@timeout(5)
async def test_check_torrent_health(self):
"""
Test the endpoint to fetch the health of a chant-managed, infohash-only torrent
"""
infohash = b'a' * 20
tracker_url = 'udp://localhost:%s/announce' % self.udp_port
self.udp_tracker.tracker_info.add_info_about_infohash(infohash, 12, 11, 1)
with db_session:
tracker_state = self.session.mds.TrackerState(url=tracker_url)
torrent_state = self.session.mds.TorrentState(trackers=tracker_state, infohash=infohash)
self.session.mds.TorrentMetadata(
infohash=infohash, title='ubuntu-torrent.iso', size=42, tracker_info=tracker_url, health=torrent_state
)
url = 'metadata/torrents/%s/health?timeout=%s&refresh=1' % (hexlify(infohash), TORRENT_CHECK_TIMEOUT)
# Initialize the torrent checker
self.session.torrent_checker = TorrentChecker(self.session)
await self.session.torrent_checker.initialize()
# Add mock DHT response - we both need to account for the case when BEP33 is used and the old lookup method
self.session.dlmgr = MockObject()
self.session.dlmgr.get_metainfo = lambda _, **__: succeed(None)
self.session.dlmgr.dht_health_manager = MockObject()
dht_health_dict = {"infohash": hexlify(infohash), "seeders": 1, "leechers": 2}
self.session.dlmgr.dht_health_manager.get_health = lambda *_, **__: succeed({"DHT": [dht_health_dict]})
self.session.dlmgr.get_channel_downloads = lambda: []
# Left for compatibility with other tests in this object
await self.udp_tracker.start()
json_response = await self.do_request(url)
self.assertIn("health", json_response)
self.assertIn("udp://localhost:%s" % self.udp_port, json_response['health'])
if has_bep33_support():
self.assertIn("DHT", json_response['health'])
json_response = await self.do_request(url + '&nowait=1')
self.assertDictEqual(json_response, {'checking': '1'})
@timeout(5)
async def test_check_torrent_query(self):
"""
Test that the endpoint responds with an error message if the timeout parameter has a wrong value
"""
infohash = b'a' * 20
await self.do_request("metadata/torrents/%s/health?timeout=wrong_value&refresh=1" % infohash,
expected_code=400)
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/bin/sh -\n\n# Copyright 2011-2012 James McCauley\n#\n# This file is part of POX.\n#\n# POX is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/bin/sh -\n\n# Copyright 2011-2012 James McCauley\n#\n# This file is part of POX.\n#\n# POX is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as publishe... | ```python
#!/bin/sh -
# Copyright 2011-2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# If you have PyPy 1.6+ in a directory called pypy alongside pox.py, we
# use it.
# Otherwise, we try to use a Python interpreter called python2.7, which
# is a good idea if you're using Python from MacPorts, for example.
# We fall back to just "python" and hope that works.
#TODO: Make runnable by itself (paths need adjusting, etc.).
''''true
export OPT="-u -O"
export FLG=""
if [ "$(basename $0)" = "debug-pox.py" ]; then
export OPT=""
export FLG="--debug"
fi
if [ -x pypy/bin/pypy ]; then
exec pypy/bin/pypy $OPT "$0" $FLG "$@"
fi
if type python2.7 > /dev/null; then
exec python2.7 $OPT "$0" $FLG "$@"
fi
exec python $OPT "$0" $FLG "$@"
'''
from __future__ import print_function
import logging
import logging.config
import os
import sys
import traceback
import time
from pox.core import core
import pox.openflow
import pox.openflow.of_01
from pox.lib.util import str_to_bool
# Function to run on main thread
_main_thread_function = None
try:
import __pypy__
except ImportError:
__pypy__ = None
def _do_import (name):
"""
Try to import the named component.
Returns its module name if it was loaded or False on failure.
"""
def show_fail ():
traceback.print_exc()
print("Could not import module:", name)
def do_import2 (base_name, names_to_try):
if len(names_to_try) == 0:
print("Module not found:", base_name)
return False
name = names_to_try.pop(0)
if name in sys.modules:
return name
try:
__import__(name, globals(), locals())
return name
except ImportError:
# There are two cases why this might happen:
# 1. The named module could not be found
# 2. Some dependent module (import foo) or some dependent
# name-in-a-module (e.g., from foo import bar) could not be found.
# If it's the former, we might try a name variation (e.g., without
# a leading "pox."), but if we ultimately can't find the named
# module, we just say something along those lines and stop.
# On the other hand, if the problem is with a dependency, we should
# print a stack trace so that it can be fixed.
# Sorting out the two cases is an ugly hack.
s = sys.exc_info()[1].message.rsplit(" ", 1)
# Sadly, PyPy isn't consistent with CPython here.
if s[0] == "No module named" and (name.endswith(s[1]) or __pypy__):
# It was the one we tried to import itself. (Case 1)
# If we have other names to try, try them!
return do_import2(base_name, names_to_try)
elif (sys.exc_info()[1].message
== "Import by filename is not supported."):
print(sys.exc_info()[1].message)
import os.path
n = name.replace("/", ".").replace("\\", ".")
n = n.replace( os.path.sep, ".")
if n.startswith("pox.") or n.startswith("ext."):
n = n[4:]
print("Maybe you meant to run '%s'?" % (n,))
return False
else:
# This means we found the module we were looking for, but one
# of its dependencies was missing.
show_fail()
return False
except:
# There was some other sort of exception while trying to load the
# module. Just print a trace and call it a day.
show_fail()
return False
return do_import2(name, ["pox." + name, name])
def _do_launch (argv):
component_order = []
components = {}
curargs = {}
pox_options = curargs
for arg in argv:
if not arg.startswith("-"):
if arg not in components:
components[arg] = []
curargs = {}
components[arg].append(curargs)
component_order.append(arg)
else:
arg = arg.lstrip("-").split("=", 1)
arg[0] = arg[0].replace("-", "_")
if len(arg) == 1: arg.append(True)
curargs[arg[0]] = arg[1]
_options.process_options(pox_options)
_pre_startup()
inst = {}
for name in component_order:
cname = name
inst[name] = inst.get(name, -1) + 1
params = components[name][inst[name]]
name = name.split(":", 1)
launch = name[1] if len(name) == 2 else "launch"
name = name[0]
r = _do_import(name)
if r is False: return False
name = r
#print(">>",name)
if launch in sys.modules[name].__dict__:
f = sys.modules[name].__dict__[launch]
if f.__class__ is not _do_launch.__class__:
print(launch, "in", name, "isn't a function!")
return False
multi = False
if f.func_code.co_argcount > 0:
if (f.func_code.co_varnames[f.func_code.co_argcount-1]
== '__INSTANCE__'):
# It's a multi-instance-aware component.
multi = True
# Special __INSTANCE__ paramter gets passed a tuple with:
# 1. The number of this instance (0...n-1)
# 2. The total number of instances for this module
# 3. True if this is the last instance, False otherwise
# The last is just a comparison between #1 and #2, but it's
# convenient.
params['__INSTANCE__'] = (inst[cname], len(components[cname]),
inst[cname] + 1 == len(components[cname]))
if multi == False and len(components[cname]) != 1:
print(name, "does not accept multiple instances")
return False
try:
f(**params)
except TypeError as exc:
instText = ''
if inst[cname] > 0:
instText = "instance {0} of ".format(inst[cname] + 1)
print("Error executing {2}{0}.{1}:".format(name,launch,instText))
import inspect
if inspect.currentframe() is sys.exc_info()[2].tb_frame:
# Error is with calling the function
# Try to give some useful feedback
if _options.verbose:
traceback.print_exc()
else:
exc = sys.exc_info()[0:2]
print(''.join(traceback.format_exception_only(*exc)), end='')
print()
EMPTY = "<Unspecified>"
code = f.__code__
argcount = code.co_argcount
argnames = code.co_varnames[:argcount]
defaults = list((f.func_defaults) or [])
defaults = [EMPTY] * (argcount - len(defaults)) + defaults
args = {}
for n, a in enumerate(argnames):
args[a] = [EMPTY,EMPTY]
if n < len(defaults):
args[a][0] = defaults[n]
if a in params:
args[a][1] = params[a]
del params[a]
if '__INSTANCE__' in args:
del args['__INSTANCE__']
if f.__doc__ is not None:
print("Documentation for {0}:".format(name))
doc = f.__doc__.split("\n")
#TODO: only strip the same leading space as was on the first
# line
doc = map(str.strip, doc)
print('',("\n ".join(doc)).strip())
#print(params)
#print(args)
print("Parameters for {0}:".format(name))
if len(args) == 0:
print(" None.")
else:
print(" {0:25} {1:25} {2:25}".format("Name", "Default",
"Active"))
print(" {0:25} {0:25} {0:25}".format("-" * 15))
for k,v in args.iteritems():
print(" {0:25} {1:25} {2:25}".format(k,str(v[0]),
str(v[1] if v[1] is not EMPTY else v[0])))
if len(params):
print("This component does not have a parameter named "
+ "'{0}'.".format(params.keys()[0]))
return False
missing = [k for k,x in args.iteritems()
if x[1] is EMPTY and x[0] is EMPTY]
if len(missing):
print("You must specify a value for the '{0}'"
"parameter.".format(missing[0]))
return False
return False
else:
# Error is inside the function
raise
elif len(params) > 0 or launch is not "launch":
print("Module %s has no %s(), but it was specified or passed " \
"arguments" % (name, launch))
return False
return True
class Options (object):
def set (self, given_name, value):
name = given_name.replace("-", "_")
if name.startswith("_") or hasattr(Options, name):
# Hey, what's that about?
print("Illegal option:", given_name)
return False
has_field = hasattr(self, name)
has_setter = hasattr(self, "_set_" + name)
if has_field == False and has_setter == False:
print("Unknown option:", given_name)
return False
if has_setter:
setter = getattr(self, "_set_" + name)
setter(given_name, name, value)
else:
if isinstance(getattr(self, name), bool):
# Automatic bool-ization
value = str_to_bool(value)
setattr(self, name, value)
return True
def process_options (self, options):
for k,v in options.iteritems():
if self.set(k, v) is False:
# Bad option!
sys.exit(1)
_help_text = """
POX is a Software Defined Networking controller framework.
The commandline of POX is like:
pox.py [POX options] [C1 [C1 options]] [C2 [C2 options]] ...
Notable POX options include:
--verbose Print more debugging information (especially useful for
problems on startup)
--no-openflow Don't automatically load the OpenFlow module
--log-config=F Load a Python log configuration file (if you include the
option without specifying F, it defaults to logging.cfg)
C1, C2, etc. are component names (e.g., Python modules). Options they
support are up to the module. As an example, you can load a learning
switch app that listens on a non-standard port number by specifying an
option to the of_01 component, and loading the l2_learning component like:
./pox.py --verbose openflow.of_01 --port=6634 forwarding.l2_learning
""".strip()
class POXOptions (Options):
def __init__ (self):
# self.cli = True
self.verbose = False
self.enable_openflow = True
self.log_config = None
def _set_h (self, given_name, name, value):
self._set_help(given_name, name, value)
def _set_help (self, given_name, name, value):
print(_help_text)
#TODO: Summarize options, etc.
sys.exit(0)
def _set_version (self, given_name, name, value):
print(core._get_python_version())
sys.exit(0)
def _set_no_openflow (self, given_name, name, value):
self.enable_openflow = not str_to_bool(value)
# def _set_no_cli (self, given_name, name, value):
# self.cli = not str_to_bool(value)
def _set_log_config (self, given_name, name, value):
if value is True:
# I think I use a better method for finding the path elsewhere...
p = os.path.dirname(os.path.realpath(__file__))
value = os.path.join(p, "logging.cfg")
self.log_config = value
def _set_debug (self, given_name, name, value):
value = str_to_bool(value)
if value:
# Debug implies no openflow and no CLI and verbose
#TODO: Is this really an option we need/want?
self.verbose = True
self.enable_openflow = False
# self.cli = False
_options = POXOptions()
def _pre_startup ():
"""
This function is called after all the POX options have been read in
but before any components are loaded. This gives a chance to do
early setup (e.g., configure logging before a component has a chance
to try to log something!).
"""
_setup_logging()
if _options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if _options.enable_openflow:
pox.openflow.launch() # Default OpenFlow launch
def _post_startup ():
if _options.enable_openflow:
pox.openflow.of_01.launch() # Usually, we launch of_01
def _setup_logging ():
# First do some basic log config...
# This is kind of a hack, but we need to keep track of the handler we
# install so that we can, for example, uninstall it later. This code
# originally lived in pox.core, so we explicitly reference it here.
pox.core._default_log_handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT)
pox.core._default_log_handler.setFormatter(formatter)
logging.getLogger().addHandler(pox.core._default_log_handler)
logging.getLogger().setLevel(logging.INFO)
# Now set up from config file if specified...
#TODO:
# I think we could move most of the special log stuff into
# the log module. You'd just have to make a point to put the log
# module first on the commandline if you wanted later component
# initializations to honor it. Or it could be special-cased?
if _options.log_config is not None:
if not os.path.exists(_options.log_config):
print("Could not find logging config file:", _options.log_config)
sys.exit(2)
logging.config.fileConfig(_options.log_config,
disable_existing_loggers=True)
def set_main_function (f):
print("INSIDE MAIN THREAD")
print(str(_main_thread_function))
global _main_thread_function
if _main_thread_function == f: return True
if _main_thread_function is not None:
import logging
lg = logging.getLogger("boot")
lg.error("Could not set main thread function to: " + str(f))
lg.error("The main thread function is already "
+ "taken by: " + str(_main_thread_function))
return False
_main_thread_function = f
return True
def boot ():
"""
Start up POX.
"""
# Add pox directory to path
sys.path.append(os.path.abspath(os.path.join(sys.path[0], 'pox')))
sys.path.append(os.path.abspath(os.path.join(sys.path[0], 'ext')))
try:
argv = sys.argv[1:]
# Always load cli (first!)
#TODO: Can we just get rid of the normal options yet?
pre = []
while len(argv):
if argv[0].startswith("-"):
pre.append(argv.pop(0))
else:
break
argv = pre + "py --disable".split() + argv
if _do_launch(argv):
_post_startup()
core.goUp()
print("Inside LAunch")
else:
return
except SystemExit:
return
except:
traceback.print_exc()
return
if _main_thread_function:
print("Inside main_thread_function")
_main_thread_function()
else:
#core.acquire()
try:
while core.running:
time.sleep(10)
except:
pass
#core.scheduler._thread.join() # Sleazy
try:
pox.core.core.quit()
except:
pass
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.template.response import TemplateResponse\nf... | ```python
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect, get_object_or_404
from django.template.response import TemplateResponse
from django.utils.text import get_text_list
from django.utils.translation import ugettext_lazy as _
from oioioi.base import admin
from oioioi.base.permissions import is_superuser
from oioioi.contests.admin import ContestAdmin, contest_site
from oioioi.contests.models import Contest, ContestPermission
from oioioi.contests.utils import is_contest_admin
from oioioi.questions.forms import ChangeContestMessageForm
from oioioi.questions.models import Message, MessageNotifierConfig, \
ReplyTemplate
class MessageAdmin(admin.ModelAdmin):
list_display = ['id', 'date', 'topic', 'author']
fields = ['date', 'author', 'contest', 'round', 'problem_instance',
'kind', 'topic', 'content']
readonly_fields = ['date', 'author', 'contest', 'round',
'problem_instance']
def has_add_permission(self, request):
return is_contest_admin(request)
def has_change_permission(self, request, obj=None):
if obj and not obj.contest:
return False
return self.has_add_permission(request)
def has_delete_permission(self, request, obj=None):
return self.has_change_permission(request, obj)
def get_queryset(self, request):
queryset = super(MessageAdmin, self).get_queryset(request)
queryset = queryset.filter(contest=request.contest)
return queryset
def add_view(self, request, form_url='', extra_context=None):
return redirect('add_contest_message', contest_id=request.contest.id)
def get_list_select_related(self):
return super(MessageAdmin, self).get_list_select_related() \
+ ['author', 'problem_instance', 'contest']
def change_view(self, request, object_id, form_url='', extra_context=None):
message = get_object_or_404(Message, id=object_id)
if not self.has_change_permission(request, message):
raise PermissionDenied
if request.method == 'POST':
form = ChangeContestMessageForm(message.kind, request,
request.POST, instance=message)
if form.is_valid():
if form.changed_data:
change_message = _("Changed %s.") % \
get_text_list(form.changed_data, _("and"))
else:
change_message = _("No fields changed.")
form.save()
super(MessageAdmin, self).log_change(request, message,
change_message)
return redirect('contest_messages',
contest_id=request.contest.id)
else:
form = ChangeContestMessageForm(message.kind, request,
instance=message)
return TemplateResponse(request, 'admin/questions/change_message.html',
{'form': form, 'message': message})
def response_delete(self, request):
super(MessageAdmin, self).response_delete(request)
return redirect('contest_messages', contest_id=request.contest.id)
contest_site.contest_register(Message, MessageAdmin)
class MessageNotifierConfigInline(admin.TabularInline):
model = MessageNotifierConfig
can_delete = True
extra = 0
def has_add_permission(self, request):
return True
def has_change_permission(self, request, obj=None):
return True
def has_delete_permission(self, request, obj=None):
return True
def formfield_for_foreignkey(self, db_field, request, **kwargs):
contest_admin_perm = ContestPermission.objects \
.filter(contest=request.contest) \
.filter(permission='contests.contest_admin') \
.select_related('user')
admin_ids = [p.user.id for p in contest_admin_perm]
if request.user.is_superuser:
admin_ids += [u.id for u in User.objects.filter(is_superuser=True)]
elif is_contest_admin(request):
added = MessageNotifierConfig.objects \
.filter(contest=request.contest)
admin_ids += [request.user.id] + [conf.user.id for conf in added]
else:
admin_ids = []
if db_field.name == 'user':
kwargs['queryset'] = User.objects.filter(id__in=admin_ids) \
.order_by('username')
return super(MessageNotifierConfigInline, self) \
.formfield_for_foreignkey(db_field, request, **kwargs)
class MessageNotifierContestAdminMixin(object):
def __init__(self, *args, **kwargs):
super(MessageNotifierContestAdminMixin, self).__init__(*args, **kwargs)
self.inlines = self.inlines + [MessageNotifierConfigInline]
ContestAdmin.mix_in(MessageNotifierContestAdminMixin)
class ReplyTemplateAdmin(admin.ModelAdmin):
def get_list_display(self, request):
if is_superuser(request):
return ['visible_name', 'content', 'contest', 'usage_count']
return ['visible_name', 'content', 'usage_count']
def get_list_filter(self, request):
if is_superuser(request):
return ['contest']
return []
def get_readonly_fields(self, request, obj=None):
fields = []
if obj is None:
fields.append('usage_count')
return fields
def get_form(self, request, obj=None, **kwargs):
form = super(ReplyTemplateAdmin, self).get_form(request, obj, **kwargs)
if 'contest' in form.base_fields:
if not is_superuser(request):
qs = Contest.objects.filter(pk=request.contest.pk)
form.base_fields['contest']._set_queryset(qs)
form.base_fields['contest'].required = True
form.base_fields['contest'].empty_label = None
form.base_fields['contest'].initial = request.contest
return form
def has_add_permission(self, request):
# Correct object contest ensured by form.
return is_contest_admin(request)
def has_change_permission(self, request, obj=None):
if obj:
return is_superuser(request) or \
(is_contest_admin(request) and
obj.contest == request.contest)
return self.has_add_permission(request)
def has_delete_permission(self, request, obj=None):
return self.has_change_permission(request, obj)
def get_queryset(self, request):
queryset = super(ReplyTemplateAdmin, self).get_queryset(request)
if not is_superuser(request):
queryset = queryset.filter(contest=request.contest)
return queryset
contest_site.contest_register(ReplyTemplate, ReplyTemplateAdmin)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright 2013 Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright 2013 Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# ... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
from . import describe
from v1 import assetstore, file, collection, folder, group, item, resource,\
system, token, user, notification
class ApiDocs():
exposed = True
def GET(self):
# Since we only have v1 right now, just redirect to the v1 page.
# If we get more versions, this should show an index of them.
raise cherrypy.HTTPRedirect(cherrypy.url() + '/v1')
def addApiToNode(node):
node.api = ApiDocs()
_addV1ToNode(node.api)
return node
def _addV1ToNode(node):
node.v1 = describe.ApiDocs()
node.v1.describe = describe.Describe()
node.v1.assetstore = assetstore.Assetstore()
node.v1.collection = collection.Collection()
node.v1.file = file.File()
node.v1.folder = folder.Folder()
node.v1.group = group.Group()
node.v1.item = item.Item()
node.v1.notification = notification.Notification()
node.v1.resource = resource.Resource()
node.v1.system = system.System()
node.v1.token = token.Token()
node.v1.user = user.User()
return node
``` |
[
{
"content": "```python\n#!/usr/bin/env python\r\n#\r\n# A library that provides a Python interface to the Telegram Bot API\r\n# Copyright (C) 2015-2017\r\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the te... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\r\n#\r\n# A library that provides a Python interface to the Telegram Bot API\r\n# Copyright (C) 2015-2017\r\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n#... | ```python
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram LabeledPrice."""
from telegram import TelegramObject
class LabeledPrice(TelegramObject):
"""This object represents a portion of the price for goods or services.
Attributes:
label (:obj:`str`): Portion label.
amount (:obj:`int`): Price of the product in the smallest units of the currency.
Args:
label (:obj:`str`): Portion label
amount (:obj:`int`): Price of the product in the smallest units of the currency (integer,
not float/double). For example, for a price of US$ 1.45 pass amount = 145. See the exp
parameter in currencies.json, it shows the number of digits past the decimal point for
each currency (2 for the majority of currencies).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self, label, amount, **kwargs):
self.label = label
self.amount = amount
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# Run these tests with 'nosetests':\n# install the 'python-nose' package (Fedora/CentOS or Ubuntu)\n# run 'nosetests' in the root of the repository\n\nimport unittest\nimport platform\nimport planex.spec\n\n\ndef get_rpm_machine(... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# Run these tests with 'nosetests':\n# install the 'python-nose' package (Fedora/CentOS or Ubuntu)\n# run 'nosetests' in the root of the repository\n\nimport unittest\nimport platform\nimport planex.spec\n\n\ndef ... | ```python
# Run these tests with 'nosetests':
# install the 'python-nose' package (Fedora/CentOS or Ubuntu)
# run 'nosetests' in the root of the repository
import unittest
import platform
import planex.spec
def get_rpm_machine():
if platform.machine() == 'x86_64':
return 'x86_64'
return 'i386'
def get_deb_machine():
if platform.machine() == 'x86_64':
return 'amd64'
return 'i386'
class RpmTests(unittest.TestCase):
def setUp(self):
# 'setUp' breaks Pylint's naming rules
# pylint: disable=C0103
self.spec = planex.spec.Spec("tests/data/ocaml-cohttp.spec",
dist=".el6")
def test_good_filename_preprocessor(self):
planex.spec.Spec("tests/data/ocaml-cohttp.spec.in")
def test_bad_filename(self):
self.assertRaises(planex.spec.SpecNameMismatch, planex.spec.Spec,
"tests/data/bad-name.spec")
def test_bad_filename_preprocessor(self):
self.assertRaises(planex.spec.SpecNameMismatch, planex.spec.Spec,
"tests/data/bad-name.spec.in")
def test_name(self):
self.assertEqual(self.spec.name(), "ocaml-cohttp")
def test_specpath(self):
self.assertEqual(self.spec.specpath(), "./SPECS/ocaml-cohttp.spec")
def test_version(self):
self.assertEqual(self.spec.version(), "0.9.8")
def test_provides(self):
self.assertEqual(
self.spec.provides(),
set(["ocaml-cohttp", "ocaml-cohttp-devel"]))
def test_source_urls(self):
self.assertEqual(
self.spec.source_urls(),
["https://github.com/mirage/ocaml-cohttp/archive/"
"ocaml-cohttp-0.9.8/ocaml-cohttp-0.9.8.tar.gz",
"file:///code/ocaml-cohttp-extra#ocaml-cohttp-extra-0.9.8.tar.gz",
"ocaml-cohttp-init"])
def test_source_paths(self):
self.assertEqual(
self.spec.source_paths(),
["./SOURCES/ocaml-cohttp-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-extra-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-init"])
def test_buildrequires(self):
self.assertEqual(
self.spec.buildrequires(),
set(["ocaml", "ocaml-findlib", "ocaml-re-devel",
"ocaml-uri-devel", "ocaml-cstruct-devel",
"ocaml-lwt-devel", "ocaml-ounit-devel",
"ocaml-ocamldoc", "ocaml-camlp4-devel",
"openssl", "openssl-devel"]))
def test_source_package_path(self):
self.assertEqual(
self.spec.source_package_path(),
"./SRPMS/ocaml-cohttp-0.9.8-1.el6.src.rpm")
def test_binary_package_paths(self):
machine = get_rpm_machine()
self.assertEqual(
sorted(self.spec.binary_package_paths()),
[
path.format(machine=machine) for path in
sorted([
"./RPMS/{machine}/ocaml-cohttp-0.9.8-1.el6.{machine}.rpm",
"./RPMS/{machine}/" +
"ocaml-cohttp-devel-0.9.8-1.el6.{machine}.rpm"])
]
)
class DebTests(unittest.TestCase):
def setUp(self):
# 'setUp' breaks Pylint's naming rules
# pylint: disable=C0103
def map_rpm_to_deb(name):
mapping = {"ocaml-cohttp": ["libcohttp-ocaml"],
"ocaml-cohttp-devel": ["libcohttp-ocaml-dev"],
"ocaml": ["ocaml-nox", "ocaml-native-compilers"],
"ocaml-findlib": ["ocaml-findlib"],
"ocaml-re-devel": ["libre-ocaml-dev"],
"ocaml-uri-devel": ["liburi-ocaml-dev"],
"ocaml-cstruct-devel": ["libcstruct-ocaml-dev"],
"ocaml-lwt-devel": ["liblwt-ocaml-dev"],
"ocaml-ounit-devel": ["libounit-ocaml-dev"],
"ocaml-ocamldoc": ["ocaml-nox"],
"ocaml-camlp4-devel": ["camlp4", "camlp4-extra"],
"openssl": ["libssl1.0.0"],
"openssl-devel": ["libssl-dev"]}
return mapping[name]
self.spec = planex.spec.Spec("./tests/data/ocaml-cohttp.spec",
target="deb",
map_name=map_rpm_to_deb)
def test_name(self):
self.assertEqual(self.spec.name(), "ocaml-cohttp")
def test_specpath(self):
self.assertEqual(self.spec.specpath(), "./SPECS/ocaml-cohttp.spec")
def test_version(self):
self.assertEqual(self.spec.version(), "0.9.8")
def test_provides(self):
self.assertEqual(
self.spec.provides(),
set(["libcohttp-ocaml", "libcohttp-ocaml-dev"]))
def test_source_urls(self):
self.assertEqual(
self.spec.source_urls(),
["https://github.com/mirage/ocaml-cohttp/archive/" +
"ocaml-cohttp-0.9.8/ocaml-cohttp-0.9.8.tar.gz",
"file:///code/ocaml-cohttp-extra#ocaml-cohttp-extra-0.9.8.tar.gz",
"ocaml-cohttp-init"])
def test_source_paths(self):
self.assertEqual(
self.spec.source_paths(),
["./SOURCES/ocaml-cohttp-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-extra-0.9.8.tar.gz",
"./SOURCES/ocaml-cohttp-init"])
def test_buildrequires(self):
self.assertEqual(
self.spec.buildrequires(),
set(["ocaml-nox", "ocaml-native-compilers",
"ocaml-findlib", "libre-ocaml-dev",
"liburi-ocaml-dev", "libcstruct-ocaml-dev",
"liblwt-ocaml-dev", "libounit-ocaml-dev",
"camlp4", "camlp4-extra", "libssl1.0.0",
"libssl-dev"]))
def test_source_package_path(self):
self.assertEqual(
self.spec.source_package_path(),
"./SRPMS/libcohttp-ocaml_0.9.8-1.dsc")
def test_binary_package_paths(self):
machine = get_deb_machine()
self.assertEqual(
sorted(self.spec.binary_package_paths()),
[path.format(machine=machine) for path
in sorted(["./RPMS/libcohttp-ocaml_0.9.8-1_{machine}.deb",
"./RPMS/libcohttp-ocaml-dev_0.9.8-1_{machine}.deb"])])
``` |
[
{
"content": "```python\n# coding: utf-8\n\n\"\"\"\n Kubernetes\n\n No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)\n\n OpenAPI spec version: v1.8.2\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint... | [
{
"content": "<|memory_start|>```python\n# coding: utf-8\n\n\"\"\"\n Kubernetes\n\n No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)\n\n OpenAPI spec version: v1.8.2\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\... | ```python
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1Ingress(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1IngressSpec',
'status': 'V1beta1IngressStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta1Ingress - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta1Ingress.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1Ingress.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1Ingress.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1Ingress.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1Ingress.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1Ingress.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1Ingress.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1Ingress.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1Ingress.
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The metadata of this V1beta1Ingress.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1Ingress.
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1beta1Ingress.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta1Ingress.
Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:return: The spec of this V1beta1Ingress.
:rtype: V1beta1IngressSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta1Ingress.
Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:param spec: The spec of this V1beta1Ingress.
:type: V1beta1IngressSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta1Ingress.
Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:return: The status of this V1beta1Ingress.
:rtype: V1beta1IngressStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta1Ingress.
Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:param status: The status of this V1beta1Ingress.
:type: V1beta1IngressStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1Ingress):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
``` |
[
{
"content": "Repeat the code exactly:\n```python\n#!/usr/bin/python3\n# Petter Strandmark\n\nimport sys\n\nimport matplotlib.pyplot as plt\n\nLOOKING_FOR_DATA = 1\nREADING_DATA = 2\nstate = LOOKING_FOR_DATA\n\niterations = []\nobjectives = []\nrelative_changes_in_x = []\nrelative_changes_... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#!/usr/bin/python3\n# Petter Strandmark\n\nimport sys\n\nimport matplotlib.pyplot as plt\n\nLOOKING_FOR_DATA = 1\nREADING_DATA = 2\nstate = LOOKING_FOR_DATA\n\niterations = []\nobjectives = []\nrelative_changes_in_x = []\nr... | ```python
#!/usr/bin/python3
# Petter Strandmark
import sys
import matplotlib.pyplot as plt
LOOKING_FOR_DATA = 1
READING_DATA = 2
state = LOOKING_FOR_DATA
iterations = []
objectives = []
relative_changes_in_x = []
relative_changes_in_y = []
feasibilities = []
optimal_value = None
for line in sys.stdin.readlines():
if state == LOOKING_FOR_DATA:
if "--------" in line:
state = READING_DATA
print("Found convergence data.")
elif "Optimal value:" in line:
optimal_value = float(line.split(":")[1])
print("Found optimal value:", optimal_value)
elif state == READING_DATA:
try:
iteration, objective, rel_change_x, rel_change_y, feasibility = \
[float(n) for n in line.split()]
iterations.append(iteration)
objectives.append(objective)
relative_changes_in_x.append(rel_change_x)
relative_changes_in_y.append(rel_change_y)
feasibilities.append(feasibility)
except:
state = LOOKING_FOR_DATA
plt.semilogy(iterations,
feasibilities)
plt.xlabel("Iteration")
plt.title("Feasibility")
plt.show()
if optimal_value:
plt.semilogy(iterations,
[abs(obj - optimal_value) for obj in objectives])
plt.xlabel("Iteration")
plt.title("Objective value error")
plt.show()
``` |
[
{
"content": "```python\n# Author: Denis A. Engemann <d.engemann@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom nose.tools import assert_equal, assert_raises\n\nfrom mne import io, Epochs, read_events, pick_types\nf... | [
{
"content": "<|memory_start|>```python\n# Author: Denis A. Engemann <d.engemann@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom nose.tools import assert_equal, assert_raises\n\nfrom mne import io, Epochs, read_event... | ```python
# Author: Denis A. Engemann <d.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_equal, assert_raises
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn, check_version
from mne.decoding import compute_ems, EMS
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
"""Test event-matched spatial filters"""
raw = io.read_raw_fif(raw_fname, preload=False)
# create unequal number of events
events = read_events(event_name)
events[-2, 2] = 3
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
epochs = epochs.equalize_event_counts(epochs.event_id, copy=False)[0]
assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
surrogates, filters, conditions = compute_ems(epochs)
assert_equal(list(set(conditions)), [1, 3])
events = read_events(event_name)
event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs = epochs.equalize_event_counts(epochs.event_id, copy=False)[0]
n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
assert_raises(ValueError, compute_ems, epochs)
surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
assert_equal(n_expected, len(surrogates))
assert_equal(n_expected, len(conditions))
assert_equal(list(set(conditions)), [2, 3])
# test compute_ems cv
epochs = epochs['aud_r', 'vis_l']
epochs.equalize_event_counts(epochs.event_id)
if check_version('sklearn', '0.18'):
from sklearn.model_selection import StratifiedKFold
cv = StratifiedKFold()
else:
from sklearn.cross_validation import StratifiedKFold
cv = StratifiedKFold(epochs.events[:, 2])
compute_ems(epochs, cv=cv)
compute_ems(epochs, cv=2)
assert_raises(ValueError, compute_ems, epochs, cv='foo')
assert_raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1)
raw.close()
# EMS transformer, check that identical to compute_ems
X = epochs.get_data()
y = epochs.events[:, 2]
X = X / np.std(X) # X scaled outside cv in compute_ems
Xt, coefs = list(), list()
ems = EMS()
assert_equal(ems.__repr__(), '<EMS: not fitted.>')
# manual leave-one-out to avoid sklearn version problem
for test in range(len(y)):
train = np.setdiff1d(range(len(y)), test)
ems.fit(X[train], y[train])
coefs.append(ems.filters_)
Xt.append(ems.transform(X[[test]]))
assert_equal(ems.__repr__(), '<EMS: fitted with 4 filters on 2 classes.>')
assert_array_almost_equal(filters, np.mean(coefs, axis=0))
assert_array_almost_equal(surrogates, np.vstack(Xt))
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# -*- encoding: utf-8 -*-\n################################################################################\n# #\n# Copyright (C) 2013-Today Carlos Eduardo V... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# -*- encoding: utf-8 -*-\n################################################################################\n# #\n# Copyright (C) 2013-Today ... | ```python
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
from datetime import *
class clv_insurance_client(models.Model):
_inherit = 'clv_insurance_client'
state_date = fields.Datetime("Status change date", required=True, readonly=True)
state = fields.Selection([('new','New'),
('active','Active'),
('suspended','Suspended'),
('canceled','Canceled'),
], string='Status', default='new', readonly=True, required=True, help="")
_defaults = {
'state_date': lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
}
@api.one
def button_new(self):
self.state_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'new'
@api.one
def button_activate(self):
self.state_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'active'
@api.one
def button_suspend(self):
self.state_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'suspended'
@api.one
def button_cancel(self):
self.state_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.state = 'canceled'
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# h... | ```python
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
from google.appengine.api.api_base_pb import StringProto
class UserServiceError(ProtocolBuffer.ProtocolMessage):
OK = 0
REDIRECT_URL_TOO_LONG = 1
NOT_ALLOWED = 2
_ErrorCode_NAMES = {
0: "OK",
1: "REDIRECT_URL_TOO_LONG",
2: "NOT_ALLOWED",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n + 0
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
_TEXT = (
"ErrorCode",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
__all__ = ['UserServiceError']
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n (c) 2014 - Copyright Pierre-Yves Chibon\n Author: Pierre-Yves Chibon <pingou@pingoured.fr>\n\n# This copyrighted material is made available to anyone wishing to use, modify,\n# copy, or redistribute it subject to... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n (c) 2014 - Copyright Pierre-Yves Chibon\n Author: Pierre-Yves Chibon <pingou@pingoured.fr>\n\n# This copyrighted material is made available to anyone wishing to use, modify,\n# copy, or redistribu... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Pierre-Yves Chibon
Author: Pierre-Yves Chibon <pingou@pingoured.fr>
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2, or (at your option) any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU
# General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of Red Hat, Inc.
fedora_elections.elections test script
"""
__requires__ = ['SQLAlchemy >= 0.7', 'jinja2 >= 2.4']
import pkg_resources
import logging
import unittest
import sys
import os
from datetime import time
from datetime import timedelta
import flask
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import fedora_elections
from tests import ModelFlasktests, Modeltests, TODAY, FakeUser, user_set
# pylint: disable=R0904
class FlaskSimpleElectionstests(ModelFlasktests):
""" Flask application tests range voting. """
def test_vote_simple(self):
""" Test the vote_simple function - the preview part. """
output = self.app.get(
'/vote/test_election', follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<title>OpenID transaction in progress</title>' in output.data
or 'discoveryfailure' in output.data)
self.setup_db()
user = FakeUser(['packager'], username='toshio')
with user_set(fedora_elections.APP, user):
output = self.app.get(
'/vote/test_election5', follow_redirects=True)
self.assertTrue(
'class="message">You have already voted in the election!</'
in output.data)
user = FakeUser(['packager'], username='pingou')
with user_set(fedora_elections.APP, user):
output = self.app.get(
'/vote/test_election5')
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
# Invalid vote: No candidate
data = {
'action': 'preview',
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<td class="error">Not a valid choice</td>'
in output.data)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
# Invalid vote: No candidate
data = {
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<td class="error">Not a valid choice</td>'
in output.data)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
# Invalid vote: Not numeric
data = {
'candidate': 'a',
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Valid input
data = {
'candidate': 7,
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="submit" />'
in output.data)
self.assertTrue(
'<li class="message">Please confirm your vote!</li>'
in output.data)
def test_vote_simple_process(self):
""" Test the vote_simple function - the voting part. """
output = self.app.get(
'/vote/test_election', follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<title>OpenID transaction in progress</title>' in output.data
or 'discoveryfailure' in output.data)
self.setup_db()
user = FakeUser(['packager'], username='pingou')
with user_set(fedora_elections.APP, user):
# Invalid candidate id - no csrf
data = {
'candidate': 1,
'action': 'submit',
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
# Invalid candidate id
data = {
'candidate': 1,
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Invalid vote: too low
data = {
'candidate': -1,
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Invalid vote: Not numeric
data = {
'candidate': 'a',
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Valid input
data = {
'candidate': 8,
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'class="message">Your vote has been recorded. Thank you!</'
in output.data)
self.assertTrue('<h3>Current elections</h3>' in output.data)
self.assertTrue('<h3>Next 1 elections</h3>' in output.data)
self.assertTrue('<h3>Last 2 elections</h3>' in output.data)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(
FlaskSimpleElectionstests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
``` |
[
{
"content": "Write the code verbatim:\n```python\nimport os\nfrom setuptools import setup\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='scRNApipe',\n version='0.1.0',\n description='Package for analysing scRNA-seq in Transcript Tag Counti... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nimport os\nfrom setuptools import setup\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='scRNApipe',\n version='0.1.0',\n description='Package for analysing scRNA-seq in Trans... | ```python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='scRNApipe',
version='0.1.0',
description='Package for analysing scRNA-seq in Transcript Tag Counting data.',
long_description=read('README.md'),
author='Stavros Giannoukakos',
author_email='s.p.giannoukakos@hotmail.com',
packages=['scRNApipe'],
url=['https://github.com/MarinusVL/scRNApipe'],
keywords=['single cell RNA analysis'],
install_requires=['pysam>=0.8.3', 'numpy', 'multiqc', 'STAR', 'umis', 'umi_tools', ,'python>=2.5,<3','natsort'],
dependency_links=['https://sourceforge.net/projects/subread/files/subread-1.5.2/subread-1.5.2-source.tar.gz/download',
'https://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.5_source.zip'
],
package_data = {
'': ['configuration_file.txt']
},
entry_points={
'console_scripts': ['scRNApipe = scRNApipe.scRNApipe:main']
},
)
``` |
[
{
"content": "Replicate the source code:\n```python\nimport os, subprocess, xmlrpclib\nfrom optparse import make_option\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail.message import EmailMessage\nfrom django.core.management.base import BaseCommand\nfrom d... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport os, subprocess, xmlrpclib\nfrom optparse import make_option\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail.message import EmailMessage\nfrom django.core.management.base import Bas... | ```python
import os, subprocess, xmlrpclib
from optparse import make_option
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail.message import EmailMessage
from django.core.management.base import BaseCommand
from django.core.management import call_command, CommandError
from django.template.loader import render_to_string
class Command(BaseCommand):
"""
Update tendenci via pip and restarts the server
"""
option_list = BaseCommand.option_list + (
make_option(
'--user',
action='store',
dest='user',
default='',
help='Request user'),
)
def handle(self, *args, **options):
from tendenci.apps.site_settings.utils import get_setting
pass_update_tendenci = False
pass_update_tendenci_site = False
pass_restart_server = False
is_uwsgi = False
gunicorn_error_msg = None
uwsgi_error_msg = None
errors_list = []
pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
latest_version = pypi.package_releases('tendenci')[0]
error_message = ""
email_context = {'site_url':get_setting('site', 'global', 'siteurl'),
'version':latest_version, 'error_message':error_message}
email_sender = get_setting('site', 'global', 'siteemailnoreplyaddress') or settings.DEFAULT_FROM_EMAIL
email_recipient = ""
user_id = options['user']
if User.objects.filter(pk=user_id).exists():
user = User.objects.get(pk=user_id)
if user.email:
email_recipient = user.email
try:
print "Updating tendenci"
subprocess.check_output("pip install tendenci --upgrade", stderr=subprocess.STDOUT, shell=True)
pass_update_tendenci = True
except subprocess.CalledProcessError as e:
errors_list.append(e.output)
# run python deploy.py iff update_tendenci is successful
if pass_update_tendenci:
try:
print "Updating tendenci site"
subprocess.check_output("python deploy.py", stderr=subprocess.STDOUT, shell=True)
pass_update_tendenci_site = True
except subprocess.CalledProcessError as e:
errors_list.append(e.output)
# run reload if update is done
if pass_update_tendenci_site:
try:
print "Restarting Server"
subprocess.check_output("sudo reload %s" % os.path.basename(settings.PROJECT_ROOT),
stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
gunicorn_error_msg = e.output
if "reload: Unknown job:" in e.output:
is_uwsgi = True
# run usgi command iff it was proven that the site is using uwsgi instead
if is_uwsgi:
try:
print "Restarting Server"
subprocess.check_output("sudo touch /etc/uwsgi/vassals/%s.ini" % os.path.basename(settings.PROJECT_ROOT),
stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
uwsgi_error_msg = e.output
if gunicorn_error_msg and uwsgi_error_msg:
errors_list.append(uwsgi_error_msg)
errors_list.append(gunicorn_error_msg)
try:
print "Clearing cache"
call_command('clear_cache')
except CommandError as e:
errors_list.append(e.output)
email_context['errors_list'] = errors_list
if email_recipient:
subject = render_to_string('notification/update_tendenci_notice/short.txt', email_context)
subject = subject.strip('\n').strip('\r')
body = render_to_string('notification/update_tendenci_notice/full.html', email_context)
email = EmailMessage()
email.subject = subject
email.body = body
email.from_email = email_sender
email.to = [email_recipient]
email.content_subtype = 'html'
email.send()
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# -*- coding:utf-8 -*-\n\"\"\"\nDescription:\n Transaction Basic Class\nUsage:\n from neo.Core.Transaction import Transaction\n\"\"\"\nfrom itertools import groupby\nfrom neo.Blockchain import *\nfrom neo.Core.TX.Transacti... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# -*- coding:utf-8 -*-\n\"\"\"\nDescription:\n Transaction Basic Class\nUsage:\n from neo.Core.Transaction import Transaction\n\"\"\"\nfrom itertools import groupby\nfrom neo.Blockchain import *\nfrom neo.C... | ```python
# -*- coding:utf-8 -*-
"""
Description:
Transaction Basic Class
Usage:
from neo.Core.Transaction import Transaction
"""
from itertools import groupby
from neo.Blockchain import *
from neo.Core.TX.TransactionAttribute import *
from neocore.Fixed8 import Fixed8
from neo.Network.Inventory import Inventory
from neo.Network.InventoryType import InventoryType
from neo.Network.Mixins import InventoryMixin
from neocore.Cryptography.Crypto import *
from neocore.IO.Mixins import SerializableMixin
from neo.IO.MemoryStream import StreamManager
from neocore.IO.BinaryReader import BinaryReader
from neo.Core.Mixins import EquatableMixin
from neo.Core.Helper import Helper
from neo.Core.Witness import Witness
from neocore.UInt256 import UInt256
from neo.Core.AssetType import AssetType
import inspect
class TransactionResult(EquatableMixin):
AssetId = None
Amount = Fixed8(0)
def __init__(self, asset_id, amount):
"""
Create an instance.
Args:
asset_id (UInt256):
amount (Fixed8):
"""
self.AssetId = asset_id
self.Amount = amount
def ToString(self):
"""
Get a string representation of the object.
Returns:
str:
"""
return "%s -> %s " % (self.AssetId.ToString(), self.Amount.value)
class TransactionType(object):
MinerTransaction = b'\x00'
IssueTransaction = b'\x01'
ClaimTransaction = b'\x02'
EnrollmentTransaction = b'\x20'
VotingTransaction = b'\x24'
RegisterTransaction = b'\x40'
ContractTransaction = b'\x80'
StateTransaction = b'\x90'
AgencyTransaction = b'\xb0'
PublishTransaction = b'\xd0'
InvocationTransaction = b'\xd1'
@staticmethod
def ToName(value):
if isinstance(value, int):
value = value.to_bytes(1, 'little')
for key, item in TransactionType.__dict__.items():
if value == item:
return key
return None
class TransactionOutput(SerializableMixin, EquatableMixin):
Value = None # should be fixed 8
ScriptHash = None
AssetId = None
"""docstring for TransactionOutput"""
def __init__(self, AssetId=None, Value=None, script_hash=None):
"""
Create an instance.
Args:
AssetId (UInt256):
Value (Fixed8):
script_hash (UInt160):
"""
super(TransactionOutput, self).__init__()
self.AssetId = AssetId
self.Value = Value
self.ScriptHash = script_hash
# if self.ScriptHash is None:
# raise Exception("Script hash is required!!!!!!!!")
@property
def Address(self):
"""
Get the public address of the transaction.
Returns:
str: base58 encoded string representing the address.
"""
return Crypto.ToAddress(self.ScriptHash)
@property
def AddressBytes(self):
"""
Get the public address of the transaction.
Returns:
bytes: base58 encoded address.
"""
return bytes(self.Address, encoding='utf-8')
def Serialize(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
writer.WriteUInt256(self.AssetId)
writer.WriteFixed8(self.Value)
writer.WriteUInt160(self.ScriptHash)
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.AssetId = reader.ReadUInt256()
self.Value = reader.ReadFixed8()
self.ScriptHash = reader.ReadUInt160()
if self.ScriptHash is None:
raise Exception("Script hash is required from deserialize!!!!!!!!")
def ToJson(self, index):
"""
Convert object members to a dictionary that can be parsed as JSON.
Args:
index (int): The index of the output in a transaction
Returns:
dict:
"""
return {
'n': index,
'asset': self.AssetId.To0xString(),
'value': self.Value.ToNeoJsonString(),
'address': self.Address
}
class TransactionInput(SerializableMixin, EquatableMixin):
"""docstring for TransactionInput"""
PrevHash = None
PrevIndex = None
def __init__(self, prevHash=None, prevIndex=None):
"""
Create an instance.
Args:
prevHash (UInt256):
prevIndex (int):
"""
super(TransactionInput, self).__init__()
self.PrevHash = prevHash
self.PrevIndex = prevIndex
def Serialize(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
writer.WriteUInt256(self.PrevHash)
writer.WriteUInt16(self.PrevIndex)
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.PrevHash = reader.ReadUInt256()
self.PrevIndex = reader.ReadUInt16()
def ToString(self):
"""
Get the string representation of the object.
Returns:
str: PrevHash:PrevIndex
"""
return self.PrevHash + ":" + self.PrevIndex
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
return {
'PrevHash': self.PrevHash.To0xString(),
'PrevIndex': self.PrevIndex
}
class Transaction(Inventory, InventoryMixin):
Type = None
Version = 0
Attributes = []
inputs = []
outputs = []
scripts = []
__system_fee = None
__network_fee = None
InventoryType = InventoryType.TX
__hash = None
__htbs = None
__height = 0
__references = None
MAX_TX_ATTRIBUTES = 16
withdraw_hold = None
"""docstring for Transaction"""
def __init__(self, inputs=[], outputs=[], attributes=[], scripts=[]):
"""
Create an instance.
Args:
inputs (list): of neo.Core.CoinReference.CoinReference.
outputs (list): of neo.Core.TX.Transaction.TransactionOutput items.
attributes (list): of neo.Core.TX.TransactionAttribute.
scripts:
"""
super(Transaction, self).__init__()
self.inputs = inputs
self.outputs = outputs
self.Attributes = attributes
self.scripts = scripts
self.InventoryType = 0x01 # InventoryType TX 0x01
self.__references = None
@property
def Hash(self):
"""
Get the hash of the transaction.
Returns:
UInt256:
"""
if not self.__hash:
ba = bytearray(binascii.unhexlify(self.GetHashData()))
hash = Crypto.Hash256(ba)
self.__hash = UInt256(data=hash)
return self.__hash
def GetHashData(self):
"""
Get the data used for hashing.
Returns:
bytes:
"""
return Helper.GetHashData(self)
def GetMessage(self):
"""
Get the data used for hashing.
Returns:
bytes:
"""
return self.GetHashData()
def getAllInputs(self):
"""
Get the inputs.
Returns:
list:
"""
return self.inputs
def ResetReferences(self):
"""Reset local stored references."""
self.__references = None
def ResetHashData(self):
"""Reset local stored hash data."""
self.__hash = None
@property
def Scripts(self):
"""
Get the scripts
Returns:
list:
"""
return self.scripts
@property
def References(self):
"""
Get all references.
Returns:
dict:
Key (UInt256): input PrevHash
Value (TransactionOutput): object.
"""
if self.__references is None:
refs = {}
# group by the input prevhash
for hash, group in groupby(self.inputs, lambda x: x.PrevHash):
tx, height = GetBlockchain().GetTransaction(hash.ToBytes())
if tx is not None:
for input in group:
refs[input] = tx.outputs[input.PrevIndex]
self.__references = refs
return self.__references
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
len_attributes = sys.getsizeof(self.Attributes)
len_inputs = sys.getsizeof(self.inputs)
len_outputs = sys.getsizeof(self.outputs)
len_scripts = sys.getsizeof(self.scripts)
return sys.getsizeof(self.Type) + sys.getsizeof(0) + len_attributes + len_inputs + len_outputs + len_scripts
def Height(self):
return self.__height
def SystemFee(self):
"""
Get the system fee.
Returns:
Fixed8: currently fixed to 0.
"""
return Fixed8(0)
def NetworkFee(self):
"""
Get the network fee.
Returns:
Fixed8:
"""
if self.__network_fee is None:
input = Fixed8(0)
for coin_ref in self.References.values():
if coin_ref.AssetId == GetBlockchain().SystemCoin().Hash:
input = input + coin_ref.Value
output = Fixed8(0)
for tx_output in self.outputs:
if tx_output.AssetId == GetBlockchain().SystemCoin().Hash:
output = output + tx_output.Value
self.__network_fee = input - output - self.SystemFee()
# logger.info("Determined network fee to be %s " % (self.__network_fee.value))
return self.__network_fee
# if self.__network_fee == Fixed8.Satoshi():
# Fixed8 input = References.Values.Where(p= > p.AssetId.Equals(.SystemCoin.Hash)).Sum(p= > p.Value);
# Fixed8 output = Outputs.Where(p= > p.AssetId.Equals(Blockchain.SystemCoin.Hash)).Sum(p= > p.Value);
# _network_fee = input - output - SystemFee;
# pass
# return self.__network_fee
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.DeserializeUnsigned(reader)
self.scripts = reader.ReadSerializableArray()
self.OnDeserialized()
def DeserializeExclusiveData(self, reader):
pass
@staticmethod
def DeserializeFromBufer(buffer, offset=0):
"""
Deserialize object instance from the specified buffer.
Args:
buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
offset: UNUSED
Returns:
Transaction:
"""
mstream = StreamManager.GetStream(buffer)
reader = BinaryReader(mstream)
tx = Transaction.DeserializeFrom(reader)
StreamManager.ReleaseStream(mstream)
return tx
@staticmethod
def DeserializeFrom(reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Returns:
Transaction:
"""
ttype = reader.ReadByte()
tx = None
from neo.Core.TX.RegisterTransaction import RegisterTransaction
from neo.Core.TX.IssueTransaction import IssueTransaction
from neo.Core.TX.ClaimTransaction import ClaimTransaction
from neo.Core.TX.MinerTransaction import MinerTransaction
from neo.Core.TX.PublishTransaction import PublishTransaction
from neo.Core.TX.InvocationTransaction import InvocationTransaction
from neo.Core.TX.EnrollmentTransaction import EnrollmentTransaction
from neo.Core.TX.StateTransaction import StateTransaction
if ttype == int.from_bytes(TransactionType.RegisterTransaction, 'little'):
tx = RegisterTransaction()
elif ttype == int.from_bytes(TransactionType.MinerTransaction, 'little'):
tx = MinerTransaction()
elif ttype == int.from_bytes(TransactionType.IssueTransaction, 'little'):
tx = IssueTransaction()
elif ttype == int.from_bytes(TransactionType.ClaimTransaction, 'little'):
tx = ClaimTransaction()
elif ttype == int.from_bytes(TransactionType.PublishTransaction, 'little'):
tx = PublishTransaction()
elif ttype == int.from_bytes(TransactionType.InvocationTransaction, 'little'):
tx = InvocationTransaction()
elif ttype == int.from_bytes(TransactionType.EnrollmentTransaction, 'little'):
tx = EnrollmentTransaction()
elif ttype == int.from_bytes(TransactionType.StateTransaction, 'little'):
tx = StateTransaction()
else:
tx = Transaction()
tx.Type = ttype
tx.DeserializeUnsignedWithoutType(reader)
tx.scripts = []
byt = reader.ReadVarInt()
if byt > 0:
for i in range(0, byt):
witness = Witness()
witness.Deserialize(reader)
tx.scripts.append(witness)
tx.OnDeserialized()
return tx
def DeserializeUnsigned(self, reader):
"""
Deserialize object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: if transaction type is incorrect.
"""
txtype = reader.ReadByte()
if txtype != int.from_bytes(self.Type, 'little'):
raise Exception('incorrect type {}, wanted {}'.format(txtype, int.from_bytes(self.Type, 'little')))
self.DeserializeUnsignedWithoutType(reader)
def DeserializeUnsignedWithoutType(self, reader):
"""
Deserialize object without reading transaction type data.
Args:
reader (neo.IO.BinaryReader):
"""
self.Version = reader.ReadByte()
self.DeserializeExclusiveData(reader)
self.Attributes = reader.ReadSerializableArray('neo.Core.TX.TransactionAttribute.TransactionAttribute',
max=self.MAX_TX_ATTRIBUTES)
self.inputs = reader.ReadSerializableArray('neo.Core.CoinReference.CoinReference')
self.outputs = reader.ReadSerializableArray('neo.Core.TX.Transaction.TransactionOutput')
def Equals(self, other):
if other is None or other is not self:
return False
return self.Hash == other.Hash
def ToArray(self):
"""
Get the byte data of self.
Returns:
bytes:
"""
return Helper.ToArray(self)
def Serialize(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
self.SerializeUnsigned(writer)
writer.WriteSerializableArray(self.scripts)
def SerializeUnsigned(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
writer.WriteByte(self.Type)
writer.WriteByte(self.Version)
self.SerializeExclusiveData(writer)
if len(self.Attributes) > self.MAX_TX_ATTRIBUTES:
raise Exception("Cannot have more than %s transaction attributes" % self.MAX_TX_ATTRIBUTES)
writer.WriteSerializableArray(self.Attributes)
writer.WriteSerializableArray(self.inputs)
writer.WriteSerializableArray(self.outputs)
def SerializeExclusiveData(self, writer):
pass
def OnDeserialized(self):
pass
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
jsn = {}
jsn["txid"] = self.Hash.To0xString()
jsn["type"] = TransactionType.ToName(self.Type)
jsn["version"] = self.Version
jsn["attributes"] = [attr.ToJson() for attr in self.Attributes]
jsn["vout"] = [out.ToJson(i) for i, out in enumerate(self.outputs)]
jsn["vin"] = [input.ToJson() for input in self.inputs]
jsn["sys_fee"] = self.SystemFee().ToNeoJsonString()
jsn["net_fee"] = self.NetworkFee().ToNeoJsonString()
jsn["scripts"] = [script.ToJson() for script in self.scripts]
return jsn
def Verify(self, mempool):
"""
Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise.
"""
logger.info("Verifying transaction: %s " % self.Hash.ToBytes())
return Helper.VerifyScripts(self)
# logger.info("return true for now ...")
# return True
# for i in range(1, len(self.inputs)):
# j=0
# while j < i:
# j = j+1
# if self.inputs[i].PrevHash == self.inputs[j].PrevHash and self.inputs[i].PrevIndex() == self.inputs[j].PrevIndex():
# return False
# logger.info("Verified inputs 1")
# for tx in mempool:
# if tx is not self:
# for ip in self.inputs:
# if ip in tx.inputs:
# return False
#
# logger.info("Verified inputs 2, checking double spend")
#
# if GetBlockchain().IsDoubleSpend(self):
# return False
#
# logger.info("verifying outputs ...")
# for txOutput in self.outputs:
# asset = GetBlockchain().GetAssetState(txOutput.AssetId)
#
# if asset is None: return False
#
# if txOutput.Value % pow(10, 8 - asset.Precision) != 0:
# return False
#
# logger.info("unimplemented after here ...")
# return True
# txResults = self.GetTransactionResults()
#
# if txResults is None: return False
#
# destroyedResults = []
# [destroyedResults.append(tx) for tx in txResults if tx.Amount==Fixed8(0)]
# numDestroyed = len(destroyedResults)
# if numDestroyed > 1:
# return False
# if numDestroyed == 1 and destroyedResults[0].AssetId != GetSystemCoin().Hash:
# return False
# if self.SystemFee() > Fixed8(0) and ( numDestroyed == 0 or destroyedResults[0].Amount < self.SystemFee()):
# return False
#
# issuedResults = []
#
# [issuedResults.append(tx) for tx in txResults if tx.Amount() < Fixed8(0)]
#
# if self.Type == TransactionType.MinerTransaction or self.Type == TransactionType.ClaimTransaction:
# for tx in issuedResults:
# if tx.AssetId != GetSystemCoin().Hash:
# return False
#
# elif self.Type == TransactionType.IssueTransaction:
# for tx in issuedResults:
# if tx.AssetId != GetSystemCoin().Hash:
# return False
#
# else:
# if len(issuedResults) > 0:
# return False
#
# usageECDH=0
#
# for attr in self.Attributes:
# if attr.Usage == TransactionAttributeUsage.ECDH02 or attr.Usage == TransactionAttributeUsage.ECDH03:
# usageECDH = usageECDH+1
# if usageECDH > 1:
# return False
#
def GetScriptHashesForVerifying(self):
"""
Get a list of script hashes for verifying transactions.
Raises:
Exception: if there are no valid assets in the transaction.
Returns:
list: of UInt160 type script hashes.
"""
if not self.References and len(self.Attributes) < 1:
return []
hashes = set()
for coinref, output in self.References.items():
hashes.add(output.ScriptHash)
for attr in self.Attributes:
if attr.Usage == TransactionAttributeUsage.Script:
if type(attr.Data) is UInt160:
hashes.add(attr.Data)
else:
hashes.add(UInt160(data=attr.Data))
for key, group in groupby(self.outputs, lambda p: p.AssetId):
asset = GetBlockchain().GetAssetState(key.ToBytes())
if asset is None:
raise Exception("Invalid operation")
if asset.AssetType == AssetType.DutyFlag:
for p in group:
hashes.add(p.ScriptHash)
hashlist = list(hashes)
hashlist.sort()
return hashlist
def GetTransactionResults(self):
"""
Get the execution results of the transaction.
Returns:
None: if the transaction has no references.
list: of TransactionResult objects.
"""
if self.References is None:
return None
results = []
realresults = []
for ref_output in self.References.values():
results.append(TransactionResult(ref_output.AssetId, ref_output.Value))
for output in self.outputs:
results.append(TransactionResult(output.AssetId, output.Value * Fixed8(-1)))
for key, group in groupby(results, lambda x: x.AssetId):
sum = Fixed8(0)
for item in group:
sum = sum + item.Amount
if sum != Fixed8.Zero():
realresults.append(TransactionResult(key, sum))
return realresults
class ContractTransaction(Transaction):
    """A plain asset-transfer transaction carrying no extra payload."""

    def __init__(self, *args, **kwargs):
        """
        Create an instance, forwarding all arguments to ``Transaction``
        and tagging the transaction type as ``ContractTransaction``.
        """
        super(ContractTransaction, self).__init__(*args, **kwargs)
        self.Type = TransactionType.ContractTransaction
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n frink.base\n ~~~~~~~~~~~~~\n BaseModel class\n\"\"\"\n\nimport datetime\nfrom schematics.models import Model\nfrom schematics.types.base import (\n StringType, BooleanTyp... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n frink.base\n ~~~~~~~~~~~~~\n BaseModel class\n\"\"\"\n\nimport datetime\nfrom schematics.models import Model\nfrom schematics.types.base import (\n String... | ```python
# -*- coding: utf-8 -*-
"""
frink.base
~~~~~~~~~~~~~
BaseModel class
"""
import datetime
from schematics.models import Model
from schematics.types.base import (
StringType, BooleanType, DateTimeType, IntType, UUIDType
)
from schematics.exceptions import ValidationError
# Frink
from .orm import InstanceLayerMixin
from .errors import NotUniqueError
import logging
log = logging.getLogger(__name__)
class BaseModel(Model, InstanceLayerMixin):
    """Base schematics model: adds id/timestamps and unique-field validation."""
    # __metaclass__ = ORMMeta # Do this in every model instead
    # NOTE(review): mutable class attribute shared by all subclasses unless
    # each subclass redefines its own `_uniques` list — confirm intended.
    _uniques = []
    id = UUIDType()
    created_at = DateTimeType(default=datetime.datetime.now)
    updated_at = DateTimeType(default=datetime.datetime.now)
    def validate(self):
        """Check every field listed in `_uniques` is non-None and unique
        in the backing store, then run schematics' own validation.

        Raises:
            ValidationError: if a unique field is None.
            NotUniqueError: if another record already holds the same value.
        """
        log.debug('VALIDATING')
        for field in self._uniques:
            log.debug('Validate that {} is unique'.format(field))
            if self._data.get(field, None) is None:
                raise ValidationError('Unique fields cannot be None ({})'.format(field))
            # A clashing record is only an error if it is a *different* row.
            _ = self.query.get_by(column=field, value=self._data.get(field, None))
            if _ is not None and _.id != self.id:
                raise NotUniqueError('Field `{}` must be unique'.format(field))
        return super(BaseModel, self).validate()
    def __repr__(self):
        # Prefer the most human-meaningful attribute available.
        if hasattr(self, 'email'):
            return u'<{}: {}>'.format(self.__class__.__name__, self.email)
        if hasattr(self, 'slug'):
            return u'<{}: {}>'.format(self.__class__.__name__, self.slug)
        if hasattr(self, 'name'):
            return u'<{}: {}>'.format(self.__class__.__name__, self.name)
        if hasattr(self, 'id'):
            return u'<{}: {}>'.format(self.__class__.__name__, self.id)
        return u'<{}: {} object>'.format(self.__class__.__name__, self.__class__.__name__)
``` |
[
{
"content": "```python\n# creatGraspICRA09.py - script for creating a hand poses database\r\n#\r\n# Copyright (c) 2009 Javier Romero\r\n#\r\n# Author: Javier Romero <jrgn@kth.se>\r\n#\r\n# This program is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU General Public Lic... | [
{
"content": "<|memory_start|>```python\n# creatGraspICRA09.py - script for creating a hand poses database\r\n#\r\n# Copyright (c) 2009 Javier Romero\r\n#\r\n# Author: Javier Romero <jrgn@kth.se>\r\n#\r\n# This program is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU Ge... | ```python
# creatGraspICRA09.py - script for creating a hand poses database
#
# Copyright (c) 2009 Javier Romero
#
# Author: Javier Romero <jrgn@kth.se>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import poser
import linecache
import os
import setCamAZEL
import setTexture
from os.path import join
# Build the Poser scene and resolve all input/output paths relative to
# this script's directory.
scene = poser.Scene()
basedir = os.path.dirname(os.path.abspath(__file__))
outRoot = join(basedir, 'out')  # renamed from `dir` (shadowed a builtin)
lightdir = join(basedir, 'lights')
taxonomyDir = join(basedir, 'taxonomy')
texture = join(basedir, 'Hand Texture2.TIF')
listpath = join(basedir, 'poses', 'handjointssavinglist.txt')
#lights=["light1.lt2","light2.lt2","light3.lt2","light4.lt2"]
lights = ["light1.lt2"]
# Sampling resolution: azimuth/elevation/roll steps and frames per grasp.
nAz = 24
nEl = 12
nRo = 9
nFrames = 6
grasps = ["largeDiameter", "smallDiameter", "mediumWrap", "adductedThumb",
          "lightTool", "prismatic4Finger", "prismatic3Finger",
          "prismatic2Finger", "palmarPinch", "powerDisk", "powerSphere",
          "precisionDisk", "precisionSphere", "tripod", "fixedHook", "lateral",
          "indexFingerExtension", "extensionType", "distalType",
          "writingTripod", "tripodVariation", "parallelExtension",
          "adductionGrip", "tipPinch", "lateralTripod", "sphere4Finger",
          "quadpod", "sphere3Finger", "stick", "palmarGrasp",
          "ringGrasp", "ventralGrasp", "inferiorPincerGrasp"]
#poser.SetNumRenderThreads(4)
#poser.SetRenderInSeparateProcess(1)
for graspIndex in range(len(grasps)):
    # One numbered output directory per grasp (1-based).
    outdir = join(outRoot, '%02d' % (graspIndex+1))
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    for lightindex in range(len(lights)):
        # Interpolate from the rest pose to the grasp pose.
        jointFileName0 = join(taxonomyDir, "rest.txt")
        jointFileName1 = join(taxonomyDir, grasps[graspIndex] + ".txt")
        graspCode = (graspIndex)*(len(lights)) + lightindex + 1
        # close and discard changes
        poser.CloseDocument(1)
        poser.OpenDocument(join(taxonomyDir, grasps[graspIndex] + ".pz3"))
        # BUGFIX: this path was built with plain string concatenation
        # (lightdir+lights[lightindex]), producing ".../lightslight1.lt2";
        # every other path in this script uses join().
        scene.LoadLibraryLight(join(lightdir, lights[lightindex]))
        setTexture.setTexture(texture)
        linecache.checkcache(jointFileName0)
        linecache.checkcache(jointFileName1)
        setCamAZEL.setRenderOptions(scale=0)
        # Hide the ground plane so only the hand is rendered.
        gnd = scene.Actor("GROUND")
        gnd.SetVisible(0)
        gnd.SetVisibleInRender(0)
        gnd.SetVisibleInReflections(0)
        ffly = scene.CurrentFireFlyOptions()
        ffly.SetManual(1)
        setCamAZEL.multiViewSeqRender(basedir, nAz, nEl, nRo, outdir,
                                      jointFileName0, jointFileName1,
                                      nFrames, graspCode, listpath=listpath,
                                      fullSphere=True, f=70,
                                      camName="RHand Camera")
``` |
[
{
"content": "Here is some code:\n```python\nfrom panda3d.core import *\nfrom panda3d.direct import *\nimport CatalogItem\nfrom toontown.toonbase import ToontownGlobals\nfrom otp.otpbase import OTPLocalizer\nfrom toontown.toonbase import TTLocalizer\nbannedPhrases = [11009]\n\nclass CatalogChatItem(CatalogItem.... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nfrom panda3d.core import *\nfrom panda3d.direct import *\nimport CatalogItem\nfrom toontown.toonbase import ToontownGlobals\nfrom otp.otpbase import OTPLocalizer\nfrom toontown.toonbase import TTLocalizer\nbannedPhrases = [11009]\n\nclass CatalogChatI... | ```python
from panda3d.core import *
from panda3d.direct import *
import CatalogItem
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPLocalizer
from toontown.toonbase import TTLocalizer
bannedPhrases = [11009]
class CatalogChatItem(CatalogItem.CatalogItem):
    """Catalog item representing a purchasable custom SpeedChat phrase.

    The phrase is identified by `customIndex`, a key into
    OTPLocalizer.CustomSCStrings.  (Python 2 code: uses print statements.)
    """
    def makeNewItem(self, customIndex):
        # Remember which phrase this item sells before generic init.
        self.customIndex = customIndex
        CatalogItem.CatalogItem.makeNewItem(self)
    def getPurchaseLimit(self):
        # Each phrase may be owned at most once.
        return 1
    def reachedPurchaseLimit(self, avatar):
        # Owned, ordered, gifted or already-known phrases all count.
        if self in avatar.onOrder or self in avatar.mailboxContents or self in avatar.onGiftOrder or self in avatar.awardMailboxContents or self in avatar.onAwardOrder:
            return 1
        return avatar.customMessages.count(self.customIndex) != 0
    def getTypeName(self):
        return TTLocalizer.ChatTypeName
    def getName(self):
        return TTLocalizer.ChatItemQuotes % OTPLocalizer.CustomSCStrings[self.customIndex]
    def getDisplayName(self):
        return OTPLocalizer.CustomSCStrings[self.customIndex]
    def recordPurchase(self, avatar, optional):
        """Add the phrase to the avatar; `optional` may name a slot to evict
        when the phrase list is full.  Returns a ToontownGlobals.P_* code."""
        if avatar.customMessages.count(self.customIndex) != 0:
            return ToontownGlobals.P_ReachedPurchaseLimit
        if len(avatar.customMessages) >= ToontownGlobals.MaxCustomMessages:
            if optional >= 0 and optional < len(avatar.customMessages):
                del avatar.customMessages[optional]
            if len(avatar.customMessages) >= ToontownGlobals.MaxCustomMessages:
                return ToontownGlobals.P_NoRoomForItem
        avatar.customMessages.append(self.customIndex)
        avatar.d_setCustomMessages(avatar.customMessages)
        return ToontownGlobals.P_ItemAvailable
    def getAcceptItemErrorText(self, retcode):
        if retcode == ToontownGlobals.P_ItemAvailable:
            return TTLocalizer.CatalogAcceptChat
        return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
    def output(self, store = -1):
        return 'CatalogChatItem(%s%s)' % (self.customIndex, self.formatOptionalData(store))
    def compareTo(self, other):
        return self.customIndex - other.customIndex
    def getHashContents(self):
        return self.customIndex
    def getBasePrice(self):
        # Indexes >= 10000 are the premium phrase range.
        if self.customIndex >= 10000:
            return 150
        return 100
    def decodeDatagram(self, di, versionNumber, store):
        CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
        self.customIndex = di.getUint16()
        # Lookup is intentionally kept: raises KeyError for an invalid index,
        # rejecting corrupt datagrams; the value itself is unused.
        text = OTPLocalizer.CustomSCStrings[self.customIndex]
    def encodeDatagram(self, dg, store):
        CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
        dg.addUint16(self.customIndex)
    def acceptItem(self, mailbox, index, callback):
        # If the phrase list is full, let the user pick one to replace.
        if len(base.localAvatar.customMessages) < ToontownGlobals.MaxCustomMessages:
            mailbox.acceptItem(self, index, callback)
        else:
            self.showMessagePickerOnAccept(mailbox, index, callback)
    def requestPurchase(self, phone, callback):
        if len(base.localAvatar.customMessages) < ToontownGlobals.MaxCustomMessages:
            CatalogItem.CatalogItem.requestPurchase(self, phone, callback)
        else:
            self.showMessagePicker(phone, callback)
    def showMessagePicker(self, phone, callback):
        # Deferred import: the picker pulls in GUI code.
        self.phone = phone
        self.callback = callback
        import CatalogChatItemPicker
        self.messagePicker = CatalogChatItemPicker.CatalogChatItemPicker(self.__handlePickerDone, self.customIndex)
        self.messagePicker.show()
    def showMessagePickerOnAccept(self, mailbox, index, callback):
        self.mailbox = mailbox
        self.callback = callback
        self.index = index
        import CatalogChatItemPicker
        self.messagePicker = CatalogChatItemPicker.CatalogChatItemPicker(self.__handlePickerOnAccept, self.customIndex)
        self.messagePicker.show()
    def __handlePickerOnAccept(self, status, pickedMessage = None):
        # Called by the picker when accepting from the mailbox; always tears
        # the picker down afterwards, whichever branch ran.
        print 'Picker Status%s' % status
        if status == 'pick':
            self.mailbox.acceptItem(self, self.index, self.callback, pickedMessage)
        else:
            print 'picker canceled'
            self.callback(ToontownGlobals.P_UserCancelled, None, self.index)
        self.messagePicker.hide()
        self.messagePicker.destroy()
        del self.messagePicker
        del self.callback
        del self.mailbox
        return
    def __handlePickerDone(self, status, pickedMessage = None):
        # Called by the picker during purchase; teardown mirrors the above.
        if status == 'pick':
            CatalogItem.CatalogItem.requestPurchase(self, self.phone, self.callback, pickedMessage)
        self.messagePicker.hide()
        self.messagePicker.destroy()
        del self.messagePicker
        del self.callback
        del self.phone
    def getPicture(self, avatar):
        """Build the catalog thumbnail: a stretched chat balloon in a frame."""
        chatBalloon = loader.loadModel('phase_3/models/props/chatbox')
        chatBalloon.find('**/top').setPos(1, 0, 5)
        chatBalloon.find('**/middle').setScale(1, 1, 3)
        frame = self.makeFrame()
        chatBalloon.reparentTo(frame)
        chatBalloon.setPos(-2.19, 0, -1.74)
        chatBalloon.setScale(0.4)
        self.hasPicture = True
        return (frame, None)
def getChatRange(fromIndex, toIndex, *otherRanges):
    """Build a CatalogChatItem for every phrase id that falls inside any of
    the given inclusive (from, to) range pairs, skipping banned phrases.

    Extra ranges are passed as flat pairs: getChatRange(a, b, c, d, ...).
    """
    lows = [fromIndex]
    highs = [toIndex]
    for pos in range(0, len(otherRanges), 2):
        lows.append(otherRanges[pos])
        highs.append(otherRanges[pos + 1])
    items = []
    for chatId in OTPLocalizer.CustomSCStrings.keys():
        for low, high in zip(lows, highs):
            if low <= chatId <= high and chatId not in bannedPhrases:
                items.append(CatalogChatItem(chatId))
    return items
``` |
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n#\n# This file is part of EUDAT B2Share.\n# Copyright (C) 2016 CERN.\n#\n# B2Share is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundati... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# This file is part of EUDAT B2Share.\n# Copyright (C) 2016 CERN.\n#\n# B2Share is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free S... | ```python
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""PID Fetchers."""
from collections import namedtuple
from .providers import DepositUUIDProvider
FetchedPID = namedtuple('FetchedPID', ['provider', 'pid_type', 'pid_value'])
def b2share_deposit_uuid_fetcher(record_uuid, data):
    """Fetch a deposit's identifiers.

    :param record_uuid: unused; part of the standard fetcher signature.
    :param data: record metadata containing the ``_deposit.id`` value.
    :returns: a :class:`FetchedPID` describing the deposit PID.
    """
    deposit_id = data['_deposit']['id']
    return FetchedPID(provider=DepositUUIDProvider,
                      pid_type=DepositUUIDProvider.pid_type,
                      pid_value=str(deposit_id))
``` |
[
{
"content": "Here is a code file:\n```python\nimport threading\nimport collections\nimport log\nimport copy\nimport asyncio\nimport settings\nimport clargs\nfrom recognition.actions.library import pywindow\nfrom recognition.commands import loader\nfrom recognition.actions import perform\nfrom communication imp... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport threading\nimport collections\nimport log\nimport copy\nimport asyncio\nimport settings\nimport clargs\nfrom recognition.actions.library import pywindow\nfrom recognition.commands import loader\nfrom recognition.actions import perform\nfrom c... | ```python
import threading
import collections
import log
import copy
import asyncio
import settings
import clargs
from recognition.actions.library import pywindow
from recognition.commands import loader
from recognition.actions import perform
from communication import topics, pubsub
import time
def create_message_subscriptions(msg_list, command_module_controller):
    """Wire pubsub topics to the watcher's single-slot mailbox and controller.

    Reload requests only set a flag in msg_list (read once per poll cycle);
    PERFORM_COMMANDS is dispatched immediately.
    """
    pubsub.subscribe(topics.RELOAD_COMMAND_MODULE_FILES, lambda: set_message(msg_list, topics.RELOAD_COMMAND_MODULE_FILES))
    pubsub.subscribe(topics.RELOAD_GRAMMAR, lambda: set_message(msg_list, topics.RELOAD_GRAMMAR))
    pubsub.subscribe(topics.PERFORM_COMMANDS,
                     lambda grammar_id, words: perform_commands(command_module_controller, grammar_id, words))
def start_watching_user_state():
    """Load the command modules, subscribe to reload topics and schedule the
    asynchronous user/system state watcher."""
    msg_list = [None]  # single-slot mailbox mutated by pubsub callbacks
    command_module_file_pattern = settings.settings['file_pattern']
    module_loader = loader.StaticFileCommandModuleLoader(settings.settings['command_directory'], command_module_file_pattern)
    command_module_controller = loader.CommandModuleController(module_loader)
    command_module_controller.command_modules = command_module_controller.initialize_command_modules()
    # Removed unused local `engine_status_history` (dead deque, never read).
    create_message_subscriptions(msg_list, command_module_controller)
    fut = watch_user_system_state(msg_list, command_module_controller)
    asyncio.ensure_future(fut)
async def watch_user_system_state(msg_list, command_module_controller):
    """Poll the foreground window, user state and engine settings once per
    second; reload command modules / grammar on change or explicit request.

    Args:
        msg_list: single-slot mailbox written by pubsub subscriptions.
        command_module_controller: owns module loading and grammars.
    """
    from recognition.actions.library.stdlib import namespace, engine
    previous_window = None
    previous_state = None
    previous_engine_settings = copy.copy(engine.settings)
    initial_load_done = False
    while True:
        current_state = copy.copy(namespace['state'])
        current_window = pywindow.foreground_window().title.lower()
        current_engine_settings = copy.copy(engine.settings)
        is_different_window = current_window != previous_window
        is_different_state = current_state != previous_state
        is_different_engine_settings = current_engine_settings != previous_engine_settings
        msg = msg_list[0]
        if is_different_window or is_different_state or msg:
            msg_list[0] = None  # consume the pending message, if any
            new_active_modules = command_module_controller.get_active_modules(current_window)
            reload_files = msg == topics.RELOAD_COMMAND_MODULE_FILES
            if new_active_modules != command_module_controller.active_command_modules or reload_files:
                initialize_modules = not initial_load_done or reload_files
                # BUGFIX: `initialize_modules` was computed but a literal
                # False was passed, so an explicit RELOAD_COMMAND_MODULE_FILES
                # request (and the very first load) never re-initialized.
                command_module_controller.load_modules(current_window, initialize_modules=initialize_modules)
                initial_load_done = True
            elif msg == topics.RELOAD_GRAMMAR:
                raise NotImplementedError  # grammar-only reload not supported yet
            command_module_controller.load_and_send_grammar()
            previous_window = current_window
            previous_state = current_state
        if is_different_engine_settings:
            pubsub.publish(topics.SET_ENGINE_SETTINGS, current_engine_settings)
            previous_engine_settings = current_engine_settings
        await asyncio.sleep(1)
def set_message(msg_list, msg):
    """Deposit *msg* into the single-slot mailbox polled by the watcher."""
    msg_list[0] = msg
def perform_commands(command_module_controller: loader.CommandModuleController, grammar_id: str, words):
    """Perform *words* against the grammar registered under *grammar_id*.

    Logs a warning and does nothing if the grammar has since been removed.
    """
    _missing = object()
    grammar_context = command_module_controller.grammars.get(grammar_id, _missing)
    if grammar_context is _missing:
        log.logger.warning(f'Grammar {grammar_id} no longer exists')
        return
    perform.perform_commands(grammar_context, words)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n\"\"\"\n SleekXMPP: The Sleek XMPP Library\n Copyright (C) 2010 Nathanael C. Fritz\n This file is part of SleekXMPP.\n\n See the file LICENSE for copying permission.\n\"\"\"\n\nfrom socket import _fileobje... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n\"\"\"\n SleekXMPP: The Sleek XMPP Library\n Copyright (C) 2010 Nathanael C. Fritz\n This file is part of SleekXMPP.\n\n See the file LICENSE for copying permission.\n\"\"\"\n\nfrom socket ... | ```python
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from socket import _fileobject
import socket
class FileSocket(_fileobject):
    """
    Create a file object wrapper for a socket to work around
    issues present in Python 2.6 when using sockets as file objects.
    The parser for xml.etree.cElementTree requires a file, but we will
    be reading from the XMPP connection socket instead.
    """

    def read(self, size=4096):
        """Read data from the socket as if it were a file."""
        data = self._sock.recv(size)
        # NOTE(review): socket.recv returns '' (never None) at EOF, so this
        # guard always passes and the implicit None return is unreachable.
        if data is not None:
            return data
class Socket26(socket._socketobject):
    """
    A custom socket implementation that uses our own FileSocket class
    to work around issues in Python 2.6 when using sockets as files.
    """

    def makefile(self, mode='r', bufsize=-1):
        """makefile([mode[, bufsize]]) -> file object
        Return a regular file object corresponding to the socket.  The mode
        and bufsize arguments are as for the built-in open() function."""
        # Wrap the underlying _sock rather than self so the FileSocket
        # workaround (see above) is used instead of the stdlib file object.
        return FileSocket(self._sock, mode, bufsize)
``` |
[
{
"content": "Here is the code block:\n```python\n#!/usr/bin/python\n#\n\n# Copyright (C) 2010, 2012, 2013 Google Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either versio... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n#!/usr/bin/python\n#\n\n# Copyright (C) 2010, 2012, 2013 Google Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundatio... | ```python
#!/usr/bin/python
#
# Copyright (C) 2010, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the RAPI rlib2 module
"""
import unittest
import itertools
import random
from ganeti import constants
from ganeti import opcodes
from ganeti import compat
from ganeti import http
from ganeti import query
import ganeti.rpc.errors as rpcerr
from ganeti import errors
from ganeti import rapi
from ganeti.rapi import rlib2
from ganeti.rapi import baserlib
from ganeti.rapi import connector
import testutils
class _FakeRequestPrivateData:
def __init__(self, body_data):
self.body_data = body_data
class _FakeRequest:
  """Minimal stand-in for a RAPI HTTP request object."""
  def __init__(self, body_data):
    """Wrap *body_data* in private data, as the real request layer does."""
    self.private = _FakeRequestPrivateData(body_data)
def _CreateHandler(cls, items, queryargs, body_data, client_cls):
  """Instantiate RAPI resource *cls* around a fake request carrying *body_data*."""
  fake_request = _FakeRequest(body_data)
  return cls(items, queryargs, fake_request, _client_cls=client_cls)
class _FakeClient:
def __init__(self, address=None):
self._jobs = []
def GetNextSubmittedJob(self):
return self._jobs.pop(0)
def SubmitJob(self, ops):
job_id = str(1 + int(random.random() * 1000000))
self._jobs.append((job_id, ops))
return job_id
class _FakeClientFactory:
def __init__(self, cls):
self._client_cls = cls
self._clients = []
def GetNextClient(self):
return self._clients.pop(0)
def __call__(self, address=None):
cl = self._client_cls(address=address)
self._clients.append(cl)
return cl
class TestConstants(unittest.TestCase):
  """Sanity checks on the field lists rlib2 exports over RAPI."""
  def testConsole(self):
    # Exporting the console field without authentication might expose
    # information
    assert "console" in query.INSTANCE_FIELDS
    self.assertTrue("console" not in rlib2.I_FIELDS)
  def testFields(self):
    # Every RAPI-exported field must exist in the query definitions.
    checks = {
      constants.QR_INSTANCE: rlib2.I_FIELDS,
      constants.QR_NODE: rlib2.N_FIELDS,
      constants.QR_GROUP: rlib2.G_FIELDS,
    }
    for (qr, fields) in checks.items():
      self.assertFalse(set(fields) - set(query.ALL_FIELDS[qr].keys()))
class TestClientConnectError(unittest.TestCase):
  """A failing LUXI connection must surface as HTTP 502 Bad Gateway."""
  @staticmethod
  def _FailingClient(address=None):
    raise rpcerr.NoMasterError("test")
  def test(self):
    resources = [
      rlib2.R_2_groups,
      rlib2.R_2_instances,
      rlib2.R_2_nodes,
      ]
    for cls in resources:
      handler = _CreateHandler(cls, ["name"], {}, None, self._FailingClient)
      self.assertRaises(http.HttpBadGateway, handler.GET)
class TestJobSubmitError(unittest.TestCase):
  """A full job queue must surface as HTTP 503 Service Unavailable."""
  class _SubmitErrorClient:
    def __init__(self, address=None):
      pass
    @staticmethod
    def SubmitJob(ops):
      raise errors.JobQueueFull("test")
  def test(self):
    handler = _CreateHandler(rlib2.R_2_redist_config, [], {}, None,
                             self._SubmitErrorClient)
    self.assertRaises(http.HttpServiceUnavailable, handler.PUT)
class TestClusterModify(unittest.TestCase):
  """PUT /2/modify must submit exactly one OpClusterSetParams."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_cluster_modify, [], {}, {
      "vg_name": "testvg",
      "candidate_pool_size": 100,
      }, clfactory)
    job_id = handler.PUT()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpClusterSetParams))
    self.assertEqual(op.vg_name, "testvg")
    self.assertEqual(op.candidate_pool_size, 100)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
  def testInvalidValue(self):
    # Bad parameter values must be rejected before any job is submitted.
    for attr in ["vg_name", "candidate_pool_size", "beparams", "_-Unknown#"]:
      clfactory = _FakeClientFactory(_FakeClient)
      handler = _CreateHandler(rlib2.R_2_cluster_modify, [], {}, {
        attr: True,
        }, clfactory)
      self.assertRaises(http.HttpBadRequest, handler.PUT)
      self.assertRaises(IndexError, clfactory.GetNextClient)
class TestRedistConfig(unittest.TestCase):
  """PUT /2/redistribute-config must submit one OpClusterRedistConf."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_redist_config, [], {}, None, clfactory)
    job_id = handler.PUT()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpClusterRedistConf))
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestNodeMigrate(unittest.TestCase):
  """POST /2/nodes/[name]/migrate: body params, query args and conflicts."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node1"], {}, {
      "iallocator": "fooalloc",
      }, clfactory)
    job_id = handler.POST()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpNodeMigrate))
    self.assertEqual(op.node_name, "node1")
    self.assertEqual(op.iallocator, "fooalloc")
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
  def testQueryArgsConflict(self):
    # "live" and "mode" are mutually exclusive query arguments.
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node2"], {
      "live": True,
      "mode": constants.HT_MIGRATION_NONLIVE,
      }, None, clfactory)
    self.assertRaises(http.HttpBadRequest, handler.POST)
    self.assertRaises(IndexError, clfactory.GetNextClient)
  def testQueryArgsMode(self):
    clfactory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "mode": [constants.HT_MIGRATION_LIVE],
      }
    handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node17292"],
                             queryargs, None, clfactory)
    job_id = handler.POST()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpNodeMigrate))
    self.assertEqual(op.node_name, "node17292")
    self.assertEqual(op.mode, constants.HT_MIGRATION_LIVE)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
  def testQueryArgsLive(self):
    # The boolean "live" query arg maps onto the migration mode.
    clfactory = _FakeClientFactory(_FakeClient)
    for live in [False, True]:
      queryargs = {
        "live": [str(int(live))],
        }
      handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node6940"],
                               queryargs, None, clfactory)
      job_id = handler.POST()
      cl = clfactory.GetNextClient()
      self.assertRaises(IndexError, clfactory.GetNextClient)
      (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
      self.assertEqual(job_id, exp_job_id)
      self.assertTrue(isinstance(op, opcodes.OpNodeMigrate))
      self.assertEqual(op.node_name, "node6940")
      if live:
        self.assertEqual(op.mode, constants.HT_MIGRATION_LIVE)
      else:
        self.assertEqual(op.mode, constants.HT_MIGRATION_NONLIVE)
      self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestNodeEvacuate(unittest.TestCase):
  """POST /2/nodes/[name]/evacuate must submit one OpNodeEvacuate."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_nodes_name_evacuate, ["node92"], {
      "dry-run": ["1"],
      }, {
      "mode": constants.NODE_EVAC_SEC,
      }, clfactory)
    job_id = handler.POST()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpNodeEvacuate))
    self.assertEqual(op.node_name, "node92")
    self.assertEqual(op.mode, constants.NODE_EVAC_SEC)
    self.assertTrue(op.dry_run)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestNodePowercycle(unittest.TestCase):
  """POST /2/nodes/[name]/powercycle must submit one OpNodePowercycle."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_nodes_name_powercycle, ["node20744"], {
      "force": ["1"],
      }, None, clfactory)
    job_id = handler.POST()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpNodePowercycle))
    self.assertEqual(op.node_name, "node20744")
    self.assertTrue(op.force)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestGroupAssignNodes(unittest.TestCase):
  """PUT /2/groups/[name]/assign-nodes must submit one OpGroupAssignNodes."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_groups_name_assign_nodes, ["grp-a"], {
      "dry-run": ["1"],
      "force": ["1"],
      }, {
      "nodes": ["n2", "n3"],
      }, clfactory)
    job_id = handler.PUT()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpGroupAssignNodes))
    self.assertEqual(op.group_name, "grp-a")
    self.assertEqual(op.nodes, ["n2", "n3"])
    self.assertTrue(op.dry_run)
    self.assertTrue(op.force)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestInstanceDelete(unittest.TestCase):
  """DELETE /2/instances/[name] must submit one OpInstanceRemove."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name, ["inst30965"], {
      "dry-run": ["1"],
      }, {}, clfactory)
    job_id = handler.DELETE()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceRemove))
    self.assertEqual(op.instance_name, "inst30965")
    self.assertTrue(op.dry_run)
    self.assertFalse(op.ignore_failures)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestInstanceInfo(unittest.TestCase):
  """GET /2/instances/[name]/info must submit one OpInstanceQueryData."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name_info, ["inst31217"], {
      "static": ["1"],
      }, {}, clfactory)
    job_id = handler.GET()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceQueryData))
    self.assertEqual(op.instances, ["inst31217"])
    self.assertTrue(op.static)
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestInstanceReboot(unittest.TestCase):
  """POST /2/instances/[name]/reboot: opcode fields and reason trail."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name_reboot, ["inst847"], {
      "dry-run": ["1"],
      "ignore_secondaries": ["1"],
      "reason": ["System update"],
      }, {}, clfactory)
    job_id = handler.POST()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceReboot))
    self.assertEqual(op.instance_name, "inst847")
    self.assertEqual(op.reboot_type, constants.INSTANCE_REBOOT_HARD)
    self.assertTrue(op.ignore_secondaries)
    self.assertTrue(op.dry_run)
    # The reason trail records the user-supplied reason plus the RAPI source.
    self.assertEqual(op.reason[0][0], constants.OPCODE_REASON_SRC_USER)
    self.assertEqual(op.reason[0][1], "System update")
    self.assertEqual(op.reason[1][0],
                     "%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2,
                                "instances_name_reboot"))
    self.assertEqual(op.reason[1][1], "")
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestInstanceStartup(unittest.TestCase):
  """PUT /2/instances/[name]/startup: opcode fields and reason trail."""
  def test(self):
    clfactory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name_startup, ["inst31083"], {
      "force": ["1"],
      "no_remember": ["1"],
      "reason": ["Newly created instance"],
      }, {}, clfactory)
    job_id = handler.PUT()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceStartup))
    self.assertEqual(op.instance_name, "inst31083")
    self.assertTrue(op.no_remember)
    self.assertTrue(op.force)
    self.assertFalse(op.dry_run)
    # The reason trail records the user-supplied reason plus the RAPI source.
    self.assertEqual(op.reason[0][0], constants.OPCODE_REASON_SRC_USER)
    self.assertEqual(op.reason[0][1], "Newly created instance")
    self.assertEqual(op.reason[1][0],
                     "%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2,
                                "instances_name_startup"))
    self.assertEqual(op.reason[1][1], "")
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestInstanceShutdown(unittest.TestCase):
  """Tests for the /2/instances/[name]/shutdown resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "no_remember": ["0"],
      "reason": ["Not used anymore"],
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_shutdown, ["inst26791"],
                             queryargs, {}, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceShutdown))
    self.assertEqual(op.instance_name, "inst26791")
    self.assertFalse(op.no_remember)
    self.assertFalse(op.dry_run)

    # Reason trail: user-supplied entry first, then the RAPI source entry
    self.assertEqual(op.reason[0][0], constants.OPCODE_REASON_SRC_USER)
    self.assertEqual(op.reason[0][1], "Not used anymore")
    self.assertEqual(op.reason[1][0],
                     "%s:%s" % (constants.OPCODE_REASON_SRC_RLIB2,
                                "instances_name_shutdown"))
    self.assertEqual(op.reason[1][1], "")
class TestInstanceActivateDisks(unittest.TestCase):
  """Tests for the /2/instances/[name]/activate-disks resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "ignore_size": ["1"],
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_activate_disks, ["xyz"],
                             queryargs, {}, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceActivateDisks))
    self.assertEqual(op.instance_name, "xyz")
    self.assertTrue(op.ignore_size)
    self.assertFalse(op.dry_run)
class TestInstanceDeactivateDisks(unittest.TestCase):
  """Tests for the /2/instances/[name]/deactivate-disks resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name_deactivate_disks,
                             ["inst22357"], {}, {}, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceDeactivateDisks))
    self.assertEqual(op.instance_name, "inst22357")
    self.assertFalse(op.dry_run)
    self.assertFalse(op.force)
class TestInstanceRecreateDisks(unittest.TestCase):
  """Tests for the /2/instances/[name]/recreate-disks resource (POST)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name_recreate_disks,
                             ["inst22357"], {}, {}, factory)
    job_id = handler.POST()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceRecreateDisks))
    self.assertEqual(op.instance_name, "inst22357")
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))
class TestInstanceFailover(unittest.TestCase):
  """Tests for the /2/instances/[name]/failover resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_instances_name_failover,
                             ["inst12794"], {}, {}, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceFailover))
    self.assertEqual(op.instance_name, "inst12794")
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))
class TestInstanceDiskGrow(unittest.TestCase):
  """Tests for the /2/instances/[name]/disk/[index]/grow resource (POST)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    body = {
      "amount": 1024,
      }
    # The second URI item ("3") is the disk index, converted to an integer
    handler = _CreateHandler(rlib2.R_2_instances_name_disk_grow,
                             ["inst10742", "3"], {}, body, factory)
    job_id = handler.POST()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceGrowDisk))
    self.assertEqual(op.instance_name, "inst10742")
    self.assertEqual(op.disk, 3)
    self.assertEqual(op.amount, 1024)
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))
class TestBackupPrepare(unittest.TestCase):
  """Tests for the /2/instances/[name]/prepare-export resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "mode": constants.EXPORT_MODE_REMOTE,
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_prepare_export,
                             ["inst17925"], queryargs, {}, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpBackupPrepare))
    self.assertEqual(op.instance_name, "inst17925")
    self.assertEqual(op.mode, constants.EXPORT_MODE_REMOTE)
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))
class TestGroupRemove(unittest.TestCase):
  """Tests for the /2/groups/[name] resource (DELETE)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    handler = _CreateHandler(rlib2.R_2_groups_name,
                             ["grp28575"], {}, {}, factory)
    job_id = handler.DELETE()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpGroupRemove))
    self.assertEqual(op.group_name, "grp28575")
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))
class TestStorageQuery(unittest.TestCase):
  """Tests for the /2/nodes/[name]/storage resource (GET)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "storage_type": constants.ST_LVM_PV,
      "output_fields": "name,other",
      }
    handler = _CreateHandler(rlib2.R_2_nodes_name_storage,
                             ["node21075"], queryargs, {}, factory)
    job_id = handler.GET()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpNodeQueryStorage))
    self.assertEqual(op.nodes, ["node21075"])
    self.assertEqual(op.storage_type, constants.ST_LVM_PV)
    # The comma-separated field list is split into its components
    self.assertEqual(op.output_fields, ["name", "other"])
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))

  def testErrors(self):
    factory = _FakeClientFactory(_FakeClient)

    # Each (node, query arguments) pair must be rejected with HTTP 400
    for node, queryargs in [
      # Storage type which does not support space reporting
      ("node21273", {
        "storage_type": constants.ST_DISKLESS,
        }),
      ("node21273", {
        "storage_type": constants.ST_LVM_VG,
        }),
      # Unknown storage type
      ("node10315", {
        "storage_type": "##unknown_storage##",
        "output_fields": "name,other",
        }),
      ]:
      handler = _CreateHandler(rlib2.R_2_nodes_name_storage, [node],
                               queryargs, {}, factory)
      self.assertRaises(http.HttpBadRequest, handler.GET)
class TestStorageModify(unittest.TestCase):
  """Tests for the /2/nodes/[name]/storage/modify resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)

    # "allocatable" is optional; when given it must be "0" or "1"
    for allocatable in [None, "1", "0"]:
      queryargs = {
        "storage_type": constants.ST_LVM_VG,
        "name": "pv-a",
        }
      if allocatable is not None:
        queryargs["allocatable"] = allocatable

      handler = _CreateHandler(rlib2.R_2_nodes_name_storage_modify,
                               ["node9292"], queryargs, {}, factory)
      job_id = handler.PUT()

      client = factory.GetNextClient()
      self.assertRaises(IndexError, factory.GetNextClient)

      (got_job_id, (op, )) = client.GetNextSubmittedJob()
      self.assertRaises(IndexError, client.GetNextSubmittedJob)
      self.assertEqual(job_id, got_job_id)

      self.assertTrue(isinstance(op, opcodes.OpNodeModifyStorage))
      self.assertEqual(op.node_name, "node9292")
      self.assertEqual(op.storage_type, constants.ST_LVM_VG)
      self.assertEqual(op.name, "pv-a")
      if allocatable is None:
        # Without the parameter no change must be requested
        self.assertFalse(op.changes)
      else:
        assert allocatable in ("0", "1")
        self.assertEqual(op.changes, {
          constants.SF_ALLOCATABLE: (allocatable == "1"),
          })
      self.assertFalse(op.dry_run)
      self.assertFalse(hasattr(op, "force"))

  def testErrors(self):
    factory = _FakeClientFactory(_FakeClient)

    # Each (node, query arguments) pair must be rejected with HTTP 400
    for node, queryargs in [
      # No storage type
      ("node26016", {
        "name": "xyz",
        }),
      # No name
      ("node21218", {
        "storage_type": constants.ST_LVM_VG,
        }),
      # Invalid value for "allocatable"
      ("node30685", {
        "storage_type": constants.ST_LVM_VG,
        "name": "pv-b",
        "allocatable": "noint",
        }),
      ]:
      handler = _CreateHandler(rlib2.R_2_nodes_name_storage_modify, [node],
                               queryargs, {}, factory)
      self.assertRaises(http.HttpBadRequest, handler.PUT)
class TestStorageRepair(unittest.TestCase):
  """Tests for the /2/nodes/[name]/storage/repair resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "storage_type": constants.ST_LVM_PV,
      "name": "pv16611",
      }
    handler = _CreateHandler(rlib2.R_2_nodes_name_storage_repair,
                             ["node19265"], queryargs, {}, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpRepairNodeStorage))
    self.assertEqual(op.node_name, "node19265")
    self.assertEqual(op.storage_type, constants.ST_LVM_PV)
    self.assertEqual(op.name, "pv16611")
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))

  def testErrors(self):
    factory = _FakeClientFactory(_FakeClient)

    # Each (node, query arguments) pair must be rejected with HTTP 400
    for node, queryargs in [
      # No storage type
      ("node11275", {
        "name": "xyz",
        }),
      # No name
      ("node21218", {
        "storage_type": constants.ST_LVM_VG,
        }),
      ]:
      handler = _CreateHandler(rlib2.R_2_nodes_name_storage_repair, [node],
                               queryargs, {}, factory)
      self.assertRaises(http.HttpBadRequest, handler.PUT)
class TestTags(unittest.TestCase):
  """Tests for the tag resources of instances, nodes, groups and cluster."""
  TAG_HANDLERS = [
    rlib2.R_2_instances_name_tags,
    rlib2.R_2_nodes_name_tags,
    rlib2.R_2_groups_name_tags,
    rlib2.R_2_tags,
    ]

  def testSetAndDelete(self):
    factory = _FakeClientFactory(_FakeClient)

    # PUT sets tags, DELETE removes them; both share the same checks
    for method, opcls in [("PUT", opcodes.OpTagsSet),
                          ("DELETE", opcodes.OpTagsDel)]:
      for idx, hcls in enumerate(self.TAG_HANDLERS):
        dry_run = bool(idx % 2)
        name = "test%s" % idx
        queryargs = {
          "tag": ["foo", "bar", "baz"],
          "dry-run": str(int(dry_run)),
          }

        handler = _CreateHandler(hcls, [name], queryargs, {}, factory)
        job_id = getattr(handler, method)()

        client = factory.GetNextClient()
        self.assertRaises(IndexError, factory.GetNextClient)

        (got_job_id, (op, )) = client.GetNextSubmittedJob()
        self.assertRaises(IndexError, client.GetNextSubmittedJob)
        self.assertEqual(job_id, got_job_id)

        self.assertTrue(isinstance(op, opcls))
        self.assertEqual(op.kind, handler.TAG_LEVEL)
        # Cluster-level tags are not bound to a named object
        if handler.TAG_LEVEL == constants.TAG_CLUSTER:
          self.assertTrue(op.name is None)
        else:
          self.assertEqual(op.name, name)
        self.assertEqual(op.tags, ["foo", "bar", "baz"])
        self.assertEqual(op.dry_run, dry_run)
        self.assertFalse(hasattr(op, "force"))
class TestInstanceCreation(testutils.GanetiTestCase):
  """Tests for the /2/instances resource (POST, instance creation)."""

  def test(self):
    """Checks instance creation over many parameter combinations.

    For every combination exactly one C{OpInstanceCreate} opcode must be
    submitted, carrying the values from the request body.

    """
    clfactory = _FakeClientFactory(_FakeClient)

    name = "inst863.example.com"

    # Disk specifications combined with the other parameters below
    disk_variants = [
      # No disks
      [],

      # Two disks
      [{"size": 5, }, {"size": 100, }],

      # Disk with mode
      [{"size": 123, "mode": constants.DISK_RDWR, }],
      ]

    # NIC specifications
    nic_variants = [
      # No NIC
      [],

      # Three NICs
      [{}, {}, {}],

      # Two NICs
      [
        { "ip": "192.0.2.6", "mode": constants.NIC_MODE_ROUTED,
          "mac": "01:23:45:67:68:9A",
          },
        { "mode": constants.NIC_MODE_BRIDGED, "link": "br1" },
        ],
      ]

    # Backend parameters (None means the key is not sent at all)
    beparam_variants = [
      None,
      {},
      { constants.BE_VCPUS: 2, },
      { constants.BE_MAXMEM: 200, },
      { constants.BE_MEMORY: 256, },
      { constants.BE_VCPUS: 2,
        constants.BE_MAXMEM: 1024,
        constants.BE_MINMEM: 1024,
        constants.BE_AUTO_BALANCE: True,
        constants.BE_ALWAYS_FAILOVER: True, }
      ]

    # Hypervisor parameters (None means the key is not sent at all)
    hvparam_variants = [
      None,
      { constants.HV_BOOT_ORDER: "anc", },
      { constants.HV_KERNEL_PATH: "/boot/fookernel",
        constants.HV_ROOT_PATH: "/dev/hda1", },
      ]

    for mode in [constants.INSTANCE_CREATE, constants.INSTANCE_IMPORT]:
      for nics in nic_variants:
        for disk_template in constants.DISK_TEMPLATES:
          for disks in disk_variants:
            for beparams in beparam_variants:
              for hvparams in hvparam_variants:
                for dry_run in [False, True]:
                  queryargs = {
                    "dry-run": str(int(dry_run)),
                    }

                  data = {
                    rlib2._REQ_DATA_VERSION: 1,
                    "name": name,
                    "hypervisor": constants.HT_FAKE,
                    "disks": disks,
                    "nics": nics,
                    "mode": mode,
                    "disk_template": disk_template,
                    "os": "debootstrap",
                    }

                  if beparams is not None:
                    data["beparams"] = beparams

                  if hvparams is not None:
                    data["hvparams"] = hvparams

                  handler = _CreateHandler(rlib2.R_2_instances, [],
                                           queryargs, data, clfactory)
                  job_id = handler.POST()

                  cl = clfactory.GetNextClient()
                  self.assertRaises(IndexError, clfactory.GetNextClient)

                  (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
                  self.assertEqual(job_id, exp_job_id)
                  self.assertRaises(IndexError, cl.GetNextSubmittedJob)

                  self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
                  self.assertEqual(op.instance_name, name)
                  self.assertEqual(op.mode, mode)
                  self.assertEqual(op.disk_template, disk_template)
                  self.assertEqual(op.dry_run, dry_run)
                  self.assertEqual(len(op.disks), len(disks))
                  self.assertEqual(len(op.nics), len(nics))

                  # Only known disk/NIC parameters may be forwarded
                  for opdisk, disk in zip(op.disks, disks):
                    for key in constants.IDISK_PARAMS:
                      self.assertEqual(opdisk.get(key), disk.get(key))
                    self.assertFalse("unknown" in opdisk)

                  for opnic, nic in zip(op.nics, nics):
                    for key in constants.INIC_PARAMS:
                      self.assertEqual(opnic.get(key), nic.get(key))
                    self.assertFalse("unknown" in opnic)
                    self.assertFalse("foobar" in opnic)

                  # Omitted parameter groups end up empty or None
                  if beparams is None:
                    self.assertTrue(op.beparams in [None, {}])
                  else:
                    self.assertEqualValues(op.beparams, beparams)

                  if hvparams is None:
                    self.assertTrue(op.hvparams in [None, {}])
                  else:
                    self.assertEqualValues(op.hvparams, hvparams)

  def testLegacyName(self):
    """The old "name" key is accepted, but not together with "instance_name"."""
    clfactory = _FakeClientFactory(_FakeClient)

    name = "inst29128.example.com"
    data = {
      rlib2._REQ_DATA_VERSION: 1,
      "name": name,
      "disks": [],
      "nics": [],
      "mode": constants.INSTANCE_CREATE,
      "disk_template": constants.DT_PLAIN,
      }

    handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
    job_id = handler.POST()

    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)

    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
    self.assertEqual(op.instance_name, name)
    self.assertFalse(hasattr(op, "name"))
    self.assertFalse(op.dry_run)

    self.assertRaises(IndexError, cl.GetNextSubmittedJob)

    # Define both
    data["instance_name"] = "other.example.com"
    assert "name" in data and "instance_name" in data
    handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
    self.assertRaises(http.HttpBadRequest, handler.POST)
    self.assertRaises(IndexError, clfactory.GetNextClient)

  def testLegacyOs(self):
    """The old "os_type" key is accepted, but not together with "os"."""
    clfactory = _FakeClientFactory(_FakeClient)

    name = "inst4673.example.com"
    os = "linux29206"
    data = {
      rlib2._REQ_DATA_VERSION: 1,
      "name": name,
      "os_type": os,
      "disks": [],
      "nics": [],
      "mode": constants.INSTANCE_CREATE,
      "disk_template": constants.DT_PLAIN,
      }

    handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
    job_id = handler.POST()

    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)

    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
    self.assertEqual(op.instance_name, name)
    self.assertEqual(op.os_type, os)
    self.assertFalse(hasattr(op, "os"))
    self.assertFalse(op.dry_run)

    self.assertRaises(IndexError, cl.GetNextSubmittedJob)

    # Define both
    data["os"] = "linux9584"
    assert "os" in data and "os_type" in data
    handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
    self.assertRaises(http.HttpBadRequest, handler.POST)

  def testErrors(self):
    """Requests with missing required fields or malformed values fail."""
    clfactory = _FakeClientFactory(_FakeClient)

    # Test all required fields
    reqfields = {
      rlib2._REQ_DATA_VERSION: 1,
      "name": "inst1.example.com",
      "disks": [],
      "nics": [],
      "mode": constants.INSTANCE_CREATE,
      }

    for name in reqfields.keys():
      # Drop one required field at a time; "items" is used instead of the
      # Python 2-only "iteritems" (behaviour is identical here)
      data = dict(i for i in reqfields.items() if i[0] != name)

      handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
      self.assertRaises(http.HttpBadRequest, handler.POST)
      self.assertRaises(IndexError, clfactory.GetNextClient)

    # Invalid disks and nics
    for field in ["disks", "nics"]:
      invalid_values = [None, 1, "", {}, [1, 2, 3], ["hda1", "hda2"],
                        [{"_unknown_": False, }]]

      for invvalue in invalid_values:
        data = reqfields.copy()
        data[field] = invvalue
        handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
        self.assertRaises(http.HttpBadRequest, handler.POST)
        self.assertRaises(IndexError, clfactory.GetNextClient)

  def testVersion(self):
    """The data version field is required and must be exactly 1."""
    clfactory = _FakeClientFactory(_FakeClient)

    # No version field
    data = {
      "name": "inst1.example.com",
      "disks": [],
      "nics": [],
      "mode": constants.INSTANCE_CREATE,
      "disk_template": constants.DT_PLAIN,
      }

    handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
    self.assertRaises(http.HttpBadRequest, handler.POST)

    # Old and incorrect versions
    for version in [0, -1, 10483, "Hello World"]:
      data[rlib2._REQ_DATA_VERSION] = version

      handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
      self.assertRaises(http.HttpBadRequest, handler.POST)
      self.assertRaises(IndexError, clfactory.GetNextClient)

    # Correct version
    data[rlib2._REQ_DATA_VERSION] = 1
    handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
    job_id = handler.POST()

    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)

    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
    self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestBackupExport(unittest.TestCase):
  """Tests for the /2/instances/[name]/export resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "instmoo"
    body = {
      "mode": constants.EXPORT_MODE_REMOTE,
      "destination": [(1, 2, 3), (99, 99, 99)],
      "shutdown": True,
      "remove_instance": True,
      "x509_key_name": ["name", "hash"],
      "destination_x509_ca": "---cert---"
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_export, [name], {},
                             body, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpBackupExport))
    self.assertEqual(op.instance_name, name)
    self.assertEqual(op.mode, constants.EXPORT_MODE_REMOTE)
    # "destination" is mapped to the opcode's target_node field
    self.assertEqual(op.target_node, [(1, 2, 3), (99, 99, 99)])
    self.assertEqual(op.shutdown, True)
    self.assertEqual(op.remove_instance, True)
    self.assertEqual(op.x509_key_name, ["name", "hash"])
    self.assertEqual(op.destination_x509_ca, "---cert---")
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))

  def testDefaults(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "inst1"
    body = {
      "destination": "node2",
      "shutdown": False,
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_export, [name], {},
                             body, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpBackupExport))
    self.assertEqual(op.instance_name, name)
    self.assertEqual(op.target_node, "node2")
    # Without an explicit mode a local export is requested
    self.assertEqual(op.mode, "local")
    self.assertFalse(op.remove_instance)
    self.assertFalse(hasattr(op, "destination"))
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))

  def testErrors(self):
    factory = _FakeClientFactory(_FakeClient)

    # "remove_instance" must be a boolean, not a string
    for value in ["True", "False"]:
      body = {
        "remove_instance": value,
        }
      handler = _CreateHandler(rlib2.R_2_instances_name_export, ["err1"], {},
                               body, factory)
      self.assertRaises(http.HttpBadRequest, handler.PUT)
class TestInstanceMigrate(testutils.GanetiTestCase):
  """Tests for the /2/instances/[name]/migrate resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "instYooho6ek"

    for cleanup in [False, True]:
      for mode in constants.HT_MIGRATION_MODES:
        body = {
          "cleanup": cleanup,
          "mode": mode,
          }
        handler = _CreateHandler(rlib2.R_2_instances_name_migrate, [name], {},
                                 body, factory)
        job_id = handler.PUT()

        client = factory.GetNextClient()
        self.assertRaises(IndexError, factory.GetNextClient)

        (got_job_id, (op, )) = client.GetNextSubmittedJob()
        self.assertRaises(IndexError, client.GetNextSubmittedJob)
        self.assertEqual(job_id, got_job_id)

        self.assertTrue(isinstance(op, opcodes.OpInstanceMigrate))
        self.assertEqual(op.instance_name, name)
        self.assertEqual(op.mode, mode)
        self.assertEqual(op.cleanup, cleanup)
        self.assertFalse(op.dry_run)
        self.assertFalse(hasattr(op, "force"))

  def testDefaults(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "instnohZeex0"

    handler = _CreateHandler(rlib2.R_2_instances_name_migrate, [name], {}, {},
                             factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceMigrate))
    self.assertEqual(op.instance_name, name)
    # With an empty body, mode stays unset and cleanup defaults to off
    self.assertTrue(op.mode is None)
    self.assertFalse(op.cleanup)
    self.assertFalse(op.dry_run)
    self.assertFalse(hasattr(op, "force"))
class TestParseRenameInstanceRequest(testutils.GanetiTestCase):
  """Tests for the /2/instances/[name]/rename resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "instij0eeph7"

    for new_name in ["ua0aiyoo", "fai3ongi"]:
      for ip_check in [False, True]:
        for name_check in [False, True]:
          body = {
            "new_name": new_name,
            "ip_check": ip_check,
            "name_check": name_check,
            }
          handler = _CreateHandler(rlib2.R_2_instances_name_rename, [name],
                                   {}, body, factory)
          job_id = handler.PUT()

          client = factory.GetNextClient()
          self.assertRaises(IndexError, factory.GetNextClient)

          (got_job_id, (op, )) = client.GetNextSubmittedJob()
          self.assertRaises(IndexError, client.GetNextSubmittedJob)
          self.assertEqual(job_id, got_job_id)

          self.assertTrue(isinstance(op, opcodes.OpInstanceRename))
          self.assertEqual(op.instance_name, name)
          self.assertEqual(op.new_name, new_name)
          self.assertEqual(op.ip_check, ip_check)
          self.assertEqual(op.name_check, name_check)
          self.assertFalse(op.dry_run)
          self.assertFalse(hasattr(op, "force"))

  def testDefaults(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "instahchie3t"

    for new_name in ["thag9mek", "quees7oh"]:
      body = {
        "new_name": new_name,
        }
      handler = _CreateHandler(rlib2.R_2_instances_name_rename, [name],
                               {}, body, factory)
      job_id = handler.PUT()

      client = factory.GetNextClient()
      self.assertRaises(IndexError, factory.GetNextClient)

      (got_job_id, (op, )) = client.GetNextSubmittedJob()
      self.assertRaises(IndexError, client.GetNextSubmittedJob)
      self.assertEqual(job_id, got_job_id)

      self.assertTrue(isinstance(op, opcodes.OpInstanceRename))
      self.assertEqual(op.instance_name, name)
      self.assertEqual(op.new_name, new_name)
      # Both checks default to enabled
      self.assertTrue(op.ip_check)
      self.assertTrue(op.name_check)
      self.assertFalse(op.dry_run)
      self.assertFalse(hasattr(op, "force"))
class TestParseModifyInstanceRequest(unittest.TestCase):
  """Tests for the /2/instances/[name]/modify resource (PUT)."""
  def test(self):
    # Checks that every combination of parameters is forwarded unmodified
    # into a single OpInstanceSetParams opcode.
    clfactory = _FakeClientFactory(_FakeClient)
    name = "instush8gah"
    # Disk modifications: none, or a mode change for disk index 1
    test_disks = [
      [],
      [(1, { constants.IDISK_MODE: constants.DISK_RDWR, })],
      ]
    for osparams in [{}, { "some": "value", "other": "Hello World", }]:
      for hvparams in [{}, { constants.HV_KERNEL_PATH: "/some/kernel", }]:
        for beparams in [{}, { constants.BE_MAXMEM: 128, }]:
          for force in [False, True]:
            for nics in [[], [(0, { constants.INIC_IP: "192.0.2.1", })]]:
              for disks in test_disks:
                for disk_template in constants.DISK_TEMPLATES:
                  data = {
                    "osparams": osparams,
                    "hvparams": hvparams,
                    "beparams": beparams,
                    "nics": nics,
                    "disks": disks,
                    "force": force,
                    "disk_template": disk_template,
                    }
                  handler = _CreateHandler(rlib2.R_2_instances_name_modify,
                                           [name], {}, data, clfactory)
                  job_id = handler.PUT()
                  # Exactly one client with exactly one submitted job
                  cl = clfactory.GetNextClient()
                  self.assertRaises(IndexError, clfactory.GetNextClient)
                  (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
                  self.assertEqual(job_id, exp_job_id)
                  self.assertTrue(isinstance(op, opcodes.OpInstanceSetParams))
                  self.assertEqual(op.instance_name, name)
                  self.assertEqual(op.hvparams, hvparams)
                  self.assertEqual(op.beparams, beparams)
                  self.assertEqual(op.osparams, osparams)
                  self.assertEqual(op.force, force)
                  self.assertEqual(op.nics, nics)
                  self.assertEqual(op.disks, disks)
                  self.assertEqual(op.disk_template, disk_template)
                  # Fields not in the request must keep their default values
                  self.assertTrue(op.remote_node is None)
                  self.assertTrue(op.os_name is None)
                  self.assertFalse(op.force_variant)
                  self.assertFalse(op.dry_run)
                  self.assertRaises(IndexError, cl.GetNextSubmittedJob)
  def testDefaults(self):
    # An empty request body must still produce an opcode with all the
    # listed attributes present
    clfactory = _FakeClientFactory(_FakeClient)
    name = "instir8aish31"
    handler = _CreateHandler(rlib2.R_2_instances_name_modify,
                             [name], {}, {}, clfactory)
    job_id = handler.PUT()
    cl = clfactory.GetNextClient()
    self.assertRaises(IndexError, clfactory.GetNextClient)
    (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
    self.assertEqual(job_id, exp_job_id)
    self.assertTrue(isinstance(op, opcodes.OpInstanceSetParams))
    self.assertEqual(op.instance_name, name)
    for i in ["hvparams", "beparams", "osparams", "force", "nics", "disks",
              "disk_template", "remote_node", "os_name", "force_variant"]:
      self.assertTrue(hasattr(op, i))
class TestParseInstanceReinstallRequest(testutils.GanetiTestCase):
  """Tests for rlib2._ParseInstanceReinstallRequest."""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)

    self.Parse = rlib2._ParseInstanceReinstallRequest

  def _Check(self, ops, name):
    """Checks C{ops} is a shutdown/reinstall(/startup) sequence for C{name}."""
    expcls = [
      opcodes.OpInstanceShutdown,
      opcodes.OpInstanceReinstall,
      opcodes.OpInstanceStartup,
      ]

    # assertTrue instead of the deprecated assert_ alias, for consistency
    # with the rest of this file
    self.assertTrue(compat.all(isinstance(op, exp)
                               for op, exp in zip(ops, expcls)))
    self.assertTrue(compat.all(op.instance_name == name for op in ops))

  def test(self):
    name = "shoo0tihohma"

    ops = self.Parse(name, {"os": "sys1", "start": True,})
    self.assertEqual(len(ops), 3)
    self._Check(ops, name)
    self.assertEqual(ops[1].os_type, "sys1")
    self.assertFalse(ops[1].osparams)

    # Without "start" the trailing startup opcode is omitted
    ops = self.Parse(name, {"os": "sys2", "start": False,})
    self.assertEqual(len(ops), 2)
    self._Check(ops, name)
    self.assertEqual(ops[1].os_type, "sys2")

    osparams = {
      "reformat": "1",
      }
    ops = self.Parse(name, {"os": "sys4035", "start": True,
                            "osparams": osparams,})
    self.assertEqual(len(ops), 3)
    self._Check(ops, name)
    self.assertEqual(ops[1].os_type, "sys4035")
    self.assertEqual(ops[1].osparams, osparams)

  def testDefaults(self):
    name = "noolee0g"

    # "start" defaults to enabled, "osparams" to empty
    ops = self.Parse(name, {"os": "linux1"})
    self.assertEqual(len(ops), 3)
    self._Check(ops, name)
    self.assertEqual(ops[1].os_type, "linux1")
    self.assertFalse(ops[1].osparams)

  def testErrors(self):
    # The request body must be a dictionary
    self.assertRaises(http.HttpBadRequest, self.Parse,
                      "foo", "not a dictionary")
class TestGroupRename(unittest.TestCase):
  """Tests for the /2/groups/[name]/rename resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    body = {
      "new_name": "ua0aiyoo15112",
      }
    handler = _CreateHandler(rlib2.R_2_groups_name_rename, ["group608242564"],
                             {}, body, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpGroupRename))
    self.assertEqual(op.group_name, "group608242564")
    self.assertEqual(op.new_name, "ua0aiyoo15112")
    self.assertFalse(op.dry_run)

  def testDryRun(self):
    factory = _FakeClientFactory(_FakeClient)
    queryargs = {
      "dry-run": ["1"],
      }
    body = {
      "new_name": "ua0aiyoo",
      }
    handler = _CreateHandler(rlib2.R_2_groups_name_rename, ["group28548"],
                             queryargs, body, factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpGroupRename))
    self.assertEqual(op.group_name, "group28548")
    self.assertEqual(op.new_name, "ua0aiyoo")
    self.assertTrue(op.dry_run)
class TestInstanceReplaceDisks(unittest.TestCase):
  """Tests for the /2/instances/[name]/replace-disks resource (POST)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "inst22568"

    # Disk indices may be given as a list or as a comma-separated string
    for disks in [range(1, 4), "1,2,3", "1, 2, 3"]:
      body = {
        "mode": constants.REPLACE_DISK_SEC,
        "disks": disks,
        "iallocator": "myalloc",
        }
      handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
                               [name], {}, body, factory)
      job_id = handler.POST()

      client = factory.GetNextClient()
      self.assertRaises(IndexError, factory.GetNextClient)

      (got_job_id, (op, )) = client.GetNextSubmittedJob()
      self.assertRaises(IndexError, client.GetNextSubmittedJob)
      self.assertEqual(job_id, got_job_id)

      self.assertTrue(isinstance(op, opcodes.OpInstanceReplaceDisks))
      self.assertEqual(op.instance_name, name)
      self.assertEqual(op.mode, constants.REPLACE_DISK_SEC)
      self.assertEqual(op.disks, [1, 2, 3])
      self.assertEqual(op.iallocator, "myalloc")

  def testDefaults(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "inst11413"
    body = {
      "mode": constants.REPLACE_DISK_AUTO,
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
                             [name], {}, body, factory)
    job_id = handler.POST()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpInstanceReplaceDisks))
    self.assertEqual(op.instance_name, name)
    self.assertEqual(op.mode, constants.REPLACE_DISK_AUTO)
    self.assertTrue(op.iallocator is None)
    self.assertEqual(op.disks, [])

  def testNoDisks(self):
    factory = _FakeClientFactory(_FakeClient)

    # Missing body entirely
    handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
                             ["inst20661"], {}, {}, factory)
    self.assertRaises(http.HttpBadRequest, handler.POST)

    # Unusable "disks" values are rejected as well
    for disks in [None, "", {}]:
      body = {
        "disks": disks,
        }
      handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
                               ["inst20661"], {}, body, factory)
      self.assertRaises(http.HttpBadRequest, handler.POST)

  def testWrong(self):
    factory = _FakeClientFactory(_FakeClient)
    body = {
      "mode": constants.REPLACE_DISK_AUTO,
      "disks": "hello world",
      }
    handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
                             ["foo"], {}, body, factory)
    self.assertRaises(http.HttpBadRequest, handler.POST)
class TestGroupModify(unittest.TestCase):
  """Tests for the /2/groups/[name]/modify resource (PUT)."""
  def test(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "group6002"

    for policy in constants.VALID_ALLOC_POLICIES:
      body = {
        "alloc_policy": policy,
        }
      handler = _CreateHandler(rlib2.R_2_groups_name_modify, [name], {},
                               body, factory)
      job_id = handler.PUT()

      client = factory.GetNextClient()
      self.assertRaises(IndexError, factory.GetNextClient)

      (got_job_id, (op, )) = client.GetNextSubmittedJob()
      self.assertRaises(IndexError, client.GetNextSubmittedJob)
      self.assertEqual(job_id, got_job_id)

      self.assertTrue(isinstance(op, opcodes.OpGroupSetParams))
      self.assertEqual(op.group_name, name)
      self.assertEqual(op.alloc_policy, policy)
      self.assertFalse(op.dry_run)

  def testUnknownPolicy(self):
    factory = _FakeClientFactory(_FakeClient)
    body = {
      "alloc_policy": "_unknown_policy_",
      }
    handler = _CreateHandler(rlib2.R_2_groups_name_modify, ["xyz"], {},
                             body, factory)
    self.assertRaises(http.HttpBadRequest, handler.PUT)
    self.assertRaises(IndexError, factory.GetNextClient)

  def testDefaults(self):
    factory = _FakeClientFactory(_FakeClient)
    name = "group6679"

    handler = _CreateHandler(rlib2.R_2_groups_name_modify, [name], {}, {},
                             factory)
    job_id = handler.PUT()

    client = factory.GetNextClient()
    self.assertRaises(IndexError, factory.GetNextClient)

    (got_job_id, (op, )) = client.GetNextSubmittedJob()
    self.assertRaises(IndexError, client.GetNextSubmittedJob)
    self.assertEqual(job_id, got_job_id)

    self.assertTrue(isinstance(op, opcodes.OpGroupSetParams))
    self.assertEqual(op.group_name, name)
    # An empty body requests no allocation policy change
    self.assertTrue(op.alloc_policy is None)
    self.assertFalse(op.dry_run)
class TestGroupAdd(unittest.TestCase):
def test(self):
name = "group3618"
clfactory = _FakeClientFactory(_FakeClient)
for policy in constants.VALID_ALLOC_POLICIES:
data = {
"group_name": name,
"alloc_policy": policy,
}
handler = _CreateHandler(rlib2.R_2_groups, [], {}, data,
clfactory)
job_id = handler.POST()
cl = clfactory.GetNextClient()
self.assertRaises(IndexError, clfactory.GetNextClient)
(exp_job_id, (op, )) = cl.GetNextSubmittedJob()
self.assertEqual(job_id, exp_job_id)
self.assertTrue(isinstance(op, opcodes.OpGroupAdd))
self.assertEqual(op.group_name, name)
self.assertEqual(op.alloc_policy, policy)
self.assertFalse(op.dry_run)
self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testUnknownPolicy(self):
clfactory = _FakeClientFactory(_FakeClient)
data = {
"alloc_policy": "_unknown_policy_",
}
handler = _CreateHandler(rlib2.R_2_groups, [], {}, data, clfactory)
self.assertRaises(http.HttpBadRequest, handler.POST)
self.assertRaises(IndexError, clfactory.GetNextClient)
def testDefaults(self):
clfactory = _FakeClientFactory(_FakeClient)
name = "group15395"
data = {
"group_name": name,
}
handler = _CreateHandler(rlib2.R_2_groups, [], {}, data, clfactory)
job_id = handler.POST()
cl = clfactory.GetNextClient()
self.assertRaises(IndexError, clfactory.GetNextClient)
(exp_job_id, (op, )) = cl.GetNextSubmittedJob()
self.assertEqual(job_id, exp_job_id)
self.assertTrue(isinstance(op, opcodes.OpGroupAdd))
self.assertEqual(op.group_name, name)
self.assertTrue(op.alloc_policy is None)
self.assertFalse(op.dry_run)
def testLegacyName(self):
clfactory = _FakeClientFactory(_FakeClient)
name = "group29852"
data = {
"name": name,
}
handler = _CreateHandler(rlib2.R_2_groups, [], {
"dry-run": ["1"],
}, data, clfactory)
job_id = handler.POST()
cl = clfactory.GetNextClient()
self.assertRaises(IndexError, clfactory.GetNextClient)
(exp_job_id, (op, )) = cl.GetNextSubmittedJob()
self.assertEqual(job_id, exp_job_id)
self.assertTrue(isinstance(op, opcodes.OpGroupAdd))
self.assertEqual(op.group_name, name)
self.assertTrue(op.alloc_policy is None)
self.assertTrue(op.dry_run)
class TestNodeRole(unittest.TestCase):
def test(self):
clfactory = _FakeClientFactory(_FakeClient)
for role in rlib2._NR_MAP.values():
handler = _CreateHandler(rlib2.R_2_nodes_name_role,
["node-z"], {}, role, clfactory)
if role == rlib2._NR_MASTER:
self.assertRaises(http.HttpBadRequest, handler.PUT)
else:
job_id = handler.PUT()
cl = clfactory.GetNextClient()
self.assertRaises(IndexError, clfactory.GetNextClient)
(exp_job_id, (op, )) = cl.GetNextSubmittedJob()
self.assertEqual(job_id, exp_job_id)
self.assertTrue(isinstance(op, opcodes.OpNodeSetParams))
self.assertEqual(op.node_name, "node-z")
self.assertFalse(op.force)
self.assertFalse(op.dry_run)
if role == rlib2._NR_REGULAR:
self.assertFalse(op.drained)
self.assertFalse(op.offline)
self.assertFalse(op.master_candidate)
elif role == rlib2._NR_MASTER_CANDIDATE:
self.assertFalse(op.drained)
self.assertFalse(op.offline)
self.assertTrue(op.master_candidate)
elif role == rlib2._NR_DRAINED:
self.assertTrue(op.drained)
self.assertFalse(op.offline)
self.assertFalse(op.master_candidate)
elif role == rlib2._NR_OFFLINE:
self.assertFalse(op.drained)
self.assertTrue(op.offline)
self.assertFalse(op.master_candidate)
else:
self.fail("Unknown role '%s'" % role)
self.assertRaises(IndexError, cl.GetNextSubmittedJob)
class TestSimpleResources(unittest.TestCase):
def setUp(self):
self.clfactory = _FakeClientFactory(_FakeClient)
def tearDown(self):
self.assertRaises(IndexError, self.clfactory.GetNextClient)
def testFeatures(self):
handler = _CreateHandler(rlib2.R_2_features, [], {}, None, self.clfactory)
self.assertEqual(set(handler.GET()), rlib2.ALL_FEATURES)
def testEmpty(self):
for cls in [rlib2.R_root, rlib2.R_2]:
handler = _CreateHandler(cls, [], {}, None, self.clfactory)
self.assertTrue(handler.GET() is None)
def testVersion(self):
handler = _CreateHandler(rlib2.R_version, [], {}, None, self.clfactory)
self.assertEqual(handler.GET(), constants.RAPI_VERSION)
class TestClusterInfo(unittest.TestCase):
class _ClusterInfoClient:
def __init__(self, address=None):
self.cluster_info = None
def QueryClusterInfo(self):
assert self.cluster_info is None
self.cluster_info = object()
return self.cluster_info
def test(self):
clfactory = _FakeClientFactory(self._ClusterInfoClient)
handler = _CreateHandler(rlib2.R_2_info, [], {}, None, clfactory)
result = handler.GET()
cl = clfactory.GetNextClient()
self.assertRaises(IndexError, clfactory.GetNextClient)
self.assertEqual(result, cl.cluster_info)
class TestInstancesMultiAlloc(unittest.TestCase):
def testInstanceUpdate(self):
clfactory = _FakeClientFactory(_FakeClient)
data = {
"instances": [{
"instance_name": "bar",
"mode": "create",
}, {
"instance_name": "foo",
"mode": "create",
}],
}
handler = _CreateHandler(rlib2.R_2_instances_multi_alloc, [], {}, data,
clfactory)
(body, _) = handler.GetPostOpInput()
self.assertTrue(compat.all([inst["OP_ID"] == handler.POST_OPCODE.OP_ID
for inst in body["instances"]]))
class TestPermissions(unittest.TestCase):
def testEquality(self):
self.assertEqual(rlib2.R_2_query.GET_ACCESS, rlib2.R_2_query.PUT_ACCESS)
self.assertEqual(rlib2.R_2_query.GET_ACCESS,
rlib2.R_2_instances_name_console.GET_ACCESS)
def testMethodAccess(self):
for handler in connector.CONNECTOR.values():
for method in baserlib._SUPPORTED_METHODS:
access = baserlib.GetHandlerAccess(handler, method)
self.assertFalse(access is None)
self.assertFalse(set(access) - rapi.RAPI_ACCESS_ALL,
msg=("Handler '%s' uses unknown access options for"
" method %s" % (handler, method)))
self.assertTrue(rapi.RAPI_ACCESS_READ not in access or
rapi.RAPI_ACCESS_WRITE in access,
msg=("Handler '%s' gives query, but not write access"
" for method %s (the latter includes query and"
" should therefore be given as well)" %
(handler, method)))
if __name__ == "__main__":
testutils.GanetiTestProgram()
``` |
[
{
"content": "Replicate the source code:\n```python\nfrom django.db import connection\n\n\nclaim_to_polygon_join = \"\"\" \n LEFT OUTER JOIN ctracker_polygon_organizations ON (houses.polygon_id = ctracker_polygon_organizations.polygon_id) \n LEFT OUTER JOIN ctracker_organization ON (ctracker_polygon_o... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nfrom django.db import connection\n\n\nclaim_to_polygon_join = \"\"\" \n LEFT OUTER JOIN ctracker_polygon_organizations ON (houses.polygon_id = ctracker_polygon_organizations.polygon_id) \n LEFT OUTER JOIN ctracker_organization ON (ct... | ```python
from django.db import connection
claim_to_polygon_join = """
LEFT OUTER JOIN ctracker_polygon_organizations ON (houses.polygon_id = ctracker_polygon_organizations.polygon_id)
LEFT OUTER JOIN ctracker_organization ON (ctracker_polygon_organizations.organization_id = ctracker_organization.id)
LEFT OUTER JOIN ctracker_claim ON (ctracker_organization.id = ctracker_claim.organization_id)
"""
def get_claims_for_poly(polygon_id):
cursor = connection.cursor()
cursor.execute("""
SELECT COUNT(*) AS "__count" FROM "ctracker_organization"
INNER JOIN "ctracker_polygon_organizations" ON ("ctracker_organization"."id" = "ctracker_polygon_organizations"."organization_id")
INNER JOIN "ctracker_claim" ON ("ctracker_organization"."id" = "ctracker_claim"."organization_id")
WHERE ("ctracker_polygon_organizations"."polygon_id" = '%s')
""" % polygon_id)
return cursor.fetchone()[0]
def get_sum_for_layers(layers_ids, level):
cursor = connection.cursor()
if level==4:
cursor.execute("""
SELECT ctracker_organization.id, COUNT(ctracker_claim.content_ptr_id) AS claims FROM ctracker_organization
LEFT OUTER JOIN ctracker_claim ON (ctracker_organization.id = ctracker_claim.organization_id)
WHERE (ctracker_organization.id IN (%s) )
GROUP BY ctracker_organization.id
""" % ','.join([str(x) for x in layers_ids])
)
elif level==3:
cursor.execute("""
SELECT district_id, SUM(claimz) as sum_claims FROM
(SELECT houses.layer_id as district_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon houses
%s
WHERE (houses.layer_id IN (%s) )
GROUP BY houses.polygon_id ) x
GROUP BY district_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layers_ids]))
)
elif level==2:
cursor.execute("""
SELECT area_id, SUM(claimz) as sum_claims FROM
(SELECT districts.layer_id as area_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon districts
LEFT OUTER JOIN ctracker_polygon houses ON (houses.layer_id = districts.polygon_id)
%s
WHERE (districts.layer_id IN (%s) )
GROUP BY districts.polygon_id ) x
GROUP BY area_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layers_ids]))
)
elif level==1:
cursor.execute("""
SELECT region_id, SUM(claimz) as sum_claims FROM
(SELECT areas.layer_id as region_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon areas
LEFT OUTER JOIN ctracker_polygon districts ON (districts.layer_id = areas.polygon_id)
LEFT OUTER JOIN ctracker_polygon houses ON (houses.layer_id = districts.polygon_id)
%s
WHERE (areas.layer_id IN (%s) )
GROUP BY areas.polygon_id ) x
GROUP BY region_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layers_ids]))
)
elif level==0:
cursor.execute("""
SELECT root_id, SUM(claimz) as sum_claims FROM
(SELECT regions.layer_id as root_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon regions
LEFT OUTER JOIN ctracker_polygon areas ON (areas.layer_id = regions.polygon_id)
LEFT OUTER JOIN ctracker_polygon districts ON (districts.layer_id = areas.polygon_id)
LEFT OUTER JOIN ctracker_polygon houses ON (houses.layer_id = districts.polygon_id)
%s
WHERE (regions.layer_id IN (%s) )
GROUP BY regions.polygon_id ) x
GROUP BY root_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layers_ids]))
)
return dict(cursor.fetchall())
def get_max_for_layers(layer_id, level):
layers_dict = {}
cursor = connection.cursor()
if level==4:
# x = Polygon.objects.filter(layer_id=layer_id).annotate(claimz=Count('organizations__claim'))
cursor.execute("""
SELECT layer_id, MAX(claimz) FROM
(SELECT houses.layer_id as layer_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon houses
%s
WHERE (houses.layer_id IN (%s) )
GROUP BY houses.polygon_id ) x
GROUP BY layer_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layer_id]))
)
elif level==3:
cursor = connection.cursor()
cursor.execute("""
SELECT district_id, MAX(claimz) as sum_claims FROM (
SELECT districts.layer_id as district_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon districts
LEFT OUTER JOIN ctracker_polygon houses ON (houses.layer_id = districts.polygon_id)
%s
WHERE (districts.layer_id IN (%s) )
GROUP BY districts.polygon_id) x
GROUP BY district_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layer_id]))
)
elif level==2:
cursor.execute("""
SELECT district_id, MAX(claimz) as sum_claims FROM (
SELECT areas.layer_id as district_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon areas
LEFT OUTER JOIN ctracker_polygon districts ON (districts.layer_id = areas.polygon_id)
LEFT OUTER JOIN ctracker_polygon houses ON (houses.layer_id = districts.polygon_id)
%s
WHERE (areas.layer_id IN (%s) )
GROUP BY areas.polygon_id) x
GROUP BY district_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layer_id]))
)
elif level==1:
cursor.execute("""
SELECT district_id, MAX(claimz) as sum_claims FROM (
SELECT regions.layer_id as district_id, COUNT(ctracker_claim.content_ptr_id) AS claimz FROM ctracker_polygon regions
LEFT OUTER JOIN ctracker_polygon areas ON (areas.layer_id = regions.polygon_id)
LEFT OUTER JOIN ctracker_polygon districts ON (districts.layer_id = areas.polygon_id)
LEFT OUTER JOIN ctracker_polygon houses ON (houses.layer_id = districts.polygon_id)
%s
WHERE (regions.layer_id IN (%s) )
GROUP BY regions.polygon_id) x
GROUP BY district_id
""" % (claim_to_polygon_join, ','.join(["'" + str(x) + "'" for x in layer_id]))
)
return dict(cursor.fetchall())
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/env python\n\n# Copyright 2012 Josef Assad\n#\n# This file is part of Stock Data Cacher.\n#\n# Stock Data Cacher is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as publi... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n# Copyright 2012 Josef Assad\n#\n# This file is part of Stock Data Cacher.\n#\n# Stock Data Cacher is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public ... | ```python
#!/usr/bin/env python
# Copyright 2012 Josef Assad
#
# This file is part of Stock Data Cacher.
#
# Stock Data Cacher is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Stock Data Cacher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Stock Data Cacher. If not, see <http://www.gnu.org/licenses/>.
import pdb
import unittest
from sqlalchemy.orm import *
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import *
import os
import datetime
import hashlib
from nose.tools import with_setup
from StockCollection import StockCollection
from model import Datapoint, Stock, Day
import settings
dp_A_20120323=[u'NYSE', u"A", u"2012-03-23,43.57,44.30,43.15,44.30,3369400,44.20"]
dp_A_20120326=[u'NYSE', u"A", u"2012-03-26,44.87,45.12,44.63,45.05,3467100,44.95"]
dp_A_20120327=[u'NYSE', u"A", u"2012-03-27,45.05,46.28,44.99,45.67,3804100,45.57"]
dp_A_20120328=[u'NYSE', u"A", u"2012-03-28,45.42,45.47,44.09,44.59,3089300,44.49"]
dp_AA_20120323=[u'NYSE', u"AA", u"2012-03-23,10.01,10.26,9.96,10.11,20016500,10.08"]
dp_AA_20120326=[u'NYSE', u"AA", u"2012-03-26,10.25,10.30,10.12,10.22,13772200,10.19"]
dp_AA_20120327=[u'NYSE', u"AA", u"2012-03-27,10.25,10.31,10.06,10.06,19193300,10.03"]
dp_AA_20120328=[u'NYSE', u"AA", u"2012-03-28,10.06,10.06,9.79,9.83,36435200,9.80"]
dp_AAN_20120323=[u'NYSE', u"AAN", u"2012-03-23,25.73,25.83,25.12,25.65,406500,25.65"]
dp_AAN_20120326=[u'NYSE', u"AAN", u"2012-03-26,25.92,26.33,25.90,26.17,537400,26.17"]
dp_AAN_20120327=[u'NYSE', u"AAN", u"2012-03-27,26.12,26.50,26.05,26.06,609900,26.06"]
class testValidation(unittest.TestCase):
def setUp(self):
self.settings = settings
self.stock_collection = StockCollection(self.settings)
def tearDown(self):
pass
def testSymbolValidation(self):
"""Testing validation of stock symbols
"""
result = self.stock_collection.validate_symbol(u"ABC")
assert result[1] == True and result[0] == u"ABC",\
'expected symbol ABC to generate True, instead it generated False'
result = self.stock_collection.validate_symbol(u"AB.C")
assert result[1] == False and result[0] == u"AB.C",\
'expected symbol AB.C to generate False, instead it generated True'
result = self.stock_collection.validate_symbol(u"AB-C")
assert result[1] == False and result[0] == u"AB-PC",\
'expected symbol ABC to generate True and AB-PC, instead it generated [%s, %s]' %\
(result[0], result[1])
class testSymbolLoading(object):
def setUp(self):
self.settings = settings
self.engine = create_engine(settings.db_url)
self.Session = sessionmaker(bind=self.engine)
self.stock_collection = StockCollection(self.settings)
self.stock_collection.wipe()
self.stock_collection.create_db()
def tearDown(self):
cache_file_paths = []
for stock in self.stock_collection.stocks:
cache_file_paths.append(self.stock_collection.get_cache_file_path(stock.symbol, stock.market))
meta = MetaData(bind=self.stock_collection.engine)
meta.reflect()
self.stock_collection.wipe()
for cache_file_path in cache_file_paths:
assert not os.path.exists(cache_file_path),\
'cache file %s was not removed' % cache_file_path
engine = create_engine(self.settings.db_url)
meta2 = MetaData(bind=engine)
meta2.reflect()
assert len(meta2.tables) == 0, 'tables were not deleted. %s remain.' % len(meta.tables)
def test_gen(self):
"""testing stock entity loading
"""
data = []
def create_test_symbols_file(market_name, full_file, rows):
outfile_name = 'data/' + market_name + '_test.txt'
outfile = open(outfile_name, 'w')
infile = open(full_file, 'r')
lines = iter(infile)
for foo in xrange(1, rows+2):
line = lines.next()
outfile.write(line)
outfile.close()
infile.close()
tempdata = {u'name':u'NYSE', u'file_full':u'data/NYSE_full.txt', u'file':u"data/NYSE_test.txt", u'stocks':[]}
tempdata['stocks'].append({u'market':u'NYSE', u'symbol':u'A', u'name':u'Agilent Technologies'})
tempdata['stocks'].append({u'market':u'NYSE', u'symbol':u'AA', u'name':u'Alcoa Inc.'})
tempdata['stocks'].append({u'market':u'NYSE', u'symbol':u'AAN', u'name':u'Aaron\'s Inc.'})
data.append(tempdata)
tempdata = {'name':u'NASDAQ', u'file_full':u'data/NASDAQ_full.txt', u'file':u"data/NASDAQ_test.txt", u'stocks':[]}
tempdata['stocks'].append({u'market': u'NASDAQ', u'symbol':u'AAC', u'name':u'Australia Acquisition'})
tempdata['stocks'].append({u'market': u'NASDAQ', u'symbol':u'AACC', u'name':u'Asset Acceptance Capital'})
tempdata['stocks'].append({u'market': u'NASDAQ', u'symbol':u'AACOU', u'name':u'Australia Acquisition Corp.'})
data.append(tempdata)
max_markets = len(data)
max_stocks = 3
for num_markets in xrange(1, max_markets+1):
markets = data[:num_markets]
market_files = []
for market in markets:
market_files.append({u'name':market[u'name'], u'file':market[u'file']})
for num_stocks in xrange(1, max_stocks+1):
for market in markets:
create_test_symbols_file(market[u'name'], market[u'file_full'], num_stocks)
expected_symbols = []
for d in data[:num_markets]:
for s in d['stocks'][:num_stocks]:
expected_symbols.append(s)
yield self.check_symbol_loading_works, market_files, expected_symbols
@with_setup(setUp, tearDown)
def check_symbol_loading_works(self, markets_list, expected_symbols):
session = self.Session()
stocks_raw = []
self.settings.symbols_files = markets_list
self.stock_collection.load_symbols(self.settings)
for es in expected_symbols:
stock = self.get_stock_from_db(es['market'], es['symbol'], es['name'])
assert stock, 'stock \'%s\' not found in db' % es['name']
assert os.path.exists(self.stock_collection.\
get_cache_file_path(es['symbol'], es['market'])),\
'cache file not found for stock %s' % es['name']
num_stocks = len(session.query(Stock).all())
expected_num_stocks = len(expected_symbols)
assert num_stocks == expected_num_stocks,\
'expected %s stock in db, found %s' % (expected_num_stocks, num_stocks)
def testLoad1Symbol1Market(self):
"""loading of 1 symbol, 1 market file
"""
session = self.Session()
stocks_raw = []
self.settings.symbols_files = [{u'name':u'NYSE', u'file':u"data/NYSE_test1.txt"}]
stocks_raw.append({u'market':u'NYSE', u'symbol':u'A', u'name':u'Agilent Technologies'})
self.stock_collection.load_symbols(self.settings)
for stock_raw in stocks_raw:
stock = self.get_stock_from_db(stock_raw['market'],\
stock_raw['symbol'], stock_raw['name'])
assert stock, 'stock \'%s\' not found in db' % stock_raw['name']
assert os.path.exists(self.stock_collection.\
get_cache_file_path(stock_raw['symbol'], stock_raw['market'])),\
'cache file not found for stock %s' % stock_raw['name']
num_stocks = len(session.query(Stock).all())
expected_num_stocks = len(stocks_raw)
assert num_stocks == expected_num_stocks,\
'expected %s stock in db, found %s' % (expected_num_stocks, num_stocks)
def get_stock_from_db(self, market, symbol, name=""):
session = self.Session()
try:
if not name:
stock = session.query(Stock).\
filter(Stock.market == market).\
filter(Stock.symbol == symbol).one()
return stock
else:
stock = session.query(Stock).\
filter(Stock.market == market).\
filter(Stock.symbol == symbol).\
filter(Stock.name == name).one()
return stock
except NoResultFound:
return False
except:
return False
def testLoad2Symbol1Market(self):
"""loading of 2 symbols, 1 market file
"""
session = self.Session()
stocks_raw = []
self.settings.symbols_files = [{u'name':u'NYSE', u'file':u"data/NYSE_test2.txt"}]
stocks_raw.append({u'market':u'NYSE', u'symbol':u'A', u'name':u'Agilent Technologies'})
stocks_raw.append({u'market':u'NYSE', u'symbol':u'AA', u'name':u'Alcoa Inc.'})
self.stock_collection.load_symbols(self.settings)
for stock_raw in stocks_raw:
stock = self.get_stock_from_db(stock_raw['market'], stock_raw['symbol'], stock_raw['name'])
assert stock, 'stock \'%s\' not found in db' % stock_raw['name']
assert os.path.exists(self.stock_collection.\
get_cache_file_path(stock_raw['symbol'], stock_raw['market'])),\
'cache file not found for stock %s' % stock_raw['name']
num_stocks = len(session.query(Stock).all())
expected_num_stocks = len(stocks_raw)
assert num_stocks == expected_num_stocks, 'expected %s stock in db, found %s' % (expected_num_stocks, num_stocks)
def testLoad1Symbol2Market(self):
"""loading of 2 market files 1 symbol each
"""
session = self.Session()
stocks_raw = []
self.settings.symbols_files = [{u'name':u'NYSE', u'file':u"data/NYSE_test1.txt"},
{u'name':u'NASDAQ', u'file':u"data/NASDAQ_test1.txt"}]
stocks_raw.append({u'market':u'NYSE', u'symbol':u'A', u'name':u'Agilent Technologies'})
stocks_raw.append({u'market':u'NASDAQ', u'symbol':u'AAC', u'name':u'Australia Acquisition'})
self.stock_collection.load_symbols(self.settings)
for stock_raw in stocks_raw:
stock = self.get_stock_from_db(stock_raw['market'], stock_raw['symbol'], stock_raw['name'])
assert stock, 'stock \'%s\' not found in db' % stock_raw['name']
assert os.path.exists(self.stock_collection.\
get_cache_file_path(stock_raw['symbol'], stock_raw['market'])),\
'cache file not found for stock %s' % stock_raw['name']
num_stocks = len(session.query(Stock).all())
expected_num_stocks = len(stocks_raw)
assert num_stocks == expected_num_stocks,\
'expected %s stock in db, found %s' % (expected_num_stocks, num_stocks)
def testLoad2Symbol2Market(self):
"""loading of 2 market files 2 symbols each
"""
session = self.Session()
stocks_raw = []
self.settings.symbols_files = [{u'name':u'NYSE', u'file':u"data/NYSE_test2.txt"},
{u'name':u'NASDAQ', u'file':u"data/NASDAQ_test2.txt"}]
stocks_raw.append({u'market':u'NYSE', u'symbol':u'A', u'name':u'Agilent Technologies'})
stocks_raw.append({u'market':u'NASDAQ', u'symbol':u'AAC', u'name':u'Australia Acquisition'})
stocks_raw.append({u'market':u'NYSE', u'symbol':u'AA', u'name':u'Alcoa Inc.'})
stocks_raw.append({u'market':u'NASDAQ', u'symbol':u'AACC', u'name':u'Asset Acceptance Capital'})
self.stock_collection.load_symbols(self.settings)
for stock_raw in stocks_raw:
stock = self.get_stock_from_db(stock_raw['market'], stock_raw['symbol'], stock_raw['name'])
assert stock, 'stock \'%s\' not found in db' % stock_raw['name']
assert os.path.exists(self.stock_collection.\
get_cache_file_path(stock_raw['symbol'], stock_raw['market'])),\
'cache file not found for stock %s' % stock_raw['name']
num_stocks = len(session.query(Stock).all())
expected_num_stocks = len(stocks_raw)
assert num_stocks == expected_num_stocks,\
'expected %s stock in db, found %s' % (expected_num_stocks, num_stocks)
class testCache(unittest.TestCase):
def setUp(self):
self.settings = settings
self.engine = create_engine(settings.db_url)
self.Session = sessionmaker(bind=self.engine)
self.stock_collection = StockCollection(self.settings)
self.stock_collection.wipe()
self.stock_collection.create_db()
def tearDown(self):
cache_file_paths = []
for stock in self.stock_collection.stocks:
cache_file_paths.append(self.stock_collection.get_cache_file_path(stock.symbol, stock.market))
meta = MetaData(bind=self.stock_collection.engine)
meta.reflect()
self.stock_collection.wipe()
for cache_file_path in cache_file_paths:
assert not os.path.exists(cache_file_path),\
'cache file %s was not removed' % cache_file_path
engine = create_engine(self.settings.db_url)
meta2 = MetaData(bind=engine)
meta2.reflect()
assert len(meta2.tables) == 0, 'tables were not deleted. %s remain.' % len(meta.tables)
def testUseCase1(self):
"""Testing use case 1
This use case consists of following steps:
1. Initialise stock collection
2. Add 1 stock to it.
3. Update the cache
4. Update the db
5. Wait 1 day then update cache and db again
6. Add 1 stock
7. Update cache and db
8. Wait 1 day, then update cache and db
"""
session = self.Session()
# 1. Initialise stock collection was done in setUp()
# 2. Add 1 stock to it.
self.settings.symbols_files = [{u'name':u'NYSE', u'file':u"data/NYSE_test1.txt"}]
self.stock_collection.load_symbols(self.settings)
assert len(session.query(Stock).\
filter(Stock.symbol == u"A").\
filter(Stock.name == u"Agilent Technologies").all()) == 1, \
'error adding stock to db'
stock = session.query(Stock).\
filter(Stock.symbol == u"A").\
filter(Stock.name == u"Agilent Technologies").one()
assert os.path.exists(self.stock_collection.\
get_cache_file_path(stock.symbol, stock.market)), 'foo'
# 3. Update the cache
self.stock_collection.settings.start_date = datetime.date(year=2012, month=3, day=23)
self.stock_collection.settings.today = datetime.date(year=2012, month=3, day=26)
self.stock_collection.update_cache()
stock_cache_file = self.stock_collection.get_cache_file_path(stock.symbol, stock.market)
cache_file = open(stock_cache_file)
cache_contents = cache_file.read()
cache_file.close()
assert hashlib.sha1(cache_contents).\
hexdigest() == "d304d9962bc0c95ced93fe9826ed12b965d398b5",\
"cache file has wrong sha1 hexdigest after initial data load"
# 4. update the db from cache
self.stock_collection.update_db()
num_dps = len(session.query(Datapoint).all())
assert num_dps == 2, 'expected 2 datapoints, found %s' % num_dps
assert self.dps_are_in_db([dp_A_20120323, dp_A_20120326], to_exclusion=True),\
'didn\'t find all the db entries we expected'
# 5. Wait 1 day then update cache and db again
self.stock_collection.settings.today = datetime.date(year=2012, month=3, day=27)
self.stock_collection.update_cache()
cache_file = open(stock_cache_file)
cache_contents = cache_file.read()
cache_file.close()
assert hashlib.sha1(cache_contents).\
hexdigest() == "033aaa5c736c9f44074dfd4d2657b0c44c406793",\
"cache file has wrong sha1 hexdigest after first cache update"
self.stock_collection.update_db()
num_dps = len(session.query(Datapoint).all())
assert num_dps == 3, 'expected 3 datapoints, found %s' % num_dps
assert self.dps_are_in_db([dp_A_20120323, dp_A_20120326, dp_A_20120327], to_exclusion=True),\
'didn\'t find all the db entries we expected'
# 6. Add 1 stock
self.settings.symbols_files = [{u'name':u'NYSE', u'file':u"data/NYSE_test2.txt"}]
self.stock_collection.load_symbols(self.settings)
# 7. Update cache and db
self.stock_collection.update_cache()
self.stock_collection.update_db()
num_dps = len(session.query(Datapoint).all())
assert num_dps == 6, 'expected 6 datapoints, found %s' % num_dps
expected_dps = [dp_A_20120323, dp_A_20120326, dp_A_20120327, dp_AA_20120323, dp_AA_20120326, dp_AA_20120327]
assert self.dps_are_in_db(expected_dps, to_exclusion=True),\
'didn\'t find all the db entries we expected'
# 8. Wait 1 day, then update cache and db
self.stock_collection.settings.today = datetime.date(year=2012, month=3, day=28)
self.stock_collection.update_cache()
self.stock_collection.update_db()
num_dps = len(session.query(Datapoint).all())
assert num_dps == 8, 'expected 8 datapoints, found %s' % num_dps
assert self.dps_are_in_db([dp_A_20120323, dp_A_20120326, dp_A_20120327, dp_A_20120328,
dp_AA_20120323, dp_AA_20120326, dp_AA_20120327, dp_AA_20120328],\
to_exclusion=True), 'didn\'t find all the db entries we expected'
def dps_are_in_db(self, dps, to_exclusion=False):
session = self.Session()
parsed_dps = []
existing_dps = []
for dp in dps:
parsed_dp = self.stock_collection.parse_csv_line(dp[2])
parsed_dp['market'] = dp[0]
parsed_dp['stock'] = dp[1]
parsed_dps.append(parsed_dp)
for existing_dp in session.query(Datapoint).all():
foo = {}
foo['market'] = existing_dp.stock.market
foo['stock'] = existing_dp.stock.symbol
foo['open_val'] = existing_dp.open_val
foo['high'] = existing_dp.high
foo['low'] = existing_dp.low
foo['close'] = existing_dp.close
foo['volume'] = existing_dp.volume
foo['adj_close'] = existing_dp.adj_close
foo['date'] = existing_dp.day.date
existing_dps.append(foo)
if to_exclusion:
for dp in parsed_dps:
if dp not in existing_dps: return False
if len(parsed_dps) != len(existing_dps): return False
return True
else:
for dp in parsed_dps:
if dp not in existing_dps: return False
return True
``` |
[
{
"content": "```python\nimport functools\nimport logging\n\nfrom waldur_core.structure.exceptions import ServiceBackendNotImplemented\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_backend_action(action=None):\n \"\"\" Logging for backend method.\n\n Expects django model instance as first argument.... | [
{
"content": "<|memory_start|>```python\nimport functools\nimport logging\n\nfrom waldur_core.structure.exceptions import ServiceBackendNotImplemented\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_backend_action(action=None):\n \"\"\" Logging for backend method.\n\n Expects django model instance as... | ```python
import functools
import logging
from waldur_core.structure.exceptions import ServiceBackendNotImplemented
logger = logging.getLogger(__name__)
def log_backend_action(action=None):
""" Logging for backend method.
Expects django model instance as first argument.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(self, instance, *args, **kwargs):
action_name = func.__name__.replace('_', ' ') if action is None else action
logger.debug(
'About to %s `%s` (PK: %s).', action_name, instance, instance.pk
)
result = func(self, instance, *args, **kwargs)
logger.debug(
'Action `%s` was executed successfully for `%s` (PK: %s).',
action_name,
instance,
instance.pk,
)
return result
return wrapped
return decorator
class ServiceBackend:
""" Basic service backed with only common methods pre-defined. """
DEFAULTS = {}
def __init__(self, settings, **kwargs):
pass
def validate_settings(self):
self.ping(raise_exception=True)
def ping(self, raise_exception=False):
raise ServiceBackendNotImplemented
def sync(self):
self.pull_service_properties()
self.pull_resources()
self.pull_subresources()
def pull_service_properties(self):
pass
def pull_resources(self):
pass
def pull_subresources(self):
pass
def has_global_properties(self):
return False
def get_managed_resources(self):
raise ServiceBackendNotImplemented
def get_monthly_cost_estimate(self, resource):
raise ServiceBackendNotImplemented
@staticmethod
def gb2mb(val):
return int(val * 1024) if val else 0
@staticmethod
def tb2mb(val):
return int(val * 1024 * 1024) if val else 0
@staticmethod
def mb2gb(val):
return int(val / 1024) if val else 0
@staticmethod
def mb2tb(val):
return int(val / 1024 / 1024) if val else 0
@staticmethod
def b2gb(val):
return int(val / 1024 / 1024 / 1024) if val else 0
def get_importable_resources(self, resource_model, remote_resources):
local_backend_ids = resource_model.objects.filter(
service_settings=self.settings
).values_list('backend_id', flat=True)
result = []
for remote_resource in remote_resources:
if remote_resource['backend_id'] in local_backend_ids:
continue
result.append(remote_resource)
return result
``` |
[
{
"content": "Repeat the code precisely:\n```python\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LIC... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.... | ```python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import csv
import os
from random import randint
import tempfile
import time
from urllib.parse import urlparse
import requests
from airflow import models
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.contrib.hooks.bigquery_hook import BigQueryBaseCursor
from airflow.models import BaseOperator
from orchestra.google.marketing_platform.hooks.display_video_360 import (
GoogleDisplayVideo360Hook
)
from orchestra.google.marketing_platform.utils import erf_utils
from orchestra.google.marketing_platform.utils.schema.sdf import (
SDF_VERSIONED_SCHEMA_TYPES
)
logger = logging.getLogger(__name__)
class GoogleDisplayVideo360CreateReportOperator(BaseOperator):
    """Creates and runs a new Display & Video 360 query.

    Attributes:
      report: The query body to create the report from. (templated)
        Can receive a json string representing the report or reference to a
        template file. Template references are recognized by a string ending
        in '.json'.
      api_version: The DV360 API version.
      gcp_conn_id: The connection ID to use when fetching connection info.
      delegate_to: The account to impersonate, if any.

    XComs:
      query_id: The query ID for the report created.
    """
    template_fields = ['params', 'report']
    template_ext = ['.json']

    def __init__(self,
                 report,
                 api_version='v1',
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360CreateReportOperator, self).__init__(*args, **kwargs)
        self.report = report
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # Hook is created lazily on first execute().
        self.hook = None

    def execute(self, context):
        if self.hook is None:
            self.hook = GoogleDisplayVideo360Hook(
                api_version=self.api_version,
                gcp_conn_id=self.gcp_conn_id,
                delegate_to=self.delegate_to)
        # `report` has been template-rendered by this point; parse the JSON.
        report_body = json.loads(self.report)
        request = self.hook.get_service().queries().createquery(body=report_body)
        response = request.execute()
        # Expose the new query id to downstream tasks.
        context['task_instance'].xcom_push('query_id', response['queryId'])
class GoogleDisplayVideo360RunReportOperator(BaseOperator):
    """Runs a stored query to generate a report.

    Attributes:
      api_version: The DV360 API version.
      query_id: The ID of the query to run. (templated)
      gcp_conn_id: The connection ID to use when fetching connection info.
      delegate_to: The account to impersonate, if any.
    """
    template_fields = ['query_id']

    def __init__(self,
                 query_id,
                 api_version='v1',
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360RunReportOperator, self).__init__(*args, **kwargs)
        self.api_version = api_version
        self.conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # API client is built lazily on first execute().
        self.service = None
        self.query_id = query_id

    def execute(self, context):
        if self.service is None:
            hook = GoogleDisplayVideo360Hook(
                api_version=self.api_version,
                gcp_conn_id=self.conn_id,
                delegate_to=self.delegate_to
            )
            self.service = hook.get_service()
        # Run the stored query; the request body is intentionally empty.
        request = self.service.queries().runquery(
            queryId=self.query_id, body={})
        request.execute()
class GoogleDisplayVideo360DownloadReportOperator(BaseOperator):
    """Downloads a Display & Video 360 report into Google Cloud Storage.

    Attributes:
      report_url: The Google Cloud Storage url where the latest report is
        stored. (templated)
      destination_bucket: The destination Google cloud storage bucket where
        the report should be written to. (templated)
      destination_object: The destination name of the object in the
        destination Google cloud storage bucket. (templated)
        If the destination points to an existing folder, the report will be
        written under the specified folder.
      chunk_size: Number of bytes fetched per ranged HTTP request.
      gcp_conn_id: The connection ID to use when fetching connection info.
      delegate_to: The account to impersonate, if any.

    XComs:
      destination_bucket: The Google cloud storage bucket the report was
        written to.
      destination_object: The Google cloud storage URI for the report.
    """
    template_fields = ['report_url', 'destination_bucket', 'destination_object']

    def __init__(self,
                 report_url,
                 destination_bucket,
                 destination_object=None,
                 chunk_size=5 * 1024 * 1024,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360DownloadReportOperator, self).__init__(*args, **kwargs)
        self.report_url = report_url
        self.destination_bucket = destination_bucket
        self.destination_object = destination_object
        self.chunk_size = chunk_size
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # GCS hook is created lazily on first execute().
        self.hook = None

    @staticmethod
    def _download_report(source_url, destination_file, chunk_size):
        """Fetch *source_url* into *destination_file* using ranged GETs."""
        # Total size comes from a HEAD request; the body is then pulled in
        # `chunk_size` byte-range requests.
        response = requests.head(source_url)
        content_length = int(response.headers['Content-Length'])
        start_byte = 0
        while start_byte < content_length:
            end_byte = start_byte + chunk_size - 1
            if end_byte >= content_length:
                end_byte = content_length - 1
            headers = {'Range': 'bytes=%s-%s' % (start_byte, end_byte)}
            response = requests.get(source_url, stream=True, headers=headers)
            chunk = response.raw.read()
            destination_file.write(chunk)
            start_byte = end_byte + 1
        destination_file.close()

    @staticmethod
    def _get_destination_uri(destination_object, report_url):
        """Derive the GCS object name from the report URL and user setting."""
        # The report file name is taken from the URL path's third segment.
        report_file_name = urlparse(report_url).path.split('/')[2]
        if destination_object is None:
            return report_file_name
        if destination_object.endswith('/'):
            # Trailing slash means "folder": append the report file name.
            return destination_object + report_file_name
        return destination_object

    def execute(self, context):
        if self.hook is None:
            self.hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.gcp_conn_id,
                delegate_to=self.delegate_to)
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            # TODO(efolgar): Directly stream to storage instead of temp file
            self._download_report(self.report_url, temp_file, self.chunk_size)
            destination_object_name = self._get_destination_uri(
                self.destination_object, self.report_url)
            self.hook.upload(
                bucket=self.destination_bucket,
                object=destination_object_name,
                filename=temp_file.name,
                multipart=True)
            context['task_instance'].xcom_push(
                'destination_bucket', self.destination_bucket)
            context['task_instance'].xcom_push(
                'destination_object', destination_object_name)
        finally:
            # delete=False above, so remove the temp file explicitly.
            temp_file.close()
            os.unlink(temp_file.name)
class GoogleDisplayVideo360DeleteReportOperator(BaseOperator):
    """Deletes Display & Video 360 queries and any associated reports.

    Attributes:
      api_version: The DV360 API version.
      query_id: The DV360 query id to delete. (templated)
      query_title: The DV360 query title to delete. (templated)
        Any query with a matching title will be deleted.
      ignore_if_missing: If True, return success even if the query is missing.
      gcp_conn_id: The connection ID to use when fetching connection info.
      delegate_to: The account to impersonate, if any.
    """
    template_fields = ['query_id', 'query_title']
    ui_color = '#ffd1dc'

    def __init__(self,
                 api_version='v1',
                 query_id=None,
                 query_title=None,
                 ignore_if_missing=False,
                 gcp_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360DeleteReportOperator, self).__init__(*args, **kwargs)
        self.api_version = api_version
        self.query_id = query_id
        self.query_title = query_title
        self.ignore_if_missing = ignore_if_missing
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        # Hook is created lazily on first execute().
        self.hook = None

    def execute(self, context):
        if self.hook is None:
            self.hook = GoogleDisplayVideo360Hook(
                gcp_conn_id=self.gcp_conn_id,
                api_version=self.api_version,
                delegate_to=self.delegate_to)
        # If both id and title are given, both deletions are attempted.
        if self.query_id is not None:
            self.hook.deletequery(
                self.query_id,
                ignore_if_missing=self.ignore_if_missing)
        if self.query_title is not None:
            self.hook.deletequeries(
                self.query_title,
                ignore_if_missing=self.ignore_if_missing)
class GoogleDisplayVideo360ERFToBigQueryOperator(BaseOperator):
    """Upload Multiple Entity Read Files to specified big query dataset.

    For each partner id the partner's Entity Read File is downloaded and
    transformed (via erf_utils), the staged newline-delimited JSON object in
    GCS is loaded into the target BigQuery table, and the staged object is
    deleted. The first partner uses the configured write disposition; later
    partners are appended so earlier loads survive.
    """

    def __init__(self,
                 gcp_conn_id='google_cloud_default',
                 report_body=None,
                 yesterday=False,
                 entity_type=None,
                 file_creation_date=None,
                 cloud_project_id=None,
                 bq_table=None,
                 schema=None,
                 gcs_bucket=None,
                 erf_bucket=None,
                 partner_ids=None,
                 write_disposition='WRITE_TRUNCATE',
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360ERFToBigQueryOperator, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.service = None
        # Hooks are created lazily on first execute().
        self.bq_hook = None
        self.gcs_hook = None
        self.report_body = report_body
        self.erf_bucket = erf_bucket
        self.yesterday = yesterday
        self.cloud_project_id = cloud_project_id
        self.bq_table = bq_table
        self.gcs_bucket = gcs_bucket
        self.schema = schema
        self.entity_type = entity_type
        self.erf_object = 'entity/%s.0.%s.json' % (file_creation_date, entity_type)
        # Fix: the previous `partner_ids=[]` default was a shared mutable
        # object; default to a fresh list per instance instead.
        self.partner_ids = [] if partner_ids is None else partner_ids
        self.write_disposition = write_disposition
        self.file_creation_date = file_creation_date

    def execute(self, context):
        if self.gcs_hook is None:
            self.gcs_hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=self.gcp_conn_id)
        if self.bq_hook is None:
            self.bq_hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id)
        for i, partner_id in enumerate(self.partner_ids):
            # Download the partner's ERF and stage it in GCS as NDJSON.
            filename = erf_utils.download_and_transform_erf(self, partner_id)
            entity_read_file_ndj = 'gs://%s/%s' % (self.gcs_bucket, filename)
            # Only the first partner honours the configured disposition;
            # subsequent partners append so earlier loads are not clobbered.
            if i > 0:
                self.write_disposition = 'WRITE_APPEND'
            bq_base_cursor = self.bq_hook.get_conn().cursor()
            bq_base_cursor.run_load(
                destination_project_dataset_table=self.bq_table,
                schema_fields=self.schema,
                source_uris=[entity_read_file_ndj],
                source_format='NEWLINE_DELIMITED_JSON',
                write_disposition=self.write_disposition)
            # Clean up the staged object once it has been loaded.
            self.gcs_hook.delete(self.gcs_bucket, filename)
class GoogleDisplayVideo360SDFToBigQueryOperator(BaseOperator):
    """Make a request to SDF API and upload the data to BQ.

    For each requested file type, the SDF download response payload is
    written to a temporary GCS object, loaded into the matching BigQuery
    table as CSV (skipping the header row), and the GCS object is deleted.
    """

    # Default BigQuery table name per SDF file type.
    DEFAULT_SDF_TABLE_NAMES = {
        'LINE_ITEM': 'SDFLineItem',
        'AD_GROUP': 'SDFAdGroup',
        'AD': 'SDFAd',
        'INSERTION_ORDER': 'SDFInsertionOrder',
        'CAMPAIGN': 'SDFCampaign'
    }

    # Key in the SDF API response holding each file type's payload.
    SDF_API_RESPONSE_KEYS = {
        'LINE_ITEM': 'lineItems',
        'AD_GROUP': 'adGroups',
        'AD': 'ads',
        'INSERTION_ORDER': 'insertionOrders',
        'CAMPAIGN': 'campaigns'
    }

    # NOTE(review): the dict defaults below are shared class-level objects;
    # this is safe only as long as callers never mutate them — confirm.
    def __init__(self,
                 gcp_conn_id='google_cloud_default',
                 gcs_bucket=None,
                 schema=None,
                 bq_dataset=None,
                 write_disposition=None,
                 cloud_project_id=None,
                 file_types=None,
                 filter_ids=None,
                 api_version=None,
                 filter_type=None,
                 table_names=DEFAULT_SDF_TABLE_NAMES,
                 sdf_api_response_keys=SDF_API_RESPONSE_KEYS,
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360SDFToBigQueryOperator, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.service = None
        # Hooks are created lazily on first execute().
        self.hook = None
        self.bq_hook = None
        self.gcs_hook = None
        self.gcs_bucket = gcs_bucket
        self.schema = schema
        self.bq_dataset = bq_dataset
        self.write_disposition = write_disposition
        self.cloud_project_id = cloud_project_id
        self.file_types = file_types
        self.filter_ids = filter_ids
        self.api_version = api_version
        self.filter_type = filter_type
        self.table_names = table_names
        self.sdf_api_response_keys = sdf_api_response_keys

    def execute(self, context):
        if self.hook is None:
            self.hook = GoogleDisplayVideo360Hook(gcp_conn_id=self.gcp_conn_id)
        if self.bq_hook is None:
            self.bq_hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id)
        if self.gcs_hook is None:
            self.gcs_hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.gcp_conn_id)
        # One SDF download request covers all requested file types.
        request_body = {'fileTypes': self.file_types, 'filterType': self.filter_type, 'filterIds': self.filter_ids,
                        'version': self.api_version}
        logger.info('Request body: %s ' % request_body)
        request = self.hook.get_service().sdf().download(body=request_body)
        response = request.execute()
        for file_type in self.file_types:
            temp_file = None
            try:
                logger.info('Uploading SDF to GCS')
                temp_file = tempfile.NamedTemporaryFile(delete=False)
                response_key = self.sdf_api_response_keys.get(file_type)
                temp_file.write(response[response_key].encode('utf-8'))
                temp_file.close()
                # Timestamp + random suffix keeps staged object names unique.
                filename = '%d_%s_%s_%s.json' % (time.time() * 1e+9, randint(
                    1, 1000000), response_key, 'sdf')
                self.gcs_hook.upload(self.gcs_bucket, filename, temp_file.name)
                logger.info('SDF upload to GCS complete')
            finally:
                if temp_file:
                    temp_file.close()
                    os.unlink(temp_file.name)
            sdf_file = 'gs://%s/%s' % (self.gcs_bucket, filename)
            bq_table = self.table_names.get(file_type)
            bq_table = '%s.%s' % (self.bq_dataset, bq_table)
            # Schema is selected per API version and file type.
            schema = SDF_VERSIONED_SCHEMA_TYPES.get(self.api_version).get(file_type)
            try:
                bq_base_cursor = self.bq_hook.get_conn().cursor()
                logger.info('Uploading SDF to BigQuery')
                bq_base_cursor.run_load(
                    destination_project_dataset_table=bq_table,
                    schema_fields=schema,
                    source_uris=[sdf_file],
                    source_format='CSV',
                    skip_leading_rows=1,
                    write_disposition=self.write_disposition)
            finally:
                logger.info('Deleting SDF from GCS')
                self.gcs_hook.delete(self.gcs_bucket, filename)
class GoogleDisplayVideo360RecordSDFAdvertiserOperator(BaseOperator):
    """
    Get Partner and Advertiser Ids from a report and populate an airflow variable.

    Downloads the report CSV from `report_url`, builds a mapping of
    partner id -> list of advertiser ids, and stores it JSON-encoded in the
    Airflow variable named `variable_name`.
    """
    template_fields = ['report_url', 'variable_name']

    def __init__(self,
                 report_url,
                 variable_name,
                 gcp_conn_id='google_cloud_default',
                 *args,
                 **kwargs):
        super(GoogleDisplayVideo360RecordSDFAdvertiserOperator, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.service = None
        self.report_url = report_url
        self.variable_name = variable_name

    def execute(self, context):
        # Fix: create the temp file before entering the try block so the
        # finally clause can never hit an unbound `report_file` (previously a
        # failure in NamedTemporaryFile raised NameError during cleanup,
        # masking the real error).
        report_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            file_download = requests.get(self.report_url, stream=True)
            for chunk in file_download.iter_content(chunk_size=1024 * 1024):
                report_file.write(chunk)
            report_file.close()
            advertisers = {}
            with open(report_file.name, 'r') as f:
                csv_reader = csv.DictReader(f)
                for line in csv_reader:
                    advertiser_id = line["Advertiser ID"]
                    partner_id = line["Partner ID"]
                    if advertiser_id.strip():
                        try:
                            advertisers[partner_id].append(advertiser_id)
                            message = 'ADDING to key %s new advertiser %s' % (
                                partner_id, advertiser_id)
                            logger.info(message)
                        except KeyError:
                            advertisers[partner_id] = [advertiser_id]
                            message = 'CREATING new key %s with advertiser %s' % (
                                partner_id, advertiser_id)
                            logger.info(message)
                    else:
                        # A blank advertiser id ends the data section —
                        # presumably the report's summary/footer rows; confirm.
                        break
            models.Variable.set(self.variable_name, json.dumps(advertisers))
        finally:
            # delete=False above, so remove the temp file explicitly.
            report_file.close()
            os.unlink(report_file.name)
``` |
[
{
"content": "Here is a code file:\n```python\nfrom __future__ import absolute_import\n\nfrom opentracing import SpanContextCorruptedException\nfrom .context import SpanContext\nfrom .propagator import Propagator\n\nprefix_tracer_state = 'ot-tracer-'\nprefix_baggage = 'ot-baggage-'\nfield_name_trace_id = prefix... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom __future__ import absolute_import\n\nfrom opentracing import SpanContextCorruptedException\nfrom .context import SpanContext\nfrom .propagator import Propagator\n\nprefix_tracer_state = 'ot-tracer-'\nprefix_baggage = 'ot-baggage-'\nfield_name_t... | ```python
from __future__ import absolute_import
from opentracing import SpanContextCorruptedException
from .context import SpanContext
from .propagator import Propagator
# Carrier key prefixes for the text propagation format ("ot-tracer-*" for
# required tracer state, "ot-baggage-*" for user baggage).
prefix_tracer_state = 'ot-tracer-'
prefix_baggage = 'ot-baggage-'
# The three required tracer-state fields.
field_name_trace_id = prefix_tracer_state + 'traceid'
field_name_span_id = prefix_tracer_state + 'spanid'
field_name_sampled = prefix_tracer_state + 'sampled'
# Number of required fields a carrier must contain to be valid.
field_count = 3
def parse_hex_for_field(field_name, value):
    """Parse *value* as a base-16 integer.

    Raises SpanContextCorruptedException when the value is not valid
    hexadecimal; *field_name* is used in the error message.
    """
    try:
        parsed = int(value, 16)
    except ValueError:
        raise SpanContextCorruptedException(
            '{field_name} got an invalid hexadecimal value {value!r}'.format(
                field_name=field_name, value=value))
    return parsed
def parse_boolean_for_field(field_name, value):
    """Parse the string value of a field into a boolean.

    Accepts 'true'/'1' for True and 'false'/'0' for False.
    Raises SpanContextCorruptedException in case of failure.
    """
    if value in ('true', '1'):
        return True
    elif value in ('false', '0'):
        return False
    msg = (
        '{field} got an invalid value {value!r}, '
        "should be one of 'true', 'false', '0', '1'"
    )
    # Bug fix: report the field actually being parsed instead of always
    # blaming the hard-coded global `field_name_sampled`.
    raise SpanContextCorruptedException(msg.format(
        value=value,
        field=field_name
    ))
class TextPropagator(Propagator):
    """A BasicTracer Propagator for Format.TEXT_MAP."""

    def inject(self, span_context, carrier):
        """Write *span_context* into the dict-like *carrier*."""
        # Ids are written as lower-case hex, sampled as 'true'/'false'.
        carrier[field_name_trace_id] = '{0:x}'.format(span_context.trace_id)
        carrier[field_name_span_id] = '{0:x}'.format(span_context.span_id)
        carrier[field_name_sampled] = str(span_context.sampled).lower()
        if span_context.baggage is not None:
            for k in span_context.baggage:
                carrier[prefix_baggage+k] = span_context.baggage[k]

    def extract(self, carrier):  # noqa
        """Rebuild a SpanContext from *carrier*, or return None if absent."""
        # Count how many of the required fields are present so that "no
        # context at all" can be distinguished from "corrupt context".
        count = 0
        span_id, trace_id, sampled = (0, 0, False)
        baggage = {}
        for k in carrier:
            v = carrier[k]
            k = k.lower()  # keys are matched case-insensitively
            if k == field_name_span_id:
                span_id = parse_hex_for_field(field_name_span_id, v)
                count += 1
            elif k == field_name_trace_id:
                trace_id = parse_hex_for_field(field_name_trace_id, v)
                count += 1
            elif k == field_name_sampled:
                sampled = parse_boolean_for_field(field_name_sampled, v)
                count += 1
            elif k.startswith(prefix_baggage):
                baggage[k[len(prefix_baggage):]] = v
        if count == 0:
            # No tracer-state fields: either nothing to extract, or baggage
            # present without any context, which is corrupt.
            if len(baggage) > 0:
                raise SpanContextCorruptedException(
                    'found baggage without required fields')
            return None
        if count != field_count:
            # Some, but not all, required fields present.
            msg = (
                'expected to parse {field_count} fields'
                ', but parsed {count} instead'
            )
            raise SpanContextCorruptedException(msg.format(
                field_count=field_count,
                count=count,
            ))
        return SpanContext(
            span_id=span_id,
            trace_id=trace_id,
            baggage=baggage,
            sampled=sampled)
``` |
[
{
"content": "```python\nimport logging\nimport ibmsecurity.utilities.tools\n\nlogger = logging.getLogger(__name__)\n\n\ndef get(isamAppliance, directory_name, check_mode=False, force=False):\n \"\"\"\n Retrieving the list of suffixes for a particular federated directory\n \"\"\"\n return isamApplia... | [
{
"content": "<|memory_start|>```python\nimport logging\nimport ibmsecurity.utilities.tools\n\nlogger = logging.getLogger(__name__)\n\n\ndef get(isamAppliance, directory_name, check_mode=False, force=False):\n \"\"\"\n Retrieving the list of suffixes for a particular federated directory\n \"\"\"\n r... | ```python
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get(isamAppliance, directory_name, check_mode=False, force=False):
    """
    Retrieving the list of suffixes for a particular federated directory

    check_mode and force are accepted for interface consistency with the
    other functions in this module but have no effect here.
    """
    return isamAppliance.invoke_get("Retrieving the list of suffixes for a particular federated directory",
                                    "/isam/runtime_components/federated_directories/{0}/suffix/v1".format(
                                        directory_name))
def add(isamAppliance, directory_name, suffix, use_ssl=False, client_cert_label=None,
        check_mode=False,
        force=False):
    """
    Create a new suffix in a particular federated directory

    `suffix` is a list of {'id': ...} dicts (see _check).
    use_ssl and client_cert_label are currently unused.
    """
    # POST only when forced, or when none of the given suffixes already
    # exist (_check returns True on the first match).
    if force is True or _check(isamAppliance, directory_name, suffix) is False:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_post(
                "Create a new suffix in a particular federated directory",
                "/isam/runtime_components/federated_directories/{0}/suffix/v1".format(directory_name),
                {
                    'suffix': suffix
                })
    return isamAppliance.create_return_object()
def delete(isamAppliance, directory_name, suffix_name, check_mode=False, force=False):
    """
    Remove an existing suffix from a federated directory

    No-op (unchanged return object) when the suffix does not exist, unless
    force is True.
    """
    if force is True or _check(isamAppliance, directory_name, suffix_name) is True:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_delete(
                "Remove an existing suffix from a federated directory",
                "/isam/runtime_components/federated_directories/{0}/suffix/{1}/v1".format(directory_name, suffix_name))
    return isamAppliance.create_return_object()
def _check(isamAppliance, directory_name, suffix):
    """
    Check if federated directory suffix exists - will return true if any match is found

    `suffix` is either a list of {'id': ...} dicts (as passed by add()) or a
    plain suffix name (as passed by delete()).

    :param isamAppliance:
    :param directory_name:
    :param suffix:
    :return:
    """
    ret_obj = get(isamAppliance, directory_name)
    existing_ids = [suffix_obj['id'] for suffix_obj in ret_obj['data']]
    if isinstance(suffix, list):  # Add passes a list
        return any(new_suffix['id'] in existing_ids for new_suffix in suffix)
    # Update passes just suffix_name
    return suffix in existing_ids
def compare(isamAppliance1, isamAppliance2, directory_name):
    """
    Compare the suffix lists of a federated directory between two appliances
    """
    ret_obj1 = get(isamAppliance1, directory_name)
    ret_obj2 = get(isamAppliance2, directory_name)
    return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
``` |
[
{
"content": "Here is some code:\n```python\nimport copy\nimport importlib\nimport warnings\n\nfrom satella.coding.recast_exceptions import rethrow_as\nfrom satella.configuration import sources\nfrom satella.configuration.sources.base import BaseSource\nfrom satella.exceptions import ConfigurationError, Configu... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nimport copy\nimport importlib\nimport warnings\n\nfrom satella.coding.recast_exceptions import rethrow_as\nfrom satella.configuration import sources\nfrom satella.configuration.sources.base import BaseSource\nfrom satella.exceptions import Configurati... | ```python
import copy
import importlib
import warnings
from satella.coding.recast_exceptions import rethrow_as
from satella.configuration import sources
from satella.configuration.sources.base import BaseSource
from satella.exceptions import ConfigurationError, ConfigurationMisconfiguredError
# Public API of this module.
__all__ = [
    'load_source_from_dict',
    'load_source_from_list'
]
def handle_import(dct: dict):
    """Build a converter for the 'import' EXTRA_TYPES entry.

    The returned callable resolves dct['module'].dct['attribute'] and calls
    it with its argument, optionally pre-casting the argument via the
    'cast_before' sub-spec.
    """
    def convert(value):
        if 'cast_before' in dct:
            pre_cast = EXTRA_TYPES[dct['cast_before']['type']](dct['cast_before'])
            value = pre_cast(value)
        target = getattr(importlib.import_module(dct['module']), dct['attribute'])
        return target(value)
    return convert
# Special literal constructors usable as a "type" in argument dicts:
#   binary - encode dct['value'] using dct['encoding'] (default 'ascii')
#   lambda - build a one-argument lambda from dct['operation'] source text
#            (NOTE: uses eval — only safe on trusted configuration)
#   import - resolve module.attribute and call it; see handle_import()
EXTRA_TYPES = {
    'binary': lambda dct: dct['value'].encode(dct.get('encoding', 'ascii')),
    'lambda': lambda dct: eval('lambda x: ' + dct['operation'], globals(),
                               locals()),
    'import': handle_import,
}
@rethrow_as(Exception, ConfigurationError)
def load_source_from_dict(dct: dict) -> BaseSource:
    """
    Instantiate a BaseSource from its dict description.

    obj has a form of

    {
        "type": "BaseSource",
        "args": []  # optional
        ... kwargs
    }

    :raises ConfigurationError: upon failure to instantiate
    """
    dct = copy.copy(dct)
    type_ = dct.pop('type')  # type: str
    # 'arg' is a single-argument shorthand for 'args'.
    if 'arg' in dct:
        args = dct.pop('arg'),
    else:
        args = dct.pop('args', [])  # type: tp.List
    optional = dct.pop('optional', False)  # type: bool

    def to_arg(arg):
        # Dicts carrying a 'type' key are either special literals
        # (EXTRA_TYPES) or nested source definitions; everything else is
        # passed through unchanged.
        if isinstance(arg, dict) and 'type' in arg:
            a_type = arg['type']
            if a_type in EXTRA_TYPES:
                return EXTRA_TYPES[a_type](arg)
            elif a_type in sources.__dict__:
                return load_source_from_dict(arg)
            else:
                # Bug fix: the previous message interpolated an undefined
                # name `e`, raising NameError instead of warning.
                warnings.warn(
                    'Unknown type %r while parsing a dict with type, '
                    'returning original value' % (a_type,), UserWarning)
                return arg
        else:
            return arg

    args = map(to_arg, args)
    kwargs = {k: to_arg(v) for k, v in dct.items()}
    try:
        s = sources.__dict__[type_](*args, **kwargs)
    except KeyError:
        raise ConfigurationMisconfiguredError('unknown type %s' % (type_,))
    if optional:
        s = sources.OptionalSource(s)
    return s
def load_source_from_list(obj: list) -> 'sources.MergingSource':
    """
    Builds a MergingSource from dict-ed objects

    Each element of *obj* is passed through load_source_from_dict().
    """
    return sources.MergingSource(*map(load_source_from_dict, obj))
``` |
[
{
"content": "```python\nimport collections\nimport random\nimport logging\n\nfrom hetnetpy.hetnet import Graph\n\n\ndef permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=dict(), log=False):\n \"\"\"\n Derive a permuted hetnet from an input hetnet. This method applies the\n XSwap algorit... | [
{
"content": "<|memory_start|>```python\nimport collections\nimport random\nimport logging\n\nfrom hetnetpy.hetnet import Graph\n\n\ndef permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=dict(), log=False):\n \"\"\"\n Derive a permuted hetnet from an input hetnet. This method applies the\n ... | ```python
import collections
import random
import logging
from hetnetpy.hetnet import Graph
def permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=dict(), log=False):
    """
    Derive a permuted hetnet from an input hetnet. This method applies the
    XSwap algorithm separately for each metaedge. Hence, node degree is
    preserved for each type of edge. However, edges are randomized / shuffled.

    Users are recommended to interrogate the reported statistics to ensure
    that edges appear to be sufficiently randomized. Primarily, the number of
    edges of a given metaedge that remain unchanged from the original hetnet
    should have reached an assymptote. If the number of unchanged edges has
    not yet stabalized, further randomization is possible with this approach.

    Parameters
    ----------
    graph : hetnetpy.hetnet.Graph
        Input hetnet to create a permuted derivative from
    multiplier : int or float
        This is multiplied by the number of edges for each metaedge to
        determine the number of swaps to attempt.
    seed : int
        Seed to initialize Python random number generator. When creating many
        permuted hetnets, it's recommended to increment this number, such that
        each round of permutation shuffles edges in a different order.
    metaedge_to_excluded : dict (metaedge -> set)
        Edges to exclude. This argument has not been extensively used in
        practice.
    log : bool
        Whether to log diagnostic INFO via python's logging module.

    Returns
    -------
    permuted_graph : hetnetpy.hetnet.Graph
        A permuted hetnet derived from the input graph.
    stats : list of dicts
        A list where each item is a dictionary of permutation statistics at a
        checkpoint for a specific metaedge. These statistics allow tracking
        the progress of the permutation as the number of attempted swaps
        increases.
    """
    if log:
        logging.info("Creating permuted graph template")
    # Copy all nodes into a fresh graph sharing the same metagraph.
    permuted_graph = Graph(graph.metagraph)
    for (metanode_identifier, node_identifier), node in graph.node_dict.items():
        permuted_graph.add_node(
            metanode_identifier, node_identifier, name=node.name, data=node.data
        )
    if log:
        logging.info("Retrieving graph edges")
    metaedge_to_edges = graph.get_metaedge_to_edges(exclude_inverts=True)
    if log:
        logging.info("Adding permuted edges")
    all_stats = list()
    # Permute each metaedge's edge set independently so per-metaedge node
    # degree is preserved.
    for metaedge, edges in metaedge_to_edges.items():
        if log:
            logging.info(metaedge)
        excluded_pair_set = metaedge_to_excluded.get(metaedge, set())
        pair_list = [(edge.source.get_id(), edge.target.get_id()) for edge in edges]
        directed = metaedge.direction != "both"
        permuted_pair_list, stats = permute_pair_list(
            pair_list,
            directed=directed,
            multiplier=multiplier,
            excluded_pair_set=excluded_pair_set,
            seed=seed,
            log=log,
        )
        # Tag the per-checkpoint stats with the metaedge they belong to.
        for stat in stats:
            stat["metaedge"] = metaedge
            stat["abbrev"] = metaedge.abbrev
        all_stats.extend(stats)
        for pair in permuted_pair_list:
            permuted_graph.add_edge(pair[0], pair[1], metaedge.kind, metaedge.direction)
    return permuted_graph, all_stats
def permute_pair_list(
    pair_list,
    directed=False,
    multiplier=10,
    excluded_pair_set=None,
    seed=0,
    log=False,
    inplace=False,
):
    """
    Permute edges (of a single type) in a graph according to the XSwap function
    described in https://doi.org/f3mn58. This method selects two edges and
    attempts to swap their endpoints. If the swap would result in a valid edge,
    the swap proceeds. Otherwise, the swap is skipped. The end result is that
    node degree is preserved, but edges are shuffled, thereby losing their
    original meaning.

    Parameters
    ----------
    pair_list : list of tuples
        List of edges to permute. Each edge is represented as a (source,
        target) tuple. source and target represent nodes and can be any Python
        objects that define __eq__. In other words, this function does not
        assume any specific format for nodes. If the edges are from a bipartite
        or directed graph, then all tuples must have the same alignment. For
        example, if the edges represent the bipartite Compound-binds-Gene
        relationship, all tuples should be of the form (compound, gene) and not
        intermixed with (gene, compound) tuples. The only instance where order
        of the source and target is not important is for an undirected edge
        type where the source and target nodes are of the same type, such as
        Gene-interacts-Gene.
    directed : bool
        Whether the edge should be considered directed. If False, a swap that
        creates an a-b edge will be invalid if a b-a edge already exists.
    multiplier : int or float
        This is multiplied by the number of edges in pair_list to determine the
        number of swaps to attempt.
    excluded_pair_set : set of tuples or None
        Set of possible edges to forbid. If a swap would create an edge in this
        set, it would be considered invalid and hence skipped. None (the
        default) means no exclusions.
    seed : int
        Seed to initialize Python random number generator.
    log : bool
        Whether to log diagnostic INFO via python's logging module.
    inplace : bool
        Whether to modify the edge list in place.

    Returns
    -------
    pair_list : list of tuples
        The permuted edges, derived from the input pair_list.
    stats : list of dicts
        A list where each item is a dictionary of permutation statistics at a
        checkpoint. Statistics are collected at 10 checkpoints, spaced evenly
        by the number of attempts.
    """
    random.seed(seed)
    # Fix: avoid a mutable default argument (`excluded_pair_set=set()`);
    # default to a fresh empty set per call instead.
    if excluded_pair_set is None:
        excluded_pair_set = set()
    if not inplace:
        pair_list = pair_list.copy()
    pair_set = set(pair_list)
    assert len(pair_set) == len(pair_list)

    edge_number = len(pair_list)
    n_perm = int(edge_number * multiplier)

    # Per-checkpoint counters of why attempted swaps were rejected.
    count_same_edge = 0
    count_self_loop = 0
    count_duplicate = 0
    count_undir_dup = 0
    count_excluded = 0

    if log:
        logging.info(
            "{} edges, {} permutations (seed = {}, directed = {}, {} excluded_edges)".format(
                edge_number, n_perm, seed, directed, len(excluded_pair_set)
            )
        )

    orig_pair_set = pair_set.copy()
    step = max(1, n_perm // 10)
    print_at = list(range(step, n_perm, step)) + [n_perm - 1]
    stats = list()

    for i in range(n_perm):
        # Same two random edges
        i_0 = random.randrange(edge_number)
        i_1 = random.randrange(edge_number)

        # Same edge selected twice
        if i_0 == i_1:
            count_same_edge += 1
            continue
        pair_0 = pair_list.pop(i_0)
        pair_1 = pair_list.pop(i_1 - 1 if i_0 < i_1 else i_1)

        # Candidate swap: exchange the targets of the two edges.
        new_pair_0 = pair_0[0], pair_1[1]
        new_pair_1 = pair_1[0], pair_0[1]

        valid = False
        for pair in new_pair_0, new_pair_1:
            if pair[0] == pair[1]:
                count_self_loop += 1
                break  # edge is a self-loop
            if pair in pair_set:
                count_duplicate += 1
                break  # edge is a duplicate
            if not directed and (pair[1], pair[0]) in pair_set:
                count_undir_dup += 1
                break  # edge is a duplicate
            if pair in excluded_pair_set:
                count_excluded += 1
                break  # edge is excluded
        else:
            # edge passed all validity conditions
            valid = True

        # If new edges are invalid
        if not valid:
            for pair in pair_0, pair_1:
                pair_list.append(pair)

        # If new edges are valid
        else:
            for pair in pair_0, pair_1:
                pair_set.remove(pair)
            for pair in new_pair_0, new_pair_1:
                pair_set.add(pair)
                pair_list.append(pair)

        if i in print_at:
            stat = collections.OrderedDict()
            stat["cumulative_attempts"] = i
            index = print_at.index(i)
            stat["attempts"] = (
                print_at[index] + 1
                if index == 0
                else print_at[index] - print_at[index - 1]
            )
            stat["complete"] = (i + 1) / n_perm
            stat["unchanged"] = len(orig_pair_set & pair_set) / len(pair_set)
            stat["same_edge"] = count_same_edge / stat["attempts"]
            stat["self_loop"] = count_self_loop / stat["attempts"]
            stat["duplicate"] = count_duplicate / stat["attempts"]
            stat["undirected_duplicate"] = count_undir_dup / stat["attempts"]
            stat["excluded"] = count_excluded / stat["attempts"]
            stats.append(stat)

            # Counters are per-checkpoint: reset after recording.
            count_same_edge = 0
            count_self_loop = 0
            count_duplicate = 0
            count_undir_dup = 0
            count_excluded = 0

    assert len(pair_set) == edge_number
    return pair_list, stats
``` |
[
{
"content": "Recreate the original code text:\n```python\nfrom bazydanych2.settingsshared import *\n\nDEBUG=True\nTEMPLATE_DEBUG=True\n\nSTATIC_ROOT = '/tmp/staticfiles'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '(... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\nfrom bazydanych2.settingsshared import *\n\nDEBUG=True\nTEMPLATE_DEBUG=True\n\nSTATIC_ROOT = '/tmp/staticfiles'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {... | ```python
# Development settings overlay: pulls in the shared base settings and
# overrides them for local debugging.  Not for production use.
from bazydanych2.settingsshared import *
# Enable Django debug pages and template debugging (development only).
DEBUG=True
TEMPLATE_DEBUG=True
# collectstatic target; a throwaway location is fine for development.
STATIC_ROOT = '/tmp/staticfiles'
# Logging: everything to the console while DEBUG is on; e-mail the
# admins on request errors when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            "level": 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler'
        }
    },
    'root':{
        'handlers' : ['console']
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Outgoing e-mail is written to files instead of being sent.
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/app-messages'
INSTALLED_APPS += ('celery_test_app', )
# NOTE(review): looks like a typo of "OFFLINE" — confirm against the code
# that reads this setting before renaming it anywhere.
ALLOW_OFFILNE_GRADING = False
SCHEMA_CHECKER_HOST = '192.168.56.30'
``` |
[
{
"content": "```python\n# command: python3 download_yt.py <uploads_playlistid>\n# example: python3 download_yt.py UUcD1pbEB9HNFIVwJih_ZWAA \nimport json\nimport urllib.request\nimport requests\nimport subprocess\nimport os\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('... | [
{
"content": "<|memory_start|>```python\n# command: python3 download_yt.py <uploads_playlistid>\n# example: python3 download_yt.py UUcD1pbEB9HNFIVwJih_ZWAA \nimport json\nimport urllib.request\nimport requests\nimport subprocess\nimport os\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser()\nparse... | ```python
# command: python3 download_yt.py <uploads_playlistid>
# example: python3 download_yt.py UUcD1pbEB9HNFIVwJih_ZWAA
#
# Downloads every video of a channel's "uploads" playlist via the YouTube
# Data API v3 plus youtube-dl, skipping files already present under
# download_root.  Live streams are handed off to a separate terminal for
# capture into streams_directory.
import json
import urllib.request
import requests
import subprocess
import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('channel')
args = parser.parse_args()
# Destination for captured live streams (regular downloads go under
# download_root).  Windows-style paths — this script targets Windows,
# with a partial gnome-terminal fallback further below.
streams_directory = r'G:\yt_live'
uploads_playlistid = args.channel
download_root = r'C:\video'
# NOTE(review): api_key is empty — every API request below will fail
# until a YouTube Data API key is filled in here.
api_key = ''
# Optional title filter; the empty string matches every video.
search_word = ''
# Accumulates matching video IDs across all pages (mutated by get_url).
list_of_videos = []
def get_url(url2):
    # Fetch one page of the playlist; recurses while the API returns a
    # nextPageToken.  Appends matching IDs to the module-level
    # list_of_videos and returns (list_of_videos, channel_name).
    # NOTE(review): channel_name is unbound if a page has no items —
    # that would raise UnboundLocalError; confirm playlists are never empty.
    print(url2)
    response = requests.get(url2)
    data = json.loads(response.text)
    for item in data['items']:
        title = item['snippet']['title']
        yt_id = item['snippet']['resourceId']['videoId']
        channel_name = item['snippet']['channelTitle']
        if search_word.lower() in title.lower():
            list_of_videos.append(yt_id)
            try:
                print(yt_id, title)
            except:
                # Console may not be able to encode the title (e.g. cp1252
                # on Windows); fall back to printing the UTF-8 bytes.
                # NOTE(review): bare except — narrow to UnicodeEncodeError.
                print(yt_id, title.encode("utf-8"))
    if 'nextPageToken' in data:
        # Recurse into the next page.  NOTE(review): very long playlists
        # could in principle hit the recursion limit; a loop would be safer.
        print('getting page', data['nextPageToken'])
        url2 = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet%2CcontentDetails&maxResults=25&pageToken={}&playlistId={}&key={}' .format(data['nextPageToken'], uploads_playlistid, api_key)
        get_url(url2)
    else:
        pass
    return list_of_videos, channel_name
url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet%2CcontentDetails&maxResults=25&playlistId={}&key={}' .format(uploads_playlistid, api_key)
info = get_url(url)
# Sanitise the channel title into a usable directory name.
channel_name1 = info[1].replace(' ', '_')
channel_name = re.sub(r'[\;*?!<>|/:"]', '', channel_name1)
print(channel_name)
download_directory = os.path.join(download_root, channel_name)
if not os.path.exists(download_directory):
    os.makedirs(download_directory)
# Collect every filename already on disk under download_root.
list_of_local = []
for subdir, dirs, files in os.walk(download_root):
    for fn2 in files:
        list_of_local.append(fn2)
# Drop IDs whose finished download already exists locally (partial and
# thumbnail files don't count).
# NOTE(review): this removes from list_of_videos while the inner loop
# iterates it, which can skip elements — iterate over a copy instead.
for fn3 in list_of_local:
    for fn4 in list_of_videos:
        if fn4 in fn3 and not fn3.endswith('.part') and not fn3.endswith('.ytdl') and not fn3.endswith('.webp') and not fn3.endswith('.jpg'):
            list_of_videos.remove(fn4)
print('new videos/videos you dont have')
print('======')
for item1 in list_of_videos:
    print(item1)
print('======')
for item3 in list_of_videos:
    video_final = 'https://www.youtube.com/watch?v={}' .format(item3)
    # NOTE(review): shell=True with interpolated strings is a command
    # injection risk if IDs were ever untrusted; prefer shell=False with
    # an argument list.
    if os.name == 'nt':
        proc = subprocess.Popen(['youtube-dl', '--write-all-thumbnails', '--match-filter', '!is_live', '{}' .format(video_final)], cwd=download_directory, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen(['youtube-dl --match-filter !is_live {}' .format(video_final)], cwd=download_directory, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        print('RUNNING YOUTUBE-DL:')
        print('###############')
        # Give each download up to ~16 minutes before killing it.
        output, errors = proc.communicate(timeout=950)
        print('|||', output.decode("utf-8"))
        print('///', errors.decode("utf-8"))
        print('###############')
        # youtube-dl reports "skipping" for live streams (--match-filter);
        # hand those off to a separate terminal window for capture.
        if 'skipping' in str(output).lower():
            if not os.path.exists(streams_directory):
                os.makedirs(streams_directory)
            print(item3, 'video is a live stream, capturing separately')
            file_format = r'%(title)s-%(id)s.%(ext)s'
            final_cmd = 'start cmd /k youtube-dl -o "{}" {}' .format(streams_directory+'\\'+file_format, video_final)
            final_cmd2 = "gnome-terminal -e 'youtube-dl -o {} {}'" .format(streams_directory+'\\'+file_format, video_final)
            if os.name == 'nt':
                os.system(final_cmd)
            else:
                os.system(final_cmd2)
        # Log hard failures (but not premieres, which resolve later).
        if 'error' in str(errors).lower() and not 'premiere' in str(errors).lower():
            with open(os.path.join(download_root, 'errors.txt'), 'a') as error_file:
                print('unable to download:', video_final, 'logged to errors.txt')
                error_file.write(video_final+'\n')
    except Exception as e:
        # Timeout or decode failure: kill the downloader and move on.
        print(e)
        proc.kill()
        output, errors = proc.communicate()
print('all videos downloaded / no new videos')
``` |
[
{
"content": "Here is a code file:\n```python\n# Copyright 2012, Oliver Nagy <olitheolix@gmail.com>\n#\n# This file is part of Qtmacs.\n#\n# Qtmacs is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, eit... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright 2012, Oliver Nagy <olitheolix@gmail.com>\n#\n# This file is part of Qtmacs.\n#\n# Qtmacs is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software... | ```python
# Copyright 2012, Oliver Nagy <olitheolix@gmail.com>
#
# This file is part of Qtmacs.
#
# Qtmacs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Qtmacs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Qtmacs. If not, see <http://www.gnu.org/licenses/>.
"""
Qtmacs internal support classes.
The classes and functions in this module are used by various internal
modules and serve mostly administrative purposes that do not require
state information of the objects that use them.
While all classes in this file can safely be used in any applet/macro,
only ``QtmacsKeysequence`` is likely be of any practical value.
It is safe to use::
from auxiliary import something
"""
import re
import inspect
import qtmacs.type_check
import qtmacs.qte_global as qte_global
from PyQt4 import QtCore, QtGui
from qtmacs.exceptions import *
# Shorthands
type_check = qtmacs.type_check.type_check
# ----------------------------------------------------------------------
# Classes
# ----------------------------------------------------------------------
class QtmacsMessage(object):
    """
    Data container passed along with every Qtmacs signal or hook.

    The payload in ``data`` is an arbitrary Python object and
    ``senderObj`` identifies the object that triggered the delivery.
    Recipients read both directly via the ``data`` and ``senderObj``
    attributes.  In addition, ``isHook`` tells hooks (**True**) apart
    from signals (**False**), and ``messengerName`` records the name of
    the signal or hook that carried the message; both are filled in via
    ``setHookName``/``setSignalName`` prior to delivery.

    |Args|

    * ``data`` (**object**): arbitrary. The recipient must know what to
    * ``senderObj`` (**QObject**): reference to calling object.

    |Raises|

    * **None**
    """
    @type_check
    def __init__(self, data=None, senderObj: QtCore.QObject=None):
        super().__init__()
        # Payload and originator exactly as supplied by the caller.
        self.senderObj = senderObj
        self.data = data
        # Delivery metadata; unknown until one of the setters runs.
        self.messengerName = None
        self.isHook = None
    @type_check
    def setHookName(self, name: str):
        """
        Declare that the hook called ``name`` will deliver this message.
        """
        self.messengerName = name
        self.isHook = True
    @type_check
    def setSignalName(self, name: str):
        """
        Declare that the signal called ``name`` will deliver this message.
        """
        self.messengerName = name
        self.isHook = False
class QtmacsVersionStructure(object):
    """
    Plain record for version related meta data.

    |Args|

    * **None**

    |Raises|

    * **None**
    """
    def __init__(self):
        # Every field starts out unset; callers populate them later.
        self.last_changed = None
        self.created = None
        self.version = None
class QtmacsAdminStructure(object):
    """
    Container object carried by every applet and widget in the
    instance variable ``_qteAdmin``.
    This class holds all the information needed by Qtmacs to
    administrate its applets and widgets, and avoids polluting the
    name space of the native Qt classes.
    As a rule of thumb, do not set any values in this object
    manually. Instead, use the dedicated access methods. If there is
    no such method, then the variable is likely not meant to be
    tampered with.
    |Args|
    * ``qteApplet`` (**QtmacsApplet**): handle to the applet holding
      either this structure directly, or the widget which holds it.
    * ``appletID`` (**str**): applet ID.
    * ``isFocusable`` (**bool**): whether a widget can have the focus
      (ignored for ``QtmacsApplets``).
    * ``isQtmacsWindow`` (**bool**): whether or not the caller is
      ``QtmacsMain``. This flag only exists to avoid problems with
      assigning this object to ``QtmacsMain`` at start up.
    |Raises|
    * **None**
    """
    def __init__(self, qteApplet, appletID=None,
                 isFocusable=True, isQtmacsWindow=False):
        # Keep a reference to the main Qtmacs class (a process-wide
        # singleton published in qte_global).
        self.qteMain = qte_global.qteMain
        # Save a handle to the parent applet.
        self.qteApplet = qteApplet
        # Save the applet name (a string).
        self.appletID = appletID
        # Unfocusable widgets are skipped when cycling the focus.
        self.isFocusable = isFocusable
        # If true, call the qteKeyPressEventBefore method of the
        # applet (not the widget!) before it is processed by Qtmacs.
        self.receiveBeforeQtmacsParser = False
        # If true, call the qteKeyPressEventAfter method of the applet
        # (not the widget!) after it was processed by Qtmacs.
        self.receiveAfterQtmacsParser = False
        # If True, Qtmacs will intercept the key events for this widget.
        self.filterKeyEvents = True
        if not isQtmacsWindow:
            # Initially, the local key map mirrors the global one.
            # (Skipped for QtmacsMain itself, which constructs this
            # object before the global key map exists.)
            self.keyMap = self.qteMain.qteCopyGlobalKeyMap()
        # Applet signature. This information determines which macros
        # are compatible.
        self.appletSignature = None
        # Widget Signature. This variable is automatically set for
        # every widget added via ``qteAddWidget``. If the object is
        # not a widget but a reference then it defaults to the string
        # "QWidget".
        self.widgetSignature = "QWidget"
        # List of widgets held by this applet. The ordering of this
        # list determines the focus sequence.
        self.widgetList = []
        # Specify whether the widget is a QtmacsApplet. The default
        # value is true because the qteAddWidget routine will
        # overwrite this flag for widgets.
        self.isQtmacsApplet = True
        # Specify if the applet is a mini applet.
        self.isMiniApplet = False
        # Handle to parent window. This is always **None** if the
        # widget is invisible. This flag is updated automatically by
        # the show() and hide() methods.
        self.parentWindow = None
        # Visibility flag. This is usually the same as Qt's native
        # ``isVisible`` but whereas Qt does not actually update this
        # flag until the event loop had a chance to paint the applet,
        # the isVisible flag will update as soon as the show/hide
        # methods are called. This extra information is necessary
        # because several methods in QtmacsMain make applets visible
        # and invisible without calling the event loop in between,
        # which makes it impossible to track the visibility states.
        self.isVisible = False
        # This is general purpose dictionary that macros can use to
        # store applet specific information.
        self.macroData = {}
        # If True, then the applet can be killed without loosing
        # data. This is mostly a convenience flag to facilitate a
        # reasonably generic kill-applet macro, but the applet
        # programmer is free to provide his own kill-applet macro for
        # his applet. That macro may use applet specific variables to
        # determine whether or not the applet can be safely killed and
        # if not, how to achieve it.
        self.readyToKill = True
    @type_check
    def qteSetKeyFilterPolicy(self, receiveBefore: bool=False,
                              useQtmacs: bool=None,
                              receiveAfter: bool=False):
        """
        Set the policy on how Qtmacs filters keyboard events for a
        particular widgets.
        The options can be arbitrarily combined, eg. ::
            widget.qteSetKeyFilterPolicy(True, True, False)
        will first pass the event to the applet's ``keyPressEvent``
        method and afterwards pass the same event to Qtmacs' keyboard
        filter.
        For all text-processing widgets (eg. ``QLineEdit``,
        ``QTextEdit``, ``QWebView``, etc.) it is almost always a good
        idea to use the default, ie. (False, True, False, False),
        which lets Qtmacs process everything. In this case the only
        way to interact with the widget is via macros (and the mouse).
        If ``receiveBefore`` and/or ``receiveAfter`` is set then
        ``qteKeyPressEventBefore`` and/or ``qteKeyPressEventAfter`` of
        the QtmacsApplet (not widget) is called to inspect the event.
        .. note:: The default behaviour is to let Qtmacs handle all
           keyboard events and interact with the applet only via
           macros. It may be more convenient for a programmer to
           handle keyboard events directly in the keyPressEvent
           routine, as is customary with Qt applications, but this
           compromises the customisation ability of Qtmacs. As a rule
           of thumb, applet classes should not implement keyPressEvent
           at all. However, since there is an exception to every rule
           Qtmacs allows it.
        .. note:: This method must be part of the qteAdmin object
           because it is attached to every object under the
           control of Qtmacs.
        |Args|
        * ``receiveBefore`` (**bool**): pass the keyEvent to the applet
          before Qtmacs processes it.
        * ``useQtmacs`` (**bool**): let Qtmacs parse the key.
        * ``receiveAfter`` (**bool**): pass the keyEvent to the applet
          after Qtmacs processed it.
        |Returns|
        * **None**
        |Raises|
        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Store key filter policy flags; keyFilterPolicy() returns them
        # in (before, useQtmacs, after) order.
        self.filterKeyEvents = useQtmacs
        self.receiveBeforeQtmacsParser = receiveBefore
        self.receiveAfterQtmacsParser = receiveAfter
    def keyFilterPolicy(self):
        """
        Return the key filter policy for the current applet.
        .. note:: This method must be part of the qteAdmin object
           because it is attached to every object under the
           control of Qtmacs.
        |Args|
        * **None**
        |Returns|
        ``tuple``: (receiveBefore, useQtmacs, receiveAfter)
        |Raises|
        * **None**
        """
        return (self.receiveBeforeQtmacsParser, self.filterKeyEvents,
                self.receiveAfterQtmacsParser)
class QtmacsKeysequence(object):
    """
    Parse and represent a Qtmacs keyboard sequence.
    Without any argument, it represents an empty sequence. If the
    argument is a string or a list/tuple, then a parser attempts to
    convert it into a sequence of valid ``QKeyEvent`` objects. If the
    argument is another QtmacsKeysequence, then a copy of the object
    is returned.
    Examples for instantiating this object with human readable
    keyboard sequences::
        QtmacsKeysequence('<ctrl>+f h <alt>+K <ctrl>+k')
        QtmacsKeysequence('<ctrl>+f <ctrl>+<alt>++ <ctrl>+<alt>+<space>')
        QtmacsKeysequence('<ctrl>+f <ctrl>+F <ctrl>++ <ctrl>+<space>'
                          '<ctrl>+< <alt>+> < > <space>')
    The syntax of the string should be self explanatory. In addition,
    everything in angular brackets is case insensitive, eg. '<ctrl>-f'
    and '<CtRL>-f' are the same, and so is '<space>' and
    '<SPAce>'. However, non-bracketed keys are case sensitive,
    eg. '<ctrl>-f>' is not the same as '<ctrl>+F'. Note that it is not
    necessary (in fact impossible) to specify a <shift> modifier.
    Keyboard combination are separated by (an arbitrary number of)
    white spaces. Non-printable characters have a bracketed mnemonic,
    eg. <space>, <backspace>, <tab>, <F1>. The exact list of available
    characters, as well as the necessity for <shift> modifiers,
    depends on the used OS and keyboard. The used layout is specified
    in ``Qt_keymap`` variable from the global name space which
    ``QtmacsMain`` sets at startup, although it utilises the
    ``platform_setup.py`` module to do the actual work. That module is
    also the point of entry for adding new key maps, and/or extending
    existing ones.
    Instead of specifying a human readable string it is also possible
    to instantiate ``QtmacsKeyboardsequence`` with sequence of Qt
    constants from the ``QtCore.Qt`` name space, for instance::
        QtmacsKeysequence([(QtCore.Qt.ControlModifier, QtCore.Qt.Key_H),
                           (QtCore.Qt.NoModifier, QtCore.Qt.Key_K)])
    is the same as::
        QtmacsKeysequence('<ctrl>+h k').
    The macro/applet programmer is unlikely to encounter this class at
    all as the methods of these classes that require keyboard
    sequences (eg. ``qteBindKeyWidget``) are usually called
    with human readable strings anyway because they are convenient.
    However, Qtmacs internally, the only accepted way to deal with
    keyboard shortcuts is via this class.
    |Args|
    * ``keysequence`` (**str** or **tuple** or **list** or
      **QtmacsKeysequence**)
    |Raises|
    * **QtmacsKeysequenceError** if ``keysequence`` could not be parsed.
    """
    def __init__(self, keysequence=None):
        # Only used when called as an iterator to yield the individual
        # QKeyEvents that make up the key sequence represented by this
        # class.
        self._iterCnt = 0
        # Get a reference to the key map for this machine. This
        # reference is usually set by the constructor of the
        # QtmacsMain class early on and should therefore be
        # available. If not, then something is seriously wrong.
        if hasattr(qte_global, 'Qt_key_map'):
            # Dictionary that maps human readable keys to Qt
            # constants.
            self.keyDict = qte_global.Qt_key_map
        else:
            msg = '"Qt_key_map" variable does not exist in global name space'
            raise QtmacsKeysequenceError(msg)
        # Get a reference to the modifier map for this machine (set at
        # the same time as Qt_key_map above).
        if hasattr(qte_global, 'Qt_modifier_map'):
            # Dictionary that maps modifier keys to Qt constants.
            self.modDict = qte_global.Qt_modifier_map
        else:
            msg = '"Qt_modifier_map" variable does not exist '
            msg += 'in global name space.'
            raise QtmacsKeysequenceError(msg)
        # Make a copy of keyDict but with keys as values and vice
        # versa. This dictionary will be used to map the binary (Qt
        # internal) representation of keys to human readable values.
        self.keyDictReverse = {}
        for key, value in self.keyDict.items():
            self.keyDictReverse[value] = key
        # A list of QKeyEvent events and numerical constants from the
        # Qt library. Both lists represent the same key sequence and
        # the reset() method clears both.
        self.keylistQtConstants = None
        self.keylistKeyEvent = None
        self.reset()
        # Act on the argument passed to the constructor.
        if isinstance(keysequence, str):
            # We were passed a string --> parse it to extract the key sequence.
            self.str2key(keysequence)
        elif isinstance(keysequence, list) or isinstance(keysequence, tuple):
            # We were passed a list --> parse it to extract the key sequence.
            self.list2key(keysequence)
        elif isinstance(keysequence, QtmacsKeysequence):
            # We were passed another QtmacsKeysequence object --> copy
            # all its attributes.
            self.keylistQtConstants = keysequence.keylistQtConstants
            self.keylistKeyEvent = keysequence.keylistKeyEvent
        elif keysequence is None:
            # We were passed nothing --> do nothing.
            pass
        else:
            msg = 'Argument must be either None, a string, a list, '
            msg += 'or a QtmacsKeySequence.'
            raise QtmacsKeysequenceError(msg)
    def __repr__(self):
        """
        Print a human readable version of the key sequence represented
        by this object.
        """
        return self.toString()
    def reset(self):
        """
        Flush the key sequences.
        |Args|
        * **None**
        |Returns|
        **None**
        |Raises|
        * **None**
        """
        self.keylistQtConstants = []
        self.keylistKeyEvent = []
    def list2key(self, keyList):
        """
        Convert a list of (``QtModifier``, ``QtCore.Qt.Key_*``) tuples
        into a key sequence.
        If no error is raised, then the list was accepted.
        |Args|
        * ``keyList`` (**list**): eg. (QtCore.Qt.ControlModifier,
          QtCore.Qt.Key_F).
        |Returns|
        **None**
        |Raises|
        * **QtmacsKeysequenceError** if the provided ``keysequence``
          could not be parsed.
        """
        for keyCombo in keyList:
            if not (isinstance(keyCombo, list) or isinstance(keyCombo, tuple)):
                msg = ('Format of native key list is invalid.'
                       ' Must be a list/tuple of list/tuples.')
                raise QtmacsKeysequenceError(msg)
            if len(keyCombo) != 2:
                msg = 'Format of native key list is invalid.'
                msg += 'Each element must have exactly 2 entries.'
                raise QtmacsKeysequenceError(msg)
            # Construct a new QKeyEvent. Note that the general
            # modifier (ie. <ctrl> and <alt>) still need to be
            # combined with shift modifier (which is never a general
            # modifier) if the key demands it. This combination is a
            # simple "or" on the QFlags structure. Also note that the
            # "text" argument is omitted because Qt is smart enough to
            # fill it internally. The construction happens inside the
            # try block so that an invalid (modifier, key) pair is
            # reported as a QtmacsKeysequenceError; the error is
            # raised outside the block to avoid raising an exception
            # inside an exception handler.
            # (BUGFIX: previously the QKeyEvent was also constructed
            # once *before* the try block, so a TypeError escaped
            # untranslated.)
            try:
                key_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                            keyCombo[1], keyCombo[0])
                err = False
            except TypeError:
                err = True
            if err:
                msg = ('Format of native key list is invalid. '
                       'Must be a list/tuple of list/tuples.')
                raise QtmacsKeysequenceError(msg)
            else:
                self.appendQKeyEvent(key_event)
    def str2key(self, keyString):
        """
        Parse a human readable key sequence.
        If no error is raised, then ``keyString`` could be
        successfully converted into a valid key sequence and is
        henceforth represented by this object.
        |Args|
        * ``keyString`` (**QtmacsKeysequence**): eg. "<ctrl>+f"
        |Returns|
        **None**
        |Raises|
        * **QtmacsKeysequenceError** if ``keyString`` could not be parsed.
        """
        # Ensure the string is non-empty.
        if keyString == '':
            raise QtmacsKeysequenceError('Cannot parse empty string')
        # HTML-escape the angular brackets so that eg. '<ctrl>' shows
        # up verbatim inside the <b>...</b> markup of error messages.
        # (BUGFIX: these replacements were no-ops before.)
        tmp = str(keyString)
        tmp = tmp.replace('<', '&lt;')
        tmp = tmp.replace('>', '&gt;')
        keyStringHtml = '<b>{}</b>.'.format(tmp)
        del tmp
        # Remove leading and trailing white spaces, and reduce
        # sequences of white spaces to a single white space. If this
        # results in an emtpy string (typically the case when the user
        # tries to register a white space with ' ' instead of with
        # '<space>') then raise an error.
        rawKeyStr = keyString.strip()
        if len(rawKeyStr) == 0:
            msg = 'Cannot parse the key combination {}.'.format(keyStringHtml)
            raise QtmacsKeysequenceError(msg)
        # Split the string at these white spaces and convert eg.
        # "   <ctrl>+x  <ctrl>+f " first into
        # "<ctrl>+x <ctrl>+f" and from there into the list of
        # individual key combinations ["<ctrl>+x", "<ctrl>+f"].
        rawKeyStr = re.sub(' +', ' ', rawKeyStr)
        rawKeyStr = rawKeyStr.split(' ')
        # Now process the key combinations one by one. By definition.
        for key in rawKeyStr:
            # Find all bracketed keys in the key combination
            # (eg. <ctrl>, <space>).
            desc_keys = re.findall('<.*?>', key)
            # There are four possibilities:
            # * no bracketed key (eg. "x" or "X")
            # * one bracketed key (eg. "<ctrl>+x", or just "<space>")
            # * two bracketed keys (eg. "<ctrl>+<space>" or "<ctrl>+<alt>+f")
            # * three bracketed keys (eg. <ctrl>+<alt>+<space>).
            if len(desc_keys) == 0:
                # No bracketed key means no modifier, so the key must
                # stand by itself.
                modStr = ['<NONE>']
                keyStr = key
            elif len(desc_keys) == 1:
                if '+' not in key:
                    # If no '+' sign is present then it must be
                    # bracketed key without any modifier
                    # (eg. "<space>").
                    modStr = ['<NONE>']
                    keyStr = key
                else:
                    # Since a '+' sign and exactly one bracketed key
                    # is available, it must be a modifier plus a
                    # normal key (eg. "<ctrl>+f", "<alt>++").
                    idx = key.find('+')
                    modStr = [key[:idx]]
                    keyStr = key[idx + 1:]
            elif len(desc_keys) == 2:
                # There are either two modifiers and a normal key
                # (eg. "<ctrl>+<alt>+x") or one modifier and one
                # bracketed key (eg. "<ctrl>+<space>").
                if (key.count('+') == 0) or (key.count('+') > 3):
                    # A valid key combination must feature at least
                    # one- and at most three "+" symbols.
                    msg = 'Cannot parse the key combination {}.'
                    msg = msg.format(keyStringHtml)
                    raise QtmacsKeysequenceError(msg)
                elif key.count('+') == 1:
                    # One modifier and one bracketed key
                    # (eg. "<ctrl>+<space>").
                    idx = key.find('+')
                    modStr = [key[:idx]]
                    keyStr = key[idx + 1:]
                elif (key.count('+') == 2) or (key.count('+') == 3):
                    # Two modifiers and one normal key
                    # (eg. "<ctrl>+<alt>+f", "<ctrl>+<alt>++").
                    idx1 = key.find('+')
                    idx2 = key.find('+', idx1 + 1)
                    modStr = [key[:idx1], key[idx1 + 1:idx2]]
                    keyStr = key[idx2 + 1:]
            elif len(desc_keys) == 3:
                if key.count('+') == 2:
                    # There are two modifiers and one bracketed key
                    # (eg. "<ctrl>+<alt>+<space>").
                    idx1 = key.find('+')
                    idx2 = key.find('+', idx1 + 1)
                    modStr = [key[:idx1], key[idx1 + 1:idx2]]
                    keyStr = key[idx2 + 1:]
                else:
                    # A key combination with three bracketed entries
                    # must have exactly two '+' symbols. It cannot be
                    # valid otherwise.
                    msg = 'Cannot parse the key combination {}.'
                    msg = msg.format(keyStringHtml)
                    raise QtmacsKeysequenceError(msg)
            else:
                msg = 'Cannot parse the key combination {}.'
                msg = msg.format(keyStringHtml)
                raise QtmacsKeysequenceError(msg)
            # The dictionary keys that map the modifiers and bracketed
            # keys to Qt constants are all upper case by
            # convention. Therefore, convert all modifier keys and
            # bracketed normal keys.
            modStr = [_.upper() for _ in modStr]
            if (keyStr[0] == '<') and (keyStr[-1] == '>'):
                keyStr = keyStr.upper()
            # Convert the text version of the modifier key into the
            # QFlags structure used by Qt by "or"ing them
            # together. The loop is necessary because more than one
            # modifier may be active (eg. <ctrl>+<alt>).
            modQt = QtCore.Qt.NoModifier
            for mod in modStr:
                # Ensure that the modifier actually exists (eg. the
                # user might have made a typo like "<ctlr>" instead of
                # "<ctrl>"). Also, the keys in the dictionary consist
                # of only upper case letter for the modifier keys.
                if mod not in self.modDict:
                    msg = 'Cannot parse the key combination {}.'
                    msg = msg.format(keyStringHtml)
                    raise QtmacsKeysequenceError(msg)
                # Since the modifier exists in the dictionary, "or"
                # them with the other flags.
                modQt = modQt | self.modDict[mod]
            # Repeat the modifier procedure for the key. However,
            # unlike for the modifiers, no loop is necessary here
            # because only one key can be pressed at the same time.
            if keyStr in self.keyDict:
                modQt_shift, keyQt = self.keyDict[keyStr]
            else:
                msg = 'Cannot parse the key combination {}.'
                msg = msg.format(keyStringHtml)
                raise QtmacsKeysequenceError(msg)
            # Construct a new QKeyEvent. Note that the general
            # modifier (ie. <ctrl> and <alt>) still need to be
            # combined with shift modifier if the key demands it. This
            # combination is a simple "or" on the QFlags structure.
            # Also note that the "text" argument is omitted because Qt
            # is smart enough to determine it internally.
            key_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, keyQt,
                                        modQt | modQt_shift)
            # Finally, append this key to the key sequence represented
            # by this object.
            self.appendQKeyEvent(key_event)
    @type_check
    def appendQKeyEvent(self, keyEvent: QtGui.QKeyEvent):
        """
        Append another key to the key sequence represented by this object.
        |Args|
        * ``keyEvent`` (**QKeyEvent**): the key to add.
        |Returns|
        **None**
        |Raises|
        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Store the QKeyEvent.
        self.keylistKeyEvent.append(keyEvent)
        # Convenience shortcuts.
        mod = keyEvent.modifiers()
        key = keyEvent.key()
        # Add the modifier and key to the list. The modifier is a
        # QFlag structure and must by typecast to an integer to avoid
        # difficulties with the hashing in the ``match`` routine of
        # the ``QtmacsKeymap`` object.
        self.keylistQtConstants.append((int(mod), key))
    def toQtKeylist(self):
        """
        Return the key sequence represented by this object as a tuple
        of Qt constants.
        The tuple contains as many elements as there are individual
        key combination, each represented by a (QtModifier,
        QtCore.Qt.Key_xxx) tuple itself. For instance, if the object
        was created as Qtmacs('<Ctrl>+h k') then this function would
        return the tuple ((67108864, 72), (0, 75)). Note that this
        list is suitable as an argument to QtmacsKeysequence, which
        would create another object representing the same key
        sequence.
        Note that the numerical constants may be machine dependent.
        |Args|
        * **None**
        |Returns|
        **list**: list of (QtModifer, Qt.Key_xxx) tuples.
        |Raises|
        * **None**
        """
        return tuple(self.keylistQtConstants)
    def toQKeyEventList(self):
        """
        Return the key sequence represented by this object as a tuple
        of ``QKeyEvent`` objects, one per key combination.
        The events are the exact objects accumulated via
        ``appendQKeyEvent`` and are ordered as they were added.
        Note that the numerical constants may be machine dependent.
        |Args|
        **None**
        |Returns|
        **list**: list of QKeyEvents.
        |Raises|
        * **None**
        """
        return tuple(self.keylistKeyEvent)
    def toString(self):
        """
        Return the key sequence as a human readable string, eg. "<ctrl>+x".
        Note that this list is suitable as an argument to
        QtmacsKeysequence, which would create another object
        representing the same key sequence. If a key could not be
        converted then it will be displayed as '<Unknown>'. If this
        happens, then the key map in ``qte_global.default_qt_keymap``
        is incomplete and should be amended accordingly.
        |Args|
        * **None**
        |Returns|
        **str**: the key sequence, eg. '<ctrl>+f', or '<F1>', or
          '<Unknown>'.
        |Raises|
        * **None**
        """
        # Initialise the final output string.
        retVal = ''
        for mod, key in self.keylistQtConstants:
            out = ''
            # Check for any modifiers except <shift> and add the
            # corresponding string.
            if (mod & QtCore.Qt.ControlModifier):
                out += '<Ctrl>+'
            if (mod & QtCore.Qt.AltModifier):
                out += '<Alt>+'
            if (mod & QtCore.Qt.MetaModifier):
                out += '<Meta>+'
            if (mod & QtCore.Qt.KeypadModifier):
                out += '<Keypad>+'
            if (mod & QtCore.Qt.GroupSwitchModifier):
                out += '<GroupSwitch>+'
            # Format the string representation depending on whether or
            # not <Shift> is active.
            if (mod & QtCore.Qt.ShiftModifier):
                # If the key with the shift modifier exists in the
                # reverse dictionary then use that string, otherwise
                # construct it manually be printing the modifier and
                # the key name. The first case is typically
                # encountered for upper case characters, where eg. 'F'
                # is preferable over '<Shift>+f'.
                if (QtCore.Qt.ShiftModifier, key) in self.keyDictReverse:
                    # The shift-combined key exists in the dictionary,
                    # so use it.
                    out += self.keyDictReverse[(QtCore.Qt.ShiftModifier, key)]
                elif (QtCore.Qt.NoModifier, key) in self.keyDictReverse:
                    # The shift-combined key does not exists in the
                    # dictionary, so assemble the modifier and key by
                    # hand.
                    out += ('<Shift>+' +
                            self.keyDictReverse[(QtCore.Qt.NoModifier, key)])
                else:
                    out += '<Unknown>'
            else:
                if (QtCore.Qt.NoModifier, key) in self.keyDictReverse:
                    out += self.keyDictReverse[(QtCore.Qt.NoModifier, key)]
                else:
                    out += '<Unknown>'
            # Add a spacer.
            retVal += out + ' '
        # Return the final string (minus the last spacer).
        return retVal[:-1]
class QtmacsKeymap(dict):
    """
    Implement the required functionality for a Qtmacs key map.

    The key map is a tree of nested dictionaries: every inner node maps
    a (modifier, key) tuple either to another dictionary (the key
    sequence is still incomplete) or to a macro name (the key sequence
    is complete).

    |Args|

    ** None **

    |Raises|

    * **None**
    """
    @type_check
    def qteInsertKey(self, keysequence: QtmacsKeysequence, macroName: str):
        """
        Insert a new key into the key map and associate it with a
        macro.

        If the key sequence is already associated with a macro then it
        will be overwritten.

        |Args|

        * ``keysequence`` (**QtmacsKeysequence**): associate a macro with
          a key sequence in this key map.
        * ``macroName`` (**str**): macro name.

        |Returns|

        **None**

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Get a dedicated reference to self to facilitate traversing
        # through the key map.
        keyMap = self

        # Get the key sequence as a list of tuples, where each tuple
        # contains the control modifier and the key code, both
        # specified as Qt constants.
        keysequence = keysequence.toQtKeylist()

        # Traverse the shortcut sequence and generate new keys as
        # necessary.
        for key in keysequence[:-1]:
            # If the key does not yet exist add an empty dictionary
            # (it will be filled later).
            if key not in keyMap:
                keyMap[key] = {}

            # Similarly, if the key does exist but references anything
            # other than a dictionary (eg. a previously installed
            # macro name), then replace it.
            if not isinstance(keyMap[key], dict):
                keyMap[key] = {}

            # Go one level down in the key-map tree.
            keyMap = keyMap[key]

        # Assign the new macro object to the leaf element.
        keyMap[keysequence[-1]] = macroName

    @type_check
    def qteRemoveKey(self, keysequence: QtmacsKeysequence):
        """
        Remove ``keysequence`` from this key map.

        After the leaf element is removed, every dictionary along the
        prefix path that became empty is pruned as well, so that no
        dead branches linger in the tree.

        |Args|

        * ``keysequence`` (**QtmacsKeysequence**): key sequence to
          remove from this key map.

        |Returns|

        **None**

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Keep a reference to the root element of the key map.
        keyMapRef = self

        # Get the key sequence as a list of tuples, where each tuple
        # contains the control modifier and the key code, both
        # specified as Qt constants.
        keysequence = keysequence.toQtKeylist()

        # ------------------------------------------------------------
        # Remove the leaf element from the tree.
        # ------------------------------------------------------------
        keyMap = keyMapRef
        for key in keysequence[:-1]:
            # Quit if the key does not exist. This can happen if the
            # user tries to remove a key that has never been
            # registered.
            if key not in keyMap:
                return
            # Go one level down in the key-map tree.
            keyMap = keyMap[key]

        # The specified key sequence does not exist if the leaf
        # element (ie. last entry in the key sequence) is missing.
        if keysequence[-1] not in keyMap:
            return
        keyMap.pop(keysequence[-1])

        # ------------------------------------------------------------
        # Prune the prefix path defined by ``keysequence`` and remove
        # all empty dictionaries. Start at the leaf level.
        # ------------------------------------------------------------
        # Drop the last element in the key sequence, because it was
        # removed in the above code fragment already.
        keysequence = keysequence[:-1]

        # Successively prune now-empty dictionaries in reverse order.
        # BUGFIX: the original loop never shortened ``keysequence``
        # and inspected ``keyMap[key]`` after having already descended
        # through ``key``, which raised KeyError (or looped forever)
        # whenever pruning was actually required.
        while len(keysequence):
            # Descend from the root to the dictionary that holds the
            # current leaf.
            keyMap = keyMapRef
            for key in keysequence[:-1]:
                keyMap = keyMap[key]

            leaf = keysequence[-1]
            if len(keyMap[leaf]):
                # Another key sequence shares this prefix; the
                # dictionary is not empty, so stop pruning.
                return
            # The dictionary is empty --> remove it and move one level
            # closer to the root.
            keyMap.pop(leaf)
            keysequence = keysequence[:-1]

    @type_check
    def match(self, keysequence: QtmacsKeysequence):
        """
        Look up the key sequence in key map.

        If ``keysequence`` leads to a macro in the key map represented
        by this object then the method returns ``(macroName,
        True)``. If it does not lead to a macro but is nonetheless
        valid (ie. the sequence is still incomplete), then it returns
        ``(None, True)``. Finally, if the sequence cannot lead to a
        macro because it is invalid then the return value is ``(None,
        False)``.

        |Args|

        * ``keysequence`` (**QtmacsKeysequence**): the key sequence to
          look up in this key map.

        |Returns|

        (**str**: macro name, **bool**: partial match)

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        try:
            # Look up the ``keysequence`` in the current key map (ie.
            # this very object which inherits from ``dict``). If
            # ``keysequence`` does not lead to a valid macro then
            # return **None**.
            macroName = self
            for _ in keysequence.toQtKeylist():
                macroName = macroName[_]
        except KeyError:
            # This error occurs if the keyboard sequence does not lead
            # to any macro and is therefore invalid.
            return (None, False)

        # At this point we know that the key sequence entered so far
        # exists. Two possibilities from here on forward: 1) the key
        # sequence now points to a macro or 2) the key sequence is
        # still incomplete.
        if isinstance(macroName, dict):
            # Another dictionary --> key sequence is still incomplete.
            return (None, True)
        else:
            # Macro object --> return it.
            return (macroName, True)
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def qteIsQtmacsWidget(widgetObj):
    """
    Determine whether ``widgetObj`` belongs to the Qtmacs widget hierarchy.

    A widget is considered part of Qtmacs if it, or one of its
    ancestors in the Qt hierarchy, carries a "_qteAdmin" attribute
    (attached via ``qteAddWidget``). Since every applet has this
    attribute the function is guaranteed to return **True** for any
    widget embedded somewhere inside an applet.

    |Args|

    * ``widgetObj`` (**QWidget**): the widget to test.

    |Returns|

    * **bool**: **True** if the widget, or one of its ancestors,
      has a '_qteAdmin' attribute.

    |Raises|

    * **None**
    """
    if widgetObj is None:
        return False

    # Walk up the parent chain. Every node visited so far is recorded
    # to guard against cycles in the hierarchy.
    seen = []
    node = widgetObj
    while node not in seen:
        if hasattr(node, '_qteAdmin'):
            return True
        if node is None:
            # Reached the top of the hierarchy without a match.
            return False
        seen.append(node)
        node = node.parent()
    # A previously visited node came up again (cycle) --> no match.
    return False
def qteGetAppletFromWidget(widgetObj):
    """
    Return the parent applet of ``widgetObj``.

    The Qt parent chain is walked upwards until a widget with a
    "_qteAdmin" attribute is found; that widget's applet reference is
    returned. Cycles in the hierarchy terminate the search.

    |Args|

    * ``widgetObj`` (**QWidget**): widget (if any) for which the
      containing applet is requested.

    |Returns|

    * **QtmacsApplet**: the applet containing ``widgetObj`` or **None**.

    |Raises|

    * **None**
    """
    # Walk up the parent chain; remember visited nodes to avoid
    # infinite loops on cyclic hierarchies.
    seen = []
    node = widgetObj
    while (node is not None) and (node not in seen):
        if hasattr(node, '_qteAdmin'):
            return node._qteAdmin.qteApplet
        seen.append(node)
        node = node.parent()
    return None
class QtmacsModeBar(QtGui.QWidget):
    """
    Represent a list of modes, each represented by a ``QLabel``.

    The purpose of this class is to facilitate a flexible mechanism
    to display various modes or status flags. It consists of a list
    of modes, each with an associated value and a ``QLabel`` instance
    that are lined up horizontally.

    It is typically displayed beneath another widget eg. ``SciEditor``.
    The class takes care that all but the rightmost label are only as
    long and high as necessary.

    A typical use case inside an applet with a ``QtmacsScintilla`` widget
    could be as follows::

        # Create a mode bar instance and add some modes.
        self.qteScintilla = QtmacsScintilla(self)
        self._qteModeBar = QtmacsModeBar()
        self._qteModeBar.qteAddMode('EOL', 'U')
        self._qteModeBar.qteAddMode('READONLY', 'R')
        self._qteModeBar.qteAddMode('MODIFIED', '-')

        # Arrange the layout so that the mode bar is at the bottom.
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.qteScintilla)
        vbox.addWidget(self._qteModeBar)
        self.setLayout(vbox)

    |Args|

    * **None**

    |Raises|

    * **None**
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setLayout(QtGui.QHBoxLayout())
        # List of (mode, value, label) tuples in display order.
        self._qteModeList = []

    def _qteGetLabelInstance(self):
        """
        Return an instance of a ``QLabel`` with the correct color scheme.

        |Args|

        * **None**

        |Returns|

        * **QLabel**

        |Raises|

        * **None**
        """
        # Create a label with the proper colour appearance.
        label = QtGui.QLabel(self)
        style = 'QLabel { background-color : white; color : blue; }'
        label.setStyleSheet(style)
        return label

    def _qteUpdateLabelWidths(self):
        """
        Ensure all but the last ``QLabel`` are only as wide as necessary.

        The width of the last label is manually set to a large value to
        ensure that it stretches as much as possible. The method also
        takes care of rearranging the widgets in the correct order, ie.
        in the order specified by ``self._qteModeList``.

        |Args|

        * **None**

        |Returns|

        * **None**

        |Raises|

        * **None**
        """
        layout = self.layout()

        # Detach every item from the layout. BUGFIX: the original loop
        # used a running index with ``itemAt(ii)``/``removeItem`` which
        # skips entries because removal re-indexes the layout; always
        # take the first item instead.
        while layout.count():
            layout.takeAt(0)

        # Nothing to lay out (eg. after the last mode was removed).
        # BUGFIX: the original code raised IndexError on an empty list.
        if not self._qteModeList:
            return

        # Add all labels and ensure they have appropriate width.
        for _, value, label in self._qteModeList:
            width = label.fontMetrics().size(0, str(value)).width()
            label.setMaximumWidth(width)
            label.setMinimumWidth(width)
            layout.addWidget(label)

        # Remove the width constraint from the last label so that
        # it can expand to the right.
        _, _, label = self._qteModeList[-1]
        label.setMaximumWidth(1600000)

    @type_check
    def qteGetMode(self, mode: str):
        """
        Return a tuple containing the ``mode``, its value, and
        its associated ``QLabel`` instance.

        |Args|

        * ``mode`` (**str**): name of the mode to look up.

        |Returns|

        * (**str**, **object**, **QLabel**): (mode, value, label),
          or **None** if ``mode`` does not exist.

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        for item in self._qteModeList:
            if item[0] == mode:
                return item
        return None

    @type_check
    def qteAddMode(self, mode: str, value):
        """
        Append label for ``mode`` and display ``value`` on it.

        |Args|

        * ``mode`` (**str**): name of mode.
        * ``value`` (**object**): value of mode.

        |Returns|

        * **None**

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Add the label to the layout and the local mode list.
        label = self._qteGetLabelInstance()
        label.setText(value)
        self._qteModeList.append((mode, value, label))
        self._qteUpdateLabelWidths()

    @type_check
    def qteChangeModeValue(self, mode: str, value):
        """
        Change the value of ``mode`` to ``value``.

        If ``mode`` does not exist then nothing happens and the method
        returns **False**, otherwise **True**.

        |Args|

        * ``mode`` (**str**): name of mode.
        * ``value`` (**object**): value of mode.

        |Returns|

        * **bool**: **True** if the value was changed and **False** if
          there was an error (most likely ``mode`` does not exist).

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Search through the list for ``mode``.
        for idx, item in enumerate(self._qteModeList):
            if item[0] == mode:
                # Update the displayed value in the label.
                label = item[2]
                label.setText(value)

                # Overwrite the old data record with the updated one
                # and adjust the widths of the modes.
                self._qteModeList[idx] = (mode, value, label)
                self._qteUpdateLabelWidths()
                return True
        return False

    @type_check
    def qteInsertMode(self, pos: int, mode: str, value):
        """
        Insert ``mode`` at position ``pos``.

        If ``pos`` is negative then this is equivalent to ``pos=0``. If it
        is larger than the number of modes in the list then it is appended
        as the last element.

        |Args|

        * ``pos`` (**int**): insertion point.
        * ``mode`` (**str**): name of mode.
        * ``value`` (**object**): value associated with ``mode``.

        |Returns|

        * **None**

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Add the label to the list.
        label = self._qteGetLabelInstance()
        label.setText(value)
        self._qteModeList.insert(pos, (mode, value, label))
        self._qteUpdateLabelWidths()

    @type_check
    def qteRemoveMode(self, mode: str):
        """
        Remove ``mode`` and associated label.

        If ``mode`` does not exist then nothing happens and the method
        returns **False**, otherwise **True**.

        |Args|

        * ``mode`` (**str**): name of the mode to remove.

        |Returns|

        * **bool**: **True** if the item was removed and **False** if there
          was an error (most likely ``mode`` does not exist).

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        """
        # Search through the list for ``mode``.
        for idx, item in enumerate(self._qteModeList):
            if item[0] == mode:
                # Remove the record and delete the label.
                self._qteModeList.remove(item)
                item[2].hide()
                item[2].deleteLater()
                self._qteUpdateLabelWidths()
                return True
        return False

    def qteAllModes(self):
        """
        Return the names of all modes in display order.

        |Args|

        * **None**

        |Returns|

        * **list**: a list of all modes.

        |Raises|

        * **None**
        """
        return [_[0] for _ in self._qteModeList]
``` |
[
{
"content": "Here is the code block:\n```python\n#!/usr/bin/env python\n\nimport json\nimport math\nimport re\n\nclass PManager(object):\n def __init__(self, pm_data):\n if isinstance(pm_data, (str, unicode)):\n self.pm_data = json.loads(pm_data)\n else:\n self.pm_data = ... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport json\nimport math\nimport re\n\nclass PManager(object):\n def __init__(self, pm_data):\n if isinstance(pm_data, (str, unicode)):\n self.pm_data = json.loads(pm_data)\n else:\n ... | ```python
#!/usr/bin/env python
import json
import math
import re
class PManager(object):
    """Generate Anaconda kickstart partitioning from a volume scheme.

    ``pm_data`` carries the provisioning layout: ``ks_spaces`` describes
    disks, partitions, RAIDs, PVs and VGs (sizes presumably in MiB --
    see ``self.unit``; TODO confirm against the caller), and
    ``kernel_params`` is appended to the bootloader command line.

    Three shell/kickstart command lists are accumulated:

    * ``pre``  -- %pre commands (parted partition creation, wipes);
    * ``kick`` -- kickstart directives (partition/raid/volgroup/logvol);
    * ``post`` -- %post commands (mkfs, fstab entries, grub install).

    ``eval()`` populates the lists; ``expose()`` renders them into a
    single %pre script.

    NOTE(review): this code targets Python 2 (``unicode`` builtin,
    ``dict.iteritems``).
    """
    def __init__(self, pm_data):
        # Accept either a JSON string or an already-parsed structure.
        if isinstance(pm_data, (str, unicode)):
            self.pm_data = json.loads(pm_data)
        else:
            self.pm_data = pm_data
        self.data = self.pm_data['ks_spaces']
        self.kernel_params = self.pm_data['kernel_params']
        # Size multiplier and parted unit; every volume size is scaled
        # by ``factor`` and expressed in ``unit``.
        self.factor = 1
        self.unit = "MiB"
        # Accumulated command lists (see class docstring).
        self._pre = []
        self._kick = []
        self._post = []
        self.raid_count = 0
        # Per-disk partition counter and running end offset (MiB).
        self._pcount = {}
        self._pend = {}
        # Global counters for raid.NNN / pv.NNN kickstart names.
        self._rcount = 0
        self._pvcount = 0
    def _pseparator(self, devname):
        """Return 'p' for cciss devices (c0d0p1 naming), else ''."""
        pseparator = ''
        if devname.find('cciss') >= 0:
            pseparator = 'p'
        return pseparator
    def pcount(self, disk_id, increment=0):
        """Add ``increment`` to the partition counter of ``disk_id``
        and return the new value."""
        self._pcount[disk_id] = self._pcount.get(disk_id, 0) + increment
        return self._pcount.get(disk_id, 0)
    def psize(self, disk_id, increment=0):
        """Advance the running partition end offset of ``disk_id`` by
        ``increment`` and return the new offset."""
        self._pend[disk_id] = self._pend.get(disk_id, 0) + increment
        return self._pend.get(disk_id, 0)
    def rcount(self, increment=0):
        """Advance and return the global raid.NNN counter."""
        self._rcount += increment
        return self._rcount
    def pvcount(self, increment=0):
        """Advance and return the global pv.NNN counter."""
        self._pvcount += increment
        return self._pvcount
    def pre(self, command=None):
        """Append ``command`` to the %pre list, or return the list."""
        if command:
            return self._pre.append(command)
        return self._pre
    def kick(self, command=None):
        """Append ``command`` to the kickstart list, or return it."""
        if command:
            return self._kick.append(command)
        return self._kick
    def post(self, command=None):
        """Append ``command`` to the %post list, or return the list."""
        if command:
            return self._post.append(command)
        return self._post
    def _disk_dev(self, disk):
        """Build a shell expression that resolves ``disk`` to its /dev
        node, trying each name in ``disk['extra']`` before ``disk['id']``
        and canonicalising symlinks via readlink."""
        command = "$(readlink -f $( ("
        command += " || ".join(["ls /dev/{0}".format(d)
                                for d in disk.get("extra", [])])
        if disk["extra"]:
            command += " || "
        command += "ls /dev/{0}".format(disk["id"])
        command += ") 2>/dev/null) )"
        return command
    def iterdisks(self):
        """Yield every ks_spaces entry that is a disk with size > 0."""
        for item in self.data:
            if item["type"] == "disk" and item["size"] > 0:
                yield item
    def get_partition_count(self, name):
        """Count non-empty volumes named ``name`` across all disks."""
        count = 0
        for disk in self.iterdisks():
            count += len([v for v in disk["volumes"]
                          if v.get('name') == name and v['size'] > 0])
        return count
    def num_ceph_journals(self):
        """Number of ceph journal partitions across all disks."""
        return self.get_partition_count('cephjournal')
    def num_ceph_osds(self):
        """Number of ceph OSD partitions across all disks."""
        return self.get_partition_count('ceph')
    def _gettabfstype(self, vol):
        """Filesystem type for the fstab entry of ``vol``; explicit
        ``file_system`` wins, then mount-point defaults, else xfs."""
        if vol.get("file_system"):
            return vol["file_system"]
        elif vol["mount"] == "/":
            return "ext4"
        elif vol["mount"] == "/boot":
            return "ext3"
        elif vol["mount"] == "swap":
            return "swap"
        return "xfs"
    def _gettabfsoptions(self, vol):
        """Extra mkfs flags per filesystem type (xfs needs forcing)."""
        if self._gettabfstype(vol) == "xfs":
            return "-f -s size=512"
        return ""
    def _getfstype(self, vol):
        """Kickstart ``--fstype`` option for ``vol`` ('' for swap)."""
        fstype = self._gettabfstype(vol)
        if fstype == "swap":
            return ""
        return "--fstype=%s" % fstype
    def _getlabel(self, label):
        """Return an mkfs label option, or '' if no label given."""
        if not label:
            return ""
        # XFS will refuse to format a partition if the
        # disk label is > 12 characters.
        return " -L {0} ".format(label[:12])
    def _parttype(self, n):
        """Partition type for parted's mkpart; always 'primary'."""
        return "primary"
    def _getsize(self, vol):
        """Anaconda has hard coded limitation in 16TB
        for ext3/4 and xfs filesystems (the only filesystems
        we are supposed to use). Besides there is no stable
        64-bit ext4 implementation at the moment, so the
        limitation in 16TB for ext4 is not only
        anaconda limitation."""

        """Root partition can not be located on xfs file system
        therefore we check if root filesystem is larger
        than 16TB and set it size into 16TB if it is larger.
        It is necessary to note that to format 16TB
        volume on ext4 it is needed about 1G memory."""
        if vol["size"] > 16777216 and vol["mount"] == "/":
            return 16777216
        return vol["size"]
    def erase_lvm_metadata(self):
        """Queue %pre commands that remove every VG and PV."""
        self.pre("for v in $(vgs | awk '{print $1}'); do "
                 "vgreduce -f --removemissing $v; vgremove -f $v; done")
        self.pre("for p in $(pvs | grep '\/dev' | awk '{print $1}'); do "
                 "pvremove -ff -y $p ; done")
    def erase_raid_metadata(self):
        """Queue %pre commands that zero mdraid superblocks."""
        for disk in self.iterdisks():
            self.pre("mdadm --zero-superblock --force {0}*"
                     "".format(self._disk_dev(disk)))
    def clean(self, disk):
        """Queue %pre commands wiping the first 10 MiB of ``disk`` and
        re-reading its partition table."""
        self.pre("hdparm -z {0}".format(self._disk_dev(disk)))
        self.pre("test -e {0} && dd if=/dev/zero "
                 "of={0} bs=1M count=10".format(
                     self._disk_dev(disk)))
        self.pre("sleep 10")
        self.pre("hdparm -z {0}".format(self._disk_dev(disk)))
    def gpt(self, disk):
        """Queue a %pre command writing a fresh GPT label on ``disk``."""
        self.pre("parted -s {0} mklabel gpt".format(self._disk_dev(disk)))
    def bootable(self, disk):
        """Create and mark Bios Boot partition to which grub will
        embed its code later, useable for legacy boot.
        May be way smaller, but be aware that the parted may
        shrink 1M partition to zero at some disks and versions."""
        self.pre("parted -a none -s {0} "
                 "unit {3} mkpart primary {1} {2}".format(
                     self._disk_dev(disk),
                     self.psize(disk["id"]),
                     self.psize(disk["id"], 24 * self.factor),
                     self.unit
                 )
        )
        self.pre("parted -s {0} set {1} bios_grub on".format(
            self._disk_dev(disk),
            self.pcount(disk["id"], 1)
            )
        )

        """Create partition for the EFI boot, minimum
        size is 100M, recommended is 200M, with fat32 and
        future mountpoint in the /boot/efi. There is also
        '/usr/sbin/parted -s /dev/sda set 2 boot on'
        which is strictly needed for EFI boot."""
        self.pre("parted -a none -s {0} "
                 "unit {3} mkpart primary fat32 {1} {2}".format(
                     self._disk_dev(disk),
                     self.psize(disk["id"]),
                     self.psize(disk["id"], 200 * self.factor),
                     self.unit
                 )
        )
        self.pre("parted -s {0} set {1} boot on".format(
            self._disk_dev(disk),
            self.pcount(disk["id"], 1)
            )
        )
    def boot(self):
        """Lay out only the /boot plain partitions and RAIDs; they must
        come first so they land at the front of every disk."""
        self.plains(volume_filter=lambda x: x["mount"] == "/boot")
        self.raids(volume_filter=lambda x: x["mount"] == "/boot")
    def notboot(self):
        """Lay out every plain partition and RAID except /boot."""
        self.plains(volume_filter=lambda x: x["mount"] != "/boot")
        self.raids(volume_filter=lambda x: x["mount"] != "/boot")
    def plains(self, volume_filter=None):
        """Create plain partitions (incl. ceph journals) that pass
        ``volume_filter``: parted in %pre, kickstart 'partition' lines
        where Anaconda can handle the filesystem, mkfs/fstab in %post
        otherwise."""
        if not volume_filter:
            volume_filter = lambda x: True
        ceph_osds = self.num_ceph_osds()
        journals_left = ceph_osds
        ceph_journals = self.num_ceph_journals()
        for disk in self.iterdisks():
            for part in filter(lambda p: p["type"] == "partition" and
                               volume_filter(p), disk["volumes"]):
                if part["size"] <= 0:
                    continue
                if part.get('name') == 'cephjournal':
                    # We need to allocate a journal partition for each ceph OSD
                    # Determine the number of journal partitions we need on each device
                    ratio = math.ceil(float(ceph_osds) / ceph_journals)
                    # No more than 10GB will be allocated to a single journal partition
                    size = part["size"] / ratio
                    if size > 10240:
                        size = 10240
                    # This will attempt to evenly spread partitions across
                    # multiple devices e.g. 5 osds with 2 journal devices will
                    # create 3 partitions on the first device and 2 on the
                    # second
                    if ratio < journals_left:
                        end = ratio
                    else:
                        end = journals_left
                    for i in range(0, end):
                        journals_left -= 1
                        pcount = self.pcount(disk["id"], 1)
                        # NOTE(review): uses /dev/{id} directly here,
                        # unlike the _disk_dev() lookup used elsewhere.
                        self.pre("parted -a none -s /dev/{0} "
                                 "unit {4} mkpart {1} {2} {3}".format(
                                     disk["id"],
                                     self._parttype(pcount),
                                     self.psize(disk["id"]),
                                     self.psize(disk["id"], size * self.factor),
                                     self.unit))
                        self.post("chroot /mnt/sysimage sgdisk "
                                  "--typecode={0}:{1} /dev/{2}".format(
                                      pcount, part["partition_guid"],disk["id"]))
                    continue
                pcount = self.pcount(disk["id"], 1)
                self.pre("parted -a none -s {0} "
                         "unit {4} mkpart {1} {2} {3}".format(
                             self._disk_dev(disk),
                             self._parttype(pcount),
                             self.psize(disk["id"]),
                             self.psize(disk["id"], part["size"] * self.factor),
                             self.unit))
                fstype = self._getfstype(part)
                size = self._getsize(part)
                tabmount = part["mount"] if part["mount"] != "swap" else "none"
                tabfstype = self._gettabfstype(part)
                tabfsoptions = self._gettabfsoptions(part)
                if part.get("partition_guid"):
                    self.post("chroot /mnt/sysimage sgdisk "
                              "--typecode={0}:{1} {2}".format(
                                  pcount, part["partition_guid"],
                                  self._disk_dev(disk)))
                # Anaconda can only format <=16TB non-xfs filesystems;
                # everything else is formatted manually in %post.
                if size > 0 and size <= 16777216 and part["mount"] != "none" \
                        and tabfstype != "xfs":
                    self.kick("partition {0} "
                              "--onpart={2}"
                              "{3}{4}".format(part["mount"], size,
                                              self._disk_dev(disk),
                                              self._pseparator(disk["id"]),
                                              pcount))
                else:
                    if part["mount"] != "swap" and tabfstype != "none":
                        disk_label = self._getlabel(part.get('disk_label'))
                        self.post("mkfs.{0} {1} {2}"
                                  "{3}{4} {5}".format(
                                      tabfstype,
                                      tabfsoptions,
                                      self._disk_dev(disk),
                                      self._pseparator(disk["id"]),
                                      pcount, disk_label))
                    if part["mount"] != "none":
                        self.post("mkdir -p /mnt/sysimage{0}".format(
                            part["mount"]))
                    if tabfstype != "none":
                        # fstab entries use the UUID looked up at %post
                        # time so device renames do not break booting.
                        self.post("echo 'UUID=$(blkid -s UUID -o value "
                                  "{0}{1}{2}) "
                                  "{3} {4} defaults 0 0'"
                                  " >> /mnt/sysimage/etc/fstab".format(
                                      self._disk_dev(disk),
                                      self._pseparator(disk["id"]),
                                      pcount, tabmount, tabfstype))
    def raids(self, volume_filter=None):
        """Create software-RAID member partitions for volumes passing
        ``volume_filter`` and assemble them: via kickstart 'raid' lines
        when possible, via mdadm in %post when a label is wanted or the
        array has a single drive."""
        if not volume_filter:
            volume_filter = lambda x: True
        raids = {}
        raid_info = {}
        phys = {}
        for disk in self.iterdisks():
            for raid in filter(lambda p: p["type"] == "raid" and
                               volume_filter(p), disk["volumes"]):
                if raid["size"] <= 0:
                    continue
                raid_info[raid["mount"]] = raid
                pcount = self.pcount(disk["id"], 1)
                if not phys.get(raid["mount"]):
                    phys[raid["mount"]] = []
                phys[raid["mount"]].append("{0}{1}{2}".
                                           format(self._disk_dev(disk),
                                                  self._pseparator(disk["id"]), pcount))
                rname = "raid.{0:03d}".format(self.rcount(1))
                begin_size = self.psize(disk["id"])
                end_size = self.psize(disk["id"], raid["size"] * self.factor)
                self.pre("parted -a none -s {0} "
                         "unit {4} mkpart {1} {2} {3}".format(
                             self._disk_dev(disk), self._parttype(pcount),
                             begin_size, end_size, self.unit))
                self.kick("partition {0} "
                          "--onpart={2}{3}{4}"
                          "".format(rname, raid["size"], self._disk_dev(disk),
                                    self._pseparator(disk["id"]), pcount))
                if not raids.get(raid["mount"]):
                    raids[raid["mount"]] = []
                raids[raid["mount"]].append(rname)
        for (num, (mount, rnames)) in enumerate(raids.iteritems()):
            raid = raid_info[mount]
            fstype = self._gettabfstype(raid)
            fsoptions = self._gettabfsoptions(raid)
            label = raid.get('disk_label')
            # Anaconda won't label a RAID array. It also can't create
            # a single-drive RAID1 array, but mdadm can.
            if label or len(rnames) == 1:
                if len(rnames) == 1:
                    # A degraded RAID1 with a 'missing' member keeps
                    # the array expandable later.
                    phys[mount].append('missing')
                self.post("mdadm --create /dev/md{0} --run --level=1 "
                          "--raid-devices={1} {2}".format(self.raid_count,
                              len(phys[mount]), ' '.join(phys[mount])))
                self.post("mkfs.{0} {1} {2} /dev/md{3}".format(
                    fstype, fsoptions,
                    self._getlabel(label), self.raid_count))
                self.post("mdadm --detail --scan | grep '\/dev\/md{0}'"
                          ">> /mnt/sysimage/etc/mdadm.conf".format(
                              self.raid_count))
                self.post("mkdir -p /mnt/sysimage{0}".format(mount))
                self.post("echo \\\"UUID=\$(blkid -s UUID -o value "
                          "/dev/md{0}) "
                          "{1} {2} defaults 0 0\\\""
                          " >> /mnt/sysimage/etc/fstab".format(
                              self.raid_count, mount, fstype))
            else:
                self.kick("raid {0} --device md{1} --fstype {3} "
                          "--level=RAID1 {2}".format(mount, self.raid_count,
                              " ".join(rnames), fstype))
            self.raid_count += 1
    def pvs(self):
        """Create LVM physical-volume partitions and group them into
        kickstart 'volgroup' directives."""
        pvs = {}
        for disk in self.iterdisks():
            for pv in [p for p in disk["volumes"] if p["type"] == "pv"]:
                if pv["size"] <= 0:
                    continue
                pcount = self.pcount(disk["id"], 1)
                pvname = "pv.{0:03d}".format(self.pvcount(1))
                begin_size = self.psize(disk["id"])
                end_size = self.psize(disk["id"], pv["size"] * self.factor)
                self.pre("parted -a none -s {0} "
                         "unit {4} mkpart {1} {2} {3}".format(
                             self._disk_dev(disk), self._parttype(pcount),
                             begin_size, end_size, self.unit))
                self.kick("partition {0} "
                          "--onpart={2}{3}{4}"
                          "".format(pvname, pv["size"], self._disk_dev(disk),
                                    self._pseparator(disk["id"]), pcount))
                if not pvs.get(pv["vg"]):
                    pvs[pv["vg"]] = []
                pvs[pv["vg"]].append(pvname)
        for vg, pvnames in pvs.iteritems():
            self.kick("volgroup {0} {1}".format(vg, " ".join(pvnames)))
    def lvs(self):
        """Create logical volumes: kickstart 'logvol' where Anaconda
        can format them, otherwise lvcreate/mkfs/fstab in %post."""
        for vg in [g for g in self.data if g["type"] == "vg"]:
            for lv in vg["volumes"]:
                if lv["size"] <= 0:
                    continue
                fstype = self._getfstype(lv)
                size = self._getsize(lv)
                tabmount = lv["mount"] if lv["mount"] != "swap" else "none"
                tabfstype = self._gettabfstype(lv)
                tabfsoptions = self._gettabfsoptions(lv)
                if size > 0 and size <= 16777216 and tabfstype != "xfs":
                    self.kick("logvol {0} --vgname={1} --size={2} "
                              "--name={3} {4}".format(
                                  lv["mount"], vg["id"], size,
                                  lv["name"], fstype))
                else:
                    self.post("lvcreate --size {0} --name {1} {2}".format(
                        size, lv["name"], vg["id"]))
                    if lv["mount"] != "swap" and tabfstype != "none":
                        self.post("mkfs.{0} {1} /dev/mapper/{2}-{3}".format(
                            tabfstype, tabfsoptions, vg["id"], lv["name"]))
                        self.post("mkdir -p /mnt/sysimage{0}"
                                  "".format(lv["mount"]))
                    if tabfstype != "none":
                        """
                        The name of the device. An LVM device is
                        expressed as the volume group name and the logical
                        volume name separated by a hyphen. A hyphen in
                        the original name is translated to two hyphens.
                        """
                        self.post("echo '/dev/mapper/{0}-{1} {2} {3} "
                                  "defaults 0 0'"
                                  " >> /mnt/sysimage/etc/fstab".format(
                                      vg["id"].replace("-", "--"),
                                      lv["name"].replace("-", "--"),
                                      tabmount, tabfstype))
    def bootloader(self):
        """Emit the kickstart 'bootloader' directive and %post commands
        that embed legacy grub on every disk via a scripted grub shell."""
        devs = []
        for disk in self.iterdisks():
            devs.append("$(basename {0})"
                        "".format(self._disk_dev(disk)))
        if devs:
            self.kick("bootloader --location=mbr --driveorder={0} "
                      "--append=' {1} '".format(
                          ",".join(devs),
                          self.kernel_params))
            for dev in devs:
                self.post("echo -n > /tmp/grub.script")
                self.post("echo \\\"device (hd0) /dev/{0}\\\" >> "
                          "/tmp/grub.script".format(dev))
                """
                This means that we set drive geometry manually into to
                avoid grub register overlapping. We set it so that grub
                thinks disk size is equal to 1G.
                130 cylinders * (16065 * 512 = 8225280 bytes) = 1G
                """
                self.post("echo \\\"geometry (hd0) 130 255 63\\\" >> "
                          "/tmp/grub.script")
                self.post("echo \\\"root (hd0,2)\\\" >> /tmp/grub.script")
                self.post("echo \\\"install /grub/stage1 (hd0) /grub/stage2 p "
                          "/grub/grub.conf\\\" >> /tmp/grub.script")
                self.post("echo quit >> /tmp/grub.script")
                self.post("cat /tmp/grub.script | chroot /mnt/sysimage "
                          "/sbin/grub --no-floppy --batch")
    def expose(self,
               kickfile="/tmp/partition.ks",
               postfile="/tmp/post_partition.ks"
               ):
        """Render the pre/kick/post lists into one %pre shell script
        that writes ``kickfile`` and ``postfile`` on the target."""
        result = ""
        for pre in self.pre():
            result += "{0}\n".format(pre)
        result += "echo > {0}\n".format(kickfile)
        for kick in self.kick():
            result += "echo \"{0}\" >> {1}\n".format(kick, kickfile)
        result += "echo \"%post --nochroot\" > {0}\n".format(postfile)
        result += "echo \"set -x -v\" >> {0}\n".format(postfile)
        result += ("echo \"exec 1>/mnt/sysimage/root/post-partition.log "
                   "2>&1\" >> {0}\n".format(postfile))
        for post in self.post():
            result += "echo \"{0}\" >> {1}\n".format(post, postfile)
        result += "echo \"%end\" >> {0}\n".format(postfile)
        return result
    def eval(self):
        """Populate all command lists in the required order: wipe and
        relabel disks, /boot first, then the rest, PVs, LVs, bootloader,
        and finally metadata cleanup."""
        for disk in self.iterdisks():
            self.clean(disk)
            self.gpt(disk)
            self.bootable(disk)
        self.boot()
        self.notboot()
        self.pvs()
        self.lvs()
        self.bootloader()
        self.pre("sleep 10")
        for disk in self.iterdisks():
            self.pre("hdparm -z {0}".format(self._disk_dev(disk)))
        self.erase_lvm_metadata()
        self.erase_raid_metadata()
class PreseedPManager(object):
def __init__(self, pm_data):
if isinstance(pm_data, (str, unicode)):
self.pm_data = json.loads(pm_data)
else:
self.pm_data = pm_data
self.data = self.pm_data['ks_spaces']
self.kernel_params = self.pm_data['kernel_params']
self.validate()
self.factor = 1
self.unit = "MiB"
self.disks = sorted([self._disk_dev(d) for d in self.iterdisks()])
self.os_disk = self.os_disks()[0]
self._pcount = {}
self._pend = {}
self._recipe = []
self._late = []
self._early = []
def os_disks(self):
return [self._disk_dev(d) for d in self.iterdisks() if
filter(lambda x: x.get("vg") == "os" and
x.get("size") > 0, d["volumes"])]
def validate(self):
# os volume group can not be distributed over more than one disk.
# it is because we use plain partition for / and swap on ubuntu.
if len(self.os_disks()) > 1:
raise Exception("OS volume group must be located on one disk")
def _disk_dev(self, disk):
command = "$(readlink -f $( ("
command += " || ".join(["ls /dev/{0}".format(d)
for d in disk.get("extra", [])])
if disk["extra"]:
command += " || "
command += "ls /dev/{0}".format(disk["id"])
command += ") 2>/dev/null) )"
return command
def iterdisks(self):
for item in self.data:
if item["type"] == "disk" and item["size"] > 0:
yield item
def recipe(self, command=None):
if command:
return self._recipe.append(command)
return self._recipe
def late(self, command=None, in_target=False):
if command:
return self._late.append((command, in_target))
return self._late
def early(self, command=None):
if command:
return self._early.append(command)
return self._early
def _pseparator(self, devname):
pseparator = ''
if devname.find('cciss') >= 0:
pseparator = 'p'
return pseparator
def _getlabel(self, label):
if not label:
return ""
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return " -L {0} ".format(label[:12])
def _parttype(self, n):
return "primary"
def _fsoptions(self, fstype):
if fstype == "xfs":
return "-f -s size=512"
return ""
def _umount_target(self):
self.late("umount /target/dev")
self.late("umount /target/sys")
self.late("umount /target/proc")
self.late("umount /target/boot")
self.late("umount /target")
self.late("umount {0}{1}3".format(self.os_disk,
self._pseparator(self.os_disk)))
self.late("swapoff {0}{1}4".format(self.os_disk,
self._pseparator(self.os_disk)))
def _mount_target(self):
self.late("mount {0}{1}3 /target".format(self.os_disk,
self._pseparator(self.os_disk)))
self.late("mount {0}{1}2 /target/boot".format(self.os_disk,
self._pseparator(self.os_disk)))
self.late("mount -t proc none /target/proc")
self.late("mount -o bind /dev /target/dev")
self.late("mount -o bind /sys /target/sys")
self.late("swapon {0}{1}4".format(self.os_disk,
self._pseparator(self.os_disk)))
def _long_logger(self):
"""This method puts script which splits
long line and sends them to logger
#!/bin/sh
chunk=80
while read string; do
iterations=`expr ${#string} / $chunk + 1`; i=0;
while [ $i -le $(( iterations - 1)) ]; do
start_sym=$(( $i * ${chunk} + 1 ))
end_sym=$(( $(( $i + 1 )) * ${chunk}))
echo $string | cut -c ${start_sym}-${end_sym} | logger
i=$(( i + 1 )); done; done;
"""
return (
"""echo '#!/bin/sh' > /tmp/long_logger.sh;""",
"""echo 'chunk=80;' >> /tmp/long_logger.sh;""",
"""echo 'while read string; do' >> /tmp/long_logger.sh;""",
"""echo 'iterations=`expr ${#string} / $chunk + 1`; i=0;' >> /tmp/long_logger.sh;""",
"""echo 'while [ $i -le $(( iterations - 1)) ]; do' >> /tmp/long_logger.sh;""",
"""echo 'start_sym=$(( $i * ${chunk} + 1 ))' >> /tmp/long_logger.sh;""",
"""echo 'end_sym=$(( $(( $i + 1 )) * ${chunk}))' >> /tmp/long_logger.sh;""",
"""echo 'echo $string | cut -c ${start_sym}-${end_sym} | logger' >> /tmp/long_logger.sh;""",
"""echo 'i=$(( i + 1 )); done; done;' >> /tmp/long_logger.sh;""",
"""chmod +x /tmp/long_logger.sh;"""
)
def non_boot_partitions(self, volumes):
for part in filter(lambda p: p["type"] == "partition" and
p["mount"] != "/boot", volumes):
if part["size"] > 0:
yield part
def pcount(self, disk_id, increment=0):
self._pcount[disk_id] = self._pcount.get(disk_id, 0) + increment
return self._pcount.get(disk_id, 0)
def psize(self, disk_id, increment=0):
    """Advance and return the running size offset for *disk_id*.

    Tracks, in self._pend, where the next partition on the disk should
    begin; returns the offset after adding *increment*.
    """
    new_offset = self._pend.get(disk_id, 0) + increment
    self._pend[disk_id] = new_offset
    return new_offset
def get_partition_count(self, name):
    """Count volumes named *name* with a positive size over all disks."""
    return sum(
        1
        for disk in self.iterdisks()
        for volume in disk["volumes"]
        if volume.get('name') == name and volume['size'] > 0
    )
def num_ceph_journals(self):
    """Total number of non-empty 'cephjournal' volumes on all disks."""
    return self.get_partition_count('cephjournal')
def num_ceph_osds(self):
    """Total number of non-empty 'ceph' (OSD) volumes on all disks."""
    return self.get_partition_count('ceph')
def erase_partition_table(self):
    """Queue early commands that wipe each disk's partition table.

    For every disk: zero the first 10 MiB (only if the device node
    exists), wait for the kernel/udev to settle, force a partition
    table re-read and log the resulting layout.
    """
    for disk in self.iterdisks():
        device = self._disk_dev(disk)
        self.early("test -e {0} && "
                   "dd if=/dev/zero of={0} "
                   "bs=1M count=10".format(device))
        self.early("sleep 10")
        self.early("hdparm -z {0}".format(device))
        self.early("parted -s {0} print free".format(device))
def log_lvm(self, line, early=True):
    """Queue commands that dump the current LVM state to the log.

    :param line: marker text echoed before the vgs/pvs output.
    :param early: queue via self.early when True, self.late otherwise.
    """
    emit = self.early if early else self.late
    emit("echo \"=== {0} ===\"".format(line))
    emit("vgs -a --noheadings")
    emit("pvs --noheadings")
def erase_lvm_metadata(self, early=True):
    """Queue commands removing every volume group and physical volume.

    Each VG is reduced (dropping missing PVs) and force-removed, then
    every remaining PV is wiped.  Errors from vgs/pvs are discarded so
    the commands are no-ops on a clean system.

    :param early: queue via self.early when True, self.late otherwise.
    """
    emit = self.early if early else self.late
    emit("for v in $(vgs -a --noheadings 2>/dev/null | "
         "sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
         "vgreduce --force --removemissing $v; "
         "vgremove --force $v; done")
    emit("for p in $(pvs --noheadings 2>/dev/null | "
         "sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
         "pvremove -ff -y $p; done")
def boot(self):
    """Queue partman recipe entries for the boot area of the OS disk.

    Creates a 24 MiB bios_grub partition (so GRUB can embed its core
    image on a GPT disk) followed by a 200 MiB bootable ext3 /boot
    partition, keeping the disk's partition counter (pcount) and size
    offset (psize) in sync with what the recipe will create.
    """
    # 24 MiB reserved BIOS boot area; "biosgrub" keeps partman from
    # formatting it.
    self.recipe("24 24 24 ext3 "
                "$gptonly{ } "
                "$bios_boot{ } "
                "method{ biosgrub } .")
    self.psize(self.os_disk, 24 * self.factor)
    self.pcount(self.os_disk, 1)
    # Flag the partition we just accounted for as bios_grub.
    self.late("parted -s $(readlink -f {0}) set {1} bios_grub on".format(
        self.os_disk,
        self.pcount(self.os_disk)
        )
    )
    # 200 MiB primary, bootable ext3 partition mounted on /boot.
    self.recipe("200 200 200 ext3 $primary{ } "
                "$gptonly{ } "
                "$bootable{ } method{ format } format{ } use_filesystem{ } "
                "filesystem{ ext3 } mountpoint{ /boot } .")
    self.pcount(self.os_disk, 1)
    self.psize(self.os_disk, 200 * self.factor)
def os(self):
    """Queue recipe entries and commands for the base OS volumes.

    Reads root and swap sizes from the "os" volume group definition in
    self.data and emits recipe entries for / (ext4) and swap.
    NOTE(review): assumes the "os" vg always contains both a "/" and a
    "swap" volume -- root_size/swap_size would be unbound otherwise;
    confirm the upstream data guarantees this.
    """
    for vg in [v for v in self.data
               if v["type"] == "vg" and v["id"] == "os"]:
        for vol in vg["volumes"]:
            if vol["mount"] == "swap":
                swap_size = vol["size"]
            elif vol["mount"] == "/":
                root_size = vol["size"]
    # Root filesystem ({{ }} escapes literal braces in str.format).
    self.recipe("{0} {0} {0} ext4 "
                "$gptonly{{ }} "
                "method{{ format }} format{{ }} use_filesystem{{ }} "
                "filesystem{{ ext4 }} mountpoint{{ / }} ."
                "".format(root_size))
    self.pcount(self.os_disk, 1)
    self.psize(self.os_disk, root_size * self.factor)
    # Swap partition.
    self.recipe("{0} {0} {0} linux-swap "
                "$gptonly{{ }} "
                "method{{ swap }} format{{ }} .".format(swap_size))
    self.pcount(self.os_disk, 1)
    self.psize(self.os_disk, swap_size * self.factor)
    """
    We need this line because debian-installer takes total disk space
    for the last partition. So to be able to allocate custom partitions
    during the late stage we need to create fake swap partition that
    we then destroy.
    """
    self.recipe("1 1 -1 ext3 $gptonly{ } method{ keep } .")
    self.late("parted -s {0} print free".format(self.os_disk))
    # The fake partition (number 5) is removed during the late stage;
    # the target must be unmounted while the table is rewritten.
    self._umount_target()
    self.late("parted {0} rm 5".format(self.os_disk))
    self.late("sleep 10")
    self.late("hdparm -z {0}".format(self.os_disk))
    self.late("parted -s {0} print free".format(self.os_disk))
    self.late("find /dev \( -type l -o -type b \) -exec ls -l {} \;")
    self._mount_target()
def partitions(self):
    """Queue late commands creating all custom (non-OS) partitions.

    For each disk that is still untouched a GPT label plus a 24 MiB
    bios_grub partition is created first.  Then, per volume:
    'cephjournal' volumes are split into up to one journal partition
    per ceph OSD (capped at 10 GiB each, spread evenly over the
    journal devices); every other volume becomes a plain partition
    that is wiped, optionally formatted, tagged with a partition GUID
    (in-target, via sgdisk) and finally added to /target/etc/fstab.

    BUGFIX: the fstab loop previously reused the ``tabmount`` value
    left over from the partition-creation loop, so every fstab entry
    got the mount point of whichever partition happened to be
    processed last; ``tabmount`` is now computed per partition where
    it is consumed.
    """
    ceph_osds = self.num_ceph_osds()
    journals_left = ceph_osds
    ceph_journals = self.num_ceph_journals()

    self._umount_target()
    for disk in self.iterdisks():
        for part in self.non_boot_partitions(disk["volumes"]):
            if self.pcount(self._disk_dev(disk)) == 0:
                # First partition on this disk: GPT label plus a
                # 24 MiB bios_grub area for GRUB's core image.
                self.late("parted -s {0} mklabel gpt"
                          "".format(self._disk_dev(disk)))
                self.late("parted -a none -s {0} "
                          "unit {3} mkpart primary {1} {2}".format(
                              self._disk_dev(disk),
                              self.psize(self._disk_dev(disk)),
                              self.psize(self._disk_dev(disk),
                                         24 * self.factor),
                              self.unit))
                self.late("parted -s {0} set {1} "
                          "bios_grub on".format(
                              self._disk_dev(disk),
                              self.pcount(self._disk_dev(disk), 1)))
            self.late("parted -s {0} print free".format(self._disk_dev(disk)))

            if part.get('name') == 'cephjournal':
                # We need to allocate a journal partition for each ceph
                # OSD.  Determine the number of journal partitions we
                # need on each device.
                ratio = math.ceil(float(ceph_osds) / ceph_journals)
                # No more than 10GB will be allocated to a single
                # journal partition.
                size = part["size"] / ratio
                if size > 10240:
                    size = 10240
                # This will attempt to evenly spread partitions across
                # multiple devices e.g. 5 osds with 2 journal devices
                # will create 3 partitions on the first device and 2 on
                # the second.
                if ratio < journals_left:
                    end = ratio
                else:
                    end = journals_left
                for i in range(0, end):
                    journals_left -= 1
                    pcount = self.pcount(self._disk_dev(disk), 1)
                    part["pcount"] = pcount
                    self.late("parted -a none -s {0} "
                              "unit {4} mkpart {1} {2} {3}".format(
                                  self._disk_dev(disk),
                                  self._parttype(pcount),
                                  self.psize(self._disk_dev(disk)),
                                  self.psize(self._disk_dev(disk),
                                             size * self.factor),
                                  self.unit))
                    self.late("parted -s {0} print free".format(self._disk_dev(disk)))
                continue

            pcount = self.pcount(self._disk_dev(disk), 1)
            part["pcount"] = pcount
            self.late("parted -a none -s {0} "
                      "unit {4} mkpart {1} {2} {3}".format(
                          self._disk_dev(disk),
                          self._parttype(pcount),
                          self.psize(self._disk_dev(disk)),
                          self.psize(self._disk_dev(disk),
                                     part["size"] * self.factor),
                          self.unit))
            self.late("sleep 10")
            self.late("hdparm -z {0}"
                      "".format(self._disk_dev(disk)))
            self.late("parted -s {0} print free".format(self._disk_dev(disk)))
            self.late("find /dev \( -type l -o -type b \) -exec ls -l {} \;")
            self.late("mount")
            self.late("cat /proc/swaps")
            self.late("cat /proc/mdstat")
            self.late("cat /proc/partitions")
            # clear any fs info that may remain on newly created partition
            self.late("dd if=/dev/zero of={0}{1}{2} bs=1M count=10"
                      "".format(self._disk_dev(disk),
                                self._pseparator(disk["id"]),
                                pcount))
            if part.get("file_system", "xfs") not in ("swap", None, "none"):
                disk_label = self._getlabel(part.get("disk_label"))
                self.late("mkfs.{0} {1} {2}{3}{4} {5}"
                          "".format(part.get("file_system", "xfs"),
                                    self._fsoptions(part.get("file_system", "xfs")),
                                    self._disk_dev(disk),
                                    self._pseparator(disk["id"]),
                                    pcount, disk_label))
    self._mount_target()
    # partition guids must be set in-target, which requires target to be mounted
    for disk in self.iterdisks():
        for part in self.non_boot_partitions(disk["volumes"]):
            if part.get("partition_guid"):
                self.late("sgdisk --typecode={0}:{1} {2}"
                          "".format(part["pcount"], part["partition_guid"],
                                    self._disk_dev(disk)), True)
    for disk in self.iterdisks():
        for part in filter(lambda p: p["type"] == "partition" and
                           p["mount"] != "/boot" and p["size"] > 0 and
                           p.get('name') != 'cephjournal',
                           disk["volumes"]):
            # Compute the fstab mount column for THIS partition (swap
            # entries use "none").
            tabmount = part["mount"] if part["mount"] != "swap" else "none"
            if not part["mount"] in (None, "none", "swap"):
                self.late("mkdir -p /target{0}".format(part["mount"]))
            if not part["mount"] in (None, "none"):
                # NOTE(review): the $(blkid ...) substitution is inside
                # single quotes and only expands if this line is later
                # re-evaluated by a shell -- confirm against expose_late().
                self.late("echo 'UUID=$(blkid -s UUID -o value "
                          "{0}{1}{2}) "
                          "{3} {4} {5} 0 0'"
                          " >> /target/etc/fstab"
                          "".format(
                              self._disk_dev(disk),
                              self._pseparator(disk["id"]),
                              part["pcount"], tabmount,
                              part.get("file_system", "xfs"),
                              ("defaults" if part["mount"] != "swap"
                               else "sw")))
def lv(self):
    """Queue late commands creating LVM PVs, VGs and LVs.

    Handles every volume group except "os" (debian-installer creates
    that one itself): allocates one partition per PV, cleans stray LVM
    metadata between steps, then creates the volume groups and logical
    volumes, formats them and adds mountable ones to /target/etc/fstab.
    Heavy logging (log_lvm / parted print) is interleaved on purpose to
    make installer failures diagnosable.
    """
    self.log_lvm("before creating lvm", False)
    # vg name -> list of partition device paths backing it.
    devices_dict = {}
    # pvcreate commands, queued only after a final metadata cleanup.
    pvlist = []

    # Partition tables cannot be rewritten while /target is mounted.
    self._umount_target()
    for disk in self.iterdisks():
        self.late("parted -s {0} print free".format(self._disk_dev(disk)))
        for pv in [p for p in disk["volumes"]
                   if p["type"] == "pv" and p["vg"] != "os"]:
            if pv["size"] <= 0:
                continue
            if self.pcount(self._disk_dev(disk)) == 0:
                # Untouched disk: GPT label + 24 MiB bios_grub area.
                self.late("parted -s {0} mklabel gpt"
                          "".format(self._disk_dev(disk)))
                self.late("parted -a none -s {0} "
                          "unit {3} mkpart primary {1} {2}".format(
                              self._disk_dev(disk),
                              self.psize(self._disk_dev(disk)),
                              self.psize(self._disk_dev(disk),
                                         24 * self.factor),
                              self.unit
                          )
                          )
                self.late("parted -s {0} set {1} "
                          "bios_grub on".format(
                              self._disk_dev(disk),
                              self.pcount(self._disk_dev(disk), 1)))
            self.late("parted -s {0} print free".format(self._disk_dev(disk)))
            # Allocate the partition that will back this PV.
            pcount = self.pcount(self._disk_dev(disk), 1)
            begin_size = self.psize(self._disk_dev(disk))
            end_size = self.psize(self._disk_dev(disk),
                                  pv["size"] * self.factor)
            self.late("parted -a none -s {0} "
                      "unit {4} mkpart {1} {2} {3}".format(
                          self._disk_dev(disk),
                          self._parttype(pcount),
                          begin_size,
                          end_size,
                          self.unit))
            self.late("sleep 10")
            self.log_lvm("after creating partition", False)
            # Remove any LVM metadata the new partition may expose.
            self.erase_lvm_metadata(False)
            self.late("hdparm -z {0}"
                      "".format(self._disk_dev(disk)))
            self.late("parted -s {0} print free".format(self._disk_dev(disk)))
            self.late("find /dev \( -type l -o -type b \) -exec ls -l {} \;")
            self.late("mount")
            self.late("cat /proc/swaps")
            self.late("cat /proc/mdstat")
            self.late("cat /proc/partitions")
            pvlist.append("pvcreate -ff {0}{1}{2}"
                          "".format(self._disk_dev(disk),
                                    self._pseparator(disk["id"]),
                                    pcount))
            if not devices_dict.get(pv["vg"]):
                devices_dict[pv["vg"]] = []
            devices_dict[pv["vg"]].append(
                "{0}{1}{2}"
                "".format(self._disk_dev(disk),
                          self._pseparator(disk["id"]), pcount)
            )

    self.log_lvm("before additional cleaning", False)
    self.erase_lvm_metadata(False)
    self.log_lvm("before pvcreate", False)
    for pvcommand in pvlist:
        self.late(pvcommand)
    self.log_lvm("before vgcreate", False)
    # NOTE: .iteritems() -- this module is Python 2 code.
    for vg, devs in devices_dict.iteritems():
        self.late("vgcreate -s 32m {0} {1}".format(vg, " ".join(devs)))
    self.log_lvm("after vgcreate", False)
    self._mount_target()
    for vg in [v for v in self.data
               if v["type"] == "vg" and v["id"] != "os"]:
        for lv in vg["volumes"]:
            if lv["size"] <= 0:
                continue
            self.late("lvcreate -L {0}m -n {1} {2}".format(
                lv["size"], lv["name"], vg["id"]))
            self.late("sleep 10")
            self.late("lvscan")
            tabmount = lv["mount"] if lv["mount"] != "swap" else "none"
            # "-" in vg/lv names is doubled in the device-mapper path.
            if ((not lv.get("file_system", "xfs") in ("swap", None, "none"))
                    and (not lv["mount"] in ("swap", "/"))):
                self.late("mkfs.{0} {1} /dev/mapper/{2}-{3}".format(
                    lv.get("file_system", "xfs"),
                    self._fsoptions(lv.get("file_system", "xfs")),
                    vg["id"].replace("-", "--"),
                    lv["name"].replace("-", "--")))
            if not lv["mount"] in (None, "none", "swap", "/"):
                self.late("mkdir -p /target{0}".format(lv["mount"]))
            # NOTE(review): swap LVs are excluded here, so unlike plain
            # swap partitions they never reach fstab -- confirm intended.
            if not lv["mount"] in (None, "none", "swap", "/"):
                self.late("echo '/dev/mapper/{0}-{1} "
                          "{2} {3} {4} 0 0' >> /target/etc/fstab"
                          "".format(
                              vg["id"].replace("-", "--"),
                              lv["name"].replace("-", "--"),
                              tabmount,
                              lv.get("file_system", "xfs"),
                              ("defaults" if lv["mount"] != "swap"
                               else "sw" )))
def eval(self):
    """Assemble the complete early/late command sequence.

    Order matters: clean leftover LVM state, wipe partition tables,
    queue the boot and base-OS recipes, create custom partitions and
    logical volumes, then install and configure GRUB inside the target
    chroot.  (The method name shadows the builtin ``eval``; kept for
    API compatibility.)
    """
    self.log_lvm("before early lvm cleaning")
    self.erase_lvm_metadata()
    self.log_lvm("after early lvm cleaning")
    self.erase_partition_table()
    self.boot()
    self.os()
    self.partitions()
    self.erase_lvm_metadata()
    self.lv()
    # GRUB setup runs in-target (second argument True).
    self.late("apt-get install -y grub-pc", True)
    # Force console terminal, disable graphics mode and inject the
    # kernel parameters into /etc/default/grub.
    self.late("sed -i "
              "-e 's/.*GRUB_TERMINAL.*/GRUB_TERMINAL=console/g' "
              "-e 's/.*GRUB_GFXMODE.*/#GRUB_GFXMODE=640x480/g' "
              "-e 's/.*GRUB_CMDLINE_LINUX.*/"
              "GRUB_CMDLINE_LINUX=\" {0} \"/g' /etc/default/grub".format(
                  self.kernel_params),
              True)
    # Remount cycle refreshes the bind mounts before grub-install.
    self._umount_target()
    self._mount_target()
    self.late("grub-mkconfig", True)
    self.late("grub-mkdevicemap", True)
    for disk in self.iterdisks():
        self.late("grub-install {0}"
                  "".format(self._disk_dev(disk)), True)
    self.late("update-grub", True)
    self.late("find /dev \( -type l -o -type b \) -exec ls -l {} \;")
def expose_recipe(self):
    """Join the queued recipe entries into one backslash-continued
    string suitable for a preseed file."""
    recipe_lines = self.recipe()
    return " \\\n".join(recipe_lines)
def expose_late(self, gzip=False):
    """Render the queued late commands as one preseed-ready string.

    Each command is first echoed through /tmp/long_logger.sh (so the
    command line itself is logged) and then executed with stdout and
    stderr piped into the same logger.  Commands flagged in_target get
    an "in-target " prefix.  The *gzip* parameter is unused but kept
    for interface compatibility.
    """
    chunks = []
    for line, in_target in self.late():
        full_line = ("in-target " if in_target else "") + line
        # Escape embedded single quotes for the sh echo '...' wrapper.
        quoted = re.sub("'", "'\"'\"'", full_line)
        chunks.append("echo '{0}' | /tmp/long_logger.sh;\\\n".format(quoted))
        chunks.append(full_line + " 2>&1 | /tmp/long_logger.sh;\\\n")
    return "".join(chunks).rstrip()
def expose_early(self):
    """Render the queued early commands as one preseed-ready string.

    The commands that install /tmp/long_logger.sh come first (the
    logger must exist before anything is piped through it); every
    early command is then echoed and executed through the logger,
    mirroring expose_late().
    """
    chunks = ["{0}\\\n".format(setup) for setup in self._long_logger()]
    for line in self.early():
        command = "{0}".format(line)
        # Escape embedded single quotes for the sh echo '...' wrapper.
        quoted = re.sub("'", "'\"'\"'", command)
        chunks.append("echo '{0}' | /tmp/long_logger.sh;\\\n".format(quoted))
        chunks.append(command + " 2>&1 | /tmp/long_logger.sh;\\\n")
    return "".join(chunks).rstrip()
def expose_disks(self):
    """Return the device path of the disk chosen for the OS install."""
    return self.os_disk
def pm(data):
    """Convenience wrapper: build a PManager for *data*, evaluate the
    full command sequence and return the exposed result."""
    manager = PManager(data)
    manager.eval()
    return manager.expose()
example = """
[
{
"name": "sda",
"free_space": 101772,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 12352,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 89548,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:0:0",
"size": 102400
},
{
"name": "sdb",
"free_space": 101772,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 101836,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:1:0",
"size": 102400
},
{
"min_size": 12288,
"type": "vg",
"id": "os",
"volumes": [
{
"mount": "/",
"type": "lv",
"name": "root",
"size": 10240
},
{
"mount": "swap",
"type": "lv",
"name": "swap",
"size": 2048
}
],
"label": "Base System"
},
{
"min_size": 5120,
"type": "vg",
"id": "image",
"volumes": [
{
"mount": "/var/lib/glance",
"type": "lv",
"name": "glance",
"size": 191256
}
],
"label": "Image Storage"
}
]
"""
# pmanager = PreseedPManager(example)
# pmanager.eval()
# print pmanager.expose_late()
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of ... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it und... | ```python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import xlsxwriter
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
    """Sale order extension: report partners that have confirmed
    orders but no e-mail address for invoice delivery.
    """
    _inherit = 'sale.order'

    def send_sale_order_email_check(self, cr, uid, context=None):
        """Scheduled check: e-mail administrators the list of partners
        with confirmed orders but no invoice e-mail address.

        Builds an HTML table of partner names and posts it to every
        user of the group_order_email_report_admin group.

        :return: True in every case (cron-friendly).
        """
        # Partners lacking both a dedicated invoice address and a plain
        # e-mail, restricted to those with at least one confirmed order:
        query = '''
            SELECT name
            FROM res_partner
            WHERE
                email_invoice_id is null and
                email is null and
                id IN (
                    SELECT distinct partner_id
                    FROM sale_order
                    WHERE
                        state not in ('cancel', 'draft', 'sent'));
            '''
        cr.execute(query)
        partner_name = [item[0] for item in cr.fetchall()]
        if not partner_name:
            _logger.info('No email missed in partner with order found!')
            return True

        body = '<table>'
        for name in partner_name:
            body += '''<tr><td>%s</td></tr>''' % name
        body += '</table>'

        # ---------------------------------------------------------------------
        # Send report:
        # ---------------------------------------------------------------------
        # Recipients: every user of the dedicated admin group.
        # (A redundant early `thread_pool` lookup was removed -- it was
        # immediately overwritten below.)
        group_pool = self.pool.get('res.groups')
        model_pool = self.pool.get('ir.model.data')
        group_id = model_pool.get_object_reference(
            cr, uid,
            'auto_order_nomail_check',
            'group_order_email_report_admin')[1]
        partner_ids = [
            user.partner_id.id
            for user in group_pool.browse(
                cr, uid, group_id, context=context).users]

        thread_pool = self.pool.get('mail.thread')
        thread_pool.message_post(
            cr, uid, False,
            type='email',
            body=body,
            subject='%s: Partner senza mail per invio fattura: %s' % (
                cr.dbname,
                datetime.now().strftime(DEFAULT_SERVER_DATE_FORMAT),
                ),
            partner_ids=[(6, 0, partner_ids)],
            context=context,
            )
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __BEGIN_LICENSE__\n# Copyright (c) 2009-2013, United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration. All\n# rights reserved.\n#\n# The NGT platform ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __BEGIN_LICENSE__\n# Copyright (c) 2009-2013, United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration. All\n# rights reserved.\n#\n# T... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
"""IrgSystemFunctions.py - General systems related utilities"""
import sys, os, re, shutil, subprocess, string, time, errno, multiprocessing
def isCmdOption(arg):
    """Return True when *arg* looks like a command line option.

    An option starts with '-' but is not a bare (negative) number such
    as "-3" or "-0.5"; those count as arguments.
    """
    looks_like_option = arg.startswith('-')
    looks_like_number = re.match('^-[0-9.]+$', arg) is not None
    return looks_like_option and not looks_like_number
# The following functions are useful for going between string and list
# representations of command line arguments
def isNotString(a):
    """Return True when *a* is not a string-like object.

    Uses ``basestring`` (Python 2) so both str and unicode count as
    strings.
    """
    is_string = isinstance(a, basestring)
    return not is_string
def argListToString(argList):
    """Convert a list of arguments into a single argument string.

    Arguments containing spaces are wrapped in double quotes so they
    stay together when re-split; every argument (including the last
    one) is followed by a single space, matching the historic output.
    An empty list yields the empty string.
    """
    # Collect the pieces and join once: the old += loop was quadratic
    # and its local variable shadowed the imported `string` module.
    pieces = []
    for arg in argList:
        text = str(arg)
        if ' ' in text:
            pieces.append('"' + text + '"')
        else:
            pieces.append(text)
    return ''.join(piece + ' ' for piece in pieces)
def stringToArgList(string):
    """Convert a single argument string into a list of arguments.

    Naive split on single spaces: consecutive spaces produce empty
    entries and double-quoted arguments are NOT re-joined.
    """
    arg_list = string.split(" ")
    return arg_list
# TODO: Improve this function a bit
def executeCommand(cmd,
                   outputPath=None,      # If given, throw if the file is not created. Don't run if it already exists.
                   suppressOutput=False, # If true, don't print anything!
                   force=False):         # If true, run even if outputPath already exists.
    '''Executes a command with multiple options.

    *cmd* may be a string or an argument list.  The command is skipped
    when *outputPath* already exists (unless *force* is set); after
    running, *outputPath* is checked again and CmdRunException is
    raised if the command failed to produce it.  Returns None for an
    empty command, True otherwise.

    NOTE(review): CmdRunException is not defined or imported anywhere
    in this module, so raising it actually raises NameError -- confirm
    where it is supposed to come from.
    '''
    if cmd == '': # An empty task
        return

    # Convert the input to list format if needed
    if not isNotString(cmd):
        cmd = stringToArgList(cmd)

    # Run the command if conditions are met
    if force or (not outputPath) or (not os.path.exists(outputPath)):
        if suppressOutput: # Process silently
            # Context manager closes the devnull handle (the previous
            # code leaked it on every silent call).
            with open(os.devnull, 'w') as FNULL:
                subprocess.call(cmd, stdout=FNULL, stderr=subprocess.STDOUT)
        else: # Display output
            print(cmd)
            subprocess.call(cmd)

    # Optionally check that the output file was created
    if outputPath and (not os.path.exists(outputPath)):
        raise CmdRunException('Failed to create output file: ' + outputPath)

    return True
#==================================================
# This class implements a variant of OptionParser which ignores unknown options.
from optparse import (OptionParser,BadOptionError,AmbiguousOptionError)
class PassThroughOptionParser(OptionParser):
    """OptionParser variant that ignores unknown options.

    Options optparse rejects (and bare negative numbers) are appended
    to the positional-argument list instead of aborting, so a wrapper
    script can forward them verbatim to another tool.
    """

    # Overwrite the default implementation which deletes newlines
    def format_epilog(self, formatter):
        return self.epilog

    def _process_args(self, largs, rargs, values):
        # Keep consuming; anything _process_args2 rejects is captured
        # and passed through to largs rather than raised to the caller.
        while rargs:
            try:
                self._process_args2(largs,rargs,values)
            except (BadOptionError,AmbiguousOptionError) as e: # On failure, pass option to output list
                if sys.version_info < (2, 6, 0):
                    # Port to Python 2.4: the failing option name must
                    # be parsed back out of the exception message.
                    p = re.match("^.*?no such option:\s*(.*?)$", e.msg)
                    if p:
                        largs.append(p.group(1))
                else:
                    largs.append(e.opt_str)

    # This version of the function successfully passes through negative numbers
    def _process_args2(self, largs, rargs, values):
        """_process_args(largs : [string],
                         rargs : [string],
                         values : Values)

        Process command-line arguments and populate 'values', consuming
        options and arguments from 'rargs'.  If 'allow_interspersed_args' is
        false, stop at the first non-option argument.  If true, accumulate any
        interspersed non-option arguments in 'largs'.
        """
        while rargs:
            arg = rargs[0]
            p = re.match('^-[0-9.]+$', arg) # Identify a numeric argument
            if p:
                # Raising BadOptionError routes the number into largs
                # via the except clause in _process_args above.
                del rargs[0]
                raise BadOptionError(arg)
                #self.error(_("%s unrecognized number in arguments") % arg)
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            if arg == "--":
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # process a single long option (possibly with value(s))
                OptionParser._process_long_opt(self, rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                # process a cluster of short options (possibly with
                # value(s) for the last one only)
                OptionParser._process_short_opts(self, rargs, values)
            elif self.allow_interspersed_args:
                largs.append(arg)
                del rargs[0]
            else:
                return # stop now, leave this arg in rargs
        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                            ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!
``` |
[
{
"content": "Here is the snippet:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2017, Wei Gao <gaowei3@qq.com>\n# Copyright: (c) 2018, Ansible Project\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2017, Wei Gao <gaowei3@qq.com>\n# Copyright: (c) 2018, Ansible Project\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Wei Gao <gaowei3@qq.com>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: maturity and support level.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_facts
short_description: Gathers facts about remote ESXi hostsystem
description:
- This module can be used to gathers facts like CPU, memory, datastore, network and system etc. about ESXi host system.
- Please specify hostname or IP address of ESXi host system as C(hostname).
- If hostname or IP address of vCenter is provided as C(hostname) and C(esxi_hostname) is not specified, then the
module will throw an error.
- VSAN facts added in 2.7 version.
version_added: 2.5
author:
- Wei Gao (@woshihaoren)
requirements:
- python >= 2.6
- PyVmomi
options:
esxi_hostname:
description:
- ESXi hostname.
- Host facts about the specified ESXi server will be returned.
- By specifying this option, you can select which ESXi hostsystem is returned if connecting to a vCenter.
version_added: 2.8
show_tag:
description:
- Tags related to Host are shown if set to C(True).
default: False
type: bool
required: False
version_added: 2.9
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vmware host facts
vmware_host_facts:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
register: host_facts
delegate_to: localhost
- name: Gather vmware host facts from vCenter
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: "{{ esxi_hostname }}"
register: host_facts
delegate_to: localhost
- name: Gather vmware host facts from vCenter with tag information
vmware_host_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: "{{ esxi_hostname }}"
show_tag: True
register: host_facts_tag
delegate_to: localhost
- name: Get VSAN Cluster UUID from host facts
vmware_host_facts:
hostname: "{{ esxi_server }}"
username: "{{ esxi_username }}"
password: "{{ esxi_password }}"
register: host_facts
- set_fact:
cluster_uuid: "{{ host_facts['ansible_facts']['vsan_cluster_uuid'] }}"
'''
RETURN = r'''
ansible_facts:
description: system info about the host machine
returned: always
type: dict
sample:
{
"ansible_all_ipv4_addresses": [
"10.76.33.200"
],
"ansible_bios_date": "2011-01-01T00:00:00+00:00",
"ansible_bios_version": "0.5.1",
"ansible_datastore": [
{
"free": "11.63 GB",
"name": "datastore1",
"total": "12.50 GB"
}
],
"ansible_distribution": "VMware ESXi",
"ansible_distribution_build": "4887370",
"ansible_distribution_version": "6.5.0",
"ansible_hostname": "10.76.33.100",
"ansible_interfaces": [
"vmk0"
],
"ansible_memfree_mb": 2702,
"ansible_memtotal_mb": 4095,
"ansible_os_type": "vmnix-x86",
"ansible_processor": "Intel Xeon E312xx (Sandy Bridge)",
"ansible_processor_cores": 2,
"ansible_processor_count": 2,
"ansible_processor_vcpus": 2,
"ansible_product_name": "KVM",
"ansible_product_serial": "NA",
"ansible_system_vendor": "Red Hat",
"ansible_vmk0": {
"device": "vmk0",
"ipv4": {
"address": "10.76.33.100",
"netmask": "255.255.255.0"
},
"macaddress": "52:54:00:56:7d:59",
"mtu": 1500
},
"vsan_cluster_uuid": null,
"vsan_node_uuid": null,
"vsan_health": "unknown",
"tags": [
{
"category_id": "urn:vmomi:InventoryServiceCategory:8eb81431-b20d-49f5-af7b-126853aa1189:GLOBAL",
"category_name": "host_category_0001",
"description": "",
"id": "urn:vmomi:InventoryServiceTag:e9398232-46fd-461a-bf84-06128e182a4a:GLOBAL",
"name": "host_tag_0001"
}
],
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.vmware_rest_client import VmwareRestClient
try:
from com.vmware.vapi.std_client import DynamicID
except ImportError:
pass
class VMwareHostFactManager(PyVmomi):
def __init__(self, module):
super(VMwareHostFactManager, self).__init__(module)
esxi_host_name = self.params.get('esxi_hostname', None)
if self.is_vcenter():
if esxi_host_name is None:
self.module.fail_json(msg="Connected to a vCenter system without specifying esxi_hostname")
self.host = self.get_all_host_objs(esxi_host_name=esxi_host_name)
if len(self.host) > 1:
self.module.fail_json(msg="esxi_hostname matched multiple hosts")
self.host = self.host[0]
else:
self.host = find_obj(self.content, [vim.HostSystem], None)
if self.host is None:
self.module.fail_json(msg="Failed to find host system.")
def all_facts(self):
ansible_facts = {}
ansible_facts.update(self.get_cpu_facts())
ansible_facts.update(self.get_memory_facts())
ansible_facts.update(self.get_datastore_facts())
ansible_facts.update(self.get_network_facts())
ansible_facts.update(self.get_system_facts())
ansible_facts.update(self.get_vsan_facts())
ansible_facts.update(self.get_cluster_facts())
if self.params.get('show_tag'):
ansible_facts.update(self.get_tag_facts())
self.module.exit_json(changed=False, ansible_facts=ansible_facts)
def get_cluster_facts(self):
cluster_facts = {'cluster': None}
if self.host.parent and isinstance(self.host.parent, vim.ClusterComputeResource):
cluster_facts.update(cluster=self.host.parent.name)
return cluster_facts
def get_vsan_facts(self):
config_mgr = self.host.configManager.vsanSystem
if config_mgr is None:
return {
'vsan_cluster_uuid': None,
'vsan_node_uuid': None,
'vsan_health': "unknown",
}
status = config_mgr.QueryHostStatus()
return {
'vsan_cluster_uuid': status.uuid,
'vsan_node_uuid': status.nodeUuid,
'vsan_health': status.health,
}
def get_cpu_facts(self):
return {
'ansible_processor': self.host.summary.hardware.cpuModel,
'ansible_processor_cores': self.host.summary.hardware.numCpuCores,
'ansible_processor_count': self.host.summary.hardware.numCpuPkgs,
'ansible_processor_vcpus': self.host.summary.hardware.numCpuThreads,
}
def get_memory_facts(self):
return {
'ansible_memfree_mb': self.host.hardware.memorySize // 1024 // 1024 - self.host.summary.quickStats.overallMemoryUsage,
'ansible_memtotal_mb': self.host.hardware.memorySize // 1024 // 1024,
}
def get_datastore_facts(self):
facts = dict()
facts['ansible_datastore'] = []
for store in self.host.datastore:
_tmp = {
'name': store.summary.name,
'total': bytes_to_human(store.summary.capacity),
'free': bytes_to_human(store.summary.freeSpace),
}
facts['ansible_datastore'].append(_tmp)
return facts
def get_network_facts(self):
facts = dict()
facts['ansible_interfaces'] = []
facts['ansible_all_ipv4_addresses'] = []
for nic in self.host.config.network.vnic:
device = nic.device
facts['ansible_interfaces'].append(device)
facts['ansible_all_ipv4_addresses'].append(nic.spec.ip.ipAddress)
_tmp = {
'device': device,
'ipv4': {
'address': nic.spec.ip.ipAddress,
'netmask': nic.spec.ip.subnetMask,
},
'macaddress': nic.spec.mac,
'mtu': nic.spec.mtu,
}
facts['ansible_' + device] = _tmp
return facts
def get_system_facts(self):
sn = 'NA'
for info in self.host.hardware.systemInfo.otherIdentifyingInfo:
if info.identifierType.key == 'ServiceTag':
sn = info.identifierValue
facts = {
'ansible_distribution': self.host.config.product.name,
'ansible_distribution_version': self.host.config.product.version,
'ansible_distribution_build': self.host.config.product.build,
'ansible_os_type': self.host.config.product.osType,
'ansible_system_vendor': self.host.hardware.systemInfo.vendor,
'ansible_hostname': self.host.summary.config.name,
'ansible_product_name': self.host.hardware.systemInfo.model,
'ansible_product_serial': sn,
'ansible_bios_date': self.host.hardware.biosInfo.releaseDate,
'ansible_bios_version': self.host.hardware.biosInfo.biosVersion,
}
return facts
def get_tag_facts(self):
    """Return the tags attached to this host via the vSphere REST API.

    Side effect: caches the Tag/TagAssociation/Category service handles on
    ``self`` for use by :meth:`get_tags_for_object`.
    """
    rest_client = VmwareRestClient(self.module)
    host_dynamic_obj = DynamicID(type='HostSystem', id=self.host._moId)
    tagging = rest_client.api_client.tagging
    self.tag_service = tagging.Tag
    self.tag_association_svc = tagging.TagAssociation
    self.category_service = tagging.Category
    return {'tags': self.get_tags_for_object(host_dynamic_obj)}
def get_tags_for_object(self, dobj):
    """
    Return tags associated with an object.

    Args:
        dobj: Dynamic object

    Returns: List of dicts (id, name, description, category id/name) for
    each tag attached to the given object.
    """
    return [
        {
            'id': tag.id,
            'category_name': self.category_service.get(tag.category_id).name,
            'name': tag.name,
            'description': tag.description,
            'category_id': tag.category_id,
        }
        # map() keeps tag lookup lazily interleaved with dict construction.
        for tag in map(self.tag_service.get,
                       self.tag_association_svc.list_attached_tags(dobj))
    ]
def main():
    """Module entry point: collect all host facts (and optionally tags)."""
    spec = vmware_argument_spec()
    spec.update(
        esxi_hostname=dict(type='str', required=False),
        show_tag=dict(type='bool', default=False),
    )
    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True)
    VMwareHostFactManager(module).all_facts()


if __name__ == '__main__':
    main()
``` |
[
{
"content": "```python\nimport os\nimport sys\nimport copy\nimport logging\n\nfrom checker import *\nfrom .ofp import register_ofp_creators\nfrom .ofp import OfpBase\nfrom .ofp_bucket import SCE_BUCKETS\nfrom .ofp_bucket import OfpBucketCreator\n\n# YAML:\n# group_mod:\n# command: 0\n# type: 0\n# group_i... | [
{
"content": "<|memory_start|>```python\nimport os\nimport sys\nimport copy\nimport logging\n\nfrom checker import *\nfrom .ofp import register_ofp_creators\nfrom .ofp import OfpBase\nfrom .ofp_bucket import SCE_BUCKETS\nfrom .ofp_bucket import OfpBucketCreator\n\n# YAML:\n# group_mod:\n# command: 0\n# type... | ```python
import os
import sys
import copy
import logging
from checker import *
from .ofp import register_ofp_creators
from .ofp import OfpBase
from .ofp_bucket import SCE_BUCKETS
from .ofp_bucket import OfpBucketCreator
# YAML:
# group_mod:
# command: 0
# type: 0
# group_id: 0
# buckets:
# - bucket:
# weight: 0
# watch_port: 0
# watch_group: 0
# actions
# - output:
# port: 0
SCE_GROUP_MOD = "group_mod"


@register_ofp_creators(SCE_GROUP_MOD)
class OfpGroupModCreator(OfpBase):
    """Build an OFPGroupMod message from a scenario parameter dict."""

    @classmethod
    def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
        # Work on a deep copy so the caller's scenario dict is untouched.
        kws = copy.deepcopy(params)
        # Buckets default to an empty list when the scenario omits them.
        kws[SCE_BUCKETS] = (
            OfpBucketCreator.create(test_case_obj, dp, ofproto,
                                    ofp_parser, params[SCE_BUCKETS])
            if SCE_BUCKETS in params else [])
        return ofp_parser.OFPGroupMod(dp, **kws)
``` |
[
{
"content": "Here is the snippet:\n```python\n\"\"\"Representation of Analysis.\"\"\"\nimport logging\n\nimport requests\nfrom cosmosid.api.files import Runs\nfrom cosmosid.helpers.exceptions import (AuthenticationFailed,\n CosmosidException,\n ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n\"\"\"Representation of Analysis.\"\"\"\nimport logging\n\nimport requests\nfrom cosmosid.api.files import Runs\nfrom cosmosid.helpers.exceptions import (AuthenticationFailed,\n CosmosidException,\n ... | ```python
"""Representation of Analysis."""
import logging
import requests
from cosmosid.api.files import Runs
from cosmosid.helpers.exceptions import (AuthenticationFailed,
CosmosidException,
NotFoundException)
LOGGER = logging.getLogger(__name__)
class Analysis(object):
    """Runs analysis interface.

    Wraps the CosmosID ``/runs/{run_id}/analysis`` endpoint and the
    :class:`Runs` helper to fetch analysis results for a run or a file.
    """

    __resource_path = '/api/metagenid/v1/runs/{run_id}/analysis'

    def __init__(self, base_url=None, api_key=None):
        self.base_url = base_url
        self.logger = LOGGER
        self.header = {'X-Api-Key': api_key}
        self.request_url = "{}{}".format(self.base_url, self.__resource_path)
        self.runs = Runs(base_url=self.base_url,
                         api_key=self.header['X-Api-Key'])

    def __is_runid_in_file(self, run_id, file_id):
        """Get given run meta and check whether the run belongs to the sample."""
        single_run = self.runs.get_single_run(run_id)
        if single_run:
            if single_run['status']:
                if single_run['file']['id'] == file_id:
                    return True
        return False

    def __get_analysis_by_file_id(self, file_id):
        """Fetch analysis for the most recent run of *file_id* (None if no run)."""
        last_run = self.runs.get_last_run_for_file(file_id)
        result_data = None
        if last_run:
            result_data = self.__get_analysis_by_run_id(last_run['id'])
        return result_data

    def __get_analysis_by_run_id(self, run_id):
        """Fetch analysis data for *run_id*.

        Returns a dict with ``status`` 1 (found) or 0 (service 404), plus the
        run metadata; returns None on auth/network/other errors (logged).
        """
        request_url = self.request_url.format(run_id=run_id)
        results = None  # FIX: keep defined for the logging in the handler below
        try:
            single_run_meta = self.runs.get_single_run(run_id)
            if not single_run_meta:
                raise CosmosidException('Response from service is empty for '
                                        'run id %s' % run_id)
            if not single_run_meta['status']:
                raise NotFoundException(single_run_meta['message'])
            results = requests.get(request_url, headers=self.header)
            if results.status_code == 403:
                raise AuthenticationFailed('Authentication Failed. '
                                           'Wrong API Key.')
            if results.status_code == 404:
                result_data = results.json()
                result_data.update({'status': 0})
                result_data.update({'run_meta': single_run_meta})
                return result_data
            # FIX: the original tested the constant `requests.codes.ok`
            # (always truthy) instead of comparing the response status.
            if results.status_code == requests.codes.ok:
                result_data = results.json()
                result_data.update({'status': 1})
                result_data.update({'run_meta': single_run_meta})
                return result_data
            results.raise_for_status()
        except AuthenticationFailed:
            self.logger.error('Authentication Failed')
        except NotFoundException:
            self.logger.error('Not Found')
        except CosmosidException:
            self.logger.error('Got Analysis data exception.')
        except requests.exceptions.RequestException:
            self.logger.debug('Debug', exc_info=True)
            self.logger.error('Error occured during request')
            # FIX: guard against `results` never being assigned (e.g. the
            # GET itself raised before a response object existed).
            if results is not None:
                self.logger.error('Response Status Code: %s',
                                  results.status_code)

    def get_list(self, file_id=None, run_id=None):
        """Get analysis data.

        cli analysis --id ID
        """
        if file_id and run_id:
            if self.__is_runid_in_file(run_id, file_id):
                return self.__get_analysis_by_run_id(run_id)
            # FIX: the original read self.file_id/self.run_id, attributes
            # that do not exist (AttributeError on this path).
            msg = 'File %s does not contain Run %s' % (file_id, run_id)
            return {'status': 0,
                    'message': msg}
        elif run_id and not file_id:
            return self.__get_analysis_by_run_id(run_id)
        elif file_id and not run_id:
            return self.__get_analysis_by_file_id(file_id)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n#!/usr/bin/env python\n\nimport sys, re\n\n# Packet class\nclass Packet(object):\n\n def __init__(self):\n # These data types are taken directly from the APRS spec at http://aprs.org/doc/APRS101.PDF\n # This is not an exhaustive list. Th... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport sys, re\n\n# Packet class\nclass Packet(object):\n\n def __init__(self):\n # These data types are taken directly from the APRS spec at http://aprs.org/doc/APRS101.PDF\n # This is not an exh... | ```python
#!/usr/bin/env python
import sys, re
# Packet class
class Packet(object):
    """Parse and build raw APRS packets (see http://aprs.org/doc/APRS101.PDF).

    Assigning to ``packet`` parses the raw string into its component fields;
    assigning to any individual header field re-synthesizes the raw packet.
    """

    def __init__(self):
        # These data types are taken directly from the APRS spec at
        # http://aprs.org/doc/APRS101.PDF. This is not an exhaustive list;
        # these are the most common ones, added during testing.
        self._data_type_list = {'!' : 'Position without timestamp',
                                '_' : 'Weather Report (without position)',
                                '@' : 'Position with timestamp (with APRS messaging)',
                                '/' : 'Position with timestamp (no APRS messaging)',
                                '=' : 'Position without timestamp (with APRS messaging)',
                                'T' : 'Telemetry data',
                                ';' : 'Object',
                                '<' : 'Station Capabilities',
                                '>' : 'Status',
                                '`' : 'Current Mic-E Data (not used in TM-D700)',
                                '?' : 'Query',
                                '\'' : 'Old Mic-E Data (but Current data for TM-D700)',
                                ':' : 'Message',
                                '$' : 'Raw GPS data or Ultimeter 2000',
                                }
        self._date_type_list = {'z' : 'D/H/M format, zulu time',
                                '/' : 'D/H/M format, local time',
                                'h' : 'H/M/S format, zulu time'
                                }
        # Raw packet string and its parsed fields (all None until known).
        self._packet = None
        self._source = None          # station the packet originated from
        self._destination = None     # destination of the packet
        self._path = None            # digipeater path
        self._information = None     # information field
        self._data_type = None       # data type identifier character
        self._latitude = None
        self._longitude = None
        self._symbol = None
        self._comment = None
        self._phg = None             # Power-Height-Gain
        self._data_extension = None
        self._altitude = None
        self._date = None            # raw 6-digit date/time field
        self._date_type = None       # 'z', '/' or 'h'
        self._month = None
        self._day = None
        self._hour = None
        self._minute = None
        self._second = None
        # Read-only snapshots taken at parse time, used by _reset().
        self._parsed_source = None
        self._parsed_destination = None
        self._parsed_path = None
        self._parsed_information = None
        # X1J flag: set when '!' is found mid-information (X1J TNCs).
        self._x1j = False

    # ---- property plumbing -------------------------------------------------
    def _field_property(attr):  # class-body helper, deleted after use below
        """Build a property that stores to *attr* and rebuilds the packet."""
        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            setattr(self, attr, value)
            self._build()
        return property(_get, _set)

    source = _field_property('_source')
    destination = _field_property('_destination')
    path = _field_property('_path')
    information = _field_property('_information')
    data_type = _field_property('_data_type')
    latitude = _field_property('_latitude')
    longitude = _field_property('_longitude')
    symbol = _field_property('_symbol')
    comment = _field_property('_comment')
    data_extension = _field_property('_data_extension')
    altitude = _field_property('_altitude')
    phg = _field_property('_phg')
    date = _field_property('_date')
    date_type = _field_property('_date_type')
    month = _field_property('_month')
    day = _field_property('_day')
    hour = _field_property('_hour')
    minute = _field_property('_minute')
    second = _field_property('_second')
    del _field_property

    @property
    def packet(self):
        return self._packet

    @packet.setter
    def packet(self, value):
        # Assigning a raw packet triggers a full parse.
        self._packet = value
        self._parse()

    @property
    def data_type_name(self):
        """Friendly name for the data type (read-only)."""
        return self._data_type_list.get(self._data_type)

    @property
    def date_type_name(self):
        """Friendly name for the date type (read-only)."""
        return self._date_type_list.get(self._date_type)

    def _reset(self):
        """Restore header fields to their last parsed values and re-parse."""
        self._source = self._parsed_source
        self._destination = self._parsed_destination
        self._path = self._parsed_path
        self._information = self._parsed_information
        self._parse()

    def _parse_information(self):
        """Decode the information field according to its data type."""
        first_char = self._information[0]
        if first_char in self._data_type_list:
            self._data_type = first_char
        # The spec allows '!' (and *only* '!') to appear anywhere in the
        # first 40 characters of the information field.
        # FIX: the original referenced an undefined name `data` here.
        elif re.search(r"!", self._information[0:40]):
            self._data_type = "!"
            # Set the X1J flag to assist with parsing
            self._x1j = True
        else:
            # Unknown data type: we can't parse the information field further.
            return

        if self._data_type in ['!', '=']:
            # Position reports without timestamps (!, =).
            # FIX: the parsed comment is now stored instead of discarded.
            (self._latitude, symbol_table, self._longitude, symbol_code,
             self._comment) = re.search(
                r"^[\!\=]([\d\s\.]+[NS])(\S)([\d\s\.]+[EW])(\S)(.*)$",
                self._information).groups()
            # Join the two symbol characters together.
            self._symbol = symbol_table + symbol_code
        elif self._data_type in ['/', '@']:
            # Position reports with timestamps (/, @).
            (self._date, self._date_type, self._latitude, symbol_table,
             self._longitude, symbol_code, self._comment) = re.search(
                r"^[\/\@](\d{6})([zh\/])([\d\s\.]+[NS])(\S)([\d\s\.]+[EW])(\S)(.*)$",
                self._information).groups()
            # FIX: symbol was parsed but discarded in this branch.
            self._symbol = symbol_table + symbol_code
            # FIX: original sliced [2:2]/[4:2] (always empty strings) and the
            # second branch re-tested '/' (dead code); per the spec 'h' is
            # the H/M/S form, 'z' and '/' are D/H/M.
            if self._date_type in ["z", "/"]:
                self._day = self._date[0:2]
                self._hour = self._date[2:4]
                self._minute = self._date[4:6]
            elif self._date_type == "h":
                self._hour = self._date[0:2]
                self._minute = self._date[2:4]
                # FIX: was assigned to a typo attribute `_seconds`.
                self._second = self._date[4:6]

    def _parse(self):
        """Split a raw packet into source, destination, path and information."""
        # Debug trace; print() form works on both Python 2 and 3.
        print("Packet: " + self._packet)
        packet_segments = re.search(
            r"([\w\-]+)>([\w\-]+),([\w\-\*\,]+):(.*)$", self._packet)
        (self._source, self._destination,
         self._path, self._information) = packet_segments.groups()
        # Read-only parse-time snapshots of the above, for _reset().
        (self._parsed_source, self._parsed_destination,
         self._parsed_path, self._parsed_information) = packet_segments.groups()
        self._parse_information()

    def _build_information(self):
        # Not implemented: parsed fields are not re-encoded into information.
        pass

    def _build(self):
        """Re-synthesize the raw packet once all header fields are known."""
        if (self._source is not None and self._destination is not None
                and self._path is not None and self._information is not None):
            self._packet = (self._source + ">" + self._destination + "," +
                            self._path + ":" + self._information)
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# UrbanFootprint v1.5\n# Copyright (C) 2017 Calthorpe Analytics\n#\n# This file is part of UrbanFootprint version 1.5\n#\n# UrbanFootprint is distributed under the terms of the GNU General\n# Public License version 3, as publish... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# UrbanFootprint v1.5\n# Copyright (C) 2017 Calthorpe Analytics\n#\n# This file is part of UrbanFootprint version 1.5\n#\n# UrbanFootprint is distributed under the terms of the GNU General\n# Public License versi... | ```python
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
# from memory_profiler import profile
import logging
from footprint.client.configuration import resolve_fixture
from footprint.main.lib.functions import map_to_dict
from footprint.main.models.config.global_config import GlobalConfig
from footprint.main.models.config.region import Region
from footprint.main.models.config.db_entity_interest import DbEntityInterest
from footprint.main.models.presentation.result_library import ResultLibrary
from footprint.main.models.presentation.result.result import Result
logger = logging.getLogger(__name__)
__author__ = 'calthorpe_analytics'
def on_config_entity_post_save_behevior(sender, **kwargs):
    """
    Sync a ConfigEntity's Behaviors.

    Django post_save signal handler; expects the saved ConfigEntity in
    kwargs['instance'].

    NOTE(review): the function name ("behevior") and the log message below
    ("behvior") both look misspelled -- confirm against the signal wiring
    before renaming, since callers reference this exact name.
    """
    # TODO This is obviously not fully implemented
    raise Exception("Not implemented")
    # Everything below is unreachable until the raise above is removed.
    config_entity = kwargs['instance']
    logger.info("Handler: on_config_entity_post_save_behvior. ConfigEntity: %s" % config_entity.name)
    update_or_create_behaviors(config_entity)
def update_or_create_behaviors(config_entity, **kwargs):
    """
    Creates Behaviors when saving a config_entity if they do not yet exist.

    :param config_entity: the saved ConfigEntity; only GlobalConfig and
        Region instances are processed, anything else returns immediately.
    :return: None
    """
    # Just process Regions and GlobalConfig.
    # FIX: isinstance() takes a tuple of classes; the original
    # isinstance(config_entity, GlobalConfig, Region) raises TypeError.
    if not isinstance(config_entity, (GlobalConfig, Region)):
        return

    from footprint.client.configuration.fixture import BehaviorFixture
    client_behavior_fixture = resolve_fixture(
        "behavior",
        "behavior",
        BehaviorFixture,
        config_entity.schema(),
        config_entity=config_entity)

    # NOTE(review): the remainder appears copy/pasted from the result
    # publisher -- `client_result` and `db_entity_keys` are undefined in
    # this module, so this body cannot run as written (the only caller also
    # raises "Not implemented" before reaching it). Kept for reference.

    # Create each ResultLibrary and store them as a dict keyed by their key
    result_library_lookup = map_to_dict(lambda result_library_config: [
        result_library_config.key,
        ResultLibrary.objects.update_or_create(
            key=result_library_config.key,
            config_entity=config_entity,
            scope=config_entity.schema(),
            defaults=dict(
                name=result_library_config.name.format(config_entity.name),
                description=result_library_config.description.format(config_entity.name)
            )
        )[0]],
        client_result.result_libraries())

    #for key, result_library in result_library_lookup.items():
    #    result_library.results.all().delete()

    # Create each configured Result
    for result_config in filter(lambda result:
                                not db_entity_keys or
                                result.result_db_entity_key in db_entity_keys or
                                result.source_db_entity_key in db_entity_keys,
                                client_result.results()):
        logger.info("Result Publishing Result DbEntity Key: %s" % result_config.result_db_entity_key)
        # Create the db_entity and db_entity_interest for the result
        db_entity = result_config.update_or_create_db_entity(config_entity)
        # Make the db_entity the default selected one for its key
        previous = config_entity._no_post_save_publishing
        config_entity._no_post_save_publishing = True
        config_entity.save()
        config_entity._no_post_save_publishing = previous
        # Test the query
        db_entity.parse_query(config_entity)
        db_entity_interest = DbEntityInterest.objects.get(
            config_entity=config_entity,
            db_entity__key=result_config.result_db_entity_key
        )
        # Create a result for each result key given.
        # FIX: Django's update_or_create returns an (object, created)
        # 2-tuple, not three values; also fixed the misspelled
        # 'db_enitty_interest' field keyword (field is db_entity_interest --
        # confirm against the Result model).
        result, created = Result.objects.update_or_create(
            db_entity_interest=db_entity_interest,
            defaults=dict(
                # Use the Result's custom Medium, keyed by the Result key
                medium=result_config.resolve_result_medium(),
                configuration=result_config.get_presentation_medium_configuration())
        )
        # If created, add the result to the matching result library
        if created:
            result_library_lookup[result_config.result_library_key].presentation_media.add(result)

    # Remove orphan results and their DbEntityInterests/DbEntities
    result_library_ids = map(lambda result_library: result_library.id,
                             ResultLibrary.objects.filter(config_entity=config_entity))
    valid_result_keys = map(lambda result_config: result_config.result_db_entity_key,
                            client_result.results())
    orphan_results = Result.objects.filter(presentation__id__in=result_library_ids).exclude(db_entity_key__in=valid_result_keys)
    DbEntityInterest.objects.filter(
        config_entity=config_entity,
        db_entity__key__in=map(lambda result: result.db_entity_key, orphan_results)).delete()
    orphan_results.delete()
def on_config_entity_pre_delete_result(sender, **kwargs):
    """
    Django pre_delete signal handler for a ConfigEntity.

    NOTE(review): currently a stub -- it extracts the instance from kwargs
    and does nothing with it.
    """
    config_entity = kwargs['instance']
[
{
"content": "Here is the snippet:\n```python\n#! /usr/bin/env python\r\n# -*- coding: iso-8859-1 -*-\r\n\r\nfrom downloadCommon import DownloadCommon, getSeqName\r\nfrom DdlCommonInterface import DdlCommonInterface\r\nimport re\r\n\r\nclass PgDownloader(DownloadCommon):\r\n \"\"\" Silly me, I didn't know ab... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#! /usr/bin/env python\r\n# -*- coding: iso-8859-1 -*-\r\n\r\nfrom downloadCommon import DownloadCommon, getSeqName\r\nfrom DdlCommonInterface import DdlCommonInterface\r\nimport re\r\n\r\nclass PgDownloader(DownloadCommon):\r\n \"\"\" Silly me, ... | ```python
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
from downloadCommon import DownloadCommon, getSeqName
from DdlCommonInterface import DdlCommonInterface
import re
class PgDownloader(DownloadCommon):
    """ Silly me, I didn't know about INFORMATION_SCHEMA

    Reads PostgreSQL schema metadata (tables, columns, indexes, relations,
    views, functions) via the pg_catalog and INFORMATION_SCHEMA views.
    """

    def __init__(self):
        self.strDbms = 'postgres'

    def connect(self, info):
        """Open a psycopg connection from an info dict (host/dbname/user/pass)."""
        # FIX: the bare `except:` swallowed every error; only a missing
        # psycopg module should be tolerated here.
        try:
            import psycopg
        except ImportError:
            print("Missing PostgreSQL support through psycopg")
            return
        self.conn = psycopg.connect('host=%(host)s dbname=%(dbname)s user=%(user)s password=%(pass)s' % info)
        self.cursor = self.conn.cursor()
        #self.doSomeTests()

    def useConnection(self, con, version):
        """Adopt an already-open DB-API connection instead of creating one."""
        self.conn = con
        self.cursor = self.conn.cursor()

    def doSomeTests(self):
        """Debug-only scratch method; never called in normal operation."""
        import sys  # FIX: sys was used below but never imported in this file
        sql = "select tablename from pg_tables where tablename in %s"
        inList = (('sample', 'companies', 'table1'), )
        self.cursor.execute(sql, inList)
        print(self.cursor.fetchall())
        sql = "select tablename from pg_tables where tablename = %(tbl)s"
        inDict = { 'tbl' : 'sample' }
        self.cursor.execute(sql, inDict)
        print(self.cursor.fetchall())
        sys.exit(-1)

    def getTablesStandard(self, tableList):
        """ Returns the list of tables as a array of strings """
        strQuery = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA not in ('pg_catalog', 'information_schema') and TABLE_NAME NOT LIKE 'pg_%' AND TABLE_TYPE = 'BASE TABLE'"
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return self._confirmReturns([x[0] for x in rows], tableList)
        return []

    def getTables(self, tableList):
        """ Returns the list of tables as a array of strings """
        self.cursor.execute("select tablename from pg_tables where schemaname not in ('pg_catalog', 'information_schema')")
        return self._confirmReturns([x[0] for x in self.cursor.fetchall() ], tableList)

    def getTableColumnsStandard(self, strTable):
        """ Returns column in this format
            (nColIndex, strColumnName, strColType, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, bNotNull, strDefault, auto_increment)
        """
        strSql = """
            SELECT ORDINAL_POSITION, COLUMN_NAME, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, NUMERIC_PRECISION_RADIX, NUMERIC_SCALE, IS_NULLABLE, COLUMN_DEFAULT
            FROM INFORMATION_SCHEMA.COLUMNS
            WHERE TABLE_NAME = %s
            ORDER BY ORDINAL_POSITION"""
        self.cursor.execute(strSql, [strTable])
        rows = self.cursor.fetchall()

        ret = []
        for row in rows:
            attnum, name, type, size, numsize, numprecradix, numprec, attnotnull, default = row
            type = self._fixTypeNames(type)
            # Numeric columns report their size through NUMERIC_PRECISION.
            if not size and numprecradix == 10:
                size = numsize

            # IS_NULLABLE 'yes' means the column is NOT "not null".
            if attnotnull.lower() == "yes":
                attnotnull = False
            else:
                attnotnull = True

            if default:
                # remove the '::text stuff
                default = default.replace('::text', '')

            bAutoIncrement = False
            # A default of nextval(<table_col_seq>) marks a serial column.
            if default == "nextval('%s')" % (getSeqName(strTable, name)):
                default = ''
                bAutoIncrement = True

            ret.append((name, type, size, numprec, attnotnull, default, bAutoIncrement))
        return ret

    def getTableColumns(self, strTable):
        """ Returns column in this format
            (strColumnName, strColType, nColSize, nColPrecision, bNotNull, strDefault, bAutoIncrement)
        """
        strSql = """
            SELECT pa.attnum, pa.attname, pt.typname, pa.atttypmod, pa.attnotnull, pa.atthasdef, pc.oid
            FROM pg_attribute pa, pg_type pt, pg_class pc
            WHERE pa.atttypid = pt.oid
            AND pa.attrelid = pc.oid
            AND pa.attisdropped = 'f'
            AND pc.relname = %s
            AND pc.relkind = 'r'
            ORDER BY attnum"""
        self.cursor.execute(strSql, [strTable])
        rows = self.cursor.fetchall()

        # System columns that pg_attribute reports but callers never want.
        specialCols = ['cmax', 'cmin', 'xmax', 'xmin', 'oid', 'ctid', 'tableoid']
        ret = []
        for row in rows:
            attnum, name, type, attlen, attnotnull, atthasdef, clasoid = row
            if name not in specialCols:
                type = self._fixTypeNames(type)
                attlen, precision = self.decodeLength(type, attlen)

                default = None
                bAutoIncrement = False
                if atthasdef:
                    default = self.getColumnDefault(clasoid, attnum)
                    if default == "nextval('%s')" % (getSeqName(strTable, name)):
                        default = ''
                        bAutoIncrement = True

                ret.append((name, type, attlen, precision, attnotnull, default, bAutoIncrement))
        return ret

    def _fixTypeNames(self, type):
        """Map internal pg type names to their SQL-standard spellings."""
        fixNames = {
            'int4' : 'integer',
            'int' : 'integer',
            'bool' : 'boolean',
            'float8' : 'double precision',
            'int8' : 'bigint',
            'serial8' : 'bigserial',
            'serial4' : 'serial',
            'float4' : 'real',
            'int2' : 'smallint',
            'character varying' : 'varchar',
        }
        if type in fixNames:
            return fixNames[type]
        return type

    def decodeLength(self, type, atttypmod):
        """Decode atttypmod into (length, precision) for a column type."""
        # gleamed from http://www.postgresql-websource.com/psql713/source-format_type.htm
        VARHDRSZ = 4
        if type == 'varchar':
            return (atttypmod - VARHDRSZ, None)
        if type == 'numeric':
            atttypmod -= VARHDRSZ
            return ( (atttypmod >> 16) & 0xffff, atttypmod & 0xffff)
        if type == 'varbit' or type == 'bit':
            return (atttypmod, None)
        return (None, None)

    def getColumnDefault(self, clasoid, attnum):
        """ Returns the default value for a comment or None """
        strSql = "SELECT adsrc FROM pg_attrdef WHERE adrelid = %s AND adnum = %s"
        self.cursor.execute(strSql, [clasoid, attnum])
        rows = self.cursor.fetchall()
        if not rows:
            return None
        strDefault = rows[0][0]
        strDefault = strDefault.replace('::text', '')
        return strDefault

    def getTableComment(self, strTableName):
        """ Returns the comment as a string """
        strSql = """SELECT description FROM pg_description pd, pg_class pc
            WHERE pc.relname = %s AND pc.relkind = 'r' AND pd.objoid = pc.oid AND pd.objsubid = 0"""
        self.cursor.execute(strSql, [strTableName])
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0]
        return None

    def getColumnCommentStandard(self, strTableName, strColumnName):
        """ Returns the comment as a string """
        strSql = """SELECT description FROM pg_description pd, pg_class pc, pg_attribute pa
            WHERE pc.relname = %s AND pc.relkind = 'r'
            AND pd.objoid = pc.oid AND pd.objsubid = pa.attnum AND pa.attname = %s AND pa.attrelid = pc.oid"""
        self.cursor.execute(strSql, [strTableName, strColumnName])
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0]
        return None

    def getTableIndexes(self, strTableName):
        """ Returns
            (strIndexName, [strColumns,], bIsUnique, bIsPrimary, bIsClustered)
            or []
        """
        strSql = """SELECT pc.relname, pi.indkey, indisunique, indisprimary, indisclustered
        FROM pg_index pi, pg_class pc, pg_class pc2
        WHERE pc2.relname = %s
        AND pc2.relkind = 'r'
        AND pc2.oid = pi.indrelid
        AND pc.oid = pi.indexrelid
        """
        self.cursor.execute(strSql, [strTableName])
        rows = self.cursor.fetchall()

        ret = []
        if not rows:
            return ret

        for row in rows:
            (strIndexName, strColumns, bIsUnique, bIsPrimary, bIsClustered) = row
            # indkey is a space-separated int2vector of column numbers.
            colList = self._fetchTableColumnsNamesByNums(strTableName, strColumns.split())
            ret.append((strIndexName, colList, bIsUnique, bIsPrimary, bIsClustered))
        return ret

    def getTableRelations(self, strTableName):
        """ Returns
            (strConstraintName, colName, fk_table, fk_columns, confupdtype, confdeltype)
            or []
        """
        strSql = """SELECT pcon.conname, pcon.conkey, pcla2.relname, pcon.confkey, pcon.confupdtype, pcon.confdeltype
        FROM pg_constraint pcon, pg_class pcla, pg_class pcla2
        WHERE pcla.relname = %s
        AND pcla.relkind = 'r'
        AND pcon.conrelid = pcla.oid
        AND pcon.confrelid = pcla2.oid
        AND pcon.contype = 'f'
        """
        self.cursor.execute(strSql, [strTableName])
        rows = self.cursor.fetchall()

        ret = []
        if not rows:
            return ret

        for row in rows:
            (strConstraintName, cols, fk_table, fkeys, chUpdateType, chDelType) = row
            # conkey/confkey arrive as '{1,2}' strings; strip the braces.
            cols = cols[1:-1]
            colList = self._fetchTableColumnsNamesByNums(strTableName, cols.split(','))
            fkeys = fkeys[1:-1]
            fkColList = self._fetchTableColumnsNamesByNums(fk_table, fkeys.split(','))
            ret.append((strConstraintName, colList, fk_table, fkColList, chUpdateType, chDelType))
        return ret

    def _fetchTableColumnsNamesByNums(self, strTableName, nums):
        """Resolve a list of pg attribute numbers to column names (one query each)."""
        ret = []
        for num in nums:
            strSql = """
                SELECT pa.attname
                FROM pg_attribute pa, pg_class pc
                WHERE pa.attrelid = pc.oid
                AND pa.attisdropped = 'f'
                AND pc.relname = %s
                AND pc.relkind = 'r'
                AND pa.attnum = %s
                ORDER BY pa.attnum
                """
            self.cursor.execute(strSql, [strTableName] + [num])
            rows = self.cursor.fetchall()
            ret.append(rows[0][0])
        return ret

    def _decodeLength(self, type, atttypmod):
        """Duplicate of decodeLength; kept for backward compatibility."""
        # gleamed from http://www.postgresql-websource.com/psql713/source-format_type.htm
        VARHDRSZ = 4
        if type == 'varchar':
            return (atttypmod - VARHDRSZ, None)
        if type == 'numeric':
            atttypmod -= VARHDRSZ
            return ( (atttypmod >> 16) & 0xffff, atttypmod & 0xffff)
        if type == 'varbit' or type == 'bit':
            return (atttypmod, None)
        return (None, None)

    def getViews(self, viewList):
        """ Returns the list of views as a array of strings """
        self.cursor.execute("""
            SELECT viewname
            FROM pg_views
            WHERE schemaname not in ('pg_catalog', 'information_schema')
            AND viewname not in ('pg_logdir_ls')""")
        return self._confirmReturns([x[0] for x in self.cursor.fetchall() ], viewList)

    def getViewsStandard(self, viewList):
        strQuery = """SELECT TABLE_NAME
            FROM INFORMATION_SCHEMA.TABLES
            WHERE TABLE_SCHEMA not in ('pg_catalog', 'information_schema')
            AND TABLE_NAME NOT LIKE 'pg_%' AND
            TABLE_TYPE = 'VIEW'"""
        #TODO add viewList constraint
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return self._confirmReturns([x[0] for x in rows], viewList)
        return []

    def getViewDefinition(self, strViewName):
        strQuery = "SELECT definition FROM pg_views WHERE viewname = %s"
        self.cursor.execute(strQuery, [strViewName])
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0]
        return []

    def getFunctions(self, functionList):
        """ Returns functions """
        #TODO: Add function list constraint
        strQuery = """SELECT proname
            FROM pg_proc pp, pg_language pl
            WHERE proname not in ('_get_parser_from_curcfg', 'ts_debug', 'pg_file_length', 'pg_file_rename')
            AND pl.oid = pp.prolang
            AND lower(pl.lanname) not in ('c', 'internal', 'sql')
            """
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return self._confirmReturns([x[0] for x in rows], functionList)
        return []

    def getFunctionsStandard(self, functionList):
        """ Returns functions """
        #TODO: Add function list constraint
        strQuery = """SELECT SPECIFIC_NAME
            FROM INFORMATION_SCHEMA.ROUTINES
            WHERE SPECIFIC_SCHEMA not in ('pg_catalog', 'information_schema')
            AND ROUTINE_NAME not in ('_get_parser_from_curcfg', 'ts_debug', 'pg_file_length', 'pg_file_rename')
            AND lower(external_language) not in ('c', 'internal') """
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return [x[0] for x in rows]
        return []

    def getFunctionDefinition(self, strSpecifiName):
        """ Returns (routineName, parameters, return, language, definition) """
        strQuery = """SELECT pp.proname, pp.prosrc, pt.typname, pl.lanname, pp.proargtypes
            FROM pg_proc pp, pg_type pt, pg_language pl
            WHERE proname = %s
            AND pt.oid = pp.prorettype
            AND pl.oid = pp.prolang"""
        self.cursor.execute(strQuery, [strSpecifiName])
        rows = self.cursor.fetchall()
        if not rows:
            return (None, None, None, None, None)

        strRoutineName, strDefinition, retType, strLanguage, strArgTypes = rows[0]
        retType = self._fixTypeNames(retType)

        # proargtypes is an oidvector; resolve each oid to a type name.
        argTypes = strArgTypes.split(',')
        strQuery = """SELECT typname FROM pg_type WHERE oid = %s"""
        params = []
        for typeNum in argTypes:
            self.cursor.execute(strQuery, [typeNum])
            row = self.cursor.fetchone()
            if row:
                params.append(self._fixTypeNames(row[0]))

        # proargnames only exists on PostgreSQL 8+; skip it on postgres7.
        if self.strDbms != 'postgres7':
            strQuery = """SELECT proargnames FROM pg_proc WHERE proname = %s"""
            argnames = []
            self.cursor.execute(strQuery, [strSpecifiName])
            argnames = self.cursor.fetchone()
            if argnames:
                argnames = argnames[0]
                if argnames != None:
                    argnames = argnames[1:-1]
                    argnames = argnames.split(',')
                    for nIndex, argName in enumerate(argnames):
                        params[nIndex] += ' ' + argName

        # Cleanup definition by removing the stuff we added.
        #strDefinition = re.compile('|'.join(repList), re.DOTALL | re.MULTILINE).sub('', strDefinition)
        strDefinition = re.compile(r'\s*DECLARE\s+.*BEGIN', re.DOTALL | re.MULTILINE).sub('BEGIN', strDefinition)
        return (strRoutineName, params, retType, strLanguage, strDefinition)
class DdlPostgres(DdlCommonInterface):
def __init__(self, strDbms):
DdlCommonInterface.__init__(self, strDbms)
self.params['max_id_len'] = { 'default' : 63 }
if self.dbmsType == 'postgres7':
self.params['change_col_type'] = [
'ALTER TABLE %(table_name)s ADD tmp_%(column_name)s %(column_type)s',
'UPDATE %(table_name)s SET tmp_%(column_name)s = %(column_name)s',
'ALTER TABLE %(table_name)s DROP %(column_name)s',
'ALTER TABLE %(table_name)s RENAME tmp_%(column_name)s TO %(column_name)s',
]
self.params['keywords'] = """
ALL AND ANY AS ASC AUTHORIZATION BETWEEN BINARY BOTH CASE CAST CHECK COLLATE COLUMN CONSTRAINT CREATE
CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEFAULT DEFERRABLE DESC DISTINCT ELSE
END EXCEPT FALSE FOR FOREIGN FREEZE FROM FULL GRANT GROUP HAVING ILIKE IN INITIALLY INNER INTERSECT
INTO IS ISNULL JOIN LEADING LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP NATURAL NEW NOT NOTNULL NULL
OFF OLD ON ONLY OR ORDER OUTER OVERLAPS PRIMARY REFERENCES RIGHT SELECT SESSION_USER SIMILAR SOME TABLE
THEN TO TRAILING TRUE UNION UNIQUE USER USING VERBOSE WHEN WHERE""".split()
def addFunction(self, strNewFunctionName, argumentList, strReturn, strContents, attribs, diffs):
newArgs = []
declares = []
if self.dbmsType == 'postgres7':
for nIndex, arg in enumerate(argumentList):
oneArg = arg.strip().split()
newArgs.append(oneArg[-1])
declares.append(' %s ALIAS FOR $%d;' % (oneArg[0], nIndex + 1))
else:
newArgs = argumentList
if len(declares) > 0:
match = re.compile('(\s*declare)(.*)', re.IGNORECASE | re.MULTILINE | re.DOTALL).match(strContents)
if match:
strContents = match.group(1) + '\n' + '\n'.join(declares) + match.group(2)
else:
strContents = 'DECLARE\n' + '\n'.join(declares) + "\n" + strContents
info = {
'functionname' : self.quoteName(strNewFunctionName),
'arguments' : ', '.join(newArgs),
'returns' : strReturn,
'contents' : strContents.replace("'", "''"),
}
if 'language' not in attribs:
info['language'] = ' LANGUAGE plpgsql'
else:
info['language'] = ' LANGUAGE %s' % (attribs['language'])
diffs.append(('Add view', # OR REPLACE
"CREATE FUNCTION %(functionname)s(%(arguments)s) RETURNS %(returns)s AS '\n%(contents)s'%(language)s" % info )
)
``` |
[
{
"content": "Here is the script:\n```python\nimport unittest\nimport doctest\n\nfrom zope.testing import doctestunit\nfrom zope.component import testing\nfrom Testing import ZopeTestCase as ztc\n\nfrom Products.Five import zcml\nfrom Products.Five import fiveconfigure\nfrom Products.PloneTestCase import PloneT... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nimport unittest\nimport doctest\n\nfrom zope.testing import doctestunit\nfrom zope.component import testing\nfrom Testing import ZopeTestCase as ztc\n\nfrom Products.Five import zcml\nfrom Products.Five import fiveconfigure\nfrom Products.PloneTestCa... | ```python
import unittest
import doctest
from zope.testing import doctestunit
from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import zcml
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
from Products.Five.testbrowser import Browser
import uwosh.initiatives
ztc.installProduct("uwosh.initiatives")
ptc.setupPloneSite(products=('uwosh.initiatives',))
class Session(dict):
def set(self, key, value):
self[key] = value
class TestCase(ptc.PloneTestCase):
def _setup(self):
super(TestCase, self)._setup()
self.setRoles(("Manager", "Member"))
self.app.REQUEST['SESSION'] = Session()
self.browser = Browser()
self.app.acl_users.userFolderAddUser('root', 'secret', ['Manager'], [])
self.browser.addHeader('Authorization', 'Basic root:secret')
self.portal_url = 'http://nohost/plone'
def afterSetUp(self):
super(TestCase, self).afterSetUp()
def setStatusCode(self, key, value):
from ZPublisher import HTTPResponse
HTTPResponse.status_codes[key.lower()] = value
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
zcml.load_config('configure.zcml', uwosh.initiatives)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='uwosh.initiatives',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='uwosh.initiatives.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='uwosh.initiatives',
# test_class=TestCase),
ztc.ZopeDocFileSuite(
'README.txt', package='uwosh.initiatives',
test_class=TestCase,
optionflags=doctest.REPORT_ONLY_FIRST_FAILURE | doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
``` |
[
{
"content": "```python\nfrom flask import render_template, flash, url_for, redirect, send_from_directory, jsonify, request\nfrom app import app, tmdb, lib\nfrom app.forms import SearchForm, TVSettingsForm, MovieSettingsForm, QualityForm, SettingsForm\nfrom app.models import Movie, TV, TVSeason, TVEpisode, TVEp... | [
{
"content": "<|memory_start|>```python\nfrom flask import render_template, flash, url_for, redirect, send_from_directory, jsonify, request\nfrom app import app, tmdb, lib\nfrom app.forms import SearchForm, TVSettingsForm, MovieSettingsForm, QualityForm, SettingsForm\nfrom app.models import Movie, TV, TVSeason,... | ```python
from flask import render_template, flash, url_for, redirect, send_from_directory, jsonify, request
from app import app, tmdb, lib
from app.forms import SearchForm, TVSettingsForm, MovieSettingsForm, QualityForm, SettingsForm
from app.models import Movie, TV, TVSeason, TVEpisode, TVEpisodeFile, MovieFile, Settings, Log
from app.library import Library
from datetime import datetime
import urllib
@app.route('/tv')
def tvm():
return render_template("tv/index.html", title='TV', heading='TV', search_form=SearchForm(),
tv_shows=TV.query.filter_by(in_library=True).all(),
is_grid=request.cookies.get('mm_tv_sort') == 'grid')
@app.route('/tv/<tmdb_id>')
def tvp(tmdb_id):
tv = TV.query.filter_by(tmdb_id=tmdb_id).first()
if tv is None:
data = tmdb.get_tv(tmdb_id)
if not data:
flash('TV show %s not found.' % tmdb_id)
return redirect('/')
tv = TV(data)
imdb = 'http://www.dereferer.org/?' + urllib.parse.quote_plus('http://www.imdb.com/title/' + tv.imdb_id)
settings_form = TVSettingsForm()
settings_form.quality.data = tv.search_quality
return render_template('tv/view.html', title=tv.title,
heading=tv.title, media=tv, search_form=SearchForm(),
settings_form=settings_form, imdb_link=imdb, refreshing=Library.refreshing_tv(tmdb_id))
@app.route('/tv/watch/<tmdb_id>', methods=['GET', 'POST'])
def watch_tv(tmdb_id):
tv = TV.query.filter_by(tmdb_id=tmdb_id).first()
if tv is None:
data = tmdb.get_tv(tmdb_id)
if not data:
return jsonify({'result': False, 'data': 'TV show not found'})
tv = TV(data)
tv.watching = True
tv.save()
return jsonify({'result': True, 'data': 'TV show updated: now watching'})
@app.route('/tv/unwatch/<tmdb_id>', methods=['GET', 'POST'])
def unwatch_tv(tmdb_id):
tv = TV.query.filter_by(tmdb_id=tmdb_id).first()
if tv is None:
data = tmdb.get_tv(tmdb_id)
if not data:
return jsonify({'result': False, 'data': 'TV show not found'})
tv = TV(data)
tv.watching = False
tv.save()
return jsonify({'result': True, 'data': 'TV show updated: not watching'})
# TODO: Implement the TV manual search functionality
@app.route('/tv/search/<tmdb_id>', methods=['GET', 'POST'])
def research_tv(tmdb_id):
tv = TV.query.filter_by(tmdb_id=tmdb_id).first()
if tv is None:
data = tmdb.get_tv(tmdb_id)
if not data:
return jsonify({'result': False, 'data': 'TV show manual search scheduled'})
tv = TV(data)
return jsonify({'result': True, 'data': 'TV show updated'})
# TODO: Implement the TV refresh functionality
@app.route('/tv/refresh/<tmdb_id>', methods=['GET', 'POST'])
def refresh_tv(tmdb_id):
tv = TV.query.filter_by(tmdb_id=tmdb_id).first()
if tv is None:
data = tmdb.get_tv(tmdb_id)
if not data:
return jsonify({'result': False, 'data': 'TV show not found'})
tv = TV(data)
Library.refresh_tv_item(tv)
return jsonify({'result': True, 'data': 'TV show refresh scheduled'})
@app.route('/tv/refresh', methods=['GET', 'POST'])
def refresh_library_tv_all():
lib.refresh_tv_all()
return jsonify({'result': True, 'data': 'TV library refresh scheduled'})
@app.route('/tv/refresh_status/<tmdb_id>', methods=['GET', 'POST'])
def refresh_tv_status(tmdb_id):
return jsonify({'result': Library.refreshing_tv(tmdb_id), 'data': 'TV refresh status'})
@app.route('/tv/refresh_status', methods=['GET', 'POST'])
def refresh_tv_status_all(tmdb_id):
return jsonify({'result': Library.refreshing_tv(), 'data': 'TV refresh status'})
@app.route('/tv/add/<tmdb_id>', methods=['GET', 'POST'])
def add_tv(tmdb_id):
data = tmdb.get_tv(tmdb_id)
if not data:
return jsonify({'result': False, 'data': 'TV show not found'})
tv = TV.query.filter_by(tmdb_id=tmdb_id).first()
if tv is None:
tv = TV(data)
tv.in_library = True
tv.offline = True
tv.added = datetime.now()
tv.save()
for season in data['seasons']:
s = tmdb.get_tv_season(tmdb_id, season['season_number'])
if s is not None:
tvs = TVSeason(tv.id, s['season_number'])
tvs.populate(s)
tvs.save()
for episode in s['episodes']:
eps = TVEpisode(tv.id, tvs.id)
eps.populate(episode)
eps.save()
return jsonify({'result': True, 'data': 'TV show added to library'})
@app.route('/tv/save/<tmdb_id>', methods=['GET', 'POST'])
def save_tv(tmdb_id):
tv = TV.factory(tmdb_id=tmdb_id)
if tv is None:
flash('TV show not found')
return redirect('/')
form = TVSettingsForm()
if form.validate_on_submit():
tv.search_quality = form.quality.data
tv.save()
flash('TV show data saved')
return redirect('/tv/' + tmdb_id)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# coding: utf-8\nfrom content_plugin import ContentPlugin\nfrom z_whoosh import Whoosh\nfrom gluon import current, URL\nfrom gluon.storage import Storage\nfrom gluon.cache import Cache\nimport perms\n\n\nclass Application(object):\n\n def __init__(self)... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# coding: utf-8\nfrom content_plugin import ContentPlugin\nfrom z_whoosh import Whoosh\nfrom gluon import current, URL\nfrom gluon.storage import Storage\nfrom gluon.cache import Cache\nimport perms\n\n\nclass Application(object):\n\n de... | ```python
# coding: utf-8
from content_plugin import ContentPlugin
from z_whoosh import Whoosh
from gluon import current, URL
from gluon.storage import Storage
from gluon.cache import Cache
import perms
class Application(object):
def __init__(self):
super(Application, self).__init__()
# copy current context
self.db = current.db
self.T = current.T
self.auth = current.auth
self.request = current.request
self.response = current.response
self.session = current.session
self.mail = current.mail
self.conf = current.conf
self.registry = Storage()
self.cache = Cache(self.request)
def registerContentType(self, item_type, plug):
"""
Register a ContentPlugin for an Item Type
"""
assert isinstance(plug, ContentPlugin)
self.registry[item_type] = plug
plug.setController(self)
def getContentType(self, item_type):
return self.registry[item_type]
def getItemByUUID(self, unique_id):
db = self.db
query = (db.item.unique_id == unique_id)
item = db(query).select().first()
return item
def exportItem(self, item_id, export_dir):
"""
Put on export_dir all the item memta-data and content
"""
import os.path
import os
item = self.getItemByUUID(item_id)
meta_file = os.path.join(export_dir, "meta.json")
with open(meta_file, 'w') as f:
f.write(item.as_json())
ct = self.getContentType(item.item_type)
os.mkdir(os.path.join(export_dir, "content"))
ct.export(item, os.path.join(export_dir, "content"))
def canUpdateItem(self, unique_id, user=None):
item = self.getItemByUUID(unique_id)
desk = self.db(
self.db.desk.item_list.contains(item.id)).select().first()
is_owner = self.isOwner(unique_id, user=user) and (
desk.id == self.getUserDesk().id)
can_update_desk = self.auth.has_permission(
'update_items', self.db.desk, desk.id) or self.auth.has_permission(
'owner', self.db.desk, desk.id) or self.auth.has_permission(
'update', self.db.desk, desk.id)
return (is_owner or can_update_desk) and (item.id in desk.item_list)
def canReadItem(self, unique_id, user=None):
item = self.getItemByUUID(unique_id)
desk = self.db(
self.db.desk.item_list.contains(item.id)).select().first()
can_read_desk = self.auth.has_permission(
'read', self.db.desk, desk.id) or self.auth.has_permission(
'owner', self.db.desk, desk.id) or self.auth.has_permission(
'update', self.db.desk, desk.id)
return can_read_desk and (item.id in desk.item_list)
def isOwner(self, unique_id, user=None):
"""
Returns True if user is the owner of the item
"""
item = self.getItemByUUID(unique_id)
if item is None:
return False
if user is None:
return perms.isOwner(item.id)
return self.auth.has_permission(
'owner', self.db.item, record_id=item.id, user_id=user.id)
def getUserDesk(self, user=None):
db = self.db
auth = self.auth
if user is None:
user = auth.user
# setup user desk if necessary.
user_desk = db(
auth.accessible_query('owner', db.desk, user.id)).select().first()
if user_desk is None:
name = self.T("%s desk", (auth.user.first_name,))
desk_id = db.desk.insert(name=name)
g_id = auth.user_group(auth.user.id)
auth.add_permission(g_id, 'owner', db.desk, desk_id)
user_desk = db.desk(desk_id)
return user_desk
def indexItem(self, item_id, user=None):
"""
Add/update item to the user search index
"""
if user is None:
user = self.auth.user
item = self.getItemByUUID(item_id)
ct = self.getContentType(item.item_type)
text = ct.get_full_text(item)
w = Whoosh(str(user.id))
w.add_to_index(unicode(item_id), text)
def createItem(self, content_type, values):
db = self.db
auth = self.auth
values['item_type'] = content_type
item_id = db.item.insert(**db.item._filter_fields(values))
# give owner perm to the item
auth.add_permission(0, 'owner', db.item, item_id)
# add the item to the user desk
user_desk = self.getUserDesk()
item_list = user_desk.item_list
item_list.insert(0, item_id)
user_desk.update_record(item_list=item_list)
# --
# create te content instance
ct = self.getContentType(content_type)
ct.create_content(db.item(item_id))
# --
return db.item(item_id).unique_id
def getItemURL(self, unique_id):
item = self.getItemByUUID(unique_id)
c = "plugin_{}".format(item.item_type)
f = "index.html"
return URL(c=c, f=f, args=[item.unique_id])
def getContentChangesURL(self, unique_id):
item = self.getItemByUUID(unique_id)
c = "plugin_{}".format(item.item_type)
f = "changelog.html"
return URL(c=c, f=f, args=[item.unique_id])
def notifyChanges(self, item_id):
response = self.response
auth = self.auth
T = self.T
item = self.getItemByUUID(item_id)
message = response.render(
'changes_email.txt',
dict(item=item, user=auth.user)
)
subject = T("Changes on %s") % (item.headline,)
self.notifyCollaborators(
item.unique_id,
subject,
message
)
def getCollaborators(self, item_id, exclude_current=True):
"""
Given a item returns the list of user who have access to item.
"""
db = self.db
auth = self.auth
item = self.getItemByUUID(item_id)
desk = self.db(
self.db.desk.item_list.contains(item.id)).select().first()
query = (db.auth_permission.record_id == desk.id)
query &= (db.auth_permission.name != 'push_items')
query &= (db.auth_permission.table_name == db.desk)
query &= (db.auth_permission.group_id == db.auth_membership.group_id)
query &= (db.auth_user.id == db.auth_membership.user_id)
if exclude_current:
query &= (db.auth_user.id != auth.user.id)
return db(query).select(
db.auth_user.ALL,
distinct=True,
cache=(self.cache.ram, 30),
cacheable=True)
def notifyCollaborators(self, item_id, subject, message):
db = self.db
auth = self.auth
item = self.getItemByUUID(item_id)
myusers = self.getCollaborators(item.unique_id)
for u in myusers:
db.notification.insert(
subject=subject,
message_content=message,
from_user=auth.user.id,
to_user=u.id
)
def shareItem(self, item_id, src_desk, dst_desk):
"""
Move item_id from src_desk to dst_desk
"""
item = self.getItemByUUID(item_id)
src = self.db.desk(src_desk)
dst = self.db.desk(dst_desk)
src_list = src.item_list
src_list.remove(item.id)
src.update_record(item_list=src_list)
dst_list = dst.item_list
dst_list.insert(0, item.id)
dst.update_record(item_list=dst_list)
self.notifyChanges(item_id)
return
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# (C) British Crown Copyright 2012 - 2014, Met Office\n#\n# This file is part of Iris.\n#\n# Iris is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License a... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# (C) British Crown Copyright 2012 - 2014, Met Office\n#\n# This file is part of Iris.\n#\n# Iris is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General ... | ```python
# (C) British Crown Copyright 2012 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# Import Iris tests first so that some things can be initialised before importing anything else.
import iris.tests as tests
from . import extest_util
with extest_util.add_examples_to_path():
import SOI_filtering
class TestSOIFiltering(tests.GraphicsTest):
"""Test the SOI_filtering example code."""
def test_soi_filtering(self):
with extest_util.show_replaced_by_check_graphic(self):
SOI_filtering.main()
if __name__ == '__main__':
tests.main()
``` |
[
{
"content": "Here is a code snippet:\n```python\nimport sys\nfrom math import log, ceil\n\ndef set_config(rounds, teams, closeness, slots):\n global NUMROUNDS, NUMTEAMS, CLOSENESS, NUMSLOTS\n NUMROUNDS = rounds # How many rounds to schedule in the competition\n NUMTEAMS = teams # The number ... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nimport sys\nfrom math import log, ceil\n\ndef set_config(rounds, teams, closeness, slots):\n global NUMROUNDS, NUMTEAMS, CLOSENESS, NUMSLOTS\n NUMROUNDS = rounds # How many rounds to schedule in the competition\n NUMTEAMS = teams ... | ```python
import sys
from math import log, ceil
def set_config(rounds, teams, closeness, slots):
global NUMROUNDS, NUMTEAMS, CLOSENESS, NUMSLOTS
NUMROUNDS = rounds # How many rounds to schedule in the competition
NUMTEAMS = teams # The number of teams taking part
CLOSENESS = closeness # Minimum number of matches between each teams
# appearance.
NUMSLOTS = slots # Number of slots per match
def compute_bitwidths():
global NUMROUNDS, NUMTEAMS, CLOSENESS, NUMSLOTS
global NUMMATCHES, ROUNDBITS, MATCHBITS, SLOTBITS, TEAMBITS
NUMMATCHES = NUMTEAMS / NUMSLOTS
# Underlying bitwidths, computed from other parameters
ROUNDBITS = int(ceil(log(NUMROUNDS, 2)))
MATCHBITS = int(ceil(log(NUMMATCHES, 2)))
SLOTBITS = int(ceil(log(NUMSLOTS, 2)))
TEAMBITS = int(ceil(log(NUMTEAMS, 2)))
# Validation
def validate_config():
global NUMROUNDS, NUMTEAMS, CLOSENESS, NUMSLOTS
if (NUMTEAMS % NUMSLOTS) != 0:
print >>sys.stderr, "Num of teams does not divide by number of matches"
sys.exit(1)
if CLOSENESS >= NUMMATCHES:
print >>sys.stderr, "Match close constraints allows no matches"
sys.exit(1)
if (NUMSLOTS % 4) != 0:
print >>sys.stderr, "Number of slots is not a multiple of four. The world will end."
sys.exit(1)
``` |
[
{
"content": "Here is some code:\n```python\n# Copyright (C) 2015 KillerInstinct\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your opt... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n# Copyright (C) 2015 KillerInstinct\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or... | ```python
# Copyright (C) 2015 KillerInstinct
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Multiple_UA(Signature):
name = "multiple_useragents"
description = "Network activity contains more than one unique useragent."
severity = 3
categories = ["network"]
authors = ["KillerInstinct"]
minimum = "1.2"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.useragents = list()
self.procs = list()
filter_apinames = set(["InternetOpenA", "InternetOpenW"])
def on_call(self, call, process):
# Dict whitelist with process name as key, and useragents as values
whitelist = {
"acrord32.exe": ["Mozilla/3.0 (compatible; Acrobat 5.0; Windows)"],
"iexplore.exe": ["VCSoapClient", "Shockwave Flash"],
}
ua = self.get_argument(call, "Agent")
proc = process["process_name"].lower()
if proc in whitelist.keys() and ua in whitelist[proc]:
return None
else:
if ua not in self.useragents:
if self.results["target"]["category"] == "file" or proc != "iexplore.exe":
self.useragents.append(ua)
self.procs.append((process["process_name"], ua))
def on_complete(self):
if len(self.useragents) < 2:
return False
for item in self.procs:
self.data.append({"Process" : item[0]})
self.data.append({"User-Agent" : item[1]})
return True
``` |
[
{
"content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n\n## This file is part of Invenio.\n## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU G... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n## This file is part of Invenio.\n## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the te... | ```python
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301,W0703
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import os
import re
import time
import string
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from six import iteritems, string_types
## import Invenio stuff:
from invenio.base.globals import cfg
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_SCOAP3_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_BIBSORT_ENABLED, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BASE_URL, \
CFG_BIBFORMAT_HIDDEN_TAGS
try:
from invenio.config import CFG_BIBSORT_DEFAULT_FIELD, \
CFG_BIBSORT_DEFAULT_FIELD_ORDER
except ImportError:
CFG_BIBSORT_DEFAULT_FIELD = 'latest first'
CFG_BIBSORT_DEFAULT_FIELD_ORDER = 'd'
from invenio.modules.search.errors import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError
from invenio.legacy.bibrecord import (get_fieldvalues,
get_fieldvalues_alephseq_like)
from .utils import record_exists
from invenio.legacy.bibrecord import create_record, record_xml_output
from invenio.legacy.bibrank.record_sorter import (
get_bibrank_methods,
is_method_valid,
rank_records as rank_records_bibrank,
rank_by_citations)
from invenio.legacy.bibrank.downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.legacy.bibindex.engine_stemmer import stem
from invenio.modules.indexer.tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.modules.indexer.tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.legacy.bibindex.engine_utils import author_name_requires_phrase_search, \
get_field_tags
from invenio.legacy.bibindex.engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.legacy.bibindex.engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.legacy.bibindex.adminlib import get_idx_indexer
from invenio.modules.formatter import format_record, format_records, get_output_format_content_type, create_excel
from invenio.legacy.bibrank.downloads_grapher import create_download_history_graph_and_box
from invenio.modules.knowledge.api import get_kbr_values
from invenio.legacy.miscutil.data_cacher import DataCacher
from invenio.legacy.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.modules.access.control import acc_get_action_id
from invenio.modules.access.local_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.legacy.websearch.adminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from intbitset import intbitset
from invenio.legacy.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.modules.access.engine import acc_authorize_action
from invenio.ext.logging import register_exception
from invenio.ext.cache import cache
from invenio.utils.text import encode_for_xml, wash_for_utf8, strip_accents
from invenio.utils.html import get_mathjax_header
from invenio.utils.html import nmtoken_from_string
from invenio.legacy import bibrecord
import invenio.legacy.template
webstyle_templates = invenio.legacy.template.load('webstyle')
webcomment_templates = invenio.legacy.template.load('webcomment')
websearch_templates = invenio.legacy.template.load('websearch')
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, \
get_refersto_hitset, get_citedby_hitset, get_cited_by_list, \
get_refers_to_list, get_citers_log
from invenio.legacy.bibrank.citation_grapher import create_citation_history_graph_and_box
from invenio.legacy.bibrank.selfcites_searcher import get_self_cited_by_list, \
get_self_cited_by, \
get_self_refers_to_list
from invenio.legacy.dbquery import run_sql, run_sql_with_limit, \
wash_table_column_name, get_table_update_time
from invenio.legacy.webuser import getUid, collect_user_info, session_param_set
from invenio.legacy.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.base.i18n import gettext_set_language
from invenio.legacy.search_engine.query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio.utils import apache
from invenio.legacy.miscutil.solrutils_bibindex_searcher import solr_get_bitset
from invenio.legacy.miscutil.xapianutils_bibindex_searcher import xapian_get_bitset
from invenio.modules.search import services
from invenio.legacy.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.legacy.bibauthorid.config import LIMIT_TO_COLLECTIONS as BIBAUTHORID_LIMIT_TO_COLLECTIONS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
"search_info" : "I",
"overview" : "O",
"all_portalboxes" : "P",
"te_portalbox" : "Pte",
"tp_portalbox" : "Ptp",
"np_portalbox" : "Pnp",
"ne_portalbox" : "Pne",
"lt_portalbox" : "Plt",
"rt_portalbox" : "Prt",
"search_services": "SER"};
class RestrictedCollectionDataCacher(DataCacher):
    """Cache of the names of all restricted collections.

    The cache holds every 'collection' argument value attached to the
    VIEWRESTRCOLL action; it is refreshed whenever the underlying
    access-control tables change.
    """
    def __init__(self):
        def cache_filler():
            """Load the restricted collection names from the ACC tables."""
            rows = run_sql("""SELECT DISTINCT ar.value
                FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
                WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
            return [row[0] for row in rows]

        def timestamp_verifier():
            # the cache is stale as soon as either ACC table was modified
            return max(get_table_update_time('accROLE_accACTION_accARGUMENT'),
                       get_table_update_time('accARGUMENT'))

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
    """Return True when COLLECTION is listed among the restricted collections."""
    cacher = restricted_collection_cache
    if recreate_cache_if_needed:
        cacher.recreate_cache_if_needed()
    return collection in cacher.cache
try:
    # Module-level singleton: if the name already exists (e.g. on module
    # reload) keep the existing instance; the NameError raised on first
    # import triggers the one-time construction below.
    restricted_collection_cache.is_ok_p
except NameError:
    restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
    """Just like zip(), but returns lists of lists instead of lists of tuples

    Example:
    zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
       [(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
    ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
       [[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]

    FIXME: This is handy to have, and should live somewhere else, like
    miscutil.really_useful_functions or something.
    XXX: Starting in python 2.6, the same can be achieved (faster) by
    using itertools.izip_longest(); when the minimum recommended Python
    is bumped, we should use that instead.
    """
    def l(*items):
        return list(items)
    # list() guarantees the documented "list of lists" return value even on
    # Python 3, where a bare map() would return a lazy iterator.
    return list(map(l, *lists))
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
    """Return a list of collection that are restricted but for which the user
    is authorized."""
    if recreate_cache_if_needed:
        restricted_collection_cache.recreate_cache_if_needed()
    restricted = restricted_collection_cache.cache
    # one batched authorization call for all restricted collections at once
    auths = acc_authorize_action(
        user_info,
        'viewrestrcoll',
        batch_args=True,
        collection=restricted
    )
    # auth[0] == 0 means "authorized"
    return [name for name, auth in zip(restricted, auths) if auth[0] == 0]
def get_all_restricted_recids():
    """
    Return the set of all the restricted recids, i.e. the ids of those records
    which belong to at least one restricted collection.
    """
    all_restricted = intbitset()
    for name in restricted_collection_cache.cache:
        all_restricted |= get_collection_reclist(name)
    return all_restricted
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
    """
    Return the list of restricted collection names to which recid belongs.
    """
    if recreate_cache_if_needed:
        restricted_collection_cache.recreate_cache_if_needed()
        collection_reclist_cache.recreate_cache_if_needed()
    matching = []
    for name in restricted_collection_cache.cache:
        # caches were refreshed above (if requested), so skip re-checking here
        if recid in get_collection_reclist(name, recreate_cache_if_needed=False):
            matching.append(name)
    return matching
def is_user_owner_of_record(user_info, recid):
    """
    Check if the user is owner of the record, i.e. he is the submitter
    and/or belongs to a owner-like group authorized to 'see' the record.

    @param user_info: the user_info dictionary that describe the user.
    @type user_info: user_info dictionary
    @param recid: the record identifier.
    @type recid: positive integer
    @return: True if the user is 'owner' of the record; False otherwise
    @rtype: bool
    """
    # gather every e-mail / group name granted author rights via MARC tags
    owner_grants = []
    for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
        owner_grants.extend(get_fieldvalues(recid, tag))
    user_email = user_info['email'].strip().lower()
    for grant in owner_grants:
        if grant in user_info['group']:
            return True
        if user_email == grant.strip().lower():
            return True
        # the egroup might be in the form egroup@cern.ch
        if CFG_CERN_SITE and grant.replace('@cern.ch', ' [CERN]') in user_info['group']:
            return True
    return False
###FIXME: This method needs to be refactorized
def is_user_viewer_of_record(user_info, recid):
    """
    Check if the user is allow to view the record based in the marc tags
    inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
    i.e. his email is inside the 506__m tag or he is inside an e-group listed
    in the 506__m tag

    @param user_info: the user_info dictionary that describe the user.
    @type user_info: user_info dictionary
    @param recid: the record identifier.
    @type recid: positive integer
    @return: True if the user is 'allow to view' the record; False otherwise
    @rtype: bool
    """
    viewer_grants = []
    for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
        viewer_grants.extend(get_fieldvalues(recid, tag))
    user_email = user_info['email'].strip().lower()
    for grant in viewer_grants:
        if grant in user_info['group']:
            return True
        if user_email == grant.strip().lower():
            return True
    return False
def check_user_can_view_record(user_info, recid):
    """
    Check if the user is authorized to view the given recid. The function
    grants access in two cases: either user has author rights on this
    record, or he has view rights to the primary collection this record
    belongs to.

    @param user_info: the user_info dictionary that describe the user.
    @type user_info: user_info dictionary
    @param recid: the record identifier.
    @type recid: positive integer
    @return: (0, ''), when authorization is granted, (>0, 'message') when
    authorization is not granted
    @rtype: (int, string)
    """
    # policy 'ANY': one authorized restricted collection suffices;
    # any other value: the user must be authorized to every restricted
    # collection the record belongs to.
    policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
    if isinstance(recid, str):
        recid = int(recid)
    ## At this point, either webcoll has not yet run or there are some
    ## restricted collections. Let's see first if the user own the record.
    if is_user_owner_of_record(user_info, recid):
        ## Perfect! It's authorized then!
        return (0, '')
    if is_user_viewer_of_record(user_info, recid):
        ## Perfect! It's authorized then!
        return (0, '')
    restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
    if not restricted_collections and record_public_p(recid):
        ## The record is public and not part of any restricted collection
        return (0, '')
    if restricted_collections:
        ## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
        auth_code, auth_msg = 0, ''
        for collection in restricted_collections:
            (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
            if auth_code and policy != 'ANY':
                ## Ouch! the user is not authorized to this collection
                return (auth_code, auth_msg)
            elif auth_code == 0 and policy == 'ANY':
                ## Good! At least one collection is authorized
                return (0, '')
        ## Depending on the policy, the user will be either authorized or not
        ## (auth_code/auth_msg hold the result of the last collection checked)
        return auth_code, auth_msg
    if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
        ## the record is not in any restricted collection
        return (0, '')
    elif record_exists(recid) > 0:
        ## We are in the case where webcoll has not run.
        ## Let's authorize SUPERADMIN
        (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
        if auth_code == 0:
            return (0, '')
        else:
            ## Too bad. Let's print a nice message:
            return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
until the assignment will be fully completed. Please come back later to
properly access this record.""")
    else:
        ## The record either does not exists or has been deleted.
        ## Let's handle these situations outside of this code.
        return (0, '')
class IndexStemmingDataCacher(DataCacher):
    """
    Provides cache for stemming information for word/phrase indexes.
    This class is not to be used directly; use function
    get_index_stemming_language() instead.
    """
    def __init__(self):
        def cache_filler():
            """Map index id -> stemming language; empty dict on DB trouble."""
            try:
                rows = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
            except DatabaseError:
                # database problems, return empty cache
                return {}
            return dict(rows)

        def timestamp_verifier():
            return get_table_update_time('idxINDEX')

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
    # Module-level singleton; survives reloads.  NOTE(review): the sibling
    # guard above catches NameError only, while this one catches any
    # Exception -- kept as-is.
    index_stemming_cache.is_ok_p
except Exception:
    index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
    """Return the stemming language configured for index INDEX_ID."""
    cacher = index_stemming_cache
    if recreate_cache_if_needed:
        cacher.recreate_cache_if_needed()
    return cacher.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
    """
    Provides cache for tokenizer information for fields corresponding to indexes.
    This class is not to be used directly; use function
    get_field_tokenizer_type() instead.
    """
    def __init__(self):
        def cache_filler():
            """Map field code -> tokenizer of the index covering it."""
            try:
                rows = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
            except DatabaseError:
                # database problems, return empty cache
                return {}
            return dict(rows)

        def timestamp_verifier():
            return get_table_update_time('idxINDEX')

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
    # Module-level singleton; construct only when the name does not exist yet.
    field_tokenizer_cache.is_ok_p
except Exception:
    field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
    """Return tokenizer type for given field corresponding to an index if applicable.

    @param field_name: logical field code to look up.
    @param recreate_cache_if_needed: refresh the cache first when True.
    @return: the tokenizer name, or None when no index covers the field.
    """
    if recreate_cache_if_needed:
        field_tokenizer_cache.recreate_cache_if_needed()
    # dict.get replaces the original try/except KeyError dance; unknown
    # fields still yield None, preserving the previous behaviour.
    return field_tokenizer_cache.cache.get(field_name)
class CollectionRecListDataCacher(DataCacher):
    """
    Provides cache for collection reclist hitsets. This class is not
    to be used directly; use function get_collection_reclist() instead.
    """
    def __init__(self):
        def cache_filler():
            # map every collection name to None; the actual reclists are
            # computed lazily by get_collection_reclist() on first use
            return dict((row[0], None)
                        for row in run_sql("SELECT name FROM collection"))

        def timestamp_verifier():
            return get_table_update_time('collection')

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
    if not collection_reclist_cache.is_ok_p:
        raise Exception
except Exception:
    # not yet created (NameError) or flagged not-ok: (re)build the singleton
    collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
    """Return hitset of recIDs that belong to the collection 'coll'."""
    cacher = collection_reclist_cache
    if recreate_cache_if_needed:
        cacher.recreate_cache_if_needed()
    if coll not in cacher.cache:
        # collection does not exist; return empty set
        return intbitset()
    if not cacher.cache[coll]:
        # reclist not computed yet: fetch the serialized bitset and memoize it
        hitset = intbitset()
        res = run_sql("SELECT nbrecs,reclist FROM collection WHERE name=%s", (coll, ), 1)
        if res and res[0][1]:
            hitset = intbitset(res[0][1])
        cacher.cache[coll] = hitset
    return cacher.cache[coll]
def get_available_output_formats(visible_only=False):
    """
    Return the list of available output formats. When visible_only is
    True, returns only those output formats that have visibility flag
    set to 1.
    """
    query = "SELECT code,name FROM format"
    if visible_only:
        query += " WHERE visibility='1'"
    query += " ORDER BY name ASC"
    rows = run_sql(query)
    if not rows:
        # no formats defined in the DB: fall back to HTML brief
        return [{'value': 'hb',
                 'text': "HTML brief"
                 }]
    # propose found formats:
    return [{'value': code, 'text': name} for code, name in rows]
# Flask cache for search results.
from invenio.modules.search.cache import search_results_cache, get_search_results_cache_key
class CollectionI18nNameDataCacher(DataCacher):
    """
    Provides cache for I18N collection names. This class is not to be
    used directly; use function get_coll_i18nname() instead.
    """
    def __init__(self):
        def cache_filler():
            """Build {collection_name: {language: translated_name}}."""
            try:
                rows = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
            except Exception:
                # database problems
                return {}
            names = {}
            for coll, lang, translation in rows:
                if translation:
                    names.setdefault(coll, {})[lang] = translation
            return names

        def timestamp_verifier():
            return get_table_update_time('collectionname')

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
    if not collection_i18nname_cache.is_ok_p:
        raise Exception
except Exception:
    # not yet created (NameError) or flagged not-ok: (re)build the singleton
    collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
    """
    Return nicely formatted collection name (of the name type `ln'
    (=long name)) for collection C in language LN.

    This function uses collection_i18nname_cache, but it verifies
    whether the cache is up-to-date first by default.  This
    verification step is performed by checking the DB table update
    time, so it can get slow when called many times in a loop.  Pass
    VERIFY_CACHE_TIMESTAMP=False to skip the check and trust the cache
    (useful e.g. when generating collection lists for the search
    results page).
    """
    if verify_cache_timestamp:
        collection_i18nname_cache.recreate_cache_if_needed()
    try:
        return collection_i18nname_cache.cache[c][ln]
    except KeyError:
        # translation in LN does not exist; fall back to the raw name
        return c
class FieldI18nNameDataCacher(DataCacher):
    """
    Provides cache for I18N field names. This class is not to be used
    directly; use function get_field_i18nname() instead.
    """
    def __init__(self):
        def cache_filler():
            """Build {field_name: {language: translated_name}}."""
            try:
                rows = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
            except Exception:
                # database problems, return empty cache
                return {}
            names = {}
            for field, lang, translation in rows:
                if translation:
                    names.setdefault(field, {})[lang] = translation
            return names

        def timestamp_verifier():
            return get_table_update_time('fieldname')

        DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
    if not field_i18nname_cache.is_ok_p:
        raise Exception
except Exception:
    # not yet created (NameError) or flagged not-ok: (re)build the singleton
    field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
    """
    Return nicely formatted field name (of type 'ln', 'long name') for
    field F in language LN.

    If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
    and field I18N name cache timestamp and refresh cache from the DB
    if needed.  Otherwise don't bother checking DB timestamp and
    return the cached value.  (This is useful when get_field_i18nname
    is called inside a loop.)
    """
    if verify_cache_timestamp:
        field_i18nname_cache.recreate_cache_if_needed()
    try:
        return field_i18nname_cache.cache[f][ln]
    except KeyError:
        # translation in LN does not exist; fall back to the raw name
        return f
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
    """Returns nicely ordered (score respected) list of collections, more exactly list of tuples
       (collection name, printable collection name).
       Suitable for create_search_box()."""
    out = []
    for row in run_sql("SELECT name FROM collection ORDER BY name ASC"):
        name = row[0]
        # make a nice printable name (e.g. truncate long collection
        # names in the given language):
        fullname = get_coll_i18nname(name, ln, False)
        printable = wash_index_term(fullname, 30, False)
        if printable != fullname:
            printable = printable + "..."
        if level:
            printable = " " + level * '-' + " " + printable
        out.append([name, printable])
    return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
    """Returns nicely ordered (score respected) list of collections, more exactly list of tuples
       (collection name, printable collection name).
       Suitable for create_search_box()."""
    ordered = []
    rows = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
                     WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score ASC""", (collid, ))
    for name, son_id in rows:
        # make a nice printable name (e.g. truncate long collection
        # names in the given language):
        fullname = get_coll_i18nname(name, ln, False)
        printable = wash_index_term(fullname, 30, False)
        if printable != fullname:
            printable = printable + "..."
        if level:
            printable = " " + level * '-' + " " + printable
        ordered.append([name, printable])
        # recurse into the sons of this collection, one level deeper
        ordered = ordered + get_nicely_ordered_collection_list(son_id, level + 1, ln=ln)
    return ordered
def get_index_id_from_field(field):
    """
    Return index id with name corresponding to FIELD, or the first
    index id where the logical field code named FIELD is indexed.

    Return zero in case there is no index defined for this field.

    Example: field='author', output=4.
    """
    if not field:
        field = 'global' # empty string field means 'global' index (field 'anyfield')
    # first look in the index table:
    res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
    if res:
        return res[0][0]
    # not found in the index table, now look in the field table:
    res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
                     WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
                     LIMIT 1""", (field,))
    if res:
        return res[0][0]
    return 0
def get_words_from_pattern(pattern):
    """
    Returns list of whitespace-separated words from pattern, removing any
    trailing punctuation-like signs from words in pattern.
    """
    # clean trailing punctuation signs inside pattern
    washed = re_punctuation_followed_by_space.sub(' ', pattern)
    # keep a single occurrence of every word (dict used as a set)
    return dict.fromkeys(washed.split()).keys()
def create_basic_search_units(req, p, f, m=None, of='hb'):
    """Splits search pattern and search field into a list of independently searchable units.
       - A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
          'operator' is set union (|), set intersection (+) or set exclusion (-);
          'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
          'field' is either a code like 'title' or MARC tag like '100__a';
          'type' is the search type ('w' for word file search, 'a' for access file search).
       - Optionally, the function accepts the match type argument 'm'.
         If it is set (e.g. from advanced search interface), then it
         performs this kind of matching.  If it is not set, then a guess is made.
         'm' can have values: 'a'='all of the words', 'o'='any of the words',
                              'p'='phrase/substring', 'r'='regular expression',
                              'e'='exact value'.
       - Warnings are printed on req (when not None) in case of HTML output formats."""
    opfts = [] # will hold (o,p,f,t,h) units
    # FIXME: quick hack for the journal index
    if f == 'journal':
        opfts.append(['+', p, f, 'w'])
        return opfts
    ## check arguments: is desired matching type set?
    if m:
        ## A - matching type is known; good!
        if m == 'e':
            # A1 - exact value:
            opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
        elif m == 'p':
            # A2 - phrase/substring:
            opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
        elif m == 'r':
            # A3 - regular expression:
            opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
        elif m == 'a' or m == 'w':
            # A4 - all of the words:
            p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
            for word in get_words_from_pattern(p):
                opfts.append(['+', word, f, 'w']) # '+' in all units
        elif m == 'o':
            # A5 - any of the words:
            p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
            for word in get_words_from_pattern(p):
                if len(opfts)==0:
                    opfts.append(['+', word, f, 'w']) # '+' in the first unit
                else:
                    opfts.append(['|', word, f, 'w']) # '|' in further units
        else:
            # unknown matching type: warn (HTML outputs only) and fall back
            # to a substring search
            if of.startswith("h"):
                write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
            opfts.append(['+', "%" + p + "%", f, 'w'])
    else:
        ## B - matching type is not known: let us try to determine it by some heuristics
        if f and p[0] == '"' and p[-1] == '"':
            ## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
            opfts.append(['+', p[1:-1], f, 'a'])
        elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
            ## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
            ## => doing washed ACC search
            opfts.append(['+', p, f, 'a'])
        elif f and p[0] == "'" and p[-1] == "'":
            ## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
            opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
        elif f and p[0] == "/" and p[-1] == "/":
            ## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
            opfts.append(['+', p[1:-1], f, 'r'])
        elif f and p.find(',') >= 0:
            ## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
            opfts.append(['+', p, f, 'a'])
        elif f and str(f[0:2]).isdigit():
            ## B2 - does 'f' exist and starts by two digits? => doing ACC search
            opfts.append(['+', p, f, 'a'])
        else:
            ## B3 - doing WRD search, but maybe ACC too
            # search units are separated by spaces unless the space is within single or double quotes
            # so, let us replace temporarily any space within quotes by '__SPACE__'
            p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
            p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
            p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
            # and spaces after colon as well:
            p = re_pattern_spaces_after_colon.sub(lambda x: x.group(1).replace(' ', '__SPACE__'), p)
            # wash argument:
            p = re_logical_and.sub(" ", p)
            p = re_logical_or.sub(" |", p)
            p = re_logical_not.sub(" -", p)
            p = re_operators.sub(r' \1', p)
            for pi in p.split(): # iterate through separated units (or items, as "pi" stands for "p item")
                pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
                # firstly, determine set operator
                if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
                    oi = pi[0]
                    pi = pi[1:]
                else:
                    # okay, there is no operator, so let us decide what to do by default
                    oi = '+' # by default we are doing set intersection...
                # secondly, determine search pattern and field:
                if pi.find(":") > 0:
                    fi, pi = pi.split(":", 1)
                    fi = wash_field(fi)
                    # test whether fi is a real index code or a MARC-tag defined code:
                    if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
                        pass
                    else:
                        # it is not, so join it back:
                        fi, pi = f, fi + ":" + pi
                else:
                    fi, pi = f, pi
                # wash 'fi' argument:
                fi = wash_field(fi)
                # wash 'pi' argument:
                pi = pi.strip() # strip eventual spaces
                if re_quotes.match(pi):
                    # B3a - quotes are found => do ACC search (phrase search)
                    if pi[0] == '"' and pi[-1] == '"':
                        pi = pi.replace('"', '') # remove quote signs
                        opfts.append([oi, pi, fi, 'a'])
                    elif pi[0] == "'" and pi[-1] == "'":
                        pi = pi.replace("'", "") # remove quote signs
                        opfts.append([oi, "%" + pi + "%", fi, 'a'])
                    else: # unbalanced quotes, so fall back to WRD query:
                        opfts.append([oi, pi, fi, 'w'])
                elif pi.startswith('/') and pi.endswith('/'):
                    # B3b - pi has slashes around => do regexp search
                    opfts.append([oi, pi[1:-1], fi, 'r'])
                elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
                    # B3c - fi exists and starts by two digits => do ACC search
                    opfts.append([oi, pi, fi, 'a'])
                elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
                    # B3d - logical field fi exists but there is no WRD index for fi => try ACC search
                    opfts.append([oi, pi, fi, 'a'])
                else:
                    # B3e - general case => do WRD search
                    pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
                    for pii in get_words_from_pattern(pi):
                        opfts.append([oi, pii, fi, 'w'])
    ## sanity check:
    # NOTE(review): the loop below deletes from opfts while iterating over
    # range(len(opfts)); after a deletion the remaining indexes shift and the
    # final indexes raise IndexError, silenced by the bare except.  Kept
    # as-is to preserve the historical behaviour.
    for i in range(0, len(opfts)):
        try:
            pi = opfts[i][1]
            if pi == '*':
                if of.startswith("h"):
                    write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
                del opfts[i]
            if pi == '' or pi == ' ':
                fi = opfts[i][2]
                if fi:
                    if of.startswith("h"):
                        write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
                del opfts[i]
        except:
            pass
    ## replace old logical field names if applicable:
    if CFG_WEBSEARCH_FIELDS_CONVERT:
        opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
    ## return search units:
    return opfts
def page_start(req, of, cc, aas, ln, uid, title_message=None,
               description='', keywords='', recID=-1, tab='', p='', em=''):
    """
    Start page according to given output format.

    Sets the response content type from OF and, for HTML output, writes
    the full page header (navtrail, meta tags, RSS link, optional MathJax).

    @param title_message: title of the page, not escaped for HTML
    @param description: description of the page, not escaped for HTML
    @param keywords: keywords of the page, not escaped for HTML
    @param em: element-selection string; the header is only written when
        em is empty or contains EM_REPOSITORY["header"]
    """
    _ = gettext_set_language(ln)
    if not req or isinstance(req, cStringIO.OutputType):
        return # we were called from CLI
    if not title_message:
        title_message = _("Search Results")
    content_type = get_output_format_content_type(of)
    if of.startswith('x'):
        if of == 'xr':
            # we are doing RSS output
            req.content_type = "application/rss+xml"
            req.send_http_header()
            req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
        else:
            # we are doing XML output:
            req.content_type = get_output_format_content_type(of, 'text/xml')
            req.send_http_header()
            req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
    elif of.startswith('t') or str(of[0:3]).isdigit():
        # we are doing plain text output:
        req.content_type = "text/plain"
        req.send_http_header()
    elif of == "intbitset":
        req.content_type = "application/octet-stream"
        req.send_http_header()
    elif of == "recjson":
        req.content_type = "application/json"
        req.send_http_header()
    elif of == "id":
        pass # nothing to do, we shall only return list of recIDs
    elif content_type == 'text/html':
        # we are doing HTML output:
        req.content_type = "text/html"
        req.send_http_header()
        if not description:
            description = "%s %s." % (cc, _("Search Results"))
        if not keywords:
            keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
        ## generate RSS URL:
        argd = {}
        if req.args:
            argd = cgi.parse_qs(req.args)
        rssurl = websearch_templates.build_rss_url(argd)
        ## add MathJax if displaying single records (FIXME: find
        ## eventual better place to this code)
        if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
            metaheaderadd = get_mathjax_header(req.is_https())
        else:
            metaheaderadd = ''
        # Add metadata in meta tags for Google scholar-esque harvesting...
        # only if we have a detailed meta format and we are looking at a
        # single record
        if recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT and \
                record_exists(recID) == 1:
            metaheaderadd += format_record(recID,
                                           CFG_WEBSEARCH_DETAILED_META_FORMAT,
                                           ln=ln)
        ## generate navtrail:
        navtrail = create_navtrail_links(cc, aas, ln)
        if navtrail != '':
            navtrail += ' > '
        # NOTE(review): "(of != '' or of.lower() != 'hd')" is True for every
        # non-empty of (likely meant "and"); kept as-is.
        if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
                recID != -1:
            # If we are not in information tab in HD format, customize
            # the nav. trail to have a link back to main record. (Due
            # to the way perform_request_search() works, hb
            # (lowercase) is equal to hd)
            navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
                        (CFG_BASE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
            if (of != '' or of.lower() != 'hd') and of != 'hb':
                # Export
                format_name = of
                query = "SELECT name FROM format WHERE code=%s"
                res = run_sql(query, (of,))
                if res:
                    format_name = res[0][0]
                navtrail += ' > ' + format_name
            else:
                # Discussion, citations, etc. tabs
                tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
                navtrail += ' > ' + _(tab_label)
        else:
            navtrail += cgi.escape(title_message)
        if p:
            # we are serving search/browse results pages, so insert pattern:
            navtrail += ": " + cgi.escape(p)
            title_message = p + " - " + title_message
        body_css_classes = []
        if cc:
            # we know the collection, lets allow page styles based on cc
            #collection names may not satisfy rules for css classes which
            #are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
            #however it isn't clear what we should do about cases with
            #numbers, so we leave them to fail. Everything else becomes "_"
            css = nmtoken_from_string(cc).replace('.', '_').replace('-', '_').replace(':', '_')
            body_css_classes.append(css)
        ## finally, print page header:
        if em == '' or EM_REPOSITORY["header"] in em:
            req.write(pageheaderonly(req=req, title=title_message,
                                     navtrail=navtrail,
                                     description=description,
                                     keywords=keywords,
                                     metaheaderadd=metaheaderadd,
                                     uid=uid,
                                     language=ln,
                                     navmenuid='search',
                                     navtrail_append_title_p=0,
                                     rssurl=rssurl,
                                     body_css_classes=body_css_classes))
        req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
    else:
        # any other registered output format: just send its content type
        req.content_type = content_type
        req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
    "End page according to given output format: e.g. close XML tags, add HTML footer, etc."
    if of == "id":
        return [] # empty recID list
    if of == "intbitset":
        return intbitset()
    if not req:
        return # we were called from CLI
    if of.startswith('h'):
        # close the search-page body; write the footer unless suppressed by `em`
        req.write(websearch_templates.tmpl_search_pageend(ln=ln)) # pagebody end
        if em == "" or EM_REPOSITORY["footer"] in em:
            req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
    return
def create_add_to_search_pattern(p, p1, f1, m1, op1):
    """Return search pattern P extended with the advanced-search row
    (pattern P1, field F1, match type M1, boolean operator OP1)."""
    if not p1:
        return p
    # boolean glue: 'a'=AND, 'o'=OR, 'n'=AND NOT; no leading '+' on an empty query
    if op1 == 'a' and p:
        glue = ' +'
    elif op1 == 'o':
        glue = ' |'
    elif op1 == 'n':
        glue = ' -'
    else:
        glue = ''
    prefix = f1 + ':' if f1 else ''
    # exact value / phrase / regexp: wrap the whole of p1 in matching delimiters
    delimiters = {'e': '"', 'p': "'", 'r': '/'}
    if m1 in delimiters:
        quote = delimiters[m1]
        return p + glue + prefix + quote + p1 + quote
    # word matching (m1 == 'o' any-of / anything else all-of): prefix each
    # word with the field, since field:(word1 word2) is not currently
    # correctly working
    words = p1.strip().split(' ')
    if len(words) == 1:
        return p + glue + prefix + words[0]
    if m1 == 'o':
        return p + glue + '(' + ' |'.join([prefix + word for word in words]) + ')'
    return p + glue + '(' + ' '.join([prefix + word for word in words]) + ')'
def create_page_title_search_pattern_info(p, p1, p2, p3):
    """Create the search pattern bit for the page <title> web page
    HTML header.  Combines p and (p1, p2, p3) so that the header can be
    filled in both the Simple Search and Advanced Search contexts."""
    if p:
        return p
    title = p1
    for extra in (p2, p3):
        if extra:
            title += ' ' + extra
    return title
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
    "Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
    _ = gettext_set_language(ln)
    parts = []
    # day selector
    parts.append("""<select name="%sd">""" % name)
    parts.append("""<option value="">%s""" % _("any day"))
    for day in range(1, 32):
        parts.append("""<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day))
    parts.append("""</select>""")
    # month selector
    # trailing space in May distinguishes short/long form of the month name
    parts.append("""<select name="%sm">""" % name)
    parts.append("""<option value="">%s""" % _("any month"))
    months = [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")),
              (5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")),
              (9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]
    for mm, month in months:
        parts.append("""<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip()))
    parts.append("""</select>""")
    # year selector: the last 20 years up to (and including) the current one
    parts.append("""<select name="%sy">""" % name)
    parts.append("""<option value="">%s""" % _("any year"))
    this_year = int(time.strftime("%Y", time.localtime()))
    for year in range(this_year - 20, this_year + 1):
        parts.append("""<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year))
    parts.append("""</select>""")
    return ''.join(parts)
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
                      ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
                      m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
                      action="", em=""):
    """Create search box for 'search again in the results page' functionality.

    The parameters mirror the search URL arguments: patterns (p, p1..p3),
    fields (f, f1..f3), match types (m1..m3), boolean operators (op1,
    op2), sorting/output options (sf, so, sp, rm, of, ot, rg, sc, jrec),
    date restriction (d1*, d2*, dt), pattern list pl, enabled external
    collections ec, current collection cc and searched collections colls.
    `em` restricts which page elements are displayed (the box may be
    suppressed entirely); `action` distinguishes search from browse.
    Returns the HTML of the search box (possibly just a headline, or '').
    """
    if em != "" and EM_REPOSITORY["search_box"] not in em:
        # search box suppressed by the 'em' page-element selection;
        # still show the collection headline when the body is shown
        if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
            return '''
            <h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
        else:
            return ""
    # load the right message language
    _ = gettext_set_language(ln)
    # some computations
    cc_intl = get_coll_i18nname(cc, ln, False)
    cc_colID = get_colID(cc)
    colls_nicely_ordered = []
    if cfg_nicely_ordered_collection_list:
        colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
    else:
        colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
    # keep only the named collections for the selection boxes:
    colls_nice = []
    for (cx, cx_printable) in colls_nicely_ordered:
        if not cx.startswith("Unnamed collection"):
            colls_nice.append({'value': cx,
                               'text': cx_printable
                              })
    coll_selects = []
    if colls and colls[0] != CFG_SITE_NAME:
        # some collections are defined, so print these first, and only then print 'add another collection' heading:
        for c in colls:
            if c:
                temp = []
                temp.append({'value': CFG_SITE_NAME,
                             'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
                            })
                # this field is used to remove the current collection from the ones to be searched.
                temp.append({'value': '',
                             'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("remove this publisher or journal") or _("remove this collection"))
                            })
                for val in colls_nice:
                    # print collection; NOTE: colls_nice is already
                    # filtered of "Unnamed collection" entries.  (The
                    # previous code re-tested the stale loop variable
                    # `cx` here, which could wrongly drop every entry
                    # when the last ordered collection was unnamed.)
                    temp.append({'value': val['value'],
                                 'text': val['text'],
                                 'selected' : (c == re.sub(r"^[\s\-]*", "", val['value']))
                                })
                coll_selects.append(temp)
        coll_selects.append([{'value': '',
                              'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("add another publisher or journal") or _("add another collection"))
                             }] + colls_nice)
    else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
        coll_selects.append([{'value': CFG_SITE_NAME,
                              'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
                             }] + colls_nice)
    ## ranking methods
    ranks = [{
               'value' : '',
               'text' : "- %s %s -" % (_("OR").lower(), _("rank by")),
             }]
    for (code, name) in get_bibrank_methods(cc_colID, ln):
        # propose found rank methods:
        ranks.append({
                       'value': code,
                       'text': name,
                     })
    formats = get_available_output_formats(visible_only=True)
    # show collections in the search box? (not if there is only one
    # collection defined, and not if we are in light search)
    show_colls = True
    show_title = True
    if len(collection_reclist_cache.cache.keys()) == 1 or \
           aas == -1:
        show_colls = False
        show_title = False
    if cc == CFG_SITE_NAME:
        show_title = False
    if CFG_INSPIRE_SITE:
        show_title = False
    return websearch_templates.tmpl_search_box(
             ln = ln,
             aas = aas,
             cc_intl = cc_intl,
             cc = cc,
             ot = ot,
             sp = sp,
             action = action,
             fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
             f1 = f1,
             f2 = f2,
             f3 = f3,
             m1 = m1,
             m2 = m2,
             m3 = m3,
             p1 = p1,
             p2 = p2,
             p3 = p3,
             op1 = op1,
             op2 = op2,
             rm = rm,
             p = p,
             f = f,
             coll_selects = coll_selects,
             d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
             dt = dt,
             sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
             sf = sf,
             so = so,
             ranks = ranks,
             sc = sc,
             rg = rg,
             formats = formats,
             of = of,
             pl = pl,
             jrec = jrec,
             ec = ec,
             show_colls = show_colls,
             show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
           )
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
                                         rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
    """Create a help link proposing to switch from author to exact
    author (or first author to exact first author) while browsing.
    Returns '' when not browsing an author-like field."""
    if action != 'browse':
        return ""
    search_fields = (f, f1, f2, f3)
    if 'author' not in search_fields and 'firstauthor' not in search_fields:
        return ""
    # map the author-like fields onto their 'exact' counterparts:
    fe, f1e, f2e, f3e = [('exact' + fld) if fld in ('author', 'firstauthor') else fld
                         for fld in search_fields]
    link_name = f or f1
    link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
    return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
                                                                  rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
                                                                  link_name=link_name)
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
    """Create navigation trail links, i.e. links to the ancestors of
    collection CC (the Home collection is never shown).  If aas==1 the
    links point to the Advanced Search interface, otherwise to Simple
    Search.  When self_p is set, CC itself closes the trail."""
    dads = [(dad, get_coll_i18nname(dad, ln, False))
            for dad in get_coll_ancestors(cc)
            if dad != CFG_SITE_NAME]  # exclude Home collection
    if self_p and cc != CFG_SITE_NAME:
        dads.append((cc, get_coll_i18nname(cc, ln, False)))
    return websearch_templates.tmpl_navtrail_links(
        aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
    """Return the {'value', 'text'} entries for the 'search within'
    selection box of collection colID, falling back to all defined
    fields when the collection configures none."""
    res = None
    if colID:
        res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
                          WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
                          ORDER BY cff.score DESC, f.name ASC""", (colID,))
    if not res:
        res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
    # first entry is always the catch-all 'any field':
    fields = [{
                'value' : '',
                'text' : get_field_i18nname("any field", ln, False)
              }]
    fields.extend({'value': field_code,
                   'text': get_field_i18nname(field_name, ln, False)}
                  for field_code, field_name in res
                  if field_code and field_code != "anyfield")
    return fields
def get_sortby_fields(ln='en', colID=None):
    """Return the {'value', 'text'} entries for the 'sort by' selection
    box of collection colID; fall back first to the Home collection's
    sort fields, then to every sort field defined anywhere."""
    _ = gettext_set_language(ln)
    res = None
    if colID:
        res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
                          WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
                          ORDER BY cff.score DESC, f.name ASC""", (colID,))
    if not res:
        # no sort fields defined for this colID, try to take Home collection:
        res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
                          WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
                          ORDER BY cff.score DESC, f.name ASC""", (1,))
    if not res:
        # no sort fields defined for the Home collection, take all sort fields defined wherever they are:
        res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
                          WHERE cff.type='soo' AND cff.id_field=f.id
                          ORDER BY cff.score DESC, f.name ASC""",)
    # default entry comes first:
    fields = [{
                'value': '',
                'text': _(CFG_BIBSORT_DEFAULT_FIELD)
              }]
    fields.extend({'value': field_code,
                   'text': get_field_i18nname(field_name, ln, False)}
                  for field_code, field_name in res
                  if field_code and field_code != "anyfield")
    return fields
def create_andornot_box(name='op', value='', ln='en'):
    "Return HTML for the AND/OR/NOT boolean-operator selection box."
    _ = gettext_set_language(ln)
    choices = (('a', _("AND")), ('o', _("OR")), ('n', _("AND NOT")))
    fmt_args = [name]
    for code, label in choices:
        fmt_args.append(is_selected(code, value))
        fmt_args.append(label)
    return """
    <select name="%s">
    <option value="a"%s>%s
    <option value="o"%s>%s
    <option value="n"%s>%s
    </select>
    """ % tuple(fmt_args)
def create_matchtype_box(name='m', value='', ln='en'):
    "Return HTML for the 'match type' selection box."
    _ = gettext_set_language(ln)
    choices = (('a', _("All of the words:")),
               ('o', _("Any of the words:")),
               ('e', _("Exact phrase:")),
               ('p', _("Partial phrase:")),
               ('r', _("Regular expression:")))
    fmt_args = [name]
    for code, label in choices:
        fmt_args.append(is_selected(code, value))
        fmt_args.append(label)
    return """
    <select name="%s">
    <option value="a"%s>%s
    <option value="o"%s>%s
    <option value="e"%s>%s
    <option value="p"%s>%s
    <option value="r"%s>%s
    </select>
    """ % tuple(fmt_args)
def is_selected(var, fld):
    """Return ' selected' when VAR matches FLD, else ''.  Useful for
    marking the active <option> of a select box.  Matching is numeric
    when both values are ints, textual otherwise; the legacy
    'w'-prefixed word-index field form ('wau' vs 'au') also counts."""
    if type(var) is int and type(fld) is int:
        if var == fld:
            return " selected"
        return ""
    if str(var) == str(fld):
        return " selected"
    if fld and len(fld) == 3 and fld[0] == "w" and var == fld[1:]:
        return " selected"
    return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
    """Wash collection list by checking whether user has deselected
    anything under 'Narrow search'. Checks also if cc is a list or not.
    Return list of cc, colls_to_display, colls_to_search since the list
    of collections to display is different from that to search in.
    This is because users might have chosen 'split by collection'
    functionality.
    The behaviour of "collections to display" depends solely whether
    user has deselected a particular collection: e.g. if it started
    from 'Articles and Preprints' page, and deselected 'Preprints',
    then collection to display is 'Articles'. If he did not deselect
    anything, then collection to display is 'Articles & Preprints'.
    The behaviour of "collections to search in" depends on the
    'split_colls' parameter:
         * if is equal to 1, then we can wash the colls list down
           and search solely in the collection the user started from;
         * if is equal to 0, then we are splitting to the first level
           of collections, i.e. collections as they appear on the page
           we started to search from;
    The function raises exception
    InvenioWebSearchUnknownCollectionError
    if cc or one of c collections is not known.

    Returns a 5-tuple (cc, colls_out_for_display, colls_out,
    hosted_colls_out, debug) where `debug` is an HTML string of
    diagnostic messages (non-empty only when `verbose` is set).
    """
    colls_out = []
    colls_out_for_display = []
    # list to hold the hosted collections to be searched and displayed
    hosted_colls_out = []
    debug = ""
    if verbose:
        debug += "<br />"
        debug += "<br />1) --- initial parameters ---"
        debug += "<br />cc : %s" % cc
        debug += "<br />c : %s" % c
        debug += "<br />"
    # check what type is 'cc':
    if type(cc) is list:
        for ci in cc:
            if ci in collection_reclist_cache.cache:
                # yes this collection is real, so use it:
                cc = ci
                break
    else:
        # check once if cc is real:
        if cc not in collection_reclist_cache.cache:
            if cc:
                raise InvenioWebSearchUnknownCollectionError(cc)
            else:
                cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
    # check type of 'c' argument:
    if type(c) is list:
        colls = c
    else:
        colls = [c]
    if verbose:
        debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
        debug += "<br />cc : %s" % cc
        debug += "<br />c : %s" % c
        debug += "<br />"
    # remove all 'unreal' collections:
    colls_real = []
    for coll in colls:
        if coll in collection_reclist_cache.cache:
            colls_real.append(coll)
        else:
            if coll:
                raise InvenioWebSearchUnknownCollectionError(coll)
    colls = colls_real
    if verbose:
        debug += "<br />3) --- keeping only the real colls of c ---"
        debug += "<br />colls : %s" % colls
        debug += "<br />"
    # check if some real collections remain:
    if len(colls)==0:
        colls = [cc]
    if verbose:
        debug += "<br />4) --- in case no colls were left we use cc directly ---"
        debug += "<br />colls : %s" % colls
        debug += "<br />"
    # then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
    res = run_sql("""SELECT c.name FROM collection AS c,
                     collection_collection AS cc,
                     collection AS ccc
                     WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
                     AND ccc.name=%s AND cc.type='r'""", (cc,))
    # list that holds all the non restricted sons of cc that are also not hosted collections
    l_cc_nonrestricted_sons_and_nonhosted_colls = []
    res_hosted = run_sql("""SELECT c.name FROM collection AS c,
                         collection_collection AS cc,
                         collection AS ccc
                         WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
                         AND ccc.name=%s AND cc.type='r'
                         AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
    for row_hosted in res_hosted:
        l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
    l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
    l_cc_nonrestricted_sons = []
    l_c = colls[:]
    for row in res:
        if not collection_restricted_p(row[0]):
            l_cc_nonrestricted_sons.append(row[0])
    l_c.sort()
    l_cc_nonrestricted_sons.sort()
    if l_cc_nonrestricted_sons == l_c:
        colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
    # the following elif is a hack that preserves the above funcionality when we start searching from
    # the frontpage with some hosted collections deselected (either by default or manually)
    elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
        colls_out_for_display = colls
        split_colls = 0
    else:
        colls_out_for_display = colls # nope, we need to display all 'colls' successively
    # remove duplicates:
    #colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
    #colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
    #colls_out_for_display = list(set(colls_out_for_display))
    #remove duplicates while preserving the order
    # (set.add returns None, so the 'not set_out.add(...)' clause is
    # always true and merely records the collection as seen)
    set_out = set()
    colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
    if verbose:
        debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
        debug += "<br />colls_out_for_display : %s" % colls_out_for_display
        debug += "<br />"
    # FIXME: The below quoted part of the code has been commented out
    # because it prevents searching in individual restricted daughter
    # collections when both parent and all its public daughter
    # collections were asked for, in addition to some restricted
    # daughter collections. The removal was introduced for hosted
    # collections, so we may want to double check in this context.
    # the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
    # list to hold the collections to be removed
    #colls_to_be_removed = []
    # first calculate the collections that can safely be removed
    #for coll in colls_out_for_display:
    #    for ancestor in get_coll_ancestors(coll):
    #        #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
    #        if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
    # secondly remove the collections
    #for coll in colls_to_be_removed:
    #    colls_out_for_display.remove(coll)
    if verbose:
        debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
        debug += "<br />colls_out_for_display : %s" % colls_out_for_display
        debug += "<br />"
    # calculate the hosted collections to be searched.
    if colls_out_for_display == [cc]:
        if is_hosted_collection(cc):
            hosted_colls_out.append(cc)
        else:
            for coll in get_coll_sons(cc):
                if is_hosted_collection(coll):
                    hosted_colls_out.append(coll)
    else:
        for coll in colls_out_for_display:
            if is_hosted_collection(coll):
                hosted_colls_out.append(coll)
    if verbose:
        debug += "<br />7) --- calculate the hosted_colls_out ---"
        debug += "<br />hosted_colls_out : %s" % hosted_colls_out
        debug += "<br />"
    # second, let us decide on collection splitting:
    if split_colls == 0:
        # type A - no sons are wanted
        colls_out = colls_out_for_display
    else:
        # type B - sons (first-level descendants) are wanted
        for coll in colls_out_for_display:
            coll_sons = get_coll_sons(coll)
            if coll_sons == []:
                colls_out.append(coll)
            else:
                for coll_son in coll_sons:
                    if not is_hosted_collection(coll_son):
                        colls_out.append(coll_son)
            #else:
            #    colls_out = colls_out + coll_sons
    # remove duplicates:
    #colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
    #colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
    #colls_out = list(set(colls_out))
    #remove duplicates while preserving the order
    set_out = set()
    colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
    if verbose:
        debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
        debug += "<br />colls_out : %s" % colls_out
        debug += "<br />"
    # remove the hosted collections from the collections to be searched
    if hosted_colls_out:
        for coll in hosted_colls_out:
            try:
                colls_out.remove(coll)
            except ValueError:
                # in case coll was not found in colls_out
                pass
    if verbose:
        debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
        debug += "<br />colls_out : %s" % colls_out
    return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
    """
    Return list of synonyms for TERM by looking in KBR_NAME in
    MATCH_TYPE style.
    @param term: search-time term or index-time term
    @type term: str
    @param kbr_name: knowledge base name
    @type kbr_name: str
    @param match_type: specifies how the term matches against the KBR
        before doing the lookup. Could be `exact' (default),
        'leading_to_comma', `leading_to_number'.
    @type match_type: str
    @param use_memoise: can we memoise while doing lookups?
    @type use_memoise: bool
    @return: list of term synonyms
    @rtype: list of strings
    """
    ## exact match is the default: look up the whole term
    lookup_key = term
    tail = ''
    ## but maybe only a leading part of the term should be looked up,
    ## keeping the remainder to re-attach to each synonym found:
    if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
        split = re.match(r'^(.*?)(\s*,.*)$', term)
        if split:
            lookup_key, tail = split.group(1), split.group(2)
    elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
        split = re.match(r'^(.*?)(\s*\d.*)$', term)
        if split:
            lookup_key, tail = split.group(1), split.group(2)
    ## FIXME: workaround: escaping SQL wild-card signs, since KBR's
    ## exact search is doing LIKE query, so would match everything:
    lookup_key = lookup_key.replace('%', '\\%')
    ## OK, now find synonyms (a dict is used to deduplicate them):
    synonyms = {}
    for kbr_values in get_kbr_values(kbr_name,
                                     searchkey=lookup_key,
                                     searchtype='e',
                                     use_memoise=use_memoise):
        for kbr_value in kbr_values:
            synonyms[kbr_value + tail] = 1
    ## return list of term synonyms:
    return synonyms.keys()
def wash_output_format(ouput_format):
    """Wash output format value.  Guards against legacy inputs like
    'of=9' asking for specific MARC tags: unless exactly six characters
    are supplied, fall back to the HTML brief default 'hb'.  ('of=tm'
    is the preferred way to achieve the same nowadays.)"""
    looks_like_marc_tags = str(ouput_format[0:3]).isdigit()
    if looks_like_marc_tags and len(ouput_format) != 6:
        # asked to print MARC tags, but not enough digits given
        return 'hb'
    return ouput_format
def wash_pattern(p):
    """Wash pattern passed by URL. Check for sanity of the wildcard by
    removing wildcards if they are appended to extremely short words
    (1-3 letters). TODO: instead of this approximative treatment, it
    will be much better to introduce a temporal limit, e.g. to kill a
    query if it does not finish in 10 seconds.

    @param p: raw search pattern as received from the URL
    @type p: str
    @return: washed pattern (stripped, UTF-8-safe, wildcards sanitised,
        special terms like 'today' substituted)
    @rtype: str
    """
    # strip accents:
    # p = strip_accents(p) # FIXME: when available, strip accents all the time
    # add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
    p = " " + p + " "
    # replace spaces within quotes by __SPACE__ temporarily:
    # (so that the wildcard regexps below do not see quoted phrases
    # as several words)
    p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
    p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
    p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
    # get rid of unquoted wildcards after spaces:
    p = re_pattern_wildcards_after_spaces.sub("\\1", p)
    # get rid of extremely short words (1-3 letters with wildcards):
    #p = re_pattern_short_words.sub("\\1", p)
    # replace back __SPACE__ by spaces:
    p = re_pattern_space.sub(" ", p)
    # replace special terms:
    p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
    # remove unnecessary whitespace:
    p = p.strip()
    # remove potentially wrong UTF-8 characters:
    p = wash_for_utf8(p)
    return p
def wash_field(f):
    """Wash field name passed by URL: trim whitespace and lowercase it
    (e.g. 'Author' -> 'author', to better suit iPhone etc input
    modes), then convert legacy field codes (e.g. 'wau' or 'au' ->
    'author') when a conversion table is configured."""
    if not f:
        # empty/None field passes through untouched
        return f
    f = f.strip().lower()
    # wash legacy 'f' field names, if applicable:
    if CFG_WEBSEARCH_FIELDS_CONVERT:
        f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
    return f
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
    """
    Take user-submitted date arguments D1 (full datetime string) or
    the (D1Y, D1M, D1D) year, month, day triple and D2 or
    (D2Y, D2M, D2D), and return a pair of 'YYYY-MM-DD HH:MM:SS'
    datetime strings suitable for time-restricted searching.

    When both a full datetime string and its triple are present, the
    full string takes precedence.  Missing triple components are
    completed towards the interval edges: the starting date defaults
    to 0000-01-01 and the ending date to 9999-12-31.
    """
    # sanity check: nothing selected at all -> empty bounds
    if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
        return ("", "")
    # starting date: full string wins, otherwise complete the triple
    # towards the lower edge:
    if d1:
        datetext1 = d1
    else:
        datetext1 = "%04d-%02d-%02d 00:00:00" % (d1y or 0, d1m or 1, d1d or 1)
    # ending date: likewise, completed towards the upper edge.
    # NOTE: day 31 is fine even for shorter months, since the value is
    # only used as an inclusive upper bound in queries:
    if d2:
        datetext2 = d2
    else:
        datetext2 = "%04d-%02d-%02d 00:00:00" % (d2y or 9999, d2m or 12, d2d or 31)
    return (datetext1, datetext2)
def is_hosted_collection(coll):
    """Tell whether COLL is a hosted collection, i.e. whether its
    dbquery starts with 'hostedcollection:'.  Returns False when the
    collection is unknown, has no dbquery, or the lookup fails."""
    rows = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
    if not rows or not rows[0][0]:
        return False
    try:
        return rows[0][0].startswith("hostedcollection:")
    except IndexError:
        return False
def get_colID(c):
    """Return collection ID for collection name C, or None when no
    collection of that name exists."""
    res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
    if not res:
        return None
    return res[0][0]
def get_coll_normalised_name(c):
    """Return the normalised (case sensitive) collection name for the
    case-insensitively given collection name C, or None when there is
    no match."""
    res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
    return res[0][0] if res else None
def get_coll_ancestors(coll):
    """Return the list of ancestors of collection COLL, ordered from
    the topmost ancestor down to the direct parent."""
    ancestors = []
    current = coll
    while True:
        # climb one level up the collection tree:
        res = run_sql("""SELECT c.name FROM collection AS c
                          LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
                          LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
                          WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
                      (current,))
        if not res:
            # reached the root; stop climbing
            break
        current = res[0][0]
        ancestors.append(current)
    # we collected top-down ancestors in bottom-up order; reverse:
    ancestors.reverse()
    return ancestors
def get_coll_sons(coll, coll_type='r', public_only=1):
    """Return the list of sons (first-level descendants) of type
    COLL_TYPE for collection COLL.  A COLL_TYPE of '*' matches both
    regular and virtual collections.  When public_only is set,
    restricted son collections are filtered out."""
    if coll_type == '*':
        coll_type_query = " IN ('r', 'v')"
        query_params = (coll, )
    else:
        coll_type_query = "=%s"
        query_params = (coll_type, coll)
    query = "SELECT c.name FROM collection AS c "\
            "LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
            "LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
            "WHERE cc.type%s AND ccc.name=%%s" % coll_type_query
    query += " ORDER BY cc.score ASC"
    return [row[0] for row in run_sql(query, query_params)
            if not public_only or not collection_restricted_p(row[0])]
class CollectionAllChildrenDataCacher(DataCacher):
    """Cache for all children of a collection (regular & virtual, public & private).

    The cache maps every known collection name to the flat list of all
    its descendants (any depth), including restricted and virtual ones.
    """
    def __init__(self):
        def cache_filler():
            # Recompute the whole collection->descendants mapping.
            def get_all_children(coll, coll_type='r', public_only=1, d_internal_coll_sons=None):
                """Return a list of all children of type 'coll_type' for collection 'coll'.
                   If public_only, then return only non-restricted child collections.
                   If coll_type='*', then return both regular and virtual collections.
                   d_internal_coll_sons is an internal dictionary used in recursion for
                   minimizing the number of database calls and should not be used outside
                   this scope.
                """
                if not d_internal_coll_sons:
                    d_internal_coll_sons = {}
                children = []
                # memoise the direct sons per collection so each one is
                # fetched from the database at most once:
                if coll not in d_internal_coll_sons:
                    d_internal_coll_sons[coll] = get_coll_sons(coll, coll_type, public_only)
                for child in d_internal_coll_sons[coll]:
                    children.append(child)
                    # recurse; [0] is the child's descendant list
                    children.extend(get_all_children(child, coll_type, public_only, d_internal_coll_sons)[0])
                return children, d_internal_coll_sons
            ret = {}
            d_internal_coll_sons = None
            collections = collection_reclist_cache.cache.keys()
            for collection in collections:
                # the memoisation dict is threaded through all calls:
                ret[collection], d_internal_coll_sons = get_all_children(collection, '*', public_only=0, d_internal_coll_sons=d_internal_coll_sons)
            return ret
        def timestamp_verifier():
            # the cache is stale whenever the collection tables change
            return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
# Instantiate the process-wide cache singleton lazily: reuse an
# existing healthy instance (e.g. across module reloads), otherwise
# create a fresh one.  Mirrors the pattern used by the other
# websearch caches.
try:
    if not collection_allchildren_cache.is_ok_p:
        raise Exception
except Exception:
    collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
    """Return the list of all (direct and indirect) children of
    collection COLL; empty list when COLL is unknown."""
    if recreate_cache_if_needed:
        collection_allchildren_cache.recreate_cache_if_needed()
    # unknown collections simply yield no children:
    return collection_allchildren_cache.cache.get(coll, [])
def get_coll_real_descendants(coll, coll_type='_', get_hosted_colls=True):
    """Return a list of all descendants of collection COLL that are
    defined by a 'dbquery'.  IOW, decompose compound collections like
    "A & B" into "A" and "B" provided that "A & B" has no associated
    database query defined."""
    descendants = []
    res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
                      LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
                      LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
                      WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score ASC""",
                  (coll, coll_type,))
    for name, dbquery in res:
        if not dbquery:
            # 'composed' collection, so recurse into its sons:
            descendants.extend(get_coll_real_descendants(name))
        elif get_hosted_colls or not dbquery.startswith("hostedcollection:"):
            # 'real' collection (hosted ones only when requested):
            descendants.append(name)
    return descendants
def browse_pattern_phrases(req, colls, p, f, rg, ln=CFG_SITE_LANG):
    """Returns either biliographic phrases or words indexes.

    Looks up the phrases nearest to pattern P in field F, restricted
    to the collections COLLS, returning a list of [phrase, nbhits]
    pairs (empty list when nothing could be found).
    """
    ## is p enclosed in quotes? (coming from exact search)
    if p.startswith('"') and p.endswith('"'):
        p = p[1:-1]
    ## okay, "real browse" follows:
    ## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
    if not f and p.find(":") > 0: # does 'p' contain ':'?
        f, p = p.split(":", 1)
    ## do we search in words indexes?
    # FIXME uncomment this
    #if not f:
    #    return browse_in_bibwords(req, p, f)
    coll_hitset = intbitset()
    for coll_name in colls:
        coll_hitset |= get_collection_reclist(coll_name)
    index_id = get_index_id_from_field(f)
    if index_id != 0:
        browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
    else:
        browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
        while not browsed_phrases:
            # try again and again with shorter and shorter pattern:
            if not p:
                # pattern exhausted without any hit; bail out instead
                # of looping forever on the empty pattern (''[:-1]
                # stays '')
                return []
            try:
                p = p[:-1]
                browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
            except Exception:
                # narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are not swallowed here
                register_exception(req=req, alert_admin=True)
                # probably there are no hits at all:
                #req.write(_("No values found."))
                return []
        ## try to check hits in these particular collection selection:
        browsed_phrases_in_colls = []
        if 0:
            for phrase in browsed_phrases:
                phrase_hitset = intbitset()
                phrase_hitsets = search_pattern("", phrase, f, 'e')
                for coll in colls:
                    phrase_hitset.union_update(phrase_hitsets[coll])
                if len(phrase_hitset) > 0:
                    # okay, this phrase has some hits in colls, so add it:
                    browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
        ## were there hits in collections?
        if browsed_phrases_in_colls == []:
            if browsed_phrases != []:
                #write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
                #Please try different term.<p>Displaying matches in any collection...""" % p_orig)
                ## try to get nbhits for these phrases in any collection:
                for phrase in browsed_phrases:
                    nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
                    if nbhits > 0:
                        browsed_phrases_in_colls.append([phrase, nbhits])
    return browsed_phrases_in_colls
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
    """Display either bibliographic phrases or words indexes."""
    # load the right message language
    _ = gettext_set_language(ln)
    phrases = browse_pattern_phrases(req, colls, p, f, rg, ln)
    if not phrases:
        req.write(_("No values found."))
        return
    ## display results now:
    req.write(websearch_templates.tmpl_browse_pattern(
        f=f,
        fn=get_field_i18nname(get_field_name(f) or f, ln, False),
        ln=ln,
        browsed_phrases_in_colls=phrases,
        colls=colls,
        rg=rg,
    ))
    return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
    """Browse inside words indexes."""
    if not p:
        return
    _ = gettext_set_language(ln)
    # reuse the request arguments, but force the 'search' action:
    urlargd = dict(req.argd)
    urlargd['action'] = 'search'
    nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
    req.write(websearch_templates.tmpl_search_in_bibwords(
        p=p,
        f=f,
        ln=ln,
        nearest_box=nearest_box
    ))
    return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
    """Search for complex pattern 'p' within field 'f' according to
    matching type 'm'. Return hitset of recIDs.

    The function uses multi-stage searching algorithm in case of no
    exact match found. See the Search Internals document for
    detailed description.

    The 'ap' argument governs whether an alternative patterns are to
    be used in case there is no direct hit for (p,f,m). For
    example, whether to replace non-alphanumeric characters by
    spaces if it would give some hits. See the Search Internals
    document for detailed description. (ap=0 forbits the
    alternative pattern usage, ap=1 permits it.)
    'ap' is also internally used for allowing hidden tag search
    (for requests coming from webcoll, for example). In this
    case ap=-9

    The 'of' argument governs whether to print or not some
    information to the user in case of no match found. (Usually it
    prints the information in case of HTML formats, otherwise it's
    silent).

    The 'verbose' argument controls the level of debugging information
    to be printed (0=least, 9=most).

    All the parameters are assumed to have been previously washed.

    This function is suitable as a mid-level API.
    """
    _ = gettext_set_language(ln)
    hitset_empty = intbitset()
    # sanity check: an empty pattern matches the whole record universe;
    # record id 0 is never valid, hence the discard.
    if not p:
        hitset_full = intbitset(trailing_bits=1)
        hitset_full.discard(0)
        # no pattern, so return all universe
        return hitset_full
    # search stage 1: break up arguments into basic search units:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    basic_search_units = create_basic_search_units(req, p, f, m, of)
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
        write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
    # search stage 2: do search for each search unit and verify hit presence:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    basic_search_units_hitsets = []
    #prepare hiddenfield-related..
    myhiddens = cfg['CFG_BIBFORMAT_HIDDEN_TAGS']
    can_see_hidden = False
    if req:
        user_info = collect_user_info(req)
        can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
    if not req and ap == -9: # special request, coming from webcoll
        can_see_hidden = True
    if can_see_hidden:
        # privileged caller: disable the hidden-tag filtering below
        myhiddens = []
    if CFG_INSPIRE_SITE and of.startswith('h'):
        # fulltext/caption search warnings for INSPIRE:
        fields_to_be_searched = [f for dummy_o, p, f, m in basic_search_units]
        if 'fulltext' in fields_to_be_searched:
            write_warning(_("Full-text search is currently available for all arXiv papers, many theses, a few report series and some journal articles"), req=req)
        elif 'caption' in fields_to_be_searched:
            write_warning(_("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") %
                          {'x_range_from_year': '2008',
                           'x_range_to_year': '2012'}, req=req)
    for idx_unit in xrange(len(basic_search_units)):
        bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
        if bsu_f and len(bsu_f) < 2:
            # one-letter index names cannot exist; fall back to a word
            # search across all fields
            if of.startswith("h"):
                write_warning(_("There is no index %(x_name)s. Searching for %(x_text)s in all fields.", x_name=bsu_f, x_text=bsu_p), req=req)
            bsu_f = ''
            bsu_m = 'w'
            if of.startswith("h") and verbose:
                write_warning(_('Instead searching %(x_name)s.', x_name=str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
        try:
            basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
        except InvenioWebSearchWildcardLimitError as excp:
            # too many wildcard matches: keep the partial result set
            basic_search_unit_hitset = excp.res
            if of.startswith("h"):
                write_warning(_("Search term too generic, displaying only partial results..."), req=req)
        # FIXME: print warning if we use native full-text indexing
        if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
            write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
        #check that the user is allowed to search with this tag
        #if he/she tries it
        if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
            # the unit searches a raw MARC tag: blank the result if the tag
            # (or any prefix-matching hidden tag) is configured as hidden
            for htag in myhiddens:
                ltag = len(htag)
                samelenfield = bsu_f[0:ltag]
                if samelenfield == htag: #user searches by a hidden tag
                    #we won't show you anything..
                    basic_search_unit_hitset = intbitset()
                    if verbose >= 9 and of.startswith("h"):
                        write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
                                      (cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
                    display_nearest_terms_box = False #..and stop spying, too.
        if verbose >= 9 and of.startswith("h"):
            # NOTE(review): the message says "stage 1" although emitted from
            # stage 2; debug string kept untouched.
            write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
        if len(basic_search_unit_hitset) > 0 or \
           ap<1 or \
           bsu_o in ("|", "-") or \
           ((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
            # stage 2-1: this basic search unit is retained, since
            # either the hitset is non-empty, or the approximate
            # pattern treatment is switched off, or the search unit
            # was joined by an OR operator to preceding/following
            # units so we do not require that it exists
            basic_search_units_hitsets.append(basic_search_unit_hitset)
        else:
            # stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
            if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
                if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
                    bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
                else: # it is WRD query
                    bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
                if verbose and of.startswith('h') and req:
                    write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
                # recursive retry with of="id" and req=None so it stays silent
                basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
                if len(basic_search_unit_hitset) > 0:
                    # we retain the new unit instead
                    if of.startswith('h'):
                        write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") %
                                      {'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
                                       'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
                    basic_search_units[idx_unit][1] = bsu_pn
                    basic_search_units_hitsets.append(basic_search_unit_hitset)
                else:
                    # stage 2-3: no hits found either, propose nearest indexed terms:
                    if of.startswith('h') and display_nearest_terms_box:
                        if req:
                            if bsu_f == "recid":
                                write_warning(_("Requested record does not seem to exist."), req=req)
                            else:
                                write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
                    return hitset_empty
            else:
                # stage 2-3: no hits found either, propose nearest indexed terms:
                if of.startswith('h') and display_nearest_terms_box:
                    if req:
                        if bsu_f == "recid":
                            write_warning(_("Requested record does not seem to exist."), req=req)
                        else:
                            write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
                return hitset_empty
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        for idx_unit in range(0, len(basic_search_units)):
            write_warning("Search stage 2: basic search unit %s gave %d hits." %
                          (basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
        write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
    # search stage 3: apply boolean query for each search unit:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    # let the initial set be the complete universe:
    hitset_in_any_collection = intbitset(trailing_bits=1)
    hitset_in_any_collection.discard(0)
    for idx_unit in xrange(len(basic_search_units)):
        this_unit_operation = basic_search_units[idx_unit][0]
        this_unit_hitset = basic_search_units_hitsets[idx_unit]
        if this_unit_operation == '+':
            hitset_in_any_collection.intersection_update(this_unit_hitset)
        elif this_unit_operation == '-':
            hitset_in_any_collection.difference_update(this_unit_hitset)
        elif this_unit_operation == '|':
            hitset_in_any_collection.union_update(this_unit_hitset)
        else:
            if of.startswith("h"):
                write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
    if len(hitset_in_any_collection) == 0:
        # no hits found, propose alternative boolean query:
        if of.startswith('h') and display_nearest_terms_box:
            nearestterms = []
            for idx_unit in range(0, len(basic_search_units)):
                bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
                if bsu_p.startswith("%") and bsu_p.endswith("%"):
                    bsu_p = "'" + bsu_p[1:-1] + "'"
                bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
                # create a similar query, but with the basic search unit only
                argd = {}
                argd.update(req.argd)
                argd['p'] = bsu_p
                argd['f'] = bsu_f
                nearestterms.append((bsu_p, bsu_nbhits, argd))
            text = websearch_templates.tmpl_search_no_boolean_hits(
                ln=ln, nearestterms=nearestterms)
            write_warning(text, req=req)
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
        write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
    return hitset_in_any_collection
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
    """Search for complex pattern 'p' containing parenthesis within field 'f' according to
    matching type 'm'. Return hitset of recIDs.

    For more details on the parameters see 'search_pattern'
    """
    _ = gettext_set_language(ln)
    spires_syntax_converter = SpiresToInvenioSyntaxConverter()
    spires_syntax_query = False
    # if the pattern uses SPIRES search syntax, convert it to Invenio syntax
    if spires_syntax_converter.is_applicable(p):
        spires_syntax_query = True
        p = spires_syntax_converter.convert_query(p)
    # sanity check: do not call parenthesised parser for search terms
    # like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
    if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
        return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
    # Try searching with parentheses
    try:
        parser = SearchQueryParenthesisedParser()
        # get a hitset with all recids
        result_hitset = intbitset(trailing_bits=1)
        # parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
        parsing_result = parser.parse_query(p)
        if verbose and of.startswith("h"):
            write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
            write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
        # go through every pattern
        # calculate hitset for it
        # combine pattern's hitset with the result using the corresponding operator
        for index in xrange(0, len(parsing_result)-1, 2):
            current_operator = parsing_result[index]
            current_pattern = parsing_result[index+1]
            if CFG_INSPIRE_SITE and spires_syntax_query:
                # setting ap=0 to turn off approximate matching for 0 results.
                # Doesn't work well in combinations.
                # FIXME: The right fix involves collecting statuses for each
                # hitset, then showing a nearest terms box exactly once,
                # outside this loop.
                ap = 0
                display_nearest_terms_box = False
            # obtain a hitset for the current pattern
            current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
            # combine the current hitset with resulting hitset using the current operator
            if current_operator == '+':
                result_hitset = result_hitset & current_hitset
            elif current_operator == '-':
                result_hitset = result_hitset - current_hitset
            elif current_operator == '|':
                result_hitset = result_hitset | current_hitset
            else:
                assert False, "Unknown operator in search_pattern_parenthesised()"
        return result_hitset
    # If searching with parenteses fails, perform search ignoring parentheses
    except SyntaxError:
        write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
        # remove the parentheses in the query. Current implementation removes all the parentheses,
        # but it could be improved to romove only these that are not inside quotes
        p = p.replace('(', ' ')
        p = p.replace(')', ' ')
        return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
    """Search for basic search unit defined by pattern 'p' and field
    'f' and matching type 'm'. Return hitset of recIDs.

    All the parameters are assumed to have been previously washed.
    'p' is assumed to be already a ``basic search unit'' so that it
    is searched as such and is not broken up in any way. Only
    wildcard and span queries are being detected inside 'p'.

    If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
    one of the indexes that has defined runtime synonym knowledge
    base, then look up there and automatically enrich search
    results with results for synonyms.

    In case the wildcard limit (wl) is greater than 0 and this limit
    is reached an InvenioWebSearchWildcardLimitError will be raised.
    In case you want to call this function with no limit for the
    wildcard queries, wl should be 0.

    Parameter 'ignore_synonyms' is a list of terms for which we
    should not try to further find a synonym.

    This function is suitable as a low-level API.
    """
    ## create empty output results set:
    hitset = intbitset()
    if not p: # sanity checking
        return hitset
    tokenizer = get_field_tokenizer_type(f)
    hitset_cjk = intbitset()
    if tokenizer == "BibIndexCJKTokenizer":
        # CJK text carries no word delimiters: search each tokenized
        # character separately and OR the partial hitsets in at the end.
        if is_there_any_CJK_character_in_text(p):
            cjk_tok = BibIndexCJKTokenizer()
            chars = cjk_tok.tokenize_for_words(p)
            for char in chars:
                hitset_cjk |= search_unit_in_bibwords(char, f, wl)
    ## eventually look up runtime synonyms:
    hitset_synonyms = intbitset()
    if (f or 'anyfield') in CFG_WEBSEARCH_SYNONYM_KBRS:
        if ignore_synonyms is None:
            ignore_synonyms = []
        # remember 'p' so mutually-synonymous terms cannot recurse forever
        ignore_synonyms.append(p)
        for p_synonym in get_synonym_terms(p,
                                           CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][0],
                                           CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][1]):
            if p_synonym != p and \
               not p_synonym in ignore_synonyms:
                hitset_synonyms |= search_unit(p_synonym, f, m, wl,
                                               ignore_synonyms)
    ## look up hits:
    if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
        # redirect to Solr
        try:
            return search_unit_in_solr(p, f, m)
        except:
            # There were troubles with getting full-text search
            # results from Solr. Let us alert the admin of these
            # problems and let us simply return empty results to the
            # end user.
            register_exception()
            return hitset
    elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
        # redirect to Xapian
        try:
            return search_unit_in_xapian(p, f, m)
        except:
            # There were troubles with getting full-text search
            # results from Xapian. Let us alert the admin of these
            # problems and let us simply return empty results to the
            # end user.
            register_exception()
            return hitset
    if f == 'datecreated':
        hitset = search_unit_in_bibrec(p, p, 'c')
    elif f == 'datemodified':
        hitset = search_unit_in_bibrec(p, p, 'm')
    elif f == 'refersto':
        # we are doing search by the citation count
        hitset = search_unit_refersto(p)
    elif f == 'referstoexcludingselfcites':
        # we are doing search by the citation count
        hitset = search_unit_refersto_excluding_selfcites(p)
    elif f == 'cataloguer':
        # we are doing search by the cataloguer nickname
        hitset = search_unit_in_record_history(p)
    elif f == 'rawref':
        from invenio.legacy.refextract.api import search_from_reference
        field, pattern = search_from_reference(p)
        return search_unit(pattern, field)
    elif f == 'citedby':
        # we are doing search by the citation count
        hitset = search_unit_citedby(p)
    elif f == 'collection':
        # we are doing search by the collection name or MARC field
        hitset = search_unit_collection(p, m, wl=wl)
    elif f == 'tag':
        module_found = False
        try:
            from invenio.modules.tags.search_units import search_unit_in_tags
            module_found = True
        except:
            # WebTag module is disabled, so ignore 'tag' selector
            pass
        if module_found:
            return search_unit_in_tags(p)
    elif f == 'citedbyexcludingselfcites':
        # we are doing search by the citation count
        hitset = search_unit_citedby_excluding_selfcites(p)
    elif m == 'a' or m == 'r' or f == 'subject':
        # we are doing either phrase search or regexp search
        if f == 'fulltext':
            # FIXME: workaround for not having phrase index yet
            return search_pattern(None, p, f, 'w')
        index_id = get_index_id_from_field(f)
        if index_id != 0:
            if m == 'a' and index_id in get_idxpair_field_ids():
                #for exact match on the admin configured fields we are searching in the pair tables
                hitset = search_unit_in_idxpairs(p, f, m, wl)
            else:
                hitset = search_unit_in_idxphrases(p, f, m, wl)
        else:
            hitset = search_unit_in_bibxxx(p, f, m, wl)
            # if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
            #     #if we have no results by doing exact matching, do partial matching
            #     #for removing the distinction between simple and double quotes
            #     hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
    elif p.startswith("cited:"):
        # we are doing search by the citation count
        hitset = search_unit_by_times_cited(p[len("cited:"):])
    elif p.startswith("citedexcludingselfcites:"):
        # we are doing search by the citation count
        # (fix: strip the full "citedexcludingselfcites:" prefix; the old
        # code sliced only 6 characters, as if the prefix were "cited:",
        # leaving garbage like "xcludingselfcites:10->20" as the pattern)
        hitset = search_unit_by_times_cited(p[len("citedexcludingselfcites:"):], exclude_selfcites=True)
    else:
        # we are doing bibwords search by default
        hitset = search_unit_in_bibwords(p, f, wl=wl)
    ## merge synonym results and return total:
    hitset |= hitset_synonyms
    hitset |= hitset_cjk
    return hitset
def get_idxpair_field_ids():
    """Return the ids of the indexes on which idxPAIR tables should be used."""
    name_to_id = dict(run_sql("SELECT name, id FROM idxINDEX"))
    wanted_names = cfg['CFG_WEBSEARCH_IDXPAIRS_FIELDS']
    return [idx_id for name, idx_id in name_to_id.items() if name in wanted_names]
def search_unit_in_bibwords(word, f, decompress=zlib.decompress, wl=0):
    """Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
    # NOTE(review): the 'decompress' parameter is not referenced anywhere in
    # this body.
    hitset = intbitset() # will hold output result set
    set_used = 0 # not-yet-used flag, to be able to circumvent set operations
    limit_reached = 0 # flag for knowing if the query limit has been reached
    # if no field is specified, search in the global index.
    f = f or 'anyfield'
    index_id = get_index_id_from_field(f)
    if index_id:
        bibwordsX = "idxWORD%02dF" % index_id
        stemming_language = get_index_stemming_language(index_id)
    else:
        return intbitset() # word index f does not exist
    # wash 'word' argument and run query:
    if f.endswith('count') and word.endswith('+'):
        # field count query of the form N+ so transform N+ to N->99999:
        word = word[:-1] + '->99999'
    word = word.replace('*', '%') # we now use '*' as the truncation character
    words = word.split("->", 1) # check for span query
    if len(words) == 2:
        # span query: wash/stem both endpoints, then run a BETWEEN query
        word0 = re_word.sub('', words[0])
        word1 = re_word.sub('', words[1])
        if stemming_language:
            word0 = lower_index_term(word0)
            word1 = lower_index_term(word1)
            # We remove trailing truncation character before stemming
            if word0.endswith('%'):
                word0 = stem(word0[:-1], stemming_language) + '%'
            else:
                word0 = stem(word0, stemming_language)
            if word1.endswith('%'):
                word1 = stem(word1[:-1], stemming_language) + '%'
            else:
                word1 = stem(word1, stemming_language)
        word0_washed = wash_index_term(word0)
        word1_washed = wash_index_term(word1)
        if f.endswith('count'):
            # field count query; convert to integers in order
            # to have numerical behaviour for 'BETWEEN n1 AND n2' query
            try:
                word0_washed = int(word0_washed)
                word1_washed = int(word1_washed)
            except ValueError:
                pass
        try:
            res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
                                     (word0_washed, word1_washed), wildcard_limit=wl)
        except InvenioDbQueryWildcardLimitError as excp:
            res = excp.res
            limit_reached = 1 # set the limit reached flag to true
    else:
        # single-term query (possibly with a wildcard)
        if f == 'journal':
            pass # FIXME: quick hack for the journal index
        else:
            word = re_word.sub('', word)
            if stemming_language:
                word = lower_index_term(word)
                # We remove trailing truncation character before stemming
                if word.endswith('%'):
                    word = stem(word[:-1], stemming_language) + '%'
                else:
                    word = stem(word, stemming_language)
        if word.find('%') >= 0: # do we have wildcard in the word?
            if f == 'journal':
                # FIXME: quick hack for the journal index
                # FIXME: we can run a sanity check here for all indexes
                res = ()
            else:
                try:
                    res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
                                             (wash_index_term(word),), wildcard_limit = wl)
                except InvenioDbQueryWildcardLimitError as excp:
                    res = excp.res
                    limit_reached = 1 # set the limit reached flag to true
        else:
            res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
                          (wash_index_term(word),))
    # fill the result set:
    for word, hitlist in res:
        hitset_bibwrd = intbitset(hitlist)
        # add the results:
        if set_used:
            hitset.union_update(hitset_bibwrd)
        else:
            # first row: adopt its hitset directly instead of unioning
            hitset = hitset_bibwrd
            set_used = 1
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(hitset)
    # okay, return result set:
    return hitset
def search_unit_in_idxpairs(p, f, search_type, wl=0):
    """Searches for pair 'p' inside idxPAIR table for field 'f' and
    returns hitset of recIDs found.

    Falls back to word search when the pattern does not tokenize into
    pairs, and to phrase search when a span query's left/right sides
    tokenize inconsistently.
    """
    limit_reached = 0 # flag for knowing if the query limit has been reached
    do_exact_search = True # flag to know when it makes sense to try to do exact matching
    result_set = intbitset()
    #determine the idxPAIR table to read from
    index_id = get_index_id_from_field(f)
    if not index_id:
        return intbitset()
    stemming_language = get_index_stemming_language(index_id)
    pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
    idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
    if p.startswith("%") and p.endswith("%"):
        p = p[1:-1]
    original_pattern = p
    p = string.replace(p, '*', '%') # we now use '*' as the truncation character
    queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
    #is it a span query?
    ps = p.split("->", 1)
    if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
        #so we are dealing with a span query
        pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
        pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
        if not pairs_left or not pairs_right:
            # we are not actually dealing with pairs but with words
            return search_unit_in_bibwords(original_pattern, f, wl=wl)
        elif len(pairs_left) != len(pairs_right):
            # it is kind of hard to know what the user actually wanted
            # we have to do: foo bar baz -> qux xyz, so let's swith to phrase
            return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
        elif len(pairs_left) > 1 and \
             len(pairs_right) > 1 and \
             pairs_left[:-1] != pairs_right[:-1]:
            # again we have something like: foo bar baz -> abc xyz qux
            # so we'd better switch to phrase
            return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
        else:
            # finally, we can treat the search using idxPairs
            # at this step we have either: foo bar -> abc xyz
            # or foo bar abc -> foo bar xyz
            queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
            for pair in pairs_left[:-1]:# which should be equal with pairs_right[:-1]
                queries_releated_vars.append(("= %s", (pair, ), False))
        do_exact_search = False # no exact search for span queries
    elif p.find('%') > -1:
        #tokenizing p will remove the '%', so we have to make sure it stays
        replacement = 'xxxxxxxxxx' #hopefuly this will not clash with anything in the future
        p = string.replace(p, '%', replacement)
        pairs = pairs_tokenizer.tokenize_for_pairs(p)
        if not pairs:
            # we are not actually dealing with pairs but with words
            return search_unit_in_bibwords(original_pattern, f, wl=wl)
        queries_releated_vars = []
        for pair in pairs:
            if string.find(pair, replacement) > -1:
                pair = string.replace(pair, replacement, '%') #we replace back the % sign
                queries_releated_vars.append(("LIKE %s", (pair, ), True))
            else:
                queries_releated_vars.append(("= %s", (pair, ), False))
        do_exact_search = False
    else:
        #normal query
        pairs = pairs_tokenizer.tokenize_for_pairs(p)
        if not pairs:
            # we are not actually dealing with pairs but with words
            return search_unit_in_bibwords(original_pattern, f, wl=wl)
        queries_releated_vars = []
        for pair in pairs:
            queries_releated_vars.append(("= %s", (pair, ), False))
    first_results = 1 # flag to know if it's the first set of results or not
    # run every sub-query and intersect the partial hitsets
    for query_var in queries_releated_vars:
        query_addons = query_var[0]
        query_params = query_var[1]
        use_query_limit = query_var[2]
        if use_query_limit:
            try:
                res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s"
                                         % (idxpair_table_washed, query_addons), query_params, wildcard_limit=wl) #kwalitee:disable=sql
            except InvenioDbQueryWildcardLimitError as excp:
                res = excp.res
                limit_reached = 1 # set the limit reached flag to true
        else:
            res = run_sql("SELECT term, hitlist FROM %s WHERE term %s"
                          % (idxpair_table_washed, query_addons), query_params) #kwalitee:disable=sql
        if not res:
            # one empty sub-result empties the whole intersection
            return intbitset()
        for pair, hitlist in res:
            hitset_idxpairs = intbitset(hitlist)
            if first_results:
                result_set = hitset_idxpairs
                first_results = 0
            else:
                result_set.intersection_update(hitset_idxpairs)
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(result_set)
    # check if we need to eliminate the false positives
    if cfg['CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH'] and do_exact_search:
        # we need to eliminate the false positives
        idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
        not_exact_search = intbitset()
        for recid in result_set:
            res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
            if res:
                termlist = deserialize_via_marshal(res[0][0])
                if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
                    not_exact_search.add(recid)
            else:
                not_exact_search.add(recid)
        # remove the recs that are false positives from the final result
        result_set.difference_update(not_exact_search)
    return result_set
def search_unit_in_idxphrases(p, f, search_type, wl=0):
    """Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
    The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
    # call word search method in some cases:
    if f.endswith('count'):
        return search_unit_in_bibwords(p, f, wl=wl)
    hitset = intbitset() # will hold output result set
    set_used = 0 # not-yet-used flag, to be able to circumvent set operations
    limit_reached = 0 # flag for knowing if the query limit has been reached
    use_query_limit = False # flag for knowing if to limit the query results or not
    # deduce in which idxPHRASE table we will search:
    idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if index_id:
            idxphraseX = "idxPHRASE%02dF" % index_id
        else:
            return intbitset() # phrase index f does not exist
    # detect query type (exact phrase, partial phrase, regexp):
    if search_type == 'r':
        query_addons = "REGEXP %s"
        query_params = (p,)
        use_query_limit = True
    else:
        p = p.replace('*', '%') # we now use '*' as the truncation character
        ps = p.split("->", 1) # check for span query:
        if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
            query_addons = "BETWEEN %s AND %s"
            query_params = (ps[0], ps[1])
            use_query_limit = True
        else:
            if p.find('%') > -1:
                query_addons = "LIKE %s"
                query_params = (p,)
                use_query_limit = True
            else:
                query_addons = "= %s"
                query_params = (p,)
    # special washing for fuzzy author index:
    if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
        query_params_washed = ()
        for query_param in query_params:
            query_params_washed += (wash_author_name(query_param),)
        query_params = query_params_washed
    # perform search:
    if use_query_limit:
        try:
            res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
                                     query_params, wildcard_limit=wl)
        except InvenioDbQueryWildcardLimitError as excp:
            res = excp.res
            limit_reached = 1 # set the limit reached flag to true
    else:
        res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
    # fill the result set:
    for dummy_word, hitlist in res:
        hitset_bibphrase = intbitset(hitlist)
        # add the results:
        if set_used:
            hitset.union_update(hitset_bibphrase)
        else:
            # first row: adopt its hitset directly instead of unioning
            hitset = hitset_bibphrase
            set_used = 1
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(hitset)
    # okay, return result set:
    return hitset
def search_unit_in_bibxxx(p, f, type, wl=0):
    """Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
    The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
    # call word search method in some cases:
    if f == 'journal' or f.endswith('count'):
        return search_unit_in_bibwords(p, f, wl=wl)
    limit_reached = 0 # flag for knowing if the query limit has been reached
    use_query_limit = False # flag for knowing if to limit the query results or not
    query_addons = "" # will hold additional SQL code for the query
    query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
    # wash arguments:
    f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
    if type == 'r':
        query_addons = "REGEXP %s"
        query_params = (p,)
        use_query_limit = True
    else:
        p = string.replace(p, '*', '%') # we now use '*' as the truncation character
        ps = string.split(p, "->", 1) # check for span query:
        if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
            query_addons = "BETWEEN %s AND %s"
            query_params = (ps[0], ps[1])
            use_query_limit = True
        else:
            if string.find(p, '%') > -1:
                query_addons = "LIKE %s"
                query_params = (p,)
                use_query_limit = True
            else:
                query_addons = "= %s"
                query_params = (p,)
    # construct 'tl' which defines the tag list (MARC tags) to search in:
    tl = []
    if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts by two digits
    else:
        # deduce desired MARC tags on the basis of chosen 'f'
        tl = get_field_tags(f)
        if not tl:
            # f index does not exist, nevermind
            pass
    # okay, start search:
    l = [] # will hold list of recID that matched
    for t in tl:
        # deduce into which bibxxx table we will search:
        digit1, digit2 = int(t[0]), int(t[1])
        bx = "bib%d%dx" % (digit1, digit2)
        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
        # construct and run query:
        if t == "001":
            # tag 001 is the record id itself: query bibrec directly
            if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
                # verify that the params are integers (to avoid returning record 123 when searching for 123foo)
                try:
                    query_params = tuple(int(param) for param in query_params)
                except ValueError:
                    return intbitset()
            if use_query_limit:
                try:
                    res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
                                             query_params, wildcard_limit=wl)
                except InvenioDbQueryWildcardLimitError as excp:
                    res = excp.res
                    limit_reached = 1 # set the limit reached flag to true
            else:
                res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
                              query_params)
        else:
            query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
                    (bx, bibx, query_addons)
            if len(t) != 6 or t[-1:]=='%':
                # wildcard query, or only the beginning of field 't'
                # is defined, so add wildcard character:
                query += " AND bx.tag LIKE %s"
                query_params_and_tag = query_params + (t + '%',)
            else:
                # exact query for 't':
                query += " AND bx.tag=%s"
                query_params_and_tag = query_params + (t,)
            if use_query_limit:
                try:
                    res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
                except InvenioDbQueryWildcardLimitError as excp:
                    res = excp.res
                    limit_reached = 1 # set the limit reached flag to true
            else:
                res = run_sql(query, query_params_and_tag)
        # fill the result set:
        for id_bibrec in res:
            if id_bibrec[0]:
                l.append(id_bibrec[0])
    # check no of hits found:
    # NOTE(review): nb_hits is computed but never used in this function.
    nb_hits = len(l)
    # okay, return result set:
    hitset = intbitset(l)
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(hitset)
    return hitset
def search_unit_in_solr(p, f=None, m=None):
    """
    Query a Solr index and return an intbitset corresponding
    to the result. Parameters (p,f,m) are usual search unit ones.
    """
    if m in ('a', 'r'):
        # phrase/regexp query: drop partial-phrase markers, then quote
        if p.startswith('%') and p.endswith('%'):
            p = p[1:-1] # fix for partial phrase
        p = '"%s"' % p
    return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
    """
    Query a Xapian index and return an intbitset corresponding
    to the result. Parameters (p,f,m) are usual search unit ones.
    """
    if m in ('a', 'r'):
        # phrase/regexp query: drop partial-phrase markers, then quote
        if p.startswith('%') and p.endswith('%'):
            p = p[1:-1] # fix for partial phrase
        p = '"%s"' % p
    return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
    """
    Return hitset of recIDs found that were either created or modified
    (according to 'type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
    Does not pay attention to pattern, collection, anything. Useful
    to intersect later on with the 'real' query.
    """
    # pick the bibrec column to filter on; creation date is the default
    if search_type and search_type.startswith("m"):
        search_type = "modification_date"
    else:
        search_type = "creation_date" # by default we are searching for creation dates
    # a single argument of the form "a->b" is unfolded into a real range
    parts = datetext1.split('->')
    if len(parts) > 1 and datetext1 == datetext2:
        datetext1, datetext2 = parts[0], parts[1]
    if datetext1 == datetext2:
        # point query: prefix match so partial dates (e.g. "2010-01") work
        res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (search_type,),
                      (datetext1 + '%',))
    else:
        # inclusive range query
        res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (search_type, search_type),
                      (datetext1, datetext2))
    hitset = intbitset()
    for (recid,) in res:
        hitset += recid
    return hitset
def search_unit_by_times_cited(p, exclude_selfcites=False):
    """
    Return histset of recIDs found that are cited P times.
    Usually P looks like '10->23'.
    """
    numstr = '"%s"' % p
    # The lookup needs the full record-id universe whenever the query can
    # match records with zero citations (pattern 0, "0", or a span touching
    # 0); otherwise an empty list is enough.
    allrecs = []
    touches_zero = (p == 0 or p == "0" or
                    p.startswith("0->") or p.endswith("->0"))
    if touches_zero:
        allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
    return get_records_with_num_cites(numstr, allrecs,
                                      exclude_selfcites=exclude_selfcites)
def search_unit_refersto(query):
    """Return the set of records referred to by the records matching
    the given query (e.g. author:ellis); empty set for an empty query."""
    if not query:
        return intbitset([])
    matched = search_pattern(p=query)
    return get_refersto_hitset(matched)
def search_unit_refersto_excluding_selfcites(query):
    """Return the set of records referred to by the records matching
    the given query (e.g. author:ellis), excluding self-citations."""
    if not query:
        return intbitset([])
    matched = search_pattern(p=query)
    result = intbitset()
    # Both helpers return per-record (recid, hitset) pairs, aligned
    # record-by-record; subtract the self-citers from the citers.
    for cited, selfcited in zip(get_cited_by_list(matched),
                                get_self_cited_by_list(matched)):
        result += cited[1] - selfcited[1]
    return result
def search_unit_in_record_history(query):
    """Return hitset of recIDs that were modified by the given cataloguer.

    The query is either "name" or "name:date"; the date part may be a
    prefix (LIKE-matched) or a "start->end" range, and SPIRES date
    syntax is converted first.
    """
    if not query:
        return intbitset([])
    try:
        cataloguer_name, modification_date = query.split(":")
    except ValueError:
        # No (single) colon: the whole query is the cataloguer name.
        cataloguer_name = query
        modification_date = ""
    if not modification_date:
        sql = "SELECT id_bibrec FROM hstRECORD WHERE job_person=%s"
        return intbitset(run_sql(sql, (cataloguer_name,)))
    # Normalise SPIRES-style dates into Invenio syntax before use.
    spires_syntax_converter = SpiresToInvenioSyntaxConverter()
    modification_date = spires_syntax_converter.convert_date(modification_date)
    date_range = modification_date.split('->', 1)
    if len(date_range) > 1:
        start_date, end_date = date_range
        rows = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date>=%s AND job_date<=%s",
                       (cataloguer_name, start_date, end_date))
    else:
        rows = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date LIKE %s",
                       (cataloguer_name, modification_date + '%',))
    return intbitset(rows)
def search_unit_citedby(query):
    """Return the set of records cited by the records matching the
    given query (e.g. author:ellis); empty set for an empty query."""
    if not query:
        return intbitset([])
    matched = search_pattern(p=query)
    if not matched:
        return intbitset([])
    return get_citedby_hitset(matched)
def search_unit_collection(query, m, wl=None):
    """Return the records of the collection named by the query
    (e.g. collection:"BOOK" or collection:"Books").

    Falls back to a word search in the 'collection' index when no
    collection of that name exists.
    """
    if not len(query):
        return intbitset([])
    reclist = get_collection_reclist(query)
    if reclist:
        return reclist
    return search_unit_in_bibwords(query, 'collection', m, wl=wl)
def search_unit_citedby_excluding_selfcites(query):
    """
    Search for records satisfying the query (e.g. author:ellis) and
    return the set of records cited by these records (their references),
    excluding self-citations.
    """
    if query:
        ahitset = search_pattern(p=query)
        citees = intbitset()
        # Per-record reference lists, aligned record-by-record with
        # ahitset; each entry is a (citer, citees-hitset) pair.
        references = get_refers_to_list(ahitset)
        selfreferences = get_self_refers_to_list(ahitset)
        for refs, selfrefs in zip(references, selfreferences):
            # refs is in the form [(citer, citees), ...]
            citees += refs[1] - selfrefs[1]
        return citees
    else:
        return intbitset([])
def get_records_that_can_be_displayed(user_info,
                                      hitset_in_any_collection,
                                      current_coll=CFG_SITE_NAME,
                                      colls=None,
                                      permitted_restricted_collections=None):
    """
    Return the subset of hitset_in_any_collection that this user may
    see, honouring CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.

    With policy 'ANY' the user needs access to at least one restricted
    collection containing a record; with any other policy, access to
    every restricted collection containing it is required.

    :param user_info: user-info dict; consulted for the precached list
        of permitted restricted collections when not passed explicitly
    :param hitset_in_any_collection: intbitset of candidate recIDs
    :param current_coll: collection the user searched from
    :param colls: collections selected for display (defaults to
        [current_coll])
    :param permitted_restricted_collections: restricted collections the
        user may view; looked up from user_info when None
    """
    records_that_can_be_displayed = intbitset()
    if colls is None:
        colls = [current_coll]
    # let's get the restricted collections the user has rights to view
    if permitted_restricted_collections is None:
        permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
    policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
    current_coll_children = get_collection_allchildren(current_coll) # real & virtual
    # add all restricted collections, that the user has access to, and are under the current collection
    # do not use set here, in order to maintain a specific order:
    # children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
    colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
    colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
    if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
        #we need this to be able to remove records that are both in a public and restricted collection
        permitted_recids = intbitset()
        notpermitted_recids = intbitset()
        for collection in restricted_collection_cache.cache:
            if collection in permitted_restricted_collections:
                permitted_recids |= get_collection_reclist(collection)
            else:
                notpermitted_recids |= get_collection_reclist(collection)
        records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
    else:# the user needs to have access to all collections that restrict a records
        notpermitted_recids = intbitset()
        for collection in restricted_collection_cache.cache:
            if collection not in permitted_restricted_collections:
                notpermitted_recids |= get_collection_reclist(collection)
        records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
    if records_that_can_be_displayed.is_infinite():
        # We should not return infinite results for user.
        # Fall back to the explicit union of the displayable collections.
        records_that_can_be_displayed = intbitset()
        for coll in colls_to_be_displayed:
            records_that_can_be_displayed |= get_collection_reclist(coll)
    return records_that_can_be_displayed
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
    """Return dict of hitsets given by intersection of hitset with the collection universes.

    Keys of the returned dict are collection names; values are the
    (access-filtered) intbitsets of recIDs visible in each.  When no
    hits remain and the output format starts with "h", an HTML warning
    proposing searches in Home/restricted collections may be written
    to `req` via write_warning.

    :param req: request object, or None / a cStringIO buffer when
        called from the CLI (access filtering is then skipped)
    :param hitset_in_any_collection: intbitset of candidate recIDs
    :param colls: list of collection names to intersect with
    :param of: output format; "h..." enables HTML warnings
    :param verbose: when true (and of="h..."), emit timing info
    :param ln: language for localized messages
    :param display_nearest_terms_box: allow the "other collections
        gave N hits" suggestion box
    """
    _ = gettext_set_language(ln)
    # search stage 4: intersect with the collection universe
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    results = {}  # all final results
    results_nbhits = 0
    # calculate the list of recids (restricted or not) that the user has rights to access and we should display (only those)
    if not req or isinstance(req, cStringIO.OutputType): # called from CLI
        user_info = {}
        for coll in colls:
            results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
            results_nbhits += len(results[coll])
        records_that_can_be_displayed = hitset_in_any_collection
        permitted_restricted_collections = []
    else:
        user_info = collect_user_info(req)
        # let's get the restricted collections the user has rights to view
        if user_info['guest'] == '1':
            ## For guest users that are actually authorized to some restricted
            ## collection (by virtue of the IP address in a FireRole rule)
            ## we explicitly build the list of permitted_restricted_collections
            permitted_restricted_collections = get_permitted_restricted_collections(user_info)
        else:
            permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
        # let's build the list of the both public and restricted
        # child collections of the collection from which the user
        # started his/her search. This list of children colls will be
        # used in the warning proposing a search in that collections
        try:
            current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
        except:
            from flask import request
            current_coll = request.args.get('cc', CFG_SITE_NAME) # current_coll: coll from which user started his/her search
        current_coll_children = get_collection_allchildren(current_coll) # real & virtual
        # add all restricted collections, that the user has access to, and are under the current collection
        # do not use set here, in order to maintain a specific order:
        # children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
        colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
        colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
        records_that_can_be_displayed = get_records_that_can_be_displayed(
            user_info,
            hitset_in_any_collection,
            current_coll,
            colls,
            permitted_restricted_collections)
        for coll in colls_to_be_displayed:
            results[coll] = results.get(coll, intbitset()) | (records_that_can_be_displayed & get_collection_reclist(coll))
            results_nbhits += len(results[coll])
    if results_nbhits == 0:
        # no hits found, try to search in Home and restricted and/or hidden collections:
        # NOTE(review): colls_to_be_displayed and req.argd are only set in
        # the non-CLI branch above; this "h" path assumes a real request.
        results = {}
        results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
        results_in_restricted_collections = intbitset()
        results_in_hidden_collections = intbitset()
        for coll in permitted_restricted_collections:
            if not get_coll_ancestors(coll): # hidden collection
                results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
            else:
                results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
        # in this way, we do not count twice, records that are both in Home collection and in a restricted collection
        total_results = len(results_in_Home.union(results_in_restricted_collections))
        if total_results > 0:
            # some hits found in Home and/or restricted collections, so propose this search:
            if of.startswith("h") and display_nearest_terms_box:
                url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
                len_colls_to_display = len(colls_to_be_displayed)
                # trim the list of collections to first two, since it might get very large
                write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %
                              {'x_collection': '<em>' +
                                    string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') +
                                    (len_colls_to_display > 2 and ' et al' or '') + '</em>',
                               'x_url_open': '<a class="nearestterms" href="%s">' % (url),
                               'x_nb_hits': total_results,
                               'x_url_close': '</a>'}, req=req)
                # display the whole list of collections in an HTML comment
                if len_colls_to_display > 2:
                    write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %
                                  {'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
                                  req=req)
        else:
            # no hits found, either user is looking for a document and he/she has not rights
            # or user is looking for a hidden document:
            if of.startswith("h") and display_nearest_terms_box:
                if len(results_in_hidden_collections) > 0:
                    write_warning(_("No public collection matched your query. "
                                    "If you were looking for a hidden document, please type "
                                    "the correct URL for this record."), req=req)
                else:
                    write_warning(_("No public collection matched your query. "
                                    "If you were looking for a non-public document, please choose "
                                    "the desired restricted collection first."), req=req)
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
        write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
    return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
    """Intersect every collection hitset in `results` with `hitset`.

    `results` maps collection name -> hitset; the same mapping is
    returned with each value intersected with `hitset`.  If every
    intersection turns out empty: when `ap` (approximate pattern) is
    true, print `aptext` (HTML output formats only) and hand back the
    original results unchanged; otherwise return an empty dict.
    """
    # Snapshot the original results only if we may need to fall back.
    fallback = copy.deepcopy(results) if ap else {}
    intersected = {}
    total_hits = 0
    for collection in results.keys():
        intersected[collection] = results[collection].intersection(hitset)
        total_hits += len(intersected[collection])
    if total_hits == 0:
        if of.startswith("h"):
            write_warning(aptext, req=req)
        intersected = fallback
    return intersected
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
    """Return a box similar to ``Not satisfied...'' one by proposing
    author searches for similar names. Namely, take AUTHOR_NAME
    and the first initial of the firstname (after comma) and look
    into author index whether authors with e.g. middle names exist.
    Useful mainly for CERN Library that sometimes contains name
    forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
    same person. The box isn't proposed if no similarly named
    authors are found to exist.

    :param author_name: name in "Surname, Firstname" form
    :param ln: language for the rendered box
    :return: HTML string (empty when disabled or nothing found)
    """
    # return nothing if not configured:
    if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
        return ""
    # return empty box if there is no initial:
    if re.match(r'[^ ,]+, [^ ]', author_name) is None:
        return ""
    # firstly find name comma initial:
    author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
    # secondly search for similar name forms (both as-is and accent-stripped):
    similar_author_names = {}
    for name in author_name_to_search, strip_accents(author_name_to_search):
        for tag in get_field_tags("author"):
            # deduce into which bibxxx table we will search:
            digit1, digit2 = int(tag[0]), int(tag[1])
            bx = "bib%d%dx" % (digit1, digit2)
            if len(tag) != 6 or tag[-1:] == '%':
                # only the beginning of field 't' is defined, so add wildcard character:
                res = run_sql("""SELECT bx.value FROM %s AS bx
                                  WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
                              (name + "%", tag + "%"))
            else:
                res = run_sql("""SELECT bx.value FROM %s AS bx
                                  WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
                              (name + "%", tag))
            for row in res:
                similar_author_names[row[0]] = 1
    # remove the original name and sort the list:
    try:
        del similar_author_names[author_name]
    except KeyError:
        pass
    # thirdly print the box:
    out = ""
    if similar_author_names:
        out_authors = similar_author_names.keys()
        out_authors.sort()
        tmp_authors = []
        for out_author in out_authors:
            # keep only names that actually have hits in the author index
            nbhits = get_nbhits_in_bibxxx(out_author, "author")
            if nbhits:
                tmp_authors.append((out_author, nbhits))
        out += websearch_templates.tmpl_similar_author_names(
            authors=tmp_authors, ln=ln)
    return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
    """Return text box containing list of 'n' nearest terms above/below 'p'
    for the field 'f' for matching type 't' (words/phrases) in
    language 'ln'.
    Propose new searches according to `urlargs' with the new words.
    If `intro_text_p' is true, then display the introductory message,
    otherwise print only the nearest terms in the box content.

    :param urlargd: dict of current search URL arguments; each proposed
        term is substituted into the matching p/p1/p2/p3 value
    :param p: the pattern that gave no hits
    :param f: the field searched in (may be empty)
    :param t: 'w' for word index browsing, anything else for phrases
    :param n: number of terms to propose above and below p
    """
    # load the right message language
    _ = gettext_set_language(ln)
    if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
        return _("Your search did not match any records. Please try again.")
    nearest_terms = []
    if not p: # sanity check
        p = "."
    if p.startswith('%') and p.endswith('%'):
        p = p[1:-1] # fix for partial phrase
    index_id = get_index_id_from_field(f)
    if f == 'fulltext':
        if CFG_SOLR_URL:
            return _("No match found, please enter different search terms.")
        else:
            # FIXME: workaround for not having native phrase index yet
            t = 'w'
    # special indexes get a plain message instead of term suggestions:
    if f == 'refersto' or f == 'referstoexcludingselfcites':
        return _("There are no records referring to %(x_rec)s.", x_rec=cgi.escape(p))
    if f == 'cataloguer':
        return _("There are no records modified by %(x_rec)s.", x_rec=cgi.escape(p))
    if f == 'citedby' or f == 'citedbyexcludingselfcites':
        return _("There are no records cited by %(x_rec)s.", x_rec=cgi.escape(p))
    # look for nearest terms:
    if t == 'w':
        nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
        if not nearest_terms:
            return _("No word index is available for %(x_name)s.",
                     x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
    else:
        nearest_terms = []
        if index_id:
            nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
        if f == 'datecreated' or f == 'datemodified':
            nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
        if not nearest_terms:
            nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
        if not nearest_terms:
            return _("No phrase index is available for %(x_name)s.",
                     x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
    # build (term, nbhits, new-url-args) triples for the template:
    terminfo = []
    for term in nearest_terms:
        if t == 'w':
            hits = get_nbhits_in_bibwords(term, f)
        else:
            if index_id:
                hits = get_nbhits_in_idxphrases(term, f)
            elif f == 'datecreated' or f == 'datemodified':
                hits = get_nbhits_in_bibrec(term, f)
            else:
                hits = get_nbhits_in_bibxxx(term, f)
        argd = {}
        argd.update(urlargd)
        # check which fields contained the requested parameter, and replace it.
        for px, dummy_fx in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
            if px in argd:
                argd_px = argd[px]
                if t == 'w':
                    # p was stripped of accents, to do the same:
                    argd_px = strip_accents(argd_px)
                #argd[px] = string.replace(argd_px, p, term, 1)
                #we need something similar, but case insensitive
                pattern_index = string.find(argd_px.lower(), p.lower())
                if pattern_index > -1:
                    argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
                    break
                #this is doing exactly the same as:
                #argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
                #but is ~4x faster (2us vs. 8.25us)
        terminfo.append((term, hits, argd))
    intro = ""
    if intro_text_p: # add full leading introductory text
        if f:
            intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
                     {'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
                      'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
        else:
            intro = _("Search term %(x_name)s did not match any record. Nearest terms in any collection are:",
                     x_name=("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>"))
    return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
                                                     intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
    """Return the n_above closest terms above, then `p` itself, then the
    n_below closest terms below `p` in the word index of field `f`.

    Returns an empty list when the field has no dedicated word index.
    """
    # Pick the word index table: field-specific when one exists,
    # otherwise the catch-all "anyfield" index.
    bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if not index_id:
            return []
        bibwordsX = "idxWORD%02dF" % index_id
    # Terms alphabetically above p come back in descending order;
    # reverse them so the whole result reads ascending.
    above = [row[0] for row in
             run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
                     (p, n_above))]
    above.reverse()
    below = [row[0] for row in
             run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
                     (p, n_below))]
    return above + [p] + below
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
    """Browse (-n_above, +n_below) closest bibliographic phrases
    around pattern p in the idxPHRASE table of the given index,
    regardless of collection.
    Return list of [phrase1, phrase2, ... , phrase_n]."""
    if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
        return [p]
    idxphraseX = "idxPHRASE%02dF" % index_id
    # Phrases above p arrive in descending order; flip to ascending.
    above = [row[0] for row in run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))]
    above.reverse()
    # term>=p, so p itself (when indexed) appears at the head of `below`.
    below = [row[0] for row in run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))]
    return above + below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
    """Browse (-n_above, +n_below) closest bibliographic phrases around
    pattern p in the given idxPHRASE table, restricted to the given
    collection (an intbitset).
    Return list of [(phrase1, hitset), (phrase2, hitset), ... ] where
    each hitset value is the number of hits within the collection."""
    idxphraseX = "idxPHRASE%02dF" % index_id
    # Overfetch (x3) because some phrases will have no hits inside the
    # collection and get filtered out below.
    above = []
    for term, hitlist in run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3)):
        in_coll = intbitset(hitlist) & collection
        if in_coll:
            above.append((term, len(in_coll)))
    below = []
    for term, hitlist in run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3)):
        in_coll = intbitset(hitlist) & collection
        if in_coll:
            below.append((term, len(in_coll)))
    # `above` is in descending order; restore ascending order and trim
    # to the requested window sizes.
    above.reverse()
    return above[-n_above:] + below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
    """Browse (-n_above, +n_below) closest bibliographic phrases
    for the given pattern p in the given field f, regardless
    of collection.
    Return list of [phrase1, phrase2, ... , phrase_n].

    Delegates to the word index for the 'journal' field and to the
    idxPHRASE tables when the field has a phrase index; otherwise
    browses the raw bibXXx tables for all MARC tags mapped to f.
    """
    ## determine browse field:
    if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
        f, p = string.split(p, ":", 1)
    # FIXME: quick hack for the journal index
    if f == 'journal':
        return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
    ## We are going to take max(n_below, n_above) as the number of
    ## values to fetch from bibXXx. This is needed to work around
    ## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
    ## use MySQL 4.1.x or our own idxPHRASE in the future.
    index_id = get_index_id_from_field(f)
    if index_id:
        return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
    n_fetch = 2*max(n_below, n_above)
    ## construct 'tl' which defines the tag list (MARC tags) to search in:
    tl = []
    if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts by two digits
    else:
        # deduce desired MARC tags on the basis of chosen 'f'
        tl = get_field_tags(f)
    ## start browsing to fetch list of hits:
    browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
    # always add self to the results set (partial-phrase markers stripped):
    browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
    for t in tl:
        # deduce into which bibxxx table we will search:
        digit1, digit2 = int(t[0]), int(t[1])
        bx = "bib%d%dx" % (digit1, digit2)
        # firstly try to get `n' closest phrases above `p':
        if len(t) != 6 or t[-1:] == '%': # only the beginning of field 't' is defined, so add wildcard character:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value<%%s AND bx.tag LIKE %%s
                              ORDER BY bx.value DESC LIMIT %%s""" % bx,
                          (p, t + "%", n_fetch))
        else:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value<%%s AND bx.tag=%%s
                              ORDER BY bx.value DESC LIMIT %%s""" % bx,
                          (p, t, n_fetch))
        for row in res:
            browsed_phrases[row[0]] = 1
        # secondly try to get `n' closest phrases equal to or below `p':
        if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value>=%%s AND bx.tag LIKE %%s
                              ORDER BY bx.value ASC LIMIT %%s""" % bx,
                          (p, t + "%", n_fetch))
        else:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value>=%%s AND bx.tag=%%s
                              ORDER BY bx.value ASC LIMIT %%s""" % bx,
                          (p, t, n_fetch))
        for row in res:
            browsed_phrases[row[0]] = 1
    # select first n words only: (this is needed as we were searching
    # in many different tables and so aren't sure we have more than n
    # words right; this of course won't be needed when we shall have
    # one ACC table only for given field):
    phrases_out = browsed_phrases.keys()
    phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
                                      string.lower(strip_accents(y))))
    # find position of self:
    try:
        idx_p = phrases_out.index(p)
    except ValueError:
        idx_p = len(phrases_out)/2
    # return n_above and n_below:
    return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
    """Return a sorted list of nearest date values from the bibrec table.
    p is usually a date, and f either 'datecreated' or 'datemodified'.
    Note: below/above count is very approximative, not really respected.
    """
    col = 'creation_date'
    if f == 'datemodified':
        col = 'modification_date'
    # %%%% collapses to a literal %% under the outer %-format, leaving
    # %Y etc. for MySQL's DATE_FORMAT after parameter substitution.
    res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
                             FROM bibrec WHERE %s < %%s
                             ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
                        (p, n_above))
    res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
                             FROM bibrec WHERE %s > %%s
                             ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
                        (p, n_below))
    # Deduplicate and order the combined results.
    dates = set([])
    for row in res_above:
        dates.add(row[0])
    for row in res_below:
        dates.add(row[0])
    return sorted(dates)
def get_nbhits_in_bibrec(term, f):
    """Return the number of bibrec rows whose creation date (or, when
    f == 'datemodified', modification date) starts with `term`."""
    if f == 'datemodified':
        col = 'modification_date'
    else:
        col = 'creation_date'
    rows = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
                   (term + '%',))
    return rows[0][0]
def get_nbhits_in_bibwords(word, f):
    """Return number of hits for `word` inside the word index of field
    `f` (the catch-all "anyfield" index when f is empty); 0 when the
    field has no word index."""
    total = 0
    # Pick the word index table to query.
    table = "idxWORD%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if not index_id:
            return 0
        table = "idxWORD%02dF" % index_id
    if word:
        for row in run_sql("SELECT hitlist FROM %s WHERE term=%%s" % table,
                           (word,)):
            total += len(intbitset(row[0]))
    return total
def get_nbhits_in_idxphrases(word, f):
    """Return number of hits for `word` inside the phrase index of field
    `f` (the catch-all "anyfield" index when f is empty); 0 when the
    field has no phrase index."""
    total = 0
    # Pick the phrase index table to query.
    table = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if not index_id:
            return 0
        table = "idxPHRASE%02dF" % index_id
    if word:
        for row in run_sql("SELECT hitlist FROM %s WHERE term=%%s" % table,
                           (word,)):
            total += len(intbitset(row[0]))
    return total
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
    """Return the number of records carrying value `p` in field `f`;
    when `in_hitset` is given, count only records inside that set."""
    ## determine browse field:
    # A missing field may be packed into p as "field:value".
    if not f and p.find(":") > 0:
        f, p = p.split(":", 1)
    # FIXME: quick hack for the journal index
    if f == 'journal':
        return get_nbhits_in_bibwords(p, f)
    ## resolve the MARC tag list to search in:
    if str(f[0]).isdigit() and str(f[1]).isdigit():
        tags = [f]  # 'f' already looks like a MARC tag
    else:
        tags = get_field_tags(f)
    matched = {}  # unique recIDs found so far
    for t in tags:
        # deduce into which bibxxx table we will search:
        digit1, digit2 = int(t[0]), int(t[1])
        bx = "bib%d%dx" % (digit1, digit2)
        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
        if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
            rows = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
                           WHERE bx.value=%%s AND bx.tag LIKE %%s
                           AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
                           (p, t + "%"))
        else:
            rows = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
                           WHERE bx.value=%%s AND bx.tag=%%s
                           AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
                           (p, t))
        for row in rows:
            matched[row[0]] = 1
    if in_hitset is None:
        return len(matched)
    return len(intbitset(matched.keys()).intersection(in_hitset))
def get_mysql_recid_from_aleph_sysno(sysno):
    """Return the DB recID for the given ALEPH sysno
    (e.g. "002379334CER"), or None when no record carries it."""
    res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
                        WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
                  (sysno,))
    if res:
        return res[0][0]
    return None
def guess_primary_collection_of_a_record(recID):
    """Return primary collection name a record recid belongs to, by
    testing 980 identifier.
    May lead to bad guesses when a collection is defined dynamically
    via dbquery.
    In that case, return 'CFG_SITE_NAME'."""
    out = CFG_SITE_NAME
    dbcollids = get_fieldvalues(recID, "980__a")
    for dbcollid in dbcollids:
        # try all the query spellings a collection's dbquery may use
        # for this 980__a value:
        variants = ("collection:" + dbcollid,
                    'collection:"' + dbcollid + '"',
                    "980__a:" + dbcollid,
                    '980__a:"' + dbcollid + '"',
                    '980:' + dbcollid ,
                    '980:"' + dbcollid + '"')
        res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
        if res:
            out = res[0][0]
            break
    if CFG_CERN_SITE:
        recID = int(recID)
        # dirty hack for ATLAS collections at CERN:
        # refine to the Physics/General sub-collection actually holding it
        if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
            for alternative_collection in ('ATLAS Communications Physics',
                                           'ATLAS Communications General',
                                           'ATLAS Internal Notes Physics',
                                           'ATLAS Internal Notes General',):
                if recID in get_collection_reclist(alternative_collection):
                    return alternative_collection
        # dirty hack for FP
        FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
                          'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
                          'MS': ['Current Market Surveys', 'Archived Market Surveys']}
        fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
        for coll in fp_coll_ids:
            for coll_name in FP_collections[coll]:
                if recID in get_collection_reclist(coll_name):
                    return coll_name
    return out
# Matches the collection part of a referer URL path, e.g. "/collection/Books".
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
    """Return collection name a record recid belongs to, by first testing
    the referer URL if provided and otherwise returning the
    primary collection.

    :param recID: record identifier
    :param referer: optional referer URL; a /collection/... or /search
        path on this site is used to pick the collection
    :param recreate_cache_if_needed: refresh the collection-reclist
        cache before using it (only relevant on the /search path)
    """
    if referer:
        dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
        #requests can come from different invenio installations, with different collections
        if CFG_SITE_URL.find(hostname) < 0:
            return guess_primary_collection_of_a_record(recID)
        g = _re_collection_url.match(path)
        if g:
            name = urllib.unquote_plus(g.group(1))
            #check if this collection actually exist (also normalize the name if case-insensitive)
            name = get_coll_normalised_name(name)
            if name and recID in get_collection_reclist(name):
                return name
        elif path.startswith('/search'):
            if recreate_cache_if_needed:
                collection_reclist_cache.recreate_cache_if_needed()
            # consult the cc/c URL arguments of the search the user came from
            query = cgi.parse_qs(query)
            for name in query.get('cc', []) + query.get('c', []):
                name = get_coll_normalised_name(name)
                if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
                    return name
    return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
    """Return True if record recID belongs to at least one collection.

    This is a good, although not perfect, indicator to guess whether
    webcoll has already run after the record entered the system.
    """
    if recreate_cache_if_needed:
        collection_reclist_cache.recreate_cache_if_needed()
    # Short-circuits on the first collection containing the record.
    return any(recID in get_collection_reclist(name, recreate_cache_if_needed=False)
               for name in collection_reclist_cache.cache.keys())
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
    """Return all the collection names the record belongs to.
    Note this function is O(n_collections)."""
    if recreate_cache_if_needed:
        collection_reclist_cache.recreate_cache_if_needed()
    return [name for name in collection_reclist_cache.cache.keys()
            if recID in get_collection_reclist(name, recreate_cache_if_needed=False)]
def get_tag_name(tag_value, prolog="", epilog=""):
    """Look up the human-readable name of a MARC tag value in the 'tag'
    table and return it wrapped as prolog+name+epilog.
    Return empty string when the value is unknown.
    Example: input='100__%', output='first author'."""
    res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
    if res:
        return prolog + res[0][0] + epilog
    return ""
def get_fieldcodes():
    """Return the list of field codes that may have been passed as
    'search options' in the URL, e.g. ['subject', 'division']."""
    return [row[0] for row in run_sql("SELECT DISTINCT(code) FROM field")]
def get_field_name(code):
    """Return the human-readable field name for the given field code
    (e.g. reportnumber -> report number), or "" when unknown."""
    res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
    if not res:
        return ""
    return res[0][0]
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
    """Return buffer of ALEPH sequential-like textual format with fields found
    in the list TAGS_IN for record RECID.

    If can_see_hidden is True, just print everything.  Otherwise hide fields
    from CFG_BIBFORMAT_HIDDEN_TAGS.

    :param recID: record identifier
    :param tags_in: a single MARC tag (string) or a list of MARC tags;
        an empty list means "output all tags"
    :param can_see_hidden: when True, do not filter hidden tags
    :return: the textual buffer (one line per field occurrence)
    """
    out = ""
    if type(tags_in) is not list:
        tags_in = [tags_in]
    if len(tags_in) == 1 and len(tags_in[0]) == 6:
        ## case A: one concrete subfield asked, so print its value if found
        ## (use with care: can mislead if field has multiple occurrences)
        out += "\n".join(get_fieldvalues(recID, tags_in[0]))
    else:
        ## case B: print our "text MARC" format; works safely all the time
        # find out which tags to output:
        dict_of_tags_out = {}
        if not tags_in:
            # no tags asked for -> output all tags 00% .. 99%
            for i in range(0, 10):
                for j in range(0, 10):
                    dict_of_tags_out["%d%d%%" % (i, j)] = 1
        else:
            for tag in tags_in:
                if len(tag) == 0:
                    for i in range(0, 10):
                        for j in range(0, 10):
                            dict_of_tags_out["%d%d%%" % (i, j)] = 1
                elif len(tag) == 1:
                    for j in range(0, 10):
                        dict_of_tags_out["%s%d%%" % (tag, j)] = 1
                elif len(tag) < 5:
                    dict_of_tags_out["%s%%" % tag] = 1
                else:
                    # len(tag) >= 5: truncate to tag + indicators.
                    # BUGFIX: was `elif tag >= 6:`, a str-vs-int comparison
                    # (always True on Python 2, TypeError on Python 3); this
                    # branch is only reached when len(tag) >= 5, so a plain
                    # `else` preserves the Python 2 behavior.
                    dict_of_tags_out[tag[0:5]] = 1
        tags_out = sorted(dict_of_tags_out.keys())
        # search all bibXXx tables as needed:
        for tag in tags_out:
            digits = tag[0:2]
            try:
                intdigits = int(digits)
                if intdigits < 0 or intdigits > 99:
                    raise ValueError
            except ValueError:
                # invalid tag value asked for
                continue
            if tag.startswith("001") or tag.startswith("00%"):
                if out:
                    out += "\n"
                out += "%09d %s %d" % (recID, "001__", recID)
            bx = "bib%sx" % digits
            bibx = "bibrec_bib%sx" % digits
            query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
                    "WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
                    "ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
            res = run_sql(query, (recID, str(tag)+'%'))
            # go through fields:
            field_number_old = -999
            field_old = ""
            for row in res:
                field, value, field_number = row[0], row[1], row[2]
                # NOTE(review): ind1/ind2 are computed and normalised but
                # never used afterwards; kept so that a malformed short
                # `field` keeps raising IndexError exactly as before.
                ind1, ind2 = field[3], field[4]
                printme = True
                # check the stuff in hidden fields:
                if not can_see_hidden:
                    for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
                        ltag = len(htag)
                        samelenfield = field[0:ltag]
                        if samelenfield == htag:
                            printme = False
                if ind1 == "_":
                    ind1 = ""
                if ind2 == "_":
                    ind2 = ""
                # print field tag
                if printme:
                    if field_number != field_number_old or field[:-1] != field_old[:-1]:
                        if out:
                            out += "\n"
                        out += "%09d %s " % (recID, field[:5])
                        field_number_old = field_number
                        field_old = field
                    # print subfield value
                    if field[0:2] == "00" and field[-1:] == "_":
                        out += value
                    else:
                        out += "$$%s%s" % (field[-1:], value)
    return out
def get_merged_recid(recID):
    """ Return the record ID of the record with
    which the given record has been merged.
    @param recID: deleted record recID
    @type recID: int
    @return: merged record recID
    @rtype: int or None
    """
    # 970__d holds the target recID; return the first parseable value
    for value in get_fieldvalues(recID, "970__d"):
        try:
            return int(value)
        except ValueError:
            continue
    return None
def record_empty(recID):
    """
    Is this record empty, e.g. has only 001, waiting for integration?
    @param recID: the record identifier.
    @type recID: int
    @return: 1 if the record is empty, 0 otherwise.
    @rtype: int
    """
    record = get_record(recID)
    return bibrecord.record_empty(record)
def record_public_p(recID, recreate_cache_if_needed=True):
    """Return 1 if the record is public, i.e. if it can be found in the Home collection.
    Return 0 otherwise.
    """
    home_reclist = get_collection_reclist(
        CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
    return recID in home_reclist
def get_creation_date(recID, fmt="%Y-%m-%d"):
    "Returns the creation date of the record 'recID'."
    rows = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s",
                   (fmt, recID), 1)
    return rows[0][0] if rows else ""
def get_modification_date(recID, fmt="%Y-%m-%d"):
    "Returns the date of last modification for the record 'recID'."
    rows = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s",
                   (fmt, recID), 1)
    return rows[0][0] if rows else ""
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
                      aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
                      sc=1, pl_in_url="",
                      d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
                      cpu_time=-1, middle_only=0, em=""):
    """Print the stripe with information on 'collection', 'nb_found' results
    and CPU time, together with the navigation links (beg/next/prev/end)
    inside the result set.

    When middle_only is 1, only the middle navigation box is printed; this
    is suitable for the bottom of the search results page.  An empty string
    is returned when the 'em' display filter excludes the search-info box.
    """
    if em != '' and EM_REPOSITORY["search_info"] not in em:
        return ""
    # sanity check: clamp the first-record counter into the result set
    jrec = max(jrec, 1)
    if jrec > nb_found:
        jrec = max(nb_found - rg + 1, 1)
    return websearch_templates.tmpl_print_search_info(
        ln=ln,
        collection=collection,
        aas=aas,
        collection_name=get_coll_i18nname(collection, ln, False),
        collection_id=get_colID(collection),
        middle_only=middle_only,
        rg=rg,
        nb_found=nb_found,
        sf=sf, so=so, rm=rm, of=of, ot=ot,
        p=p, f=f,
        p1=p1, p2=p2, p3=p3,
        f1=f1, f2=f2, f3=f3,
        m1=m1, m2=m2, m3=m3,
        op1=op1, op2=op2,
        pl_in_url=pl_in_url,
        d1y=d1y, d1m=d1m, d1d=d1d,
        d2y=d2y, d2m=d2m, d2d=d2d,
        dt=dt,
        jrec=jrec,
        sc=sc,
        sp=sp,
        all_fieldcodes=get_fieldcodes(),
        cpu_time=cpu_time,
    )
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
                             aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
                             sc=1, pl_in_url="",
                             d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
                             cpu_time=-1, middle_only=0, em=""):
    """Print the stripe with information on 'collection', 'nb_found' results
    and CPU time for a hosted collection, together with the navigation links
    (beg/next/prev/end) inside the result set.

    When middle_only is 1, only the middle navigation box is printed; this
    is suitable for the bottom of the search results page.  An empty string
    is returned when the 'em' display filter excludes the search-info box.
    """
    if em != '' and EM_REPOSITORY["search_info"] not in em:
        return ""
    # sanity check: clamp the first-record counter into the result set
    jrec = max(jrec, 1)
    if jrec > nb_found:
        jrec = max(nb_found - rg + 1, 1)
    return websearch_templates.tmpl_print_hosted_search_info(
        ln=ln,
        collection=collection,
        aas=aas,
        collection_name=get_coll_i18nname(collection, ln, False),
        collection_id=get_colID(collection),
        middle_only=middle_only,
        rg=rg,
        nb_found=nb_found,
        sf=sf, so=so, rm=rm, of=of, ot=ot,
        p=p, f=f,
        p1=p1, p2=p2, p3=p3,
        f1=f1, f2=f2, f3=f3,
        m1=m1, m2=m2, m3=m3,
        op1=op1, op2=op2,
        pl_in_url=pl_in_url,
        d1y=d1y, d1m=d1m, d1d=d1d,
        d2y=d2y, d2m=d2m, d2d=d2d,
        dt=dt,
        jrec=jrec,
        sc=sc,
        sp=sp,
        all_fieldcodes=get_fieldcodes(),
        cpu_time=cpu_time,
    )
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=None, hosted_colls_potential_results_p=False, em=""):
    """Print the results overview box with links to particular collections below.

    :param colls: collection codes to list in the overview
    :param results_final_nb_total: total number of records found
    :param results_final_nb: per-collection numbers of records found
    :param cpu_time: CPU time spent on the search, shown in the box
    :param ec: external collections selection passed through to the template.
        BUGFIX(idiom): previously defaulted to a shared mutable list ``[]``;
        it now defaults to None and is normalised to a fresh empty list,
        which is backward-compatible for all callers.
    :param em: display filter; when non-empty, the box is only printed if
        EM_REPOSITORY["overview"] is requested
    :return: rendered HTML, or "" when suppressed via `em`
    """
    if ec is None:
        ec = []
    if em != "" and EM_REPOSITORY["overview"] not in em:
        return ""
    # build one {id, code, name} entry per collection for the template
    new_colls = [{'id': get_colID(coll),
                  'code': coll,
                  'name': get_coll_i18nname(coll, ln, False)}
                 for coll in colls]
    return websearch_templates.tmpl_print_results_overview(
        ln=ln,
        results_final_nb_total=results_final_nb_total,
        results_final_nb=results_final_nb,
        cpu_time=cpu_time,
        colls=new_colls,
        ec=ec,
        hosted_colls_potential_results_p=hosted_colls_potential_results_p,
    )
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
    """Print the full results of a hosted collection.

    :param url_and_engine: the hosted collection (url, engine) pair to render
    :param of: output format; the short-circuit messages below apply to HTML
        ('h*') formats only
    :param no_records_found: when True (HTML formats), return a short
        "no results" message instead of rendering
    :param search_timed_out: when True (HTML formats), return a short
        "engine did not respond" message instead of rendering
    :param limit: maximum number of external results to display
    :return: rendered HTML (or one of the short messages above)
    """
    # BUGFIX: `of` defaults to None, but the old code called
    # of.startswith("h") unconditionally, crashing with AttributeError
    # whenever the caller relied on the default; guard first.
    if of and of.startswith("h"):
        if no_records_found:
            return "<br />No results found."
        if search_timed_out:
            return "<br />The search engine did not respond in time."
    return websearch_templates.tmpl_print_hosted_results(
        url_and_engine=url_and_engine,
        ln=ln,
        of=of,
        req=req,
        limit=limit,
        display_body = em == "" or EM_REPOSITORY["body"] in em,
        display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
    """
    Cache holding all structures created by bibsort
    ( _data, data_dict).

    Each instance caches, for one sorting method (one bsrMETHOD row):
      - 'data_dict_ordered': {recid: weight} from bsrMETHODDATA, and
      - 'bucket_data': {bucket_no: intbitset} from bsrMETHODDATABUCKET.
    """
    def __init__(self, method_name):
        # Resolve the bsrMETHOD id for this method name; method_id stays 0
        # when the method is unknown, which makes cache_filler() return an
        # empty cache below.
        self.method_name = method_name
        self.method_id = 0
        res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
        if res and res[0]:
            self.method_id = res[0][0]
        else:
            self.method_id = 0
        def cache_filler():
            """Load the weights dict and the per-bucket intbitsets for this
            method from the database; return {} on any database problem."""
            method_id = self.method_id
            alldicts = {}
            if self.method_id == 0:
                return {}
            try:
                res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
                where id_bsrMETHOD = %s""", (method_id,))
                res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
                where id_bsrMETHOD = %s""", (method_id,))
            except Exception:
                # database problems, return empty cache
                return {}
            try:
                data_dict_ordered = deserialize_via_marshal(res_data[0][0])
            except IndexError:
                # no bsrMETHODDATA row yet -> empty weights
                data_dict_ordered = {}
            alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
            if not res_buckets:
                alldicts['bucket_data'] = {}
                return alldicts
            for row in res_buckets:
                bucket_no = row[0]
                try:
                    bucket_data = intbitset(row[1])
                except IndexError:
                    bucket_data = intbitset([])
                alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
            return alldicts
        def timestamp_verifier():
            """Return the most recent last_updated timestamp (as string)
            among bsrMETHODDATA and bsrMETHODDATABUCKET for this method;
            the epoch is used when a table has no row."""
            method_id = self.method_id
            res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
            try:
                update_time_methoddata = str(res[0][0])
            except IndexError:
                update_time_methoddata = '1970-01-01 00:00:00'
            res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
            try:
                update_time_buckets = str(res[0][0])
            except IndexError:
                update_time_buckets = '1970-01-01 00:00:00'
            # string comparison works because the format is ISO-like
            return max(update_time_methoddata, update_time_buckets)
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
def get_sorting_methods():
    """Return a {method name: definition} mapping for every bibsort
    method that already has computed data (a row in bsrMETHODDATA)."""
    rows = run_sql("""SELECT m.name, m.definition
                    FROM bsrMETHOD m, bsrMETHODDATA md
                    WHERE m.id = md.id_bsrMETHOD""")
    return dict(rows)
# Module-level bibsort state, built once at import time:
# SORTING_METHODS maps method name -> definition string ('FIELD:...',
# 'MARC:...', 'RNK:...'); CACHE_SORTED_DATA maps method name -> its
# BibSortDataCacher instance.
SORTING_METHODS = get_sorting_methods()
CACHE_SORTED_DATA = {}
for sorting_method in SORTING_METHODS:
    try:
        # Probe for an already-existing cacher and only build a new one on
        # KeyError (NOTE(review): at first import the dict is empty, so this
        # only matters if this top-level code is ever executed again).
        CACHE_SORTED_DATA[sorting_method].is_ok_p
    except KeyError:
        CACHE_SORTED_DATA[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_from_sort_fields(sort_fields):
    """Map a list of sort fields to their MARC tags.

    Return (tags, error_field): `tags` is the accumulated list of MARC
    tags, and `error_field` names the first field with no tags associated
    (empty string when all fields resolved), so the caller can display a
    message to the user.
    """
    if not sort_fields:
        return [], ''
    tags = []
    for sort_field in sort_fields:
        if sort_field and len(sort_field) > 1 and str(sort_field[0:2]).isdigit():
            # starts with two digits -> most likely already a MARC tag
            tags.append(sort_field)
            continue
        # otherwise, look the field code up in the 'field' table
        field_tags = get_field_tags(sort_field)
        if not field_tags:
            return [], sort_field
        tags.extend(field_tags)
    return tags, ''
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field='', sorting_methods=SORTING_METHODS):
    """Initial entry point for ranking records, acts like a dispatcher.
    (i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
    (ii)rank_method_code is not in bsrMETHOD, use bibrank;

    Returns a 5-tuple (recids, scores, prefix, suffix, comment).
    """
    # Special case: sorting by citations is fast because we store the
    # ranking dictionary in memory, so we do not use bibsort buckets.
    if CFG_BIBSORT_ENABLED and sorting_methods and rank_method_code != 'citation':
        for sort_method in sorting_methods:
            definition = sorting_methods[sort_method]
            # An 'RNK:<code>' bibsort definition matching the requested
            # ranking method means precomputed buckets exist -> use bibsort
            # (sort_or_rank='r' makes it return scores as well).
            if definition.startswith('RNK') and \
               definition.replace('RNK:', '').strip().lower() == rank_method_code.lower():
                solution_recs, solution_scores = \
                        sort_records_bibsort(req, hitset_global, sort_method,
                                             '', sort_order, verbose, of, ln,
                                             rg, jrec, 'r')
                comment = ''
                if verbose > 0:
                    comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
                return solution_recs, solution_scores, '(', ')', comment
    # Fall back to bibrank; citation ranking ignores the pattern.
    if rank_method_code.lower() == 'citation':
        related_to = []
    else:
        related_to = pattern
    solution_recs, solution_scores, prefix, suffix, comment = \
        rank_records_bibrank(rank_method_code=rank_method_code,
                             rank_limit_relevance=rank_limit_relevance,
                             hitset=hitset_global,
                             verbose=verbose,
                             field=field,
                             related_to=related_to,
                             rg=rg,
                             jrec=jrec)
    # Solution recs can be None, in case of error or other cases
    # which should be all be changed to return an empty list.
    if solution_recs and sort_order == 'd':
        solution_recs.reverse()
        solution_scores.reverse()
    return solution_recs, solution_scores, prefix, suffix, comment
def sort_records_latest(recIDs, jrec, rg, sort_order):
    """Default "latest first" ordering: reverse the list in place when
    sort_order is 'd' (descending), then slice out the requested window."""
    descending = (sort_order == 'd')
    if descending:
        recIDs.reverse()
    return slice_records(recIDs, jrec, rg)
def sort_or_rank_records(req, recIDs, rm, sf, so, sp, p, verbose=0, of='hb',
                         ln=CFG_SITE_LANG, rg=None, jrec=None, field='',
                         sorting_methods=SORTING_METHODS):
    """Sort or rank records.

    Entry point that dispatches to the ranking engine when a ranking
    method `rm` is requested, to the sorting machinery when a sort field
    `sf` is given (or bibsort is enabled); otherwise the hitset is
    returned unchanged as a list.
    """
    if rm:
        ranked_recids = rank_records(req, rm, 0, recIDs, p, verbose, so,
                                     of, ln, rg, jrec, field,
                                     sorting_methods=sorting_methods)[0]
        if ranked_recids:
            return ranked_recids
    elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS):
        return sort_records(req, recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
    return recIDs.tolist()
def sort_records(req, recIDs, sort_field='', sort_order='a', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sorting_methods=SORTING_METHODS):
    """Initial entry point for sorting records, acts like a dispatcher.
    (i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
    (ii)sort_field is not in bsrMETHOD, and thus, the cache does not contain any information regarding this sorting method"""
    _ = gettext_set_language(ln)
    # bibsort does not handle sort_pattern for now, use bibxxx
    if sort_pattern:
        return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order,
                                   sort_pattern, verbose, of, ln, rg, jrec)
    # ignore the use of buckets, use old fashion sorting
    use_sorting_buckets = CFG_BIBSORT_ENABLED and sorting_methods
    # Default sorting (no sort_field given): bibsort default field when the
    # buckets are usable, otherwise latest-first slicing.
    if not sort_field:
        if use_sorting_buckets:
            return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, CFG_BIBSORT_DEFAULT_FIELD_ORDER, verbose, of, ln, rg, jrec)
        else:
            return sort_records_latest(recIDs, jrec, rg, sort_order)
    sort_fields = sort_field.split(",")
    if len(sort_fields) == 1:
        # we have only one sorting_field, check if it is treated by BibSort
        for sort_method in sorting_methods:
            definition = sorting_methods[sort_method]
            if use_sorting_buckets and \
                    ((definition.startswith('FIELD') and
                      definition.replace('FIELD:', '').strip().lower() == sort_fields[0].lower()) or
                     sort_method == sort_fields[0]):
                #use BibSort
                return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
    #deduce sorting MARC tag out of the 'sort_field' argument:
    tags, error_field = get_tags_from_sort_fields(sort_fields)
    if error_field:
        # a field could not be mapped to tags: fall back to the bibsort
        # default ordering when available, otherwise warn and do not sort
        if use_sorting_buckets:
            return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, sort_order, verbose, of, ln, rg, jrec)
        else:
            if of.startswith('h'):
                write_warning(_("Sorry, %(x_option)s does not seem to be a valid sort option. The records will not be sorted.", x_option=cgi.escape(error_field)), "Error", req=req)
            return slice_records(recIDs, jrec, rg)
    elif tags:
        for sort_method in sorting_methods:
            definition = sorting_methods[sort_method]
            if definition.startswith('MARC') \
                    and definition.replace('MARC:', '').strip().split(',') == tags \
                    and use_sorting_buckets:
                #this list of tags have a designated method in BibSort, so use it
                return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
        #we do not have this sort_field in BibSort tables -> do the old fashion sorting
        return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
    else:
        return slice_records(recIDs, jrec, rg)
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=1, sort_or_rank='s', sorting_methods=SORTING_METHODS):
    """This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed.

    With sort_or_rank='r' it acts as a ranker and returns the pair
    (recids, scores); with the default 's' it returns just the sorted,
    sliced list of recids.
    """
    _ = gettext_set_language(ln)
    if not jrec:
        jrec = 1
    #sanity check: unknown method -> fall back to bibrank (ranking) or to
    #the uncached bibxxx sorting (sorting)
    if sort_method not in sorting_methods:
        if sort_or_rank == 'r':
            return rank_records_bibrank(rank_method_code=sort_method,
                                        rank_limit_relevance=0,
                                        hitset=recIDs,
                                        verbose=verbose)
        else:
            return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
    if verbose >= 3 and of.startswith('h'):
        write_warning("Sorting (using BibSort cache) by method %s (definition %s)."
                      % (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
    #we should return sorted records up to irec_max(exclusive)
    dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
    solution = intbitset()
    input_recids = intbitset(recIDs)
    CACHE_SORTED_DATA[sort_method].recreate_cache_if_needed()
    sort_cache = CACHE_SORTED_DATA[sort_method].cache
    bucket_numbers = sort_cache['bucket_data'].keys()
    #check if all buckets have been constructed
    if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
        if verbose > 3 and of.startswith('h'):
            write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
        if sort_or_rank == 'r':
            return rank_records_bibrank(rank_method_code=sort_method,
                                        rank_limit_relevance=0,
                                        hitset=recIDs,
                                        verbose=verbose)
        else:
            return sort_records_bibxxx(req, recIDs, None, sort_field,
                                       sort_order, '', verbose, of, ln, rg,
                                       jrec)
    if sort_order == 'd':
        bucket_numbers.reverse()
    # accumulate whole buckets (intersected with the input hitset) until we
    # have collected at least irec_max records
    for bucket_no in bucket_numbers:
        solution.union_update(
            input_recids & sort_cache['bucket_data'][bucket_no]
        )
        if len(solution) >= irec_max:
            break
    dict_solution = {}
    missing_records = intbitset()
    for recid in solution:
        try:
            dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
        except KeyError:
            # recid is in buckets, but not in the bsrMETHODDATA,
            # maybe because the value has been deleted, but the change has not
            # yet been propagated to the buckets
            missing_records.add(recid)
    # check if there are recids that are not in any bucket -> to be added at
    # the end/top, ordered by insertion date
    if len(solution) < irec_max:
        #some records have not been yet inserted in the bibsort structures
        #or, some records have no value for the sort_method
        missing_records += input_recids - solution
    reverse = sort_order == 'd'
    if sort_method.strip().lower() == CFG_BIBSORT_DEFAULT_FIELD and reverse:
        # If we want to sort the records on their insertion date, add the
        # missing records at the top.
        solution = sorted(missing_records, reverse=True) + \
            sorted(dict_solution, key=dict_solution.__getitem__, reverse=True)
    else:
        solution = sorted(dict_solution, key=dict_solution.__getitem__,
                          reverse=reverse) + sorted(missing_records)
    # Only keep records, we are going to display
    solution = slice_records(solution, jrec, rg)
    if sort_or_rank == 'r':
        # We need the recids, with their ranking score
        return solution, [dict_solution.get(record, 0) for record in solution]
    else:
        return solution
def slice_records(recIDs, jrec, rg):
    """Return the display window of recIDs: the slice starting at the
    1-based position jrec (falsy means 1), of length rg, or through the
    end when rg is falsy."""
    start = (jrec or 1) - 1
    if rg:
        return recIDs[start:start + rg]
    return recIDs[start:]
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
    """OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
    Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
    If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
    'sort pattern', for example "sort by report number that starts by CERN-PS".
    Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
    _ = gettext_set_language(ln)
    ## check arguments:
    if not sort_field:
        return slice_records(recIDs, jrec, rg)
    # refuse to sort overly large sets; warn (HTML only) and keep default order
    if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
        if of.startswith('h'):
            write_warning(_("Sorry, sorting is allowed on sets of up to %(x_name)d records only. Using default sort order.", x_name=CFG_WEBSEARCH_NB_RECORDS_TO_SORT), "Warning", req=req)
        return slice_records(recIDs, jrec, rg)
    recIDs_dict = {}
    recIDs_out = []
    if not tags:
        # tags have not been computed yet
        sort_fields = sort_field.split(',')
        tags, error_field = get_tags_from_sort_fields(sort_fields)
        if error_field:
            if of.startswith('h'):
                write_warning(_("Sorry, %(x_name)s does not seem to be a valid sort option. The records will not be sorted.", x_name=cgi.escape(error_field)), "Error", req=req)
            return slice_records(recIDs, jrec, rg)
    if verbose >= 3 and of.startswith('h'):
        write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
        if sort_pattern:
            write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
    ## check if we have sorting tag defined:
    if tags:
        # fetch the necessary field values:
        for recID in recIDs:
            val = "" # will hold value for recID according to which sort
            vals = [] # will hold all values found in sorting tag for recID
            for tag in tags:
                if CFG_CERN_SITE and tag == '773__c':
                    # CERN hack: journal sorting
                    # 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
                    vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
                else:
                    vals.extend(get_fieldvalues(recID, tag))
            if sort_pattern:
                # try to pick that tag value that corresponds to sort pattern
                bingo = 0
                for v in vals:
                    if v.lower().startswith(sort_pattern.lower()): # bingo!
                        bingo = 1
                        val = v
                        break
                if not bingo: # sort_pattern not present, so add other vals after spaces
                    val = sort_pattern + " " + ''.join(vals)
            else:
                # no sort pattern defined, so join them all together
                val = ''.join(vals)
            val = strip_accents(val.lower()) # sort values regardless of accents and case
            # group record IDs by their (normalised) sort value
            if val in recIDs_dict:
                recIDs_dict[val].append(recID)
            else:
                recIDs_dict[val] = [recID]
        # create output array:
        for k in sorted(recIDs_dict.keys()):
            recIDs_out.extend(recIDs_dict[k])
        # ascending or descending?
        if sort_order == 'd':
            recIDs_out.reverse()
        recIDs = recIDs_out
    # return only up to the maximum that we need
    return slice_records(recIDs, jrec, rg)
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
    """Calculate the half-open interval [irec_min, irec_max) of records
    to sort; a value of 'rg=-9999' means to print all records (to be used
    with care).  No rg at all means "through the end"."""
    jrec = jrec or 1
    if not rg:
        # return all
        return jrec - 1, nb_found
    rg = nb_found if rg == -9999 else abs(rg)
    # sanity checks on the 1-based starting position
    jrec = max(jrec, 1)
    if jrec > nb_found:
        jrec = max(nb_found - rg + 1, 1)
    # will sort records from start to end excluded
    start = max(jrec - 1, 0)
    end = min(start + rg, nb_found)
    return start, end
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em='', nb_found=-1):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if nb_found == -1:
nb_found = len(recIDs)
if nb_found:
if not rg or rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if len(recIDs) > rg and rg != -9999:
recIDs = slice_records(recIDs, jrec, rg)
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for recid in recIDs:
x = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for recid in recIDs:
x = print_record(recid, format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format.startswith('recjson'):
# we are doing recjson output:
req.write('[')
for idx, recid in enumerate(recIDs):
if idx > 0:
req.write(',')
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
req.write(']')
elif format == 'excel':
create_excel(recIDs=recIDs, req=req, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for recid in recIDs:
req.write(print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(ln=ln))
for irec, recid in enumerate(recIDs):
row_number = jrec+irec
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln=ln,
recid=recid,
row_number=row_number,
relevance=relevance,
record=record,
relevances_prologue=relevances_prologue,
relevances_epilogue=relevances_epilogue,
display_add_to_basket=display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln=ln,
display_add_to_basket=display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
referer = user_info.get('referer', '')
for recid in recIDs:
if record_exists(recid) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recid)
if merged_recid:
write_warning(_("The record %(x_rec)d replaces it.", x_rec=merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_collection_of_a_record(recid, referer, False)),
recid, ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in iteritems(unordered_tabs)]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'],
'%s/%s/%s/%s%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln),
tab_id == tab,
unordered_tabs[tab_id]['enabled'])
for (tab_id, dummy_order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] is True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recid, "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recid, ln)
r = calculate_reading_similarity_list(recid, "pageviews")
viewsimilarity = None
if r:
viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recid,
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'citations':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = get_self_cited_by(recid)
selfcited = rank_by_citations(get_self_cited_by(recid), verbose=verbose)
selfcited = reversed(selfcited[0])
selfcited = [recid for recid, dummy in selfcited]
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " +
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(ln, citationhistory))
# Citation log
entries = get_citers_log(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_log(ln, entries))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recid, 'HDREF', ln=ln, user_info=user_info, verbose=verbose, force_2nd_pass=True))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'keywords':
from invenio.legacy.bibclassify.webinterface import main_page
main_page(req, recid, tabs, ln,
webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'hepdata':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
include_jquery=True,
include_mathjax=True))
from invenio.utils import hepdata as hepdatautils
from invenio.utils.hepdata import display as hepdatadisplayutils
data = hepdatautils.retrieve_data_for_record(recid)
if data:
content = websearch_templates.tmpl_record_hepdata(data, recid, True)
else:
content = websearch_templates.tmpl_record_no_hepdata()
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(
recid,
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recid) == 1:
creationdate = get_creation_date(recid)
modificationdate = get_modification_date(recid)
content = print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID=recid,
ln=ln,
format=format,
creationdate=creationdate,
modificationdate=modificationdate,
content=content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recid,
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.modules.comments.api import get_mini_reviews
reviews = get_mini_reviews(recid=recid, ln=ln)
else:
reviews = ''
actions = format_record(recid, 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recid, 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recid,
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for recid in recIDs:
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
    """Write to *req* the opening markup for a list of records.

    Which prologue is emitted depends on the output *format*; HTML and
    plain-text formats need none.  The RSS and podcast prologues also
    embed the current collection *cc*.
    """
    # Ordered prefix table: more specific prefixes (e.g. 'xe8x') must be
    # tried before shorter ones (e.g. 'xe'), and the catch-all 'x' last.
    # Lambdas keep template lookups lazy, exactly like the original
    # if/elif chain.
    dispatch = (
        ('xm', lambda: websearch_templates.tmpl_xml_marc_prologue()),
        ('xn', lambda: websearch_templates.tmpl_xml_nlm_prologue()),
        ('xw', lambda: websearch_templates.tmpl_xml_refworks_prologue()),
        ('xr', lambda: websearch_templates.tmpl_xml_rss_prologue(cc=cc)),
        ('xe8x', lambda: websearch_templates.tmpl_xml_endnote_8x_prologue()),
        ('xe', lambda: websearch_templates.tmpl_xml_endnote_prologue()),
        ('xo', lambda: websearch_templates.tmpl_xml_mods_prologue()),
        ('xp', lambda: websearch_templates.tmpl_xml_podcast_prologue(cc=cc)),
        ('x', lambda: websearch_templates.tmpl_xml_default_prologue()),
    )
    prologue = ""  # no prologue needed for HTML or Text formats
    for prefix, make_prologue in dispatch:
        if format.startswith(prefix):
            prologue = make_prologue()
            break
    req.write(prologue)
def print_records_epilogue(req, format):
    """Write to *req* the closing markup for a list of records.

    Which epilogue is emitted depends on the output *format*; HTML and
    plain-text formats need none.
    """
    # Ordered prefix table mirroring print_records_prologue(): longer
    # prefixes ('xm', 'xe8x') are matched before shorter ones, with the
    # generic 'x' as fallback.  Lambdas keep template lookups lazy.
    dispatch = (
        ('xm', lambda: websearch_templates.tmpl_xml_marc_epilogue()),
        ('xn', lambda: websearch_templates.tmpl_xml_nlm_epilogue()),
        ('xw', lambda: websearch_templates.tmpl_xml_refworks_epilogue()),
        ('xr', lambda: websearch_templates.tmpl_xml_rss_epilogue()),
        ('xe8x', lambda: websearch_templates.tmpl_xml_endnote_8x_epilogue()),
        ('xe', lambda: websearch_templates.tmpl_xml_endnote_epilogue()),
        ('xo', lambda: websearch_templates.tmpl_xml_mods_epilogue()),
        ('xp', lambda: websearch_templates.tmpl_xml_podcast_epilogue()),
        ('x', lambda: websearch_templates.tmpl_xml_default_epilogue()),
    )
    epilogue = ""  # no epilogue needed for HTML or Text formats
    for prefix, make_epilogue in dispatch:
        if format.startswith(prefix):
            epilogue = make_epilogue()
            break
    req.write(epilogue)
def get_record(recid):
    """Return the record structure for *recid*.

    When record-structure caching is enabled, the serialized structure
    stored in the ``bibfmt`` table is preferred; otherwise (or when the
    cache entry is absent) the structure is rebuilt from the record's
    MARCXML.
    """
    if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
        rows = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
        if rows:
            try:
                serialized = rows[0][0]
            except IndexError:
                # cached structure unusable -> fall through and rebuild it
                pass
            else:
                return deserialize_via_marshal(serialized)
    # No usable cache entry: build the structure from the XML MARC output.
    return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
                 search_pattern=None, user_info=None, verbose=0, sf='', so='d',
                 sp='', rm='', brief_links=True):
    """
    Print record 'recID' formatted according to 'format'.

    'sf' is sort field and 'rm' is ranking method that are passed here
    only for proper linking purposes: e.g. when a certain ranking
    method or a certain sort field was selected, keep it selected in
    any dynamic search links that may be printed.

    :param recID: identifier of the record to print
    :param format: output format code: 'hb'/'hd' (HTML brief/detailed),
        'hm' (HTML MARC), 'xm' (XML MARC), 'xd' (XML Dublin Core),
        'marcxml'/'oai_dc' (OAI flavours), 'recstruct', 'recjson',
        't...' (text MARC), or a single MARC tag such as '100__a'
    :param ot: print only these MARC tags (field-filtered output)
    :param ln: interface language
    :param decompress: function used to decompress the cached formats
        stored in the bibfmt table
    :param brief_links: whether to append the "Detailed record" links
        after HTML brief output
    :return: the formatted record as a string (or the record structure
        itself when format == 'recstruct')
    """
    if format == 'recstruct':
        return get_record(recID)

    # check from user information if the user has the right to see hidden
    # fields/tags in the records as well
    can_see_hidden = False
    if user_info:
        can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)

    if format == 'recjson':
        import json
        from invenio.modules.records.api import get_record as get_recjson
        ot = ot if ot and len(ot) else None
        return json.dumps(get_recjson(recID).dumps(
            keywords=ot, filter_hidden=not can_see_hidden))

    _ = gettext_set_language(ln)

    # The 'attribute this paper' link is shown only if the session states it should and
    # the record is included in the collections to which bibauthorid is limited.
    if user_info:
        display_claim_this_paper = (user_info.get("precached_viewclaimlink", False) and
                                    recID in intbitset.union(*[get_collection_reclist(x)
                                                               for x in BIBAUTHORID_LIMIT_TO_COLLECTIONS]))
    else:
        display_claim_this_paper = False

    can_edit_record = False
    if check_user_can_edit_record(user_info, recID):
        can_edit_record = True

    out = ""

    # sanity check: record_exists() returns 0 (missing), 1 (exists),
    # -1 (deleted)
    record_exist_p = record_exists(recID)
    if record_exist_p == 0: # doesn't exist
        return out

    # We must still check some special formats, but these
    # should disappear when BibFormat improves.
    if not (format.lower().startswith('t')
            or format.lower().startswith('hm')
            or str(format[0:3]).isdigit()
            or ot):

        # Unspecified format is hd
        if format == '':
            format = 'hd'

        if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
            # HTML output displays a default value for deleted records.
            # Other format have to deal with it.
            out += _("The record has been deleted.")
            # was record deleted-but-merged ?
            merged_recid = get_merged_recid(recID)
            if merged_recid:
                out += ' ' + _("The record %(x_rec)d replaces it.", x_rec=merged_recid)
        else:
            out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                  user_info=user_info, verbose=verbose)

            # at the end of HTML brief mode, print the "Detailed record" functionality:
            if brief_links and format.lower().startswith('hb') and \
                   format.lower() != 'hb_p':
                out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
                                                                         recID=recID,
                                                                         sf=sf,
                                                                         so=so,
                                                                         sp=sp,
                                                                         rm=rm,
                                                                         display_claim_link=display_claim_this_paper,
                                                                         display_edit_link=can_edit_record)
        return out

    # OAI wrappers share a common header/footer around the payload:
    if format == "marcxml" or format == "oai_dc":
        out += "  <record>\n"
        out += "   <header>\n"
        for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
            out += "    <identifier>%s</identifier>\n" % oai_id
        out += "    <datestamp>%s</datestamp>\n" % get_modification_date(recID)
        out += "   </header>\n"
        out += "   <metadata>\n"

    if format.startswith("xm") or format == "marcxml":
        # look for detailed format existence:
        query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
        res = run_sql(query, (recID, format), 1)
        if res and record_exist_p == 1 and not ot:
            # record 'recID' is formatted in 'format', and we are not
            # asking for field-filtered output; so print it:
            out += "%s" % decompress(res[0][0])
        elif ot:
            # field-filtered output was asked for; print only some fields
            record = get_record(recID)
            if not can_see_hidden:
                for tag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
                    # FIX: guard the deletion -- records that do not
                    # carry a given hidden tag used to raise KeyError
                    if tag in record:
                        del record[tag]
                ot = list(set(ot) - set(cfg['CFG_BIBFORMAT_HIDDEN_TAGS']))
            out += record_xml_output(record, ot)
        else:
            # record 'recID' is not formatted in 'format' or we ask
            # for field-filtered output -- they are not in "bibfmt"
            # table; so fetch all the data from "bibXXx" tables:
            if format == "marcxml":
                out += """    <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
                out += "        <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
            elif format.startswith("xm"):
                out += """    <record>\n"""
                out += "        <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
            if record_exist_p == -1:
                # deleted record, so display only OAI ID and 980:
                oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
                if oai_ids:
                    out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
                           (CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
                out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
            else:
                # controlfields
                query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
                        "WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
                        "ORDER BY bb.field_number, b.tag ASC"
                res = run_sql(query, (recID, ))
                for row in res:
                    field, value = row[0], row[1]
                    value = encode_for_xml(value)
                    out += """        <controlfield tag="%s">%s</controlfield>\n""" % \
                           (encode_for_xml(field[0:3]), value)
                # datafields
                i = 1 # Do not process bib00x and bibrec_bib00x, as
                      # they are controlfields. So start at bib01x and
                      # bibrec_bib01x (and set i = 0 at the end of
                      # first loop)
                for digit1 in range(0, 10):
                    for digit2 in range(i, 10):
                        bx = "bib%d%dx" % (digit1, digit2)
                        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
                        # FIX: the space before "ORDER BY" is required;
                        # the previous concatenation produced the
                        # invalid SQL "...b.tag LIKE %sORDER BY..."
                        query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
                                "WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s "\
                                "ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
                        res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
                        field_number_old = -999
                        field_old = ""
                        for row in res:
                            field, value, field_number = row[0], row[1], row[2]
                            ind1, ind2 = field[3], field[4]
                            if ind1 == "_" or ind1 == "":
                                ind1 = " "
                            if ind2 == "_" or ind2 == "":
                                ind2 = " "
                            # print field tag, unless hidden
                            printme = True
                            if not can_see_hidden:
                                for htag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
                                    ltag = len(htag)
                                    samelenfield = field[0:ltag]
                                    if samelenfield == htag:
                                        printme = False
                            if printme:
                                if field_number != field_number_old or field[:-1] != field_old[:-1]:
                                    if field_number_old != -999:
                                        out += """        </datafield>\n"""
                                    out += """        <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
                                           (encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
                                    field_number_old = field_number
                                    field_old = field
                                # print subfield value
                                value = encode_for_xml(value)
                                out += """            <subfield code="%s">%s</subfield>\n""" % \
                                       (encode_for_xml(field[-1:]), value)
                        # all fields/subfields printed in this run, so close the tag:
                        if field_number_old != -999:
                            out += """        </datafield>\n"""
                        i = 0 # from now on consider all bibXXx tables;
                              # only bib00x/bibrec_bib00x are skipped
        # we are at the end of printing the record:
        out += "  </record>\n"

    elif format == "xd" or format == "oai_dc":
        # XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
        out += """    <dc xmlns="http://purl.org/dc/elements/1.1/"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://purl.org/dc/elements/1.1/
                             http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
        if record_exist_p == -1:
            out += ""
        else:
            for f in get_fieldvalues(recID, "041__a"):
                out += "     <language>%s</language>\n" % f

            for f in get_fieldvalues(recID, "100__a"):
                out += "     <creator>%s</creator>\n" % encode_for_xml(f)

            for f in get_fieldvalues(recID, "700__a"):
                out += "     <creator>%s</creator>\n" % encode_for_xml(f)

            for f in get_fieldvalues(recID, "245__a"):
                out += "     <title>%s</title>\n" % encode_for_xml(f)

            for f in get_fieldvalues(recID, "65017a"):
                out += "     <subject>%s</subject>\n" % encode_for_xml(f)

            for f in get_fieldvalues(recID, "8564_u"):
                # FIX: the original test `f.split('.') == 'png'` compared
                # a list with a string and was never true, so .png URLs
                # were not actually skipped; compare the extension instead.
                if f.split('.')[-1] == 'png':
                    continue
                out += "     <identifier>%s</identifier>\n" % encode_for_xml(f)

            for f in get_fieldvalues(recID, "520__a"):
                out += "     <description>%s</description>\n" % encode_for_xml(f)

            out += "     <date>%s</date>\n" % get_creation_date(recID)
        out += "    </dc>\n"

    elif len(format) == 6 and str(format[0:3]).isdigit():
        # user has asked to print some fields only
        if format == "001":
            out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
        else:
            vals = get_fieldvalues(recID, format)
            for val in vals:
                out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)

    elif format.startswith('t'):
        ## user directly asked for some tags to be displayed only
        if record_exist_p == -1:
            out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
        else:
            out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)

    elif format == "hm":
        if record_exist_p == -1:
            out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
        else:
            out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"

    elif format.startswith("h") and ot:
        ## user directly asked for some tags to be displayed only
        if record_exist_p == -1:
            out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
        else:
            out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"

    elif format == "hd":
        # HTML detailed format
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            # look for detailed format existence:
            query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
            res = run_sql(query, (recID, format), 1)
            if res:
                # record 'recID' is formatted in 'format', so print it
                out += "%s" % decompress(res[0][0])
            else:
                # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
                out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                                      user_info=user_info, verbose=verbose)
                if out_record_in_format:
                    out += out_record_in_format
                else:
                    out += websearch_templates.tmpl_print_record_detailed(
                        ln = ln,
                        recID = recID,
                        )

    elif format.startswith("hb_") or format.startswith("hd_"):
        # underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                  user_info=user_info, verbose=verbose)

    elif format.startswith("hx"):
        # BibTeX format, called on the fly:
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                  user_info=user_info, verbose=verbose)

    elif format.startswith("hs"):
        # for citation/download similarity navigation links:
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
            # firstly, title:
            titles = get_fieldvalues(recID, "245__a")
            if titles:
                for title in titles:
                    out += "<strong>%s</strong>" % title
            else:
                # usual title not found, try conference title:
                titles = get_fieldvalues(recID, "111__a")
                if titles:
                    for title in titles:
                        out += "<strong>%s</strong>" % title
                else:
                    # just print record ID:
                    out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
            out += "</a>"
            # secondly, authors:
            authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
            if authors:
                out += " - %s" % authors[0]
                if len(authors) > 1:
                    out += " <em>et al</em>"
            # thirdly publication info:
            publinfos = get_fieldvalues(recID, "773__s")
            if not publinfos:
                publinfos = get_fieldvalues(recID, "909C4s")
                if not publinfos:
                    publinfos = get_fieldvalues(recID, "037__a")
                    if not publinfos:
                        publinfos = get_fieldvalues(recID, "088__a")
            if publinfos:
                out += " - %s" % publinfos[0]
            else:
                # fourthly publication year (if not publication info):
                years = get_fieldvalues(recID, "773__y")
                if not years:
                    years = get_fieldvalues(recID, "909C4y")
                    if not years:
                        years = get_fieldvalues(recID, "260__c")
                if years:
                    out += " (%s)" % years[0]
    else:
        # HTML brief format by default
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
            res = run_sql(query, (recID, format))
            if res:
                # record 'recID' is formatted in 'format', so print it
                out += "%s" % decompress(res[0][0])
            else:
                # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly: or use default format:
                if CFG_WEBSEARCH_CALL_BIBFORMAT:
                    out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                                          user_info=user_info, verbose=verbose)
                    if out_record_in_format:
                        out += out_record_in_format
                    else:
                        out += websearch_templates.tmpl_print_record_brief(
                            ln = ln,
                            recID = recID,
                            )
                else:
                    out += websearch_templates.tmpl_print_record_brief(
                        ln = ln,
                        recID = recID,
                        )

            # at the end of HTML brief mode, print the "Detailed record" functionality:
            if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
                pass # do nothing for portfolio and on-the-fly formats
            else:
                out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
                                                                         recID=recID,
                                                                         sf=sf,
                                                                         so=so,
                                                                         sp=sp,
                                                                         rm=rm,
                                                                         display_claim_link=display_claim_this_paper,
                                                                         display_edit_link=can_edit_record)

    # print record closing tags, if needed:
    if format == "marcxml" or format == "oai_dc":
        out += "   </metadata>\n"
        out += "  </record>\n"

    return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
    """
    Call BibFormat and return the formatted record.

    BibFormat will decide by itself if old or new BibFormat must be used.

    :param recID: record to format
    :param format: output format code (e.g. 'HD')
    :param ln: interface language
    :param search_pattern: user query; its fulltext words are extracted
        and, when snippet display is enabled, highlighted in PDF snippets
    :param user_info: user information dictionary (also carries the
        request URI used to decide whether snippets should be shown)
    :param verbose: verbosity level forwarded to the formatter
    :return: the formatted record, possibly followed by fulltext snippets
    """
    from invenio.modules.formatter.utils import get_pdf_snippets

    # Collect the positive fulltext search words to be highlighted:
    keywords = []
    if search_pattern is not None:
        for unit in create_basic_search_units(None, str(search_pattern), None):
            bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
            if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
                if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
                    # remove leading and trailing `%' representing partial phrase search
                    keywords.append(bsu_p[1:-1])
                else:
                    keywords.append(bsu_p)

    out = format_record(recID,
                        of=format,
                        ln=ln,
                        search_pattern=keywords,
                        user_info=user_info,
                        verbose=verbose)

    if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
           'fulltext' in user_info['uri'].lower():
        # check snippets only if URL contains fulltext
        # FIXME: make it work for CLI too, via new function arg
        if keywords:
            snippets = ''
            try:
                snippets = get_pdf_snippets(recID, keywords, user_info)
            except Exception:
                # FIX: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt; snippet extraction stays
                # best-effort -- log and carry on without snippets.
                register_exception()
            if snippets:
                out += snippets

    return out
def log_query(hostname, query_args, uid=-1):
    """Record a search query in the ``query`` and ``user_query`` tables.

    Logging only happens for sensible user ids (``uid >= 0``).

    :return: the id of the logged query, or None when nothing was logged.
    """
    if uid < 0:
        # unreasonable uid: do not log anything
        return None
    rows = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
    if rows:
        id_query = rows[0][0]
    else:
        # first time this query is seen: register it
        id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
    if id_query:
        run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
                (uid, id_query, hostname,
                 time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
    """Append one entry to ``search.log`` for later analysis.

    The line format is
    ``YYYYmmddHHMMSS#action#pattern#field#coll1,coll2#nbhits``.
    Logging is strictly best-effort: any error (unwritable log file,
    empty collection list, ...) is silently ignored so it can never
    break the search itself.
    """
    try:
        # FIX: use a context manager so the file handle is closed even
        # when one of the writes fails (it used to leak in that case).
        with open(CFG_LOGDIR + "/search.log", "a") as log:
            log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
            log.write(action+"#")
            log.write(p+"#")
            log.write(f+"#")
            for coll in colls[:-1]:
                log.write("%s," % coll)
            log.write("%s#" % colls[-1])
            log.write("%d" % nb_records_found_total)
            log.write("\n")
    except Exception:
        # best-effort logging only -- never propagate, but no longer a
        # bare `except:` that would also catch SystemExit/KeyboardInterrupt
        pass
    return
def clean_dictionary(dictionary, list_of_items):
    """Return a copy of *dictionary* in which every key listed in
    *list_of_items* maps to the empty string.

    Keys from *list_of_items* that are missing from the input are added.
    The input dictionary itself is left untouched.
    """
    blanked = dict((item, '') for item in list_of_items)
    cleaned = dictionary.copy()
    cleaned.update(blanked)
    return cleaned
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=None, sf="", so="a", sp="", rm="", of="id", ot="", aas=0,
                           p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
                           recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
                           d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
                           wl=0, em=""):
    """Perform search or browse request, without checking for
       authentication.  Return list of recIDs found, if of=id.
       Otherwise create web page.

       The arguments are as follows:

         req - mod_python Request class instance.

          cc - current collection (e.g. "ATLAS").  The collection the
               user started to search/browse from.

           c - collection list (e.g. ["Theses", "Books"]).  The
               collections user may have selected/deselected when
               starting to search from 'cc'.

           p - pattern to search for (e.g. "ellis and muon or kaon").

           f - field to search within (e.g. "author").

          rg - records in groups of (e.g. "10").  Defines how many hits
               per collection in the search results page are
               displayed.  (Note that `rg' is ignored in case of `of=id'.)

          sf - sort field (e.g. "title").

          so - sort order ("a"=ascending, "d"=descending).

          sp - sort pattern (e.g. "CERN-") -- in case there are more
               values in a sort field, this argument tells which one
               to prefer

          rm - ranking method (e.g. "jif").  Defines whether results
               should be ranked by some known ranking method.

          of - output format (e.g. "hb").  Usually starting "h" means
               HTML output (and "hb" for HTML brief, "hd" for HTML
               detailed), "x" means XML output, "t" means plain text
               output, "id" means no output at all but to return list
               of recIDs found, "intbitset" means to return an intbitset
               representation of the recIDs found (no sorting or ranking
               will be performed).  (Suitable for high-level API.)

          ot - output only these MARC tags (e.g. "100,700,909C0b").
               Useful if only some fields are to be shown in the
               output, e.g. for library to control some fields.

          em - output only part of the page.

         aas - advanced search ("0" means no, "1" means yes).  Whether
               search was called from within the advanced search
               interface.

          p1 - first pattern to search for in the advanced search
               interface.  Much like 'p'.

          f1 - first field to search within in the advanced search
               interface.  Much like 'f'.

          m1 - first matching type in the advanced search interface.
               ("a" all of the words, "o" any of the words, "e" exact
               phrase, "p" partial phrase, "r" regular expression).

         op1 - first operator, to join the first and the second unit
               in the advanced search interface.  ("a" add, "o" or,
               "n" not).

          p2 - second pattern to search for in the advanced search
               interface.  Much like 'p'.

          f2 - second field to search within in the advanced search
               interface.  Much like 'f'.

          m2 - second matching type in the advanced search interface.
               ("a" all of the words, "o" any of the words, "e" exact
               phrase, "p" partial phrase, "r" regular expression).

         op2 - second operator, to join the second and the third unit
               in the advanced search interface.  ("a" add, "o" or,
               "n" not).

          p3 - third pattern to search for in the advanced search
               interface.  Much like 'p'.

          f3 - third field to search within in the advanced search
               interface.  Much like 'f'.

          m3 - third matching type in the advanced search interface.
               ("a" all of the words, "o" any of the words, "e" exact
               phrase, "p" partial phrase, "r" regular expression).

          sc - split by collection ("0" no, "1" yes).  Governs whether
               we want to present the results in a single huge list,
               or splitted by collection.

        jrec - jump to record (e.g. "234").  Used for navigation
               inside the search results.  (Note that `jrec' is ignored
               in case of `of=id'.)

       recid - display record ID (e.g. "20000").  Do not
               search/browse but go straight away to the Detailed
               record page for the given recID.

      recidb - display record ID bis (e.g. "20010").  If greater than
               'recid', then display records from recid to recidb.
               Useful for example for dumping records from the
               database for reformatting.

       sysno - display old system SYS number (e.g. "").  If you
               migrate to Invenio from another system, and store your
               old SYS call numbers, you can use them instead of recid
               if you wish so.

          id - the same as recid, in case recid is not set.  For
               backwards compatibility.

         idb - the same as recid, in case recidb is not set.  For
               backwards compatibility.

       sysnb - the same as sysno, in case sysno is not set.  For
               backwards compatibility.

      action - action to do.  "SEARCH" for searching, "Browse" for
               browsing.  Default is to search.

          d1 - first datetime in full YYYY-mm-dd HH:MM:DD format
               (e.g. "1998-08-23 12:34:56"). Useful for search limits
               on creation/modification date (see 'dt' argument
               below).  Note that 'd1' takes precedence over d1y, d1m,
               d1d if these are defined.

         d1y - first date's year (e.g. "1998").  Useful for search
               limits on creation/modification date.

         d1m - first date's month (e.g. "08").  Useful for search
               limits on creation/modification date.

         d1d - first date's day (e.g. "23").  Useful for search
               limits on creation/modification date.

          d2 - second datetime in full YYYY-mm-dd HH:MM:DD format
               (e.g. "1998-09-02 12:34:56"). Useful for search limits
               on creation/modification date (see 'dt' argument
               below).  Note that 'd2' takes precedence over d2y, d2m,
               d2d if these are defined.

         d2y - second date's year (e.g. "1998").  Useful for search
               limits on creation/modification date.

         d2m - second date's month (e.g. "09").  Useful for search
               limits on creation/modification date.

         d2d - second date's day (e.g. "02").  Useful for search
               limits on creation/modification date.

          dt - first and second date's type (e.g. "c").  Specifies
               whether to search in creation dates ("c") or in
               modification dates ("m").  When dt is not set and d1*
               and d2* are set, the default is "c".

     verbose - verbose level (0=min, 9=max).  Useful to print some
               internal information on the searching process in case
               something goes wrong.

          ap - alternative patterns (0=no, 1=yes).  In case no exact
               match is found, the search engine can try alternative
               patterns e.g. to replace non-alphanumeric characters by
               a boolean query.  ap defines if this is wanted.

          ln - language of the search interface (e.g. "en").  Useful
               for internationalization.

          ec - list of external search engines to search as well
               (e.g. "SPIRES HEP").

         tab - the tab to show in the detailed record view
               (e.g. "citations", "references").

          wl - wildcard limit (ex: 100) the wildcard queries will be
               limited at 100 results
    """
    # Wash and normalize all arguments once into a single kwargs dict...
    kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
                                p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
                                recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
                                d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
                                tab=tab, wl=wl, em=em)
    # ...then hand it over to the internal search driver.
    return prs_perform_search(kwargs=kwargs, **kwargs)
def prs_perform_search(kwargs=None, **dummy):
    """Run the actual search (internal Invenio call).

    Unless you know what you are doing, do not use this as an API.
    """
    # Collection washing is a separate step because it can also be
    # invoked independently of the search itself.
    colls_ok = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
    if not colls_ok:
        # washing failed: propagate the format-appropriate empty result
        return colls_ok
    return prs_search(kwargs=kwargs, **kwargs)
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
                             aas=None, ln=None, em="", **dummy):
    """
    Check and wash the collection list argument before searching starts.

    On success the washed collection information is stored into *kwargs*
    and True is returned.  If a collection is not defined, a warning is
    printed to the browser (for HTML output) and a false value of the
    type expected by the requested output format is returned (empty
    string, empty list, or empty intbitset).
    """
    if of.startswith("h"):
        # HTML can only be written to a request object -- raise an
        # exception when trying to print out html from the cli.
        assert req
        # Regenerate the collection and field I18N name caches up front
        # so that no timestamp checks are needed later on.
        collection_i18nname_cache.recreate_cache_if_needed()
        field_i18nname_cache.recreate_cache_if_needed()
    try:
        # which colls to search and to display?
        cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug = \
            wash_colls(cc, c, sc, verbose)
        kwargs['colls_to_display'] = colls_to_display
        kwargs['colls_to_search'] = colls_to_search
        kwargs['hosted_colls'] = hosted_colls
        kwargs['wash_colls_debug'] = wash_colls_debug
    except InvenioWebSearchUnknownCollectionError as exc:
        colname = exc.colname
        if of.startswith("h"):
            page_start(req, of, cc, aas, ln, getUid(req),
                       websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
            req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
            page_end(req, of, ln, em)
            return ''
        if of == "id":
            return []
        if of == "intbitset":
            return intbitset()
        if of == "recjson":
            return []
        if of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
        page_end(req, of, ln, em)
        return ''
    return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
                       sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
                       p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
                       sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
                       d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
                       ec=None, tab="", uid=None, wl=0, em="", **dummy):
    """
    Sets the (default) values and checks others for the PRS call.

    All search arguments are washed (output format, patterns, fields,
    dates, ranking method), backwards-compatibility aliases are resolved
    (id/idb/sysnb -> recid/recidb/sysno), and contextual values (recid
    from sysno, collection from recid, uid from the request) are
    deduced.  The result is returned as a single kwargs dictionary that
    is passed around between the prs_* functions.
    """
    # wash output format:
    of = wash_output_format(of)
    # wash all arguments requiring special care
    p = wash_pattern(p)
    f = wash_field(f)
    p1 = wash_pattern(p1)
    f1 = wash_field(f1)
    p2 = wash_pattern(p2)
    f2 = wash_field(f2)
    p3 = wash_pattern(p3)
    f3 = wash_field(f3)
    (d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
    datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
    # wash ranking method:
    if not is_method_valid(None, rm):
        rm = ""
    # backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
    if sysnb != "" and sysno == "":
        sysno = sysnb
    if id > 0 and recid == -1:
        recid = id
    if idb > 0 and recidb == -1:
        recidb = idb
    # TODO deduce passed search limiting criterias (if applicable)
    pl, pl_in_url = "", "" # no limits by default
    if action != "browse" and req and not isinstance(req, (cStringIO.OutputType, dict)) \
           and getattr(req, 'args', None): # we do not want to add options while browsing or while calling via command-line
        fieldargs = cgi.parse_qs(req.args)
        for fieldcode in get_fieldcodes():
            if fieldcode in fieldargs:
                for val in fieldargs[fieldcode]:
                    pl += "+%s:\"%s\" " % (fieldcode, val)
                    pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
    # deduce recid from sysno argument (if applicable):
    if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
        recid = get_mysql_recid_from_aleph_sysno(sysno)
        if recid is None:
            recid = 0 # use recid 0 to indicate that this sysno does not exist
    # deduce collection we are in (if applicable):
    if recid > 0:
        referer = None
        if req:
            referer = req.headers_in.get('Referer')
        cc = guess_collection_of_a_record(recid, referer)
    # deduce user id (if applicable):
    if uid is None:
        try:
            uid = getUid(req)
        except:
            uid = 0
    _ = gettext_set_language(ln)
    if aas == 2: #add-to-search interface
        # fold the advanced-search unit (p1/f1/m1/op1) into the simple
        # pattern and reset those arguments to their defaults
        p = create_add_to_search_pattern(p, p1, f1, m1, op1)
        default_addtosearch_args = websearch_templates.restore_search_args_to_default(['p1', 'f1', 'm1', 'op1'])
        if req:
            req.argd.update(default_addtosearch_args)
            req.argd['p'] = p
    # bundle everything the prs_* call chain needs into one dictionary
    kwargs = {'req': req, 'cc': cc, 'c': c, 'p': p, 'f': f, 'rg': rg, 'sf': sf,
              'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'aas': aas,
              'p1': p1, 'f1': f1, 'm1': m1, 'op1': op1, 'p2': p2, 'f2': f2,
              'm2': m2, 'op2': op2, 'p3': p3, 'f3': f3, 'm3': m3, 'sc': sc,
              'jrec': jrec, 'recid': recid, 'recidb': recidb, 'sysno': sysno,
              'id': id, 'idb': idb, 'sysnb': sysnb, 'action': action, 'd1': d1,
              'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2': d2, 'd2y': d2y,
              'd2m': d2m, 'd2d': d2d, 'dt': dt, 'verbose': verbose, 'ap': ap,
              'ln': ln, 'ec': ec, 'tab': tab, 'wl': wl, 'em': em,
              'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid,
              'pl': pl, 'pl_in_url': pl_in_url, '_': _,
              'selected_external_collections_infos': None,
              }
    kwargs.update(**dummy)
    return kwargs
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
               f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
               action=None, rm=None, of=None, em=None,
               **dummy):
    """
    Dispatch a washed search request to the matching handler, streaming
    output into ``req``.

    This function writes various bits into the req object as the search
    proceeds (so that pieces of a page are rendered even before the
    search has ended).  Exactly one handler is chosen:

    - prs_detailed_record  -- a non-negative ``recid`` was deduced;
    - prs_browse           -- ``action == "browse"``;
    - prs_search_similar_records -- ``rm`` set and ``p`` of form ``recid:...``;
    - prs_search_cocitedwith     -- ``p`` of form ``cocitedwith:...``;
    - prs_search_common    -- the regular search path.

    If the handler returns a value (e.g. a record-ID list for data
    output formats), it is propagated unchanged.  Otherwise external
    collections are searched (for ``h*`` output formats other than the
    citation-summary formats) and the page is closed via page_end().
    """
    ## 0 - start output
    if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
        output = prs_detailed_record(kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    elif action == "browse":
        ## 2 - browse needed
        # NOTE: only the local ``of`` is forced to 'hb' (used by the
        # epilogue below); kwargs['of'] passed to prs_browse() keeps the
        # caller's value.
        of = 'hb'
        output = prs_browse(kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    elif rm and p.startswith("recid:"):
        ## 3-ter - similarity search (or old-style citation search) needed
        output = prs_search_similar_records(kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
        ## 3-terter - cited by search needed
        output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    else:
        ## 3 - common search needed
        output = prs_search_common(kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    # External searches
    if of.startswith("h"):
        # 'hcs'/'hcs2' are the citation-summary formats: no external search.
        if not of in ['hcs', 'hcs2']:
            perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                       ln, selected_external_collections_infos, em=em)
    return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
                        p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
                        **dummy):
    """
    Format and output one record (detailed record display), or the
    half-open ID range [recid, recidb) when ``recidb`` is given.

    For ``of`` in ("id", "intbitset") the existing record IDs in the
    range are returned instead of printed.  For a non-existing record:
    data formats get an empty result, ``x*`` formats get empty-but-valid
    XML, and HTML either raises 404 (header-only requests) or prints a
    warning.  A page-view event is registered for HTML output.
    """
    ## 1 - detailed record display
    title, description, keywords = \
        websearch_templates.tmpl_record_page_header_content(req, recid, ln)
    if req is not None and req.method != 'HEAD':
        page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
    # Default format is hb but we are in detailed -> change 'of'
    if of == "hb":
        of = "hd"
    if record_exists(recid):
        if recidb <= recid: # sanity check
            recidb = recid + 1
        if of in ["id", "intbitset"]:
            # keep only the IDs in the range that actually exist
            result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
            if of == "intbitset":
                return intbitset(result)
            else:
                return result
        else:
            print_records(req, range(recid, recidb), -1, -9999, of, ot, ln,
                          search_pattern=p, verbose=verbose, tab=tab, sf=sf,
                          so=so, sp=sp, rm=rm, em=em, nb_found=len(range(recid, recidb)))
        if req and of.startswith("h"): # register detailed record page view event
            client_ip_address = str(req.remote_ip)
            register_page_view_event(recid, uid, client_ip_address)
    else: # record does not exist
        if of == "id":
            return []
        elif of == "intbitset":
            return intbitset()
        elif of == "recjson":
            return []
        elif of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
        elif of.startswith("h"):
            if req.header_only:
                raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
            else:
                write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
               p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
               so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
               f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
               d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
               dt=None, jrec=None, ec=None, action=None,
               colls_to_search=None, verbose=None, em=None, **dummy):
    """
    Render the index-browse page (``action == "browse"``).

    Opens the page, writes the search box and the exact-author browse
    help link, then browses the requested index pattern(s): in advanced
    mode (``aas == 1`` or any of p1/p2/p3 given) each of the three
    pattern/field pairs is browsed in turn, otherwise only ``p``/``f``.
    Errors are registered and reported in a format appropriate to
    ``of``; the page is always closed via page_end().
    """
    page_start(req, of, cc, aas, ln, uid, _("Browse"),
               p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
    search_box = create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot,
                                   aas, ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3, m3,
                                   sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
                                   action, em)
    req.write(search_box)
    help_link = create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
                                                     rm, cc, ln, jrec, rg, aas, action)
    write_warning(help_link, req=req)
    try:
        if aas == 1 or (p1 or p2 or p3):
            # advanced interface: browse each pattern/field pair in turn
            for pat, fld in ((p1, f1), (p2, f2), (p3, f3)):
                browse_pattern(req, colls_to_search, pat, fld, rg, ln)
        else:
            # simple interface: one pattern in one field
            browse_pattern(req, colls_to_search, p, f, rg, ln)
    except KeyboardInterrupt:
        # This happens usually from the command line
        # The error handling we want is different
        raise
    except:
        register_exception(req=req, alert_admin=True)
        if of.startswith("h"):
            req.write(create_error_box(req, verbose=verbose, ln=ln))
        elif of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
    return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
                               p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
                               so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
                               f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
                               d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
                               dt=None, jrec=None, ec=None, action=None, em=None,
                               verbose=None, **dummy):
    """
    Handle a similarity search: ``p`` has the form ``recid:<ID>`` and the
    records of collection ``cc`` are ranked against that record with
    ranking method ``rm`` via rank_records_bibrank().

    Output is streamed into ``req`` for HTML/XML formats; for data
    formats the ranked record IDs are returned as a list or intbitset.
    A non-existing base record yields an empty result (or a warning /
    404 for HTML).  Returns None when everything was written to ``req``.
    """
    if req and req.method != 'HEAD':
        page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
                   em=em)
    if of.startswith("h"):
        req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
                                    p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
                                    em
                                    ))
    recid = p[6:] # pattern is "recid:<ID>": strip the 6-char prefix
    if record_exists(recid) != 1:
        # record does not exist
        if of.startswith("h"):
            if req.header_only:
                raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
            else:
                write_warning(_("Requested record does not seem to exist."), req=req)
        if of == "id":
            return []
        if of == "intbitset":
            return intbitset()
        elif of == "recjson":
            return []
        elif of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
    else:
        # record well exists, so find similar ones to it
        t1 = os.times()[4]
        (results_similar_recIDs,
         results_similar_relevances,
         results_similar_relevances_prologue,
         results_similar_relevances_epilogue,
         results_similar_comments) = \
         rank_records_bibrank(rank_method_code=rm,
                              rank_limit_relevance=0,
                              hitset=get_collection_reclist(cc),
                              related_to=[p],
                              verbose=verbose,
                              field=f,
                              rg=rg,
                              jrec=jrec)
        if results_similar_recIDs:
            t2 = os.times()[4]
            cpu_time = t2 - t1 # elapsed time spent ranking
            if of.startswith("h"):
                req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
                                            jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
                                            sc, pl_in_url,
                                            d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
                write_warning(results_similar_comments, req=req)
                print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
                              results_similar_relevances,
                              results_similar_relevances_prologue,
                              results_similar_relevances_epilogue,
                              search_pattern=p, verbose=verbose, sf=sf, so=so,
                              sp=sp, rm=rm, em=em,
                              nb_found=len(results_similar_recIDs))
            elif of == "id":
                return results_similar_recIDs
            elif of == "intbitset":
                return intbitset(results_similar_recIDs)
            elif of.startswith("x"):
                print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
                              results_similar_relevances,
                              results_similar_relevances_prologue,
                              results_similar_relevances_epilogue,
                              search_pattern=p, verbose=verbose, sf=sf, so=so,
                              sp=sp, rm=rm, em=em,
                              nb_found=len(results_similar_recIDs))
        else:
            # rank_records failed and returned some error message to display:
            if of.startswith("h"):
                write_warning(results_similar_relevances_prologue, req=req)
                write_warning(results_similar_relevances_epilogue, req=req)
                write_warning(results_similar_comments, req=req)
            if of == "id":
                return []
            elif of == "intbitset":
                return intbitset()
            elif of == "recjson":
                return []
            elif of.startswith("x"):
                # Print empty, but valid XML
                print_records_prologue(req, of)
                print_records_epilogue(req, of)
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
                           p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
                           so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
                           f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
                           d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
                           dt=None, jrec=None, ec=None, action=None,
                           verbose=None, em=None, **dummy):
    """
    Handle a co-citation search: ``p`` has the form ``cocitedwith:<ID>``
    and the records co-cited with that record are computed via
    calculate_co_cited_with_list().

    Output is streamed into ``req`` for HTML/XML formats; for data
    formats the co-cited record IDs are returned as a list or intbitset.
    A non-existing base record (or an empty co-citation list) yields an
    empty result.  Returns None when everything was written to ``req``.
    """
    page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
               em=em)
    if of.startswith("h"):
        req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
                                    p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
                                    em
                                    ))
    recID = p[12:] # pattern is "cocitedwith:<ID>": strip the 12-char prefix
    if record_exists(recID) != 1:
        # record does not exist
        if of.startswith("h"):
            write_warning(_("Requested record does not seem to exist."), req=req)
        if of == "id":
            return []
        elif of == "intbitset":
            return intbitset()
        elif of == "recjson":
            return []
        elif of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
    else:
        # record well exists, so find co-cited ones:
        t1 = os.times()[4]
        results_cocited_recIDs = [x[0] for x in calculate_co_cited_with_list(int(recID))]
        if results_cocited_recIDs:
            t2 = os.times()[4]
            cpu_time = t2 - t1 # elapsed time spent computing co-citations
            if of.startswith("h"):
                req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
                                            jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
                                            sc, pl_in_url,
                                            d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
                print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
                              search_pattern=p, verbose=verbose, sf=sf, so=so,
                              sp=sp, rm=rm, em=em,
                              nb_found=len(results_cocited_recIDs))
            elif of == "id":
                return results_cocited_recIDs
            elif of == "intbitset":
                return intbitset(results_cocited_recIDs)
            elif of.startswith("x"):
                print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
                              search_pattern=p, verbose=verbose, sf=sf, so=so,
                              sp=sp, rm=rm, em=em,
                              nb_found=len(results_cocited_recIDs))
        else:
            # cited rank_records failed and returned some error message to display:
            if of.startswith("h"):
                write_warning("nothing found", req=req)
            if of == "id":
                return []
            elif of == "intbitset":
                return intbitset()
            elif of == "recjson":
                return []
            elif of.startswith("x"):
                # Print empty, but valid XML
                print_records_prologue(req, of)
                print_records_epilogue(req, of)
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
                                  p1=None, p2=None, p3=None, hosted_colls=None, f=None,
                                  colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
                                  verbose=None, **dummy):
    """
    Query the external "hosted" collections and record the outcome in
    ``kwargs`` for later pipeline stages.

    The external search runs only for html/xml output formats and
    non-``recid:`` patterns.  On return the following kwargs entries are
    set: hosted_colls_results, hosted_colls_timeouts,
    hosted_colls_true_results (searches that returned results in time),
    and the boolean flags hosted_colls_actual_or_potential_results_p,
    hosted_colls_potential_results_p and
    only_hosted_colls_actual_or_potential_results_p described inline
    below.
    """
    hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
    # search into the hosted collections only if the output format is html or xml
    if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
        # hosted_colls_results : the hosted collections' searches that did not timeout
        # hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
        (hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
        # successful searches
        if hosted_colls_results:
            hosted_colls_true_results = []
            for result in hosted_colls_results:
                # if the number of results is None or 0 (or False) then just do nothing
                if result[1] is None or result[1] is False:
                    # these are the searches the returned no or zero results
                    if verbose:
                        write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
                else:
                    # these are the searches that actually returned results on time
                    hosted_colls_true_results.append(result)
                    if verbose:
                        write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
        else:
            if verbose:
                write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
        if hosted_colls_timeouts:
            if verbose:
                for timeout in hosted_colls_timeouts:
                    write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
    # we need to know for later use if there were any hosted collections to be searched even if they weren't in the end
    elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
        (hosted_colls_results, hosted_colls_timeouts) = (None, None)
    else:
        if verbose:
            write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
    ## let's define some useful boolean variables:
    # True means there are actual or potential hosted collections results to be printed
    kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
    # True means there are hosted collections timeouts to take care of later
    # (useful for more accurate printing of results later)
    kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
    # True means we only have hosted collections to deal with
    kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
    kwargs['hosted_colls_results'] = hosted_colls_results
    kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
    kwargs['hosted_colls_true_results'] = hosted_colls_true_results
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
                        cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
                        f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
                        op2=None, f3=None, m3=None, ap=None, ec=None,
                        selected_external_collections_infos=None, verbose=None,
                        wl=None, em=None, **dummy):
    """
    Run the advanced-interface search: up to three pattern/field/matching
    triples (p1/f1/m1, p2/f2/m2, p3/f3/m3) combined with the boolean
    operators ``op1`` and ``op2`` ('a' = AND, 'o' = OR, 'n' = AND NOT).

    Hits are accumulated in place into ``results_in_any_collection``
    via union/intersection/difference updates.  If the first pattern
    yields nothing the page is closed early and page_end()'s value
    returned.  When individual patterns matched but a boolean
    combination emptied the result, a "no boolean hits" help box with
    per-pattern counts is shown for HTML output.  On errors an error box
    is rendered and the page closed.  Returns None on the normal path.
    """
    len_results_p1 = 0
    len_results_p2 = 0
    len_results_p3 = 0
    try:
        results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
        len_results_p1 = len(results_in_any_collection)
        if len_results_p1 == 0:
            # first pattern matched nothing: report and stop early
            if of.startswith("h"):
                perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
                                                           verbose, ln, selected_external_collections_infos, em=em)
            elif of.startswith("x"):
                # Print empty, but valid XML
                print_records_prologue(req, of)
                print_records_epilogue(req, of)
            return page_end(req, of, ln, em)
        if p2:
            results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
            len_results_p2 = len(results_tmp)
            if op1 == "a": # add
                results_in_any_collection.intersection_update(results_tmp)
            elif op1 == "o": # or
                results_in_any_collection.union_update(results_tmp)
            elif op1 == "n": # not
                results_in_any_collection.difference_update(results_tmp)
            else:
                if of.startswith("h"):
                    write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
            if len(results_in_any_collection) == 0:
                if of.startswith("h"):
                    if len_results_p2:
                        #each individual query returned results, but the boolean operation did not
                        nearestterms = []
                        nearest_search_args = req.argd.copy()
                        if p1:
                            nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
                        nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
                        write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
                    perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                               ln, selected_external_collections_infos, em=em)
                elif of.startswith("x"):
                    # Print empty, but valid XML
                    print_records_prologue(req, of)
                    print_records_epilogue(req, of)
        if p3:
            results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
            len_results_p3 = len(results_tmp)
            if op2 == "a": # add
                results_in_any_collection.intersection_update(results_tmp)
            elif op2 == "o": # or
                results_in_any_collection.union_update(results_tmp)
            elif op2 == "n": # not
                results_in_any_collection.difference_update(results_tmp)
            else:
                if of.startswith("h"):
                    write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
            if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
                #each individual query returned results but the boolean operation did not
                nearestterms = []
                nearest_search_args = req.argd.copy()
                if p1:
                    nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
                if p2:
                    nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
                nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
                write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
    except KeyboardInterrupt:
        # This happens usually from the command line
        # The error handling we want is different
        raise
    except:
        register_exception(req=req, alert_admin=True)
        if of.startswith("h"):
            req.write(create_error_box(req, verbose=verbose, ln=ln))
            perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                       ln, selected_external_collections_infos, em=em)
        elif of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
        return page_end(req, of, ln, em)
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
                      p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
                      only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
                      ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
                      **dummy):
    """
    Run the simple-interface search for pattern ``p`` in field ``f``,
    accumulating hits in place into ``results_in_any_collection``.

    Search stage 0 first consults the search-results cache under
    ``query_representation_in_cache``; on a cache hit the cached hitset
    is reused and no searching is performed.  Otherwise the pattern is
    evaluated via search_pattern_parenthesised() (skipped entirely when
    only hosted collections can produce results).  On error an error box
    is shown for HTML output, external collections are searched, and the
    value of page_end() is returned; otherwise returns None.
    """
    try:
        results_in_cache = intbitset().fastload(
            search_results_cache.get(query_representation_in_cache))
    except:
        # any cache miss or decoding problem is treated as "not cached"
        results_in_cache = None
    if results_in_cache is not None:
        # query was found in the cache, so reuse the cached results:
        results_in_any_collection.union_update(results_in_cache)
        if verbose and of.startswith("h"):
            write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
    else:
        try:
            # added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
            # recommendations when there are results only in the hosted collections. Also added the if clause to avoid
            # searching in case we know we only have actual or potential hosted collections results
            if not only_hosted_colls_actual_or_potential_results_p:
                results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
                                                                                    display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
                                                                                    wl=wl))
        except:
            register_exception(req=req, alert_admin=True)
            if of.startswith("h"):
                req.write(create_error_box(req, verbose=verbose, ln=ln))
                perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                           ln, selected_external_collections_infos, em=em)
            return page_end(req, of, ln, em)
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection,
                                        kwargs=None, colls_to_search=None,
                                        req=None, of=None, ln=None,
                                        cc=None, p=None, p1=None, p2=None, p3=None, f=None,
                                        ec=None, verbose=None, selected_external_collections_infos=None,
                                        em=None, **dummy):
    """
    Intersect the collection-independent hits with the record sets of
    the collections to be searched, updating ``results_final`` in place
    (collection name -> hitset).

    On error an error box is rendered for HTML output, external
    collections are searched, and the value of page_end() is returned;
    otherwise returns None.
    """
    # Suppress the "Nearest terms" box when hosted collections may still
    # deliver results of their own.
    show_nearest_terms = not kwargs['hosted_colls_actual_or_potential_results_p']
    try:
        # Nothing to intersect when the previous stage found no hits at all.
        if len(results_in_any_collection) > 0:
            per_coll_hits = intersect_results_with_collrecs(req, results_in_any_collection,
                                                            colls_to_search, of, verbose, ln,
                                                            display_nearest_terms_box=show_nearest_terms)
            results_final.update(per_coll_hits)
    except:
        register_exception(req=req, alert_admin=True)
        if of.startswith("h"):
            req.write(create_error_box(req, verbose=verbose, ln=ln))
            perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                       ln, selected_external_collections_infos, em=em)
        return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
    """
    Search stage 3: store the collection-independent search results in
    the search-results cache (only when caching is enabled, i.e.
    CFG_WEBSEARCH_SEARCH_CACHE_SIZE > 0).

    Three cache entries are written under ``query_representation_in_cache``:
    the dumped hitset itself, '::cc' with the current collection (taken
    from ``dummy``, defaulting to CFG_SITE_NAME), and -- when a request
    is available -- '::p' with the raw 'p' request value.
    """
    if CFG_WEBSEARCH_SEARCH_CACHE_SIZE > 0:
        search_results_cache.set(query_representation_in_cache,
                                 results_in_any_collection.fastdump(),
                                 timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
        search_results_cache.set(query_representation_in_cache + '::cc',
                                 dummy.get('cc', CFG_SITE_NAME),
                                 timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
        if req:
            # use the current Flask request to read the raw 'p' value
            from flask import request
            req = request
            search_results_cache.set(query_representation_in_cache + '::p',
                                     req.values.get('p', ''),
                                     timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
        if verbose and of.startswith("h"):
            # BUGFIX: the message is the first positional argument of
            # write_warning() (cf. the other call sites); previously
            # ``req`` was mistakenly passed as the message.
            write_warning("Search stage 3: storing query results in cache.", req=req)
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
                            p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
                            ec=None, selected_external_collections_infos=None,
                            hosted_colls_actual_or_potential_results_p=None,
                            datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
                            **dummy):
    """
    Search stage 5: restrict ``results_final`` in place, first by the
    optional time window (``datetext1``..``datetext2`` on date type
    ``dt``) and then by the optional search-pattern limit ``pl``.

    ``results_final`` maps collection name -> hitset and is updated or
    cleared in place.  If a limit empties the results and no hosted
    collections may still contribute, the page is closed early and the
    value of page_end() returned; the same happens on errors.  Returns
    None when processing should continue.
    """
    if datetext1 != "" and results_final != {}:
        if verbose and of.startswith("h"):
            write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
        try:
            results_temp = intersect_results_with_hitset(
                req,
                results_final,
                search_unit_in_bibrec(datetext1, datetext2, dt),
                ap,
                aptext= _("No match within your time limits, "
                          "discarding this condition..."),
                of=of)
            if results_temp:
                results_final.update(results_temp)
            else:
                # time limit discarded everything
                results_final.clear()
        except:
            register_exception(req=req, alert_admin=True)
            if of.startswith("h"):
                req.write(create_error_box(req, verbose=verbose, ln=ln))
                perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                           ln, selected_external_collections_infos, em=em)
            return page_end(req, of, ln, em)
        if results_final == {} and not hosted_colls_actual_or_potential_results_p:
            if of.startswith("h"):
                perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                           ln, selected_external_collections_infos, em=em)
            #if of.startswith("x"):
            #    # Print empty, but valid XML
            #    print_records_prologue(req, of)
            #    print_records_epilogue(req, of)
            return page_end(req, of, ln, em)
    if pl and results_final != {}:
        pl = wash_pattern(pl)
        if verbose and of.startswith("h"):
            write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
        try:
            results_temp = intersect_results_with_hitset(
                req,
                results_final,
                search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
                ap,
                aptext=_("No match within your search limits, "
                         "discarding this condition..."),
                of=of)
            if results_temp:
                results_final.update(results_temp)
            else:
                # pattern limit discarded everything
                results_final.clear()
        except:
            register_exception(req=req, alert_admin=True)
            if of.startswith("h"):
                req.write(create_error_box(req, verbose=verbose, ln=ln))
                perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                           ln, selected_external_collections_infos, em=em)
            return page_end(req, of, ln, em)
        if results_final == {} and not hosted_colls_actual_or_potential_results_p:
            if of.startswith("h"):
                perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
                                                           ln, selected_external_collections_infos, em=em)
            if of.startswith("x"):
                # Print empty, but valid XML
                print_records_prologue(req, of)
                print_records_epilogue(req, of)
            return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
                               cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
                               hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
    """
    Compute per-collection hit counts and the total number of distinct
    hits across all searched collections (local and hosted).

    ``results_final`` maps collection name -> hitset.  Hosted-collection
    results (counts, CPU time) are folded in, and timed-out hosted
    collections are marked with the sentinel count -963.  The computed
    values are stored back into ``kwargs`` and also returned as the
    tuple (results_final_nb, results_final_nb_total,
    results_final_for_all_selected_colls).
    """
    results_final_nb_total = 0
    results_final_nb = {} # will hold number of records found in each collection
                          # (in simple dict to display overview more easily)
    for coll in results_final.keys():
        results_final_nb[coll] = len(results_final[coll])
    # Now let us calculate results_final_nb_total more precisely,
    # in order to get the total number of "distinct" hits across
    # searched collections; this is useful because a record might
    # have been attributed to more than one primary collection; so
    # we have to avoid counting it multiple times. The price to
    # pay for this accuracy of results_final_nb_total is somewhat
    # increased CPU time.
    if len(results_final) == 1:
        # BUGFIX: this condition was ``results_final.keys() == 1`` which
        # compares a key list to an int and is never true, so the
        # single-collection shortcut below never fired.
        # only one collection; no need to union them
        results_final_for_all_selected_colls = list(results_final.values())[0]
        results_final_nb_total = list(results_final_nb.values())[0]
    else:
        # okay, some work ahead to union hits across collections:
        results_final_for_all_selected_colls = intbitset()
        for coll in results_final.keys():
            results_final_for_all_selected_colls.union_update(results_final[coll])
        results_final_nb_total = len(results_final_for_all_selected_colls)
    #if hosted_colls and (of.startswith("h") or of.startswith("x")):
    if hosted_colls_actual_or_potential_results_p:
        if hosted_colls_results:
            for result in hosted_colls_true_results:
                colls_to_search.append(result[0][1].name)
                results_final_nb[result[0][1].name] = result[1]
                results_final_nb_total += result[1]
                cpu_time += result[2]
        if hosted_colls_timeouts:
            for timeout in hosted_colls_timeouts:
                colls_to_search.append(timeout[1].name)
                # use -963 as a special number to identify the collections that timed out
                results_final_nb[timeout[1].name] = -963
    kwargs['results_final_nb'] = results_final_nb
    kwargs['results_final_nb_total'] = results_final_nb_total
    kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
    kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
    return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
                          p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
                          ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
    """
    Produce a citation summary (output formats 'hcs'/'hcs2') of the
    already-computed search results.

    When the user searched via the advanced interface (``p`` empty but
    p1/p2/p3 given), an equivalent textual query string is rebuilt from
    the pattern/field/operator triples so the summarizer can display it.
    """
    # feed the current search to be summarized:
    from invenio.legacy.search_engine.summarizer import summarize_records
    search_p = p
    search_f = f
    if not p and (aas == 1 or p1 or p2 or p3):
        # textual renderings of the boolean operators
        op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
        # NOTE(review): assumes ziplist() yields mutable per-triple lists;
        # the field column of the NEXT triple is blanked in place below.
        triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
        triples_len = len(triples)
        for i in range(triples_len):
            fi, pi, oi = triples[i] # field, pattern, operator
            if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
                triples[i+1][0] = '' # f2 must be too
                oi = '' # and o1
            if ' ' in pi:
                # quote patterns containing spaces
                pi = '"'+pi+'"'
            if fi:
                fi = fi + ':'
            search_p += fi + pi + op_d[oi]
        search_f = ''
    summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
if len(colls_to_search) > 1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if coll in results_final and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_nb_found = len(results_final_recIDs)
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
else:
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em,
nb_found=results_final_nb_found)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] is None or result[1] is False:
## these are the searches the returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
                  colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
    """Log the current search query and, for HTML output formats, flash
    the email-alert/RSS teaser box for it.

    The query is recorded twice: in the query-log table (``log_query``,
    web requests only) and in the search log file (``log_query_info``,
    always, including CLI usage).

    :param req: request object; None when called from the CLI
    :param uid: identifier of the current user
    :param of: output format code; the teaser is shown only for 'h*'
    :param ln: interface language for the teaser box
    :param p: search pattern, recorded in the search log file
    :param f: search field, recorded in the search log file
    :param colls_to_search: list of searched collection names
    :param results_final_nb_total: total number of hits found
    :param em: comma-separated "enabled modules" display parameter
    """
    # FIXME move query logging to signal receiver
    # log query:
    try:
        from flask.ext.login import current_user
        if req:
            from flask import request
            # prefer the Flask request proxy over the legacy req object
            req = request
            # record the raw query string (k=v pairs) in the query-log table;
            # NOTE(review): the tuple-unpacking lambda below is Python-2-only syntax
            id_query = log_query(req.host,
                                 '&'.join(map(lambda (k,v): k+'='+v, request.values.iteritems(multi=True))),
                                 uid)
            #id_query = log_query(req.remote_host, req.args, uid)
            #of = request.values.get('of', 'hb')
            if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
                if not of in ['hcs', 'hcs2']:
                    # display alert/RSS teaser for non-summary formats:
                    display_email_alert_part = True
                    if current_user:
                        if current_user['email'] == 'guest':
                            # hide the email-alert part from guests on
                            # installations with restrictive account policy
                            if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
                                display_email_alert_part = False
                        else:
                            # logged-in user who has alerts disabled
                            if not current_user['precached_usealerts']:
                                display_email_alert_part = False
                    from flask import flash
                    flash(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
                                    ln=ln, display_email_alert_part=display_email_alert_part), 'search-results-after')
    except:
        # do not log query if req is None (used by CLI interface)
        pass
    # always record the query in the search log file
    log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
                      p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
                      so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
                      f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
                      d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
                      dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
                      verbose=None, wl=None, em=None, **dummy):
    """Common driver for an ordinary search request.

    Workflow: start the output page, trigger hosted-collection
    searches, render the search box and WebSearch services (HTML
    formats only), run search stage 3 (pattern matching: simple,
    add-to-search or advanced, depending on ``aas``/``p1``-``p3``),
    cache the raw hit set, run stages 4-5 (collection intersection and
    search limits) and finally stage 6 (display).

    All search parameters also travel inside ``kwargs`` and are passed
    down to the ``prs_*`` stage helpers; several helpers communicate
    back by mutating ``kwargs``.

    :return: whatever a stage helper returns when it short-circuits
        (e.g. an error page or an intermediate result), otherwise the
        return value of :func:`prs_display_results`; None when there is
        nothing to display.
    """
    query_representation_in_cache = get_search_results_cache_key(**kwargs)
    page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)

    if of.startswith("h") and verbose and wash_colls_debug:
        write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)

    prs_search_hosted_collections(kwargs=kwargs, **kwargs)

    if of.startswith("h"):
        req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
                                    p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
                                    em
                                    ))

    # WebSearch services
    # (em == "" means "everything enabled"; otherwise the module must be listed)
    if jrec <= 1 and \
           (em == "" and True or (EM_REPOSITORY["search_services"] in em)):
        user_info = collect_user_info(req)
        # display only on first search page, and only if wanted
        # when 'em' param set.
        for answer_relevance, answer_html in services.get_answers(
                req, user_info, of, cc, colls_to_search, p, f, ln):
            req.write('<div class="searchservicebox">')
            req.write(answer_html)
            if verbose > 8:
                write_warning("Service relevance: %i" % answer_relevance, req=req)
            req.write('</div>')

    # start the CPU-time measurement for this search
    t1 = os.times()[4]
    results_in_any_collection = intbitset()

    # search stage 3: pattern matching
    if aas == 2 and not (p2 or p3):
        ## 3A add-to-search
        output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    elif aas == 1 or (p1 or p2 or p3):
        ## 3B - advanced search
        output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    else:
        ## 3C - simple search
        output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
        if output is not None:
            return output

    # no local hits and no hosted-collection hits to wait for: we are done
    if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
        if of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
        return None

    # store this search query results into search results cache if needed:
    prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)

    # search stage 4 and 5: intersection with collection universe and sorting/limiting
    try:
        output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
        if output is not None:
            return output
    except KeyboardInterrupt:
        # This happens usually from the command line
        # The error handling we want is different
        raise
    except: # no results to display
        # NOTE: the stage-4/5 helper signals "nothing to display" by
        # raising; any output it produced has already been written
        return None

    t2 = os.times()[4]
    cpu_time = t2 - t1
    kwargs['cpu_time'] = cpu_time
    ## search stage 6: display results:
    return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
                                                     kwargs=None, req=None, of=None,
                                                     **dummy):
    """Run search stages 4 and 5.

    Stage 4 intersects the raw hit set with the collection universe
    (per-collection result sets are accumulated in ``results_final``);
    stage 5 applies search-option limits and restrictions.

    The per-collection results are communicated to the caller via
    ``kwargs['results_final']``.

    :raises Exception: deliberately, when stage 4 leaves no results and
        no hosted collection may still deliver any; the caller
        (:func:`prs_search_common`) catches this as the "no results to
        display" signal.
    :return: a stage helper's short-circuit output, otherwise None.
    """
    # search stage 4: intersection with collection universe:
    results_final = {}
    output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
    if output is not None:
        return output

    # another external search if we still don't have something
    if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
        if of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
        kwargs['results_final'] = results_final
        # control-flow signal, see docstring
        raise Exception

    # search stage 5: apply search option limits and restrictions:
    output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
    kwargs['results_final'] = results_final
    if output is not None:
        return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
                        so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
                        cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
                        f=None, em=None, jrec=None, rg=None, **dummy
                        ):
    """Search stage 6: render or return the final results.

    Behaviour depends on the output format ``of``:

    - ``intbitset``: return the merged hit set as an intbitset;
    - ``id``: return the (optionally ranked or sorted) list of recIDs,
      sliced to the requested page;
    - ``h*``: write the results overview and records as HTML;
    - ``x*`` / ``mobb``: write the hit count as an XML comment before
      the records.

    Also logs the query (via :func:`prs_log_query`) whenever something
    was found.
    """
    ## search stage 6: display results:

    # split result set into collections
    (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)

    # we continue past this point only if there is a hosted collection that has timed out and might offer potential results
    if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
        if of.startswith("h"):
            write_warning("No match found, please enter different search terms.", req=req)
        elif of.startswith("x"):
            # Print empty, but valid XML
            print_records_prologue(req, of)
            print_records_epilogue(req, of)
    else:
        prs_log_query(kwargs=kwargs, **kwargs)

        # yes, some hits found: good!
        # collection list may have changed due to not-exact-match-found policy so check it out:
        for coll in results_final.keys():
            if coll not in colls_to_search:
                colls_to_search.append(coll)

        # print results overview:
        if of == "intbitset":
            #return the result as an intbitset
            return results_final_for_all_selected_colls
        elif of == "id":
            # we have been asked to return list of recIDs
            recIDs = list(results_final_for_all_selected_colls)
            if rm: # do we have to rank?
                results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
                                                                              p.split() + p1.split() +
                                                                              p2.split() + p3.split(), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
                # fall back to the unranked list when ranking yields nothing
                if results_final_for_all_colls_rank_records_output[0]:
                    recIDs = results_final_for_all_colls_rank_records_output[0]
            elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
                recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
            # slice to the requested page (jrec = first record, rg = page size)
            return slice_records(recIDs, jrec, rg)
        elif of.startswith("h"):
            # summary/citation formats print their own overview
            if of not in ['hcs', 'hcs2', 'hcv', 'htcv', 'tlcv']:
                # added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
                req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
                                                ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
                kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
                                                                                               f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)

        # print number of hits found for XML outputs:
        if of.startswith("x") or of == 'mobb':
            req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])

        # print records:
        if of in ['hcs', 'hcs2']:
            prs_summarize_records(kwargs=kwargs, **kwargs)
        elif of in ['hcv', 'htcv', 'tlcv'] and CFG_INSPIRE_SITE:
            from invenio.legacy.search_engine.cvifier import cvify_records
            cvify_records(results_final_for_all_selected_colls, of, req, so)
        else:
            prs_print_records(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
                     sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
                     verbose=None, **dummy
                     ):
    """Return the ranked (or sorted) list of matching recIDs.

    Mirrors the ``of == "id"`` branch of :func:`prs_display_results`
    but writes nothing to the request, so it is safe to call from
    external modules that only need the recID list.

    :return: list of recIDs, ranked by ``rm`` when given, otherwise
        sorted by ``sf`` when sorting applies, otherwise unordered.
    """
    ## search stage 6: display results:

    # split result set into collections
    dummy_results_final_nb, dummy_results_final_nb_total, results_final_for_all_selected_colls = prs_split_into_collections(kwargs=kwargs, **kwargs)

    # yes, some hits found: good!
    # collection list may have changed due to not-exact-match-found policy so check it out:
    for coll in results_final.keys():
        if coll not in colls_to_search:
            colls_to_search.append(coll)

    # we have been asked to return list of recIDs
    recIDs = list(results_final_for_all_selected_colls)
    if rm: # do we have to rank?
        results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
                                                                      p.split() + p1.split() +
                                                                      p2.split() + p3.split(), verbose, so, of, field=kwargs['f'])
        # keep the unranked list when ranking yields nothing
        if results_final_for_all_colls_rank_records_output[0]:
            recIDs = results_final_for_all_colls_rank_records_output[0]
    elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
        recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)

    return recIDs
def perform_request_cache(req, action="show"):
    """Write an HTML page showing the search engine cache contents:
    the collection reclist cache and the field/collection I18N name
    caches, each with its table-update time and timestamp."""
    req.content_type = "text/html"
    req.send_http_header()
    req.write("<html>")
    req.write("<h1>Search Cache</h1>")
    # section 1: collection reclist cache
    section = ["<h3>Collection reclist cache</h3>",
               "- collection table last updated: %s" % get_table_update_time('collection'),
               "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp,
               "<br />- reclist cache contents:",
               "<blockquote>"]
    for coll_name in collection_reclist_cache.cache.keys():
        reclist = collection_reclist_cache.cache[coll_name]
        if reclist:
            section.append("%s (%d)<br />" % (coll_name, len(reclist)))
    section.append("</blockquote>")
    req.write("".join(section))
    # section 2: field I18N name cache
    section = ["<h3>Field I18N names cache</h3>",
               "- fieldname table last updated: %s" % get_table_update_time('fieldname'),
               "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp,
               "<br />- i18nname cache contents:",
               "<blockquote>"]
    for field_name in field_i18nname_cache.cache.keys():
        for lang in field_i18nname_cache.cache[field_name].keys():
            section.append("%s, %s = %s<br />" % (field_name, lang, field_i18nname_cache.cache[field_name][lang]))
    section.append("</blockquote>")
    req.write("".join(section))
    # section 3: collection I18N name cache
    section = ["<h3>Collection I18N names cache</h3>",
               "- collectionname table last updated: %s" % get_table_update_time('collectionname'),
               "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp,
               "<br />- i18nname cache contents:",
               "<blockquote>"]
    for coll_name in collection_i18nname_cache.cache.keys():
        for lang in collection_i18nname_cache.cache[coll_name].keys():
            section.append("%s, %s = %s<br />" % (coll_name, lang, collection_i18nname_cache.cache[coll_name][lang]))
    section.append("</blockquote>")
    req.write("".join(section))
    req.write("</html>")
    return "\n"
def perform_request_log(req, date=""):
    """Display search log information for given date.

    When ``date`` (YYYYMMDD) is given, show one table row per query
    logged that day; otherwise show one row per day of the current
    month, each linking to the per-day view.  Log entries are read by
    grepping ``search.log`` under CFG_LOGDIR.
    """
    req.content_type = "text/html"
    req.send_http_header()
    req.write("<html>")
    req.write("<h1>Search Log</h1>")
    if date: # case A: display stats for a day
        # date arrives as a string; atoi also sanitizes it for the
        # shell command below (non-numeric input raises)
        yyyymmdd = string.atoi(date)
        req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
        req.write("""<table border="1">""")
        req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
        # read file:
        p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
        lines = p.readlines()
        p.close()
        # process lines:
        i = 0
        for line in lines:
            try:
                # log line format: datetime#aas#pattern#field#collection#nbhits
                datetime, dummy_aas, p, f, c, nbhits = line.split("#")
                i += 1
                # datetime is YYYYMMDDhhmmss; slice out hh:mm:ss
                req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
                          % (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
            except:
                pass # ignore eventual wrong log lines
        req.write("</table>")
    else: # case B: display summary stats per day
        # from the first of the current month up to today
        yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
        yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
        req.write("""<table border="1">""")
        req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
        for day in range(yyyymm01, yyyymmdd + 1):
            # grep -c counts the matching lines, i.e. queries for that day
            p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
            for line in p.readlines():
                req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" %
                          (day, CFG_SITE_URL, day, line))
            p.close()
        req.write("</table>")
    req.write("</html>")
    return "\n"
def get_all_field_values(tag):
    """
    Return all existing values stored for a given tag.

    @param tag: the full tag, e.g. 909C0b
    @type tag: string
    @return: the list of values
    @rtype: list of strings
    """
    # the first two digits of the tag select the bibXXx table
    table_name = 'bib%02dx' % int(tag[:2])
    query = "SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table_name
    values = []
    for row in run_sql(query, (tag, )):
        values.append(row[0])
    return values
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
    """
    Analyze RECIDS and look for TAGS and return most popular values
    and the frequency with which they occur sorted according to
    descending frequency.

    If a value is found in EXCLUDE_VALUES, then do not count it.

    If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
    of value in the tags.  If False, then we count the value only once
    regardless of the number of times it may appear in a record.
    (But, if the same value occurs in another record, we count it, of
    course.)

    @param recids: record IDs to analyze
    @param tags: a single tag or a sequence of tags to look up
    @param exclude_values: values to skip when counting
    @param count_repetitive_values: see above
    @param split_by: passed through to get_fieldvalues()

    @return: list of tuples containing tag and its frequency

    Example:
     >>> get_most_popular_field_values(range(11,20), '980__a')
     [('PREPRINT', 10), ('THESIS', 7), ...]
     >>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
     [('Ellis, J', 10), ('Ellis, N', 7), ...]
     >>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
     [('Ellis, N', 7), ...]
    """
    def _get_most_popular_field_values_helper_sorter(val1, val2):
        """Compare VAL1 and VAL2 according to, firstly, frequency, then
        secondly, alphabetically."""
        compared_via_frequencies = cmp(valuefreqdict[val2],
                                       valuefreqdict[val1])
        if compared_via_frequencies == 0:
            return cmp(val1.lower(), val2.lower())
        else:
            return compared_via_frequencies

    valuefreqdict = {}
    ## sanity check:
    if not exclude_values:
        exclude_values = []
    if isinstance(tags, string_types):
        tags = (tags,)
    ## find values to count:
    vals_to_count = []
    displaytmp = {}
    if count_repetitive_values:
        # counting technique A: can look up many records at once: (very fast)
        for tag in tags:
            vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
                                                 split_by=split_by))
    else:
        # counting technique B: must count record-by-record: (slow)
        for recid in recids:
            vals_in_rec = []
            for tag in tags:
                for val in get_fieldvalues(recid, tag, False):
                    vals_in_rec.append(val)
            # do not count repetitive values within this record
            # (even across various tags, so need to unify again):
            # keys are lowercased for the uniqueness test; displaytmp
            # remembers one original-cased spelling for display
            dtmp = {}
            for val in vals_in_rec:
                dtmp[val.lower()] = 1
                displaytmp[val.lower()] = val
            vals_in_rec = dtmp.keys()
            vals_to_count.extend(vals_in_rec)
    ## are we to exclude some of found values?
    # NOTE(review): under technique B the values compared against
    # exclude_values are lowercased, so exclusion there is effectively
    # case-insensitive while technique A matches exactly — confirm
    # whether this asymmetry is intended
    for val in vals_to_count:
        if val not in exclude_values:
            if val in valuefreqdict:
                valuefreqdict[val] += 1
            else:
                valuefreqdict[val] = 1
    ## sort by descending frequency of values:
    if not CFG_NUMPY_IMPORTABLE:
        ## original version
        out = []
        vals = valuefreqdict.keys()
        vals.sort(_get_most_popular_field_values_helper_sorter)
        for val in vals:
            tmpdisplv = ''
            if val in displaytmp:
                tmpdisplv = displaytmp[val]
            else:
                tmpdisplv = val
            out.append((tmpdisplv, valuefreqdict[val]))
        return out
    else:
        f = []   # frequencies
        n = []   # original names
        ln = []  # lowercased names
        ## build lists within one iteration
        for (val, freq) in iteritems(valuefreqdict):
            f.append(-1 * freq)
            if val in displaytmp:
                n.append(displaytmp[val])
            else:
                n.append(val)
            ln.append(val.lower())
        ## sort by frequency (desc) and then by lowercased name.
        # lexsort sorts by the last key first, hence [ln, f]; the
        # negated frequencies give descending order
        return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
def profile(p="", f="", c=CFG_SITE_NAME):
    """Profile search time.

    Runs perform_request_search(p=..., f=..., c=...) under the profiler
    and prints cumulative timing statistics to stdout.

    @param p: search pattern
    @param f: search field
    @param c: collection name
    @return: 0
    """
    import profile as pyprofile
    import pstats
    # pass the arguments through the execution namespace instead of
    # interpolating them into the profiled statement: a pattern
    # containing a quote used to break (or inject code into) the
    # string handed to pyprofile.run()
    pyprofile.runctx("perform_request_search(p=p, f=f, c=c)",
                     globals(),
                     {'p': p, 'f': f, 'c': c},
                     "perform_request_search_profile")
    p = pstats.Stats("perform_request_search_profile")
    p.strip_dirs().sort_stats("cumulative").print_stats()
    return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
                                               external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
                                               selected_external_collections_infos=None, em=""):
    """Delegate to perform_external_collection_search(), translating the
    'em' (enabled modules) parameter into the individual print_* flags:
    an empty 'em' enables every part, otherwise a part is printed only
    when its module name is listed in 'em'."""
    show_everything = (em == "")
    perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
                                       verbosity_level, lang, selected_external_collections_infos,
                                       print_overview=show_everything or EM_REPOSITORY["overview"] in em,
                                       print_search_info=show_everything or EM_REPOSITORY["search_info"] in em,
                                       print_see_also_box=show_everything or EM_REPOSITORY["see_also_box"] in em,
                                       print_body=show_everything or EM_REPOSITORY["body"] in em)
@cache.memoize(timeout=5)
def get_fulltext_terms_from_search_pattern(search_pattern):
    """Extract the fulltext keywords from a search pattern.

    Only positive (non-negated) basic search units whose field is
    unset or 'fulltext' contribute; '%...%' partial-phrase markers
    are stripped from the returned terms."""
    terms = []
    if search_pattern is None:
        return terms
    for unit in create_basic_search_units(None, search_pattern.encode('utf-8'), None):
        operator, pattern, field, match_type = unit[0], unit[1], unit[2], unit[3]
        if operator == '-' or field not in [None, 'fulltext']:
            continue
        if match_type == 'a' and pattern.startswith('%') and pattern.endswith('%'):
            # strip the surrounding '%' that marks a partial phrase search
            terms.append(pattern[1:-1])
        else:
            terms.append(pattern)
    return terms
def check_user_can_edit_record(req, recid):
    """Check if user has authorization to modify a collection
    the recid belongs to.

    Returns True as soon as 'runbibedit' is authorized for any of the
    record's collections; for a record belonging to no collection, the
    global (empty-collection) authorization is required."""
    record_collections = get_all_collections_of_a_record(recid)
    if not record_collections:
        # Check if user has access to all collections
        auth_code, dummy_auth_message = acc_authorize_action(req, 'runbibedit',
                                                            collection='')
        return auth_code == 0
    for collection in record_collections:
        auth_code, dummy_auth_message = acc_authorize_action(req, 'runbibedit',
                                                            collection=collection)
        if auth_code == 0:
            return True
    return False
``` |
[
{
"content": "Here is the source code:\n```python\n# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nimport inspect\r\nimport types\r\n\r\nfrom django.apps import apps\r\nfrom django.core.checks import Error, Tags, register\r\n\r\n\r\n@register(Tags.models)\r\ndef check_all_models(app_co... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nimport inspect\r\nimport types\r\n\r\nfrom django.apps import apps\r\nfrom django.core.checks import Error, Tags, register\r\n\r\n\r\n@register(Tags.models)\r\ndef check_a... | ```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import types
from django.apps import apps
from django.core.checks import Error, Tags, register
@register(Tags.models)
def check_all_models(app_configs=None, **kwargs):
errors = []
for model in apps.get_models():
if app_configs is None or model._meta.app_config in app_configs:
if not inspect.ismethod(model.check):
errors.append(
Error(
"The '%s.check()' class method is "
"currently overridden by %r." % (
model.__name__, model.check),
hint=None,
obj=model,
id='models.E020'
)
)
else:
errors.extend(model.check(**kwargs))
return errors
@register(Tags.models, Tags.signals)
def check_model_signals(app_configs=None, **kwargs):
"""
Ensure lazily referenced model signals senders are installed.
"""
# Avoid circular import
from django.db import models
errors = []
for name in dir(models.signals):
obj = getattr(models.signals, name)
if isinstance(obj, models.signals.ModelSignal):
for reference, receivers in obj.unresolved_references.items():
for receiver, _, _ in receivers:
# The receiver is either a function or an instance of class
# defining a `__call__` method.
if isinstance(receiver, types.FunctionType):
description = "The '%s' function" % receiver.__name__
else:
description = "An instance of the '%s' class" % receiver.__class__.__name__
errors.append(
Error(
"%s was connected to the '%s' signal "
"with a lazy reference to the '%s' sender, "
"which has not been installed." % (
description, name, '.'.join(reference)
),
obj=receiver.__module__,
hint=None,
id='signals.E001'
)
)
return errors
``` |
[
{
"content": "Here is the snippet:\n```python\n# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the Licens... | ```python
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators
from tempest import test
CONF = config.CONF
class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_credentials(cls):
# This test needs a network and a subnet
cls.set_network_resources(network=True, subnet=True)
super(VirtualInterfacesTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(VirtualInterfacesTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
super(VirtualInterfacesTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@decorators.skip_because(bug="1183436",
condition=CONF.service_available.neutron)
@test.idempotent_id('96c4e2ef-5e4d-4d7f-87f5-fed6dca18016')
@test.services('network')
def test_list_virtual_interfaces(self):
# Positive test:Should be able to GET the virtual interfaces list
# for a given server_id
output = self.client.list_virtual_interfaces(self.server_id)
self.assertIsNotNone(output)
virt_ifaces = output
self.assertNotEqual(0, len(virt_ifaces['virtual_interfaces']),
'Expected virtual interfaces, got 0 interfaces.')
for virt_iface in virt_ifaces['virtual_interfaces']:
mac_address = virt_iface['mac_address']
self.assertTrue(netaddr.valid_mac(mac_address),
"Invalid mac address detected. mac address: %s"
% mac_address)
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n\"\"\"\nA Windows and ANSII coloring library.\n\"\"\"\n\nfrom sys import stdout\n\n__ALL__ = ['out', 'outln']\nVERSION = (1, 0, 0)\n\ntry:\n # Colors text in console mode application (win32).\n # Uses ctypes and Win32 methods... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n\"\"\"\nA Windows and ANSII coloring library.\n\"\"\"\n\nfrom sys import stdout\n\n__ALL__ = ['out', 'outln']\nVERSION = (1, 0, 0)\n\ntry:\n # Colors text in console mode application (win32).\n # Uses ctypes a... | ```python
"""
A Windows and ANSII coloring library.
"""
from sys import stdout
__ALL__ = ['out', 'outln']
VERSION = (1, 0, 0)
try:
# Colors text in console mode application (win32).
# Uses ctypes and Win32 methods SetConsoleTextAttribute and
# GetConsoleScreenBufferInfo.
#
# source: https://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
#
# $Id: color_console.py 534 2009-05-10 04:00:59Z andre $
from ctypes import windll, Structure, c_short, c_ushort, byref
SHORT = c_short
WORD = c_ushort
class COORD(Structure):
"""struct in wincon.h."""
_fields_ = [
("X", SHORT),
("Y", SHORT)
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT)
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD)
]
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
def get_text_attr():
"""Returns the character attributes (colors) of the console screen
buffer."""
csbi = CONSOLE_SCREEN_BUFFER_INFO()
GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
return csbi.wAttributes
def set_text_attr(color):
"""Sets the character attributes (colors) of the console screen
buffer. Color is a combination of foreground and background color,
foreground and background intensity."""
SetConsoleTextAttribute(stdout_handle, color)
HIGHLIGHTS = {
'on_grey': BACKGROUND_GREY,
'on_red': BACKGROUND_RED,
'on_green': BACKGROUND_GREEN,
'on_yellow': BACKGROUND_YELLOW,
'on_blue': BACKGROUND_BLUE,
'on_magenta': BACKGROUND_MAGENTA,
'on_cyan': BACKGROUND_CYAN,
'on_black': BACKGROUND_BLACK
}
BOLD = FOREGROUND_INTENSITY
COLORS = {
'grey': FOREGROUND_GREY,
'red': FOREGROUND_RED,
'green': FOREGROUND_GREEN,
'yellow': FOREGROUND_YELLOW,
'blue': FOREGROUND_BLUE,
'magenta': FOREGROUND_MAGENTA,
'cyan': FOREGROUND_CYAN,
'black': FOREGROUND_BLACK,
}
DEFAULT_COLORS = get_text_attr()
DEFAULT_BACKGROUND = DEFAULT_COLORS & 0x00f0
DEFAULT_FOREGROUND = DEFAULT_COLORS & 0x000f
def set_color(val):
assert isinstance(val, int)
set_text_attr(val)
def set_default_colors():
set_color(DEFAULT_COLORS)
def colored(text, color=None, on_color=None, attrs=None):
color_index = 0
if color not in COLORS:
color_index |= DEFAULT_FOREGROUND
else:
color_index |= COLORS[color]
if on_color not in HIGHLIGHTS:
color_index |= DEFAULT_BACKGROUND
else:
color_index |= HIGHLIGHTS[on_color]
if attrs is not None:
for attr in attrs:
if attr == 'bold':
color_index |= BOLD
return str(text), color_index
def out(*text):
if isinstance(text, str):
set_default_colors()
stdout.write(text)
return
for c in text:
if isinstance(c, str):
set_default_colors()
stdout.write(c)
else:
assert isinstance(c, tuple) or isinstance(c, list)
assert len(c) == 2
assert isinstance(c[0], str)
assert isinstance(c[1], int)
set_color(c[1])
stdout.write(c[0])
set_default_colors()
except ImportError:
# from the great "termcolor.py" library.
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <konstantin.lepa@gmail.com>
import os
# Map attribute name -> ANSI SGR code. SGR codes 3 and 6 have no named
# attribute in this table, hence the gaps in the numbering.
ATTRIBUTES = {
    'bold': 1,
    'dark': 2,
    'underline': 4,
    'blink': 5,
    'reverse': 7,
    'concealed': 8,
}
# Map background color name -> ANSI SGR code (40-47).
HIGHLIGHTS = {
    name: code
    for code, name in enumerate(
        ('on_black', 'on_red', 'on_green', 'on_yellow',
         'on_blue', 'on_magenta', 'on_cyan', 'on_grey'),
        start=40,
    )
}
# Map foreground color name -> ANSI SGR code (30-37).
COLORS = {
    name: code
    for code, name in enumerate(
        ('black', 'red', 'green', 'yellow',
         'blue', 'magenta', 'cyan', 'grey'),
        start=30,
    )
}

# SGR sequence that resets all attributes back to the terminal default.
RESET = '\033[0m'
def colored(text, color=None, on_color=None, attrs=None):
    """Colorize text with ANSI SGR escape sequences.

    Available text colors:
        red, green, yellow, blue, magenta, cyan, white.
    Available text highlights:
        on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
    Available attributes:
        bold, dark, underline, blink, reverse, concealed.

    Example:
        colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
        colored('Hello, World!', 'green')

    :param text: text to format in the requested color
    :param color: font color
    :param on_color: background color
    :param attrs: additional font attributes
    """
    # Honor the conventional escape hatch for disabling ANSI output.
    if os.getenv('ANSI_COLORS_DISABLED') is not None:
        return text
    codes = []
    if color is not None:
        codes.append(COLORS[color])
    if on_color is not None:
        codes.append(HIGHLIGHTS[on_color])
    if attrs is not None:
        codes.extend(ATTRIBUTES[attr] for attr in attrs)
    # Wrap in the same order as chained formatting would:
    # color innermost, then highlight, then attributes.
    for code in codes:
        text = '\033[%dm%s' % (code, text)
    return text + RESET
def out(*text):
    """Write each positional argument to stdout, then flush.

    Non-string arguments are converted with str() before writing.
    """
    # NOTE: the original special-cased `isinstance(text, str)`, but
    # `*text` always binds a tuple, so that branch was unreachable
    # dead code and has been removed. For a str element, str(c) is a
    # no-op, so the single write below covers both cases.
    for chunk in text:
        stdout.write(str(chunk))
    stdout.flush()
def outln(*text):
    """Write the arguments via out() and terminate with a newline."""
    out(*text)
    stdout.write("\n")
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# Copyright 2017 The Armada Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/lice... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# Copyright 2017 The Armada Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www... | ```python
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import yaml
import testtools
from armada import const
from armada import exceptions
from armada.handlers import manifest
from armada.utils import validate
class ManifestTestCase(testtools.TestCase):
def setUp(self):
super(ManifestTestCase, self).setUp()
examples_dir = os.path.join(
os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read()))
def test_get_documents(self):
armada_manifest = manifest.Manifest(self.documents)
self.assertIsInstance(armada_manifest.charts, list)
self.assertIsInstance(armada_manifest.groups, list)
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(4, len(armada_manifest.charts))
self.assertEqual(2, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(4)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(4, 6)],
armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest)
def test_get_documents_with_target_manifest(self):
# Validate that specifying `target_manifest` flag returns the correct
# manifest.
armada_manifest = manifest.Manifest(
self.documents, target_manifest='armada-manifest')
self.assertIsInstance(armada_manifest.charts, list)
self.assertIsInstance(armada_manifest.groups, list)
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(4, len(armada_manifest.charts))
self.assertEqual(2, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(4)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(4, 6)],
armada_manifest.groups)
self.assertEqual(self.documents[-1], armada_manifest.manifest)
self.assertEqual('armada-manifest',
self.documents[-1]['metadata']['name'])
def test_get_documents_with_multi_manifest_and_target_manifest(self):
# Validate that specifying `target_manifest` flag returns the correct
# manifest even if there are multiple existing manifests. (Only works
# when the manifest names are distinct or else should raise error.)
documents = copy.deepcopy(self.documents)
other_manifest = copy.deepcopy(self.documents[-1])
other_manifest['metadata']['name'] = 'alt-armada-manifest'
documents.append(other_manifest)
# Specify the "original" manifest and verify it works.
armada_manifest = manifest.Manifest(
documents, target_manifest='armada-manifest')
self.assertIsInstance(armada_manifest.charts, list)
self.assertIsInstance(armada_manifest.groups, list)
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(4, len(armada_manifest.charts))
self.assertEqual(2, len(armada_manifest.groups))
self.assertEqual([self.documents[x] for x in range(4)],
armada_manifest.charts)
self.assertEqual([self.documents[x] for x in range(4, 6)],
armada_manifest.groups)
self.assertEqual(armada_manifest.manifest, self.documents[-1])
self.assertEqual('armada-manifest',
armada_manifest.manifest['metadata']['name'])
# Specify the alternative manifest and verify it works.
armada_manifest = manifest.Manifest(
documents, target_manifest='alt-armada-manifest')
self.assertIsNotNone(armada_manifest.manifest)
self.assertEqual(other_manifest, armada_manifest.manifest)
self.assertEqual('alt-armada-manifest',
armada_manifest.manifest['metadata']['name'])
def test_get_manifest(self):
armada_manifest = manifest.Manifest(self.documents)
obtained_manifest = armada_manifest.get_manifest()
self.assertIsInstance(obtained_manifest, dict)
self.assertEqual(obtained_manifest['armada'],
armada_manifest.manifest['data'])
def test_find_documents(self):
armada_manifest = manifest.Manifest(self.documents)
chart_documents, chart_groups, manifests = armada_manifest. \
_find_documents()
# checking if all the chart documents are present
self.assertIsInstance(chart_documents, list)
helm_toolkit_chart = armada_manifest. \
find_chart_document('helm-toolkit')
self.assertEqual(chart_documents[0], helm_toolkit_chart)
mariadb_chart = armada_manifest.find_chart_document('mariadb')
self.assertEqual(chart_documents[1], mariadb_chart)
memcached_chart = armada_manifest.find_chart_document('memcached')
self.assertEqual(chart_documents[2], memcached_chart)
keystone_chart = armada_manifest.find_chart_document('keystone')
self.assertEqual(chart_documents[3], keystone_chart)
# checking if all the chart group documents are present
self.assertIsInstance(chart_groups, list)
keystone_infra_services_chart_group = armada_manifest. \
find_chart_group_document('keystone-infra-services')
self.assertEqual(chart_groups[0],
keystone_infra_services_chart_group)
openstack_keystone_chart_group = armada_manifest. \
find_chart_group_document('openstack-keystone')
self.assertEqual(chart_groups[1], openstack_keystone_chart_group)
# verifying the manifests
self.assertIsInstance(manifests, list)
self.assertEqual(manifests[0], armada_manifest.manifest)
def test_verify_chart_documents(self):
armada_manifest = manifest.Manifest(self.documents)
helm_toolkit_chart = armada_manifest. \
find_chart_document('helm-toolkit')
self.assertIsInstance(helm_toolkit_chart, dict)
self.assertEqual(self.documents[0], helm_toolkit_chart)
mariadb_chart = armada_manifest.find_chart_document('mariadb')
self.assertIsInstance(mariadb_chart, dict)
self.assertEqual(self.documents[1], mariadb_chart)
memcached_chart = armada_manifest.find_chart_document('memcached')
self.assertIsInstance(memcached_chart, dict)
self.assertEqual(self.documents[2], memcached_chart)
keystone_chart = armada_manifest.find_chart_document('keystone')
self.assertIsInstance(keystone_chart, dict)
self.assertEqual(self.documents[3], keystone_chart)
def test_verify_chart_group_documents(self):
armada_manifest = manifest.Manifest(self.documents)
ok_chart = armada_manifest. \
find_chart_group_document('openstack-keystone')
self.assertIsInstance(ok_chart, dict)
self.assertEqual(self.documents[-2], ok_chart)
armada_manifest = manifest.Manifest(self.documents)
kis_chart = armada_manifest.find_chart_group_document(
'keystone-infra-services')
self.assertIsInstance(kis_chart, dict)
self.assertEqual(self.documents[-3], kis_chart)
def test_verify_build_armada_manifest(self):
armada_manifest = manifest.Manifest(self.documents)
built_armada_manifest = armada_manifest.build_armada_manifest()
self.assertIsNotNone(built_armada_manifest)
self.assertIsInstance(built_armada_manifest, dict)
# the first chart group in the Armada manifest
keystone_infra_services_chart_group = armada_manifest. \
find_chart_group_document('keystone-infra-services')
keystone_infra_services_chart_group_data = \
keystone_infra_services_chart_group.get('data')
self.assertEqual(keystone_infra_services_chart_group_data,
built_armada_manifest['data']['chart_groups'][0])
# the first chart group in the Armada manifest
openstack_keystone_chart_group = armada_manifest. \
find_chart_group_document('openstack-keystone')
openstack_keystone_chart_group_data = \
openstack_keystone_chart_group.get('data')
self.assertEqual(openstack_keystone_chart_group_data,
built_armada_manifest['data']['chart_groups'][1])
def test_verify_build_chart_group_deps(self):
armada_manifest = manifest.Manifest(self.documents)
# building the deps for openstack-keystone chart group
chart_group = armada_manifest.find_chart_group_document(
'openstack-keystone')
openstack_keystone_chart_group_deps = armada_manifest. \
build_chart_group(chart_group)
openstack_keystone_chart_group_deps_dep_added = \
openstack_keystone_chart_group_deps[
'data']['chart_group'][0]['chart']['dependencies']
# keystone chart dependencies
keystone_chart = armada_manifest.find_chart_document('keystone')
keystone_chart_with_deps = armada_manifest.build_chart_deps(
keystone_chart)
keystone_dependencies = keystone_chart_with_deps[
'data']['dependencies']
self.assertEqual(openstack_keystone_chart_group_deps_dep_added[0],
keystone_dependencies[0])
# building the deps for openstack-keystone chart group
chart_group = armada_manifest.find_chart_group_document(
'keystone-infra-services')
openstack_keystone_chart_group_deps = armada_manifest. \
build_chart_group(chart_group)
keystone_infra_services_dep_added = \
openstack_keystone_chart_group_deps[
'data']['chart_group'][0]['chart']['dependencies']
# building mariadb chart dependencies
mariadb_chart = armada_manifest.find_chart_document('mariadb')
mariadb_chart_with_deps = armada_manifest.build_chart_deps(
mariadb_chart)
mariadb_dependencies = mariadb_chart_with_deps[
'data']['dependencies']
# building memcached chart dependencies
memcached_chart = armada_manifest.find_chart_document('memcached')
memcached_chart_with_deps = armada_manifest.build_chart_deps(
memcached_chart)
memcached_dependencies = memcached_chart_with_deps[
'data']['dependencies']
self.assertEqual(keystone_infra_services_dep_added[0],
mariadb_dependencies[0])
self.assertEqual(keystone_infra_services_dep_added[0],
memcached_dependencies[0])
def test_verify_build_chart_deps(self):
armada_manifest = manifest.Manifest(self.documents)
# helm-toolkit chart
helm_toolkit_chart = armada_manifest.find_chart_document(
'helm-toolkit')
helm_toolkit_original_dependency = helm_toolkit_chart.get('data')
helm_toolkit_chart_with_deps = armada_manifest.build_chart_deps(
helm_toolkit_chart).get('data')
# since not dependent on other charts, the original and modified
# dependencies are the same
self.assertEqual(helm_toolkit_original_dependency,
helm_toolkit_chart_with_deps)
# helm-toolkit dependency, the basis for comparison of d
# ependencies in other charts
expected_helm_toolkit_dependency = {'chart': helm_toolkit_chart.get(
'data')}
# keystone chart dependencies
keystone_chart = armada_manifest.find_chart_document('keystone')
original_keystone_chart = copy.deepcopy(keystone_chart)
keystone_chart_with_deps = armada_manifest.build_chart_deps(
keystone_chart)
self.assertNotEqual(original_keystone_chart, keystone_chart_with_deps)
self.assertIn('data', keystone_chart_with_deps)
self.assertIn('dependencies', keystone_chart_with_deps['data'])
keystone_dependencies = keystone_chart_with_deps[
'data']['dependencies']
self.assertIsInstance(keystone_dependencies, list)
self.assertEqual(1, len(keystone_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
keystone_dependencies[0])
# mariadb chart dependencies
mariadb_chart = armada_manifest.find_chart_document('mariadb')
original_mariadb_chart = copy.deepcopy(mariadb_chart)
mariadb_chart_with_deps = armada_manifest.build_chart_deps(
mariadb_chart)
self.assertNotEqual(original_mariadb_chart, mariadb_chart_with_deps)
self.assertIn('data', mariadb_chart_with_deps)
self.assertIn('dependencies', mariadb_chart_with_deps['data'])
mariadb_dependencies = mariadb_chart_with_deps[
'data']['dependencies']
self.assertIsInstance(mariadb_dependencies, list)
self.assertEqual(1, len(mariadb_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
mariadb_dependencies[0])
# memcached chart dependencies
memcached_chart = armada_manifest.find_chart_document('memcached')
original_memcached_chart = copy.deepcopy(memcached_chart)
memcached_chart_with_deps = armada_manifest.build_chart_deps(
memcached_chart)
self.assertNotEqual(original_memcached_chart,
memcached_chart_with_deps)
self.assertIn('data', memcached_chart_with_deps)
self.assertIn('dependencies', memcached_chart_with_deps['data'])
memcached_dependencies = memcached_chart_with_deps[
'data']['dependencies']
self.assertIsInstance(memcached_dependencies, list)
self.assertEqual(1, len(memcached_dependencies))
self.assertEqual(expected_helm_toolkit_dependency,
memcached_dependencies[0])
class ManifestNegativeTestCase(testtools.TestCase):
def setUp(self):
super(ManifestNegativeTestCase, self).setUp()
examples_dir = os.path.join(
os.getcwd(), 'armada', 'tests', 'unit', 'resources')
with open(os.path.join(examples_dir, 'keystone-manifest.yaml')) as f:
self.documents = list(yaml.safe_load_all(f.read()))
def test_get_documents_multi_manifests_raises_value_error(self):
# Validates that finding multiple manifests without `target_manifest`
# flag raises exceptions.ManifestException.
documents = copy.deepcopy(self.documents)
documents.append(documents[-1]) # Copy the last manifest.
error_re = r'Multiple manifests are not supported.*'
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents)
def test_get_documents_multi_target_manifests_raises_value_error(self):
# Validates that finding multiple manifests with `target_manifest`
# flag raises exceptions.ManifestException.
documents = copy.deepcopy(self.documents)
documents.append(documents[-1]) # Copy the last manifest.
error_re = r'Multiple manifests are not supported.*'
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents, target_manifest='armada-manifest')
def test_get_documents_missing_manifest(self):
# Validates exceptions.ManifestException is thrown if no manifest is
# found. Manifest is last document in sample YAML.
error_re = ('Documents must be a list of documents with at least one '
'of each of the following schemas: .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
self.documents[:-1])
def test_get_documents_missing_charts(self):
# Validates exceptions.ManifestException is thrown if no chart is
# found. Charts are first 4 documents in sample YAML.
error_re = ('Documents must be a list of documents with at least one '
'of each of the following schemas: .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
self.documents[4:])
def test_get_documents_missing_chart_groups(self):
# Validates exceptions.ManifestException is thrown if no chart is
# found. ChartGroups are 5-6 documents in sample YAML.
documents = self.documents[:4] + [self.documents[-1]]
error_re = ('Documents must be a list of documents with at least one '
'of each of the following schemas: .*')
self.assertRaisesRegexp(
exceptions.ManifestException, error_re, manifest.Manifest,
documents)
def test_find_chart_document_negative(self):
armada_manifest = manifest.Manifest(self.documents)
error_re = r'Could not find a %s named "%s"' % (
const.DOCUMENT_CHART, 'invalid')
self.assertRaisesRegexp(exceptions.ManifestException, error_re,
armada_manifest.find_chart_document, 'invalid')
def test_find_group_document_negative(self):
armada_manifest = manifest.Manifest(self.documents)
error_re = r'Could not find a %s named "%s"' % (
const.DOCUMENT_GROUP, 'invalid')
self.assertRaisesRegexp(exceptions.ManifestException, error_re,
armada_manifest.find_chart_group_document,
'invalid')
def test_build_chart_deps_with_missing_dependency_fails(self):
"""Validate that attempting to build a chart that points to
a missing dependency fails.
"""
self.documents[1]['data']['dependencies'] = ['missing-dependency']
valid, details = validate.validate_armada_documents(self.documents)
self.assertFalse(valid)
def test_build_chart_group_with_missing_chart_grp_fails(self):
"""Validate that attempting to build a chart group document with
missing chart group fails.
"""
self.documents[5]['data']['chart_group'] = ['missing-chart-group']
valid, details = validate.validate_armada_documents(self.documents)
self.assertFalse(valid)
def test_build_armada_manifest_with_missing_chart_grps_fails(self):
"""Validate that attempting to build a manifest with missing
chart groups fails.
"""
self.documents[6]['data']['chart_groups'] = ['missing-chart-groups']
valid, details = validate.validate_armada_documents(self.documents)
self.assertFalse(valid)
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom admissions.models import *\n\nclass Command(BaseCommand):\n help = 'Recalculate Jelley scores and ranks'\n\n def add_arguments(self, parser):\n parser.add_argumen... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom admissions.models import *\n\nclass Command(BaseCommand):\n help = 'Recalculate Jelley scores and ranks'\n\n def add_arguments(self, parser):\n pa... | ```python
from django.core.management.base import BaseCommand, CommandError
from admissions.models import *
class Command(BaseCommand):
help = 'Recalculate Jelley scores and ranks'
def add_arguments(self, parser):
parser.add_argument('tag', nargs='?', default='test')
def handle(self, *args, **options):
weights = Weights.objects.last()
all_students = Candidate.objects.all()
for s in all_students:
s.stored_jell_score = s.calc_jell_score(weights)
s.save()
self.stdout.write('Jelley score of {0} is {1}'.format(s.ucas_id, s.stored_jell_score))
ordered = Candidate.objects.order_by('-stored_jell_score').all()
first = True
index = 1
for s in ordered:
if first:
s.stored_rank = index
previous_score = s.stored_jell_score
previous_rank = index
first = False
else:
if s.stored_jell_score == previous_score:
s.stored_rank = previous_rank
else:
s.stored_rank = index
previous_score = s.stored_jell_score
previous_rank = index
s.save()
self.stdout.write('Rank of {0} is {1} ({2})'.format(s.ucas_id, s.stored_rank, index))
index = index + 1
``` |
[
{
"content": "```python\nfrom setuptools import setup\nimport os\nimport sys\n\nfrom distutils.core import Extension\n\nversion = '0.12.2'\n\nextLevensthein = Extension('Levenshtein._levenshtein',\n sources = ['Levenshtein/_levenshtein.c'],\n )\n\nif sys.versi... | [
{
"content": "<|memory_start|>```python\nfrom setuptools import setup\nimport os\nimport sys\n\nfrom distutils.core import Extension\n\nversion = '0.12.2'\n\nextLevensthein = Extension('Levenshtein._levenshtein',\n sources = ['Levenshtein/_levenshtein.c'],\n )... | ```python
from setuptools import setup
import os
import sys
from distutils.core import Extension
version = '0.12.2'
extLevensthein = Extension('Levenshtein._levenshtein',
sources = ['Levenshtein/_levenshtein.c'],
)
if sys.version_info >= (3, 0):
_open = lambda f: open(f, encoding='utf8')
else:
_open = open
setup(name='python-Levenshtein',
version=version,
description="Python extension for computing string edit distances and similarities.",
long_description=_open("README.rst").read() + "\n" +
_open(os.path.join("HISTORY.txt")).read(),
# Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython"
],
keywords='string Levenshtein comparison edit-distance',
author='Antti Haapala',
author_email='antti@haapala.name',
url='http://github.com/ztane/python-Levenshtein',
license='GPL',
packages=['Levenshtein'],
namespace_packages=[],
include_package_data=True,
zip_safe=False,
ext_modules = [extLevensthein],
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
""",
)
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#!/usr/bin/env python\n#\n# OmsAgentForLinux Extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in com... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# OmsAgentForLinux Extension\n#\n# Copyright 2015 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this fi... | ```python
#!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import re
import sys
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib
import urllib2
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# Global Variables
PackagesDirectory = 'packages'
BundleFileName = 'omsagent-1.4.4-210.universal.x64.sh'
GUIDRegex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
GUIDOnlyRegex = r'^' + GUIDRegex + '$'
SCOMCertIssuerRegex = r'^[\s]*Issuer:[\s]*CN=SCX-Certificate/title=SCX' + GUIDRegex + ', DC=.*$'
SCOMPort = 1270
PostOnboardingSleepSeconds = 5
InitialRetrySleepSeconds = 30
IsUpgrade = False
# Paths
OMSAdminPath = '/opt/microsoft/omsagent/bin/omsadmin.sh'
OMSAgentServiceScript = '/opt/microsoft/omsagent/bin/service_control'
OMIConfigEditorPath = '/opt/omi/bin/omiconfigeditor'
OMIServerConfPath = '/etc/opt/omi/conf/omiserver.conf'
EtcOMSAgentPath = '/etc/opt/microsoft/omsagent/'
VarOMSAgentPath = '/var/opt/microsoft/omsagent/'
SCOMCertPath = '/etc/opt/microsoft/scx/ssl/scx.pem'
# Commands
# Always use upgrade - will handle install if scx, omi are not installed or
# upgrade if they are
InstallCommandTemplate = '{0} --upgrade'
UninstallCommandTemplate = '{0} --remove'
WorkspaceCheckCommand = '{0} -l'.format(OMSAdminPath)
OnboardCommandWithOptionalParams = '{0} -w {1} -s {2} {3}'
RestartOMSAgentServiceCommand = '{0} restart'.format(OMSAgentServiceScript)
DisableOMSAgentServiceCommand = '{0} disable'.format(OMSAgentServiceScript)
# Error codes
DPKGLockedErrorCode = 12
InstallErrorCurlNotInstalled = 64
EnableErrorOMSReturned403 = 5
EnableErrorOMSReturnedNon200 = 6
EnableErrorResolvingHost = 7
EnableErrorOnboarding = 8
EnableCalledBeforeSuccessfulInstall = 9
UnsupportedOpenSSL = 60
# OneClick error codes
OneClickErrorCode = 40
ManagedIdentityExtMissingErrorCode = 41
ManagedIdentityExtErrorCode = 42
MetadataAPIErrorCode = 43
OMSServiceOneClickErrorCode = 44
MissingorInvalidParameterErrorCode = 11
UnwantedMultipleConnectionsErrorCode = 10
CannotConnectToOMSErrorCode = 55
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# OneClick Constants
ManagedIdentityExtListeningURLPath = '/var/lib/waagent/ManagedIdentity-Settings'
GUIDRegex = '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
OAuthTokenResource = 'https://management.core.windows.net/'
OMSServiceValidationEndpoint = 'https://global.oms.opinsights.azure.com/ManagedIdentityService.svc/Validate'
AutoManagedWorkspaceCreationSleepSeconds = 20
# vmResourceId Metadata Service
VMResourceIDMetadataHost = '169.254.169.254'
VMResourceIDMetadataEndpoint = 'http://{0}/metadata/instance?api-version=2017-08-01'.format(VMResourceIDMetadataHost)
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
os.chmod(ext_log_path, 700)
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('OmsAgentForLinux started to handle.')
global IsUpgrade
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
IsUpgrade = True
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code is 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.EnterpriseCloud.' \
'Monitoring.OmsAgentForLinux'
elif exit_code is DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
elif exit_code is not 0:
message = '{0} failed with exit code {1}'.format(operation,
exit_code)
except OmsAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def dummy_command():
"""
Do nothing and return 0
"""
return 0
def install():
"""
Ensure that this VM distro and version are supported.
Install the OMSAgent shell bundle, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
exit_if_vm_not_supported('Install')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
workspaceId = public_settings.get('workspaceId')
check_workspace_id(workspaceId)
# In the case where a SCOM connection is already present, we should not
# create conflicts by installing the OMSAgent packages
stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections')
if (stopOnMultipleConnections is not None
and stopOnMultipleConnections is True):
detect_multiple_connections(workspaceId)
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
os.chmod(bundle_path, 100)
cmd = InstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since install can fail due to concurrent package operations
exit_code = run_command_with_retries(cmd, retries = 15,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
return exit_code
def uninstall():
"""
Uninstall the OMSAgent shell bundle.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
global IsUpgrade
os.chmod(bundle_path, 100)
cmd = UninstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since uninstall can fail due to concurrent package operations
exit_code = run_command_with_retries(cmd, retries = 5,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
if IsUpgrade:
IsUpgrade = False
else:
remove_workspace_configuration()
return exit_code
def enable():
    """
    Onboard the OMSAgent to the specified OMS workspace.
    This includes enabling the OMS process on the VM.
    This call will return non-zero or throw an exception if
    the settings provided are incomplete or incorrect.
    Note: enable operation times out from WAAgent at 5 minutes

    Raises ParameterMissingException when required settings are missing,
    MetadataAPIException when the VM resource ID cannot be determined,
    and OneClick-related exceptions in the managed-workspace path.
    """
    exit_if_vm_not_supported('Enable')

    public_settings, protected_settings = get_settings()

    if public_settings is None:
        raise ParameterMissingException('Public configuration must be ' \
                                        'provided')
    if protected_settings is None:
        raise ParameterMissingException('Private configuration must be ' \
                                        'provided')

    vmResourceId = protected_settings.get('vmResourceId')

    # If vmResourceId is not provided in private settings, get it from metadata API
    if vmResourceId is None or not vmResourceId:
        vmResourceId = get_vmresourceid_from_metadata()
        hutil_log_info('vmResourceId from Metadata API is {0}'.format(vmResourceId))

    if vmResourceId is None:
        raise MetadataAPIException('Failed to get vmResourceId from ' \
                                   'Metadata API')

    enableAutomaticManagement = public_settings.get('enableAutomaticManagement')

    if (enableAutomaticManagement is not None
           and enableAutomaticManagement is True):
        hutil_log_info('enableAutomaticManagement is set to true; the ' \
                       'workspace ID and key will be determined by the OMS ' \
                       'service.')

        workspaceInfo = retrieve_managed_workspace(vmResourceId)
        if (workspaceInfo is None or 'WorkspaceId' not in workspaceInfo
                or 'WorkspaceKey' not in workspaceInfo):
            raise OneClickException('Workspace info was not determined')
        else:
            # Note: do NOT log workspace keys!
            hutil_log_info('Managed workspaceInfo has been retrieved')
            workspaceId = workspaceInfo['WorkspaceId']
            workspaceKey = workspaceInfo['WorkspaceKey']
            try:
                check_workspace_id_and_key(workspaceId, workspaceKey)
            except InvalidParameterError as e:
                raise OMSServiceOneClickException('Received invalid ' \
                                                  'workspace info: ' \
                                                  '{0}'.format(e))

    else:
        workspaceId = public_settings.get('workspaceId')
        workspaceKey = protected_settings.get('workspaceKey')
        check_workspace_id_and_key(workspaceId, workspaceKey)

    # Check if omsadmin script is available
    if not os.path.exists(OMSAdminPath):
        log_and_exit('Enable', EnableCalledBeforeSuccessfulInstall,
                     'OMSAgent onboarding script {0} does not exist. Enable ' \
                     'cannot be called before install.'.format(OMSAdminPath))

    vmResourceIdParam = '-a {0}'.format(vmResourceId)

    proxy = protected_settings.get('proxy')
    proxyParam = ''
    if proxy is not None:
        proxyParam = '-p {0}'.format(proxy)

    optionalParams = '{0} {1}'.format(proxyParam, vmResourceIdParam)
    onboard_cmd = OnboardCommandWithOptionalParams.format(OMSAdminPath,
                                                          workspaceId,
                                                          workspaceKey,
                                                          optionalParams)

    hutil_log_info('Handler initiating onboarding.')
    exit_code = run_command_with_retries(onboard_cmd, retries = 5,
                                         retry_check = retry_onboarding,
                                         final_check = raise_if_no_internet,
                                         check_error = True, log_cmd = False)

    # Fix: compare the exit code with ==, not 'is'; integer identity is an
    # implementation detail of CPython's small-int cache
    if exit_code == 0:
        # Create a marker file to denote the workspace that was
        # onboarded using the extension. This will allow supporting
        # multi-homing through the extension like Windows does
        extension_marker_path = os.path.join(EtcOMSAgentPath, workspaceId,
                                             'conf/.azure_extension_marker')
        if os.path.exists(extension_marker_path):
            hutil_log_info('Extension marker file {0} already ' \
                           'created'.format(extension_marker_path))
        else:
            try:
                open(extension_marker_path, 'w').close()
                hutil_log_info('Created extension marker file ' \
                               '{0}'.format(extension_marker_path))
            except IOError as e:
                hutil_log_error('Error creating {0} with error: ' \
                                '{1}'.format(extension_marker_path, e))

        # Sleep to prevent bombarding the processes, then restart all processes
        # to resolve any issues with auto-started processes from --upgrade
        time.sleep(PostOnboardingSleepSeconds)
        run_command_and_log(RestartOMSAgentServiceCommand)

    return exit_code
def remove_workspace_configuration():
    """
    Delete all workspace state from both the etc and var trees.

    This distinguishes extension removal from extension upgrade: waagent's
    upgrade routine calls 'remove' on the old version before calling
    'upgrade' on the new one, so the workspace configuration must persist
    across an upgrade but be fully deleted on a real removal.
    """
    public_settings, _ = get_settings()
    workspace = public_settings.get('workspaceId')

    for base_path in (os.path.join(EtcOMSAgentPath, workspace),
                      os.path.join(VarOMSAgentPath, workspace)):
        # Bottom-up walk so children are removed before their parents
        for current_dir, child_dirs, child_files in os.walk(base_path,
                                                            topdown=False):
            for file_name in child_files:
                os.remove(os.path.join(current_dir, file_name))
            for dir_name in child_dirs:
                os.rmdir(os.path.join(current_dir, dir_name))
        os.rmdir(base_path)

    hutil_log_info('Removed Workspace Configuration')
def get_vmresourceid_from_metadata():
    """
    Query the Azure Instance Metadata Service for this VM's compute
    information and construct its ARM resource ID.

    Returns the resource ID string, or None when the service cannot be
    reached or returns unexpected data.
    """
    req = urllib2.Request(VMResourceIDMetadataEndpoint)
    req.add_header('Metadata', 'True')
    try:
        response = json.loads(urllib2.urlopen(req).read())
        return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['name'])
    except urllib2.HTTPError as e:
        hutil_log_error('Request to Metadata service URL ' \
                        'failed with an HTTPError: {0}'.format(e))
        hutil_log_info('Response from Metadata service: ' \
                       '{0}'.format(e.read()))
        return None
    # Fix: catch Exception instead of a bare except, which would also
    # swallow SystemExit and KeyboardInterrupt
    except Exception:
        hutil_log_error('Unexpected error from Metadata service')
        return None
def retrieve_managed_workspace(vm_resource_id):
    """
    Resolve the workspace to onboard to for the OneClick scenario.

    EnableAutomaticManagement has been set to true; the ManagedIdentity
    extension and the VM resource ID are required. Using these plus the
    Metadata API, the OMS service is asked which workspace ID and key to
    onboard with. Returns the workspace info dict, or None when the
    tenant ID or access token could not be obtained.
    """
    # OneClick requires the ManagedIdentity extension to be present
    if not os.path.exists(ManagedIdentityExtListeningURLPath):
        raise ManagedIdentityExtMissingException

    # Determine the Tenant ID using the Metadata API
    tenant_id = get_tenant_id_from_metadata_api(vm_resource_id)
    if tenant_id is None:
        return None
    hutil_log_info('Tenant ID from Metadata API is {0}'.format(tenant_id))

    # Retrieve an OAuth token using the ManagedIdentity extension
    access_token = get_access_token(tenant_id, OAuthTokenResource)
    if access_token is None:
        return None

    # Query OMS service for the workspace info for onboarding
    return get_workspace_info_from_oms(vm_resource_id, tenant_id,
                                       access_token)
def disable():
    """
    Stop all OMS workspace processes on the VM.

    Note: the disable operation times out from WAAgent at 15 minutes.
    """
    # The service control script only exists after a successful install
    if not os.path.exists(OMSAgentServiceScript):
        log_and_exit('Disable', 1, 'OMSAgent service control script {0} does' \
                                   'not exist. Disable cannot be called ' \
                                   'before install.'.format(OMSAgentServiceScript))
        # Defensive return in case log_and_exit ever returns
        return 1

    exit_code, _unused_output = run_command_and_log(DisableOMSAgentServiceCommand)
    return exit_code
# Dispatch table: maps the operation name passed by the handler to the
# function implementing it
operations = {'Install' : install,
              'Enable' : enable,
              'Disable' : disable,
              'Uninstall' : uninstall,
              # Upgrade is noop since omsagent.py->install() will be called
              # everytime upgrade is done due to upgradeMode =
              # "UpgradeWithInstall" set in HandlerManifest
              'Update' : dummy_command
             }
def parse_context(operation):
    """
    Build a HandlerUtility object initialized for this operation.

    Returns None when the required waagent handler modules were never
    imported. Raises ParameterMissingException when the settings JSON is
    missing a required key.
    """
    handler_util = None
    waagent_modules_loaded = ('Utils.WAAgentUtil' in sys.modules
                              and 'Utils.HandlerUtil' in sys.modules)
    if waagent_modules_loaded:
        try:
            handler_util = HUtil.HandlerUtility(waagent.Log, waagent.Error)
            handler_util.do_parse_context(operation)
        # do_parse_context may throw KeyError if a necessary JSON key is
        # not present in settings
        except KeyError as e:
            waagent_log_error('Unable to parse context with error: ' \
                              '{0}'.format(e))
            raise ParameterMissingException
    return handler_util
def is_vm_supported_for_extension():
    """
    Checks if the VM this extension is running on is supported by OMSAgent
    Returns for platform.linux_distribution() vary widely in format, such as
    '7.3.1611' returned for a VM with CentOS 7, so the first provided
    digits must match
    The supported distros of the OMSAgent-for-Linux, as well as Ubuntu 16.10,
    are allowed to utilize this VM extension. All other distros will get
    error code 51

    Returns a (vm_supported, vm_dist, vm_ver) tuple.
    """
    supported_dists = {'redhat' : ('5', '6', '7'), # CentOS
                       'centos' : ('5', '6', '7'), # CentOS
                       'red hat' : ('5', '6', '7'), # Oracle, RHEL
                       'oracle' : ('5', '6', '7'), # Oracle
                       'debian' : ('6', '7', '8', '9'), # Debian
                       'ubuntu' : ('12.04', '14.04', '15.04', '15.10',
                                   '16.04', '16.10'), # Ubuntu
                       'suse' : ('11', '12') #SLES
    }

    try:
        vm_dist, vm_ver, vm_id = platform.linux_distribution()
    except AttributeError:
        # Older Python builds expose the same data via platform.dist
        vm_dist, vm_ver, vm_id = platform.dist()

    vm_supported = False

    # Find this VM distribution in the supported list
    for supported_dist in supported_dists.keys():
        if not vm_dist.lower().startswith(supported_dist):
            continue

        # Check if this VM distribution version is supported
        vm_ver_split = vm_ver.split('.')
        for supported_ver in supported_dists[supported_dist]:
            supported_ver_split = supported_ver.split('.')

            # If vm_ver is at least as precise (at least as many digits) as
            # supported_ver and matches all the supported_ver digits, then
            # this VM is guaranteed to be supported
            vm_ver_match = True
            for idx, supported_ver_num in enumerate(supported_ver_split):
                try:
                    supported_ver_num = int(supported_ver_num)
                    vm_ver_num = int(vm_ver_split[idx])
                except IndexError:
                    vm_ver_match = False
                    break
                # Fix: compare ints with !=, not 'is not'; identity only
                # holds for CPython's cached small integers and silently
                # misbehaves for larger version components
                if vm_ver_num != supported_ver_num:
                    vm_ver_match = False
                    break
            if vm_ver_match:
                vm_supported = True
                break

        if vm_supported:
            break

    return vm_supported, vm_dist, vm_ver
def exit_if_vm_not_supported(operation):
    """
    Verify this VM's distribution and version are supported by the
    OMSAgent; when unsupported, log error code 51 and exit the process.
    """
    supported, dist, version = is_vm_supported_for_extension()
    if not supported:
        log_and_exit(operation, 51, 'Unsupported operation system: ' \
                                    '{0} {1}'.format(dist, version))
    return 0
def exit_if_openssl_unavailable(operation):
    """
    Ensure the openssl command-line tool is on the PATH; if it is not,
    log the UnsupportedOpenSSL error code and exit the process.
    """
    exit_code, output = run_get_output('which openssl', True, False)
    # Fix: compare the exit code with !=, not 'is not'; integer identity
    # is only reliable for CPython's small-int cache
    if exit_code != 0:
        log_and_exit(operation, UnsupportedOpenSSL, 'OpenSSL is not available')
    return 0
def check_workspace_id_and_key(workspace_id, workspace_key):
    """
    Validate the workspace ID (GUID format) and the workspace key
    (base64-encoded string); raise on any problem.
    """
    check_workspace_id(workspace_id)

    if workspace_key is None:
        raise ParameterMissingException('Workspace key must be provided')

    # A valid key survives a base64 decode/encode round trip unchanged
    try:
        round_trip = base64.b64encode(base64.b64decode(workspace_key))
        if round_trip != workspace_key:
            raise InvalidParameterError('Workspace key is invalid')
    except TypeError:
        raise InvalidParameterError('Workspace key is invalid')
def check_workspace_id(workspace_id):
    """
    Validate that the workspace ID is present and matches the GUID
    regex; raise otherwise.
    """
    if workspace_id is None:
        raise ParameterMissingException('Workspace ID must be provided')
    guid_pattern = re.compile(GUIDOnlyRegex, re.M)
    if guid_pattern.match(workspace_id) is None:
        raise InvalidParameterError('Workspace ID is invalid')
def detect_multiple_connections(workspace_id):
    """
    If the VM already has a workspace/SCOM configured, then we should
    disallow a new connection when stopOnMultipleConnections is used

    Throw an exception in these cases:
    - The workspace with the given workspace_id has not been onboarded
      to the VM, but at least one other workspace has been
    - The workspace with the given workspace_id has not been onboarded
      to the VM, and the VM is connected to SCOM

    If the extension operation is connecting to an already-configured
    workspace, it is not a stopping case
    """
    other_connection_exists = False
    # Preferred path: ask omsadmin.sh which workspaces are configured
    if os.path.exists(OMSAdminPath):
        exit_code, output = run_get_output(WorkspaceCheckCommand,
                                           chk_err = False)

        if output.strip().lower() != 'no workspace':
            for line in output.split('\n'):
                if workspace_id in line:
                    hutil_log_info('The workspace to be enabled has already ' \
                                   'been configured on the VM before; ' \
                                   'continuing despite ' \
                                   'stopOnMultipleConnections flag')
                    return
                else:
                    # Note: if scom workspace dir is created, a line containing
                    # "Workspace(SCOM Workspace): scom" will be here
                    # If any other line is here, it may start sending data later
                    other_connection_exists = True
    else:
        # Fallback: scan the etc config tree for workspace directories
        # (each onboarded workspace gets a GUID-named subdirectory)
        for dir_name, sub_dirs, files in os.walk(EtcOMSAgentPath):
            for sub_dir in sub_dirs:
                sub_dir_name = os.path.basename(sub_dir)
                workspace_search = re.compile(GUIDOnlyRegex, re.M)
                if sub_dir_name == workspace_id:
                    hutil_log_info('The workspace to be enabled has already ' \
                                   'been configured on the VM before; ' \
                                   'continuing despite ' \
                                   'stopOnMultipleConnections flag')
                    return
                elif (workspace_search.match(sub_dir_name)
                        or sub_dir_name == 'scom'):
                    other_connection_exists = True

    if other_connection_exists:
        err_msg = ('This machine is already connected to some other Log ' \
                   'Analytics workspace, please set ' \
                   'stopOnMultipleConnections to false in public ' \
                   'settings or remove this property, so this machine ' \
                   'can connect to new workspaces, also it means this ' \
                   'machine will get billed multiple times for each ' \
                   'workspace it report to. ' \
                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
        # This exception will get caught by the main method
        raise UnwantedMultipleConnectionsException(err_msg)
    else:
        # No other workspace found; SCOM may still be connected
        detect_scom_connection()
def detect_scom_connection():
    """
    Determine whether the VM is monitored by SCOM and, if so, raise
    UnwantedMultipleConnectionsException.

    The VM is assumed to be monitored by SCOM when both hold:
    1. The SCOM port is open and omiserver is listening on it --
       determined via omsadmin.sh -o, omiconfigeditor, or by parsing
       /etc/omi/conf/omiserver.conf directly, in that order of preference
    2. The scx certificate is signed by a SCOM server: the cert at
       /etc/opt/omi/ssl/omi-host-<hostname>.pem (softlinked from
       /etc/opt/microsoft/scx/ssl/scx.pem) has an issuer field like
       CN=SCX-Certificate/title=<GUID>, DC=<SCOM server hostname>
       (e.g CN=SCX-Certificate/title=SCX94a1f46d-2ced-4739-9b6a-1f06156ca4ac,
       DC=NEB-OM-1502733)
    """
    scom_port_open = None # return when determine this is false
    cert_signed_by_scom = False

    # Preferred: ask omsadmin.sh whether the SCOM port is open
    if os.path.exists(OMSAdminPath):
        scom_port_open = detect_scom_using_omsadmin()
        if scom_port_open is False:
            return

    # If omsadmin.sh option is not available, use omiconfigeditor
    if (scom_port_open is None and os.path.exists(OMIConfigEditorPath)
            and os.path.exists(OMIServerConfPath)):
        scom_port_open = detect_scom_using_omiconfigeditor()
        if scom_port_open is False:
            return

    # If omiconfigeditor option is not available, directly parse omiserver.conf
    if scom_port_open is None and os.path.exists(OMIServerConfPath):
        scom_port_open = detect_scom_using_omiserver_conf()
        if scom_port_open is False:
            return

    if scom_port_open is None:
        hutil_log_info('SCOM port could not be determined to be open')
        return

    # Parse the certificate to determine if SCOM issued it
    if os.path.exists(SCOMCertPath):
        exit_if_openssl_unavailable('Install')

        cert_cmd = 'openssl x509 -in {0} -noout -text'.format(SCOMCertPath)
        cert_exit_code, cert_output = run_get_output(cert_cmd, chk_err = False,
                                                     log_cmd = False)
        # Fix: compare the exit code with ==, not 'is'; integer identity
        # is an implementation detail of CPython's small-int cache
        if cert_exit_code == 0:
            issuer_re = re.compile(SCOMCertIssuerRegex, re.M)
            if issuer_re.search(cert_output):
                hutil_log_info('SCOM cert exists and is signed by SCOM server')
                cert_signed_by_scom = True
            else:
                hutil_log_info('SCOM cert exists but is not signed by SCOM ' \
                               'server')
        else:
            hutil_log_error('Error reading SCOM cert; cert could not be ' \
                            'determined to be signed by SCOM server')
    else:
        hutil_log_info('SCOM cert does not exist')

    if scom_port_open and cert_signed_by_scom:
        err_msg = ('This machine may already be connected to a System ' \
                   'Center Operations Manager server. Please set ' \
                   'stopOnMultipleConnections to false in public settings ' \
                   'or remove this property to allow connection to the Log ' \
                   'Analytics workspace. ' \
                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
        raise UnwantedMultipleConnectionsException(err_msg)
def detect_scom_using_omsadmin():
    """
    This method assumes that OMSAdminPath exists; if packages have not
    been installed yet, this may not exist.

    Returns True when omsadmin.sh reports the SCOM port open, False when
    it reports the port closed, and None (implicitly) when the installed
    omsadmin.sh does not support the -o option.
    """
    omsadmin_cmd = '{0} -o'.format(OMSAdminPath)
    exit_code, output = run_get_output(omsadmin_cmd, False, False)
    # Guard against older omsadmin.sh versions that lack the -o option
    if ('illegal option' not in output.lower()
            and 'unknown option' not in output.lower()):
        # Fix: compare exit codes with ==, not 'is'; integer identity is
        # only reliable for CPython's cached small ints
        if exit_code == 0:
            hutil_log_info('According to {0}, SCOM port is ' \
                           'open'.format(omsadmin_cmd))
            return True
        elif exit_code == 1:
            hutil_log_info('According to {0}, SCOM port is not ' \
                           'open'.format(omsadmin_cmd))
            return False
def detect_scom_using_omiconfigeditor():
    """
    This method assumes that the relevant files exist.

    Returns True when omiconfigeditor reports the SCOM port open, False
    when it reports the port closed, and None (implicitly) when the
    installed omiconfigeditor does not support the query option.
    """
    omi_cmd = '{0} httpsport -q {1} < {2}'.format(OMIConfigEditorPath,
                                                  SCOMPort, OMIServerConfPath)
    exit_code, output = run_get_output(omi_cmd, False, False)
    # Guard against older omiconfigeditor versions that lack the option
    if ('illegal option' not in output.lower()
            and 'unknown option' not in output.lower()):
        # Fix: compare exit codes with ==, not 'is'; integer identity is
        # only reliable for CPython's cached small ints
        if exit_code == 0:
            hutil_log_info('According to {0}, SCOM port is ' \
                           'open'.format(omi_cmd))
            return True
        elif exit_code == 1:
            hutil_log_info('According to {0}, SCOM port is not ' \
                           'open'.format(omi_cmd))
            return False
def detect_scom_using_omiserver_conf():
    """
    Parse omiserver.conf directly (the file is assumed to exist) and
    return True when the SCOM port appears in its httpsport setting,
    False otherwise.
    """
    with open(OMIServerConfPath, 'r') as conf_file:
        conf_text = conf_file.read()

    httpsport_pattern = re.compile(r'^[\s]*httpsport[\s]*=(.*)$', re.M)
    port_match = httpsport_pattern.search(conf_text)
    if port_match is not None and port_match.group(1) is not None:
        # httpsport may list several ports separated by commas and/or spaces
        port_tokens = port_match.group(1).replace(',', ' ').split(' ')
        if str(SCOMPort) in port_tokens:
            hutil_log_info('SCOM port is listed in ' \
                           '{0}'.format(OMIServerConfPath))
            return True

    hutil_log_info('SCOM port is not listed in ' \
                   '{0}'.format(OMIServerConfPath))
    return False
def run_command_and_log(cmd, check_error = True, log_cmd = True):
    """
    Run a shell command and log its combined stdout/stderr output.

    The output should never contain PII, but the command line itself
    might; pass log_cmd = False to keep the command text out of the logs.
    Returns an (exit_code, output) tuple.
    """
    exit_code, output = run_get_output(cmd, check_error, log_cmd)
    if log_cmd:
        log_message = 'Output of command "{0}": \n{1}'.format(cmd, output)
    else:
        log_message = 'Output: \n{0}'.format(output)
    hutil_log_info(log_message)
    return exit_code, output
def run_command_with_retries(cmd, retries, retry_check, final_check = None,
                             check_error = True, log_cmd = True,
                             initial_sleep_time = InitialRetrySleepSeconds,
                             sleep_increase_factor = 1):
    """
    Run cmd, retrying up to `retries` additional times.

    After each attempt, retry_check(exit_code, output) must return a
    (should_retry, message, retry_verbosely) triple; when retry_verbosely
    is True, the next attempt runs cmd with the standard -v flag appended.
    The sleep between attempts starts at initial_sleep_time and is
    multiplied by sleep_increase_factor each retry. Once attempts stop,
    final_check (if given) may map the last exit code to a different one.
    Returns the final exit code.
    """
    attempt = 0
    delay = initial_sleep_time
    effective_cmd = cmd
    verbose = False
    while attempt <= retries:
        # Note: once -v has been appended it stays appended; the flag is
        # only ever upgraded, mirroring the single assignment below
        if verbose:
            effective_cmd = cmd + ' -v'
        exit_code, output = run_command_and_log(effective_cmd, check_error,
                                                log_cmd)
        should_retry, retry_message, verbose = retry_check(exit_code, output)
        if not should_retry:
            break
        attempt += 1
        hutil_log_info(retry_message)
        time.sleep(delay)
        delay *= sleep_increase_factor

    if final_check is not None:
        exit_code = final_check(exit_code, output)

    return exit_code
def is_dpkg_locked(exit_code, output):
    """
    Return True when a failed command's output indicates that the dpkg
    status database is locked by another process (the output contains a
    line mentioning both 'dpkg' and 'lock'); False otherwise.
    """
    # Fix: compare the exit code with !=, not 'is not'; integer identity
    # is only reliable for CPython's small-int cache
    if exit_code != 0:
        dpkg_locked_search = r'^.*dpkg.+lock.*$'
        dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
        if dpkg_locked_re.search(output):
            return True
    return False
def was_curl_found(exit_code, output):
    """
    Return False when exit_code indicates that curl was not installed;
    this can occur when package lists need to be updated, or when some
    archives are out-of-date. Return True otherwise.
    """
    # Fix: compare error codes with ==, not 'is'; integer identity is
    # only reliable for CPython's small-int cache
    if exit_code == InstallErrorCurlNotInstalled:
        return False
    return True
def retry_if_dpkg_locked_or_curl_is_not_found(exit_code, output):
    """
    retry_check callback for package-related commands.

    Some commands fail because the package manager is locked (apt-get/dpkg
    only); this will allow retries on failing commands.
    Sometimes curl's dependencies (i.e. libcurl) are not installed; if this
    is the case on a VM with apt-get, 'apt-get -f install' should be run
    Sometimes curl is not installed and is also not found in the package list;
    if this is the case on a VM with apt-get, update the package list

    Returns a (should_retry, message, retry_verbosely) triple.
    """
    retry_verbosely = False

    dpkg_locked = is_dpkg_locked(exit_code, output)
    curl_found = was_curl_found(exit_code, output)
    apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
                                                       chk_err = False,
                                                       log_cmd = False)

    if dpkg_locked:
        return True, 'Retrying command because package manager is locked.', \
               retry_verbosely
    # Fixes: compare exit codes with ==, not 'is'; and match
    # 'unmet dependencies' in lowercase -- the original capitalized
    # literal could never match the lowercased output
    elif (not curl_found and apt_get_exit_code == 0 and
            ('apt-get -f install' in output
            or 'unmet dependencies' in output.lower())):
        hutil_log_info('Installing all dependencies of curl:')
        run_command_and_log('apt-get -f install')
        return True, 'Retrying command because curl and its dependencies ' \
                     'needed to be installed', retry_verbosely
    elif not curl_found and apt_get_exit_code == 0:
        hutil_log_info('Updating package lists to make curl available')
        run_command_and_log('apt-get update')
        return True, 'Retrying command because package lists needed to be ' \
                     'updated', retry_verbosely
    else:
        return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
    """
    Map the exit code to DPKGLockedErrorCode when dpkg is still locked
    after all retries have been exhausted; otherwise pass it through.
    """
    if is_dpkg_locked(exit_code, output):
        return DPKGLockedErrorCode
    return exit_code
def retry_onboarding(exit_code, output):
    """
    retry_check callback for the onboarding command. Retry under any of
    these conditions:
    - If the onboarding request returns 403: this may indicate that the agent
      GUID and certificate should be re-generated
    - If the onboarding request returns a different non-200 code: the OMS
      service may be temporarily unavailable
    - If the onboarding curl command returns an unaccounted-for error code,
      we should retry with verbose logging

    Returns a (should_retry, message, retry_verbosely) triple.
    """
    retry_verbosely = False

    # Fix: compare exit codes with ==, not 'is'; integer identity is only
    # reliable for CPython's small-int cache
    if exit_code == EnableErrorOMSReturned403:
        return True, 'Retrying the onboarding command to attempt generating ' \
                     'a new agent ID and certificate.', retry_verbosely
    elif exit_code == EnableErrorOMSReturnedNon200:
        return True, 'Retrying; the OMS service may be temporarily ' \
                     'unavailable.', retry_verbosely
    elif exit_code == EnableErrorOnboarding:
        return True, 'Retrying with verbose logging.', True
    return False, '', False
def raise_if_no_internet(exit_code, output):
    """
    Raise CannotConnectToOMSException when the onboarding script's exit
    code indicates that the OMS service hostname could not be resolved;
    otherwise return the exit code unchanged.
    """
    # Fix: compare exit codes with ==, not 'is'; integer identity is only
    # reliable for CPython's small-int cache
    if exit_code == EnableErrorResolvingHost:
        raise CannotConnectToOMSException
    return exit_code
def get_settings():
    """
    Retrieve the configuration for this extension operation as a
    (public_settings, protected_settings) tuple.

    Prefers the HandlerUtility object when available, then the cached
    SettingsDict; otherwise reads the latest .settings file directly,
    decrypting the protected settings with the waagent certificate, and
    caches the result in SettingsDict.
    """
    global SettingsDict
    public_settings = None
    protected_settings = None

    if HUtilObject is not None:
        public_settings = HUtilObject.get_public_settings()
        protected_settings = HUtilObject.get_protected_settings()
    elif SettingsDict is not None:
        public_settings = SettingsDict['public_settings']
        protected_settings = SettingsDict['protected_settings']
    else:
        SettingsDict = {}
        handler_env = get_handler_env()
        try:
            config_dir = str(handler_env['handlerEnvironment']['configFolder'])
        except:
            config_dir = os.path.join(os.getcwd(), 'config')

        seq_no = get_latest_seq_no()

        settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
        try:
            with open(settings_path, 'r') as settings_file:
                settings_txt = settings_file.read()
            settings = json.loads(settings_txt)
            h_settings = settings['runtimeSettings'][0]['handlerSettings']
            public_settings = h_settings['publicSettings']
            SettingsDict['public_settings'] = public_settings
        except:
            hutil_log_error('Unable to load handler settings from ' \
                            '{0}'.format(settings_path))

        # NOTE(review): if the try above failed before assigning
        # h_settings, this raises NameError -- confirm intended
        if (h_settings.has_key('protectedSettings')
                and h_settings.has_key('protectedSettingsCertThumbprint')
                and h_settings['protectedSettings'] is not None
                and h_settings['protectedSettingsCertThumbprint'] is not None):
            encoded_settings = h_settings['protectedSettings']
            settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
            # waagent stores the cert/key pair named by thumbprint
            encoded_cert_path = os.path.join('/var/lib/waagent',
                                             '{0}.crt'.format(
                                                       settings_thumbprint))
            encoded_key_path = os.path.join('/var/lib/waagent',
                                            '{0}.prv'.format(
                                                      settings_thumbprint))
            decoded_settings = base64.standard_b64decode(encoded_settings)
            decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
                          '-inkey {1}'.format(encoded_cert_path,
                                              encoded_key_path)

            try:
                session = subprocess.Popen([decrypt_cmd], shell = True,
                                           stdin = subprocess.PIPE,
                                           stderr = subprocess.STDOUT,
                                           stdout = subprocess.PIPE)
                output = session.communicate(decoded_settings)
            except OSError:
                # NOTE(review): 'output' is left unbound on this path,
                # so the line below would raise NameError -- confirm
                pass
            protected_settings_str = output[0]

            if protected_settings_str is None:
                log_and_exit('Enable', 1, 'Failed decrypting ' \
                                          'protectedSettings')
            protected_settings = ''
            try:
                protected_settings = json.loads(protected_settings_str)
            except:
                hutil_log_error('JSON exception decoding protected settings')

        SettingsDict['protected_settings'] = protected_settings

    return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
    """
    Write an extension status report to the .status file, mimicking
    HandlerUtil's do_status_report for when the hutil object is not
    available.
    """
    handler_env = get_handler_env()
    try:
        extension_version = str(handler_env['version'])
        status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
    except:
        extension_version = "1.0"
        status_dir = os.path.join(os.getcwd(), 'status')

    status_contents = [{
        "version" : extension_version,
        "timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "status" : {
            "name" : "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux",
            "operation" : operation,
            "status" : exit_status,
            "code" : exit_code,
            "formattedMessage" : {
                "lang" : "en-US",
                "message" : message
            }
        }
    }]

    # Use the status file matching the most recent settings sequence number
    status_path = os.path.join(status_dir,
                               '{0}.status'.format(get_latest_seq_no()))
    # Write to a temporary file first, then rename into place
    tmp_path = '{0}.tmp'.format(status_path)
    with open(tmp_path, 'w+') as tmp_file:
        tmp_file.write(json.dumps(status_contents))
    os.rename(tmp_path, status_path)
def get_handler_env():
    """
    Load HandlerEnvironment.json from the working directory once, cache
    the parsed JSON in the global HandlerEnvironment, and return it on
    every subsequent call.
    """
    global HandlerEnvironment
    if HandlerEnvironment is None:
        handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
        try:
            with open(handler_env_path, 'r') as handler_env_file:
                parsed_env = json.loads(handler_env_file.read())
            # The file may wrap the environment dict in a one-element list
            if type(parsed_env) == list:
                parsed_env = parsed_env[0]
            HandlerEnvironment = parsed_env
        except Exception as e:
            waagent_log_error(str(e))
    return HandlerEnvironment
def get_latest_seq_no():
    """
    Determine the latest operation settings sequence number to use:
    the numeric prefix of the most recently modified *.settings file in
    the config folder. The result is cached in the global
    SettingsSequenceNumber; 0 is returned when no file is found.
    """
    global SettingsSequenceNumber
    if SettingsSequenceNumber is None:
        handler_env = get_handler_env()
        try:
            config_dir = str(handler_env['handlerEnvironment']['configFolder'])
        except:
            config_dir = os.path.join(os.getcwd(), 'config')

        latest_seq_no = -1
        cur_seq_no = -1
        latest_time = None
        try:
            for dir_name, sub_dirs, file_names in os.walk(config_dir):
                for file_name in file_names:
                    file_basename = os.path.basename(file_name)
                    match = re.match(r'[0-9]{1,10}\.settings', file_basename)
                    if match is None:
                        continue
                    cur_seq_no = int(file_basename.split('.')[0])
                    # Fix: join against the directory currently being
                    # walked, not config_dir; settings files in
                    # subdirectories previously produced a bad path, and
                    # the resulting getmtime error aborted the whole scan
                    file_path = os.path.join(dir_name, file_name)
                    cur_time = os.path.getmtime(file_path)
                    if latest_time is None or cur_time > latest_time:
                        latest_time = cur_time
                        latest_seq_no = cur_seq_no
        except:
            pass
        if latest_seq_no < 0:
            latest_seq_no = 0
        SettingsSequenceNumber = latest_seq_no

    return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
    """
    Run a shell command and return an (exit_code, output) tuple.

    Mimics waagent's RunGetOutput for when waagent is not available;
    when the waagent module is loaded, delegates to it instead.
    """
    if 'Utils.WAAgentUtil' in sys.modules:
        # WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
        # If checking the number of parameters fails, pass 2
        try:
            # inspect.signature exists on Python 3 only
            sig = inspect.signature(waagent.RunGetOutput)
            params = sig.parameters
            waagent_params = len(params)
        except:
            try:
                # Python 2 fallback (getargspec is removed in newer Python 3)
                spec = inspect.getargspec(waagent.RunGetOutput)
                params = spec.args
                waagent_params = len(params)
            except:
                waagent_params = 2
        if waagent_params >= 3:
            exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
        else:
            exit_code, output = waagent.RunGetOutput(cmd, chk_err)
    else:
        try:
            # stderr is folded into stdout so callers get one stream
            output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
                                             shell = True)
            exit_code = 0
        except subprocess.CalledProcessError as e:
            exit_code = e.returncode
            output = e.output

    # NOTE(review): under Python 2, output is a byte str and .encode
    # implicitly decodes it as ASCII first, which may raise on non-ASCII
    # command output -- confirm outputs are expected to be ASCII-only
    return exit_code, output.encode('utf-8').strip()
def get_tenant_id_from_metadata_api(vm_resource_id):
    """
    Retrieve the Tenant ID using the Metadata API of the VM resource ID
    Since we have not authenticated, the Metadata API will throw a 401, but
    the headers of the 401 response will contain the tenant ID

    Raises MetadataAPIException when the WWW-Authenticate header or its
    authorization_uri field is missing or malformed.
    """
    tenant_id = None

    metadata_endpoint = get_metadata_api_endpoint(vm_resource_id)
    metadata_request = urllib2.Request(metadata_endpoint)
    try:
        # This request should fail with code 401
        metadata_response = urllib2.urlopen(metadata_request)
        hutil_log_info('Request to Metadata API did not fail as expected; ' \
                       'attempting to use headers from response to ' \
                       'determine Tenant ID')
        metadata_headers = metadata_response.headers
    except urllib2.HTTPError as e:
        # Expected path: the 401 response's headers carry the tenant info
        metadata_headers = e.headers

    if metadata_headers is not None and 'WWW-Authenticate' in metadata_headers:
        auth_header = metadata_headers['WWW-Authenticate']
        auth_header_regex = r'authorization_uri=\"https:\/\/login\.windows\.net/(' + GUIDRegex + ')\"'
        auth_header_search = re.compile(auth_header_regex)
        auth_header_matches = auth_header_search.search(auth_header)
        if not auth_header_matches:
            raise MetadataAPIException('The WWW-Authenticate header in the ' \
                                       'response does not contain expected ' \
                                       'authorization_uri format')
        else:
            # The GUID captured from the authorization_uri is the tenant ID
            tenant_id = auth_header_matches.group(1)
    else:
        raise MetadataAPIException('Expected information from Metadata API ' \
                                   'is not present')

    return tenant_id
def get_metadata_api_endpoint(vm_resource_id):
    """
    Extrapolate Metadata API endpoint from VM Resource ID
    Example VM resource ID: /subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup/providers/Microsoft.Compute/virtualMachines/lagalbraOCUb16C
    Corresponding example endpoint: https://management.azure.com/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup?api-version=2016-09-01

    Raises InvalidParameterError when the resource ID does not match the
    expected ARM/Classic VM, availability set, or scale set format.
    """
    # Will match for ARM and Classic VMs, Availability Sets, VM Scale Sets
    # Fix: every continuation literal is now a raw string; the originals
    # relied on '\.' and '\/' passing through non-raw literals, which
    # raises invalid-escape warnings on modern Python versions
    vm_resource_id_regex = (r'^\/subscriptions\/(' + GUIDRegex + r')\/'
                            r'resourceGroups\/([^\/]+)\/providers\/Microsoft'
                            r'\.(?:Classic){0,1}Compute\/(?:virtualMachines|'
                            r'availabilitySets|virtualMachineScaleSets)'
                            r'\/[^\/]+$')
    vm_resource_id_search = re.compile(vm_resource_id_regex, re.M)
    vm_resource_id_matches = vm_resource_id_search.search(vm_resource_id)
    if not vm_resource_id_matches:
        raise InvalidParameterError('VM Resource ID is invalid')
    subscription_id = vm_resource_id_matches.group(1)
    resource_group = vm_resource_id_matches.group(2)

    metadata_url = 'https://management.azure.com/subscriptions/{0}' \
                   '/resourceGroups/{1}'.format(subscription_id,
                                                resource_group)
    metadata_data = urllib.urlencode({'api-version' : '2016-09-01'})
    metadata_endpoint = '{0}?{1}'.format(metadata_url, metadata_data)
    return metadata_endpoint
def get_access_token(tenant_id, resource):
    """
    Retrieve an OAuth access token for the given tenant and resource.

    Reads the local URL that the ManagedIdentity extension is listening
    on from its settings file, then performs an OAuth2 token exchange
    request against that URL.

    Raises ManagedIdentityExtException when the listening URL cannot be
    read, the exchange request fails, or the response has no token.
    """
    # Extract the endpoint that the ManagedIdentity extension is listening on
    with open(ManagedIdentityExtListeningURLPath, 'r') as listening_file:
        listening_settings_txt = listening_file.read()
    try:
        listening_settings = json.loads(listening_settings_txt)
        listening_url = listening_settings['url']
    except:
        raise ManagedIdentityExtException('Could not extract listening URL ' \
                                          'from settings file')
    # Send an OAuth token exchange request
    oauth_data = {'authority' : 'https://login.microsoftonline.com/' \
                                '{0}'.format(tenant_id),
                  'resource' : resource
                 }
    # Supplying a data body makes urllib2 issue a POST request
    oauth_request = urllib2.Request(listening_url + '/oauth2/token',
                                    urllib.urlencode(oauth_data))
    # NOTE(review): the listener appears to expect this header — confirm
    oauth_request.add_header('Metadata', 'true')
    try:
        oauth_response = urllib2.urlopen(oauth_request)
        oauth_response_txt = oauth_response.read()
    except urllib2.HTTPError as e:
        hutil_log_error('Request to ManagedIdentity extension listening URL ' \
                        'failed with an HTTPError: {0}'.format(e))
        hutil_log_info('Response from ManagedIdentity extension: ' \
                       '{0}'.format(e.read()))
        raise ManagedIdentityExtException('Request to listening URL failed ' \
                                          'with HTTPError {0}'.format(e))
    except:
        raise ManagedIdentityExtException('Unexpected error from request to ' \
                                          'listening URL')
    try:
        oauth_response_json = json.loads(oauth_response_txt)
    except:
        raise ManagedIdentityExtException('Error parsing JSON from ' \
                                          'listening URL response')
    if (oauth_response_json is not None
            and 'access_token' in oauth_response_json):
        return oauth_response_json['access_token']
    else:
        raise ManagedIdentityExtException('Could not retrieve access token ' \
                                          'in the listening URL response')
def get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token):
    """
    Ask the OMS service which workspace the OMSAgent should onboard to.

    Posts the VM resource ID, tenant ID and OAuth token to the
    ValidateMachineIdentity endpoint and returns the parsed JSON
    response containing 'WorkspaceId' and 'WorkspaceKey'.

    Raises OMSServiceOneClickException on request/parse failures and
    OneClickException when provisioning does not complete within the
    allotted retries.
    """
    oms_data = {'ResourceId' : vm_resource_id,
                'TenantId' : tenant_id,
                'JwtToken' : access_token
               }
    oms_request_json = json.dumps(oms_data)
    oms_request = urllib2.Request(OMSServiceValidationEndpoint)
    oms_request.add_header('Content-Type', 'application/json')
    retries = 5
    initial_sleep_time = AutoManagedWorkspaceCreationSleepSeconds
    # Factor of 1 keeps the sleep time constant between attempts
    sleep_increase_factor = 1
    try_count = 0
    sleep_time = initial_sleep_time
    # Workspace may not be provisioned yet; sleep and retry if
    # provisioning has been accepted
    while try_count <= retries:
        try:
            # Supplying a body makes urllib2 issue a POST request
            oms_response = urllib2.urlopen(oms_request, oms_request_json)
            oms_response_txt = oms_response.read()
        except urllib2.HTTPError as e:
            hutil_log_error('Request to OMS threw HTTPError: {0}'.format(e))
            hutil_log_info('Response from OMS: {0}'.format(e.read()))
            raise OMSServiceOneClickException('ValidateMachineIdentity ' \
                                              'request returned an error ' \
                                              'HTTP code: {0}'.format(e))
        except:
            raise OMSServiceOneClickException('Unexpected error from ' \
                                              'ValidateMachineIdentity ' \
                                              'request')
        should_retry = retry_get_workspace_info_from_oms(oms_response)
        if not should_retry:
            # Workspace is provisioned; use this response
            # TESTED
            break
        elif try_count == retries:
            # Still provisioning after the final attempt: give up
            # TESTED
            hutil_log_error('Retries for ValidateMachineIdentity request ran ' \
                            'out: required workspace information cannot be ' \
                            'extracted')
            raise OneClickException('Workspace provisioning did not complete ' \
                                    'within the allotted time')
        # TESTED
        try_count += 1
        time.sleep(sleep_time)
        sleep_time *= sleep_increase_factor
    if not oms_response_txt:
        raise OMSServiceOneClickException('Body from ValidateMachineIdentity ' \
                                          'response is empty; required ' \
                                          'workspace information cannot be ' \
                                          'extracted')
    try:
        oms_response_json = json.loads(oms_response_txt)
    except:
        raise OMSServiceOneClickException('Error parsing JSON from ' \
                                          'ValidateMachineIdentity response')
    if (oms_response_json is not None and 'WorkspaceId' in oms_response_json
            and 'WorkspaceKey' in oms_response_json):
        return oms_response_json
    else:
        hutil_log_error('Could not retrieve both workspace ID and key from ' \
                        'the OMS service response {0}; cannot determine ' \
                        'workspace ID and key'.format(oms_response_json))
        raise OMSServiceOneClickException('Required workspace information ' \
                                          'was not found in the ' \
                                          'ValidateMachineIdentity response')
def retry_get_workspace_info_from_oms(oms_response):
    """
    Return True if the ValidateMachineIdentity request should be retried.

    A retry is indicated when the response from OMS reports that the
    request has been accepted but the managed workspace is still being
    provisioned (HTTP 202, 204 or 404); any other code means the
    workspace is provisioned. Returns False when the HTTP code cannot
    be read from the response.
    """
    try:
        oms_response_http_code = oms_response.getcode()
    except Exception:
        hutil_log_error('Unable to get HTTP code from OMS response')
        return False
    # Compare with ==/in, not "is": identity comparison on ints relies on
    # CPython's small-int cache and silently fails for values such as 404
    if oms_response_http_code in (202, 204, 404):
        hutil_log_info('Retrying ValidateMachineIdentity OMS request ' \
                       'because workspace is still being provisioned; HTTP ' \
                       'code from OMS is {0}'.format(oms_response_http_code))
        return True
    else:
        hutil_log_info('Workspace is provisioned; HTTP code from OMS is ' \
                       '{0}'.format(oms_response_http_code))
        return False
def init_waagent_logger():
    """
    Initialize the waagent logger.

    If waagent has not been imported, swallow the resulting exception
    and report it on stdout instead of failing.
    """
    log_path = '/var/log/waagent.log'
    console_path = '/dev/stdout'
    try:
        waagent.LoggerInit(log_path, console_path, True)
    except Exception as exc:
        failure = 'Unable to initialize waagent log because of exception ' \
                  '{0}'.format(exc)
        print(failure)
def waagent_log_info(message):
    """
    Log an informational message via waagent when it has been imported;
    otherwise fall back to printing to stdout.
    """
    if 'Utils.WAAgentUtil' not in sys.modules:
        print('Info: {0}'.format(message))
        return
    waagent.Log(message)
def waagent_log_error(message):
    """
    Log an error message via waagent when it has been imported;
    otherwise fall back to printing to stdout.
    """
    if 'Utils.WAAgentUtil' not in sys.modules:
        print('Error: {0}'.format(message))
        return
    waagent.Error(message)
def hutil_log_info(message):
    """
    Log an informational message through hutil when it is configured,
    falling back to stdout otherwise.
    """
    if HUtilObject is None:
        print('Info: {0}'.format(message))
    else:
        HUtilObject.log(message)
def hutil_log_error(message):
    """
    Log an error message through hutil when it is configured, falling
    back to stdout otherwise.
    """
    if HUtilObject is None:
        print('Error: {0}'.format(message))
    else:
        HUtilObject.error(message)
def log_and_exit(operation, exit_code = 1, message = ''):
    """
    Log the exit message through both logging channels and exit.

    operation: name of the extension operation being reported
    exit_code: process exit code; 0 is treated as success
    message:   human-readable status text
    """
    # Use == for the int comparison; "is 0" only works because CPython
    # caches small integers and is not guaranteed by the language
    if exit_code == 0:
        waagent_log_info(message)
        hutil_log_info(message)
        exit_status = 'success'
    else:
        waagent_log_error(message)
        hutil_log_error(message)
        exit_status = 'failed'
    if HUtilObject is not None:
        # do_exit reports status and terminates the process itself
        HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
                            message)
    else:
        update_status_file(operation, str(exit_code), exit_status, message)
        sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class OmsAgentForLinuxException(Exception):
    """
    Base class for all exceptions raised by this extension.

    Its error code is the basic error code traditionally returned in
    Linux (1); subclasses override error_code with specific values that
    the main method uses as the process exit code.
    """
    error_code = 1

    def get_error_message(self, operation):
        """
        Build a descriptive failure message for the given operation.
        """
        template = '{0} failed with exit code {1}'
        return template.format(operation, self.error_code)
class ParameterMissingException(OmsAgentForLinuxException):
    """
    There is a missing parameter for the OmsAgentForLinux Extension
    """
    # Error-code constants below are module-level values defined earlier
    # in this file (outside this view)
    error_code = MissingorInvalidParameterErrorCode

    def get_error_message(self, operation):
        # str(self) embeds the exception's own message text
        return '{0} failed due to a missing parameter: {1}'.format(operation,
                                                                   self)


class InvalidParameterError(OmsAgentForLinuxException):
    """
    There is an invalid parameter for the OmsAgentForLinux Extension
    ex. Workspace ID does not match GUID regex
    """
    error_code = MissingorInvalidParameterErrorCode

    def get_error_message(self, operation):
        return '{0} failed due to an invalid parameter: {1}'.format(operation,
                                                                    self)


class UnwantedMultipleConnectionsException(OmsAgentForLinuxException):
    """
    This VM is already connected to a different Log Analytics workspace
    and stopOnMultipleConnections is set to true
    """
    error_code = UnwantedMultipleConnectionsErrorCode

    def get_error_message(self, operation):
        return '{0} failed due to multiple connections: {1}'.format(operation,
                                                                    self)


class CannotConnectToOMSException(OmsAgentForLinuxException):
    """
    The OMSAgent cannot connect to the OMS service
    """
    error_code = CannotConnectToOMSErrorCode # error code to indicate no internet access

    def get_error_message(self, operation):
        # Fixed guidance text; the operation argument is intentionally unused
        return 'The agent could not connect to the Microsoft Operations ' \
               'Management Suite service. Please check that the system ' \
               'either has Internet access, or that a valid HTTP proxy has ' \
               'been configured for the agent. Please also check the ' \
               'correctness of the workspace ID.'


class OneClickException(OmsAgentForLinuxException):
    """
    A generic exception for OneClick-related issues
    """
    error_code = OneClickErrorCode

    def get_error_message(self, operation):
        return 'Encountered an issue related to the OneClick scenario: ' \
               '{0}'.format(self)


class ManagedIdentityExtMissingException(OneClickException):
    """
    This extension being present is required for the OneClick scenario
    """
    error_code = ManagedIdentityExtMissingErrorCode

    def get_error_message(self, operation):
        # Fixed guidance text; the operation argument is intentionally unused
        return 'The ManagedIdentity extension is required to be installed ' \
               'for Automatic Management to be enabled. Please set ' \
               'EnableAutomaticManagement to false in public settings or ' \
               'install the ManagedIdentityExtensionForLinux Azure VM ' \
               'extension.'


class ManagedIdentityExtException(OneClickException):
    """
    Thrown when we encounter an issue with ManagedIdentityExtensionForLinux
    """
    error_code = ManagedIdentityExtErrorCode

    def get_error_message(self, operation):
        return 'Encountered an issue with the ManagedIdentity extension: ' \
               '{0}'.format(self)


class MetadataAPIException(OneClickException):
    """
    Thrown when we encounter an issue with Metadata API
    """
    error_code = MetadataAPIErrorCode

    def get_error_message(self, operation):
        return 'Encountered an issue with the Metadata API: {0}'.format(self)


class OMSServiceOneClickException(OneClickException):
    """
    Thrown when prerequisites were satisfied but could not retrieve the managed
    workspace information from OMS service
    """
    error_code = OMSServiceOneClickErrorCode

    def get_error_message(self, operation):
        return 'Encountered an issue with the OMS service: ' \
               '{0}'.format(self)
# Script entry point; main() is defined earlier in this file.
if __name__ == '__main__' :
    main()
``` |
[
{
"content": "```python\n#! -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport pprint\n\nfrom decimal import Decimal\nfrom botocore.exceptions import ClientError\nfrom botocore.vendored.requests.exceptions import ConnectionError\n\nfrom .connection import db\nfrom .helpers import get_attribu... | [
{
"content": "<|memory_start|>```python\n#! -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport pprint\n\nfrom decimal import Decimal\nfrom botocore.exceptions import ClientError\nfrom botocore.vendored.requests.exceptions import ConnectionError\n\nfrom .connection import db\nfrom .helpers im... | ```python
#! -*- coding: utf-8 -*-
from __future__ import print_function
import pprint
from decimal import Decimal
from botocore.exceptions import ClientError
from botocore.vendored.requests.exceptions import ConnectionError
from .connection import db
from .helpers import get_attribute_type
from .errors import ClientException, ConnectionException, ParameterException
pp = pprint.PrettyPrinter(indent=4)
pprint = pp.pprint
__all__ = ['Table']
class Table(object):
def __init__(self, instance):
self.instance = instance
self.table_name = instance.__table_name__
self.table = db.Table(self.table_name)
def info(self):
try:
response = db.meta.client.describe_table(TableName=self.table_name)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
else:
table_info = response['Table']
return table_info
def _prepare_hash_key(self):
hash_key = self.instance._hash_key
param = {
'AttributeName': hash_key,
'KeyType': 'HASH'
}
return param
def _prepare_range_key(self, range_key=None):
if not range_key:
range_key = self.instance._range_key
if range_key:
param = {
'AttributeName': range_key,
'KeyType': 'RANGE'
}
return param
return {}
def _prepare_key_schema(self):
KeySchema = []
hash_key_param = self._prepare_hash_key()
KeySchema.append(hash_key_param)
range_key_param = self._prepare_range_key()
if range_key_param:
KeySchema.append(range_key_param)
return KeySchema
def _prepare_attribute_definitions(self):
AttributeDefinitions = []
attributes = self.instance.attributes
hash_key = self.instance._hash_key
AttributeDefinitions.append({
'AttributeName': hash_key,
'AttributeType': get_attribute_type(attributes[hash_key]),
})
range_key = self.instance._range_key
if range_key:
AttributeDefinitions.append({
'AttributeName': range_key,
'AttributeType': get_attribute_type(attributes[range_key]),
})
for field in self.instance._local_indexed_fields:
AttributeDefinitions.append({
'AttributeName': field,
'AttributeType': get_attribute_type(attributes[field]),
})
return AttributeDefinitions
def _prepare_primary_key(self, params):
params['KeySchema'] = self._prepare_key_schema()
params['AttributeDefinitions'] = self._prepare_attribute_definitions()
return params
def _prepare_local_indexes(self):
indexes = []
for field in self.instance._local_indexed_fields:
index_name = '{table_name}_ix_{field}'.format(
table_name=self.table_name, field=field)
KeySchema = [self._prepare_hash_key()]
range_key_param = self._prepare_range_key(field)
if range_key_param:
KeySchema.append(range_key_param)
indexes.append({
'IndexName': index_name,
'KeySchema': KeySchema,
'Projection': {
'ProjectionType': 'ALL'
}
})
return indexes
def _prepare_global_indexes(self):
return []
def _prepare_create_table_params(self):
# TableName
table_params = {
'TableName': self.table_name
}
# KeySchema && AttributeDefinitions
table_params = self._prepare_primary_key(table_params)
# LocalSecondaryIndexes
local_indexes = self._prepare_local_indexes()
if local_indexes:
table_params['LocalSecondaryIndexes'] = local_indexes
# GlobalSecondaryIndexes
global_indexes = self._prepare_global_indexes()
if global_indexes:
table_params['GlobalSecondaryIndexes'] = global_indexes
# ProvisionedThroughput
table_params['ProvisionedThroughput'] = {
'ReadCapacityUnits': self.instance.ReadCapacityUnits,
'WriteCapacityUnits': self.instance.WriteCapacityUnits
}
return table_params
def create(self):
'''
# create table
create_table Request Syntax
# http://boto3.readthedocs.io/en/sinstance/reference/services/dynamodb.html#DynamoDB.Client.create_instance
response = client.create_table(
AttributeDefinitions=[
{
'AttributeName': 'string',
'AttributeType': 'S'|'N'|'B'
},
],
TableName='string',
KeySchema=[
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
LocalSecondaryIndexes=[
{
'IndexName': 'string',
'KeySchema': [
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
'Projection': {
'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',
'NonKeyAttributes': [
'string',
]
}
},
],
GlobalSecondaryIndexes=[
{
'IndexName': 'string',
'KeySchema': [
{
'AttributeName': 'string',
'KeyType': 'HASH'|'RANGE'
},
],
'Projection': {
'ProjectionType': 'ALL'|'KEYS_ONLY'|'INCLUDE',
'NonKeyAttributes': [
'string',
]
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
}
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123
},
StreamSpecification={
'StreamEnabled': True|False,
'StreamViewType': 'NEW_IMAGE'|'OLD_IMAGE'|'NEW_AND_OLD_IMAGES'|'KEYS_ONLY'
}
)
AttributeType (string) -- [REQUIRED]
The data type for the attribute, where:
* S - the attribute is of type String
* N - the attribute is of type Number
* B - the attribute is of type Binary
KeySchema (list) -- [REQUIRED]
KeyType - The role that the key attribute will assume:
* HASH - partition key
* RANGE - sort key
'''
try:
params = self._prepare_create_table_params()
return db.create_table(**params)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
except ConnectionError:
raise ConnectionException('Connection refused')
def _update_throughput(self, ProvisionedThroughput):
ReadCapacityUnits = ProvisionedThroughput['ReadCapacityUnits']
WriteCapacityUnits = ProvisionedThroughput['WriteCapacityUnits']
if (ReadCapacityUnits != self.instance.ReadCapacityUnits or
WriteCapacityUnits != self.instance.WriteCapacityUnits):
self.table.update(ProvisionedThroughput={
'ReadCapacityUnits': self.instance.ReadCapacityUnits,
'WriteCapacityUnits': self.instance.WriteCapacityUnits
})
def _update_streams(self):
# TODO
pass
def _update_global_indexes(self):
# TODO
pass
def update(self):
'''
# update table
http://boto3.readthedocs.io/en/stable/reference/services/dynamodb.html#DynamoDB.Table.update
You can only perform one of the following operations at once:
* Modify the provisioned throughput settings of the table.
* Enable or disable Streams on the table.
* Remove a global secondary index from the table.
* Create a new global secondary index on the table.
Once the index begins backfilling, you can use UpdateTable to perform
other operations.
UpdateTable is an asynchronous operation; while it is executing,
the table status changes from ACTIVE to UPDATING. While it is UPDATING,
you cannot issue another UpdateTable request.
When the table returns to the ACTIVE state, the UpdateTable operation is
complete.
# Request Syntax
{
"AttributeDefinitions": [
{
"AttributeName": "string",
"AttributeType": "string"
}
],
"GlobalSecondaryIndexUpdates": [
{
"Create": {
"IndexName": "string",
"KeySchema": [
{
"AttributeName": "string",
"KeyType": "string"
}
],
"Projection": {
"NonKeyAttributes": [ "string" ],
"ProjectionType": "string"
},
"ProvisionedThroughput": {
"ReadCapacityUnits": number,
"WriteCapacityUnits": number
}
},
"Delete": {
"IndexName": "string"
},
"Update": {
"IndexName": "string",
"ProvisionedThroughput": {
"ReadCapacityUnits": number,
"WriteCapacityUnits": number
}
}
}
],
"ProvisionedThroughput": {
"ReadCapacityUnits": number,
"WriteCapacityUnits": number
},
"StreamSpecification": {
"StreamEnabled": boolean,
"StreamViewType": "string"
},
"TableName": "string"
}
'''
table_info = self.info()
ProvisionedThroughput = table_info['ProvisionedThroughput']
self._update_throughput(ProvisionedThroughput)
def delete(self):
# delete table
try:
return self.table.delete()
except ClientError:
raise ClientException('Cannot do operations on a non-existent table')
except ConnectionError:
raise ConnectionException('Connection refused')
def _get_primary_key(self, **kwargs):
hash_key, range_key = self.instance._hash_key, self.instance._range_key
hash_value = kwargs.get(hash_key) or getattr(self.instance, hash_key)
if isinstance(hash_value, (int, float)):
hash_value = Decimal(hash_value)
key = {
hash_key: hash_value
}
if not range_key:
return key
range_value = kwargs.get(range_key) or getattr(self.instance, range_key, None)
if range_key and not range_value:
raise ParameterException('Invalid range key value type')
elif range_key:
if isinstance(range_value, (int, float)):
range_value = Decimal(range_value)
key[range_key] = range_value
return key
def get_item(self, **kwargs):
"""
primary_key: params: primary_key dict
"""
kwargs['Key'] = kwargs.get('Key') or self._get_primary_key()
try:
response = self.table.get_item(**kwargs)
except ClientError as e:
if e.response['Error']['Code'] == 'ValidationException':
return None
raise ClientException(e.response['Error']['Message'])
else:
item = response.get('Item')
return item
def batch_get_item(self, *primary_keys):
"""
primary_key: params: primary_keys list
"""
_primary_keys = []
for primary_key in primary_keys:
key = self._get_primary_key(**primary_key)
_primary_keys.append(key)
params = {
'RequestItems': {
self.table_name: {
'Keys': _primary_keys
}
},
'ReturnConsumedCapacity': 'TOTAL'
}
try:
response = db.batch_get_item(**params)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
else:
items = response['Responses'][self.table_name]
return items
def put_item(self, item):
self.table.put_item(Item=item)
return True
def batch_write(self, items, overwrite=False):
pkeys = []
if overwrite:
instance = self.instance
pkeys = [instance._hash_key, instance._range_key]
try:
with self.table.batch_writer(overwrite_by_pkeys=pkeys) as batch:
for item in items:
batch.put_item(Item=item)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
def query(self, **kwargs):
"""
response = table.query(
IndexName='string',
Select='ALL_ATTRIBUTES'|'ALL_PROJECTED_ATTRIBUTES'|'SPECIFIC_ATTRIBUTES'|'COUNT',
Limit=123,
ConsistentRead=True|False,
ScanIndexForward=True|False,
ExclusiveStartKey={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
},
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ProjectionExpression='string',
FilterExpression=Attr('myattribute').eq('myvalue'),
KeyConditionExpression=Key('mykey').eq('myvalue'),
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
}
)
ExclusiveStartKey: 起始查询的key,也就是上一页的最后一条数据
ConsistentRead: 是否使用强制一致性 默认False
ScanIndexForward: 索引的排序方式 True 为正序 False 为倒序 默认True
ReturnConsumedCapacity: DynamoDB 写入期间使用的写入容量单位
TOTAL 会返回由表及其所有global secondary index占用的写入容量;
INDEXES 仅返回由global secondary index占用的写入容量;
NONE 表示您不需要返回任何占用容量统计数据。
ProjectionExpression: 用于指定要在扫描结果中包含的属性
FilterExpression: 指定一个条件,以便仅返回符合条件的项目
KeyConditionExpression: 要查询的键值
ExpressionAttributeNames: 提供名称替换功能
ExpressionAttributeValues: 提供值替换功能
"""
try:
response = self.table.query(**kwargs)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
return response
def scan(self, **kwargs):
try:
response = self.table.scan(**kwargs)
except ClientError as e:
raise ClientException(e.response['Error']['Message'])
return response
def _prepare_update_item_params(self, update_fields=None, *args, **kwargs):
params = {
'Key': self._get_primary_key()
}
ConditionExpression = getattr(self.instance, 'ConditionExpression', None)
if ConditionExpression:
params['ConditionExpression'] = ConditionExpression
ExpressionAttributeValues = getattr(self.instance,
'ExpressionAttributeValues', {})
ExpressionAttributeNames = getattr(self.instance,
'ExpressionAttributeNames', {})
action_exp_dict = {}
if update_fields:
set_expression_str = ''
for k, v in update_fields.items():
label = ':{k}'.format(k=k)
path = '#{k}'.format(k=k)
if set_expression_str:
set_expression_str += ', {k} = {v}'.format(k=path, v=label)
else:
set_expression_str += '{k} = {v}'.format(k=path, v=label)
ExpressionAttributeValues[label] = v
ExpressionAttributeNames[path] = k
action_exp_dict['SET'] = set_expression_str
for arg in args:
exp, exp_attr, action = arg
eav = exp_attr.get('value', {})
ean = exp_attr.get('name', {})
action_exp = action_exp_dict.get(action)
if action_exp:
action_exp = '{action_exp}, {exp}'.format(action_exp=action_exp,
exp=exp)
else:
action_exp = exp
action_exp_dict[action] = action_exp
ExpressionAttributeValues.update(eav)
ExpressionAttributeNames.update(ean)
for action, _exp in action_exp_dict.iteritems():
action_exp_dict[action] = '{action} {exp}'.format(action=action,
exp=_exp)
if ExpressionAttributeValues:
params['ExpressionAttributeValues'] = ExpressionAttributeValues
if ExpressionAttributeNames:
params['ExpressionAttributeNames'] = ExpressionAttributeNames
params['UpdateExpression'] = " ".join(action_exp_dict.values())
params.update(kwargs)
return params
def update_item(self, update_fields, *args, **kwargs):
'''
update_fields: update_fields (dict)
http://boto3.readthedocs.io/en/stable/reference/services/dynamodb.html#DynamoDB.Table.update_item
response = table.update_item(
Key={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
},
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
UpdateExpression='string',
ConditionExpression=Attr('myattribute').eq('myvalue'),
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
}
)
## example
item.update_item(a=12, b=12, c=12)
'''
params = self._prepare_update_item_params(update_fields, *args, **kwargs)
try:
item = self.table.update_item(**params)
attributes = item.get('Attributes')
return attributes
except ClientError as e:
if e.response['Error']['Code'] == "ConditionalCheckFailedException":
print(e.response['Error']['Message'])
raise ClientException(e.response['Error']['Message'])
def _prepare_delete_item_params(self):
params = {
'Key': self._get_primary_key()
}
ConditionExpression = getattr(self.instance, 'ConditionExpression', None)
if ConditionExpression:
params['ConditionExpression'] = ConditionExpression
ExpressionAttributeValues = getattr(self.instance,
'ExpressionAttributeValues', {})
if ExpressionAttributeValues:
params['ExpressionAttributeValues'] = ExpressionAttributeValues
ExpressionAttributeNames = getattr(self.instance,
'ExpressionAttributeNames', {})
if ExpressionAttributeNames:
params['ExpressionAttributeNames'] = ExpressionAttributeNames
return params
def delete_item(self, **kwargs):
'''
http://boto3.readthedocs.io/en/stable/reference/services/dynamodb.html#DynamoDB.Table.delete_item
Deletes a single item in a table by primary key. You can perform a
conditional delete operation that deletes the item if it exists,
or if it has an expected attribute value.
In addition to deleting an item, you can also return the item's
attribute values in the same operation, using the ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent operation;
running it multiple times on the same item or attribute does not result
in an error response.
Conditional deletes are useful for deleting items only if specific
conditions are met. If those conditions are met, DynamoDB performs the
delete. Otherwise, the item is not deleted.
Request Syntax
response = table.delete_item(
Key={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
},
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
ConditionExpression=Attr('myattribute').eq('myvalue'),
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': 'string'|123|Binary(b'bytes')|True|None|set(['string'])|set([123])|set([Binary(b'bytes')])|[]|{}
}
)
Parameters:
Key (dict) -- [REQUIRED]
'''
key = self._get_primary_key()
try:
self.table.delete_item(Key=key)
except ClientError as e:
if e.response['Error']['Code'] == "ConditionalCheckFailedException":
raise ClientException(e.response['Error']['Message'])
return True
def item_count(self):
return self.table.item_count
``` |
[
{
"content": "Here is a code snippet:\n```python\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nfrom Schemes import Blueprint\nfrom ItemStack import ItemStack\n\nclass TestProcess(TestCase):\n\n\tdef test_InitProcess(self):\n\t\tscheme = Blueprint(0, \"Name\", 0, [ItemStack(0, 1)], ItemStack(... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nfrom Schemes import Blueprint\nfrom ItemStack import ItemStack\n\nclass TestProcess(TestCase):\n\n\tdef test_InitProcess(self):\n\t\tscheme = Blueprint(0, \"Name\", 0, [ItemStack(0,... | ```python
from unittest import TestCase
from unittest.mock import Mock
from Schemes import Blueprint
from ItemStack import ItemStack
class TestProcess(TestCase):
def test_InitProcess(self):
scheme = Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 1))
process = Process(scheme)
assert process.inputs[0].ammount == 1
def test_SetRuns(self):
scheme = Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 2))
process = Process(scheme)
process.SetRuns(2)
assert process.inputs[0].ammount == 2
assert process.outputs[0].ammount == 4
from copy import copy
class Process:
def __init__(self, aScheme):
self.scheme = aScheme
self.runs = 1
self.inputs = [copy(inp) for inp in aScheme.GetInputs()]
self.outputs = [copy(out) for out in aScheme.GetOutputs()]
self.runsChangedCallback = None
self.manual = False
def SetRuns(self, aRuns):
if self.runs == aRuns:
return
self.runs = aRuns
schemeInputs = self.scheme.GetInputs()
for i in range(len(self.inputs)):
self.inputs[i].ammount = schemeInputs[i].ammount * aRuns
schemeOutputs = self.scheme.GetOutputs()
for i in range(len(self.outputs)):
self.outputs[i].ammount = schemeOutputs[i].ammount * aRuns
if self.manual and self.runsChangedCallback:
self.runsChangedCallback()
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n\nimport email\nimport threading\nimport urllib.parse\nimport urllib.request\nimport http.server\nimport unittest\nimport hashlib\nfrom test import support\n\n# Loopback http server infrastructure\n\nclass LoopbackHttpServer(http.server.HTTPS... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport email\nimport threading\nimport urllib.parse\nimport urllib.request\nimport http.server\nimport unittest\nimport hashlib\nfrom test import support\n\n# Loopback http server infrastructure\n\nclass LoopbackHttpServer(h... | ```python
#!/usr/bin/env python
import email
import threading
import urllib.parse
import urllib.request
import http.server
import unittest
import hashlib
from test import support
# Loopback http server infrastructure
class LoopbackHttpServer(http.server.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
http.server.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(1.0)
def get_request(self):
"""HTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop_server = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(("127.0.0.1", 0),
request_handler)
#print "Serving HTTP on %s port %s" % (self.httpd.server_name,
# self.httpd.server_port)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
# Set the stop flag.
self._stop_server = True
self.join()
def run(self):
self.ready.set()
while not self._stop_server:
self.httpd.handle_request()
# Authentication infrastructure
class DigestAuthHandler:
    """Performs server-side HTTP digest authentication (RFC 2617 style).

    With no users configured, authentication is effectively disabled
    and every request is accepted.
    """

    def __init__(self):
        self._request_num = 0          # counter feeding nonce generation
        self._nonces = []              # issued, not-yet-consumed nonces
        self._users = {}               # username -> password; empty disables auth
        self._realm_name = "Test Realm"
        self._qop = "auth"

    def set_qop(self, qop):
        """Set the quality-of-protection advertised in challenges."""
        self._qop = qop

    def set_users(self, users):
        """Replace the username -> password mapping."""
        assert isinstance(users, dict)
        self._users = users

    def set_realm(self, realm):
        """Set the realm name advertised in challenges."""
        self._realm_name = realm

    def _generate_nonce(self):
        # Derive a fresh nonce from an increasing counter and remember
        # it so later responses can be validated against it.
        self._request_num += 1
        nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
        self._nonces.append(nonce)
        return nonce

    def _create_auth_dict(self, auth_str):
        # Strip the leading scheme token ("Digest ") and parse the
        # comma-separated key=value pairs, unquoting quoted values.
        auth_dict = {}
        for pair in auth_str[auth_str.find(" ") + 1:].split(","):
            key, value = pair.split("=")
            key = key.strip()
            if value[0] == '"' and value[-1] == '"':
                value = value[1:-1]
            else:
                value = value.strip()
            auth_dict[key] = value
        return auth_dict

    def _validate_auth(self, auth_dict, password, method, uri):
        # Recompute the expected digest response from the request data
        # and compare it to what the client actually sent.
        params = dict(auth_dict, password=password, method=method, uri=uri)
        params["HA1"] = hashlib.md5(
            ("%(username)s:%(realm)s:%(password)s" % params).encode("ascii")
        ).hexdigest()
        params["HA2"] = hashlib.md5(
            ("%(method)s:%(uri)s" % params).encode("ascii")
        ).hexdigest()
        expected = hashlib.md5(
            ("%(HA1)s:%(nonce)s:%(nc)s:%(cnonce)s:%(qop)s:%(HA2)s"
             % params).encode("ascii")
        ).hexdigest()
        return expected == auth_dict["response"]

    def _return_auth_challenge(self, request_handler):
        # Issue a 407 challenge carrying a fresh nonce; returns False so
        # callers can abort the current request.
        request_handler.send_response(407, "Proxy Authentication Required")
        request_handler.send_header("Content-Type", "text/html")
        challenge = 'Digest realm="%s", qop="%s",nonce="%s", ' % (
            self._realm_name, self._qop, self._generate_nonce())
        request_handler.send_header('Proxy-Authenticate', challenge)
        # XXX: unclear whether a 'Connection: close' header should also
        # be sent here.
        request_handler.end_headers()
        request_handler.wfile.write(b"Proxy Authentication Required.")
        return False

    def handle_request(self, request_handler):
        """Perform digest authentication for *request_handler*.

        Returns True when authentication succeeds (or is disabled
        because no users are configured); otherwise sends a 407
        challenge and returns False.
        """
        if not self._users:
            return True
        if "Proxy-Authorization" not in request_handler.headers:
            return self._return_auth_challenge(request_handler)
        auth_dict = self._create_auth_dict(
            request_handler.headers["Proxy-Authorization"])
        if auth_dict["username"] not in self._users:
            return self._return_auth_challenge(request_handler)
        password = self._users[auth_dict["username"]]
        # Each issued nonce may be consumed only once.
        if auth_dict.get("nonce") not in self._nonces:
            return self._return_auth_challenge(request_handler)
        self._nonces.remove(auth_dict["nonce"])
        # MSIE validates against the short path while urllib uses the
        # full path, so accept either one.
        for path in (request_handler.path, request_handler.short_path):
            if self._validate_auth(auth_dict, password,
                                   request_handler.command, path):
                return True
        return self._return_auth_challenge(request_handler)
# Proxy test infrastructure
class FakeProxyHandler(http.server.BaseHTTPRequestHandler):
    """This is a 'fake proxy' that makes it look like the entire
    internet has gone down due to a sudden zombie invasion.  Its main
    utility is in providing us with authentication support for
    testing.
    """
    # Shared by all request instances; tests reconfigure it in setUp().
    digest_auth_handler = DigestAuthHandler()
    def log_message(self, format, *args):
        # Silence BaseHTTPRequestHandler's per-request logging.
        # Uncomment the next line for debugging.
        # sys.stderr.write(format % args)
        pass
    def do_GET(self):
        (scm, netloc, path, params, query, fragment) = urllib.parse.urlparse(
            self.path, "http")
        # Keep the path-only form: digest validation tries both the full
        # request path and this short path.
        self.short_path = path
        if self.digest_auth_handler.handle_request(self):
            self.send_response(200, "OK")
            self.send_header("Content-Type", "text/html")
            self.end_headers()
            self.wfile.write(bytes("You've reached %s!<BR>" % self.path,
                                   "ascii"))
            self.wfile.write(b"Our apologies, but our server is down due to "
                             b"a sudden zombie invasion.")
# Test cases
class ProxyAuthTests(unittest.TestCase):
    """End-to-end digest-auth tests against the in-process fake proxy."""
    URL = "http://localhost"
    USER = "tester"
    PASSWD = "test123"
    REALM = "TestRealm"
    def setUp(self):
        # Configure the shared digest handler, start the proxy thread,
        # and build an opener that routes all http traffic through it.
        FakeProxyHandler.digest_auth_handler.set_users({
            self.USER : self.PASSWD
        })
        FakeProxyHandler.digest_auth_handler.set_realm(self.REALM)
        self.server = LoopbackHttpServerThread(FakeProxyHandler)
        self.server.start()
        self.server.ready.wait()
        proxy_url = "http://127.0.0.1:%d" % self.server.port
        handler = urllib.request.ProxyHandler({"http" : proxy_url})
        self._digest_auth_handler = urllib.request.ProxyDigestAuthHandler()
        self.opener = urllib.request.build_opener(
            handler, self._digest_auth_handler)
    def tearDown(self):
        # Shut down the proxy thread started in setUp().
        self.server.stop()
    def test_proxy_with_bad_password_raises_httperror(self):
        self._digest_auth_handler.add_password(self.REALM, self.URL,
                                               self.USER, self.PASSWD+"bad")
        FakeProxyHandler.digest_auth_handler.set_qop("auth")
        self.assertRaises(urllib.error.HTTPError,
                          self.opener.open,
                          self.URL)
    def test_proxy_with_no_password_raises_httperror(self):
        FakeProxyHandler.digest_auth_handler.set_qop("auth")
        self.assertRaises(urllib.error.HTTPError,
                          self.opener.open,
                          self.URL)
    def test_proxy_qop_auth_works(self):
        self._digest_auth_handler.add_password(self.REALM, self.URL,
                                               self.USER, self.PASSWD)
        FakeProxyHandler.digest_auth_handler.set_qop("auth")
        # Drain the response so the connection completes cleanly.
        result = self.opener.open(self.URL)
        while result.read():
            pass
        result.close()
    def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
        self._digest_auth_handler.add_password(self.REALM, self.URL,
                                               self.USER, self.PASSWD)
        FakeProxyHandler.digest_auth_handler.set_qop("auth-int")
        try:
            result = self.opener.open(self.URL)
        except urllib.error.URLError:
            # It's okay if we don't support auth-int, but we certainly
            # shouldn't receive any kind of exception here other than
            # a URLError.
            result = None
        if result:
            while result.read():
                pass
            result.close()
def GetRequestHandler(responses):
    """Build a request-handler class that replays canned *responses*.

    Each entry in *responses* is a ``(status, headers, body)`` triple;
    one entry is consumed per request, in order.  The generated class
    records request paths (and POST payloads) on ``requests`` and the
    most recent request's headers on ``headers_received``.
    """

    class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
        server_version = "TestHTTP/"
        requests = []          # paths and POST payloads seen so far
        headers_received = []  # headers of the most recent request
        port = 80              # substituted into '%(port)s' header values

        def do_GET(self):
            payload = self.send_head()
            if payload:
                self.wfile.write(payload)

        def do_POST(self):
            length = int(self.headers["Content-Length"])
            data = self.rfile.read(length)
            self.do_GET()
            self.requests.append(data)

        def send_head(self):
            FakeHTTPRequestHandler.headers_received = self.headers
            self.requests.append(self.path)
            status, hdrs, payload = responses.pop(0)
            self.send_response(status)
            for name, value in hdrs:
                self.send_header(name, value % {'port': self.port})
            if payload:
                self.send_header("Content-type", "text/plain")
                self.end_headers()
                return payload
            self.end_headers()
    return FakeHTTPRequestHandler
class TestUrlopen(unittest.TestCase):
    """Tests urllib2.urlopen using the network.

    These tests are not exhaustive.  Assuming that testing using files does a
    good job overall of some of the basic interface features.  There are no
    tests exercising the optional 'data' and 'proxies' arguments.  No tests
    for transparent redirection have been written.
    """
    def setUp(self):
        self.server = None
    def tearDown(self):
        # Stop the loopback server if a test started one.
        if self.server is not None:
            self.server.stop()
    def urlopen(self, url, data=None):
        """Open *url*, exercise the file-like reader API, return the body."""
        l = []
        f = urllib.request.urlopen(url, data)
        try:
            # Exercise various methods
            l.extend(f.readlines(200))
            l.append(f.readline())
            l.append(f.read(1024))
            l.append(f.read())
        finally:
            f.close()
        return b"".join(l)
    def start_server(self, responses=None):
        """Start a loopback server replaying *responses*; return its handler."""
        if responses is None:
            responses = [(200, [], b"we don't care")]
        handler = GetRequestHandler(responses)
        self.server = LoopbackHttpServerThread(handler)
        self.server.start()
        self.server.ready.wait()
        handler.port = self.server.port
        return handler
    def test_redirection(self):
        expected_response = b"We got here..."
        responses = [
            (302, [("Location", "http://localhost:%(port)s/somewhere_else")],
             ""),
            (200, [], expected_response)
        ]
        handler = self.start_server(responses)
        data = self.urlopen("http://localhost:%s/" % handler.port)
        # NOTE: assertEquals/assert_ aliases were removed in Python 3.12;
        # the canonical spellings are used throughout this class.
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/", "/somewhere_else"])
    def test_chunked(self):
        expected_response = b"hello world"
        chunked_start = (
            b'a\r\n'
            b'hello worl\r\n'
            b'1\r\n'
            b'd\r\n'
            b'0\r\n'
        )
        response = [(200, [("Transfer-Encoding", "chunked")], chunked_start)]
        handler = self.start_server(response)
        data = self.urlopen("http://localhost:%s/" % handler.port)
        self.assertEqual(data, expected_response)
    def test_404(self):
        expected_response = b"Bad bad bad..."
        handler = self.start_server([(404, [], expected_response)])
        try:
            self.urlopen("http://localhost:%s/weeble" % handler.port)
        except urllib.error.URLError as f:
            # HTTPError (a URLError subclass) carries the response body.
            data = f.read()
            f.close()
        else:
            self.fail("404 should raise URLError")
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/weeble"])
    def test_200(self):
        expected_response = b"pycon 2008..."
        handler = self.start_server([(200, [], expected_response)])
        data = self.urlopen("http://localhost:%s/bizarre" % handler.port)
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/bizarre"])
    def test_200_with_parameters(self):
        expected_response = b"pycon 2008..."
        handler = self.start_server([(200, [], expected_response)])
        data = self.urlopen("http://localhost:%s/bizarre" % handler.port,
                            b"get=with_feeling")
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"])
    def test_sending_headers(self):
        handler = self.start_server()
        req = urllib.request.Request("http://localhost:%s/" % handler.port,
                                     headers={"Range": "bytes=20-39"})
        urllib.request.urlopen(req)
        self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
    def test_basic(self):
        handler = self.start_server()
        open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
        for attr in ("read", "close", "info", "geturl"):
            self.assertTrue(hasattr(open_url, attr), "object returned from "
                            "urlopen lacks the %s attribute" % attr)
        try:
            self.assertTrue(open_url.read(), "calling 'read' failed")
        finally:
            open_url.close()
    def test_info(self):
        handler = self.start_server()
        try:
            open_url = urllib.request.urlopen(
                "http://localhost:%s" % handler.port)
            info_obj = open_url.info()
            self.assertIsInstance(info_obj, email.message.Message,
                                  "object returned by 'info' is not an "
                                  "instance of email.message.Message")
            self.assertEqual(info_obj.get_content_subtype(), "plain")
        finally:
            self.server.stop()
    def test_geturl(self):
        # Make sure same URL as opened is returned by geturl.
        handler = self.start_server()
        open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
        url = open_url.geturl()
        self.assertEqual(url, "http://localhost:%s" % handler.port)
    def test_bad_address(self):
        # Make sure proper exception is raised when connecting to a bogus
        # address.
        self.assertRaises(IOError,
                          # SF patch 809915: In Sep 2003, VeriSign started
                          # highjacking invalid .com and .net addresses to
                          # boost traffic to their own site.  This test
                          # started failing then.  One hopes the .invalid
                          # domain will be spared to serve its defined
                          # purpose.
                          urllib.request.urlopen,
                          "http://sadflkjsasf.i.nvali.d/")
def test_main():
    """Run both local-network test suites.

    run_unittest accepts multiple test classes, so a single call gives
    one combined run/result instead of two separate invocations.
    """
    support.run_unittest(ProxyAuthTests, TestUrlopen)
if __name__ == "__main__":
    # Run the local-network test suites when executed as a script.
    test_main()
``` |
[
{
"content": "Here is the snippet:\n```python\nfrom __future__ import unicode_literals\nfrom . import __version__ as app_version\n\n\napp_name = \"frappe\"\napp_title = \"Frappe Framework\"\napp_publisher = \"Frappe Technologies\"\napp_description = \"Full stack web framework with Python, Javascript, MariaDB, R... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\nfrom __future__ import unicode_literals\nfrom . import __version__ as app_version\n\n\napp_name = \"frappe\"\napp_title = \"Frappe Framework\"\napp_publisher = \"Frappe Technologies\"\napp_description = \"Full stack web framework with Python, Javasc... | ```python
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Frappe Technologies"
app_description = "Full stack web framework with Python, Javascript, MariaDB, Redis, Node"
app_icon = "octicon octicon-circuit-board"
app_color = "orange"
source_link = "https://github.com/frappe/frappe"
app_license = "MIT"
app_logo_url = '/assets/frappe/images/frappe-framework-logo.png'
develop_version = '13.x.x-develop'
app_email = "info@frappe.io"
docs_app = "frappe_io"
translator_url = "https://translatev2.erpnext.com"
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
page_js = {
"setup-wizard": "public/js/frappe/setup_wizard.js"
}
# website
app_include_js = [
"assets/js/libs.min.js",
"assets/js/desk.min.js",
"assets/js/list.min.js",
"assets/js/form.min.js",
"assets/js/control.min.js",
"assets/js/report.min.js",
]
app_include_css = [
"assets/css/desk.min.css",
"assets/css/list.min.css",
"assets/css/form.min.css",
"assets/css/report.min.css",
]
doctype_js = {
"Web Page": "public/js/frappe/utils/web_template.js",
"Website Settings": "public/js/frappe/utils/web_template.js"
}
web_include_js = [
"website_script.js"
]
web_include_css = []
website_route_rules = [
{"from_route": "/blog/<category>", "to_route": "Blog Post"},
{"from_route": "/kb/<category>", "to_route": "Help Article"},
{"from_route": "/newsletters", "to_route": "Newsletter"},
{"from_route": "/profile", "to_route": "me"},
]
base_template = "templates/base.html"
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
email_append_to = ["Event", "ToDo", "Communication"]
get_rooms = 'frappe.chat.doctype.chat_room.chat_room.get_rooms'
calendars = ["Event"]
leaderboards = "frappe.desk.leaderboard.get_leaderboards"
# login
on_session_creation = [
"frappe.core.doctype.activity_log.feed.login_feed",
"frappe.core.doctype.user.user.notify_admin_access_to_system_manager"
]
on_logout = "frappe.core.doctype.session_default_settings.session_default_settings.clear_session_defaults"
# permissions
permission_query_conditions = {
"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
"ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
"User": "frappe.core.doctype.user.user.get_permission_query_conditions",
"Dashboard Settings": "frappe.desk.doctype.dashboard_settings.dashboard_settings.get_permission_query_conditions",
"Notification Log": "frappe.desk.doctype.notification_log.notification_log.get_permission_query_conditions",
"Dashboard Chart": "frappe.desk.doctype.dashboard_chart.dashboard_chart.get_permission_query_conditions",
"Number Card": "frappe.desk.doctype.number_card.number_card.get_permission_query_conditions",
"Notification Settings": "frappe.desk.doctype.notification_settings.notification_settings.get_permission_query_conditions",
"Note": "frappe.desk.doctype.note.note.get_permission_query_conditions",
"Kanban Board": "frappe.desk.doctype.kanban_board.kanban_board.get_permission_query_conditions",
"Contact": "frappe.contacts.address_and_contact.get_permission_query_conditions_for_contact",
"Address": "frappe.contacts.address_and_contact.get_permission_query_conditions_for_address",
"Communication": "frappe.core.doctype.communication.communication.get_permission_query_conditions_for_communication",
"Workflow Action": "frappe.workflow.doctype.workflow_action.workflow_action.get_permission_query_conditions",
"Prepared Report": "frappe.core.doctype.prepared_report.prepared_report.get_permission_query_condition"
}
has_permission = {
"Event": "frappe.desk.doctype.event.event.has_permission",
"ToDo": "frappe.desk.doctype.todo.todo.has_permission",
"User": "frappe.core.doctype.user.user.has_permission",
"Note": "frappe.desk.doctype.note.note.has_permission",
"Dashboard Chart": "frappe.desk.doctype.dashboard_chart.dashboard_chart.has_permission",
"Number Card": "frappe.desk.doctype.number_card.number_card.has_permission",
"Kanban Board": "frappe.desk.doctype.kanban_board.kanban_board.has_permission",
"Contact": "frappe.contacts.address_and_contact.has_permission",
"Address": "frappe.contacts.address_and_contact.has_permission",
"Communication": "frappe.core.doctype.communication.communication.has_permission",
"Workflow Action": "frappe.workflow.doctype.workflow_action.workflow_action.has_permission",
"File": "frappe.core.doctype.file.file.has_permission",
"Prepared Report": "frappe.core.doctype.prepared_report.prepared_report.has_permission"
}
has_website_permission = {
"Address": "frappe.contacts.doctype.address.address.has_website_permission"
}
standard_queries = {
"User": "frappe.core.doctype.user.user.user_query"
}
doc_events = {
"*": {
"after_insert": [
"frappe.event_streaming.doctype.event_update_log.event_update_log.notify_consumers"
],
"on_update": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.core.doctype.activity_log.feed.update_feed",
"frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions",
"frappe.automation.doctype.assignment_rule.assignment_rule.apply",
"frappe.automation.doctype.milestone_tracker.milestone_tracker.evaluate_milestone",
"frappe.core.doctype.file.file.attach_files_to_document",
"frappe.event_streaming.doctype.event_update_log.event_update_log.notify_consumers",
"frappe.automation.doctype.assignment_rule.assignment_rule.update_due_date",
],
"after_rename": "frappe.desk.notifications.clear_doctype_notifications",
"on_cancel": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions"
],
"on_trash": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions",
"frappe.event_streaming.doctype.event_update_log.event_update_log.notify_consumers"
],
"on_change": [
"frappe.social.doctype.energy_point_rule.energy_point_rule.process_energy_points"
]
},
"Event": {
"after_insert": "frappe.integrations.doctype.google_calendar.google_calendar.insert_event_in_google_calendar",
"on_update": "frappe.integrations.doctype.google_calendar.google_calendar.update_event_in_google_calendar",
"on_trash": "frappe.integrations.doctype.google_calendar.google_calendar.delete_event_from_google_calendar",
},
"Contact": {
"after_insert": "frappe.integrations.doctype.google_contacts.google_contacts.insert_contacts_to_google_contacts",
"on_update": "frappe.integrations.doctype.google_contacts.google_contacts.update_contacts_to_google_contacts",
},
"DocType": {
"after_insert": "frappe.cache_manager.build_domain_restriced_doctype_cache",
"after_save": "frappe.cache_manager.build_domain_restriced_doctype_cache",
},
"Page": {
"after_insert": "frappe.cache_manager.build_domain_restriced_page_cache",
"after_save": "frappe.cache_manager.build_domain_restriced_page_cache",
}
}
scheduler_events = {
"cron": {
"0/15 * * * *": [
"frappe.oauth.delete_oauth2_data",
"frappe.website.doctype.web_page.web_page.check_publish_status",
"frappe.twofactor.delete_all_barcodes_for_users"
]
},
"all": [
"frappe.email.queue.flush",
"frappe.email.doctype.email_account.email_account.pull",
"frappe.email.doctype.email_account.email_account.notify_unreplied",
"frappe.integrations.doctype.razorpay_settings.razorpay_settings.capture_payment",
'frappe.utils.global_search.sync_global_search',
"frappe.monitor.flush",
],
"hourly": [
"frappe.model.utils.link_count.update_link_count",
'frappe.model.utils.user_settings.sync_user_settings',
"frappe.utils.error.collect_error_snapshots",
"frappe.desk.page.backups.backups.delete_downloadable_backups",
"frappe.deferred_insert.save_to_db",
"frappe.desk.form.document_follow.send_hourly_updates",
"frappe.integrations.doctype.google_calendar.google_calendar.sync",
"frappe.email.doctype.newsletter.newsletter.send_scheduled_email",
"frappe.utils.password.delete_password_reset_cache"
],
"daily": [
"frappe.email.queue.set_expiry_for_email_queue",
"frappe.desk.notifications.clear_notifications",
"frappe.core.doctype.error_log.error_log.set_old_logs_as_seen",
"frappe.desk.doctype.event.event.send_event_digest",
"frappe.sessions.clear_expired_sessions",
"frappe.email.doctype.notification.notification.trigger_daily_alerts",
"frappe.realtime.remove_old_task_logs",
"frappe.utils.scheduler.restrict_scheduler_events_if_dormant",
"frappe.email.doctype.auto_email_report.auto_email_report.send_daily",
"frappe.website.doctype.personal_data_deletion_request.personal_data_deletion_request.remove_unverified_record",
"frappe.desk.form.document_follow.send_daily_updates",
"frappe.social.doctype.energy_point_settings.energy_point_settings.allocate_review_points",
"frappe.integrations.doctype.google_contacts.google_contacts.sync",
"frappe.automation.doctype.auto_repeat.auto_repeat.make_auto_repeat_entry",
"frappe.automation.doctype.auto_repeat.auto_repeat.set_auto_repeat_as_completed",
"frappe.email.doctype.unhandled_email.unhandled_email.remove_old_unhandled_emails",
"frappe.core.doctype.prepared_report.prepared_report.delete_expired_prepared_reports",
"frappe.core.doctype.log_settings.log_settings.run_log_clean_up"
],
"daily_long": [
"frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_daily",
"frappe.utils.change_log.check_for_update",
"frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_daily",
"frappe.integrations.doctype.google_drive.google_drive.daily_backup"
],
"weekly_long": [
"frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_weekly",
"frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_weekly",
"frappe.desk.doctype.route_history.route_history.flush_old_route_records",
"frappe.desk.form.document_follow.send_weekly_updates",
"frappe.social.doctype.energy_point_log.energy_point_log.send_weekly_summary",
"frappe.integrations.doctype.google_drive.google_drive.weekly_backup"
],
"monthly": [
"frappe.email.doctype.auto_email_report.auto_email_report.send_monthly",
"frappe.social.doctype.energy_point_log.energy_point_log.send_monthly_summary"
],
"monthly_long": [
"frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_monthly"
]
}
get_translated_dict = {
("doctype", "System Settings"): "frappe.geo.country_info.get_translated_dict",
("page", "setup-wizard"): "frappe.geo.country_info.get_translated_dict"
}
sounds = [
{"name": "email", "src": "/assets/frappe/sounds/email.mp3", "volume": 0.1},
{"name": "submit", "src": "/assets/frappe/sounds/submit.mp3", "volume": 0.1},
{"name": "cancel", "src": "/assets/frappe/sounds/cancel.mp3", "volume": 0.1},
{"name": "delete", "src": "/assets/frappe/sounds/delete.mp3", "volume": 0.05},
{"name": "click", "src": "/assets/frappe/sounds/click.mp3", "volume": 0.05},
{"name": "error", "src": "/assets/frappe/sounds/error.mp3", "volume": 0.1},
{"name": "alert", "src": "/assets/frappe/sounds/alert.mp3", "volume": 0.2},
# {"name": "chime", "src": "/assets/frappe/sounds/chime.mp3"},
# frappe.chat sounds
{ "name": "chat-message", "src": "/assets/frappe/sounds/chat-message.mp3", "volume": 0.1 },
{ "name": "chat-notification", "src": "/assets/frappe/sounds/chat-notification.mp3", "volume": 0.1 }
# frappe.chat sounds
]
bot_parsers = [
'frappe.utils.bot.ShowNotificationBot',
'frappe.utils.bot.GetOpenListBot',
'frappe.utils.bot.ListBot',
'frappe.utils.bot.FindBot',
'frappe.utils.bot.CountBot'
]
setup_wizard_exception = [
"frappe.desk.page.setup_wizard.setup_wizard.email_setup_wizard_exception",
"frappe.desk.page.setup_wizard.setup_wizard.log_setup_wizard_exception"
]
before_migrate = ['frappe.patches.v11_0.sync_user_permission_doctype_before_migrate.execute']
after_migrate = ['frappe.website.doctype.website_theme.website_theme.after_migrate']
otp_methods = ['OTP App','Email','SMS']
user_privacy_documents = [
{
'doctype': 'File',
'match_field': 'attached_to_name',
'personal_fields': ['file_name', 'file_url'],
'applies_to_website_user': 1
},
{
'doctype': 'Email Group Member',
'match_field': 'email',
},
{
'doctype': 'Email Unsubscribe',
'match_field': 'email',
},
{
'doctype': 'Email Queue',
'match_field': 'sender',
},
{
'doctype': 'Email Queue Recipient',
'match_field': 'recipient',
},
{
'doctype': 'Contact',
'match_field': 'email_id',
'personal_fields': ['first_name', 'last_name', 'phone', 'mobile_no'],
},
{
'doctype': 'Contact Email',
'match_field': 'email_id',
},
{
'doctype': 'Address',
'match_field': 'email_id',
'personal_fields': ['address_title', 'address_line1', 'address_line2', 'city', 'county', 'state', 'pincode',
'phone', 'fax'],
},
{
'doctype': 'Communication',
'match_field': 'sender',
'personal_fields': ['sender_full_name', 'phone_no', 'content'],
},
{
'doctype': 'Communication',
'match_field': 'recipients',
},
{
'doctype': 'User',
'match_field': 'name',
'personal_fields': ['email', 'username', 'first_name', 'middle_name', 'last_name', 'full_name', 'birth_date',
'user_image', 'phone', 'mobile_no', 'location', 'banner_image', 'interest', 'bio', 'email_signature'],
'applies_to_website_user': 1
},
]
global_search_doctypes = {
"Default": [
{"doctype": "Contact"},
{"doctype": "Address"},
{"doctype": "ToDo"},
{"doctype": "Note"},
{"doctype": "Event"},
{"doctype": "Blog Post"},
{"doctype": "Dashboard"},
{"doctype": "Country"},
{"doctype": "Currency"},
{"doctype": "Newsletter"},
{"doctype": "Letter Head"},
{"doctype": "Workflow"},
{"doctype": "Web Page"},
{"doctype": "Web Form"}
]
}
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom future.utils import string_types, text_type, native_str\n\nimport unittest\nimport pickle\nimport re\nimport ... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom future.utils import string_types, text_type, native_str\n\nimport unittest\nimport pickle\nim... | ```python
#!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from future.utils import string_types, text_type, native_str
import unittest
import pickle
import re
import types
from fileseq.utils import *
from fileseq import FrameSet, framesToFrameRange, ParseException
def _yrange(first, last=None, incr=1):
"""
Simple value generator for the 1-20y5 syntax.
:param first: as per xrange
:param last: as per xrange
:param incr: as per xrange
:return: generator
"""
if last is None:
first, last = 0, first
whole = list(range(first, last, 1 if incr >= 0 else -1))
filt = set(whole[::abs(incr)])
for i in whole:
if i not in filt:
yield i
def _srange(first, last=None, incr=1):
"""
Simple value generator for the 1-20:5 syntax.
:param first: as per xrange
:param last: as per xrange
:param incr: as per xrange
:return: generator
"""
if last is None:
first, last = 0, first
whole = list(range(first, last, 1 if incr >= 0 else -1))
sent = set()
for stagger in range(abs(incr), 0, -1):
for i in whole[::stagger]:
if i not in sent:
sent.add(i)
yield i
def _uchain(*args):
"""
As per itertools.chain, but will only yield items not previously yielded.
:param args: one or more iterables to chain
:return: generator
"""
sent = set()
for i in chain(*args):
if i not in sent:
yield i
sent.add(i)
# (name, input, expected frame list) triples driving the dynamically
# generated success tests.
FRAME_SET_SHOULD_SUCCEED = [
    # the null value
    ("Empty", '', []),
    # individual frames
    ('Zero', '0', [0]),
    ('NegZero', '-0', [0]),
    ('Pos', '1', [1]),
    ('Neg', '-1', [-1]),
    # permutations on comma separated individual frames
    ('DupePos', '1,1,1', [1]),
    ('DupeNeg', '-1,-1,-1', [-1]),
    ('DupeMix', '-1,1,-1,1', [-1,1]),
    ('CommaSepPos', '1,3,17', [1,3,17]),
    ('CommaSepNeg', '-1,-3,-17', [-1,-3,-17]),
    ('CommaSepMix', '1,-3,17', [1,-3,17]),
    ('CommaSepPosInv', '17,3,1', [17,3,1]),
    ('CommaSepNegInv', '-17,-3,-1', [-17,-3,-1]),
    ('CommaSepMixInv', '17,-3,1', [17,-3,1]),
    # NOTE(review): exact duplicate of the previous entry — likely
    # unintentional; it generates the same test twice.
    ('CommaSepMixInv', '17,-3,1', [17,-3,1]),
    ("CommaTrailing", "1,", [1]),
    ("CommaLeading", ",1", [1]),
    ("CommaDupes", "1,,,,,,2,,,,,3,,,", [1,2,3]),
    # args that str(arg) cast to a valid FrameSet
    ('PosInt', 1, [1]),
    ('NegInt', -1, [-1]),
    ('FrameSet', FrameSet("1-20"), list(range(1,21))),
    # unicode args that are the equivalent of a valid FrameSet
    ('UnicodeEquivalentRange', u'-1--20', list(range(-1,-21,-1))),
    ('UnicodeEquivalentRangeChunk', u'-1--20x5', list(range(-1,-21,-5))),
    ('UnicodeEquivalentRangeFill', u'-1--20y5', list(_yrange(-1,-21,-5))),
    ('UnicodeEquivalentRangeStagger', u'-1--20:5', list(_srange(-1,-21,-5))),
]
# (name, range string, expected frame list) tables; every LO entry is
# tested alone and also comma-combined with every HI entry (see the
# cross-product loop below).
LO_RANGES = [
    # low value permutations of signed integer ranges, these will all be individually tested
    ('PosToPos', '1-20', list(range(1,21,1))),
    ('NegToPos', '-1-20', list(range(-1,21,1))),
    ('NegToNeg', '-1--20', list(range(-1,-21,-1))),
    ('PosToNeg', '1--20', list(range(1,-21,-1))),
    ('PosToPosInv', '20-1', list(range(20,0,-1))),
    ('NegToPosInv', '-20-1', list(range(-20,2,1))),
    ('NegToNegInv', '-20--1', list(range(-20,0,1))),
    ('PosToNegInv', '20--1', list(range(20,-2,-1))),
    ('PosToPosChunk', '1-20x5', list(range(1,21,5))),
    ('NegToPosChunk', '-1-20x5', list(range(-1,21,5))),
    ('NegToNegChunk', '-1--20x5', list(range(-1,-21,-5))),
    ('PosToNegChunk', '1--20x5', list(range(1,-21,-5))),
    ('PosToPosChunkInv', '20-1x5', list(range(20,0,-5))),
    ('NegToPosChunkInv', '-20-1x5', list(range(-20,2,5))),
    ('NegToNegChunkInv', '-20--1x5', list(range(-20,0,5))),
    ('PosToNegChunkInv', '20--1x5', list(range(20,-2,-5))),
    ('PosToPosNegChunkInv', '20-1x-1', list(range(20,0,-1))),
    ('PosToPosFill', '1-20y5', list(_yrange(1,21,5))),
    ('NegToPosFill', '-1-20y5', list(_yrange(-1,21,5))),
    ('NegToNegFill', '-1--20y5', list(_yrange(-1,-21,-5))),
    ('PosToNegFill', '1--20y5', list(_yrange(1,-21,-5))),
    ('PosToPosFillInv', '20-1y5', list(_yrange(20,0,-5))),
    ('NegToPosFillInv', '-20-1y5', list(_yrange(-20,2,5))),
    ('NegToNegFillInv', '-20--1y5', list(_yrange(-20,0,5))),
    ('PosToNegFillInv', '20--1y5', list(_yrange(20,-2,-5))),
    ('PosToPosStagger', '1-20:5', list(_srange(1,21,5))),
    ('NegToPosStagger', '-1-20:5', list(_srange(-1,21,5))),
    ('NegToNegStagger', '-1--20:5', list(_srange(-1,-21,-5))),
    ('PosToNegStagger', '1--20:5', list(_srange(1,-21,-5))),
    ('PosToPosStaggerInv', '20-1:5', list(_srange(20,0,-5))),
    ('NegToPosStaggerInv', '-20-1:5', list(_srange(-20,2,5))),
    ('NegToNegStaggerInv', '-20--1:5', list(_srange(-20,0,5))),
    ('PosToNegStaggerInv', '20--1:5', list(_srange(20,-2,-5)))]
HI_RANGES = [
    # high value permutations of signed integer ranges, these will be permuted with the LO_RANGES for testing
    ('PosToPos', '21-30', list(range(21,31,1))),
    ('NegToPos', '-21-30', list(range(-21,31,1))),
    ('NegToNeg', '-21--30', list(range(-21,-31,-1))),
    ('PosToNeg', '21--30', list(range(21,-31,-1))),
    ('PosToPosInv', '30-21', list(range(30,20,-1))),
    ('NegToPosInv', '-30-21', list(range(-30,22,1))),
    ('NegToNegInv', '-30--21', list(range(-30,-20,1))),
    ('PosToNegInv', '30--21', list(range(30,-22,-1))),
    ('PosToPosChunk', '21-30x5', list(range(21,31,5))),
    ('NegToPosChunk', '-21-30x5', list(range(-21,31,5))),
    ('NegToNegChunk', '-21--30x5', list(range(-21,-31,-5))),
    ('PosToNegChunk', '21--30x5', list(range(21,-31,-5))),
    ('PosToPosChunkInv', '30-21x5', list(range(30,20,-5))),
    ('NegToPosChunkInv', '-30-21x5', list(range(-30,22,5))),
    ('NegToNegChunkInv', '-30--21x5', list(range(-30,-20,5))),
    ('PosToNegChunkInv', '30--21x5', list(range(30,-22,-5))),
    ('PosToPosFill', '21-30y5', list(_yrange(21,31,5))),
    ('NegToPosFill', '-21-30y5', list(_yrange(-21,31,5))),
    ('NegToNegFill', '-21--30y5', list(_yrange(-21,-31,-5))),
    ('PosToNegFill', '21--30y5', list(_yrange(21,-31,-5))),
    ('PosToPosFillInv', '30-21y5', list(_yrange(30,20,-5))),
    ('NegToPosFillInv', '-30-21y5', list(_yrange(-30,22,5))),
    ('NegToNegFillInv', '-30--21y5', list(_yrange(-30,-20,5))),
    ('PosToNegFillInv', '30--21y5', list(_yrange(30,-22,-5))),
    ('PosToPosStagger', '21-30:5', list(_srange(21,31,5))),
    ('NegToPosStagger', '-21-30:5', list(_srange(-21,31,5))),
    ('NegToNegStagger', '-21--30:5', list(_srange(-21,-31,-5))),
    ('PosToNegStagger', '21--30:5', list(_srange(21,-31,-5))),
    ('PosToPosStaggerInv', '30-21:5', list(_srange(30,20,-5))),
    ('NegToPosStaggerInv', '-30-21:5', list(_srange(-30,22,5))),
    ('NegToNegStaggerInv', '-30--21:5', list(_srange(-30,-20,5))),
    ('PosToNegStaggerInv', '30--21:5', list(_srange(30,-22,-5)))]
# Register every low range on its own, plus its comma-joined combination
# with each high range; the combined expectation is the order-preserving,
# de-duplicated chain of both ranges' expected frames.
for lo in LO_RANGES:
    FRAME_SET_SHOULD_SUCCEED.append(lo)
    for hi in HI_RANGES:
        name = 'CommaSep{0}To{1}'.format(lo[0], hi[0])
        test = ','.join([lo[1], hi[1]])
        expect = list(_uchain(lo[2], hi[2]))
        FRAME_SET_SHOULD_SUCCEED.append((name, test, expect))
# (name, input) pairs expected to fail FrameSet parsing.
FRAME_SET_SHOULD_FAIL = [
    ("PosWChunkChar", "1x5"),
    ("NegWChunkChar", "-1x5"),
    ("PosWFillChar", "1y5"),
    ("NegWFillChar", "-1y5"),
    ("PosWStaggerChar", "1:5"),
    ("NegWStaggerChar", "-1:5"),
    ("PosWSepChar", "1-"),
    ("NegWSepChar", "-1-"),
    ("BadAlphaChars", "bilbo"),
    ("RangeWDupeSpecialChar", "1-20x:y5"),
    ("RangeWBadCaseChunkChar", "1-20X5"),
    ("RangeWBadCaseFillChar", "1-20Y5"),
    ("RangeWChunkZero", "1-20x0"),
    ("RangeWFillZero", "1-20y0"),
    ("RangeWStaggerZero", "1-20:0"),
    ("RangeWNegChunk", "1-20x-5"),
    ("RangeWNegFill", "1-20y-5"),
    ("RangeWNegStagger", "1-20:-5"),
    ("ActualNone", None),
]
# (name, start, end, step, expected frange string) cases — presumably
# for a from_range-style constructor; the consuming harness is outside
# this chunk, so confirm against it.
FRAME_SET_FROM_RANGE_SHOULD_SUCCEED = [
    # individual frames
    ('Zero', 0, 0, 1, '0'),
    ('Pos', 1, 1, 1, '1'),
    # ranges
    ('PosToPos', 1, 20, 1, '1-20'),
    ('NegToPos', -1, 1, 1, '-1-1'),
    ('PosToNeg', 1, -1, 1, '1--1'),
    ('PosToPosInv', 20, 1, 1, '20-1'),
    ('NegToPosInv', -20, 1, 1, '-20-1'),
    ('NegToNegInv', -20, -1, 1, '-20--1'),
    ('PosToNegInv', 20, -1, 1, '20--1'),
    ('PosToPosChunk', 1, 20, 5, '1-20x5'),
    ('NegToPosChunk', -1, 20, 5, '-1-20x5'),
    ('NegToNegChunk', -1, -20, 5, '-1--20x5'),
    ('PosToNegChunk', 1, -20, 5, '1--20x5'),
    ('PosToPosChunkInv', 20, 1, 5, '20-1x5'),
    ('NegToPosChunkInv', -20, 1, 5, '-20-1x5'),
    ('NegToNegChunkInv', -20, -1, 5, '-20--1x5'),
    ('PosToNegChunkInv', 20, -1, 5, '20--1x5'),
    ('PosToPosNegChunkInv', 20, 1, -1, '20-1x-1'),
]
class TestFrameSet(unittest.TestCase):
"""
Exercise the TestFrame object. Due to the sheer number of permutations, we'll add most tests dynamically.
"""
def _check___init___range(self, test, expect):
"""
Harness to test if the FrameSet.__init__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'FrameSet("{0}")._frange != {0}: got {1}'
r = f._frange
self.assertEqual(r, native_str(test), m.format(test, r))
m = u'FrameSet("{0}")._frange returns {1}: got {2}'
self.assertIsInstance(r, native_str, m.format(test, native_str, type(r)))
def _check___init___items(self, test, expect):
"""
Harness to test if the FrameSet.__init__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'FrameSet("{0}")._items != {1}: got {2}'
r = f._items
self.assertEqual(r, set(expect), m.format(test, set(expect), r))
m = u'FrameSet("{0}")._FrameSet__items returns {1}: got {2}'
self.assertIsInstance(r, frozenset, m.format(test, frozenset, type(r)))
def _check___init___order(self, test, expect):
"""
Harness to test if the FrameSet.__init__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'FrameSet("{0}")._order != {1}: got {2}'
r = f._order
self.assertEqual(r, tuple(expect), m.format(test, tuple(expect), r))
m = u'FrameSet("{0}")._order returns {1}: got {2}'
self.assertIsInstance(r, tuple, m.format(test, tuple, type(r)))
def _check___init____malformed(self, test):
"""
Harness to test if the FrameSet.__init__ call properly handles malformed strings.
:param test: the string to pass to FrameSet
:return: None
"""
try:
r = FrameSet(test)
except ParseException as err:
r = err
except Exception as err:
r = err
m = u'FrameSet("{0}") should fail: got {1}'
self.assertIsInstance(r, ParseException, m.format(test, r))
def _check___str__(self, test, expect):
"""
Harness to test if the FrameSet.__str__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'str(FrameSet("{0}")) != {0}: got {1}'
r = str(f)
self.assertEqual(r, native_str(test), m.format(test, r))
m = u'str(FrameSet("{0}")) returns {1}: got {2}'
self.assertIsInstance(r, native_str, m.format(test, native_str, type(r)))
def _check___len__(self, test, expect):
"""
Harness to test if the FrameSet.__len__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'len(FrameSet("{0}")) != {1}: got {2}'
r = len(f)
self.assertEqual(r, len(expect), m.format(test, len(expect), r))
m = u'len(FrameSet("{0}")) returns {1}: got {2}'
self.assertIsInstance(r, int, m.format(test, int, type(r)))
def _check___getitem__(self, test, expect):
"""
Harness to test if the FrameSet.__getitem__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
i = len(expect) // 2
m = u'FrameSet("{0}")[{1}] != {2}: got {3}'
# the empty FrameSet is expected to always fail
if not test and not expect:
self.assertRaises(IndexError, f.__getitem__, i)
return
try:
r = f[i]
except Exception as err:
r = repr(err)
self.assertEqual(r, expect[i], m.format(test, i, expect[i], r))
m = u'FrameSet("{0}")[{1}] returns {2}: got {3}'
self.assertIsInstance(r, int, m.format(test, i, int, type(r)))
try:
r = f[:-1:2]
except Exception as err:
r = repr(err)
e = tuple(expect[:-1:2])
m = u'FrameSet("{0}")[:1:2] != {1}: got {2}'
self.assertEqual(r, e, m.format(test, e, r))
def _check_start(self, test, expect):
"""
Harness to test if the FrameSet.start call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'FrameSet("{0}").start() != {1}: got {2}'
# the empty FrameSet is expected to always fail
if not test and not expect:
self.assertRaises(IndexError, f.start)
return
try:
r = f.start()
except Exception as err:
r = repr(err)
self.assertEqual(r, expect[0], m.format(test, expect[0], r))
m = u'FrameSet("{0}").start() returns {1}: got {2}'
self.assertIsInstance(r, int, m.format(test, int, type(r)))
def _check_end(self, test, expect):
"""
Harness to test if the FrameSet.end call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'FrameSet("{0}").end() != {1}: got {2}'
# the empty FrameSet is expected to always fail
if not test and not expect:
self.assertRaises(IndexError, f.end)
return
try:
r = f.end()
except Exception as err:
r = repr(err)
self.assertEqual(r, expect[-1], m.format(test, expect[-1], r))
m = u'FrameSet("{0}").end() returns {1}: got {2}'
self.assertIsInstance(r, int, m.format(test, int, type(r)))
    def _check_index(self, test, expect):
        """
        Harness to test if the FrameSet.index call works properly.
        :param test: the string to pass to FrameSet
        :param expect: the expected list of values that FrameSet will hold
        :return: None
        """
        f = FrameSet(test)
        # the empty FrameSet is expected to always fail
        # NOTE(review): this empty-set branch exercises f.frame, not f.index --
        # looks like a copy-paste from _check_frame; confirm the intended
        # empty-FrameSet behavior of index() before changing it
        if not test and not expect:
            self.assertRaises(IndexError, f.frame, 0)
            return
        # probe with the middle value actually held by the set
        i = expect[len(expect) // 2]
        m = u'FrameSet("{0}").index({1}) != {2}: got {3}'
        try:
            r = f.index(i)
        except Exception as err:
            r = repr(err)
        self.assertEqual(r, expect.index(i), m.format(test, i, expect.index(i), r))
        # index() must return a plain int position
        m = u'FrameSet("{0}").index({1}) returns {2}: got {3}'
        self.assertIsInstance(r, int, m.format(test, i, int, type(r)))
def _check_frame(self, test, expect):
"""
Harness to test if the FrameSet.frame call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is expected to always fail
if not test and not expect:
self.assertRaises(IndexError, f.frame, 0)
return
i = len(expect) // 2
m = u'FrameSet("{0}").frame({1}) != {2}: got {3}'
try:
r = f.frame(i)
except Exception as err:
r = repr(err)
self.assertEqual(r, expect[i], m.format(test, i, expect[i], r))
m = u'FrameSet("{0}").frame({1}) returns {2}: got {3}'
self.assertIsInstance(r, int, m.format(test, i, int, type(r)))
def _check_hasFrameTrue(self, test, expect):
"""
Harness to test if the FrameSet.hasFrame call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is expected to always fail
if not test and not expect:
self.assertFalse(f.hasFrame(1))
return
i = max(expect)
m = u'FrameSet("{0}").hasFrame({1}) != {2}: got {3}'
r = f.hasFrame(i)
self.assertTrue(r, m.format(test, i, i in expect, r))
m = u'FrameSet("{0}").frame({1}) returns {2}: got {3}'
self.assertIsInstance(r, bool, m.format(test, i, bool, type(r)))
def _check_hasFrameFalse(self, test, expect):
"""
Harness to test if the FrameSet.hasFrame call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is expected to always fail
if not test and not expect:
self.assertFalse(f.hasFrame(1))
return
i = max(expect) + 1
m = u'FrameSet("{0}").hasFrame({1}) != {2}: got {3}'
r = f.hasFrame(i)
self.assertFalse(r, m.format(test, i, i in expect, r))
m = u'FrameSet("{0}").frame({1}) returns {2}: got {3}'
self.assertIsInstance(r, bool, m.format(test, i, bool, type(r)))
def _check___iter__(self, test, expect):
"""
Harness to test if the FrameSet.__iter__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'list(FrameSet("{0}")) != {1}: got {2}'
r = f.__iter__()
self.assertEqual(list(r), expect, m.format(test, expect, list(r)))
m = u'FrameSet("{0}").__iter__ returns {1}: got {2}'
self.assertIsInstance(r, types.GeneratorType, m.format(test, types.GeneratorType, type(r)))
def _check_canSerialize(self, test, expect):
"""
Harness to test if the FrameSet.__getstate__ and FrameSet.__setstate__ calls allowing pickling.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
f2 = pickle.loads(pickle.dumps(f))
m = u'FrameSet("{0}") does not pickle correctly'
self.assertIsInstance(f2, FrameSet, m.format(test))
self.assertTrue(str(f) == str(f2) and list(f) == list(f2), m.format(test))
# test old objects being unpickled through new lib
state = {'__frange': f._frange, '__set': set(f._items), '__list': list(f._order)}
f2 = FrameSet.__new__(FrameSet)
f2.__setstate__(state)
self.assertTrue(str(f) == str(f2) and list(f) == list(f2), m.format(test))
def _check_frameRange(self, test, expect):
"""
Harness to test if the FrameSet.frameRange call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet always has a frameRange of ''
if not test and not expect:
self.assertEqual(f.frameRange(), '')
return
p1 = r'((?<![xy:-])-?\d+)'
l = max([max([len(i) for i in re.findall(p1, str(f))]) + 1, 4])
p2 = r'(-?\d+)(?:(-)(-?\d+)([xy:]\d+)?)?'
def replace(match):
start, sep, end, step = match.groups()
if start:
start = start.zfill(l)
if end:
end = end.zfill(l)
return ''.join(o for o in [start, sep, end, step] if o)
expect = re.sub(p2, replace, str(f))
try:
r = f.frameRange(l)
except Exception as err:
r = repr(err)
m = u'FrameSet("{0}").frameRange({1}) != "{2}": got "{3}"'
self.assertEqual(r, expect, m.format(test, l, expect, r))
m = u'FrameSet("{0}").frameRange({1}) returns {2}: got {3}'
self.assertIsInstance(r, native_str, m.format(test, l, native_str, type(r)))
def _check_invertedFrameRange(self, test, expect):
"""
Harness to test if the FrameSet.invertedFrameRange call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'FrameSet("{0}").invertedFrameRange() != "{1}": got "{2}"'
r = f.invertedFrameRange()
t = sorted(f)
c = sorted(FrameSet(r) if r else [])
# the empty FrameSet will always return '' for inverted and normal
# FrameRange
if not test and not expect:
self.assertEqual(r, '')
else:
e = [i for i in range(t[0], t[-1]) if i not in t]
self.assertEqual(c, e, m.format(test, e, c))
m = u'FrameSet("{0}").invertedFrameRange() returns {1}: got {2}'
self.assertIsInstance(r, native_str, m.format(test, native_str, type(r)))
def _check_normalize(self, test, expect):
"""
Harness to test if the FrameSet.normalize call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
m = u'set(FrameSet("{0}").normalize()) != {1}: got {2}'
r = f.normalize()
self.assertEqual(set(f), set(r), m.format(test, set(expect), set(r)))
m = u'FrameSet("{0}").normalize() returns {1}: got {2}'
self.assertIsInstance(r, FrameSet, m.format(test, FrameSet, type(r)))
def _check_isFrameRange(self, test, expect):
"""
Harness to test if the FrameSet.isFrameRange call works properly.
:param test: the string to pass to FrameSet.isFrameRange
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
r = FrameSet.isFrameRange(test)
m = u'FrameSet.isFrameRange("{0}") != {1}: got {2}'
self.assertEqual(r, expect, m.format(test, expect, r))
m = u'FrameSet.isFrameRange("{0}") returns {1}: got {2}'
self.assertIsInstance(r, bool, m.format(test, bool, type(r)))
def _check_fromIterable(self, expect, iterable):
"""
Harness to test if the FrameSet.fromIterable call works properly.
:param expect: the string to use to build the expected FrameRange, which will be normalized for comparison
:param iterable: the iterable to test
:return: None
"""
e = FrameSet(expect)
r = FrameSet.from_iterable(iterable)
m = u'FrameSet.fromIterable({0}) != {1!r}: got {2!r}'
self.assertEqual(r, e, m.format(iterable, e, r))
m = u'FrameSet.fromIterable({0}) returns {1}: got {2}'
self.assertIsInstance(r, FrameSet, m.format(expect, FrameSet, type(r)))
def _check___repr__(self, test, expect):
"""
Harness to test if the FrameSet.__repr__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
e = 'FrameSet("{0}")'.format(test)
m = u'repr(FrameSet("{0}")) != {1}: got {2}'
self.assertEqual(repr(f), e, m.format(test, e, repr(f)))
m = u'repr(FrameSet("{0}")) returns {1}: got {2}'
self.assertIsInstance(repr(f), native_str, m.format(test, native_str, type(repr(f))))
def _check___reversed__(self, test, expect):
"""
Harness to test if the FrameSet.__reversed__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
e = list(reversed(expect))
r = reversed(f)
m = u'reversed(FrameSet("{0}")) != {1}: got {2}'
self.assertEqual(list(r), e, m.format(test, e, r))
m = u'reversed(FrameSet("{0}")) returns {1}: got {2}'
self.assertIsInstance(r, types.GeneratorType, m.format(test, types.GeneratorType, type(r)))
def _check___contains__(self, test, expect):
"""
Harness to test if the FrameSet.__contains__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
e = expect[-1] if len(expect) else None
should_succeed = e in f
e = (max(expect) + 1) if len(expect) else None
should_fail = e in f
m = u'{0} in FrameSet("{1}"))'
# the empty FrameSet contains nothing
if not test and not expect:
self.assertFalse(should_succeed, m.format(e, test))
self.assertFalse(should_fail, m.format(e, test))
else:
self.assertTrue(should_succeed, m.format(e, test))
self.assertFalse(should_fail, m.format(e, test))
m = u'FrameSet("{0}").__contains__ returns {1}: got {2}'
self.assertIsInstance(should_succeed, bool, m.format(test, bool, type(should_succeed)))
self.assertIsInstance(should_fail, bool, m.format(test, bool, type(should_fail)))
def _check___hash__(self, test, expect):
"""
Harness to test if the FrameSet.__hash__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
try:
r = hash(f)
except Exception as err:
r = err
m = u'hash(FrameSet("{0}")) returns {1}: got {2}'
self.assertIsInstance(r, int, m.format(test, int, type(r)))
def _check___lt__(self, test, expect):
"""
Harness to test if the FrameSet.__lt__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is less than everything, except for itself
if not test and not expect:
self.assertTrue(f < FrameSet('1'))
self.assertTrue(f < FrameSet('-1'))
self.assertFalse(f < expect)
return
r = FrameSet.from_iterable(expect + [max(expect) + 1])
should_succeed = f < r
should_fail = r < f
m = u'FrameSet("{0}") < FrameSet("{1}")'
self.assertTrue(should_succeed, m.format(test, r))
self.assertFalse(should_fail, m.format(r, test))
m = u'FrameSet("{0}") < FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(should_succeed, bool, m.format(test, r, bool, type(should_succeed)))
self.assertIsInstance(should_fail, bool, m.format(r, test, bool, type(should_fail)))
def _check___le__(self, test, expect):
"""
Harness to test if the FrameSet.__le__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is less than everything, equal only to itself
if not test and not expect:
self.assertTrue(f <= FrameSet('1'))
self.assertTrue(f <= FrameSet('-1'))
self.assertTrue(f <= expect)
return
for i in [expect, expect + [max(expect) + 1]]:
r = FrameSet.from_iterable(i)
should_succeed = f <= r
m = u'FrameSet("{0}") <= FrameSet("{1}")'
self.assertTrue(should_succeed, m.format(test, r))
m = u'FrameSet("{0}") <= FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(should_succeed, bool, m.format(test, r, bool, type(should_succeed)))
def _check___eq__(self, test, expect):
"""
Harness to test if the FrameSet.__eq__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
r = FrameSet(','.join((str(i) for i in expect)))
should_succeed = f == r
m = u'FrameSet("{0}") == FrameSet("{1}")'
self.assertTrue(should_succeed, m.format(test, r))
m = u'FrameSet("{0}") == FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(should_succeed, bool, m.format(test, r, bool, type(should_succeed)))
def _check___ne__(self, test, expect):
"""
Harness to test if the FrameSet.__ne__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is not equal to anything, except for itself
if not test and not expect:
self.assertTrue(f != FrameSet('1'))
self.assertTrue(f != FrameSet('-1'))
self.assertFalse(f != expect)
return
r = FrameSet(','.join((str(i) for i in (expect + [max(expect) + 1]))))
should_succeed = f != r
m = u'FrameSet("{0}") != FrameSet("{1}")'
self.assertTrue(should_succeed, m.format(test, r))
m = u'FrameSet("{0}") != FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(should_succeed, bool, m.format(test, r, bool, type(should_succeed)))
def _check___ge__(self, test, expect):
"""
Harness to test if the FrameSet.__ge__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is greater than nothing, except for itself
if not test and not expect:
self.assertFalse(f >= FrameSet('1'))
self.assertFalse(f >= FrameSet('-1'))
self.assertTrue(f >= expect)
return
for i in [expect, expect[:-1]]:
try:
r = FrameSet.from_iterable(i)
except ParseException:
# this will happen if len(expect) == 1
continue
should_succeed = f >= r
m = u'FrameSet("{0}") >= FrameSet("{1}"'
self.assertTrue(should_succeed, m.format(test, r))
m = u'FrameSet("{0}") >= FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(should_succeed, bool, m.format(test, r, bool, type(should_succeed)))
def _check___gt__(self, test, expect):
"""
Harness to test if the FrameSet.__gt__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is greater than nothing, except for itself
if not test and not expect:
self.assertFalse(f > FrameSet('1'))
self.assertFalse(f > FrameSet('-1'))
self.assertFalse(f > expect)
return
try:
r = FrameSet.from_iterable(expect[:-1])
except ParseException:
# this will happen if len(expect) == 1
return
should_succeed = f > r
should_fail = r > f
m = u'FrameSet("{0}") > FrameSet("{1}")'
self.assertTrue(should_succeed, m.format(test, r))
self.assertFalse(should_fail, m.format(r, test))
m = u'FrameSet("{0}") > FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(should_succeed, bool, m.format(test, r, bool, type(should_succeed)))
self.assertIsInstance(should_fail, bool, m.format(r, test, bool, type(should_fail)))
def _check___and__(self, test, expect):
"""
Harness to test if the FrameSet.__and__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = f & t
e = FrameSet.from_iterable(set(expect) & set(v), sort=True)
m = u'FrameSet("{0}") & FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(f, t, e))
m = u'FrameSet("{0}") & FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check___rand__(self, test, expect):
"""
Harness to test if the FrameSet.__rand__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = t & f
e = FrameSet.from_iterable(set(v) & set(expect), sort=True)
m = u'FrameSet("{0}") & FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}") & FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(t, test, FrameSet, type(r)))
def _check___sub__(self, test, expect):
"""
Harness to test if the FrameSet.__sub__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = f - t
e = FrameSet.from_iterable(set(expect) - set(v), sort=True)
m = u'FrameSet("{0}") - FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(f, t, e))
m = u'FrameSet("{0}") - FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check___rsub__(self, test, expect):
"""
Harness to test if the FrameSet.__rsub__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = t - f
e = FrameSet.from_iterable(set(v) - set(expect), sort=True)
m = u'FrameSet("{0}") - FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}") - FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(t, test, FrameSet, type(r)))
def _check___or__(self, test, expect):
"""
Harness to test if the FrameSet.__or__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = f | t
e = FrameSet.from_iterable(set(expect) | set(v), sort=True)
m = u'FrameSet("{0}") | FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(f, t, e))
m = u'FrameSet("{0}") | FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check___ror__(self, test, expect):
"""
Harness to test if the FrameSet.__ror__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = t | f
e = FrameSet.from_iterable(set(v) | set(expect), sort=True)
m = u'FrameSet("{0}") | FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}") | FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(t, test, FrameSet, type(r)))
def _check___xor__(self, test, expect):
"""
Harness to test if the FrameSet.__xor__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = f ^ t
e = FrameSet.from_iterable(set(expect) ^ set(v), sort=True)
m = u'FrameSet("{0}") ^ FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(f, t, e))
m = u'FrameSet("{0}") ^ FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check___rxor__(self, test, expect):
"""
Harness to test if the FrameSet.__rxor__ call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
v = [i + max(expect) + 1 for i in expect] or list(range(999, 1999))
t = FrameSet.from_iterable(v)
r = t ^ f
e = FrameSet.from_iterable(set(v) ^ set(expect), sort=True)
m = u'FrameSet("{0}") ^ FrameSet("{1}") != FrameSet("{2}")'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}") ^ FrameSet("{1}") returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(t, test, FrameSet, type(r)))
def _check_isdisjoint(self, test, expect):
"""
Harness to test if the FrameSet.isdisjoint call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is the disjoint of everything, including itself
if not test and not expect:
self.assertTrue(f.isdisjoint(FrameSet('1')))
self.assertTrue(f.isdisjoint(FrameSet('-1')))
self.assertTrue(f.isdisjoint(expect))
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.isdisjoint(t)
e = set(expect).isdisjoint(v)
m = u'FrameSet("{0}").isdisjoint(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").isdisjoint(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, bool, m.format(test, t, bool, type(r)))
def _check_issubset(self, test, expect):
"""
Harness to test if the FrameSet.issubset call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is the subset of everything, including itself
if not test and not expect:
self.assertTrue(f.issubset(FrameSet('1')))
self.assertTrue(f.issubset(FrameSet('-1')))
self.assertTrue(f.issubset(expect))
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.issubset(t)
e = set(expect).issubset(v)
m = u'FrameSet("{0}").issubset(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").issubset(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, bool, m.format(test, t, bool, type(r)))
def _check_issuperset(self, test, expect):
"""
Harness to test if the FrameSet.issuperset call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the empty FrameSet is the superset of everything, except itself
if not test and not expect:
self.assertFalse(f.issuperset(FrameSet('1')))
self.assertFalse(f.issuperset(FrameSet('-1')))
self.assertTrue(f.issuperset(expect))
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.issuperset(t)
e = set(expect).issuperset(v)
m = u'FrameSet("{0}").issuperset(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").issuperset(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, bool, m.format(test, t, bool, type(r)))
def _check_union(self, test, expect):
"""
Harness to test if the FrameSet.union call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the union of the empty FrameSet with any other is always the other
if not test and not expect:
self.assertEqual(f.union(FrameSet('1')), FrameSet('1'))
self.assertEqual(f.union(FrameSet('-1')), FrameSet('-1'))
self.assertEqual(f.union(expect), FrameSet.from_iterable(expect, sort=True))
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.union(t)
e = FrameSet.from_iterable(set(expect).union(v), sort=True)
m = u'FrameSet("{0}").union(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").union(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check_intersection(self, test, expect):
"""
Harness to test if the FrameSet.intersection call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the intersection of the empty FrameSet with any other is always the empty FrameSet
if not test and not expect:
self.assertEqual(f.intersection(FrameSet('1')), f)
self.assertEqual(f.intersection(FrameSet('-1')), f)
self.assertEqual(f.intersection(expect), f)
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.intersection(t)
e = FrameSet.from_iterable(set(expect).intersection(v), sort=True)
m = u'FrameSet("{0}").intersection(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").intersection(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check_difference(self, test, expect):
"""
Harness to test if the FrameSet.difference call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the difference of the empty FrameSet with any other is always the empty FrameSet
if not test and not expect:
self.assertEqual(f.intersection(FrameSet('1')), f)
self.assertEqual(f.intersection(FrameSet('-1')), f)
self.assertEqual(f.intersection(expect), f)
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.difference(t)
e = FrameSet.from_iterable(set(expect).difference(v), sort=True)
m = u'FrameSet("{0}").difference(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").difference(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t, FrameSet, type(r)))
def _check_symmetric_difference(self, test, expect):
"""
Harness to test if the FrameSet.symmetric_difference call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
# the symmetric_difference of the empty FrameSet with any other is always the empty FrameSet
if not test and not expect:
self.assertEqual(f.intersection(FrameSet('1')), f)
self.assertEqual(f.intersection(FrameSet('-1')), f)
self.assertEqual(f.intersection(expect), f)
return
for v in [[expect[0]], expect, expect + [max(expect)+1], [i + max(expect) + 1 for i in expect]]:
t = FrameSet.from_iterable(v)
r = f.symmetric_difference(t)
e = FrameSet.from_iterable(set(expect).symmetric_difference(v), sort=True)
m = u'FrameSet("{0}").symmetric_difference(FrameSet("{1}")) != {2}'
self.assertEqual(r, e, m.format(t, f, e))
m = u'FrameSet("{0}").symmetric_difference(FrameSet("{1}")) returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(test, t,
FrameSet, type(r)))
def _check_copy(self, test, expect):
"""
Harness to test if the FrameSet.copy call works properly.
:param test: the string to pass to FrameSet
:param expect: the expected list of values that FrameSet will hold
:return: None
"""
f = FrameSet(test)
r = f.copy()
self.assertIsNot(f, r)
self.assertEqual(f, r)
# due to the sheer number of combinations, we build the bulk of our tests on to TestFrameSet dynamically
for name, tst, exp in FRAME_SET_SHOULD_SUCCEED:
setattr(
TestFrameSet, 'testFrameSet%sInitSetsRange' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___init___range(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sInitSetsItems' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___init___items(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sInitSetsOrder' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___init___order(self, t, e))
setattr(
TestFrameSet, 'testFromIterable%s' % name,
lambda self, e=tst, i=exp: TestFrameSet._check_fromIterable(self, e, i))
setattr(
TestFrameSet, 'testFrameSet%sIndex' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_index(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sFrame' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_frame(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sHasFrameTrue' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_hasFrameTrue(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sHasFrameFalse' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_hasFrameTrue(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sStart' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_start(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sEnd' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_end(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sFrameRange' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_frameRange(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sInvertedFrameRange' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_invertedFrameRange(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sNormalize' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_normalize(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sSerialize' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_canSerialize(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sGetItem' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___getitem__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sLen' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___len__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sStr' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___str__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sRepr' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___repr__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sIter' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___iter__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sReversed' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___reversed__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sContains' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___contains__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sHash' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___hash__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sLessThan' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___lt__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sLessEqual' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___le__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sEqual' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___eq__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sNotEqual' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___ne__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sGreaterEqual' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___ge__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sGreaterThan' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___gt__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sAnd' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___and__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sRightAnd' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___rand__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sSub' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___sub__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sRightSub' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___rsub__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sOr' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___or__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sRightOr' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___ror__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sExclusiveOr' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___xor__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sRightExclusiveOr' % name,
lambda self, t=tst, e=exp: TestFrameSet._check___rxor__(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sIsDisjoint' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_isdisjoint(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sIsSubset' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_issubset(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sIsSubset' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_issuperset(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sUnion' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_union(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sIntersection' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_intersection(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sDifference' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_difference(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sSymmetricDifference' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_symmetric_difference(self, t, e))
setattr(
TestFrameSet, 'testFrameSet%sCopy' % name,
lambda self, t=tst, e=exp: TestFrameSet._check_copy(self, t, e))
setattr(
TestFrameSet, 'testIsFrameRange%sShouldSucceed' % name,
lambda self, t=tst: TestFrameSet._check_isFrameRange(self, t, True))
for name, tst in FRAME_SET_SHOULD_FAIL:
setattr(
TestFrameSet, 'testFrameSet%sInitHandlesMalformed' % name,
lambda self, t=tst: TestFrameSet._check___init____malformed(self, t))
setattr(
TestFrameSet, 'testIsFrameRange%sShouldFail' % name,
lambda self, t=tst: TestFrameSet._check_isFrameRange(self, t, False))
class TestFramesToFrameRange(unittest.TestCase):
"""
Exercise the frameToRange func. Due to the sheer number of permutations, we'll add most tests dynamically.
"""
def _check_frameToRangeEquivalence(self, test, expect):
f = FrameSet(test)
frange = framesToFrameRange(expect, sort=False)
r = FrameSet(frange)
m = '{0!r} != {1!r}'
self.assertEqual(f, r, m.format(f, r))
m = '{0!r} != {1!r} ; got type {2!r}'
self.assertIsInstance(frange, native_str, m.format(frange, native_str, type(frange)))
# due to the sheer number of combinations, we build the bulk of our tests on to TestFramesToFrameRange dynamically
for name, tst, exp in FRAME_SET_SHOULD_SUCCEED:
setattr(
TestFramesToFrameRange, 'testFramesToRangeEquivalence%s' % name,
lambda self, t=tst, e=exp: TestFramesToFrameRange._check_frameToRangeEquivalence(self, t, e))
class TestFrameSetFromRangeConstructor(unittest.TestCase):
"""
Exercise the TestFrame.from_range() constructor. Due to the sheer number of permutations, we'll add most tests dynamically.
"""
def _check_fromRange(self, start, end, step, expect):
"""
Harness to test if the FrameSet.fromRange call works properly.
:param expect: the string to use to build the expected FrameSet, which will be normalized for comparison
:param start: the start frame
:param end: the end frame
:return: None
"""
e = FrameSet(expect)
r = FrameSet.from_range(start, end, step)
m = u'FrameSet.fromRange({0}, {1}) != {2!r}: got {3!r}'
self.assertEqual(r, e, m.format(start, end, e, r))
m = u'FrameSet.fromRange({0}, {1}) returns {2}: got {3}'
self.assertIsInstance(r, FrameSet, m.format(start, end, FrameSet, type(r)))
# add tests dynamically
for name, start, end, step_, exp in FRAME_SET_FROM_RANGE_SHOULD_SUCCEED:
setattr(
TestFrameSetFromRangeConstructor, 'testFromRange%s' % name,
lambda self, s=start, e=end, step=step_, exp=exp: TestFrameSetFromRangeConstructor._check_fromRange(self, s, e, step, exp))
if __name__ == '__main__':
unittest.main(verbosity=1)
``` |
[
{
"content": "Repeat the code precisely:\n```python\n__author__ = 'Robert Cope'\n\nfrom unittest import TestCase\n\nfrom PyHT6022.LibUsbScope import Oscilloscope\nfrom PyHT6022.HantekFirmware import stock_firmware, mod_firmware_01\n\n\n# TODO: Add more unit tests, add unit tests for changing number of active ch... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n__author__ = 'Robert Cope'\n\nfrom unittest import TestCase\n\nfrom PyHT6022.LibUsbScope import Oscilloscope\nfrom PyHT6022.HantekFirmware import stock_firmware, mod_firmware_01\n\n\n# TODO: Add more unit tests, add unit tests for changing num... | ```python
__author__ = 'Robert Cope'
from unittest import TestCase
from PyHT6022.LibUsbScope import Oscilloscope
from PyHT6022.HantekFirmware import stock_firmware, mod_firmware_01
# TODO: Add more unit tests, add unit tests for changing number of active channels.
class BasicTests(TestCase):
def test_find_device(self):
print "Testing finding device and flashing stock firmware."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.close_handle()
def test_flash_firmware(self):
print "Testing flashing multiple firmwares."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware(stock_firmware, supports_single_channel=False)
assert scope.flash_firmware(mod_firmware_01)
assert scope.flash_firmware(stock_firmware, supports_single_channel=False)
assert scope.close_handle()
def test_get_cal_values(self):
print "Testing getting calibration values."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
cal_values = scope.get_calibration_values()
assert cal_values
assert scope.close_handle()
def test_read_data(self):
print "Testing reading data from the oscilloscope."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
ch1_data, _ = scope.read_data(data_size=0x400)
print ch1_data
assert ch1_data
assert scope.close_handle()
def test_read_many_sizes(self):
print "Testing reading many different data sizes"
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
data_size = 0x400
for _ in xrange(11):
print "DATA SIZE", data_size
ch1_data, ch2_data = scope.read_data(data_size=data_size, raw=True)
print len(ch1_data)
print len(ch2_data)
assert ch1_data, ch2_data
data_size <<= 1
assert scope.close_handle()
def test_set_sample_rate(self):
print "Testing setting the sample rate."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
for rate_index in scope.SAMPLE_RATES.keys():
scope.set_sample_rate(rate_index)
assert scope.close_handle()
def test_set_channel_voltage_range(self):
print "Testing setting the voltage range."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
for vrange in scope.VOLTAGE_RANGES.keys():
assert scope.set_ch1_voltage_range(vrange)
assert scope.set_ch1_voltage_range(vrange)
assert scope.close_handle()
def test_data_scaling(self):
print "Testing setting various scale facotrs and reading."
scale_factor = 0x01
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.set_ch1_voltage_range(scale_factor)
assert scope.set_sample_rate(27)
ch1_data, _ = scope.read_data(0x100000)
ch1_data = scope.scale_read_data(ch1_data, scale_factor)
print "Max:", max(ch1_data), "(V), Min:", min(ch1_data), "(V)"
assert ch1_data
assert scope.close_handle()
def test_set_num_channels(self):
print "Testing setting the number of channels with modified firmware."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware(mod_firmware_01)
assert scope.set_num_channels(1)
assert scope.set_num_channels(2)
assert scope.set_num_channels(1)
assert scope.close_handle()
def test_set_one_channel_and_read(self):
print "Testing setting one channel and reading it."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware(mod_firmware_01)
assert scope.set_ch1_voltage_range(0xA)
assert scope.set_sample_rate(0x10)
assert scope.set_num_channels(1)
ch1_data, ch2_data = scope.read_data(0x4000)
assert ch1_data
assert not ch2_data
assert scope.close_handle()
def test_read_firmware(self):
print "Testing read_firmware method on scope."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.read_firmware()
assert scope.close_handle()
def test_clear_fifo(self):
print "Testing explicitly clearing the FIFO."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.clear_fifo()
assert scope.close_handle()
``` |
[
{
"content": "```python\nfrom django import template\nfrom django.utils.encoding import smart_str\nimport re\nfrom django.template.base import FilterExpression, NodeList\nfrom django.template.loader import get_template\n\nkwarg_re = re.compile( r\"(?:(\\w+)=)?(.+)\" )\nclass WidgetLibrary(template.Library):\n\n... | [
{
"content": "<|memory_start|>```python\nfrom django import template\nfrom django.utils.encoding import smart_str\nimport re\nfrom django.template.base import FilterExpression, NodeList\nfrom django.template.loader import get_template\n\nkwarg_re = re.compile( r\"(?:(\\w+)=)?(.+)\" )\nclass WidgetLibrary(templa... | ```python
from django import template
from django.utils.encoding import smart_str
import re
from django.template.base import FilterExpression, NodeList
from django.template.loader import get_template
kwarg_re = re.compile( r"(?:(\w+)=)?(.+)" )
class WidgetLibrary(template.Library):
def widget_tag_compile_function(self, cls, widget_name):
def widget_tag(parser, token):
"""
{% xwidget 'valami nev' %}
{% xwidget 'valami nev' as valtozo %}
{% xwidget 'valami nev' with 'template.html' as valtozo %}
{% xwidget 'valami nev' with variable as valtozo %}
{% xwidget 'valami nev' with-inline as valtozo %}...{% endxwidget %}
{% xwidget 'valami nev' with-inline %}...{% endxwidget %}
"""
bits = token.split_contents()
#widget_name = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
templ = None
bits = bits[1:]
if len( bits ) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len( bits ) >=1 and bits[-1] == 'with-inline':
templ = True
bits = bits[:-1]
elif len( bits ) >=2 and bits[-2] == 'with':
templ = bits[-1]
bits = bits[:-2]
if len( bits ):
for bit in bits:
match = kwarg_re.match( bit )
if not match:
raise template.TemplateSyntaxError( "Malformed arguments to widget tag" )
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter( value )
else:
args.append( parser.compile_filter( value ) )
if templ == True:
templ = parser.parse(('end'+widget_name,))
parser.delete_first_token()
elif templ:
templ = parser.compile_filter( templ )
return cls(args, kwargs, templ, asvar)
return widget_tag
def widget(self, name):
def inner(cls):
self.tag(name, self.widget_tag_compile_function(cls, name))
return inner
class XWidgetBase(template.Node):
def __init__(self, args, kwargs, template, asvar):
self.args = args
self.kwargs = kwargs
self.template = template
self.asvar = asvar
def render(self, context):
def resolve(v, context):
if unicode(v)==u"False": return False
elif unicode(v)==u"True": return True
elif unicode(v)==u"None": return None
else:
return v.resolve(context)
args = [arg.resolve( context ) for arg in self.args]
kwargs = dict( [( smart_str( k, 'ascii' ), resolve(v, context) ) for k, v in self.kwargs.items()] )
if isinstance(self.template, FilterExpression):
kwargs['TEMPLATE']=get_template(self.template.resolve( context ))
if isinstance(self.template, NodeList):
kwargs['TEMPLATE']=self.template
if not self.asvar:
return self.value(context, *args, **kwargs)
context[self.asvar]=self.value(context, *args, **kwargs)
return ""
def value(self, context, *args, **kwargs):
return ""
``` |
[
{
"content": "```python\nfrom twisted.internet.defer import inlineCallbacks, returnValue\nfrom twisted.plugin import IPlugin\nfrom twisted.python.usage import UsageError\n\nfrom zope.interface import implements\n\nfrom pub.client import cli\nfrom pub.iface import EntityNotFound\n\n\nclass Command(cli.Command):\... | [
{
"content": "<|memory_start|>```python\nfrom twisted.internet.defer import inlineCallbacks, returnValue\nfrom twisted.plugin import IPlugin\nfrom twisted.python.usage import UsageError\n\nfrom zope.interface import implements\n\nfrom pub.client import cli\nfrom pub.iface import EntityNotFound\n\n\nclass Comman... | ```python
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.plugin import IPlugin
from twisted.python.usage import UsageError
from zope.interface import implements
from pub.client import cli
from pub.iface import EntityNotFound
class Command(cli.Command):
def _getMaxLen(self, fields):
m = 0
for f in fields:
m = max(m, len(f))
return m
_longEntityFmt = "{0.id:{p}} {0.species} {0.primaryKeyId}"
@inlineCallbacks
def execute(self):
if self.config["ids"]:
ids = self.config["ids"]
else:
ids = yield self.pub.listEntities()
p = self._getMaxLen(ids)
if self.config["long"]:
for id in ids:
try:
ent = yield self.pub.getEntity(id)
except EntityNotFound:
print "{0:{p}} Not found".format(id, p=p)
else:
print self._longEntityFmt.format(ent, p=p)
else:
print "\n".join(ids)
class Options(cli.Options):
optFlags = [
["long", "l", "Print more information about entities."],
]
def parseArgs(self, *ids):
self["ids"] = ids
def postOptions(self):
if self["ids"]:
self["long"] = True
class Loader(cli.CommandFactory):
implements(IPlugin)
command = Command
options = Options
name = "list-entities"
shortcut = "e"
description = "List all entities"
loader = Loader()
``` |
[
{
"content": "Here is a code file:\n```python\nfrom typing import Callable\n\nfrom vnpy.trader.object import (\n TickData,\n OrderData,\n TradeData,\n PositionData,\n AccountData,\n ContractData,\n OrderRequest,\n CancelRequest,\n SubscribeRequest,\n HistoryRequest,\n)\nfrom vnpy.t... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom typing import Callable\n\nfrom vnpy.trader.object import (\n TickData,\n OrderData,\n TradeData,\n PositionData,\n AccountData,\n ContractData,\n OrderRequest,\n CancelRequest,\n SubscribeRequest,\n HistoryRequest,... | ```python
from typing import Callable
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest,
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.api.websocket import WebsocketClient
from datetime import datetime
from copy import copy
WEBSOCKET_HOST = "wss://www.deribit.com/ws/api/v2"
PRODUCT_DERIBIT2VT = {
"future": Product.FUTURES,
"option": Product.OPTION
}
OPTIONTYPE_DERIBIT2VT = {
"call": OptionType.CALL,
"put": OptionType.PUT
}
DIRECTION_VT2DERIBIT = {Direction.LONG: "buy", Direction.SHORT: "sell"}
ORDERTYPE_VT2DERIBIT = {
OrderType.LIMIT: "limit",
OrderType.MARKET: "market",
}
ORDERTYPE_DERIBIT2VT = {v: k for k, v in ORDERTYPE_VT2DERIBIT.items()}
DIRECTION_DERIBIT2VT = {v: k for k, v in DIRECTION_VT2DERIBIT.items()}
STATUS_DERIBIT2VT = {
"open": Status.NOTTRADED,
"filled": Status.ALLTRADED,
"rejected": Status.REJECTED,
"cancelled": Status.CANCELLED,
}
class DeribitGateway(BaseGateway):
""""""
default_setting = {
"key": "",
"secret": "",
"proxy_host": "",
"proxy_port": ""
}
exchanges = [Exchange.DERIBIT]
def __init__(self, event_engine):
""""""
super().__init__(event_engine, "DERIBIT")
self.ws_api = DeribitWebsocketApi(self)
def connect(self, setting: dict):
""""""
key = setting["key"]
secret = setting["secret"]
proxy_host = setting["proxy_host"]
proxy_port = setting["proxy_port"]
if proxy_port.isdigit():
proxy_port = int(proxy_port)
else:
proxy_port = 0
self.ws_api.connect(
key,
secret,
proxy_host,
proxy_port,
)
def subscribe(self, req: SubscribeRequest):
""""""
self.ws_api.subscribe(req)
def send_order(self, req: OrderRequest):
""""""
return self.ws_api.send_order(req)
def cancel_order(self, req: CancelRequest):
""""""
return self.ws_api.cancel_order(req)
def query_account(self):
""""""
self.ws_api.query_account()
def query_position(self):
"""
Query holding positions.
"""
pass
def query_history(self, req: HistoryRequest):
"""
Query bar history data.
"""
pass
def close(self):
""""""
self.ws_api.stop()
class DeribitWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.key = ""
self.secret = ""
self.access_token = ""
self.reqid = 1
self.reqid_callback_map = {}
self.reqid_currency_map = {}
self.reqid_order_map = {}
self.connect_time = 0
self.order_count = 1000000
self.local_sys_map = {}
self.sys_local_map = {}
self.cancel_requests = {}
self.callbacks = {
"ticker": self.on_ticker,
"book": self.on_orderbook,
"user": self.on_user_update,
}
self.ticks = {}
def connect(
self,
key: str,
secret: str,
proxy_host: str,
proxy_port: int
):
""""""
self.key = key
self.secret = secret
self.connect_time = (
int(datetime.now().strftime("%y%m%d%H%M%S")) * self.order_count
)
self.init(WEBSOCKET_HOST, proxy_host, proxy_port)
self.start()
def subscribe(self, req: SubscribeRequest):
""""""
symbol = req.symbol
self.ticks[symbol] = TickData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=Exchange.DERIBIT,
datetime=datetime.now(),
)
params = {
"channels": [
f"ticker.{symbol}.100ms",
f"book.{symbol}.none.10.100ms",
f"user.changes.{symbol}.raw"
],
"access_token": self.access_token
}
self.send_request("private/subscribe", params)
def send_order(self, req: OrderRequest):
""""""
self.order_count += 1
orderid = str(self.connect_time + self.order_count)
order = req.create_order_data(orderid, self.gateway_name)
self.gateway.on_order(order)
side = DIRECTION_VT2DERIBIT[req.direction]
method = "private/" + side
params = {
"instrument_name": req.symbol,
"amount": int(req.volume),
"type": ORDERTYPE_VT2DERIBIT[req.type],
"label": orderid,
"price": req.price
}
if req.offset == Offset.CLOSE:
params["reduce_only"] = True
reqid = self.send_request(
method,
params,
self.on_send_order
)
self.reqid_order_map[reqid] = order
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
if req.orderid not in self.local_sys_map:
self.cancel_requests[req.orderid] = req
return
sys_id = self.local_sys_map[req.orderid]
params = {
"order_id": sys_id,
"access_token": self.access_token,
}
self.send_request(
"private/cancel",
params,
self.on_cancel_order
)
def get_access_token(self):
"""
use the access key and secret to get access token
"""
params = {
"grant_type": "client_credentials",
"client_id": self.key,
"client_secret": self.secret
}
self.send_request(
"public/auth",
params,
self.on_access_token
)
def query_instrument(self):
""""""
for currency in ["BTC", "ETH"]:
params = {
"currency": currency,
"expired": False,
}
self.send_request(
"public/get_instruments",
params,
self.on_query_instrument
)
def query_account(self):
""""""
for currency in ["BTC", "ETH"]:
params = {
"currency": currency,
"access_token": self.access_token
}
self.send_request(
"private/get_account_summary",
params,
self.on_query_account
)
def query_position(self):
""""""
for currency in ["BTC", "ETH"]:
params = {
"currency": currency,
"access_token": self.access_token
}
self.send_request(
"private/get_positions",
params,
self.on_query_position
)
def query_order(self):
""""""
for currency in ["BTC", "ETH"]:
params = {
"currency": currency,
"access_token": self.access_token
}
self.send_request(
"private/get_open_orders_by_currency",
params,
self.on_query_order
)
def on_connected(self):
"""
Callback when websocket is connected successfully.
"""
self.gateway.write_log("服务器连接成功")
self.get_access_token()
self.query_instrument()
def on_disconnected(self):
"""
Callback when websocket connection is lost.
"""
self.gateway.write_log("服务器连接断开")
def on_packet(self, packet: dict):
"""
callback when data is received and unpacked
"""
if "id" in packet:
packet_id = packet["id"]
if packet_id in self.reqid_callback_map.keys():
callback = self.reqid_callback_map[packet_id]
callback(packet)
elif "params" in packet:
channel = packet["params"]["channel"]
kind = channel.split(".")[0]
callback = self.callbacks[kind]
callback(packet)
def on_access_token(self, packet: dict):
""""""
data = packet["result"]
self.access_token = data["access_token"]
self.gateway.write_log("服务器登录成功")
self.query_position()
self.query_account()
self.query_order()
# Subscribe to account update
params = {
"channels": [
"user.portfolio.btc",
"user.portfolio.eth"
],
"access_token": self.access_token
}
self.send_request("private/subscribe", params)
def on_query_instrument(self, packet: dict):
""""""
currency = self.reqid_currency_map[packet["id"]]
for d in packet["result"]:
contract = ContractData(
symbol=d["instrument_name"],
exchange=Exchange.DERIBIT,
name=d["instrument_name"],
product=PRODUCT_DERIBIT2VT[d["kind"]],
pricetick=d["tick_size"],
size=d["contract_size"],
min_volume=d["min_trade_amount"],
net_position=True,
history_data=False,
gateway_name=self.gateway_name,
)
if contract.product == Product.OPTION:
contract.option_portfolio = d["base_currency"]
contract.option_strike = d["strike"]
contract.option_index = str(d["strike"])
contract.option_underlying = d["base_currency"]
contract.option_type = OPTIONTYPE_DERIBIT2VT[d["option_type"]]
contract.option_expiry = datetime.fromtimestamp(
d["expiration_timestamp"] / 1000
)
self.gateway.on_contract(contract)
self.gateway.write_log(f"{currency}合约信息查询成功")
def on_query_position(self, packet: dict):
""""""
data = packet["result"]
currency = self.reqid_currency_map[packet["id"]]
for pos in data:
position = PositionData(
symbol=pos["instrument_name"],
exchange=Exchange.DERIBIT,
direction=Direction.NET,
volume=pos["size"],
pnl=float(pos["floating_profit_loss"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(position)
self.gateway.write_log(f"{currency}持仓查询成功")
def on_query_account(self, packet: dict):
""""""
data = packet["result"]
currency = data["currency"]
account = AccountData(
accountid=currency,
balance=data["balance"],
frozen=data["balance"] - data["available_funds"],
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
self.gateway.write_log(f"{currency}资金查询成功")
def on_query_order(self, packet: dict):
""""""
data = packet["result"]
currency = self.reqid_currency_map[packet["id"]]
for d in data:
self.on_order(d)
self.gateway.write_log(f"{currency}委托查询成功")
def on_send_order(self, packet: dict):
""""""
error = packet.get("error", None)
if not error:
return
msg = error["message"]
reason = error["data"]["reason"]
code = error["code"]
self.gateway.write_log(
f"委托失败,代码:{code},类型:{msg},原因:{reason}"
)
order = self.reqid_order_map[packet["id"]]
order.status = Status.REJECTED
self.gateway.on_order(order)
def on_cancel_order(self, packet: dict):
""""""
data = packet["result"]
orderid = data["label"]
order = OrderData(
symbol=data["instrument_name"],
exchange=Exchange.DERIBIT,
type=ORDERTYPE_DERIBIT2VT[data["order_type"]],
orderid=orderid,
direction=DIRECTION_DERIBIT2VT[data["direction"]],
price=float(data["price"]),
volume=float(data["amount"]),
traded=float(data["filled_amount"]),
time=str(datetime.fromtimestamp(data["last_update_timestamp"] / 1000)),
status=STATUS_DERIBIT2VT[data["order_state"]],
gateway_name=self.gateway_name,
)
self.gateway.on_order(copy(order))
def on_user_update(self, packet: dict):
""""""
if "portfolio" in packet["params"]["channel"]:
self.on_account(packet)
return
data = packet["params"]["data"]
trades = data["trades"]
positions = data["positions"]
orders = data["orders"]
if orders:
for order in orders:
self.on_order(order)
if trades:
for trade in trades:
self.on_trade(trade, orders[0]["order_id"])
if positions:
for position in positions:
self.on_position(position)
def on_order(self, data: dict):
""""""
if data["label"]:
local_id = data["label"]
else:
local_id = data["order_id"]
sys_id = data["order_id"]
self.local_sys_map[local_id] = sys_id
self.sys_local_map[sys_id] = local_id
order = OrderData(
symbol=data["instrument_name"],
exchange=Exchange.DERIBIT,
type=ORDERTYPE_DERIBIT2VT[data["order_type"]],
orderid=local_id,
direction=DIRECTION_DERIBIT2VT[data["direction"]],
price=float(data["price"]),
volume=float(data["amount"]),
traded=float(data["filled_amount"]),
time=str(datetime.fromtimestamp(data["last_update_timestamp"] / 1000)),
status=STATUS_DERIBIT2VT[data["order_state"]],
gateway_name=self.gateway_name,
)
if data["reduce_only"]:
order.offset = Offset.CLOSE
self.gateway.on_order(order)
# Send cancel requests if necessary
if order.orderid in self.cancel_requests:
req = self.cancel_requests.pop(order.orderid)
if order.is_active():
self.cancel_order(req)
def on_trade(self, data: list, orderid):
""""""
sys_id = data["order_id"]
local_id = self.sys_local_map[sys_id]
trade = TradeData(
symbol=data["instrument_name"],
exchange=Exchange.DERIBIT,
orderid=local_id,
tradeid=data["trade_id"],
direction=DIRECTION_DERIBIT2VT[data["direction"]],
price=float(data["price"]),
volume=float(data["amount"]),
time=str(datetime.fromtimestamp(data["timestamp"] / 1000)),
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def on_position(self, data: dict):
""""""
pos = PositionData(
symbol=data["instrument_name"],
exchange=Exchange.DERIBIT,
direction=Direction.NET,
volume=data["size"],
price=data["average_price"],
pnl=float(data["floating_profit_loss"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def on_account(self, packet: dict):
""""""
data = packet["params"]["data"]
account = AccountData(
accountid=data["currency"],
balance=data["balance"],
frozen=data["balance"] - data["available_funds"],
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
def on_ticker(self, packet: dict):
""""""
data = packet["params"]["data"]
symbol = data["instrument_name"]
tick = self.ticks.get(symbol, None)
if not tick:
return
tick.last_price = data["last_price"]
tick.high_price = data["stats"]["high"]
tick.low_price = data["stats"]["low"]
tick.volume = data["stats"]["volume"]
tick.datetime = datetime.fromtimestamp(data["timestamp"] / 1000)
self.gateway.on_tick(copy(tick))
def on_orderbook(self, packet: dict):
""""""
data = packet["params"]["data"]
symbol = data["instrument_name"]
bids = data["bids"]
asks = data["asks"]
tick = self.ticks[symbol]
for i in range(min(len(bids), 5)):
ix = i + 1
bp, bv = bids[i]
setattr(tick, f"bid_price_{ix}", bp)
setattr(tick, f"bid_volume_{ix}", bv)
for i in range(min(len(asks), 5)):
ix = i + 1
ap, av = asks[i]
setattr(tick, f"ask_price_{ix}", ap)
setattr(tick, f"ask_volume_{ix}", av)
self.gateway.on_tick(copy(tick))
def send_request(
self,
method: str,
params: dict,
callback: Callable = None
):
""""""
self.reqid += 1
msg = {
"jsonrpc": "2.0",
"id": self.reqid,
"method": method,
"params": params
}
self.send_packet(msg)
if callback:
self.reqid_callback_map[self.reqid] = callback
if "currency" in params:
self.reqid_currency_map[self.reqid] = params["currency"]
return self.reqid
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n#!/usr/bin/env python2\nimport ctypes\nimport platform\nfrom logging import getLogger\n\n\nlogger = getLogger(__name__)\n\n\nclass c_cudaDeviceProp(ctypes.Structure):\n \"\"\"\n Passed to cudart.cudaGetDeviceProperties()\n \"\... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#!/usr/bin/env python2\nimport ctypes\nimport platform\nfrom logging import getLogger\n\n\nlogger = getLogger(__name__)\n\n\nclass c_cudaDeviceProp(ctypes.Structure):\n \"\"\"\n Passed to cudart.cudaGetDevicePrope... | ```python
#!/usr/bin/env python2
import ctypes
import platform
from logging import getLogger
logger = getLogger(__name__)
class c_cudaDeviceProp(ctypes.Structure):
"""
Passed to cudart.cudaGetDeviceProperties()
"""
_fields_ = [
('name', ctypes.c_char * 256),
('totalGlobalMem', ctypes.c_size_t),
('sharedMemPerBlock', ctypes.c_size_t),
('regsPerBlock', ctypes.c_int),
('warpSize', ctypes.c_int),
('memPitch', ctypes.c_size_t),
('maxThreadsPerBlock', ctypes.c_int),
('maxThreadsDim', ctypes.c_int * 3),
('maxGridSize', ctypes.c_int * 3),
('clockRate', ctypes.c_int),
('totalConstMem', ctypes.c_size_t),
('major', ctypes.c_int),
('minor', ctypes.c_int),
('textureAlignment', ctypes.c_size_t),
('texturePitchAlignment', ctypes.c_size_t),
('deviceOverlap', ctypes.c_int),
('multiProcessorCount', ctypes.c_int),
('kernelExecTimeoutEnabled', ctypes.c_int),
('integrated', ctypes.c_int),
('canMapHostMemory', ctypes.c_int),
('computeMode', ctypes.c_int),
('maxTexture1D', ctypes.c_int),
('maxTexture1DMipmap', ctypes.c_int),
('maxTexture1DLinear', ctypes.c_int),
('maxTexture2D', ctypes.c_int * 2),
('maxTexture2DMipmap', ctypes.c_int * 2),
('maxTexture2DLinear', ctypes.c_int * 3),
('maxTexture2DGather', ctypes.c_int * 2),
('maxTexture3D', ctypes.c_int * 3),
('maxTexture3DAlt', ctypes.c_int * 3),
('maxTextureCubemap', ctypes.c_int),
('maxTexture1DLayered', ctypes.c_int * 2),
('maxTexture2DLayered', ctypes.c_int * 3),
('maxTextureCubemapLayered', ctypes.c_int * 2),
('maxSurface1D', ctypes.c_int),
('maxSurface2D', ctypes.c_int * 2),
('maxSurface3D', ctypes.c_int * 3),
('maxSurface1DLayered', ctypes.c_int * 2),
('maxSurface2DLayered', ctypes.c_int * 3),
('maxSurfaceCubemap', ctypes.c_int),
('maxSurfaceCubemapLayered', ctypes.c_int * 2),
('surfaceAlignment', ctypes.c_size_t),
('concurrentKernels', ctypes.c_int),
('ECCEnabled', ctypes.c_int),
('pciBusID', ctypes.c_int),
('pciDeviceID', ctypes.c_int),
('pciDomainID', ctypes.c_int),
('tccDriver', ctypes.c_int),
('asyncEngineCount', ctypes.c_int),
('unifiedAddressing', ctypes.c_int),
('memoryClockRate', ctypes.c_int),
('memoryBusWidth', ctypes.c_int),
('l2CacheSize', ctypes.c_int),
('maxThreadsPerMultiProcessor', ctypes.c_int),
('streamPrioritiesSupported', ctypes.c_int),
('globalL1CacheSupported', ctypes.c_int),
('localL1CacheSupported', ctypes.c_int),
('sharedMemPerMultiprocessor', ctypes.c_size_t),
('regsPerMultiprocessor', ctypes.c_int),
('managedMemSupported', ctypes.c_int),
('isMultiGpuBoard', ctypes.c_int),
('multiGpuBoardGroupID', ctypes.c_int),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_int * 128),
# added later with cudart.cudaDeviceGetPCIBusId
# (needed by NVML)
('pciBusID_str', ctypes.c_char * 16),
]
class struct_c_nvmlDevice_t(ctypes.Structure):
"""
Handle to a device in NVML
"""
pass # opaque handle
c_nvmlDevice_t = ctypes.POINTER(struct_c_nvmlDevice_t)
class c_nvmlMemory_t(ctypes.Structure):
"""
Passed to nvml.nvmlDeviceGetMemoryInfo()
"""
_fields_ = [
('total', ctypes.c_ulonglong),
('free', ctypes.c_ulonglong),
('used', ctypes.c_ulonglong),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_ulonglong * 8),
]
class c_nvmlUtilization_t(ctypes.Structure):
"""
Passed to nvml.nvmlDeviceGetUtilizationRates()
"""
_fields_ = [
('gpu', ctypes.c_uint),
('memory', ctypes.c_uint),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_uint * 8),
]
def get_library(name):
"""
Returns a ctypes.CDLL or None
"""
try:
if platform.system() == 'Windows':
return ctypes.windll.LoadLibrary(name)
else:
return ctypes.cdll.LoadLibrary(name)
except OSError:
pass
return None
def get_cudart():
"""
Return the ctypes.DLL object for cudart or None
"""
if platform.system() == 'Windows':
arch = platform.architecture()[0]
for ver in range(90, 50, -5):
cudart = get_library('cudart%s_%d.dll' % (arch[:2], ver))
if cudart is not None:
return cudart
elif platform.system() == 'Darwin':
for major in xrange(9, 5, -1):
for minor in (5, 0):
cudart = get_library('libcudart.%d.%d.dylib' % (major, minor))
if cudart is not None:
return cudart
return get_library('libcudart.dylib')
else:
for major in xrange(9, 5, -1):
for minor in (5, 0):
cudart = get_library('libcudart.so.%d.%d' % (major, minor))
if cudart is not None:
return cudart
return get_library('libcudart.so')
return None
def get_nvml():
"""
Return the ctypes.DLL object for cudart or None
"""
if platform.system() == 'Windows':
return get_library('nvml.dll')
else:
for name in (
'libnvidia-ml.so.1',
'libnvidia-ml.so',
'nvml.so'):
nvml = get_library(name)
if nvml is not None:
return nvml
return None
devices = None
def get_devices(force_reload=False):
"""
Returns a list of c_cudaDeviceProp's
Prints an error and returns None if something goes wrong
Keyword arguments:
force_reload -- if False, return the previously loaded list of devices
"""
global devices
if not force_reload and devices is not None:
# Only query CUDA once
return devices
devices = []
cudart = get_cudart()
if cudart is None:
return []
# check CUDA version
cuda_version = ctypes.c_int()
rc = cudart.cudaRuntimeGetVersion(ctypes.byref(cuda_version))
if rc != 0:
logger.error('cudaRuntimeGetVersion() failed with error #%s' % rc)
return []
if cuda_version.value < 6050:
logger.error('ERROR: Cuda version must be >= 6.5, not "%s"' % cuda_version.valu)
return []
# get number of devices
num_devices = ctypes.c_int()
rc = cudart.cudaGetDeviceCount(ctypes.byref(num_devices))
if rc != 0:
logger.error('cudaGetDeviceCount() failed with error #%s' % rc)
return []
# query devices
for x in xrange(num_devices.value):
properties = c_cudaDeviceProp()
rc = cudart.cudaGetDeviceProperties(ctypes.byref(properties), x)
if rc == 0:
pciBusID_str = ' ' * 16
# also save the string representation of the PCI bus ID
rc = cudart.cudaDeviceGetPCIBusId(ctypes.c_char_p(pciBusID_str), 16, x)
if rc == 0:
properties.pciBusID_str = pciBusID_str
devices.append(properties)
else:
logger.error('cudaGetDeviceProperties() failed with error #%s' % rc)
del properties
return devices
def get_device(device_id):
"""
Returns a c_cudaDeviceProp
"""
return get_devices()[int(device_id)]
def get_nvml_info(device_id):
"""
Gets info from NVML for the given device
Returns a dict of dicts from different NVML functions
"""
device = get_device(device_id)
if device is None:
return None
nvml = get_nvml()
if nvml is None:
return None
rc = nvml.nvmlInit()
if rc != 0:
raise RuntimeError('nvmlInit() failed with error #%s' % rc)
try:
# get device handle
handle = c_nvmlDevice_t()
rc = nvml.nvmlDeviceGetHandleByPciBusId(ctypes.c_char_p(device.pciBusID_str), ctypes.byref(handle))
if rc != 0:
raise RuntimeError('nvmlDeviceGetHandleByPciBusId() failed with error #%s' % rc)
# Grab info for this device from NVML
info = {
'minor_number': device_id,
'product_name': device.name
}
uuid = ' ' * 41
rc = nvml.nvmlDeviceGetUUID(handle, ctypes.c_char_p(uuid), 41)
if rc == 0:
info['uuid'] = uuid[:-1]
temperature = ctypes.c_int()
rc = nvml.nvmlDeviceGetTemperature(handle, 0, ctypes.byref(temperature))
if rc == 0:
info['temperature'] = temperature.value
speed = ctypes.c_uint()
rc = nvml.nvmlDeviceGetFanSpeed(handle, ctypes.byref(speed))
if rc == 0:
info['fan'] = speed.value
power_draw = ctypes.c_uint()
rc = nvml.nvmlDeviceGetPowerUsage(handle, ctypes.byref(power_draw))
if rc == 0:
info['power_draw'] = power_draw.value
power_limit = ctypes.c_uint()
rc = nvml.nvmlDeviceGetPowerManagementLimit(handle, ctypes.byref(power_limit))
if rc == 0:
info['power_limit'] = power_limit.value
memory = c_nvmlMemory_t()
rc = nvml.nvmlDeviceGetMemoryInfo(handle, ctypes.byref(memory))
if rc == 0:
info['memory_total'] = memory.total
info['memory_used'] = memory.used
utilization = c_nvmlUtilization_t()
rc = nvml.nvmlDeviceGetUtilizationRates(handle, ctypes.byref(utilization))
if rc == 0:
info['gpu_util'] = utilization.gpu
return info
finally:
rc = nvml.nvmlShutdown()
if rc != 0:
pass
def add_unit(data):
temperature = 'temperature'
if temperature in data:
data[temperature] = '{} C'.format(data[temperature])
fan = 'fan'
if fan in data:
data[fan] = '{} %'.format(data[fan])
power_draw = 'power_draw'
if power_draw in data:
data[power_draw] = '{:.2f} W'.format(float(data[power_draw]) / pow(10, 3))
power_limit = 'power_limit'
if power_limit in data:
data[power_limit] = '{:.2f} W'.format(float(data[power_limit]) / pow(10, 3))
memory_total = 'memory_total'
if memory_total in data:
data[memory_total] = '{} MiB'.format(data[memory_total] / pow(2, 20))
memory_used = 'memory_used'
if memory_used in data:
data[memory_used] = '{} MiB'.format(data[memory_used] / pow(2, 20))
gpu_util = 'gpu_util'
if gpu_util in data:
data[gpu_util] = '{} %'.format(data[gpu_util])
def get_devices_info():
if not len(get_devices()):
return None
nvml = get_nvml()
nvml.nvmlInit()
version = ' ' * 80
nvml.nvmlSystemGetDriverVersion(ctypes.c_char_p(version), 80)
version = version.strip()[:-1]
gpus = []
for i, device in enumerate(get_devices()):
info = get_nvml_info(i)
if info:
gpus.append(info)
for gpu in gpus:
add_unit(gpu)
return {
'gpus': gpus,
'driver_version': version
}
``` |
[
{
"content": "```python\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law... | [
{
"content": "<|memory_start|>```python\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required b... | ```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import directory
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.ext import baked
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_auto_ptg_db as auto_ptg_db)
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_intra_ptg_db as intra_ptg_db)
from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db
from gbpservice.neutron.extensions import cisco_apic_gbp
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
LOG = logging.getLogger(__name__)
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s", __name__))
class AIMExtensionDriver(api.ExtensionDriver,
intra_ptg_db.ApicIntraPtgDBMixin,
auto_ptg_db.ApicAutoPtgDBMixin):
_supported_extension_alias = cisco_apic_gbp.ALIAS
_extension_dict = cisco_apic_gbp.EXTENDED_ATTRIBUTES_2_0
def __init__(self):
LOG.info("AIM Extension __init__")
self._policy_driver = None
@property
def _pd(self):
if not self._policy_driver:
gbp_plugin = directory.get_plugin("GROUP_POLICY")
policy_mgr = gbp_plugin.policy_driver_manager
self._policy_driver = policy_mgr.policy_drivers['aim_mapping'].obj
return self._policy_driver
def initialize(self):
pass
@property
def extension_alias(self):
return self._supported_extension_alias
def _set_intra_ptg_allow(self, session, data, result):
ptg = data['policy_target_group']
query = BAKERY(lambda s: s.query(
gp_db.PolicyTargetGroup))
query += lambda q: q.filter_by(
id=sa.bindparam('id'))
ptg_db = query(session).params(
id=result['id']).one()
if not ptg_db:
raise gpolicy.PolicyTargetGroupNotFound(
policy_target_group_id=result['id'])
if 'intra_ptg_allow' in ptg:
self.set_intra_ptg_allow(
session, policy_target_group_id=result['id'],
intra_ptg_allow=ptg['intra_ptg_allow'])
result['intra_ptg_allow'] = ptg['intra_ptg_allow']
else:
self._extend_ptg_dict_with_intra_ptg_allow(session, result)
def _extend_ptg_dict_with_intra_ptg_allow(self, session, result):
result['intra_ptg_allow'] = self.get_intra_ptg_allow(
session, policy_target_group_id=result['id'])
def process_create_policy_target_group(self, session, data, result):
self._set_intra_ptg_allow(session, data, result)
result['is_auto_ptg'] = bool(
gpolicy.AUTO_PTG_REGEX.match(result['id']))
self.set_is_auto_ptg(
session, policy_target_group_id=result['id'],
is_auto_ptg=result['is_auto_ptg'])
def process_update_policy_target_group(self, session, data, result):
self._set_intra_ptg_allow(session, data, result)
def extend_policy_target_group_dict(self, session, result):
self._extend_ptg_dict_with_intra_ptg_allow(session, result)
result['is_auto_ptg'] = self.get_is_auto_ptg(
session, policy_target_group_id=result['id'])
self._pd.extend_policy_target_group_dict(session, result)
def extend_application_policy_group_dict(self, session, result):
self._pd.extend_application_policy_group_dict(session, result)
def extend_policy_rule_dict(self, session, result):
self._pd.extend_policy_rule_dict(session, result)
def extend_policy_rule_set_dict(self, session, result):
self._pd.extend_policy_rule_set_dict(session, result)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nimport os\nimport shutil\nimport sys\n\nimport click\n\nimport apistar\nfrom apistar import schema\n\nAPISTAR_PACKAGE_DIR = os.path.dirname(apistar.__file__)\nLAYOUTS_DIR = os.path.join(APISTAR_PACKAGE_DIR, 'layouts')\nLAYOUT_CHOICES =... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nimport os\nimport shutil\nimport sys\n\nimport click\n\nimport apistar\nfrom apistar import schema\n\nAPISTAR_PACKAGE_DIR = os.path.dirname(apistar.__file__)\nLAYOUTS_DIR = os.path.join(APISTAR_PACKAGE_DIR, 'layouts')\n... | ```python
import os
import shutil
import sys
import click
import apistar
from apistar import schema
APISTAR_PACKAGE_DIR = os.path.dirname(apistar.__file__)
LAYOUTS_DIR = os.path.join(APISTAR_PACKAGE_DIR, 'layouts')
LAYOUT_CHOICES = os.listdir(LAYOUTS_DIR)
IGNORED_DIRECTORIES = ['__pycache__']
class TargetDir(schema.String):
pass
class Layout(schema.String):
description = 'Select the project layout to use.'
default = 'standard'
choices = LAYOUT_CHOICES
class Force(schema.Boolean):
description = 'Overwrite any existing project files.'
default = False
def new(target_dir: TargetDir, layout: Layout, force: Force) -> None:
"""
Create a new project in TARGET_DIR.
"""
source_dir = os.path.join(LAYOUTS_DIR, layout)
copy_paths = []
for dir_path, dirs, filenames in os.walk(source_dir):
dirs[:] = [d for d in dirs if d not in IGNORED_DIRECTORIES]
for filename in filenames:
source_path = os.path.join(dir_path, filename)
rel_path = os.path.relpath(source_path, source_dir)
target_path = os.path.join(target_dir, rel_path)
if os.path.exists(target_path) and not force:
click.echo('Project files already exist. Use `-f` to overwrite.')
sys.exit(1)
copy_paths.append((source_path, target_path))
for source_path, target_path in copy_paths:
click.echo(target_path)
parent = os.path.dirname(target_path)
if parent:
os.makedirs(parent, exist_ok=True)
shutil.copy(source_path, target_path)
``` |
[
{
"content": "Here is the code block:\n```python\nimport gtk\nimport ns3\nfrom visualizer.base import InformationWindow\n\nNODE_STATISTICS_MEMORY = 10\n\n\nclass StatisticsCollector(object):\n \"\"\"\n Collects interface statistics for all nodes.\n \"\"\"\n\n class NetDevStats(object):\n __sl... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nimport gtk\nimport ns3\nfrom visualizer.base import InformationWindow\n\nNODE_STATISTICS_MEMORY = 10\n\n\nclass StatisticsCollector(object):\n \"\"\"\n Collects interface statistics for all nodes.\n \"\"\"\n\n class NetDevStats(object... | ```python
import gtk
import ns3
from visualizer.base import InformationWindow
NODE_STATISTICS_MEMORY = 10
class StatisticsCollector(object):
"""
Collects interface statistics for all nodes.
"""
class NetDevStats(object):
__slots__ = ['rxPackets', 'rxBytes', 'txPackets', 'txBytes',
'rxPacketRate', 'rxBitRate', 'txPacketRate', 'txBitRate']
def __init__(self, visualizer):
self.node_statistics = {} # nodeid -> list(raw statistics)
self.visualizer = visualizer
def simulation_periodic_update(self, viz):
nodes_statistics = viz.simulation.sim_helper.GetNodesStatistics()
for stats in nodes_statistics:
try:
raw_stats_list = self.node_statistics[stats.nodeId]
except KeyError:
raw_stats_list = []
self.node_statistics[stats.nodeId] = raw_stats_list
raw_stats_list.append(stats.statistics)
while len(raw_stats_list) > NODE_STATISTICS_MEMORY:
raw_stats_list.pop(0)
def get_interface_statistics(self, nodeId):
try:
raw_stats_list = self.node_statistics[nodeId]
except KeyError:
return []
if len(raw_stats_list) < NODE_STATISTICS_MEMORY:
return []
assert len(raw_stats_list) == NODE_STATISTICS_MEMORY
tx_packets1 = [] # transmitted packets, one value per interface
rx_packets1 = []
tx_bytes1 = []
rx_bytes1 = []
for iface, stats in enumerate(raw_stats_list[0]):
tx_packets1.append(stats.transmittedPackets)
tx_bytes1.append(stats.transmittedBytes)
rx_packets1.append(stats.receivedPackets)
rx_bytes1.append(stats.receivedBytes)
retval = []
k = self.visualizer.sample_period*(NODE_STATISTICS_MEMORY-1)
for iface, stats in enumerate(raw_stats_list[-1]):
outStat = self.NetDevStats()
outStat.txPackets = stats.transmittedPackets
outStat.txBytes = stats.transmittedBytes
outStat.rxPackets = stats.receivedPackets
outStat.rxBytes = stats.receivedBytes
outStat.txPacketRate = (stats.transmittedPackets - tx_packets1[iface])/k
outStat.rxPacketRate = (stats.receivedPackets - rx_packets1[iface])/k
outStat.txBitRate = (stats.transmittedBytes - tx_bytes1[iface])*8/k
outStat.rxBitRate = (stats.receivedBytes - rx_bytes1[iface])*8/k
retval.append(outStat)
return retval
class ShowInterfaceStatistics(InformationWindow):
(
COLUMN_INTERFACE,
COLUMN_TX_PACKETS,
COLUMN_TX_BYTES,
COLUMN_TX_PACKET_RATE,
COLUMN_TX_BIT_RATE,
COLUMN_RX_PACKETS,
COLUMN_RX_BYTES,
COLUMN_RX_PACKET_RATE,
COLUMN_RX_BIT_RATE,
) = range(9)
def __init__(self, visualizer, node_index, statistics_collector):
InformationWindow.__init__(self)
self.win = gtk.Dialog(parent=visualizer.window,
flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.win.connect("response", self._response_cb)
self.win.set_title("Mesh Statistics for node %i" % node_index)
self.visualizer = visualizer
self.statistics_collector = statistics_collector
self.node_index = node_index
self.viz_node = visualizer.get_node(node_index)
self.table_model = gtk.ListStore(*([str]*13))
treeview = gtk.TreeView(self.table_model)
treeview.show()
self.win.vbox.add(treeview)
def add_column(descr, colid):
column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
treeview.append_column(column)
add_column("Interface", self.COLUMN_INTERFACE)
add_column("Tx Packets", self.COLUMN_TX_PACKETS)
add_column("Tx Bytes", self.COLUMN_TX_BYTES)
add_column("Tx pkt/1s", self.COLUMN_TX_PACKET_RATE)
add_column("Tx bit/1s", self.COLUMN_TX_BIT_RATE)
add_column("Rx Packets", self.COLUMN_RX_PACKETS)
add_column("Rx Bytes", self.COLUMN_RX_BYTES)
add_column("Rx pkt/1s", self.COLUMN_RX_PACKET_RATE)
add_column("Rx bit/1s", self.COLUMN_RX_BIT_RATE)
self.visualizer.add_information_window(self)
self.win.show()
def _response_cb(self, win, response):
self.win.destroy()
self.visualizer.remove_information_window(self)
def update(self):
node = ns3.NodeList.GetNode(self.node_index)
stats_list = self.statistics_collector.get_interface_statistics(self.node_index)
self.table_model.clear()
for iface, stats in enumerate(stats_list):
tree_iter = self.table_model.append()
netdevice = node.GetDevice(iface)
interface_name = ns3.Names.FindName(netdevice)
if not interface_name:
interface_name = "(interface %i)" % iface
self.table_model.set(tree_iter,
self.COLUMN_INTERFACE, interface_name,
self.COLUMN_TX_PACKETS, str(stats.txPackets),
self.COLUMN_TX_BYTES, str(stats.txBytes),
self.COLUMN_TX_PACKET_RATE, str(stats.txPacketRate),
self.COLUMN_TX_BIT_RATE, str(stats.txBitRate),
self.COLUMN_RX_PACKETS, str(stats.rxPackets),
self.COLUMN_RX_BYTES, str(stats.rxBytes),
self.COLUMN_RX_PACKET_RATE, str(stats.rxPacketRate),
self.COLUMN_RX_BIT_RATE, str(stats.rxBitRate)
)
def populate_node_menu(viz, node, menu):
menu_item = gtk.MenuItem("Switch On")
menu_item.show()
def _show_it_on(dummy):
print "Switching on\n"
menu_item.connect("activate", _show_it_on)
menu.add(menu_item)
menu_item = gtk.MenuItem("Show Mesh Statistics")
menu_item.show()
def _show_it(dummy_menu_item):
ShowInterfaceStatistics(viz, node.node_index, statistics_collector)
menu_item.connect("activate", _show_it)
menu.add(menu_item)
def register(viz):
statistics_collector = StatisticsCollector(viz)
viz.connect("populate-node-menu", populate_node_menu)
viz.connect("simulation-periodic-update", statistics_collector.simulation_periodic_update)
``` |
[
{
"content": "Here is the code block:\n```python\n# Copyright 2014-2017 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at you... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# Copyright 2014-2017 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the Licens... | ```python
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from functools import partial
import errno
from . import _cache_manager
from . import _pool
from . import libnl
from .link import _nl_link_cache, _link_index_to_name
def iter_addrs():
"""Generator that yields an information dictionary for each network address
in the system."""
with _pool.socket() as sock:
with _nl_addr_cache(sock) as addr_cache:
with _nl_link_cache(sock) as link_cache: # for index to label
addr = libnl.nl_cache_get_first(addr_cache)
while addr:
yield _addr_info(addr, link_cache=link_cache)
addr = libnl.nl_cache_get_next(addr)
def _addr_info(addr, link_cache=None):
"""Returns a dictionary with the address information."""
index = libnl.rtnl_addr_get_ifindex(addr)
local_address = libnl.rtnl_addr_get_local(addr)
data = {
'index': index,
'family': libnl.nl_af2str(libnl.rtnl_addr_get_family(addr)),
'prefixlen': libnl.rtnl_addr_get_prefixlen(addr),
'scope': libnl.rtnl_scope2str(libnl.rtnl_addr_get_scope(addr)),
'flags': _addr_flags(addr),
'address': libnl.nl_addr2str(local_address) if local_address else None
}
try:
data['label'] = _link_index_to_name(index, cache=link_cache)
except IOError as err:
if err.errno != errno.ENODEV:
raise
return data
def split(addr):
"""Split an addr dict from iter_addrs"""
# for 32bits address, the address field is slashless
return addr['address'].split('/')[0], addr['prefixlen']
def cidr_form(addr):
return '{}/{}'.format(*split(addr))
def is_primary(addr):
return 'secondary' not in addr['flags']
def is_permanent(addr):
return 'permanent' in addr['flags']
def _addr_flags(addr):
"""Returns the textual representation of the address flags"""
return frozenset(
libnl.rtnl_addr_flags2str(libnl.rtnl_addr_get_flags(addr)).split(','))
_nl_addr_cache = partial(_cache_manager, libnl.rtnl_addr_alloc_cache)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n#!/usr/bin/env python2\n# Copyright (c) 2014-2015 The Bitcoin Core developers\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://w... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n#!/usr/bin/env python2\n# Copyright (c) 2014-2015 The Bitcoin Core developers\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COP... | ```python
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2016 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# MVF-Core
"""
Exercise the signature change (replay protection) code.
Derived from walletbackupauto.py.
Test case is:
4 nodes - 2 forking and 2 non-forking, sending transactions between each other.
Prior to the fork, anything goes.
Post fork, the nodes of the same kind can still send between each other,
but not to the nodes of the other kind (2 way check).
"""
import os
import fnmatch
import hashlib
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
import time
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
FORKHEIGHT = 120
class ReplayProtectionTest(BitcoinTestFramework):
def setup_chain(self):
#logging.info("Initializing test directory "+self.options.tmpdir)
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
#logging.info("Starting nodes")
print("Starting nodes")
# all nodes are spenders, let's give them a keypool=100
self.extra_args = [
['-debug', '-whitelist=127.0.0.1', "-keypool=100"],
['-debug', '-whitelist=127.0.0.1', "-keypool=100"],
['-debug', '-whitelist=127.0.0.1', "-keypool=100", "-forkheight=%s"%FORKHEIGHT],
['-debug', '-whitelist=127.0.0.1', "-keypool=100", "-forkheight=%s"%FORKHEIGHT]]
self.nodes = start_nodes(4, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[3], 2)
self.is_network_split=False
self.sync_all()
def send_and_check(self, from_node, to_node, expect_to_succeed=True, force_sync=True, check=True, check_for_fail=False):
''' try sending 0.1 BTC from one node to another,
and optionally check if successful '''
to_addr = self.nodes[to_node].getnewaddress()
amount = Decimal(1) / Decimal(10)
txid = self.nodes[from_node].sendtoaddress(to_addr, amount)
if force_sync:
sync_mempools([self.nodes[from_node], self.nodes[to_node]])
else:
time.sleep(1)
if check:
if check_for_fail:
assert_equal(txid in self.nodes[from_node].getrawmempool(), True)
assert_equal(txid in self.nodes[to_node].getrawmempool(), False)
else:
assert_equal(txid in self.nodes[from_node].getrawmempool() and (txid in self.nodes[to_node].getrawmempool() or not expect_to_succeed), True)
return txid
def run_test(self):
#logging.info("Fork height configured for block %s"%(FORKHEIGHT))
print("Fork height configured for block %s"%(FORKHEIGHT))
#logging.info("Generating initial 104 blocks")
print("Generating initial 104 blocks")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(101)
sync_blocks(self.nodes)
#logging.info("Current height %s blocks"%(self.nodes[0].getblockcount()))
print("Current height %s blocks"%(self.nodes[0].getblockcount()))
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 50)
assert_equal(self.nodes[0].getblockcount(), 104)
#logging.info("Check all sending works after setup")
print("Check all sending works after setup")
# from any node to the others should be ok now
# this should generate 4*3 = 12 more blocks
for src_node in range(4):
for dst_node in range(4):
if src_node != dst_node:
#logging.info("... from %d to %d" %(src_node, dst_node))
print("... from %d to %d" %(src_node, dst_node))
self.send_and_check(src_node, dst_node, True)
self.nodes[dst_node].generate(1)
sync_blocks(self.nodes)
current_height = self.nodes[0].getblockcount()
assert_equal(current_height, 116)
# generate blocks, one on each node in turn, until we reach pre-fork block height
blocks_to_fork = FORKHEIGHT - current_height - 1
self.nodes[0].generate(blocks_to_fork)
# not sure why this loop didn't work reliably...
# maybe it was the round-robin generation
while False: #blocks_to_fork > 0:
#logging.info("blocks left to fork height: %d" % blocks_to_fork)
print("blocks left to fork height: %d" % blocks_to_fork)
self.nodes[blocks_to_fork % 4].generate(1)
blocks_to_fork -= 1
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT - 1)
#logging.info("Current height %s blocks (pre-fork block)"%(self.nodes[0].getblockcount()))
print("Current height %s blocks (pre-fork block)"%(self.nodes[0].getblockcount()))
# check that we can still send to all other nodes for the pre-fork block
# collect a bunch of tx's sent by the nodes to each other
#logging.info("sending tx's between all nodes at pre-fork")
print("sending tx's between all nodes at pre-fork")
should_be_fine_txs = []
for src_node in range(4):
for dst_node in range(4):
if src_node != dst_node:
#logging.info("... from %d to %d" %(src_node, dst_node))
print("... from %d to %d" %(src_node, dst_node))
should_be_fine_txs.append(self.send_and_check(src_node, dst_node, True))
#logging.info("Verifying tx's were still accepted by all nodes")
print("Verifying tx's were still accepted by all nodes")
sync_mempools(self.nodes)
mempools = [self.nodes[i].getrawmempool() for i in range(4)]
for tx in should_be_fine_txs:
for n in range(4):
assert_equal(tx in mempools[n], True)
# generate the fork block
#logging.info("Generate fork block at height %s" % FORKHEIGHT)
print("Generate fork block at height %s" % FORKHEIGHT)
self.nodes[0].generate(1)
# check the previous round of tx's not in mempool anymore
self.sync_all()
assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT)
#logging.info("Verifying tx's no longer in any mempool")
print("Verifying tx's no longer in any mempool")
mempools = [self.nodes[i].getrawmempool() for i in range(4)]
for tx in should_be_fine_txs:
for n in range(4):
assert_equal(tx in mempools[n], False)
# check that now, only nodes of the same kind can transact
# these pairs should work fine
#logging.info("Checking transactions between same-kind nodes")
print("Checking transactions between same-kind nodes")
for pair in ((0,1), (1,0), (2,3), (3,2)):
#logging.info("... from %d to %d" %(pair[0], pair[1]))
print("... from %d to %d" %(pair[0], pair[1]))
self.send_and_check(pair[0], pair[1], True)
# re-connect the nodes which have been disconnected due to the
# above post-fork transactions, so we can test them separately
#logging.info("Re-connecting nodes which disconnected due to prior step")
print("Re-connecting nodes which disconnected due to prior step")
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,1,3)
#logging.info("Checking transactions between forked/unforked nodes")
print("Checking transactions between forked/unforked nodes")
# these should not work anymore
# MVF-Core TODO: decide whether to accept old-style signatures post-fork (maybe limited-time only?)
# if you only want to deny new->old, then use the commented out code
#for pair in ((2,0), (2,1), (3,0), (3,1)):
# check both forked->unforked and vice versa are blocked now
for pair in ((0,2), (0,3), (1,2), (1,3), (2,0), (2,1), (3,0), (3,1)):
#logging.info("... from %d to %d" %(pair[0], pair[1]))
print("... from %d to %d" %(pair[0], pair[1]))
self.send_and_check(pair[0], pair[1], expect_to_succeed=False, force_sync=False, check=True, check_for_fail=True)
if __name__ == '__main__':
ReplayProtectionTest().main()
``` |
[
{
"content": "Here is a code snippet:\n```python\nfrom __future__ import absolute_import\n\nimport mock\nfrom random import random\n\nfrom .MockPrinter import MockPrinter\nfrom redeem.Gcode import Gcode\n\n\nclass M114_Tests(MockPrinter):\n def test_gcodes_M114(self):\n A = round(random() * 200, 1)\n B =... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nfrom __future__ import absolute_import\n\nimport mock\nfrom random import random\n\nfrom .MockPrinter import MockPrinter\nfrom redeem.Gcode import Gcode\n\n\nclass M114_Tests(MockPrinter):\n def test_gcodes_M114(self):\n A = round(random() * ... | ```python
from __future__ import absolute_import
import mock
from random import random
from .MockPrinter import MockPrinter
from redeem.Gcode import Gcode
class M114_Tests(MockPrinter):
def test_gcodes_M114(self):
A = round(random() * 200, 1)
B = round(random() * 200, 1)
C = round(random() * 200, 1)
X = round(random() * 200, 1)
Y = round(random() * 200, 1)
Z = round(random() * 200, 1)
E = round(random() * 200, 1)
H = round(random() * 200, 1)
self.printer.path_planner.get_current_pos = mock.Mock(return_value={
'A': A,
'C': C,
'B': B,
'E': E,
'H': H,
'Y': Y,
'X': X,
'Z': Z
})
g = Gcode({"message": "M114"})
self.printer.processor.gcodes[g.gcode].execute(g)
self.printer.path_planner.get_current_pos.assert_called_with(
ideal=True, mm=True) # kinda redundant, but hey.
self.assertEqual(
g.answer,
"ok C: X:{:.1f} Y:{:.1f} Z:{:.1f} E:{:.1f} A:{:.1f} B:{:.1f} C:{:.1f} H:{:.1f}".format(
X, Y, Z, E, A, B, C, H))
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo.addons.sale_timesheet.tests.test_reporting import TestReporting\nfrom odoo.tools import float_compare\nfrom odoo.tests import tagged\n\n\n@tagged('-at_install', 'post_install... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo.addons.sale_timesheet.tests.test_reporting import TestReporting\nfrom odoo.tools import float_compare\nfrom odoo.tests import tagged\n\n\n@tagged('-at_install... | ```python
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_timesheet.tests.test_reporting import TestReporting
from odoo.tools import float_compare
from odoo.tests import tagged
@tagged('-at_install', 'post_install')
class TestSaleProject(TestReporting):
def test_project_overview_by_project(self):
rounding = self.env.company.currency_id.rounding
so_line_deliver_global_project = self.env['sale.order.line'].create({
'name': self.product_delivery_timesheet2.name,
'product_id': self.product_delivery_timesheet2.id,
'product_uom_qty': 50,
'product_uom': self.product_delivery_timesheet2.uom_id.id,
'price_unit': self.product_delivery_timesheet2.list_price,
'order_id': self.sale_order_2.id,
})
self.sale_order_2.action_confirm()
project_so = self.so_line_order_project.project_id
# log timesheet for billable time
timesheet1 = self._log_timesheet_manager(project_so, 10, so_line_deliver_global_project.task_id)
task_so = self.so_line_order_project.task_id
# logged some timesheets: on project only, then on tasks with different employees
timesheet2 = self._log_timesheet_user(project_so, 2)
timesheet3 = self._log_timesheet_user(project_so, 3, task_so)
timesheet4 = self._log_timesheet_manager(project_so, 1, task_so)
# create a task which is not linked to sales order and fill non-billable timesheet
task = self.env['project.task'].create({
'name': 'Task',
'project_id': project_so.id,
'allow_billable': False,
'sale_line_id': False
})
timesheet5 = self._log_timesheet_user(project_so, 5, task)
# invoice the Sales Order SO2
context = {
"active_model": 'sale.order',
"active_ids": [self.sale_order_2.id],
"active_id": self.sale_order_2.id,
'open_invoices': True,
}
payment = self.env['sale.advance.payment.inv'].create({
'advance_payment_method': 'delivered',
})
action_invoice = payment.with_context(context).create_invoices()
invoice = self.env['account.move'].browse(action_invoice['res_id'])
invoice.action_post()
# simulate the auto creation of the SO line for expense, like we confirm a vendor bill.
so_line_expense = self.env['sale.order.line'].create({
'name': self.product_expense.name,
'product_id': self.product_expense.id,
'product_uom_qty': 0.0,
'product_uom': self.product_expense.uom_id.id,
'price_unit': self.product_expense.list_price, # reinvoice at sales price
'order_id': self.sale_order_2.id,
'is_expense': True,
})
expense = self.env['account.analytic.line'].create({
'name': 'expense on project_so',
'account_id': project_so.analytic_account_id.id,
'so_line': so_line_expense.id,
'employee_id': self.employee_user.id,
'unit_amount': 4,
'amount': 4 * self.product_expense.list_price * -1,
'product_id': self.product_expense.id,
'product_uom_id': self.product_expense.uom_id.id,
})
other_revenues = self.env['account.analytic.line'].create({
'name': 'pther revenues on project_so',
'account_id': project_so.analytic_account_id.id,
'employee_id': self.employee_user.id,
'unit_amount': 1,
'amount': self.product_expense.list_price,
'product_id': self.product_expense.id,
'product_uom_id': self.product_expense.uom_id.id,
})
view_id = self.env.ref('sale_timesheet.project_timesheet_action_client_timesheet_plan').id
vals = self.env['project.project']._qweb_prepare_qcontext(view_id, [['id', '=', project_so.id]])
dashboard_value = timesheet2.unit_amount + timesheet3.unit_amount + timesheet4.unit_amount + timesheet5.unit_amount + timesheet1.unit_amount
project_so_timesheet_sold_unit = timesheet3.unit_amount + timesheet4.unit_amount
project_rate_non_billable = timesheet5.unit_amount / dashboard_value * 100
project_rate_non_billable_project = timesheet2.unit_amount / dashboard_value * 100
project_rate_billable_time = timesheet1.unit_amount / dashboard_value * 100
project_rate_billable_fixed = project_so_timesheet_sold_unit / dashboard_value * 100
project_rate_total = project_rate_non_billable + project_rate_non_billable_project + project_rate_billable_time + project_rate_billable_fixed
project_invoiced = self.so_line_order_project.price_unit * self.so_line_order_project.product_uom_qty * timesheet1.unit_amount
project_timesheet_cost = timesheet2.amount + timesheet3.amount + timesheet4.amount + timesheet5.amount + timesheet1.amount
self.assertEqual(float_compare(vals['dashboard']['time']['non_billable'], timesheet5.unit_amount, precision_rounding=rounding), 0, "The hours non-billable should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['non_billable_project'], timesheet2.unit_amount, precision_rounding=rounding), 0, "The hours non-billable-project should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['billable_time'], timesheet1.unit_amount, precision_rounding=rounding), 0, "The hours billable-time should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['billable_fixed'], project_so_timesheet_sold_unit, precision_rounding=rounding), 0, "The hours billable-fixed should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['total'], dashboard_value, precision_rounding=rounding), 0, "The total hours should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['non_billable'], project_rate_non_billable, precision_rounding=rounding), 0, "The rate non-billable should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['non_billable_project'], project_rate_non_billable_project, precision_rounding=rounding), 0, "The rate non-billable-project should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['billable_time'], project_rate_billable_time, precision_rounding=rounding), 0, "The rate billable-time should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['billable_fixed'], project_rate_billable_fixed, precision_rounding=rounding), 0, "The rate billable-fixed should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['total'], project_rate_total, precision_rounding=rounding), 0, "The total rates should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['invoiced'], project_invoiced, precision_rounding=rounding), 0, "The amount invoiced should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['cost'], project_timesheet_cost, precision_rounding=rounding), 0, "The amount cost should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['expense_cost'], expense.amount, precision_rounding=rounding), 0, "The amount expense-cost should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['other_revenues'], other_revenues.amount, precision_rounding=rounding), 0, "The amount of the other revenues should be equal to the created other_revenues account analytic line")
self.assertEqual(float_compare(vals['dashboard']['profit']['total'], project_invoiced + project_timesheet_cost + expense.amount + other_revenues.amount, precision_rounding=rounding), 0, "The total amount should be the sum of the SO2 line and the created other_revenues account analytic line")
self.assertEqual(float_compare(vals['repartition_employee_max'], 11.0, precision_rounding=rounding), 0, "The amount of repartition-employee-max should be the one from SO2 line")
``` |
[
{
"content": "Here is the source code:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport time\nfrom slackclient import SlackClient\nimport yaml\nimport datetime\nimport pytz\nimport json\n\n\nglobal message_string\n\ntoken = \"ADD-YOUR-TOKEN\"# found at https://api.slack.com/web#authenticati... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport time\nfrom slackclient import SlackClient\nimport yaml\nimport datetime\nimport pytz\nimport json\n\n\nglobal message_string\n\ntoken = \"ADD-YOUR-TOKEN\"# found at https://api.slack.com/... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from slackclient import SlackClient
import yaml
import datetime
import pytz
import json
global message_string
token = "ADD-YOUR-TOKEN"# found at https://api.slack.com/web#authentication
sc = SlackClient(token)
users = sc.api_call("users.list")
users_dict = users['members']
def nom_utilisateur(id):
for item in users_dict:
if item['id'] == id:
nom_user = item['name']
return nom_user
def conversion_date(ts):
ma_date = datetime.datetime.fromtimestamp(ts, tz=pytz.timezone('America/Montreal')).strftime('%d-%m-%Y %H:%M:%S')
return ma_date
def reception_message():
global message_string
if sc.rtm_connect():
while True:
contenu_recu = sc.rtm_read()
# Verify if the list list is not empty
if contenu_recu:
mon_dict = contenu_recu[0]
# Change this line by adding the channel id that you want to select
if mon_dict['type'] == "message" and mon_dict['channel'] == "YOUR-CHANNEL-ID" and mon_dict['user']!="USLACKBOT":
message_string = nom_utilisateur(mon_dict["user"]) + "%,%" + mon_dict['text'] + "%,%" + conversion_date(float(mon_dict['ts']))
return message_string
time.sleep(1)
else:
return "Erreur de connexion"
if __name__ == "__main__":
reception_message()
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n# -*- coding: UTF-8 -*-\n#\n# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>\n#\n# This file is part of Wammu <https://wammu.eu/>\n#\n# This program is free software: you can redistribute it and/or modif... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n# -*- coding: UTF-8 -*-\n#\n# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>\n#\n# This file is part of Wammu <https://wammu.eu/>\n#\n# This program is free software: you can redistribute... | ```python
# -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Wammu - Phone manager
Logging window and thread for log reading
'''
import threading
import wx
import os
import sys
import time
import Wammu.Events
from Wammu.Locales import ugettext as _
class LoggerDebug(threading.Thread):
'''
Thread which reads defined files and prints it to stderr.
'''
def __init__(self, filename):
'''
Initializes reader on filename, text will be printed to stderr.
'''
threading.Thread.__init__(self)
self.file_descriptor = open(filename, 'r')
self.filename = filename
self.canceled = False
def run(self):
"""
This is basically tail -f reimplementation
"""
while not self.canceled:
where = self.file_descriptor.tell()
txt = self.file_descriptor.readlines()
if len(txt) == 0:
fd_results = os.fstat(self.file_descriptor.fileno())
try:
st_results = os.stat(self.filename)
except OSError:
st_results = fd_results
if st_results[1] == fd_results[1] or sys.platform == 'win32':
time.sleep(1)
self.file_descriptor.seek(where)
else:
self.file_descriptor = open(self.filename, 'r')
else:
sys.stderr.write(''.join(txt))
self.file_descriptor.close()
class Logger(threading.Thread):
'''
Thread which reads defined files and posts events on change.
'''
def __init__(self, win, filename):
'''
Initializes reader on filename, events will be sent to win.
'''
threading.Thread.__init__(self)
self.win = win
self.file_descriptor = open(filename, 'r')
self.filename = filename
self.canceled = False
def run(self):
"""
This is basically tail -f reimplementation
"""
while not self.canceled:
where = self.file_descriptor.tell()
txt = self.file_descriptor.readlines()
if len(txt) == 0:
fd_results = os.fstat(self.file_descriptor.fileno())
try:
st_results = os.stat(self.filename)
except OSError:
st_results = fd_results
if st_results[1] == fd_results[1] or sys.platform == 'win32':
time.sleep(1)
self.file_descriptor.seek(where)
else:
self.file_descriptor = open(self.filename, 'r')
else:
evt = Wammu.Events.LogEvent(txt=''.join(txt))
wx.PostEvent(self.win, evt)
self.file_descriptor.close()
class LogFrame(wx.Frame):
'''
Window with debug log.
'''
def __init__(self, parent, cfg):
'''
Creates window and initializes event handlers.
'''
self.cfg = cfg
if cfg.HasEntry('/Debug/X') and cfg.HasEntry('/Debug/Y'):
pos = wx.Point(
cfg.ReadInt('/Debug/X'),
cfg.ReadInt('/Debug/Y'))
else:
pos = wx.DefaultPosition
size = wx.Size(
cfg.ReadInt('/Debug/Width'),
cfg.ReadInt('/Debug/Height')
)
wx.Frame.__init__(
self,
parent,
-1,
_('Wammu debug log'),
pos,
size,
wx.DEFAULT_FRAME_STYLE | wx.RESIZE_BORDER
)
self.txt = wx.TextCtrl(
self,
-1,
_('Here will appear debug messages from Gammu…\n'),
style=wx.TE_MULTILINE | wx.TE_READONLY
)
self.txt.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL))
Wammu.Events.EVT_LOG(self, self.OnLog)
wx.EVT_SIZE(self, self.OnSize)
self.OnSize(None)
def OnLog(self, evt):
'''
Event handler for text events from Logger.
'''
self.txt.AppendText(evt.txt)
def OnSize(self, evt):
'''
Resize handler to correctly resize text area.
'''
width, height = self.GetClientSizeTuple()
self.txt.SetDimensions(0, 0, width, height)
``` |
[
{
"content": "Here is the code content:\n```python\nfrom parsl.providers import AWSProvider\n\nfrom parsl.config import Config\nfrom parsl.executors import HighThroughputExecutor\n\n# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py\n# If you are a user copying-and-past... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nfrom parsl.providers import AWSProvider\n\nfrom parsl.config import Config\nfrom parsl.executors import HighThroughputExecutor\n\n# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py\n# If you are a user ... | ```python
from parsl.providers import AWSProvider
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
# 1) create a local `user_opts.py`, or
# 2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
# (i.e., user_opts['swan']['username'] -> 'your_username')
from parsl.tests.configs.user_opts import user_opts
config = Config(
executors=[
HighThroughputExecutor(
label='ec2_single_node',
address=user_opts['public_ip'],
provider=AWSProvider(
user_opts['ec2']['image_id'],
region=user_opts['ec2']['region'],
key_name=user_opts['ec2']['key_name'],
spot_max_bid='1.0',
profile="default",
state_file='awsproviderstate.json',
nodes_per_block=1,
init_blocks=1,
max_blocks=1,
min_blocks=0,
walltime='01:00:00',
),
)
]
)
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\nimport os, functions, settings\n\n# STARTUP BAZY DANYCH\n################################################################################\ndatabase = settings.cDatabase(settings.databaseFile)\n\nfor kraina in set... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\nimport os, functions, settings\n\n# STARTUP BAZY DANYCH\n################################################################################\ndatabase = settings.cDatabase(settings.databaseFile)\n\nf... | ```python
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import os, functions, settings
# STARTUP BAZY DANYCH
################################################################################
database = settings.cDatabase(settings.databaseFile)
for kraina in settings.swiat:
print kraina.nazwa + "."
# KAMERY
################################################################################
for kamera in kraina.listaKamer:
print "->"+kamera.nazwa + "."
# Jezeli nie pobrano obrazu z kamery
if (not kamera.pobrano):
for obraz in kamera.fetchData():
functions.checkDirectoryExists(settings.photosFolder + kraina.folder + kamera.folder)
functions.saveFile(settings.photosFolder + kraina.folder + kamera.folder + settings.actualDate + kamera.rozszerzenie, obraz)
kraina.kolazKamer()
# POMIARY
################################################################################
for dokumentPomiarowy in kraina.listaPomiarow:
dokumentPomiarowy.fetchData()
dokumentPomiarowy.doFiltering()
for pomiar in dokumentPomiarowy.measurments:
database.addMeasurment(pomiar.sqlFormat())
print "Dodano " + pomiar.desc + pomiar.localization
# ZAKOŃCZENIE BAZY DANYCH
################################################################################
database.close()
``` |
[
{
"content": "Return the code unaltered:\n```python\n__author__ = 'andriu'\n\n\nimport pygame, random, sys\nfrom pygame.locals import *\n\nCOLLISION_VISIBLE = False\nDEFAULT_FPS = 60\n\nclass GameObject(pygame.sprite.Sprite):\n\n # Constructor.\n def __init__(self, img_path, pos_xy=(0, 0)):\n\n \"\... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n__author__ = 'andriu'\n\n\nimport pygame, random, sys\nfrom pygame.locals import *\n\nCOLLISION_VISIBLE = False\nDEFAULT_FPS = 60\n\nclass GameObject(pygame.sprite.Sprite):\n\n # Constructor.\n def __init__(self, img_path, pos_xy=(0, 0))... | ```python
__author__ = 'andriu'
import pygame, random, sys
from pygame.locals import *
COLLISION_VISIBLE = False
DEFAULT_FPS = 60
class GameObject(pygame.sprite.Sprite):
# Constructor.
def __init__(self, img_path, pos_xy=(0, 0)):
"""
Inicializa un objeto de juego, carga la imagen especificada
para el sprite y actualiza las dimensiones del sprite segun el
tamaño de la imagen. Opcionalmente puede pasarse una tupla con
la posición inicial que tendrá el objeto en el juego al ser creado.
:type img_path: String
:param img_path: Ruta de la imagen a cargar para
el sprite
:type pos_xy: Tupla de dos elementos
:param pos_xy: Posición X,Y inicial del objeto
"""
# Llama al constructor padre
pygame.sprite.Sprite.__init__(self)
# Carga la imagen y la asigna al sprite
self.image = pygame.image.load(img_path)
# Asigna el 'Rect' con las dimensiones de la imagen
# Actualiza tambien la posicion del objeto al asignar los valores
# correspondientes a rect.x y rect.y
self.rect = self.image.get_rect()
# Suma la posición inicial si es que se ha especificado
pos_inicial_x, pos_inicial_y = pos_xy
self.rect.x += pos_inicial_x
self.rect.y += pos_inicial_y
# Desplazamientos en X,Y del objeto, se usan para actualizar la
# posición del objeto.
self.despl_x, self.despl_y = (0, 0)
# Usamos un 'bound_rect' para comprobación de colisiones propio
# independiente del sprite.rect de pygame. De esta forma
# podemos modificar la forma del 'bound_rect' de colisiones sin
# alterar el posicionado del sprite.
# GameObject actualiza automáticamente y de forma correcta el
# posicionado del 'bound_rect' cuando sus coordenadas x,y
# cambian.
#
# Por defecto se asigna a una copia del sprite.rect actual.
self.bound_rect = self.rect.copy()
# Ejecuta el método 'on_create'
self.on_create()
@ property
def pos_x (self):
"""
Obtiene el valor actual de la coordenada X del sprite
:return: Valor actual de la coordenada X
"""
return self.rect.x
@ pos_x.setter
def pos_x (self, x):
"""
Asigna valor a la coordenada X del sprite. Actualiza de forma
correcta el bound_rect que envuelve al sprite.
:param x: Nueva coordenada X
"""
diff = self.bound_rect.left - self.rect.left
self.rect.x = x
self.bound_rect.x = self.rect.x + diff
@ property
def pos_y (self):
"""
Obtiene el valor actual de la coordenada Y del sprite
:return: Valor actual de la coordenada Y
"""
return self.rect.y
@ pos_y.setter
def pos_y (self, y):
"""
Asigna valor a la coordenada Y del sprite. Actualiza de forma
correcta el 'bound_rect' que envuelve al sprite.
:param y: Nueva coordenada Y
"""
diff = self.bound_rect.top - self.rect.top
self.rect.y = y
self.bound_rect.y = self.rect.y + diff
@ property
def width (self):
"""
Obtiene el ancho del sprite asociado al objeto
:return: Ancho del sprite
"""
return self.image.get_width()
@ property
def height (self):
"""
Obtiene la altura del sprite asociado al objeto
:return: Altura del sprite
"""
return self.image.get_height()
def set_colorkey (self, color_key):
"""
Establece el color_key (color usado como transparencia)
:param color_key: Tupla en formato (R, G, B)
"""
self.image.set_colorkey (color_key)
def draw(self, canvas, draw_rect=False):
"""
Transfiere la imagen correspondiente al sprite a la superficie
de trabajo.
:param canvas: Superficie de trabajo donde copiar la imagen
"""
canvas.blit(self.image, self.rect)
if draw_rect:
pygame.draw.rect(canvas, (255,255,255), self.bound_rect, 2)
def procesa_evento (self, evento):
"""
Procesa los eventos asociados al objeto llamando a la función
on_xxxx correspondiente al evento.
:param evento: Evento a procesar
:return:
"""
# KEYDOWN
if evento.type == KEYDOWN:
self.on_key_down (evento.key)
# KEYUP
if evento.type == KEYUP:
self.on_key_up (evento.key)
def update (self, width, height):
"""
Actualiza el estado del objeto: cambios de posición, etc.
También comprueba si el objeto toca los límites de la habitación
donde está y si está fuera de los límites. En caso que se produzca
algunas de estas situaciones ejecuta los eventos 'intersect_boundary'
o 'out_of_bounds' respectivamente'
:return:
"""
# Comprueba si el objeto tocará los bordes de la habitación
if (self.pos_x + self.despl_x >= width
or self.pos_x + self.despl_x <= 0
or self.pos_y+self.height + self.despl_y >= height
or self.pos_y + self.despl_y <= 0):
# Si es así, ejecuta el evento 'intersect_boundary'
self.intersect_boundary()
self.pos_x += self.despl_x
self.pos_y += self.despl_y
# Comprueba si el objeto está fuera de las dimensiones de la
# habitación
if (self.pos_x >= width or self.pos_x <= 0 or
self.pos_y >= height or self.pos_y <= 0):
# Si es así, ejecuta el evento 'out_of_bounds'
self.out_of_bounds()
def check_for_collisions(self):
for sprite2 in self.room.objetos_de_juego:
if (self != sprite2):
self.check_for_collision (sprite2)
def check_for_collision (self, sprite2):
#if pygame.sprite.collide_rect(self, sprite2):
# Utiliza pygame.Rect.colliderect entre dos pygame.Rect para
# comprobar colisión entre dos sprites
if self.bound_rect.colliderect(sprite2.bound_rect):
self.collision(sprite2)
def step(self):
"""
Step() se ejecuta despues de procesar eventos pero antes de actualizar
el estado de los objetos de juego.
:return:
"""
pass
# -------------------------------------------------------------------
# Metodos equivalentes a los objetos en GameMaker
# -------------------------------------------------------------------
#
# Eventos a los que responden los objetos. Por defecto están vacíos
# y es responsabilidad de las subclases proporcionar la funcionalidad
# requerida para cada evento.
#
# on_create
def on_create(self):
"""
Evento que se ejecuta nada más crear el objeto. Este método es invocado
justo al final de self.__init__ con lo que se garantiza que el objeto
ya está inicializado en este punto.
:return:
"""
pass
# intersect_boundary
def intersect_boundary(self):
"""
Este evento se ejecuta cuando la nueva posición del objeto al sumar
su desplazamiento toca alguno de los bordes de la habitación.
:return:
"""
pass
# out_of_bounds
def out_of_bounds(self):
"""
Este evento se ejecuta cuando una de las coordenadas x o y del objeto
caen fuera de los límites de la habitación, indicando que el objeto
ha salido del área visible del juego.
:return:
"""
pass
# collision
def collision(self, sprite_colliding):
pass
# on_key_down
def on_key_down(self, key):
pass
# on_key_up
def on_key_up(self, key):
pass
class RoomObject():
def __init__(
self,
img_path,
dimensions,
title='New Room',
room_fps=DEFAULT_FPS,
is_fullscreen=False,
hw_surface=False):
"""
Inicializa una habitación con las dimensiones y el fondo de
pantalla indicados. Opcionalmente se puede especificar si se quiere
mostrar a pantalla completa o en ventana, y si se desea una superficie
con aceleración hardware
:param img_path: Ruta completa del fichero imagen para el fondo de pantalla
:type img_path: string
:param dimensions: Ancho y alto de pantalla en formato tupla
:type dimensions: Tuple
:param title: Título de la ventana
:type title: str
:param room_fps: Fotogramas por segundo para ésta habitación, normalmente
los mismos que para todo el juego
:type room_fps: int
:param hw_surface: Si se desea crear una superficie en hardware
:type hw_surface: bool
:param is_fullscreen: True para crear la habitación a pantalla completa
, False para crearla en ventana
:type is_fullscreen: bool
:return: None
"""
# Flags para la creación de la ventana
self.display_flags = (HWSURFACE | DOUBLEBUF) if hw_surface else 0
# Actualizar los flags según se desee crear una ventana
# a pantalla completa
self.is_fullscreen = is_fullscreen
self.display_flags |= pygame.FULLSCREEN if self.is_fullscreen else 0
# Crea la superficie de trabajo con los flags indicados
self.canvas = pygame.display.set_mode (dimensions, self.display_flags)
self.title = title
# Establece el título
pygame.display.set_caption (self.title)
# Objetos en la Room
self.objetos_de_juego = pygame.sprite.Group()
if img_path is not None:
# Imagen de fondo
self.image_background = pygame.image.load (img_path).convert()
else:
self.image_background = pygame.Surface(dimensions)
self.image_background.fill((20, 50, 210))
# Dimensiones de la Room
self.width, self.height = dimensions
# Reloj para el control de FPS
self.clock = pygame.time.Clock()
# Fotogramas por segundo, por defecto 60
self.frames_per_second = room_fps
def blit (self):
"""
Dibuja todos los elementos de juego
:return:
"""
# Primero dibuja el fondo
self.canvas.blit (self.image_background, (0,0))
# Ahora dibuja todos los objetos de la habitación
# llamando al metodo 'blit' de cada objeto
for objeto_de_juego in self.objetos_de_juego:
objeto_de_juego.draw(self.canvas, COLLISION_VISIBLE)
# Y finalmente muestra la superficie de trabajo
pygame.display.flip()
def add (self, objeto_de_juego):
"""
Añade un elemento a la lista 'objetos_de_juego'
:param objeto_de_juego: Objeto de juego a añadir
:return:
"""
assert self is not None, "No hay ninguna habitación creada"
# Convierte la imagen del sprite al formato de pantalla para
# acelerar las operaciones de blit.
objeto_de_juego.image.convert()
# Añade el objeto a la lista de objetos en la habitación actual
self.objetos_de_juego.add (objeto_de_juego)
# y añade una referencia a la habitación actual al objeto de juego
# para así poder referenciar la habitación desde éste.
objeto_de_juego.room = self
def procesa_eventos (self):
"""
Procesa los eventos del juego. Llama a cada objeto de
la habitación para que procese los eventos que les corresponda.
:return:
"""
for evento in pygame.event.get():
if (evento.type == QUIT or
(evento.type == KEYDOWN and
evento.key == K_ESCAPE)):
self.on_close ()
for objeto_de_juego in self.objetos_de_juego:
objeto_de_juego.procesa_evento (evento)
def actualiza_estado (self):
"""
Actualiza el estado de todos los objetos pidiendo a cada objeto
que actualice su estado.
:return:
"""
self.check_for_collisions()
self.objetos_de_juego.update(self.width, self.height)
def check_for_collisions(self):
# Comprueba colisiones entre sprites dentro del grupo
for object_sprite in self.objetos_de_juego:
object_sprite.check_for_collisions()
def loop(self):
while True:
# Procesa los eventos
self.procesa_eventos()
# Llama al metodo step
self.step()
# Actualiza el estado del juego
self.actualiza_estado()
# Muestra el contenido del juego por pantalla
self.blit()
self.clock.tick (self.frames_per_second)
def step(self):
"""
Step() se ejecuta despues de procesar eventos pero antes de actualizar
el estado de los objetos de juego.
:return:
"""
for objeto_dejuego in self.objetos_de_juego:
objeto_dejuego.step()
#
# Eventos a los que reacciona una Habitación
#
# on_close
def on_close (self):
"""
Evento por defecto cuando se pulsa el botón salir de la ventana.
Cierra la aplicación y sale al sistema con código de error 0.
:return:
"""
pygame.mixer.quit()
pygame.quit()
sys.exit(0)
class SoundObject():
def __init__(self, file_name, is_music=True):
"""
SoundObject sirve tanto para música de fondo como para efectos
sonoros. Si
is_music = True
el objeto se usará para controlar la música de fondo. Si
is_music = False
el objeto controlará efectos sonoros.
:param file_name: Nombre completo del archivo a usar para el sonido
:param is_music: True, crea un objeto para música de fondo, False
para crear un objeto de efectos de sonido.
:return: Nada
"""
self.is_music = is_music
self.__objeto_sonido = None
#pygame.mix
if is_music:
# Load a mixer.music file
pygame.mixer.music.load(file_name)
else:
# Load a mixer.sound file
self.__objeto_sonido = pygame.mixer.Sound(file_name)
self.__objeto_sonido.set_volume(0.5)
def play(self, loop=0):
if self.is_music:
pygame.mixer.music.play(loop)
else:
assert self.__objeto_sonido is not None
self.__objeto_sonido.play(loop)
class Game():
def __init__(self, fps=60):
"""
Inicializa PyGame y el mixer, random, etc.
"""
# mixer.pre_init soluciona los problemas de lag que tenía con los
# efectos sonoros.
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.init()
random.seed()
self.game_fps = fps
self.room = None
def loop(self):
'''
Bucle principal del juego, llama al bucle de la habitación.
:return:
'''
assert self.room is not None, "No hay ninguna habitación creada"
self.room.loop()
``` |
[
{
"content": "```python\nimport numpy as np\nfrom numba import cuda, float32, float64, int32\nfrom numba.cuda.testing import unittest, CUDATestCase\n\n\nclass TestCudaIDiv(CUDATestCase):\n def test_inplace_div(self):\n\n @cuda.jit(argtypes=[float32[:, :], int32, int32])\n def div(grid, l_x, l_y... | [
{
"content": "<|memory_start|>```python\nimport numpy as np\nfrom numba import cuda, float32, float64, int32\nfrom numba.cuda.testing import unittest, CUDATestCase\n\n\nclass TestCudaIDiv(CUDATestCase):\n def test_inplace_div(self):\n\n @cuda.jit(argtypes=[float32[:, :], int32, int32])\n def di... | ```python
import numpy as np
from numba import cuda, float32, float64, int32
from numba.cuda.testing import unittest, CUDATestCase
class TestCudaIDiv(CUDATestCase):
def test_inplace_div(self):
@cuda.jit(argtypes=[float32[:, :], int32, int32])
def div(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float32)
grid = cuda.to_device(x)
div[1, 1](grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
def test_inplace_div_double(self):
@cuda.jit(argtypes=[float64[:, :], int32, int32])
def div_double(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float64)
grid = cuda.to_device(x)
div_double[1, 1](grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# -*- Mode: Python; test-case-name: flumotion.test.test_wizard_models -*-\n# vi:si:et:sw=4:sts=4:ts=4\n#\n# Flumotion - a streaming media server\n# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).\n# All rights reserved.\n\n# Th... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# -*- Mode: Python; test-case-name: flumotion.test.test_wizard_models -*-\n# vi:si:et:sw=4:sts=4:ts=4\n#\n# Flumotion - a streaming media server\n# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).\n# All rights r... | ```python
# -*- Mode: Python; test-case-name: flumotion.test.test_wizard_models -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import unittest
from kiwi.python import Settable
from flumotion.admin.assistant.configurationwriter import ConfigurationWriter
from flumotion.admin.assistant.models import Component, Plug, Porter, \
AudioProducer, VideoProducer, AudioEncoder, VideoEncoder, HTTPServer
from flumotion.admin.assistant.save import AssistantSaver
from flumotion.common import testsuite
from flumotion.common.xmlwriter import XMLWriter
from flumotion.configure import configure
from flumotion.component.producers.firewire.wizard_gtk import FireWireProducer
from flumotion.component.consumers.httpstreamer.wizard_gtk import HTTPStreamer
from flumotion.component.encoders.vorbis.wizard_gtk import VorbisAudioEncoder
from flumotion.component.encoders.theora.wizard_gtk import TheoraVideoEncoder
from flumotion.component.producers.videotest.wizard_gtk import \
TestVideoProducer
from flumotion.component.producers.audiotest.wizard_gtk import \
TestAudioProducer
from flumotion.admin.gtk.overlaystep import Overlay
class TestXMLWriter(testsuite.TestCase):
def testEmpty(self):
writer = ConfigurationWriter('', [], [])
testsuite.diffStrings(
XMLWriter.encoding + \
("<planet>\n"
"</planet>\n"),
writer.getXML())
def testFlowComponent(self):
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
writer = ConfigurationWriter('flow', [c], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testAtmosphereComponent(self):
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
c.properties.foo = 'bar'
writer = ConfigurationWriter('', [], [c])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="foo">bar</property>\n'
' </component>\n'
' </atmosphere>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testComponentWithPlug(self):
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
plug = Plug()
plug.plugType = 'plug-type'
plug.properties.foo = 'bar'
c.plugs.append(plug)
writer = ConfigurationWriter('flow', [c], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <plugs>\n'
' <plug type="plug-type">\n'
' \n'
' <property name="foo">bar</property>\n'
' </plug>\n'
' </plugs>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testComponentWithFeeders(self):
c1 = Component()
c1.name = 'name'
c1.componentType = 'first'
c1.worker = 'worker'
c2 = Component()
c2.name = 'name'
c2.componentType = 'second'
c2.worker = 'worker'
c2.link(c1)
writer = ConfigurationWriter('flow', [c1, c2], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="first"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>name</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="name"\n'
' type="second"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
class TestWizardSave(testsuite.TestCase):
def _createAudioProducer(self, componentType='audio-producer',
worker='audio-producer-worker'):
audioProducer = AudioProducer()
audioProducer.componentType = componentType
audioProducer.worker = worker
return audioProducer
def _createVideoProducer(self, componentType='video-producer',
worker='video-producer-worker'):
videoProducer = VideoProducer()
videoProducer.componentType = componentType
videoProducer.worker = worker
videoProducer.properties.width = 640
videoProducer.properties.height = 480
return videoProducer
def _createVideoOverlay(self, videoProducer):
overlay = Overlay(videoProducer)
overlay.worker = 'overlay-worker'
return overlay
def _createAudioEncoder(self):
audioEncoder = AudioEncoder()
audioEncoder.componentType = 'audio-encoder'
audioEncoder.worker = 'audio-encoder-worker'
return audioEncoder
def _createVideoEncoder(self):
videoEncoder = VideoEncoder()
videoEncoder.componentType = 'video-encoder'
videoEncoder.worker = 'video-encoder-worker'
return videoEncoder
def _createPorter(self):
return Porter('porter-worker',
port=8080,
username='username',
password='password',
socketPath='flu-XXXX.socket')
def _createHTTPStreamer(self):
streamer = HTTPStreamer()
streamer.worker = 'streamer-worker'
return streamer
def _createFirewireProducer(self):
producer = FireWireProducer()
producer.worker = 'firewire-video-producer-worker'
producer.properties.width = 640
producer.properties.height = 480
return producer
def testDefaultStream(self):
save = AssistantSaver()
save.setFlowName('flow')
save.setAudioProducer(self._createAudioProducer())
videoProducer = self._createVideoProducer()
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audio-producer"\n'
' project="flumotion"\n'
' worker="audio-producer-worker"\n'
' version="%(version)s">\n'
' </component>\n'
' <component name="producer-video"\n'
' type="video-producer"\n'
' project="flumotion"\n'
' worker="video-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">480</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testMultiFeedProducer(self):
save = AssistantSaver()
save.setFlowName('flow')
save.setAudioProducer(self._createAudioProducer(
worker='both-producer-worker',
componentType='both-producer'))
save.setVideoProducer(self._createVideoProducer(
componentType='both-producer',
worker='both-producer-worker'))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio-video"\n'
' type="both-producer"\n'
' project="flumotion"\n'
' worker="both-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">480</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testOggStream(self):
save = AssistantSaver()
save.setFlowName('flow')
audioProducer = TestAudioProducer()
audioProducer.worker = 'worker'
save.setAudioProducer(audioProducer)
videoProducer = TestVideoProducer()
videoProducer.worker = 'worker'
videoProducer.properties.width = 320
videoProducer.properties.height = 240
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
audioEncoder = VorbisAudioEncoder()
audioEncoder.worker = 'worker'
save.setAudioEncoder(audioEncoder)
videoEncoder = TheoraVideoEncoder()
videoEncoder.worker = 'worker'
save.setVideoEncoder(videoEncoder)
save.setMuxer('ogg-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audiotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="samplerate">44100</property>\n'
' </component>\n'
' <component name="producer-video"\n'
' type="videotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">240</property>\n'
' <property name="pattern">0</property>\n'
' <property name="width">320</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video</feed>\n'
' </eater>\n'
' \n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' <property name="xiph-logo">True</property>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="theora-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">400000</property>\n'
' <property name="keyframe-maxdistance">50</property>\n'
' <property name="speed">3</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="vorbis-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">64000</property>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="ogg-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testAudioOnlyStream(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
audioProducer = TestAudioProducer()
audioProducer.worker = 'worker'
save.setAudioProducer(audioProducer)
audioEncoder = VorbisAudioEncoder()
audioEncoder.worker = 'worker'
save.setAudioEncoder(audioEncoder)
videoProducer = self._createVideoEncoder()
self.assertRaises(ValueError, save.setVideoOverlay,
self._createVideoOverlay(videoProducer))
save.setMuxer('ogg-muxer', 'muxer')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audiotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="samplerate">44100</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="vorbis-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">64000</property>\n'
' </component>\n'
' <component name="muxer-audio"\n'
' type="ogg-muxer"\n'
' project="flumotion"\n'
' worker="muxer"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testFirewireStreamer(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
producer = self._createFirewireProducer()
save.setAudioProducer(producer)
save.setVideoProducer(producer)
save.setVideoOverlay(self._createVideoOverlay(producer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio-video"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="firewire-video-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video:video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video:audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testFirewireStreamerDifferentWorkers(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
audioProducer = self._createFirewireProducer()
audioProducer.worker = 'audio-worker'
save.setAudioProducer(audioProducer)
videoProducer = self._createFirewireProducer()
videoProducer.worker = 'video-worker'
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
streamer = self._createHTTPStreamer()
streamer.has_bandwidth_limit = True
streamer.bandwidth_limit = 123
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="audio-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="producer-video"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="video-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video:video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio:audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="bandwidth-limit">123000000</property>\n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testOndemand(self):
save = AssistantSaver()
server = HTTPServer('ondemand-server-worker', '/mount-point/')
save.addServerConsumer(server, 'ondemand')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-ondemand"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="ondemand-server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount-point/</property>\n'
' </component>\n'
' </atmosphere>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
class TestNameConflicts(testsuite.TestCase):
def setUp(self):
self.save = AssistantSaver()
def _addServer(self, name):
server = HTTPServer('ondemand-server-worker', '/mount-point/')
self.save.addServerConsumer(server, name)
def testNameConflicts(self):
self.save.setExistingComponentNames(['http-server-ondemand'])
self._addServer('ondemand')
self.save.getXML()
components = self.save.getAtmosphereComponents()
self.assertEquals(components[0].name, 'http-server-ondemand2')
def testNameConflictsDoubleDigits(self):
componentNames = ['http-server-ondemand'] + [
'http-server-ondemand%d' % i for i in range(2, 10)]
self.save.setExistingComponentNames(componentNames)
self._addServer('ondemand')
self.save.getXML()
components = self.save.getAtmosphereComponents()
self.assertEquals(components[0].name, 'http-server-ondemand10')
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "Repeat the following code:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013-2015 clowwindy\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013-2015 clowwindy\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = map(lambda x: ('%02X' % ord(x)), v4addr)
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
def parse_header(data, block_pattern):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 2 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
dest_addr = to_bytes(dest_addr)
if block_pattern.match(dest_addr) != None:
print('deny ' + dest_addr)
dest_addr = '127.0.0.1'
return addrtype, dest_addr, dest_port, header_length
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
if addr is "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if len(block) is 1:
prefix_size = 0
while (ip & 1) == 0 and ip is not 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\ntry:\n from functools import wraps\nexcept ImportError:\n from django.utils.functional import wraps # Python 2.4 fallback.\nfrom django.utils.decorators import available_attrs\nfrom dj... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\ntry:\n from functools import wraps\nexcept ImportError:\n from django.utils.functional import wraps # Python 2.4 fallback.\nfrom django.utils.decorators import availabl... | ```python
# -*- coding: utf-8 -*-
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.decorators import available_attrs
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.contrib import messages
default_message = "您沒有管理者權限。"
def user_passes_test(test_func, message=default_message, redirect_url="/"):
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
decorated_view_func = login_required(request)
if not decorated_view_func.user.is_authenticated():
return decorated_view_func(request) # return redirect to signin
if not test_func(request.user):
messages.error(request, message)
return redirect(redirect_url)
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator
def super_login_required(view_func=None, message=default_message, redirect_url="/"):
super_login_func = user_passes_test(
lambda u: u.is_superuser,
message=message,
redirect_url=redirect_url
)
if view_func:
return super_login_func(view_func)
return super_login_func
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\nimport random\nimport pygame\n\nBLACK = (0, 0, 0)\n\nclass Stars():\n\n def __init__(self, background, width, height, max_stars):\n self.background = background\n self.width = width\n self.heigh... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\nimport random\nimport pygame\n\nBLACK = (0, 0, 0)\n\nclass Stars():\n\n def __init__(self, background, width, height, max_stars):\n self.background = background\n self.width = width\n ... | ```python
import random
import pygame
BLACK = (0, 0, 0)
class Stars():
def __init__(self, background, width, height, max_stars):
self.background = background
self.width = width
self.height = height
self.total_stars = max_stars
self.positions = self.generate_positions()
def generate_positions(self):
return [[random.randint(0, self.width), random.randint(0, self.height)] for i in range(self.total_stars)]
def draw(self, star):
pygame.draw.line(self.background, (255, 255, 255), (star[0], star[1]), (star[0], star[1]))
star[0] = star[0] - 1
if star[0] < 0:
star[0] = self.width
star[1] = random.randint(0, self.height)
class Asteroid(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load('images/med_1_p1.png').convert()
self.rect = self.image.get_rect()
self.image.set_colorkey(BLACK)
self.asteroid_list = pygame.sprite.Group()
def get_random_position(self):
return {'x': random.randint(800, 1281), 'y': random.randint(0, 800)}
def generate_asteroids(self, minimum, maximum):
for i in range(random.randint(minimum, maximum)):
position = self.get_random_position()
asteroid = Asteroid()
asteroid.rect.x = position['x']
asteroid.rect.y = position['y']
self.asteroid_list.add(asteroid)
def update(self):
self.rect.x -= 2
if self.rect.x < -5:
self.rect.y = random.randrange(0, 800)
self.rect.x = random.randint(800, 1281)
``` |
[
{
"content": "Here is the script:\n```python\n\"\"\"Compile samples that are infeasible or difficult by svg compilation.\n\"\"\"\n\nimport datetime\nfrom pathlib import Path\nfrom fontTools import fontBuilder\nfrom fontTools import ttLib\nfrom fontTools.colorLib import builder as colorBuilder\nfrom fontTools.pe... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n\"\"\"Compile samples that are infeasible or difficult by svg compilation.\n\"\"\"\n\nimport datetime\nfrom pathlib import Path\nfrom fontTools import fontBuilder\nfrom fontTools import ttLib\nfrom fontTools.colorLib import builder as colorBuilder\nf... | ```python
"""Compile samples that are infeasible or difficult by svg compilation.
"""
import datetime
from pathlib import Path
from fontTools import fontBuilder
from fontTools import ttLib
from fontTools.colorLib import builder as colorBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib.tables._g_l_y_f import Glyph
import sys
from typing import Any, Mapping, NamedTuple, Optional
from fontTools.ttLib.tables import otTables as ot
from nanoemoji.colors import css_colors, Color
from fontTools.misc.transform import Transform
_UPEM = 1000
_ASCENT = 950
_DESCENT = 250
_FAMILY = "More COLR v1 Samples"
_STYLE = "Regular"
_PALETTE = {} # <3 mutable globals
class SampleGlyph(NamedTuple):
glyph_name: str
accessor: str
advance: int
glyph: Glyph
colr: Optional[Mapping[str, Any]] = None
def _cpal(color_str):
color = Color.fromstring(color_str).to_ufo_color()
if color not in _PALETTE:
_PALETTE[color] = len(_PALETTE)
return _PALETTE[color]
def _sample_sweep():
glyph_name = "sweep"
pen = TTGlyphPen(None)
pen.moveTo((100, 500))
pen.qCurveTo((500, 1000), (900, 500))
pen.qCurveTo((500, 0), (100, 500))
pen.closePath()
colr = {
"Format": ot.PaintFormat.PaintGlyph,
"Glyph": glyph_name,
"Paint": {
"Format": ot.PaintFormat.PaintSweepGradient,
"ColorLine": {
"ColorStop": [
(0.0, _cpal("red")),
(0.5, _cpal("yellow")),
(1.0, _cpal("red")),
]
},
"centerX": 500,
"centerY": 500,
"startAngle": 0,
"endAngle": 360,
},
}
return SampleGlyph(
glyph_name=glyph_name, accessor="c", advance=_UPEM, glyph=pen.glyph(), colr=colr
)
def _sample_colr_glyph():
glyph_name = "transformed_sweep"
# Paint the sweep shifted and rotated
colr = {
"Format": ot.PaintFormat.PaintTranslate,
"dx": 250,
"dy": 0,
"Paint": {
"Format": ot.PaintFormat.PaintRotate,
"centerX": _UPEM / 2,
"centerY": _UPEM / 2,
"angle": 60,
"Paint": {
"Format": ot.PaintFormat.PaintColrGlyph,
"Glyph": "sweep",
},
},
}
pen = TTGlyphPen(None)
pen.moveTo((0, 0))
pen.lineTo((_UPEM, _UPEM))
pen.endPath()
return SampleGlyph(
glyph_name=glyph_name, accessor="t", advance=_UPEM, glyph=pen.glyph(), colr=colr
)
def _sample_composite_colr_glyph():
glyph_name = "composite_colr_glyph"
# Scale down the sweep and use it to cut a hole in the sweep
# Transforms combine f(g(x)); build up backwards
t = Transform(dx=-500, dy=-500) # move to origin
t = Transform(xx=0.75, yy=0.75).transform(t)
t = Transform(dx=500, dy=500).transform(t)
t = tuple(t)
colr = {
"Format": ot.PaintFormat.PaintComposite,
"CompositeMode": "SRC_OUT",
"SourcePaint": {
"Format": ot.PaintFormat.PaintColrGlyph,
"Glyph": "sweep",
},
"BackdropPaint": {
"Format": ot.PaintFormat.PaintTransform,
"Paint": {
"Format": ot.PaintFormat.PaintColrGlyph,
"Glyph": "sweep",
},
"Transform": t,
},
}
pen = TTGlyphPen(None)
pen.moveTo((0, 0))
pen.lineTo((_UPEM, _UPEM))
pen.endPath()
return SampleGlyph(
glyph_name=glyph_name, accessor="o", advance=_UPEM, glyph=pen.glyph(), colr=colr
)
def _gradient_stops_repeat(first_stop, second_stop, accessor_char):
glyph_name = f"linear_repeat_{first_stop}_{second_stop}"
pen = TTGlyphPen(None)
pen.moveTo((100, 250))
pen.lineTo((100, 950))
pen.lineTo((900, 950))
pen.lineTo((900, 250))
pen.closePath()
colr = {
"Format": ot.PaintFormat.PaintGlyph,
"Glyph": glyph_name,
"Paint": {
"Format": ot.PaintFormat.PaintLinearGradient,
"ColorLine": {
"ColorStop": [
(first_stop, _cpal("red")),
(second_stop, _cpal("blue")),
],
"Extend": ot.ExtendMode.REPEAT,
},
"x0": 100,
"y0": 250,
"x1": 900,
"y1": 250,
"x2": 100,
"y2": 300,
},
}
return SampleGlyph(
glyph_name=glyph_name,
accessor=accessor_char,
advance=_UPEM,
glyph=pen.glyph(),
colr=colr,
)
def main():
assert len(sys.argv) == 2
build_dir = Path(sys.argv[1])
build_dir.mkdir(exist_ok=True)
out_file = (build_dir / _FAMILY.replace(" ", "")).with_suffix(".ttf")
version = datetime.datetime.now().isoformat()
names = {
"familyName": _FAMILY,
"styleName": _STYLE,
"uniqueFontIdentifier": " ".join((_FAMILY, version)),
"fullName": " ".join((_FAMILY, _STYLE)),
"version": version,
"psName": "-".join((_FAMILY.replace(" ", ""), _STYLE)),
}
glyphs = [
SampleGlyph(glyph_name=".notdef", accessor="", advance=600, glyph=Glyph()),
SampleGlyph(glyph_name=".null", accessor="", advance=0, glyph=Glyph()),
_sample_sweep(),
_sample_colr_glyph(),
_sample_composite_colr_glyph(),
_gradient_stops_repeat(0, 1, "p"),
_gradient_stops_repeat(0.2, 0.8, "q"),
_gradient_stops_repeat(0, 1.5, "r"),
_gradient_stops_repeat(0.5, 1.5, "s"),
]
fb = fontBuilder.FontBuilder(_UPEM)
fb.setupGlyphOrder([g.glyph_name for g in glyphs])
fb.setupCharacterMap(
{ord(g.accessor): g.glyph_name for g in glyphs if len(g.accessor) == 1}
)
fb.setupGlyf({g.glyph_name: g.glyph for g in glyphs})
fb.setupHorizontalMetrics({g.glyph_name: (_UPEM, g.glyph.xMin) for g in glyphs})
fb.setupHorizontalHeader(ascent=_ASCENT, descent=-_DESCENT)
fb.setupOS2(sTypoAscender=_ASCENT, usWinAscent=_ASCENT, usWinDescent=_DESCENT)
fb.setupNameTable(names)
fb.setupPost()
fb.font["head"].xMin = 0
fb.font["head"].yMin = -_DESCENT
fb.font["head"].xMax = _UPEM
fb.font["head"].yMax = _ASCENT
fb.font["OS/2"].fsType = 0
fb.font["OS/2"].version = 4
fb.font["OS/2"].fsSelection |= 1 << 7
fb.font["hhea"].advanceWidthMax = _UPEM
fb.font["COLR"] = colorBuilder.buildCOLR(
{g.glyph_name: g.colr for g in glyphs if g.colr}
)
fb.font["CPAL"] = colorBuilder.buildCPAL([list(_PALETTE)])
fb.save(out_file)
print(f"Wrote {out_file}")
if __name__ == "__main__":
main()
``` |
[
{
"content": "Here is the snippet:\n```python\n# -*- coding: utf-8 -*-\n\nimport imaplib\nimport re\n\ntry:\n # urlparse was moved to urllib.parse in Python 3\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\nfrom jobs import AbstractJob\n\n\nclass IMAP(AbstractJ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport imaplib\nimport re\n\ntry:\n # urlparse was moved to urllib.parse in Python 3\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\nfrom jobs import AbstractJob\n\n\nclas... | ```python
# -*- coding: utf-8 -*-
import imaplib
import re
try:
# urlparse was moved to urllib.parse in Python 3
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from jobs import AbstractJob
class IMAP(AbstractJob):
def __init__(self, conf):
self.interval = conf['interval']
self.email = conf['email']
self.url = urlparse(conf['url'])
self.tls = conf.get('tls', True)
self.starttls = conf.get('starttls', False)
self.folder = conf['folder']
def _parse_count(self, message):
count = re.search(r'\w+ (\d+)', message.decode('utf-8'))
return int(count.group(1)) if count is not None else 0
def _get_count(self):
_, message = self.mail.status(self.folder, '(MESSAGES)')
return self._parse_count(message[0])
def _get_unread_count(self):
_, message = self.mail.status(self.folder, '(UNSEEN)')
return self._parse_count(message[0])
def get(self):
if self.tls:
self.mail = imaplib.IMAP4_SSL(self.url.hostname, self.url.port)
else:
self.mail = imaplib.IMAP4(self.url.hostname, self.url.port)
if self.starttls:
self.mail.starttls()
self.mail.login(self.url.username, self.url.password)
count = self._get_count()
unread = self._get_unread_count()
self.mail.logout()
return {
'email': self.email,
'folder': self.folder,
'count': count,
'unread': unread
}
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# This file contains the default stats and general stat categories\r\n\r\nATTRIBUTE_TYPE = {'mental': ['intelligence', 'wits', 'resolve'],\r\n 'physical': ['strength', 'dexterity', 'stamina'],\r\n ... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# This file contains the default stats and general stat categories\r\n\r\nATTRIBUTE_TYPE = {'mental': ['intelligence', 'wits', 'resolve'],\r\n 'physical': ['strength', 'dexterity', 'stamina... | ```python
# This file contains the default stats and general stat categories
ATTRIBUTE_TYPE = {'mental': ['intelligence', 'wits', 'resolve'],
'physical': ['strength', 'dexterity', 'stamina'],
'social': ['presence', 'manipulation', 'composure']
}
SKILL_TYPE = {'mental': ['academics',
'computer',
'crafts',
'enigmas',
'investigation',
'medicine',
'occult',
'politics',
'science'],
'physical': ['archery',
'athletics',
'brawl',
'drive',
'firearms',
'larceny',
'ride',
'stealth',
'survival',
'weaponry'],
'social': ['animal ken',
'empathy',
'expression',
'intimidation',
'persuasion',
'socialize',
'streetwise',
'subterfuge']}
STATS = {'skill specialties': {},
'merits': {},
'size': 5,
'conditions': {},
'weapons': {},
'aspirations': {},
'willpower': 0,
'willpower filled' : 0,
'health': [0, # max (derived)
0, # bashing
0, # lethal
0 # agg
],
'xp': 0,
'beats': 0,
'armor': 0,
'initiative mod': 0,
'size mod': 0,
'speed mod': 0,
'defense mod': 0}
SKILLS = list(SKILL_TYPE.values())
SKILLS = sum(SKILLS, [])
ATTRIBUTES = list(ATTRIBUTE_TYPE.values())
ATTRIBUTES = sum(ATTRIBUTES, [])
``` |
[
{
"content": "Here is the code content:\n```python\n#!/usr/bin/env python\n\nimport flask\nfrom flask import Flask\nfrom flask import request\nfrom werkzeug import secure_filename\nimport os, random, datetime, codecs\nimport sys, json, magic\nimport cPickle as pickle\nimport regex as re\nimport keywords\nimport... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport flask\nfrom flask import Flask\nfrom flask import request\nfrom werkzeug import secure_filename\nimport os, random, datetime, codecs\nimport sys, json, magic\nimport cPickle as pickle\nimport regex as re\nimport ... | ```python
#!/usr/bin/env python
import flask
from flask import Flask
from flask import request
from werkzeug import secure_filename
import os, random, datetime, codecs
import sys, json, magic
import cPickle as pickle
import regex as re
import keywords
import argparse
import xml.etree.ElementTree
import zipfile
app = Flask(__name__)
upload_dir = "uploads"
cs_tagger = None
cs_idf_doc_count = None
cs_idf_table = None
en_tagger = None
en_idf_doc_count = None
en_idf_table = None
@app.route('/')
def index():
return "{}\n"
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
def get_file(file_name):
try:
src = os.path.join(root_dir(), file_name)
return open(src).read()
except IOError as exc:
return str(exc)
@app.route('/web', methods=['GET'])
def show_web():
content = get_file("web.html")
print content
return flask.Response(content, mimetype="text/html")
@app.route('/demo', methods=['GET'])
def show_simple_demo():
content = get_file("web.html")
content = re.sub(r"\$\(\'#header", "//", content)
content = re.sub(r"\$\(\'#footer", "//", content)
return flask.Response(content, mimetype="text/html")
@app.route('/', methods=['POST'])
def post_request():
start_time = datetime.datetime.now()
if 'file' in request.files:
file = request.files['file']
else:
class _file_wrapper(object):
def __init__(self, data):
self._data = data
import uuid
self.filename = str(uuid.uuid4())
def save(self, path):
with codecs.open(path, mode="w+", encoding="utf-8") as fout:
fout.write(self._data)
file = _file_wrapper(request.form["data"])
tagger = cs_tagger
idf_doc_count = cs_idf_doc_count
idf_table = cs_idf_table
json_response = None
try:
post_id = datetime.datetime.now().strftime("%Y-%m-%d/%H/%M-%S-")+\
str(random.randint(10000, 99999))
post_dir = os.path.join(upload_dir, post_id)
os.makedirs(post_dir)
if request.args.get('language') == 'en':
tagger = en_tagger
idf_doc_count = en_idf_doc_count
idf_table = en_idf_table
elif request.args.get('language') == 'cs':
pass
elif request.args.get('language'):
raise Exception('Unsupported language {}'.format(request.args.get('language')))
if request.args.get('threshold'):
try:
threshold = float(request.args.get('threshold'))
except:
raise Exception("Threshold \"{}\" is not valid float.".format(request.args.get("threshold")))
else:
threshold = 0.2
if request.args.get("maximum-words"):
try:
maximum_words = int(request.args.get('maximum-words'))
except:
raise Exception("Maximum number of words \"{}\" is not an integer.".format(request.args.get("maximum-words")))
else:
maximum_words = 15
file_name = secure_filename(file.filename)
file_path = os.path.join(post_dir, file_name)
file.save(os.path.join(file_path))
data, code = \
process_file(file_path, tagger, idf_doc_count, idf_table, threshold, maximum_words)
except Exception as e:
code = 400
data = {"error": e.message}
finally:
json_response = json.dumps(data)
print json_response.encode('unicode-escape')
log = {}
log['remote_addr'] = request.remote_addr
log['response_json'] = data
log['response_code'] = code
log['time'] = start_time.strftime("%Y-%m-%d %H:%M:%S")
log['duration'] = (datetime.datetime.now() - start_time).total_seconds()
f_log = open(os.path.join(post_dir, "log.json"), 'w')
json.dump(log, f_log)
f_log.close()
response = flask.Response(json_response,
content_type='application/json; charset=utf-8')
response.headers.add('content-length', len(json_response.encode('utf-8')))
response.status_code = code
return response
def process_file(file_path, tagger, idf_doc_count, idf_table, threshold, maximum_words):
"""
Takes the uploaded file, detecs its type (plain text, alto XML, zip)
and calls a parsing function accordingly. If everything succeeds it
returns keywords and 200 code, returns an error otherwise.
"""
file_info = magic.from_file(file_path)
lines = []
if re.match("^UTF-8 Unicode (with BOM) text", file_info):
lines = lines_from_txt_file(file_path, encoding='utf-8-sig')
elif re.match("^UTF-8 Unicode", file_info):
lines = lines_from_txt_file(file_path, encoding='utf-8')
elif re.match("^ASCII text", file_info):
lines = lines_from_txt_file(file_path, encoding='utf-8')
elif re.match('^XML 1.0 document', file_info) and \
(file_path.endswith('.alto') or file_path.endswith('.xml')):
lines = lines_from_alto_file(file_path)
elif re.match('^Zip archive data', file_info):
lines = lines_from_zip_file(file_path)
else:
return {"eror": "Unsupported file type: {}".format(file_info)}, 400
if not lines:
return {"error": "Empty file"}, 400
return keywords.get_keywords(lines, tagger, idf_doc_count, idf_table, threshold, maximum_words), 200
def lines_from_txt_file(file_path, encoding='utf-8'):
"""
Loads lines of text from a plain text file.
:param file_path: Path to the alto file or a file-like object.
"""
if type(file_path) is str:
f = codecs.open(file_path, 'r', encoding)
else:
f = file_path
content = [l.strip() for l in f]
f.close()
return content
def lines_from_alto_file(file_path):
"""
Loads lines of text from a provided alto file.
:param file_path: Path to the alto file or a file-like object.
"""
e = xml.etree.ElementTree.parse(file_path).getroot()
layout = None
for c in e.getchildren():
if c.tag.endswith('Layout'):
layout = c
break
if layout is None:
raise Exception("XML is not ALTO file (does not contain layout object).")
for page in layout.getchildren():
if not page.tag.endswith("Page"):
continue
text_lines = layout.findall(".//{http://www.loc.gov/standards/alto/ns-v2#}TextLine")
for text_line in text_lines:
line_words = []
for string in text_line.getchildren():
if not string.tag.endswith('String'):
continue
line_words.append(string.attrib['CONTENT'])
yield " ".join(line_words)
def lines_from_zip_file(file_path):
"""
Loads lines of text from a provided zip file. If it contains alto file, it
uses them, otherwise looks for txt files. Files can in an arbitrary depth.
:param file_path: Path to the uploaded zip file.
:type file_path: str
"""
archive = zipfile.ZipFile(file_path)
alto_files = [n for n in archive.namelist() if n.endswith(".alto") or n.endswith(".xml")]
if alto_files:
for f_name in alto_files:
for line in lines_from_alto_file(archive.open(f_name)):
yield line
else:
txt_files = [n for n in archive.namelist() if n.endswith(".txt")]
if not txt_files:
raise Exception("Archive contains neither alto files nor text files.")
for f_name in txt_files:
for line in lines_from_txt_file(archive.open(f_name)):
yield line
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs the KER server.')
parser.add_argument("--cs-morphodita", help="Path to a Czech tagger model for Morphodita.", required=True)
parser.add_argument("--cs-idf", help="Czech idf model.", required=True)
parser.add_argument("--en-morphodita", help="Path to a English tagger model for Morphodita.", required=True)
parser.add_argument("--en-idf", help="English idf model.", required=True)
parser.add_argument("--port", help="Port the server runs on", type=int, default=5000)
parser.add_argument("--host", help="IP address the server will run at", type=str, default="127.0.0.1")
args = parser.parse_args()
if os.path.exists(args.cs_morphodita):
cs_tagger = keywords.Morphodita(args.cs_morphodita)
else:
print >> sys.stderr, "File with Czech Morphodita model does not exist: {}".format(args.cs_morphodita)
exit(1)
if os.path.exists(args.cs_idf):
f_idf = open(args.cs_idf, 'rb')
cs_idf_doc_count = float(pickle.load(f_idf))
cs_idf_table = pickle.load(f_idf)
f_idf.close()
else:
print >> sys.stderr, "File with Czech IDF model does not exist: {}".format(args.cs_idf)
exit(1)
if os.path.exists(args.en_morphodita):
en_tagger = keywords.Morphodita(args.en_morphodita)
else:
print >> sys.stderr, "File with English Morphodita model does not exist: {}".format(args.en_morphodita)
exit(1)
if os.path.exists(args.en_idf):
f_idf = open(args.en_idf, 'rb')
en_idf_doc_count = float(pickle.load(f_idf))
en_idf_table = pickle.load(f_idf)
f_idf.close()
else:
print >> sys.stderr, "File with English IDF model does not exist: {}".format(args.en_idf)
exit(1)
app.run(debug=True, host=args.host, port=args.port)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\"\"\"\nExtension that generates configuration files for Yelp `pre-commit`_.\n\n.. _pre-commit: http://pre-commit.com\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..templates import pre_commit_config\nfrom ..api import Extension\nfrom ..api import hel... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nExtension that generates configuration files for Yelp `pre-commit`_.\n\n.. _pre-commit: http://pre-commit.com\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..templates import pre_commit_config\nfrom ..api import Extension\nfrom ... | ```python
# -*- coding: utf-8 -*-
"""
Extension that generates configuration files for Yelp `pre-commit`_.
.. _pre-commit: http://pre-commit.com
"""
from __future__ import absolute_import
from ..templates import pre_commit_config
from ..api import Extension
from ..api import helpers
class PreCommit(Extension):
"""Generate pre-commit configuration file"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return self.register(
actions,
self.add_files,
after='define_structure')
def add_files(self, struct, opts):
"""Add .pre-commit-config.yaml file to structure
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
files = {
'.pre-commit-config.yaml': (
pre_commit_config(opts), helpers.NO_OVERWRITE
),
}
return helpers.merge(struct, {opts['project']: files}), opts
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nAlbum = {\n\n'artist': 'MF DOOM',\n'album': 'Mm.. Food',\n'date' : '2004',\n'total': '15 songs',\n'tracks':\n [{\n\n 'name': 'Beef Rapp',\n 'duration': '4:39',\n 'prize': '1,29',\n\n },\n\n {\n\n 'name... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nAlbum = {\n\n'artist': 'MF DOOM',\n'album': 'Mm.. Food',\n'date' : '2004',\n'total': '15 songs',\n'tracks':\n [{\n\n 'name': 'Beef Rapp',\n 'duration': '4:39',\n 'prize': '1,29',\n\n },\n\n ... | ```python
Album = {
'artist': 'MF DOOM',
'album': 'Mm.. Food',
'date' : '2004',
'total': '15 songs',
'tracks':
[{
'name': 'Beef Rapp',
'duration': '4:39',
'prize': '1,29',
},
{
'name': 'Hoe Cakes',
'duration': '3:54',
'prize': '1,29',
},
{
'name': 'Potholderz (feat. Count Bass D',
'duration': '3:20',
'prize': '1,29',
},
{
'name': 'One Beer',
'duration': '4:18',
'prize': '1,29',
},
{
'name': 'Deep Fried Frenz',
'duration': '4:59',
'prize': '1,29',
},
{
'name': 'Poo-Putt Platter',
'duration': '1:13',
'prize': '1,29',
},
{
'name': 'Fillet-O-Rapper',
'duration': '1:03',
'prize': '1.29',
},
{
'name': 'Gumbo',
'duration': '0:49',
'prize': '1,29',
},
{
'name': 'Fig Leaf Bi-Carbonate',
'duration': '3:19',
'prize': '1,29',
},
{
'name': 'Kon Karne',
'duration': '2:51',
'prize': '1,29',
},
{
'name': 'Guinessses (feat. 4lze & Angelika)',
'duration': '4:41',
'prize': '1,29',
},
{
'name': 'Kon Queso',
'duration': '4:00',
'prize': '1,29',
},
{
'name': 'Rapp Snitch Knishes',
'duration': '2:52',
'prize': '1,29',
},
{
'name': 'Vomitspit',
'duration': '2:48',
'prize': '1,29',
},
{
'name': 'Kookies',
'duration': '4:01',
'prize': '1,29',
},
{
'name': 'Hoe Cakes (Jake One Remix)',
'duration': '2:56',
'prize': '1,29',
},
{
'name': 'Beef Rap (Live)',
'duration': '2:51',
'prize': '1,29',
},
]
}
print Album
``` |
[
{
"content": "Replicate the source code:\n```python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# Canal para cuevana\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#----------------------------------------------------------... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# Canal para cuevana\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#------------------------------------------... | ```python
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para cuevana
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urlparse
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
logger.info("[pelisadicto.py] mainlist")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Últimas agregadas" , action="agregadas", url="http://pelisadicto.com", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel, title="Listado por género" , action="porGenero", url="http://pelisadicto.com"))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search", url="http://pelisadicto.com") )
return itemlist
def porGenero(item):
logger.info("[pelisadicto.py] porGenero")
itemlist = []
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Acción",url="http://pelisadicto.com/genero/Acción/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Adulto",url="http://pelisadicto.com/genero/Adulto/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Animación",url="http://pelisadicto.com/genero/Animación/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Aventura",url="http://pelisadicto.com/genero/Aventura/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Biográfico",url="http://pelisadicto.com/genero/Biográfico/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Ciencia Ficción",url="http://pelisadicto.com/genero/Ciencia Ficción/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Cine Negro",url="http://pelisadicto.com/genero/Cine Negro/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Comedia",url="http://pelisadicto.com/genero/Comedia/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Corto",url="http://pelisadicto.com/genero/Corto/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Crimen",url="http://pelisadicto.com/genero/Crimen/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Deporte",url="http://pelisadicto.com/genero/Deporte/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Documental",url="http://pelisadicto.com/genero/Documental/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Drama",url="http://pelisadicto.com/genero/Drama/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Familiar",url="http://pelisadicto.com/genero/Familiar/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Fantasía",url="http://pelisadicto.com/genero/Fantasía/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Guerra",url="http://pelisadicto.com/genero/Guerra/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Historia",url="http://pelisadicto.com/genero/Historia/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Misterio",url="http://pelisadicto.com/genero/Misterio/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Música",url="http://pelisadicto.com/genero/Música/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Musical",url="http://pelisadicto.com/genero/Musical/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Romance",url="http://pelisadicto.com/genero/Romance/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Terror",url="http://pelisadicto.com/genero/Terror/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Thriller",url="http://pelisadicto.com/genero/Thriller/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Western",url="http://pelisadicto.com/genero/Western/1", viewmode="movie_with_plot"))
return itemlist
def search(item,texto):
logger.info("[pelisadicto.py] search")
'''
texto_get = texto.replace(" ","%20")
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post)
'''
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s" % texto
try:
return agregadas(item)
# Se captura la excepci?n, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
return busqueda(item)
def agregadas(item):
logger.info("[pelisadicto.py] agregadas")
itemlist = []
'''
# Descarga la pagina
if "?search=" in item.url:
url_search = item.url.split("?search=")
data = scrapertools.cache_page(url_search[0], url_search[1])
else:
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
'''
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
# Extrae las entradas
fichas = re.sub(r"\n|\s{2}","",scrapertools.get_match(data,'<ul class="thumbnails">(.*?)</ul>'))
#<li class="col-xs-6 col-sm-2 CALDVD"><a href="/pelicula/101-dalmatas" title="Ver 101 dálmatas Online" class="thumbnail thumbnail-artist-grid"><img class="poster" style="width: 180px; height: 210px;" src="/img/peliculas/101-dalmatas.jpg" alt="101 dálmatas"/><div class="calidad">DVD</div><div class="idiomas"><img src="/img/1.png" height="20" width="30" /></div><div class="thumbnail-artist-grid-name-container-1"><div class="thumbnail-artist-grid-name-container-2"><span class="thumbnail-artist-grid-name">101 dálmatas</span></div></div></a></li>
patron = 'href="([^"]+)".*?' # url
patron+= 'src="([^"]+)" ' # thumbnail
patron+= 'alt="([^"]+)' # title
matches = re.compile(patron,re.DOTALL).findall(fichas)
for url,thumbnail,title in matches:
url=urlparse.urljoin(item.url,url)
thumbnail = urlparse.urljoin(url,thumbnail)
itemlist.append( Item(channel=item.channel, action="findvideos", title=title+" ", fulltitle=title , url=url , thumbnail=thumbnail , show=title) )
# Paginación
try:
#<ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul>
current_page_number = int(scrapertools.get_match(item.url,'/(\d+)$'))
item.url = re.sub(r"\d+$","%s",item.url)
next_page_number = current_page_number + 1
next_page = item.url % (next_page_number)
itemlist.append( Item(channel=item.channel, action="agregadas", title="Página siguiente >>" , url=next_page, viewmode="movie_with_plot") )
except: pass
return itemlist
def findvideos(item):
logger.info("[pelisadicto.py] findvideos")
itemlist = []
data = re.sub(r"\n|\s{2}","",scrapertools.cache_page(item.url))
#<!-- SINOPSIS --> <h2>Sinopsis de 101 dálmatas</h2> <p>Pongo y Perdita, los dálmatas protagonistas, son una feliz pareja canina que vive rodeada de sus cachorros y con sus amos Roger y Anita. Pero su felicidad está amenazada. Cruella de Ville, una pérfida mujer que vive en una gran mansión y adora los abrigos de pieles, se entera de que los protagonistas tienen quince cachorros dálmatas. Entonces, la idea de secuestrarlos para hacerse un exclusivo abrigo de pieles se convierte en una obsesión enfermiza. Para hacer realidad su sueño contrata a dos ladrones.</p>
patron = "<!-- SINOPSIS --> "
patron += "<h2>[^<]+</h2> "
patron += "<p>([^<]+)</p>"
matches = re.compile(patron,re.DOTALL).findall(data)
plot = matches[0]
# Descarga la pagina
data = scrapertools.cache_page(item.url)
patron = '<tr>.*?'
patron += '<td><img src="(.*?)".*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<a href="(.*?)".*?</tr>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
idioma =""
if "/img/1.png" in scrapedidioma: idioma="Castellano"
if "/img/2.png" in scrapedidioma: idioma="Latino"
if "/img/3.png" in scrapedidioma: idioma="Subtitulado"
title = item.title + " ["+scrapedcalidad+"][" + idioma + "][" + scrapedserver + "]"
itemlist.append( Item(channel=item.channel, action="play", title=title, fulltitle=title , url=scrapedurl , thumbnail="" , plot=plot , show = item.show) )
return itemlist
def play(item):
logger.info("[pelisadicto.py] play")
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nfrom datetime import datetime\nimport datetime as DT\nimport time\nimport calendar\nclass Clock(object):\n def __init__(self,offset=None):\n self.timezone=None\n if offset is not None:\n self.timezone=DT.timezone(DT.timed... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nfrom datetime import datetime\nimport datetime as DT\nimport time\nimport calendar\nclass Clock(object):\n def __init__(self,offset=None):\n self.timezone=None\n if offset is not None:\n self.timezone=DT.t... | ```python
from datetime import datetime
import datetime as DT
import time
import calendar
class Clock(object):
def __init__(self,offset=None):
self.timezone=None
if offset is not None:
self.timezone=DT.timezone(DT.timedelta(hours=offset))
def to_str(self,timestamp=None,with_orig=False):
if not timestamp:
timestamp=datetime.now(self.timezone)
if with_orig:
return timestamp,"{month_name} {day}, {year} {clock}".format(**self.as_dict(timestamp))
return "{month_name} {day}, {year} {clock}".format(**self.as_dict(timestamp))
def date(self,D=None):
if D is None:
D=datetime.now(self.timezone)
months=[
"Unesamber","Dutesamber","Trisesamber",
"Tetresamber","Pentesamber","Hexesamber",
"Sevesamber","Octesamber","Novesamber",
"Desamber","Undesamber","Dodesamber",
"Tridesamber","Year Day","Leap Day"
]
D=D.timetuple()
yd=D.tm_yday-1
if calendar.isleap(D.tm_year):
if yd==365:
return "Leap Day"
if yd==366:
return "Year Day"
elif yd==365:
return "Year Day"
P=yd/(365+int(calendar.isleap(D.tm_year)))
month=int(P*(len(months)-2))
month_name=months[month]
day=((yd-1)%28)+1
ret={"month_name":month_name,"month":month+1,"day":day,"year":D.tm_year}
ret['date']="{month_name} {day}, {year}".format(**ret)
return ret
def time(self,D=None):
if D is None:
D=datetime.now(self.timezone)
T=(D.time().microsecond/1000000+time.mktime(D.timetuple()))%(24*60*60)
T="{:03.03f}".format((T/(24*60*60))*1000).zfill(7)
T=T.replace(".",":")
return {"clock":T,"above":T.split(":")[0],"below":T.split(":")[1]}
def as_dict(self,D=None):
if D is None:
D=datetime.now(self.timezone)
ret={'calendar':{
"day":D.day,
"month":D.month,
"year":D.year,
"time":D.time(),
"date":D.date(),
"hour":D.hour,
"minute":D.minute,
"second":D.second,
}}
ret.update(self.date(D))
ret.update(self.time(D))
ret['timestamp']="{month_name} {day}, {year} {clock}".format(**ret)
return ret
Clock().time()
``` |
[
{
"content": "Repeat the following code:\n```python\n# coding=utf-8\n\"\"\"\nYou are on an infinite grid of squares, sort of like New York City.\n\nWalk in any direction, N, S, E, W. At intersections, pick a route at random, but not doubling back.\n\nOn average how long will it take to get back to the starting ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# coding=utf-8\n\"\"\"\nYou are on an infinite grid of squares, sort of like New York City.\n\nWalk in any direction, N, S, E, W. At intersections, pick a route at random, but not doubling back.\n\nOn average how long will it take to get back ... | ```python
# coding=utf-8
"""
You are on an infinite grid of squares, sort of like New York City.
Walk in any direction, N, S, E, W. At intersections, pick a route at random, but not doubling back.
On average how long will it take to get back to the starting point?
How often does it fail to return to the starting point after x iterations?
"""
from __future__ import print_function, unicode_literals, absolute_import
# configure logging for file and console output.
import logging
import os.path
if os.path.isfile("log.txt"):
os.remove("log.txt")
logging.basicConfig(filename='log.txt',level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())
# strongly discourage using console input and output.
# You can make testable code full of input and print statements, but it introduces
# unnecessary complexity. See kata on testing input and print with fakes and spies.
def input(*args, **kwargs):
raise TypeError("Don't use input, log or get input from function arguments.")
def raw_input(*args, **kwargs):
raise TypeError("Don't use raw_input, either, log or get input from function arguments.")
def run():
"""
Main entry point for your application.
"""
pass
# the functions/classes you write here should have no print or input statements.
if __name__ == "__main__" or __name__ == "builtins":
# Need an environment to run this?
# https://repl.it/languages/python3
logging.info("The application is starting.")
run()
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/python2.4\n#\n# Copyright (C) 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/python2.4\n#\n# Copyright (C) 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at... | ```python
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import logging
import urllib
from apiclient import __version__
from errors import HttpError
from oauth2client.anyjson import simplejson
dump_request_response = False
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class BaseModel(Model):
"""Base model class.
Subclasses should provide implementations for the "serialize" and
"deserialize" methods, as well as values for the following class attributes.
Attributes:
accept: The value to use for the HTTP Accept header.
content_type: The value to use for the HTTP Content-type header.
no_content_response: The value to return when deserializing a 204 "No
Content" response.
alt_param: The value to supply as the "alt" query parameter for requests.
"""
accept = None
content_type = None
no_content_response = None
alt_param = None
def _log_request(self, headers, path_params, query, body):
"""Logs debugging information about the request if requested."""
if dump_request_response:
logging.info('--request-start--')
logging.info('-headers-start-')
for h, v in headers.iteritems():
logging.info('%s: %s', h, v)
logging.info('-headers-end-')
logging.info('-path-parameters-start-')
for h, v in path_params.iteritems():
logging.info('%s: %s', h, v)
logging.info('-path-parameters-end-')
logging.info('body: %s', body)
logging.info('query: %s', query)
logging.info('--request-end--')
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers['accept'] = self.accept
headers['accept-encoding'] = 'gzip, deflate'
if 'user-agent' in headers:
headers['user-agent'] += ' '
else:
headers['user-agent'] = ''
headers['user-agent'] += 'google-api-python-client/%s (gzip)' % __version__
if body_value is not None:
headers['content-type'] = self.content_type
body_value = self.serialize(body_value)
self._log_request(headers, path_params, query, body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
if self.alt_param is not None:
params.update({'alt': self.alt_param})
astuples = []
for key, value in params.iteritems():
if type(value) == type([]):
for x in value:
x = x.encode('utf-8')
astuples.append((key, x))
else:
if getattr(value, 'encode', False) and callable(value.encode):
value = value.encode('utf-8')
astuples.append((key, value))
return '?' + urllib.urlencode(astuples)
def _log_response(self, resp, content):
"""Logs debugging information about the response if requested."""
if dump_request_response:
logging.info('--response-start--')
for h, v in resp.iteritems():
logging.info('%s: %s', h, v)
if content:
logging.info(content)
logging.info('--response-end--')
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
self._log_response(resp, content)
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return self.no_content_response
return self.deserialize(content)
else:
logging.debug('Content from bad request was: %s' % content)
raise HttpError(resp, content)
def serialize(self, body_value):
"""Perform the actual Python object serialization.
Args:
body_value: object, the request body as a Python object.
Returns:
string, the body in serialized form.
"""
_abstract()
def deserialize(self, content):
"""Perform the actual deserialization from response string to Python
object.
Args:
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
"""
_abstract()
class JsonModel(BaseModel):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
accept = 'application/json'
content_type = 'application/json'
alt_param = 'json'
def __init__(self, data_wrapper=False):
"""Construct a JsonModel.
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def serialize(self, body_value):
if (isinstance(body_value, dict) and 'data' not in body_value and
self._data_wrapper):
body_value = {'data': body_value}
return simplejson.dumps(body_value)
def deserialize(self, content):
content = content.decode('utf-8')
body = simplejson.loads(content)
if self._data_wrapper and isinstance(body, dict) and 'data' in body:
body = body['data']
return body
@property
def no_content_response(self):
return {}
class RawModel(JsonModel):
"""Model class for requests that don't return JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = '*/*'
content_type = 'application/json'
alt_param = None
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ''
class MediaModel(JsonModel):
"""Model class for requests that return Media.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = '*/*'
content_type = 'application/json'
alt_param = 'media'
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ''
class ProtocolBufferModel(BaseModel):
"""Model class for protocol buffers.
Serializes and de-serializes the binary protocol buffer sent in the HTTP
request and response bodies.
"""
accept = 'application/x-protobuf'
content_type = 'application/x-protobuf'
alt_param = 'proto'
def __init__(self, protocol_buffer):
"""Constructs a ProtocolBufferModel.
The serialzed protocol buffer returned in an HTTP response will be
de-serialized using the given protocol buffer class.
Args:
protocol_buffer: The protocol buffer class used to de-serialize a
response from the API.
"""
self._protocol_buffer = protocol_buffer
def serialize(self, body_value):
return body_value.SerializeToString()
def deserialize(self, content):
return self._protocol_buffer.FromString(content)
@property
def no_content_response(self):
return self._protocol_buffer()
def makepatch(original, modified):
"""Create a patch object.
Some methods support PATCH, an efficient way to send updates to a resource.
This method allows the easy construction of patch bodies by looking at the
differences between a resource before and after it was modified.
Args:
original: object, the original deserialized resource
modified: object, the modified deserialized resource
Returns:
An object that contains only the changes from original to modified, in a
form suitable to pass to a PATCH method.
Example usage:
item = service.activities().get(postid=postid, userid=userid).execute()
original = copy.deepcopy(item)
item['object']['content'] = 'This is updated.'
service.activities.patch(postid=postid, userid=userid,
body=makepatch(original, item)).execute()
"""
patch = {}
for key, original_value in original.iteritems():
modified_value = modified.get(key, None)
if modified_value is None:
# Use None to signal that the element is deleted
patch[key] = None
elif original_value != modified_value:
if type(original_value) == type({}):
# Recursively descend objects
patch[key] = makepatch(original_value, modified_value)
else:
# In the case of simple types or arrays we just replace
patch[key] = modified_value
else:
# Don't add anything to patch if there's no change
pass
for key in modified:
if key not in original:
patch[key] = modified[key]
return patch
``` |
[
{
"content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apach... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | ```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.videointelligence_v1p1beta1.types import video_intelligence
from google.longrunning import operations_pb2 # type: ignore
from .base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import VideoIntelligenceServiceGrpcTransport
class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTransport):
    """gRPC AsyncIO backend transport for VideoIntelligenceService.
    Service that implements Google Cloud Video Intelligence API.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # AsyncIO channel all RPCs are sent over; created (or accepted) in __init__.
    _grpc_channel: aio.Channel
    # Cache of generated stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(cls,
            host: str = 'videointelligence.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )
    def __init__(self, *,
            host: str = 'videointelligence.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: aio.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id=None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self._operations_client
    @property
    def annotate_video(self) -> Callable[
            [video_intelligence.AnnotateVideoRequest],
            Awaitable[operations_pb2.Operation]]:
        r"""Return a callable for the annotate video method over gRPC.
        Performs asynchronous video annotation. Progress and results can
        be retrieved through the ``google.longrunning.Operations``
        interface. ``Operation.metadata`` contains
        ``AnnotateVideoProgress`` (progress). ``Operation.response``
        contains ``AnnotateVideoResponse`` (results).
        Returns:
            Callable[[~.AnnotateVideoRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'annotate_video' not in self._stubs:
            self._stubs['annotate_video'] = self.grpc_channel.unary_unary(
                '/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo',
                request_serializer=video_intelligence.AnnotateVideoRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs['annotate_video']
# Public API of this module.
__all__ = (
    'VideoIntelligenceServiceGrpcAsyncIOTransport',
)
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA file compress utility module. You can easily programmatically add files\nand directorys to zip archives. And compress arbitrary binary content.\n\n- :func:`zip_a_folder`: add folder to archiv... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA file compress utility module. You can easily programmatically add files\nand directorys to zip archives. And compress arbitrary binary content.\n\n- :func:`zip_a_folder`: add ... | ```python
# -*- coding: utf-8 -*-
"""
A file compress utility module. You can easily programmatically add files
and directorys to zip archives. And compress arbitrary binary content.
- :func:`zip_a_folder`: add folder to archive.
- :func:`zip_everything_in_a_folder`: add everything in a folder to archive.
- :func:`zip_many_files`: Add many files to a zip archive.
- :func:`write_gzip`: Write binary content to gzip file.
- :func:`read_gzip`: Read binary content from gzip file.
**中文文档**
提供了若干个文件和数据压缩的快捷函数。
- :func:`zip_a_folder`: 将目录添加到压缩包。
- :func:`zip_everything_in_a_folder`: 将目录内的所有文件添加到压缩包。
- :func:`zip_many_files`: 将多个文件添加到压缩包。
- :func:`write_gzip`: 将二进制数据写入文件, 例如python pickle, bytes string。
- :func:`read_gzip`: 读取解压后的二进制数据内容。
注: python中zipfile包自带的ZipFile方法的用法如下:
基本用法::
with ZipFile("filename.zip", "w") as f:
f.write(path)
其中path是文件路径。 如果path是文件夹, 并不会将文件夹内所有的文件添加到压缩包中。
相对路径压缩:
比如你有一个路径 ``C:\download\readme.txt``, 如果当前路径是 ``C:\``,
而此时你将 ``readme.txt`` 添加到压缩包时则是在压缩包内添加一个: ``download\readme.txt``,
如果当前路径是 ``C:\download\``, 则在压缩包内添加的路径则是: ``readme.txt``
"""
from __future__ import print_function
import os
from zipfile import ZipFile
def zip_a_folder(src, dst):
    """Add a folder and everything inside to a zip archive.

    Archive entries are rooted at the folder itself::

        |---paper
            |--- algorithm.pdf
            |--- images
                |--- 1.jpg

        zip_a_folder("paper", "paper.zip")

        paper.zip
            |---paper
                |--- algorithm.pdf
                |--- images
                    |--- 1.jpg

    :param src: path of the folder to archive.
    :param dst: path of the zip file to create. If it already exists a
        message is printed and nothing is done.

    **中文文档**

    将整个文件夹添加到压缩包, 包括根目录本身。
    """
    if os.path.exists(dst):
        print("destination '%s' already exist." % dst)
        return

    src, dst = os.path.abspath(src), os.path.abspath(dst)
    root_name = os.path.basename(src)
    # Write with explicit arcnames instead of the former os.chdir() dance:
    # this never mutates the process-wide cwd (which was leaked if an
    # exception occurred mid-archive) and is safe for concurrent callers.
    with ZipFile(dst, "w") as f:
        for dirpath, _, fnamelist in os.walk(src):
            for fname in fnamelist:
                abspath = os.path.join(dirpath, fname)
                # Keep the root folder name as the archive prefix.
                arcname = os.path.join(root_name, os.path.relpath(abspath, src))
                f.write(abspath, arcname)
def zip_everything_in_a_folder(src, dst):
    """Add everything in a folder, except the root folder itself, to a zip.

    Example::

        |---paper
            |--- algorithm.pdf
            |--- images
                |--- 1.jpg

        zip_everything_in_folder("paper", "paper.zip")

        paper.zip
            |--- algorithm.pdf
            |--- images
                |--- 1.jpg

    :param src: path of the folder whose contents are archived.
    :param dst: path of the zip file to create. If it already exists a
        message is printed and nothing is done.

    **中文文档**

    将目录内部的所有文件添加到压缩包, 不包括根目录本身。
    """
    if os.path.exists(dst):
        print("destination '%s' already exist." % dst)
        return

    src, dst = os.path.abspath(src), os.path.abspath(dst)
    # Explicit arcnames replace the former os.chdir() approach, which
    # mutated the process-wide cwd and leaked it if archiving raised.
    with ZipFile(dst, "w") as f:
        for dirpath, _, fnamelist in os.walk(src):
            for fname in fnamelist:
                abspath = os.path.join(dirpath, fname)
                f.write(abspath, os.path.relpath(abspath, src))
def zip_many_files(list_of_abspath, dst):
    """Add many files to a zip archive, stored under their base names.

    :param list_of_abspath: iterable of file paths to archive.
    :param dst: path of the zip file to create. If it already exists a
        message is printed and nothing is done.

    **中文文档**

    将一系列的文件压缩到一个压缩包中, 若有重复的文件名, 在zip中保留所有的副本。
    """
    if os.path.exists(dst):
        print("destination '%s' already exist." % dst)
        return

    # Passing arcname directly replaces the former per-file os.chdir(),
    # which mutated the process cwd and left it changed on error.
    # ZipFile happily stores duplicate arcnames, preserving all copies.
    with ZipFile(dst, "w") as f:
        for abspath in list_of_abspath:
            f.write(abspath, os.path.basename(abspath))
``` |
[
{
"content": "Here is a code file:\n```python\n# Copyright 2012 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LI... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright 2012 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache... | ```python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.filter
import ldappool
from oslo_log import log
import six
from six.moves import map, zip
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)
# LDAP's string representation of booleans mapped to Python bools.
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
# OID of the tree-delete server control (delete an entire subtree).
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
# Config scope names mapped to python-ldap search-scope constants.
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
               'sub': ldap.SCOPE_SUBTREE}
# Config alias-dereferencing names mapped to python-ldap DEREF constants.
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
              'default': None,
              'finding': ldap.DEREF_FINDING,
              'never': ldap.DEREF_NEVER,
              'searching': ldap.DEREF_SEARCHING}
# Config TLS certificate-checking names mapped to python-ldap constants.
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
                  'demand': ldap.OPT_X_TLS_DEMAND,
                  'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
# Shared UTF-8 codec helper; used by utf8_encode() below.
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
    """Return *value* as UTF-8 encoded bytes.

    Unicode input is encoded to UTF-8; byte strings are assumed to be
    UTF-8 already and are returned untouched. Any other type is rejected.

    :param value: A basestring
    :returns: UTF-8 encoded version of value
    :raises: TypeError if value is not basestring
    """
    if isinstance(value, six.binary_type):
        return value
    if isinstance(value, six.text_type):
        return _utf8_encoder(value)[0]
    raise TypeError("value must be basestring, "
                    "not %s" % value.__class__.__name__)
# Shared UTF-8 codec helper; used by utf8_decode() below.
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
    """Return *value* as a unicode string.

    Byte strings are assumed to be UTF-8 encoded and are decoded; any
    other type is converted via ``six.text_type``.

    :param value: value to be returned as unicode
    :returns: value as unicode
    :raises: UnicodeDecodeError for invalid UTF-8 encoding
    """
    if not isinstance(value, six.binary_type):
        return six.text_type(value)
    return _utf8_decoder(value)[0]
def py2ldap(val):
    """Convert a Python value to the unicode form the LDAP API accepts.

    Booleans become the strings ``TRUE``/``FALSE``; every other value is
    converted with ``six.text_type``. The UTF-8 encoding required by
    python-ldap happens in a later, separate step.

    :param val: The value to convert to a LDAP string representation
    :returns: unicode string representation of value.
    """
    if not isinstance(val, bool):
        return six.text_type(val)
    return u'TRUE' if val else u'FALSE'
def enabled2py(val):
    """Similar to ldap2py, only useful for the enabled attribute."""
    # LDAP_VALUES maps only to True/False, so None reliably means "absent".
    bool_value = LDAP_VALUES.get(val)
    if bool_value is not None:
        return bool_value
    try:
        return int(val)
    except ValueError:
        return utf8_decode(val)
def ldap2py(val):
    """Convert an LDAP formatted value to the Python type OpenStack uses.

    Virtually all LDAP values are stored as UTF-8 encoded strings while
    OpenStack prefers unicode, so simply delegate to utf8_decode().

    :param val: LDAP formatted value
    :returns: val converted to preferred Python type
    """
    return utf8_decode(val)
def convert_ldap_result(ldap_result):
    """Convert an LDAP search result to Python types used by OpenStack.

    Each result tuple is (dn, attrs): dn is the entry's distinguished
    name and attrs maps attribute names to lists of string values.
    DNs are decoded from UTF-8 to unicode and attribute values are type
    converted (booleans, ints, unicode) as OpenStack prefers. Referral
    entries (dn is None) are dropped.

    :param ldap_result: LDAP search result
    :returns: list of 2-tuples containing (dn, attrs) where dn is unicode
              and attrs is a dict whose values are type converted to
              OpenStack preferred types.
    """
    converted = []
    saw_referral = False
    for dn, attrs in ldap_result:
        if dn is None:
            # A Referral object rather than an Entry object.
            saw_referral = True
            continue
        decoded_attrs = {}
        for name, raw_values in attrs.items():
            converter = enabled2py if name == 'enabled' else ldap2py
            try:
                decoded_attrs[name] = [converter(v) for v in raw_values]
            except UnicodeDecodeError:
                LOG.debug('Unable to decode value for attribute %s', name)
        converted.append((utf8_decode(dn), decoded_attrs))
    if saw_referral:
        LOG.debug(('Referrals were returned and ignored. Enable referral '
                   'chasing in keystone.conf via [ldap] chase_referrals'))
    return converted
def safe_iter(attrs):
    """Yield nothing for None, each element of a list, or the value itself."""
    if attrs is None:
        return
    if not isinstance(attrs, list):
        yield attrs
        return
    for item in attrs:
        yield item
def parse_deref(opt):
    """Map a config alias-dereferencing name to its python-ldap constant."""
    if opt in LDAP_DEREF:
        return LDAP_DEREF[opt]
    raise ValueError(_('Invalid LDAP deref option: %(option)s. '
                       'Choose one of: %(options)s') %
                     {'option': opt,
                      'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
    """Map a config TLS-certificate-checking name to its ldap constant."""
    if opt in LDAP_TLS_CERTS:
        return LDAP_TLS_CERTS[opt]
    raise ValueError(_(
        'Invalid LDAP TLS certs option: %(option)s. '
        'Choose one of: %(options)s') % {
            'option': opt,
            'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
    """Map a config scope name ('one'/'sub') to a python-ldap scope."""
    if scope in LDAP_SCOPES:
        return LDAP_SCOPES[scope]
    raise ValueError(
        _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
            'scope': scope,
            'options': ', '.join(LDAP_SCOPES.keys())})
def prep_case_insensitive(value):
    """Prepare a string for case-insensitive comparison.

    Per RFC4518 (simplified): lowercase everything, strip leading and
    trailing whitespace, and collapse internal whitespace runs to a
    single space.
    """
    lowered = value.strip().lower()
    return re.sub(r'\s+', ' ', lowered)
def is_ava_value_equal(attribute_type, val1, val2):
    """Return True if and only if the AVA values are equal.

    Strictly, the equality matching rule of the attribute type should be
    consulted; for simplicity this does a case-insensitive comparison,
    so the limitations of prep_case_insensitive apply here.
    """
    left = prep_case_insensitive(val1)
    right = prep_case_insensitive(val2)
    return left == right
def is_rdn_equal(rdn1, rdn2):
    """Return True if and only if the RDNs are equal.

    * RDNs must have the same number of AVAs.
    * Each AVA of rdn1 must match, for the same attribute type, an AVA
      of rdn2 — order is not significant. (An attribute type appears at
      most once per valid RDN.)
    * Attribute types are compared case-insensitively only; handling of
      alias names (cn / commonName / 2.5.4.3) is not implemented.

    The limitations of is_ava_value_equal apply to value comparison.
    """
    if len(rdn1) != len(rdn2):
        return False

    def _value_for_type(attr_type, rdn):
        # Return (found, value) for the first AVA in rdn whose attribute
        # type matches attr_type case-insensitively.
        for other_type, other_value, _ignored in rdn:
            if other_type.lower() == attr_type.lower():
                return True, other_value
        return False, None

    for attr_type, value, _ignored in rdn1:
        found, other_value = _value_for_type(attr_type, rdn2)
        if not found:
            return False
        if not is_ava_value_equal(attr_type, value, other_value):
            return False
    return True
def is_dn_equal(dn1, dn2):
    """Return True if and only if the DNs are equal.

    Two DNs are equal when they have the same number of RDNs and the
    RDNs match pairwise (see RFC4517). The limitations of is_rdn_equal
    apply here as well.

    :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
    :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
    """
    if not isinstance(dn1, list):
        dn1 = ldap.dn.str2dn(utf8_encode(dn1))
    if not isinstance(dn2, list):
        dn2 = ldap.dn.str2dn(utf8_encode(dn2))
    if len(dn1) != len(dn2):
        return False
    return all(is_rdn_equal(rdn1, rdn2) for rdn1, rdn2 in zip(dn1, dn2))
def dn_startswith(descendant_dn, dn):
    """Return True if and only if descendant_dn lies strictly under dn.

    :param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
    :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
    """
    if not isinstance(descendant_dn, list):
        descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
    if not isinstance(dn, list):
        dn = ldap.dn.str2dn(utf8_encode(dn))
    if len(descendant_dn) <= len(dn):
        return False
    # Only the trailing len(dn) RDNs of the descendant need to match.
    suffix = descendant_dn[-len(dn):]
    return is_dn_equal(suffix, dn)
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
    '''Abstract class which defines methods for a LDAP API provider.
    Native Keystone values cannot be passed directly into and from the
    python-ldap API. Type conversion must occur at the LDAP API
    boundary, examples of type conversions are:
        * booleans map to the strings 'TRUE' and 'FALSE'
        * integer values map to their string representation.
        * unicode strings are encoded in UTF-8
    In addition to handling type conversions at the API boundary we
    have the requirement to support more than one LDAP API
    provider. Currently we have:
        * python-ldap, this is the standard LDAP API for Python, it
          requires access to a live LDAP server.
        * Fake LDAP which emulates python-ldap. This is used for
          testing without requiring a live LDAP server.
    To support these requirements we need a layer that performs type
    conversions and then calls another LDAP API which is configurable
    (e.g. either python-ldap or the fake emulation).
    We have an additional constraint at the time of this writing due to
    limitations in the logging module. The logging module is not
    capable of accepting UTF-8 encoded strings, it will throw an
    encoding exception. Therefore all logging MUST be performed prior
    to UTF-8 conversion. This means no logging can be performed in the
    ldap APIs that implement the python-ldap API because those APIs
    are defined to accept only UTF-8 strings. Thus the layer which
    performs type conversions must also do the logging. We do the type
    conversions in two steps, once to convert all Python types to
    unicode strings, then log, then convert the unicode strings to
    UTF-8.
    There are a variety of ways one could accomplish this, we elect to
    use a chaining technique whereby instances of this class simply
    call the next member in the chain via the "conn" attribute. The
    chain is constructed by passing in an existing instance of this
    class as the conn attribute when the class is instantiated.
    Here is a brief explanation of why other possible approaches were
    not used:
        subclassing
            To perform the wrapping operations in the correct order
            the type conversion class would have to subclass each of
            the API providers. This is awkward, doubles the number of
            classes, and does not scale well. It requires the type
            conversion class to be aware of all possible API
            providers.
        decorators
            Decorators provide an elegant solution to wrap methods and
            would be an ideal way to perform type conversions before
            calling the wrapped function and then converting the
            values returned from the wrapped function. However
            decorators need to be aware of the method signature, it
            has to know what input parameters need conversion and how
            to convert the result. For an API like python-ldap which
            has a large number of different method signatures it would
            require a large number of specialized
            decorators. Experience has shown it's very easy to apply
            the wrong decorator due to the inherent complexity and
            tendency to cut-n-paste code. Another option is to
            parameterize the decorator to make it "smart". Experience
            has shown such decorators become insanely complicated and
            difficult to understand and debug. Also decorators tend to
            hide what's really going on when a method is called, the
            operations being performed are not visible when looking at
            the implemation of a decorated method, this too experience
            has shown leads to mistakes.
    Chaining simplifies both wrapping to perform type conversion as
    well as the substitution of alternative API providers. One simply
    creates a new instance of the API interface and insert it at the
    front of the chain. Type conversions are explicit and obvious.
    If a new method needs to be added to the API interface one adds it
    to the abstract class definition. Should one miss adding the new
    method to any derivations of the abstract class the code will fail
    to load and run making it impossible to forget updating all the
    derived classes.
    '''
    @abc.abstractmethod
    def __init__(self, conn=None):
        self.conn = conn
    @abc.abstractmethod
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def set_option(self, option, invalue):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_option(self, option):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def unbind_s(self):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def add_s(self, dn, modlist):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def modify_s(self, dn, modlist):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_s(self, dn):
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        raise exception.NotImplemented()  # pragma: no cover
class PythonLDAPHandler(LDAPHandler):
    '''Implementation of the LDAPHandler interface which calls the
    python-ldap API.
    Note, the python-ldap API requires all string values to be UTF-8
    encoded. The KeystoneLDAPHandler enforces this prior to invoking
    the methods in this class.
    '''
    def __init__(self, conn=None):
        super(PythonLDAPHandler, self).__init__(conn=conn)
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None, pool_retry_max=None,
                pool_retry_delay=None, pool_conn_timeout=None,
                pool_conn_lifetime=None):
        # Global TLS/debug options must be set before ldap.initialize().
        _common_ldap_initialization(url=url,
                                    use_tls=use_tls,
                                    tls_cacertfile=tls_cacertfile,
                                    tls_cacertdir=tls_cacertdir,
                                    tls_req_cert=tls_req_cert,
                                    debug_level=debug_level)
        self.conn = ldap.initialize(url)
        self.conn.protocol_version = ldap.VERSION3
        if alias_dereferencing is not None:
            self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
        # NOTE(review): page_size is stored but not read in this class;
        # presumably consumed by callers driving paged searches — confirm.
        self.page_size = page_size
        if use_tls:
            self.conn.start_tls_s()
        if chase_referrals is not None:
            self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
    def set_option(self, option, invalue):
        return self.conn.set_option(option, invalue)
    def get_option(self, option):
        return self.conn.get_option(option)
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
    def unbind_s(self):
        return self.conn.unbind_s()
    def add_s(self, dn, modlist):
        return self.conn.add_s(dn, modlist)
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        return self.conn.search_s(base, scope, filterstr,
                                  attrlist, attrsonly)
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        return self.conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        # The resp_ctrl_classes parameter is a recent addition to the
        # API. It defaults to None. We do not anticipate using it.
        # To run with older versions of python-ldap we do not pass it.
        return self.conn.result3(msgid, all, timeout)
    def modify_s(self, dn, modlist):
        return self.conn.modify_s(dn, modlist)
    def delete_s(self, dn):
        return self.conn.delete_s(dn)
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
                                tls_cacertdir=None, tls_req_cert=None,
                                debug_level=None):
    '''Method for common ldap initialization between PythonLDAPHandler and
    PooledLDAPHandler.

    Sets process-global python-ldap options (debug level and, when TLS or
    LDAPS is in use, the CA cert file/dir and certificate-checking mode).
    Must run before the connection is created.

    :raises AssertionError: if both start-TLS and an ldaps:// URL are
        requested — the two are mutually exclusive.
    :raises ValueError: if TLS is requested but python-ldap was built
        without TLS support.
    :raises IOError: if tls_cacertfile / tls_cacertdir does not exist or
        has the wrong type.
    '''
    LOG.debug("LDAP init: url=%s", url)
    LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
              'tls_req_cert=%s tls_avail=%s',
              use_tls, tls_cacertfile, tls_cacertdir,
              tls_req_cert, ldap.TLS_AVAIL)
    if debug_level is not None:
        ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
    using_ldaps = url.lower().startswith("ldaps")
    if use_tls and using_ldaps:
        raise AssertionError(_('Invalid TLS / LDAPS combination'))
    # The certificate trust options apply for both LDAPS and TLS.
    if use_tls or using_ldaps:
        if not ldap.TLS_AVAIL:
            raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
                               'not available') % ldap.TLS_AVAIL)
        if tls_cacertfile:
            # NOTE(topol)
            # python ldap TLS does not verify CACERTFILE or CACERTDIR
            # so we add some extra simple sanity check verification
            # Also, setting these values globally (i.e. on the ldap object)
            # works but these values are ignored when setting them on the
            # connection
            if not os.path.isfile(tls_cacertfile):
                raise IOError(_("tls_cacertfile %s not found "
                                "or is not a file") %
                              tls_cacertfile)
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
        elif tls_cacertdir:
            # NOTE(topol)
            # python ldap TLS does not verify CACERTFILE or CACERTDIR
            # so we add some extra simple sanity check verification
            # Also, setting these values globally (i.e. on the ldap object)
            # works but these values are ignored when setting them on the
            # connection
            if not os.path.isdir(tls_cacertdir):
                raise IOError(_("tls_cacertdir %s not found "
                                "or is not a directory") %
                              tls_cacertdir)
            ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
        if tls_req_cert in list(LDAP_TLS_CERTS.values()):
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
        else:
            LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
                      tls_req_cert)
class MsgId(list):
    """List subclass used to carry a connection together with a msgid."""
def use_conn_pool(func):
    '''Use this only for connection pool specific ldap API.

    This adds a pooled connection object to the decorated API as the next
    argument after self: the wrapped method is called as
    ``func(self, conn, *args, **kwargs)`` with ``conn`` checked out from
    ``self._get_pool_connection()`` and configured via
    ``self._apply_options(conn)``. The connection is returned to the pool
    when the call finishes (the context manager guarantees this even on
    exceptions).
    '''
    # functools.wraps preserves the wrapped method's __name__/__doc__ so
    # introspection and logging see the real API name, not "wrapper".
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        with self._get_pool_connection() as conn:
            self._apply_options(conn)
            return func(self, conn, *args, **kwargs)
    return wrapper
class PooledLDAPHandler(LDAPHandler):
    '''Implementation of the LDAPHandler interface which uses pooled
    connection manager.
    Pool specific configuration is defined in [ldap] section.
    All other LDAP configuration is still used from [ldap] section
    Keystone LDAP authentication logic authenticates an end user using its DN
    and password via LDAP bind to establish supplied password is correct.
    This can fill up the pool quickly (as pool re-uses existing connection
    based on its bind data) and would not leave space in pool for connection
    re-use for other LDAP operations.
    Now a separate pool can be established for those requests when related flag
    'use_auth_pool' is enabled. That pool can have its own size and
    connection lifetime. Other pool attributes are shared between those pools.
    If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
    If 'use_auth_pool' is not enabled, then connection pooling is not used for
    those LDAP operations.
    Note, the python-ldap API requires all string values to be UTF-8
    encoded. The KeystoneLDAPHandler enforces this prior to invoking
    the methods in this class.
    '''
    # Added here to allow override for testing
    Connector = ldappool.StateConnector
    # Cache-key prefix that keeps the authentication pool separate from
    # the general-purpose pool for the same URL (see connect()).
    auth_pool_prefix = 'auth_pool_'
    connection_pools = {}  # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.page_size = page_size
# Following two options are not added in common initialization as they
# need to follow a sequence in PythonLDAPHandler code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
# if option was not specified explicitly, then use connection default
# value for that option if there.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in self.conn_options.items():
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
'''Not using use_conn_pool decorator here as this API takes cred as
input.
'''
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
# After connection generator is done `with` statement execution block
# connection is always released via finally block in ldappool.
# So this unbind is a no op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
'''This API is asynchoronus API which returns MsgId instance to be used
in result3 call.
To work with result3 API in predicatable manner, same LDAP connection
is needed which provided msgid. So wrapping used connection and msgid
in MsgId class. The connection associated with search_ext is released
once last hard reference to MsgId object is freed. This will happen
when the method is done with returned MsgId usage.
'''
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
'''This method is used to wait for and return the result of an
operation previously initiated by one of the LDAP asynchronous
operation routines (eg search_ext()) It returned an invocation
identifier (a message id) upon successful initiation of their
operation.
Input msgid is expected to be instance of class MsgId which has LDAP
session/connection used to execute search_ext and message idenfier.
The connection associated with search_ext is released once last hard
reference to MsgId object is freed. This will happen when function
which requested msgId and used it in result3 exits.
'''
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
@use_conn_pool
def delete_s(self, conn, dn):
return conn.delete_s(dn)
@use_conn_pool
def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
    '''Convert data types and perform logging.
    This LDAP inteface wraps the python-ldap based interfaces. The
    python-ldap interfaces require string values encoded in UTF-8. The
    OpenStack logging framework at the time of this writing is not
    capable of accepting strings encoded in UTF-8, the log functions
    will throw decoding errors if a non-ascii character appears in a
    string.
    Prior to the call Python data types are converted to a string
    representation as required by the LDAP APIs.
    Then logging is performed so we can track what is being
    sent/received from LDAP. Also the logging filters security
    sensitive items (i.e. passwords).
    Then the string values are encoded into UTF-8.
    Then the LDAP API entry point is invoked.
    Data returned from the LDAP call is converted back from UTF-8
    encoded strings into the Python data type used internally in
    OpenStack.
    '''
    def __init__(self, conn=None):
        super(KeystoneLDAPHandler, self).__init__(conn=conn)
        # page_size > 0 enables simple paged results in search_s().
        self.page_size = 0
    def __enter__(self):
        # Context-manager support; __exit__ unbinds the connection.
        return self
    def _disable_paging(self):
        # Disable the pagination from now on
        self.page_size = 0
    def connect(self, url, page_size=0, alias_dereferencing=None,
                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
                tls_req_cert='demand', chase_referrals=None, debug_level=None,
                use_pool=None, pool_size=None,
                pool_retry_max=None, pool_retry_delay=None,
                pool_conn_timeout=None, pool_conn_lifetime=None):
        # Remember page_size locally (it drives _paged_search_s) and
        # delegate everything else to the wrapped handler.
        self.page_size = page_size
        return self.conn.connect(url, page_size, alias_dereferencing,
                                 use_tls, tls_cacertfile, tls_cacertdir,
                                 tls_req_cert, chase_referrals,
                                 debug_level=debug_level,
                                 use_pool=use_pool,
                                 pool_size=pool_size,
                                 pool_retry_max=pool_retry_max,
                                 pool_retry_delay=pool_retry_delay,
                                 pool_conn_timeout=pool_conn_timeout,
                                 pool_conn_lifetime=pool_conn_lifetime)
    def set_option(self, option, invalue):
        return self.conn.set_option(option, invalue)
    def get_option(self, option):
        return self.conn.get_option(option)
    def simple_bind_s(self, who='', cred='',
                      serverctrls=None, clientctrls=None):
        # Log only the bind DN, never the credential.
        LOG.debug("LDAP bind: who=%s", who)
        who_utf8 = utf8_encode(who)
        cred_utf8 = utf8_encode(cred)
        return self.conn.simple_bind_s(who_utf8, cred_utf8,
                                       serverctrls=serverctrls,
                                       clientctrls=clientctrls)
    def unbind_s(self):
        LOG.debug("LDAP unbind")
        return self.conn.unbind_s()
    def add_s(self, dn, modlist):
        # Convert attribute values to LDAP string form, mask any
        # userPassword values in the log, then UTF-8 encode for the API.
        ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
                      for kind, values in modlist]
        logging_attrs = [(kind, values
                         if kind != 'userPassword'
                         else ['****'])
                         for kind, values in ldap_attrs]
        LOG.debug('LDAP add: dn=%s attrs=%s',
                  dn, logging_attrs)
        dn_utf8 = utf8_encode(dn)
        ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
                           for kind, values in ldap_attrs]
        return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
    def search_s(self, base, scope,
                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
        # NOTE(morganfainberg): Remove "None" singletons from this list, which
        # allows us to set mapped attributes to "None" as defaults in config.
        # Without this filtering, the ldap query would raise a TypeError since
        # attrlist is expected to be an iterable of strings.
        if attrlist is not None:
            attrlist = [attr for attr in attrlist if attr is not None]
        LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
                  'attrs=%s attrsonly=%s',
                  base, scope, filterstr, attrlist, attrsonly)
        if self.page_size:
            # Paged path handles its own UTF-8 encoding.
            ldap_result = self._paged_search_s(base, scope,
                                               filterstr, attrlist)
        else:
            base_utf8 = utf8_encode(base)
            filterstr_utf8 = utf8_encode(filterstr)
            if attrlist is None:
                attrlist_utf8 = None
            else:
                attrlist_utf8 = list(map(utf8_encode, attrlist))
            ldap_result = self.conn.search_s(base_utf8, scope,
                                             filterstr_utf8,
                                             attrlist_utf8, attrsonly)
        # Decode the raw LDAP result back into Python-native types.
        py_result = convert_ldap_result(ldap_result)
        return py_result
    def search_ext(self, base, scope,
                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
                   serverctrls=None, clientctrls=None,
                   timeout=-1, sizelimit=0):
        # Same None-filtering rationale as search_s() above.
        if attrlist is not None:
            attrlist = [attr for attr in attrlist if attr is not None]
        LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
                  'attrs=%s attrsonly=%s'
                  'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
                  base, scope, filterstr, attrlist, attrsonly,
                  serverctrls, clientctrls, timeout, sizelimit)
        return self.conn.search_ext(base, scope,
                                    filterstr, attrlist, attrsonly,
                                    serverctrls, clientctrls,
                                    timeout, sizelimit)
    def _paged_search_s(self, base, scope, filterstr, attrlist=None):
        # Run a search using the simple paged results control, accumulating
        # all pages into a single result list.
        res = []
        use_old_paging_api = False
        # The API for the simple paged results control changed between
        # python-ldap 2.3 and 2.4. We need to detect the capabilities
        # of the python-ldap version we are using.
        if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
            use_old_paging_api = True
            lc = ldap.controls.SimplePagedResultsControl(
                controlType=ldap.LDAP_CONTROL_PAGE_OID,
                criticality=True,
                controlValue=(self.page_size, ''))
            page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
        else:
            lc = ldap.controls.libldap.SimplePagedResultsControl(
                criticality=True,
                size=self.page_size,
                cookie='')
            page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
        base_utf8 = utf8_encode(base)
        filterstr_utf8 = utf8_encode(filterstr)
        if attrlist is None:
            attrlist_utf8 = None
        else:
            attrlist = [attr for attr in attrlist if attr is not None]
            attrlist_utf8 = list(map(utf8_encode, attrlist))
        msgid = self.conn.search_ext(base_utf8,
                                     scope,
                                     filterstr_utf8,
                                     attrlist_utf8,
                                     serverctrls=[lc])
        # Endless loop request pages on ldap server until it has no data
        while True:
            # Request to the ldap server a page with 'page_size' entries
            rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
            # Receive the data
            res.extend(rdata)
            pctrls = [c for c in serverctrls
                      if c.controlType == page_ctrl_oid]
            if pctrls:
                # LDAP server supports pagination
                if use_old_paging_api:
                    est, cookie = pctrls[0].controlValue
                    lc.controlValue = (self.page_size, cookie)
                else:
                    cookie = lc.cookie = pctrls[0].cookie
                if cookie:
                    # There is more data still on the server
                    # so we request another page
                    msgid = self.conn.search_ext(base_utf8,
                                                 scope,
                                                 filterstr_utf8,
                                                 attrlist_utf8,
                                                 serverctrls=[lc])
                else:
                    # Exit condition no more data on server
                    break
            else:
                LOG.warning(_LW('LDAP Server does not support paging. '
                                'Disable paging in keystone.conf to '
                                'avoid this message.'))
                self._disable_paging()
                break
        return res
    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
                resp_ctrl_classes=None):
        # Wait for an async result, log it, and decode it into Python types.
        ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
        LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
                  'resp_ctrl_classes=%s ldap_result=%s',
                  msgid, all, timeout, resp_ctrl_classes, ldap_result)
        py_result = convert_ldap_result(ldap_result)
        return py_result
    def modify_s(self, dn, modlist):
        # Convert values to LDAP form (None is preserved to mean "delete"),
        # mask passwords in the log, then UTF-8 encode for the API.
        ldap_modlist = [
            (op, kind, (None if values is None
                        else [py2ldap(x) for x in safe_iter(values)]))
            for op, kind, values in modlist]
        logging_modlist = [(op, kind, (values if kind != 'userPassword'
                           else ['****']))
                           for op, kind, values in ldap_modlist]
        LOG.debug('LDAP modify: dn=%s modlist=%s',
                  dn, logging_modlist)
        dn_utf8 = utf8_encode(dn)
        ldap_modlist_utf8 = [
            (op, kind, (None if values is None
                        else [utf8_encode(x) for x in safe_iter(values)]))
            for op, kind, values in ldap_modlist]
        return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
    def delete_s(self, dn):
        LOG.debug("LDAP delete: dn=%s", dn)
        dn_utf8 = utf8_encode(dn)
        return self.conn.delete_s(dn_utf8)
    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
        LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
                  dn, serverctrls, clientctrls)
        dn_utf8 = utf8_encode(dn)
        return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always unbind when leaving a `with` block.
        self.unbind_s()
# Registry of custom LDAPHandler factories keyed by connection-URL prefix;
# consulted by _get_connection() before the default handlers are used.
_HANDLERS = {}
def register_handler(prefix, handler):
    # Register a handler factory for URLs starting with `prefix`.
    _HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
    """Return an LDAP handler instance appropriate for conn_url.

    A handler registered via register_handler() for a matching URL prefix
    takes precedence; otherwise a pooled or plain python-ldap handler is
    chosen based on use_pool.
    """
    handler_cls = next((handler for prefix, handler in _HANDLERS.items()
                        if conn_url.startswith(prefix)), None)
    if handler_cls is not None:
        return handler_cls()
    if use_pool:
        return PooledLDAPHandler(use_auth_pool=use_auth_pool)
    return PythonLDAPHandler()
def filter_entity(entity_ref):
    """Filter out private items in an entity dict.

    :param entity_ref: the entity dictionary. The 'dn' field will be removed.
        'dn' is used in LDAP, but should not be returned to the user. This
        value may be modified.
    :returns: entity_ref
    """
    if not entity_ref:
        return entity_ref
    entity_ref.pop('dn', None)
    return entity_ref
class BaseLdap(object):
    """Base class providing CRUD operations over objects stored in an
    LDAP subtree, driven by per-object [ldap] configuration options.
    """
    # Per-subclass defaults; subclasses override these to describe the
    # LDAP object type they manage.
    DEFAULT_OU = None
    DEFAULT_STRUCTURAL_CLASSES = None
    DEFAULT_ID_ATTR = 'cn'
    DEFAULT_OBJECTCLASS = None
    DEFAULT_FILTER = None
    DEFAULT_EXTRA_ATTR_MAPPING = []
    DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
    NotFound = None
    notfound_arg = None
    options_name = None
    model = None
    attribute_options_names = {}
    immutable_attrs = []
    attribute_ignore = []
    tree_dn = None
    def __init__(self, conf):
        # Copy connection and TLS settings from the [ldap] config section.
        self.LDAP_URL = conf.ldap.url
        self.LDAP_USER = conf.ldap.user
        self.LDAP_PASSWORD = conf.ldap.password
        self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
        self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
        self.page_size = conf.ldap.page_size
        self.use_tls = conf.ldap.use_tls
        self.tls_cacertfile = conf.ldap.tls_cacertfile
        self.tls_cacertdir = conf.ldap.tls_cacertdir
        self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
        self.attribute_mapping = {}
        self.chase_referrals = conf.ldap.chase_referrals
        self.debug_level = conf.ldap.debug_level
        # LDAP Pool specific attribute
        self.use_pool = conf.ldap.use_pool
        self.pool_size = conf.ldap.pool_size
        self.pool_retry_max = conf.ldap.pool_retry_max
        self.pool_retry_delay = conf.ldap.pool_retry_delay
        self.pool_conn_timeout = conf.ldap.pool_connection_timeout
        self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
        # End user authentication pool specific config attributes
        self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
        self.auth_pool_size = conf.ldap.auth_pool_size
        self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
        if self.options_name is not None:
            # Resolve per-object options named '<options_name>_*' from
            # config, falling back to the class DEFAULT_* values.
            self.suffix = conf.ldap.suffix
            dn = '%s_tree_dn' % self.options_name
            self.tree_dn = (getattr(conf.ldap, dn)
                            or '%s,%s' % (self.DEFAULT_OU, self.suffix))
            idatt = '%s_id_attribute' % self.options_name
            self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
            objclass = '%s_objectclass' % self.options_name
            self.object_class = (getattr(conf.ldap, objclass)
                                 or self.DEFAULT_OBJECTCLASS)
            for k, v in self.attribute_options_names.items():
                v = '%s_%s_attribute' % (self.options_name, v)
                self.attribute_mapping[k] = getattr(conf.ldap, v)
            attr_mapping_opt = ('%s_additional_attribute_mapping' %
                                self.options_name)
            attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
                            or self.DEFAULT_EXTRA_ATTR_MAPPING)
            self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
            ldap_filter = '%s_filter' % self.options_name
            self.ldap_filter = getattr(conf.ldap,
                                       ldap_filter) or self.DEFAULT_FILTER
            allow_create = '%s_allow_create' % self.options_name
            self.allow_create = getattr(conf.ldap, allow_create)
            allow_update = '%s_allow_update' % self.options_name
            self.allow_update = getattr(conf.ldap, allow_update)
            allow_delete = '%s_allow_delete' % self.options_name
            self.allow_delete = getattr(conf.ldap, allow_delete)
            member_attribute = '%s_member_attribute' % self.options_name
            self.member_attribute = getattr(conf.ldap, member_attribute, None)
            self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
            if self.notfound_arg is None:
                self.notfound_arg = self.options_name + '_id'
            attribute_ignore = '%s_attribute_ignore' % self.options_name
            self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
        self.use_dumb_member = conf.ldap.use_dumb_member
        self.dumb_member = (conf.ldap.dumb_member or
                            self.DUMB_MEMBER_DN)
        self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
    def _not_found(self, object_id):
        # Build the subclass-specific NotFound exception, or a generic one.
        if self.NotFound is None:
            return exception.NotFound(target=object_id)
        else:
            return self.NotFound(**{self.notfound_arg: object_id})
    def _parse_extra_attrs(self, option_list):
        # Parse 'ldap_attr:keystone_attr' strings into a mapping dict,
        # skipping (with a warning) any malformed entries.
        mapping = {}
        for item in option_list:
            try:
                ldap_attr, attr_map = item.split(':')
            except Exception:
                LOG.warn(_LW(
                    'Invalid additional attribute mapping: "%s". '
                    'Format must be <ldap_attribute>:<keystone_attribute>'),
                    item)
                continue
            mapping[ldap_attr] = attr_map
        return mapping
    def _is_dumb_member(self, member_dn):
        """Checks that member is a dumb member.
        :param member_dn: DN of member to be checked.
        """
        return (self.use_dumb_member
                and is_dn_equal(member_dn, self.dumb_member))
    def get_connection(self, user=None, password=None, end_user_auth=False):
        # Build a connected (and, if credentials exist, bound) handler.
        # end_user_auth selects the dedicated auth pool settings when the
        # auth pool is enabled, or disables pooling for auth otherwise.
        use_pool = self.use_pool
        pool_size = self.pool_size
        pool_conn_lifetime = self.pool_conn_lifetime
        if end_user_auth:
            if not self.use_auth_pool:
                use_pool = False
            else:
                pool_size = self.auth_pool_size
                pool_conn_lifetime = self.auth_pool_conn_lifetime
        conn = _get_connection(self.LDAP_URL, use_pool,
                               use_auth_pool=end_user_auth)
        conn = KeystoneLDAPHandler(conn=conn)
        conn.connect(self.LDAP_URL,
                     page_size=self.page_size,
                     alias_dereferencing=self.alias_dereferencing,
                     use_tls=self.use_tls,
                     tls_cacertfile=self.tls_cacertfile,
                     tls_cacertdir=self.tls_cacertdir,
                     tls_req_cert=self.tls_req_cert,
                     chase_referrals=self.chase_referrals,
                     debug_level=self.debug_level,
                     use_pool=use_pool,
                     pool_size=pool_size,
                     pool_retry_max=self.pool_retry_max,
                     pool_retry_delay=self.pool_retry_delay,
                     pool_conn_timeout=self.pool_conn_timeout,
                     pool_conn_lifetime=pool_conn_lifetime
                     )
        if user is None:
            user = self.LDAP_USER
        if password is None:
            password = self.LDAP_PASSWORD
        # not all LDAP servers require authentication, so we don't bind
        # if we don't have any user/pass
        if user and password:
            conn.simple_bind_s(user, password)
        return conn
    def _id_to_dn_string(self, object_id):
        # Construct the DN purely from configuration (no directory lookup).
        return u'%s=%s,%s' % (self.id_attr,
                              ldap.dn.escape_dn_chars(
                                  six.text_type(object_id)),
                              self.tree_dn)
    def _id_to_dn(self, object_id):
        # For one-level scope the DN is deterministic; otherwise search the
        # subtree for the entry and use its actual DN when found.
        if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
            return self._id_to_dn_string(object_id)
        with self.get_connection() as conn:
            search_result = conn.search_s(
                self.tree_dn, self.LDAP_SCOPE,
                u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
                {'id_attr': self.id_attr,
                 'id': ldap.filter.escape_filter_chars(
                     six.text_type(object_id)),
                 'objclass': self.object_class},
                attrlist=DN_ONLY)
        if search_result:
            dn, attrs = search_result[0]
            return dn
        else:
            return self._id_to_dn_string(object_id)
    @staticmethod
    def _dn_to_id(dn):
        # The ID is the value of the first RDN of the DN.
        return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
    def _ldap_res_to_model(self, res):
        # LDAP attribute names may be returned in a different case than
        # they are defined in the mapping, so we need to check for keys
        # in a case-insensitive way. We use the case specified in the
        # mapping for the model to ensure we have a predictable way of
        # retrieving values later.
        lower_res = {k.lower(): v for k, v in res[1].items()}
        id_attrs = lower_res.get(self.id_attr.lower())
        if not id_attrs:
            message = _('ID attribute %(id_attr)s not found in LDAP '
                        'object %(dn)s') % ({'id_attr': self.id_attr,
                                             'dn': res[0]})
            raise exception.NotFound(message=message)
        if len(id_attrs) > 1:
            # FIXME(gyee): if this is a multi-value attribute and it has
            # multiple values, we can't use it as ID. Retain the dn_to_id
            # logic here so it does not potentially break existing
            # deployments. We need to fix our read-write LDAP logic so
            # it does not get the ID from DN.
            message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
                          'has multiple values and therefore cannot be used '
                          'as an ID. Will get the ID from DN instead') % (
                              {'id_attr': self.id_attr,
                               'dn': res[0]})
            LOG.warn(message)
            id_val = self._dn_to_id(res[0])
        else:
            id_val = id_attrs[0]
        obj = self.model(id=id_val)
        # Fill the model from the mapped attributes; missing attributes
        # are simply left unset, empty value lists become None.
        for k in obj.known_keys:
            if k in self.attribute_ignore:
                continue
            try:
                map_attr = self.attribute_mapping.get(k, k)
                if map_attr is None:
                    # Ignore attributes that are mapped to None.
                    continue
                v = lower_res[map_attr.lower()]
            except KeyError:
                pass
            else:
                try:
                    obj[k] = v[0]
                except IndexError:
                    obj[k] = None
        return obj
    def check_allow_create(self):
        # Raise ForbiddenAction unless create is enabled in config.
        if not self.allow_create:
            action = _('LDAP %s create') % self.options_name
            raise exception.ForbiddenAction(action=action)
    def check_allow_update(self):
        # Raise ForbiddenAction unless update is enabled in config.
        if not self.allow_update:
            action = _('LDAP %s update') % self.options_name
            raise exception.ForbiddenAction(action=action)
    def check_allow_delete(self):
        # Raise ForbiddenAction unless delete is enabled in config.
        if not self.allow_delete:
            action = _('LDAP %s delete') % self.options_name
            raise exception.ForbiddenAction(action=action)
    def affirm_unique(self, values):
        # Raise Conflict if an entity with the same name or ID exists.
        if values.get('name') is not None:
            try:
                self.get_by_name(values['name'])
            except exception.NotFound:
                pass
            else:
                raise exception.Conflict(type=self.options_name,
                                         details=_('Duplicate name, %s.') %
                                         values['name'])
        if values.get('id') is not None:
            try:
                self.get(values['id'])
            except exception.NotFound:
                pass
            else:
                raise exception.Conflict(type=self.options_name,
                                         details=_('Duplicate ID, %s.') %
                                         values['id'])
    def create(self, values):
        """Add a new entry built from `values` and return `values`."""
        self.affirm_unique(values)
        object_classes = self.structural_classes + [self.object_class]
        attrs = [('objectClass', object_classes)]
        for k, v in values.items():
            if k in self.attribute_ignore:
                continue
            if k == 'id':
                # no need to check if v is None as 'id' will always have
                # a value
                attrs.append((self.id_attr, [v]))
            elif v is not None:
                attr_type = self.attribute_mapping.get(k, k)
                if attr_type is not None:
                    attrs.append((attr_type, [v]))
                # Also populate any extra LDAP attributes mapped to this key.
                extra_attrs = [attr for attr, name
                               in self.extra_attr_mapping.items()
                               if name == k]
                for attr in extra_attrs:
                    attrs.append((attr, [v]))
        if 'groupOfNames' in object_classes and self.use_dumb_member:
            # groupOfNames requires at least one member; use the dumb one.
            attrs.append(('member', [self.dumb_member]))
        with self.get_connection() as conn:
            conn.add_s(self._id_to_dn(values['id']), attrs)
        return values
    def _ldap_get(self, object_id, ldap_filter=None):
        # Fetch the single raw LDAP entry for object_id, or None.
        query = (u'(&(%(id_attr)s=%(id)s)'
                 u'%(filter)s'
                 u'(objectClass=%(object_class)s))'
                 % {'id_attr': self.id_attr,
                    'id': ldap.filter.escape_filter_chars(
                        six.text_type(object_id)),
                    'filter': (ldap_filter or self.ldap_filter or ''),
                    'object_class': self.object_class})
        with self.get_connection() as conn:
            try:
                attrs = list(set(([self.id_attr] +
                                  list(self.attribute_mapping.values()) +
                                  list(self.extra_attr_mapping.keys()))))
                res = conn.search_s(self.tree_dn,
                                    self.LDAP_SCOPE,
                                    query,
                                    attrs)
            except ldap.NO_SUCH_OBJECT:
                return None
        try:
            return res[0]
        except IndexError:
            return None
    def _ldap_get_all(self, ldap_filter=None):
        # Fetch all raw LDAP entries of this object class under tree_dn.
        query = u'(&%s(objectClass=%s))' % (ldap_filter or
                                            self.ldap_filter or
                                            '', self.object_class)
        with self.get_connection() as conn:
            try:
                attrs = list(set(([self.id_attr] +
                                  list(self.attribute_mapping.values()) +
                                  list(self.extra_attr_mapping.keys()))))
                return conn.search_s(self.tree_dn,
                                     self.LDAP_SCOPE,
                                     query,
                                     attrs)
            except ldap.NO_SUCH_OBJECT:
                return []
    def _ldap_get_list(self, search_base, scope, query_params=None,
                       attrlist=None):
        # Search under an arbitrary base, ANDing optional equality filters.
        query = u'(objectClass=%s)' % self.object_class
        if query_params:
            def calc_filter(attrname, value):
                val_esc = ldap.filter.escape_filter_chars(value)
                return '(%s=%s)' % (attrname, val_esc)
            query = (u'(&%s%s)' %
                     (query, ''.join([calc_filter(k, v) for k, v in
                                      query_params.items()])))
        with self.get_connection() as conn:
            return conn.search_s(search_base, scope, query, attrlist)
    def get(self, object_id, ldap_filter=None):
        """Return the model for object_id or raise the NotFound exception."""
        res = self._ldap_get(object_id, ldap_filter)
        if res is None:
            raise self._not_found(object_id)
        else:
            return self._ldap_res_to_model(res)
    def get_by_name(self, name, ldap_filter=None):
        """Return the first model whose mapped name attribute equals name."""
        query = (u'(%s=%s)' % (self.attribute_mapping['name'],
                               ldap.filter.escape_filter_chars(
                                   six.text_type(name))))
        res = self.get_all(query)
        try:
            return res[0]
        except IndexError:
            raise self._not_found(name)
    def get_all(self, ldap_filter=None):
        """Return models for every matching entry under tree_dn."""
        return [self._ldap_res_to_model(x)
                for x in self._ldap_get_all(ldap_filter)]
    def update(self, object_id, values, old_obj=None):
        """Apply attribute changes in `values` and return the fresh model.

        Raises ForbiddenAction when disabling an ignored 'enabled'
        attribute and ValidationError when changing an immutable one.
        """
        if old_obj is None:
            old_obj = self.get(object_id)
        modlist = []
        for k, v in values.items():
            if k == 'id':
                # id can't be modified.
                continue
            if k in self.attribute_ignore:
                # Handle 'enabled' specially since can't disable if ignored.
                if k == 'enabled' and (not v):
                    action = _("Disabling an entity where the 'enable' "
                               "attribute is ignored by configuration.")
                    raise exception.ForbiddenAction(action=action)
                continue
            # attribute value has not changed
            if k in old_obj and old_obj[k] == v:
                continue
            if k in self.immutable_attrs:
                msg = (_("Cannot change %(option_name)s %(attr)s") %
                       {'option_name': self.options_name, 'attr': k})
                raise exception.ValidationError(msg)
            if v is None:
                # None requests deletion of the attribute, if it exists.
                if old_obj.get(k) is not None:
                    modlist.append((ldap.MOD_DELETE,
                                    self.attribute_mapping.get(k, k),
                                    None))
                continue
            current_value = old_obj.get(k)
            if current_value is None:
                op = ldap.MOD_ADD
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
            elif current_value != v:
                op = ldap.MOD_REPLACE
                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
        if modlist:
            with self.get_connection() as conn:
                try:
                    conn.modify_s(self._id_to_dn(object_id), modlist)
                except ldap.NO_SUCH_OBJECT:
                    raise self._not_found(object_id)
        return self.get(object_id)
    def delete(self, object_id):
        """Delete the entry for object_id; raise NotFound if absent."""
        with self.get_connection() as conn:
            try:
                conn.delete_s(self._id_to_dn(object_id))
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)
    def deleteTree(self, object_id):
        """Delete the entry for object_id together with its whole subtree."""
        tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
                                                        0,
                                                        None)
        with self.get_connection() as conn:
            try:
                conn.delete_ext_s(self._id_to_dn(object_id),
                                  serverctrls=[tree_delete_control])
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(object_id)
            except ldap.NOT_ALLOWED_ON_NONLEAF:
                # Most LDAP servers do not support the tree_delete_control.
                # In these servers, the usual idiom is to first perform a
                # search to get the entries to delete, then delete them in
                # in order of child to parent, since LDAP forbids the
                # deletion of a parent entry before deleting the children
                # of that parent. The simplest way to do that is to delete
                # the entries in order of the length of the DN, from longest
                # to shortest DN.
                dn = self._id_to_dn(object_id)
                scope = ldap.SCOPE_SUBTREE
                # With some directory servers, an entry with objectclass
                # ldapsubentry will not be returned unless it is explicitly
                # requested, by specifying the objectclass in the search
                # filter. We must specify this, with objectclass=*, in an
                # LDAP filter OR clause, in order to return all entries
                filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
                # We only need the DNs of the entries. Since no attributes
                # will be returned, we do not have to specify attrsonly=1.
                entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
                if entries:
                    for dn in sorted((e[0] for e in entries),
                                     key=len, reverse=True):
                        conn.delete_s(dn)
                else:
                    LOG.debug('No entries in LDAP subtree %s', dn)
    def add_member(self, member_dn, member_list_dn):
        """Add member to the member list.
        :param member_dn: DN of member to be added.
        :param member_list_dn: DN of group to which the
                               member will be added.
        :raises: exception.Conflict: If the user was already a member.
                 self.NotFound: If the group entry didn't exist.
        """
        with self.get_connection() as conn:
            try:
                mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
                conn.modify_s(member_list_dn, [mod])
            except ldap.TYPE_OR_VALUE_EXISTS:
                raise exception.Conflict(_('Member %(member)s '
                                           'is already a member'
                                           ' of group %(group)s') % {
                                               'member': member_dn,
                                               'group': member_list_dn})
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(member_list_dn)
    def remove_member(self, member_dn, member_list_dn):
        """Remove member from the member list.
        :param member_dn: DN of member to be removed.
        :param member_list_dn: DN of group from which the
                               member will be removed.
        :raises: self.NotFound: If the group entry didn't exist.
                 ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
        """
        with self.get_connection() as conn:
            try:
                mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
                conn.modify_s(member_list_dn, [mod])
            except ldap.NO_SUCH_OBJECT:
                raise self._not_found(member_list_dn)
    def _delete_tree_nodes(self, search_base, scope, query_params=None):
        # Delete every matching node under search_base, one entry at a
        # time; entries that vanish mid-delete are collected and logged.
        query = u'(objectClass=%s)' % self.object_class
        if query_params:
            query = (u'(&%s%s)' %
                     (query, ''.join(['(%s=%s)'
                                      % (k, ldap.filter.escape_filter_chars(v))
                                      for k, v in
                                      query_params.items()])))
        not_deleted_nodes = []
        with self.get_connection() as conn:
            try:
                nodes = conn.search_s(search_base, scope, query,
                                      attrlist=DN_ONLY)
            except ldap.NO_SUCH_OBJECT:
                LOG.debug('Could not find entry with dn=%s', search_base)
                raise self._not_found(self._dn_to_id(search_base))
            else:
                for node_dn, _t in nodes:
                    try:
                        conn.delete_s(node_dn)
                    except ldap.NO_SUCH_OBJECT:
                        not_deleted_nodes.append(node_dn)
        if not_deleted_nodes:
            LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
                         " delete nonexistent entries %(entries)s%(dots)s"),
                     {'search_base': search_base,
                      'entries': not_deleted_nodes[:3],
                      'dots': '...' if len(not_deleted_nodes) > 3 else ''})
    def filter_query(self, hints, query=None):
        """Applies filtering to a query.
        :param hints: contains the list of filters, which may be None,
                      indicating that there are no filters to be applied.
                      If it's not None, then any filters satisfied here will be
                      removed so that the caller will know if any filters
                      remain to be applied.
        :param query: LDAP query into which to include filters
        :returns query: LDAP query, updated with any filters satisfied
        """
        def build_filter(filter_, hints):
            """Build a filter for the query.
            :param filter_: the dict that describes this filter
            :param hints: contains the list of filters yet to be satisfied.
            :returns query: LDAP query term to be added
            """
            ldap_attr = self.attribute_mapping[filter_['name']]
            val_esc = ldap.filter.escape_filter_chars(filter_['value'])
            if filter_['case_sensitive']:
                # NOTE(henry-nash): Although dependent on the schema being
                # used, most LDAP attributes are configured with case
                # insensitive matching rules, so we'll leave this to the
                # controller to filter.
                return
            if filter_['name'] == 'enabled':
                # NOTE(henry-nash): Due to the different options for storing
                # the enabled attribute (e,g, emulated or not), for now we
                # don't try and filter this at the driver level - we simply
                # leave the filter to be handled by the controller. It seems
                # unlikley that this will cause a signifcant performance
                # issue.
                return
            # TODO(henry-nash): Currently there are no booleans (other than
            # 'enabled' that is handled above) on which you can filter. If
            # there were, we would need to add special handling here to
            # convert the booleans values to 'TRUE' and 'FALSE'. To do that
            # we would also need to know which filter keys were actually
            # booleans (this is related to bug #1411478).
            if filter_['comparator'] == 'equals':
                query_term = (u'(%(attr)s=%(val)s)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'contains':
                query_term = (u'(%(attr)s=*%(val)s*)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'startswith':
                query_term = (u'(%(attr)s=%(val)s*)'
                              % {'attr': ldap_attr, 'val': val_esc})
            elif filter_['comparator'] == 'endswith':
                query_term = (u'(%(attr)s=*%(val)s)'
                              % {'attr': ldap_attr, 'val': val_esc})
            else:
                # It's a filter we don't understand, so let the caller
                # work out if they need to do something with it.
                return
            return query_term
        if query is None:
            # make sure query is a string so the ldap filter is properly
            # constructed from filter_list later
            query = ''
        if hints is None:
            return query
        filter_list = []
        satisfied_filters = []
        for filter_ in hints.filters:
            if filter_['name'] not in self.attribute_mapping:
                continue
            new_filter = build_filter(filter_, hints)
            if new_filter is not None:
                filter_list.append(new_filter)
                satisfied_filters.append(filter_)
        if filter_list:
            query = u'(&%s%s)' % (query, ''.join(filter_list))
        # Remove satisfied filters, then the caller will know remaining filters
        for filter_ in satisfied_filters:
            hints.filters.remove(filter_)
        return query
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
Creates groupOfNames holding all enabled objects of this class, all missing
objects are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_${name}s,${tree_dn}
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
with self.get_connection() as conn:
if not self._get_enabled(object_id, conn):
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member', [self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
with self.get_connection() as conn:
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if ('enabled' not in self.attribute_ignore and
self.enabled_emulation):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here to ldap_filter by DN
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(
tenant_ref['id'], conn)
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
class ProjectLdapStructureMixin(object):
"""Project LDAP Structure shared between LDAP backends.
This is shared between the resource and assignment LDAP backends.
"""
DEFAULT_OU = 'ou=Groups'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'groupOfNames'
DEFAULT_ID_ATTR = 'cn'
NotFound = exception.ProjectNotFound
notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
options_name = 'project'
attribute_options_names = {'name': 'name',
'description': 'desc',
'enabled': 'enabled',
'domain_id': 'domain_id'}
immutable_attrs = ['name']
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n\"\"\"Computes partition function for RBM-like models using Annealed Importance Sampling.\"\"\"\nimport numpy as np\nfrom deepnet import dbm\nfrom deepnet import util\nfrom deepnet import trainer as tr\nfrom choose_matrix_library import *\nimport sy... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n\"\"\"Computes partition function for RBM-like models using Annealed Importance Sampling.\"\"\"\nimport numpy as np\nfrom deepnet import dbm\nfrom deepnet import util\nfrom deepnet import trainer as tr\nfrom choose_matrix_library imp... | ```python
"""Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
from deepnet import dbm
from deepnet import util
from deepnet import trainer as tr
from choose_matrix_library import *
import sys
import numpy as np
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from deepnet import visualize
import deepnet
import scipy.io as sio
def LogMeanExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).mean())
def LogSumExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).sum())
def Display(w, hid_state, input_state, w_var=None, x_axis=None):
w = w.asarray().flatten()
plt.figure(1)
plt.clf()
plt.hist(w, 100)
visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
# plt.figure(3)
# plt.clf()
# plt.imshow(hid_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
# plt.figure(4)
# plt.clf()
# plt.imshow(input_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
#, state.shape[0], state.shape[1], state.shape[0], 3, title='Markov chains')
# plt.tight_layout(pad=0, w_pad=0, h_pad=0)
# plt.figure(5)
# plt.clf()
# plt.suptitle('Variance')
# plt.plot(np.array(x_axis), np.array(w_var))
# plt.draw()
def impute_dbm_ais(model):
"""Run approximate pll using AIS on a DBM """
def impute_rbm_gaussian_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('bernoulli_hidden1')
bern2_hidden_layer = model.GetLayerByName('bernoulli2_hidden1')
gaussian_layer = model.GetLayerByName('gaussian_hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchzeroslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
datasize_squared = cm.CUDAMatrix(np.zeros([batchsize, batchsize]))
datasize_eye = cm.CUDAMatrix(np.eye(batchsize))
datasize_eye2 = cm.CUDAMatrix(np.eye(batchsize))
if hidden_layer:
hidden_bias = hidden_layer.params['bias']
bedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli_hidden1')
w = bedge.params['weight']
if bern2_hidden_layer:
bern2_hidden_bias = bern2_hidden_layer.params['bias']
bedge2 = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli2_hidden1')
w2 = bedge2.params['weight']
if 'bias' in input_layer.params:
input_bias = input_layer.params['bias']
if gaussian_layer:
gedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'gaussian_hidden1')
gw = gedge.params['weight']
input_diag = input_layer.params['diag']
diag_val = input_diag.sum() / (input_layer.dimensions * input_layer.numlabels)
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
batchslice.assign(batchzeroslice)
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
if hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
if bern2_hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w2.T, input_layer.state, target=bern2_hidden_layer.state)
bern2_hidden_layer.state.add_col_vec(bern2_hidden_bias)
cm.log_1_plus_exp(bern2_hidden_layer.state)
batchslice.add_sums(bern2_hidden_layer.state, axis=0)
if 'bias' in input_layer.params:
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
if gaussian_layer:
# Add contributions from gaussian hidden layer
cm.dot(gw.T, input_layer.state, target=gaussian_layer.state)
cm.dot(gaussian_layer.state.T, gaussian_layer.state, target= datasize_squared)
datasize_squared.mult(datasize_eye, target=datasize_eye2)
datasize_eye2.sum(axis=0, target=batchslice2)
# Add constants from gaussian hidden layer
integration_constant = gaussian_layer.dimensions * np.log(2*np.pi)
integration_constant += input_layer.dimensions * diag_val
batchslice2.add(integration_constant)
batchslice2.mult(0.5)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
# Undo rehapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
def impute_rbm_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
hidden_bias = hidden_layer.params['bias']
input_bias = input_layer.params['bias']
edge = model.edge[0]
w = edge.params['weight']
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
# Undo rehapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
def impute_mf(model, mf_steps, hidden_mf_steps, **opts):
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
# Run MF steps
for mf_idx in range(mf_steps):
for hid_mf_idx in range(hidden_mf_steps):
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
input_layer.state.get_row_slice(offset, offset + numlabels , \
target=input_layer.fooslice)
input_layer.GetData()
input_layer.state.set_row_slice(offset, offset + numlabels , \
input_layer.fooslice)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (dimensions+0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def multicol_mf(model, multicols, **opts):
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# Get the multicol dimensions
nBlocks, nCols = multicols.shape
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for mult_idx in range(nBlocks):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (nBlocks * nCols +0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def Usage():
print '%s <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]'
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Run AIS")
parser.add_argument("--model_file", type=str)
parser.add_argument("--train_file", type=str)
parser.add_argument("--infer-method", type=str, default='exact', \
help='mf/gibbs/exact/gaussian_exact')
parser.add_argument("--mf-steps", type=int, default=1)
parser.add_argument("--hidden-mf-steps", type=int, default=1)
parser.add_argument("--outf", type=str, help='Output File')
parser.add_argument("--valid_only", action='store_true', help="only run the validation set")
parser.add_argument("--blosum90", action='store_true', help="Calculate blosum90 scores")
parser.add_argument("--ncols", type=int, help="Number of multiple columns")
parser.add_argument("--multmode", type=str, help="Multicol mode",default='rand')
args = parser.parse_args()
if not args.outf :
raise ValueError('Output file not defined')
if not args.train_file or not args.model_file :
raise ValueError('Models and data missing')
board = tr.LockGPU()
model_file = args.model_file
train_file = args.train_file
model = dbm.DBM(model_file, train_file)
trainer_pb = util.ReadOperation(train_file)
dataset = os.path.basename(trainer_pb.data_proto_prefix)
# Fix paths
dirname = os.path.split(model.t_op.data_proto_prefix)[1]
model.t_op.data_proto_prefix = os.path.join('datasets/',\
dirname)
model.t_op.skip_last_piece = False
model.t_op.get_last_piece = True
model.t_op.randomize = False
model.LoadModelOnGPU()
model.SetUpData()
if args.valid_only:
data_types = ['valid']
else:
data_types = ['train', 'valid', 'test']
datagetters = {
'train' : model.GetTrainBatch,
'valid' : model.GetValidationBatch,
'test' : model.GetTestBatch
}
batchsizes = {
'train' : model.train_data_handler.num_batches,
'valid' : model.validation_data_handler.num_batches,
'test' : model.test_data_handler.num_batches
}
opts = {}
cm.CUDAMatrix.init_random(seed=int(time.time()))
if len(model.layer) > 2 and args.infer_method=='exact':
raise ValueError('Cannot use exact Exact inference for DBMs')
from collections import defaultdict
pll_data = defaultdict(list)
imperr_data = defaultdict(list)
for data_type in data_types:
num_batches = batchsizes[data_type]
datagetter = datagetters[data_type]
for batch_idx in range(num_batches):
print("Evalutating batch {}".format(batch_idx+1))
datagetter()
if args.infer_method == 'mf':
if args.blosum90:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps, blosum90=True)
else:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps)
elif args.infer_method == 'multicol':
ncols = args.ncols;
multicol_file = 'datasets/{0}/multicol/{1}_{2}.mat'.format(dataset,args.multmode, ncols)
multicols = sio.loadmat(multicol_file)['multicols']
multicols = np.asarray(multicols, dtype=np.int)
multicols = multicols - 1; # convert from matlab indexing
if args.blosum90:
pll, imperr = multicol_mf(model, multicols, blosum90=True)
else:
pll, imperr = multicol_mf(model, multicols)
elif args.infer_method == 'exact':
pll, imperr = impute_rbm_exact(model)
elif args.infer_method == 'gaussian_exact':
pll, imperr = impute_rbm_gaussian_exact(model)
else:
raise ValueError("Unknown infer method")
pll, imperr = pll.flatten(), imperr.flatten()
pll_data[data_type].append(pll)
imperr_data[data_type].append(imperr)
pll_data[data_type] = np.concatenate(pll_data[data_type])
imperr_data[data_type] = np.concatenate(imperr_data[data_type])
#-------------------------------------------------------------------
# Print and save the results
for dtype in pll_data :
pll = pll_data[dtype]
imperr = imperr_data[dtype]
print '%s : Pseudo-LogLikelihood %.5f, std %.5f' % (dtype, pll.mean(), pll.std())
print '%s : Imputation Error %.5f, std %.5f' % (dtype, imperr.mean(), imperr.std())
tr.FreeGPU(board)
import pickle
with open(args.outf,'wb') as fout:
pkldata = { 'pll' : pll_data, 'imperr' : imperr_data }
pickle.dump(pkldata, fout)
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.