index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
54,144 | ucldc/harvester | refs/heads/master | /test/test_oac_fetcher.py | # -*- coding: utf-8 -*-
import os
import json
from unittest import TestCase
import shutil
from mock import patch
from xml.etree import ElementTree as ET
from mypretty import httpretty
# import httpretty
import harvester.fetcher as fetcher
from harvester.collection_registry_client import Collection
from test.utils import ConfigFileOverrideMixin, LogOverrideMixin
from test.utils import DIR_FIXTURES
class HarvestOAC_JSON_ControllerTestCase(ConfigFileOverrideMixin,
                                         LogOverrideMixin, TestCase):
    '''Test the function of an OAC harvest controller'''

    @httpretty.activate
    def setUp(self):
        # Mock the registry collection API and the OAC JSON search feed,
        # then build a HarvestController for registry collection 178.
        super(HarvestOAC_JSON_ControllerTestCase, self).setUp()
        # self.testFile = DIR_FIXTURES+'/collection_api_test_oac.json'
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/178/",
            body=open(DIR_FIXTURES + '/collection_api_test_oac.json').read())
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf2v19n928',
            body=open(DIR_FIXTURES + '/testOAC.json').read())
        self.collection = Collection(
            'https://registry.cdlib.org/api/v1/collection/178/')
        self.setUp_config(self.collection)
        self.controller = fetcher.HarvestController(
            'email@example.com',
            self.collection,
            config_file=self.config_file,
            profile_path=self.profile_path)

    def tearDown(self):
        # Remove generated config/profile files and the harvest output dir.
        super(HarvestOAC_JSON_ControllerTestCase, self).tearDown()
        self.tearDown_config()
        shutil.rmtree(self.controller.dir_save)

    @httpretty.activate
    @patch('boto3.resource', autospec=True)
    def testOAC_JSON_Harvest(self, mock_boto3):
        '''Test the function of the OAC harvest'''
        # Re-register the search URL with the final-page fixture so the
        # harvest terminates; the fixture yields 28 records total.
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf2v19n928',
            body=open(DIR_FIXTURES + '/testOAC-url_next-1.json').read())
        self.assertTrue(hasattr(self.controller, 'harvest'))
        self.controller.harvest()
        self.assertEqual(len(self.test_log_handler.records), 2)
        self.assertTrue('UCB Department of Statistics' in
                        self.test_log_handler.formatted_records[0])
        self.assertEqual(self.test_log_handler.formatted_records[1],
                         '[INFO] HarvestController: 28 records harvested')

    @httpretty.activate
    @patch('boto3.resource', autospec=True)
    def testObjectsHaveRegistryData(self, mock_boto3):
        '''Saved objects should carry the registry collection metadata
        under obj['collection'] and NOT at the top level of the object.'''
        # test OAC objsets
        # Two pages of results: page 1 links to page 2 via startDoc=26.
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf2v19n928',
            body=open(DIR_FIXTURES + '/testOAC-url_next-0.json').read())
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf2v19n928&startDoc=26',
            body=open(DIR_FIXTURES + '/testOAC-url_next-1.json').read())
        self.testFile = DIR_FIXTURES + '/testOAC-url_next-1.json'
        self.ranGet = False
        self.controller.harvest()
        dir_list = os.listdir(self.controller.dir_save)
        self.assertEqual(len(dir_list), 2)
        # Load one saved objset file and inspect a sample object from it.
        objset_saved = json.loads(
            open(os.path.join(self.controller.dir_save, dir_list[0])).read())
        obj = objset_saved[2]
        self.assertIn('source_collection_name', obj)
        self.assertEqual(obj['source_collection_name'],
                         'record source collection name')
        # The registry collection record is attached to each object.
        self.assertIn('collection', obj)
        self.assertIn('@id', obj['collection'][0])
        self.assertIn('title', obj['collection'][0])
        self.assertIn('campus', obj['collection'][0])
        self.assertIn('dcmi_type', obj['collection'][0])
        self.assertIn('description', obj['collection'][0])
        self.assertIn('enrichments_item', obj['collection'][0])
        self.assertIn('name', obj['collection'][0])
        self.assertIn('harvest_extra_data', obj['collection'][0])
        self.assertIn('repository', obj['collection'][0])
        self.assertIn('rights_statement', obj['collection'][0])
        self.assertIn('rights_status', obj['collection'][0])
        self.assertIn('url_harvest', obj['collection'][0])
        self.assertEqual(obj['collection'][0]['@id'],
                         'https://registry.cdlib.org/api/v1/collection/178/')
        # campus/repository live only inside the collection record, not
        # at the top level of the object.
        self.assertNotIn('campus', obj)
        self.assertEqual(obj['collection'][0]['campus'], [{
            '@id': 'https://registry.cdlib.org/api/v1/'
                   'campus/6/',
            'slug': 'UCSD',
            'resource_uri': '/api/v1/campus/6/',
            'position': 6,
            'name': 'UC San Diego'
        }, {
            '@id': 'https://registry.cdlib.org/api/v1/'
                   'campus/1/',
            'slug': 'UCB',
            'resource_uri': '/api/v1/campus/1/',
            'position': 0,
            'name': 'UC Berkeley'
        }])
        self.assertNotIn('repository', obj)
        self.assertEqual(obj['collection'][0]['repository'], [{
            '@id': 'https://registry.cdlib.org/api/v1/'
                   'repository/22/',
            'resource_uri': '/api/v1/repository/22/',
            'name': 'Mandeville Special Collections Library',
            'slug': 'Mandeville-Special-Collections-Library',
            'campus': [{
                'slug': 'UCSD',
                'resource_uri': '/api/v1/campus/6/',
                'position': 6,
                'name': 'UC San Diego'
            }, {
                'slug': 'UCB',
                'resource_uri': '/api/v1/campus/1/',
                'position': 0,
                'name': 'UC Berkeley'
            }]
        }, {
            '@id': 'https://registry.cdlib.org/api/v1/'
                   'repository/36/',
            'resource_uri': '/api/v1/repository/36/',
            'name': 'UCB Department of Statistics',
            'slug': 'UCB-Department-of-Statistics',
            # NOTE(review): campus is a dict here but a list for the
            # first repository above — mirrors the fixture data; confirm
            # downstream consumers accept both shapes.
            'campus': {
                'slug': 'UCB',
                'resource_uri': '/api/v1/campus/1/',
                'position': 0,
                'name': 'UC Berkeley'
            }
        }])
class HarvestOAC_XML_ControllerTestCase(ConfigFileOverrideMixin,
                                        LogOverrideMixin, TestCase):
    '''Test the function of an OAC XML harvest controller'''

    @httpretty.activate
    def setUp(self):
        '''Mock the registry collection API and the OAC XML search feed,
        then build a HarvestController for registry collection 178.'''
        super(HarvestOAC_XML_ControllerTestCase, self).setUp()
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/178/",
            body=open(DIR_FIXTURES + '/collection_api_test_oac_xml.json').read(
            ))
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf0c600134',
            body=open(DIR_FIXTURES + '/testOAC-url_next-0.xml').read())
        self.collection = Collection(
            'https://registry.cdlib.org/api/v1/collection/178/')
        self.setUp_config(self.collection)
        self.controller = fetcher.HarvestController(
            'email@example.com',
            self.collection,
            config_file=self.config_file,
            profile_path=self.profile_path)
        # Parenthesized single-arg print works on both Python 2 and 3;
        # the bare `print "..."` statement is a SyntaxError under Python 3.
        print("DIR SAVE::::: {}".format(self.controller.dir_save))

    def tearDown(self):
        '''Remove config overrides; dir_save is intentionally left on disk
        (unlike the JSON controller case) so output can be inspected.'''
        super(HarvestOAC_XML_ControllerTestCase, self).tearDown()
        self.tearDown_config()

    @httpretty.activate
    @patch('boto3.resource', autospec=True)
    def testOAC_XML_Harvest(self, mock_boto3):
        '''Test the function of the OAC harvest'''
        # Final page of results; the harvest should stop at 24 records.
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf0c600134',
            body=open(DIR_FIXTURES + '/testOAC-url_next-1.xml').read())
        self.assertTrue(hasattr(self.controller, 'harvest'))
        self.controller.harvest()
        print("LOGS:{}".format(self.test_log_handler.formatted_records))
        self.assertEqual(len(self.test_log_handler.records), 2)
        self.assertTrue('UCB Department of Statistics' in
                        self.test_log_handler.formatted_records[0])
        self.assertEqual(self.test_log_handler.formatted_records[1],
                         '[INFO] HarvestController: 24 records harvested')
class OAC_XML_FetcherTestCase(LogOverrideMixin, TestCase):
    '''Test the OAC_XML_Fetcher
    '''

    @httpretty.activate
    def setUp(self):
        # Mock the OAC XML search endpoint and build a fetcher against it.
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf0c600134',
            body=open(DIR_FIXTURES + '/testOAC-url_next-0.xml').read())
        super(OAC_XML_FetcherTestCase, self).setUp()
        self.fetcher = fetcher.OAC_XML_Fetcher(
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf0c600134', 'extra_data')

    def tearDown(self):
        super(OAC_XML_FetcherTestCase, self).tearDown()

    @httpretty.activate
    def testBadOACSearch(self):
        '''A search returning the bad-search fixture should raise
        ValueError from the fetcher constructor.'''
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj--xxxx',
            body=open(DIR_FIXTURES + '/testOAC-badsearch.xml').read())
        self.assertRaises(
            ValueError, fetcher.OAC_XML_Fetcher,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj--xxxx', 'extra_data')

    @httpretty.activate
    def testOnlyTextResults(self):
        '''Test when only texts are in result'''
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj',
            body=open(DIR_FIXTURES + '/testOAC-noimages-in-results.xml').read(
            ))
        h = fetcher.OAC_XML_Fetcher(
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj', 'extra_data')
        self.assertEqual(h.totalDocs, 11)
        # NOTE(review): the lines below read self.fetcher (built in setUp
        # against a different URL/fixture) rather than the local `h`;
        # `h.next()` / `h.groups` looks intended — confirm before relying
        # on these assertions.
        recs = self.fetcher.next()
        self.assertEqual(self.fetcher.groups['text']['end'], 10)
        self.assertEqual(len(recs), 10)

    @httpretty.activate
    def testUTF8ResultsContent(self):
        # A single next() call should consume all 25 docs of the UTF-8
        # fixture without raising.
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj',
            body=open(DIR_FIXTURES + '/testOAC-utf8-content.xml').read())
        h = fetcher.OAC_XML_Fetcher(
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj', 'extra_data')
        self.assertEqual(h.totalDocs, 25)
        self.assertEqual(h.currentDoc, 0)
        objset = h.next()
        self.assertEqual(h.totalDocs, 25)
        self.assertEqual(h.currentDoc, 25)
        self.assertEqual(len(objset), 25)

    @httpretty.activate
    def testAmpersandInDoc(self):
        # Only checks that next() completes without raising on a fixture
        # whose docs contain ampersand entities.
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj',
            body=open(DIR_FIXTURES + '/testOAC-utf8-content.xml').read())
        h = fetcher.OAC_XML_Fetcher(
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj', 'extra_data')
        self.assertEqual(h.totalDocs, 25)
        self.assertEqual(h.currentDoc, 0)
        h.next()

    def testDocHitsToObjset(self):
        '''Check that the _docHits_to_objset to function returns expected
        object for a given input'''
        docHits = ET.parse(open(DIR_FIXTURES + '/docHit.xml')).getroot()
        objset = self.fetcher._docHits_to_objset([docHits])
        obj = objset[0]
        # Each mapped field is a list of {'attrib': ..., 'text': ...} dicts.
        self.assertEqual(obj['relation'][0], {
            'attrib': {},
            'text': 'http://www.oac.cdlib.org/findaid/ark:/13030/tf0c600134'
        })
        self.assertIsInstance(obj['relation'], list)
        self.assertIsNone(obj.get('google_analytics_tracking_code'))
        # reference-image entries carry pixel dimensions and a src URL.
        self.assertIsInstance(obj['reference-image'][0], dict)
        self.assertEqual(len(obj['reference-image']), 2)
        self.assertIn('X', obj['reference-image'][0])
        self.assertEqual(750, obj['reference-image'][0]['X'])
        self.assertIn('Y', obj['reference-image'][0])
        self.assertEqual(564, obj['reference-image'][0]['Y'])
        self.assertIn('src', obj['reference-image'][0])
        self.assertEqual('http://content.cdlib.org/ark:/13030/kt40000501/FID3',
                         obj['reference-image'][0]['src'])
        # thumbnail is a single dict, not a list.
        self.assertIsInstance(obj['thumbnail'], dict)
        self.assertIn('X', obj['thumbnail'])
        self.assertEqual(125, obj['thumbnail']['X'])
        self.assertIn('Y', obj['thumbnail'])
        self.assertEqual(93, obj['thumbnail']['Y'])
        self.assertIn('src', obj['thumbnail'])
        self.assertEqual(
            'http://content.cdlib.org/ark:/13030/kt40000501/thumbnail',
            obj['thumbnail']['src'])
        self.assertIsInstance(obj['publisher'][0], dict)
        self.assertEqual(obj['date'], [{
            'attrib': {
                'q': 'created'
            },
            'text': '7/21/42'
        }, {
            'attrib': {
                'q': 'published'
            },
            'text': '7/21/72'
        }])

    def testDocHitsToObjsetBadImageData(self):
        '''Check when the X & Y for thumbnail or reference image is not an
        integer. Text have value of "" for X & Y'''
        docHits = ET.parse(
            open(DIR_FIXTURES + '/docHit-blank-image-sizes.xml')).getroot()
        objset = self.fetcher._docHits_to_objset([docHits])
        obj = objset[0]
        # Non-numeric sizes are coerced to 0 rather than raising.
        self.assertEqual(0, obj['reference-image'][0]['X'])
        self.assertEqual(0, obj['reference-image'][0]['Y'])
        self.assertEqual(0, obj['thumbnail']['X'])
        self.assertEqual(0, obj['thumbnail']['Y'])

    def testDocHitsToObjsetRemovesBlanks(self):
        '''Blank xml tags (no text value) get propagated through the system
        as "null" values. Eliminate them here to make data cleaner.
        '''
        docHit = ET.parse(open(DIR_FIXTURES +
                               '/testOAC-blank-value.xml')).getroot()
        objset = self.fetcher._docHits_to_objset([docHit])
        obj = objset[0]
        # Only the non-blank title survives the mapping.
        self.assertEqual(obj['title'], [{
            'attrib': {},
            'text': 'Main Street, Cisko Placer Co. [California]. '
                    'Popular American Scenery.'
        }])

    @httpretty.activate
    def testFetchOnePage(self):
        '''Test fetching one "page" of results where no return trips are
        necessary
        '''
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/hb5d5nb7dj',
            body=open(DIR_FIXTURES + '/testOAC-url_next-0.xml').read())
        self.assertTrue(hasattr(self.fetcher, 'totalDocs'))
        self.assertTrue(hasattr(self.fetcher, 'totalGroups'))
        self.assertTrue(hasattr(self.fetcher, 'groups'))
        self.assertIsInstance(self.fetcher.totalDocs, int)
        self.assertEqual(self.fetcher.totalDocs, 24)
        # Initial paging state per document group, before any fetch.
        self.assertEqual(self.fetcher.groups['image']['total'], 13)
        self.assertEqual(self.fetcher.groups['image']['start'], 1)
        self.assertEqual(self.fetcher.groups['image']['end'], 0)
        self.assertEqual(self.fetcher.groups['text']['total'], 11)
        self.assertEqual(self.fetcher.groups['text']['start'], 0)
        self.assertEqual(self.fetcher.groups['text']['end'], 0)
        recs = self.fetcher.next()
        # One fetch consumes the first 10 image docs.
        self.assertEqual(self.fetcher.groups['image']['end'], 10)
        self.assertEqual(len(recs), 10)
class OAC_XML_Fetcher_text_contentTestCase(LogOverrideMixin, TestCase):
    '''Test when results only contain texts'''

    @httpretty.activate
    def testFetchTextOnlyContent(self):
        '''Page through a result set that holds only text documents:
        10 docs on the first page, 1 on the second, then StopIteration.'''
        base = ('http://dsc.cdlib.org/search?facet=type-tab&style=cui&'
                'raw=1&relation=ark:/13030/hb5d5nb7dj')

        def mock_page(query_suffix, fixture_name):
            # Serve a canned fixture for the search URL + paging params.
            httpretty.register_uri(
                httpretty.GET,
                base + query_suffix,
                body=open(DIR_FIXTURES + fixture_name).read())

        mock_page('&DocsPerPage=10', '/testOAC-noimages-in-results.xml')
        mock_page('&DocsPerPage=10&startDoc=1&group=text',
                  '/testOAC-noimages-in-results.xml')
        oac_fetcher = fetcher.OAC_XML_Fetcher(base, 'extra_data',
                                              docsPerPage=10)
        page_one = oac_fetcher.next()
        self.assertEqual(len(page_one), 10)
        self.assertEqual(
            oac_fetcher._url_current,
            base + '&docsPerPage=10&startDoc=1&group=text')
        mock_page('&DocsPerPage=10&startDoc=11&group=text',
                  '/testOAC-noimages-in-results-1.xml')
        page_two = oac_fetcher.next()
        self.assertEqual(len(page_two), 1)
        self.assertEqual(
            oac_fetcher._url_current,
            base + '&docsPerPage=10&startDoc=11&group=text')
        # All 11 text docs consumed; a further call must stop iteration.
        self.assertRaises(StopIteration, oac_fetcher.next)
class OAC_XML_Fetcher_mixed_contentTestCase(LogOverrideMixin, TestCase):
    @httpretty.activate
    def testFetchMixedContent(self):
        '''This interface gets tricky when image & text data are in the
        collection.
        My test Mock object will return an xml with 10 images
        then with 3 images
        then 10 texts
        then 1 text then quit
        '''
        base = ('http://dsc.cdlib.org/search?facet=type-tab&style=cui&'
                'raw=1&relation=ark:/13030/hb5d5nb7dj')

        def mock_page(query_suffix, fixture_name):
            # Serve a canned fixture for the search URL + paging params.
            httpretty.register_uri(
                httpretty.GET,
                base + query_suffix,
                body=open(DIR_FIXTURES + fixture_name).read())

        # Register the initial search and the first image page up front.
        mock_page('&docsPerPage=10', '/testOAC-url_next-0.xml')
        mock_page('&docsPerPage=10&startDoc=1&group=image',
                  '/testOAC-url_next-0.xml')
        oac_fetcher = fetcher.OAC_XML_Fetcher(base, 'extra_data',
                                              docsPerPage=10)
        # (fixture to register before the call or None, expected objset
        #  length, expected _url_current suffix after the call)
        rounds = [
            (None,
             10, '&docsPerPage=10&startDoc=1&group=image'),
            ('/testOAC-url_next-1.xml',
             3, '&docsPerPage=10&startDoc=11&group=image'),
            ('/testOAC-url_next-2.xml',
             10, '&docsPerPage=10&startDoc=1&group=text'),
            ('/testOAC-url_next-3.xml',
             1, '&docsPerPage=10&startDoc=11&group=text'),
        ]
        for fixture_name, expected_count, url_suffix in rounds:
            if fixture_name is not None:
                mock_page(url_suffix, fixture_name)
            objset = oac_fetcher.next()
            self.assertEqual(len(objset), expected_count)
            self.assertEqual(oac_fetcher._url_current, base + url_suffix)
        # Both groups exhausted — the fetcher must now stop iterating.
        self.assertRaises(StopIteration, oac_fetcher.next)
class OAC_JSON_FetcherTestCase(LogOverrideMixin, TestCase):
    '''Exercise the OAC_JSON_Fetcher against a mocked OAC search feed.'''
    # All mocked endpoints share this search URL; the second page of
    # results is selected by appending the startDoc=26 parameter.
    url_search = ('http://dsc.cdlib.org/search?facet=type-tab&style=cui&'
                  'raw=1&relation=ark:/13030/hb5d5nb7dj')

    def _mock_search(self, fixture_name, query_suffix=''):
        # Register a canned fixture body for the search URL (+ suffix).
        httpretty.register_uri(
            httpretty.GET,
            self.url_search + query_suffix,
            body=open(DIR_FIXTURES + fixture_name).read())

    @httpretty.activate
    def setUp(self):
        self._mock_search('/testOAC-url_next-0.json')
        super(OAC_JSON_FetcherTestCase, self).setUp()
        self.fetcher = fetcher.OAC_JSON_Fetcher(
            'http://dsc.cdlib.org/search?rmode=json&facet=type-tab&'
            'style=cui&relation=ark:/13030/hb5d5nb7dj', 'extra_data')

    def tearDown(self):
        super(OAC_JSON_FetcherTestCase, self).tearDown()

    def testParseArk(self):
        '''The findaid ark should be parsed out of the fetcher URL.'''
        parsed = self.fetcher._parse_oac_findaid_ark(self.fetcher.url)
        self.assertEqual(parsed, 'ark:/13030/hb5d5nb7dj')

    @httpretty.activate
    def testOAC_JSON_FetcherReturnedData(self):
        '''test that the data returned by the OAC Fetcher is a proper dc
        dictionary
        '''
        self._mock_search('/testOAC-url_next-1.json', '&startDoc=26')
        first_record = self.fetcher.next()[0]
        self.assertIsInstance(first_record, dict)

    @httpretty.activate
    def testHarvestByRecord(self):
        '''Test the older by single record interface'''
        self._mock_search('/testOAC-url_next-0.json')
        self._mock_search('/testOAC-url_next-1.json', '&startDoc=26')
        self.testFile = DIR_FIXTURES + '/testOAC-url_next-1.json'
        records = []
        record = self.fetcher.next_record()
        try:
            while True:
                records.append(record)
                record = self.fetcher.next_record()
        except StopIteration:
            pass
        # 25 records from page one + 3 from page two.
        self.assertEqual(len(records), 28)

    @httpretty.activate
    def testHarvestIsIter(self):
        '''The fetcher should implement the iterator protocol on itself.'''
        self._mock_search('/testOAC-url_next-1.json', '&startDoc=26')
        self.assertTrue(hasattr(self.fetcher, '__iter__'))
        self.assertEqual(self.fetcher, self.fetcher.__iter__())
        self.fetcher.next_record()
        self.fetcher.next()

    @httpretty.activate
    def testNextGroupFetch(self):
        '''Test that the OAC Fetcher will fetch more records when current
        response set records are all consumed'''
        self._mock_search('/testOAC-url_next-0.json')
        self._mock_search('/testOAC-url_next-1.json', '&startDoc=26')
        self.testFile = DIR_FIXTURES + '/testOAC-url_next-1.json'
        records = []
        self.ranGet = False
        for objset in self.fetcher:
            records.extend(objset)
        self.assertEqual(len(records), 28)

    @httpretty.activate
    def testObjsetFetch(self):
        '''Test fetching data in whole objsets'''
        self._mock_search('/testOAC-url_next-0.json')
        self._mock_search('/testOAC-url_next-1.json', '&startDoc=26')
        self.assertTrue(hasattr(self.fetcher, 'next_objset'))
        self.assertTrue(hasattr(self.fetcher.next_objset, '__call__'))
        objset = self.fetcher.next_objset()
        self.assertIsNotNone(objset)
        self.assertIsInstance(objset, list)
        self.assertEqual(len(objset), 25)
        objset2 = self.fetcher.next_objset()
        self.assertTrue(objset != objset2)
        self.assertRaises(StopIteration, self.fetcher.next_objset)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,145 | ucldc/harvester | refs/heads/master | /harvester/couchdb_pager.py | def couchdb_pager(db, view_name='_all_docs',
                  startkey=None, startkey_docid=None,
                  endkey=None, endkey_docid=None,
                  key=None,
                  bulk=200000, **extra_options):
    '''Yield every row of *view_name* in *db*, fetching ``bulk`` rows per
    HTTP request instead of loading the whole view at once.

    Each page requests ``bulk + 1`` rows; when a full page comes back,
    the extra last row is withheld and its id becomes the
    ``startkey_docid`` of the next request, so it is re-fetched as the
    first row of the following page and never yielded twice.
    '''
    # Request one extra row to resume the listing there later.
    options = {'limit': bulk + 1}
    # NOTE(review): the EXTRA/OPTS/VIEW prints below look like leftover
    # debug output — consider removing or converting to logging.
    print("EXTRA: {}".format(extra_options))
    if extra_options:
        options.update(extra_options)
    if startkey:
        # works with underscore, but should without?
        ###options['startkey'] = startkey #not working
        options['start_key'] = startkey
    if startkey_docid:
        options['startkey_docid'] = startkey_docid
    if endkey:
        ###options['endkey'] = endkey
        options['end_key'] = endkey
    if endkey_docid:
        options['endkey_docid'] = endkey_docid
    if key:
        options['key'] = key
    done = False
    while not done:
        print("OPTS:{}".format(options))
        view = db.view(view_name, **options)
        print("VIEW:{} LEN:{}".format(view, len(view)))
        rows = []
        # If we got a short result (< limit + 1), we know we are done.
        if len(view) <= bulk:
            done = True
            rows = view.rows
        else:
            # Otherwise, continue at the new start position.
            rows = view.rows[:-1]
            last = view.rows[-1]
            ###options['start_key'] = last.key
            # NOTE(review): only startkey_docid advances here; the
            # commented start_key line suggests key-based resume was
            # attempted — confirm this suffices for non-_all_docs views.
            options['startkey_docid'] = last.id
    for row in rows:
        yield row
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,146 | ucldc/harvester | refs/heads/master | /test/test_ucsf_xml_fetcher.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from mypretty import httpretty
# import httpretty
import harvester.fetcher as fetcher
from test.utils import DIR_FIXTURES
from test.utils import LogOverrideMixin
class UCSFXMLFetcherTestCase(LogOverrideMixin, TestCase):
    '''Tests for the fetcher that drives the UCSF XML search interface.'''

    @httpretty.activate
    def testInit(self):
        '''The constructor primes paging state from the first result page.'''
        search_url = (
            'https://example.edu/action/search/xml?q=ddu%3A20*&'
            'asf=ddu&asd=&fd=1&_hd=&hd=on&sf=&_rs=&_ef=&ef=on&sd=&ed=&c=ga')
        with open(DIR_FIXTURES + '/ucsf-page-1.xml') as fixture:
            httpretty.register_uri(
                httpretty.GET, search_url, body=fixture.read())
        ucsf = fetcher.UCSF_XML_Fetcher(search_url, None, page_size=3)
        self.assertEqual(ucsf.url_base, search_url)
        self.assertEqual(ucsf.page_size, 3)
        self.assertEqual(ucsf.page_current, 1)
        # The fetcher appends its own paging parameters to the base URL.
        self.assertEqual(ucsf.url_current, search_url + '&ps=3&p=1')
        self.assertEqual(ucsf.docs_total, 7)

    @httpretty.activate
    def testFetch(self):
        '''Iterating the fetcher yields every document across all pages.'''
        search_url = (
            'https://example.edu/action/search/xml?q=ddu%3A20*&'
            'asf=ddu&asd=&fd=1&_hd=&hd=on&sf=&_rs=&_ef=&ef=on&sd=&ed=&c=ga')
        # Page 1 is served twice: once for the constructor's priming
        # request and once for the first iteration request.
        fixture_pages = ['ucsf-page-1.xml', 'ucsf-page-1.xml',
                         'ucsf-page-2.xml', 'ucsf-page-3.xml']
        canned_responses = [
            httpretty.Response(
                open(DIR_FIXTURES + '/' + page).read(), status=200)
            for page in fixture_pages
        ]
        httpretty.register_uri(
            httpretty.GET, search_url, responses=canned_responses)
        ucsf = fetcher.UCSF_XML_Fetcher(search_url, None, page_size=3)
        docs = []
        for page_docs in ucsf:
            docs.extend(page_docs)
        self.assertEqual(len(docs), 7)
        first = docs[0]
        self.assertIn('tid', first)
        self.assertEqual(first['tid'], "nga13j00")
        self.assertEqual(first['uri'],
                         'http://legacy.library.ucsf.edu/tid/nga13j00')
        self.assertIn('aup', first['metadata'])
        self.assertEqual(first['metadata']['aup'], ['Whent, Peter'])
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,147 | ucldc/harvester | refs/heads/master | /test/test_config.py | import os
from unittest import TestCase
from harvester import config
class QueueListTestCase(TestCase):
    '''Ensure the config module exposes the static RQ queue list.'''

    def testQueueList(self):
        has_queue_list = hasattr(config, 'RQ_Q_LIST')
        self.assertTrue(has_queue_list)
class ConfigReturnTestCase(TestCase):
'''Verify config returns expected values from env'''
def setUp(self):
self.os_cached = dict(os.environ)
os.environ['REDIS_HOST'] = 'test_redis_host'
os.environ['REDIS_PORT'] = 'test_redis_port'
os.environ['REDIS_CONNECT_TIMEOUT'] = 'test_redis_timeout'
os.environ['REDIS_PASSWORD'] = 'test_redis_password'
os.environ['COUCHDB_URL'] = 'test_couchdb_url'
os.environ['COUCHDB_USER'] = 'test_couchdb_user'
os.environ['COUCHDB_PASSWORD'] = 'test_couchdb_password'
os.environ['COUCHDB_DB'] = 'test_couchdb_dbname'
os.environ['COUCHDB_DASHBOARD'] = 'test_couchdb_dashname'
def tearDown(self):
del os.environ['REDIS_HOST']
del os.environ['REDIS_PORT']
del os.environ['REDIS_CONNECT_TIMEOUT']
del os.environ['COUCHDB_URL']
del os.environ['COUCHDB_USER']
del os.environ['COUCHDB_PASSWORD']
del os.environ['COUCHDB_DB']
del os.environ['COUCHDB_DASHBOARD']
os.environ = self.os_cached
def testConfigValues(self):
cfg = config.config()
self.assertEqual(cfg['redis_host'], 'test_redis_host')
self.assertEqual(cfg['redis_port'], 'test_redis_port')
self.assertEqual(cfg['redis_connect_timeout'], 'test_redis_timeout')
self.assertEqual(cfg['redis_password'], 'test_redis_password')
self.assertEqual(cfg['couchdb_url'], 'test_couchdb_url')
self.assertEqual(cfg['couchdb_username'], 'test_couchdb_user')
self.assertEqual(cfg['couchdb_password'], 'test_couchdb_password')
self.assertEqual(cfg['couchdb_dbname'], 'test_couchdb_dbname')
self.assertEqual(cfg['couchdb_dashboard'], 'test_couchdb_dashname')
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,148 | ucldc/harvester | refs/heads/master | /rqw-settings.py | REDIS_HOST="{{ redis_host }}"
REDIS_PORT={{ redis_port }}
REDIS_CONNECT_TIMEOUT={{ redis_connect_timeout }}
QUEUES= [ 'high{{ name_suffix }}', 'normal{{ name_suffix }}',
'low{{ name_suffix }}']
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,149 | ucldc/harvester | refs/heads/master | /harvester/harvest_all_registry_collections.py | from harvester.collection_registry_client import ResourceIterator
from harvester.collection_registry_client import url_base, api_path
from harvester.config import config
from harvester.scripts.queue_harvest import main as queue_harvest
# Walk every collection resource in the registry API and enqueue a
# harvest job for each one.  (Python 2 script: uses the print statement.)
for c in ResourceIterator(url_base, api_path+'collection', 'collection'):
    # 'X' appears to mark collections excluded from harvesting --
    # TODO(review): confirm against the registry's harvest_type values.
    if c.harvest_type != 'X':
        print c.name, c.slug, c.harvest_type, c.url_harvest
        # Re-read the environment config on every iteration; presumably
        # cheap, though it could be hoisted out of the loop.
        env = config()
        queue_harvest('mark.redar@ucop.edu', url_base+c.resource_uri,
                      redis_host=env['redis_host'],
                      redis_port=env['redis_port'],
                      redis_pswd=env['redis_password'],
                      id_ec2_ingest=env['id_ec2_ingest'],
                      id_ec2_solr=env['id_ec2_solr_build'],
                      job_timeout=6000
                      )
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,150 | ucldc/harvester | refs/heads/master | /harvester/fetcher/solr_fetcher.py | # -*- coding: utf-8 -*-
import solr
import pysolr
from .fetcher import Fetcher
import urlparse
import requests
class SolrFetcher(Fetcher):
    '''Fetcher backed by the ``solr`` client library.

    Issues the query once up front and then pages through result
    batches as iteration proceeds.
    '''

    def __init__(self, url_harvest, query, **query_params):
        super(SolrFetcher, self).__init__(url_harvest, query)
        self.solr = solr.Solr(url_harvest)  # , debug=True)
        self.query = query
        self.resp = self.solr.select(self.query)
        self.numFound = self.resp.numFound
        self.index = 0

    def next(self):
        '''Return the next document, pulling in a fresh batch when the
        current one is exhausted; raises StopIteration at end of data.
        '''
        batch = self.resp.results
        if self.index < len(batch):
            current = batch[self.index]
            self.index += 1
            return current
        # Current batch is used up -- advance to the next one.
        self.index = 1
        self.resp = self.resp.next_batch()
        if not len(self.resp.results):
            raise StopIteration
        return self.resp.results[0]
class PySolrFetcher(Fetcher):
    '''Fetcher that pages through a Solr index with pysolr using
    cursorMark deep paging (sorted by id, 100 rows per request).
    '''

    def __init__(self,
                 url_harvest,
                 query,
                 handler_path='select',
                 **query_params):
        super(PySolrFetcher, self).__init__(url_harvest, query, **query_params)
        self.solr = pysolr.Solr(url_harvest, timeout=1)
        self._handler_path = handler_path
        # Defaults required for cursorMark paging: a stable sort on the
        # uniqueKey and an initial cursor of '*'.  Caller-supplied
        # query_params may override any of these.
        self._query_params = {
            'q': query,
            'wt': 'json',
            'sort': 'id asc',
            'rows': 100,
            'cursorMark': '*'
        }
        self._query_params.update(query_params)
        self._nextCursorMark = '*'
        # Prime the first page so numFound is available immediately.
        self.get_next_results()
        self.numFound = self.results['response'].get('numFound')
        self.index = 0

    @property
    def _query_path(self):
        # Relative request path: handler plus the URL-encoded params.
        self._query_params_encoded = pysolr.safe_urlencode(self._query_params)
        return '{}?{}'.format(self._handler_path, self._query_params_encoded)

    def get_next_results(self):
        '''Fetch the next page: advance the cursor, decode the response,
        and reset the per-page document iterator.
        '''
        self._query_params['cursorMark'] = self._nextCursorMark
        resp = self.solr._send_request('get', path=self._query_path)
        self.results = self.solr.decoder.decode(resp)
        self._nextCursorMark = self.results.get('nextCursorMark')
        self.iter = self.results['response']['docs'].__iter__()

    def next(self):
        '''Return the next document, transparently crossing page
        boundaries; raises StopIteration when all docs are consumed.
        '''
        try:
            next_result = self.iter.next()
            self.index += 1
            return next_result
        except StopIteration:
            if self.index >= self.numFound:
                raise StopIteration
            self.get_next_results()
            # An unchanged cursorMark means Solr has no further pages.
            if self._nextCursorMark == self._query_params['cursorMark']:
                if self.index >= self.numFound:
                    raise StopIteration
            if len(self.results['response']['docs']) == 0:
                raise StopIteration
            self.index += 1
            return self.iter.next()
class PySolrQueryFetcher(PySolrFetcher):
    ''' Use the `select` url path for querying instead of 'query'. This is
    more typical for most Solr applications.
    '''

    def __init__(self, url_harvest, query, **query_params):
        # Identical to PySolrFetcher except for the handler path.
        super(PySolrQueryFetcher, self).__init__(
            url_harvest,
            query,
            handler_path='query',
            **query_params)
class RequestsSolrFetcher(Fetcher):
    '''A fetcher for solr that uses just the requests library
    The URL is the URL up to the "select" bit (may change in future)
    Extra_data is one of 2 formats:
    just a string -- it is the "q" string
    URL encoded query string ->
    q=<query>&header=<name>:<value>&header=<name>:<value>
    The auth parameter will be parsed to figure out type of authentication
    needed, right now just deal with "header" token authentication
    '''

    def __init__(self, url_harvest, extra_data, **kwargs):
        super(RequestsSolrFetcher, self).__init__(url_harvest, extra_data,
                                                  **kwargs)
        # will need to change URLs for existing to add /select in general
        # the bampfa has NO /select or /query
        self._query_iter_template = \
            '?rows={rows}&cursorMark={cursorMark}'
        self._query_params = urlparse.parse_qs(extra_data)
        if not self._query_params:  # Old style, just "q" bit of query
            self._query_params = {'q': [extra_data]}
        self._page_size = 1000
        self._cursorMark = None
        self._nextCursorMark = '*'
        self._headers = {}
        # Pull 'header' pseudo-params out into request headers.  Iterate
        # over a snapshot of the items: the previous code deleted keys
        # from the dict while iterating it directly (undefined behavior,
        # and a RuntimeError under Python 3) and shadowed the outer loop
        # variable 'value' with the inner one.
        for name, values in list(self._query_params.items()):
            if name == 'header':
                for header_spec in values:  # its a list
                    header_name, header_value = header_spec.split(':', 1)
                    self._headers[header_name] = header_value
                del self._query_params[name]
        # Fill in defaults needed for cursorMark paging.
        if 'wt' not in self._query_params:
            self._query_params.update({'wt': ['json']})
        if 'sort' not in self._query_params:
            self._query_params.update({'sort': ['id asc']})

    @property
    def end_of_feed(self):
        # Solr signals the last page by returning an unchanged cursor.
        return self._cursorMark == self._nextCursorMark

    @property
    def url_request(self):
        '''Build the URL for the current page request.'''
        url_request = ''.join((
            self.url,
            self._query_iter_template.format(
                rows=self._page_size, cursorMark=self._cursorMark), ))
        # join 'q' and all other params; only the first value of each
        # parameter is used (parse_qs values are lists)
        for name, value in self._query_params.items():
            url_request = ''.join((url_request, '&', name, '=', value[0]))
        return url_request

    def get_response(self):
        '''Get the correct response for the given combo of params'''
        return requests.get(self.url_request, headers=self._headers)

    def next(self):
        '''get the next page of solr data, using the cursor mark to build
        URL

        Returns the list of docs for the page; raises StopIteration when
        the cursor stops advancing.
        '''
        if (self.end_of_feed):
            raise StopIteration
        # get resp
        self._cursorMark = self._nextCursorMark
        resp = self.get_response()
        resp.raise_for_status()
        resp_obj = resp.json()
        self._nextCursorMark = resp_obj['nextCursorMark']
        return resp_obj['response']['docs']
# Copyright © 2017, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,151 | ucldc/harvester | refs/heads/master | /harvester/sync_couch_to_solr.py | '''Push contents of couchdb to solr index'''
import os
import sys
import urllib
import requests
import solr
import couchdb
from solr import SolrException
from solr_updater import map_couch_to_solr_doc, push_doc_to_solr
from harvester.couchdb_pager import couchdb_pager
from harvester.couchdb_init import get_couchdb
URL_SOLR = os.environ.get('URL_SOLR', None)
URL_COUCHDB = os.environ.get('URL_COUCHDB', 'http://localhost:5984')
COUCHDB_DB = os.environ.get('COUCHDB_DB', 'ucldc')
def main(url_solr=URL_SOLR, url_couchdb=None, couchdb_db=None):
    '''Push every non-design couchdb document into the solr index.

    For each document: normalize the originalRecord/sourceResource
    ``collection`` fields to lists and the ``sourceResource.subject``
    entries to ``{'name': ...}`` dicts, save the normalized doc back to
    couchdb, then map and push it to solr.
    '''
    solr_db = solr.Solr(url_solr)
    db = get_couchdb(url=url_couchdb, dbname=couchdb_db)
    v = couchdb_pager(db, include_docs='true')
    # update or create new solr doc for each couchdb doc
    for r in v:
        doc_couch = r.doc
        if '_design' in doc_couch['_id']:
            continue  # skip couchdb design documents
        try:
            if not isinstance(doc_couch['originalRecord']['collection'],
                              list):
                doc_couch['originalRecord']['collection'] = [
                    doc_couch['originalRecord']['collection'],
                ]
            # BUGFIX: this debug line used to print the sourceResource
            # collection under the "orgRec" label (and could raise a
            # spurious KeyError); print the field it actually labels.
            print("orgRec.Collection: {}".format(
                doc_couch['originalRecord']['collection']))
        except KeyError:
            pass
        try:
            if not isinstance(doc_couch['sourceResource']['collection'],
                              list):
                doc_couch['sourceResource']['collection'] = [
                    doc_couch['sourceResource']['collection'],
                ]
            # BUGFIX: this debug line used to print the subject field
            # under the "srcRes.Collection" label; print the collection.
            print("srcRes.Collection: {}".format(
                doc_couch['sourceResource']['collection']))
        except KeyError:
            pass
        try:
            subject = doc_couch['sourceResource'].get('subject', None)
            if not isinstance(subject, list):
                subject = [subject]
            # wrap bare subject values as {'name': value} dicts
            subjects_norm = []
            for sub in subject:
                if not isinstance(sub, dict):
                    subjects_norm.append({'name': sub})
                else:
                    subjects_norm.append(sub)
            doc_couch['sourceResource']['subject'] = subjects_norm
        except KeyError:
            pass
        db.save(doc_couch)
        try:
            # push_doc_to_solr raises TypeError for unmappable docs;
            # skip those (return value was never used, so don't bind it)
            push_doc_to_solr(map_couch_to_solr_doc(doc_couch),
                             solr_db=solr_db)
            print("PUSHED {} to solr".format(doc_couch['_id']))
        except TypeError:
            pass
    solr_db.commit()
# Script entry point: sync using the environment-configured solr/couchdb.
if __name__ == '__main__':
    main()
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,152 | ucldc/harvester | refs/heads/master | /test/test_xml_fetcher.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from mypretty import httpretty
# import httpretty
import harvester.fetcher as fetcher
from test.utils import DIR_FIXTURES
from test.utils import LogOverrideMixin
class XMLFetcherTestCase(LogOverrideMixin, TestCase):
    '''Exercise the fetcher for the generic xml search interface.'''

    @httpretty.activate
    def testInit(self):
        '''Fetch every page of a canned XML fixture and spot-check docs.'''
        url = 'https://s3.amazonaws.com/pastperfectonline/xmlfiles/museum_231'
        httpretty.register_uri(
            httpretty.GET,
            url,
            body=open(DIR_FIXTURES + '/xml-fetch.xml').read())
        xml_fetcher = fetcher.XML_Fetcher(url, None)
        self.assertEqual(xml_fetcher.url_base, url)
        first_page = xml_fetcher.next()
        self.assertEqual(len(first_page), 999)
        harvested = list(first_page)
        for page in xml_fetcher:
            harvested.extend(page)
        self.assertEqual(len(harvested), 2320)
        first_doc = harvested[0]
        third_doc = harvested[2]
        self.assertIn('title', first_doc['metadata'])
        self.assertEqual(first_doc['metadata']['title'], [
            'California desperadoes : stories of early California outlaws in their own word'
        ])
        # attributes must be captured, even from empty elements
        self.assertEqual(first_doc['metadata']['q'], ['taken'])
        self.assertEqual(first_doc['metadata']['d'], ['Kodak'])
        self.assertEqual(third_doc['metadata']['q'], ['scanned'])
        self.assertEqual(third_doc['metadata']['d'], ['Epson'])
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,153 | ucldc/harvester | refs/heads/master | /test/test_dedupe_sourceresource.py | from unittest import TestCase
import json
from test.utils import DIR_FIXTURES
from harvester.post_processing import dedupe_sourceresource
class DeduperTestCase(TestCase):
    '''Tests for the sourceResource de-duplication post-processor.'''

    def setUp(self):
        self.path_to_test_doc = DIR_FIXTURES + '/couchdb_doc_with_dups.json'
        # pristine copy to compare against -- the function mutates its input
        with open(self.path_to_test_doc) as ref_file:
            self.reference_doc = json.load(ref_file)

    def test_dedupe(self):
        ''' Test that the de-duplication function returns a dedupe'd
        document
        '''
        with open(self.path_to_test_doc) as doc_file:
            doc_with_dups = json.load(doc_file)
        self.assertEqual(len(doc_with_dups['sourceResource']['relation']), 11)
        self.assertEqual(len(doc_with_dups['sourceResource']['subject']), 7)
        new_doc = dedupe_sourceresource.dedupe_sourceresource(doc_with_dups)
        # everything outside sourceResource must be untouched
        for unchanged_key in ('originalRecord', '_id', 'id', 'object',
                              'isShownAt'):
            self.assertEqual(self.reference_doc[unchanged_key],
                             new_doc[unchanged_key])
        self.assertNotEqual(self.reference_doc['sourceResource'],
                            new_doc['sourceResource'])
        self.assertEqual(len(new_doc['sourceResource']['relation']), 6)
        self.assertEqual(
            new_doc['sourceResource']['relation'],
            [u'http://www.oac.cdlib.org/findaid/ark:/13030/ft6k4007pc',
             u'http://bancroft.berkeley.edu/collections/jarda.html',
             u'hb158005k9',
             u'BANC PIC 1986.059--PIC',
             u'http://calisphere.universityofcalifornia.edu/',
             u'http://bancroft.berkeley.edu/'])
        self.assertEqual(len(new_doc['sourceResource']['subject']), 2)
        self.assertEqual(
            new_doc['sourceResource']['subject'],
            [{u'name': u'Yoshiko Uchida photograph collection'},
             {u'name': u'Japanese American Relocation Digital Archive'}])
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,154 | ucldc/harvester | refs/heads/master | /scripts/rq-requeue-registry-fails.py | from rq import Queue, requeue_job
from rq.queue import FailedQueue
from redis import Redis
import os
# Triage the rq failed queue: bucket failed jobs by failure cause (from the
# stored traceback text), report counts, and -- when the commented requeue
# lines are enabled -- requeue or retune the recoverable ones.
conn_redis = Redis(host=os.environ['REDIS_HOST'],
                   password=os.environ['REDIS_PASSWORD'])
qfailed = FailedQueue(connection=conn_redis)
fail_registry_api = []      # registry.cdlib.org connection errors
fail_no_xtf_results = []    # XTF searches that raised "no results"
fail_timeout = []           # jobs that exceeded their rq timeout
fail_other = []             # everything else
###for job in Queue(connection=conn_redis).jobs:
###    job.timeout = 2*job.timeout
###    job.save()
###    print job, job.timeout
for job in qfailed.jobs:
    # classify by substring match against the stored traceback (exc_info)
    # (a stray no-op string literal that used to sit here was removed)
    if "HTTPSConnectionPool(host='registry.cdlib.org'" in job.exc_info:
        fail_registry_api.append(job)
    elif "ValueError: http://dsc.cdlib.org/search" in job.exc_info:
        fail_no_xtf_results.append(job)
    elif "Job exceeded maximum timeout value" in job.exc_info:
        fail_timeout.append(job)
    else:
        fail_other.append(job)
print(80*'=')
print('Registry connection fails:{0}, XTF No results:{1}, Timeout:{2} Other:{3}'.format(
    len(fail_registry_api),
    len(fail_no_xtf_results),
    len(fail_timeout),
    len(fail_other)
    )
)
print(80*'=')
print('\n\n')
#for job in fail_other:
#    print job.exc_info
for job in fail_no_xtf_results:
    print('ValueError job: {}\n\n'.format(job))#, job.exc_info))
    #job.cancel()
# report LAPL MARC (collection 26094) failures specially
for job in fail_other:
    try:
        if "26094" in job.args[1]:
            print('LAPL MARC exc_info:{}'.format(job.exc_info))
    except IndexError:
        pass
for job in fail_registry_api:
    print('Job to requeue:{}'.format(job))
    #requeue_job(job.get_id(), connection=conn_redis)
# double the timeout of timed-out jobs before (manually) requeueing them
for job in fail_timeout:
    print('TIMEOUT Before:{} {}'.format(job.timeout, job))
    job.timeout = 2*job.timeout
    job.save()
    print('TIMEOUT after:{} {}'.format(job.timeout, job))
    #requeue_job(job.get_id(), connection=conn_redis)
n_run_ingest = n_img_harv = n_no_shown_by = 0
for job in fail_other:
    if '__getitem__' in job.exc_info:
        # missing isShownBy (KeyError surfaces as __getitem__): cancel
        n_no_shown_by += 1
        print('{} {}'.format(job, job.exc_info))
        job.cancel()
    if 'Image_harvest' in job.get_call_string():
        n_img_harv += 1
        #print('{} {}'.format(job, job.exc_info))
    if 'run_ingest' in job.get_call_string():
        n_run_ingest += 1
        job.timeout = 2*job.timeout
        job.save()
        #print('REQUEUE: {}\nTIMEOUT: {}'.format(job, job.timeout))
        #requeue_job(job.get_id(), connection=conn_redis)
    else:
        print(job.get_call_string())
print('RIngest:{} IMG:{} NOSHOWNBY:{}'.format(n_run_ingest, n_img_harv, n_no_shown_by))
#print('id {} get_call_string {}, args {}'.format(job.get_id(), job.get_call_string(), job.args))
# Reference only: attributes available on an rq Job instance (dir(job) dump)
dir_job_listing = '''
['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_args', '_data', '_dependency_id', '_func_name', '_get_status', '_id', '_instance', '_kwargs', '_result', '_set_status', '_status', '_unpickle_data', 'args', 'cancel', 'cleanup', 'connection', 'create', 'created_at', 'data', 'delete', 'dependency', 'dependents_key', 'dependents_key_for', 'description', 'dump', 'ended_at', 'enqueued_at', 'exc_info', 'exists', 'fetch', 'func', 'func_name', 'get_call_string', 'get_id', 'get_status', 'get_ttl', 'id', 'instance', 'is_failed', 'is_finished', 'is_queued', 'is_started', 'key', 'key_for', 'kwargs', 'meta', 'origin', 'perform', 'refresh', 'register_dependency', 'result', 'result_ttl', 'return_value', 'save', 'set_id', 'set_status', 'status', 'timeout']
'''
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,155 | ucldc/harvester | refs/heads/master | /test/test_integration_tests.py | from unittest import TestCase
from test.utils import skipUnlessIntegrationTest
from test.utils import ConfigFileOverrideMixin
from harvester.fetcher import get_log_file_path
@skipUnlessIntegrationTest()
class CouchIntegrationTestCase(ConfigFileOverrideMixin, TestCase):
    '''Exercise couch ingest-document creation in a test environment.'''
    def setUp(self):
        super(CouchIntegrationTestCase, self).setUp()
        self.collection = Collection('fixtures/collection_api_test.json')
        conf, profile = self.setUp_config(self.collection)
        self.controller_oai = fetcher.HarvestController(
            'email@example.com',
            self.collection,
            profile_path=profile,
            config_file=conf)
        # Only remove 'logs' in tearDown if this test created it.
        self.remove_log_dir = not os.path.isdir('logs')
        if self.remove_log_dir:
            os.makedirs('logs')
    def tearDown(self):
        super(CouchIntegrationTestCase, self).tearDown()
        if self.remove_log_dir:
            shutil.rmtree('logs')
    def testCouchDocIntegration(self):
        '''Test the couch document creation in a test environment'''
        self.ingest_doc_id = self.controller_oai.create_ingest_doc()
        self.controller_oai.update_ingest_doc(
            'error', error_msg='This is an error')
@skipUnlessIntegrationTest()
class HarvesterLogSetupTestCase(TestCase):
    '''Verify the harvest log directory gets created and is usable.'''
    def testLogDirExists(self):
        path = fetcher.get_log_file_path('x')
        parent_dir = path.rsplit('/', 1)[0]
        self.assertTrue(os.path.isdir(parent_dir))
@skipUnlessIntegrationTest()
class MainMailIntegrationTestCase(TestCase):
    '''Check that fetcher.main routes failures through the mail handler.'''
    def setUp(self):
        '''Need to run fakesmtp server on local host'''
        sys.argv = ['thisexe', 'email@example.com',
                    'https://xregistry-dev.cdlib.org/api/v1/collection/197/']
    def testMainFunctionMail(self):
        '''This should error out and send mail through error handler'''
        with self.assertRaises(requests.exceptions.ConnectionError):
            fetcher.main(
                'email@example.com',
                'https://xregistry-dev.cdlib.org/api/v1/collection/197/')
@skipUnlessIntegrationTest()
class ScriptFileTestCase(TestCase):
    '''Test that the script file exists and is executable. Check that it
    starts the correct process
    '''
    def testScriptFileExists(self):
        '''Test that the ScriptFile exists'''
        default_script = os.path.join(
            os.environ['HOME'], 'code/harvester/start_harvest.bash')
        path_script = os.environ.get('HARVEST_SCRIPT', default_script)
        self.assertTrue(os.path.exists(path_script))
@skipUnlessIntegrationTest()
class FullOACHarvestTestCase(ConfigFileOverrideMixin, TestCase):
    '''Run a complete OAC harvest against a local registry instance.'''
    def setUp(self):
        self.collection = Collection(
            'http://localhost:8000/api/v1/collection/200/')
        self.setUp_config(self.collection)
    def tearDown(self):
        self.tearDown_config()
    def testFullOACHarvest(self):
        self.assertIsNotNone(self.collection)
        self.controller = fetcher.HarvestController(
            'email@example.com',
            self.collection,
            config_file=self.config_file,
            profile_path=self.profile_path)
        # 26 is the known record count for test collection 200.
        self.assertEqual(self.controller.harvest(), 26)
@skipUnlessIntegrationTest()
class FullOAIHarvestTestCase(ConfigFileOverrideMixin, TestCase):
    '''Run a complete OAI harvest against a local registry instance.'''
    def setUp(self):
        self.collection = Collection(
            'http://localhost:8000/api/v1/collection/197/')
        self.setUp_config(self.collection)
    def tearDown(self):
        self.tearDown_config()
        shutil.rmtree(self.controller.dir_save)
    def testFullOAIHarvest(self):
        self.assertIsNotNone(self.collection)
        self.controller = fetcher.HarvestController(
            'email@example.com',
            self.collection,
            config_file=self.config_file,
            profile_path=self.profile_path)
        # 128 is the known record count for test collection 197.
        self.assertEqual(self.controller.harvest(), 128)
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,156 | ucldc/harvester | refs/heads/master | /harvester/couchdb_sync_db_by_collection.py | '''This is a script and class? to sync 2 couchdb databases by a registry
collection.
It uses the view all_provider_docs/_view/by_provider_name_wdoc with the key
given by the id of the collection.
It checks that the collection has "ready_for_publication" set before syncing.
It then syncs to the harvesting environments default couchdb instance by just
adding the documents from the source.
'''
from os import environ
import sys
from harvester.collection_registry_client import Collection
from harvester.couchdb_init import get_couchdb
from harvester.post_processing.couchdb_runner import CouchDBCollectionFilter
from harvester.post_processing.couchdb_runner import get_collection_doc_ids
from harvester.sns_message import publish_to_harvesting
from harvester.sns_message import format_results_subject
COUCHDB_VIEW_COLL_IDS = 'all_provider_docs/by_provider_name'
# use couchdb_init to get environment couchdb
def delete_id_list(ids, _couchdb=None):
    '''Delete the documents with the given ids from a couchdb.

    :param ids: iterable of couchdb document ids
    :param _couchdb: couchdb database object exposing get()/delete()
    :returns: tuple (number_deleted, list_of_deleted_ids)

    Ids that do not resolve to a document are silently skipped.
    '''
    deleted = []
    for did in ids:
        doc = _couchdb.get(did)
        if not doc:
            # id not present (or already deleted) -- nothing to do
            continue
        _couchdb.delete(doc)
        deleted.append(did)
        # Was the py2-only ``print >> sys.stderr`` chevron syntax
        # (a SyntaxError on py3); write() works on both versions.
        sys.stderr.write("DELETED: {0}\n".format(did))
    return len(deleted), deleted
def delete_collection(cid):
    '''Delete all couchdb documents for registry collection id *cid*.

    Looks the document ids up via CouchDBCollectionFilter, deletes them
    with delete_id_list, then publishes a summary to the harvesting SNS
    topic.

    :param cid: registry collection id
    :returns: tuple (number_deleted, list_of_deleted_ids)
    '''
    # Was the py2-only ``print >> sys.stderr`` chevron syntax
    # (a SyntaxError on py3); write() works on both versions.
    sys.stderr.write("DELETING COLLECTION: {}\n".format(cid))
    _couchdb = get_couchdb()
    rows = CouchDBCollectionFilter(collection_key=cid, couchdb_obj=_couchdb)
    ids = [row['id'] for row in rows]
    num_deleted, deleted_docs = delete_id_list(ids, _couchdb=_couchdb)
    subject = format_results_subject(cid,
                                     'Deleted documents from CouchDB {env} ')
    publish_to_harvesting(
        subject,
        'Deleted {} documents from CouchDB collection CID: {}'.format(
            num_deleted,
            cid))
    return num_deleted, deleted_docs
def collection_ready_for_publication(url_api_collection):
    '''Return True when the registry collection at *url_api_collection*
    currently has its "ready_for_publication" flag checked.
    '''
    return Collection(url_api_collection).ready_for_publication
def update_from_remote(doc_id,
url_remote_couchdb=None,
couchdb_remote=None,
couchdb_env=None):
'''Update the environment's couchdb from a remote couchdb document
'''
msg = None
if not couchdb_remote:
couchdb_remote = get_couchdb(url_remote_couchdb)
if not couchdb_env:
couchdb_env = get_couchdb()
doc = couchdb_remote.get(doc_id)
# need to remove the revision data, as will be different
del doc['_rev']
# if doc exists, need to update metadata for the existing document
# and then save that, due to revision number in couch
doc_in_target = couchdb_env.get(doc_id)
if doc_in_target:
doc_in_target.update(doc)
couchdb_env[doc_id] = doc_in_target
msg = "updated {}".format(doc_id)
else:
doc_no_rev = doc.copy()
couchdb_env[doc_id] = doc_no_rev
msg = "created {}".format(doc_id)
print >> sys.stderr, msg
return msg
def queue_update_from_remote(queue,
                             url_api_collection,
                             url_couchdb_source=None):
    '''Enqueue a couchdb doc-id update onto another environment's queue.

    This environment's couchdb url becomes the remote in the eventual
    call to update_from_remote. Override to run from the target
    environment. Currently an unimplemented stub.
    '''
    pass
def update_collection_from_remote(url_remote_couchdb,
                                  url_api_collection,
                                  delete_first=True):
    '''Update a collection from a remote couchdb.

    :param url_remote_couchdb: url of the source couchdb
    :param url_api_collection: registry api endpoint for the collection
    :param delete_first: when True (default), delete the collection's
        existing documents in the target couchdb before copying
    :returns: tuple (total_docs, updated_count, created_count)
    :raises Exception: in a production environment when the collection
        is not marked ready_for_publication
    '''
    collection = Collection(url_api_collection)
    # Guard against updating production for not ready_for_publication
    # collections.  BUGFIX: this check now runs BEFORE the destructive
    # delete below -- previously the collection was deleted first and
    # then the sync aborted, losing the production data.
    if 'prod' in environ.get('DATA_BRANCH', ''):
        if not collection.ready_for_publication:
            raise Exception(
                'In PRODUCTION ENV and collection {} not ready for '
                'publication'.format(collection.id))
    if delete_first:
        delete_collection(url_api_collection.rsplit('/', 2)[1])
    doc_ids = get_collection_doc_ids(collection.id, url_remote_couchdb)
    couchdb_remote = get_couchdb(url_remote_couchdb)
    couchdb_env = get_couchdb()
    created = 0
    updated = 0
    for doc_id in doc_ids:
        msg = update_from_remote(
            doc_id, couchdb_remote=couchdb_remote, couchdb_env=couchdb_env)
        if 'created' in msg:
            created += 1
        else:
            updated += 1
    return len(doc_ids), updated, created
def main(url_remote_couchdb, url_api_collection):
    '''Sync a remote couchdb collection into this environment's couchdb
    and announce the result over the harvesting SNS topic.
    '''
    collection = Collection(url_api_collection)
    total, updated, created = update_collection_from_remote(
        url_remote_couchdb, url_api_collection)
    summary = '\n'.join([
        'Synced {} documents to production for CouchDB collection {}'.format(
            total, collection.id),
        'Updated {} documents, created {} documents.'.format(
            updated, created),
    ])
    publish_to_harvesting(
        'Synced CouchDB Collection {}'.format(collection.id),
        summary)
# CLI entry point: sync one registry collection from a remote couchdb
# into the current environment's couchdb.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Update current env couchdb from a remote couchdb for \
        given collection')
    parser.add_argument(
        'url_remote_couchdb', help='URL to the remote (source) couchdb')
    parser.add_argument(
        'url_api_collection', help='Registry api endpoint for the collection')
    # Skip argv[0] (the program name); argparse handles the rest.
    args = parser.parse_args(sys.argv[1:])
    main(args.url_remote_couchdb, args.url_api_collection)
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,157 | ucldc/harvester | refs/heads/master | /harvester/fetcher/__init__.py | from .fetcher import Fetcher
from .fetcher import NoRecordsFetchedException
from .oai_fetcher import OAIFetcher
from .solr_fetcher import SolrFetcher
from .solr_fetcher import PySolrFetcher
from .solr_fetcher import PySolrQueryFetcher
from .solr_fetcher import RequestsSolrFetcher
from .marc_fetcher import MARCFetcher
from .marc_fetcher import AlephMARCXMLFetcher
from .nuxeo_fetcher import NuxeoFetcher
from .nuxeo_fetcher import UCLDCNuxeoFetcher
from .oac_fetcher import OAC_XML_Fetcher
from .oac_fetcher import OAC_JSON_Fetcher
from .ucsf_xml_fetcher import UCSF_XML_Fetcher
from .cmis_atom_feed_fetcher import CMISAtomFeedFetcher
from .flickr_fetcher import Flickr_Fetcher
from .youtube_fetcher import YouTube_Fetcher
from .xml_fetcher import XML_Fetcher
from .ucd_json_fetcher import UCD_JSON_Fetcher
from .emuseum_fetcher import eMuseum_Fetcher
from .ia_fetcher import IA_Fetcher
from .preservica_api_fetcher import PreservicaFetcher
from .controller import HARVEST_TYPES
from .controller import HarvestController
from .controller import get_log_file_path
from .controller import main
from .controller import EMAIL_RETURN_ADDRESS
# BUGFIX: __all__ previously held the imported objects themselves.
# Python requires __all__ entries to be strings naming the public
# attributes; ``from harvester.fetcher import *`` raised
# "TypeError: Item in __all__ must be str" with the old tuple.
__all__ = (
    'Fetcher',
    'NoRecordsFetchedException',
    'HARVEST_TYPES',
    'OAIFetcher',
    'SolrFetcher',
    'PySolrFetcher',
    'MARCFetcher',
    'AlephMARCXMLFetcher',
    'NuxeoFetcher',
    'UCLDCNuxeoFetcher',
    'OAC_XML_Fetcher',
    'OAC_JSON_Fetcher',
    'UCSF_XML_Fetcher',
    'CMISAtomFeedFetcher',
    'HarvestController',
    'XML_Fetcher',
    'eMuseum_Fetcher',
    'IA_Fetcher',
    'PreservicaFetcher',
    'UCD_JSON_Fetcher',
    'PySolrQueryFetcher',
    'Flickr_Fetcher',
    'YouTube_Fetcher',
    'RequestsSolrFetcher',
    'EMAIL_RETURN_ADDRESS',
    'get_log_file_path',
    'main',
)
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,158 | ucldc/harvester | refs/heads/master | /test/test_emuseum_fetcher.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from mypretty import httpretty
# import httpretty
import harvester.fetcher as fetcher
from test.utils import DIR_FIXTURES
from test.utils import LogOverrideMixin
class eMuseumFetcherTestCase(LogOverrideMixin, TestCase):
    '''Test the fetcher for eMuseum API interface'''

    @httpretty.activate
    def testFetch(self):
        '''Serve three mocked eMuseum XML result pages and spot-check the
        parsed records that come back from the fetcher.'''
        # Build the three paged responses from fixtures; httpretty replays
        # them in order for successive GETs of the same URL.
        fixture_pages = [
            httpretty.Response(
                body=open(
                    DIR_FIXTURES + '/eMuseum-page-{}.xml'.format(page_num)
                ).read())
            for page_num in (1, 2, 3)
        ]
        httpretty.register_uri(
            httpretty.GET,
            'http://digitalcollections.hoover.org/search/*/objects/xml?filter=approved:true&page=1',
            responses=fixture_pages)
        url = 'http://digitalcollections.hoover.org'
        emuseum_fetcher = fetcher.eMuseum_Fetcher(url, None)
        self.assertEqual(emuseum_fetcher.url_base, url)
        # First page is pulled explicitly, then the iterator drains the rest.
        records = []
        records.extend(emuseum_fetcher.next())
        for page in emuseum_fetcher:
            records.extend(page)
        self.assertEqual(len(records), 24)
        sample = records[12]
        self.assertIn('title', sample)
        self.assertEqual(sample['title']['text'],
                         'Money is power. A war savings certificate in every Canadian home. Get yours now at post offices or banks.')
        self.assertIn('unknown2', sample)
        self.assertIn('text2', sample['primaryMaker'])
        self.assertNotIn('attrib', sample['unknown1'])
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,159 | ucldc/harvester | refs/heads/master | /scripts/fix_11747_isShownBy.py | import sys
from harvester.post_processing.run_transform_on_couchdb_docs import run_on_couchdb_by_collection
def get_best_oac_image(doc):
    '''From the list of images, choose the largest one.

    Inspects the OAC originalRecord's "thumbnail" and "reference-image"
    entries, compares their pixel widths (the "X" attribute), and returns
    the src of the widest one, or None when the doc carries no images.
    A relative src is resolved against the module-level url_content_base.
    '''
    best_image = None
    if 'originalRecord' in doc:  # guard weird input
        best_width = 0
        thumb = doc['originalRecord'].get('thumbnail', None)
        if thumb and 'src' in thumb:
            # width may be absent or a string; normalize to int so the
            # comparisons below are numeric (original read thumb['X']
            # unconditionally, a KeyError when the width was missing)
            best_width = int(thumb.get('X', 0))
            best_image = thumb['src']
        ref_images = doc['originalRecord'].get('reference-image', [])
        if isinstance(ref_images, dict):
            # a single reference image arrives as a bare dict, not a list
            ref_images = [ref_images]
        for obj in ref_images:
            if int(obj['X']) > best_width:
                best_width = int(obj['X'])
                best_image = obj['src']
    if best_image and not best_image.startswith('http'):
        # BUG FIX: original referenced the undefined name
        # URL_OAC_CONTENT_BASE (NameError at runtime); use the
        # module-level url_content_base, normalizing its trailing slash.
        best_image = url_content_base.rstrip('/') + '/' + best_image
    return best_image
# Base URL for resolving relative OAC content srcs (note the trailing slash).
url_content_base = 'http://content.cdlib.org/'
def fix_isShownBy_11747(doc):
    '''One-off migration for collection 11747: repoint the thumbnail src at
    the canonical content.cdlib.org URL for the doc's ARK, then recompute
    isShownBy from the (now-corrected) image candidates.
    '''
    # isShownAt is assumed to contain an "ark:..." identifier; split drops
    # the prefix, so re-attach it on the next line.
    doc_ark = doc['isShownAt'].split('ark:')[1]
    doc_ark = 'ark:' + doc_ark
    # force the thumbnail to the canonical /thumbnail URL on the content server
    doc['originalRecord']['thumbnail']['src'] = ''.join((url_content_base,
                                                doc_ark, '/thumbnail'))
    best_image = get_best_oac_image(doc)
    doc['isShownBy'] = best_image
    print "DOC: {} shownBy:{}".format(doc['_id'], doc['isShownBy'])
    return doc
# Module-level side effect: running this script applies the fix to every
# couchdb doc in collection 11747.
run_on_couchdb_by_collection(fix_isShownBy_11747, collection_key="11747")
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,160 | ucldc/harvester | refs/heads/master | /scripts/external-redirect-get-solr_prod-id.py | #! /bin/env python
# -*- coding: utf-8 -*-
# Use when Calisphere Object URLs for a collection change, to generate
# a redirect file mapping 'old' (on SOLR-PROD) to 'new' (on SOLR-TEST) URLs.
#
# This script takes a Collection ID and a 'match' field in SOLR (i.e. best
# field to use for matching SOLR-PROD record to corresponding SOLR-TEST)
# and generates a JSON file containing the SOLR-PROD Solr ID value and
# 'match' field value for each object in given Collection, to use as input
# for external-redirect-generate-URL-redirect-map.py
import os
import argparse
import json
import requests
import solr
# to get rid of ssl key warning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Registry and SOLR-PROD endpoints; the solr API key is read from the
# environment (empty string when unset, which disables the auth header).
URL_REGISTRY_API='https://registry.cdlib.org/api/v1/collection/'
SOLR_URL='https://solr.calisphere.org/solr/'
SOLR_API_KEY = os.environ.get('SOLR_API_KEY', '')
def get_solr_id(cid, matchVal, url_solr=SOLR_URL, api_key=SOLR_API_KEY):
    '''Dump solr id + match-field values for one collection to a JSON file.

    Queries the solr index for every record whose collection_url points at
    registry collection `cid`, requesting only the `id` field and the
    caller-chosen `matchVal` field, and writes the raw solr response to
    prod-URLs-<cid>.json (input for the redirect-map generator script).
    '''
    # solr auth is a token header; omit it entirely when no key is set
    solr_auth = { 'X-Authentication-Token': api_key } if api_key else None
    url_collection = URL_REGISTRY_API + cid + '/'
    # rows=1000000: effectively "all rows" for a single collection
    query = { 'q': 'collection_url:{}'.format(url_collection), 'rows':1000000, 'fl': 'id,{}'.format(matchVal)}
    solr_endpoint = url_solr + 'query'
    print "Getting ids from : {}\n{}".format(solr_endpoint, query)
    # verify=False: SSL cert validation disabled (warning suppressed at
    # module top)
    resp_obj = requests.get(solr_endpoint,
                            headers=solr_auth,
                            params=query,
                            verify=False)
    results = resp_obj.json()
    if results:
        with open('prod-URLs-{}.json'.format(cid), 'w') as foo:
            foo.write(json.dumps(results, sort_keys=True, indent=4))
def main(cid, matchVal):
    '''Entry point: dump solr id/match-field values for collection `cid`.'''
    return get_solr_id(cid, matchVal)
if __name__=='__main__':
    # CLI: two positional args — registry collection id and the solr field
    # used to match SOLR-PROD records to SOLR-TEST ones.
    parser = argparse.ArgumentParser('This script takes a Collection ID ' \
        'and a "match" field in SOLR (i.e. best field to use for matching SOLR-PROD ' \
        'record to corresponding SOLR-TEST) and generates a JSON file containing ' \
        'the SOLR-PROD Solr ID value and "match" field value for each object in ' \
        'given Collection, to use as input for ' \
        'external-redirect-generate-redirect-map.py' \
        '\nUsage: external-redirect-get-solr_prod-id.py [Collection ID] [match field]' )
    parser.add_argument('cid')
    parser.add_argument('matchVal')
    argv = parser.parse_args()
    # NOTE(review): argparse already rejects missing positionals, so these
    # guards are belt-and-braces for empty-string values.
    if not argv.cid:
        raise Exception(
            "Please include valid Registry Collection ID")
    if not argv.matchVal:
        raise Exception(
            "Please include valid SOLR metadata match field")
    print "CID: {} MATCH FIELD: {}".format(
        argv.cid,
        argv.matchVal)
    main(argv.cid, argv.matchVal)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,161 | ucldc/harvester | refs/heads/master | /scripts/sync_solr_documents.py | #! /bin/env python
# -*- coding: utf-8 -*-
# We've had a couple of cases where the pre-prodution index has had a
# collection deleted for re-harvesting but the re-harvest has not been
# successful and we want to publish a new image.
# This script will take the documents from one solr index and push them to
# another solr index
import os
import argparse
import json
import requests
# to get rid of ssl key warning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
URL_SOLR_API='https://solr.calisphere.org/solr/'  # default source (prod) index
URL_SOLR=None  # destination index; supplied via the URL_SOLR env var in __main__
URL_REGISTRY_API='https://registry.cdlib.org/api/v1/collection/'
def get_ids_for_collection(url_collection, url_solr=URL_SOLR_API,
                           api_key=None):
    '''Return solr IDs for a given collection.

    Queries the source index for every record whose collection_url matches,
    fetching only the id field (rows=100000 is an effective "everything").
    '''
    # token-header auth; omit the header entirely when no key is supplied
    solr_auth = { 'X-Authentication-Token': api_key } if api_key else None
    query = { 'q': 'collection_url:{}'.format(url_collection), 'rows':100000,
            'fl': 'id'}
    solr_endpoint = url_solr + 'query'
    print "Getting ids from : {}\n{}".format(solr_endpoint, query)
    # verify=False: SSL cert validation disabled (warning suppressed above)
    resp_obj = requests.get(solr_endpoint,
                            headers=solr_auth,
                            params=query,
                            verify=False)
    print resp_obj
    return [ d['id'] for d in resp_obj.json()['response']['docs']]
def get_solr_doc(sid, url_solr, api_key):
    '''Fetch a single solr document by id from the given solr endpoint.

    Strips fields that must not be re-posted into another index: the
    solr-internal `_version_` field (optimistic-concurrency state the
    destination must assign itself) and any field whose name contains
    "_ss" (index-generated copy fields).
    '''
    solr_auth = {'X-Authentication-Token': api_key} if api_key else None
    query = {'q': 'id:"{}"'.format(sid)}
    # requests can decode the JSON body directly; no need for json.loads
    # on .content as the original did
    resp_obj = requests.get(url_solr + 'query',
                            headers=solr_auth,
                            params=query,
                            verify=False).json()
    doc = resp_obj['response']['docs'][0]
    # pop with default avoids a KeyError if _version_ is ever absent
    doc.pop('_version_', None)
    # iterate a snapshot of the keys: deleting from a dict while iterating
    # its live key view raises RuntimeError on Python 3
    for k in list(doc.keys()):
        if '_ss' in k:
            del doc[k]
    return doc
def add_doc(doc, solr_endpoint):
    '''POST one solr document to the destination update endpoint.

    Prints a per-document synced/failed line and returns True on HTTP 200,
    False otherwise.
    '''
    resp = requests.post(solr_endpoint,
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(doc),
                         verify=False)
    if resp.status_code == 200:
        print 'synced {}'.format(doc['id'])
    else:
        print 'failed {} : {}'.format(doc['id'], resp.status_code)
    return True if resp.status_code == 200 else False
def get_update_endpoint(dest_solr):
    '''Return the JSON-document update endpoint for the given solr base URL.'''
    return '{0}/update/json/docs'.format(dest_solr)
def sync_id_list(ids, source_solr=None, update_endpoint=None,
                 source_api_key=None):
    '''Copy each solr document in `ids` from the source index to the
    destination update endpoint, then issue a single commit.

    Returns a tuple (n_success, n_failed).
    '''
    n_success = n_failed = 0
    for sid in ids:
        source_doc = get_solr_doc(sid, url_solr=source_solr,
                                  api_key=source_api_key)
        if add_doc(source_doc, update_endpoint):
            n_success += 1
        else:
            n_failed += 1
    # commit to index.
    # BUG FIX: the original called requests.get(dest_solr+'/update?...')
    # but `dest_solr` is only defined inside the __main__ guard, so this
    # raised NameError whenever sync_id_list was used as a library
    # function. Derive the /update base from the update endpoint that
    # get_update_endpoint() built (dest_solr + '/update/json/docs').
    commit_base = update_endpoint.rsplit('/json/docs', 1)[0]
    requests.get(commit_base + '?commit=true')
    return n_success, n_failed
def sync_collection(url_collection, source_solr=None, update_endpoint=None,
                    source_api_key=None):#, dest_api_key=None):
    '''Sync every solr record of one collection from source to destination.

    Looks up all record ids for the collection on the source index, then
    copies each document to the destination update endpoint.
    Returns (n_success, n_failed).
    '''
    ids = get_ids_for_collection(url_collection, source_solr, source_api_key)
    print "Syncing {} records".format(len(ids))
    return sync_id_list(
        ids,
        source_solr=source_solr,
        update_endpoint=update_endpoint,
        source_api_key=source_api_key)
if __name__=='__main__':
    # CLI: one positional collection id; -y skips the interactive
    # confirmation prompt below.
    parser = argparse.ArgumentParser()
    parser.add_argument('collection_id')
    parser.add_argument('-y', action='store_true',
                        help="-y to answer yes to input")
    argv = parser.parse_args()
    print "Y? {}".format(argv.y)
    url_collection = URL_REGISTRY_API + argv.collection_id + '/'
    # source defaults to prod; destination must come from the URL_SOLR env
    # var (None otherwise, which will break endpoint construction below)
    source_solr = os.environ.get('URL_SOLR_API', URL_SOLR_API)
    source_api_key = os.environ.get('SOLR_API_KEY', '')
    dest_solr = os.environ.get('URL_SOLR', None)
    print "CID:{} SOLR SOURCE: {} SOLR DESTINATION: {}".format(
        argv.collection_id,
        source_solr,
        dest_solr)
    solr_endpoint = get_update_endpoint(dest_solr)
    print "DESTINANTION ENDPOINT: {}".format(solr_endpoint)
    if not argv.y:
        raw_input('Press any key to continue')
    success, failed = sync_collection(url_collection, source_solr=source_solr,
                                      update_endpoint=solr_endpoint,
                                      source_api_key=source_api_key)
    print "{} updated successfully. {} failed".format(success, failed)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,162 | ucldc/harvester | refs/heads/master | /harvester/solr_updater.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import argparse
import re
import hashlib
import json
from collections import defaultdict
from urlparse import urlparse
import requests
import boto3
from botocore.exceptions import ClientError
from solr import Solr, SolrException
from harvester.couchdb_init import get_couchdb
from harvester.post_processing.couchdb_runner import CouchDBCollectionFilter
from harvester.sns_message import publish_to_harvesting
from harvester.sns_message import format_results_subject
from facet_decade import facet_decade
from mediajson import MediaJson
import datetime
reload(sys)
sys.setdefaultencoding('utf8')
S3_BUCKET = 'solr.ucldc'
RE_ARK_FINDER = re.compile('(ark:/\d\d\d\d\d/[^/|\s]*)')
RE_ALPHANUMSPACE = re.compile(r'[^0-9A-Za-z\s]*') # \W include "_" as does A-z
def dict_for_data_field(field_src, data, field_dest):
    '''Build a one-entry dict mapping field_dest to the non-blank
    values found under field_src in data.

    Returns {} when there are no usable values, which keeps empty
    data out of the solr document entirely.
    '''
    raw = data.get(field_src)
    if not raw:
        return {}
    cleaned = dejson(field_src, raw)  # handles list and scalar alike
    if isinstance(cleaned, basestring):
        kept = cleaned if cleaned else []
    else:
        # drop blank entries from the iterable
        kept = [value for value in cleaned if value]
    return {field_dest: kept} if kept else {}
def dict_for_data_to_fields(field_src, data, field_dests):
    '''Copy one sourceResource field into two or more solr doc fields.'''
    merged = {}
    pieces = (dict_for_data_field(field_src, data, dest)
              for dest in field_dests)
    for piece in pieces:
        merged.update(piece)
    return merged
# Map top-level couch doc fields to solr doc fields.  Each value is a
# callable taking the couch doc and returning a dict of solr fields to
# merge into the solr document.
COUCHDOC_TO_SOLR_MAPPING = {
    '_id': lambda d: {'harvest_id_s': d['_id']},
    'object': lambda d: {'reference_image_md5': d['object']},
    # dimensions stored as "<width>:<height>" string
    'object_dimensions': lambda d: {'reference_image_dimensions':
                                    '{0}:{1}'.format(
                                        d['object_dimensions'][0],
                                        d['object_dimensions'][1])},
    'isShownAt': lambda d: {'url_item': d['isShownAt']},
    # NOTE: if no item_count field, this will be omitted from solr doc
    'item_count': lambda d: {'item_count': d.get('item_count', 0)},
}
# So no "coverage" has been in the sourceResource, it's always mapped to
# spatial. With QDC we have a better fidelity.
# for the interim, spatial needs to map to coverage & spatial.
# Will this wind up wiping out any sourceResource coverage values?
# Map sourceResource fields to solr doc fields; each value is a callable
# taking the sourceResource dict and returning solr fields to merge.
COUCHDOC_SRC_RESOURCE_TO_SOLR_MAPPING = {
    'alternativeTitle': lambda d: dict_for_data_field('alternativeTitle', d,
                                                      'alternative_title'),
    'contributor': lambda d: dict_for_data_field('contributor', d,
                                                 'contributor'),
    'coverage': lambda d: dict_for_data_field('coverage', d, 'coverage'),
    'spatial': lambda d: dict_for_data_to_fields('spatial', d, ('spatial',
                                                                'coverage')),
    'creator': lambda d: dict_for_data_field('creator', d, 'creator'),
    'date': lambda d: map_date(d),
    'description': lambda d: dict_for_data_field('description', d,
                                                 'description'),
    'extent': lambda d: dict_for_data_field('extent', d, 'extent'),
    'format': lambda d: dict_for_data_field('format', d, 'format'),
    'genre': lambda d: dict_for_data_field('genre', d, 'genre'),
    'identifier': lambda d: dict_for_data_field('identifier', d, 'identifier'),
    # language entries may be dicts carrying 'name'/'iso639_3' or strings
    'language': lambda d: {
        'language': [
            l.get('name', l.get('iso639_3', None))
            if isinstance(l, dict) else l for l in d['language']]},
    'publisher': lambda d: dict_for_data_field('publisher', d, 'publisher'),
    'relation': lambda d: dict_for_data_field('relation', d, 'relation'),
    'rights': lambda d: dict_for_data_field('rights', d, 'rights'),
    'rightsURI': lambda d: dict_for_data_field('rightsURI', d, 'rights_uri'),
    # subject entries may be dicts with 'name' or raw/json-encoded strings
    'subject': lambda d: {'subject': [s['name']
                                      if isinstance(s, dict)
                                      else dejson('subject', s)
                                      for s in d['subject']]},
    # only display dates here; sort dates come from map_date via 'date'
    'temporal': lambda d: {'temporal': unpack_date(d.get('temporal',
                                                         None))[0]},
    'title': lambda d: dict_for_data_field('title', d, 'title'),
    'type': lambda d: dict_for_data_field('type', d, 'type'),
    'provenance': lambda d: dict_for_data_field('provenance', d, 'provenance'),
}
# Map originalRecord fields to solr doc fields.  Applied to the
# originalRecord itself and (for the ucldc_schema-prefixed key) to its
# nested 'properties' dict -- see map_couch_to_solr_doc.
COUCHDOC_ORIGINAL_RECORD_TO_SOLR_MAPPING = {
    # 'location': lambda d: {'location': d.get('location', None)},
    'dateCopyrighted':
    lambda d: dict_for_data_field('dateCopyrighted', d, 'rights_date'),
    'rightsHolder':
    lambda d: dict_for_data_field('rightsHolder', d, 'rights_holder'),
    'rightsNote':
    lambda d: dict_for_data_field('rightsNote', d, 'rights_note'),
    'source': lambda d: dict_for_data_field('source', d, 'source'),
    'structmap_text':
    lambda d: dict_for_data_field('structmap_text', d, 'structmap_text'),
    'structmap_url':
    lambda d: dict_for_data_field('structmap_url', d, 'structmap_url'),
    'transcription':
    lambda d: dict_for_data_field('transcription', d, 'transcription'),
    # UCLDC/DC metadata: use schema prefix & d['properties']
    'ucldc_schema:physlocation':
    lambda d: dict_for_data_field('ucldc_schema:physlocation',
                                  d['properties'], 'location'),
}
def getjobj(data):
    '''Parse data as JSON, returning None when it cannot be parsed.

    json.loads raises ValueError for malformed JSON text and TypeError
    for non-string input; non-string values reach this function via
    dejson/unpack_if_json (e.g. a dict without item/name/text keys), so
    both are treated as "not JSON" rather than crashing the mapping.
    '''
    try:
        return json.loads(data)
    except ValueError:  # malformed JSON text
        return None
    except TypeError:  # not a str/bytes at all
        return None
def unpack_if_json(field, data):
    '''Flatten data to a string when it parses as valid JSON.

    At this point the JSON payload should be a scalar or a dict; for a
    dict the 'name' key, when present, carries the value.
    '''
    parsed = getjobj(data)
    if not parsed:
        return data
    try:
        return parsed.get('name', data)
    except AttributeError:  # parsed JSON was not a dict
        return data
def dejson(field, data):
    '''De-jsonfy data, flattening JSON wrapping to plain values.

    Lists are processed element by element (recursively); dicts yield
    their 'item'/'name'/'text' entry when one is present, otherwise the
    dict is handed to unpack_if_json for a last attempt; scalars pass
    through untouched.
    '''
    if not data:
        return data
    if isinstance(data, list):
        return [dejson(field, element) for element in data]
    if isinstance(data, dict):
        # already parsed json?
        flat = data.get('item', data.get('name', data.get('text', None)))
        return flat if flat else unpack_if_json(field, data)
    return data
class UTCtz(datetime.tzinfo):
    '''Minimal tzinfo for UTC: zero offset, zero DST, named "GMT".'''

    def utcoffset(self, dt):
        '''Offset from UTC is always zero.'''
        return datetime.timedelta(0)

    def tzname(self, dt):
        '''Report the zone name used by the original implementation.'''
        return 'GMT'

    def dst(self, dt):
        '''No daylight saving in UTC.'''
        return datetime.timedelta(0)


# shared singleton used when stamping datetimes for solrpy
UTC = UTCtz()
def make_datetime(dstring):
    '''Build a UTC-stamped datetime from a date string.

    Accepted formats are a bare year (YYYY) or YYYY-MM-DD; returns
    None when the string matches neither.
    '''
    if not dstring:
        return None
    parsed = None
    # bare year -> January 1st of that year
    try:
        parsed = datetime.datetime(int(dstring), 1, 1)
    except ValueError:
        pass
    except TypeError as e:
        print(
            'Date type err DATA:{} ERROR:{}'.format(dstring, e),
            file=sys.stderr)
    # full YYYY-MM-DD form (wins over the bare-year parse if both hit)
    try:
        parsed = datetime.datetime.strptime(dstring, '%Y-%m-%d')
    except ValueError:
        pass
    except TypeError as e:
        print(
            'Date type err in strptime:{} {}'.format(dstring, e),
            file=sys.stderr)
    # add UTC as timezone, solrpy looks for tzinfo
    if parsed:
        parsed = datetime.datetime(
            parsed.year, parsed.month, parsed.day, tzinfo=UTC)
    return parsed
def get_dates_from_date_obj(date_obj):
    '''Return (display date, start datetime, end datetime) for date_obj.

    Dicts use their displayDate/begin/end keys; bare strings are the
    display value; anything else yields (None, None, None).
    '''
    if isinstance(date_obj, dict):
        return (date_obj.get('displayDate', None),
                make_datetime(date_obj.get('begin', None)),
                make_datetime(date_obj.get('end', None)))
    if isinstance(date_obj, basestring):
        return date_obj, None, None
    return None, None, None
def unpack_date(date_obj):
    '''Unpack a couchdb date object.

    Returns (display dates, start datetimes, end datetimes) as lists,
    or (None, None, None) for an empty/missing value.
    '''
    if not date_obj or not len(date_obj):
        return None, None, None
    displays = []
    starts = []
    ends = []
    # normalize: a bare dict is treated as a one-element list
    entries = [date_obj] if isinstance(date_obj, dict) else date_obj
    for entry in entries:
        try:
            display, dt_start, dt_end = get_dates_from_date_obj(entry)
        except KeyError:
            continue
        displays.append(display)
        if dt_start:
            starts.append(dt_start)
        if dt_end:
            ends.append(dt_end)
    return displays, starts, ends
def map_date(d):
    '''Map sourceResource date data to solr date & sort-date fields.

    NOTE(review): sort_date_end keeps the *earliest* end value,
    mirroring the original sort-ascending-take-first behavior.
    '''
    date_source = d.get('date', None)
    if not date_source:
        return {}
    displays, starts, ends = unpack_date(date_source)
    date_map = {'date': displays}
    start_date = min(starts) if starts else None
    end_date = min(ends) if ends else None
    # fill in start_date == end_date if only one exists
    start_date = start_date or end_date
    end_date = end_date or start_date
    if start_date:
        # datetimes carry UTC tzinfo for solrpy (uses astimezone())
        date_map['sort_date_start'] = start_date
        date_map['sort_date_end'] = end_date
    return date_map
def find_ark_in_identifiers(doc):
    '''Return the first ARK found in sourceResource identifiers, else None.'''
    for identifier in doc['sourceResource'].get('identifier', None) or []:
        match = RE_ARK_FINDER.search(identifier)
        if match:
            return match.group(0)
    return None
def uuid_if_nuxeo(doc):
    '''Return the Nuxeo uid when the doc's collection is a NUX harvest.

    Returns None for non-Nuxeo collections or when no uid is present.
    '''
    first_collection = doc['originalRecord']['collection'][0]
    if first_collection['harvest_type'] != 'NUX':
        return None
    return doc['originalRecord'].get('uid', None)
def ucsd_ark(doc):
    '''Construct an ARK for UCSD docs (campus 6) from the record id.

    Returns None when the doc is not UCSD or has no id fragment.
    '''
    collection = doc['originalRecord']['collection'][0]
    campus_list = collection.get('campus', None)
    if not campus_list:
        return None
    if campus_list[0]['@id'] != "https://registry.cdlib.org/api/v1/campus/6/":
        return None
    # UCSD get ark id
    ark_frag = doc['originalRecord'].get('id', None)
    return 'ark:/20775/' + ark_frag if ark_frag else None
def ucla_ark(doc):
    '''UCLA ARKs are buried in a mods field in originalRecord:
    "mods_recordInfo_recordIdentifier_mlt": "21198-zz002b1833",
    "mods_recordInfo_recordIdentifier_s": "21198-zz002b1833",
    "mods_recordInfo_recordIdentifier_t": "21198-zz002b1833",
    If one is found, safe to assume UCLA & make the ARK
    NOTE: I cut & pasted this to the ucla_solr_dc_mapper to get it
    into the "identifier" field

    Values that do not split into exactly "naan-arkid" (the original
    code's "could fail?" case) now raise no ValueError; the next field
    is tried instead and None is returned if none match.
    '''
    ark = None
    id_fields = ("mods_recordInfo_recordIdentifier_mlt",
                 "mods_recordInfo_recordIdentifier_s",
                 "mods_recordInfo_recordIdentifier_t")
    for f in id_fields:
        try:
            mangled_ark = doc['originalRecord'][f]
            # "21198-zz002b1833" -> "ark:/21198/zz002b1833"
            naan, arkid = mangled_ark.split('-')
            ark = '/'.join(('ark:', naan, arkid))
            break
        except KeyError:
            pass
        except ValueError:
            # wrong number of '-' separated pieces; try the next field
            pass
    return ark
def get_solr_id(couch_doc):
    ''' Extract a good ID to use in the solr index.
    see : https://github.com/ucldc/ucldc-docs/wiki/pretty_id
    arks are always pulled if found, gets first.
    Some institutions have known ark framents, arks are constructed
    for these.
    Nuxeo objects retain their UUID
    All other objects the couchdb _id is md5 sum
    '''
    # try each special-id finder in priority order
    for finder in (find_ark_in_identifiers, uuid_if_nuxeo, ucsd_ark,
                   ucla_ark):
        solr_id = finder(couch_doc)
        if solr_id:
            return solr_id
    # no recognized special id; fall back to md5 of the couchdb _id
    hasher = hashlib.md5()
    hasher.update(couch_doc['_id'])
    return hasher.hexdigest()
def normalize_type(solr_doc):
    '''Normalize the solr doc "type" to lower-case DCMI terms.

    Handled on an as-found basis: known mismatches are folded into
    'physical object' / 'moving image'; unknown values pass through so
    they still show up in facets.  Mutates solr_doc in place.
    '''
    dcmi_types = ('collection', 'dataset', 'event', 'image',
                  'interactive resource', 'moving image', 'service',
                  'software', 'sound', 'text', 'physical object')

    def _norm(value):
        if value in dcmi_types:
            return value
        lowered = value.lower()
        if 'physical' in lowered:
            return 'physical object'
        if 'moving' in lowered:
            return 'moving image'
        return value  # don't drop, will show in facets

    doc_type = solr_doc.get('type', None)
    if not doc_type:
        return
    if isinstance(doc_type, list):
        solr_doc['type'] = [_norm(value) for value in doc_type]
    else:  # string?
        solr_doc['type'] = _norm(doc_type)
# Validation exceptions raised by has_required_fields; each carries a
# dict_key used to tally omission reasons in the sync report.
class MissingSourceResource(KeyError):
    '''Couch doc has no sourceResource member.'''
    dict_key = 'Missing SourceResource'


class MissingTitle(KeyError):
    '''sourceResource has no title.'''
    dict_key = 'Missing Title'


class MissingRights(KeyError):
    '''sourceResource has neither rights nor rightsURI.'''
    dict_key = 'Missing Rights'


class MissingIsShownAt(KeyError):
    '''Couch doc has no isShownAt link.'''
    dict_key = 'Missing isShownAt'


class isShownAtNotURL(ValueError):
    '''isShownAt value does not look like a URL.'''
    dict_key = 'isShownAt not a URL'


class MissingImage(KeyError):
    '''Image-type doc has no harvested image ("object" field).'''
    dict_key = 'Missing Image'
def has_required_fields(doc):
    '''Check the couchdb doc has required fields and reasonable values.

    Returns True when everything checks out; otherwise raises one of
    the Missing*/isShownAtNotURL exceptions with an OMITTED message.
    '''
    if 'sourceResource' not in doc:
        raise MissingSourceResource(
            '---- OMITTED: Doc:{0} has no sourceResource.'.format(doc['_id']))
    source = doc['sourceResource']
    if 'title' not in source:
        raise MissingTitle(
            '---- OMITTED: Doc:{0} has no title.'.format(doc['_id']))
    if 'rights' not in source and 'rightsURI' not in source:
        raise MissingRights(
            '---- OMITTED: Doc:{0} has no rights.'.format(doc['_id']))
    if 'isShownAt' not in doc:
        raise MissingIsShownAt(
            '---- OMITTED: Doc:{0} has no isShownAt.'.format(doc['_id']))
    # check that value in isShownAt is at least a valid URL format
    parsed = urlparse(doc['isShownAt'])
    looks_like_url = (parsed.scheme and parsed.netloc and
                      (parsed.path or parsed.params or parsed.query))
    if not looks_like_url:
        raise isShownAtNotURL(
            '---- OMITTED: Doc:{0} isShownAt doesn\'t appear to be'
            'a URL: {1}'.format(doc['_id'], doc['isShownAt']))
    doc_type = source.get('type', '')
    # single-valued "image" docs must carry a harvested image reference
    if not isinstance(doc_type, list) and 'image' == doc_type.lower():
        if 'object' not in doc:
            raise MissingImage(
                '---- OMITTED: Doc:{0} is image type with no harvested image.'.format(doc['_id']))
    return True
def add_slash(url):
    '''Return url with a single trailing slash (empty input unchanged).

    Previously used os.path.join(url, ''), which makes the separator
    platform-dependent (backslash on Windows); these are URLs, so plain
    string handling is used instead.  Matches the old behavior on
    POSIX, including leaving '' alone.
    '''
    if not url or url.endswith('/'):
        return url
    return url + '/'
class OldCollectionException(Exception):
    '''Registry collection record predates the repository field.'''
    pass
def map_registry_data(collections):
    '''Map the collections data to corresponding data fields in the solr doc

    collections -- list of registry collection dicts for the doc (a doc
    can belong to several collections).
    Returns a dict of collection_*/repository_* (and, when present,
    campus_*) solr fields.
    Raises OldCollectionException for registry records that have no
    repository field.
    '''
    collection_urls = []
    collection_names = []
    collection_datas = []
    collection_sort_datas = []
    repository_urls = []
    repository_names = []
    repository_datas = []
    campus_urls = campus_names = campus_datas = None
    for collection in collections:  # can have multiple collections
        collection_urls.append(add_slash(collection['@id']))
        collection_names.append(collection['name'])
        collection_datas.append('::'.join((add_slash(collection['@id']),
                                           collection['name'])))
        scd = get_sort_collection_data_string(collection)
        collection_sort_datas.append(scd)
        if 'campus' in collection:
            campus_urls = []
            campus_names = []
            campus_datas = []
            campuses = collection['campus']
            campus_urls.extend(
                [add_slash(campus['@id']) for campus in campuses])
            # BUG FIX: was "for c in campuses" while reading "campus",
            # which (via the py2 comprehension scope leak) filled the
            # list with one repeated campus name
            campus_names.extend([campus['name'] for campus in campuses])
            campus_datas.extend([
                '::'.join((add_slash(campus['@id']), campus['name']))
                for campus in campuses
            ])
        try:
            repositories = collection['repository']
        except KeyError:
            raise OldCollectionException
        repository_urls.extend(
            [add_slash(repo['@id']) for repo in repositories])
        repository_names.extend([repo['name'] for repo in repositories])
        repo_datas = []
        for repo in repositories:
            repo_data = '::'.join((add_slash(repo['@id']), repo['name']))
            # campus-qualified repository data when a campus is attached
            if 'campus' in repo and len(repo['campus']):
                repo_data = '::'.join((add_slash(repo['@id']), repo['name'],
                                       repo['campus'][0]['name']))
            repo_datas.append(repo_data)
        repository_datas.extend(repo_datas)
    registry_dict = dict(
        collection_url=collection_urls,
        collection_name=collection_names,
        collection_data=collection_datas,
        sort_collection_data=collection_sort_datas,
        repository_url=repository_urls,
        repository_name=repository_names,
        repository_data=repository_datas, )
    if campus_urls:
        registry_dict.update({
            'campus_url': campus_urls,
            'campus_name': campus_names,
            'campus_data': campus_datas
        })
    return registry_dict
def get_facet_decades(date):
    '''Return set of decade strings for given date structure.

    date is either a dict with a "displayDate" key or any value that
    can be stringified; duplicates are removed via the set.
    '''
    if isinstance(date, dict):
        decades = facet_decade(date.get('displayDate', ''))
    else:
        decades = facet_decade(str(date))
    return set(decades)  # don't repeat values
def normalize_sort_field(sort_field,
                         default_missing='~title unknown',
                         missing_equivalents=('title unknown',)):
    '''Lower-case, strip punctuation and leading articles for sorting.

    sort_field -- value to normalize.
    default_missing -- returned for empty or placeholder values.
    missing_equivalents -- values treated as "missing".  Now defaults
    to an immutable tuple instead of a mutable list (shared-mutable-
    default pitfall); only membership is tested, so any container
    passed by callers still works.
    '''
    sort_field = sort_field.lower()
    # remove punctuation
    sort_field = RE_ALPHANUMSPACE.sub('', sort_field)
    words = sort_field.split()
    if words:
        # drop a leading English article
        if words[0] in ('the', 'a', 'an'):
            sort_field = ' '.join(words[1:])
    if not sort_field or sort_field in missing_equivalents:
        sort_field = default_missing
    return sort_field
def get_sort_collection_data_string(collection):
    '''Return the string form of the collection data.
    sort_collection_data ->
        [sort_collection_name::collection_name::collection_url, <>,<>]
    '''
    sort_name = normalize_sort_field(
        collection['name'],
        default_missing='~collection unknown',
        missing_equivalents=[])
    # NOTE(review): joined with a single ':' although the docstring
    # shows '::'; preserved as-is since the index format depends on it
    return ':'.join((sort_name, collection['name'],
                     add_slash(collection['@id'])))
def add_sort_title(couch_doc, solr_doc):
    '''Add a normalized sort_title to the solr doc.

    Defaults to the (first) sourceResource title; an originalRecord
    'sort-title' (OAC mostly) overrides it when present.
    '''
    title = couch_doc['sourceResource']['title']
    sort_title = title if isinstance(title, basestring) else title[0]
    original = couch_doc['originalRecord']
    if 'sort-title' in original:  # OAC mostly
        sort_obj = original['sort-title']
        if isinstance(sort_obj, list):
            sort_obj = sort_obj[0]
            if isinstance(sort_obj, dict):
                # text entry, falling back to the first title value
                sort_title = sort_obj.get(
                    'text', couch_doc['sourceResource']['title'][0])
            else:
                sort_title = sort_obj
        else:  # assume flat string
            sort_title = sort_obj
    solr_doc['sort_title'] = normalize_sort_field(sort_title)
def fill_in_title(couch_doc):
    '''Ensure sourceResource.title is non-empty.

    Missing, None or empty titles are replaced with ['Title unknown'].
    Raises MissingSourceResource when the doc has no sourceResource.
    Returns the (mutated) couch_doc.

    A redundant elif repeating the identical falsy-title check was
    removed; the single .get(...) check already covers missing, None
    and empty-string titles.
    '''
    if 'sourceResource' not in couch_doc:
        raise MissingSourceResource(
            "ERROR: KeyError - NO SOURCE RESOURCE in DOC:{}".format(
                couch_doc['_id']))
    if not couch_doc['sourceResource'].get('title', None):
        couch_doc['sourceResource']['title'] = ['Title unknown']
    return couch_doc
def add_facet_decade(couch_doc, solr_doc):
    '''Add the facet_decade field to the solr_doc dictionary.

    If sourceResource has no date field, a fake value is passed so the
    facet shows as 'unknown'.
    NOTE(review): for a list of dates each successful assignment
    overwrites the previous one, so only the last date's decades are
    kept -- preserved as-is.
    '''
    solr_doc['facet_decade'] = set()
    if 'date' not in couch_doc['sourceResource']:
        solr_doc['facet_decade'] = get_facet_decades('none')
        return
    date_field = couch_doc['sourceResource']['date']
    if isinstance(date_field, list):
        for date in date_field:
            try:
                solr_doc['facet_decade'] = get_facet_decades(date)
            except AttributeError as e:
                print(
                    'Attr Error for facet_decades in doc:{} ERROR:{}'.
                    format(couch_doc['_id'], e),
                    file=sys.stderr)
    else:
        try:
            solr_doc['facet_decade'] = get_facet_decades(date_field)
        except AttributeError as e:
            print(
                'Attr Error for doc:{} ERROR:{}'.format(couch_doc['_id'],
                                                        e),
                file=sys.stderr)
class MediaJSONError(ValueError):
    '''A media file referenced by the media.json is missing.'''
    dict_key = 'Missing reference media file'


class MissingMediaJSON(ValueError):
    '''The media.json object itself is missing (not yet harvested).'''
    dict_key = 'Missing Media Json'
def check_nuxeo_media(doc):
    '''Check that the media_json and jp2000 exist for a given solr doc.
    Raise exception if not

    Docs without a structmap_url are skipped.  Raises MissingMediaJSON
    when S3 has no media.json for the doc (ClientError) and
    MediaJSONError when a referenced media file is absent (ValueError);
    both messages are logged to stderr before raising.
    '''
    if 'structmap_url' not in doc:
        return
    # check that there is an object at the structmap_url
    try:
        MediaJson(doc['structmap_url']).check_media()
    except ClientError as e:
        message = '---- OMITTED: Doc:{} missing media json {}'.format(
            doc['harvest_id_s'],
            e)
        print(message, file=sys.stderr)
        raise MissingMediaJSON(message)
    except ValueError as e:
        message = '---- OMITTED: Doc:{} Missing reference media file: {}'.format(
            doc['harvest_id_s'],
            e)
        print(message, file=sys.stderr)
        raise MediaJSONError(message)
def map_couch_to_solr_doc(doc):
    '''Return a json document suitable for updating the solr index
    how to make schema aware mapping?

    Applies, in order: the top-level field mapping, the registry
    (collection/campus/repository) data, the sourceResource mapping,
    the originalRecord mapping (including the nested 'properties'
    sub-dict), then type normalization, sort title, facet decade and
    the final solr id.
    Raises OldCollectionException (via map_registry_data) and
    re-raises TypeError from any individual mapping after logging it.
    '''
    solr_doc = {}
    for p in doc.keys():
        if p in COUCHDOC_TO_SOLR_MAPPING:
            try:
                solr_doc.update(COUCHDOC_TO_SOLR_MAPPING[p](doc))
            except TypeError as e:
                print(
                    'TypeError for doc {} on COUCHDOC_TO_SOLR_MAPPING {}'.
                    format(doc['_id'], p),
                    file=sys.stderr)
                raise e
    reg_data_dict = map_registry_data(doc['originalRecord']['collection'])
    solr_doc.update(reg_data_dict)
    sourceResource = doc['sourceResource']
    for p in sourceResource.keys():
        if p in COUCHDOC_SRC_RESOURCE_TO_SOLR_MAPPING:
            try:
                solr_doc.update(COUCHDOC_SRC_RESOURCE_TO_SOLR_MAPPING[p](
                    sourceResource))
            except TypeError as e:
                print(
                    'TypeError for doc {} on sourceResource {}'.format(
                        doc['_id'], p),
                    file=sys.stderr)
                raise e
    originalRecord = doc['originalRecord']
    for k in originalRecord.keys():
        if k in COUCHDOC_ORIGINAL_RECORD_TO_SOLR_MAPPING:
            try:
                solr_doc.update(COUCHDOC_ORIGINAL_RECORD_TO_SOLR_MAPPING[k](originalRecord))
            except TypeError as e:
                print(
                    'TypeError for doc {} on originalRecord {}'.format(
                        doc['_id'], k),
                    file=sys.stderr)
                raise e
        if k == 'properties':
            # Nuxeo docs nest schema-prefixed fields under 'properties';
            # the mapping lambdas still receive the full originalRecord
            for p in originalRecord['properties']:
                if p in COUCHDOC_ORIGINAL_RECORD_TO_SOLR_MAPPING:
                    try:
                        solr_doc.update(COUCHDOC_ORIGINAL_RECORD_TO_SOLR_MAPPING[p](
                            originalRecord))
                    except TypeError as e:
                        print(
                            'TypeError for doc {} on originalRecord {}'.format(
                                doc['_id'], p),
                            file=sys.stderr)
                        raise e
    normalize_type(solr_doc)
    add_sort_title(doc, solr_doc)
    add_facet_decade(doc, solr_doc)
    solr_doc['id'] = get_solr_id(doc)
    return solr_doc
def push_doc_to_solr(solr_doc, solr_db):
    '''Push one couch doc to solr.

    Returns 1 on success, 0 on a solr HTTP 400; any other SolrException
    is re-raised after logging.
    '''
    try:
        solr_db.add(solr_doc)
    except SolrException as e:
        print(
            "ERROR for {} : {} {} {}".format(solr_doc['id'], e,
                                             solr_doc['collection_url'],
                                             solr_doc['harvest_id_s']),
            file=sys.stderr)
        # only a 400 (bad doc) is tolerated; anything else propagates
        if not e.httpcode == 400:
            raise e
        return 0
    print(
        "++++ ADDED: {} :harvest_id_s {}".format(solr_doc['id'],
                                                 solr_doc['harvest_id_s']),
        file=sys.stderr)
    return 1
def get_key_for_env():
    '''Get key based on DATA_BRANCH env var.

    Raises ValueError when DATA_BRANCH is not set.
    '''
    branch = os.environ.get('DATA_BRANCH')
    if branch is None:
        raise ValueError('Please set DATA_BRANCH environment variable')
    return 'couchdb_since/' + branch
class CouchdbLastSeq_S3(object):
    '''Persist the last-seen couchdb sequence in S3.

    One S3 object per DATA_BRANCH holds the number, enabling
    delta-only updates on subsequent runs.
    '''

    def __init__(self):
        resource = boto3.resource('s3')
        self.s3 = resource
        self.s3object = resource.Object(S3_BUCKET, get_key_for_env())

    @property
    def last_seq(self):
        '''Read the stored sequence back as an int.'''
        body = self.s3object.get()['Body']
        return int(body.read())

    @last_seq.setter
    def last_seq(self, value):
        '''value should be last_seq from couchdb _changes'''
        self.s3object.put(Body=str(value))
def delete_solr_item_by_id(item_id):
    '''Delete one document from solr by id, then announce the deletion.'''
    url_solr = os.environ['URL_SOLR']
    body = 'stream.body=<delete><id>{}</id></delete>'.format(item_id)
    url_delete = '{}/update?{}&commit=true'.format(url_solr, body)
    # any non-2xx from solr aborts before the SNS announcement
    requests.get(url_delete).raise_for_status()
    subject = format_results_subject(item_id,
                                     'Deleted document from Solr {env} ')
    publish_to_harvesting(subject,
                          'DELETED {}'.format(item_id))
def delete_solr_collection(collection_key):
    '''Delete a solr collection for the environment'''
    url_solr = os.environ['URL_SOLR']
    collection_url = 'https://registry.cdlib.org/api/v1/collection/{}/'.format(
        collection_key)
    query = 'stream.body=<delete><query>collection_url:\"{}\"</query>' \
        '</delete>&commit=true'.format(collection_url)
    url_delete = '{}/update?{}'.format(url_solr, query)
    # any non-2xx from solr aborts before the SNS announcement
    requests.get(url_delete).raise_for_status()
    subject = format_results_subject(collection_key,
                                     'Deleted documents from Solr {env} ')
    publish_to_harvesting(subject,
                          'DELETED {}'.format(collection_key))
def harvesting_report(collection_key, updated_docs, num_added, report):
    '''Make the nice report for the harvesting channel.

    report is a mapping of omission reason -> count; each entry becomes
    a "reason : count" line appended after the summary.
    '''
    report_lines = ['{} : {}'.format(key, val)
                    for key, val in report.items()]
    summary = ('Synced collection {} to solr.\n'.format(collection_key) +
               '{} Couch Docs.\n'.format(len(updated_docs)) +
               '{} solr documents updated\n'.format(num_added))
    return summary + '\n'.join(report_lines)
def sync_couch_collection_to_solr(collection_key):
    '''Re-sync one collection from couchdb into solr.

    Deletes the collection's existing solr docs first, then re-maps and
    pushes every couch doc for the collection, tallying omissions by
    reason and publishing a summary to the harvesting SNS channel.
    Returns (updated_docs, report).
    '''
    # This works from inside an environment with default URLs for couch & solr
    delete_solr_collection(collection_key)
    URL_SOLR = os.environ.get('URL_SOLR', None)
    collection_key = str(collection_key)  # Couch need string keys
    v = CouchDBCollectionFilter(
        couchdb_obj=get_couchdb(), collection_key=collection_key)
    solr_db = Solr(URL_SOLR)
    updated_docs = []
    num_added = 0
    # tally of omission reasons keyed by each exception's dict_key
    report = defaultdict(int)
    for r in v:
        try:
            fill_in_title(r.doc)
            has_required_fields(r.doc)
        except KeyError as e:
            report[e.dict_key] += 1
            print(e.message, file=sys.stderr)
            continue
        except ValueError as e:
            report[e.dict_key] += 1
            print(e.message, file=sys.stderr)
            continue
        solr_doc = map_couch_to_solr_doc(r.doc)
        # TODO: here is where to check if existing and compare collection vals
        try:
            check_nuxeo_media(solr_doc)
        except ValueError as e:
            print(e.message, file=sys.stderr)
            report[e.dict_key] += 1
            continue
        updated_docs.append(solr_doc)
        num_added += push_doc_to_solr(solr_doc, solr_db=solr_db)
    solr_db.commit()
    publish_to_harvesting(
        'Synced collection {} to solr'.format(collection_key),
        harvesting_report(
            collection_key,
            updated_docs,
            num_added,
            report))
    return updated_docs, report
def main(url_couchdb=None,
         dbname=None,
         url_solr=None,
         all_docs=False,
         since=None):
    '''Use the _changes feed with a "since" parameter to only catch new
    changes to docs. The _changes feed will only have the *last* event on
    a document and does not retain intermediate changes.
    Setting the "since" to 0 will result in getting a _changes record for
    each document, essentially dumping the db to solr

    url_couchdb/dbname -- couchdb instance and database to read
    url_solr -- writeable solr instance to push to
    all_docs -- force since='0' (full dump); skips saving last_seq to S3
    since -- explicit changes-feed start; defaults to the value in S3

    FIX: get_couchdb() was previously called twice with identical
    arguments; the redundant first connection has been removed.
    '''
    print('Solr update PID: {}'.format(os.getpid()))
    dt_start = datetime.datetime.now()
    print('Start time:{}'.format(dt_start))
    sys.stdout.flush()  # put pd
    s3_seq_cache = CouchdbLastSeq_S3()
    if not since:
        since = s3_seq_cache.last_seq
    if all_docs:
        since = '0'
    print('Attempt to connect to {0} - db:{1}'.format(url_couchdb, dbname))
    print('Getting changes since:{}'.format(since))
    sys.stdout.flush()  # put pd
    db = get_couchdb(url=url_couchdb, dbname=dbname)
    changes = db.changes(since=since)
    previous_since = since
    last_since = int(
        changes['last_seq'])  # get new last_since for changes feed
    results = changes['results']
    n_up = n_design = n_delete = 0
    solr_db = Solr(url_solr)
    start_time = datetime.datetime.now()
    for row in results:
        cur_id = row['id']
        if '_design' in cur_id:
            # skip couch design documents
            n_design += 1
            print("Skip {0}".format(cur_id))
            continue
        if row.get('deleted', False):
            # deletion: find the matching solr doc via harvest_id_s
            resp = solr_db.select(q=''.join(('harvest_id_s:"', cur_id, '"')))
            if resp.numFound == 1:
                sdoc = resp.results[0]
                print('====DELETING: {0} -- {1}'.format(cur_id, sdoc['id']))
                solr_db.delete(id=sdoc['id'])
                n_delete += 1
            else:
                print("-----DELETION of {} - FOUND {} docs".format(
                    cur_id, resp.numFound))
        else:
            doc = db.get(cur_id)
            try:
                doc = fill_in_title(doc)
                has_required_fields(doc)
            except KeyError as e:
                print(e.message)
                continue
            except ValueError as e:
                print(e.message)
                continue
            try:
                try:
                    solr_doc = map_couch_to_solr_doc(doc)
                except OldCollectionException:
                    print('---- ERROR: OLD COLLECTION FOR:{}'.format(cur_id))
                    continue
                try:
                    check_nuxeo_media(solr_doc)
                except ValueError as e:
                    print(e.message)
                    continue
                solr_doc = push_doc_to_solr(solr_doc, solr_db=solr_db)
            except TypeError as e:
                print('TypeError for {0} : {1}'.format(cur_id, e))
                continue
        n_up += 1
        if n_up % 1000 == 0:
            elapsed_time = datetime.datetime.now() - start_time
            print("Updated {} so far in {}".format(n_up, elapsed_time))
    solr_db.commit()
    if not all_docs:
        # remember where we left off for the next delta-only run
        s3_seq_cache.last_seq = last_since
    print("UPDATED {0} DOCUMENTS. DELETED:{1}".format(n_up, n_delete))
    print("PREVIOUS SINCE:{0}".format(previous_since))
    print("LAST SINCE:{0}".format(last_since))
    run_time = datetime.datetime.now() - dt_start
    print("RUN TIME:{}".format(run_time))
if __name__ == '__main__':
    # CLI entry point: sync couchdb docs into a solr index via the
    # _changes feed; see main() for the since/all_docs semantics.
    parser = argparse.ArgumentParser(
        description='update a solr instance from the couchdb doc store')
    parser.add_argument(
        'url_couchdb', help='URL to couchdb (http://127.0.0.1:5984)')
    parser.add_argument('dbname', help='Couchdb database name')
    parser.add_argument('url_solr', help='URL to writeable solr instance')
    parser.add_argument(
        '--since',
        help='Since parameter for update. Defaults to value stored in S3')
    parser.add_argument(
        '--all_docs',
        action='store_true',
        help=''.join(('Harvest all couchdb docs. Safest bet. ',
                      'Will not set last sequence in s3')))
    args = parser.parse_args()
    print('Warning: this may take some time')
    main(
        url_couchdb=args.url_couchdb,
        dbname=args.dbname,
        url_solr=args.url_solr,
        all_docs=args.all_docs,
        since=args.since)
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,163 | ucldc/harvester | refs/heads/master | /harvester/fetcher/nuxeo_fetcher.py | # -*- coding: utf-8 -*-
import urlparse
import json
import pynux.utils
import boto
from .fetcher import Fetcher
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
STRUCTMAP_S3_BUCKET = 'static.ucldc.cdlib.org/media_json'
NUXEO_MEDIUM_IMAGE_URL_FORMAT = "https://nuxeo.cdlib.org/Nuxeo/nxpicsfile/" \
"default/{}/Medium:content/"
NUXEO_S3_THUMB_URL_FORMAT = "https://s3.amazonaws.com/" \
"static.ucldc.cdlib.org/ucldc-nuxeo-thumb-media/{}"
class NuxeoFetcher(Fetcher):
    '''Harvest a Nuxeo FILE. Can be local or at a URL.

    Iterates the harvestable child objects of a Nuxeo path (discovered
    via DeepHarvestNuxeo) and, for each record handed to the harvest
    controller, attaches structmap_url, structmap_text and isShownBy
    values to the Nuxeo metadata.
    '''
    def __init__(self, url_harvest, extra_data, conf_pynux={}, **kwargs):
        '''
        uses pynux (https://github.com/ucldc/pynux) to grab objects from
        the Nuxeo API.
        api url is set from url_harvest, overriding pynuxrc config and
        passed in conf.
        the pynux config file should have user & password
        and X-NXDocumentProperties values filled in.

        NOTE(review): conf_pynux is a mutable default argument and is
        mutated below (conf_pynux['api'] = ...), so instances created
        with the default dict share state. Left unchanged here.
        '''
        super(NuxeoFetcher, self).__init__(url_harvest, extra_data, **kwargs)
        self._url = url_harvest  # Nuxeo API endpoint
        self._path = extra_data  # Nuxeo path of the collection to harvest
        self._nx = pynux.utils.Nuxeo(conf=conf_pynux)
        self._nx.conf['api'] = self._url
        self._structmap_bucket = STRUCTMAP_S3_BUCKET
        # get harvestable child objects
        conf_pynux['api'] = self._url
        self._dh = DeepHarvestNuxeo(self._path, '', conf_pynux=conf_pynux)
        self._children = iter(self._dh.fetch_objects())
    def _get_structmap_url(self, bucket, obj_key):
        '''Get structmap_url property for object.

        Returns an s3:// url of the form s3://<bucket>/<obj_key>-media.json
        pointing at the stashed media.json for this object.
        '''
        structmap_url = "s3://{0}/{1}{2}".format(bucket, obj_key,
                                                 '-media.json')
        return structmap_url
    def _get_structmap_text(self, structmap_url):
        '''
        Get structmap_text for object. This is all the words from 'label'
        in the json.
        See https://github.com/ucldc/ucldc-docs/wiki/media.json

        Returns '' when the media.json has not been stashed on S3 yet;
        the miss is logged as an error rather than raised so harvesting
        can continue.
        '''
        structmap_text = ""
        # STRUCTMAP_S3_BUCKET includes a key prefix ('.../media_json');
        # the actual S3 bucket name is only the first path component.
        bucketpath = self._structmap_bucket.strip("/")
        bucketbase = bucketpath.split("/")[0]
        parts = urlparse.urlsplit(structmap_url)
        # get contents of <nuxeo_id>-media.json file
        conn = boto.connect_s3()
        bucket = conn.get_bucket(bucketbase)
        key = bucket.get_key(parts.path)
        if not key: # media_json hasn't been harvested yet for this record
            self.logger.error('Media json at: {} missing.'.format(parts.path))
            return structmap_text
        mediajson = key.get_contents_as_string()
        mediajson_dict = json.loads(mediajson)
        # concatenate all of the words from 'label' in the json
        labels = []
        labels.append(mediajson_dict['label'])
        if 'structMap' in mediajson_dict:
            labels.extend([sm['label'] for sm in mediajson_dict['structMap']])
        structmap_text = ' '.join(labels)
        return structmap_text
    def _get_isShownBy(self, nuxeo_metadata):
        '''
        Get isShownBy value for object.
        Fallback chain (first hit wins):
        1) if object has image at parent level, use this
        2) if component(s) have image, use first one we can find
        3) if object has PDF or video at parent level,
        use image stashed on S3
        4) if component(s) have PDF or video, use first component image stashed on S3 we can find
        5) return None

        Returns a url string (Nuxeo Medium image url or S3 thumbnail
        url) or None when nothing usable is found.
        '''
        is_shown_by = None
        uid = nuxeo_metadata['uid']
        self.logger.info("About to get isShownBy for uid {}".format(uid))
        # 1) if object has image at parent level, use this
        if self._has_image(nuxeo_metadata):
            self.logger.info("Nuxeo doc with uid {} has an image at the "
                             "parent level".format(uid))
            is_shown_by = NUXEO_MEDIUM_IMAGE_URL_FORMAT.format(nuxeo_metadata[
                'uid'])
            self.logger.info("is_shown_by: {}".format(is_shown_by))
            return is_shown_by
        # 2) if component(s) have image, use first one we can find
        first_image_component_uid = self._get_first_image_component(
            nuxeo_metadata)
        self.logger.info("first_image_component_uid: {}".format(
            first_image_component_uid))
        if first_image_component_uid:
            self.logger.info("Nuxeo doc with uid {} has an image at the"
                             "component level".format(uid))
            is_shown_by = NUXEO_MEDIUM_IMAGE_URL_FORMAT.format(
                first_image_component_uid)
            self.logger.info("is_shown_by: {}".format(is_shown_by))
            return is_shown_by
        # 3) if object has PDF at parent level, use image stashed on S3
        if self._has_s3_thumbnail(nuxeo_metadata):
            self.logger.info("Nuxeo doc with uid {} has a thumbnail for"
                             "parent file (probably PDF) stashed on S3".format(
                                 uid))
            is_shown_by = NUXEO_S3_THUMB_URL_FORMAT.format(nuxeo_metadata[
                'uid'])
            self.logger.info("is_shown_by: {}".format(is_shown_by))
            return is_shown_by
        # 4) if component(s) have PDF or video, use first component image stashed on S3 we can find
        first_thumb_component_uid = self._get_first_thumb_component(
            nuxeo_metadata)
        self.logger.info("first_thumb_component_uid: {}".format(
            first_thumb_component_uid))
        if first_thumb_component_uid:
            self.logger.info("Nuxeo doc with uid {} has thumbnail at the"
                             "component level".format(uid))
            is_shown_by = NUXEO_S3_THUMB_URL_FORMAT.format(
                first_thumb_component_uid)
            self.logger.info("is_shown_by: {}".format(is_shown_by))
            return is_shown_by
        # 5) return None
        self.logger.info("Could not find any image for Nuxeo doc with uid "
                         "{}! Returning None".format(uid))
        return is_shown_by
    def _has_image(self, metadata):
        ''' based on json metadata, determine whether or not this Nuxeo doc
        has an image file associated
        '''
        if metadata['type'] != "SampleCustomPicture":
            return False
        properties = metadata['properties']
        file_content = properties.get('file:content')
        # 'empty_picture.png' is a placeholder, not a real harvested image
        if file_content and 'name' in file_content and file_content['name'] == 'empty_picture.png':
            return False
        elif file_content and 'data' in file_content:
            return True
        else:
            return False
    def _has_s3_thumbnail(self, metadata):
        ''' based on json metadata, determine whether or not this Nuxeo doc
        is PDF (or other non-image)
        that will have thumb image stashed on S3 for it '''
        if metadata['type'] not in ("CustomFile", "CustomVideo"):
            return False
        properties = metadata['properties']
        file_content = properties.get('file:content')
        if file_content and 'data' in file_content:
            return True
        else:
            return False
    def _get_first_image_component(self, parent_metadata):
        ''' get first image component we can find.

        Walks the direct, non-trashed children of the parent document in
        ecm:pos order via NXQL; returns the first child uid for which
        _has_image() is true, else None.
        '''
        component_uid = None
        query = "SELECT * FROM Document WHERE ecm:parentId = '{}' AND " \
                "ecm:isTrashed = 0 ORDER BY " \
                "ecm:pos".format(parent_metadata['uid'])
        for child in self._nx.nxql(query):
            child_metadata = self._nx.get_metadata(uid=child['uid'])
            if self._has_image(child_metadata):
                component_uid = child_metadata['uid']
                break
        return component_uid
    def _get_first_thumb_component(self, parent_metadata):
        ''' get first non-image component with thumbnail we can find.

        Same child walk as _get_first_image_component, but matching on
        _has_s3_thumbnail(); returns the first matching uid or None.
        '''
        component_uid = None
        query = "SELECT * FROM Document WHERE ecm:parentId = '{}' AND " \
                "ecm:isTrashed = 0 ORDER BY " \
                "ecm:pos".format(parent_metadata['uid'])
        for child in self._nx.nxql(query):
            child_metadata = self._nx.get_metadata(uid=child['uid'])
            if self._has_s3_thumbnail(child_metadata):
                component_uid = child_metadata['uid']
                break
        return component_uid
    def next(self):
        '''Return Nuxeo record by record to the controller.

        Python 2 iterator protocol: delegates to the DeepHarvestNuxeo
        child iterator and raises StopIteration when exhausted.
        '''
        doc = self._children.next()
        self.metadata = self._nx.get_metadata(uid=doc['uid'])
        self.structmap_url = self._get_structmap_url(self._structmap_bucket,
                                                     doc['uid'])
        self.metadata['structmap_url'] = self.structmap_url
        self.metadata['structmap_text'] = self._get_structmap_text(
            self.structmap_url)
        self.metadata['isShownBy'] = self._get_isShownBy(self.metadata)
        return self.metadata
class UCLDCNuxeoFetcher(NuxeoFetcher):
    '''A nuxeo fetcher that verifies headers required for UCLDC metadata
    from the UCLDC Nuxeo instance.
    Essentially, this checks that the X-NXDocumentProperties is correct
    for the UCLDC.
    '''
    # Schemas that must be present in the X-NXDocumentProperties header
    # for a UCLDC harvest to produce complete records.
    _REQUIRED_SCHEMAS = ('dublincore', 'ucldc_schema', 'picture')

    def __init__(self, url_harvest, extra_data, conf_pynux={}, **kwargs):
        '''Check that required UCLDC properties in conf setting.'''
        super(UCLDCNuxeoFetcher, self).__init__(url_harvest, extra_data,
                                                conf_pynux, **kwargs)
        doc_properties = self._nx.conf['X-NXDocumentProperties']
        for schema in self._REQUIRED_SCHEMAS:
            # AssertionError on the first missing schema, matching the
            # original sequence of assert statements.
            assert (schema in doc_properties)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,164 | ucldc/harvester | refs/heads/master | /harvester/cleanup_dir.py | '''Cleanup files & directories under /tmp'''
import os
import shutil
import glob
dir_root = '/tmp/'


def cleanup_work_dir(dir_root=dir_root):
    '''Cleanup directories & files under the root directory.

    Defaults to /tmp/ which is the default working directory.

    Cleanup is best-effort: entries that cannot be removed (permission
    denied, removed concurrently, ...) are skipped silently. Only
    OS-level filesystem errors are suppressed -- the previous bare
    ``except`` also swallowed KeyboardInterrupt/SystemExit.

    :param dir_root: path prefix whose children are removed; the glob
        pattern is ``dir_root + '*'``, so it should end with a path
        separator to target the directory's contents.
    '''
    for entry in glob.glob(dir_root + '*'):
        if os.path.isdir(entry):
            # ignore_errors suppresses only errors raised while deleting,
            # preserving the original best-effort behavior.
            shutil.rmtree(entry, ignore_errors=True)
        else:
            try:
                os.remove(entry)
            except OSError:
                # best-effort: leave undeletable files in place
                pass
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,165 | ucldc/harvester | refs/heads/master | /scripts/rqfilter.py | import sys, os
from redis import Redis
from rq import Connection, Queue, Worker
from rq.queue import FailedQueue

# Redis connection settings come from the environment so the script can
# target local or remote instances without code changes.
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
redis_conn = Redis(host=REDIS_HOST, password=REDIS_PASSWORD)
qfailed = FailedQueue(connection=redis_conn)

# Scan the RQ failed queue for jobs whose traceback contains err_search
# and apply `action` to each match. action can be 'requeue' or 'cancel'.
err_search = 'timeout'
action = 'requeue'
# action = 'cancel'

jobs_filtered = []
for job in qfailed.jobs:
    # hoist job.dump() -- it was previously recomputed for every access
    dump = job.dump()
    print(dump)
    if err_search in dump['exc_info']:
        jobs_filtered.append(job)
        # extend the timeout before requeueing so the retry does not hit
        # the same timeout failure immediately
        job.timeout = 604800  # 1 week
        job.save()
        if action == 'requeue':
            result = qfailed.requeue(job.id)
            # q = Queue(dump['origin'], connection=redis_conn)
            # result = q.enqueue(job)
            # BUG FIX: `print result` was a Python 2 print statement;
            # the rest of this script already uses the print() function,
            # and the statement form is a SyntaxError under Python 3.
            print(result)
print('{} jobs matched {}'.format(len(jobs_filtered), err_search))
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,166 | ucldc/harvester | refs/heads/master | /test/test_run_ingest.py | import os
import sys
from unittest import TestCase
import shutil
import re
import pickle
from mypretty import httpretty
# import httpretty
import logbook
from mock import patch
from mock import MagicMock
from test.utils import ConfigFileOverrideMixin, LogOverrideMixin
from test.utils import DIR_FIXTURES
from harvester.collection_registry_client import Collection
import harvester.fetcher as fetcher
from harvester.fetcher import get_log_file_path
from harvester.config import config
import harvester.run_ingest as run_ingest
# Provide a dummy SNS topic ARN so code paths that read this environment
# variable can run in test environments where it is not configured.
if 'ARN_TOPIC_HARVESTING_REPORT' not in os.environ:
    os.environ['ARN_TOPIC_HARVESTING_REPORT'] = 'fakey'
class MainTestCase(ConfigFileOverrideMixin, LogOverrideMixin, TestCase):
'''Test the main function'''
    @httpretty.activate
    def setUp(self):
        '''Stub the registry collection API, build the Collection and
        harvest config used by every test, and install a logbook
        TestHandler as the "mail" handler so tests can assert on
        notification records.'''
        super(MainTestCase, self).setUp()
        self.dir_test_profile = '/tmp/profiles/test'
        # dir_save is populated by tests that run a harvest; tearDown
        # removes it only when set.
        self.dir_save = None
        if not os.path.isdir(self.dir_test_profile):
            os.makedirs(self.dir_test_profile)
        self.user_email = 'email@example.com'
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/197/",
            body=open(DIR_FIXTURES + '/collection_api_test.json').read())
        self.url_api_collection = \
            "https://registry.cdlib.org/api/v1/collection/197/"
        # simulate command-line invocation of the harvester entry point
        sys.argv = ['thisexe', self.user_email, self.url_api_collection]
        self.collection = Collection(self.url_api_collection)
        self.setUp_config(self.collection)
        self.mail_handler = logbook.TestHandler(bubble=True)
        self.mail_handler.push_thread()
    def tearDown(self):
        '''Pop the mail handler and remove the temp profile dir plus any
        harvest save dir a test recorded in self.dir_save.'''
        self.mail_handler.pop_thread()
        super(MainTestCase, self).tearDown()
        self.tearDown_config()
        if self.dir_save:
            shutil.rmtree(self.dir_save)
        shutil.rmtree(self.dir_test_profile)
    def testReturnAdd(self):
        '''The fetcher module exposes the EMAIL_RETURN_ADDRESS constant.'''
        self.assertTrue(hasattr(fetcher, 'EMAIL_RETURN_ADDRESS'))
    @httpretty.activate
    @patch('boto3.resource', autospec=True)
    def testMainCreatesCollectionProfile(self, mock_boto3):
        '''Test that the main function produces a collection profile
        file for DPLA. The path to this file is needed when creating a
        DPLA ingestion document.
        '''
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/197/",
            body=open(DIR_FIXTURES + '/collection_api_test.json').read())
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://content.cdlib.org/oai?.*"),
            body=open(DIR_FIXTURES + '/testOAI-128-records.xml').read())
        Collection("https://registry.cdlib.org/api/v1/collection/197/")
        # stub the CouchDB ingestion-document creation so no database
        # is needed; fetcher.main should propagate the returned id
        with patch('dplaingestion.couch.Couch') as mock_couch:
            instance = mock_couch.return_value
            instance._create_ingestion_document.return_value = 'test-id'
            ingest_doc_id, num, self.dir_save, self.fetcher = fetcher.main(
                self.user_email,
                self.url_api_collection,
                log_handler=self.test_log_handler,
                mail_handler=self.test_log_handler,
                dir_profile=self.dir_test_profile,
                profile_path=self.profile_path,
                config_file=self.config_file)
        self.assertEqual(ingest_doc_id, 'test-id')
        # 128 records come from the OAI fixture file
        self.assertEqual(num, 128)
        self.assertTrue(os.path.exists(os.path.join(self.profile_path)))
    @patch('dplaingestion.couch.Couch')
    def testMainCollection__init__Error(self, mock_couch):
        '''A malformed collection url makes fetcher.main raise ValueError
        and emit exactly one log record and one mail record.'''
        self.assertRaises(
            ValueError,
            fetcher.main,
            self.user_email,
            'this-is-a-bad-url',
            log_handler=self.test_log_handler,
            mail_handler=self.mail_handler,
            dir_profile=self.dir_test_profile,
            config_file=self.config_file)
        self.assertEqual(len(self.test_log_handler.records), 1)
        self.assertEqual(len(self.mail_handler.records), 1)
    @httpretty.activate
    @patch('dplaingestion.couch.Couch')
    def testMainCollectionWrongType(self, mock_couch):
        '''Test what happens with wrong type of harvest.

        The registry fixture declares an unsupported harvest type;
        fetcher.main must raise ValueError and log/mail one record each.
        '''
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/197/",
            body=open(DIR_FIXTURES + '/collection_api_test_bad_type.json')
            .read())
        self.assertRaises(
            ValueError,
            fetcher.main,
            self.user_email,
            "https://registry.cdlib.org/api/v1/collection/197/",
            log_handler=self.test_log_handler,
            mail_handler=self.mail_handler,
            dir_profile=self.dir_test_profile,
            config_file=self.config_file)
        self.assertEqual(len(self.test_log_handler.records), 1)
        self.assertEqual(len(self.mail_handler.records), 1)
    @httpretty.activate
    def testCollectionNoEnrichItems(self):
        '''A collection fixture without enrichment items makes the
        dpla_profile_obj property raise ValueError.'''
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/36/",
            body=open(DIR_FIXTURES + '/collection_api_no_enrich_item.json')
            .read())
        c = Collection("https://registry.cdlib.org/api/v1/collection/36/")
        with self.assertRaises(ValueError):
            c.dpla_profile_obj
    @httpretty.activate
    @patch(
        'harvester.fetcher.HarvestController.__init__',
        side_effect=Exception('Boom!'),
        autospec=True)
    def testMainHarvestController__init__Error(self, mock_method):
        '''Test the try-except block in main when HarvestController not created
        correctly: the exception is re-raised and logged with the
        original message.'''
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/197/",
            body=open(DIR_FIXTURES + '/collection_api_test.json').read())
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://content.cdlib.org/oai?.*"),
            body=open(DIR_FIXTURES + '/testOAI-128-records.xml').read())
        sys.argv = [
            'thisexe', 'email@example.com',
            'https://registry.cdlib.org/api/v1/collection/197/'
        ]
        self.assertRaises(
            Exception,
            fetcher.main,
            self.user_email,
            self.url_api_collection,
            log_handler=self.test_log_handler,
            mail_handler=self.test_log_handler,
            dir_profile=self.dir_test_profile)
        self.assertEqual(len(self.test_log_handler.records), 4)
        self.assertTrue("[ERROR] HarvestMain: Exception in harvester init" in
                        self.test_log_handler.formatted_records[3])
        self.assertTrue("Boom!" in self.test_log_handler.formatted_records[3])
        # main() writes the profile file before the controller blows up;
        # remove it so later tests start clean
        c = Collection('https://registry.cdlib.org/api/v1/collection/197/')
        os.remove(
            os.path.abspath(
                os.path.join(self.dir_test_profile, c.id + '.pjs')))
        # self.dir_test_profile, c.slug+'.pjs')))
@httpretty.activate
@patch(
'harvester.fetcher.HarvestController.harvest',
side_effect=Exception('Boom!'),
autospec=True)
def testMainFnWithException(self, mock_method):
httpretty.register_uri(
httpretty.GET,
"https://registry.cdlib.org/api/v1/collection/197/",
body=open(DIR_FIXTURES + '/collection_api_test.json').read())
httpretty.register_uri(
httpretty.GET,
re.compile("http://content.cdlib.org/oai?.*"),
body=open(DIR_FIXTURES + '/testOAI-128-records.xml').read())
with patch('dplaingestion.couch.Couch') as mock_couch:
instance = mock_couch.return_value
instance._create_ingestion_document.return_value = 'test-id'
self.assertRaises(
Exception,
fetcher.main,
self.user_email,
self.url_api_collection,
log_handler=self.test_log_handler,
mail_handler=self.test_log_handler,
profile_path=self.profile_path,
config_file=self.config_file)
self.assertEqual(len(self.test_log_handler.records), 7)
self.assertTrue("[ERROR] HarvestMain: Error while harvesting:" in
self.test_log_handler.formatted_records[6])
self.assertTrue("Boom!" in self.test_log_handler.formatted_records[6])
@httpretty.activate
@patch('boto3.resource', autospec=True)
def testMainFn(self, mock_boto3):
httpretty.register_uri(
httpretty.GET,
"https://registry.cdlib.org/api/v1/collection/197/",
body=open(DIR_FIXTURES + '/collection_api_test.json').read())
httpretty.register_uri(
httpretty.GET,
re.compile("http://content.cdlib.org/oai?.*"),
body=open(DIR_FIXTURES + '/testOAI-128-records.xml').read())
with patch('dplaingestion.couch.Couch') as mock_couch:
instance = mock_couch.return_value
instance._create_ingestion_document.return_value = 'test-id'
ingest_doc_id, num, self.dir_save, self.harvester = fetcher.main(
self.user_email,
self.url_api_collection,
log_handler=self.test_log_handler,
mail_handler=self.test_log_handler,
dir_profile=self.dir_test_profile,
profile_path=self.profile_path,
config_file=self.config_file)
self.assertEqual(len(self.test_log_handler.records), 10)
self.assertIn(u'[INFO] HarvestMain: Init harvester next',
self.test_log_handler.formatted_records[0])
self.assertEqual(self.test_log_handler.formatted_records[1],
u'[INFO] HarvestMain: Create DPLA profile document')
self.assertTrue(u'[INFO] HarvestMain: DPLA profile document' in
self.test_log_handler.formatted_records[2])
self.assertEqual(self.test_log_handler.formatted_records[3],
u'[INFO] HarvestMain: Create ingest doc in couch')
self.assertEqual(self.test_log_handler.formatted_records[4],
u'[INFO] HarvestMain: Ingest DOC ID: test-id')
self.assertEqual(self.test_log_handler.formatted_records[5],
u'[INFO] HarvestMain: Start harvesting next')
self.assertTrue(
u"[INFO] HarvestController: Starting harvest for: "
u"email@example.com Santa Clara University: Digital Objects "
u"['UCDL'] ['Calisphere']",
self.test_log_handler.formatted_records[6])
self.assertEqual(self.test_log_handler.formatted_records[7],
u'[INFO] HarvestController: 100 records harvested')
self.assertEqual(self.test_log_handler.formatted_records[8],
u'[INFO] HarvestController: 128 records harvested')
self.assertEqual(
self.test_log_handler.formatted_records[9],
u'[INFO] HarvestMain: Finished harvest of '
u'calisphere-santa-clara-university-digital-objects. 128 '
u'records harvested.'
)
class LogFileNameTestCase(TestCase):
'''Test the log file name function'''
def setUp(self):
self.old_dir = os.environ.get('DIR_HARVESTER_LOG')
os.environ['DIR_HARVESTER_LOG'] = 'test/log/dir'
def tearDown(self):
os.environ.pop('DIR_HARVESTER_LOG')
if self.old_dir:
os.environ['DIR_HARVESTER_LOG'] = self.old_dir
def testLogName(self):
n = get_log_file_path('test_collection_slug')
print(n)
self.assertTrue(
re.match(
'test/log/dir/harvester-test_collection_slug-\d{8}-\d{6}-.log',
n))
class ConfigTestCase(TestCase):
'''test the environment variable parsing and confg file init'''
def setUp(self):
self.rpwd = self.rhost = self.ec2ingest = self.ec2solr = None
if 'REDIS_PASSWORD' in os.environ:
self.rpwd = os.environ['REDIS_PASSWORD']
del os.environ['REDIS_PASSWORD']
if 'REDIS_HOST' in os.environ:
self.rhost = os.environ['REDIS_HOST']
del os.environ['REDIS_HOST']
def tearDown(self):
# remove env vars if created?
if self.rpwd:
os.environ['REDIS_PASSWORD'] = self.rpwd
else:
del os.environ['REDIS_PASSWORD']
if self.rhost:
os.environ['REDIS_HOST'] = self.rhost
def testConfig(self):
with self.assertRaises(KeyError) as cm:
config(redis_required=True)
self.assertEqual(
str(cm.exception.message),
'Please set environment variable REDIS_PASSWORD to redis '
'password!')
os.environ['REDIS_HOST'] = 'redis_host_ip'
os.environ['REDIS_PASSWORD'] = 'XX'
conf = config()
self.assertEqual(conf['redis_host'], 'redis_host_ip')
self.assertEqual(conf['redis_port'], '6380')
self.assertEqual(conf['redis_password'], 'XX')
self.assertEqual(conf['redis_connect_timeout'], 10)
class RunIngestTestCase(LogOverrideMixin, TestCase):
    '''Test the run_ingest script. Wraps harvesting with rest of DPLA
    ingest process.
    '''

    def setUp(self):
        # Install the environment variables run_ingest.main reads;
        # values are dummies -- every outbound service is mocked below.
        super(RunIngestTestCase, self).setUp()
        os.environ['REDIS_PASSWORD'] = 'XX'
        os.environ['ID_EC2_INGEST'] = 'INGEST'
        os.environ['ID_EC2_SOLR_BUILD'] = 'BUILD'
        os.environ['DPLA_CONFIG_FILE'] = 'akara.ini'
        os.environ['DATA_BRANCH'] = 'stage'
        os.environ['ARN_TOPIC_HARVESTING_REPORT'] = 'bogus'
        os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'

    def tearDown(self):
        # remove env vars if created?
        super(RunIngestTestCase, self).tearDown()
        del os.environ['REDIS_PASSWORD']
        del os.environ['ID_EC2_INGEST']
        del os.environ['ID_EC2_SOLR_BUILD']
        del os.environ['DATA_BRANCH']
        del os.environ['ARN_TOPIC_HARVESTING_REPORT']
        del os.environ['AWS_DEFAULT_REGION']
        # Deleted conditionally: a test may already have removed it.
        if 'DPLA_CONFIG_FILE' in os.environ:
            del os.environ['DPLA_CONFIG_FILE']

    # Patch every external service run_ingest.main touches: S3 (boto3),
    # redis, couchdb and each dplaingestion pipeline step.  Decorators
    # apply bottom-up, so the mock parameters below are in reverse
    # order of the decorator list.
    @patch('boto3.resource', autospec=True)
    @patch('harvester.run_ingest.Redis', autospec=True)
    @patch('couchdb.Server')
    @patch('dplaingestion.scripts.enrich_records.main', return_value=0)
    @patch('dplaingestion.scripts.save_records.main', return_value=0)
    @patch('dplaingestion.scripts.remove_deleted_records.main', return_value=0)
    @patch('dplaingestion.scripts.check_ingestion_counts.main', return_value=0)
    @patch('dplaingestion.scripts.dashboard_cleanup.main', return_value=0)
    @patch('dplaingestion.couch.Couch')
    def testRunIngest(self, mock_couch, mock_dash_clean, mock_check,
                      mock_remove, mock_save, mock_enrich, mock_couchdb,
                      mock_redis, mock_boto3):
        '''Happy path: run_ingest.main completes and drives the mocked
        couch/enrich pipeline with the expected arguments.'''
        mock_couch.return_value._create_ingestion_document.return_value = \
            'test-id'
        # this next is because the redis client unpickles....
        mock_redis.return_value.hget.return_value = pickle.dumps('RQ-result!')
        mail_handler = MagicMock()
        url_api_collection = 'https://registry.cdlib.org/api/v1/collection/' \
            '178/'
        # NOTE(review): httpretty is enabled here but never disabled in
        # tearDown -- confirm the mypretty wrapper handles cleanup.
        httpretty.httpretty.enable()
        httpretty.register_uri(
            httpretty.GET,
            url_api_collection,
            body=open(DIR_FIXTURES + '/collection_api_test_oac.json').read())
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1'
            '&relation=ark:/13030/tf2v19n928',
            body=open(DIR_FIXTURES + '/testOAC-url_next-1.json').read())
        # Canned SNS publish response for the harvesting-report topic.
        httpretty.register_uri(
            httpretty.POST,
            'https://sns.us-west-2.amazonaws.com',
            body='''<PublishResponse
            xmlns="http://sns.amazonaws.com/doc/2010-03-31/"> <PublishResult>
            <MessageId>567910cd-659e-55d4-8ccb-5aaf14679dc0</MessageId>
            </PublishResult> <ResponseMetadata>
            <RequestId>d74b8436-ae13-5ab4-a9ff-ce54dfea72a0</RequestId>
            </ResponseMetadata> </PublishResponse>''')
        # EC2 instance-metadata endpoint queried for the local IP.
        httpretty.register_uri(
            httpretty.GET,
            'http://169.254.169.254/latest/meta-data/local-ipv4',
            body='0.0.0.0')
        run_ingest.main(
            'mark.redar@ucop.edu',
            url_api_collection,
            log_handler=self.test_log_handler,
            mail_handler=mail_handler)
        # Couch must be opened with the config named in DPLA_CONFIG_FILE.
        mock_couch.assert_called_with(
            config_file='akara.ini',
            dashboard_db_name='dashboard',
            dpla_db_name='ucldc')
        mock_enrich.assert_called_with([None, 'test-id'])
        self.assertEqual(len(self.test_log_handler.records), 14)

    @patch('boto3.resource', autospec=True)
    @patch('harvester.run_ingest.Redis', autospec=True)
    @patch('couchdb.Server')
    @patch('dplaingestion.scripts.enrich_records.main', return_value=0)
    @patch('dplaingestion.scripts.save_records.main', return_value=0)
    @patch('dplaingestion.scripts.remove_deleted_records.main', return_value=0)
    @patch('dplaingestion.scripts.check_ingestion_counts.main', return_value=0)
    @patch('dplaingestion.scripts.dashboard_cleanup.main', return_value=0)
    @patch('dplaingestion.couch.Couch')
    def testRunIngestProductionNotReady(self, mock_couch, mock_dash_clean,
                                        mock_check, mock_remove, mock_save,
                                        mock_enrich, mock_couchdb, mock_redis,
                                        mock_boto3):
        '''With DATA_BRANCH=production, run_ingest.main is expected to
        raise instead of completing (fewer log records than happy path).'''
        mock_couch.return_value._create_ingestion_document.return_value = \
            'test-id'
        # this next is because the redis client unpickles....
        mock_redis.return_value.hget.return_value = pickle.dumps('RQ-result!')
        mail_handler = MagicMock()
        url_api_collection = 'https://registry.cdlib.org/api/v1/' \
            'collection/178/'
        httpretty.httpretty.enable()
        httpretty.register_uri(
            httpretty.GET,
            url_api_collection,
            body=open(DIR_FIXTURES + '/collection_api_test_oac.json').read())
        httpretty.register_uri(
            httpretty.GET,
            'http://dsc.cdlib.org/search?facet=type-tab&style=cui&raw=1&'
            'relation=ark:/13030/tf2v19n928',
            body=open(DIR_FIXTURES + '/testOAC-url_next-1.json').read())
        # Override the 'stage' value from setUp; tearDown deletes it.
        os.environ['DATA_BRANCH'] = 'production'
        self.assertRaises(
            Exception,
            run_ingest.main,
            'mark.redar@ucop.edu',
            url_api_collection,
            log_handler=self.test_log_handler,
            mail_handler=mail_handler)
        # NOTE: Python 2 print statement (this file predates py3).
        print self.test_log_handler.records
        self.assertEqual(len(self.test_log_handler.records), 9)
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,167 | ucldc/harvester | refs/heads/master | /test/test_ia_fetcher.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from unittest import TestCase
import harvester.fetcher as fetcher
from test.utils import DIR_FIXTURES
from test.utils import LogOverrideMixin
from mypretty import httpretty
# import httpretty
class InternetArchiveTestCase(LogOverrideMixin, TestCase):
    '''Exercise the fetcher for the Internet Archive advanced-search API.'''

    @httpretty.activate
    def test_fetching(self):
        '''Fetch three mocked result pages, then check the total record
        count and the full content of the final record.'''
        base_url = 'https://example.edu'
        search_query = ('collection:environmentaldesignarchive '
                        'AND subject:"edith heath"')
        first_page_url = fetcher.IA_Fetcher.url_advsearch.format(
            page_current=1, search_query=search_query)
        # Rotating responses: one fixture file per results page.
        pages = [
            httpretty.Response(
                body=open(DIR_FIXTURES + '/ia-results-%s.json' % i).read())
            for i in (1, 2, 3)
        ]
        httpretty.register_uri(httpretty.GET, first_page_url, responses=pages)
        ia_fetcher = fetcher.IA_Fetcher(base_url, search_query)
        results = []
        for page in ia_fetcher:
            results.extend(page)
        self.assertEqual(ia_fetcher.url_base, base_url)
        self.assertEqual(
            ia_fetcher.url_advsearch,
            'https://archive.org/advancedsearch.php?'
            'q={search_query}&rows=500&page={page_current}&output=json')
        self.assertEqual(len(results), 1285)
        expected_last = {
            u'week': 0,
            u'publicdate': u'2014-02-28T03:17:59Z',
            u'format': [
                u'Archive BitTorrent', u'JPEG', u'JPEG Thumb', u'JSON',
                u'Metadata'
            ],
            u'title': u'Upright Cabinet Piano',
            u'downloads': 68,
            u'indexflag': [u'index', u'nonoindex'],
            u'mediatype': u'image',
            u'collection': [
                u'metropolitanmuseumofart-gallery',
                u'fav-mar_a_luisa_guevara_tirado', u'fav-drewblanco'
            ],
            u'month': 1,
            u'btih': u'e16555eb5474d2543c7ad27a1cfd145195ce05bf',
            u'item_size': 353871,
            u'backup_location': u'ia905804_31',
            u'year': u'1835',
            u'date': u'1835-01-01T00:00:00Z',
            u'oai_updatedate': [
                u'2014-02-28T03:17:59Z', u'2014-02-28T03:17:59Z',
                u'2016-08-31T20:56:29Z'
            ],
            u'identifier': u'mma_upright_cabinet_piano_504395',
            u'subject': [
                u'North and Central America', u'Wood, various materials',
                u'Cabinets', u'Case furniture', u'1835', u'Pianos',
                u'New York City', u'Metropolitan Museum of Art',
                u'Zithers', u'United States', u'Brooklyn',
                u'Musical instruments', u'Chordophones', u'New York',
                u'Furniture'
            ]
        }
        self.assertEqual(results[1284], expected_last)
# Copyright © 2017, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,168 | ucldc/harvester | refs/heads/master | /test/test_solr_fetcher.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from mock import patch
from test.utils import ConfigFileOverrideMixin, LogOverrideMixin
from test.utils import DIR_FIXTURES
from harvester.collection_registry_client import Collection
import solr
import pysolr
import harvester.fetcher as fetcher
from mypretty import httpretty
# import httpretty
class SolrFetcherTestCase(LogOverrideMixin, TestCase):
    '''Tests for the solr-backed fetcher.'''

    # The fetcher POSTs to /solr/select with
    # q=extra_data&version=2.2&fl=*,score&wt=standard
    @httpretty.activate
    def testClassInit(self):
        '''SolrFetcher requires a url and a query; on init it exposes the
        solr client, the query and the first response page.'''
        httpretty.register_uri(
            httpretty.POST,
            'http://example.edu/solr/select',
            body=open(DIR_FIXTURES +
                      '/ucsd-new-feed-missions-bb3038949s-0.xml').read())
        self.assertRaises(TypeError, fetcher.SolrFetcher)
        solr_fetcher = fetcher.SolrFetcher(
            'http://example.edu/solr', 'extra_data', rows=3)
        for attr in ('solr', 'query', 'resp', 'numFound', 'index'):
            self.assertTrue(hasattr(solr_fetcher, attr))
        self.assertIsInstance(solr_fetcher.solr, solr.Solr)
        self.assertEqual(solr_fetcher.solr.url, 'http://example.edu/solr')
        self.assertEqual(solr_fetcher.query, 'extra_data')
        self.assertEqual(solr_fetcher.resp.start, 0)
        self.assertEqual(len(solr_fetcher.resp.results), 3)
        self.assertEqual(solr_fetcher.numFound, 10)

    @httpretty.activate
    def testIterateOverResults(self):
        '''Iterating the fetcher pages through all mocked result sets.'''
        fixture_tmpl = (DIR_FIXTURES +
                        '/ucsd-new-feed-missions-bb3038949s-%d.xml')
        httpretty.register_uri(
            httpretty.POST,
            'http://example.edu/solr/select',
            responses=[
                httpretty.Response(body=open(fixture_tmpl % i).read())
                for i in range(5)
            ])
        solr_fetcher = fetcher.SolrFetcher(
            'http://example.edu/solr', 'extra_data', rows=3)
        self.assertEqual(len(solr_fetcher.resp.results), 3)
        record_count = 0
        for record in solr_fetcher:
            record_count += 1
            self.assertEqual(['Mission at Santa Barbara'],
                             record['title_tesim'])
        self.assertEqual(record_count, 10)
class PySolrQueryFetcherTestCase(LogOverrideMixin, TestCase):
    '''Tests for the pysolr-based query-handler fetcher.'''

    @httpretty.activate
    def testClassInit(self):
        '''PySolrQueryFetcher requires a url and a query and loads the
        first page of results on init.'''
        httpretty.register_uri(
            httpretty.GET,
            'http://example.edu/solr/query',
            body=open(DIR_FIXTURES +
                      '/ucsd-new-feed-missions-bb3038949s-0.json').read())
        self.assertRaises(TypeError, fetcher.PySolrQueryFetcher)
        pysolr_fetcher = fetcher.PySolrQueryFetcher(
            'http://example.edu/solr', 'extra_data')
        self.assertTrue(hasattr(pysolr_fetcher, 'solr'))
        self.assertIsInstance(pysolr_fetcher.solr, pysolr.Solr)
        self.assertEqual(pysolr_fetcher.solr.url, 'http://example.edu/solr')
        self.assertTrue(hasattr(pysolr_fetcher, 'results'))
        self.assertEqual(len(pysolr_fetcher.results), 4)
        self.assertEqual(pysolr_fetcher.results['response']['numFound'], 10)
        self.assertEqual(pysolr_fetcher.numFound, 10)
        self.assertTrue(hasattr(pysolr_fetcher, 'index'))

    @httpretty.activate
    def testIterateOverResults(self):
        '''Iterating walks the cursorMark pages until exhausted.'''
        fixture_tmpl = (DIR_FIXTURES +
                        '/ucsd-new-feed-missions-bb3038949s-%d.json')
        httpretty.register_uri(
            httpretty.GET,
            'http://example.edu/solr/query',
            responses=[
                httpretty.Response(body=open(fixture_tmpl % i).read())
                for i in range(4)
            ])
        self.assertRaises(TypeError, fetcher.PySolrFetcher)
        pysolr_fetcher = fetcher.PySolrQueryFetcher(
            'http://example.edu/solr', 'extra_data', rows=3)
        self.assertEqual(
            pysolr_fetcher._query_path,
            'query?q=extra_data&sort=id+asc&cursorMark=%2A&wt=json&rows=3')
        record_count = 0
        for record in pysolr_fetcher:
            record_count += 1
        self.assertEqual(record_count, 10)
        # `record` is the last item yielded by the loop above.
        self.assertEqual(['Mission Santa Ynez'], record['title_tesim'])
class RequestsSolrFetcherTestCase(LogOverrideMixin, TestCase):
    '''Test the Request Solr fetcher which uses cursorMark'''

    @httpretty.activate
    def testIterateOverResults(self):
        '''Test the RequestSolrFetcher iteration over a mock set of data'''
        httpretty.register_uri(
            httpretty.GET,
            'http://example.edu/solr',
            responses=[
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucb-cursor-results-0.json').read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucb-cursor-results-1.json').read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucb-cursor-results-2.json').read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucb-cursor-results-3.json').read()),
            ])
        # The extra_data string carries both the solr query and request
        # headers; header values may themselves contain ':'.
        h = fetcher.RequestsSolrFetcher(
            'http://example.edu/solr',
            'q=extra:data&header=app-name:Value-with:in-it'
            '&header=app_key:111222333')
        h._page_size = 1
        self.assertEqual(h._query_params['q'], ['extra:data'])
        self.assertEqual(h._headers, {
            'app-name': 'Value-with:in-it',
            'app_key': '111222333'
        })
        # Step one doc at a time (page size 1) and watch the cursor
        # advance.  From the assertions below, _cursorMark appears to be
        # the mark used for the current page and _nextCursorMark the one
        # solr returned for the following page -- TODO confirm against
        # RequestsSolrFetcher.  h.next() is the Python 2 iterator call.
        cursor = h._nextCursorMark
        docs = []
        docs.append(h.next())  # gets the one from init, no get_next_results
        self.assertEqual(cursor, h._cursorMark)
        docs.append(h.next())  # get_next_results
        self.assertNotEqual(cursor, h._nextCursorMark)
        cursor = h._nextCursorMark
        docs.append(h.next())  # get_next_results
        self.assertEqual(cursor, h._cursorMark)
        cursor = h._nextCursorMark
        docs.append(h.next())  # get_next_results
        self.assertEqual(cursor, h._cursorMark)
        self.assertEqual(len(docs), 4)

    def test_url_request(self):
        '''Test the url_request dynamic property of the fetcher'''
        h = fetcher.RequestsSolrFetcher(
            'http://example.edu/solr',
            'q=extra:data&header=app-name:Value-with:in-it'
            '&header=app_key:111222333')
        # Defaults: rows=1000, sort=id asc, wt=json; cursorMark renders
        # as 'None' until the first fetch sets it.
        self.assertEqual(
            'http://example.edu/solr?rows=1000&cursorMark=None'
            '&q=extra:data&sort=id asc&wt=json',
            h.url_request)
        h._cursorMark = 'XXXX'
        self.assertEqual(
            'http://example.edu/solr?rows=1000&cursorMark=XXXX'
            '&q=extra:data&sort=id asc&wt=json',
            h.url_request)
        # Explicit wt/sort in extra_data override the defaults.
        h = fetcher.RequestsSolrFetcher(
            'http://example.edu/solr',
            'q=extra:data&header=app-name:Value-with:in-it'
            '&header=app_key:111222333&wt=xml&sort=PID asc')
        self.assertEqual(
            'http://example.edu/solr?rows=1000&cursorMark=None'
            '&q=extra:data&wt=xml&sort=PID asc',
            h.url_request)
class HarvestSolr_ControllerTestCase(ConfigFileOverrideMixin, LogOverrideMixin,
                                     TestCase):
    '''Test the function of Solr harvest controller'''

    @httpretty.activate
    def setUp(self):
        super(HarvestSolr_ControllerTestCase, self).setUp()
        # self.testFile = DIR_FIXTURES+'/collection_api_test_oac.json'
        # Registry record for collection 183 (a solr-type collection).
        httpretty.register_uri(
            httpretty.GET,
            "https://registry.cdlib.org/api/v1/collection/183/",
            body=open(DIR_FIXTURES + '/collection_api_solr_harvest.json').read(
            ))
        # First solr results page, requested while building the
        # HarvestController below.
        httpretty.register_uri(
            httpretty.POST,
            'http://example.edu/solr/blacklight/select',
            body=open(DIR_FIXTURES +
                      '/ucsd-new-feed-missions-bb3038949s-0.xml').read())
        self.collection = Collection(
            'https://registry.cdlib.org/api/v1/collection/183/')
        self.setUp_config(self.collection)
        self.controller = fetcher.HarvestController(
            'email@example.com',
            self.collection,
            config_file=self.config_file,
            profile_path=self.profile_path)
        # NOTE: Python 2 print statement (this file predates py3).
        print "DIR SAVE::::: {}".format(self.controller.dir_save)

    def tearDown(self):
        super(HarvestSolr_ControllerTestCase, self).tearDown()
        self.tearDown_config()
        # shutil.rmtree(self.controller.dir_save)

    @httpretty.activate
    @patch('boto3.resource', autospec=True)
    def testSolrHarvest(self, mock_boto3):
        '''Test the function of the Solr harvest with <date> objects
        in stream'''
        # Five rotating response pages; the harvest polls until the
        # mocked result set is exhausted.
        httpretty.register_uri(
            httpretty.POST,
            'http://example.edu/solr/blacklight/select',
            responses=[
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucsd-new-feed-missions-bb3038949s-0.xml')
                    .read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucsd-new-feed-missions-bb3038949s-1.xml')
                    .read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucsd-new-feed-missions-bb3038949s-2.xml')
                    .read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucsd-new-feed-missions-bb3038949s-3.xml')
                    .read()),
                httpretty.Response(body=open(
                    DIR_FIXTURES + '/ucsd-new-feed-missions-bb3038949s-4.xml')
                    .read())
            ])
        self.assertTrue(hasattr(self.controller, 'harvest'))
        self.controller.harvest()
        # NOTE: Python 2 print statement.
        print "LOGS:{}".format(self.test_log_handler.formatted_records)
        self.assertEqual(len(self.test_log_handler.records), 2)
        self.assertTrue(
            'UC San Diego' in self.test_log_handler.formatted_records[0])
        self.assertEqual(self.test_log_handler.formatted_records[1],
                         '[INFO] HarvestController: 13 records harvested')
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,169 | ucldc/harvester | refs/heads/master | /harvester/fetcher/oai_fetcher.py | # -*- coding: utf-8 -*-
import re
import tempfile
from urlparse import parse_qs
from .fetcher import Fetcher
from sickle import Sickle
from sickle.models import Record as SickleDCRecord
from pymarc import parse_xml_to_array
from lxml import etree
def etree_to_dict(t):
    '''Recursively convert an lxml element into a nested dict.

    The element's tag maps to the list of converted child elements, each
    attribute is stored under an '@'-prefixed key, and the element text
    is stored under 'text'.
    '''
    converted = {t.tag: [etree_to_dict(child) for child in t.iterchildren()]}
    for attr_name, attr_value in t.attrib.iteritems():
        converted['@' + attr_name] = attr_value
    converted['text'] = t.text
    return converted
class SickleMARCRecord(SickleDCRecord):
    '''Extend the sickle Record to handle oai marc xml
    using pymarc's parse_xml_to_array function.

    parse_xml_to_array takes a file and returns an array
    of all records in the file, but in this case it's
    guaranteed to be just one record per file, because
    Sickle is handling iterating through the oai feed.

    SickleDCRecord definition:
    https://github.com/mloesch/sickle/blob/79d7c727af3a4437720116549d4c681e74799f7e/sickle/models.py#L120
    '''

    def __init__(self, record_element, strip_ns=True):
        super(SickleMARCRecord, self).__init__(
            record_element, strip_ns=strip_ns)
        if not self.deleted:
            # pymarc parses from a file object, so round-trip the
            # <metadata> subtree through an anonymous temp file.
            marc_file = tempfile.TemporaryFile()
            metadata = self.xml.find(
                ".//" + self._oai_namespace + "metadata/")
            marc_file.write(
                etree.tostring(metadata, encoding='utf-8'))
            # Rewind so parse_xml_to_array reads from the start.
            marc_file.seek(0)
            records = parse_xml_to_array(marc_file)
            # Exactly one MARC record per OAI <record>; replace the
            # DC-parsed metadata with the MARC dict form.
            self.metadata = records[0].as_dict()
class SickleDIDLRecord(SickleDCRecord):
    '''Extend the Sickle Record to handle oai didl xml.
    Fills in data for the didl specific values

    After Record's __init__ runs, the self.metadata contains keys for the
    following DIDL data: DIDLInfo, Resource, Item, Component, Statement,
    Descriptor

    DIDLInfo contains created date for the data feed - drop
    Statement wraps the dc metadata
    Only the Resource & Component have unique data in them
    '''

    def __init__(self, record_element, strip_ns=True):
        super(SickleDIDLRecord, self).__init__(
            record_element, strip_ns=strip_ns)
        # need to grab the didl components here
        if not self.deleted:
            didl = self.xml.find('.//{urn:mpeg:mpeg21:2002:02-DIDL-NS}DIDL')
            didls = didl.findall('.//{urn:mpeg:mpeg21:2002:02-DIDL-NS}*')
            for element in didls:
                # Strip the "{namespace}" qualifier; keep the bare tag
                # name as the metadata key.
                tag = re.sub(r'\{.*\}', '', element.tag)
                self.metadata[tag] = etree_to_dict(element)
class OAIFetcher(Fetcher):
    '''Fetcher for oai'''

    def __init__(self, url_harvest, extra_data, **kwargs):
        '''url_harvest: base URL of the OAI-PMH endpoint.
        extra_data: either a bare OAI set spec, or a query string that may
        carry "set=..." and/or "metadataPrefix=..." parameters.
        '''
        super(OAIFetcher, self).__init__(url_harvest, extra_data, **kwargs)
        # TODO: check extra_data?
        self.oai_client = Sickle(self.url)
        self._metadataPrefix = self.get_metadataPrefix(extra_data)
        # ensure not cached in module?
        self.oai_client.class_mapping['ListRecords'] = SickleDCRecord
        self.oai_client.class_mapping['GetRecord'] = SickleDCRecord
        if extra_data:  # extra data is set spec
            # NOTE(review): substring test -- any extra_data containing the
            # characters "set" takes the parse_qs branch; otherwise the
            # whole string is used as the set spec even if it holds other
            # params. Confirm registry values fit these assumptions.
            if 'set' in extra_data:
                params = parse_qs(extra_data)
                self._set = params['set'][0]
            else:
                self._set = extra_data
            # if metadataPrefix=didl, use didlRecord for parsing
            if self._metadataPrefix.lower() == 'didl':
                self.oai_client.class_mapping['ListRecords'] = SickleDIDLRecord
                self.oai_client.class_mapping['GetRecord'] = SickleDIDLRecord
            elif self._metadataPrefix.lower() == 'marcxml':
                self.oai_client.class_mapping['ListRecords'] = SickleMARCRecord
                self.oai_client.class_mapping['GetRecord'] = SickleMARCRecord
            self.records = self.oai_client.ListRecords(
                metadataPrefix=self._metadataPrefix,
                set=self._set,
                ignore_deleted=True)
        else:
            self.records = self.oai_client.ListRecords(
                metadataPrefix=self._metadataPrefix, ignore_deleted=True)

    def get_metadataPrefix(self, extra_data):
        '''Set the metadata format for the feed.
        If it is in extra_data, use that.
        Else, see if oai_qdc is supported, if so use that.
        Else, revert to oai_dc
        '''
        if extra_data:
            if 'metadataPrefix' in extra_data:
                params = parse_qs(extra_data)
                return params['metadataPrefix'][0]
        # Ask the endpoint which formats it supports.
        mdformats = [x for x in self.oai_client.ListMetadataFormats()]
        for f in mdformats:
            if f.metadataPrefix == 'oai_qdc':
                return 'oai_qdc'
        return 'oai_dc'

    def next(self):
        '''return a record iterator? then outside layer is a controller,
        same for all. Records are dicts that include:
        any metadata
        campus list
        repo list
        collection name
        '''
        # Skip deleted records until a live one is found; StopIteration
        # from the underlying sickle iterator propagates to the caller.
        while True:
            sickle_rec = self.records.next()
            if not sickle_rec.deleted:
                break  # good record to harvest, don't do deleted
                # update process looks for deletions
        rec = sickle_rec.metadata
        # Carry the OAI header info along with the metadata.
        rec['datestamp'] = sickle_rec.header.datestamp
        rec['id'] = sickle_rec.header.identifier
        return rec
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,170 | ucldc/harvester | refs/heads/master | /scripts/sync_couch_collection_to_solr.py | #! /bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
from solr import Solr
from harvester.post_processing.couchdb_runner import CouchDBCollectionFilter
from harvester.couchdb_init import get_couchdb
from harvester.solr_updater import map_couch_to_solr_doc, push_doc_to_solr
from harvester.solr_updater import has_required_fields, fill_in_title
# This works from inside an environment with default URLs for couch & solr
# Solr endpoint to push to; None when URL_SOLR is not set in the environment.
URL_SOLR = os.environ.get('URL_SOLR', None)
def main(collection_key):
v = CouchDBCollectionFilter(
couchdb_obj=get_couchdb(), collection_key=collection_key)
solr_db = Solr(URL_SOLR)
results = []
for r in v:
dt_start = dt_end = datetime.datetime.now()
try:
doc = fill_in_title(r.doc)
has_required_fields(r.doc)
except KeyError, e:
print(e.message)
continue
solr_doc = map_couch_to_solr_doc(r.doc)
results.append(solr_doc)
solr_doc = push_doc_to_solr(solr_doc, solr_db=solr_db)
dt_end = datetime.datetime.now()
solr_db.commit()
return results
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description='Sync collection to production couchdb')
    parser.add_argument(
        'collection_key', type=str, help='Numeric ID for collection')
    args = parser.parse_args(sys.argv[1:])
    # NOTE(review): this only prints a reminder -- no delete is performed
    # here; confirm the collection delete happens elsewhere first.
    print "DELETE COLLECTION TO CAPTURE ANY REMOVALS"
    results = main(args.collection_key)
    print 'Updated {} docs'.format(len(results))
    # arg will be just id
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,171 | ucldc/harvester | refs/heads/master | /harvester/fetcher/emuseum_fetcher.py | # -*- coding: utf-8 -*-
import datetime
import time
import requests
import re
from xml.etree import ElementTree as ET
from collections import defaultdict
from .fetcher import Fetcher
class eMuseum_Fetcher(Fetcher):
    '''Paginates through eMuseum API XML search results until
    no more records are found'''

    def __init__(self, url_harvest, extra_data, **kwargs):
        # NOTE(review): the Fetcher base __init__ is not called here --
        # confirm the base class needs no initialization for this fetcher.
        self.url_base = url_harvest
        self.page_current = 1   # next results page to request (1-based)
        self.doc_current = 1
        self.docs_fetched = 0

    @property
    def url_current(self):
        '''URL for the current page of approved objects, in XML form.'''
        quote_param = '/search/*/objects/xml?filter=approved%3Atrue&page='
        return '{0}{1}{2}'.format(self.url_base, quote_param,
                                  self.page_current)

    def _dochits_to_objset(self, docHits):
        '''Returns list of objects. Use 'name' attribute
        as JSON field name and 'value' as main 'text' field in CouchDB;
        save any other attributes as nested dict 'attrib'
        '''
        objset = []
        # iterate through docHits
        for d in docHits:
            fieldnumb = 1   # counter for fields with no usable name
            textnumb = 1    # counter for multi-valued field text keys
            obj = {}
            attributes = {}
            obj_mdata = defaultdict(list)
            for mdata in d:
                # assign CouchDB fieldname from what's available,
                # else use iterative unknown
                if 'name' in mdata.attrib:
                    md_fieldname = mdata.attrib['name']
                elif 'label' in mdata.attrib:
                    md_fieldname = mdata.attrib['label']
                else:
                    md_fieldname = ''.join(('unknown', str(fieldnumb)))
                    fieldnumb += 1
                for value in mdata:
                    if len(mdata) > 1:
                        # Multi-valued field: number the values text1..textN.
                        # NOTE(review): textnumb is not reset per field, so
                        # numbering continues across fields of the same doc
                        # -- confirm this is intended.
                        txt_fieldname = ''.join(('text', str(textnumb)))
                        obj_mdata[txt_fieldname] = value.text
                        textnumb += 1
                    else:
                        obj_mdata['text'] = value.text
                for att in mdata.attrib:
                    # NOTE(review): substring check -- skips any attribute
                    # whose name contains "name", not only "name" itself.
                    if 'name' not in att:
                        att_dict = {att: mdata.attrib[att]}
                        attributes.update(att_dict)
                if mdata.attrib:
                    obj_mdata['attrib'] = dict(attributes)
                    attributes.clear()
                obj[md_fieldname] = dict(obj_mdata)
                obj_mdata.clear()
            objset.append(obj)
        return objset

    def next(self):
        '''get next objset, use etree to pythonize. Stop
        iterating when no more <object>s are found'''
        dt_start = dt_end = datetime.datetime.now()
        xml = requests.get(self.url_current).text
        dt_end = datetime.datetime.now()
        # Politeness throttle: sleep for as long as the request took.
        time.sleep((dt_end-dt_start).total_seconds())
        tree = ET.fromstring(xml.encode('utf-8'))
        hits = tree.findall("objects/object")
        self.docs_total = len(hits)
        if self.docs_total == 0:
            # No <object> elements on this page: end of the result set
            # (py2-style iterator termination).
            raise StopIteration
        self.page_current += 1
        return self._dochits_to_objset(hits)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,172 | ucldc/harvester | refs/heads/master | /scripts/queue_sync_couchdb_collection.py | #! /bin/env python
# -*- coding: utf-8 -*-
import sys
import logbook
from rq import Queue
from redis import Redis
from harvester.config import parse_env
JOB_TIMEOUT = 28800 # 8 hrs
URL_REMOTE_COUCHDB = 'https://harvest-stg.cdlib.org/couchdb'
def queue_couch_sync(redis_host,
                     redis_port,
                     redis_password,
                     redis_timeout,
                     rq_queue,
                     url_api_collection,
                     url_remote_couchdb=None,
                     timeout=JOB_TIMEOUT):
    '''Enqueue a couchdb-sync job on the named RQ queue and return the job.

    The worker resolves the dotted-path func string at execution time, so
    this script never imports the harvester sync module itself.
    '''
    connection = Redis(
        host=redis_host,
        port=redis_port,
        password=redis_password,
        socket_connect_timeout=redis_timeout)
    sync_kwargs = {
        'url_remote_couchdb': url_remote_couchdb,
        'url_api_collection': url_api_collection,
    }
    work_queue = Queue(rq_queue, connection=connection)
    return work_queue.enqueue_call(
        func='harvester.couchdb_sync_db_by_collection.main',
        kwargs=sync_kwargs,
        timeout=timeout)
def main(url_api_collections,
         url_remote_couchdb=URL_REMOTE_COUCHDB,
         log_handler=None):
    '''This should only be run in production env!
    Queue is hard coded to normal-production so that it will be run there

    ``url_api_collections`` is a ';'-separated list of collection API urls;
    one sync job is queued per collection.
    '''
    config = parse_env(None)
    if not log_handler:
        log_handler = logbook.StderrHandler(level='DEBUG')
    log_handler.push_application()
    for url_api_collection in url_api_collections.split(';'):
        queue_couch_sync(
            config['redis_host'],
            config['redis_port'],
            config['redis_password'],
            config['redis_connect_timeout'],
            rq_queue='normal-production',
            url_api_collection=url_api_collection,
            url_remote_couchdb=url_remote_couchdb)
    log_handler.pop_application()
def def_args():
    """Build the command line parser for the couchdb sync script."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Sync collection to production couchdb')
    # three required positionals, declared data-driven to avoid repetition
    positionals = (
        ('user_email', 'user email'),
        ('rq_queue', 'RQ queue to put job in'),
        ('url_api_collection',
         'URL for the collection Django tastypie api resource'),
    )
    for arg_name, arg_help in positionals:
        parser.add_argument(arg_name, type=str, help=arg_help)
    return parser
if __name__ == '__main__':
    parser = def_args()
    args = parser.parse_args(sys.argv[1:])
    # argparse already errors out on missing positionals; this is a
    # belt-and-braces guard with a distinctive exit code.
    if not args.url_api_collection:
        parser.print_help()
        sys.exit(27)
    # NOTE(review): user_email and rq_queue are parsed but never used —
    # main() always queues on the hard coded 'normal-production' queue.
    main(args.url_api_collection, URL_REMOTE_COUCHDB)
"""
Copyright © 2016, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the University of California nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
| {"/test/test_oai_fetcher.py": ["/test/utils.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/__init__.py"], "/harvester/run_ingest.py": ["/harvester/config.py", "/harvester/collection_registry_client.py", "/harvester/sns_message.py", "/harvester/image_harvest.py", "/harvester/cleanup_dir.py"], "/test/test_preservica_api_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_collection_registry_client.py": ["/harvester/collection_registry_client.py", "/test/utils.py"], "/harvester/post_processing/batch_update_couchdb_by_collection.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/scripts/queue_deep_harvest_single_object_jobs.py": ["/harvester/config.py", "/harvester/collection_registry_client.py"], "/scripts/queue_image_harvest_by_doc.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/queue_image_harvest_for_doc_ids.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py"], "/scripts/image_harvest_opl_preservica.py": ["/harvester/image_harvest.py"], "/test/test_nuxeo_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], "/test/test_harvestcontroller.py": ["/test/utils.py", "/harvester/fetcher/__init__.py", "/harvester/collection_registry_client.py", "/harvester/fetcher/controller.py"], "/harvester/rq_worker_sns_msgs.py": ["/harvester/sns_message.py"], "/scripts/queue_delete_couchdb_collection.py": ["/harvester/config.py"], "/scripts/remove_field_list_from_collection_docs.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/image_harvest.py", "/harvester/couchdb_init.py"], "/test/test_cmisatomfeed_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_marc_fetcher.py": ["/harvester/collection_registry_client.py", "/test/utils.py", "/harvester/fetcher/__init__.py"], 
"/scripts/redis_delete_harvested_images_script.py": ["/harvester/post_processing/couchdb_runner.py"], "/scripts/queue_document_reenrich.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/harvester/fetcher/controller.py": ["/harvester/collection_registry_client.py", "/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py"], "/test/test_flickr_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_deep_harvest_single_object_with_components.py": ["/harvester/config.py"], "/scripts/enq_date_fix.py": ["/harvester/post_processing/couchdb_runner.py", "/harvester/post_processing/run_transform_on_couchdb_docs.py"], "/test/test_youtube_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_enrich_existing_couch_doc.py": ["/test/utils.py", "/harvester/config.py", "/harvester/post_processing/enrich_existing_couch_doc.py"], "/scripts/queue_harvest.py": ["/harvester/config.py"], "/harvester/post_processing/enrich_existing_couch_doc.py": ["/harvester/couchdb_init.py"], "/harvester/image_harvest.py": ["/harvester/couchdb_init.py", "/harvester/config.py", "/harvester/couchdb_pager.py", "/harvester/cleanup_dir.py", "/harvester/sns_message.py"], "/test/test_ucsf_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/sync_couch_to_solr.py": ["/harvester/couchdb_pager.py", "/harvester/couchdb_init.py"], "/test/test_xml_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/test/test_dedupe_sourceresource.py": 
["/test/utils.py"], "/test/test_integration_tests.py": ["/test/utils.py", "/harvester/fetcher/__init__.py"], "/harvester/couchdb_sync_db_by_collection.py": ["/harvester/collection_registry_client.py", "/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/harvester/fetcher/__init__.py": ["/harvester/fetcher/oai_fetcher.py", "/harvester/fetcher/solr_fetcher.py", "/harvester/fetcher/marc_fetcher.py", "/harvester/fetcher/nuxeo_fetcher.py", "/harvester/fetcher/oac_fetcher.py", "/harvester/fetcher/ucsf_xml_fetcher.py", "/harvester/fetcher/cmis_atom_feed_fetcher.py", "/harvester/fetcher/flickr_fetcher.py", "/harvester/fetcher/youtube_fetcher.py", "/harvester/fetcher/ucd_json_fetcher.py", "/harvester/fetcher/emuseum_fetcher.py", "/harvester/fetcher/ia_fetcher.py", "/harvester/fetcher/preservica_api_fetcher.py", "/harvester/fetcher/controller.py"], "/test/test_emuseum_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/harvester/solr_updater.py": ["/harvester/couchdb_init.py", "/harvester/post_processing/couchdb_runner.py", "/harvester/sns_message.py"], "/test/test_ia_fetcher.py": ["/harvester/fetcher/__init__.py", "/test/utils.py"], "/scripts/queue_sync_couchdb_collection.py": ["/harvester/config.py"]} |
54,176 | liron-li/LivaSpider | refs/heads/master | /core/models.py | """
sqlAlchemy模型
生成迁移文件:alembic revision --autogenerate -m "desc"
执行迁移:alembic upgrade head
"""
from sqlalchemy import create_engine, Column, Integer, String, Enum, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Model = declarative_base()
class UrlPool(Model):
    """URL pool table model: crawl candidates and whether each was fetched."""
    __tablename__ = "url_pool"
    # surrogate primary key
    id = Column(Integer(), primary_key=True, autoincrement=True)
    # candidate URL to crawl
    url = Column(String(500))
    # flipped to 'yes' once the URL has been crawled
    is_crawl = Column(Enum('yes', 'no'), default='no', nullable=False)
class Baike(Model):
    """Baidu Baike table model: one scraped encyclopedia entry."""
    __tablename__ = "baike"
    # surrogate primary key
    id = Column(Integer(), primary_key=True, autoincrement=True)
    # entry title
    title = Column(String(100))
    # entry summary text
    description = Column(String(2000))
# Initialize the database connection.
# NOTE(review): credentials are hard coded in the URL; consider moving
# them to configuration/environment.
engine = create_engine('mysql://root:123456@192.168.33.110:3306/spider?charset=utf8')
# Session factory bound to the engine; call DBSession() to get a session.
DBSession = sessionmaker(bind=engine)
| {"/core/crawling.py": ["/core/models.py"]} |
54,177 | liron-li/LivaSpider | refs/heads/master | /example_crawl_baike.py | from core import models, crawling
from bs4 import BeautifulSoup
class Spider(crawling.SpiderBase):
    """Baidu Baike crawler: stores each page's title and summary in the
    ``baike`` table."""
    @staticmethod
    def parse_item(response):
        """Parse one fetched page; persist title/summary when both exist."""
        soup = BeautifulSoup(response.text, 'html.parser')
        db = models.DBSession()
        # only article pages carry both an <h1> and a lemma-summary div
        if soup.h1 is not None and soup.find('div', class_="lemma-summary") is not None:
            title = soup.h1.text.strip()
            description = soup.find('div', class_="lemma-summary").text.strip()
            # force the MySQL connection charset to utf8 so the Chinese
            # text survives the round trip
            db.execute('SET NAMES utf8;')
            db.execute('SET CHARACTER SET utf8;')
            db.execute('SET character_set_connection=utf8;')
            print('title : %s' % title)
            print("description : %s" % description)
            # NOTE(review): values are stored encoded with the page's
            # response encoding — presumably utf-8; confirm against the
            # column definitions in models.py.
            new_row = models.Baike(
                title=title.encode(response.encoding),
                description=description.encode(response.encoding)
            )
            db.add(new_row)
            db.commit()
if __name__ == '__main__':
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
    }
    cookies = {
    }
    config = {
        # request headers
        "headers": headers,
        # cookies
        "cookies": cookies,
        # root url used to resolve relative links
        "base_url": "http://baike.baidu.com/",
        # start url (seed page)
        "start_url": "http://baike.baidu.com/item/%E9%93%81%E6%A0%91/110475",
        # regex a discovered link must match to be crawled
        "url_rule": r'^http://baike.baidu.com/item/',
    }
    spider = Spider(config)
    spider.crawl(Spider.parse_item)
| {"/core/crawling.py": ["/core/models.py"]} |
import asyncio
import re
import urllib
import urllib.parse

import requests
from bs4 import BeautifulSoup

from .models import DBSession, UrlPool
class SpiderBase(object):
    """Minimal resumable crawler.

    A config dict supplies headers/cookies, the base/start urls and a
    regex (``url_rule``) restricting which discovered links are queued.
    Discovered urls are deduplicated and persisted in the ``url_pool``
    table, so a crawl can resume after a restart.
    """
    # default request headers, used when config supplies none
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
    }
    cookies = None
    def __init__(self, config):
        self.config = config
        self.db = DBSession()
    def _push_url(self, url):
        """Store ``url`` in the pool unless it is already queued."""
        res = self.db.query(UrlPool).filter(UrlPool.url == url).one_or_none()
        if res is None:
            new_url = UrlPool(url=url, is_crawl='no')
            self.db.add(new_url)
            self.db.commit()
    def _get_un_crawl_url(self):
        """Return one url not yet crawled, or None when the pool is empty."""
        res = self.db.query(UrlPool).filter(UrlPool.is_crawl == 'no').all()
        # BUG FIX: the original `if res is not []` compared object identity
        # and was always True, so an empty pool crashed with IndexError.
        if res:
            return res[0].url
        return None
    def get(self, url):
        """HTTP GET with the configured (or default) headers/cookies."""
        response = requests.get(
            url,
            headers=self.config.get('headers', self.headers),
            cookies=self.config.get('cookies', self.cookies)
        )
        return response
    async def async_task(self, url, parse_item):
        """Scan ``url`` for new links, then fetch it in an executor thread
        and hand the response to ``parse_item``."""
        self._extract_urls(url)
        _loop = asyncio.get_event_loop()
        future = _loop.run_in_executor(None, self.get, url)
        print('crawl: %s' % url)
        response = await future
        parse_item(response)
    def _extract_urls(self, url):
        """Queue every link on ``url`` whose absolute form matches url_rule."""
        response = self.get(url)
        url_rule = self.config.get('url_rule')
        soup = BeautifulSoup(response.text, 'html.parser')
        for link in soup.find_all('a'):
            # renamed from `url` to avoid shadowing the parameter
            absolute = urllib.parse.urljoin(self.config.get('base_url'), link.get('href'))
            if re.match(url_rule, absolute):
                self._push_url(absolute)
    def _mark_crawled(self, url):
        """Flag ``url`` as crawled so it is not fetched again."""
        self.db.query(UrlPool).filter(UrlPool.url == url).update({UrlPool.is_crawl: "yes"})
        self.db.commit()
    def crawl(self, parse_item, start_url=None):
        """Seed the pool from the configured start url (falling back to
        ``start_url``) and drain it, calling ``parse_item`` per page."""
        url = self.config.get('start_url', start_url)
        # seed the url pool from the start page
        self._extract_urls(url)
        loop = asyncio.get_event_loop()
        while True:
            url = self._get_un_crawl_url()
            if url is None:
                # BUG FIX: the original only flipped a flag here and still
                # marked/fetched None, crashing inside requests.get.
                break
            self._mark_crawled(url)
            loop.run_until_complete(self.async_task(url, parse_item))
        loop.close()
| {"/core/crawling.py": ["/core/models.py"]} |
54,179 | liron-li/LivaSpider | refs/heads/master | /alembic/versions/404fa70bcf2c_create_tables.py | """create tables
Revision ID: 404fa70bcf2c
Revises:
Create Date: 2017-05-05 14:30:11.289634
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '404fa70bcf2c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``baike`` and ``url_pool`` tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('baike',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=100), nullable=True),
    sa.Column('description', sa.String(length=2000), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('url_pool',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('url', sa.String(length=500), nullable=True),
    sa.Column('is_crawl', sa.Enum('yes', 'no'), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('url_pool')
    op.drop_table('baike')
    # ### end Alembic commands ###
| {"/core/crawling.py": ["/core/models.py"]} |
54,197 | TzuHuanTai/RaspberryPi_Weather | refs/heads/master | /tsl2561.py | # Code sourced from:
# https://github.com/seanbechhofer/raspberrypi/blob/master/python/TSL2561.py
# https://github.com/adafruit/Adafruit_CircuitPython_TSL2561/blob/0c205ed557cf03bad6ab73f10a4b132b40d47bd6/adafruit_tsl2561.py#L127
import time
import smbus
_DEFAULT_ADDRESS = 0x39
_COMMAND_BIT = 0x80
_CONTROL_POWERON = 0x03
_CONTROL_POWEROFF = 0x00
_REGISTER_CONTROL = 0x00
_REGISTER_TIMING = 0x01
_REGISTER_CHAN0_LOW = 0x0C
_REGISTER_CHAN1_LOW = 0x0E
_GAIN_SCALE = (16, 1)
_TIME_SCALE = (0.034, 0.252, 1)
_CLIP_THRESHOLD = (4900, 37000, 65000)
class TSL2561:
    """Driver for the TSL2561 I2C ambient light sensor.

    Wraps the raw smbus transactions needed to power the chip, select
    gain/integration time, and convert the two raw channel counts into
    lux using the piecewise formula from the datasheet.
    """
    def __init__(self, bus, address = _DEFAULT_ADDRESS, pause = 1):
        # `pause` is the settle time (seconds) slept after changing the
        # timing register so the next reading reflects the new settings.
        self.bus = bus
        self.address = address
        self.pause = pause
        self.gain = 0
        self.integration_time = 2
        self.enable() # power on
        # NOTE(review): because self.gain / self.integration_time are
        # pre-set above, the two calls below are no-ops (they only write
        # when the value changes) — the timing register keeps the chip's
        # power-on defaults, which happen to match 1x / 402ms.
        self.set_gain() # 1x gain preselected
        self.set_integration_time() # 402ms integration
    def enable(self):
        """Power the sensor on."""
        self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_CONTROL, _CONTROL_POWERON)
    def disable(self):
        """Power the sensor off."""
        self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_CONTROL, _CONTROL_POWEROFF)
    def set_gain(self, gain = 0):
        """Select analog gain: 0 = 1x (bright light), 1 = 16x (dim light)."""
        # 0x00(00) gain = 1x
        # 0x10(00) gain = 16x
        if (gain != self.gain):
            # gain occupies bit 4 of the timing register, shared with the
            # integration-time bits, so both are written together
            self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_TIMING, gain<<4 | self.integration_time)
            self.gain = gain
            time.sleep(self.pause)
    def set_integration_time(self, integration_time = 2):
        """Select integration time: 0 = 13.7ms, 1 = 101ms, 2 = 402ms."""
        # 0x00(00) integration time = 13.7ms
        # 0x01(01) integration time = 101ms
        # 0x02(02) integration time = 402ms
        if (integration_time != self.integration_time):
            self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_TIMING, self.gain<<4 | integration_time)
            self.integration_time = integration_time
            time.sleep(self.pause)
    def read_broadband(self):
        """Raw channel-0 count (visible + IR), read as little-endian 16-bit."""
        low, high = self.bus.read_i2c_block_data(self.address, _COMMAND_BIT | _REGISTER_CHAN0_LOW, 0x02)
        return high << 8 | low
    def read_infrared(self):
        """Raw channel-1 count (IR only), read as little-endian 16-bit."""
        low, high = self.bus.read_i2c_block_data(self.address, _COMMAND_BIT | _REGISTER_CHAN1_LOW, 0x02)
        return high << 8 | low
    def read_lux(self):
        """Return illuminance in lux, or None when channel 0 reads zero or
        either channel is saturated (at/above the clip threshold for the
        current integration time)."""
        ch0, ch1 = self.read_broadband(), self.read_infrared()
        if ch0 == 0:
            return None
        if ch0 >= _CLIP_THRESHOLD[self.integration_time]:
            return None
        if ch1 >= _CLIP_THRESHOLD[self.integration_time]:
            return None
        ratio = ch1 / ch0
        # piecewise lux formula keyed on the IR/broadband ratio
        if ratio >= 0 and ratio <= 0.50:
            lux = 0.0304 * ch0 - 0.062 * ch0 * ratio**1.4
        elif ratio <= 0.61:
            lux = 0.0224 * ch0 - 0.031 * ch1
        elif ratio <= 0.80:
            lux = 0.0128 * ch0 - 0.0153 * ch1
        elif ratio <= 1.30:
            lux = 0.00146 * ch0 - 0.00112 * ch1
        else:
            lux = 0.
        # Pretty sure the floating point math formula on pg. 23 of datasheet
        # is based on 16x gain and 402ms integration time. Need to scale
        # result for other settings.
        # Scale for gain.
        lux *= _GAIN_SCALE[self.gain]
        # Scale for integration time.
        lux /= _TIME_SCALE[self.integration_time]
        return lux
if __name__ == "__main__":
    # Smoke test: one reading at 1x gain / 13.7ms integration on I2C bus 1.
    bus = smbus.SMBus(1)
    tsl=TSL2561(bus)
    tsl.set_gain(0)
    tsl.set_integration_time(0)
    test = tsl.read_lux()
    print(test)
'''
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
tsl2561Address = 0x39 # TSL2561 address, (57)
# Select control register, 0x00(00) with command register, 0x80(128)
# 0x03(03) Power ON mode
bus.write_byte_data(0x39, 0x00 | 0x80, 0x03)
# TSL2561 address, 0x39(57)
# Select timing register, 0x01(01) with command register, 0x80(128)
# 0x02(02) Nominal integration time = 402ms
bus.write_byte_data(0x39, 0x01 | 0x80, 0x02)
time.sleep(0.5)
# Read data back from 0x0C(12) with command register, 0x80(128), 2 bytes
# ch0 LSB, ch0 MSB
data = bus.read_i2c_block_data(0x39, 0x0C | 0x80, 0x02)
# Read data back from 0x0E(14) with command register, 0x80(128), 2 bytes
# ch1 LSB, ch1 MSB
data1 = bus.read_i2c_block_data(0x39, 0x0E | 0x80, 0x02)
# Convert the data
print(data[0],data[1])
ch0 = data[1] * 256 + data[0]
ch1 = data1[1] * 256 + data1[0]
# Output data to screen
print ('Full Spectrum(IR + Visible) :{:.1f} lux'.format(ch0))
print ('Infrared Value :{:.1f} lux'.format(ch1))
print ('Visible Value :{:.1f} lux'.format(ch0 - ch1))
''' | {"/realtime_detector.py": ["/tsl2561.py", "/lcd1602.py"]} |
54,198 | TzuHuanTai/RaspberryPi_Weather | refs/heads/master | /lcd1602.py | import smbus
from time import *
# LCD Address
ADDRESS = 0x3F
# commands
LCD_CLEARDISPLAY = 0b00000001
LCD_RETURNHOME = 0b00000010
LCD_ENTRYMODESET = 0b00000100
LCD_DISPLAYCONTROL = 0b00001000
LCD_CURSORSHIFT = 0b00010000
LCD_FUNCTIONSET = 0b00100000
LCD_SETCGRAMADDR = 0b01000000
LCD_SETDDRAMADDR = 0b10000000
# flags for display entry mode
LCD_ENTRYRIGHT = 0b00000000
LCD_ENTRYLEFT = 0b00000010
LCD_ENTRYSHIFTINCREMENT = 0b00000001
LCD_ENTRYSHIFTDECREMENT = 0b00000000
# flags for display on/off control
LCD_DISPLAYON = 0b00000100
LCD_DISPLAYOFF = 0b00000000
LCD_CURSORON = 0b00000010
LCD_CURSOROFF = 0b00000000
LCD_BLINKON = 0b00000001
LCD_BLINKOFF = 0b00000000
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0b00001000
LCD_CURSORMOVE = 0b00000000
LCD_MOVERIGHT = 0b00000100
LCD_MOVELEFT = 0b00000000
# flags for function set
LCD_8BITMODE = 0b00010000
LCD_4BITMODE = 0b00000000
LCD_2LINE = 0b00001000
LCD_1LINE = 0b00000000
LCD_5x10DOTS = 0b00000100
LCD_5x8DOTS = 0b00000000
# flags for backlight control
LCD_BACKLIGHT = 0b00001000
LCD_NOBACKLIGHT = 0b00000000
En = 0b00000100 # Enable bit
Rw = 0b00000010 # Read/Write bit
Rs = 0b00000001 # Register select bit
class LCD:
    """Driver for an HD44780-compatible 16x2 character LCD behind a
    PCF8574-style I2C backpack, operated in 4-bit mode.

    The delays sprinkled through the methods are required by the display
    controller's timing; do not remove them.
    """
    #initializes objects and lcd
    def __init__(self, bus, address = ADDRESS):
        self.bus = bus
        self.address = address
        self.backlight = LCD_BACKLIGHT
        self.lcd_write(0b10) # initial
        # 4-bit bus, two display lines, 5x8 font
        self.lcd_write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)
        self.lcd_write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)
        self.lcd_write(LCD_CLEARDISPLAY)
        self.lcd_write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)
        sleep(0.2)
    def write_cmd(self, cmd):
        """Write one raw byte to the I2C expander."""
        self.bus.write_byte(self.address, cmd)
        sleep(0.0001)
    # clocks EN to latch command
    def lcd_strobe(self, data):
        """Pulse the Enable line high then low so the LCD latches `data`."""
        self.write_cmd(data | En | self.backlight)
        sleep(.0005)
        self.write_cmd(((data & ~En) | self.backlight))
        sleep(.0001)
    def lcd_write_four_bits(self, data):
        """Present one nibble on the bus, then strobe it in."""
        # send ~En first with write_cmd() to ensure initial signal
        self.write_cmd(data | self.backlight)
        self.lcd_strobe(data)
    # write a command to lcd
    def lcd_write(self, cmd, mode=0):
        """Send a full byte as two nibbles; `mode` is 0 for a command or
        Rs for character data."""
        self.lcd_write_four_bits(mode | (cmd & 0xF0))
        self.lcd_write_four_bits(mode | ((cmd << 4) & 0xF0))
    # put string function
    def lcd_display_string(self, string):
        """Write `string` starting at the current cursor position."""
        for char in string:
            self.lcd_write(ord(char), Rs)
    # clear lcd and set to home
    def lcd_clear(self):
        """Blank the display and return the cursor to (0, 0)."""
        self.lcd_write(LCD_CLEARDISPLAY)
        self.lcd_write(LCD_RETURNHOME)
    # define backlight on/off (lcd.backlight(1); off= lcd.backlight(0)
    def set_backlight(self, state): # for state, 1 = on, 0 = off
        if state == 1:
            self.backlight = LCD_BACKLIGHT
        elif state == 0:
            self.backlight = LCD_NOBACKLIGHT
        self.write_cmd(self.backlight)
    def set_cursor(self, row, col):
        '''
        Move the cursor to (row, col), both 0-indexed.
        line 1 addr: 0x00~0x0f or 0b0000000~0b0001111
        line 2 addr: 0x40~0x4f or 0b1000000~0b1001111
        '''
        self.lcd_write((LCD_SETDDRAMADDR | row << 6) | col)
        sleep(0.005)
if __name__ == '__main__':
    # Smoke test: print a message at row 0, col 5, then turn off backlight.
    bus = smbus.SMBus(1)
    # BUG FIX: the class is named LCD — the original `lcd(bus)` raised
    # NameError at runtime.
    mylcd = LCD(bus)
    mylcd.set_cursor(0, 5)
    mylcd.lcd_display_string("test")
    sleep(2) # 2 sec delay
    mylcd.set_backlight(0)
class Climate():
    """Mutable record of one weather observation.

    Every field starts as None and is filled in by the caller; field
    names mirror the upstream weather-station vocabulary (pressure,
    temperature, humidity, wind, precipitation, sunshine, visibility,
    illuminance).
    """
    def __init__(self):
        observation_fields = (
            "ObsTime", "StnPres", "SeaPres", "Temperature", "Td", "RH",
            "WS", "WD", "WSGust", "WDGust", "Precp", "PrecpHour",
            "SunShine", "GlobalRad", "Visb", "Lux",
        )
        for field_name in observation_fields:
            setattr(self, field_name, None)
| {"/realtime_detector.py": ["/tsl2561.py", "/lcd1602.py"]} |
54,200 | TzuHuanTai/RaspberryPi_Weather | refs/heads/master | /realtime_detector.py | import requests # call apiimport
import json
import Adafruit_DHT as dht # sensor
import datetime as dt
import time
import smbus
from tsl2561 import TSL2561
from lcd1602 import LCD
# Declare
class weather:
    """One sensor reading destined for the climate API; all fields start
    as None and are assigned by the main loop before serialization."""
    def __init__(self):
        for reading in ("ObsTime", "Temperature", "Rh", "Lux"):
            setattr(self, reading, None)
# ===config====
# Station Identity
StationId = 0
# API endpoint for this station's climate observations
APIUrl = "http://127.0.0.1:6080/api/climate/{0}".format(StationId)
print(APIUrl)
# This token was issued with raspberry-only permissions, but how to keep
# it from being exposed still needs thought!
# NOTE(review): a hard coded bearer token in source is a security risk —
# move it to an environment variable or config file.
token = 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJHZW5lcmFsIiwiaXNzIjoi55CG5p-l5b635LqC5pCe5pyJ6ZmQ5YWs5Y-4IiwiaWQiOiJwaSIsInJvbGVJZCI6IjUiLCJuYmYiOjE1Mjg2ODc3MDEsImV4cCI6MTc0OTYxMjUwMSwiaWF0IjoxNTI4Njg3NzAxfQ.MJWYrbPgG361F4nXwjFdTRdPdGcQ3ff6A6_4TcF24HY'
# ====initial====
# I2C bus 1 hosts both the light sensor and the LCD
bus = smbus.SMBus(1)
tsl = TSL2561(bus)
lcd = LCD(bus)
# last lux reading; seeds the gain/integration auto-ranging in the loop
readLux = 100
lcd.set_backlight(1)
lcd.lcd_display_string("Richard Corp.")
# small boot animation on the second LCD row
for i in range(3):
    lcd.set_cursor(1, i*4)
    lcd.lcd_display_string("go! ")
    time.sleep(0.5)
# lcd.set_backlight(0)
while True:
    try:
        # ==== Read sensor values ====
        # DHT22 temperature/humidity sensor on GPIO pin 16; a loose
        # connection makes read_retry return None.
        h, t = dht.read_retry(dht.DHT22, 16)
        # ==== TSL2561 light sensor auto-ranging ====
        # Gain: 0 = 1x (bright light), 1 = 16x (dim light).  At 16x a
        # reading 16x larger is accumulated, saturating (None) past 65535.
        if readLux < 0:
            tsl.set_gain(0)
        elif readLux < 1024:
            tsl.set_gain(1)
        else:
            tsl.set_gain(0)
        # Integration time (0 = 13.7ms, 1 = 101ms, 2 = 402ms): longer
        # integration accumulates higher counts; too long saturates.
        if readLux < 0:
            tsl.set_integration_time(0)
        elif readLux < 1024:
            tsl.set_integration_time(2)
        elif readLux < 4096:
            tsl.set_integration_time(1)
        else:
            tsl.set_integration_time(0)
        readLux = tsl.read_lux()
        if readLux is None:
            # saturated: retry once at the least sensitive settings
            tsl.set_gain(0)
            tsl.set_integration_time(0)
            readLux = tsl.read_lux()
            if readLux is None:
                readLux = -1  # sentinel: reading failed
        lux = readLux
        # ==== Update the LCD ====
        lcd.set_cursor(0, 0)
        lcd.lcd_display_string('Temp:{0:.1f}C Lux: '.format(t))
        lcd.set_cursor(1, 0)
        lcd.lcd_display_string('Humi:{0:.1f}% {1:5.0f}'.format(h, lux))
        # ==== POST the observation to the climate API ====
        header = {'Authorization':token,'Content-Type':'application/json'}
        detected = weather()
        detected.ObsTime = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]+'+08:00'
        detected.Temperature = float('{0:.1f}'.format(t))
        detected.Rh = float('{0:.1f}'.format(h))
        detected.Lux = float('{0:5.0f}'.format(lux))
        body = json.dumps(detected.__dict__)
        r = requests.post(url = APIUrl, headers = header, data = body)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made Ctrl-C unable to stop the
        # loop.  On failure show an error marker in the LCD's top-right.
        lcd.set_cursor(0, 15)
        lcd.lcd_display_string('*')
    time.sleep(3)
| {"/realtime_detector.py": ["/tsl2561.py", "/lcd1602.py"]} |
54,201 | matumba78/nth_fibonacci | refs/heads/master | /fibon/forms.py | from django import forms
class NumberForm(forms.Form):
    """Single-field form collecting the Fibonacci index to compute."""
    number = forms.IntegerField(label='Number')
54,202 | matumba78/nth_fibonacci | refs/heads/master | /fibon/views.py | from django.shortcuts import render
from django.views.generic import View
from fibon.models import ResultFibonacci
from fibon.forms import NumberForm
import math
from django.http import HttpResponse
import time,json
# Create your views here.
class FibonacciView(View):
    """Render a form, compute the requested Fibonacci number and cache it.

    Results are memoized in the ResultFibonacci table so repeated requests
    for the same index are served from the database.
    """
    def get_fibo_number(self, num):
        """Iterative approach, O(n) time.

        NOTE: uses the 1, 1, 2, 3, ... convention (num < 2 returns 1),
        unlike get_fibo_by_dp which uses fib(0) == 0.
        """
        if num < 2:
            return 1
        else:
            num_seq_1 = 1
            num_seq_2 = 1
            for i in range(2, num):
                temp = num_seq_1 + num_seq_2
                num_seq_1 = num_seq_2
                num_seq_2 = temp
            return num_seq_2
    def get_fibo_usinf_formula(self, num):
        """Closed-form (Binet) formula, O(1) time — but loses precision
        for large num due to floating-point rounding."""
        result = (1 + math.sqrt(5))/2
        return round(pow(result, num)/math.sqrt(5))
    def get_fibo_by_dp(self, num):
        """Bottom-up dynamic programming, O(n) time, standard
        0, 1, 1, 2, ... convention (fib(0) == 0)."""
        a = 0
        b = 1
        if num == 0:
            return a
        for i in range(2, num + 1):
            c = a + b
            a = b
            b = c
        return b
    def get(self, request):
        """Show the input form."""
        return render(request, 'home.html')
    def post(self, request):
        """Validate the submitted index and compute (or look up) the result."""
        form = NumberForm(request.POST)
        if form.is_valid():
            number = int(form.cleaned_data['number'])
            # BUG FIX: the old `if number:` guard skipped 0 entirely, so a
            # valid input of 0 fell through and the view returned None
            # (an HTTP 500 in Django).  fib(0) == 0 is a normal result.
            if not ResultFibonacci.objects.filter(number=number):
                start_time = time.clock()
                # BUG FIX: `print start_time` was a Python-2-only print
                # statement; the call form works on both 2 and 3.
                # NOTE(review): time.clock() was removed in Python 3.8 —
                # switch to time.perf_counter() when the project moves to 3.
                print(start_time)
                result = str(self.get_fibo_by_dp(number))
                time_taken = time.clock() - start_time
                fibonacci_object = ResultFibonacci(
                    number=number, result=result, time_elapsed=time_taken)
                fibonacci_object.save()
                result = {
                    "number": number,
                    "result": result,
                    "time_elapsed": time_taken
                }
                return render(request, 'result.html', {'result': result})
            else:
                fibonacci_object = ResultFibonacci.objects.get(number=number)
                result = {
                    "number": fibonacci_object.number,
                    "result": fibonacci_object.result,
                    "time_elapsed": fibonacci_object.time_elapsed
                }
                return render(request, 'result.html', {'result': result})
        else:
            # invalid submission: fall back to a fresh input page
            form = NumberForm()
            return render(request, 'home.html')
| {"/fibon/admin.py": ["/fibon/models.py"], "/fibon/urls.py": ["/fibon/views.py"]} |
54,203 | matumba78/nth_fibonacci | refs/heads/master | /fibon/models.py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class ResultFibonacci(models.Model):
    '''Cache of previously computed Fibonacci numbers.

    The result is stored in a char field to allow storage of large
    numbers, as a sqlite integer cannot hold arbitrarily large integers
    (it would overflow).
    '''
    # the requested index n; unique so each n is cached at most once
    number = models.IntegerField(unique=True)
    # the (possibly huge) Fibonacci number, as a decimal string
    result = models.CharField(max_length=255)
    # seconds spent computing, stored as text
    time_elapsed = models.CharField(max_length=255)
| {"/fibon/admin.py": ["/fibon/models.py"], "/fibon/urls.py": ["/fibon/views.py"]} |
54,204 | matumba78/nth_fibonacci | refs/heads/master | /fibon/admin.py | from django.contrib import admin
# Register your models here.
from .models import ResultFibonacci
# Expose ResultFibonacci in the Django admin site.
admin.site.register(ResultFibonacci)
54,205 | matumba78/nth_fibonacci | refs/heads/master | /fibon/urls.py | from django.conf.urls import include, url
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from fibon.views import FibonacciView
# Both routes share FibonacciView: GET shows the form, POST computes.
# csrf_exempt lets external clients POST without a CSRF token.
urlpatterns=[
    url(r'^home/',csrf_exempt(FibonacciView.as_view()),name='home-view'),
    url(r'^fibo-number/',csrf_exempt(FibonacciView.as_view()),name='result-view'),
]
54,206 | matumba78/nth_fibonacci | refs/heads/master | /fibon/apps.py | from django.apps import AppConfig
class FibonConfig(AppConfig):
    """AppConfig for the 'fibon' app (referenced from INSTALLED_APPS)."""
    name = 'fibon'
| {"/fibon/admin.py": ["/fibon/models.py"], "/fibon/urls.py": ["/fibon/views.py"]} |
54,209 | JaishreeJanu/final_project | refs/heads/master | /demo.py | import app
app.func()
| {"/demo.py": ["/app.py"]} |
54,210 | JaishreeJanu/final_project | refs/heads/master | /app.py | import os
print('Hey ! I am Jaishree')
print('This is second statement')
def func():
    """Print a greeting to stdout; returns None. Called from demo.py."""
    print("Hello")
def calc(a, b):
    """Return ``a + b`` — numeric sum, or concatenation for sequences."""
    total = a + b
    return total
if __name__ == "__main__":
    # Executed only when run directly (python app.py), never on import.
    print("Hey!!!")
    print("Working with remote repo")
    print("This statement is only in testing branch")
    print("testing pull request")
| {"/demo.py": ["/app.py"]} |
54,211 | hi-noikiy/sinotrans | refs/heads/master | /zakkabag/forms.py | from django import forms
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class DashboardForm(forms.Form):
    """Year selector for the statistics dashboard.

    The field is declared ``required=False`` so Django's built-in presence
    check is skipped; ``clean_year`` enforces presence itself so the error
    message can be customised/translated.
    """
    year = forms.IntegerField(
        label=_('year'),
        initial=timezone.now().year,
        min_value=2000,
        required=False)

    def clean_year(self):
        """Require a year and return it.

        BUG FIX: the original raised the "required" error when a year WAS
        supplied (``if year:``), making every submitted form invalid; the
        condition is inverted here so the error fires only when the field
        is empty.
        """
        year = self.cleaned_data['year']
        if not year:
            raise forms.ValidationError(_('This field is required.'))
        return year

    def __init__(self, *args, **kwargs):
        super(DashboardForm, self).__init__(*args, **kwargs)
        # Bootstrap styling, plus client-side min/max mirroring the server-side
        # min_value (2000) and a "next year" upper bound.
        self.fields['year'].widget.attrs['class'] ="form-control"
        self.fields['year'].widget.attrs['min'] ="2000"
        self.fields['year'].widget.attrs['max'] = timezone.now().year + 1
54,212 | hi-noikiy/sinotrans | refs/heads/master | /picking/forms.py | from django import forms
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext_lazy as _
from .models import PickingBill
class PickingBillScanForm(forms.Form):
    """Batch-scan form: picking bill numbers plus the picker to assign them to."""
    # Several scanned numbers are entered in one Textarea blob; splitting is
    # done by the consuming view.
    number = forms.CharField(
        label=_('pickingbill number'),
        widget=forms.Textarea(),
        required=True)
    picking_staff = forms.CharField(
        label=_('picking staff'),
        required=True
    )
class WaybillScanForm(forms.Form):
    """Batch-scan form: waybill numbers plus the label/distribution staff."""
    # Several scanned numbers are entered in one Textarea blob; splitting is
    # done by the consuming view.
    number = forms.CharField(
        label=_('waybill number'),
        widget=forms.Textarea(),
        required=True)
    label_staff = forms.CharField(
        label=_('label staff'),
        required=True
    )
    distribution_staff = forms.CharField(
        label=_('distribution staff'),
        required=True
    )
54,213 | hi-noikiy/sinotrans | refs/heads/master | /picking/urls.py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from .views import (
PickingbillAssignView,
WaybillCompleteView,
LineChartJSONView,
PickingbillStatView,
)
urlpatterns = [
    # Bulk-assign scanned picking bills to a picking staff member.
    url(r'^pickingbillassign$', PickingbillAssignView.as_view(), name='pickingbill_assign'),
    # Mark scanned waybills complete with label/distribution staff.
    url(r'^waybillcomplete$', WaybillCompleteView.as_view(), name='waybill_assign'),
    # Statistics page and its JSON line-chart data endpoint.
    url(r'^pickingbillstat$', PickingbillStatView.as_view(), name='pickingbill_stat'),
    url(r'^linechartjason1$', LineChartJSONView.as_view(), name='line_chart_json1'),
]
54,214 | hi-noikiy/sinotrans | refs/heads/master | /zakkabag/urls.py | from django.conf import settings
from django.conf.urls.static import static
from inspection.admin import my_admin_site
# When USE_EXPLICIT_LANG_URL is on, use i18n_patterns so routes carry a
# language-code prefix (/en/..., /zh/...); otherwise fall back to plain
# patterns.  NOTE(review): url_patterns appears unused below — verify.
if settings.USE_EXPLICIT_LANG_URL:
    from django.conf.urls.i18n import i18n_patterns as url_patterns
else:
    from django.conf.urls import patterns as url_patterns
from django.conf.urls import patterns, include, url
from django.contrib import admin
from newsletter.views import home, contact
from zakkabag.views import about, sitemap, set_language, DashboardViewSINO, test
# from .views import CKEditorImageUpload
admin.autodiscover()
def i18n_javascript(request):
    # Serve the admin's JavaScript translation catalog; needed because the
    # admin date/time widgets are reused outside the stock admin pages.
    return admin.site.i18n_javascript(request)
# Project-level URL map: a few direct views, the two admin sites, then one
# include() per installed app.
urlpatterns = [
    url(r'^$', home, name='home'),
    url(r'^home$', home, name='home'),
    url(r'^test$', test, name='test'),
    url(r'^dashboard/$', DashboardViewSINO, name='dashboard'),
    url(r'^contact/$', contact, name='contact'),
    url(r'^about/$', about, name='about'),
    url(r'^about/sitemap$', sitemap, name='sitemap'),
    url(r'^admin/jsi18n', i18n_javascript), # added for AdminDateTimeWidget
    url(r'^admin/', include(admin.site.urls)),
    url(r'^sino/', include(my_admin_site.urls)),
    # url(r'^sino/', my_admin_site.urls, namespace='sino'),
    # url(r'^personalcenter/', include('personalcenter.urls')),
    # url(r'^crowdfundings/', include('crowdfundings.urls')),
    url(r'^newsletter/', include('newsletter.urls')),
    url(r'^auth/', include('authwrapper.urls')),
    url(r'^inspection/', include('inspection.urls')),
    url(r'^fileupload/', include('fileuploadwrapper.urls')),
    url(r'^equipments/', include('equipments.urls')),
    url(r'^outsourcing/', include('outsourcing.urls')),
    url(r'^trainings/', include('trainings.urls')),
    url(r'^picking/', include('picking.urls')),
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^setlang/$', set_language, name='setlang'),
    url(r'^phone_login/', include('phone_login.urls')),
    # url(r'^upload/ckeditorimage/$', CKEditorImageUpload, name='ckeditor_image_upload'),
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
]
urlpatterns += [
    url(r'^i18n/', include('django.conf.urls.i18n')),
]
import os
# In DEBUG, serve static/media files directly — unless a SAE bucket hosts them.
if settings.DEBUG:
    if settings.USE_SAE_BUCKET: #'SERVER_SOFTWARE' in os.environ:
        pass
    else:
        urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
        urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {"/picking/forms.py": ["/picking/models.py"], "/picking/urls.py": ["/picking/views.py"], "/zakkabag/urls.py": ["/newsletter/views.py", "/zakkabag/views.py"], "/picking/views.py": ["/picking/forms.py", "/picking/models.py"]} |
54,215 | hi-noikiy/sinotrans | refs/heads/master | /picking/models.py | from django.db import models
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from django.db.models.signals import post_delete, post_save, pre_save
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from django.http import Http404
from django.utils import timezone
from datetime import datetime, timedelta
from django.conf import settings
# Create your models here.
class Waybill(models.Model):
    """A shipping waybill; PickingBill rows reference it via foreign key."""
    number = models.CharField(_('waybill number'), max_length=30, blank=True)
    forwarder = models.CharField(_('forwarder'), max_length=30, blank=True)
    # NOTE(review): blank=True without null=True on the numeric/date fields
    # means an empty form value still hits a DB NOT NULL — confirm intended.
    product_number = models.IntegerField(_('product number'), blank=True)
    packing_number = models.IntegerField(_('packing number'), blank=True)
    volume = models.DecimalField(_('volume'), max_digits=20, decimal_places=5, blank=True)
    status = models.CharField(_('status'), max_length=30, blank=True)
    # created is set once at insert time (auto_now_add); completed is set by
    # application code when the waybill is finished.
    created = models.DateField(_('created date'),auto_now_add=True, auto_now=False)
    completed = models.DateTimeField(_('completed time'),auto_now_add=False, auto_now=False)
    class Meta:
        ordering = ['number']
        verbose_name = _('waybill')
        verbose_name_plural = _('waybill')
    def __unicode__(self):
        # Python 2 display string: the waybill number.
        return self.number
class PickingBill(models.Model):
    """A picking bill belonging to one Waybill.

    waybill_number duplicates the FK's number as plain text (presumably for
    scanner input matching) — TODO confirm with the importing code.
    """
    waybill = models.ForeignKey(Waybill, verbose_name=_('waybill'))
    number = models.CharField(_('pickingbill number'), max_length=30, blank=True)
    product_id = models.CharField(_('product id'), max_length=30, blank=True)
    product_name = models.CharField(_('product name'), max_length=30, blank=True)
    dispatch_bill_number = models.CharField(_('dispatch bill number'), max_length=30, blank=True)
    waybill_number = models.CharField(_('waybill number'), max_length=30, blank=True)
    product_total_number = models.IntegerField(_('product total number'), blank=True)
    packing_total_number = models.IntegerField(_('packing total number'), blank=True)
    volume = models.DecimalField(_('volume'), max_digits=20, decimal_places=5, blank=True)
    status = models.CharField(_('status'), max_length=30, blank=True)
    # created is set once at insert; assigned is written when the bill is
    # handed to a picker.
    created = models.DateField(_('created date'),auto_now_add=True, auto_now=False)
    assigned = models.DateTimeField(_('assigned time'),auto_now_add=False, auto_now=False)
    class Meta:
        ordering = ['number']
        verbose_name = _('picking bill')
        verbose_name_plural = _('picking bill')
    def __unicode__(self):
        # Python 2 display string: the picking bill number.
        return self.number
| {"/picking/forms.py": ["/picking/models.py"], "/picking/urls.py": ["/picking/views.py"], "/zakkabag/urls.py": ["/newsletter/views.py", "/zakkabag/views.py"], "/picking/views.py": ["/picking/forms.py", "/picking/models.py"]} |
54,216 | hi-noikiy/sinotrans | refs/heads/master | /newsletter/views.py |
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, CreateView
# from django.utils.http import is_safe_url
from inspection.models import DailyInspection, shelf_inspection
from .forms import SignUpForm,ContactForm
from .models import Banner, Article
from django.contrib.auth import get_user_model
User = get_user_model()
def home(request):
    """Render the landing page: signup form, banners, article sections and
    a few dashboard snippets; also handles the signup POST.

    Each optional section is wrapped in a deliberate best-effort bare
    ``except`` so a failure in one data source never breaks the page.
    """
    title = 'Sign Up now'
    # Six random active banners per request.
    banners = Banner.objects.filter(active=True).order_by("?")[:6]
    form = SignUpForm(request.POST or None)
    context = {
        "title": title,
        "form": form,
        'banners':banners,
    }
    # Article sections, grouped into three column groups of categories.
    try:
        categories_src = [
            [
                ('news', _('news')),
                ('hot', _('hot')),
            ],
            [
                ('organization_and_position_responsibility', _('organization and position responsibility')),
                ('regulations', _('regulations')),
            ],
            [
                ('activities', _('activities')),
            ]
        ]
        categories = []
        objects = []
        for category in categories_src:
            categories.append(category )
            #objects.append((category, Article.objects.filter(category__in=[x[0] for x in category])))
            objects.append([(x[0], Article.objects.filter(category=x[0])) for x in category])
        #context["categories"] = Article.article_category
        #context["objects"] = [(category[0], Article.objects.filter(category=category[0])) for category in Article.article_category]
        context["categories"] = categories
        context["objects"] = objects
    except:
        pass
    # Latest daily inspections preview.
    try:
        context["dailyinspection_object_list"] = DailyInspection.objects.all()[:8]
    except:
        pass
    # Per-shelf inspection summary tuples: (inspection, normal, unlocked, steep).
    try:
        records_list = [(object, \
            object.shelf_inspection_record_set.filter(use_condition="normal").count(), \
            object.shelf_inspection_record_set.filter(is_locked=False).count(), \
            object.shelf_inspection_record_set.filter(gradient__gt=1.4).count()) for object in shelf_inspection.objects.all()]
        context["shelf_inspection_records"] = records_list[:10]
    except:
        pass
    # Overdue-inspection count stashed in the session for the navbar badge.
    try:
        count = DailyInspection.objects.overdue().count()
        #context["overdue_dailyinspection"] = count
        request.session["cart_item_count"] = count
    except:
        pass
    if form.is_valid():
        #form.save()
        #print request.POST['email'] #not recommended, raw data without validation
        instance = form.save(commit=False)
        full_name = form.cleaned_data.get("full_name")
        if not full_name:
            full_name = "New full name"
        instance.full_name = full_name
        # if not instance.full_name:
        #     instance.full_name = "Justin"
        instance.save()
        # On successful signup the whole context is replaced by a thank-you
        # title, so banners/articles are intentionally not re-rendered.
        context = {
            "title": "Thank you"
        }
    return render(request, "home.html", context)
def contact(request):
    """Render the contact form; on a valid POST, email the message to the
    site address and echo a copy to the sender (fail_silently=False, so SMTP
    errors propagate)."""
    title = 'Contact Us'
    title_align_center = True
    form = ContactForm(request.POST or None)
    if form.is_valid():
        # for key, value in form.cleaned_data.iteritems():
        #     print key, value
        #     #print form.cleaned_data.get(key)
        form_email = form.cleaned_data.get("email")
        form_message = form.cleaned_data.get("message")
        form_full_name = form.cleaned_data.get("full_name")
        # print email, message, full_name
        subject = 'Site contact form'
        from_email = settings.EMAIL_HOST_USER
        # Send to the site inbox and CC the submitter.
        to_email = [from_email, form_email]
        contact_message = "%s: %s via %s"%(
            form_full_name,
            form_message,
            form_email)
        some_html_message = """
        <h1>hello</h1>
        """
        # import smtplib
        # try:
        #     smtpObj = smtplib.SMTP()
        #     smtpObj.connect(settings.EMAIL_HOST, 25)
        #     smtpObj.login(settings.EMAIL_HOST_USER,settings.EMAIL_HOST_PASSWORD)
        #     smtpObj.sendmail(settings.EMAIL_HOST_USER, settings.EMAIL_HOST_USER, some_html_message)
        #     print "sent successfully !!!!!!!!!!!!!!!!"
        # except smtplib.SMTPException:
        #     print "Error: sent fail $$$$$$$$$$$$$$"
        send_mail(subject,
            contact_message,
            from_email,
            to_email,
            html_message=some_html_message,
            fail_silently=False)
    context = {
        "form": form,
        "title": title,
        "title_align_center": title_align_center,
    }
    return render(request, "forms.html", context)
class ArticleDetailView(DetailView):
    """Detail page for a single Article, with Home/Newsletter breadcrumbs."""
    model = Article
    template_name = "article/article_detail.html"

    def get_context_data(self, *args, **kwargs):
        context = super(ArticleDetailView, self).get_context_data(*args, **kwargs)
        # DetailView already supplies "object"; kept for template compatibility.
        context["object"] = self.get_object()
        return context

    def get(self, request, *args, **kwargs):
        return super(ArticleDetailView, self).get(request, *args, **kwargs)

    def dispatch(self, request, *args, **kwargs):
        instance = self.get_object()
        request.breadcrumbs([
            (_("Home"),reverse("home", kwargs={})),
            (_("Newsletter"),reverse("article_list", kwargs={})),
            (instance.title,request.path_info),
        ])
        # BUG FIX: the original called dispatch(request, args, kwargs), passing
        # the tuple and dict as two positional arguments; unpack them properly.
        return super(ArticleDetailView, self).dispatch(request, *args, **kwargs)
class ArticleListView(ListView):
    """List all Articles grouped by category, with breadcrumbs."""
    model = Article
    template_name = "article/article_list.html"

    def get_context_data(self, *args, **kwargs):
        context = super(ArticleListView, self).get_context_data(*args, **kwargs)
        #context["category"] = list(set([article.category for article in Article.objects.all()]))
        context["categories"] = Article.article_category
        # One (category_key, queryset) pair per declared category.
        context["objects"] = [(category[0], Article.objects.filter(category=category[0])) for category in Article.article_category]
        return context

    def dispatch(self, request, *args, **kwargs):
        request.breadcrumbs([
            (_("Home"),reverse("home", kwargs={})),
            (_("Newsletter"),reverse("article_list", kwargs={})),
        ])
        # BUG FIX: the original called dispatch(request, args, kwargs), passing
        # the tuple and dict as two positional arguments; unpack them properly.
        return super(ArticleListView, self).dispatch(request, *args, **kwargs)
| {"/picking/forms.py": ["/picking/models.py"], "/picking/urls.py": ["/picking/views.py"], "/zakkabag/urls.py": ["/newsletter/views.py", "/zakkabag/views.py"], "/picking/views.py": ["/picking/forms.py", "/picking/models.py"]} |
54,217 | hi-noikiy/sinotrans | refs/heads/master | /zakkabag/views.py | from django.shortcuts import render
from django.core.urlresolvers import reverse
#from django.views.i18n import set_language
from django import http
from django.utils import translation
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language, to_locale,
)
from django.utils.http import is_safe_url
from .forms import DashboardForm
def set_language(request):
    """Switch the active UI language and redirect back.

    Mirrors django.views.i18n.set_language: the language code comes from
    POST/GET 'language'; the redirect target from 'next', falling back to the
    referer and finally the home page, guarded by is_safe_url against open
    redirects. The language is stored in the session when available,
    otherwise in the language cookie.
    """
    # BUG FIX: 'settings' was never imported in this module, so the cookie
    # fallback branch below raised NameError; import it locally here.
    from django.conf import settings
    #print request.META.get('HTTP_REFERER', None)
    next = request.POST.get('next', request.GET.get('next'))
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER', None)
    if not is_safe_url(url=next, host=request.get_host()):
        next = reverse("home", kwargs={})
    response = http.HttpResponseRedirect(next)
    lang_code = request.POST.get('language', None) if request.method == 'POST' else request.GET.get('language', None)
    if lang_code and check_for_language(lang_code):
        if hasattr(request, 'session'):
            request.session[LANGUAGE_SESSION_KEY] = lang_code
        else:
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                max_age=settings.LANGUAGE_COOKIE_AGE,
                                path=settings.LANGUAGE_COOKIE_PATH,
                                domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response
def about(request):
    """Render the static About page."""
    context = {}
    return render(request, "about.html", context)
def sitemap(request):
    """Render the static site-map page."""
    context = {}
    return render(request, "map.html", context)
# def CKEditorImageUpload(request):
# if request.method == 'POST':
# callback = request.GET.get('CKEditorFuncNum')
# try:
# path = "static/upload/" + time.strftime("%Y%m%d%H%M%S",time.localtime())
# f = request.FILES["upload"]
# file_name = path + "_" + f.name
# des_origin_f = open(file_name, "wb+")
# for chunk in f.chunks():
# des_origin_f.write(chunk)
# des_origin_f.close()
# except Exception, e:
# print e
# res = "<script>window.parent.CKEDITOR.tools.callFunction("+callback+",'/"+file_name+"', '');</script>"
# return HttpResponse(res)
# else:
# raise Http404()
from inspection.models import month_choice
from inspection.models import DailyInspection
import time, datetime
from datetime import timedelta
from django.utils import timezone
from django.utils.translation import ugettext as _
from inspection.api import (
get_daily_inspection_total,
get_daily_inspection_uncompleted,
get_daily_inspection_efficiency,
get_daily_inspection_uncompleted_url,
get_daily_inspection_total_url,
get_daily_inspection_rows,
get_whpi_total,
get_whpi_uncompleted,
get_whpi_efficiency,
get_pi_uncompleted_url,
get_pi_total_url,
get_pi_rows,
get_spray_rows,
get_spray_total,
get_spray_uncompleted,
get_spray_efficiency,
get_spray_uncompleted_url,
get_spray_total_url,
get_hydrant_rows,
get_hydrant_total,
get_hydrant_uncompleted,
get_hydrant_efficiency,
get_hydrant_uncompleted_url,
get_hydrant_total_url,
get_other_equipment_rows,
get_other_equipment_total,
get_other_equipment_uncompleted,
get_other_equipment_efficiency,
get_other_equipment_uncompleted_url,
get_other_equipment_total_url,
get_shelf_inspection_rows,
get_shelf_inspection_total,
get_shelf_inspection_uncompleted,
get_shelf_inspection_efficiency,
get_shelf_inspection_uncompleted_url,
get_shelf_inspection_total_url,
get_vehicle_inspection_rows,
get_vehicle_inspection_total,
get_vehicle_inspection_uncompleted,
get_vehicle_inspection_efficiency,
get_vehicle_inspection_uncompleted_url,
get_vehicle_inspection_total_url,
get_forklift_repair_rows,
get_forklift_repair_total,
get_forklift_repair_uncompleted,
get_forklift_repair_efficiency,
get_forklift_repair_uncompleted_url,
get_forklift_repair_total_url,
get_forklift_maint_rows,
get_forklift_maint_total,
get_forklift_maint_uncompleted,
get_forklift_maint_cost,
get_forklift_maint_uncompleted_url,
get_forklift_maint_total_url,
get_annual_training_plan_rows,
get_annual_training_plan_total,
get_annual_training_plan_uncompleted,
get_annual_training_plan_ratio,
get_annual_training_plan_uncompleted_url,
get_annual_training_plan_total_url,
)
# def get_last_times():
# year = timezone.now().year #time.localtime()[0]
# return [[i, year] for i in range(1,13)]
def DashboardViewSINO(request):
    """Build the statistics dashboard for one year.

    For each inspection/maintenance category this assembles, per month plus a
    total column, the tuple (total, uncompleted, efficiency, total_url,
    uncompleted_url) via the inspection.api helpers, and renders
    dashboard_statistic.html. Without a ?year= parameter only the form is
    rendered.  (Python 2 module: note the ``print form.errors`` statement.)
    """
    year = timezone.now().year
    form = DashboardForm(request.GET or None)
    # if form.is_valid():
    if request.GET.get('year', None):
        year = int(request.GET.get('year'))
    else:
        print form.errors
        return render(request,"dashboard_statistic.html",{'form':form})
    row_groups = []
    indicators = []
    row_headers = DailyInspection.daily_insepction_category
    # Header cells are (display, rowspan, columnspan); three sub-columns
    # (total / uncompleted / efficiency) under each month plus a Total column.
    column_header1 = [
        [ [month[1],1,3] for month in month_choice + (('', _('Total')),) ]
    ]
    column_header2 = [[
        (_("total number"),1,1),
        (_("Uncompleted"),1,1),
        (_("efficiency"),1,1),
    ]*len(column_header1[0])]
    if row_groups:
        column_header1.insert(0,[_("category"),2,1])
    if indicators:
        column_header1.insert(0,[_("indicator"),2,1])
    column_header1[0].insert(0,[_("category"),2,1])
    column_css = ['table-total','table-warning','']
    context = {}
    context["headers"] = column_header1 + column_header2
    context["column_css"] = column_css # MUST = data field length
    # --- daily inspection ---
    data1 = get_daily_inspection_total(year)
    data2 = get_daily_inspection_uncompleted(year)
    data3 = get_daily_inspection_efficiency(year)
    data4 = get_daily_inspection_total_url(year)
    data5 = get_daily_inspection_uncompleted_url(year)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    rows = get_daily_inspection_rows()
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    context["rows_dailyinspection"] = zip(rows,indicator,group,data)
    #
    # --- warehouse periodical inspection ---
    rows = get_pi_rows()
    data1 = get_whpi_total(year)
    data2 = get_whpi_uncompleted(year)
    data3 = get_whpi_efficiency(year)
    data4 = get_pi_total_url(year)
    data5 = get_pi_uncompleted_url(year)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_pi"] = zip(rows,indicator,group,data)
    # --- spray system ---
    rows = get_spray_rows()
    data1 = get_spray_total(year)
    data2 = get_spray_uncompleted(year)
    data3 = get_spray_efficiency(year)
    data4 = get_spray_total_url(year)
    data5 = get_spray_uncompleted_url(year)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_spray"] = zip(rows,indicator,group,data)
    #
    # --- hydrants ---
    rows = get_hydrant_rows()
    data1 = get_hydrant_total(year)
    data2 = get_hydrant_uncompleted(year)
    data3 = get_hydrant_efficiency(year)
    data4 = get_hydrant_total_url(year)
    data5 = get_hydrant_uncompleted_url(year)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_hydrant"] = zip(rows,indicator,group,data)
    # --- other equipment ---
    rows = get_other_equipment_rows()
    data1 = get_other_equipment_total(year)
    data2 = get_other_equipment_uncompleted(year)
    data3 = get_other_equipment_efficiency(year)
    data4 = get_other_equipment_total_url(year)
    data5 = get_other_equipment_uncompleted_url(year)
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_other_equipment"] = zip(rows,indicator,group,data)
    # --- shelf inspection ---
    rows = get_shelf_inspection_rows()
    data1 = get_shelf_inspection_total(year)
    data2 = get_shelf_inspection_uncompleted(year)
    data3 = get_shelf_inspection_efficiency(year)
    data4 = get_shelf_inspection_total_url(year)
    data5 = get_shelf_inspection_uncompleted_url(year)
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_shelf_inspection"] = zip(rows,indicator,group,data)
    from inspection.models import shelf
    context["shelf_count"] = shelf.objects.all().count
    # --- vehicle inspection ---
    rows = get_vehicle_inspection_rows()
    data1 = get_vehicle_inspection_total(year)
    data2 = get_vehicle_inspection_uncompleted(year)
    data3 = get_vehicle_inspection_efficiency(year)
    data4 = get_vehicle_inspection_total_url(year)
    data5 = get_vehicle_inspection_uncompleted_url(year)
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_vehicle_inspection"] = zip(rows,indicator,group,data)
    from outsourcing.models import Vehicle
    context["vehicle_count"] = Vehicle.objects.all().count
    #>>>
    # --- forklift repair ---
    rows = get_forklift_repair_rows()
    data1 = get_forklift_repair_total(year)
    data2 = get_forklift_repair_uncompleted(year)
    data3 = get_forklift_repair_efficiency(year)
    data4 = get_forklift_repair_total_url(year)
    data5 = get_forklift_repair_uncompleted_url(year)
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_forklift_repair"] = zip(rows,indicator,group,data)
    from outsourcing.models import Forklift
    context["forklift_count"] = Forklift.objects.all().count
    #>>>
    # --- annual training plan ---
    rows = get_annual_training_plan_rows()
    data1 = get_annual_training_plan_total(year)
    data2 = get_annual_training_plan_uncompleted(year)
    data3 = get_annual_training_plan_ratio(year)
    data4 = get_annual_training_plan_total_url(year)
    data5 = get_annual_training_plan_uncompleted_url(year)
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    data = [ zip(a,b,c,d,e) for a,b,c,d,e in zip(data1,data2,data3,data4,data5)]
    context["rows_annual_training_plan"] = zip(rows,indicator,group,data)
    #>>>>>>>>>>>>>>
    # --- forklift maintenance: only (total, cost, url), so its own 2-column
    # header set is built below ---
    rows = get_forklift_maint_rows()
    data1 = get_forklift_maint_total(year)
    # data2 = get_forklift_maint_uncompleted(year)
    data3 = get_forklift_maint_cost(year)
    data4 = get_forklift_maint_total_url(year)
    # data5 = get_forklift_maint_uncompleted_url(year)
    indicator = ["na"]*len(rows)
    group = ["na"]*len(rows)
    data = [ zip(a,b,c) for a,b,c in zip(data1,data3,data4)]
    context["rows_forklift_maint"] = zip(rows,indicator,group,data)
    context["column_css_forklift_maint"] = ['table-total','']
    column_header1 = [
        [ [month[1],1,2] for month in month_choice + (('', _('Total')),) ]
    ]
    column_header2 = [[
        (_("total number"),1,1),
        (_("Expense"),1,1),
    ]*len(column_header1[0])]
    column_header1[0].insert(0,[_("category"),2,1])
    context["headers_forklift_maint"] = column_header1 + column_header2
    context['form'] = DashboardForm(request.GET or None, initial={'year':timezone.now().year})
    return render(request,"dashboard_statistic.html",context)
def test(request):
    # Scratch view: renders test.html; mapped at /test in zakkabag/urls.py.
    return render(request, "test.html", {})
54,218 | hi-noikiy/sinotrans | refs/heads/master | /inspection/mixins.py | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.db import models
from django.forms import models as model_forms
# Maps a model's object_name to [url_name, section_label] used to build the
# middle breadcrumb level ("Home / section / page") in the view mixins below.
model_map = {
    'DailyInspection': ['daily_inspection_stat','Daily Inspection'],
    'Forklift': ['storage_sec','Storage Security'],
    'Vehicle': ['transport_security','Transport Security'],
    #'Artical', reverse("article_list", kwargs={}),
    #'DailyInspection', reverse("transport_security", kwargs={}),
    #'DailyInspection', reverse("storage_sec", kwargs={}),
    #'DailyInspection', reverse("rehearsal_list", kwargs={}),
}
# Same shape, for models that live "under" a parent list page.
submodel_map = {
    'ShelfAnnualInspection': ['shelf_inspection_list','shelf inspection'], # add Model homepage
    'shelf': ['shelf_inspection_list','shelf inspection'],
    'shelf_inspection_record': ['shelf_inspection_list','shelf inspection'],
    'ForkliftRepair': ['forklift_list','Forklift'],
    'ForkliftMaint': ['forklift_list','Forklift'],
    'ForkliftAnnualInspection': ['forklift_list','Forklift'],
    'TrainingCourse': ['annualtrainingplan_list','annual training plan'],
    'TrainingRecord': ['annualtrainingplan_list','annual training plan'],
    'ExtinguisherInspection': ['extinguisherinspection_list','extinguisher inspection'],
    'HydrantInspection': ['hydrantinspection_list','hydrant inspection'],
}
class StaffRequiredMixin(object):
    """CBV mixin requiring an authenticated staff user.

    A logged-in, non-staff user is shown the permission-alert template
    instead of the requested page.
    """
    @classmethod
    def as_view(self, *args, **kwargs):
        wrapped = super(StaffRequiredMixin, self).as_view(*args, **kwargs)
        return login_required(wrapped)

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Guard clause: non-staff users never reach the wrapped view.
        if not request.user.is_staff:
            #raise Http404
            #return HttpResponseRedirect(reverse('home',kwargs={}))
            return render(request, "dailyinspection/permission_alert.html", {})
        return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
class SuperRequiredMixin(StaffRequiredMixin):
    """CBV mixin requiring a superuser; everyone else gets a 404."""
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Guard clause: only superusers may continue.
        if not request.user.is_superuser:
            raise Http404
        return super(SuperRequiredMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
    """CBV mixin requiring any authenticated user (both the view function
    returned by as_view and dispatch itself are wrapped in login_required)."""
    @classmethod
    def as_view(self, *args, **kwargs):
        view = super(LoginRequiredMixin, self).as_view(*args, **kwargs)
        return login_required(view)
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class ShortcutURLMixin(object):
    """Remember the current URL and object pk in the session so a "create"
    shortcut started from this page can navigate back afterwards."""
    def dispatch(self, request, *args, **kwargs):
        self.request.session["shortcut_back_url"] = self.request.get_full_path()
        self.request.session["shortcut_create_pk"] = self.get_object().pk
        # BUG FIX: the original called dispatch(request, args, kwargs), passing
        # the tuple and dict as two positional arguments; unpack them properly.
        return super(ShortcutURLMixin, self).dispatch(request, *args, **kwargs)
class TableListViewMixin(object):
    """Generic list-view mixin: default template, table-column metadata for
    the renderer, session cleanup of shortcut keys, and breadcrumbs."""
    template_name = "default/list.html"
    fields = []                  # explicit column list; empty => derive from model
    fields_display = []          # columns rendered via get_FOO_display
    fields_files = []            # columns rendered as file links
    fields_images = []           # columns rendered as inline images
    foreign_fields_images = []   # image columns reached through a FK

    def get_context_data(self, *args, **kwargs):
        context = super(TableListViewMixin, self).get_context_data(*args, **kwargs)
        # Default columns: every field except the pk and reverse relations,
        # unless the view declared an explicit ``fields`` list.
        context["fields"] = [field.name for field in self.model._meta.get_fields() if not field.name in [self.model._meta.pk.attname,] and not isinstance(field, models.ManyToOneRel)] \
            if not self.fields else self.fields
        context["fields_display"] = self.fields_display
        context["fields_files"] = self.fields_files
        context["fields_images"] = self.fields_images
        context["foreign_fields_images"] = self.foreign_fields_images
        return context

    def dispatch(self, request, *args, **kwargs):
        # Arriving at a list page invalidates any stored "go back" shortcut.
        for key in ("shortcut_back_url", "shortcut_back_url_saved", "shortcut_create_pk"):
            if self.request.session.get(key):
                del self.request.session[key]
        # Breadcrumbs: Home / [parent section] / current model.
        # (Renamed from ``list`` — the original shadowed the builtin.)
        crumbs = [
            (_("Home"),reverse("home", kwargs={})),
            (self.model._meta.verbose_name,request.path_info),
        ]
        if submodel_map.get(self.model._meta.object_name, None):
            value = submodel_map.get(self.model._meta.object_name, None)
            crumbs.insert(1, [_(value[1]), reverse(value[0], kwargs={})])
        request.breadcrumbs(crumbs)
        # BUG FIX: the original called dispatch(request, args, kwargs), passing
        # the tuple and dict as two positional arguments; unpack them properly.
        return super(TableListViewMixin, self).dispatch(request, *args, **kwargs)
# from django.db.models.fields import ManyToOneRel
from django.db import models
class TableDetailViewMixin(object):
    """Generic detail-view mixin: default template, field/fieldset metadata
    for the renderer, related-model sets, back-URL restore and breadcrumbs."""
    template_name = "default/detail.html"
    # fieldsets = [("title",{"fields":("",)}), ]
    fieldsets = []               # admin-style grouping; when set, overrides ``fields``
    fields = []                  # explicit field list; empty => derive from model
    fields_display = []          # fields rendered via get_FOO_display
    fields_files = []            # fields rendered as file links
    fields_images = []           # fields rendered as inline images
    model = None
    model_sets = [("model name", None, []),] # model name, object_list, list_display

    def get_context_data(self, *args, **kwargs):
        context = super(TableDetailViewMixin, self).get_context_data(*args, **kwargs)
        if not self.fieldsets:
            # Default: every field except the pk and reverse relations.
            context["fields"] = [field for field in self.model._meta.get_fields() if not field.name in [self.model._meta.pk.attname,] and not isinstance(field, models.ManyToOneRel)] \
                if not self.fields else self.fields
        context["fieldsets"] = self.fieldsets
        context["fields_display"] = self.fields_display
        context["fields_files"] = self.fields_files
        context["fields_images"] = self.fields_images
        context["model_sets"] = self.model_sets
        # Offer a "back" link when a shortcut URL is stored and differs from
        # the current page.
        if self.request.session.get("shortcut_back_url") and not self.request.get_full_path() == self.request.session.get("shortcut_back_url"):
            context["back_url"] = self.request.session.get("shortcut_back_url")
        return context

    def dispatch(self, request, *args, **kwargs):
        # Breadcrumbs: Home / [parent section] / model list / current object.
        # (Renamed from ``list`` — the original shadowed the builtin.)
        crumbs = [
            (_("Home"),reverse("home", kwargs={})),
            (self.model._meta.verbose_name, self.get_object().get_absolute_url_list() if hasattr(self.get_object(),"get_absolute_url_list") else ""),
            (self.get_object(),request.path_info),
        ]
        if submodel_map.get(self.model._meta.object_name, None):
            value = submodel_map.get(self.model._meta.object_name, None)
            crumbs.insert(1, [_(value[1]), reverse(value[0], kwargs={})])
        request.breadcrumbs(crumbs)
        # BUG FIX: the original called dispatch(request, args, kwargs), passing
        # the tuple and dict as two positional arguments; unpack them properly.
        return super(TableDetailViewMixin, self).dispatch(request, *args, **kwargs)
# fields = ModelFormMixin::fields
# form_class
# form_class = ModelFormMixin::get_form_class << model_forms.modelform_factory(model, fields=self.fields)
# form_class = FormMixin::get_form_class << self.form_class
# form = FormMixin::get_form()
# kwargs : ModelFormMixin::get_form_kwargs
# instance : self.object
# kwargs : FormMixin::get_form_kwargs
# initial : self.get_initial()
# prefix : self.get_prefix()
# data : self.request.POST
# files : self.request.FILES
# success_url :
# ModelFormMixin::get_success_url
# FormMixin::get_success_url
# get_context_data
# form : FormMixin::get_context_data
class UpdateViewMixin(object):
    """Mixin for Django ``UpdateView`` subclasses.

    Provides a default template, a lazily generated ModelForm, breadcrumb
    navigation and a "back" URL taken from the ``shortcut_back_url``
    session key.
    """
    template_name = "default/update.html"
    fields = None  # declared by ModelFormMixin; auto-filled in dispatch()

    def get_form_class(self):
        """Build a ModelForm lazily when none was configured explicitly."""
        if not self.form_class:
            if self.fields:
                self.form_class = model_forms.modelform_factory(self.model, fields=self.fields, )
            else:
                self.form_class = model_forms.modelform_factory(self.model, exclude=["", ], )
        return self.form_class

    def get_success_url(self):
        # Prefer the session "shortcut back" URL so the user returns to the
        # page they came from; fall back to the object's detail page.
        return self.get_object().get_absolute_url() if not self.request.session.get("shortcut_back_url", None) else self.request.session["shortcut_back_url"]

    def get_context_data(self, *args, **kwargs):
        context = super(UpdateViewMixin, self).get_context_data(*args, **kwargs)
        context["title"] = self.get_object()
        if self.request.session.get("shortcut_back_url"):
            context["back_url"] = self.request.session.get("shortcut_back_url")
        return context

    # NOTE: kept for study purposes — this duplicates BaseUpdateView.post().
    def post(self, request, *args, **kwargs):
        self.object = self.get_object()  # must be set before form handling
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)
        # (the original had an unreachable ``return super().post(...)`` here)

    def get_fields(self, *args, **kwargs):
        return self.fields

    def dispatch(self, request, *args, **kwargs):
        if not self.fields and not self.get_fields() and not self.form_class:
            # Default to every field except the PK and reverse FK relations.
            self.fields = [field.name for field in self.model._meta.get_fields()
                           if field.name not in [self.model._meta.pk.attname, ]
                           and not isinstance(field, models.ManyToOneRel)]
        obj = self.get_object()
        crumbs = [
            (_("Home"), reverse("home", kwargs={})),
            (self.model._meta.verbose_name,
             obj.get_absolute_url_list() if hasattr(obj, "get_absolute_url_list") else ""),
            (obj, request.path_info),
        ]
        value = submodel_map.get(self.model._meta.object_name, None)
        if value:
            crumbs.insert(1, [_(value[1]), reverse(value[0], kwargs={})])
        request.breadcrumbs(crumbs)
        # BUG FIX: forward *args/**kwargs unpacked; the original passed the
        # tuple and dict as two positional arguments.
        return super(UpdateViewMixin, self).dispatch(request, *args, **kwargs)
"""
self.object : ModelFormMixin::form_valid
"""
class CreateViewMixin(object):
    """Mixin for Django ``CreateView`` subclasses.

    Provides a default template, a lazily generated ModelForm, breadcrumb
    navigation and a session-based "back" URL.
    """
    template_name = "default/create.html"

    def get_form_class(self):
        """Build a ModelForm lazily when none was configured explicitly."""
        if not self.form_class:
            if self.fields:
                self.form_class = model_forms.modelform_factory(self.model, fields=self.fields, )
            else:
                self.form_class = model_forms.modelform_factory(self.model, exclude=["", ], )
        return self.form_class

    def get_success_url(self):
        # Prefer the session "shortcut back" URL; fall back to the newly
        # created object's detail page.
        return self.object.get_absolute_url() if not self.request.session.get("shortcut_back_url", None) else self.request.session["shortcut_back_url"]

    def get_context_data(self, *args, **kwargs):
        context = super(CreateViewMixin, self).get_context_data(*args, **kwargs)
        if self.request.session.get("shortcut_back_url") and not self.request.get_full_path() == self.request.session.get("shortcut_back_url"):
            context["back_url"] = self.request.session.get("shortcut_back_url")
        elif hasattr(self.model(), "get_absolute_url_list"):
            context["back_url"] = self.model().get_absolute_url_list()
        return context

    def form_invalid(self, form):
        # ModelFormMixin.form_invalid expects self.object to exist.
        self.object = None
        return super(CreateViewMixin, self).form_invalid(form)

    def dispatch(self, request, *args, **kwargs):
        crumbs = [
            (_("Home"), reverse("home", kwargs={})),
            (self.model._meta.verbose_name,
             self.model().get_absolute_url_list() if hasattr(self.model(), "get_absolute_url_list") else ""),
            (_("Create"), request.path_info),
        ]
        value = submodel_map.get(self.model._meta.object_name, None)
        if value:
            crumbs.insert(1, [_(value[1]), reverse(value[0], kwargs={})])
        request.breadcrumbs(crumbs)
        # BUG FIX: forward *args/**kwargs unpacked; the original passed the
        # tuple and dict as two positional arguments.
        return super(CreateViewMixin, self).dispatch(request, *args, **kwargs)
| {"/picking/forms.py": ["/picking/models.py"], "/picking/urls.py": ["/picking/views.py"], "/zakkabag/urls.py": ["/newsletter/views.py", "/zakkabag/views.py"], "/picking/views.py": ["/picking/forms.py", "/picking/models.py"]} |
54,219 | hi-noikiy/sinotrans | refs/heads/master | /picking/views.py | from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.views.generic.base import View, TemplateResponseMixin, ContextMixin, TemplateView
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormMixin, ModelFormMixin
from django.http import HttpResponseRedirect
from django.conf import settings
from django.utils.translation import ugettext as _
from django.contrib import messages
from django_filters import FilterSet, CharFilter, NumberFilter, BooleanFilter, DateFilter, MethodFilter
from chartjs.views.lines import BaseLineChartView
from .forms import PickingBillScanForm, WaybillScanForm
from .models import Waybill, PickingBill
# Create your views here.
class PickingbillAssignView(FormMixin, ListView):
    """List view for assigning picking bills, with a barcode-scan form."""
    template_name = "picking/pickingbill_assign.html"
    model = PickingBill
    form_class = PickingBillScanForm

    def get_context_data(self, *args, **kwargs):
        ctx = super(PickingbillAssignView, self).get_context_data(*args, **kwargs)
        ctx["pickingbill_scan_form"] = PickingBillScanForm()
        # NOTE(review): despite the key name this holds *every* bill, not only
        # unassigned ones — confirm whether a filter is missing.
        ctx["pickingbill_objects_unassigned"] = PickingBill.objects.all()
        hidden = ("id", "waybill")
        ctx["fields"] = [f for f in self.model._meta.get_fields() if f.name not in hidden]
        return ctx
class WaybillCompleteView(FormMixin, ListView):
    """List view for completing waybills, with a barcode-scan form."""
    template_name = "picking/waybill_complete.html"
    model = Waybill
    form_class = WaybillScanForm

    def get_context_data(self, *args, **kwargs):
        ctx = super(WaybillCompleteView, self).get_context_data(*args, **kwargs)
        # NOTE(review): the second positional argument lands in the form's
        # ``files`` parameter — confirm POST data was really meant as files.
        ctx["waybill_scan_form"] = WaybillScanForm(self.request.GET or None, self.request.POST or None)
        ctx["waybill_objects_unassigned"] = Waybill.objects.all()
        hidden = ("id", "pickingbill")
        ctx["fields"] = [f for f in self.model._meta.get_fields() if f.name not in hidden]
        return ctx
class LineChartJSONView(BaseLineChartView):
    """django-chartjs line-chart endpoint fed with hard-coded demo data."""

    def get_labels(self):
        """Return labels for the x-axis."""
        # return self.get_dates()
        labels = ['2017-11-1', '2017-11-2', '2017-11-3', '2017-11-4', '2017-11-5']
        return labels

    def get_providers(self):
        """Return names of datasets."""
        # return self.get_catetory()
        providers = ['Tom', 'Jerry', 'Haro', 'Boto']
        return providers

    def get_data(self):
        """Return one value series per provider."""
        # return self.get_chart_counts()
        series = [
            ["1.73", "1.9", "2.0", "1.7"],
            ["1.83", "1.774", "1.8", "1.8"],
            ["1.93", "1.9", "1.9", "1.9"],
            ["2.13", "1.4", "2.5", "2.0"],
        ]
        return series
class PickingbillStatView(ListView):
    """Statistics page for picking bills (hard-coded demo chart data)."""
    template_name = "picking/pickingbill_stat.html"
    model = PickingBill

    def get_context_data(self, *args, **kwargs):
        ctx = super(PickingbillStatView, self).get_context_data(*args, **kwargs)
        ctx.update({
            "x-axis": ['2017-11-1', '2017-11-2', '2017-11-3', '2017-11-4'],
            "y-axis": ['Tom', 'Jerry', 'Haro', 'Boto'],
            "values": [["1.73", "1.774", "1.5", "2.0"], ["1.73", "1.774", "1.5", "2.0"],
                       ["1.73", "1.774", "1.5", "2.0"], ["1.73", "1.774", "1.5", "2.0"]],
        })
        return ctx
| {"/picking/forms.py": ["/picking/models.py"], "/picking/urls.py": ["/picking/views.py"], "/zakkabag/urls.py": ["/newsletter/views.py", "/zakkabag/views.py"], "/picking/views.py": ["/picking/forms.py", "/picking/models.py"]} |
54,222 | Howard277/mars_customer_center | refs/heads/master | /customer/migrations/0001_initial.py | # Generated by Django 2.1 on 2019-01-10 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the customer app (Django 2.1).

    Creates Customer plus its satellite tables (PropertyCar, PropertyHouse,
    Relationship, WordInfo).  Links use indexed ``customer_id`` integer
    columns rather than foreign keys.  Do not hand-edit field definitions.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Core customer profile table.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=20)),
                ('age', models.IntegerField(null=True)),
                ('sex', models.CharField(choices=[('man', '男'), ('woman', '女')], default='man', max_length=5)),
                ('id_card_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('phone_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('phone_no_2', models.CharField(db_index=True, max_length=20, null=True)),
                ('passport_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('home_address', models.CharField(max_length=200, null=True)),
                ('photo_url', models.CharField(max_length=200, null=True)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('create_user', models.CharField(max_length=20)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('update_user', models.CharField(max_length=20)),
            ],
        ),
        # Cars owned by a customer (linked via customer_id).
        migrations.CreateModel(
            name='PropertyCar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_id', models.IntegerField(db_index=True, null=True)),
                ('brand', models.CharField(max_length=50)),
                ('brand_chinese_name', models.CharField(max_length=50)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('create_user', models.CharField(max_length=20)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('update_user', models.CharField(max_length=20)),
            ],
        ),
        # Houses owned by a customer.
        migrations.CreateModel(
            name='PropertyHouse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_id', models.IntegerField(db_index=True, null=True)),
                ('house_no', models.CharField(max_length=50)),
                ('house_address', models.CharField(max_length=200)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('create_user', models.CharField(max_length=20)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('update_user', models.CharField(max_length=20)),
            ],
        ),
        # Contact persons related to a customer.
        migrations.CreateModel(
            name='Relationship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_id', models.IntegerField(db_index=True)),
                ('name', models.CharField(db_index=True, max_length=20)),
                ('age', models.IntegerField(null=True)),
                ('sex', models.CharField(default='man', max_length=5)),
                ('relation_type', models.CharField(max_length=20)),
                ('id_card_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('phone_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('phone_no_2', models.CharField(db_index=True, max_length=20, null=True)),
                ('passport_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('home_address', models.CharField(max_length=200, null=True)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('create_user', models.CharField(max_length=20)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('update_user', models.CharField(max_length=20)),
            ],
        ),
        # Employment / company info for a customer.
        migrations.CreateModel(
            name='WordInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_id', models.IntegerField(db_index=True, null=True)),
                ('company_name', models.CharField(db_index=True, max_length=200)),
                ('company_org_no', models.CharField(db_index=True, max_length=200, null=True)),
                ('company_phone_no', models.CharField(db_index=True, max_length=20, null=True)),
                ('company_address', models.CharField(max_length=200, null=True)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('create_user', models.CharField(max_length=20)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('update_user', models.CharField(max_length=20)),
            ],
        ),
    ]
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,223 | Howard277/mars_customer_center | refs/heads/master | /order/common.py | ORDER_STATUS = (('init', '初始状态'), ('wait_pay', '待付款状态'), ('finish', '完成状态'))
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,224 | Howard277/mars_customer_center | refs/heads/master | /customer/urls.py | from django.urls import path, include
from . import views, views_relationship
# URL routes for the customer app.
urlpatterns = [
    path('health', views.health),
    # "Customer" module endpoints
    path('save', views.save),
    path('all', views.all),
    path('delete_by_id', views.delete_by_id),
    path('page_by_condition', views.page_by_condition),
    path('upload_customer_image', views.upload_customer_image),
    # "Relationship" (contact person) module endpoints
    path('relationship/', include(
        [path('all', views_relationship.all)
            , path('get_by_customerid', views_relationship.get_by_customerid)
            , path('save', views_relationship.save)]))
]
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,225 | Howard277/mars_customer_center | refs/heads/master | /customer/views_relationship.py | from .models import Relationship
from django.http import HttpResponse
from django.core import serializers
import json
# Fetch all contacts
def all(request):
    """Return every Relationship row serialized as a JSON array."""
    payload = serializers.serialize("json", Relationship.objects.all())
    return HttpResponse(payload, content_type='application/json')
# Save (create or update) a contact
def save(request):
    """Create or update a Relationship from a JSON request body.

    A positive ``id`` in the payload means update an existing row;
    otherwise a new one is inserted.  Returns the saved primary key.
    """
    paras = json.loads(request.body.decode())
    relationship = Relationship()
    if 'id' in paras and paras['id'] > 0:
        # Updating: load the existing row first.  The original bound the
        # payload value to a local named ``id``, shadowing the builtin.
        relationship = Relationship.objects.get(id=paras['id'])
    # Copy every payload key that matches an instance attribute.
    # (Replaces the original O(n*m) nested loop with a dict membership test.)
    for p_key in paras:
        if p_key in relationship.__dict__:
            relationship.__dict__[p_key] = paras[p_key]
    relationship.save()
    return HttpResponse(relationship.id)
# Query contacts by customer id
def get_by_customerid(request):
    """Return the contacts belonging to ``customer_id`` (defaults to -1,
    which matches nothing)."""
    customer_id = request.GET['customer_id'] if 'customer_id' in request.GET else -1
    qs = Relationship.objects.all().filter(customer_id=customer_id)
    return HttpResponse(serializers.serialize("json", qs),
                        content_type='application/json')
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,226 | Howard277/mars_customer_center | refs/heads/master | /mars_customer_center/settings.py | """
Django settings for mars_customer_center project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Read the ENV environment variable; it selects environment-specific settings.
ENV = os.getenv('ENV')
# Print it to ease troubleshooting.
print('ENV:', ENV)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ny&@m!b=izgkn--61gkbtq_ychi3k%gn0e=l$1378im^mb2u(%'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'customer',
    'order',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'common.logmiddleware.LogMiddleware',
]

ROOT_URLCONF = 'mars_customer_center.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mars_customer_center.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

# Database configuration (local development default)
# NOTE(review): credentials are hard-coded — consider environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mars_customer',
        'USER': 'root',
        'PASSWORD': 'root',
        'HOST': 'localhost',
        'PORT': '3306',
    }
    # , 'db_slave': {
    #     'ENGINE': 'django.db.backends.mysql',
    #     'NAME': 'customer_slave',
    #     'USER': 'root',
    #     'PASSWORD': '1qaz!QAZ',
    #     'HOST': 'localhost',
    #     'PORT': '3306',
    # }
}

# Database router configuration (currently disabled)
# DATABASE_ROUTERS = ['customer_center.dbrouter.Router', ]

# Logging configuration (console only by default)
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'DEBUG',
        },
    }
}

# Redis configuration (sentinel nodes)
REDIS_SENTINELS = [('192.168.13.118', 27001), ('192.168.13.118', 27002), ('192.168.13.118', 27003)]
REDIS_SERVICE_NAME = 'mymaster'
REDIS_PASSWORD = '1qaz!QAZ'
REDIS_DB = 0

# Environment-specific overrides
if ENV == 'test':
    # Database configuration for the test environment
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'hb_customer',
            'USER': 'root',
            'PASSWORD': 'root',
            'HOST': '192.168.13.18',
            'PORT': '3306',
        }, 'db_slave': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'hb_customer',
            'USER': 'root',
            'PASSWORD': 'root123',
            'HOST': '192.168.13.17',
            'PORT': '3306',
        }
    }
    # Logging configuration (file-based)
    BASE_LOG_DIR = os.getcwd() + '/log/'
    if not os.path.exists(BASE_LOG_DIR):
        print('创建日志文件夹:', BASE_LOG_DIR)
        os.mkdir(BASE_LOG_DIR)
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '[%(asctime)s][%(threadName)s:%(thread)d][task_id:%(name)s][%(filename)s:%(lineno)d]'
                          '[%(levelname)s][%(message)s]'
            },
            'simple': {
                'format': '[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d]%(message)s'
            },
            'collect': {
                'format': '%(message)s'
            }
        },
        'filters': {
            'require_debug_true': {
                '()': 'django.utils.log.RequireDebugTrue',
            },
        },
        'handlers': {
            'SF': {
                'level': 'INFO',
                'class': 'logging.handlers.RotatingFileHandler',  # write to file, rotate by size
                'filename': os.path.join(BASE_LOG_DIR, "info.log"),  # log file
                'maxBytes': 1024 * 1024 * 50,  # max size 50M
                'backupCount': 3,  # keep 3 backups: xx.log --> xx.log.1 --> xx.log.2 --> xx.log.3
                'formatter': 'standard',
                'encoding': 'utf-8',
            },
            'TF': {
                'level': 'INFO',
                'class': 'logging.handlers.TimedRotatingFileHandler',  # write to file, rotate by time
                'filename': os.path.join(BASE_LOG_DIR, "info.log"),  # log file
                'backupCount': 3,  # keep 3 backups: xx.log --> xx.log.2018-08-23_00-00-00 --> xx.log.2018-08-24_00-00-00 --> ...
                'when': 'D',  # rotate daily; options: S=seconds M=minutes H=hours D=days W0-W6=weekday(0=Monday) midnight=default
                'formatter': 'standard',
                'encoding': 'utf-8',
            },
            'error': {
                'level': 'ERROR',
                'class': 'logging.handlers.RotatingFileHandler',  # write to file, auto-rotate
                'filename': os.path.join(BASE_LOG_DIR, "err.log"),  # log file
                'maxBytes': 1024 * 1024 * 5,  # size limit (original comment said 50M; the value is 5M)
                'backupCount': 5,
                'formatter': 'standard',
                'encoding': 'utf-8',
            }
        },
        'loggers': {
            '': {  # the default logger uses this configuration
                'handlers': ['SF', 'TF', 'error'],  # 'console' can be removed after going live
                'level': 'DEBUG',
                'propagate': True,
            }
        },
    }
54,227 | Howard277/mars_customer_center | refs/heads/master | /order/migrations/0001_initial.py | # Generated by Django 2.1 on 2019-01-10 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the order app (Django 2.1).

    Creates the Order table; status choices mirror ORDER_STATUS from
    order/common.py.  Do not hand-edit field definitions.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('card_no', models.CharField(db_index=True, max_length=50)),
                ('customer_name', models.CharField(db_index=True, max_length=50, null=True)),
                ('order_status', models.CharField(choices=[('init', '初始状态'), ('wait_pay', '待付款状态'), ('finish', '完成状态')], db_index=True, default='init', max_length=50)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('create_user', models.CharField(max_length=50)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('update_user', models.CharField(max_length=50)),
            ],
        ),
    ]
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,228 | Howard277/mars_customer_center | refs/heads/master | /order/views.py | from django.shortcuts import render
from .models import Order
from django.http import HttpResponse
from django.core import serializers
import json
from common import utils
from django.views.decorators.http import require_POST
from django.db.models import Q
# Create your views here.
def save(request):
    """Create or update an Order from a JSON request body.

    A non-empty ``id`` in the payload means update; otherwise a new order
    id is generated from the Redis daily counter.  Returns the order id.
    """
    params = json.loads(request.body.decode())
    order = Order()
    # BUG FIX: the original read ``params[id]`` and ``get(id=id)`` using the
    # *builtin* ``id`` function as the key, which always raised KeyError.
    if 'id' in params and len(params['id']) > 0:
        # An id was supplied: update the existing order.
        order = Order.objects.get(id=params['id'])
    else:
        # No id: new order — generate a sequential id via Redis.
        order.id = utils.redis_incr('order')
    # Copy every payload key that matches an instance attribute.
    for pkey in params:
        if pkey in order.__dict__:
            order.__dict__[pkey] = params[pkey]
    order.save()
    return HttpResponse(order.id)
# Fetch orders by customer id
def get_order_by_customerid(request):
    """Return all orders for ``customerId`` serialized as JSON.

    NOTE(review): the Order model visible in this project has no
    ``customer_id`` field (only card_no / customer_name) — confirm the
    filter column.
    """
    customer_id = request.GET['customerId']
    orders = Order.objects.all().filter(customer_id=customer_id)
    # BUG FIX: serializers.serialize() takes the format as its first
    # argument; the original passed the queryset in its place.
    return HttpResponse(serializers.serialize("json", orders), content_type='application/json')
# Paged query with fuzzy condition
@require_POST
def page_by_condition(request):
    """Return one page of orders, optionally filtered by a fuzzy condition.

    JSON body: ``{"page": {"pagesize": N, "currentpage": N}, "condition": str}``.
    The response echoes the page dict back with ``total`` filled in.
    """
    params = json.loads(request.body.decode())
    page = params['page']
    condition = params['condition']
    order_list = Order.objects.all()
    if condition is not None and len(condition) > 0:
        # Fuzzy-match the condition against every searchable column.
        order_list = order_list.filter(Q(id__icontains=condition)
                                       | Q(card_no__icontains=condition)
                                       | Q(customer_name__icontains=condition)
                                       | Q(order_status__icontains=condition))
    pagesize = page['pagesize']
    current_page = page['currentpage']
    begin_index = (current_page - 1) * pagesize
    end_index = current_page * pagesize
    # Count before slicing so the client receives the full total.
    total = order_list.count()
    customer_list_json = serializers.serialize("json", order_list[begin_index:end_index])
    page['total'] = total
    return HttpResponse(json.dumps({'data': customer_list_json, 'page': page}),
                        content_type="application/json")
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,229 | Howard277/mars_customer_center | refs/heads/master | /order/models.py | from django.db import models
from .common import ORDER_STATUS
# Create your models here.
# Order model.
class Order(models.Model):
    """A customer order; status values come from ORDER_STATUS."""
    id = models.CharField(max_length=50, primary_key=True)  # order number, primary key
    card_no = models.CharField(max_length=50, db_index=True)  # customer ID-card number
    customer_name = models.CharField(max_length=50, null=True, db_index=True)  # customer name
    order_status = models.CharField(max_length=50, choices=ORDER_STATUS, default='init', db_index=True)  # order status
    create_time = models.DateTimeField(auto_now_add=True)
    create_user = models.CharField(max_length=50)
    update_time = models.DateTimeField(auto_now=True)
    update_user = models.CharField(max_length=50)
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,230 | Howard277/mars_customer_center | refs/heads/master | /order/urls.py | from django.urls import path
from . import views
# URL routes for the order app.
urlpatterns = [
    path('save', views.save),
    path('page_by_condition', views.page_by_condition),
    path('get_order_by_customerid', views.get_order_by_customerid)
]
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,231 | Howard277/mars_customer_center | refs/heads/master | /common/logmiddleware.py | from django.utils.deprecation import MiddlewareMixin
from django.http import HttpRequest
import logging
import json
# Request-logging middleware.
class LogMiddleware(MiddlewareMixin):
    """Logs each request's method, path and (for POST) its payload."""

    def process_request(self, request):
        """Collect basic request info and log it; returns None so the
        request continues through the middleware chain."""
        infos = {'method': request.method, 'path': request.path}
        if request.method == 'POST':
            if request.content_type == 'application/json':
                try:
                    infos['data'] = json.loads(request.body.decode())
                except ValueError:
                    # Malformed JSON must not break the request pipeline;
                    # record the raw body instead.
                    infos['data'] = request.body.decode(errors='replace')
            else:
                infos['data'] = request.POST
        # Use the logging framework (already imported but previously unused)
        # instead of print(), so output obeys the LOGGING configuration.
        logging.getLogger(__name__).info('%s', infos)
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,232 | Howard277/mars_customer_center | refs/heads/master | /customer/views.py | import uuid
import json
from django.http import HttpResponse
from .models import Customer
from django.views.decorators.http import require_POST, require_GET
from django.core import serializers
from django.db.models import Q
from PIL import Image
# Create your views here.
def health(require):
    """Health-check endpoint: always reports the service as UP."""
    body = json.dumps({'status': 'UP'})
    return HttpResponse(body, content_type='application/json')
# Save a customer; POST required
@require_POST
def save(require):
    """Create or update a Customer from a JSON request body.

    A positive ``id`` in the payload means update an existing row;
    otherwise a new one is inserted.  Returns the saved primary key.
    """
    paras = json.loads(require.body.decode())
    customer = Customer()
    if 'id' in paras and paras['id'] > 0:
        # Updating: fetch the original row first.  The original bound the
        # payload value to a local named ``id``, shadowing the builtin.
        customer = Customer.objects.get(id=paras['id'])
    # Copy every payload key that matches an instance attribute.
    # (Replaces the original O(n*m) nested loop with a dict membership test.)
    for p_key in paras:
        if p_key in customer.__dict__:
            customer.__dict__[p_key] = paras[p_key]
    customer.save()
    return HttpResponse(customer.id)
# Fetch all customers
@require_GET
def all(require):
    """Return every customer serialized as a JSON array."""
    payload = serializers.serialize("json", Customer.objects.all())
    return HttpResponse(payload, content_type="application/json")
# Search customers by condition
@require_GET
def search_by_condition(require):
    """Filter customers by exact ``name`` and/or ``id_card_no`` query params."""
    qs = Customer.objects.all()
    params = require.GET
    if 'name' in params:
        qs = qs.filter(name=params['name'])
    if 'id_card_no' in params:
        qs = qs.filter(id_card_no=params['id_card_no'])
    return HttpResponse(serializers.serialize("json", qs), content_type="application/json")
# Delete a customer by id
@require_POST
def delete_by_id(require):
    """Delete a customer by primary key (JSON body must carry ``id``).

    Returns ``{"flag": bool, "msg": str}``.
    """
    paras = json.loads(require.body.decode())
    if 'id' in paras:
        Customer.objects.filter(id=paras['id']).delete()
        result = {'flag': True, 'msg': '删除成功!'}
    else:
        # BUG FIX: the missing-parameter path previously left flag=True,
        # reporting success for a request that did nothing.
        result = {'flag': False, 'msg': '没有设置id参数'}
    return HttpResponse(json.dumps(result), content_type='application/json')
# Paged query with fuzzy condition
@require_POST
def page_by_condition(require):
    """Return one page of customers, optionally filtered by a fuzzy condition.

    JSON body: ``{"page": {"pagesize": N, "currentpage": N}, "condition": str}``.
    The response echoes the page dict back with ``total`` filled in.
    """
    params = json.loads(require.body.decode())
    page = params['page']
    condition = params['condition']
    customer_list = Customer.objects.all()
    if condition is not None and len(condition) > 0:
        # Fuzzy-match the condition against every searchable column.
        customer_list = customer_list.filter(Q(name__icontains=condition)
                                             | Q(id_card_no__icontains=condition)
                                             | Q(phone_no__icontains=condition)
                                             | Q(phone_no_2__icontains=condition)
                                             | Q(passport_no__icontains=condition)
                                             | Q(home_address__icontains=condition))
    pagesize = page['pagesize']
    currentpage = page['currentpage']
    beginindex = (currentpage - 1) * pagesize
    endindex = currentpage * pagesize
    # Count before slicing so the client receives the full total.
    total = customer_list.count()
    customer_list_json = serializers.serialize("json", customer_list[beginindex:endindex])
    page['total'] = total
    return HttpResponse(json.dumps({'data': customer_list_json, 'page': page}),
                        content_type="application/json")
# @require_POST
def upload_customer_image(request):
    """Handle a customer photo upload (multipart POST with a ``file`` part).

    Stores the image on disk under a UUID filename and, when ``pk`` is
    supplied, records that filename on the customer row.
    """
    if request.method == 'POST':
        photo = request.FILES['file']
        if photo:
            # Use a UUID as the stored name, keeping the original extension.
            photoname = str(uuid.uuid1()) + '.' + str(photo).split('.')[-1]
            # NOTE(review): hard-coded developer path — should come from settings.
            photofullname = '/Users/wuketao/Downloads/' + photoname
            img = Image.open(photo)
            img.save(photofullname)
            if 'pk' in request.POST:
                pk = request.POST['pk']
                current_customer = Customer.objects.get(id=pk)  # type:Customer
                current_customer.photo_url = photoname
                current_customer.save()
            # Simplified flow: a session + redirect could be set here instead.
            return HttpResponse('上传成功')
        else:
            return HttpResponse('上传失败')
    # Reached only for non-POST requests.
    return HttpResponse('图片为空')
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,233 | Howard277/mars_customer_center | refs/heads/master | /common/utils.py | import time
import redis
from redis.sentinel import Sentinel
from mars_customer_center import settings
# Helpers returning the current time as formatted strings
def _fmt_now(pattern):
    """Format the current local time with *pattern*."""
    return time.strftime(pattern, time.localtime())


def get_datetime_str():
    """``YYYY-MM-DD HH:MM:SS`` for the current local time."""
    return _fmt_now('%Y-%m-%d %H:%M:%S')


def get_date_str():
    """``YYYY-MM-DD`` for the current local date."""
    return _fmt_now('%Y-%m-%d')


def get_datetime_str2():
    """Compact ``YYYYMMDDHHMMSS`` timestamp."""
    return _fmt_now('%Y%m%d%H%M%S')


def get_date_str2():
    """Compact ``YYYYMMDD`` date stamp."""
    return _fmt_now('%Y%m%d')
# Get the Redis sentinel
def _get_redis_sentinel():
    """Build a Sentinel client for the configured sentinel nodes."""
    return Sentinel(settings.REDIS_SENTINELS, socket_timeout=10)
# Get the Redis master client
def get_redis_master():
    """Return a Redis client bound to the current master node."""
    sentinel = _get_redis_sentinel()
    # Removed the unused ``sentinel.discover_master('mymaster')`` call: its
    # result was never read, it performed an extra network round-trip, and
    # it hard-coded the service name; master_for() resolves the master itself.
    return sentinel.master_for(settings.REDIS_SERVICE_NAME, socket_timeout=0.5,
                               password=settings.REDIS_PASSWORD,
                               db=settings.REDIS_DB)
# Get a Redis replica client
def get_redis_slave():
    """Return a Redis client bound to a replica node."""
    sentinel = _get_redis_sentinel()
    # BUG FIX: slave_for() returns a single Redis client, not a list, so
    # the original trailing ``[0]`` raised at call time.
    return sentinel.slave_for(settings.REDIS_SERVICE_NAME, socket_timeout=0.5,
                              password=settings.REDIS_PASSWORD,
                              db=settings.REDIS_DB)
# Redis-backed auto-increment id generator.
def redis_incr(key):
    """Return a daily-sequenced id string: ``<key><YYYYMMDD><counter>``."""
    # NOTE(review): hard-coded host/credentials; the trailing comment
    # suggests get_redis_master() was intended — confirm before changing.
    redis_instance = redis.Redis(host='192.168.13.118', port=7001, db=0, password='1qaz!QAZ')  # get_redis_master()
    id_primary = key + get_date_str2()  # Redis key = keyword + today's date
    id_increase = redis_instance.incr(id_primary)
    return id_primary + str(id_increase)  # final id = stored key + counter value
| {"/order/views.py": ["/order/models.py"], "/order/models.py": ["/order/common.py"]} |
54,236 | aploium/my_utils | refs/heads/master | /requestfuzz/datastructure.py | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import re
from orderedmultidict.orderedmultidict import omdict
if six.PY3:
import collections
from http.cookies import SimpleCookie
from urllib import parse
else:
# The py2 stdlib SimpleCookie must not be used here — it has many pitfalls
from future.backports.http.cookies import SimpleCookie
from future.backports.urllib import parse
from future.moves import collections
from . import utils
REGEX_CHARSET = re.compile(r"charset=([\w-]+)", re.IGNORECASE)
_dummy_simple_cookie = SimpleCookie()
def to_querydict(data):
    """Coerce *data* (QueryDict / str / list / dict / falsy) into a QueryDict.

    Values of unrecognized types are returned unchanged.
    """
    if isinstance(data, QueryDict):
        return data
    if not data:
        return QueryDict()
    if isinstance(data, six.string_types):
        # keep_blank_values is required, otherwise empty parameters are
        # silently dropped — see the urllib.parse.parse_qsl documentation:
        # https://docs.python.org/3.6/library/urllib.parse.html#urllib.parse.parse_qsl
        text = utils.ensure_unicode(data)
        return QueryDict(parse.parse_qsl(text, keep_blank_values=True))
    if utils.like_list(data) or utils.like_dict(data):
        return QueryDict(data)
    return data
class OrderedMultiDict(omdict):
    """omdict variant where items() yields *all* key/value pairs and
    assignment to an existing key replaces its values in place, keeping the
    key's original position."""

    def items(self):
        # Expose every (key, value) pair, not one pair per distinct key.
        return super(OrderedMultiDict, self).allitems()

    def __setitem__(self, key, value):
        if key not in self:
            super(OrderedMultiDict, self).__setitem__(key, value)
        else:
            # Replace in place so the key keeps its position.
            self.inplace_set(key, value)

    def inplace_set(self, key, value):
        """Collapse all values of *key* to the single *value*, placed at the
        key's first occurrence; append if the key is absent."""
        items = self.items()
        new_items = []
        _found_flag = False
        for _k, _v in items:
            if _k == key:
                # Keep only the first occurrence, with the new value;
                # subsequent duplicates are dropped.
                if not _found_flag:
                    new_items.append((key, value))
                    _found_flag = True
            else:
                new_items.append((_k, _v))
        if not _found_flag:
            new_items.append((key, value))
        self.load(new_items)

    def update(self, values):
        """dict.update-like helper accepting a mapping or a pair iterable."""
        if utils.like_dict(values):
            _values = values.items()
        else:
            _values = values
        for k, v in _values:
            self[k] = v
class HTTPHeaders(OrderedMultiDict):
    """Case-insensitive ordered multi-dict for HTTP headers.

    Lookups match existing keys ignoring case; brand-new keys keep the
    caller's spelling.
    """

    def _find_real_key(self, key):
        # py2 may hand us bytes; normalize to text before comparing.
        if six.PY2 and isinstance(key, six.binary_type):
            key = key.decode("UTF-8")
        lowered = key.lower()
        for candidate in self.keys():
            if candidate.lower() == lowered:
                return candidate
        return key

    def __getitem__(self, key):
        return super(HTTPHeaders, self).__getitem__(self._find_real_key(key))

    def get(self, key, default=None):
        return super(HTTPHeaders, self).get(self._find_real_key(key), default)

    def __setitem__(self, key, value):
        return super(HTTPHeaders, self).__setitem__(self._find_real_key(key), value)

    def pop(self, key, default=None):
        return super(HTTPHeaders, self).pop(self._find_real_key(key), default)

    def add(self, key, value=None):
        return super(HTTPHeaders, self).add(self._find_real_key(key), value)

    def __contains__(self, key):
        return super(HTTPHeaders, self).__contains__(self._find_real_key(key))

    def update(self, other):  # TODO: replace with a higher-performance wrapper
        pairs = other.items() if hasattr(other, "items") else other
        for k, v in pairs:
            self[k] = v
class QueryDict(OrderedMultiDict):
    # Ordered multidict for URL query parameters; all behavior is
    # inherited from OrderedMultiDict.  Kept as a distinct type so
    # to_querydict() can detect already-converted input.
    pass
@six.python_2_unicode_compatible
class Cookie(OrderedMultiDict):
    """Ordered multidict representing a Cookie header, preserving the
    order the cookies appeared in the original input."""

    def __init__(self, data=None):
        super(Cookie, self).__init__()
        if data:
            self._load_cookies(data)

    def _load_cookies(self, data):
        """Load cookies from a str, dict, or tuple-pair list.

        The stdlib SimpleCookie.load() does not accept a list, so list
        input is converted to an OrderedDict first.
        """
        if utils.like_list(data):
            data = collections.OrderedDict(data)
        simple_cookie = SimpleCookie(data)
        pairs = [(c.key, c.value) for c in simple_cookie.values()]
        # SimpleCookie does not keep input order; restore it by sorting on
        # each key's position in the original data.
        pairs.sort(key=lambda item: self._find_key_pos(data, item[0]))
        self.update(pairs)

    @staticmethod
    def _find_key_pos(oridata, key):
        """Return the position of *key* in the original cookie data.

        BUG FIX: previously the str/list/dict branches fell through with
        ``None`` when the key could not be located, which made the
        ``pairs.sort(key=...)`` call above raise TypeError on Python 3
        (comparing None with int).  Now always returns an int, with 0 as
        the fallback for unknown formats or missing keys.
        """
        if isinstance(oridata, six.string_types):
            for syntax in ("; {}=", ";{}=", " {}=", "{}="):
                pos = oridata.find(syntax.format(key))
                if pos != -1:
                    return pos
        elif utils.like_list(oridata):
            for index, (k, _v) in enumerate(oridata):
                if k == key:
                    return index
        elif utils.like_dict(oridata):
            for index, (k, _v) in enumerate(oridata.items()):
                if k == key:
                    return index
        # unknown formats, or key not found
        return 0

    def tostr(self):
        """Serialize to a ``k1=v1; k2=v2`` cookie header string, quoting
        values via SimpleCookie's encoder."""
        pairs = []
        for name, val in self.items():
            _, quoted_val = _dummy_simple_cookie.value_encode(val)
            pairs.append((name, quoted_val))
        output = "; ".join(
            "{}={}".format(k, v)
            for k, v in pairs
        )
        return output

    def __str__(self):
        return "{}".format(self.tostr())

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, repr(self.tostr()))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,237 | aploium/my_utils | refs/heads/master | /oss_utils.py | #!/usr/bin/env python3
# coding=utf-8
import os
import json
import logging
import time
import traceback
import oss2
log = logging.getLogger(__name__)
class ListedBucket(oss2.Bucket):
    """oss2.Bucket that also maintains a per-folder ``list.json`` index.

    See the "OSS存储备忘录" section in README.md for the index layout.
    """

    def __init__(self, accesskey, secretkey, endpoint, bucket_name, is_cname=False, session=None, connect_timeout=None,
                 app_name='',
                 enable_crc=True):
        # build the Auth object here so callers only pass plain key strings
        self._auth = oss2.Auth(accesskey, secretkey)
        super().__init__(self._auth, endpoint, bucket_name, is_cname, session, connect_timeout, app_name, enable_crc)

    def put_file_to_listed_folder(self, key, filename, skip_exist=False, max_retries=5, **kwargs):
        """Upload *filename* to OSS under *key* and update the folder's list.json.

        :param key: object key; a leading "/" is stripped
        :param filename: local file path
        :param skip_exist: skip the upload when the remote object exists
            with the same content-length
        :param max_retries: attempts for each network operation
        :return: the upload's put result, or True when skipped

        BUG FIX: the three retry loops used bare ``except:`` clauses,
        which also caught SystemExit/KeyboardInterrupt and made the
        upload impossible to abort; narrowed to ``except Exception``.
        """
        key = key.lstrip("/")

        if skip_exist and self.object_exists(key):
            file_size = os.path.getsize(filename)
            meta = self.get_object_meta(key)
            if meta.content_length == file_size:
                log.info("skip: {}".format(key))
                return True
            else:
                log.info("remote file exist but not same, overwrite. len:{} local: len:{}".format(
                    meta.content_length, file_size,
                ))

        log.info("OSS: uploading {} to {}".format(filename, key))
        for i in range(max_retries):
            try:
                result = self.put_object_from_file(key, filename, **kwargs)
                assert result.status == 200
            except Exception:
                if i == max_retries - 1:
                    raise
                traceback.print_exc()
                time.sleep(1)
            else:
                break

        dir_name = os.path.dirname(key)
        base_name = os.path.basename(key)
        modify_time = int(os.path.getmtime(filename))

        # fetch the current list.json (empty dict when it doesn't exist yet)
        list_json_key = "{}/list.json".format(dir_name.rstrip("/"))
        for i in range(max_retries):
            try:
                list_json = self.get_object(list_json_key).read().decode("utf-8")
                files = json.loads(list_json)  # type: dict
            except oss2.exceptions.NoSuchKey:
                files = {}
                break
            except Exception:
                if i == max_retries - 1:
                    raise
                traceback.print_exc()
                time.sleep(1)
            else:
                break

        # update this file's entry
        file_dic = files.get(base_name, {})
        file_dic["timestamp"] = modify_time
        files[base_name] = file_dic

        # write the updated list.json back to OSS
        for i in range(max_retries):
            try:
                _result = self.put_object(list_json_key, json.dumps(files).encode("utf-8"))
                assert _result.status == 200
            except Exception:
                if i == max_retries - 1:
                    raise
                traceback.print_exc()
                time.sleep(1)
            else:
                break
        return result
def upload_folder_to_oss(bucket_obj, folder, key_prefix="", **kwargs):
    """Recursively upload every file under *folder* to OSS.

    Each file's object key is *key_prefix* joined with the file's path
    relative to *folder*.  Extra keyword arguments are forwarded to
    put_file_to_listed_folder().

    :type bucket_obj: ListedBucket
    """
    for current_dir, _subdirs, file_names in os.walk(folder):
        relative_dir = os.path.relpath(current_dir, folder)
        if relative_dir == ".":
            # files directly inside *folder* get no directory component
            relative_dir = ""
        dir_key_prefix = os.path.join(key_prefix, relative_dir)
        for file_name in file_names:
            object_key = os.path.join(dir_key_prefix, file_name)
            local_path = os.path.join(current_dir, file_name)
            bucket_obj.put_file_to_listed_folder(object_key, local_path, **kwargs)
if __name__ == "__main__":
import sys
logging.basicConfig(
format="[%(levelname)s %(asctime)s %(module)s.%(funcName)s#%(lineno)d] %(message)s",
level=logging.INFO
)
if len(sys.argv) < 2:
log.error("You must give at least one folder!")
exit(1)
accesskey = os.getenv("OSS_ACCESS_KEY")
secretkey = os.getenv("OSS_SECRET_KEY")
endpoint = os.getenv("OSS_ENDPOINT")
bucket_name = os.getenv("OSS_BUCKET")
prefix = os.getenv("OSS_PREFIX", "")
skip_exist = os.getenv("OSS_SKIP_EXIST", False)
if not (accesskey and accesskey and endpoint and bucket_name):
log.error("you must set OSS_ACCESS_KEY OSS_SECRET_KEY OSS_ENDPOINT OSS_BUCKET environment value")
exit(2)
bucket_obj = ListedBucket(accesskey, secretkey, endpoint, bucket_name)
for folder in sys.argv[1:]:
log.info("uploading: {}".format(folder))
upload_folder_to_oss(bucket_obj, folder, key_prefix=prefix, skip_exist=skip_exist)
log.info("done!")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,238 | aploium/my_utils | refs/heads/master | /sentry.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import logging
import sys
import os
import getpass
import platform
import subprocess
import raven
import raven.conf
import raven.conf.defaults
import raven.processors
import raven.handlers.logging
class AdditionalInfoProcessor(raven.processors.Processor):
    """Sentry processor that attaches runtime environment details
    (cwd, user, uname, Python version) to every event's extra data."""

    def filter_extra(self, data):
        """:type data: dict"""
        runtime_info = {
            "cwd": os.getcwd(),
            "user": getpass.getuser(),
            "uname": platform.uname(),
            "py_version": sys.version,
        }
        data.update(runtime_info)
        return data
def git_version(default=None):
    """
    Return the current short git revision, intended for sentry's ``release``.

    Example:
        sentry.setup(..., release=git_version())

    Falls back to *default* when git is unavailable or the command fails.
    BUG FIX: when ``git rev-parse`` exited non-zero (e.g. not inside a git
    repository) its stdout is empty, so the function returned "" instead
    of *default*; empty output now also falls back.  The bare ``except:``
    was narrowed to ``except Exception``.

    References:
        https://github.com/numpy/numpy/blob/master/setup.py#L71-L93
    """

    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
        git_revision = out.strip().decode('ascii')
    except Exception:
        git_revision = default
    if not git_revision:
        # command failed silently (empty stdout): use the fallback
        git_revision = default
    return git_revision
client = None  # type: raven.Client


def setup(dsn=None, name=None, level=logging.WARNING,
          autoversion=True,
          **kwargs):
    """Create the global raven client and hook it into the logging module.

    :param dsn: sentry DSN; falls back to the SENTRY_DSN env var
    :param name: client name passed to raven.Client
    :param level: minimum log level forwarded to sentry
    :param autoversion: derive ``release`` from the git revision when the
        caller did not supply one
    :raises ValueError: when no DSN can be determined
    :return: the configured raven client (also stored in module ``client``)
    """
    global client

    dsn = dsn or os.getenv("SENTRY_DSN")
    if not dsn:
        raise ValueError("you must give SENTRY_DSN, or set it in env")

    # fill defaults; explicit ``None`` values are replaced too, as before
    for option, fallback in (
            ('string_max_length', 4096),
            ('ignore_exceptions', [KeyboardInterrupt]),
            ('auto_log_stacks', True),
    ):
        if kwargs.get(option) is None:
            kwargs[option] = fallback

    if autoversion and kwargs.get('release') is None:
        release = git_version()
        if release:
            kwargs['release'] = release

    processor_path = "{}.{}".format(AdditionalInfoProcessor.__module__,
                                    AdditionalInfoProcessor.__name__)
    client = raven.Client(
        dsn,
        name=name,
        processors=raven.conf.defaults.PROCESSORS + (processor_path,),
        **kwargs
    )
    sentry_handler = raven.handlers.logging.SentryHandler(client)
    sentry_handler.setLevel(level)
    raven.conf.setup_logging(sentry_handler)
    return client
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,239 | aploium/my_utils | refs/heads/master | /err_hunter/err_hunter/__init__.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, unicode_literals
from logging import (
CRITICAL, FATAL, ERROR, WARNING,
WARN, INFO, DEBUG, NOTSET,
)
logger_global_custom_data = {}
from .traceback2 import format_exc, print_exc
from .mylogger import MyHTTPHandler, apply_handler, MultiprocessRotatingFileHandler
from .mylogging import (
basicConfig, colorConfig, getLogger, FILE_LOG_FORMAT,
VERBOSE, TRACE, NOISE, LOWEST,
)
VERSION = (0, 8, 2)
VERSION_STR = "{}.{}.{}".format(*VERSION)
__version__ = VERSION
__author__ = "Ap<meow@meow.cat>"
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,240 | aploium/my_utils | refs/heads/master | /attr.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals, division
import inspect
import collections
import traceback
import random
__version__ = (2017, 5, 30, 1)
__author__ = "Aploium<i@z.codes>"
DEFAULT_MAXDEPTH = 2
DEFAULT_MAXLEN = 1024
BASIC_PADDING_LENGTH = 4
PADDING_STEP = 4
DEFAULT_MASKED_KEYWORDS = ("secret", "password", "passwd", "token", "access_key")
MAX_SINGLE_VAR_LEN = 256


def attributes(var,
               interested=None,
               maxlen=DEFAULT_MAXLEN,
               skip_private=True,
               max_depth=DEFAULT_MAXDEPTH,
               masked_keywords=DEFAULT_MASKED_KEYWORDS,
               from_dict=None,
               with_sepline=False,
               max_single_var_len=MAX_SINGLE_VAR_LEN,
               _padding=BASIC_PADDING_LENGTH,
               ):
    """Render a readable, multi-line dump of *var*'s attributes (or a
    mapping's items), masking sensitive-looking names and optionally
    recursing into types matching *interested*.

    :param interested: substrings of type names to recurse into
    :param maxlen: max rendered length per value before eliding the middle
    :param skip_private: skip names starting with "_"
    :param max_depth: recursion depth limit for *interested* types
    :param masked_keywords: name substrings whose values are masked
    :param from_dict: force treating *var* as a mapping (True/False);
        None auto-detects
    :param with_sepline: wrap output in BEGIN/END separator lines
    :param _padding: internal indent, advanced on recursion
    :return: the formatted dump as a single string

    Fixes applied:
    * ``collections.Mapping``/``collections.Sequence`` were removed from
      the ``collections`` namespace in Python 3.10; ``collections.abc``
      is now used when available (py2 falls back to ``collections``).
    * ``random.sample`` requires a sequence since Python 3.11; the name
      collection is materialized with ``list()`` first.
    * Bare ``except:`` narrowed to ``except Exception``.
    * The dead ``or subval_str is True`` test (always False for a str)
      was removed.
    """
    try:
        import collections.abc as _abc  # Python 3
    except ImportError:  # Python 2: the ABCs live on collections itself
        _abc = collections

    if _padding == BASIC_PADDING_LENGTH:
        # top-level call: optional separator and a repr of the object itself
        if with_sepline:
            output = "#### BEGIN ATTRIBUTES {} ####\n".format(type(var))
        else:
            output = ""
        if not isinstance(var, (dict, list, tuple, set)):
            try:
                _repr_var = repr(var)[:maxlen]
            except Exception:
                _repr_var = traceback.format_exc()
            output += "__str__: {}\n".format(_repr_var)
    else:
        output = ""

    if from_dict is False or (from_dict is None and not isinstance(var, _abc.Mapping)):
        from_dict = False
        names = dir(var)
    else:
        from_dict = True
        names = var.keys()

    if len(names) > max_single_var_len:
        output += "[WARNING {} ITEMS IGNORED] ".format(len(names) - max_single_var_len)
        names = random.sample(list(names), max_single_var_len)

    half_len = maxlen // 2
    for name in names:
        # NOTE(review): trailing-underscore names are skipped even when
        # skip_private is False -- looks intentional, but confirm.
        if (skip_private and name.startswith("_")) or name.endswith("_"):
            continue
        try:
            subval = var[name] if from_dict else getattr(var, name)
        except Exception:
            subval = traceback.format_exc()
        if inspect.ismodule(subval) or inspect.isfunction(subval) or inspect.isclass(subval):
            continue

        type_str = str(type(subval))
        str_prefix = ""
        if inspect.ismethod(subval):
            subval_str = "<method>"
            nosub = True
        else:
            # truncate over-long sequences before computing their repr
            if not isinstance(subval, (str, bytes, bytearray)) \
                    and isinstance(subval, _abc.Sequence):
                try:
                    subval_len = len(subval)
                except Exception:
                    pass
                else:
                    if subval_len > max_single_var_len:
                        str_prefix += "[{} MORE ITEMS IGNORED]".format(subval_len - max_single_var_len)
                        subval = subval[:max_single_var_len]
            try:
                subval_str = repr(subval)
            except Exception:
                subval_str = traceback.format_exc()
            nosub = False

        if masked_keywords:
            name_low = name.lower()
            for keyword in masked_keywords:
                if keyword in name_low:
                    subval_str = '***masked***'
                    nosub = True
                    break

        rec = False
        if len(subval_str) > maxlen:
            # keep head and tail, elide the middle
            str_prefix = "[OMITTED WARNING!]"
            subval_str = subval_str[:half_len] \
                         + " ###omit:{}### ".format(len(subval_str) - maxlen) \
                         + subval_str[-half_len:]
        subval_str = str_prefix + subval_str
        ori_subval_str = ""
        if interested is not None and max_depth and not nosub:
            ori_subval_str = subval_str[:maxlen]
            for needle in interested:
                if needle in type_str:
                    subval_str = attributes(
                        subval, maxlen=maxlen,
                        interested=interested,
                        max_depth=max_depth - 1,
                        _padding=_padding + PADDING_STEP,
                    )
                    rec = True
        if _padding:
            output += " " * _padding
        if not rec:
            output += "{name}: {value}\n".format(name=name, value=subval_str)
        else:
            output += "{name}: {value}\n{subvalues}".format(name=name, value=ori_subval_str, subvalues=subval_str)

    if with_sepline and _padding == BASIC_PADDING_LENGTH:
        output += "#### END ATTRIBUTES {} ####\n".format(type(var))
    return output
if __name__ == "__main__":
import requests
import time
r = requests.get("https://www.baidu.com")
start_time = time.time()
print(attributes(r, interested=["RequestsCookieJar", "PreparedRequest"]))
print("cost:", time.time() - start_time)
print(attributes(r.headers))
print(attributes({
"password": "this should not be displayed",
"PASSWD": "this should not be displayed",
}))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,241 | aploium/my_utils | refs/heads/master | /ipip/__init__.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals, print_function
import logging
import struct
from socket import inet_aton
import os
import collections
logger = logging.getLogger(__name__)
_unpack_V = lambda b: struct.unpack("<L", b)
_unpack_N = lambda b: struct.unpack(">L", b)
_unpack_C = lambda b: struct.unpack("B", b)
_finder_cls = None
IPInfo = collections.namedtuple("IPInfo", (
"ip",
"country", "province", "city", "village",
"isp",
"lat", "lon",
"timezone_name", "timezone_offset",
"postcode", "phone_prefix",
"country_abbr", "continent"
))
class IP(object):
    """Reader for the ipip.net ".dat" database format.

    State is class-level (a process-wide singleton): call load() once,
    then find() for lookups.
    """

    # byte position where the record area starts (first 4 bytes of the file)
    offset = 0
    # raw index area: bytes between the 4-byte header and ``offset``
    index = 0
    # entire database file content
    binary = b""

    @staticmethod
    def load(file):
        """Read the whole .dat file into class-level state; re-raises on failure."""
        try:
            path = os.path.abspath(file)
            with open(path, "rb") as f:
                IP.binary = f.read()
                IP.offset, = _unpack_N(IP.binary[:4])
                IP.index = IP.binary[4:IP.offset]
        except:
            logger.error("cannot open file %s" % file, exc_info=True)
            raise

    @staticmethod
    def find(ip):
        """Look up *ip* (dotted-quad str); return the record string or None."""
        index = IP.index
        offset = IP.offset
        binary = IP.binary
        nip = inet_aton(ip)
        ipdot = ip.split('.')
        # reject malformed addresses (inet_aton above already raises on most)
        if int(ipdot[0]) < 0 or int(ipdot[0]) > 255 or len(ipdot) != 4:
            return None
        # first index level: 256 slots of 4 bytes, keyed by the leading octet
        tmp_offset = int(ipdot[0]) * 4
        start, = _unpack_V(index[tmp_offset:tmp_offset + 4])
        index_offset = index_length = 0
        max_comp_len = offset - 1028
        start = start * 8 + 1024
        # linear scan of 8-byte entries: 4-byte upper IP bound,
        # 3-byte record offset, 1-byte record length
        while start < max_comp_len:
            if index[start:start + 4] >= nip:
                index_offset, = _unpack_V(index[start + 4:start + 7] + b'\x00')
                index_length, = _unpack_C(index[start + 7:start + 8])
                break
            start += 8
        if index_offset == 0:
            # no entry matched
            return None
        res_offset = offset + index_offset - 1024
        return binary[res_offset:res_offset + index_length].decode('utf-8')
class IPX(object):
    """Reader for the ipip.net ".datx" database format.

    Same class-level singleton design as ``IP``, but with a 65536-slot
    first index level and 9-byte index entries.
    """

    # entire database file content
    binary = b""
    # raw index area: bytes between the 4-byte header and ``offset``
    index = 0
    # byte position where the record area starts
    offset = 0

    @staticmethod
    def load(file):
        """Read the whole .datx file into class-level state; re-raises on failure."""
        try:
            path = os.path.abspath(file)
            with open(path, "rb") as f:
                IPX.binary = f.read()
                IPX.offset, = _unpack_N(IPX.binary[:4])
                IPX.index = IPX.binary[4:IPX.offset]
        except Exception:
            logger.error("IPIP: cannot open file %s" % file, exc_info=True)
            raise

    @staticmethod
    def find(ip):
        """Look up *ip* (dotted-quad str); return the record string or None."""
        index = IPX.index
        offset = IPX.offset
        binary = IPX.binary
        nip = inet_aton(ip)
        ipdot = ip.split('.')
        if int(ipdot[0]) < 0 or int(ipdot[0]) > 255 or len(ipdot) != 4:
            return None
        # first index level: 256*256 slots keyed by the first two octets
        tmp_offset = (int(ipdot[0]) * 256 + int(ipdot[1])) * 4
        start, = _unpack_V(index[tmp_offset:tmp_offset + 4])
        # BUG FIX: these were initialized to -1, so the "not found" check
        # below (``index_offset == 0``) could never trigger and a bogus
        # slice was returned instead of None; 0 matches the IP class.
        index_offset = index_length = 0
        max_comp_len = offset - 262144 - 4
        start = start * 9 + 262144
        # 9-byte entries: 4-byte upper IP bound, 3-byte record offset,
        # one unused byte (start+7), 1-byte record length
        while start < max_comp_len:
            if index[start:start + 4] >= nip:
                index_offset, = _unpack_V(index[start + 4:start + 7] + b'\x00')
                index_length, = _unpack_C(index[start + 8:start + 9])
                break
            start += 9
        if index_offset == 0:
            return None
        res_offset = offset + index_offset - 262144
        return binary[res_offset:res_offset + index_length].decode('utf-8')
def setup_ipx(file_path=None):
    """Load a .datx database and make IPX the active finder.

    When *file_path* is None, the bundled "ipip.datx" next to this module
    is used.
    """
    global _finder_cls
    if file_path is None:
        module_dir = os.path.dirname(os.path.abspath(__file__))
        file_path = os.path.join(module_dir, "ipip.datx")
    IPX.load(file_path)
    _finder_cls = IPX
def find(ip):
    """Look up *ip* and return an IPInfo namedtuple, or None if unknown.

    Lazily initializes the default .datx finder on first use.
    """
    if _finder_cls is None:
        setup_ipx()
    raw = _finder_cls.find(ip)
    if raw is None:
        return None
    fields = [ip]
    fields.extend(raw.split("\t"))
    return IPInfo(*fields)
if __name__ == '__main__':
    # smoke test: a few well-known addresses
    for sample_ip in ("118.28.8.8", "42.120.74.202", "8.8.8.8", "11.191.47.131"):
        print(find(sample_ip))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,242 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_datastructure.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import pytest
from requestfuzz import OrderedMultiDict
@pytest.mark.skip("暂时无法保留update时的重复key")
def test_update_with_dup_key():
    """update() should preserve duplicate keys (currently unsupported, hence skipped)."""
    pairs = [("a", "1"), ("a", "2")]
    omd = OrderedMultiDict()
    omd.update(OrderedMultiDict(pairs))
    assert omd == OrderedMultiDict(pairs)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,243 | aploium/my_utils | refs/heads/master | /import_file.py | #!/usr/bin/env python3
# coding=utf-8
"""直接从文件导入成module"""
import importlib.util
import sys
import os
def import_file(path, name=None, make_global=False):
    """Import a Python source file directly as a module object.

    Args:
        path (str): filesystem path of the ``.py`` file to import.
        name (str): module name to use; defaults to the file's basename
            without extension.
        make_global (bool): if True, also register the module in
            ``sys.modules`` so subsequent ``import name`` statements find it.

    Returns:
        module: the freshly executed module object.

    Raises:
        ImportError: if no import spec can be created for *path*.
    """
    name = name or os.path.splitext(os.path.basename(path))[0]
    spec = importlib.util.spec_from_file_location(name, path)
    if spec is None or spec.loader is None:
        # spec_from_file_location returns None for unloadable paths;
        # raise a clear ImportError instead of an AttributeError below.
        raise ImportError("cannot create import spec for {!r}".format(path))
    module_ = importlib.util.module_from_spec(spec)
    if make_global:
        # Register *before* exec_module (as the importlib docs recommend)
        # so the module can import itself during execution.
        sys.modules[name] = module_
    try:
        spec.loader.exec_module(module_)
    except BaseException:
        # Do not leave a half-initialized module visible globally.
        if make_global:
            sys.modules.pop(name, None)
        raise
    return module_
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,244 | aploium/my_utils | refs/heads/master | /disk_kv_storge/leveldb_engine/__init__.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import os
import sys
import platform
import functools
def import_helper(relpath, name):
    """Import module *name* from a directory *relpath* relative to this file.

    Temporarily prepends the directory to ``sys.path``; the entry is always
    removed again, even when the import fails (the original version leaked
    the sys.path entry on ImportError).

    Args:
        relpath (str): directory, relative to this file, containing the module.
        name (str): module name to import from that directory.

    Returns:
        module: the imported module object.
    """
    dirname = os.path.dirname(os.path.abspath(__file__))
    abspath = os.path.join(dirname, relpath)
    sys.path.insert(0, abspath)
    try:
        if sys.version_info[0] == 2:
            import imp
            module_ = imp.load_module(name, *imp.find_module(name))
        else:
            import importlib
            module_ = importlib.import_module(name)
    finally:
        # always undo the sys.path mutation, even on failure
        sys.path.remove(abspath)
    return module_
# Backend selection state: records which leveldb binding was actually imported.
_mode = None  # "plyvel" or "pyleveldb"
VERBOSE_NAME = None  # human-readable backend tag, filled in below
NAME = "leveldb"
if platform.system() == "Windows":
    # On Windows, bundled prebuilt py-leveldb binaries are loaded via import_helper.
    if sys.version_info[:2] == (3, 6):
        try:
            leveldb = import_helper("leveldb_win_py36", "leveldb")
        except Exception as e:
            # normalize any load failure into ImportError for uniform handling
            raise ImportError(str(e))
        VERBOSE_NAME = "pyleveldb_win_py36"
    elif sys.version_info[:2] == (2, 7):
        try:
            leveldb = import_helper("leveldb_win_py27", "leveldb")
        except Exception as e:
            raise ImportError(str(e))
        VERBOSE_NAME = "pyleveldb_win_py27"
    else:
        # only py2.7 / py3.6 prebuilt binaries are shipped with this package
        raise ImportError("unsupported version")
    _mode = "pyleveldb"
else:
    # On unix, prefer plyvel; fall back to py-leveldb if plyvel is absent.
    try:
        import plyvel as leveldb
    except ImportError:
        import leveldb
        _mode = "pyleveldb"
        VERBOSE_NAME = "pyleveldb_unix"
    else:
        _mode = "plyvel"
        VERBOSE_NAME = "plyvel_leveldb_unix"
# open
# Uniform functional facade (open/get/put/delete/close/keys/values/items)
# over whichever backend was imported above. Callers pass the DB instance
# as the first argument (unbound-method style).
# NOTE(review): `open` shadows the builtin open() inside this module —
# presumably intentional facade naming; confirm callers use leveldb_engine.open.
if _mode == "plyvel":
    def open(dbpath, block_cache_size=8 * (2 << 20)):
        # create_if_missing bootstraps the DB directory on first use
        return leveldb.DB(dbpath, lru_cache_size=block_cache_size, create_if_missing=True)
    get = leveldb.DB.get
    put = leveldb.DB.put
    delete = leveldb.DB.delete
    close = leveldb.DB.close
    keys = functools.partial(leveldb.DB.iterator, include_value=False)
    values = functools.partial(leveldb.DB.iterator, include_key=False)
    items = leveldb.DB.iterator
elif _mode == "pyleveldb":
    open = leveldb.LevelDB
    get = leveldb.LevelDB.Get
    put = leveldb.LevelDB.Put
    delete = leveldb.LevelDB.Delete
    close = lambda x: None  # py-leveldb has no explicit close()
    keys = functools.partial(leveldb.LevelDB.RangeIter, include_value=False)
    items = leveldb.LevelDB.RangeIter
    def values(self, *args, **kwargs):
        # py-leveldb lacks a value-only iterator; derive it from RangeIter
        for x in items(self, *args, **kwargs):
            yield x[1]
else:
    raise ImportError("bad mode: {}".format(_mode))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,245 | aploium/my_utils | refs/heads/master | /disk_kv_storge/disk_timeoutdict.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals, division, print_function, absolute_import
import sys
import time
import json
import struct
import logging
logger = logging.getLogger(__name__)
try:
from . import BaseDiskKV
except (ImportError, ValueError):
# noinspection PyUnresolvedReferences
from disk_kv_storge import BaseDiskKV
if sys.version_info[0] != 2:
    # Python 3: native text/bytes split
    string_types = str
    integer_types = int
    text_type = str
    binary_type = (bytes, bytearray)
else:
    # Python 2 compatibility aliases
    # noinspection PyUnresolvedReferences
    string_types = (str, unicode)
    # noinspection PyUnresolvedReferences
    integer_types = (int, long)
    # noinspection PyUnresolvedReferences
    text_type = unicode
    binary_type = (str, bytes, bytearray)
def pack_timestamp(value, timestamp=None):
    """Prepend an 8-byte big-endian double timestamp header to *value*.

    Fix: the original used ``timestamp or time.time()``, which silently
    replaced a legitimate timestamp of 0 (the epoch) with the current time;
    only ``None`` now means "use the current time".

    Args:
        value (bytes): payload to tag.
        timestamp (float|None): UNIX timestamp; defaults to ``time.time()``.

    Returns:
        bytes: ``struct.pack("!d", timestamp) + value``
    """
    if timestamp is None:
        timestamp = time.time()
    time_bytes = struct.pack("!d", timestamp)
    return time_bytes + value
def get_timestamp(b):
    """Read the leading 8-byte big-endian double timestamp from *b*."""
    (timestamp,) = struct.unpack("!d", b[:8])
    return timestamp
def unpack_timestamp(b):
    """Split *b* (as written by pack_timestamp) into (payload, timestamp)."""
    header, payload = b[:8], b[8:]
    return payload, struct.unpack("!d", header)[0]
# --------------------------------------------------------
def _key_encode(key):
    """Encode a key (text / bytes / int) into bytes for disk storage.

    Args:
        key (str|bytes|int): the key to encode.

    Returns:
        bytes: utf-8 encoded key.

    Raises:
        TypeError: for unsupported key types. The original implementation
            silently fell through and returned None, which would only fail
            later, deep inside the DB layer.
    """
    if isinstance(key, text_type):
        return key.encode("utf8")
    if isinstance(key, binary_type):
        return key
    if isinstance(key, integer_types):
        return str(key).encode("utf8")
    raise TypeError("unsupported key type: {!r}".format(type(key)))
def _key_decode(key):
return key.decode("utf8")
# --------------------------------------------------------
# Value serialization: prefer msgpack, fall back to the stdlib json module.
# Both variants produce: 8-byte big-endian timestamp header + serialized payload
# (see pack_timestamp / unpack_timestamp above).
try:
    import msgpack
except ImportError:
    logger.warning(
        "msgpack not found, please consider install msgpack (http://msgpack.org/) for serialization, "
        "it's better than json. Fallback to builtin json for serialization"
    )
    def _value_encode(value):
        # json path: serialize -> utf-8 bytes -> prepend timestamp header
        value = json.dumps(value)
        value = value.encode("UTF-8")
        value = pack_timestamp(value)
        return value
    def _value_decode(value):
        # inverse of _value_encode: strip timestamp -> decode -> json load
        value = unpack_timestamp(value)[0]
        value = value.decode("UTF-8")
        value = json.loads(value)
        return value
else:
    def _value_encode(value):
        # use_bin_type=True distinguishes bytes from str on the wire
        value = msgpack.dumps(value, use_bin_type=True)
        value = pack_timestamp(value)
        return value
    def _value_decode(value):
        # NOTE(review): the `encoding=` kwarg was removed in msgpack>=1.0
        # (replaced by raw=False); this code requires msgpack<1.0 —
        # confirm the pinned dependency version.
        value = unpack_timestamp(value)[0]
        value = msgpack.loads(value, encoding='UTF-8', use_list=False)
        return value
# --------------------------------------------------------
class DiskTimeoutDict(BaseDiskKV):
    """Disk-backed dict whose entries expire after ``max_age`` seconds.

    Expired entries are swept lazily: at most once per ``check_interval``,
    triggered by item access.

    >>> import time
    >>> td = DiskTimeoutDict(1)
    >>> td["cat"] = "foobar"
    >>> assert td["cat"] == "foobar"
    >>> time.sleep(0.5)
    >>> td["dog"] = 42
    >>> assert td["cat"] == "foobar"
    >>> assert td.get("cat") == "foobar"
    >>> assert td.get("non-exist", "a") == "a"
    >>> assert tuple(td.keys()) == ("cat", "dog")
    >>> assert tuple(td.values()) == ("foobar", 42)
    >>> assert tuple(td.items()) == (("cat","foobar"), ("dog",42))
    >>> assert len(td) == 2
    >>> assert td["dog"] == 42, list(td.items())
    >>> time.sleep(0.6)
    >>> assert "cat" not in td
    >>> assert td["dog"] == 42
    >>> time.sleep(0.5)
    >>> assert "dog" not in td
    >>> td["x"] = 1
    >>> del td["x"]
    >>> assert "x" not in td
    >>>
    >>> # test json storge
    >>> _dic = {"mon":[1, 2, 3, 4, {"cat": 1, b"binkey": "中文"}]}
    >>> td["monkey"] = _dic
    >>> assert td["monkey"] == _dic
    >>>
    >>> # test many keys
    >>> for i in range(10000): td[str(i)] = {"i_{}".format(i): i}
    >>> for i in range(10000): assert td[str(i)] == {"i_{}".format(i): i}
    >>> time.sleep(1.1)
    >>> for i in range(10000): assert str(i) not in td
    """

    def __init__(self, max_age, check_interval=None, **kwargs):
        """
        Args:
            max_age (float): seconds after which an entry expires.
            check_interval (float): minimum seconds between expiry sweeps;
                defaults to max_age / 10.
        """
        super(DiskTimeoutDict, self).__init__(**kwargs)
        self.max_age = max_age
        self.check_interval = (max_age / 10.0) if check_interval is None else check_interval
        self.next_checkpoint = time.time() + self.check_interval

    def __getitem__(self, key):
        # Lazily sweep expired entries once the checkpoint has passed.
        if time.time() > self.next_checkpoint:
            self.remove_expired()
        return super(DiskTimeoutDict, self).__getitem__(key)

    def remove_expired(self):
        """Delete every entry older than max_age; return the number removed."""
        now = time.time()
        stale = [
            key for key in self.keys(decode=False)
            if get_timestamp(self.rawget(key)) + self.max_age < now
        ]
        for key in stale:
            self.delete(key, decode=False)
        self.next_checkpoint = now + self.check_interval
        return len(stale)

    # plug the module-level codecs into the BaseDiskKV hooks
    key_encode = staticmethod(_key_encode)
    key_decode = staticmethod(_key_decode)
    value_encode = staticmethod(_value_encode)
    value_decode = staticmethod(_value_decode)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,246 | aploium/my_utils | refs/heads/master | /requestfuzz/request.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import requests
import re
import copy
import collections
try:
from urllib import parse
except ImportError:
from future.backports.urllib import parse
from . import utils
from .datastructure import (
OrderedMultiDict, QueryDict, HTTPHeaders, Cookie, to_querydict)
from .url import Url
from .recursive_parse import parse_multipart, split_multipart
# 注意: 这个 import 实际在此文件末尾, 以避免冲突
# from .bare import BareLoader
def merge_data(old, new, base=OrderedMultiDict):
    # Merge `new` into a copy of `old` without mutating `old`:
    #   - list-like old: extend the copy with new
    #   - dict-like old: update the copy with new
    #   - anything else (scalar / bytes / None ...): `new` simply replaces old
    if not old:
        # NOTE(review): this fresh `base()` container is only used when `old`
        # is an empty list/dict-like; falsy scalars fall through to
        # `return new` below, so the allocation is wasted in that case.
        _old = base()
    else:
        _old = copy.deepcopy(old)
    if isinstance(old, tuple):
        # tuples are immutable: replace the deepcopy with a list so that
        # .extend() below works — note this copy is shallow, unlike the
        # deepcopy taken for other types; TODO confirm that is intended
        _old = list(old)
    if utils.like_list(old):
        _old.extend(new)
        return _old
    elif utils.like_dict(old):
        _old.update(new)
        return _old
    else:
        return new
@six.python_2_unicode_compatible
class FuzzableRequest(object):  # TODO: needs a unified data class
    """
    Base component for parsing and modifying an HTTP request.

    Args:
        meta(dict): a small amount of extra information may be stored here
        bare(bytes): the raw request bytes, kept for storage only
        plugins(list): enabled plugins, e.g. csrf handling, automatic
            browser-header emulation, useless-parameter removal, etc.
            Each base class is `plugin_base.FzPluginBase`.
            Note: pass the plugin *classes*, not instances.
    """
    DEFAULT_PLUGINS = []

    def __init__(self, url,
                 method=None, protocol="HTTP/1.1",
                 data=None, json=None, files=None,
                 headers=None, cookies=None,
                 bare=None,
                 meta=None,
                 plugins=None,
                 ):
        self.plugins = []
        self.headers = HTTPHeaders(headers or [])
        self.protocol = utils.ensure_unicode(protocol)
        self.method = utils.ensure_unicode(method) if method else "GET"
        self.bare = bare
        self.meta = meta or {}
        self._url = Url(url)
        # load cookies from a str or a dict
        self.cookies = Cookie(cookies)
        self.data, self.json, self.files = self.prepare_data(
            data, json, files, self.content_type)
        # initialize plugins
        plugins = plugins or self.DEFAULT_PLUGINS
        if plugins:
            for plugin_class in plugins:
                _plg = plugin_class(self)
                self.plugins.append(_plg)
        self._event("init_complete")

    # ----------- simple proxies onto self.url ---------
    @property
    def url(self):
        return self._url

    @url.setter
    def url(self, value):
        self._url = Url(value)

    @property
    def scheme(self):
        # NOTE: the original defined this identical property twice;
        # the redundant second definition was removed (same behavior).
        return self.url.scheme

    @scheme.setter
    def scheme(self, value):
        self.url.scheme = value

    @property
    def host(self):
        return self.url.host

    @host.setter
    def host(self, value):
        self.url.host = value

    @property
    def port(self):
        return self.url.port

    @port.setter
    def port(self, value):
        self.url.port = value

    @property
    def path(self):
        return self.url.path

    @path.setter
    def path(self, value):
        self.url.path = value

    @property
    def fragment(self):
        # BUGFIX: this previously returned self.url.port (copy-paste error)
        return self.url.fragment

    @fragment.setter
    def fragment(self, value):
        self.url.fragment = value

    @property
    def query(self):
        return self.url.query

    @query.setter
    def query(self, value):
        self.url.query = value

    @property
    def query_string(self):
        return self.url.query_string

    @property
    def netloc(self):
        """
        Return host and port, omitting default ports;
        equivalent to urlsplit's .netloc

        Examples:
            "foo.com:8888"
            "bar.com"
        """
        return self.url.netloc

    # ----------- simple properties ---------
    @property
    def content_type(self):
        return self.headers.get("Content-Type", "")

    @property
    def bin_body(self):
        """Return the body in binary form

        Returns:
            bytes
        """
        return self.to_bare_obj().body

    @bin_body.setter
    def bin_body(self, value):
        """Set the raw body value, e.g. binary data"""
        self.data = value

    # ----------- methods --------------
    def to_requests(self):
        """
        Convert to a kwargs dict for `requests.request`

        Warning: due to stdlib quirks, the result of to_requests() cannot be
        fed directly to json.dumps(); use .to_jsonable() for that purpose.

        Examples:
            import requests
            req_dict = fuzzable.to_requests()
            requests.request(**req_dict)
        """
        _headers = copy.deepcopy(self.headers)
        # if self.cookies:
        #     # SimpleCookie is unordered before py3.6,
        #     # so the Cookie header had to be set manually
        #     _headers["Cookie"] = str(self.cookies)
        # elif "Cookie" in _headers:
        #     del _headers["Cookie"]
        if "Cookie" in _headers:
            del _headers["Cookie"]
        if "Content-Length" in _headers:
            del _headers["Content-Length"]
        return {
            "method": self.method,  # str
            "url": self.url.without_query,  # str
            "params": copy.deepcopy(self.query),  # dict
            "data": copy.deepcopy(self.data),  # dict or qsl or bin
            "headers": _headers,  # dict
            "cookies": copy.deepcopy(self.cookies),  # dict
            "json": copy.deepcopy(self.json),  # dict
            "files": copy.copy(self.files),  # dict
        }

    def to_jsonable(self):
        """
        Convert to a dict that json.dump can serialize

        Warning: key order is lost and duplicate keys are dropped

        Examples:
            {
                "method": "POST",  # str
                "url": "http://cat.com:8080/foo.php?id=23&name=foo",  # str
                "params": {"id":"23", "name":"foo"},  # dict
                "data": {"fff":"aaa", "bbb":"ccc"},  # dict or qsl or bin
                "headers": ...,  # dict
                "cookies": ...,  # dict
                "json": ...,  # dict or None
                "files": ...,  # dict or None
            }
        """
        req_dict = self.to_requests()
        # replace with the full url
        req_dict["url"] = str(self.url)
        for k in ("params", "headers", "cookies", "files"):
            req_dict[k] = dict(req_dict[k]) if req_dict[k] else {}
        if utils.like_dict(req_dict["data"]):
            req_dict["data"] = dict(req_dict["data"])
        return req_dict

    def to_bare_obj(self):
        """Convert to a Bare object

        Returns:
            BareLoader
        """
        return BareLoader.from_fuzzable(self)

    def to_bare(self):
        """Convert to the raw socket-level binary request body, as seen in a packet capture"""
        return self.to_bare_obj().raw

    # noinspection PyTypeChecker
    def fork(self,
             method=None, protocol=None,
             path=None, query=None,
             data=None, json=None, files=None,
             headers=None, cookies=None,
             host=None,
             port=None, scheme=None,
             meta=None,
             ):
        """Create a copy and merge-update some of its values"""
        # make the copy
        new = self.deepcopy()
        # modify values
        new.merge(
            # although **kwargs would work, every parameter is spelled out
            # explicitly to help IDE auto-completion
            method=method, protocol=protocol,
            path=path, query=query,
            data=data, json=json, files=files,
            headers=headers, cookies=cookies,
            host=host,
            port=port, scheme=scheme,
            meta=meta,
        )
        return new

    def merge(self,
              method=None, protocol=None,
              path=None, query=None,
              data=None, json=None, files=None,
              headers=None, cookies=None,
              host=None,
              port=None, scheme=None,
              meta=None,
              ):
        """Update some values in place; dict-like values are merged"""
        if method: self.method = method
        if protocol: self.protocol = protocol
        if path: self.path = path
        if host: self.host = host
        if port: self.port = port
        if scheme: self.scheme = scheme
        data, json, files = self.prepare_data(data, json, files, self.content_type)
        # merge
        if meta: self.meta = merge_data(self.meta, meta, base=dict)
        if json: self.json = merge_data(self.json, json, base=dict)
        if files: self.files = merge_data(self.files, files, base=QueryDict)
        if headers: self.headers = merge_data(self.headers, headers, base=HTTPHeaders)
        if cookies: self.cookies = merge_data(self.cookies, cookies, base=Cookie)
        if data:
            self.data = merge_data(self.data, data, base=QueryDict)
        if query:
            query = to_querydict(query)
            self.query = merge_data(self.query, query, base=QueryDict)

    # --------------- complex ------------
    if six.PY2:
        def deepcopy(self):
            if not self.files:
                return copy.deepcopy(self)
            else:
                # py2's deepcopy cannot handle StringIO, so temporarily
                # detach files, deepcopy the rest, then put a shallow
                # copy of files back.
                # This is not thread-safe, but access is not sparse
                # enough for that to matter in practice.
                _files = self.files
                self.files = None
                new = copy.deepcopy(self)
                self.files = _files
                new.files = copy.copy(_files)
                return new
    else:
        def deepcopy(self):
            # perfectly fine on py3
            return copy.deepcopy(self)

    # -------------- private ------------
    def _event(self, event, *args, **kwargs):
        # dispatch "on_<event>" to every registered plugin
        method = "on_{}".format(event)
        for plugin in self.plugins:
            getattr(plugin, method)(*args, **kwargs)

    # --------- magic method ----------
    def __str__(self):
        return "{}<{} {}>".format(self.__class__.__name__, self.method, self.url)

    __repr__ = __str__

    # --------- classmethod -----------
    @classmethod
    def from_bare(cls, bare, **kwargs):
        """
        Build a FuzzableRequest from a socket-level binary request

        Args:
            bare(BareLoader|bytes): raw binary request or a BareLoader
        """
        if not isinstance(bare, BareLoader):
            bare = BareLoader(bare)
        fz = bare.to_fuzzable(cls, **kwargs)
        return fz

    # --------- staticmethod ------------
    @staticmethod
    def prepare_data(data=None, json=None, files=None, content_type=None):
        """
        Pre-process request data

        Args:
            data (QueryDict|str|bytes):
            json (dict|str):
            files (dict[str, cgi.FieldStorage]):
            content_type (str):
        Returns:
            (QueryDict, dict, dict[str, cgi.FieldStorage]):
        """
        # handle multipart
        if content_type and content_type.startswith("multipart/") \
                and isinstance(data, six.string_types):
            _multipart = parse_multipart(data, content_type)
            # split _multipart into its form part and its files part
            _forms, _files = split_multipart(_multipart)
            data = _forms
            if files:
                files = merge_data(files, _files)
            else:
                files = _files
        else:
            try:
                data = to_querydict(data)
            except:
                pass  # data is raw binary
        if isinstance(json, six.string_types):
            import json as libjson  # avoid clashing with the `json` parameter
            json = libjson.loads(json)
        return data, json, files
from .bare import BareLoader
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,247 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_bare.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
import cgi
import requests
import err_hunter
import copy
from ..request import FuzzableRequest
from ..bare import BareLoader
logger = err_hunter.getLogger()
def test_decode_post():
    """BareLoader should parse a raw POST request: method, headers, cookies,
    query string, form body — preserving order and duplicate keys."""
    request_bin = b'''POST /index.html?fromSite=-2&fromSite=another&appName=cat HTTP/1.1
Host: www.example.com
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0
Accept: undefined
Accept-Language: zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3
Accept-Encoding: gzip, deflate, br
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
X-Requested-With: XMLHttpRequest
Referer: http://www.example.com/referer
Content-Length: 81
Cookie: t=qwertyuiop; _uab_collina=1501125267079; cna=zxcvbnm; _umdata=A502B1276E
Connection: close
loginId=abcdef.io&loginId=another-loginId&appName=cat&appEntrance=cat&bizParams=c'''
    request = BareLoader(request_bin, real_host="example.com")
    assert request.method == request.command == "POST"
    # header lookup is case-insensitive
    assert request.headers["accept"] == "undefined"
    assert request.headers["Accept"] == "undefined"
    # cookie order and values must match the original request exactly
    assert tuple(request.cookies.items())[:3] == (
        ('t', 'qwertyuiop'),
        ('_uab_collina', '1501125267079'),
        ('cna', 'zxcvbnm'),
    )
    assert request.host == "www.example.com"
    assert request.text[:16] == 'loginId=abcdef.i'
    assert request.real_host == "example.com"
    assert request.is_json is False
    assert request.is_form is True
    # forms preserved, including duplicate occurrences of the same key
    assert tuple(request.forms.items())[:4] == (('loginId', 'abcdef.io'),
                                                ('loginId', 'another-loginId'),
                                                ('appName', 'cat'),
                                                ('appEntrance', 'cat'),)
    assert request.POST == request.forms  # identical in this particular case
    assert request.raw == request_bin
    assert request.content_length == 81
    assert request.user_agent == "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0"
    assert request.is_xhr is True
    assert request.query_string == "fromSite=-2&fromSite=another&appName=cat"
    assert request.body == b'loginId=abcdef.io&loginId=another-loginId&appName=cat&appEntrance=cat&bizParams=c'
    # query order preserved, duplicates supported
    assert tuple(request.query.items()) == (
        ('fromSite', '-2'),
        ('fromSite', 'another'),
        ('appName', 'cat'),
    )
    assert tuple((k.lower(), v) for k, v in request.headers.items())[:9] == (
        ('host', 'www.example.com'),
        ('user-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0'),
        ('accept', 'undefined'),
        ('accept-language', 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'),
        ('accept-encoding', 'gzip, deflate, br'),
        ('content-type', 'application/x-www-form-urlencoded; charset=UTF-8'),
        ('x-requested-with', 'XMLHttpRequest'),
        ('referer', 'http://www.example.com/referer'),
        ('content-length', '81'),
    )
    return request
def test_decode_get():
    """Parse a minimal raw GET request and verify the decoded fields."""
    raw = b"""GET /get?cat=1&dog=2 HTTP/1.1
Accept: text/html
Accept-Encoding: gzip, deflate
Connection: keep-alive
Host: httpbin.org
User-Agent: HTTPie/0.9.9
X-Needle: uCX6YrzPpTmax
"""
    req = BareLoader(raw)
    # the original bytes must be kept verbatim
    assert req.raw == raw
    assert req.user_agent == "HTTPie/0.9.9"
    assert req.host == "httpbin.org"
    assert tuple(req.query.items()) == (("cat", "1"), ("dog", "2"))
    return req
def test_decode_multipart():
    """Decode a multipart/form-data POST: one uploaded file plus one plain field."""
    request_bin = b"""POST /post?cat=1&dog=2 HTTP/1.1
Host: httpbin.org
Content-Length: 296
User-Agent: http_clay/1.0
Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryEW35oPYWK6qwibcP
Accept: text/html
Accept-Language: zh-CN,zh;q=0.8
Cookie: JSESSIONID=A53DAC634D455E4D1F16829B7BD480F7
Connection: close

------WebKitFormBoundaryEW35oPYWK6qwibcP
Content-Disposition: form-data; name="upload"; filename="abc.txt"
Content-Type: text/plain

afasfafasfasfa
------WebKitFormBoundaryEW35oPYWK6qwibcP
Content-Disposition: form-data; name="caption"

aaaaa
------WebKitFormBoundaryEW35oPYWK6qwibcP--"""
    r = BareLoader(request_bin, real_host="httpbin.org")
    # both multipart parts show up in POST; only the file lands in .files
    assert len(r.POST) == 2
    assert len(r.files) == 1
    f = r.files["upload"]
    assert isinstance(f, cgi.FieldStorage)
    # the POST entry and the files entry reference the same payload
    assert r.POST["upload"].file.getvalue() == f.file.getvalue()
    assert f.file.getvalue() == b'afasfafasfasfa'
    assert f.outerboundary == b"----WebKitFormBoundaryEW35oPYWK6qwibcP"
    assert f.disposition == "form-data"
    assert f.name == "upload"
    assert f.filename == "abc.txt"
    assert f.type == "text/plain"
    assert r.POST["caption"] == "aaaaa"
    return r
def resend(req, pattern=b'<h1>Example Domain</h1>'):
    """Replay *req* over the network and assert *pattern* occurs in the response body."""
    kwargs = req.to_requests()
    resp = requests.request(**kwargs)
    assert pattern in resp.content
    return resp
def build_same(r):
    """Rebuild *r* via BareLoader.build with no overrides and assert every
    attribute round-trips unchanged.

    Args:
        r (BareLoader): the request to clone.

    Returns:
        BareLoader: the rebuilt request.
    """
    def _decode(v):
        # normalize bytes to text so old/new values compare uniformly
        if isinstance(v, six.binary_type):
            return v.decode("UTF-8")
        else:
            return v
    # --- with nothing changed, the rebuilt request must equal the original
    same = BareLoader.build(old=r)
    for attr in ("host", "body", "is_json", "is_form",
                 "GET", "POST", "raw", "headers", "cookies",
                 "query", "query_string"):
        values = {"old": _decode(getattr(r, attr)),
                  "new": _decode(getattr(same, attr)),
                  }
        if isinstance(values["old"], six.string_types) \
                or isinstance(values["old"], six.binary_type):
            # textual attributes are compared case-insensitively
            values["old"] = values["old"].lower()
            values["new"] = values["new"].lower()
        try:
            assert values["old"] == values["new"], attr
        except AssertionError:  # was a bare ``except:``; only the assert can raise here
            logger.error(repr(values["old"]))
            logger.error(repr(values["new"]))
            raise
    return same
def build_modified(r):
    """Rebuild *r* with every supported override and verify each one took effect.

    Args:
        r (BareLoader):
    """
    # headers must be deep-copied before editing; the other containers need not be
    h = copy.deepcopy(r.headers)
    h["Accept"] = "*/*"  # changing an existing field keeps its position (same below)
    h["Cat"] = "dog"  # a brand-new field is appended at the end (same below)
    cookies = r.cookies
    cookies["cna"] = "changed"
    cookies["nonexist"] = "bar"
    data = r.forms
    data["appName"] = "dog"
    data["nonexist"] = "bar"
    query = r.query
    query["appName"] = "abcdefg&"
    query["nonexist"] = "23333"
    new = BareLoader.build(
        # every field below is optional;
        # all are spelled out here only for demo purposes
        old=r,
        method="PUT",
        protocol="HTTP/1.0",
        path="/yet/another/path",
        query=query,
        data=data,  # `data` behaves the same as in the requests library
        # json=  # `json` and `files` also behave the same as in requests
        headers=h,
        host="www.example.org",
        cookies=cookies,
        port=443,
        scheme="https",
    )
    logger.debug("new req:\n%s", new.raw.decode("UTF-8"))
    _newh = copy.deepcopy(new.headers)
    _oldh = copy.deepcopy(h)
    # drop auto-generated headers that were absent from the original set
    for _hname in ("Content-Length", "Content-Type", "Cookie"):
        if _hname not in _oldh:
            del _newh[_hname]
    # header order and values must be unchanged
    assert tuple(x.lower() for x in _newh.keys()) == tuple(x.lower() for x in _oldh.keys())
    for _hname in ("Cookie", "Content-Length", "Host"):
        if _hname in _newh:
            del _newh[_hname]
            del _oldh[_hname]
    assert tuple(_newh.values()) == tuple(_oldh.values())
    assert new.port == 443
    assert new.real_host == r.real_host
    assert new.host == "www.example.org"
    assert new.path == "/yet/another/path"
    assert new.protocol == "HTTP/1.0"
    assert new.scheme == "https"
    assert new.method == "PUT"
    assert len(new.body) == new.content_length
    assert tuple(new.forms.items()) == tuple(data.items())
    assert tuple(new.query.items()) == tuple(query.items())
    assert dict(new.cookies.items()) == dict(cookies.items())
    return new
def compare_bareloader_fuzzable(b, f):
    """Assert a BareLoader and a FuzzableRequest agree on every shared attribute.

    Args:
        b (BareLoader): the raw-parsed request.
        f (FuzzableRequest): the fuzzable view derived from it.

    Returns:
        FuzzableRequest: *f*, so calls can be chained.
    """
    assert isinstance(b, BareLoader)
    assert isinstance(f, FuzzableRequest)
    # scalar and structured attributes must match one-to-one.
    # (the former ``try: assert ... except: raise`` wrapper was a no-op and
    # has been removed; the assert message now names the failing attribute)
    for name in [
        "query", "method", "host", "port", "path",
        "protocol", "scheme",
        "headers", "cookies",
    ]:
        assert getattr(b, name) == getattr(f, name), name
    # ordering must match as well, not just equality
    assert tuple(b.headers.items()) == tuple(f.headers.items())
    assert tuple(b.cookies.items()) == tuple(f.cookies.items())
    # every key the fuzzable side exposes must resolve to the same value
    _temp = tuple(b.headers.items())
    for k in f.headers.keys():
        assert (k, f.headers[k]) in _temp
    _temp = tuple(b.cookies.items())
    for k in f.cookies.keys():
        assert (k, f.cookies[k]) in _temp
    return f
def test_post():
    """End-to-end pipeline for the decoded POST: resend, rebuild, modify, convert."""
    r = test_decode_post()
    resend(r)
    build_same(r)
    # modify
    rm = build_modified(r)
    resend(rm)
    compare_bareloader_fuzzable(r, r.to_fuzzable())
    compare_bareloader_fuzzable(r, FuzzableRequest.from_bare(r))
    compare_bareloader_fuzzable(r, FuzzableRequest.from_bare(r.raw))
    compare_bareloader_fuzzable(r, r.to_fuzzable().fork())
    compare_bareloader_fuzzable(rm, rm.to_fuzzable())
    compare_bareloader_fuzzable(rm, FuzzableRequest.from_bare(rm))
    _fz = FuzzableRequest.from_bare(rm.raw)
    # raw bytes carry no scheme/port, so restore them before comparing
    _fz.scheme = "https"
    _fz.port = 443
    compare_bareloader_fuzzable(rm, _fz)
    compare_bareloader_fuzzable(rm, rm.to_fuzzable().fork())
def test_get():
    """Same pipeline as test_post, but driven by the decoded GET request."""
    r = test_decode_get()
    # the X-Needle header value must appear in the echoed response
    resend(r, b"uCX6YrzPpTmax")
    build_same(r)
    build_modified(r)
    compare_bareloader_fuzzable(r, r.to_fuzzable())
    compare_bareloader_fuzzable(r, FuzzableRequest.from_bare(r))
    compare_bareloader_fuzzable(r, FuzzableRequest.from_bare(r.raw))
    compare_bareloader_fuzzable(r, r.to_fuzzable().fork())
def test_multipart():
    """Multipart request: fuzzable round-trips plus a live resend check."""
    r = test_decode_multipart()
    compare_bareloader_fuzzable(r, r.to_fuzzable())
    compare_bareloader_fuzzable(r, FuzzableRequest.from_bare(r))
    compare_bareloader_fuzzable(r, FuzzableRequest.from_bare(r.raw))
    compare_bareloader_fuzzable(r, r.to_fuzzable().fork())
    # the uploaded file content must be echoed back by the server
    resend(r, b'"upload": "afasfafasfasfa"')
def test_json():
    """A JSON body must surface via .json (not .data) and fork() must deep-copy it."""
    request_bin = b'''POST /anything HTTP/1.1
Host: httpbin.org
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0
Accept: */*
Accept-Language: zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3
Accept-Encoding: gzip, deflate, br
Content-Type: application/json
X-Requested-With: XMLHttpRequest
Referer: http://httpbin.org/
Content-Length: 37
Cookie: some=cookie
Connection: close

{"json_content":"helloworld"}'''
    fz = FuzzableRequest.from_bare(request_bin)
    # a JSON content-type must not be interpreted as form data
    assert not fz.data
    assert fz.json == {"json_content": "helloworld"}
    assert fz.to_requests()["json"] == {"json_content": "helloworld"}
    # fork() must copy the parsed JSON, not share the same object
    assert fz.fork().json is not fz.json
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,248 | aploium/my_utils | refs/heads/master | /err_hunter/err_hunter/mylogger.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, unicode_literals
import getpass
import inspect
import logging
import logging.handlers
import os
import platform
import sys
import traceback
import multiprocessing
import requests
from . import frame_operations
from . import traceback2
# Log line template, e.g. "[W 2018-01-01 12:00:00,000 mymodule.myfunc#42] message"
# (level initial, timestamp, module.function#line, then the message).
FORMAT = "[%(levelname)1.1s %(asctime)s %(module)s.%(funcName)s#%(lineno)d] %(message)s"
class MultiprocessRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """RotatingFileHandler that writes one log file per process.

    ``baseFilename`` is overridden as a property that injects the current
    process name and pid into the configured path, so concurrent processes
    never rotate each other's files.  Records can optionally be flattened
    onto a single physical line by escaping CR/LF.
    """

    def __init__(self, *args, ensure_single_line=True, **kwargs):
        # Must exist before super().__init__, which assigns baseFilename
        # (routed through the property setter below).
        self._baseFilename = None
        super(MultiprocessRotatingFileHandler, self).__init__(*args, **kwargs)
        self.ensure_single_line = ensure_single_line

    def format(self, record):
        """Format the record, escaping CR/LF when single-line mode is on."""
        text = super(MultiprocessRotatingFileHandler, self).format(record)
        if not self.ensure_single_line:
            return text
        return text.replace("\r", r"\r").replace("\n", r"\n")

    @property
    def baseFilename(self):
        # e.g. "app.log" -> "app.MainProcess-1234.log"
        stem, suffix = os.path.splitext(self._baseFilename)
        proc = multiprocessing.current_process()
        return "{}.{}-{}{}".format(stem, proc.name, proc.pid, suffix)

    @baseFilename.setter
    def baseFilename(self, value):
        # Store the template path; the getter derives the per-process name.
        self._baseFilename = value
class MyHTTPHandler(logging.Handler):
    """Logging handler that ships each record to an HTTP endpoint.

    The payload is the record's ``__dict__`` plus host/user/interpreter
    metadata; ERROR-and-above records additionally carry the originating
    frame's locals and, when an exception is active, the formatted traceback.
    """

    def __init__(self, url, interested=None,
                 method="POST", level=logging.WARNING, callback=None, timeout=10, req_kwargs=None,
                 source_path=None
                 ):
        """
        Args:
            url (str): endpoint that receives each record.
            interested: filter forwarded to ``frame_operations.frame_format``,
                selecting which frame variables get serialized.
            method (str): "GET" sends the payload as query params,
                anything else as a form body.
            level (int): minimum level this handler reacts to.
            callback: optional ``callback(record, response)`` hook run after
                each upload.
            timeout (int): per-request timeout in seconds.
            req_kwargs (dict): extra kwargs merged into the requests call
                (may override timeout / allow_redirects).
            source_path (str): defaults to the current working directory.
        """
        super(MyHTTPHandler, self).__init__(level)
        self.url = url
        self.method = method
        self.req_kwargs = req_kwargs or {}
        self.session = requests.Session()
        self.callback = callback
        self.timeout = timeout
        self.interested = interested
        self.source_path = source_path or os.getcwd()

    def mapLogRecord(self, record):
        """Flatten *record* into a dict payload enriched with environment info."""
        data = {}
        data.update(record.__dict__)
        data.update({
            "_cwd": os.getcwd(),
            "_username": getpass.getuser(),
            "_hostname": platform.node(),
            "_uname": str(platform.uname()),
            "_py_version": sys.version,
        })
        if record.levelno >= logging.ERROR:
            # Recover the frame that issued the log call so its locals can
            # be shipped with the record.
            real_frame = frame_operations.real_frame_extract(
                inspect.currentframe(),
                filepath=data["pathname"],
                lineno=data["lineno"]
            )
            if real_frame is not None:
                data["_logframe"] = frame_operations.frame_format(real_frame, interested=self.interested)
            if sys.exc_info() != (None, None, None) and "_traceback" not in data:
                data["_traceback"] = traceback.format_exc()
                data["_traceback_frames"] = traceback2.format_exc(with_normal=False)
        return data

    def _emit(self, record):
        """:type record: logging.LogRecord"""
        kwargs = {"timeout": self.timeout, "allow_redirects": False}
        data = self.mapLogRecord(record)  # type: dict
        if self.method == "GET":
            kwargs["params"] = data
        else:
            kwargs["data"] = data
        # caller-supplied kwargs win over the defaults above
        kwargs.update(self.req_kwargs)
        resp = self.session.request(
            self.method, self.url, **kwargs
        )
        if self.callback is not None:
            self.callback(record, resp)

    def emit(self, record):
        """Upload the record; delegate failures to handleError().

        Was a bare ``except:``; narrowed to ``except Exception`` so
        KeyboardInterrupt/SystemExit propagate, matching the stdlib
        logging.Handler.emit convention.
        """
        try:
            self._emit(record)
        except Exception:
            self.handleError(record)
def apply_handler(
        url,
        level=logging.WARNING,
        method="POST",
        interested=None,
        parent_name=None,
        callback=None,
        timeout=10,
        req_kwargs=None,
        source_path=None,
        lazy=False,
):
    """Create a MyHTTPHandler for *url* and attach it to a logger.

    Args:
        url (str): endpoint that will receive log records.
        level (int): minimum level forwarded over HTTP.
        parent_name (str): logger to attach to; None means the root logger.
        lazy (bool): when True, also calls logging.basicConfig with the
            module FORMAT at INFO level.
            NOTE(review): the name suggests deferred setup, but the flag
            actually triggers *extra* console configuration — confirm the
            intended semantics with callers.

    Returns:
        MyHTTPHandler: the handler that was attached.
    """
    if lazy:
        logging.basicConfig(
            format=FORMAT,
            level=logging.INFO,
        )
    handler = MyHTTPHandler(
        url, interested=interested,
        method=method, level=level,
        callback=callback, timeout=timeout,
        req_kwargs=req_kwargs,
        source_path=source_path,
    )
    # a bare Formatter: the HTTP payload carries the raw record fields anyway
    handler.setFormatter(logging.Formatter())
    logging.getLogger(parent_name).addHandler(handler)
    return handler
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,249 | aploium/my_utils | refs/heads/master | /err_hunter/demo.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import requests
import err_hunter
def error_func():
    """Raise ZeroDivisionError on purpose so err_hunter can dump this frame.

    The local variable names are part of the demo output (frame locals are
    printed by err_hunter), so they are deliberately distinctive.
    """
    monkey = 3
    a = 1 / 0  # this will raise error
    universe = 42  # never reached
def func():
    """Intermediate frame: holds a local and a live requests.Response so both
    appear in the err_hunter frame dump, then triggers the error."""
    cat = 7
    r = requests.get("http://example.com")
    error_func()
    monkey = 7  # never reached; error_func raises first
# Module-level state, visible in the dumped frames alongside the locals.
some_global_var = {"a": "b"}

try:
    func()
except:  # bare except is deliberate for this demo: print whatever was raised
    err_hunter.print_exc(
        interested=[  # we want to see things inside requests' response
            "requests.models.Response"]
    )
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,250 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_fuzzable.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
if six.PY3:
import collections
else:
from future.moves import collections
import requests
from requestfuzz.request import FuzzableRequest
from requestfuzz.datastructure import HTTPHeaders, QueryDict, Cookie
from requestfuzz import utils
import err_hunter
logger = err_hunter.getLogger()
def make_simple_req():
    """Build a GET FuzzableRequest with repeated query keys, shared by the tests."""
    return FuzzableRequest("http://httpbin.org/anything?a=1&b=2&c=3&a=x&a=y")
def make_complex_req():
    """Build a POST FuzzableRequest exercising data/headers/cookies input forms."""
    return FuzzableRequest(
        "http://www.httpbin.org/anything?a=1&b=2&c=3&a=x&a=y",
        method="POST",
        data="a=b&c=d&c=e&e=x&x=f",  # accepts str / dict / kv-pairs
        headers=collections.OrderedDict([
            # accepts dict / kv-pairs, but only OrderedDict preserves order
            ("User-Agent", "RequestFuzz"),
            ("referer", "http://example.com"),
            ("conneCtion", "close"),  # oddly-cased header names are allowed
            ("Accept", "*/*"),
        ]),
        cookies=[  # accepts str / dict / kv-pairs
            ("Answer", "42"), ("Sess", "x"), ("sky", "net")],
    )
def test_basic():
    """Query parsing, mutation, and URL round-tripping on a simple request."""
    fz = make_simple_req()
    assert fz.port == 80
    assert fz.host == fz.netloc == "httpbin.org"
    assert fz.protocol == "HTTP/1.1"
    # duplicate query keys keep their original order
    assert tuple(fz.query.items()) == (('a', '1'), ('b', '2'), ('c', '3'), ('a', 'x'), ('a', 'y'))
    assert fz.query_string == "a=1&b=2&c=3&a=x&a=y"
    assert fz.scheme == "http"
    assert fz.url == "http://httpbin.org/anything?a=1&b=2&c=3&a=x&a=y"
    assert fz.url.without_path == "http://httpbin.org"
    assert fz.url.without_query == "http://httpbin.org/anything"
    assert not fz.headers
    assert not fz.cookies
    assert not fz.data
    assert not fz.json
    # adding a new key appends it and is reflected in query_string / url
    fz.query["e"] = "233"
    assert tuple(fz.query.items()) == (('a', '1'), ('b', '2'), ('c', '3'), ('a', 'x'), ('a', 'y'), ("e", "233"))
    assert fz.query_string == 'a=1&b=2&c=3&a=x&a=y&e=233'
    assert fz.url == 'http://httpbin.org/anything?a=1&b=2&c=3&a=x&a=y&e=233'
    # assigning a repeated key collapses it to a single occurrence, in place
    fz.query["a"] = "0day"
    assert tuple(fz.query.items()) == (('a', '0day'), ('b', '2'), ('c', '3'), ("e", "233"))
    assert fz.query_string == 'a=0day&b=2&c=3&e=233'
    assert fz.url == 'http://httpbin.org/anything?a=0day&b=2&c=3&e=233'
    del fz.query["a"]
    assert fz.query_string == 'b=2&c=3&e=233'
    assert tuple(fz.query.items()) == (('b', '2'), ('c', '3'), ("e", "233"))
    fz.query["c"] = "4"
    assert fz.query_string == 'b=2&c=4&e=233'
    # .add() always appends, even for an existing key
    fz.query.add("c", "5")
    assert fz.query_string == 'b=2&c=4&e=233&c=5'
    fz.query.update({"d": "7"})
    assert fz.query_string == 'b=2&c=4&e=233&c=5&d=7'
    assert "d" in fz.query
def test_headers():
    """Case-insensitive, order-preserving header container semantics."""
    fz = make_simple_req()
    # ------ headers ---------
    assert fz.headers is fz.headers  # i.e. cached, not rebuilt on each access
    assert isinstance(fz.headers, HTTPHeaders)
    assert utils.like_dict(fz.headers)
    fz.headers["X-ReqFuzz"] = "0day"
    # lookups are case-insensitive
    assert fz.headers["x-reqfuzz"] == fz.headers["X-ReqFuzz"] == "0day"
    assert "x-reqfuzz" in fz.headers
    assert "X-ReqFuzz" in fz.headers
    fz.headers["x-H1"] = "skynet"
    fz.headers["X-h2"] = "skynet2"
    assert tuple(fz.headers.items()) == (('X-ReqFuzz', '0day'), ('x-H1', 'skynet'), ('X-h2', 'skynet2'))
    # case-insensitive overwrite keeps the original key spelling and position
    fz.headers["X-h1"] = "skynet1"
    assert tuple(fz.headers.items()) == (('X-ReqFuzz', '0day'), ('x-H1', 'skynet1'), ('X-h2', 'skynet2'))
    fz.headers.update({"x-h1": "sky", "x-h3": "net"})
    assert tuple(fz.headers.items()) == (('X-ReqFuzz', '0day'), ('x-H1', 'sky'), ('X-h2', 'skynet2'), ('x-h3', 'net'))
    del fz.headers["X-h1"]
    assert tuple(fz.headers.items()) == (('X-ReqFuzz', '0day'), ('X-h2', 'skynet2'), ('x-h3', 'net'))
    # .add() appends a duplicate; a later assignment collapses duplicates again
    fz.headers.add("X-H2", "another")
    assert tuple(fz.headers.items()) == (('X-ReqFuzz', '0day'), ('X-h2', 'skynet2'),
                                         ('x-h3', 'net'),
                                         ('X-h2', 'another'))
    fz.headers["x-h2"] = "h2"
    assert tuple(fz.headers.items()) == (('X-ReqFuzz', '0day'), ('X-h2', 'h2'), ('x-h3', 'net'))
    assert dict(fz.headers) == {'X-ReqFuzz': '0day', 'X-h2': 'h2', 'x-h3': 'net'}
def test_seturl():
    """Assigning .url re-derives path/netloc/port/scheme/query in one shot."""
    fz = make_simple_req()
    fz.url = "https://example.com:4443/index.html?a=2&b=2"
    assert fz.url == "https://example.com:4443/index.html?a=2&b=2"
    assert fz.path == "/index.html"
    assert fz.netloc == 'example.com:4443'
    assert fz.port == 4443
    assert fz.scheme == "https"
    assert fz.query_string == 'a=2&b=2'
    assert fz.host == "example.com"
def test_to_requests():
    """to_requests() must emit kwargs that requests accepts, confirmed live."""
    fz = make_complex_req()
    assert fz.url == 'http://www.httpbin.org/anything?a=1&b=2&c=3&a=x&a=y'
    assert tuple(fz.headers.items()) == (('User-Agent', 'RequestFuzz'),
                                         ('referer', 'http://example.com'),
                                         ('conneCtion', 'close'),
                                         ('Accept', '*/*'))
    assert fz.headers["connection"] == fz.headers["conneCtion"]
    assert fz.headers["reFerer"] == fz.headers["referer"]
    assert tuple(fz.cookies.items()) == (('Answer', '42'), ('Sess', 'x'), ('sky', 'net'))
    assert tuple(fz.data.items()) == (('a', 'b'), ('c', 'd'), ('c', 'e'), ('e', 'x'), ('x', 'f'))
    assert fz.method == "POST"
    assert fz.to_requests() == {
        'data': QueryDict([('a', 'b'), ('c', 'd'), ('c', 'e'), ('e', 'x'), ('x', 'f')]),
        'headers': HTTPHeaders(
            [('User-Agent', 'RequestFuzz'), ('referer', 'http://example.com'),
             ('conneCtion', 'close'), ('Accept', '*/*'),
             # ('Cookie', 'Answer=42; Sess=x; sky=net')
             ]),
        'method': 'POST',
        'params': QueryDict([('a', '1'), ('b', '2'), ('c', '3'), ('a', 'x'), ('a', 'y')]),
        'url': 'http://www.httpbin.org/anything',
        "cookies": Cookie('Answer=42; Sess=x; sky=net'),
        "json": None,
        "files": None,
    }
    _req = fz.to_requests()
    # the emitted containers are copies, not live views of the request
    assert _req["params"] is not fz.query
    assert _req["data"] is not fz.data
    assert _req["headers"] is not fz.headers
    # _req["proxies"] = {"http":"http://127.0.0.1:8080"}
    r = requests.request(**_req)
    rj = r.json()
    assert rj["args"] == {'a': 'y', 'b': '2', 'c': '3'}
    assert rj["data"] == ""
    assert rj["form"] == {'a': 'b', 'c': ['d', 'e'], 'e': 'x', 'x': 'f'}
    # Cookie construction is still imperfect: on Python < 3.6 cookie order
    # is not preserved, so skip order checks here and only verify that
    # each cookie is present.
    assert "Answer=42" in rj["headers"]["Cookie"]
    assert "Sess=x" in rj["headers"]["Cookie"]
    assert "sky=net" in rj["headers"]["Cookie"]
    del rj["headers"]["Cookie"]
    assert rj["headers"] == {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'close',
        'Content-Length': '19',
        'Content-Type': 'application/x-www-form-urlencoded',
        # 'Cookie': 'Answer=42; Sess=x; sky=net',
        'Host': 'www.httpbin.org',
        'Referer': 'http://example.com',
        'User-Agent': 'RequestFuzz'
    }
    assert rj["method"] == "POST"
    assert rj["url"] == 'http://www.httpbin.org/anything?a=y&b=2&c=3'
def test_to_bare(fz=None):
    """Serializing back to raw bytes must preserve header order, cookies, body.

    Accepts an existing request so other tests can re-check invariance.
    """
    fz = fz or make_complex_req()
    assert fz.to_bare() == \
        b"""\
POST /anything?a=1&b=2&c=3&a=x&a=y HTTP/1.1\r\n\
User-Agent: RequestFuzz\r\n\
referer: http://example.com\r\n\
conneCtion: close\r\n\
Accept: */*\r\n\
Cookie: Answer=42; Sess=x; sky=net\r\n\
Content-Length: 19\r\n\
Content-Type: application/x-www-form-urlencoded\r\n\
Host: www.httpbin.org\r\n\
\r\n\
a=b&c=d&c=e&e=x&x=f"""
def test_fork_complex():
    """fork() deep-copies containers and merges overrides without touching the parent."""
    fz = make_complex_req()
    # with no overrides the fork must serialize byte-identically
    fk = fz.fork()
    assert fz.to_bare() == fk.to_bare()
    assert fk.query is not fz.query
    assert fk.data is not fz.data
    assert fk.headers is not fz.headers
    assert fk.cookies is not fz.cookies
    fk = fz.fork(
        method="PUT", path="/put",
        query=[("foo", "bar"), ("c", "changed")],  # merged into the existing values
        data=[("bar", "cat"), ("a", "chg")],
        headers=[("User-Agent", "AnotherUA"), ("X-method", "PUT")],
        cookies=[('Answer', '43'), ("cook", "ie")],
    )
    assert fk.method == "PUT"
    assert fk.path == "/put"
    assert tuple(fk.query.items()) == (('a', '1'), ('b', '2'), ('c', 'changed'),
                                       ('a', 'x'), ('a', 'y'), ('foo', 'bar'))
    assert fk.query_string == 'a=1&b=2&c=changed&a=x&a=y&foo=bar'
    assert fk.url == 'http://www.httpbin.org/put?a=1&b=2&c=changed&a=x&a=y&foo=bar'
    assert tuple(fk.data.items()) == (('a', 'chg'), ('c', 'd'), ('c', 'e'),
                                      ('e', 'x'), ('x', 'f'), ('bar', 'cat'))
    assert tuple(fk.headers.items()) == (('User-Agent', 'AnotherUA'),
                                         ('referer', 'http://example.com'),
                                         ('conneCtion', 'close'),
                                         ('Accept', '*/*'),
                                         ('X-method', 'PUT'))
    assert tuple(fk.cookies.items()) == (('Answer', '43'), ('Sess', 'x'),
                                         ('sky', 'net'), ('cook', 'ie'))
    assert fk.to_bare() == b"""\
PUT /put?a=1&b=2&c=changed&a=x&a=y&foo=bar HTTP/1.1\r\n\
User-Agent: AnotherUA\r\n\
referer: http://example.com\r\n\
conneCtion: close\r\n\
Accept: */*\r\n\
X-method: PUT\r\n\
Cookie: Answer=43; Sess=x; sky=net; cook=ie\r\n\
Content-Length: 29\r\n\
Content-Type: application/x-www-form-urlencoded\r\n\
Host: www.httpbin.org\r\n\
\r\n\
a=chg&c=d&c=e&e=x&x=f&bar=cat"""
    test_to_bare(fz)  # the pre-fork request must be unchanged
def test_strange_cookie():
    """Special characters in cookie values get quoted and octal-escaped."""
    fz = make_simple_req()
    fz.cookies["strange"] = "\"\'\\\x9f ;"
    assert str(fz.cookies) == r'''strange="\"'\\\237 \073"'''
def test_to_jsonable():
    """to_jsonable() output must survive a json round-trip unchanged."""
    import json
    fz = make_complex_req()
    payload = fz.to_jsonable()
    round_tripped = json.loads(json.dumps(payload))
    assert round_tripped == payload
def test_multipart():
    """Multipart bodies split into .data fields and .files FieldStorage entries."""
    request_bin = b"""POST /post?cat=1&dog=2 HTTP/1.1
Host: httpbin.org
Content-Length: 296
User-Agent: http_clay/1.0
Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryEW35oPYWK6qwibcP
Accept: text/html
Accept-Language: zh-CN,zh;q=0.8
Cookie: JSESSIONID=A53DAC634D455E4D1F16829B7BD480F7
Connection: close

------WebKitFormBoundaryEW35oPYWK6qwibcP
Content-Disposition: form-data; name="upload"; filename="abc.txt"
Content-Type: text/plain

afasfafasfasfa
------WebKitFormBoundaryEW35oPYWK6qwibcP
Content-Disposition: form-data; name="caption"

aaaaa
------WebKitFormBoundaryEW35oPYWK6qwibcP--"""
    fz = FuzzableRequest.from_bare(request_bin)
    # the plain field lands in .data; the uploaded file does not
    assert fz.data == QueryDict({"caption": "aaaaa"})
    assert fz.data["caption"] == "aaaaa"
    f = fz.files["upload"]
    import cgi
    assert isinstance(f, cgi.FieldStorage)
    assert fz.files["upload"].file.getvalue() == b'afasfafasfasfa'
    # repeated lookups return the same FieldStorage object
    assert fz.files["upload"] is f
    assert f.file.getvalue() == b'afasfafasfasfa'
    assert f.outerboundary == b"----WebKitFormBoundaryEW35oPYWK6qwibcP"
    assert f.disposition == "form-data"
    assert f.name == "upload"
    assert f.filename == "abc.txt"
    assert f.type == "text/plain"
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,251 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_plugins.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import requests
from requestfuzz import FuzzableRequest, HTTPHeaders
from requestfuzz.plugin import AutoHeader, AutoCleanParam
def test_auto_header():
    """AutoHeader should add browser-like default headers plus a Referer,
    while preserving any header the caller supplied explicitly.

    NOTE: the second half performs a real HTTP request to httpbin.org,
    so this test requires network access.
    """
    fz = FuzzableRequest(
        "http://httpbin.org/anything",
        headers={"User-Agent": "foobar"},
        plugins=[AutoHeader],
    )
    # the caller's User-Agent survives; every other header comes from the plugin
    assert fz.headers == HTTPHeaders([
        ('User-Agent', 'foobar'),
        ('Accept-Encoding', 'gzip, deflate'),
        ('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2'),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
        ('Referer', 'http://httpbin.org/anything')
    ])
    # round-trip through `requests` and verify what the server actually saw
    r = requests.request(**fz.to_requests())
    assert r.json()["headers"] == {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2',
        'Connection': 'close',
        'Host': 'httpbin.org',
        'Referer': 'http://httpbin.org/anything',
        'User-Agent': 'foobar'
    }
def test_auto_clean_param():
    """AutoCleanParam should strip tracking/noise parameters (here: spm, _t)
    from the query string while keeping the meaningful ones."""
    fz = FuzzableRequest(
        "http://httpbin.org/anything?spm=useless&_t=123&a=b&c=d",
        plugins=[AutoCleanParam],
    )
    assert fz.query_string == "a=b&c=d"
def test_two_plugins():
    """Plugin order matters: each plugin sees the request as left by the
    plugins listed before it."""
    # AutoCleanParam runs BEFORE AutoHeader,
    # so the generated Referer does **NOT** contain spm
    fz = FuzzableRequest(
        "http://httpbin.org/anything?spm=useless&_t=123&a=b&c=d",
        headers={"User-Agent": "foobar"},
        plugins=[AutoCleanParam, AutoHeader],
    )
    assert fz.headers == HTTPHeaders([
        ('User-Agent', 'foobar'),
        ('Accept-Encoding', 'gzip, deflate'),
        ('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2'),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
        ('Referer', 'http://httpbin.org/anything?a=b&c=d')
    ])
    assert fz.query_string == "a=b&c=d"
    # -----------------------
    # AutoCleanParam runs AFTER AutoHeader,
    # so the Referer **DOES** still contain spm (only the query gets cleaned)
    fz = FuzzableRequest(
        "http://httpbin.org/anything?spm=useless&_t=123&a=b&c=d",
        headers={"User-Agent": "foobar"},
        plugins=[AutoHeader, AutoCleanParam],
    )
    assert fz.headers == HTTPHeaders([
        ('User-Agent', 'foobar'),
        ('Accept-Encoding', 'gzip, deflate'),
        ('Accept-Language', 'zh-CN,zh;q=0.8,en;q=0.6,it;q=0.4,es;q=0.2'),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
        ('Referer', 'http://httpbin.org/anything?spm=useless&_t=123&a=b&c=d')
    ])
    assert fz.query_string == "a=b&c=d"
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,252 | aploium/my_utils | refs/heads/master | /err_hunter/err_hunter/traceback2.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals, print_function
import os
import sys
import traceback
from . import frame_operations
if sys.version_info[0] == 3:
    # Python 3 ships traceback.walk_tb natively
    walk_tb = traceback.walk_tb
else:
    def walk_tb(tb):
        """Python 2 backport of traceback.walk_tb: yield (frame, lineno) pairs."""
        while tb is not None:
            yield tb.tb_frame, tb.tb_lineno
            tb = tb.tb_next


def format_exc(interested=None, source_path=None, with_normal=True):
    """Format the currently-handled exception, appending a detailed frame
    dump (via frame_operations.frame_format) for every frame whose source
    file lives under *source_path*.

    :param interested: forwarded to frame_operations.frame_format()
    :param source_path: only frames located under this directory get the
        detailed dump; defaults to the current working directory
    :param with_normal: prepend the standard traceback.format_exc() output
    :return: the formatted text, or "NoTraceback" when no exception is active
    """
    # capture exc_info once so all uses below see the same exception state
    exc_info = sys.exc_info()
    if exc_info == (None, None, None):
        return "NoTraceback"
    source_path = source_path or os.getcwd()
    parts = []
    for frame, lineno in walk_tb(exc_info[2]):
        abs_path = frame.f_code.co_filename
        # files outside source_path yield a relpath starting with ".."
        if ".." not in os.path.relpath(abs_path, source_path):
            parts.append(frame_operations.frame_format(
                frame, interested=interested, frame_lineno=lineno
            ) + "\n")
    # join once instead of quadratic str += inside the loop
    _traceback = "".join(parts)
    if with_normal:
        _traceback = "{}\n{}".format(traceback.format_exc(), _traceback)
    return _traceback
def print_exc(interested=None, source_path=None, with_normal=True):
    """Print format_exc()'s output; fall back to the stdlib traceback
    printer if our own formatter itself blows up."""
    kwargs = dict(
        interested=interested,
        source_path=source_path,
        with_normal=with_normal,
    )
    try:
        print(format_exc(**kwargs))
    except:
        # never let the error reporter raise -- degrade gracefully
        traceback.print_exc()
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,253 | aploium/my_utils | refs/heads/master | /mysql_fast_insert.py | #!/usr/bin/env python3
# coding=utf-8
"""
MySQL快速Insert辅助器
author: 零日
python3 only
"""
import threading
import traceback
import logging
import datetime
import time
import os
import multiprocessing
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
try:
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb
except:
raise ImportError("you should install mysqlclient(recommend) or pymysql")
DEFAULT_CONCURRENCY = 12
DEFAULT_QUEUE_SIZE = DEFAULT_CONCURRENCY * 2 + 10
# SUGGESTED_BATCH_SIZE = 1000
log = logging.getLogger(__name__)
class MysqlFastInsert(object):
    """High-throughput MySQL INSERT helper.

    The caller pushes batches of rows through insert_many(); a pool of
    worker threads (optionally living in a dedicated child process) pops
    the batches, inserts them, and automatically reconnects on failure.
    """
    MODE_EXECUTEMANY = 1  # cursor.executemany(): injection-safe but slow
    MODE_MERGED = 2  # string-merged VALUES: much faster, falls back to executemany
    MODE_BLACKHOLE = 3  # drop everything; for benchmarking

    def __init__(
            self, conn_kwargs, base_sql=None,
            queue_size=DEFAULT_QUEUE_SIZE,
            concurrency=DEFAULT_CONCURRENCY,
            processed=False,
            mode=MODE_MERGED,
            statistic_interval=10,
            error_dump=True, dump_folder="sqldump"
    ):
        """
        :param conn_kwargs: kwargs passed through to MySQLdb.connect(), dict
        :param base_sql: base sql, for example:
            '''
            INSERT INTO `some_table`
            (`id`, `name`, `phone`)
            VALUES -- note! do NOT include the (%s, %s, %s) part after
                   -- VALUES, it is generated automatically
            '''
        :param queue_size: queue size, must be greater than zero; the
            recommended value is concurrency*[2~5]
        :param concurrency: worker count. Raising it helps while it is
            small, but the marginal gain shrinks once it gets large.
        :param processed: whether to run the insert workers in a child process.
            False: worker threads inside the main process (thread mode)
            True: a dedicated child process (process mode); the child still
                  spawns worker threads internally.
            Process mode costs ~20% more CPU and is ~10% slower than thread
            mode, but thread mode cannot use multiple cores, so it is a
            trade-off -- ideally benchmark both and pick the better one.
        :param mode: insert mode
            MODE_EXECUTEMANY: insert via cursor.executemany(); no injection
                risk, but slow
            MODE_MERGED: insert via string concatenation; much faster than
                MODE_EXECUTEMANY but carries a tiny injection risk.
                Values are escaped with sql_escape() before merging, and a
                failed merged insert falls back to MODE_EXECUTEMANY.
            MODE_BLACKHOLE: do nothing and drop all rows; for benchmarking
        :param statistic_interval: minimum seconds between statistics logs
        :param error_dump: when an sql insert fails, dump that sql into
            dump_folder (default: ./sqldump)
        """
        _conn_kw = dict(
            use_unicode=True,
            charset="utf8",
        )
        _conn_kw.update(conn_kwargs)
        self.conn_kwargs = _conn_kw
        self.concurrency = concurrency
        self.processed = processed
        self.mode = mode
        self.base_sql = base_sql
        self.statistic_interval = statistic_interval
        self.error_dump = error_dump
        if queue_size == 0:
            raise ValueError(
                "queue_size should not be 0, "
                "because the queue must be blocked when something goes wrong"
            )
        else:
            self.queue_size = queue_size
        # choose real processes or the thread-backed "dummy" implementation
        if self.processed:
            import multiprocessing
            self._multiprocessing = multiprocessing
        else:
            import multiprocessing.dummy as multiprocessing_dummy
            self._multiprocessing = multiprocessing_dummy
        if self.mode == self.MODE_MERGED:
            self.insert_function = self._insert_merged
        elif self.mode == self.MODE_EXECUTEMANY:
            self.insert_function = self._insert_executemany
        elif self.mode == self.MODE_BLACKHOLE:
            self.insert_function = self._insert_blackhole
        else:
            raise ValueError("wrong mode: {}".format(self.mode))
        self.queue = self._multiprocessing.JoinableQueue(self.queue_size)
        self.subprocess = None
        self.checkpoint = time.time()
        # self.count is shared memory, used for cross-process row counting
        self.count = self._multiprocessing.Value("i", 0, lock=False)
        self.count.value = 0
        # folder for dumping bad sql
        self.dump_folder = os.path.abspath(dump_folder)
        if not os.path.exists(self.dump_folder):
            try:
                os.makedirs(self.dump_folder)
            except:
                log.warning("cannot create folder: {}, bad sql dump is disabled".format(
                    self.dump_folder
                ))
                self.error_dump = False
                self.dump_folder = None
        log.info("MysqlFastInsert init complete")

    def start(self):
        """Spawn the daemonic keeper process/thread that runs the workers."""
        self.subprocess = self._multiprocessing.Process(target=self._run_keeper)
        self.subprocess.daemon = True
        self.subprocess.start()

    def join(self):
        """Block until every queued batch has been processed."""
        if self.subprocess is None:
            raise RuntimeError("You must start() before join()")
        else:
            self.queue.join()

    def insert_many(self, rows, block=True, timeout=None):
        """
        Insert multiple rows (a few hundred per batch works well).

        :param rows: each row matches the VALUES of base_sql above:
            [
                (id, name, phone, ...),
                (id, name, phone, ...),
                (id, name, phone, ...),
            ]
        :param block: passed through to queue.put()
        :param timeout: passed through to queue.put()
        """
        if not rows:
            return
        self.queue.put(rows, block=block, timeout=timeout)
        self.print_statistic()

    def print_statistic(self):
        """Log throughput statistics, at most once per statistic_interval."""
        now = time.time()
        if now - self.checkpoint > self.statistic_interval:
            count = self.count.value
            self.count.value = 0
            delta = now - self.checkpoint
            self.checkpoint = now
            # BUGFIX: compare the measured interval `delta`; the original
            # compared `now - self.checkpoint`, which is always 0 right
            # after the assignment above, so this branch was unreachable.
            if delta > 3 * self.statistic_interval:
                # interval too long, rate statistics would be meaningless --
                # only log the row count
                log.info("inserted {} rows in the past {}s".format(count, round(delta, 3)))
            else:
                log.info(
                    "delta:{}s count:{} speed:{}/s qsize:{} qfull:{} P:{} Th:{}".format(
                        round(delta, 3), count, round(count / delta, 2),
                        self.queue.qsize(), self.queue.full(),
                        multiprocessing.current_process().name,
                        threading.current_thread().name,
                    ))

    def _insert_executemany(self, cur, rows):
        """Insert rows via cursor.executemany() -- safe but relatively slow."""
        # build "(%s,%s,...,%s)" with one placeholder per column
        sql_values_placeholder = "(" + "%s," * (len(rows[0]) - 1) + "%s)"
        sql = self.base_sql + sql_values_placeholder
        try:
            return cur.executemany(sql, rows)
        except:
            # fixed typo: mode name is MODE_EXECUTEMANY, not MODE_EXECUEMANY
            log.error("mysql语句在MODE_EXECUTEMANY执行失败", extra={"sql": sql}, exc_info=True, )
            raise

    def _insert_blackhole(self, cur, rows):
        """Discard the batch; pretend every row was inserted."""
        return len(rows)

    def sql_escape(self, value):
        """Escape one value for string-merged SQL (MODE_MERGED).

        str values are escaped via MySQLdb.escape_string(); bytes are
        decoded to utf-8 first when possible; everything else passes
        through untouched.
        """
        if isinstance(value, str):
            return MySQLdb.escape_string(value).decode("utf-8")
        elif isinstance(value, bytes):
            try:
                return self.sql_escape(value.decode("utf-8"))
            except:
                # undecodable bytes are passed through unescaped
                return value
        else:
            return value

    def _insert_merged(self, cur, rows):
        """Insert rows as one string-merged statement (MODE_MERGED).

        On failure, retries via _insert_executemany() and dumps the bad
        sql for later analysis.
        """
        sql_values = ",".join(
            "({})".format(",".join(
                "\"{}\"".format(self.sql_escape(x)) for x in row
            ))
            for row in rows
        )
        sql = self.base_sql + sql_values
        try:
            return cur.execute(sql)
            # return len(rows)
        except:
            try:
                self._insert_executemany(cur, rows)
            except:
                raise
            else:
                log.warning(
                    "mysql语句在MODE_MERGED执行出错, 但在MODE_EXECUTEMANY执行成功, sql已dump供分析",
                    extra={"sql": sql},
                    exc_info=True,
                )
            finally:
                self.sql_dump(sql, traceback.format_exc())
            # NOTE(review): this re-raises the original error even when the
            # executemany fallback succeeded, which makes the caller treat
            # the batch as failed and reconnect -- confirm this is intended.
            raise

    def _get_connection(self):
        """Keep trying to connect to mysql until it succeeds.

        The program is expected to run for a long time, during which mysql
        may be briefly down, so retry forever with a growing interval.
        """
        sleep_interval = 1
        while True:
            try:
                conn = MySQLdb.connect(**self.conn_kwargs)  # type: MySQLdb.connections.Connection
                cur = conn.cursor()  # type: MySQLdb.cursors.Cursor
                cur.execute('SET NAMES UTF8')
            except:
                log.error("无法连接mysql", exc_info=True)
                time.sleep(sleep_interval)
                sleep_interval += 10
            else:
                return conn, cur

    def re_connect(self, conn):
        """Close the (possibly broken) connection and build a fresh one."""
        try:
            conn.close()
        except:
            pass
        return self._get_connection()

    def _queue_submitting(self):
        """Worker loop: pop batches off the queue and insert them forever."""
        log.info("MysqlFastInsert thread:{} start".format(threading.current_thread()))
        conn, cur = self._get_connection()
        while True:
            try:
                lines = self.queue.get()
            except:
                log.error("mysql-inserter unable to get queue", exc_info=True)
                time.sleep(6)
                continue
            # log.debug("line:", len(lines), lines[:3])
            start_time = time.time()
            try:
                row_count = self.insert_function(cur, lines)
                # row_count = len(lines)
            except MySQLdb.ProgrammingError:
                # e.g. missing table or syntax error -- reconnecting won't help
                log.error(
                    "mysql执行错误 MySQLdb.ProgrammingError! process:{} cursor:{}".format(
                        self._multiprocessing.current_process(),
                        cur),
                    exc_info=True)
                # no reconnect needed
            except:
                log.error(
                    "mysql insert error! process:{} cursor:{}".format(
                        self._multiprocessing.current_process(),
                        cur),
                    exc_info=True)
                conn, cur = self.re_connect(conn)
            else:
                try:
                    conn.commit()
                except:
                    log.error("commit error!", exc_info=True)
                    conn, cur = self.re_connect(conn)
                else:
                    log.debug("mysql successfully inserted: {} rows in {}ms".format(
                        row_count, round((time.time() - start_time) * 1000, 2)))
                    self.count.value += row_count  # cross-process counter
            finally:
                self.queue.task_done()

    def _run_keeper(self):
        """Keeper entry point: spawn the worker threads and wait on them."""
        log.debug("run keeper running at {}".format(self._multiprocessing.current_process()))
        pool = []
        for i in range(self.concurrency):
            p = threading.Thread(
                target=self._queue_submitting,
            )
            p.daemon = True
            p.start()
            pool.append(p)
        for p in pool:
            p.join()

    def sql_dump(self, sql, msg=None):
        """Dump a failed sql statement (and optional meta text) to dump_folder."""
        if not self.error_dump:
            return
        dump_prefix = os.path.join(
            self.dump_folder,
            "sqldump_{}".format(
                datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S_%f')
            )
        )
        try:
            with open(dump_prefix + ".sql", "w", encoding="utf-8") as fw:
                fw.write(sql)
            if msg:
                with open(dump_prefix + ".meta.txt", "w", encoding="utf-8") as fw:
                    fw.write(msg)
        except:
            log.warning("unable to dump bad sql", exc_info=True)
        else:
            log.warning("bad sql has been dumped to {}".format(dump_prefix + ".sql"))
def main():
    """Example entry point: wire an inserter up with placeholder credentials."""
    conn_kwargs = {
        "host": "",
        "user": "",
        "passwd": "",
        "db": "",
        "use_unicode": True,
        "charset": "utf8",
    }
    inserter = MysqlFastInsert(conn_kwargs, base_sql="aaaaaaaaa")
    inserter.start()
    inserter.join()


if __name__ == '__main__':
    main()
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,254 | aploium/my_utils | refs/heads/master | /requestfuzz/mutant.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from future import utils as six
from .datastructure import OrderedMultiDict
from .recursive_parse import *
from .utils import ensure_unicode
try: # for type hint
from typing import Type
from .request import FuzzableRequest
from .payload import Payload
except:
pass
class PayloadFactoryBase(object):  # TODO: needs improvement
    """
    Produces the payloads to be filled into each mutable position.
    The fz is supplied at construction time; .make() generates payloads
    for one specific position.
    Args:
        ori_fz(FuzzableRequest): the original fz
    """
    def __init__(self, ori_fz):
        self.ori_fz = ori_fz
    def make(self, key=None, value=None, place=None, node=None):
        """
        Given a position to be modified, generate the payloads usable there.
        Args:
            key (str): original key at the position
            value (str): original value at the position
            place (str): name of the location
            node (BaseNode): the matching node in the parse tree; a copy
                of the original tree is passed in
        Yields:
            tuple[Payload, dict]:
                a sequence of candidate new values for this position,
                each item being tuple[Payload, meta]
        """
        raise NotImplementedError
class MutantBase(object):
    """
    Base class for generating variants ("mutants") of a fz.

    Args:
        factory_class(Type[PayloadFactoryBase]):
            produces the series of payloads applicable to every position
            that can be modified
        excludes(dict): key patterns that must never be mutated;
            ``partmatch`` entries are case-insensitive substrings,
            ``fullmatch`` entries are exact key names
    """
    EXCLUDES = dict(
        # partmatch is case-insensitive
        partmatch=("csrf", "spm", "__preventcache", "timestamp", "sectoken", "sec_token"),
        fullmatch=("_", "_t", "callback")
    )

    def __init__(self, factory_class=None, excludes=EXCLUDES):
        self.factory_class = factory_class
        self.excludes = excludes

    def make(self, fz):
        """
        Generate variants from fz.

        Args:
            fz (FuzzableRequest):

        Yields:
            FuzzableRequest: a series of fz, each one a copy of the original
        """
        raise NotImplementedError

    def should_skip(self, key=None, value=None):
        """Return True when this candidate position must not be mutated."""
        if isinstance(key, six.string_types):
            lowered = key.lower()
            for fragment in self.excludes['partmatch']:
                if fragment in lowered:
                    return True
        return key in self.excludes['fullmatch']
class ShallowMutant(MutantBase):
    """
    Iterates over query and data and mutates each value in turn.
    Does NOT recurse into nested structures.
    Report-friendly information is stored in the returned fz.meta.
    Warnings:
        Because QueryDict is still imperfect, polluting duplicated keys is
        problematic: duplicate keys are dropped and only one survives, but
        each of their values is still visited in turn.
        eg: a=b&a=c&x=1 --> a=<payload>b and a=<payload>c&x=1
    """
    PLACES = ["query", "data"]
    def make(self, fz):
        """
        Yield one forked FuzzableRequest per (position, payload) pair.
        Args:
            fz (FuzzableRequest):
        """
        factory = self.factory_class(fz)  # type: PayloadFactoryBase
        for place in self.PLACES:
            dic = getattr(fz, place)  # type: OrderedMultiDict
            for key, value in dic.items():
                if self.should_skip(key, value):
                    continue
                payload_iter = factory.make(key, value=value, place=place)
                for payload, meta in payload_iter:
                    # record debug / reporting information in meta
                    meta.update(payload=payload, key=key, value=value, place=place)
                    kw = {"meta": meta, place: {key: payload.content}}
                    yield fz.fork(**kw)
class DeepMutant(MutantBase):
    """Recursively parses query/data and mutates every node of the parse tree."""
    PLACES = ["query", "data"]
    def make(self, fz):
        """
        Yield one forked FuzzableRequest per (tree node, payload) pair.
        Args:
            fz (FuzzableRequest):
        """
        factory = self.factory_class(fz)
        for place in self.PLACES:
            # obtain the raw data; admittedly a bit ugly for now
            if place == "data":
                try:
                    data = ensure_unicode(fz.bin_body)
                except:
                    continue
            elif place == "query":
                data = fz.query_string
            else:
                data = getattr(fz, place)
            root = load(data)
            for node in root.iter_tree():
                if self.should_skip(node.key, node.data):
                    continue
                payload_iter = factory.make(
                    node.key, node.text, place=place, node=node)
                for payload, meta in payload_iter:
                    meta.update(payload=payload, key=node.key, value=node.text, place=place, node=node)
                    new_root = node.fork_tree(payload.content)
                    # write the mutated tree back into a forked request
                    new_fz = fz.fork(meta=meta)
                    if place == "data":
                        new_fz.bin_body = new_root.text.encode("UTF-8")
                    else:
                        setattr(new_fz, place, new_root.data)
                    yield new_fz
class HeadersMutant(MutantBase):
    """Mutates a fixed set of request headers."""
    # NOTE(review): "X-Forward-For" is likely a typo for the de-facto
    # standard "X-Forwarded-For" header -- confirm before changing, since
    # downstream reports may depend on the current name.
    KEYS = ("User-agent", "X-Forward-For", "Referer")
    def make(self, fz):
        """
        Yield one forked FuzzableRequest per (header, payload) pair.
        Args:
            fz (FuzzableRequest):
        """
        payload_factory = self.factory_class(fz)  # type: PayloadFactoryBase
        for key in self.KEYS:
            value = fz.headers.get(key, "")
            for payload, meta in payload_factory.make(
                    key, value=value, place="headers"):
                # headers must not contain \r\n (it would raise), so skip such
                # payloads; if you need to pollute headers with CRLF, set them
                # explicitly instead of relying on injection here
                if "\r" in payload.content or "\n" in payload.content:
                    continue
                meta.update(payload=payload, key=key, value=value)
                new = fz.fork(headers={key: payload.content},
                              meta=meta)
                yield new
class PathMutant(MutantBase):
    """Mutates the entire URL path as a single position."""
    def make(self, fz):
        """
        Yield one forked FuzzableRequest per path payload.
        Args:
            fz (FuzzableRequest):
        """
        payload_factory = self.factory_class(fz)  # type: PayloadFactoryBase
        for payload, meta in payload_factory.make(
                "<path>", value=fz.path, place="path"):
            meta.update(payload=payload, value=fz.path)
            new = fz.fork(path=payload.content, meta=meta)
            yield new
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,255 | aploium/my_utils | refs/heads/master | /unicode_decode.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import sys
try:
import cchardet as chardet
except ImportError:
try:
import chardet
except ImportError:
pass
if sys.version_info[0] == 3:
    # Python 3: str is unicode text, bytes is binary
    binary_type = bytes
    text_type = str
else:
    # Python 2: `unicode` is the text type, plain `str` is binary
    # noinspection PyUnresolvedReferences
    text_type = unicode
    binary_type = str
def unicode_decode(content):
    r"""Best-effort decode of binary content into unicode text.

    Tries UTF-8 first, then GB18030, then whatever encoding chardet
    detects (when chardet is available).

    :param content: the bytes to decode
    :return: tuple(encoding name, decoded unicode text)
    :rtype: (str, str)
    :raises UnicodeError: when every strategy fails

    >>> unicode_decode("简体中文UTF8汉字".encode("utf8"))
    ('UTF-8', '简体中文UTF8汉字')
    >>> unicode_decode("简体中文GBK汉字".encode("gbk"))
    ('GB18030', '简体中文GBK汉字')
    """
    # fixed codec guesses, in priority order
    for codec in ("UTF-8", "GB18030"):
        try:
            return codec, content.decode(codec)
        except:
            pass
    # last resort: statistical detection (NameError if chardet is absent,
    # swallowed like any other failure)
    try:
        detected = chardet.detect(content)["encoding"]
        return detected, content.decode(detected)
    except:
        pass
    raise UnicodeError("unable to decode {}".format(repr(content[:32])))
def ensure_unicode(content):
    """Return *content* as unicode text.

    None and already-unicode values pass through unchanged; binary input
    is decoded via unicode_decode(); anything else is returned as-is.
    """
    if content is None or isinstance(content, text_type):
        return content
    if isinstance(content, binary_type):
        return unicode_decode(content)[1]
    return content
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,256 | aploium/my_utils | refs/heads/master | /requests_raw.py | #!/usr/bin/env python3
# coding=utf-8
r"""
用于获取最原始的http响应原文,
即直接socket中读出的原始响应头、未解开 gzip/chunk 的响应体
工作在SSL的上层, SSL对其透明
工作原理:
hook 掉 requests 的底层 http.client 中的 socket,
每次读取 socket 中数据的时候, 都会额外复制一份出来
由于http.client本身会 *丢弃* 原始数据, 所以只能自己把它存下来
理论上除了额外的内存占用和少许的性能损耗外不会有副作用, 也不会有兼容性风险
使用方法:
首先 monkey_patch() 打 patch
这个 patch 可以在任何时候打, 不像 gevent 一样必须在一开始打,
会影响所有在 patch 之后的 requests 请求
然后像正常一样使用 requests 发起请求, 得到 response
使用 get_raw(response) 获取响应原文
限制:
python3.4+
@零日 <chenze.zcz@alibaba-inc.com>
>>> # BEGIN doctest
>>> monkey_patch()
>>> import requests
>>> import gzip
>>>
>>> # get raw gzipped body
>>> r = requests.get("http://example.com")
>>> raw = get_raw(r)
>>> assert isinstance(raw, bytearray)
>>> assert raw.startswith(b"HTTP/1.1 200 OK\r\n")
>>> dec = gzip.decompress(get_body(raw))
>>> assert dec == r.content
>>> assert "<title>Example Domain</title>" in dec.decode("utf-8")
>>> print("source_ip:{} dest_ip:{}".format(*get_ip(r))) # doctest: +ELLIPSIS
source_ip:(..., ...) dest_ip:(..., 80)
>>>
>>> # chunked encoding
>>> r2 = requests.get("https://www.baidu.com")
>>> raw2 = get_raw(r2)
>>> raw2 # doctest: +ELLIPSIS
bytearray(b'HTTP/1.1 200 OK\r\n...)
>>> assert raw2.startswith(b"HTTP/1.1 200 OK\r\n")
>>> assert b"Transfer-Encoding: chunked" in raw2
>>> dec2 = gzip.decompress(decode_chunked(raw2))
>>> assert dec2 == r2.content
>>> assert b"www.baidu.com" in dec2
>>>
>>> # this url will be 302 redirected to http://example.com/
>>> r3 = requests.get("https://httpbin.org/redirect-to?url=http%3A%2F%2Fexample.com%2F")
>>> raw3 = get_raw(r3)
>>> # notice! the intermediate 302 raw content would NOT be record
>>> assert raw3.startswith(b"HTTP/1.1 200 OK\r\n")
>>> # if you want to record the intermediate result, please use `allow_redirects=False`
>>> r4 = requests.get("https://httpbin.org/redirect-to?url=http%3A%2F%2Fexample.com%2F", allow_redirects=False)
>>> raw4 = get_raw(r4)
>>> assert raw4.startswith(b"HTTP/1.1 302 FOUND\r\n")
"""
__all__ = ("monkey_patch", "get_raw", "get_body", "decode_chunked", "get_ip")
import logging
import functools
import http.client
import io
logger = logging.getLogger(__name__)
_already_patched = False
class HookedBufferedReader(io.BufferedReader):
    """A BufferedReader that keeps a copy of every byte read through it.

    All data passing through read()/readline()/readinto() (plus anything a
    flush() returns) is appended to ``self.dumped``, so the raw wire bytes
    survive even though http.client discards them after parsing.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # raw bytes captured so far
        self.dumped = bytearray()

    def flush(self, *args, **kwargs):
        flushed = super().flush(*args, **kwargs)
        if flushed:
            self.dumped += flushed
        return flushed

    def readline(self, *args, **kwargs):
        line = super().readline(*args, **kwargs)
        self.dumped += line
        return line

    def readinto(self, buffer):
        # Read into a scratch buffer first so the bytes can be recorded,
        # then copy the filled prefix back into the caller's buffer.
        scratch = memoryview(bytearray(len(buffer)))
        count = super().readinto(scratch)
        filled = scratch[:count]
        self.dumped += filled.tobytes()
        buffer[:count] = filled
        return count

    def read(self, *args, **kwargs):
        data = super().read(*args, **kwargs)
        self.dumped += data
        return data
def patch_http_client(raw_func):
    """Wrap ``http.client.HTTPResponse.begin`` so that, before any parsing
    happens, the response's socket reader is replaced with a
    HookedBufferedReader (which records every byte read).

    Also records the local/peer socket addresses on the response object
    as ``source_ip`` / ``dest_ip``, since requests does not expose them.
    """
    @functools.wraps(raw_func)
    def new_func(self, *args, **kwargs):
        """:type self: http_client.HTTPResponse"""
        if isinstance(self.fp, (HookedBufferedReader, io.BytesIO)):
            # fp is already hooked, or is an in-memory io.BytesIO -- skip!
            return raw_func(self, *args, **kwargs)
        self._raw_fp = self.fp  # type: io.BufferedReader
        # record the connection addresses while we are at it;
        # requests itself has no facility for reporting them
        try:
            # for plain HTTP this reaches the underlying socket directly
            self.source_ip = self._raw_fp.raw._sock.getsockname()
            self.dest_ip = self._raw_fp.raw._sock.getpeername()
        except:
            try:
                # for HTTPS the real socket is wrapped one level deeper
                self.source_ip = self._raw_fp.raw._sock.socket.getsockname()
                self.dest_ip = self._raw_fp.raw._sock.socket.getpeername()
            except:
                # best effort only: never let address recording break the request
                # (bare excepts are deliberate here)
                self.source_ip = None
                self.dest_ip = None
        self.fp = HookedBufferedReader(self._raw_fp.raw)
        self.dumped = self.fp.dumped  # type: bytearray
        return raw_func(self, *args, **kwargs)
    return new_func
def monkey_patch():
    """Install the socket-dump hook into http.client (idempotent).

    May be called at any point in the program's lifetime; repeated calls
    (or calls after another copy of this module already patched) are no-ops.
    """
    global _already_patched
    if not _already_patched and not hasattr(http.client.HTTPResponse, "_original_begin"):
        logger.warning("monkey patching!")
        http.client.HTTPResponse._original_begin = http.client.HTTPResponse.begin
        http.client.HTTPResponse.begin = patch_http_client(http.client.HTTPResponse.begin)
        _already_patched = True
def get_raw(resp):
    """Return the raw captured bytes for *resp*, or None when unavailable.

    Args:
        resp (requests.Response): a response obtained after monkey_patch().

    Returns:
        bytearray|None: status line + headers + undecoded body exactly as
        read from the socket, or None when nothing was captured (patch not
        installed, or the response did not come through http.client).
    """
    try:
        return resp.raw._original_response.dumped  # type: bytearray
    except AttributeError:
        # narrowed from a bare except: only "attribute chain missing" means
        # "not captured"; any other error should surface to the caller
        return None
def get_body(data):
    """Return the body portion of a raw HTTP message.

    Args:
        data (bytearray): raw message (status line + headers + body)

    Returns:
        bytearray: everything after the first blank line, or an empty
        bytearray when no header/body separator is present.
    """
    pos = data.find(b"\r\n\r\n")
    if pos == -1:
        return bytearray()
    else:
        return data[pos + 4:]


def decode_chunked(data):
    """Decode a chunked transfer-encoded body.

    from: http://beezari.livejournal.com/190869.html
    modified for python3 compatibility

    Args:
        data (bytearray): either a full raw HTTP response (detected by the
            leading b"HTTP/") or just the chunked body itself.

    Returns:
        bytearray: the de-chunked (but still possibly gzipped) body.
    """
    dec_body = bytearray()
    # of the data payload. you can also parse content-length header as well.
    if data.startswith(b"HTTP/"):
        chunked_body = get_body(data)
    else:
        chunked_body = data
    while chunked_body:
        crlf = chunked_body.find(b"\r\n")
        if crlf == -1:
            break  # malformed: size line is not CRLF-terminated
        # RFC 7230 allows ";name=value" chunk extensions after the size;
        # strip them before parsing the hex size (original crashed on them)
        size_token = bytes(chunked_body[:crlf]).split(b";", 1)[0]
        off = int(size_token, 16)
        if not off:  # zero-size chunk terminates the body
            break
        chunked_body = chunked_body[crlf + 2:]
        dec_body += chunked_body[:off]
        chunked_body = chunked_body[off + 2:]
    return dec_body
def get_addr(resp):
    """Return the (source_ip, dest_ip) pair recorded on the patched response."""
    captured = resp.raw._original_response
    return captured.source_ip, captured.dest_ip


get_ip = get_addr  # legacy alias kept for backward compatibility
if __name__ == "__main__":
    # Self-test: run the doctests embedded in the module docstring.
    # NOTE(review): those doctests hit live sites (example.com / baidu /
    # httpbin), so network access is required and results may vary offline.
    import doctest
    doctest.testmod()
    print("doctest passed")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,257 | aploium/my_utils | refs/heads/master | /version_utils.py | # coding=utf-8
"""
用于版本字符串的转换、比较、范围匹配
用法请看 `test__version_range` 这个函数
依赖:
future
distutils
@aploium
MIT License
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from future import utils as six
import re
import sys
import operator
from distutils.version import LooseVersion as _LooseVersion
# splits "<op><version>" strings, e.g. ">=1.5" -> (">=", "1.5")
RE_SPLIT_COMPARISON = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
# matches runs of whitespace; used to strip all blanks from a string
RE_REMOVE_BLANK = re.compile(r"\s*")
# comparison-operator string -> comparison callable
COMPMAP = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
           ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
# comparison callable -> operator string (for serialization in to_str)
COMPMAP_REVERSE = {v: k for k, v in COMPMAP.items()}
def to_version(version):
    """Coerce *version* to a Version (strings are parsed with blanks stripped);
    anything that is not a string is returned unchanged."""
    if not isinstance(version, six.string_types):
        return version
    return Version(remove_blank(version))
def remove_blank(txt):
    """Strip every whitespace character from *txt* and return the result."""
    return RE_REMOVE_BLANK.sub("", txt)
class Version(_LooseVersion):
    """LooseVersion variant whose three-way comparison degrades gracefully
    when the parsed version tuples are not mutually comparable
    (e.g. a textual part vs a numeric part under Python 3)."""

    def _cmp(self, other):
        # Return -1 / 0 / 1 like LooseVersion._cmp.
        if isinstance(other, str):
            other = Version(other)
        try:
            if self.version == other.version:
                return 0
            if self.version < other.version:
                return -1
            if self.version > other.version:
                return 1
        except TypeError:
            # issues #2
            # There is no reliable way to order a textual version component
            # against a numeric one; to avoid raising an unexpected error,
            # fall back to a naive comparison of the raw version strings.
            if self.vstring == other.vstring:
                return 0
            if self.vstring < other.vstring:
                return -1
            if self.vstring > other.vstring:
                return 1
@six.python_2_unicode_compatible
class VersionCond(object):
    """A single version constraint, e.g. ">=1.2" or "<2.5".

    The special version string "all" matches everything.
    """

    def __init__(self, op, version):
        self.version = to_version(version)
        # accept either an operator string (">=") or a callable from COMPMAP
        self.op = COMPMAP[op] if isinstance(op, six.string_types) else op

    def match(self, version):
        """Return True when *version* satisfies this constraint."""
        if self.version.vstring == 'all':
            return True
        if not version or not isinstance(version, (six.string_types, Version)):
            return False
        return self.op(to_version(version), self.version)

    @classmethod
    def from_str(cls, cond_str):
        """Parse strings like ">= 1.5"; a bare version means exact match."""
        cond_str = remove_blank(cond_str)
        parsed = RE_SPLIT_COMPARISON.search(cond_str)
        if parsed is None:
            # no comparison operator found: require an exact version match
            return cls("==", cond_str)
        return cls(parsed.group(1), parsed.group(2))

    def to_str(self):
        """Serialize; "==" is rendered as the bare version string."""
        prefix = "" if self.op == operator.eq else COMPMAP_REVERSE[self.op]
        return "{}{}".format(prefix, self.version.vstring)

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, repr(self.to_str()))
@six.python_2_unicode_compatible
class CondGroup(object):
    """An AND-combination of VersionCond, e.g. ">=1.5,<1.9"."""

    def __init__(self, conds):
        if isinstance(conds, CondGroup):
            self.conds = conds.conds
        elif isinstance(conds, VersionCond):
            self.conds = [conds]
        elif isinstance(conds, six.string_types):
            self.conds = [VersionCond.from_str(part) for part in conds.split(',')]
        elif not conds:
            # None / empty input -> an empty group, which matches everything
            self.conds = []
        else:
            # an iterable of (op, version) pairs
            self.conds = [VersionCond(op, ver) for op, ver in conds]

    def match(self, version):
        """True only when *version* satisfies every condition in the group."""
        target = to_version(version)
        return all(cond.match(target) for cond in self.conds)

    def to_str(self):
        if self.conds is None:  # defensive; __init__ never leaves it None
            return "all"
        return ",".join(cond.to_str() for cond in self.conds)

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, repr(self.to_str()))
@six.python_2_unicode_compatible
class VersionRange(object):
    """An OR of CondGroups, e.g. ">=1.5,<1.9|2.1".

    An empty range (None / empty input) matches everything.
    """

    def __init__(self, ranges):
        if isinstance(ranges, six.string_types):
            self.ranges = [CondGroup(part) for part in ranges.split('|')]
        elif isinstance(ranges, (list, tuple)):
            self.ranges = [CondGroup(part) for part in ranges]
        elif not ranges:
            self.ranges = []
        else:
            raise TypeError('unknown ranges type')

    def match(self, version):
        """True when any cond-group matches (or when there are no groups)."""
        if not self.ranges:
            return True
        target = to_version(version)
        return any(group.match(target) for group in self.ranges)

    def to_str(self):
        return "|".join(group.to_str() for group in self.ranges)

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, repr(self.to_str()))

    def __eq__(self, other):
        if isinstance(other, VersionRange):
            return self.to_str() == other.to_str()
        if isinstance(other, six.string_types):
            return self.to_str() == other
        return False
def _internal_guess_range(versions):
"""
供下面 guess_range_from_versions() 内部调用,只会分成一段
Args:
versions (list[Version])
"""
lowest = highest = versions[0]
for version in versions[1:]:
if version < lowest:
lowest = version
elif version > highest:
highest = version
return lowest, highest
def guess_range(versions, digits=2):
    """
    Guess a VersionRange from a set of discrete versions.

    Versions whose first *digits* parsed components are equal are merged
    into the same contiguous section.

    Examples:
        (digits=1) "1.1|1.2|1.3|1.4" --> ">=1.1,<=1.4"
        (digits=1) "1.1|1.2|1.3|1.4|2.1|2.2|2.3" --> ">=1.1,<=1.4|>=2.1,<=2.3"
        '1.1.1|1.1.2|1.1.3|1.2|1.2.4|2.0|2.0.2|3.0'
            --> '>=1.1.1,<=1.1.3|>=1.2,<=1.2.4|>=2.0,<=2.0.2|3.0'

    Args:
        versions (list[str]|str): discrete version strings
            ('|'-separated when given as a single string)
        digits (int): how many leading version components form one group

    Returns:
        VersionRange
    """
    if isinstance(versions, six.string_types):
        versions = [Version(x) for x in versions.split('|')]
    else:
        versions = [Version(x) for x in versions]
    versions.sort()
    if not versions:
        raise ValueError('must given at least one version')
    sections = []
    group_buff = [versions[0]]
    for version in versions[1:]:
        # same leading components -> extend the current group,
        # otherwise close it off as a (low, high) section
        if version.version[:digits] == group_buff[0].version[:digits]:
            group_buff.append(version)
        else:
            sections.append(_internal_guess_range(group_buff))
            group_buff = [version]
    # flush the final group
    sections.append(_internal_guess_range(group_buff))
    version_ranges = []
    for low, high in sections:
        if low == high:
            # degenerate section: a single exact version
            cg = low.vstring
        else:
            cg = ">={},<={}".format(low, high)
        version_ranges.append(cg)
    vr = VersionRange(version_ranges)
    return vr
# -----------------------------------------------------
# ------------------- BEGIN TESTS -------------------
# -----------------------------------------------------
def test_version():
    v1 = Version('1.4.9.1')
    v2 = Version('1.4.9a1')
    # this comparison is not reliable, but there is no dependable way
    # to order textual vs numeric parts -- see issues#2
    assert v2 > v1
def test_version_cond():
    for cond in (
        VersionCond(">", "1.5"),
        VersionCond.from_str(">1.5"),
        VersionCond.from_str(">=1.5"),
        VersionCond.from_str(">=1.9"),
        VersionCond.from_str(">=1.10"),
    ):
        assert cond.match("10.0")
        assert cond.match("1.10")
        assert cond.match("1.4.9a1") is False
        assert cond.match("0.9.1p7") is False
    for cond in (
        VersionCond("<", "1.5"),
        VersionCond.from_str("<1.5 "),
        VersionCond.from_str("<= 1.5"),
        VersionCond.from_str("<= 1.9"),
        VersionCond.from_str("<=1.4.9b1"),
        VersionCond.from_str(" <1.4.9b1"),
        VersionCond.from_str("<= 1.4.10 "),
        VersionCond.from_str(" <= 1.4.9a1 "),
    ):
        assert cond.match("10.0") is False
        assert cond.match("1.10") is False
        assert cond.match("1.4.9a1")
        assert cond.match("0.9.1p7")
    for cond in (
        VersionCond("==", "1.4"),
        VersionCond.from_str(" 1.4 "),
        VersionCond.from_str("== 1.4"),
        VersionCond.from_str("!=1.5"),
    ):
        assert cond.match("1.4")
        assert cond.match("1.5") is False
    assert VersionCond.from_str('all').match("any thing!")
def test_version_range():
    for vr in (
        VersionRange(["1.4", "==1.4.1", "1.4.2", "1.5.0", "1.5.1", "1.5.2", "1.6"]),
        VersionRange([">=1.4, <=1.4.2 ", " >=1.5, <1.5.3 ", "==1.6"]),  # spaces allowed
        VersionRange([">=1.4, <=1.4.1 ", "1.4.2", " >=1.5, <1.5.3 ", "1.6"]),
        VersionRange(">=1.4, <=1.4.1 |1.4.2 | >=1.5, <1.5.3 |1.6"),  # string form
        VersionRange(">=1.4,<=1.4.1|1.4.2|>=1.5,<1.5.3|1.6"),
        VersionRange("!=1.4.3,!=1.2.3"),
        VersionRange(["!=1.4.3, !=1.2.3"]),
    ):
        assert vr.match("1.4.1")
        assert vr.match("1.4.2")
        assert vr.match("1.4.3") is False
        assert vr.match("1.2.3") is False
        assert vr.match("1.5.0 ")  # spaces allowed
        assert vr.match("1.5.1")
        assert vr.match(" 1.5.2")
        assert vr.match("1.6")
        assert vr.match(Version("1.6"))
    vr = VersionRange([">1.5, <1.11", "2.5"])
    assert vr.match("1.10")
    assert vr.match("2.5")
    assert vr.match("1.10.b")  # odd version strings are tolerated
    assert vr.match("1.10+")
    assert vr.match("1.10c")
    assert vr.match("1.9c")
    assert vr.match("1.9.x")
    assert vr.match("1.9.*")
    assert vr.match("1.4z") is False
    assert vr.match("2016") is False
    assert vr.match(None) is False
    assert vr.match(1.11) is False
    assert vr.match(object()) is False
    vr = VersionRange(">=2016, <2017")
    assert vr.match("2016春节版")
    vr = VersionRange(">=2016, <2017.1")
    assert vr.match("2016春节版")
    # 'all' and None both match everything
    for vr in (
        VersionRange(None),
        VersionRange([None]),
        VersionRange("all"),
        VersionRange(["all"]),
    ):
        assert vr.match("match anything!")
    # round-trip back to string form
    vr = VersionRange([">=1.4, <=1.4.1 ", "1.4.2", "==1.4.3", " >=1.5, <1.5.3 ", "1.6"])
    assert vr.to_str() == '>=1.4,<=1.4.1|1.4.2|1.4.3|>=1.5,<1.5.3|1.6'
    vr = VersionRange('>=1.4 , <= 1.4.1 | 1.4.2 | >= 1.5 , < 1.5.3 | 1.6 ')  # space
    assert vr.to_str() == '>=1.4,<=1.4.1|1.4.2|>=1.5,<1.5.3|1.6'
    assert vr == '>=1.4,<=1.4.1|1.4.2|>=1.5,<1.5.3|1.6'
    assert vr == VersionRange('>=1.4,<=1.4.1|1.4.2|>=1.5,<1.5.3|1.6')
def test_range_guess():
    vr = guess_range("1.1|1.2|1.3|1.4", digits=1)
    assert vr == '>=1.1,<=1.4'
    vr = guess_range("1.1|1.2|1.3|1.4|2.1|2.2|2.3", digits=1)
    assert vr == '>=1.1,<=1.4|>=2.1,<=2.3'
    vr = guess_range('1.1.1|1.1.2|1.1.3|1.2|1.2.4|2.0|2.0.2|3.0')
    assert vr == '>=1.1.1,<=1.1.3|>=1.2,<=1.2.4|>=2.0,<=2.0.2|3.0'
    vr = guess_range('1.1')
    assert vr == '1.1'
    vr = guess_range('1.1|2.0|3.0')
    assert vr == '1.1|2.0|3.0'
    vr = guess_range('1.1.1|1.1.2|1.1.3|1.2|1.2.4|2.0|2.0.2|3.0'.split("|"))
    assert vr == '>=1.1.1,<=1.1.3|>=1.2,<=1.2.4|>=2.0,<=2.0.2|3.0'
    # input order does not matter
    vr = guess_range(['2.0.2', '1.1.2', '1.2.4', '1.1.3', '1.2', '3.0', '1.1.1', '2.0'])
    assert vr == '>=1.1.1,<=1.1.3|>=1.2,<=1.2.4|>=2.0,<=2.0.2|3.0'
if __name__ == "__main__":
    test_version()
    test_version_cond()
    test_version_range()
    test_range_guess()
    print("all tests passed!")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,258 | aploium/my_utils | refs/heads/master | /timestring.py | #!/usr/bin/env python3
# coding=utf-8
import datetime


def timestring(moment=None, fmt="%Y-%m-%d_%H-%M-%S"):
    """Return *moment* (default: now) as a filesystem-safe timestamp string.

    The original module evaluated the strftime expression at import time
    and discarded the result; wrapping it in a function makes the value
    actually usable while keeping the same format.

    Args:
        moment (datetime.datetime|None): the time to format; None means now().
        fmt (str): strftime format string.

    Returns:
        str: e.g. "2020-01-02_03-04-05"
    """
    if moment is None:
        moment = datetime.datetime.now()
    return moment.strftime(fmt)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,259 | aploium/my_utils | refs/heads/master | /requestfuzz/url.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
from urllib import parse
from . import utils
from .datastructure import QueryDict, to_querydict
@six.python_2_unicode_compatible
class Url(object):
    """
    Url object.

    <scheme>://<netloc>/<path>?<query_string>#<fragment>

    Args:
        data (str|tuple): either a url string, or the 5-tuple
            produced by urlsplit()

    Attributes:
        scheme(str)      : "http"/"https"
        host(str)        : host without the port
        port(int)        : port number
        path(str)        : path, including the leading /
        fragment(str)    : the part after "#", usually absent
        query(QueryDict) : the query in dict form

    Methods:
        filename       : property  file name in the url path    *assignable
        url            : property  the complete url             *assignable
        netloc         : property  host:port; the port is omitted when it
                                   is the default for the scheme
        query_string   : property  the query as a string
        without_query  : property  the url without its query
        without_path   : property  the url without its path (hence no query)
        all_but_scheme : property  everything in the url except the scheme
        path_qs        : property  path plus query_string
        root_domain    : property  the root domain
        ext            : property  the file extension
        tostr()        : convert to the complete url string

    See Also:
        `requestfuzz.tests.test_url`
    """
    def __init__(self, data):
        self._set_url(data)
    @property
    def netloc(self):
        """
        Return host and port; the default port for the scheme is omitted.
        Equivalent to urlsplit's .netloc

        Examples:
            "foo.com:8888"
            "bar.com"
        """
        return utils.make_netloc(self.host, self.scheme, self.port)
    @property
    def query_string(self):
        # re-encode the QueryDict back into "a=1&b=2" form
        return parse.urlencode(self.query)
    @property
    def url(self):
        return self.tostr()
    @url.setter
    def url(self, data):
        self._set_url(data)
    @property
    def without_query(self):
        """
        Return the url without its query.

        Examples:
            "http://foo.com:88/cat.html"
            "https://foo.com/dog.php"
        """
        return parse.urlunsplit((self.scheme, self.netloc, self.path, "", ""))
    @property
    def without_path(self):
        """
        Return the url without its path (hence without query either).

        Examples:
            "http://foo.com:88"
            "https://bar.com"
        """
        return parse.urlunsplit((self.scheme, self.netloc, "", "", ""))
    @property
    def path_qs(self):
        """
        Path plus query string.

        Examples
            <-- http://cat.com:8080/foo/dog.txt?q=1
            --> /foo/dog.txt?q=1
        """
        qs = self.query_string
        if qs:
            return "{}?{}".format(self.path, qs)
        else:
            return self.path
    @property
    def root_domain(self):
        """
        Get the root domain name.

        Examples:
            input: www.ciudad.com.ar
            output: ciudad.com.ar
            input: i.love.myself.ru
            output: myself.ru
        """
        return utils.extract_root_domain(self.host)
    @root_domain.setter
    def root_domain(self, value):
        """
        Set a new root domain.

        Typically used to fake a domain, e.g. turn foo.com into myfoo.com

        Examples:
            ori: http://foo.bar.com/abc
            new: monkey.com
            output: http://foo.monkey.com/abc
        """
        value = value.lstrip(".")  # remove redundant leading dots
        old_root = self.root_domain
        self.host = self.host[:-len(old_root)] + value
    @property
    def filename(self):
        """
        Return the file name (if any) in the url path.

        Examples:
            http://cat.com/foo/dog.txt --> dog.txt
        """
        return self.path[self.path.rfind("/") + 1:]
    @filename.setter
    def filename(self, filename):
        """
        Set the file name in the url path.

        Examples:
            >>> url = Url("http://cat.com/bar/dog.txt")
            >>> url.filename = "foo.jpg"
            >>> str(url)
            'http://cat.com/bar/foo.jpg'
        """
        self.path = self.path[:self.path.rfind("/") + 1] + filename
    @property
    def ext(self):
        """
        Return the extension of the url's file name, including the dot.

        Examples:
            http://cat.com/foo/dog.txt --> .txt
        """
        filename = self.filename
        pos = filename.rfind('.')
        if pos == -1:
            return ""
        else:
            return filename[pos:]
    @property
    def all_but_scheme(self):
        """
        Return everything in the url except the scheme.

        Examples:
            <-- http://cat.com:8080/foo/dog.txt?q=1
            --> cat.com:8080/foo/dog.txt?q=1
        """
        # urlunsplit yields a leading "//" here, which we strip manually
        _with_slash = parse.urlunsplit(
            ("", self.netloc, self.path, self.query_string, self.fragment))
        return _with_slash[2:]
    def tostr(self):
        # Reassemble the full url string from the stored components.
        return parse.urlunsplit((self.scheme, self.netloc, self.path, self.query_string, self.fragment))
    def split(self):
        # Return the urlsplit-style 5-tuple view of this Url.
        # noinspection PyArgumentList
        return parse.SplitResult(self.scheme, self.netloc, self.path, self.query_string, self.fragment)
    def _set_url(self, data):
        """Re-populate every attribute from *data*, overwriting old values."""
        # split the url into its components
        sp = self._urlsplit(data)
        self.scheme = sp.scheme
        self.host = sp.hostname
        self.path = sp.path
        self.fragment = sp.fragment
        if sp.port:
            self.port = sp.port
        else:
            # no explicit port: fall back to the scheme's default
            self.port = {"http": 80, "https": 443}.get(self.scheme, None)
        # convert the query string into a QueryDict
        query = sp.query
        self.query = to_querydict(query)
    def __str__(self):
        return self.tostr()
    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, repr(self.tostr()))
    def __hash__(self):
        return hash(self.url)
    def __eq__(self, other):
        # compares via the full url string, so Url == str also works
        return self.url == other
    def __contains__(self, item):
        return item in self.url
    def __iter__(self):
        return iter(self.url)
    @staticmethod
    def _urlsplit(data):
        """
        Split url-like input into a 5-tuple.

        Args:
            data (str|tuple)

        Returns:
            parse.SplitResult: the split 5-tuple
        """
        if utils.like_list(data):
            assert len(data) == 5
            # noinspection PyArgumentList
            return parse.SplitResult(*data)
        elif isinstance(data, six.string_types):
            data = utils.ensure_unicode(data)
            return parse.urlsplit(data)
        elif isinstance(data, Url):
            return data.split()
        else:
            raise TypeError("unknown type: {}, need str or tuple".format(type(data)))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,260 | aploium/my_utils | refs/heads/master | /err_hunter/demo3.py | #!/usr/bin/env python3
# coding=utf-8
# Demo of err_hunter's extended log levels below DEBUG
# (verbose / trace / noise / lowest).
import err_hunter
# configure colored console output down to the custom "NOISE" level
err_hunter.colorConfig("NOISE")
log = err_hunter.getLogger()
log.info("info")
log.verbose("verbose is lower than info but higher than debug")
log.debug("debug level")
log.trace("trace is lower than debug level")
log.noise("noise is more lower")
log.lowest("the lowest level, this will not be displayed here")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,261 | aploium/my_utils | refs/heads/master | /requestfuzz/payload.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from future import utils as six
import copy
try:
from typing import Callable
except:
pass
from .utils import ensure_unicode
@six.python_2_unicode_compatible
class Payload(object):
    """
    Container holding a single fuzzing payload.

    Format placeholder syntax: ``[>name<]``

    Args:
        content(str|bytes|unicode):
            The payload text itself; may contain format markers, e.g.
            "http://[>reqid<].hacker.com"
            Markers are substituted in :meth:`format`.
        pattern(str|bytes|unicode|Callable):
            Pattern that shows up in a positive (vulnerable) response, e.g.
            "root:x:0:0:root:"
            A custom callable is also accepted, with the signature:
                def match(self, resp) --> bool
            where ``resp`` is the corresponding response, i.e. a
            ``requests.Response`` instance.
        places(tuple[str]):
            Where this payload may be applied; empty means unrestricted.
            eg: ["path", "headers"]
            Known values: query data headers path (more may be added)

    Examples:
        Payload("/etc/password", "root:x:0:0:root:")
        Payload("[>ori_val<]@[>reqid<].hacker.com")  # no pattern

    Warnings:
        Do not reorder the constructor parameters during development:
        :meth:`build` passes tuples/lists through positionally.

    Notes:
        Supported format markers:
            reqid:
                Global request id, typical format:
                [prefix][3 letters][serial][3 letters]
                e.g. xce123frg -- used directly as a marker string in openfuzz
            ori_val:
                Original value of the tainted parameter,
                e.g. for id=233 the ori_val is "233"
            url_no_path:
                Url without its path, i.e. the url prefix, equivalent to
                `fuzzable.Url.without_path`
                e.g. http://cat.com
            # extend as needed
    """

    def __init__(self, content, pattern=None, places=None):
        self.content = content
        self.pattern = pattern  # may also be a callable, see class docstring
        self.places = places

    def format(self, **kwargs):
        """
        Format the payload and return a NEW instance (self is untouched).

        Args:
            **kwargs: marker name -> replacement value

        Returns:
            Payload: freshly formatted copy
        """
        new = copy.deepcopy(self)
        new._iformat(**kwargs)
        return new

    def _iformat(self, **kwargs):
        """In-place formatting; mutates ``self.content``."""
        _ctt = self.content
        for k, v in kwargs.items():
            # sentry #1868 #1884: None would break str.replace
            if v is None:
                v = ""
            elif isinstance(v, six.binary_type):  # sentry #1910
                try:
                    v = ensure_unicode(v)
                except UnicodeError:
                    # undecodable bytes: substitute an empty string
                    v = ""
            elif not isinstance(v, six.text_type):
                v = str(v)
            _ctt = _ctt.replace("[>" + k + "<]", v)
        self.content = _ctt

    def __str__(self):
        return "{}<{}>".format(self.__class__.__name__, repr(self.content))

    def __repr__(self):
        if self.pattern:
            return "{}({}, {})".format(self.__class__.__name__, repr(self.content), repr(self.pattern))
        else:
            return "{}({})".format(self.__class__.__name__, repr(self.content))

    def __eq__(self, other):
        # NOTE: defining __eq__ without __hash__ makes instances unhashable
        # on py3; kept as-is since payloads are held in lists, not sets.
        if isinstance(other, Payload):
            return other.content == self.content and other.pattern == self.pattern
        elif isinstance(other, six.string_types):
            return self.content == other
        else:
            return False

    @classmethod
    def build(cls, obj):
        """Build a Payload from a Payload / str / tuple / list / dict.

        Raises:
            TypeError: for unsupported input types.  (Previously this
                fell through and returned None, hiding caller bugs.)
        """
        if isinstance(obj, Payload):
            return copy.deepcopy(obj)
        elif isinstance(obj, six.string_types):
            return cls(obj)
        elif isinstance(obj, (tuple, list)):
            return cls(*obj)
        elif isinstance(obj, dict):
            return cls(**obj)
        else:
            raise TypeError("cannot build Payload from {!r}".format(type(obj)))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,262 | aploium/my_utils | refs/heads/master | /url_dedup.py | #!/usr/bin/env python3
# coding=utf-8
"""
提供带有归一化的 url 去重功能
所有流量生成FZ类,进行参数循环解析生成参数list,去除所有无用参数
使用uri与参数list的key作为标识去重
相同的uri与完全相同的参数list的key的FZ为同一个请求
FZ 输出,重新入库
Requirements:
pybloom-live
future
pybloomfiltermmap [可选, 仅在linux下有, 能快很多]
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from future import utils as six
if six.PY3:
from urllib import parse
else:
from future.backports.urllib import parse
try:
from pybloomfilter import BloomFilter
except ImportError:
from pybloom_live import BloomFilter
class UrlDedup(object):
    """
    URL deduplication with normalization.

    Whitelist and blacklist:
        Values of whitelisted keys are preserved: normally "action=put"
        would be reduced to "action=", but for a whitelisted key the value
        survives, i.e. "action=put" is kept intact.
        Blacklisted keys are dropped entirely.

    Examples:
        >>> ud = UrlDedup()
        >>> ud.occurs("http://cat.com/foo")
        False
        >>> ud.occurs("http://cat.com/foo")  # second occurrence returns True
        True
        >>> ud.occurs("http://cat.com/foo?id=1")  # unseen query key
        False
        >>> ud.occurs("http://cat.com/foo?id=233")  # same query key
        True

    With capacity=5000000 and err=0.001 the filter uses roughly 8MB of RAM.
    """
    WHITELIST = frozenset(["action", "Action", "method"])
    BLACKLIST = frozenset([
        'spm', '_spm', '__preventCache', '_t',
        'timestamp', '_timestamp', '__timestamp',
        '_'
    ])

    def __init__(self, capacity=5000000, err_rate=0.001,
                 whitelist=WHITELIST, blacklist=BLACKLIST,
                 ):
        self.bloom = BloomFilter(capacity, err_rate)
        self.whitelist = whitelist
        self.blacklist = blacklist

    def _normalize(self, url):
        """
        Convert a url to the canonical form fed into the bloom filter.

        Drops query values, sorts query keys, and removes the fragment.

        Examples:
            http://cat.com/foo?z=x&a=b&c=d    sort keys and drop values
                --> http://cat.com/foo?a=&c=&z=
            http://cat.com/foo#cat            drop the fragment
                --> http://cat.com/foo
            http://cat.com/foo                kept as-is
                --> http://cat.com/foo

            Because query keys are sorted, the following two urls are
            *equivalent*:
                http://cat.com/foo?a=1&b=2
                http://cat.com/foo?b=2&a=1

            Keys are case-SENSITIVE, so the following two are *different*:
                http://cat.com/foo?A=1
                http://cat.com/foo?a=1

            White/blacklist (action whitelisted, spm blacklisted):
                http://cat.com/foo?action=put&spm=12345
                --> http://cat.com/foo?action=put
        """
        try:
            _sp = parse.urlsplit(url)
        except:  # url failed to parse, return it unchanged
            return url

        if not _sp.query:
            if not _sp.fragment:
                return url
            else:
                # strip the fragment
                return parse.urlunsplit((_sp.scheme, _sp.netloc, _sp.path, "", ""))

        # ---- normalize the query -----
        try:
            query = parse.parse_qsl(_sp.query, True)
        except:
            return url
        query.sort()
        normalized_query = []
        for key, value in query:
            if key in self.blacklist:
                continue
            elif key in self.whitelist:
                pass
            else:
                value = ""
            normalized_query.append((key, value))
        normalized_query = parse.urlencode(normalized_query)

        return parse.urlunsplit((_sp.scheme, _sp.netloc, _sp.path, normalized_query, ""))

    def occurs(self, url, auto_add=True):
        """
        Report whether this url was seen before, and (by default) record
        it as seen.  See the class docstring for examples.

        Returns:
            bool: whether the url occurred previously
        """
        normalized_url = self._normalize(url)
        if isinstance(url, six.text_type):
            normalized_url = normalized_url.encode("UTF-8")

        if auto_add:
            return self.bloom.add(normalized_url)
        else:
            return normalized_url in self.bloom

    def __contains__(self, url):
        return self.occurs(url, auto_add=False)

    def add(self, url):
        return self.occurs(url)
def test_url_dedup():
    """Self-test for UrlDedup.

    NOTE: the assertions are order-dependent — the bloom filter's state
    evolves with every occurs() call, so do not reorder them.
    """
    ud = UrlDedup()

    # basics
    assert ud.occurs("http://cat.com") is False
    assert ud.occurs("http://cat.com") is True
    assert ud.occurs("http://cat.com/foo") is False
    assert ud.occurs("http://cat.com/foo") is True
    assert ud.occurs("http://cat.com/foo#frag") is True  # fragment stripped
    assert ud.occurs("http://cat.com") is True

    # query handling
    assert ud.occurs("http://cat.com/?a=1&b=2&c=3") is False
    assert ud.occurs("http://cat.com/?a=4&b=5&c=") is True  # same query keys
    assert ud.occurs("http://cat.com/?A=1&b=2&c=3") is False  # keys are case-sensitive
    assert ud.occurs("http://cat.com/?b=1&a=2&c=3") is True  # key order irrelevant
    assert ud.occurs("http://cat.com/?a=1&b=2") is False  # missing c -> considered different
    assert ud.occurs("https://cat.com/?a=1&b=2&c=3") is False  # http vs https
    assert ud.occurs("https://cat.com/?a=1&b=2&c=3#aaa") is True  # fragment stripped
    assert ud.occurs("https://cat.com/path?a=1&b=2&c=3") is False  # different path

    # whitelist
    assert ud._normalize("http://cat.com/?action=put&id=1") \
        == "http://cat.com/?action=put&id="  # action=put kept intact
    assert ud.occurs("http://cat.com/?action=put&id=1") is False  # action is whitelisted
    assert ud.occurs("http://cat.com/?action=get&id=1") is False  # a different action counts as different
    assert ud.occurs("http://cat.com/?action=put&id=2") is True  # changing a non-action param: same url

    # blacklist
    assert ud._normalize("http://cat.com/?spm=1234&id=1") \
        == "http://cat.com/?id="  # spm got swallowed
    assert ud.occurs("http://cat.com/?spm=1234&id=1") is False
    assert ud.occurs("http://cat.com/?id=1") is True  # with spm removed: already seen
    assert ud.occurs("http://cat.com/?_t=777&id=1") is True  # add another blacklisted key
    assert ud.occurs("http://cat.com/?_t=888&id=1&spm=999") is True

    # query key re-ordering
    assert ud._normalize("http://cat.com/?y=2&x=1&z=3") \
        == "http://cat.com/?x=&y=&z="
    assert ud.occurs("http://cat.com/?x=1&y=2&z=3") is False
    assert ud.occurs("http://cat.com/?y=1&x=2&z=3") is True  # reordered params

    # bulk behaviour with many urls
    _total = 100000
    _sum = sum(ud.occurs("http://dog.com/?id_{}=1".format(i)) for i in range(_total))
    assert _sum < _total / 500.0, _sum / _total  # a tiny false-positive rate is tolerated
    for i in range(_total):
        assert ud.occurs("http://dog.com/?id_{}=1".format(i)) is True

    assert ud.occurs("http://cat.com/?a=4&b=5&c=") is True  # finally, re-check an earlier url
if __name__ == '__main__':
    # run the self-test when executed directly (no pytest required)
    test_url_dedup()
    print("all tests passed!")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,263 | aploium/my_utils | refs/heads/master | /err_hunter/err_hunter/myinspect.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import sys
import inspect
# Version-compat shims: expose ``_unwrap`` and a patched ``getblock`` on
# both py2 and py3.  Unlike the stdlib, this ``getblock`` returns ALL the
# given lines rather than just the first block — presumably so callers
# also see trailing context (TODO confirm with callers of getsourcelines).
if sys.version_info[0] == 3:
    _unwrap = inspect.unwrap


    def getblock(lines):
        """Extract the block of code at the top of the given list of lines."""
        # Same token walk as the builtin inspect.getblock: it swallows
        # EndOfBlock/IndentationError, but the return value differs.
        blockfinder = inspect.BlockFinder()
        try:
            tokens = inspect.tokenize.generate_tokens(iter(lines).__next__)
            for _token in tokens:
                blockfinder.tokeneater(*_token)
        except (inspect.EndOfBlock, IndentationError):
            pass
        return lines  # different to builtin inspect is here

else:
    # copied from python3.6 inspect (py2's inspect has no ``unwrap``)
    def _unwrap(func, stop=None):
        if stop is None:
            def _is_wrapper(f):
                return hasattr(f, '__wrapped__')
        else:
            def _is_wrapper(f):
                return hasattr(f, '__wrapped__') and not stop(f)
        f = func  # remember the original func for error reporting
        memo = {id(f)}  # Memoise by id to tolerate non-hashable objects
        while _is_wrapper(func):
            func = func.__wrapped__
            id_func = id(func)
            if id_func in memo:
                raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
            memo.add(id_func)
        return func


    # from python 2.7 inspect (same "return all lines" patch as above)
    def getblock(lines):
        """Extract the block of code at the top of the given list of lines."""
        blockfinder = inspect.BlockFinder()
        try:
            inspect.tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
        except (inspect.EndOfBlock, IndentationError):
            pass
        return lines
def getsourcelines(obj):
    """Return ``(source_lines, start_lineno)`` for *obj*, following wrappers.

    Mirrors ``inspect.getsourcelines``, but goes through the patched
    ``getblock`` above, which keeps every trailing line instead of
    stopping at the end of the first block.
    """
    target = _unwrap(obj)
    all_lines, start = inspect.findsource(target)
    if inspect.ismodule(target):
        return all_lines, 0
    return getblock(all_lines[start:]), start + 1
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,264 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_rec_parse.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from future.backports.urllib import parse
import copy
import json
from requestfuzz import QueryDict
from requestfuzz.recursive_parse import *
def test_basic_form():
    """A flat urlencoded form parses into a FormNode with PlainNode leaves."""
    pairs = [
        ("a", "1"),
        ("b", "2"),
        ("a", "3"),
        ("d", "4"),
        ("f", ""),
    ]
    encoded = parse.urlencode(pairs)
    root = load(encoded)
    assert isinstance(root, FormNode)
    assert root.data == QueryDict([('a', '1'), ('b', '2'), ('a', '3'), ('d', '4'), ('f', '')])
    assert root.text == encoded
    for pos, child in enumerate(root.iter_tree()):
        assert isinstance(child, PlainNode)
        assert child.key == pairs[pos][0]
        assert child.data == pairs[pos][1]
        assert child.index_in_parent == pos
def test_form_modify():
    """reload() on a child node rewrites that value in place."""
    pairs = [
        ("a", "1"),
        ("b", "2"),
        ("a", "3"),
        ("d", "4"),
        ("f", ""),
    ]
    root = load(parse.urlencode(pairs))
    root.children[1].reload("foo")  # b=foo
    assert root.text == "a=1&b=foo&a=3&d=4&f="
    assert root.data == QueryDict([('a', '1'), ('b', 'foo'), ('a', '3'), ('d', '4'), ('f', '')])
    assert root.children[1].data == "foo"
    assert root.children[1].index_in_parent == 1
def test_form_fork():
    """fork_tree() yields a modified copy while the original tree is untouched."""
    pairs = [
        ("a", "1"),
        ("b", "2"),
        ("a", "3"),
        ("d", "4"),
        ("f", ""),
    ]
    root = load(parse.urlencode(pairs))
    # root of the forked tree
    forked = root.children[1].fork_tree("foo")  # b=foo
    assert forked.text == "a=1&b=foo&a=3&d=4&f="
    assert forked.data == QueryDict([('a', '1'), ('b', 'foo'), ('a', '3'), ('d', '4'), ('f', '')])
    assert forked.children[1].data == "foo"
    assert forked.children[1].index_in_parent == 1
    # the original tree stays unchanged
    assert root.data == QueryDict([('a', '1'), ('b', '2'), ('a', '3'), ('d', '4'), ('f', '')])
    assert root.children[1] is not forked.children[1]
def test_complex():
    """Deeply nested payload: JSONP wrapping JSON that embeds an urlencoded
    form, which in turn embeds more JSON — exercised via load()."""
    js = {
        "monkey": "cat",
        "aform": parse.urlencode([
            ("choice", 17),
            ("choice", 18),
            ("choice", parse.quote("test+1fsf")),
            ("choice", "ZnNmc2Q="),
            ("json", json.dumps({"chained": "json2", "aaa": "bbb"}))
        ]),
        "foo": ["b", "a", {"b": "c", "d": "e"}, "this=is&a=form"],
        "bar": False,
        "ttt": None,
        "key-with.a-dot": "value-with.a-dot",
        "中文": "中文"
    }
    nested = r"""_callback({})""".format(json.dumps(js, ensure_ascii=False))
    root = load(nested)
    assert isinstance(root, JSONPNode)
    # exact reprs pin down key, parent, depth and parsed data of each node
    assert str(root["monkey"]) \
        == "PlainNode<key='monkey' parent='<root>' depth=1 data='cat'>"
    assert str(root["bar"]) \
        == "PlainNode<key='bar' parent='<root>' depth=1 data=False>"
    assert str(root["ttt"]) \
        == "PlainNode<key='ttt' parent='<root>' depth=1 data=None>"
    assert str(root["key-with.a-dot"]) \
        == "PlainNode<key='key-with.a-dot' parent='<root>' depth=1 data='value-with.a-dot'>"
    assert str(root["中文"]) == "PlainNode<key='中文' parent='<root>' depth=1 data='中文'>"
    assert str(root["aform"]) \
        == """FormNode<key='aform' parent='<root>' depth=1 data=QueryDict([('choice', '17'), ('choice', '18'), ('choice', 'test%2B1fsf'), ('choice', 'ZnNmc2Q='), ('json', '{"chained": "json2", "aaa": "bbb"}')])>"""
    assert str(root["foo"]) \
        == "JSONNode<key='foo' parent='<root>' depth=1 data=['b', 'a', {'b': 'c', 'd': 'e'}, 'this=is&a=form']>"

    # exercise fork_tree: changing a deep leaf re-serializes all ancestors
    new_root = root["aform"]["json"]["aaa"].fork_tree("doge")
    _node = new_root["aform"]["json"]["aaa"]
    assert _node.data == "doge"
    assert _node.parent.data == {'aaa': 'doge', 'chained': 'json2'}
    assert str(_node.parent.parent) \
        == """FormNode<key='aform' parent='<root>' depth=1 data=QueryDict([('choice', '17'), ('choice', '18'), ('choice', 'test%2B1fsf'), ('choice', 'ZnNmc2Q='), ('json', '{"chained": "json2", "aaa": "doge"}')])>"""

    # fork_tree while changing both the key and the value
    new_root = root["aform"]["json"]["aaa"].fork_tree("doge2", key="kite")
    _node = new_root["aform"]["json"]["kite"]
    assert "aaa" not in new_root["aform"]["json"]  # key was renamed
    assert _node.data == "doge2"
    assert _node.parent.data == {'kite': 'doge2', 'chained': 'json2'}

    # the original nodes are unchanged after forking
    assert str(root["aform"]["json"]["aaa"]) \
        == "PlainNode<key='aaa' parent='json' depth=3 data='bbb'>"
    assert root["aform"]["json"]["aaa"].index_in_parent == 1
    assert root["aform"]["json"]["aaa"].abskey \
        == ('<root>', 'aform#1', 'json#4', 'aaa#1')
    assert str(root["foo"]["2"]) \
        == "JSONNode<key='2' parent='foo' depth=2 data={'b': 'c', 'd': 'e'}>"

    # walk every leaf; the "urlencode"/"base64" entries suggest quoted and
    # base64-looking values are recursively decoded into their own nodes —
    # NOTE(review): confirm against recursive_parse
    all_leaves = [
        "monkey",
        "choice", "choice",
        "chained", "aaa", "urlencode", "base64",
        "0", "1", "this",
        "b", "a", "d",
        "bar", "ttt", "key-with.a-dot", "中文",
    ]
    _all_leaves = copy.copy(all_leaves)
    for leaf in root.iter_all_leaves():
        # print(leaf, leaf.abskey)
        _all_leaves.remove(leaf.key)
    assert not _all_leaves
if __name__ == '__main__':
    # run the self-tests when executed directly (no pytest required)
    test_basic_form()
    test_form_modify()
    test_form_fork()
    test_complex()
    print("all tests passed")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,265 | aploium/my_utils | refs/heads/master | /err_hunter/demo2.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import logging
import err_hunter
# Install err_hunter's colored logging configuration globally.
err_hunter.colorConfig()
logger = err_hunter.getLogger(__name__)
logger.info("some info")
logger.warning("some warning")
# A plain stdlib logger goes through the same root configuration,
# so its output should come out colored as well.
another_logger = logging.getLogger("yet_another_logger")
another_logger.info("this should be colored")
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,266 | aploium/my_utils | refs/heads/master | /requestfuzz/tests/test_url.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import six
from urllib import parse
from requestfuzz import Url
def test_url_basic():
    """Each accessor of Url exposes the expected url component."""
    u = Url("http://example.com:8080/foo/bar.php?q=cat&q=dog#frag")
    # round-tripping
    assert u == str(u) == u.url == "http://example.com:8080/foo/bar.php?q=cat&q=dog#frag"
    assert u.all_but_scheme == 'example.com:8080/foo/bar.php?q=cat&q=dog#frag'
    # path pieces
    assert u.ext == ".php"
    assert u.filename == 'bar.php'
    assert u.path == '/foo/bar.php'
    assert u.path_qs == '/foo/bar.php?q=cat&q=dog'
    # authority
    assert u.host == "example.com"
    assert u.netloc == "example.com:8080"
    assert u.port == 8080
    assert u.root_domain == 'example.com'
    # scheme / query / fragment
    assert u.scheme == "http"
    assert tuple(u.query.items()) == (('q', 'cat'), ('q', 'dog'))
    assert u.query_string == 'q=cat&q=dog'
    assert u.fragment == 'frag'
    # truncated forms
    assert u.without_path == 'http://example.com:8080'
    assert u.without_query == 'http://example.com:8080/foo/bar.php'
def test_url_no_scheme():
    """Protocol-relative ("//host...") urls parse with an empty scheme."""
    bare = Url("//example.com")
    assert bare.tostr() == str(bare) == '//example.com'
    assert bare.scheme == ""
    assert bare.host == "example.com"
    assert bare.port is None

    with_path = Url("//example.com/x")
    assert with_path.tostr() == '//example.com/x'
    assert with_path.scheme == ""
    assert with_path.host == "example.com"
    assert with_path.path == "/x"
    assert with_path.port is None

    with_port = Url("//example.com:233/x")
    assert with_port.tostr() == '//example.com:233/x'
    assert with_port.scheme == ""
    assert with_port.host == "example.com"
    assert with_port.path == "/x"
    assert with_port.port == 233
def test_not_url():
    """A bare path with no scheme/netloc leaves host and port unset."""
    url = Url("some_path")
    assert url.tostr() == "some_path"
    assert url.scheme == ""
    # NOTE(review): the original asserted ``url.host is None`` twice in a
    # row; the duplicate line was dropped.
    assert url.host is None
    assert url.port is None
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,267 | aploium/my_utils | refs/heads/master | /timeoutdict.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals, division

import collections
import time

try:  # Python 3.3+: the mapping ABCs live in collections.abc
    from collections import abc as _collections_abc
except ImportError:  # Python 2: they are exposed directly on collections
    _collections_abc = collections
__version__ = (1, 3, 0)
class TimeoutDict(_collections_abc.MutableMapping):
    """An insertion-ordered mapping whose entries expire ``max_age`` seconds
    after they were written, optionally capped at ``max_len`` entries
    (the oldest entry is evicted when the cap is hit).

    >>> import time
    >>> td = TimeoutDict(1)
    >>> td["cat"] = "foobar"
    >>> assert td["cat"] == "foobar"
    >>> time.sleep(0.5)
    >>> td["dog"] = 42
    >>> assert td["cat"] == "foobar"
    >>> assert td.get("cat") == "foobar"
    >>> assert td.get("non-exist", "a") == "a"
    >>> assert tuple(td.keys()) == ("cat", "dog")
    >>> assert tuple(td.values()) == ("foobar", 42)
    >>> assert tuple(td.items()) == (("cat","foobar"), ("dog",42))
    >>> assert len(td) == 2
    >>> assert td["dog"] == 42
    >>> time.sleep(0.6)
    >>> assert "cat" not in td
    >>> assert td["dog"] == 42
    >>> time.sleep(0.5)
    >>> assert "dog" not in td
    >>> td["x"] = 1
    >>> del td["x"]
    >>> assert "x" not in td
    >>> # test maxlen
    >>> td = TimeoutDict(1, max_len=2)
    >>> td.update({1:1, 2:2, 3:3})
    >>> assert len(td) == 2
    >>> td[4]=4
    >>> td[5]=5
    >>> assert len({1, 2, 3} & td.keys()) == 0, td
    """

    # noinspection PyMissingConstructor
    def __init__(self, max_age, max_len=0, **kwargs):
        """
        :param max_age: seconds an entry stays readable after being written
        :param max_len: maximum number of entries; 0 means unbounded
        """
        assert max_age >= 0
        assert max_len >= 0
        # key -> (insert_timestamp, value); OrderedDict preserves insertion
        # order, so the leftmost entry is always the oldest one.
        self.data = collections.OrderedDict()
        # Timestamp of the (approximate) oldest live entry; lets
        # check_expire() bail out cheaply when nothing can have expired yet.
        self.oldest_time = time.time()
        self.max_age = max_age
        self.max_len = max_len

    def oldest_item(self, with_time=False):
        """Return the oldest ``(key, value)`` pair, or
        ``(key, value, insert_time)`` when *with_time* is true.

        Raises ``StopIteration`` on an empty dict (no expiry check is done).
        """
        key, time_value = next(iter(self.data.items()))
        time_, value = time_value
        if with_time:
            return key, value, time_
        else:
            return key, value

    def check_expire(self):
        """Drop every entry older than ``max_age``.

        :return: the number of entries removed
        """
        now = time.time()
        if not self.oldest_time \
                or now - self.oldest_time < self.max_age:
            # Fast path: even the oldest entry is still fresh.
            return 0
        del_list = []
        for key, time_value in self.data.items():  # scan oldest -> newest
            if now - time_value[0] > self.max_age:
                del_list.append(key)
            else:
                # First still-fresh entry: everything after it is fresher,
                # so it becomes the new expiry watermark.
                self.oldest_time = time_value[0]
                break
        else:  # no break: every key expired, reset the watermark
            self.oldest_time = time.time()
        for key in del_list:
            del self.data[key]
        return len(del_list)

    def __getitem__(self, key):
        self.check_expire()
        return self.data[key][1]

    def __contains__(self, key):
        self.check_expire()
        return key in self.data

    def __delitem__(self, key):
        self.check_expire()
        del self.data[key]

    def __len__(self):
        self.check_expire()
        return len(self.data)

    def __iter__(self):
        self.check_expire()
        return iter(self.data)

    def keys(self):
        self.check_expire()
        return self.data.keys()

    def values(self):
        """Yield live values (timestamps stripped)."""
        self.check_expire()
        return (x[1] for x in self.data.values())

    def items(self):
        """Yield live ``(key, value)`` pairs (timestamps stripped)."""
        self.check_expire()
        return ((k, v[1]) for k, v in self.data.items())

    def __setitem__(self, key, item):
        if key in self.data:
            # Refreshing an existing key: remove it first so the re-insert
            # lands at the newest end.  (Previously the key kept its old
            # position while getting a new timestamp, which broke the
            # oldest-first ordering that check_expire() depends on, and a
            # refresh at capacity could wrongly evict an unrelated key.)
            del self.data[key]
        elif self.max_len and len(self.data) >= self.max_len:
            # At capacity: evict the oldest entry to make room.
            self.data.popitem(last=False)
        self.data[key] = (time.time(), item)

    def copy(self):
        """Return a shallow copy sharing no internal state with the original.

        (The previous implementation forgot to ``return new`` -- it always
        returned ``None`` -- and also dropped ``max_len``.)
        """
        new = self.__class__(self.max_age, max_len=self.max_len)
        new.oldest_time = self.oldest_time
        new.data = self.data.copy()
        return new

    def __repr__(self):
        return "{}<{}>".format(self.__class__.__name__, repr(self.data))
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,268 | aploium/my_utils | refs/heads/master | /err_hunter/err_hunter/frame_operations.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals
import inspect
import sys
from . import myinspect
from .attr import attributes
PY2 = (sys.version_info[0] == 2)
def real_frame_extract(subframe, filepath, lineno):
    """Walk outward from *subframe* and return the first enclosing frame
    located at exactly (*filepath*, *lineno*), or ``None`` when no
    enclosing frame matches.

    :type subframe: inspect.FrameInfo
    :rtype: inspect.FrameInfo
    """
    is_py2 = sys.version_info[0] == 2
    for record in inspect.getouterframes(subframe):
        # Python 2 returns plain tuples (frame, filename, lineno, ...);
        # Python 3 returns FrameInfo named tuples -- unpack accordingly.
        if is_py2:
            candidate, fname, lno = record[0], record[1], record[2]
        else:
            candidate, fname, lno = record.frame, record.filename, record.lineno
        if fname == filepath and lno == lineno:
            return candidate
    return None
def frame_format(frame, interested=None, linerange=5, frame_lineno=None):
    """Render a single stack frame as a human-readable text report.

    The report contains a ``File "...", line N, in func`` header, a source
    fragment of up to ``linerange`` lines of context on each side of the
    current line (the active line marked with ``--->`` / ``<---``), and
    dumps of the frame's global and local variables.

    :param frame: the frame object to render
    :param interested: forwarded to ``attributes()`` to filter which
        variables are dumped (semantics defined in ``err_hunter.attr`` --
        not visible from this file)
    :param linerange: number of context lines before/after the marked line
    :param frame_lineno: explicit line number to highlight; defaults to
        ``frame.f_lineno``
    :return: the formatted report text
    """
    abs_path = frame.f_code.co_filename
    func_name = frame.f_code.co_name
    # Variable dumps are delegated to attributes(); `interested` narrows them.
    global_vars = attributes(frame.f_globals, from_dict=True, interested=interested)
    local_vars = attributes(frame.f_locals, from_dict=True, interested=interested)
    frame_lineno = frame_lineno or frame.f_lineno
    source_lines, first_lineno = myinspect.getsourcelines(frame.f_code)
    # Mark the currently executing line with --->  <--- arrows.
    running_line = source_lines[frame_lineno - first_lineno]
    if PY2:
        running_line = running_line.decode("utf8")
    source_lines[frame_lineno - first_lineno] = "--->" \
        + running_line.rstrip("\r\n ") \
        + " <---\n"
    # Keep only `linerange` lines of context on each side of the marked line.
    frag_first_lineno = max(0, frame_lineno - first_lineno - linerange)
    source_lines = source_lines[
        frag_first_lineno
        : frame_lineno - first_lineno + linerange
    ]
    if PY2:  # convert bytes to unicode
        _source_lines = []
        for line in source_lines:
            try:
                line = line.decode("utf8")
            except:
                # Undecodable bytes: fall back to repr() rather than crash.
                line = repr(line)
            _source_lines.append(line)
        source_lines = _source_lines
    frag_first_lineno += first_lineno
    # Prefix each line with its absolute line number; the marked line
    # (it now starts with "-") keeps plain indentation instead of a number.
    source_lines = "".join(
        (
            " {:<4}{}".format(i + frag_first_lineno, x)
            if not x.startswith("-")
            else " {}".format(x)
        )
        for i, x in enumerate(source_lines)
    )
    text = """File "{abs_path}", line {frame_lineno}, in {func_name}
{source_lines}
#----global_vars----#
{global_vars}
#----local_vars----#
{local_vars}
#------------------------------------#
""".format(
        abs_path=abs_path, frame_lineno=frame_lineno, func_name=func_name,
        source_lines=source_lines.rstrip("\r\n"), global_vars=global_vars.rstrip(), local_vars=local_vars.rstrip()
    )
    return text
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
54,269 | aploium/my_utils | refs/heads/master | /disk_kv_storge/jsondiskkv.py | #!/usr/bin/env python3
# coding=utf-8
from __future__ import unicode_literals, division, print_function, absolute_import
import logging
import sys
logger = logging.getLogger(__name__)
try:
from . import BaseDiskKV
except (ImportError, ValueError):
# noinspection PyUnresolvedReferences
from disk_kv_storge import BaseDiskKV
# Python 2/3 compatibility aliases used by the key/value codecs below.
if sys.version_info[0] == 2:
    # noinspection PyUnresolvedReferences
    string_types = (str, unicode)
    # noinspection PyUnresolvedReferences
    integer_types = (int, long)
    # noinspection PyUnresolvedReferences
    text_type = unicode
    # On Py2, plain str is byte-oriented, so it counts as binary here.
    binary_type = (str, bytes, bytearray)
else:
    string_types = str
    integer_types = int
    text_type = str
    binary_type = (bytes, bytearray)
# NOTE: a redundant ``try: import msgpack`` probe used to sit here; it did
# nothing except log a duplicate copy of the fallback warning.  The actual
# msgpack/json selection (and its single warning) happens in the
# try/except block further down.

# --------------------------------------------------------
def _key_encode(key):
    """Coerce *key* into raw bytes for the backing store.

    Bytes-like keys pass through untouched, text is UTF-8 encoded, and
    integers are stringified before encoding.  Any other type falls
    through and yields ``None`` (matching the historical behaviour).
    """
    if isinstance(key, binary_type):
        return key
    if isinstance(key, integer_types):
        key = text_type(key)
    if isinstance(key, text_type):
        return key.encode("utf8")
    return None
def _key_decode(key):
return key.decode("utf8")
# --------------------------------------------------------
# Select the value serializer: msgpack when installed, builtin json otherwise.
try:
    import msgpack
except ImportError:
    logger.warning(
        "msgpack not found, please consider install msgpack (http://msgpack.org/) for serialization, "
        "it's better than json. Fallback to builtin json for serialization"
    )
    import json

    def _value_encode(value):
        # json fallback: serialize to a UTF-8 byte string.
        return json.dumps(value, ensure_ascii=False).encode("UTF-8")

    def _value_decode(value):
        return json.loads(value.decode("UTF-8"))
else:
    def _value_encode(value):
        # use_bin_type=True distinguishes bytes from str in the packed form.
        return msgpack.dumps(value, use_bin_type=True)

    def _value_decode(value):
        # NOTE(review): the ``encoding=`` keyword was removed in
        # msgpack-python 1.0 (replaced by ``raw=False``); this call will
        # fail on modern msgpack -- confirm the pinned msgpack version.
        return msgpack.loads(value, encoding='UTF-8', use_list=False)
# -------------------------
class JsonDiskKV(BaseDiskKV):
    """Disk-backed key/value store with transparent (de)serialization.

    Keys are normalized to UTF-8 bytes via ``_key_encode``/``_key_decode``;
    values are serialized with msgpack when available, otherwise with the
    builtin json module (selection done at module import time).

    >>> td = JsonDiskKV()
    >>> td["cat"] = "foobar"
    >>> assert td["cat"] == "foobar"
    >>> td["dog"] = 42
    >>> assert td["cat"] == "foobar"
    >>> assert td.get("cat") == "foobar"
    >>> assert td.get("non-exist", "a") == "a"
    >>> assert tuple(td.keys()) == ("cat", "dog")
    >>> assert tuple(td.values()) == ("foobar", 42)
    >>> assert tuple(td.items()) == (("cat","foobar"), ("dog",42))
    >>> assert len(td) == 2
    >>> assert td["dog"] == 42, list(td.items())
    >>> td["x"] = 1
    >>> del td["x"]
    >>> assert "x" not in td
    >>>
    >>> # test json storge
    >>> _dic = {"mon":[1, 2, 3, 4, {"cat": 1, b"binkey": "中文"}]}
    >>> td["monkey"] = _dic
    >>> assert td["monkey"] == _dic
    >>>
    >>> # test many keys
    >>> for i in range(10000): td[str(i)] = {"i_{}".format(i): i}
    >>> for i in range(10000): assert td[str(i)] == {"i_{}".format(i): i}
    """
    # Codec hooks; presumably invoked by BaseDiskKV around raw storage
    # access -- confirm against disk_kv_storge/__init__.py.  staticmethod()
    # keeps the module-level functions from being bound as instance methods.
    key_encode = staticmethod(_key_encode)
    key_decode = staticmethod(_key_decode)
    value_encode = staticmethod(_value_encode)
    value_decode = staticmethod(_value_decode)
| {"/requestfuzz/datastructure.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/__init__.py": ["/err_hunter/err_hunter/traceback2.py", "/err_hunter/err_hunter/mylogger.py", "/err_hunter/err_hunter/mylogging.py"], "/requestfuzz/tests/test_datastructure.py": ["/requestfuzz/__init__.py"], "/disk_kv_storge/disk_timeoutdict.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/request.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/bare.py"], "/requestfuzz/tests/test_bare.py": ["/requestfuzz/request.py", "/requestfuzz/bare.py"], "/err_hunter/err_hunter/mylogger.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/tests/test_fuzzable.py": ["/requestfuzz/request.py", "/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"], "/requestfuzz/tests/test_plugins.py": ["/requestfuzz/__init__.py", "/requestfuzz/plugin.py"], "/err_hunter/err_hunter/traceback2.py": ["/err_hunter/err_hunter/__init__.py"], "/requestfuzz/mutant.py": ["/requestfuzz/datastructure.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/utils.py", "/requestfuzz/request.py", "/requestfuzz/payload.py"], "/requestfuzz/url.py": ["/requestfuzz/__init__.py", "/requestfuzz/datastructure.py"], "/requestfuzz/payload.py": ["/requestfuzz/utils.py"], "/requestfuzz/tests/test_rec_parse.py": ["/requestfuzz/__init__.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_url.py": ["/requestfuzz/__init__.py"], "/err_hunter/err_hunter/frame_operations.py": ["/err_hunter/err_hunter/__init__.py"], "/disk_kv_storge/jsondiskkv.py": ["/disk_kv_storge/__init__.py"], "/requestfuzz/bare.py": ["/requestfuzz/utils.py", "/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/url.py", "/requestfuzz/recursive_parse.py"], "/requestfuzz/tests/test_mutants.py": ["/requestfuzz/__init__.py", "/requestfuzz/payload.py", "/requestfuzz/mutant.py", "/requestfuzz/tests/test_fuzzable.py"], 
"/requestfuzz/__init__.py": ["/requestfuzz/datastructure.py", "/requestfuzz/request.py", "/requestfuzz/bare.py", "/requestfuzz/url.py", "/requestfuzz/csrf.py", "/requestfuzz/recursive_parse.py", "/requestfuzz/mutant.py", "/requestfuzz/payload.py"], "/requestfuzz/plugin.py": ["/requestfuzz/request.py"], "/err_hunter/err_hunter/mylogging.py": ["/err_hunter/err_hunter/__init__.py", "/err_hunter/err_hunter/mylogger.py"], "/requestfuzz/tests/test_rebuild_bare.py": ["/requestfuzz/__init__.py"], "/requestfuzz/csrf.py": ["/requestfuzz/datastructure.py", "/requestfuzz/plugin.py", "/requestfuzz/__init__.py"], "/disk_kv_storge/__init__.py": ["/disk_kv_storge/disk_timeoutdict.py", "/disk_kv_storge/jsondiskkv.py"], "/requestfuzz/recursive_parse.py": ["/requestfuzz/datastructure.py", "/requestfuzz/__init__.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.