| function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (list) |
|---|---|---|
def __init__(self, _seed, allocator=None, job_check=JobVerification.CHECK_REQUEST, **kwargs):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def name(self):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def get_id(self):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def scheduling_method(self, cur_time, es_dict, es):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def set_resource_manager(self, resource_manager):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def schedule(self, cur_time, es_dict, es):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def _check_job_request(self, _job):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def __str__(self):
return self.get_id() | cgalleguillosm/accasim | [3, 1, 3, 3, 1496215976] |
def __init__(self, seed, allocator, name, sorting_parameters, **kwargs):
SchedulerBase.__init__(self, seed, allocator, **kwargs)
self.name = name
self.sorting_parameters = sorting_parameters | cgalleguillosm/accasim | [3, 1, 3, 3, 1496215976] |
def get_id(self):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def scheduling_method(self, cur_time, jobs, es_dict):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def __init__(self, _allocator, _seed=0, **kwargs):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def __init__(self, _allocator, _resource_manager=None, _seed=0, **kwargs):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def __init__(self, _allocator, _resource_manager=None, _seed=0, **kwargs):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def __init__(self, allocator, seed=0, **kwargs):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def get_id(self):
""" | cgalleguillosm/accasim | [
3,
1,
3,
3,
1496215976
] |
def scheduling_method(self, cur_time, queued_jobs, es_dict):
"""
This function must map the queued events to available nodes at the current time. | cgalleguillosm/accasim | [3, 1, 3, 3, 1496215976] |
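The truncated docstring above states the dispatcher contract: map queued events to available nodes at the current time. A minimal, hypothetical sketch of such a mapping, assuming a free-node pool and a `requested_nodes` attribute in the job description (neither is taken from accasim's API):

```python
def map_jobs_to_nodes(cur_time, queued_ids, es_dict, free_nodes):
    # Pair each queued job id with nodes drawn from the free pool.
    decisions = []
    for job_id in queued_ids:
        need = es_dict[job_id]['requested_nodes']  # assumed job attribute
        if need <= len(free_nodes):
            assigned = [free_nodes.pop() for _ in range(need)]
            decisions.append((cur_time, job_id, assigned))  # start now
        else:
            decisions.append((None, job_id, []))  # cannot be started yet
    return decisions
```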
def _try_fifo_allocation(self, queued_jobs, cur_time):
"""
Allocates as many jobs as possible using the FIFO approach: as soon as one allocation fails, all subsequent jobs are considered blocked as well.
The returned tuple contains info about the allocated jobs (assigned nodes and so on) and the position of the first blocked job. | cgalleguillosm/accasim | [3, 1, 3, 3, 1496215976] |
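A minimal sketch of the FIFO pass described in the docstring, assuming an injected `try_allocate` callable that returns the assigned nodes or None (not accasim's actual allocator interface):

```python
def fifo_allocate(queued_jobs, try_allocate):
    allocations = []    # (job, assigned_nodes) for jobs that fit
    blocked_idx = None  # position of the first job that did not fit
    for i, job in enumerate(queued_jobs):
        nodes = try_allocate(job)
        if nodes is None:
            blocked_idx = i
            break  # FIFO: everything behind the blocked job waits
        allocations.append((job, nodes))
    return allocations, blocked_idx
```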
def _calculate_slot(self, cur_time, avl_resources, decided_allocations, prev_blocked, blocked_job, es_dict):
"""
Computes a reservation for the blocked job by incrementally releasing the resources used by the running
events and recently allocated jobs. The earliest slot in which blocked_job fits is chosen. | cgalleguillosm/accasim | [3, 1, 3, 3, 1496215976] |
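The reservation logic amounts to replaying future resource releases in time order until the blocked job fits. A hedged sketch under assumed data shapes (`release_events` as (end_time, released_resources) pairs; not the accasim signature above):

```python
def earliest_slot(free, release_events, demand):
    # Replay future releases in chronological order and return the first
    # time at which every resource demand is satisfied.
    for end_time, released in sorted(release_events):
        for res, amount in released.items():
            free[res] = free.get(res, 0) + amount
        if all(free.get(r, 0) >= need for r, need in demand.items()):
            return end_time
    return None  # no slot among the known releases

# e.g. earliest_slot({'cpu': 2}, [(10, {'cpu': 2}), (20, {'cpu': 4})], {'cpu': 6})
# returns 20: only after both releases do 8 cpus cover the demand of 6.
```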
def init_logger():
global logger
logger = logging.getLogger('GetLyrics')
logger.setLevel(logging.DEBUG)
log_fmt = '%(asctime)s/%(name)s[%(levelname)s]: %(message)s'
logging.basicConfig(format=log_fmt) | jntkym/rappers | [2, 1, 2, 3, 1448888730] |
def getArtistId(artist):
u"""j-lyrics.netでのアーティストのIDを取得
"""
params = {"ka": artist,}
baseurl = "http://search.j-lyric.net/index.php"
r = requests.get(baseurl, params=params)
soup = BeautifulSoup(r.content)
urls = soup.find("div", id="lyricList").findAll("a")
r = re.compile(r'http://j-lyric.net/artist/\w+/')
for url in urls:
href = url.get("href")
if href.startswith("http://j-lyric.net/artist/"):
return href.split("/")[-2]
if verbose:
logger.warning(artist + ": Not found")
return None | jntkym/rappers | [2, 1, 2, 3, 1448888730] |
def getLyricText(url):
u"""歌詞を取得して返す
"""
r = requests.get(url)
soup = BeautifulSoup(r.content)
# TODO: refactoring
text = str(soup.find("p", id="lyricBody"))
text = text.replace('<p id="lyricBody">', '').replace('</p>', '')
text = text.replace('\r', '').replace('\n', '')
return text.replace('<br />', '<BR>') | jntkym/rappers | [2, 1, 2, 3, 1448888730] |
def main(args):
global verbose
verbose = args.verbose
artist_list = getSimilarArtist(args.artist)
artist_list = [args.artist,] + artist_list
print("artist\ttitle\ttext")
for artist in artist_list[:args.n_artists]:
urls = getLyricUrlList(artist)
if verbose:
logger.info('{}: {} songs'.format(artist, len(urls)))
for i, url in enumerate(urls, start=1):
if verbose:
if i%10 == 0: logger.info("Wrote " + str(i) + " songs")
lyric = getLyricText(url[1])
print("{artist}\t{title}\t{text}".format(
artist=artist,
title=url[0].encode("utf-8"),
text=lyric))
time.sleep(1.0) # Wait one second | jntkym/rappers | [2, 1, 2, 3, 1448888730] |
def get_workers(self, **kwargs):
return Worker.get_all(parent=self, **kwargs) | Staffjoy/client_python | [31, 29, 31, 3, 1454293430] |
def create_worker(self, **kwargs):
return Worker.create(parent=self, **kwargs) | Staffjoy/client_python | [31, 29, 31, 3, 1454293430] |
def get_schedule(self, id):
return Schedule.get(parent=self, id=id) | Staffjoy/client_python | [31, 29, 31, 3, 1454293430] |
def get_shift(self, id):
return Shift.get(parent=self, id=id) | Staffjoy/client_python | [31, 29, 31, 3, 1454293430] |
def get_shift_query(self, **kwargs):
return ShiftQuery.get_all(parent=self, **kwargs) | Staffjoy/client_python | [31, 29, 31, 3, 1454293430] |
def get_recurring_shift(self, id):
return RecurringShift.get(parent=self, id=id) | Staffjoy/client_python | [31, 29, 31, 3, 1454293430] |
def __init__(self, name, connection):
super(PostgresNoSQLTable, self).__init__(name, connection)
self.super = super(PostgresNoSQLTable, self) | cansik/pg4nosql | [6, 1, 6, 3, 1437555734] |
def insert(self, auto_commit=True, **data):
relational_data = data
relational_data_columns = ''
relational_data_values = ''
if relational_data:
relational_data_columns = ",".join(relational_data.keys())
data_list = map(str, map(to_nullable_string, relational_data.values()))
relational_data_values = ",".join(data_list)
self.cursor.execute(self.__SQL_INSERT, (AsIs(self.name),
AsIs(relational_data_columns),
AsIs(relational_data_values)))
if auto_commit:
self.commit()
return self.cursor.fetchone()[DEFAULT_ROW_IDENTIFIER] | cansik/pg4nosql | [6, 1, 6, 3, 1437555734] |
def put(self, json_data, auto_commit=True, **relational_data):
relational_data.update({DEFAULT_JSON_COLUMN_NAME: json_data})
return self.insert(auto_commit=auto_commit, **relational_data) | cansik/pg4nosql | [6, 1, 6, 3, 1437555734] |
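`put()` simply folds the JSON document in as one more column value before delegating to `insert()`. A runnable sketch of that folding step (the column name constant is an assumption mirroring `DEFAULT_JSON_COLUMN_NAME` above):

```python
DEFAULT_JSON_COLUMN_NAME = 'data'  # assumed value, for illustration only

def build_insert_columns(json_data, **relational_data):
    # Mirror of put(): the JSON body becomes an ordinary column value.
    relational_data.update({DEFAULT_JSON_COLUMN_NAME: json_data})
    return relational_data

print(build_insert_columns('{"city": "Zurich"}', name='Anna'))
# {'name': 'Anna', 'data': '{"city": "Zurich"}'}
```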
def get(self, object_id):
self.cursor.execute(self.__SQL_GET_JSON, (AsIs(self.name), object_id))
record = self.cursor.fetchone()
if record is None:
return record
return PostgresNoSQLResultItem(record, self) | cansik/pg4nosql | [6, 1, 6, 3, 1437555734] |
def query_one(self, query='True', columns='*'):
result = self.query(query, columns)
if not result:
return None
return result[0] | cansik/pg4nosql | [6, 1, 6, 3, 1437555734] |
def delete(self, object_id, auto_commit=True):
self.cursor.execute(self.__SQL_DELETE_JSON, (AsIs(self.name), object_id))
if auto_commit:
self.commit() | cansik/pg4nosql | [6, 1, 6, 3, 1437555734] |
def iterative_zoom(image, mindiff=1., zoomshape=(10, 10),
return_zoomed=False, zoomstep=2, verbose=False,
minmax=np.min, ploteach=False, return_center=True):
"""
Iteratively zoom in on the *minimum* position in an image until the
delta-peak value is below `mindiff`
Parameters
----------
image : np.ndarray
Two-dimensional image with a *minimum* to zoom in on (or maximum, if
specified using `minmax`)
mindiff : float
Minimum difference that must be present in image before zooming is done
zoomshape : [int,int]
Shape of the "mini" image to create. Smaller is faster, but a bit less
accurate. [10,10] seems to work well in preliminary tests (though unit
tests have not been written)
return_zoomed : bool
Return the zoomed image in addition to the measured offset?
zoomstep : int
Amount to increase the zoom factor by on each iteration. Probably best to
stick with small integers (2-5ish).
verbose : bool
Print out information about zoom factor, offset at each iteration
minmax : np.min or np.max
Can zoom in on the minimum or maximum of the image
ploteach : bool
Primarily a debug tool, and to be used with extreme caution! Will open
a new figure at each iteration showing the next zoom level.
return_center : bool
Return the center position in original image coordinates? If False,
will return the *offset from center* instead (but beware the
conventions associated with the concept of 'center' for even images).
Returns
-------
The y,x offsets (following numpy convention) of the center position of the
original image. If `return_zoomed`, returns (zoomed_image, zoom_factor,
offsets) because you can't interpret the zoomed image without the zoom
factor.
"""
image_zoom = image
argminmax = np.argmin if "min" in minmax.__name__ else np.argmax
zf = 1. # "zoom factor" initialized to 1 for the base shift measurement
offset = np.array([0]*image.ndim,dtype='float') # center offset
delta_image = (image_zoom - minmax(image_zoom))
xaxzoom = np.indices(image.shape)
if ploteach:
ii = 1
pl.figure(ii)
pl.clf()
pl.pcolor(np.arange(image.shape[0]+1)-0.5,np.arange(image.shape[1]+1)-0.5, image)
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
pl.plot(minpos[1],minpos[0],'wx')
# check to make sure the smallest *nonzero* difference > mindiff
while np.abs(delta_image[np.abs(delta_image)>0]).min() > mindiff:
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
center = xaxzoom[0][minpos],xaxzoom[1][minpos]
offset = xaxzoom[0][minpos]-(image.shape[0]-1)/2,xaxzoom[1][minpos]-(image.shape[1]-1)/2
zf *= zoomstep
xaxzoom, image_zoom = zoom.zoom_on_pixel(image, center, usfac=zf,
outshape=zoomshape, return_xouts=True)
delta_image = image_zoom-minmax(image_zoom)
# base case: in case you can't do any better...
# (at this point, you're all the way zoomed)
if np.all(delta_image == 0):
if verbose:
print("Can't zoom any further. zf=%i" % zf)
break
if verbose:
print(("Zoom factor %6i, center = %30s, offset=%30s, minpos=%30s, min|diff|=%15g" %
(zf, ",".join(["%15g" % c for c in center]),
",".join(["%15g" % c for c in offset]),
",".join(["%5i" % c for c in minpos]),
np.abs(delta_image[np.abs(delta_image)>0]).min()
)))
if ploteach:
ii += 1
pl.figure(ii)
pl.clf()
pl.pcolor(centers_to_edges(xaxzoom[1][0,:]),centers_to_edges(xaxzoom[0][:,0]),image_zoom)
pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
pl.plot(center[1],center[0],'wx')
minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
pl.plot(xaxzoom[1][minpos],
xaxzoom[0][minpos],
'w+')
pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
pl.figure(1)
#pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
head_width=0.1/zf, linewidth=1./zf, length_includes_head=True) | keflavich/image_registration | [139, 48, 139, 14, 1346634490] |
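A hedged usage sketch: the coarse integer minimum below is what `iterative_zoom` starts from before refining it to sub-pixel precision (the synthetic image and the expected refinement are illustrative, not from the repo's tests):

```python
import numpy as np

yy, xx = np.indices((64, 64))
image = (yy - 31.7) ** 2 + (xx - 30.2) ** 2  # true minimum lies between pixels

coarse = np.unravel_index(np.argmin(image), image.shape)
print(coarse)  # (32, 30); iterative_zoom would refine this toward (31.7, 30.2)
```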
def centers_to_edges(arr):
dx = arr[1]-arr[0]
newarr = np.linspace(arr.min()-dx/2,arr.max()+dx/2,arr.size+1)
return newarr | keflavich/image_registration | [139, 48, 139, 14, 1346634490] |
def emulator_rom_launch_command(emulator, rom):
"""Generates a command string that will launch `rom` with `emulator` (using
the format provided by the user). The return value of this function should
be suitable to use as the `Exe` field of a Steam shortcut"""
# Normalizing the strings is just removing any leading/trailing quotes.
# The beautiful thing is that strip does nothing if it doesn't contain quotes,
# so normalizing it then adding quotes should do what I want 100% of the time
normalize = lambda s: s.strip("\"")
add_quotes = lambda s: "\"%s\"" % s
# We don't know if the user put quotes around the emulator location. If
# so, we don't want to add another pair and screw things up.
#
# The user didn't give us the ROM information, but screw it, I already
# have some code to add quotes to a string, might as well use it.
quoted_location = add_quotes(normalize(emulator.location))
quoted_rom = add_quotes(normalize(rom.path))
# The format string contains a bunch of specifiers that users can use to
# substitute values in at runtime. Right now the only supported values are:
# %l - The location of the emulator (to avoid sync bugs)
# %r - The location of the ROM (so the emulator knows what to launch)
# %fn - The ROM filename without its extension (for emulators that utilize separate configuration files)
#
# More may be added in the future, but for now this is what we support
return (
emulator.format
.replace("%l", quoted_location)
.replace("%r", quoted_rom)
.replace("%fn", os.path.splitext(os.path.basename(rom.path))[0])
) | scottrice/Ice | [818, 108, 818, 209, 1356404560] |
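A worked, runnable example of the substitution this function performs; the format string and paths are hypothetical, only the %l/%r/%fn specifiers come from the code above:

```python
import os

fmt = "%l --fullscreen %r"        # hypothetical user-provided format
location = '"/usr/bin/mednafen"'  # may or may not already carry quotes
rom_path = "/roms/Super Metroid.sfc"

normalize = lambda s: s.strip('"')  # drop any existing quotes
add_quotes = lambda s: '"%s"' % s   # then re-quote exactly once

command = (fmt
           .replace("%l", add_quotes(normalize(location)))
           .replace("%r", add_quotes(normalize(rom_path)))
           .replace("%fn", os.path.splitext(os.path.basename(rom_path))[0]))
print(command)  # "/usr/bin/mednafen" --fullscreen "/roms/Super Metroid.sfc"
```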
def test_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, "b") | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def test_version(self):
schema = {'mergeStrategy': 'version'}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "a"}, {'value': "b"}]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_meta(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a", merge_options={
'version': {'metadata': {'uri': 'http://example.com/a'}}})
base = merger.merge(base, "b", merge_options={
'version': {'metadata': {'uri': 'http://example.com/b'}}})
self.assertEqual(base, [
{'value': "a",
'uri': 'http://example.com/a'},
{'value': "b",
'uri': 'http://example.com/b'}]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
base = merger.merge(None, 'a', meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning)) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_unique_false(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'unique': False}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, "a")
base = merger.merge(base, "a")
self.assertEqual(base, [{'value': "a"}, {'value': "a"}]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_last(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 1}}
base = None
base = jsonmerge.merge(base, "a", schema)
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': "b"}]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_base_not_a_list_of_objects(self):
schema = {'mergeStrategy': 'version'}
base = ["a"]
with self.assertRaises(BaseInstanceError) as cm:
jsonmerge.merge(base, "b", schema) | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def test_version_base_empty_list(self):
schema = {'mergeStrategy': 'version'}
base = []
base = jsonmerge.merge(base, "b", schema)
self.assertEqual(base, [{'value': 'b'}]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_append_type_error(self):
schema = {'mergeStrategy': 'append'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#") | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_default(self):
schema = {}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertEqual(base, {'a': "a", 'b': "b"}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_trivial(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'b': "b"}, schema)
self.assertTrue(isinstance(base, dict))
self.assertEqual(base, {'a': "a", 'b': "b"}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_type_error(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
with self.assertRaises(HeadInstanceError) as cm:
jsonmerge.merge(base, "a", schema)
self.assertEqual(cm.exception.value.ref, "#") | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_overwrite(self):
schema = {'mergeStrategy': 'objectMerge'}
base = None
base = jsonmerge.merge(base, {'a': "a"}, schema)
base = jsonmerge.merge(base, {'a': "b"}, schema)
self.assertEqual(base, {'a': "b"}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_objclass2(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'a': {'mergeStrategy': 'objectMerge',
'mergeOptions': { 'objClass': 'OrderedDict'}}}}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, {'a': {'b': 'c'}, 'd': {'e': 'f'}})
self.assertIsInstance(base, dict)
self.assertIsInstance(base['a'], OrderedDict)
self.assertIsInstance(base['d'], dict) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_objclass_menu(self):
schema = {'mergeStrategy': 'objectMerge', 'mergeOptions': { 'objClass': 'foo'}}
class MyDict(dict):
pass
objclass_menu = {'foo': MyDict}
merger = jsonmerge.Merger(schema, objclass_menu=objclass_menu)
base = None
base = merger.merge(base, {'c': "a", 'a': "a"})
self.assertTrue(isinstance(base, MyDict)) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_append(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'a': {'mergeStrategy': 'append'}
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_append_additional(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'b': {'mergeStrategy': 'overwrite'}
},
'additionalProperties': {
'mergeStrategy': 'append'
}}
base = None
base = jsonmerge.merge(base, {'a': ["a"]}, schema)
base = jsonmerge.merge(base, {'a': ["b"], 'b': 'c'}, schema)
self.assertEqual(base, {'a': ["a", "b"], 'b': 'c'}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_example(self):
head1 = {
'buyer': {
'id': {
'name': "Test old",
},
'uri': 'Test uri old',
}
}
head2 = {
'buyer': {
'id': {
'name': "Test new"
},
'uri': 'Test uri new',
},
'award': "Award"
}
base_expect = {
'buyer': {
'id': {
'name': [
{'value': "Test old"},
{'value': "Test new"},
]
},
'uri': 'Test uri new',
},
'award': "Award"
}
schema = {
'mergeStrategy': 'objectMerge',
'properties': {
'buyer': {
'properties': {
'id': {
'properties': {
'name': {
'mergeStrategy': 'version',
}
}
},
'uri': {
'mergeStrategy': 'overwrite',
}
},
},
'award': {
'mergeStrategy': 'overwrite',
}
},
}
base = None
base = jsonmerge.merge(base, head1, schema)
base = jsonmerge.merge(base, head2, schema)
self.assertEqual(base, base_expect) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_external_refs(self):
schema_1 = {
'id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "schema_2.json#/definitions/a"},
},
}
schema_2 = {
'id': 'http://example.com/schema_2.json',
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema_1)
# merge() would otherwise make an HTTP request
merger.cache_schema(schema_2)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}}) | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def test_external_refs_draft6(self):
schema_1 = {
'$id': 'http://example.com/schema_1.json',
'properties': {
'a': {'$ref': "schema_2.json#/definitions/a"},
},
}
schema_2 = {
'$id': 'http://example.com/schema_2.json',
'definitions': {
"a": {
"properties": {
"b": {'mergeStrategy': 'version'},
}
},
}
}
merger = jsonmerge.Merger(schema_1, validatorclass=Draft6Validator)
# merge() would otherwise make an HTTP request
merger.cache_schema(schema_2)
base = None
base = merger.merge(base, {"a": {"b": "c"}})
base = merger.merge(base, {"a": {"b": "d"}})
self.assertEqual(base, {"a": {"b": [{"value": "c"}, {"value": "d"}]}}) | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def test_oneof_recursive(self):
# Schema to merge all arrays with "append" strategy and all objects
# with the default "objectMerge" strategy.
schema = {
"oneOf": [
{
"type": "array",
"mergeStrategy": "append"
},
{
"type": "object",
"additionalProperties": {
"$ref": "#"
}
},
{
"type": "string"
},
]
}
base = {"a": ["1"], "b": "3", "c": {"d": ["4"], "e": "f"}}
head = {"a": ["2"], "b": "4", "g": "7", "c": {"d": ["3"]}}
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, {"a": ["1", "2"], "b": "4", "g": "7", "c": {"d": ["4", "3"], "e": "f"}}) | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def test_oneof_multiple_validate(self):
schema = {
'oneOf': [
{
'type': 'array',
'maxItems': 3,
'mergeStrategy': 'append'
},
{
'type': 'array',
'minItems': 2,
'mergeStrategy': 'overwrite'
}
]
}
merger = jsonmerge.Merger(schema)
base = [1]
base = merger.merge(base, [2])
self.assertEqual(base, [1, 2])
base = [1, 2]
with self.assertRaises(HeadInstanceError) as cm:
base = merger.merge(base, [3, 4]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_anyof_overwrite_toplevel(self):
schema = {
'mergeStrategy': 'overwrite',
'anyOf': [
{
'type': 'array'
},
{
'type': 'string'
},
]
}
merger = jsonmerge.Merger(schema)
self.assertEqual(merger.merge([2, 3, 4], 'a'), 'a')
self.assertEqual(merger.merge('a', [2, 3, 4]), [2, 3, 4]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def merge(self, walk, base, head, schema, meta, **kwargs):
if base is None:
ref = ""
else:
ref = base.ref
return JSONValue("foo", ref) | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def test_merge_by_id(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"properties": {
"id": {"type": "string"},
"field": {"type": "number"},
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 2}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_no_items(self):
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "id"},
}
a = [
{"id": "A", "field": 1},
]
b = [
{"id": "A", "field": 2},
]
# by default, it should fall back to "replace" strategy for integers.
expected = [
{"id": "A", "field": 2},
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_no_key(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
a = [
{"id": "A", "field": 1},
]
b = [
{'field': 2}
]
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
# it should ignore array elements that do not have the id
self.assertEqual(base, a) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_complex_id(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
a = [
{"id": ["A", {"B": "C"} ], "field": 1},
{"id": ["A", {"B": "D"} ], "field": 2},
{"id": ["A", {"B": "E"} ], "field": 3},
]
b = [
{"id": ["A", {"B": "D"} ], "field": 4},
{"id": ["E", {"B": "C"} ], "field": 5},
]
merger = jsonmerge.Merger(schema)
c = merger.merge(a, b)
expected = [
{"id": ["A", {"B": "C"} ], "field": 1},
{"id": ["A", {"B": "D"} ], "field": 4},
{"id": ["A", {"B": "E"} ], "field": 3},
{"id": ["E", {"B": "C"} ], "field": 5},
]
self.assertEqual(expected, c) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_with_subschema(self):
schema = {
"properties": {
"awards": {
"type": "array",
"mergeStrategy": "arrayMergeById",
"items": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"field": {
"type": "number",
"mergeStrategy": "version"
}
}
}
}
}
}
a = {
"awards": [
{"id": "A", "field": 1},
{"id": "B", "field": 2}
]
}
b = {
"awards": [
{"id": "B", "field": 3},
{"id": "C", "field": 4}
]
}
expected = {
"awards": [
{"id": "A", "field": [{"value": 1}]},
{"id": "B", "field": [{"value": 2}, {"value": 3}]},
{"id": "C", "field": [{"value": 4}]}
]
}
merger = jsonmerge.Merger(schema)
base = None
base = merger.merge(base, a)
base = merger.merge(base, b)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_only_integers(self):
# arrayMergeById strategy can be used to treat simple arrays of
# integers as Python sets by setting idRef to root (i.e. pointing to
# the array element itself)
#
# https://github.com/avian2/jsonmerge/issues/24
schema = {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {"idRef": "/"},
}
base = [ 1, 2 ]
head = [ 2, 3 ]
expected = [ 1, 2, 3]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_bad_base_type(self):
schema = {
'mergeStrategy': 'arrayMergeById'
}
head = []
base = {'foo': 'bar'}
merger = jsonmerge.Merger(schema)
with self.assertRaises(BaseInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#') | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_non_unique_base(self):
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a'},
{'id': 'a'}
]
head = [
{'id': 'a',
'foo': 1}
]
merger = jsonmerge.Merger(schema)
with self.assertRaises(BaseInstanceError) as cm:
merger.merge(base, head)
self.assertEqual(cm.exception.value.ref, '#/1') | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_order_issue_31_1(self):
# There was an issue with arrayMergeById where the head value would be
# merged with the last item in the base list, not the matching item.
# The result was then assigned to the matching item.
#
# If the last item in the base list was just created in the same
# arrayMergeById (i.e. by another item in the head list), then merge
# would fail with "Unresolvable JSON pointer".
#
# https://github.com/avian2/jsonmerge/pull/31
schema = {
"mergeStrategy": "arrayMergeById",
}
base = [
{'id': 'a', 'val': {'a': 1}},
{'id': 'b', 'val': {'b': 2}},
]
head = [
{'id': 'a', 'val': {'c': 3}}
]
expected = [
# bug would produce {'b': 2, 'c': 3} here
{'id': 'a', 'val': {'a': 1, 'c': 3}},
{'id': 'b', 'val': {'b': 2}},
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_subclass_get_key(self):
class MyArrayMergeById(jsonmerge.strategies.ArrayMergeById):
def get_key(self, walk, item, idRef):
return item.val[-1]
schema = {'mergeStrategy': 'myArrayMergeById'}
merger = jsonmerge.Merger(schema=schema,
strategies={'myArrayMergeById': MyArrayMergeById()})
base = [
[ 'a', 'b', 'id1' ],
[ 'c', 'id2' ],
]
head = [
[ 'e', 'f', 'g', 'id3' ],
[ 'd', 'id1' ],
]
expected = [
[ 'd', 'id1' ],
[ 'c', 'id2' ],
[ 'e', 'f', 'g', 'id3' ],
]
base = merger.merge(base, head)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_multiple_ids_ignore(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'mergeOptions': {
'idRef': ['/a', '/b'],
'ignoreId': [1, 2],
}
}
base = [
{
'a': 1,
'b': 1,
}
]
head = [
{
# ignoreId matches
'a': 1,
'b': 2,
'c': 2,
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
expected = [
{
'a': 1,
'b': 1
},
{
'a': 2,
'b': 2,
'c': 3,
}
]
merger = jsonmerge.Merger(schema)
base = merger.merge(base, head)
self.assertEqual(base, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_append_with_unique(self):
schema = {
"mergeStrategy": "append",
"uniqueItems": True,
}
merger = jsonmerge.Merger(schema)
head = ["a"]
base = None
base = merger.merge(base, head)
base = merger.merge(base, head)
schema2 = merger.get_schema()
jsonschema.validate(head, schema2)
jsonschema.validate(base, schema2) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_tilde_in_property_name(self):
base = {'a': 0}
head = {'~1': 1}
base = jsonmerge.merge(base, head)
self.assertEqual(base, {'a': 0, '~1': 1}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_discard_objectmerge_null(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'discard'
}
} }
base = {}
head = {'a': 1}
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, {}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_discard_arraymergebyid_null(self):
schema = {
'mergeStrategy': 'arrayMergeById',
'items': {
'mergeStrategy': 'discard'
} }
base = [ ]
head = [ {'id': 1, 'val': 1} ]
base = jsonmerge.merge(base, head, schema)
self.assertEqual(base, []) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_bad_strategy(self):
schema = {
'properties': {
'a': {
'mergeStrategy': 'invalidStrategy'
} } }
base = {'a': 1 }
head = {'a': 2 }
with self.assertRaises(SchemaError) as cm:
jsonmerge.merge(base, head, schema)
self.assertEqual(cm.exception.value.ref, '#/properties/a') | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_index(self):
schema = {
'mergeStrategy': 'arrayMergeByIndex'
}
base = [ {'a': 0 }, {'b': 1} ]
head = [ {'c': 2 }, {'d': 3} ]
result = jsonmerge.merge(base, head, schema)
self.assertEqual(result, [ {'a': 0, 'c': 2}, {'b': 1, 'd': 3} ]) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_default_overwrite(self):
schema = {'description': 'test'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'description': 'test'}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_default_object_merge(self):
schema = {
'properties': {
'foo': {
'mergeStrategy': 'version',
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'properties': {
'foo': {
'type': 'array',
'items': {
'properties': {
'value': {},
}
}
}
}
}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_append(self):
schema = {'type': 'array',
'mergeStrategy': 'append'}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, {'type': 'array'}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_ref_twice(self):
schema = {
'properties': {
'a': {
'$ref': '#/definitions/item'
},
'b': {
'$ref': '#/definitions/item'
},
},
'definitions': {
'item': {
'type': 'object',
'mergeStrategy': 'version'
}
}
}
expected = {
'properties': {
'a': {
'$ref': '#/definitions/item'
},
'b': {
'$ref': '#/definitions/item'
},
},
'definitions': {
'item': {
'type': 'array',
'items': {
'properties': {
'value': {
'type': 'object',
}
}
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(expected, schema2) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_meta_deprecated(self):
schema = {'mergeStrategy': 'version'}
merger = jsonmerge.Merger(schema)
with warnings.catch_warnings(record=True) as w:
merger.get_schema(meta={'foo': 'bar'})
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning)) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_version_limit(self):
schema = {'mergeStrategy': 'version',
'mergeOptions': {'limit': 5}}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'type': 'array',
'items': {
'properties': {
'value': {}
}
},
'maxItems': 5
}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_object_merge_nested(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'foo': {'mergeStrategy': 'version'}
}}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2,
{
'properties': {
'foo': {
'type': 'array',
'items': {
'properties': {
'value': {}
}
}
}
}
}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_anyof_dont_descend(self):
# However, 'anyOf' should be fine if we don't descend through it (e.g.
# if it comes after an 'overwrite' strategy).
schema = {
'properties': {
'a': {
'mergeStrategy': 'overwrite',
'properties': {
'b': {
'anyOf': [
{'properties': {'c': {}}},
{'properties': {'d': {}}},
]
}
}
}
}
}
expected = {
'properties': {
'a': {
'properties': {
'b': {
'anyOf': [
{'properties': {'c': {}}},
{'properties': {'d': {}}},
]
}
}
}
}
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_internal_refs(self):
schema = {
'id': 'http://example.com/schema_1.json',
'mergeStrategy': 'overwrite',
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': {
'properties': {
'baz': {}
}
}
}
}
expected = {
'id': 'http://example.com/schema_1.json',
'properties': {
'foo': {
'$ref': '#/definitions/bar'
}
},
'definitions': {
'bar': {
'properties': {
'baz': {}
}
}
}
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_reference_in_meta(self):
schema = {'mergeStrategy': 'version'}
meta_schema = {
'id': 'http://example.com/schema_1.json',
'$ref': 'schema_2.json#/definitions/meta'
}
schema_2 = {
'id': 'http://example.com/schema_2.json',
'definitions': {
'meta': {
'properties': {
'foo': {
'type': 'string'
},
'bar': {
'enum': [ 'a', 'b' ]
}
}
}
}
}
merger = jsonmerge.Merger(schema)
merger.cache_schema(schema_2)
mschema = merger.get_schema(merge_options={
'version': {'metadataSchema': meta_schema}})
self.assertEqual(mschema,
{
'type': 'array',
'items': {
'properties': {
'value': {},
'foo': {'type': 'string'},
'bar': {'enum': ['a', 'b'] },
}
}
}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_array_in_schema(self):
schema = {
'mergeStrategy': 'overwrite',
'enum': [
"foo",
"bar",
]
}
expected = {
'enum': [
"foo",
"bar",
]
}
merger = jsonmerge.Merger(schema)
mschema = merger.get_schema()
self.assertEqual(expected, mschema) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id(self):
schema = {
"mergeStrategy": "arrayMergeById",
"items": {
'type': 'object'
}
}
expected = {
"items": {
'type': 'object'
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_id_with_depth_twice(self):
# Here we have a $ref that get_schema() should descend into twice.
schema = {
"properties": {
"test": {
"mergeStrategy": "arrayMergeById",
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
},
"test2": {
"mergeStrategy": "arrayMergeById",
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
}
},
"definitions": {
"refitem": {
"type": "object",
"properties": {
"field1": {
"type": "string",
"mergeStrategy": "version"
}
}
}
}
}
expected = {
"properties": {
"test": {
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
},
"test2": {
"type": "array",
"items": {
"$ref": "#/definitions/refitem"
}
}
},
"definitions": {
"refitem": {
"type": "object",
"properties": {
"field1": {
"type": "array",
"items": {
"properties": {
"value": {
"type": "string"
}
}
}
}
}
}
}
}
self.maxDiff = None
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_append_additional(self):
schema = {'mergeStrategy': 'objectMerge',
'properties': {
'b': {'mergeStrategy': 'overwrite'}
},
'additionalProperties': {
'mergeStrategy': 'append'
}}
expected = {'properties': {
'b': {},
},
'additionalProperties': {}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_oneof(self):
schema = {
'oneOf': [
{
'type': 'array',
'mergeStrategy': 'append'
},
{
'type': 'object'
}
]
}
expected = {
'oneOf': [
{
'type': 'array',
},
{
'type': 'object'
}
]
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_oneof_toplevel(self):
schema = {
"mergeStrategy": "version",
"oneOf": [
{"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
{"type": "string", "format": "hostname"}
]
}
expected = {
"type": "array",
"items": {
"properties": {
"value": {
"oneOf": [
{"type": "string", "pattern": "^!?(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?:\\/[0-9]{1,2})?$"},
{"type": "string", "format": "hostname"}
]
}
}
}
}
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
self.assertEqual(schema2, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_discard(self):
schema = { 'type': 'string',
'mergeStrategy': 'discard' }
merger = jsonmerge.Merger(schema)
schema2 = merger.get_schema()
expected = { 'type': 'string' }
self.assertEqual(schema2, expected) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_merge_by_index(self):
schema = {
'type': 'array',
'mergeStrategy': 'arrayMergeByIndex'
}
merger = jsonmerge.Merger(schema)
result = merger.get_schema()
self.assertEqual(result, {'type': 'array'}) | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_str_with_ref(self):
e = SchemaError("Test error", JSONValue({}, '#'))
self.assertEqual(str(e), 'Test error: #') | avian2/jsonmerge | [181, 25, 181, 7, 1406390970] |
def test_str_with_name(self):
e = SchemaError("Test error", JSONValue({}, '#'), 'test')
self.assertEqual(str(e), "'test' merge strategy: Test error: #") | avian2/jsonmerge | [
181,
25,
181,
7,
1406390970
] |
def add_arguments(self, parser):
parser.add_argument('corpus', type=str)
parser.add_argument('languages', nargs='+', type=str)
parser.add_argument('--add_lemmata', action='store_true', dest='add_lemmata', default=False)
parser.add_argument('--add_indices', action='store_true', dest='add_indices', default=False)
parser.add_argument('--xlsx', action='store_true', dest='format_xlsx', default=False)
parser.add_argument('--doc', dest='document')
parser.add_argument('--formal_structure') | UUDigitalHumanitieslab/timealign | [7, 1, 7, 8, 1469016396] |