| | --- |
| | language: |
| | - en |
| | license: other |
| | pretty_name: Geo Benchmark |
| | task_categories: |
| | - text-generation |
| | configs: |
| | - config_name: GKMC |
| | data_files: |
| | - split: test |
| | path: GKMC/test-* |
| | - config_name: GeoQuery_place |
| | data_files: |
| | - split: train |
| | path: GeoQuery_place/train-* |
| | - split: validation |
| | path: GeoQuery_place/validation-* |
| | - split: test |
| | path: GeoQuery_place/test-* |
| | - config_name: GeoQuery_regression |
| | data_files: |
| | - split: train |
| | path: GeoQuery_regression/train-* |
| | - split: validation |
| | path: GeoQuery_regression/validation-* |
| | - split: test |
| | path: GeoQuery_regression/test-* |
| | - config_name: GeoQuestions1089_YN |
| | data_files: |
| | - split: test |
| | path: GeoQuestions1089_YN/test-* |
| | - config_name: GeoQuestions1089_coord |
| | data_files: |
| | - split: test |
| | path: GeoQuestions1089_coord/test-* |
| | - config_name: GeoQuestions1089_place |
| | data_files: |
| | - split: test |
| | path: GeoQuestions1089_place/test-* |
| | - config_name: GeoQuestions1089_regression |
| | data_files: |
| | - split: test |
| | path: GeoQuestions1089_regression/test-* |
| | - config_name: GeoSQA |
| | data_files: |
| | - split: train |
| | path: GeoSQA/train-* |
| | - split: validation |
| | path: GeoSQA/validation-* |
| | - split: test |
| | path: GeoSQA/test-* |
| | - config_name: GridRoute |
| | data_files: |
| | - split: test |
| | path: GridRoute/test-* |
| | - config_name: MsMarco |
| | data_files: |
| | - split: test |
| | path: MsMarco/test-* |
| | - split: train |
| | path: MsMarco/train-* |
| | - split: validation |
| | path: MsMarco/validation-* |
| | - config_name: NY-POI |
| | data_files: |
| | - split: test |
| | path: NY-POI/test-* |
| | - config_name: PPNL_multi |
| | data_files: |
| | - split: test |
| | path: PPNL_multi/test-* |
| | - split: train |
| | path: PPNL_multi/train-* |
| | - split: validation |
| | path: PPNL_multi/validation-* |
| | - config_name: PPNL_single |
| | data_files: |
| | - split: test |
| | path: PPNL_single/test-* |
| | - split: train |
| | path: PPNL_single/train-* |
| | - split: validation |
| | path: PPNL_single/validation-* |
| | - config_name: SpartUN |
| | data_files: |
| | - split: test |
| | path: SpartUN/test-* |
| | - split: train |
| | path: SpartUN/train-* |
| | - split: validation |
| | path: SpartUN/validation-* |
| | - config_name: SpatialEvalLLM |
| | data_files: |
| | - split: test |
| | path: SpatialEvalLLM/test-* |
| | - config_name: StepGame |
| | data_files: |
| | - split: train |
| | path: StepGame/train-* |
| | - split: validation |
| | path: StepGame/validation-* |
| | - split: test |
| | path: StepGame/test-* |
| | - config_name: TourismQA |
| | data_files: |
| | - split: test |
| | path: TourismQA/test-* |
| | - split: train |
| | path: TourismQA/train-* |
| | - split: validation |
| | path: TourismQA/validation-* |
| | dataset_info: |
| | - config_name: GKMC |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: answer |
| | dtype: string |
| | - name: scenario |
| | dtype: string |
| | - name: question |
| | dtype: string |
| | - name: A |
| | dtype: string |
| | - name: B |
| | dtype: string |
| | - name: C |
| | dtype: string |
| | - name: D |
| | dtype: string |
| | splits: |
| | - name: test |
| | num_bytes: 1055828 |
| | num_examples: 1600 |
| | download_size: 510919 |
| | dataset_size: 1055828 |
| | - config_name: GeoQuery_place |
| | features: |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: string |
| | splits: |
| | - name: train |
| | num_bytes: 57875 |
| | num_examples: 346 |
| | - name: validation |
| | num_bytes: 4037 |
| | num_examples: 33 |
| | - name: test |
| | num_bytes: 27964 |
| | num_examples: 184 |
| | download_size: 30317 |
| | dataset_size: 89876 |
| | - config_name: GeoQuery_regression |
| | features: |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: float64 |
| | splits: |
| | - name: train |
| | num_bytes: 12026 |
| | num_examples: 182 |
| | - name: validation |
| | num_bytes: 1017 |
| | num_examples: 17 |
| | - name: test |
| | num_bytes: 5966 |
| | num_examples: 89 |
| | download_size: 13105 |
| | dataset_size: 19009 |
| | - config_name: GeoQuestions1089_YN |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: bool |
| | - name: answer_type |
| | list: string |
| | splits: |
| | - name: test |
| | num_bytes: 12412 |
| | num_examples: 181 |
| | download_size: 7718 |
| | dataset_size: 12412 |
| | - config_name: GeoQuestions1089_coord |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: |
| | list: float64 |
| | - name: answer_type |
| | list: string |
| | splits: |
| | - name: test |
| | num_bytes: 7042 |
| | num_examples: 87 |
| | download_size: 6242 |
| | dataset_size: 7042 |
| | - config_name: GeoQuestions1089_place |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: string |
| | - name: answer_type |
| | list: string |
| | splits: |
| | - name: test |
| | num_bytes: 4373368 |
| | num_examples: 455 |
| | download_size: 1896109 |
| | dataset_size: 4373368 |
| | - config_name: GeoQuestions1089_regression |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: float64 |
| | - name: answer_type |
| | list: string |
| | splits: |
| | - name: test |
| | num_bytes: 20649 |
| | num_examples: 231 |
| | download_size: 10655 |
| | dataset_size: 20649 |
| | - config_name: GeoSQA |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: scenario_id |
| | dtype: int64 |
| | - name: answer |
| | dtype: string |
| | - name: annotation |
| | dtype: string |
| | - name: scenario |
| | dtype: string |
| | - name: question |
| | dtype: string |
| | - name: A |
| | dtype: string |
| | - name: B |
| | dtype: string |
| | - name: C |
| | dtype: string |
| | - name: D |
| | dtype: string |
| | splits: |
| | - name: train |
| | num_bytes: 2350343 |
| | num_examples: 2644 |
| | - name: validation |
| | num_bytes: 566689 |
| | num_examples: 628 |
| | - name: test |
| | num_bytes: 762135 |
| | num_examples: 838 |
| | download_size: 1327080 |
| | dataset_size: 3679167 |
| | - config_name: GridRoute |
| | features: |
| | - name: matrix_size |
| | dtype: int64 |
| | - name: start |
| | list: int64 |
| | - name: end |
| | list: |
| | list: int64 |
| | - name: obstacles_coords |
| | list: |
| | list: int64 |
| | - name: path |
| | list: |
| | list: int64 |
| | splits: |
| | - name: test |
| | num_bytes: 439500 |
| | num_examples: 300 |
| | download_size: 16947 |
| | dataset_size: 439500 |
| | - config_name: MsMarco |
| | features: |
| | - name: question_id |
| | dtype: int64 |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | dtype: string |
| | - name: passages |
| | list: |
| | - name: is_selected |
| | dtype: int64 |
| | - name: passage_text |
| | dtype: string |
| | - name: url |
| | dtype: string |
| | splits: |
| | - name: test |
| | num_bytes: 10860618 |
| | num_examples: 2907 |
| | - name: train |
| | num_bytes: 90739271 |
| | num_examples: 23513 |
| | - name: validation |
| | num_bytes: 16126312 |
| | num_examples: 4149 |
| | download_size: 58502647 |
| | dataset_size: 117726201 |
| | - config_name: NY-POI |
| | features: |
| | - name: long-term_check-ins |
| | list: |
| | list: string |
| | - name: recent_check-ins |
| | list: |
| | list: string |
| | - name: candidates |
| | list: |
| | list: string |
| | - name: answer |
| | list: string |
| | splits: |
| | - name: test |
| | num_bytes: 9088765 |
| | num_examples: 1347 |
| | download_size: 3829714 |
| | dataset_size: 9088765 |
| | - config_name: PPNL_multi |
| | features: |
| | - name: matrix_size |
| | dtype: int64 |
| | - name: world_description |
| | dtype: string |
| | - name: world |
| | list: |
| | list: int64 |
| | - name: obstacles_coords |
| | list: |
| | list: int64 |
| | - name: start |
| | list: int64 |
| | - name: end |
| | list: |
| | list: int64 |
| | - name: n_goals |
| | dtype: int64 |
| | - name: path |
| | list: |
| | list: int64 |
| | - name: agent_as_a_point |
| | dtype: string |
| | - name: agent_has_direction |
| | dtype: string |
| | - name: distribution |
| | dtype: string |
| | splits: |
| | - name: test |
| | num_bytes: 80282702 |
| | num_examples: 55440 |
| | - name: train |
| | num_bytes: 76667038 |
| | num_examples: 53440 |
| | - name: validation |
| | num_bytes: 9587004 |
| | num_examples: 6680 |
| | download_size: 13201821 |
| | dataset_size: 166536744 |
| | - config_name: PPNL_single |
| | features: |
| | - name: matrix_size |
| | dtype: int64 |
| | - name: world_description |
| | dtype: string |
| | - name: world |
| | list: |
| | list: int64 |
| | - name: obstacles_coords |
| | list: |
| | list: int64 |
| | - name: start |
| | list: int64 |
| | - name: end |
| | list: |
| | list: int64 |
| | - name: n_goals |
| | dtype: int64 |
| | - name: path |
| | list: |
| | list: int64 |
| | - name: agent_as_a_point |
| | dtype: string |
| | - name: agent_has_direction |
| | dtype: string |
| | - name: distribution |
| | dtype: string |
| | splits: |
| | - name: test |
| | num_bytes: 15738553 |
| | num_examples: 19044 |
| | - name: train |
| | num_bytes: 12749254 |
| | num_examples: 16032 |
| | - name: validation |
| | num_bytes: 1594684 |
| | num_examples: 2004 |
| | download_size: 1341236 |
| | dataset_size: 30082491 |
| | - config_name: SpartUN |
| | features: |
| | - name: scenario_id |
| | dtype: string |
| | - name: question_id |
| | dtype: string |
| | - name: scenario |
| | dtype: string |
| | - name: question |
| | dtype: string |
| | - name: candidates_answers |
| | list: string |
| | - name: answer |
| | list: string |
| | - name: type |
| | dtype: string |
| | - name: k_hop |
| | dtype: int64 |
| | splits: |
| | - name: test |
| | num_bytes: 3597916 |
| | num_examples: 5551 |
| | - name: train |
| | num_bytes: 24431833 |
| | num_examples: 37095 |
| | - name: validation |
| | num_bytes: 3562581 |
| | num_examples: 5600 |
| | download_size: 3174385 |
| | dataset_size: 31592330 |
| | - config_name: SpatialEvalLLM |
| | features: |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | list: string |
| | - name: scenario |
| | dtype: string |
| | - name: struct_type |
| | dtype: string |
| | - name: size |
| | dtype: string |
| | - name: k_hop |
| | dtype: string |
| | - name: seed |
| | dtype: string |
| | - name: description_level |
| | dtype: string |
| | splits: |
| | - name: test |
| | num_bytes: 1100727 |
| | num_examples: 1400 |
| | download_size: 203297 |
| | dataset_size: 1100727 |
| | - config_name: StepGame |
| | features: |
| | - name: scenario |
| | dtype: string |
| | - name: question |
| | dtype: string |
| | - name: answer |
| | dtype: string |
| | - name: k_hop |
| | dtype: int64 |
| | - name: candidates_answers |
| | list: string |
| | splits: |
| | - name: train |
| | num_bytes: 15871676 |
| | num_examples: 50000 |
| | - name: validation |
| | num_bytes: 1584479 |
| | num_examples: 5000 |
| | - name: test |
| | num_bytes: 65697697 |
| | num_examples: 100000 |
| | download_size: 18189276 |
| | dataset_size: 83153852 |
| | - config_name: TourismQA |
| | features: |
| | - name: question |
| | dtype: string |
| | - name: city |
| | struct: |
| | - name: coord |
| | list: float64 |
| | - name: name |
| | dtype: string |
| | - name: tagged_locations |
| | list: string |
| | - name: tagged_locations_lat_long |
| | list: |
| | list: float64 |
| | - name: answers_names |
| | list: string |
| | - name: answers_adresses |
| | list: string |
| | - name: answers_sum_reviews |
| | list: string |
| | - name: answers_reviews |
| | list: |
| | list: string |
| | - name: answers_lat_longs |
| | list: |
| | list: float64 |
| | splits: |
| | - name: test |
| | num_bytes: 7590110 |
| | num_examples: 2153 |
| | - name: train |
| | num_bytes: 74777277 |
| | num_examples: 19762 |
| | - name: validation |
| | num_bytes: 7343485 |
| | num_examples: 2109 |
| | download_size: 45062284 |
| | dataset_size: 89710872 |
| | --- |
| | |
| | # Dataset Card for GeoBenchLLM |
| |
|
| | ## Table of Contents |
| |
|
| | ## Dataset Description |
| |
|
| | - **Homepage:** https://github.com/Rfr2003/GeoBenchLLM |
| | - **Repository:** https://github.com/Rfr2003/GeoBenchLLM |
| | - **Paper:** |
| | - **Point of Contact:** rodrigo.ferreira-rodrigues@utoulouse.fr |
| |
|
| | ### Dataset Summary |
| |
|
| | GeoBenchLLM aims to assess Large Language Models' (LLM) geographical abilities across a multitude of tasks. It is built from 12 datasets split across 8 different tasks: |
| |
|
| | - Knowledge/**Coordinates Prediction** : [GeoQuestions1089](https://github.com/AI-team-UoA/GeoQuestions1089) |
| | - Knowledge/**Yes|No questions**: [GeoQuestions1089](https://github.com/AI-team-UoA/GeoQuestions1089) |
| | - Knowledge/**Regression questions**: [GeoQuestions1089](https://github.com/AI-team-UoA/GeoQuestions1089), [GeoQuery](https://www.cs.utexas.edu/~ml/nldata/geoquery.html) |
| | - Knowledge/**Place Prediction**: [GeoQuestions1089](https://github.com/AI-team-UoA/GeoQuestions1089), [GeoQuery](https://www.cs.utexas.edu/~ml/nldata/geoquery.html), [Ms Marco](https://microsoft.github.io/msmarco/) |
| | - Reasoning/**Scenario Complex QA**: [GeoSQA](http://ws.nju.edu.cn/gaokao/geosqa/1.0/), [GKMC](https://github.com/nju-websoft/Jeeves-GKMC) |
| | - Reasoning/**Spatial Reasoning**: [SpartUN](https://github.com/HLR/SpaRTUN), [StepGame](https://github.com/ShiZhengyan/StepGame), [SpatialEvalLLM](https://github.com/runopti/SpatialEvalLLM) |
| | - Application/**POI Recommendation**: [TourismQA](https://github.com/dair-iitd/TourismQA), [NY-POI](https://sites.google.com/site/yangdingqi/home/foursquare-dataset) |
| | - Application/**Path Finding**: [GridRoute](https://github.com/LinChance/GridRoute), [PPNL](https://github.com/MohamedAghzal/llms-as-path-planners) |
| |
|
| | These datasets have been preprocessed in order to be easily accessible. |
| |
|
| |
|
| | ```python |
| | import datasets |
| | |
| | dataset = datasets.load_dataset("rfr2003/GeoBenchLLM", "GeoSQA") |
| | ``` |
| |
|
| | ### Supported Tasks and Leaderboards |
| |
|
| | The dataset is used for Text Generation. |
| |
|
| | ### Languages |
| |
|
| | All datasets are in English (`en`). |
| |
|
| | ## Dataset Structure |
| |
|
| | As this dataset contains very heterogeneous tasks, almost every dataset has a different data structure. |
| |
|
| | ### Data Instances |
| |
|
| | Please refer to the dataset viewer to see what an instance for each dataset looks like. |
| |
|
| | ### Data Fields |
| |
|
| | We will give the data fields for each dataset. Note that fields highlighted by 🟦 are required to formulate the question and fields highlighted by 🟩 contain the answer to the question. All other fields can either be used to perform some analytics or to formulate different tasks on the same dataset. |
| |
|
| | - **GeoQuestions1089_coord**: |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answer`(`List[List[float]]`) : a list of coordinate pairs forming the answer. In each pair, the first element corresponds to the latitude and the second to the longitude. |
| | - **GeoQuestions1089_YN**: |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answer`(`List[bool]`) : a list containing the boolean corresponding to the answer. |
| | - **GeoQuestions1089_regression** and **GeoQuery_regression**: |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answer`(`List[float]`) : a list containing the numbers to be predicted. |
| | - **GeoQuestions1089_place** and **GeoQuery_place**: |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answer`(`List[str]`) : a list containing the names of the places to be predicted. |
| | - **MsMarco**: |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answer`(`str`) : the answer to the question formulated by a human. |
| | - `question_id`(`int64`) : the id of the question from the original dataset. |
| | - `passages`(`List[dict]`) : a list of dicts. Each dict corresponds to a passage and gives the following information: |
| | - `is_selected`(`int64`) : 1 if the passage was selected to write the answer, 0 otherwise. |
| | - `passage_text`(`str`) : the text of the passage. |
| | - `url`(`str`) : the url from where the passage was retrieved. |
| | - **GeoSQA**: |
| | - 🟦 `annotation`(`str`) : the description of the image normally used to answer the question. |
| | - 🟦 `scenario`(`str`) : the scenario attached to the image providing context to the question. |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟦 `A`(`str`) : one of the possible answers to the question. |
| | - 🟦 `B`(`str`) : one of the possible answers to the question. |
| | - 🟦 `C`(`str`) : one of the possible answers to the question. |
| | - 🟦 `D`(`str`) : one of the possible answers to the question. |
| | - 🟩 `answer`(`str`) : the letter corresponding to the right choice. |
| | - `question_id`(`int64`) : the id of the question from the original dataset. |
| | - `scenario_id`(`int64`) : the id of the scenario from the original dataset. |
| | - **GKMC**: |
| | - 🟦 `scenario`(`str`) : the scenario providing context to the question. |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟦 `A`(`str`) : one of the possible answers to the question. |
| | - 🟦 `B`(`str`) : one of the possible answers to the question. |
| | - 🟦 `C`(`str`) : one of the possible answers to the question. |
| | - 🟦 `D`(`str`) : one of the possible answers to the question. |
| | - 🟩 `answer`(`str`) : the letter corresponding to the right choice. |
| | - `question_id`(`int64`) : the id of the question from the original dataset. |
| | - **SpatialEvalLLM**: |
| | - 🟦 `scenario`(`str`) : the scenario providing context to the question. |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answer`(`List[str]`) : a list containing the names of the right objects to predict. |
| | - `struct_type`(`str`) : the geometric structure of the map. |
| | - `size`(`str`) : the size of the structure in number of tiles composing it. |
| | - `k_hop`(`str`) : the minimum reasoning steps required to answer the question. |
| | - `seed`(`str`) : the seed used to generate the question. |
| | - `description_level`(`str`) : if **global** then the entirety of the map is described. If **local**, only a portion of the map is described. |
| | - **SpartUN**: |
| | - 🟦 `scenario`(`str`) : the scenario providing context to the question. |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟦 `candidates_answers`(`List[str]`) : the candidate answers from which the model has to choose. |
| | - 🟩 `answer`(`List[str]`) : a list containing the right answers from the candidate list. |
| | - `question_id`(`str`) : the id of the question from the original dataset. |
| | - `scenario_id`(`str`) : the id of the scenario from the original dataset. |
| | - `type`(`str`) : **YN** for boolean questions, **FR** for Find Relation questions. |
| | - `k_hop`(`int64`) : the minimum reasoning steps required to answer the question. |
| | - **StepGame**: |
| | - 🟦 `scenario`(`str`) : the scenario providing context to the question. |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟦 `candidates_answers`(`List[str]`) : the candidate answers from which the model has to choose. |
| | - 🟩 `answer`(`List[str]`) : a list containing the right answers from the candidate list. |
| | - `k_hop`(`int64`) : the minimum reasoning steps required to answer the question. |
| | - **TourismQA**: |
| | - 🟦 `question`(`str`) : the question to be answered. |
| | - 🟩 `answers_names`(`List[str]`) : a list containing the names of the POI to be recommended (answer expected). |
| | - `city`(`dict`) : a dict containing the following information about the city where the question takes place: |
| | - `coord`(`List[float]`) : the coordinates of the city. The first element of the list corresponds to the latitude and the second to the longitude. |
| | - `name`(`str`) : the name of the city. |
| | - `tagged_locations`(`List[str]`) : the location names retrieved from the question (not used for our description of the task). |
| | - `tagged_locations_lat_long`(`List[List[float]]`) : the latitudes and longitudes of the locations retrieved from the question (not used for our description of the task). |
| | - `answers_adresses`(`List[str]`) : the postal adresses of each answer (not used for our description of the task). |
| | - `answers_reviews`(`List[List[str]]`) : for each POI, we have a list of reviews (not used for our description of the task). |
| | - `answers_sum_reviews`(`List[str]`) : a summarization of the reviews for each POI, retrieved from prior work (source to be confirmed; not used for our description of the task). |
| | - `answers_lat_longs`(`List[List[float]]`) : the latitudes and longitudes of the answers (not used for our description of the task). |
| | - **NY-POI**: |
| | - 🟦 `long-term_check-ins`(`List[List[str]]`) : a list of long-term check-ins from the same user. Each check-in is a list containing, in order: POI id, POI category and time of visit in UTC. |
| | - 🟦 `recent_check-ins`(`List[List[str]]`) : a list of recent check-ins from a user. Each check-in is a list containing, in order: POI id, POI category and time of visit in UTC. |
| | - 🟦 `candidates`(`List[List[str]]`) : a list of POI candidates containing the answer. For each POI, we have its id, its distance from the last visited POI in the recent check-ins list and its category. |
| | - 🟩 `answer`(`List[str]`) : the id of the POI corresponding to the answer. |
| | - **GridRoute**: |
| | - 🟦 `matrix_size`(`int64`) : the size of the squared matrix. |
| | - 🟦 `start`(`List[int64]`) : the coordinates of the starting point. The first element of the list is the x coordinate and the second is the y one. |
| | - 🟦 `end`(`List[List[int64]]`) : a list of ending points that the agent has to reach. This dataset only has one end point per question. |
| | - 🟦 `obstacles_coords`(`List[List[int64]]`) : a list of coordinates corresponding to the obstacles that we have to avoid. For each point, the first element of the list is the x coordinate and the second is the y one. |
| | - 🟩 `path`(`List[List[int64]]`) : a list of coordinates corresponding to the optimal path. For each point, the first element of the list is the x coordinate and the second is the y one. |
| | - **PPNL_single**: |
| | - 🟦 `matrix_size`(`int64`) : the size of the squared matrix. |
| | - 🟦 `start`(`List[int64]`) : the coordinates of the starting point. The first element of the list is the x coordinate and the second is the y one. |
| | - 🟦 `end`(`List[List[int64]]`) : a list of ending points that the agent has to reach. This dataset only has one end point per question. |
| | - 🟦 `obstacles_coords`(`List[List[int64]]`) : a list of coordinates corresponding to the obstacles that we have to avoid. For each point, the first element of the list is the x coordinate and the second is the y one. |
| | - 🟩 `path`(`List[List[int64]]`) : a list of coordinates corresponding to the optimal path. For each point, the first element of the list is the x coordinate and the second is the y one. If there is no path possible, this field is an empty list. |
| | - `world_description`(`str`) : a description of the world in natural language. Can be used to directly prompt the model. |
| | - `n_goals`(`int64`) : the number of end points to reach. |
| | - `agent_as_a_point`(`str`) : the solution path described as if the model is a point. |
| | - `agent_has_direction`(`str`) : the solution path described as directions. |
| | - `distribution`(`str`) : **iid** if the example has the same properties as the training distribution (matrix size, initial location/goal placements and number of obstacles), **ood** otherwise. |
| | - **PPNL_multi**: |
| | - 🟦 `matrix_size`(`int64`) : the size of the squared matrix. |
| | - 🟦 `start`(`List[int64]`) : the coordinates of the starting point. The first element of the list is the x coordinate and the second is the y one. |
| | - 🟦 `end`(`List[List[int64]]`) : a list of ending points that the agent has to reach. Each question has at least 2 goals to reach. |
| | - 🟦 `obstacles_coords`(`List[List[int64]]`) : a list of coordinates corresponding to the obstacles that we have to avoid. For each point, the first element of the list is the x coordinate and the second is the y one. |
| | - 🟩 `path`(`List[List[int64]]`) : a list of coordinates corresponding to the optimal path. For each point, the first element of the list is the x coordinate and the second is the y one. If there is no path possible, this field is an empty list. |
| | - `world_description`(`str`) : a description of the world in natural language. Can be used to directly prompt the model. |
| | - `n_goals`(`int64`) : the number of end points to reach. |
| | - `agent_as_a_point`(`str`) : the solution path described as if the model is a point. |
| | - `agent_has_direction`(`str`) : the solution path described as directions. |
| | - `distribution`(`str`) : **iid** if the example has the same properties as the training distribution (matrix size, initial location/goal placements and number of obstacles), **ood** otherwise. |
| |
|
| |
|
| | ### Data Splits |
| |
|
| | | Cogn. Level | Tasks | Datasets | Train | Dev | Test | |
| | | --------------- | ---------------------- | ---------------------------------------- | --------------------- | ------------------- | ------------------------- | |
| | | **Knowledge** | Coordinates Prediction | GeoQuestions1089_coord | – | – | 87 | |
| | | | Yes/No questions | GeoQuestions1089_YN | – | – | 181 | |
| | | | Regression | GeoQuestions1089_regression<br>GeoQuery_regression | –<br>182 | –<br>17 | 231<br>89 | |
| | | | Place prediction | GeoQuestions1089_place<br>GeoQuery_place<br>MS-Marco_place | –<br>346<br>23 513 | –<br>33<br>4 149 | 455<br>184<br>2 907 | |
| | | **──────────** | **──────────** | **──────────** | **──────────** | **──────────** | **──────────** | |
| | | **Reasoning** | Scenario Complex QA | GeoSQA<br>GKMC | 2 644<br>– | 628<br>– | 838<br>1 600 | |
| | | | Spatial Reasoning | SpatialEvalLLM<br>SpartUN<br>StepGame | –<br>37 095<br>50 000 | –<br>5 600<br>5 000 | 1 400<br>5 551<br>100 000 | |
| | | **──────────** | **──────────** | **──────────** | **──────────** | **──────────** | **──────────** | |
| | | **Application** | POI Recommendation | TourismQA<br>NY-QA | 19 762<br>– | 2 109<br>– | 2 153<br>1 347 | |
| | | | Path Finding | GridRoute<br>PPNL_single<br>PPNL_multi | –<br>16 032<br>53 440 | –<br>2 004<br>6 680 | 300<br>19 044<br>55 440 | |
| | | **──────────** | **──────────** | **──────────** | **──────────** | **──────────** | **──────────** | |
| | | **Total** | – | – | **203 014** | **26 220** | **191 807** | |
| | |
| | |
| | |
| | ## Dataset Creation |
| | |
| | ### Curation Rationale |
| | |
| | [Needs More Information] |
| | |
| | ### Source Data |
| | |
| | #### Initial Data Collection and Normalization |
| | |
| | [Needs More Information] |
| | |
| | #### Who are the source language producers? |
| | |
| | [Needs More Information] |
| | |
| | ### Annotations |
| | |
| | #### Annotation process |
| | |
| | [Needs More Information] |
| | |
| | #### Who are the annotators? |
| | |
| | [Needs More Information] |
| | |
| | ### Personal and Sensitive Information |
| | |
| | [Needs More Information] |
| | |
| | ## Considerations for Using the Data |
| | |
| | ### Social Impact of Dataset |
| | |
| | [Needs More Information] |
| | |
| | ### Discussion of Biases |
| | |
| | [Needs More Information] |
| | |
| | ### Other Known Limitations |
| | |
| | [Needs More Information] |
| | |
| | ## Additional Information |
| | |
| | ### Dataset Curators |
| | |
| | [Needs More Information] |
| | |
| | ### Licensing Information |
| | |
| | [Needs More Information] |
| | |
| | ### Citation Information |
| | |
| | Thanks to all the authors of all the datasets. If you use this Benchmark, please cite their work too. |
| | |
| | ```Tex |
| | @misc{huang2021retrieverreadermeetsscenariobasedmultiplechoice, |
| | title={When Retriever-Reader Meets Scenario-Based Multiple-Choice Questions}, |
| | author={Zixian Huang and Ao Wu and Yulin Shen and Gong Cheng and Yuzhong Qu}, |
| | year={2021}, |
| | eprint={2108.13875}, |
| | archivePrefix={arXiv}, |
| | primaryClass={cs.CL}, |
| | url={https://arxiv.org/abs/2108.13875}, |
| | } |
| | |
| | @inproceedings{finegan-dollak-etal-2018-improving, |
| | title = "Improving Text-to-{SQL} Evaluation Methodology", |
| | author = "Finegan-Dollak, Catherine and |
| | Kummerfeld, Jonathan K. and |
| | Zhang, Li and |
| | Ramanathan, Karthik and |
| | Sadasivam, Sesh and |
| | Zhang, Rui and |
| | Radev, Dragomir", |
| | editor = "Gurevych, Iryna and |
| | Miyao, Yusuke", |
| | booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", |
| | month = jul, |
| | year = "2018", |
| | address = "Melbourne, Australia", |
| | publisher = "Association for Computational Linguistics", |
| | url = "https://aclanthology.org/P18-1033/", |
| | doi = "10.18653/v1/P18-1033", |
| | pages = "351--360", |
| | } |
| | |
| | @inproceedings{data-geography-original, |
| | dataset = {Geography, original}, |
| | author = {John M. Zelle and Raymond J. Mooney}, |
| | title = {Learning to Parse Database Queries Using Inductive Logic Programming}, |
| | booktitle = {Proceedings of the Thirteenth National Conference on Artificial Intelligence - Volume 2}, |
| | year = {1996}, |
| | pages = {1050--1055}, |
| | location = {Portland, Oregon}, |
| | url = {http://dl.acm.org/citation.cfm?id=1864519.1864543}, |
| | } |
| | |
| | @misc{huang2019geosqabenchmarkscenariobasedquestion, |
| | title={GeoSQA: A Benchmark for Scenario-based Question Answering in the Geography Domain at High School Level}, |
| | author={Zixian Huang and Yulin Shen and Xiao Li and Yuang Wei and Gong Cheng and Lin Zhou and Xinyu Dai and Yuzhong Qu}, |
| | year={2019}, |
| | eprint={1908.07855}, |
| | archivePrefix={arXiv}, |
| | primaryClass={cs.CL}, |
| | url={https://arxiv.org/abs/1908.07855}, |
| | } |
| | |
| | @misc{li2025gridroutebenchmarkllmbasedroute, |
| | title={GridRoute: A Benchmark for LLM-Based Route Planning with Cardinal Movement in Grid Environments}, |
| | author={Kechen Li and Yaotian Tao and Ximing Wen and Quanwei Sun and Zifei Gong and Chang Xu and Xizhe Zhang and Tianbo Ji}, |
| | year={2025}, |
| | eprint={2505.24306}, |
| | archivePrefix={arXiv}, |
| | primaryClass={cs.AI}, |
| | url={https://arxiv.org/abs/2505.24306}, |
| | } |
| | |
| | @article{DBLP:journals/corr/NguyenRSGTMD16, |
| | author = {Tri Nguyen and |
| | Mir Rosenberg and |
| | Xia Song and |
| | Jianfeng Gao and |
| | Saurabh Tiwary and |
| | Rangan Majumder and |
| | Li Deng}, |
| | title = {{MS} {MARCO:} {A} Human Generated MAchine Reading COmprehension Dataset}, |
| | journal = {CoRR}, |
| | volume = {abs/1611.09268}, |
| | year = {2016}, |
| | url = {http://arxiv.org/abs/1611.09268}, |
| | archivePrefix = {arXiv}, |
| | eprint = {1611.09268}, |
| | timestamp = {Mon, 13 Aug 2018 16:49:03 +0200}, |
| | biburl = {https://dblp.org/rec/journals/corr/NguyenRSGTMD16.bib}, |
| | bibsource = {dblp computer science bibliography, https://dblp.org} |
| | } |
| | |
| | @inbook{placequestions, |
| | author = {Hamzei, Ehsan and Li, Haonan and Vasardani, Maria and Baldwin, Timothy and Winter, Stephan and Tomko, Martin}, |
| | year = {2020}, |
| | month = {01}, |
| | pages = {3-19}, |
| | title = {Place Questions and Human-Generated Answers: A Data Analysis Approach}, |
| | isbn = {978-3-030-14745-7}, |
| | doi = {10.1007/978-3-030-14745-7_1} |
| | } |
| |
|
| | @inproceedings{aghzal2024can, |
| | title={Can Large Language Models be Good Path Planners? A Benchmark and Investigation on Spatial-temporal Reasoning}, |
| | author={Aghzal, Mohamed and Plaku, Erion and Yao, Ziyu}, |
| | booktitle={ICLR 2024 Workshop on Large Language Model (LLM) Agents}, |
| | year={2024} |
| | } |
| |
|
| | @inproceedings{mirzaee-kordjamshidi-2022-transfer, |
| | title = "Transfer Learning with Synthetic Corpora for Spatial Role Labeling and Reasoning", |
| | author = "Mirzaee, Roshanak and |
| | Kordjamshidi, Parisa", |
| | booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", |
| | month = dec, |
| | year = "2022", |
| | address = "Abu Dhabi, United Arab Emirates", |
| | publisher = "Association for Computational Linguistics", |
| | url = "https://aclanthology.org/2022.emnlp-main.413", |
| | pages = "6148--6165", |
| | abstract = "", |
| | } |
| | |
| | @article{yamada2023evaluating, |
| | title={Evaluating Spatial Understanding of Large Language Models}, |
| | author={Yamada, Yutaro and Bao, Yihan and Lampinen, Andrew K and Kasai, Jungo and Yildirim, Ilker}, |
| | journal={Transactions on Machine Learning Research}, |
| | year={2024} |
| | } |
| | |
| | @inproceedings{10.1145/3459637.3482320, |
| | author = {Contractor, Danish and Shah, Krunal and Partap, Aditi and Singla, Parag and Mausam, Mausam}, |
| | title = {Answering POI-recommendation Questions using Tourism Reviews}, |
| | year = {2021}, |
| | isbn = {9781450384469}, |
| | publisher = {Association for Computing Machinery}, |
| | address = {New York, NY, USA}, |
| | url = {https://doi.org/10.1145/3459637.3482320}, |
| | doi = {10.1145/3459637.3482320}, |
| | booktitle = {Proceedings of the 30th ACM International Conference on Information \& Knowledge Management}, |
| | pages = {281--291}, |
| | numpages = {11}, |
| | keywords = {large scale qa, poi-recommendation, question answering, real world task, tourism qa}, |
| | location = {Virtual Event, Queensland, Australia}, |
| | series = {CIKM '21} |
| | } |
| | |
| |
|
| | @misc{li2024locationawaremodularbiencoder, |
| | title={Location Aware Modular Biencoder for Tourism Question Answering}, |
| | author={Haonan Li and Martin Tomko and Timothy Baldwin}, |
| | year={2024}, |
| | eprint={2401.02187}, |
| | archivePrefix={arXiv}, |
| | primaryClass={cs.CL}, |
| | url={https://arxiv.org/abs/2401.02187}, |
| | } |
| | |
| | @inproceedings{10.1007/978-3-031-47243-5_15, |
| | title = {Benchmarking Geospatial Question Answering Engines Using the Dataset GeoQuestions1089}, |
| | author = {Sergios-Anestis Kefalidis and Dharmen Punjani and Eleni Tsalapati and |
| | Konstantinos Plas and Mariangela Pollali and Michail Mitsios and |
| | Myrto Tsokanaridou and Manolis Koubarakis and Pierre Maret}, |
| | booktitle = {The Semantic Web - {ISWC} 2023 - 22nd International Semantic Web Conference, |
| | Athens, Greece, November 6-10, 2023, Proceedings, Part {II}}, |
| | year = {2023} |
| | } |
| | |
| | @inproceedings{stepGame2022shi, |
| | title={StepGame: A New Benchmark for Robust Multi-Hop Spatial Reasoning in Texts}, |
| | author={Shi, Zhengxiang and Zhang, Qiang and Lipani, Aldo}, |
| | volume={36}, |
| | url={https://ojs.aaai.org/index.php/AAAI/article/view/21383}, |
| | DOI={10.1609/aaai.v36i10.21383}, |
| | booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, |
| | year={2022}, |
| | month={Jun.}, |
| | pages={11321-11329} |
| | } |
| | |
| | @inproceedings{Yang_2022, series={SIGIR ’22}, |
| | title={GETNext: Trajectory Flow Map Enhanced Transformer for Next POI Recommendation}, |
| | url={http://dx.doi.org/10.1145/3477495.3531983}, |
| | DOI={10.1145/3477495.3531983}, |
| | booktitle={Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval}, |
| | publisher={ACM}, |
| | author={Yang, Song and Liu, Jiamou and Zhao, Kaiqi}, |
| | year={2022}, |
| | month=jul, pages={1144--1153}, |
| | collection={SIGIR ’22} |
| | } |
| | |
| | @ARTICLE{6844862, |
| | author={Yang, Dingqi and Zhang, Daqing and Zheng, Vincent W. and Yu, Zhiyong}, |
| | journal={IEEE Transactions on Systems, Man, and Cybernetics: Systems}, |
| | title={Modeling User Activity Preference by Leveraging User Spatial Temporal Characteristics in LBSNs}, |
| | year={2015}, |
| | volume={45}, |
| | number={1}, |
| | pages={129-142}, |
| | keywords={Tensile stress;Data models;Context modeling;Correlation;Hidden Markov models;Location based social networks;spatial;temporal;tensor factorization;user activity preference;Location based social networks;spatial;temporal;tensor factorization;user activity preference}, |
| | doi={10.1109/TSMC.2014.2327053} |
| | } |
| |
|
| | @inproceedings{10.1145/3539618.3591770, |
| | author = {Yan, Xiaodong and Song, Tengwei and Jiao, Yifeng and He, Jianshan and Wang, Jiaotuan and Li, Ruopeng and Chu, Wei}, |
| | title = {Spatio-Temporal Hypergraph Learning for Next POI Recommendation}, |
| | year = {2023}, |
| | isbn = {9781450394086}, |
| | publisher = {Association for Computing Machinery}, |
| | address = {New York, NY, USA}, |
| | url = {https://doi.org/10.1145/3539618.3591770}, |
| | doi = {10.1145/3539618.3591770}, |
| | booktitle = {Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval}, |
| | pages = {403--412}, |
| | numpages = {10}, |
| | keywords = {graph transformer, hypergraph, next poi recommendation}, |
| | location = {Taipei, Taiwan}, |
| | series = {SIGIR '23} |
| | } |
| | |
| | @INPROCEEDINGS{10605522, |
| | author={Feng, Shanshan and Lyu, Haoming and Li, Fan and Sun, Zhu and Chen, Caishun}, |
| | booktitle={2024 IEEE Conference on Artificial Intelligence (CAI)}, |
| | title={Where to Move Next: Zero-shot Generalization of LLMs for Next POI Recommendation}, |
| | year={2024}, |
| | volume={}, |
| | number={}, |
| | pages={1530-1535}, |
| | keywords={Accuracy;Large language models;Computational modeling;Buildings;Chatbots;Cognition;Data models;LLMs;Next POI Recommendation;Zero-shot;Spatial-Temporal Data}, |
| | doi={10.1109/CAI59869.2024.00277} |
| | } |
| |
|
| | ``` |
| | |
| | ### Contributions |
| | |
| | TO DO |
| | |