code stringlengths 17 6.64M |
|---|
class EnvSpec(Protocol):
'Cpp EnvSpec class.'
_config_keys: List[str]
_default_config_values: Tuple
gen_config: Type
def __init__(self, config: Tuple):
'Protocol for constructor of EnvSpec.'
@property
def _state_spec(self) -> Tuple:
'Cpp private _state_spec.'
@proper... |
class ArraySpec(object):
'Spec of numpy array.'
def __init__(self, dtype: Type, shape: List[int], bounds: Tuple[(Any, Any)], element_wise_bounds: Tuple[(Any, Any)]):
'Constructor of ArraySpec.'
self.dtype = dtype
self.shape = shape
if element_wise_bounds[0]:
self.m... |
class EnvPool(Protocol):
'Cpp PyEnvpool class interface.'
_state_keys: List[str]
_action_keys: List[str]
spec: Any
def __init__(self, spec: EnvSpec):
'Constructor of EnvPool.'
def __len__(self) -> int:
'Return the number of environments.'
@property
def _spec(self) ->... |
def check_key_duplication(cls: Any, keytype: str, keys: List[str]) -> None:
"Check if there's any duplicated keys in ``keys``."
(ukeys, counts) = np.unique(keys, return_counts=True)
if (not np.all((counts == 1))):
dup_keys = ukeys[(counts > 1)]
raise SystemError(f'{cls} c++ code error. {ke... |
def _shape_with_layout(specs: Tuple[(Tuple[(List[int], Any)], ...)]) -> Tuple[(xla_client.Shape, ...)]:
    """Build one XLA shape (with an explicit layout) per ``(shape, dtype)`` spec.

    Rank-0 entries become scalar shapes; everything else gets an array shape
    with the row-major minor-to-major layout ``(rank-1, ..., 1, 0)``.
    """
    xla_shapes = []
    for shape, dtype in specs:
        if len(shape) > 0:
            layout = tuple(range(len(shape) - 1, -1, -1))
            xla_shapes.append(xla_client.Shape.array_shape(dtype, shape, layout))
        else:
            xla_shapes.append(xla_client.Shape.scalar_shape(dtype))
    return tuple(xla_shapes)
|
def _normalize_specs(specs: Tuple[(Tuple[(Any, List[int])], ...)]) -> Tuple[(Tuple[(List[int], Any)], ...)]:
return tuple(((shape, dtypes.canonicalize_dtype(dtype)) for (dtype, shape) in specs))
|
def _make_xla_function(obj: Any, handle: bytes, name: str, specs: Tuple[(Tuple[Any], Tuple[Any])], capsules: Tuple[(Any, Any)]) -> Callable:
(in_specs, out_specs) = specs
in_specs = _normalize_specs(in_specs)
out_specs = _normalize_specs(out_specs)
(cpu_capsule, gpu_capsule) = capsules
xla_client.... |
def make_xla(obj: Any) -> Any:
'Return callables that can be jitted in a namedtuple.\n\n Args:\n obj: The object that has a `_xla` function.\n All instances of envpool has a `_xla` function that returns\n the necessary information for creating jittable send/recv functions.\n\n Returns:\n XlaFunc... |
class EnvRegistry():
'A collection of available envs.'
def __init__(self) -> None:
'Constructor of EnvRegistry.'
self.specs: Dict[(str, Tuple[(str, str, Dict[(str, Any)])])] = {}
self.envpools: Dict[(str, Dict[(str, Tuple[(str, str)])])] = {}
def register(self, task_id: str, impo... |
def _vizdoom_game_list() -> List[str]:
    """List vizdoom games: every ``.cfg`` in ``maps_path`` that has a matching ``.wad``."""
    games = []
    for entry in sorted(os.listdir(maps_path)):
        if not entry.endswith('.cfg'):
            continue
        # Only keep configs whose companion .wad map file actually exists.
        wad_path = os.path.join(maps_path, entry.replace('.cfg', '.wad'))
        if os.path.exists(wad_path):
            games.append(entry.replace('.cfg', ''))
    return games
|
class _VizdoomPretrainTest(absltest.TestCase):
def get_path(self, path: str) -> str:
return os.path.join('envpool', 'vizdoom', 'maps', path)
def eval_c51(self, task: str, resume_path: str, num_envs: int=10, seed: int=0, cfg_path: Optional[str]=None, reward_config: Optional[dict]=None) -> Tuple[(np.n... |
class TimeStep(dm_env.TimeStep):
    """Batched ``dm_env.TimeStep`` whose ``last`` works on vectorized step types."""

    def last(self) -> bool:
        """Return True when any env in the batch hit ``StepType.LAST`` (episode end)."""
        is_last = (self.step_type == dm_env.StepType.LAST)
        return any(is_last)
|
class BatchSequenceAdder(reverb_sequence.SequenceAdder):
@classmethod
def signature(cls, environment_spec: specs.EnvironmentSpec, extras_spec: types.NestedSpec=..., sequence_length: Optional[int]=None, batch_size: Optional[int]=None):
def add_extra_dim(paths: Iterable[str], spec: tf.TensorSpec):
... |
class AdderWrapper(Adder):
def __init__(self, adder: Adder) -> None:
self._adder: Adder = adder
def reset(self):
self._adder.reset()
def add_first(self, timestep: TimeStep):
if (not any(timestep.first())):
raise ValueError('adder.add_first with an initial timestep (i... |
def _batched_feed_forward_with_extras_to_actor_core(policy: FeedForwardPolicyWithExtra) -> ActorCore[(SimpleActorCoreStateWithExtras, Mapping[(str, jnp.ndarray)])]:
'Modified adapter allowing batched data processing.'
def select_action(params: networks_lib.Params, observation: networks_lib.Observation, state... |
class PPOBuilder(ppo.PPOBuilder):
'Wrap the PPO algorithm builder for EnvPool.'
def __init__(self, config: ppo.PPOConfig, num_envs: int=(- 1)):
super().__init__(config)
self._num_envs = num_envs
self._batch_env = (num_envs > 0)
def make_replay_tables(self, environment_spec: specs... |
class BatchEnvWrapper(dm_env.Environment):
def __init__(self, environment: Union[(DummyVecEnv, EnvPool)]):
self._environment = environment
if (not isinstance(environment, DummyVecEnv)):
self._num_envs = len(environment.all_env_ids)
self._use_env_pool = True
else:
... |
def make_mujoco_environment(task: str, use_envpool: bool=False, use_vec_env=False, num_envs: int=2):
env_wrappers = []
if use_envpool:
env = envpool.make(task, env_type='gym', num_envs=num_envs)
env_wrappers.append(BatchEnvWrapper)
elif use_vec_env:
env = make_vec_env(task, n_envs=... |
def make_logger(label: str, config: dict, run_name: str='', wb_entity: str='', steps_key: str='steps', task_instance: int=0) -> loggers.Logger:
del task_instance, steps_key
num_envs = (config['num_envs'] if config['use_batch_env'] else 1)
print_fn = logging.info
terminal_logger = terminal.TerminalLogg... |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip('.py'), help='Name of this experiment.')
parser.add_argument('--env-name', type=str, default='HalfCheetah-v3', help='What environment to run.')
parser.add_argument... |
def build_experiment_config(FLAGS):
task = FLAGS.env_name
use_envpool = FLAGS.use_envpool
use_vec_env = ((not use_envpool) and FLAGS.use_vec_env)
use_batch_env = (use_envpool or use_vec_env)
num_envs = (FLAGS.num_envs if use_batch_env else (- 1))
num_steps = ((FLAGS.num_steps // FLAGS.num_envs... |
def main():
logging.info(f'Jax Devices: {jax.devices()}')
FLAGS = parse_args()
(experiment, config) = build_experiment_config(FLAGS)
if FLAGS.use_wb:
run_name = f'acme_ppo__{FLAGS.env_name}'
if FLAGS.use_envpool:
run_name += f'__envpool-{FLAGS.num_envs}'
elif FLAGS.... |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip('.py'), help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default='Pong-v5', help='the id of the gym environment')
parser.add_argument(... |
class RecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, deque_size=100):
super(RecordEpisodeStatistics, self).__init__(env)
self.num_envs = getattr(env, 'num_envs', 1)
self.episode_returns = None
self.episode_lengths = None
self.has_lives = False
env.re... |
def layer_init(layer, std=(2 ** 0.5), bias_const=0.0):
    """Initialize ``layer`` in place: orthogonal weight (gain ``std``), constant bias.

    Returns the same layer object for fluent use inside ``nn.Sequential``.
    """
    torch.nn.init.constant_(layer.bias, bias_const)
    torch.nn.init.orthogonal_(layer.weight, std)
    return layer
|
class Agent(nn.Module):
def __init__(self, envs):
super(Agent, self).__init__()
self.network = nn.Sequential(layer_init(nn.Conv2d(4, 32, 8, stride=4)), nn.ReLU(), layer_init(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(), layer_init(nn.Conv2d(64, 64, 3, stride=1)), nn.ReLU(), nn.Flatten(), layer_init... |
def gym_sync_step() -> None:
num_envs = 4
env = envpool.make_gym('Pong-v5', num_envs=num_envs)
action_num = env.action_space.n
if is_legacy_gym:
obs = env.reset()
else:
(obs, _) = env.reset()
assert (obs.shape == (num_envs, 4, 84, 84))
for _ in range(1000):
action =... |
def dm_sync_step() -> None:
num_envs = 4
env = envpool.make_dm('Pong-v5', num_envs=num_envs)
action_num = env.action_spec().num_values
ts = env.reset()
assert ts.observation.obs.shape, (num_envs, 4, 84, 84)
for _ in range(1000):
action = np.random.randint(action_num, size=num_envs)
... |
def async_step() -> None:
num_envs = 8
batch_size = 4
env = envpool.make_dm('Pong-v5', num_envs=num_envs, batch_size=batch_size)
action_num = env.action_spec().num_values
ts = env.reset()
for _ in range(1000):
env_id = ts.observation.env_id
assert (len(env_id) == batch_size)
... |
def make_env() -> None:
env_dm = envpool.make('Pong-v5', env_type='dm')
env_dm0 = envpool.make_dm('Pong-v5')
env_gym = envpool.make('Pong-v5', env_type='gym')
env_gym0 = envpool.make_gym('Pong-v5')
print(env_dm)
print(env_gym)
assert (str(env_dm) == str(env_dm0))
assert (str(env_gym) =... |
def make_spec() -> None:
spec = envpool.make_spec('Pong-v5', num_envs=4)
print(spec)
gym_obs_space = spec.observation_space
gym_act_space = spec.action_space
dm_obs_spec = spec.observation_spec()
dm_act_spec = spec.action_spec()
np.testing.assert_allclose(gym_obs_space.high, 255)
asser... |
def check_info_optim() -> None:
    """Check that per-env ``qpos0``/``qvel0`` entries are absent from Ant-v3 step info."""
    env = envpool.make_gym('Ant-v3')
    action = np.array([env.action_space.sample()])
    # step(...) returns info as its last element.
    info = env.step(action, np.array([0]))[-1]
    assert 'qpos0' not in info
    assert 'qvel0' not in info
|
@njit
def compute_gae(gamma: float, gae_lambda: float, value: np.ndarray, reward: np.ndarray, done: np.ndarray, env_id: np.ndarray, numenv: int) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
(T, B) = value.shape
mask = ((1.0 - done) * (gamma * gae_lambda))
index_tp1 = (np.zeros(numenv, np.int32) - 1)
... |
def test_episodic_returns():
value = np.zeros([8, 1])
done = np.array([1, 0, 0, 1, 0, 1, 0, 1.0]).reshape(8, 1).astype(bool)
rew = np.array([0, 1, 2, 3, 4, 5, 6, 7.0]).reshape(8, 1)
env_id = np.zeros([8, 1], int)
(returns, adv, mask) = compute_gae(gamma=0.1, gae_lambda=1, value=value, reward=rew, ... |
def test_time():
(T, B, N) = (128, 8, (8 * 4))
cnt = 10000
value = np.random.rand(T, B)
rew = np.random.rand(T, B)
done = np.random.randint(2, size=[T, B]).astype(bool)
env_id = np.random.randint(N, size=[T, B])
def wrapper():
return compute_gae(gamma=0.99, gae_lambda=0.95, value=... |
class CnnActorCritic(nn.Module):
def __init__(self, action_size: int):
super().__init__()
layers = [nn.Conv2d(4, 32, kernel_size=8, stride=4), nn.ReLU(inplace=True), nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(inplace=True), nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(inplace=True... |
class MlpActorCritic(nn.Module):
def __init__(self, state_size: int, action_size: int):
super().__init__()
layers = [nn.Linear(state_size, 64), nn.ReLU(inplace=True), nn.Linear(64, 64), nn.ReLU(inplace=True)]
self.net = nn.Sequential(*layers)
self.actor = nn.Linear(64, action_size... |
class DiscretePPO():
def __init__(self, actor_critic: nn.Module, optim: torch.optim.Optimizer, dist_fn: Type[torch.distributions.Distribution], lr_scheduler: torch.optim.lr_scheduler.LambdaLR, config: argparse.Namespace):
self.actor_critic = actor_critic
self.optim = optim
self.dist_fn = ... |
class MovAvg():
    """Moving average over the most recent ``size`` scalar values."""

    def __init__(self, size: int = 100):
        """Create an empty window holding at most ``size`` entries."""
        self.size = size
        self.cache = []

    def add_bulk(self, x: np.ndarray) -> float:
        """Append every value of ``x`` to the window and return the current mean."""
        self.cache.extend(x.tolist())
        # Drop the oldest entries so the window never exceeds ``size``.
        if len(self.cache) > self.size:
            del self.cache[:-self.size]
        return np.mean(self.cache)
|
class Actor():
def __init__(self, policy: DiscretePPO, train_envs: Any, test_envs: Any, writer: SummaryWriter, config: argparse.Namespace):
self.policy = policy
self.train_envs = train_envs
self.test_envs = test_envs
self.writer = writer
self.config = config
self.o... |
class VecAdapter(VecEnvWrapper):
'\n Convert EnvPool object to a Stable-Baselines3 (SB3) VecEnv.\n\n :param venv: The envpool object.\n '
def __init__(self, venv: EnvPool):
venv.num_envs = venv.spec.config.num_envs
super().__init__(venv=venv)
def step_async(self, actions: np.ndarray) ... |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='CartPole-v1')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--buffer-size', type=int, default=20000)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_a... |
def run_ppo(args):
env = gym.make(args.task)
if (args.task == 'CartPole-v0'):
env.spec.reward_threshold = 200
elif (args.task == 'CartPole-v1'):
env.spec.reward_threshold = 500
train_envs = envpool.make(args.task, num_envs=args.training_num, env_type='gym')
test_envs = envpool.make... |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='Pendulum-v1')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--buffer-size', type=int, default=20000)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_a... |
def run_ppo(args):
env = gym.make(args.task)
if (args.task == 'Pendulum-v1'):
env.spec.reward_threshold = (- 250)
args.state_shape = (env.observation_space.shape or env.observation_space.n)
args.action_shape = (env.action_space.shape or env.action_space.n)
args.max_action = env.action_spac... |
def policy(states: jnp.ndarray) -> jnp.ndarray:
    """Trivial policy: emit action 0 (int32) for every state in the batch."""
    batch_size = states.shape[0]
    return jnp.zeros((batch_size,), dtype=jnp.int32)
|
def gym_sync_step() -> None:
num_envs = 4
env = envpool.make_gym('Pong-v5', num_envs=num_envs)
(handle, recv, send, step) = env.xla()
def actor_step(iter, loop_var):
(handle0, states) = loop_var
action = policy(states)
if is_legacy_gym:
(handle1, (new_states, rew, ... |
def dm_sync_step() -> None:
num_envs = 4
env = envpool.make_dm('Pong-v5', num_envs=num_envs)
(handle, recv, send, step) = env.xla()
def actor_step(iter, loop_var):
(handle0, states) = loop_var
action = policy(states.observation.obs)
(handle1, new_states) = step(handle0, action... |
def async_step() -> None:
num_envs = 8
batch_size = 4
env = envpool.make_dm('Pong-v5', num_envs=num_envs, batch_size=batch_size)
(handle, recv, send, step) = env.xla()
def actor_step(iter, loop_var):
(handle0, states) = loop_var
action = policy(states.observation.obs)
hand... |
class InstallPlatlib(install):
    """Install command that fixes the auditwheel error.

    See https://github.com/google/or-tools/issues/616.
    """

    def finalize_options(self) -> None:
        super().finalize_options()
        # Packages shipping extension modules must install into platlib,
        # not the pure-Python purelib location.
        if self.distribution.has_ext_modules():
            self.install_lib = self.install_platlib
|
class BinaryDistribution(Distribution):
    """Distribution that always reports binary (platform-specific) contents.

    Used to force a platform wheel tag for packages that bundle prebuilt
    native artifacts.
    """

    def is_pure(self) -> bool:
        """Return False: this package is not pure Python."""
        return False

    # Fix: the receiver parameter was misnamed ``foo``; Python convention
    # (and every other method here) uses ``self``. Behavior is unchanged.
    def has_ext_modules(self) -> bool:
        """Return True so the wheel is tagged as platform-specific."""
        return True
|
def build_wtq_zero_dataset(folder, template_files):
os.makedirs(folder, exist_ok=True)
table_processor = get_codex_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='gpt2')
eval_dataset = load_dataset('wikitablequestions', split='test')
prompt_files = template_files
for prompt_... |
def build_wikisql_zero_dataset(folder, template_files):
os.makedirs(folder, exist_ok=True)
table_processor = get_codex_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='gpt2')
def _convert_table_types(_table):
'Runs the type converter over the table cells.'
ret_table ... |
def build_tabfact_zero_dataset(folder, template_files):
os.makedirs(folder, exist_ok=True)
table_processor = get_codex_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='gpt2')
eval_dataset = load_dataset('tab_fact', 'tab_fact', split='test')
def table2dict(table_str):
tab... |
def build_sqa_zero_dataset(folder, template_files):
os.makedirs(folder, exist_ok=True)
table_processor = get_codex_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='gpt2')
eval_dataset = load_dataset('msr_sqa', split='test')
prompt_files = template_files
for prompt_file in pro... |
def normalize_output(output):
prefix_list = ['The answer is', 'the answer is', 'Answer: ', 'Extracted Answer: ', 'Answer :', ': ']
for prefix in prefix_list:
if output.startswith(prefix):
output = output[len(prefix):]
output = output.strip('.').strip()
output = output.replace('; ',... |
def evaluate_file(file_path):
(predictions, ground_truth, questions) = ([], [], [])
with open(file_path, 'r', encoding='utf8') as f:
json_lines = f.readlines()
for (idx, line) in enumerate(json_lines):
try:
json_obj = json.loads(line)
if isinstance(j... |
class TimeoutException(Exception):
    """Raised when an operation exceeds its allotted wall-clock time."""
|
def timeout_handler(signum, frame):
    """SIGALRM handler: abort the in-flight call by raising ``TimeoutException``."""
    raise TimeoutException('Timed out!')
|
def run_chatgpt_api(model_input):
    """Query the ChatGPT API with a 30-second wall-clock timeout.

    Raises:
        TimeoutException: via the SIGALRM handler if the request takes
            longer than 30 seconds.
    """
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(30)
    try:
        chatbot = Chatbot(api_key=OPENAI_API_KEY, temperature=0.0)
        response = chatbot.ask(model_input)
    finally:
        # Bug fix: cancel the pending alarm. Previously a fast response left
        # the 30s SIGALRM armed, so it would fire later inside unrelated code.
        signal.alarm(0)
    return response
|
def run_chatgpt_prediction(test_file):
print('Running ChatGPT on test file: {}'.format(test_file))
output_file = test_file.replace('.json', '.json.chatgpt')
if os.path.exists(output_file):
passed_cases = open(output_file, 'r').readlines()
if (not passed_cases[(- 1)].endswith('\n')):
... |
def run_codex_api(model_input):
result = None
while (result is None):
try:
result = openai.Completion.create(engine='code-davinci-002', prompt=model_input, api_key=OPENAI_API_KEY, temperature=0.0, max_tokens=128, n=1, stop=['\n\n', '\n'])
except Exception as e:
print(e,... |
def run_codex_prediction(test_file):
print(f'Running codex on {test_file} ...')
output_file = test_file.replace('.json', '.json.codex')
print(f'Output file: {output_file} ...')
if os.path.exists(output_file):
passed_cases = open(output_file, 'r').readlines()
if (not passed_cases[(- 1)]... |
def build_tabfact_zero_dataset(dataset_name, folder):
prompt_templates = get_tabfact_prompt_templates()
os.makedirs(f'{folder}/{dataset_name}', exist_ok=True)
table_processor = get_default_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='google/flan-t5-xl')
def table2dict(table_... |
def build_sqa_zero_dataset(dataset_name, folder):
prompt_templates = get_sqa_prompt_templates()
os.makedirs(f'{folder}/{dataset_name}', exist_ok=True)
table_processor = get_default_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='google/flan-t5-xl')
for (idx, prompt_template) in ... |
def build_wtq_zero_dataset(dataset_name, folder):
prompt_templates = get_wtq_prompt_templates()
os.makedirs(f'{folder}/{dataset_name}', exist_ok=True)
table_processor = get_default_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='google/flan-t5-xl')
for (idx, prompt_template) in ... |
def build_wikisql_zero_dataset(dataset_name, folder):
prompt_templates = get_wikisql_prompt_templates()
os.makedirs(f'{folder}/{dataset_name}', exist_ok=True)
table_processor = get_default_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='google/flan-t5-xl')
def _convert_table_ty... |
def build_svamp_zero_dataset(dataset_name, folder):
prompt_templates = get_svamp_prompt_templates()
os.makedirs(f'{folder}/svamp', exist_ok=True)
url_link = 'https://raw.githubusercontent.com/arkilpatel/SVAMP/main/data/mawps-asdiv-a_svamp/dev.csv'
for (idx, prompt_template) in enumerate(prompt_templat... |
def build_bbh_fewshot_dataset(dataset_name, folder):
assert (dataset_name in ALL_BBH_TEMPLATES)
prompt_templates = ALL_BBH_TEMPLATES[dataset_name]
for (idx, prompt_template) in enumerate(prompt_templates):
print('Current prompt template: ', prompt_template)
template = Template(prompt_templ... |
def build_mmlu_fewshot_dataset(dataset_name, folder):
from eval_scripts.mmlu_constants import subcategories, categories
tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-xl')
download_link = 'https://people.eecs.berkeley.edu/~hendrycks/data.tar'
os.makedirs(f'{folder}/{dataset_name}', exist_ok... |
def get_svamp_prompt_templates():
return ['Question: {{ question }}?\nAnswer:', '{{ question }}?', 'Answer the following question:\n\n{{ question }}', 'Answer this question:\n\n{{ question }}?', 'Please answer this question: {{ question }}', 'Answer the question...{{ question }}?', 'What is the answer to this que... |
def get_sqa_prompt_templates():
return ['[ {{ table }} ] Answer the following dialogue based on the above table: {{ question }}', '[ {{ table }} ] Answer this question based on the above table: {{ question }}', '<table> {{ table }} </table> Answer the following question based on the above table: {{ question }}', ... |
def get_tabfact_prompt_templates():
return ['Table:\n{{ table }}\nClaim: "{{ question }}" Is the claim entailed by the Table? Options: - yes - no. The answer is', 'Examine the provided table below: {{ table }} A claim has been made: "{{ question }}". Based on the information in the table, can we determine if this... |
def get_wikisql_prompt_templates():
return ['<table> {{ table }} </table> Answer this question based on the above table: {{ question }}', '[Table] {{ table }} [/Table] Answer this question based on the above table: {{ question }}', '[ {{ table }} ] Answer this question based on the above table: {{ question }}', '... |
def get_wtq_prompt_templates():
return ['[Table] {{ table }} [/Table] Answer this question based on the above table: {{ question }}', '[ {{ table }} ] Answer this question based on the above table: {{ question }}', 'Here is a table: {{ table }}. Answer the following question: {{ question }}', 'Answer the question... |
def evaluate_example(predict_str: str, ground_str: str):
ground_str = ground_str.lower()
predict_str = predict_str.lower()
predict_spans = predict_str.split(delimiter)
ground_spans = ground_str.split(delimiter)
predict_values = defaultdict((lambda : 0))
ground_values = defaultdict((lambda : 0)... |
def get_denotation_accuracy(predictions: List[str], references: List[str], **kwargs):
assert (len(predictions) == len(references))
correct_num = 0
for (predict_str, ground_str) in zip(predictions, references):
is_correct = evaluate_example(predict_str, ground_str)
if is_correct:
... |
def get_denotation_accuracy_binder(predictions: List[str], references: List[str], questions: List[str]):
assert (len(predictions) == len(references))
correct_num = 0
for (predict_str, ground_str, question) in zip(predictions, references, questions):
is_correct = evaluate_example_binder(predict_str... |
def get_exact_match(predictions: List[str], references: List[str], **kwargs):
'\n Exact match as the default evaluation\n '
assert (len(predictions) == len(references))
correct_num = 0
for (prediction, reference) in zip(predictions, references):
if (prediction.lower() == reference.lower(... |
def get_exact_match_option(predictions: List[str], references: List[str], **kwargs):
'\n Exact match as the default evaluation\n '
assert (len(predictions) == len(references))
correct_num = 0
for (prediction, reference) in zip(predictions, references):
if (prediction.lower().replace('(',... |
def check_denotation(target_values, predicted_values):
'Return True if the predicted denotation is correct.\n\n Args:\n target_values (list[Value])\n predicted_values (list[Value])\n Returns:\n bool\n '
if (len(target_values) != len(predicted_values)):
return False
fo... |
def normalize(x):
if (not isinstance(x, str)):
x = x.decode('utf8', errors='ignore')
x = ''.join((c for c in unicodedata.normalize('NFKD', x) if (unicodedata.category(c) != 'Mn')))
x = re.sub('[‘’´`]', "'", x)
x = re.sub('[“”]', '"', x)
x = re.sub('[‐‑‒–—−]', '-', x)
while True:
... |
class Value(object):
__metaclass__ = ABCMeta
_normalized = None
@abstractmethod
def match(self, other):
'Return True if the value matches the other value.\n Args:\n other (Value)\n Returns:\n a boolean\n '
pass
@property
def normaliz... |
class StringValue(Value):
def __init__(self, content):
assert isinstance(content, str)
self._normalized = normalize(content)
self._hash = hash(self._normalized)
def __eq__(self, other):
return (isinstance(other, StringValue) and (self.normalized == other.normalized))
def... |
class NumberValue(Value):
def __init__(self, amount, original_string=None):
assert isinstance(amount, (int, float))
if (abs((amount - round(amount))) < 1e-06):
self._amount = int(amount)
else:
self._amount = float(amount)
if (not original_string):
... |
class DateValue(Value):
def __init__(self, year, month, day, original_string=None):
'Create a new DateValue. Placeholders are marked as -1.'
assert isinstance(year, int)
assert (isinstance(month, int) and ((month == (- 1)) or (1 <= month <= 12)))
assert (isinstance(day, int) and (... |
def to_value(original_string, corenlp_value=None):
'Convert the string to Value object.\n Args:\n original_string (basestring): Original string\n corenlp_value (basestring): Optional value returned from CoreNLP\n Returns:\n Value\n '
if isinstance(original_string, Value):
... |
def to_value_list(original_strings, corenlp_values=None):
'Convert a list of strings to a list of Values\n Args:\n original_strings (list[basestring])\n corenlp_values (list[basestring or None])\n Returns:\n list[Value]\n '
assert isinstance(original_strings, (list, tuple, set))
... |
def str_normalize(user_input, recognition_types=None):
'A string normalizer which recognize and normalize value based on recognizers_suite'
user_input = str(user_input)
user_input = user_input.replace('\\n', '; ')
def replace_by_idx_pairs(orig_str, strs_to_replace, idx_pairs):
assert (len(str... |
def evaluate_example_official(predict_list: List, ground_truth: List):
predict_spans = [str(val).lower() for val in predict_list]
predict_spans = to_value_list(predict_spans)
ground_spans = to_value_list(ground_truth)
ret = check_denotation(target_values=ground_spans, predicted_values=predict_spans)
... |
def evaluate_example_binder(predict_list: str, ground_truth: str, allow_semantic=True, question: str=None):
pred = [str(p).lower().strip() for p in predict_list.split(delimiter)]
gold = [str(g).lower().strip() for g in ground_truth.split(delimiter)]
if (not allow_semantic):
pred = [str_normalize(s... |
def get_bleu_4(predictions: List[str], references: List[str], max_order=4, smooth=False, **kwargs):
bleu = evaluate.load('bleu')
predictions_group = []
references_group = []
cur_pred = None
cur_refers = []
for (prediction, reference) in zip(predictions, references):
if (cur_pred is Non... |
def main(model_name, batch_size, data_file, eval_func_name):
torch.cuda.empty_cache()
accelerator = Accelerator()
split_name = 'test'
dataset = load_dataset('json', data_files={split_name: data_file})
print('Evaluating {} on file {} \n\n'.format(model_name, data_file))
if accelerator.is_main_p... |
def main_wrapper():
checkpoint_path = 'sail/tapex-zero-large'
eval_file = 'https://huggingface.co/datasets/sail/symbolic-instruction-tuning/blob/main/test/wtq_tapex_large.json'
main(model_name=checkpoint_path, data_file=eval_file, batch_size=8, eval_func_name='get_denotation_accuracy')
|
def _split_thousands(delimiter, value):
split = value.split(delimiter)
return ((len(split) > 1) and any(map((lambda x: (len(x) == 3)), split)))
|
def convert_to_float(value):
'Converts value to a float using a series of increasingly complex heuristics.\n Args:\n value: object that needs to be converted. Allowed types include\n float/int/strings.\n Returns:\n A float interpretation of value.\n Raises:\n ValueError if the float... |
def _normalize_float(answer):
    """Coerce ``answer`` to a number when possible.

    Returns None for None or NaN results, the converted number on success,
    and the lower-cased original string when conversion fails.
    """
    if answer is None:
        return None
    try:
        value = convert_to_float(answer)
    except ValueError:
        # Not numeric: fall back to a case-normalized string.
        return answer.lower()
    if isinstance(value, float) and math.isnan(value):
        return None
    return value
|
class _Aggregation(enum.Enum):
    'Aggregations as defined by WikiSQL. Indexes match the data.'
    # NOTE: values mirror the WikiSQL dataset's aggregation-op indexes;
    # do not renumber or reorder.
    NONE = 0
    MAX = 1
    MIN = 2
    COUNT = 3
    SUM = 4
    AVERAGE = 5
|
class _Operator(enum.Enum):
    'The boolean operators used by WikiSQL. Indexes match the data.'
    # NOTE: values mirror the WikiSQL dataset's condition-op indexes;
    # do not renumber or reorder.
    EQUALS = 0
    GREATER = 1
    LESSER = 2
|
@dataclasses.dataclass
class _Condition():
    'Represents an SQL WHERE clause (e.g. A = "a" or B > 5).'
    # Name of the column the condition filters on.
    column: Text
    # Comparison operator applied to the column (see _Operator).
    operator: _Operator
    # Right-hand-side literal to compare against; type depends on the column.
    cmp_value: Any
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.