code stringlengths 17 6.64M |
|---|
def read_json(file_path: str) -> Any:
    """Load and return the parsed contents of a JSON file."""
    with open(file_path, 'r') as fp:
        return json.load(fp)
|
def read_jsonl(file_path: str) -> List[Any]:
    """Parse a JSON-Lines file into a list of objects; blank lines are skipped."""
    with open(file_path, 'r') as fp:
        stripped_lines = (raw.strip() for raw in fp)
        return [json.loads(line) for line in stripped_lines if line]
|
def read_file(file_path: str) -> Any:
    """Dispatch on the file extension to the appropriate reader."""
    ext = file_path.rsplit('.', 1)[-1]
    if ext == 'json':
        return read_json(file_path)
    if ext == 'jsonl':
        return read_jsonl(file_path)
    if ext in ('txt', 'log', 'md'):
        with open(file_path, 'r') as fp:
            return fp.readlines()
    if ext == 'csv':
        return pd.read_csv(file_path)
    raise ValueError(f'Unknown file type: {ext}')
|
def remove_file_extension(file_path: str) -> str:
    """Remove the file extension suffix of a file path.

    Uses os.path.splitext so that a path with no extension is returned
    unchanged and dots in parent directories are not mistaken for an
    extension separator (the previous '.'-split mishandled both cases,
    e.g. 'dir.v2/file' -> 'dir' and 'noext' -> '').
    """
    return os.path.splitext(file_path)[0]
|
def replace_file_extension(file_path: str, new_extension: str) -> str:
    """Replace the file extension suffix of a file path."""
    stem = remove_file_extension(file_path)
    return f'{stem}.{new_extension}'
|
def convert_to_message(content: str, type: str) -> BaseMessage:
    """Wrap raw text into the LangChain message class matching ``type``."""
    if type == 'human':
        return HumanMessage(content=content)
    if type == 'ai':
        return AIMessage(content=content)
    if type == 'system':
        return SystemMessage(content=content)
    raise ValueError(f'Unknown message type: {type}')
|
def print_prompt(prompt: Union[List[BaseMessage], str], raw_text: bool = False):
    """Print a prompt, which is either a plain string or a list of messages."""
    if isinstance(prompt, str):
        print(prompt)
        return
    for message in prompt:
        if raw_text:
            header = f'# {message.type} message'.upper()
            print(header + '\n' + message.content)
        else:
            print(make_colorful(message.type, message.content))
|
def replace_agent_action_with_list(x: Any) -> Any:
    """Recursively replace every AgentAction with [tool, tool_input, log].

    Containers (dict/list/tuple/set) are rebuilt with converted elements;
    anything else is returned unchanged.
    """
    if isinstance(x, AgentAction):
        return [x.tool, x.tool_input, x.log]
    recurse = replace_agent_action_with_list
    if isinstance(x, dict):
        return {key: recurse(value) for key, value in x.items()}
    if isinstance(x, list):
        return [recurse(item) for item in x]
    if isinstance(x, tuple):
        return tuple(recurse(item) for item in x)
    if isinstance(x, set):
        return {recurse(item) for item in x}
    return x
|
def llm_register_args(parser, prefix=None, shortprefix=None, defaults=None):
    """Register the standard LLM CLI flags on ``parser``.

    Args:
        parser: an argparse-style parser (anything with ``add_argument``).
        prefix: optional long-option prefix (e.g. 'agent' -> --agent-model-name).
        shortprefix: optional short-option prefix; defaults to prefix[0].
        defaults: optional dict overriding the built-in default values.

    Fix: ``defaults`` was a mutable default argument (``{}``); it is now
    None-guarded, which is behaviorally equivalent for all callers.
    """
    defaults = defaults or {}
    model_name = defaults.get('model_name', 'gpt-4')
    temperature = defaults.get('temperature', 0.0)
    max_tokens = defaults.get('max_tokens', None)
    # Claude endpoints get fewer default retries than OpenAI ones.
    default_retries = 8 if model_name.startswith('claude') else 12
    max_retries = defaults.get('max_retries', default_retries)
    request_timeout = defaults.get('request_timeout', 300)
    if prefix is None:
        prefix = ''
        shortprefix = ''
    else:
        prefix += '-'
        shortprefix = shortprefix or prefix[0]
    parser.add_argument(f'--{prefix}model-name', f'-{shortprefix}m', type=str, default=model_name, choices=AVAILABLE_MODEL_NAMES, help=f'Model name, one of: {AVAILABLE_MODEL_NAMES}')
    parser.add_argument(f'--{prefix}temperature', f'-{shortprefix}t', type=float, default=temperature, help='Temperature for sampling')
    parser.add_argument(f'--{prefix}max-tokens', f'-{shortprefix}mt', type=int, default=max_tokens, help='Max tokens for sampling')
    parser.add_argument(f'--{prefix}max-retries', f'-{shortprefix}mr', type=int, default=max_retries, help='Max retries for each request')
    parser.add_argument(f'--{prefix}request-timeout', f'-{shortprefix}rt', type=int, default=request_timeout, help='Timeout for each request')
|
def get_model_name(name, fixed_version=True, version=None):
    # Resolve an alias (via MODEL_NAMES_MAPPING) or validate a canonical
    # name, optionally pin a version suffix, and require the final name to
    # be a known model. Raises ValueError on any unknown name.
    model_name = None
    if (name in MODEL_NAMES_MAPPING):
        model_name = MODEL_NAMES_MAPPING[name]
    elif (name in MODEL_NAMES):
        model_name = name
    if (model_name is None):
        raise ValueError(f'Invalid model name: {name}')
    if fixed_version:
        # Default to the first (preferred) fixed version when none given.
        if ((version is None) and (model_name in FIXED_VERSIONS)):
            version = FIXED_VERSIONS[model_name][0]
        if (version is not None):
            model_name += f'-{version}'
    # NOTE(review): nesting reconstructed from flattened source — confirm
    # this final membership check is meant to run regardless of fixed_version.
    if (model_name not in MODEL_NAMES):
        raise ValueError(f'Invalid model name: {model_name}')
    return model_name
|
def get_fixed_model_name(name):
    """Shorthand for get_model_name with version pinning enabled."""
    return get_model_name(name, fixed_version=True)
|
class ChatOpenAI(LangchainChatOpenAI):
    """ChatOpenAI variant that records token usage and model name in llm_output."""

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        generations = [
            ChatGeneration(
                message=convert_dict_to_message(choice['message']),
                generation_info=dict(finish_reason=choice.get('finish_reason')),
            )
            for choice in response['choices']
        ]
        llm_output = {'token_usage': response['usage'], 'model_name': self.model_name}
        return ChatResult(generations=generations, llm_output=llm_output)
|
def _anthropic_create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator for transient Anthropic API errors.

    Retries up to ``llm.max_retries`` attempts with exponential backoff
    clamped to [1, 60] seconds, re-raising the last error on exhaustion.

    Fix: the original OR-chain listed APIConnectionError twice; the
    duplicate is removed. ``retry_if_exception_type`` accepts a tuple of
    exception classes, which replaces the long OR-chain.
    """
    import anthropic

    min_seconds = 1
    max_seconds = 60
    retryable_errors = (
        anthropic.APITimeoutError,
        anthropic.APIError,
        anthropic.APIConnectionError,
        anthropic.RateLimitError,
        anthropic.APIStatusError,
        anthropic.InternalServerError,
    )
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=retry_if_exception_type(retryable_errors),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
|
class ChatAnthropic(LangchainChatAnthropic):
    # Retry budget consumed by _anthropic_create_retry_decorator.
    max_retries: int = 6
    # Cap on tokens sampled per completion.
    max_tokens_to_sample: int = 4000
    # NOTE(review): this stray string statement sits after
    # max_tokens_to_sample but describes max_retries — likely misplaced.
    'Maximum number of retries to make when generating.'

    def completion_with_retry(self, **kwargs: Any) -> Any:
        'Use tenacity to retry the completion call.'
        retry_decorator = _anthropic_create_retry_decorator(self)

        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
            return self.client.completions.create(**kwargs)
        return _completion_with_retry(**kwargs)

    def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> ChatResult:
        # Flatten the chat history into Anthropic's single-string prompt.
        prompt = self._convert_messages_to_prompt(messages)
        params: Dict[str, Any] = {'prompt': prompt, **self._default_params, **kwargs}
        if stop:
            params['stop_sequences'] = stop
        if self.streaming:
            # Stream tokens, forwarding each delta to the callback manager.
            completion = ''
            stream_resp = self.completion_with_retry(**params, stream=True)
            for data in stream_resp:
                delta = data.completion
                completion += delta
                if run_manager:
                    run_manager.on_llm_new_token(delta)
            # NOTE(review): the streaming branch accumulates `completion`
            # but never builds a ChatResult — as reconstructed, this path
            # implicitly returns None; confirm streaming is never used here.
        else:
            response = self.completion_with_retry(**params)
            return self._create_chat_result(response)

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        # Normalize Anthropic's 'max_tokens' stop reason to OpenAI-style
        # 'length' so downstream truncation checks work uniformly.
        finish_reason = response.stop_reason
        if (finish_reason == 'max_tokens'):
            finish_reason = 'length'
        generations = [ChatGeneration(message=AIMessage(content=response.completion), generation_info=dict(finish_reason=finish_reason))]
        llm_output = {'model_name': response.model}
        return ChatResult(generations=generations, llm_output=llm_output)
|
def parse_llm_response(res: ChatResult, index: int = 0, one_generation_only: bool = True) -> str:
    """Extract the stripped text of one generation from a ChatResult.

    Raises ValueError when the chosen generation was truncated by the
    token limit (finish_reason == 'length').
    """
    candidates = res.generations[0]
    if one_generation_only:
        assert len(candidates) == 1, candidates
    chosen = candidates[index]
    if chosen.generation_info.get('finish_reason', None) == 'length':
        raise ValueError(f'Discard a response due to length: {chosen.text}')
    return chosen.text.strip()
|
def load_openai_llm(model_name: str = 'text-davinci-003', **kwargs) -> BaseLanguageModel:
    """Instantiate an OpenAI, Anthropic (Claude), or VLLM language model by name."""
    if model_name.startswith('claude'):
        # Translate OpenAI-style kwargs to Anthropic names; None means "drop".
        kwargs_map = {'request_timeout': 'default_request_timeout', 'max_tokens': 'max_tokens_to_sample', 'logit_bias': None, 'n': None}
        new_kwargs = {}
        for key, value in kwargs.items():
            if key not in kwargs_map:
                new_kwargs[key] = value
            elif kwargs_map[key] is not None and value is not None:
                new_kwargs[kwargs_map[key]] = value
            else:
                print(f'Discard kwargs `{key}={value}` for {model_name}')
        return ChatAnthropic(model=model_name, **new_kwargs)
    if model_name == 'text-davinci-003':
        return OpenAI(model_name=model_name, **kwargs)
    if 'vicuna' in model_name:
        return VLLM(model=model_name, trust_remote_code=True, max_num_batched_tokens=16000, **kwargs)
    if model_name in MODEL_NAMES:
        return ChatOpenAI(model_name=model_name, **kwargs)
    raise ValueError(f'Unknown model name: {model_name}')
|
def load_openai_llm_with_args(args, prefix=None, fixed_version=True, **kwargs):
    """Build an LLM from parsed CLI args registered by llm_register_args."""
    attr_prefix = '' if prefix is None else f'{prefix}_'

    def _arg(name):
        # Look up the (optionally prefixed) attribute on the args namespace.
        return getattr(args, f'{attr_prefix}{name}')

    return load_openai_llm(
        model_name=get_model_name(_arg('model_name'), fixed_version=fixed_version),
        temperature=_arg('temperature'),
        max_tokens=_arg('max_tokens'),
        max_retries=_arg('max_retries'),
        request_timeout=_arg('request_timeout'),
        **kwargs,
    )
|
def get_model_category(llm: BaseLanguageModel):
    """Classify an LLM instance into 'openai', 'claude', or 'vicuna'."""
    if isinstance(llm, ChatOpenAI):
        return 'openai'
    if isinstance(llm, ChatAnthropic):
        return 'claude'
    if isinstance(llm, VLLM):
        return 'vicuna'
    raise ValueError(f'Unknown model type: {type(llm)}')
|
def get_num_tokens(prompt: str, encoding='cl100k_base') -> int:
    """Count tokens in ``prompt`` using the named tiktoken encoding.

    Fix: the local encoder object no longer shadows the ``encoding``
    parameter it was derived from.
    """
    encoder = tiktoken.get_encoding(encoding)
    return len(encoder.encode(prompt))
|
def get_toolkit_names(case: Dict[str, Any]) -> List[str]:
    """Return the toolkit list of a case, accepting either key casing."""
    key = 'Toolkits' if 'Toolkits' in case else 'toolkits'
    return case[key]
|
def print_intermediate_result_and_stop(results, stop_at):
    """Print the intermediate pipeline output, then terminate the process."""
    if stop_at == 'preprocess':
        print(results)
    else:
        print_prompt(results[0])
    # Deliberately halts the whole run after showing the result.
    exit(0)
|
def filter_keys(inputs: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
    """Return a copy of ``inputs`` restricted to the given keys."""
    wanted = set(keys)
    return {key: value for key, value in inputs.items() if key in wanted}
|
def check_existence_by_name(new_name: str, existing_names: List[str]) -> bool:
    """True when ``new_name`` overlaps any existing name (case-insensitive substring either way)."""
    candidate = new_name.lower()
    return any(
        candidate in existing.lower() or existing.lower() in candidate
        for existing in existing_names
    )
|
def append_new_items_without_duplicates(existing_items: List[Any], new_items: List[Any], name_func: Callable, serialize_func: Callable, thresh: float=0.6, num_threads: int=8, pbar: Optional[tqdm.tqdm]=None, verbose: bool=False) -> List[Any]:
    # Deduplicate `new_items` against `existing_items` before appending:
    # an item is discarded when its name overlaps an existing name
    # (check_existence_by_name) or its serialized text is too similar to an
    # existing one (rougeL fmeasure > thresh). `existing_items` is mutated
    # in place; the list of actually-appended items is returned.
    appended_items = []
    scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=False)
    # NOTE(review): uses the scorer's private tokenizer and the private
    # rouge_scorer._score_lcs below — may break across library versions.
    tokenize = scorer._tokenizer.tokenize
    all_tokens = [tokenize(serialize_func(x)) for x in existing_items]
    for item in new_items:
        # Recomputed each iteration because existing_items grows as we append.
        existing_names = [name_func(x) for x in existing_items]
        existing_descs = [serialize_func(x) for x in existing_items]
        (name, desc) = (name_func(item), serialize_func(item))
        if check_existence_by_name(name, existing_names):
            print(f'Discard {name} due to name duplication')
            continue
        new_tokens = tokenize(desc)
        # Score the candidate against every existing item in parallel.
        with Pool(num_threads) as p:
            rouge_scores = p.map(partial(rouge_scorer._score_lcs, new_tokens), all_tokens)
        rouge_scores = [score.fmeasure for score in rouge_scores]
        # Top-5 most similar existing items, most similar first.
        most_similar_items = {existing_descs[i]: rouge_scores[i] for i in np.argsort(rouge_scores)[(- 5):][::(- 1)]}
        if verbose:
            print('New item:')
            print(desc)
            print('Most similar items:')
            print(most_similar_items)
            print('Average rouge score:', np.mean(rouge_scores))
        # NOTE(review): max() raises on an empty sequence — this assumes
        # existing_items is non-empty on entry; confirm callers guarantee it.
        if (max(rouge_scores) > thresh):
            print(f'Discard {desc} due to high rouge score {max(rouge_scores)}')
            print(f'most similar items: {most_similar_items}')
            continue
        existing_items.append(item)
        appended_items.append(item)
        all_tokens.append(new_tokens)
        if (pbar is not None):
            pbar.update(1)
    return appended_items
|
def convert_to_score(s, binarize_thres=None):
    """Parse a numeric score; optionally binarize it against a threshold."""
    value = float(s)
    if binarize_thres is None:
        return value
    return float(value >= binarize_thres)
|
class SimulatedObservation(NamedTuple):
    """Simulated observation produced for the agent.

    Both str() and repr() collapse to the observation text only.
    """

    observation: str      # observation text shown to the agent
    thought_summary: str  # summary of the simulator's reasoning
    log: str              # full raw simulator output

    def __str__(self):
        return self.observation

    def __repr__(self):
        return self.observation
|
def batchify(data: List[Any], batch_size: int):
    """Split ``data`` into consecutive chunks of at most ``batch_size`` items."""
    starts = range(0, len(data), batch_size)
    return [data[start:start + batch_size] for start in starts]
|
def thread_pool_executor(gen_func: Callable, batch_inputs: List[Any], unordered: bool=True, sequential_generation: bool=False, show_progress: bool=True, num_threads: int=10, request_timeout: int=60, enable_timer: bool=True) -> List[Any]:
    'Run a generation function in parallel using a thread pool executor.\n Modified from https://github.com/openai/evals/blob/main/evals/eval.py#L148\n '

    def worker_thread(inputs):
        '\n Worker thread for evaluating a single sample.\n '
        # Retry until gen_func finishes within request_timeout — there is
        # no attempt limit, so a call that always times out loops forever.
        while True:
            # Fresh single-worker executor per attempt so a timed-out call
            # can be abandoned independently of other attempts.
            executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
            future = executor.submit(gen_func, inputs)
            try:
                result = future.result(timeout=request_timeout)
                # NOTE(review): the executor is never shut down on the
                # success path; its worker thread is reclaimed only when
                # the executor object is garbage collected.
                return result
            except concurrent.futures.TimeoutError as e:
                # Abandon the stuck call (the underlying thread keeps
                # running to completion) and retry.
                executor.shutdown(wait=False)

    start = time.time()
    with ThreadPool(num_threads) as pool:
        if sequential_generation:
            print(f'Running in sequential mode!')
            # Lazy map; consumed by the tqdm/list below.
            iter = map(gen_func, batch_inputs)
        else:
            print(f'Running in threaded mode with {num_threads} threads!')
            if unordered:
                # Results arrive in completion order, not input order.
                iter = pool.imap_unordered(worker_thread, batch_inputs)
            else:
                iter = pool.imap(worker_thread, batch_inputs)
        results = list(tqdm.tqdm(iter, total=len(batch_inputs), disable=(not show_progress)))
    if enable_timer:
        print(f'Run {len(batch_inputs)} calls took {(time.time() - start):.2f}s')
    return list(results)
|
def insert_indent(s: str, indent: str = '\t', insert_first=True) -> str:
    """Indent every line of ``s`` and ensure exactly one trailing newline."""
    body = s.rstrip('\n ').replace('\n', '\n' + indent)
    if insert_first:
        body = indent + body
    return body + '\n'
|
def create_str(data: Dict[str, str], include_desc: bool = True, indent: str = '\t') -> str:
    """Format one parameter/return spec dict as a bullet line.

    Produces "<indent>- name (type, optional): description" when
    ``include_desc`` is set, otherwise just "name (type)".

    Fix: the local variable no longer shadows the ``type`` builtin.
    """
    name = data['name']
    prefix = f'{indent}- ' if include_desc else ''
    desc = f": {data['description']}" if include_desc else ''
    type_str = ''
    if 'type' in data:
        type_str = data['type']
        # 'required' defaults to True when absent.
        if not data.get('required', True):
            type_str += ', optional'
        type_str = f' ({type_str})'
    return f'{prefix}{name}{type_str}{desc}'
|
class ArgParameter(TypedDict):
    'The input parameter for a tool'
    name: str  # parameter name as referenced in tool inputs
    type: str  # type name; mapped to a Python type via PRIMITIVE_TYPES
    description: str  # human-readable description used in prompts
    required: bool  # whether the parameter must be provided
|
class ArgReturn(TypedDict):
    'The return value for a tool'
    name: str  # return field name
    type: str  # type name; mapped to a Python type via PRIMITIVE_TYPES
    description: str  # human-readable description used in prompts
|
class ArgException(TypedDict):
    'The exception for a tool'
    name: str  # exception name
    description: str  # when/why the tool raises it
|
class DummyToolWithMessage(BaseTool):
    """Placeholder tool that echoes a message instead of doing real work."""

    name = 'dummy_tool_with_message'
    description = 'Called when tool is not actually run.'

    def _get_message(self, msg: str) -> str:
        # Subclasses override this to customize the echoed message.
        return msg

    def _run(self, msg: str) -> str:
        return self._get_message(msg)

    async def _arun(self, msg: str) -> str:
        return self._get_message(msg)
|
class InvalidTool(DummyToolWithMessage):
    """Tool run when the agent requests an invalid tool name.

    Fix: renamed the misspelled local ``availabel_tool_string``.
    """

    name = 'invalid_tool'
    description = 'Called when tool name is invalid.'
    # Names of the tools the agent is allowed to call.
    available_tools: List[str]

    def _get_message(self, tool_name: str) -> str:
        # Returns a JSON-style error payload listing valid tool names.
        available_tool_string = ', '.join(self.available_tools)
        err_msg = f'{tool_name} is not a valid tool, try another one. You should use available tools in [{available_tool_string}] only. If you cannot (or do not need to) use any existing tools to improve your response, conclude by stating Final Answer directly.'
        return f'{{"error": "InvalidRequestException: {err_msg}"}}'
|
def special_fix_for_json_obj(s: str, pos: int) -> str:
    """Attempt a targeted fix of an invalid JSON string at error position ``pos``.

    Handles spurious backslash escapes and Python-style True/False booleans;
    returns the fixed string, or None when no fix applies. Note the
    replacement is global, not limited to ``pos``.
    """
    if pos >= len(s):
        return None
    if s[pos] == '\\':
        # Drop the backslash from the broken two-character escape.
        broken = s[pos:pos + 2]
        return s.replace(broken, broken[1])
    if s.startswith('True', pos):
        return s.replace('True', 'true')
    if s.startswith('False', pos):
        return s.replace('False', 'false')
    return None
|
def get_first_json_object_str(s: str, enable_check=True, *args, **kwargs) -> str:
    'Get the first JSON object from a string'
    # Prefer the content of a ```json ... ``` fenced block when present.
    regex = '```(?:json)?\\s*(\\{.*?\\})\\s*```'
    match = re.search(regex, s, flags=re.DOTALL)
    if match:
        s = match.group(1)
    s = s.lstrip()
    ret = s
    # Repeatedly try to parse; on failure, either apply a targeted fix
    # (bad escapes, Python True/False) or truncate at the error position
    # and stop.
    while True:
        try:
            _ = json.loads(s, *args, **kwargs)
            break
        except json.JSONDecodeError as e:
            fix_res = special_fix_for_json_obj(s, e.pos)
            if (fix_res is not None):
                # Fix succeeded: adopt the fixed string and re-validate.
                ret = s = fix_res
            else:
                # Unfixable: cut off everything from the error position.
                s = s[:e.pos].rstrip()
                break
    # Final validation of the (possibly truncated) candidate; if it parses,
    # prefer it over the last known-good `ret`, otherwise optionally raise.
    try:
        _ = json.loads(s, *args, **kwargs)
        ret = s
    except Exception as e:
        if enable_check:
            raise e
    return ret
|
def load_dict(tool_input: str) -> Dict[str, Any]:
    """Parse a JSON object string into a dict.

    Raises ValueError with an LLM-friendly message on any parse failure.
    Fix: corrected the typo "backsticks" -> "backticks" in the error text.
    """
    try:
        # strict=False tolerates control characters inside strings.
        return json.loads(tool_input, strict=False)
    except Exception as e:
        msg = str(e)
        # json reports "Invalid \escape" for bad backslash escapes.
        if msg.startswith('Invalid \\escape'):
            raise ValueError("Invalid syntax for ', you may need to use \' instead")
        raise ValueError(f"Invalid format: {e}. Please format it as a JSON object with necessary fields matching the tool's argument specifications. Do not include backticks to wrap the JSON object.")
|
def match_type(value: Any, expected_type: type) -> bool:
    """True when ``value`` matches ``expected_type``; an int also satisfies float."""
    if isinstance(value, expected_type):
        return True
    # Integers are acceptable wherever a float is expected.
    return expected_type is float and isinstance(value, int)
|
def check_type(name: str, value: Any, expected_type: type):
    """Raise ValueError when ``value`` does not match ``expected_type``."""
    if match_type(value, expected_type):
        return
    raise ValueError(f'Invalid type for {name}: {type(value).__name__}, expected {expected_type.__name__}.')
|
def validate_inputs(parameters: List[ArgParameter], inputs: Dict[str, Any]):
    'Validate the inputs for a tool.'
    # Track input keys not consumed by any declared parameter.
    remains = set(inputs.keys())
    for param in parameters:
        name = param['name']
        if (name in remains):
            remains.remove(name)
        if (name in inputs):
            # PRIMITIVE_TYPES maps the spec's type string to a Python type;
            # check_type raises ValueError on a mismatch.
            expected_type = PRIMITIVE_TYPES[param['type']]
            check_type(name, inputs[name], expected_type)
        elif param.get('required', False):
            raise ValueError(f'Missing required parameter: {name}.')
        # NOTE(review): 'required' defaults to False here but to True in
        # create_str — confirm which default the tool specs rely on.
    if remains:
        raise ValueError(f'Unexpected parameters: {remains}.')
|
def validate_outputs(returns: List[ArgReturn], outputs: Dict[str, Any]):
    'Validate the outputs for a tool.'
    if (not isinstance(outputs, dict)):
        raise ValueError(f'Invalid type for outputs: {type(outputs).__name__}, expected dict.')
    # A pure error payload bypasses schema validation entirely.
    if (list(outputs.keys()) == ['error']):
        return
    # Track output keys not declared in the return spec.
    remains = set(outputs.keys())
    for ret in returns:
        name = ret['name']
        if (name in remains):
            remains.remove(name)
        # Unlike inputs, every declared return is mandatory.
        if (name not in outputs):
            raise ValueError(f'Missing return: {name}.')
        expected_type = PRIMITIVE_TYPES[ret['type']]
        check_type(name, outputs[name], expected_type)
    if remains:
        raise ValueError(f'Unexpected returns: {list(remains)}.')
|
def run_with_input_validation(run_func: Callable, inputs: Dict[str, str], tool: BaseTool, raw_inputs: str, **kwargs):
    """Run a tool after validating that ``raw_inputs`` parses and matches the spec.

    On a validation failure, a DummyToolWithMessage returns the error payload
    instead. Note the validated dict is discarded; ``run_func`` receives the
    ``inputs`` argument as given.
    """
    try:
        parsed = load_dict(raw_inputs)
        validate_inputs(tool.parameters, parsed)
    except Exception as e:
        error_payload = f'{{"error": "InvalidRequestException: {e}"}}'
        return DummyToolWithMessage().run(error_payload, **kwargs)
    return run_func(inputs, **kwargs)
|
def find_toolkit_spec(name: str, toolkits: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Return the first toolkit spec whose 'toolkit' field equals ``name``."""
    found = next((spec for spec in toolkits if spec['toolkit'] == name), None)
    if found is None:
        raise ValueError(f'Toolkit not found: {name}.')
    return found
|
def format_toolkit_dict(toolkit: Dict[str, Any], namekey: str, add_risks: bool=True, indent: str='\t', use_simple_tool_desc: bool=False):
    'Format a toolkit specified as a dictionary into a string.'
    toolkit_name = toolkit[namekey]
    desc = f'''<{toolkit_name}> toolkit with following tool APIs:
'''
    for tool in toolkit['tools']:
        if use_simple_tool_desc:
            # One line per tool: "* name: summary".
            summary = tool['summary'].rstrip('\n ')
            desc += f'''{indent}* {tool['name']}: {summary}
'''
        else:
            # Signature-style line: "* name(params) -> returns : summary".
            summary = tool['summary'].rstrip('\n ')
            desc += f"{indent}* {tool['name']}"
            params = []
            for p in tool['parameters']:
                params.append(f"{p['name']}")
            desc += f"({', '.join(params)})"
            returns = []
            for r in tool['returns']:
                returns.append(f"{r['name']}")
            desc += f" -> {', '.join(returns)}"
            desc += f''' : {summary}
'''
    if (add_risks and ('risks' in toolkit)):
        desc += 'Potential Risky Outcomes:\n'
        # Shuffle a copy so the prompt ordering is not fixed across calls
        # (the toolkit dict itself is not mutated).
        risks = list(toolkit['risks'])
        random.shuffle(risks)
        for risk in risks:
            desc += f'''{indent}* {risk}
'''
    return desc
|
def get_tr_set(size=None, train_examples=None, batch_size=32, soft_labels=None, args=None):
    """Build a training DataLoader from raw examples.

    Args:
        size: if given, randomly subsample this many featurized examples
            and also return the chosen indices.
        train_examples: raw examples to featurize.
        batch_size: batch size for the returned DataLoader.
        soft_labels: optional per-example soft labels; when non-empty they
            replace the hard label ids (stored as float64 tensors).
        args: namespace providing max_seq_length and local_rank.

    Returns:
        DataLoader, or (DataLoader, selected_indices) when ``size`` is set.

    Fix: ``soft_labels`` previously used a mutable default argument ([]).
    NOTE(review): relies on module-level convert_examples_to_features,
    label_list, tokenizer and logger globals.
    """
    if soft_labels is None:
        soft_labels = []
    train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, logger)
    select_idx = None
    if size:
        select_idx = np.random.choice(range(len(train_features)), size=size, replace=False)
        train_features = list(np.array(train_features)[select_idx])
    logger.info(' Num examples = %d', len(train_examples))
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)
    all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)
    if len(soft_labels):
        # Soft (probabilistic) labels are float64 for BCE-style losses.
        all_label_ids = torch.tensor(list(soft_labels), dtype=torch.float64)
    else:
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_valid_ids, all_lmask_ids)
    # Random shuffling locally; distributed sampler when running multi-process.
    if args.local_rank == -1:
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = DistributedSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
    if size:
        return (train_dataloader, select_idx)
    return train_dataloader
|
def get_eval_set(eval_on, eval_batch_size=8):
    """Build a sequential DataLoader over the dev or test split.

    Args:
        eval_on: 'dev' or 'test'.
        eval_batch_size: batch size for the returned DataLoader.

    Fix: the batch-size log line now reports the ``eval_batch_size``
    parameter actually used by the DataLoader, instead of the global
    ``args.eval_batch_size``.
    NOTE(review): relies on module-level processor/args/label_list/
    tokenizer/logger globals.
    """
    if eval_on == 'dev':
        eval_examples = processor.get_dev_examples(args.data_dir)
    elif eval_on == 'test':
        eval_examples = processor.get_test_examples(args.data_dir)
    else:
        raise ValueError('eval on dev or test set only')
    eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, logger)
    logger.info('***** Running evaluation *****')
    logger.info(' Num examples = %d', len(eval_examples))
    logger.info(' Batch size = %d', eval_batch_size)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
    all_valid_ids = torch.tensor([f.valid_ids for f in eval_features], dtype=torch.long)
    all_lmask_ids = torch.tensor([f.label_mask for f in eval_features], dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_valid_ids, all_lmask_ids)
    # Sequential (not shuffled) so predictions line up with the examples.
    eval_sampler = SequentialSampler(eval_data)
    return DataLoader(eval_data, sampler=eval_sampler, batch_size=eval_batch_size)
|
def evaluate(prefix=None, model=None, args=None):
    # Run token-level NER evaluation on the dev/test split and return the
    # classification report string. `prefix` is accepted but unused here.
    # NOTE(review): relies on module-level device/label_list/F/logger globals.
    eval_dataloader = get_eval_set(eval_on=args.eval_on, eval_batch_size=args.eval_batch_size)
    model.to(device)
    model.eval()
    (eval_loss, eval_accuracy) = (0, 0)  # NOTE(review): never accumulated
    (nb_eval_steps, nb_eval_examples) = (0, 0)  # NOTE(review): never accumulated
    y_true = []
    y_pred = []
    raw_logits = []
    # Label ids start at 1; id 0 is reserved (padding), and id
    # len(label_map) marks the sentence terminator below.
    label_map = {i: label for (i, label) in enumerate(label_list, 1)}
    for (input_ids, input_mask, segment_ids, label_ids, valid_ids, l_mask) in tqdm(eval_dataloader, desc='Evaluating'):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        valid_ids = valid_ids.to(device)
        label_ids = label_ids.to(device)
        l_mask = l_mask.to(device)
        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask, valid_ids=valid_ids, attention_mask_label=l_mask)
        # Greedy decode: most likely tag id per token.
        logits = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
        logits = logits.detach().cpu().numpy()
        label_ids = label_ids.to('cpu').numpy()
        input_mask = input_mask.to('cpu').numpy()
        for (i, label) in enumerate(label_ids):
            temp_1 = []
            temp_2 = []
            for (j, m) in enumerate(label):
                if (j == 0):
                    # Skip the first position ([CLS]).
                    continue
                elif (label_ids[i][j] == len(label_map)):
                    # End-of-sentence marker label: flush this sentence.
                    y_true.append(temp_1)
                    y_pred.append(temp_2)
                    break
                else:
                    temp_1.append(label_map[label_ids[i][j]])
                    try:
                        temp_2.append(label_map[logits[i][j]])
                    except:
                        # Predicted id outside the label map (e.g. pad id 0).
                        temp_2.append('UKN')
    report = classification_report(y_true, y_pred, digits=4)
    logger.info('\n%s', report)
    return report
|
def save_result(prefix='Active', func_paras=None, report=None, table=None, output_dir=None):
    """Append a run summary (parameters, report, table) to <output_dir>/<prefix>.txt.

    Large array/list parameter values are replaced by their shapes to keep
    the log compact. Fix: uses isinstance() instead of ``type(x) == T``
    comparisons.
    NOTE(review): mutates the caller's ``func_paras`` dict in place.
    """
    result_path = os.path.join(output_dir, prefix + '.txt')
    with open(result_path, 'a') as f:
        if func_paras:
            for para in func_paras:
                if isinstance(func_paras[para], np.ndarray):
                    func_paras[para] = func_paras[para].shape
                if isinstance(func_paras[para], list):
                    func_paras[para] = np.array(func_paras[para]).shape
            f.write('\nParameters:\n')
            for item in func_paras.items():
                f.write(str(item) + '\n')
        if report:
            f.write(report)
        if table:
            table = table.get_string()
            f.write(table)
|
def multi_argmax(values: np.ndarray, n_instances: int = 1) -> np.ndarray:
    """Return the indices of the ``n_instances`` largest entries of ``values``.

    Uses argpartition on the negated values, so the returned indices are
    not sorted by magnitude.
    """
    assert n_instances <= values.shape[0], 'n_instances must be less or equal than the size of utility'
    partitioned = np.argpartition(-values, n_instances - 1, axis=0)
    return partitioned[:n_instances]
|
def uncertainty_sampling(model_instance, pool, size):
    """Uncertainty sampling policy.

    A sentence's uncertainty is the sum of (1 - max tag probability) over
    its valid tokens; the ``size`` most uncertain pool items are returned
    as (indices, items).
    """
    loader = get_tr_set(train_examples=pool, batch_size=1, args=args)
    raw_prediction, truncate_lengths = active_eval(loader, model_instance)
    word_prob = np.max(raw_prediction, axis=2)
    sentence_uncertainty = [
        np.sum(1 - probs[:truncate_lengths[i]]) for i, probs in enumerate(word_prob)
    ]
    query_index = multi_argmax(np.array(sentence_uncertainty), size)
    return query_index, pool[query_index]
|
def nte_sampling(model_instance, pool, size):
    """Normalized-token-entropy sampling: pick the ``size`` highest-NTE pool items."""
    loader = get_tr_set(train_examples=pool, batch_size=1, args=args)
    raw_prediction, truncate_lengths = active_eval(loader, model_instance)
    sentence_nte = cal_nte(raw_prediction, truncate_lengths)
    query_index = multi_argmax(np.array(sentence_nte), size)
    return query_index, pool[query_index]
|
def cal_nte(logits, turncate):
    """Compute per-sentence normalized token entropy (NTE).

    For each sentence, sums the tag-distribution entropy over its valid
    tokens and normalizes by the truncated length; higher means the model
    is less certain about the sentence.

    Args:
        logits: per-sentence (len, n_tags) arrays of tag probabilities.
        turncate: valid token count per sentence.

    Fix: zero probabilities are skipped — p*log(p) -> 0 as p -> 0, but
    math.log(0) raises ValueError (softmax outputs can underflow to 0).
    """
    sentence_nte = []
    for idx, sent in enumerate(logits):
        sent_sum = 0.0
        for word in sent[:turncate[idx]]:
            for tag in word:
                if tag > 0:
                    sent_sum += tag * math.log(tag)
        sentence_nte.append(-sent_sum / turncate[idx])
    return sentence_nte
|
def qbc_sampling(model_com, pool, n_instance):
    """Query-by-committee sampling: pick pool items with the highest vote entropy."""
    loader = get_tr_set(train_examples=pool, batch_size=1, args=args)
    committee_predictions = []
    for member in model_com:
        raw_prediction, truncate_lengths = active_eval(loader, member)
        committee_predictions.append(result2tag(raw_prediction, truncate_lengths))
    vote_entropy = cal_vote_entropy(committee_predictions)
    query_index = multi_argmax(vote_entropy, n_instance)
    return query_index, pool[query_index]
|
def cal_vote_entropy(mc_pred):
    """Calculate the committee vote entropy per sentence.

    Input:
        mc_pred: list of per-model tag predictions, each of shape
            (num_sentence, max_len) (output of result2tag).
    Output:
        vote_entropy: 1-d array (num_sentence,) with each sentence's
        token-level vote entropy summed over tokens.

    Fix: the vote probability of a tag is the fraction of committee
    members voting for it, i.e. vote_count[tag] / num_models. The original
    computed ``np.sum(word == tag_idx) / num_models`` over the count
    vector, which instead counts how many tags received exactly
    ``tag_idx`` votes.
    """
    num_mc_model = len(mc_pred)
    num_sentence = mc_pred[0].shape[0]
    print('vote_matrix')
    # vote_matrix[s][w][tag] = number of committee members predicting `tag`.
    vote_matrix = np.zeros((num_sentence, args.max_seq_length, num_labels))
    for model_idx, pred in enumerate(mc_pred):
        for s_idx, sentence in enumerate(pred):
            for w_idx, word in enumerate(sentence):
                vote_matrix[s_idx][w_idx][word] += 1
    print('vote_prob_matrix')
    # Normalize vote counts into per-token vote distributions.
    vote_prob_matrix = vote_matrix / num_mc_model
    print('vote_entropy')
    vote_entropy = np.zeros(num_sentence)
    for s_idx, sentence in enumerate(vote_prob_matrix):
        sentence_entropy = 0.0
        for word in sentence:
            word_entropy = 0.0
            for tag_prob in word:
                if tag_prob:
                    # Standard entropy term; zero-probability tags contribute 0.
                    word_entropy -= tag_prob * math.log(tag_prob)
            sentence_entropy += word_entropy
        vote_entropy[s_idx] = sentence_entropy
    return vote_entropy
|
def result2tag(result, turncate):
    """Convert per-token probability arrays into predicted tag-id sequences.

    Each sentence is truncated to its valid length and each token mapped
    to the index of its most probable tag (first occurrence on ties).
    """
    sentences = []
    for idx, sentence in enumerate(result):
        valid = sentence[:turncate[idx]]
        sentences.append([int(np.argmax(word)) for word in valid])
    return np.array(sentences)
|
def random_sampling(model_instance, input_data, n_instances):
    """Random sampling policy: pick ``n_instances`` items uniformly without replacement.

    The model instance is accepted only for interface parity with the
    other sampling policies; it is not used.
    """
    query_index = np.random.choice(len(input_data), size=n_instances, replace=False)
    return query_index, input_data[query_index]
|
def active_train(data_loader=None, model=None, Epochs=5, soft_loader=None, args=None):
    # Fine-tune (or continue training) a BERT NER model and return a copy
    # of the weights from the best epoch (lowest accumulated epoch loss).
    # NOTE(review): relies on module-level num_labels/device/n_gpu/amp/
    # Ner/BertConfig globals; nesting reconstructed from flattened source.
    config = BertConfig.from_pretrained(args.bert_model, num_labels=num_labels, finetuning_task=args.task_name)
    if (model == None):
        model = Ner.from_pretrained(args.bert_model, from_tf=False, config=config)
    # `return_model` holds a snapshot of the best epoch's weights.
    return_model = Ner.from_pretrained(args.bert_model, from_tf=False, config=config)
    model.to(device)
    return_model.to(device)
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    # NOTE(review): schedule length uses args.num_train_epochs, not the
    # `Epochs` parameter looped below — confirm this is intended.
    num_train_optimization_steps = (int(((len(data_loader.dataset) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
    warmup_steps = int((args.warmup_proportion * num_train_optimization_steps))
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=num_train_optimization_steps)
    current_train_size = 0
    if soft_loader:
        current_train_size = (len(data_loader.dataset) + len(soft_loader.dataset))
    else:
        current_train_size = len(data_loader.dataset)
    print('Training on {} data'.format(current_train_size))
    model.train()
    # Arbitrary large sentinel so the first epoch always snapshots
    # (assumes epoch losses stay below this value).
    tr_loss = 2020
    for epoch_idx in trange(int(Epochs), desc='Epoch'):
        current_loss = 0
        (nb_tr_examples, nb_tr_steps) = (0, 0)
        # --- supervised pass over hard-labeled data ---
        for (step, batch) in enumerate(tqdm(data_loader, desc='Iteration')):
            batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, label_ids, valid_ids, l_mask) = batch
            loss = model(input_ids, segment_ids, input_mask, label_ids, valid_ids, l_mask)
            if (n_gpu > 1):
                # Average across GPUs under DataParallel.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            current_loss += loss
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            # Step only on accumulation boundaries.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                optimizer.step()
                scheduler.step()
                model.zero_grad()
        # --- optional pass over soft-labeled (pseudo-label) data ---
        if soft_loader:
            for (input_ids, input_mask, segment_ids, soft_labels, valid_ids, l_mask) in tqdm(soft_loader, desc='Soft Training'):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                soft_labels = soft_labels.to(device)
                l_mask = l_mask.to(device)
                logits = model(input_ids, segment_ids, input_mask, valid_ids=valid_ids, attention_mask_label=l_mask)
                # NOTE(review): logits are detached here, so the BCE loss
                # below is cut off from the graph; wrapping it in
                # Variable(requires_grad=True) does not reconnect it, and
                # this backward() yields no parameter gradients — confirm.
                logits = logits.detach().cpu().float()
                soft_labels = soft_labels.detach().cpu().float()
                pos_weight = torch.ones([num_labels])
                criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
                loss = 0
                for i in range(len(logits)):
                    # Score only the valid (unpadded) positions.
                    turncate_len = np.count_nonzero(l_mask[i].detach().cpu().numpy())
                    logit = logits[i][:turncate_len]
                    soft_label = soft_labels[i][:turncate_len]
                    loss += criterion(logit, soft_label)
                loss = Variable(loss, requires_grad=True)
                current_loss += loss
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
        # Snapshot weights whenever this epoch's loss is a new best.
        if (current_loss <= tr_loss):
            return_model.load_state_dict(model.state_dict())
            tr_loss = current_loss
    return return_model
|
def active_eval(active_data_loader=None, model=None):
    """Run the model over a loader (batch size 1) and collect per-sentence
    softmax distributions plus the number of valid label positions.

    Args:
        active_data_loader: DataLoader yielding single-sentence batches of
            (input_ids, input_mask, segment_ids, label_ids, valid_ids, l_mask).
        model: a trained ``Ner`` model.

    Returns:
        (raw_logits, turncate_list): one softmax array of shape
        (max_seq_length, num_labels) per sentence, and the count of
        non-zero label-mask positions for each sentence.

    Note: the original body also created eval_loss/eval_accuracy,
    nb_eval_steps/nb_eval_examples, y_true/y_pred and label_map, none of
    which were ever used; they have been removed.
    """
    model.to(device)
    model.eval()
    raw_logits = []
    turncate_list = []
    for (input_ids, input_mask, segment_ids, label_ids, valid_ids, l_mask) in tqdm(active_data_loader, desc='Evaluating'):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        valid_ids = valid_ids.to(device)
        label_ids = label_ids.to(device)
        l_mask = l_mask.to(device)
        with torch.no_grad():
            # No labels are passed, so the model returns logits, not a loss.
            logits = model(input_ids, segment_ids, input_mask, valid_ids=valid_ids, attention_mask_label=l_mask)
        logits = F.softmax(logits, dim=2)
        # The active-evaluation loader is expected to use batch size 1.
        assert (logits.shape[0] == 1)
        logits = logits.detach().cpu().numpy().reshape((logits.shape[1], logits.shape[2]))
        # Number of valid (non-padding) label positions in this sentence.
        turncate_len = np.count_nonzero(l_mask.detach().cpu().numpy())
        turncate_list.append(turncate_len)
        raw_logits.append(logits)
    return (raw_logits, turncate_list)
|
def active_learn(init_flag=None, train_data=None, num_initial=200, active_policy=None, num_query=5, num_sample=[100, 100, 100, 100, 100], dev_data=None, fit_only_new_data=False, Epochs=10, prefix='Active', args=None):
    """Run the plain active-learning loop (no data augmentation).

    Trains an initial model on a random seed set, then repeatedly queries
    the unlabeled pool with *active_policy*, retrains, evaluates, and logs
    a PrettyTable row per round.

    Args:
        init_flag: if truthy, load the initial model from 'init_dir' instead of training.
        train_data: full labeled dataset; used both as seed-set source and query pool.
        num_initial: size of the random seed set.
        active_policy: callable (model, pool, n) -> (query_idx, query_instance).
        num_query: number of query rounds.
        num_sample: per-round query sizes; must have at least num_query entries.
        dev_data: unused in this function.
        fit_only_new_data: if True, retrain only on newly queried samples each round.
        Epochs: training epochs per round.
        prefix: unused here (saving uses args.prefix).
        args: parsed command-line arguments (data/output settings).

    Returns:
        The model produced after the final query round.

    NOTE(review): num_sample is a mutable default argument — shared across
    calls; safe only while callers never mutate it.
    """
    # Captured for experiment bookkeeping via save_result.
    func_paras = locals()
    pool = copy.deepcopy(train_data)
    train_data = copy.deepcopy(train_data)
    original_datasize = len(train_data)
    # Random seed set for model initialization.
    initial_idx = np.random.choice(range(len(train_data)), size=num_initial, replace=False)
    train_data = np.array(train_data)[initial_idx]
    (init_data_loader, query_idx) = get_tr_set(size=num_initial, train_examples=train_data, args=args)
    # Remove the seed samples from the query pool.
    pool = np.delete(pool, query_idx, axis=0)
    print(np.array(pool).shape)
    if init_flag:
        # Load a previously saved initial model instead of training one.
        init_dir = 'init_dir'
        model = Ner.from_pretrained(init_dir)
    else:
        model = active_train(init_data_loader, None, Epochs, args=args)
    report = evaluate('Intialization', model, args)
    print_table = PrettyTable(['Model', 'Number of Query', 'Data Usage', 'Test_F1'])
    # report.split()[-2] is the F1 column of the classification report summary.
    print_table.add_row(['Active Model', 'Model Initialization', (len(train_data) / original_datasize), report.split()[(- 2)]])
    print(print_table)
    save_result(prefix=args.prefix, report=report, table=print_table, output_dir=args.output_dir)
    print('Learning loop start')
    for idx in range(num_query):
        print(('\n\n-------Query no. %d--------\n' % (idx + 1)))
        # Select the next batch of samples from the pool.
        (query_idx, query_instance) = active_policy(model, pool, num_sample[idx])
        if fit_only_new_data:
            train_data = pool[query_idx]
        else:
            train_data = np.concatenate((train_data, pool[query_idx]))
        pool = np.delete(pool, query_idx, axis=0)
        active_data_loader = get_tr_set(train_examples=train_data, args=args)
        # Continue training from the current model.
        model = active_train(active_data_loader, model, Epochs, args=args)
        report = evaluate('Active Learning', model, args)
        print_table.add_row(['Active Model', (idx + 1), (len(train_data) / original_datasize), report.split()[(- 2)]])
        print(print_table)
        save_result(prefix=args.prefix, func_paras=func_paras, report=report, table=print_table, output_dir=args.output_dir)
    return model
|
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store one example.

        Args:
            guid: Unique id for the example.
            text_a: Untokenized text of the first sequence; for single-sequence
                tasks only this one is required.
            text_b: Optional untokenized text of the second sequence
                (sequence-pair tasks only).
            label: Optional label; set for train/dev examples, absent for test.
        """
        self.guid, self.text_a, self.text_b, self.label = guid, text_a, text_b, label
|
class InputFeatures(object):
    """A single set of BERT input features for one example."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None):
        """Hold the padded tensors-to-be for one sentence.

        Args:
            input_ids: WordPiece token ids, padded to max_seq_length.
            input_mask: 1 for real tokens, 0 for padding.
            segment_ids: token type ids (all 0 for single sentences).
            label_id: label ids aligned with the re-packed token positions.
            valid_ids: 1 where a sub-token is the first piece of a word.
            label_mask: 1 where a label position is valid.
        """
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
        self.valid_ids, self.label_mask = valid_ids, label_mask
|
def readfile(filename):
    """Read a CoNLL-style file into a list of (tokens, labels) pairs.

    Sentences are separated by blank lines; '-DOCSTART' lines are skipped.
    Each data line is space-separated with the token in the first column and
    the tag in the last; the tag's trailing newline character is stripped.

    Args:
        filename: path to the CoNLL file.

    Returns:
        List of (sentence, label) tuples, each a pair of parallel lists.
    """
    data = []
    sentence = []
    label = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename) as f:
        for line in f:
            # len(line) == 0 never triggers when iterating a file (lines keep
            # their '\n'); kept the blank-line check via line[0] == '\n'.
            if line.startswith('-DOCSTART') or (line[0] == '\n'):
                if len(sentence) > 0:
                    data.append((sentence, label))
                    sentence = []
                    label = []
                continue
            splits = line.split(' ')
            sentence.append(splits[0])
            # Last column is the tag; drop the trailing newline character.
            label.append(splits[-1][:-1])
    # Flush the final sentence if the file does not end with a blank line.
    if len(sentence) > 0:
        data.append((sentence, label))
    return data
|
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets.

    Subclasses implement the three getters; `_read_tsv` delegates to the
    module-level `readfile` CoNLL reader.
    """

    def get_train_examples(self, data_dir):
        """Return a collection of `InputExample`s for the train set."""
        raise NotImplementedError

    def get_dev_examples(self, data_dir):
        """Return a collection of `InputExample`s for the dev set."""
        raise NotImplementedError

    def get_labels(self):
        """Return the list of labels for this data set."""
        raise NotImplementedError

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab separated value file (quotechar is unused)."""
        return readfile(input_file)
|
class NerProcessor(DataProcessor):
    """Processor for the CoNLL-2003 data set (train/valid/test .txt files)."""
    def get_train_examples(self, data_dir, size=None):
        """Load train.txt; optionally subsample `size` random examples."""
        train_file = self._read_tsv(os.path.join(data_dir, 'train.txt'))
        return_example = self._create_examples(train_file, 'train')
        if size:
            # Uniform random subsample without replacement.
            select_idx = np.random.choice(range(len(return_example)), size=size, replace=False)
            return_example = list(np.array(return_example)[select_idx])
        return return_example
    def get_dev_examples(self, data_dir):
        """Load valid.txt as dev examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'valid.txt')), 'dev')
    def get_test_examples(self, data_dir):
        """Load test.txt as test examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.txt')), 'test')
    def get_labels(self):
        # CoNLL-2003 tags plus the BERT control tokens used as labels.
        return ['O', 'B-MISC', 'I-MISC', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', '[CLS]', '[SEP]']
    def _create_examples(self, lines, set_type):
        """Wrap (sentence, label) pairs from readfile into InputExample objects."""
        examples = []
        for (i, (sentence, label)) in enumerate(lines):
            guid = ('%s-%s' % (set_type, i))
            # Tokens are re-joined; tokenization happens later per word.
            text_a = ' '.join(sentence)
            text_b = None
            label = label
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, logger):
    """Convert `InputExample`s into padded `InputFeatures` for BERT NER.

    For each word, only the first WordPiece sub-token carries the label
    (marked via `valid`); `Ner.forward` later re-packs hidden states using
    this mask.

    Args:
        examples: list of InputExample with space-separated `text_a` and a
            per-word label list in `label`.
        label_list: tag strings; ids start at 1 (0 is reserved for padding).
        max_seq_length: fixed output length including [CLS]/[SEP].
        tokenizer: BERT WordPiece tokenizer.
        logger: logger used to dump the first converted example.

    Returns:
        List of InputFeatures with every field padded to max_seq_length.
    """
    label_map = {label: i for (i, label) in enumerate(label_list, 1)}
    features = []
    for (ex_index, example) in enumerate(examples):
        textlist = example.text_a.split(' ')
        labellist = example.label
        tokens = []
        labels = []
        valid = []
        label_mask = []
        for (i, word) in enumerate(textlist):
            token = tokenizer.tokenize(word)
            tokens.extend(token)
            label_1 = labellist[i]
            for m in range(len(token)):
                if (m == 0):
                    # Only the first sub-token of each word keeps its label.
                    labels.append(label_1)
                    valid.append(1)
                    label_mask.append(1)
                else:
                    valid.append(0)
        if (len(tokens) >= (max_seq_length - 1)):
            # Truncate to leave room for [CLS] and [SEP].
            tokens = tokens[0:(max_seq_length - 2)]
            labels = labels[0:(max_seq_length - 2)]
            valid = valid[0:(max_seq_length - 2)]
            label_mask = label_mask[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        ntokens.append('[CLS]')
        segment_ids.append(0)
        valid.insert(0, 1)
        label_mask.insert(0, 1)
        label_ids.append(label_map['[CLS]'])
        for (i, token) in enumerate(tokens):
            ntokens.append(token)
            segment_ids.append(0)
            if (len(labels) > i):
                label_ids.append(label_map[labels[i]])
        ntokens.append('[SEP]')
        segment_ids.append(0)
        valid.append(1)
        label_mask.append(1)
        label_ids.append(label_map['[SEP]'])
        input_ids = tokenizer.convert_tokens_to_ids(ntokens)
        input_mask = ([1] * len(input_ids))
        # NOTE(review): this overwrites the label_mask assembled above with a
        # mask over label_ids only — the earlier per-sub-token bookkeeping on
        # label_mask is discarded. Confirm this is intended.
        label_mask = ([1] * len(label_ids))
        while (len(input_ids) < max_seq_length):
            # Pad all parallel lists; `valid` is padded with 1 (see the
            # re-packing loop in Ner.forward, which advances on valid==1).
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(0)
            valid.append(1)
            label_mask.append(0)
        while (len(label_ids) < max_seq_length):
            # label_ids may be longer than input_ids mid-padding; top it up.
            label_ids.append(0)
            label_mask.append(0)
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        assert (len(label_ids) == max_seq_length)
        assert (len(valid) == max_seq_length)
        assert (len(label_mask) == max_seq_length)
        if (ex_index < 1):
            # Log only the very first example for manual inspection.
            logger.info('*** Example ***')
            logger.info(('guid: %s' % example.guid))
            logger.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
            logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
            logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
            logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_ids, valid_ids=valid, label_mask=label_mask))
    return features
|
class Ner(BertForTokenClassification):
    """BERT token classifier that re-packs sub-token outputs to word positions.

    `valid_ids` marks the first WordPiece of each word; those hidden states
    are compacted to the front of the sequence before classification.
    """

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None, attention_mask_label=None):
        """Return the CE loss when `labels` is given, otherwise raw logits.

        Args:
            input_ids: (batch, max_len) WordPiece ids.
            token_type_ids / attention_mask: standard BERT inputs.
            labels: optional (batch, max_len) label ids; 0 = padding.
            valid_ids: 1 where a position is the first sub-token of a word.
            attention_mask_label: 1 where a label position should count
                toward the loss.
        """
        sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
        (batch_size, max_len, feat_dim) = sequence_output.shape
        # Allocate on the same device as the BERT output instead of the
        # original hard-coded 'cuda', so the model also runs on CPU.
        valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device=sequence_output.device)
        for i in range(batch_size):
            jj = -1
            for j in range(max_len):
                if valid_ids[i][j].item() == 1:
                    # Compact valid sub-token states to the front.
                    jj += 1
                    valid_output[i][jj] = sequence_output[i][j]
        sequence_output = self.dropout(valid_output)
        logits = self.classifier(sequence_output)
        if labels is not None:
            # ignore_index=0 skips padding label positions.
            loss_fct = nn.CrossEntropyLoss(ignore_index=0)
            if attention_mask_label is not None:
                active_loss = (attention_mask_label.view(-1) == 1)
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits
|
def get_word_embedding(sp_output_dir=None):
    """Extract the BERT word-embedding matrix from a saved Ner model.

    Args:
        sp_output_dir: directory of a saved/pretrained model.

    Returns:
        (wordidx2ebd, ebd2wordidx): token-id -> embedding vector, and the
        inverse map keyed by the embedding as a tuple.

    Raises:
        ValueError: if the embedding parameter is not found (the original
        code would hit an UnboundLocalError here instead).
    """
    model = Ner.from_pretrained(sp_output_dir)
    bert_embedding = None
    for (name, parameters) in model.named_parameters():
        if name == 'bert.embeddings.word_embeddings.weight':
            bert_embedding = parameters.detach().cpu().numpy()
            break
    if bert_embedding is None:
        raise ValueError('bert.embeddings.word_embeddings.weight not found in model parameters')
    wordidx2ebd = {idx: bert_embedding[idx] for idx in range(bert_embedding.shape[0])}
    # Tuples are hashable, so the vectors can serve as reverse-lookup keys.
    ebd2wordidx = {tuple(v): k for (k, v) in wordidx2ebd.items()}
    return (wordidx2ebd, ebd2wordidx)
|
def x_reconstruct(sequence):
    """Experiment helper: rebuild a sentence from a sequence of word indices.

    Reconstruction stops at the first padding index (n_words - 1); the
    remaining indices are mapped through the module-level idx2word table.
    """
    words = []
    for idx in sequence:
        if idx == (n_words - 1):
            break
        words.append(idx2word[idx])
    return ' '.join(words)
|
def score_gpt(sentence):
    """Return the GPT perplexity (exp of the LM loss) of a sentence."""
    token_ids = gpt_tokenizer.convert_tokens_to_ids(gpt_tokenizer.tokenize(sentence))
    tensor_input = torch.tensor([token_ids])
    # Feeding the input as its own labels yields the language-model loss.
    loss = gpt_model(tensor_input, lm_labels=tensor_input)
    return math.exp(loss)
|
def high_quality(sequence, score_limit_upper=500, score_limit_low=0):
    """Check whether a sentence's GPT perplexity lies in [low, upper).

    Returns:
        (is_high_quality, score): the boolean verdict plus the raw score.
    """
    score = score_gpt(sequence)
    in_range = score_limit_low <= score < score_limit_upper
    return (in_range, score)
|
def extract_same_elem(list1, list2):
    """Return the elements common to both iterables as a list.

    Order of the result is unspecified (set intersection); used to find
    shared label sub-sequences between two sentences.
    """
    return list(set(list1) & set(list2))
|
def most_similar(value, top_n):
    """Return the top_n vocabulary tokens closest to an embedding.

    Closeness is squared Euclidean distance against the module-level
    embedding matrix `mydict_values`; `multi_argmax` picks the best ids.

    Args:
        value: a single word embedding.
        top_n: number of candidate tokens to return.

    Returns:
        List of the top_n most similar tokens (strings).
    """
    sq_dist = np.sum(np.square(value - mydict_values), axis=1)
    # Negate so that the largest values correspond to the smallest distances.
    best_ids = multi_argmax(-sq_dist, top_n)
    return [tokenizer.convert_ids_to_tokens(int(i)) for i in best_ids]
|
def find_sub_seq(sequence, window_size, valid_tag_bar):
    """Find qualifying tag sub-sequences of a fixed window size.

    A window qualifies when it starts on a non-excluded tag and contains at
    least `valid_tag_bar` non-excluded tags.

    Args:
        sequence: one tag sequence (list of tag strings).
        window_size: length of each candidate sub-sequence.
        valid_tag_bar: minimum number of valid (entity) tags per window.

    Returns:
        (sub_sequence, subseq_start_index): the qualifying windows as tag
        tuples, and their starting indices.
    """
    exclude_label = ['O', '[CLS]', '[SEP]', '[UKN]']
    sub_sequence = []
    subseq_start_index = []
    for start in range(len(sequence) - window_size):
        # Windows must begin on a real entity tag.
        if sequence[start] in exclude_label:
            continue
        window = sequence[start:start + window_size]
        valid_count = sum(1 for tag in window if tag not in exclude_label)
        if valid_count >= valid_tag_bar:
            sub_sequence.append(tuple(window))
            subseq_start_index.append(start)
    return (sub_sequence, subseq_start_index)
|
def soft_pair(candidate):
    """Group candidates with enough entity tags by sentence length.

    Filters `candidate` (indexable container of InputExample, e.g. a numpy
    array) to entries whose label sequence contains at least the
    module-level `valid_tag_bar` non-excluded tags, then buckets their
    indices by label-sequence length so soft mixup can pair equal-length
    sentences.

    Returns:
        equal_len_index_list: one array of candidate indices per length
        value in 1..args.max_seq_length.
    """
    valid_tag_list = []
    for (idx, item) in enumerate(candidate):
        tags = item.label
        exclude_label = ['O', '[CLS]', '[SEP]', '[UKN]']
        valid_tag_count = 0
        for tag in tags:
            if (tag not in exclude_label):
                valid_tag_count += 1
        if (valid_tag_count >= valid_tag_bar):
            valid_tag_list.append(idx)
    # Lengths of the label sequences of the surviving candidates; fancy
    # indexing requires `candidate` to be a numpy array.
    valid_len_list = []
    for item in candidate[valid_tag_list]:
        valid_len_list.append(len(item.label))
    equal_len_index_list = []
    for length in range(args.max_seq_length):
        equal_len_index = np.where((np.array(valid_len_list) == (length + 1)))
        # NOTE(review): list(np.where(...)) wraps the index array in a list
        # before fancy-indexing — numpy-version-sensitive; confirm the
        # resulting array shape matches what soft_pair_index_generator expects.
        equal_len_index_list.append(np.array(valid_tag_list)[list(equal_len_index)])
    return equal_len_index_list
|
def soft_pair_index_generator(equal_len_index_list, pair_num, valid_tag_bar):
    """Randomly draw pairs of candidate indices with equal sentence length.

    Args:
        equal_len_index_list: per-length arrays of candidate indices
            (as produced by soft_pair); entry i holds indices of length i+1.
        pair_num: number of pairs to draw.
        valid_tag_bar: unused here; kept for interface compatibility.

    Returns:
        List of `pair_num` two-element index lists.
    """
    # Only lengths with at least two candidates can form a pair.
    eligible_lengths = [i + 1 for (i, idx_arr) in enumerate(equal_len_index_list) if idx_arr.shape[0] >= 2]
    pairs = []
    for _ in range(pair_num):
        length = random.choice(eligible_lengths)
        pairs.append(random.sample(list(equal_len_index_list[length - 1]), 2))
    return pairs
|
def lf_mixup(mixdata, sentence1_idx, start_idx1, sentence2_idx, start_idx2, hyper_lambda):
    """Label-fixed mixup of two sentences over a shared-label window.

    Interpolates the embeddings of the two windows (length = module-level
    `mix_len`) with coefficient `hyper_lambda`, picks the nearest real token
    for each mixed embedding, and splices the mixed window back into BOTH
    sentences. Labels are unchanged ("label-fixed").

    Note: start_idx1 / start_idx2 are plain ints here, not arrays.

    Returns:
        (new_seq_1, new_seq_2, new_seq1_tag, new_seq2_tag): the two mixed
        sentences (as strings) and their original, unmodified tag lists.
    """
    new_seq1 = list(mixdata[sentence1_idx].text_a.split())
    new_seq2 = list(mixdata[sentence2_idx].text_a.split())
    mix_seq = []
    for i in range(mix_len):
        # Interpolate the two word embeddings at the aligned window position.
        e1 = wordidx2ebd[tokenizer.convert_tokens_to_ids(new_seq1[(start_idx1 + i)])]
        e2 = wordidx2ebd[tokenizer.convert_tokens_to_ids(new_seq2[(start_idx2 + i)])]
        e_mix = ((hyper_lambda * e1) + ((1 - hyper_lambda) * e2))
        # Snap back to a real token: nearest 7 candidates, filtered below.
        mix_token = most_similar(e_mix, 7)
        exclude_pool = [new_seq1[(start_idx1 + i)], new_seq2[(start_idx2 + i)], '[UNK]', '[CLS]', '[SEP]', '[PAD]']
        for token in mix_token:
            # Skip the source words, control tokens, '[unused*]' entries and
            # WordPiece continuations ('##...').
            if ((token not in exclude_pool) and (token.find('[unused') == (- 1)) and (token.find('##') == (- 1))):
                mix_seq.append(str(token))
                break
    for i in range(mix_len):
        try:
            new_seq1[(start_idx1 + i)] = mix_seq[i]
            new_seq2[(start_idx2 + i)] = mix_seq[i]
        except:
            # NOTE(review): bare except — if every candidate was filtered out
            # above, mix_seq is shorter than mix_len and this silently leaves
            # the tail of the window unmixed.
            print('\n---NEW SEQ 1 - LENGTH = {}, START IDX = {}---\n'.format(len(new_seq1), start_idx1))
            print('\n---NEW SEQ 2 - LENGTH = {}, START IDX = {}---\n'.format(len(new_seq2), start_idx2))
            continue
    new_seq_1 = ' '.join(new_seq1)
    new_seq_2 = ' '.join(new_seq2)
    new_seq1_tag = mixdata[sentence1_idx].label
    new_seq2_tag = mixdata[sentence2_idx].label
    return (new_seq_1, new_seq_2, new_seq1_tag, new_seq2_tag)
|
def lf_augment(candidate_data, num_mixup, hyper_alpha, score_limit_upper=500, score_limit_low=0):
    """Label-fixed augmentation.

    Scans sentence pairs for identical label sub-sequences (window size and
    threshold come from module-level `window_size` / `valid_tag_bar`), mixes
    each matching pair via `lf_mixup`, and keeps generated sentences that
    pass the GPT perplexity filter until `num_mixup` samples are produced.

    Args:
        candidate_data: sequence of InputExample to mix.
        num_mixup: target number of generated samples.
        hyper_alpha: Beta(alpha, alpha) parameter for the mixing coefficient.
        score_limit_upper: upper perplexity bound; a negative value disables
            the quality filter entirely.
        score_limit_low: lower perplexity bound.

    Returns:
        (new_candidate_data, new_sample_count): a deep copy of the input
        with generated examples appended, and how many were generated.
    """
    global GUID_COUNT
    time_start = time.time()
    pair_count = 0
    stop_flag = 0
    new_sample_count = 0
    new_candidate_data = list(copy.deepcopy(candidate_data))
    for i in range((len(candidate_data) - 1)):
        (sub_sequence_i, subseq_i_index) = find_sub_seq(candidate_data[i].label, window_size, valid_tag_bar)
        for j in range((i + 1), len(candidate_data)):
            (sub_sequence_j, subseq_j_index) = find_sub_seq(candidate_data[j].label, window_size, valid_tag_bar)
            # Windows whose tag tuples appear in both sentences.
            same_subseq = extract_same_elem(sub_sequence_i, sub_sequence_j)
            if (same_subseq != []):
                for ii in range(len(sub_sequence_i)):
                    for jj in range(len(sub_sequence_j)):
                        if (sub_sequence_i[ii] == sub_sequence_j[jj]):
                            # Mixing coefficient drawn from Beta(alpha, alpha).
                            hyper_lambda = np.random.beta(hyper_alpha, hyper_alpha)
                            (newseq1, newseq2, newseq1_tag, newseq2_tag) = lf_mixup(candidate_data, i, subseq_i_index[ii], j, subseq_j_index[jj], hyper_lambda)
                            if (score_limit_upper < 0):
                                # Negative upper bound disables scoring.
                                high_quality_1 = True
                                high_quality_2 = True
                            else:
                                (high_quality_1, score_1) = high_quality(newseq1, score_limit_upper, score_limit_low)
                                (high_quality_2, score_2) = high_quality(newseq2, score_limit_upper, score_limit_low)
                            if high_quality_1:
                                GUID_COUNT += 1
                                new_candidate_data.append(InputExample(guid=GUID_COUNT, text_a=newseq1, text_b=None, label=newseq1_tag))
                                new_sample_count += 1
                            if high_quality_2:
                                GUID_COUNT += 1
                                new_candidate_data.append(InputExample(guid=GUID_COUNT, text_a=newseq2, text_b=None, label=newseq2_tag))
                                new_sample_count += 1
                            if (high_quality_1 or high_quality_2):
                                # Stop after the first successful mix for this pair.
                                break
                    # Only the first matching window of sentence i is used.
                    break
            if (new_sample_count >= num_mixup):
                stop_flag = 1
                break
        if stop_flag:
            break
    time_end = time.time()
    print('{} extra samples are generated, the time cost is {} s'.format(new_sample_count, (time_end - time_start)))
    return (new_candidate_data, new_sample_count)
|
def tag2onehot(tag):
    """Encode a single tag string as a one-hot vector.

    Label ids start at 1 (index 0 is reserved for padding), so the vector
    has len(label_list) + 1 entries; `label_list` is module-level.
    """
    label_map = {label: i for (i, label) in enumerate(label_list, 1)}
    onehot = np.zeros(len(label_map) + 1)
    onehot[label_map[tag]] = 1
    return onehot
|
def onehot2tag(tag):
    """Decode a sequence of (possibly soft) one-hot tag vectors to strings.

    All-zero vectors (padding) are skipped. A vector with a single non-zero
    entry maps straight to its tag; a soft vector with several non-zero
    entries is rendered as a concatenation of ' <weight><tag>' parts.

    Args:
        tag: iterable of numpy vectors of length len(label_list) + 1.

    Returns:
        List of decoded tag strings, one per non-padding position.

    Note: the original built an unused forward label_map and rebound the
    parameter name inside the loop; both cleaned up here.
    """
    reverse_label_map = {i: label for (i, label) in enumerate(label_list, 1)}
    return_tag = []
    for word in tag:
        if word.any():
            idx = np.where(word != 0)
            if len(idx[0]) > 1:
                # Soft label: concatenate every weighted component.
                mixtag = ''
                for item in idx[0]:
                    mixtag += ((' ' + str(word[item])) + reverse_label_map[item])
            else:
                mixtag = reverse_label_map[idx[0][0]]
            return_tag.append(mixtag)
    return return_tag
|
def slack_mixup(mixdata, sentence1_idx, sentence2_idx, start_idx1, start_idx2, hyper_lambda):
    """Slack (soft-label) mixup of two sentences; called by slack_augment.

    Mixes both the word embeddings and the one-hot tag vectors of the two
    windows (length = module-level `mix_len`) with coefficient
    `hyper_lambda`, splicing the mixed tokens and soft tags back into both
    sentences.

    Note: unlike lf_mixup, start_idx1/start_idx2 are index arrays here and
    only their first element is used.

    Returns:
        (new_seq1, new_seq2, new_seq1_tag, new_seq2_tag): the two mixed
        sentences (strings) and their soft tag-vector lists, padded to
        args.max_seq_length.
    """
    new_seq1 = list(mixdata[sentence1_idx].text_a.split())
    new_seq2 = list(mixdata[sentence2_idx].text_a.split())
    labels_1 = copy.deepcopy(mixdata[sentence1_idx].label)
    labels_2 = copy.deepcopy(mixdata[sentence2_idx].label)
    # Wrap with the control tags that the feature converter also adds.
    labels_1 = np.concatenate((['[CLS]'], labels_1, ['[SEP]']))
    labels_2 = np.concatenate((['[CLS]'], labels_2, ['[SEP]']))
    new_seq1_tag = []
    new_seq2_tag = []
    for (i, item) in enumerate(labels_1):
        new_seq1_tag.append(tag2onehot(item))
    for (i, item) in enumerate(labels_2):
        new_seq2_tag.append(tag2onehot(item))
    # Pad soft-tag sequences with zero vectors (12 = label vector length).
    while (len(new_seq1_tag) < args.max_seq_length):
        new_seq1_tag.append(np.zeros(12))
    while (len(new_seq2_tag) < args.max_seq_length):
        new_seq2_tag.append(np.zeros(12))
    mix_seq = []
    mix_seq_tag = []
    for i in range(mix_len):
        # Interpolate word embeddings and pick the nearest real token.
        e1 = wordidx2ebd[tokenizer.convert_tokens_to_ids(new_seq1[(start_idx1[0] + i)])]
        e2 = wordidx2ebd[tokenizer.convert_tokens_to_ids(new_seq2[(start_idx2[0] + i)])]
        e_mix = ((hyper_lambda * e1) + ((1 - hyper_lambda) * e2))
        mix_token = most_similar(e_mix, 7)
        exclude_pool = [new_seq1[(start_idx1[0] + i)], new_seq2[(start_idx2[0] + i)], '[UNK]', '[CLS]', '[SEP]', '[PAD]']
        for token in mix_token:
            if (token not in exclude_pool):
                mix_seq.append(token)
                break
        # Interpolate the one-hot tag vectors with the same coefficient.
        tag1 = new_seq1_tag[(start_idx1[0] + i)]
        tag2 = new_seq2_tag[(start_idx2[0] + i)]
        mix_tag = ((hyper_lambda * tag1) + ((1 - hyper_lambda) * tag2))
        mix_seq_tag.append(mix_tag)
    for i in range(mix_len):
        # Splice mixed tokens and soft tags back into both sentences.
        new_seq1[(start_idx1[0] + i)] = mix_seq[i]
        new_seq2[(start_idx2[0] + i)] = mix_seq[i]
        new_seq1_tag[(start_idx1[0] + i)] = mix_seq_tag[i]
        new_seq2_tag[(start_idx2[0] + i)] = mix_seq_tag[i]
    new_seq1 = ' '.join(new_seq1)
    new_seq2 = ' '.join(new_seq2)
    return (new_seq1, new_seq2, new_seq1_tag, new_seq2_tag)
|
def slack_augment(candidate_data=None, num_mixup=None, hyper_alpha=8, score_limit_upper=500, score_limit_low=0):
    """Augment via slack (soft-label) mixup.

    Pairs sentences that each contain at least one qualifying sub-sequence
    (per module-level `window_size` / `valid_tag_bar`), mixes them with
    `slack_mixup`, and keeps perplexity-filtered results until `num_mixup`
    samples are generated.

    Returns:
        (mixup_data, mixup_label, new_sample_count): generated InputExamples
        (with the ORIGINAL hard labels of the source sentences), their soft
        tag-vector sequences, and the count generated.
    """
    global GUID_COUNT
    time_start = time.time()
    new_sample_count = 0
    stop_flag = 0
    mixup_data = []
    mixup_label = []
    for i in range((len(candidate_data) - 1)):
        (sub_sequence_i, subseq_i_index) = find_sub_seq(candidate_data[i].label, window_size, valid_tag_bar)
        if (len(sub_sequence_i) > 0):
            for j in range((i + 1), len(candidate_data)):
                (sub_sequence_j, subseq_j_index) = find_sub_seq(candidate_data[j].label, window_size, valid_tag_bar)
                if (len(sub_sequence_j) > 0):
                    hyper_lambda = np.random.beta(hyper_alpha, hyper_alpha)
                    (newseq1, newseq2, newseq1_tag, newseq2_tag) = slack_mixup(candidate_data, i, j, subseq_i_index, subseq_j_index, hyper_lambda)
                    if (score_limit_upper < 0):
                        # Negative upper bound disables the quality filter.
                        high_quality_1 = True
                        high_quality_2 = True
                    else:
                        (high_quality_1, score_1) = high_quality(newseq1, score_limit_upper, score_limit_low)
                        (high_quality_2, score_2) = high_quality(newseq2, score_limit_upper, score_limit_low)
                    # NOTE(review): asymmetric — newseq1 is kept when EITHER
                    # sequence passes the filter; confirm this is intended.
                    if (high_quality_1 or high_quality_2):
                        GUID_COUNT += 1
                        mixup_data.append(InputExample(guid=GUID_COUNT, text_a=newseq1, text_b=None, label=candidate_data[i].label))
                        mixup_label.append(newseq1_tag)
                        new_sample_count += 1
                        if (new_sample_count >= num_mixup):
                            stop_flag = 1
                            break
                    if high_quality_2:
                        GUID_COUNT += 1
                        mixup_data.append(InputExample(guid=GUID_COUNT, text_a=newseq2, text_b=None, label=candidate_data[j].label))
                        mixup_label.append(newseq2_tag)
                        new_sample_count += 1
                        if (new_sample_count >= num_mixup):
                            stop_flag = 1
                            break
        if stop_flag:
            break
    time_end = time.time()
    print('{} extra samples are generated, the time cost is {} s'.format(new_sample_count, (time_end - time_start)))
    return (mixup_data, mixup_label, new_sample_count)
|
def soft_mixup(candidate_1, candidate_2, hyper_lambda):
    """Whole-sentence soft mixup of two equal-length candidates.

    Mixes EVERY token position (not just a window) of the two sentences:
    word embeddings are interpolated and snapped to the nearest real token,
    and the one-hot tag vectors are interpolated into soft labels. Called by
    soft_augment.

    Args:
        candidate_1 / candidate_2: InputExamples with equal-length labels.
        hyper_lambda: mixing coefficient in [0, 1].

    Returns:
        (new_seq, new_seq_tag): the mixed sentence string and its soft
        tag-vector sequence (padded to args.max_seq_length).
    """
    seq1 = list(candidate_1.text_a.split())
    seq2 = list(candidate_2.text_a.split())
    y1 = copy.deepcopy(candidate_1.label)
    y2 = copy.deepcopy(candidate_2.label)
    new_seq1_tag = []
    new_seq2_tag = []
    for (i, item) in enumerate(y1):
        new_seq1_tag.append(tag2onehot(item))
    for (i, item) in enumerate(y2):
        new_seq2_tag.append(tag2onehot(item))
    # Pad soft-tag sequences with zero vectors (12 = label vector length).
    while (len(new_seq1_tag) < args.max_seq_length):
        new_seq1_tag.append(np.zeros(12))
    while (len(new_seq2_tag) < args.max_seq_length):
        new_seq2_tag.append(np.zeros(12))
    new_seq = copy.deepcopy(seq1)
    new_seq_tag = copy.deepcopy(new_seq1_tag)
    assert (len(seq1) == len(seq2)), 'The two sequences should be in same valid length'
    mix_len_sentence = len(seq1)
    mix_seq = []
    mix_seq_tag = []
    for i in range(mix_len_sentence):
        # Interpolate embeddings and snap to the nearest real token.
        e1 = wordidx2ebd[tokenizer.convert_tokens_to_ids(seq1[i])]
        e2 = wordidx2ebd[tokenizer.convert_tokens_to_ids(seq2[i])]
        e_mix = ((hyper_lambda * e1) + ((1 - hyper_lambda) * e2))
        mix_token = most_similar(e_mix, 7)
        exclude_pool = [seq1[i], seq2[i], '[UNK]', '[CLS]', '[SEP]', '[PAD]']
        for token in mix_token:
            if (token not in exclude_pool):
                mix_seq.append(token)
                break
        # Interpolate the tag vectors with the same coefficient.
        tag1 = new_seq1_tag[i]
        tag2 = new_seq2_tag[i]
        mix_tag = ((hyper_lambda * tag1) + ((1 - hyper_lambda) * tag2))
        mix_seq_tag.append(mix_tag)
    for i in range(mix_len_sentence):
        new_seq[i] = mix_seq[i]
        new_seq_tag[i] = mix_seq_tag[i]
    new_seq = ' '.join(new_seq)
    return (new_seq, new_seq_tag)
|
def soft_augment(candidate_data=None, num_mixup=None, hyper_alpha=8, score_limit_upper=500, score_limit_low=0):
    """Augment via whole-sentence soft mixup.

    Buckets candidates by length (soft_pair), draws up to 15 * num_mixup
    random equal-length pairs, mixes each with soft_mixup, and keeps
    perplexity-filtered results until `num_mixup` samples are generated.

    Returns:
        (mixup_data, mixup_label, new_sample_count): generated InputExamples
        (carrying the first source sentence's hard labels), their soft
        tag-vector sequences, and the count generated.
    """
    global GUID_COUNT
    print('Implementing soft mixup augmentation, which may take hundreds of seconds')
    time_start = time.time()
    new_sample_count = 0
    mixup_data = []
    mixup_label = []
    candidate_data = copy.deepcopy(candidate_data)
    equal_len_index_list = soft_pair(candidate_data)
    # Oversample candidate pairs (15x) since many will fail the filter.
    pair_index_list = soft_pair_index_generator(equal_len_index_list, (15 * num_mixup), valid_tag_bar)
    for index in pair_index_list:
        hyper_lambda = np.random.beta(hyper_alpha, hyper_alpha)
        i = index[0]
        j = index[1]
        (new_seq, new_seq_tag) = soft_mixup(candidate_data[i], candidate_data[j], hyper_lambda)
        if (score_limit_upper < 0):
            # Negative upper bound disables the quality filter.
            high_quality_flag = True
        else:
            (high_quality_flag, score) = high_quality(new_seq, score_limit_upper, score_limit_low)
        if high_quality_flag:
            GUID_COUNT += 1
            mixup_data.append(InputExample(guid=GUID_COUNT, text_a=new_seq, text_b=None, label=candidate_data[i].label))
            mixup_label.append(new_seq_tag)
            new_sample_count += 1
            # Persist the mixed pair for case-study inspection.
            case_util(score, candidate_data[i].text_a, candidate_data[j].text_a, candidate_data[i].label, candidate_data[j].label, new_seq, new_seq_tag, prefix='Soft_case')
        if (new_sample_count >= num_mixup):
            break
    time_end = time.time()
    print('{} extra samples are generated, the time cost is {} s'.format(new_sample_count, (time_end - time_start)))
    return (mixup_data, mixup_label, new_sample_count)
|
def active_augment_learn(init_flag=None, train_data=None, num_initial=200, active_policy=uncertainty_sampling, augment_method=lf_augment, num_query=5, num_sample=[100, 100, 100, 100, 100], augment_rate=0.2, augment_decay=1, hyper_alpha=8, alpha_decay=1, Epochs=10, score_limit_low=0, score_limit_upper=500, fit_only_new_data=False, mixup_flag=True, single_use=False, prefix='SeqMix'):
    """Active-learning loop with SeqMix data augmentation.

    Initializes a model on a random seed set (optionally augmented), then per
    query round: selects samples with *active_policy*, augments the queried
    batch with *augment_method*, retrains, evaluates and logs a table row.

    Args:
        init_flag: if truthy, load the initial model from 'init_dir'.
        train_data: full labeled dataset (seed-set source and query pool).
        num_initial: random seed-set size.
        active_policy: callable (model, pool, n) -> (query_idx, query_instance).
        augment_method: one of lf_augment / slack_augment / soft_augment.
        num_query: number of query rounds; num_sample gives per-round sizes.
        augment_rate / augment_decay: fraction of queried samples to generate,
            decayed per round.
        hyper_alpha / alpha_decay: Beta mixing parameter and its decay.
        Epochs: training epochs per round.
        score_limit_low / score_limit_upper: GPT perplexity filter bounds.
        fit_only_new_data: unused in this function; kept for compatibility.
        mixup_flag: when False (and not soft/slack), duplicate instead of mix.
        single_use: if True, drop generated samples after each round's training.
        prefix: tag used when saving results.

    Returns:
        The model trained after the final query round.
    """
    func_paras = locals()
    pool = copy.deepcopy(train_data)
    train_data = copy.deepcopy(train_data)
    original_datasize = len(train_data)
    initial_idx = np.random.choice(range(len(train_data)), size=num_initial, replace=False)
    train_data = np.array(train_data)[initial_idx]
    (init_data_loader, query_idx) = get_tr_set(size=num_initial, train_examples=train_data)
    pool = np.delete(pool, query_idx, axis=0)
    print(np.array(pool).shape)
    if init_flag:
        init_dir = 'init_dir'
        model = Ner.from_pretrained(init_dir)
        print('Initial model loaded from google drive')
    else:
        model = active_train(init_data_loader, None, Epochs)
    report = evaluate('Intialization', model)
    print_table = PrettyTable(['Model', 'Number of Query', 'Data Usage', 'Data Augmented', 'Test_F1'])
    print_table.add_row(['Initial Model', 'Model Initialization', (len(train_data) / original_datasize), 0, report.split()[-2]])
    print(print_table)
    test_f1 = []
    dev_f1 = []
    num_augment = int(num_initial * augment_rate)
    # BUG FIX: the original condition was
    #   (augment_method == slack_augment) or soft_augment
    # which is always true because the bare function object `soft_augment`
    # is truthy — the else branch (e.g. the default lf_augment, which
    # returns a 2-tuple) was unreachable and would have crashed on unpack.
    if augment_method in (slack_augment, soft_augment):
        # Soft-label methods return (data, soft_labels, count) and train
        # through a dedicated soft loader.
        (soft_data, soft_labels, new_sample_count) = augment_method(train_data, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
        soft_loader = get_tr_set(train_examples=soft_data, soft_labels=soft_labels)
    else:
        (mix_data, new_sample_count) = augment_method(train_data, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
        soft_loader = None
    aug_data_loader = get_tr_set(train_examples=train_data)
    model = active_train(data_loader=aug_data_loader, model=model, Epochs=Epochs, soft_loader=soft_loader)
    report = evaluate('SeedSetAug', model)
    aug_total_count = new_sample_count
    print_table.add_row(['Augment Model', 'Seed Set Augmented', (len(train_data) / original_datasize), aug_total_count, report.split()[-2]])
    print(print_table)
    save_result(prefix=prefix, func_paras=func_paras, report=report, table=print_table)
    print('Learning loop start')
    for idx in range(num_query):
        # Decay the augmentation volume and the Beta parameter per round.
        num_augment = int((num_sample[idx] * augment_rate) * (augment_decay ** idx))
        hyper_alpha = hyper_alpha * (alpha_decay ** idx)
        print(('Query no. %d' % (idx + 1)))
        (query_idx, query_instance) = active_policy(model, pool, num_sample[idx])
        mixup_candidate = pool[query_idx]
        pool = np.delete(pool, query_idx, axis=0)
        # Same fix as above: dispatch on the actual augment_method.
        if augment_method in (slack_augment, soft_augment):
            (new_soft_data, new_soft_labels, new_sample_count) = augment_method(mixup_candidate, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
            soft_data = np.concatenate((soft_data, new_soft_data))
            soft_labels = np.concatenate((soft_labels, new_soft_labels))
            soft_loader = get_tr_set(train_examples=soft_data, soft_labels=soft_labels)
            mix_data = mixup_candidate
        elif mixup_flag:
            (mix_data, new_sample_count) = augment_method(mixup_candidate, num_augment, hyper_alpha, score_limit_upper, score_limit_low)
            soft_loader = None
        else:
            # NOTE(review): mixup_candidate_X / mixup_candidate_y and
            # duplicate_pair_data are not defined in this module's visible
            # scope — this branch looks dead/legacy; confirm before use.
            (mix_data, new_sample_count) = duplicate_pair_data(mixup_candidate_X, mixup_candidate_y, num_augment)
        train_data = np.concatenate((train_data, mix_data))
        aug_total_count += new_sample_count
        aug_data_loader = get_tr_set(train_examples=train_data)
        model = active_train(data_loader=aug_data_loader, model=model, Epochs=Epochs, soft_loader=soft_loader)
        if single_use:
            # Drop this round's generated samples after training on them once.
            train_data = train_data[:-new_sample_count]
            aug_total_count = new_sample_count
        report = evaluate('SeqMixAug', model)
        data_usage = len(train_data)
        if augment_method == lf_mixup:
            data_usage -= aug_total_count
        print_table.add_row(['Augmented Model', (idx + 1), (data_usage / original_datasize), aug_total_count, report.split()[-2]])
        print(print_table)
        save_result(prefix=prefix, func_paras=func_paras, report=report, table=print_table)
    return model
|
class coco_voc():
    """Lightweight COCO caption dataset wrapper (image paths + captions).

    Loads the image-id split file and the trainval2014 caption annotations.
    Images are assumed to live in two-digit subfolders derived from
    floor(id / 10000).
    """
    def __init__(self, image_set, devkit_path=None, image_path=None):
        # e.g. 'coco_train' for image_set='train'.
        self._name = (('coco' + '_') + image_set)
        self._image_set = image_set
        self._devkit_path = (self._get_default_path() if (devkit_path is None) else devkit_path)
        self._data_path = os.path.join(self._devkit_path)
        self._image_path = (os.path.join(self._data_path, 'images') if (image_path is None) else image_path)
        self._image_ext = '.jpg'
        # image_index_str keeps the raw strings; _image_index is the int list.
        (image_index_str, self._image_index) = self._load_image_set_index()
        self._coco_caption_data = COCO(os.path.join(self._devkit_path, 'annotations', 'captions_trainval2014.json'))
        assert os.path.exists(self._devkit_path), 'COCO path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), 'COCO does not exist: {}'.format(self._data_path)
    def get_file_name(self, index):
        """Return the file name prefixed with its two-digit bucket folder."""
        return os.path.join('{:02d}'.format(int(math.floor((int(index) / 10000.0)))), '{}'.format(index))
    def image_path_at(self, i):
        """Return the absolute path to image i in the image sequence."""
        return self.image_path_from_index(self._image_index[i])
    def image_path_from_index(self, index):
        """Construct an image path from the image's "index" identifier."""
        # pre_fix computed but unused; get_file_name re-derives the bucket.
        pre_fix = ('%02d' % int(math.floor((int(index) / 10000.0))))
        image_path = os.path.join(self._image_path, (self.get_file_name(index) + self._image_ext))
        return image_path
    def _load_image_set_index(self):
        """Load the indexes listed in this dataset's image set file."""
        image_set_file = os.path.join(self._data_path, 'splits', (self._image_set + '.ids'))
        assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        imlist = [int(x) for x in image_index]
        return (image_index, imlist)
    def _get_default_path(self):
        """Return the default path where COCO is expected to be installed."""
        return os.path.join('..', 'data')
    @property
    def name(self):
        return self._name
    @property
    def coco_caption_data(self):
        return self._coco_caption_data
    @property
    def num_classes(self):
        # NOTE(review): self._classes is never assigned in this class —
        # this (and classes/class_to_ind below) will raise AttributeError
        # unless set by a subclass or externally; confirm.
        return len(self._classes)
    @property
    def classes(self):
        return self._classes
    @property
    def class_to_ind(self):
        return self._class_to_ind
    @property
    def image_index(self):
        return self._image_index
    @property
    def num_images(self):
        return len(self.image_index)
|
def get_vocab_top_k(vocab, k):
    """Return a copy of *vocab* with every entry truncated to its first k items.

    Args:
        vocab: dict mapping keys to sliceable sequences (e.g. 'words', 'counts').
        k: number of leading items to keep per entry.
    """
    return {key: entries[:k] for (key, entries) in vocab.items()}
|
def get_vocab_counts(image_ids, coco_caps, max_cap, vocab):
    """Count, per image, in how many of its first max_cap captions each
    vocabulary word appears.

    Args:
        image_ids: list of COCO image ids.
        coco_caps: COCO caption API object (getAnnIds / loadAnns).
        max_cap: number of captions per image to use; asserts each image
            has at least this many.
        vocab: dict with a 'words' list defining the column order.

    Returns:
        counts: float array of shape (len(image_ids), len(vocab['words'])).
    """
    # np.float was removed in NumPy 1.24; plain float is the same dtype.
    counts = np.zeros((len(image_ids), len(vocab['words'])), dtype=float)
    # O(1) word -> column lookup instead of list.index inside the loop;
    # setdefault keeps the FIRST position, matching list.index semantics.
    word_to_pos = {}
    for (p, w) in enumerate(vocab['words']):
        word_to_pos.setdefault(w, p)
    # range: xrange is Python 2 only.
    for i in range(len(image_ids)):
        ann_ids = coco_caps.getAnnIds(image_ids[i])
        assert (len(ann_ids) >= max_cap), 'less than {:d} number of captions for image {:d}'.format(max_cap, image_ids[i])
        ann_ids.sort()
        ann_ids = ann_ids[:max_cap]
        anns = coco_caps.loadAnns(ann_ids)
        tokenized = [word_tokenize(str(a['caption']).lower()) for a in anns]
        for caption_tokens in tokenized:
            # Unique vocab columns hit by this caption; +1 per caption.
            pos = list({word_to_pos[tok] for tok in caption_tokens if tok in word_to_pos})
            counts[(i, pos)] = (counts[(i, pos)] + 1)
    return counts
|
def get_vocab(imset, coco_caps, punctuations, mapping):
    """Build a POS-tagged vocabulary from all captions in `coco_caps`.

    Parameters
    ----------
    imset : unused here; kept for interface compatibility.
    coco_caps : COCO caption API object (getImgIds / getAnnIds / loadAnns).
    punctuations : collection of tokens to exclude from the vocabulary.
    mapping : dict mapping fine POS tags to coarse classes ('other' when absent).

    Returns
    -------
    dict with keys 'words', 'counts', 'poss', sorted by descending count.
    """
    image_ids = coco_caps.getImgIds()
    image_ids.sort()
    t = []
    # Fix: the original used the Python-2-only xrange().
    for i in range(len(image_ids)):
        annIds = coco_caps.getAnnIds(image_ids[i])
        anns = coco_caps.loadAnns(annIds)
        tmp = [pos_tag(word_tokenize(str(a['caption']).lower())) for a in anns]
        t.append(tmp)
    # Flatten the nested [image][caption][token] structure into (word, tag) pairs.
    t = [t3 for t1 in t for t2 in t1 for t3 in t2]
    # Collapse fine-grained POS tags to the coarse classes in `mapping`.
    t = [((l, 'other') if (mapping.get(r) is None) else (l, mapping[r])) for (l, r) in t]
    vcb = Counter(t)
    vcb = vcb.most_common()
    word = [l for ((l, r), c) in vcb]
    pos = [r for ((l, r), c) in vcb]
    count = [c for ((l, r), c) in vcb]
    poss = []
    counts = []
    words = sorted(set(word))
    # Group duplicate words: keep the POS of the most frequent (word, pos)
    # pair and sum the counts. The index lists are precomputed in one pass
    # to avoid the original O(n^2) repeated list scans; insertion order
    # matches the original ascending-index enumeration.
    indexes_by_word = {}
    for (i, x) in enumerate(word):
        indexes_by_word.setdefault(x, []).append(i)
    for j in range(len(words)):
        indexes = indexes_by_word[words[j]]
        pos_tmp = [pos[i] for i in indexes]
        count_tmp = [count[i] for i in indexes]
        ind = np.argmax(count_tmp)
        poss.append(pos_tmp[ind])
        counts.append(sum(count_tmp))
    # Re-sort everything by descending total count.
    ind = np.argsort(counts)
    ind = ind[::(- 1)]
    words = [words[i] for i in ind]
    poss = [poss[i] for i in ind]
    counts = [counts[i] for i in ind]
    # Drop punctuation tokens.
    non_punct = [i for (i, x) in enumerate(words) if (x not in punctuations)]
    words = [words[i] for i in non_punct]
    counts = [counts[i] for i in non_punct]
    poss = [poss[i] for i in non_punct]
    vocab = {'words': words, 'counts': counts, 'poss': poss}
    return vocab
|
class State(Enum):
    """Execution state of a unit of work."""
    WAITING = 0  # not started yet
    RUNNING = 1  # currently executing
    FINISHED = 2  # completed
|
class Log(IntEnum):
    """Log verbosity levels; lower values are more severe.

    Callers compare `Log.<level> <= self.LogLevel` before printing, so a
    message is emitted only when its level does not exceed the configured
    verbosity.
    """
    error = 0
    warning = 1
    notice = 2
    info = 3
    debug1 = 4  # debug1..debug5: increasingly verbose debug output
    debug2 = 5
    debug3 = 6
    debug4 = 7
    debug5 = 8
|
class Common():
    """Constants and plan-tree helper methods shared by the repository classes.

    A "plan tree" here is a PostgreSQL EXPLAIN-style structure: a dict that
    may contain a nested root under 'Plan' and/or child nodes in a 'Plans'
    list, each child being a dict of the same shape.
    """
    def __init__(self):
        pass
    '\n Global variables\n '
    'top directory'
    REPOSITORY_DIR = 'pgpi_repository'
    CONF_FILE = 'hosts.conf'
    STAT_FILE = 'stat.dat'
    'tables directory'
    TABLES_DIR = 'tables'
    TABLES_FILE = 'log.csv'
    TABLES_QUERY_DIR = 'query'
    TABLES_PLAN_DIR = 'plan'
    TABLES_PLAN_JSON_DIR = 'plan_json'
    'grouping directory'
    GROUPING_DIR = 'grouping'
    'regression directory'
    REGRESSION_DIR = 'regression'
    'formatted regression parameter directory'
    FORMATTED_REGRESSION_PARAMS_DIR = 'reg_params'
    'pg_query_plan'
    SCHEMA = 'query_plan'
    LOG_TABLE = 'log'
    REG_PARAMS_TABLE = 'reg'
    '\n Various methods\n '
    def hash_dir(self, num):
        # Bucket a number into one of 1000 zero-padded directory names ('000'..'999').
        return str((num % 1000)).zfill(3)
    def input_serverId(self):
        # Interactively prompt for a serverId; exit with status 1 on Ctrl-C.
        _msg = 'Enter serverId:'
        try:
            _serverId = input(_msg)
        except KeyboardInterrupt:
            print('\nInput Interrupted.\n')
            sys.exit(1)
        return _serverId
    def apply_func_in_each_node(self, func, Plans):
        # Recursively apply `func` to every node of a plan tree.
        # `Plans` may be a list of nodes or a single node dict.
        if isinstance(Plans, list):
            for plan in Plans:
                plan = func(plan)
                if ('Plans' in plan):
                    self.apply_func_in_each_node(func, plan['Plans'])
            return
        else:
            Plans = func(Plans)
            if ('Plan' in Plans):
                self.apply_func_in_each_node(func, Plans['Plan'])
            if ('Plans' in Plans):
                self.apply_func_in_each_node(func, Plans['Plans'])
            return
    def delete_unnecessary_objects(self, delete_objects_func, Plans):
        # Thin wrapper: run a deleter callback over every node of the tree.
        self.apply_func_in_each_node(delete_objects_func, Plans)
    def count_workers(self, Plans):
        'Count the numbers of Planned Workers and Launched Workers.'
        def count(plan):
            # +1 accounts for the leader process itself.
            if ('Workers Planned' in plan):
                return ((plan['Workers Planned'] + 1), (plan['Workers Launched'] + 1))
            else:
                return None
        if isinstance(Plans, list):
            for plan in Plans:
                _ret = count(plan)
                if (_ret is not None):
                    return _ret
                if ('Plans' in plan):
                    # NOTE(review): returns after descending into the first
                    # child that has 'Plans', even when the recursion yields
                    # None — confirm this early return is intended.
                    _ret = self.count_workers(plan['Plans'])
                    return _ret
        else:
            _ret = count(Plans)
            if (_ret is not None):
                return _ret
            if ('Plan' in Plans):
                _ret = self.count_workers(Plans['Plan'])
            if ('Plans' in Plans):
                _ret = self.count_workers(Plans['Plans'])
            return _ret
    def count_nodes(self, Plans):
        '\n Count the number of nodes in Plans.\n '
        def count_nodes(Plans):
            # Inner recursive walker (intentionally shadows the method name).
            if isinstance(Plans, list):
                for plan in Plans:
                    count(plan)
                    if ('Plans' in plan):
                        count_nodes(plan['Plans'])
                return
            else:
                count(Plans)
                if ('Plan' in Plans):
                    count_nodes(Plans['Plan'])
                if ('Plans' in Plans):
                    count_nodes(Plans['Plans'])
                return
        def count(plan):
            # Accumulate into a (name-mangled) instance counter; only real
            # plan nodes (those carrying 'Node Type') are counted.
            if ('Node Type' in plan):
                self.__num += 1
        self.__num = 0
        count_nodes(Plans)
        return self.__num
    def read_plan_json(self, planpath):
        'Read the plan from planpath.'
        _js = open(planpath, 'r')
        _json_dict = json.load(_js)
        _js.close()
        return _json_dict
    def write_plan_json(self, jdict, planpath):
        'Write the plan (jdict) to planpath.'
        _jdp = json.dumps(jdict, ensure_ascii=False, indent=4, separators=(',', ': '))
        _fp = open(planpath, 'w')
        _fp.write('{}'.format(_jdp))
        _fp.close()
    def isScan(self, plan):
        'Check this plan is scan or not.'
        # Substring match of the node type against every scan-like type.
        for _i in ('Result', 'Seq Scan', 'Sample Scan', 'Index Scan', 'Index Only Scan', 'Bitmap Index Scan', 'Bitmap Heap Scan', 'Tid Scan', 'Function Scan', 'Table Function Scan', 'Values Scan', 'CTE Scan', 'Named Tuplestore Scan', 'WorkTable Scan'):
            if (_i in plan['Node Type']):
                return True
        return False
    def isOuter(self, plan):
        'Check this plan is outer path or not.'
        if ('Parent Relationship' in plan):
            return (True if (plan['Parent Relationship'] == 'Outer') else False)
        return False
    def isInner(self, plan):
        'Check this plan is inner path or not.'
        if ('Parent Relationship' in plan):
            return (True if (plan['Parent Relationship'] == 'Inner') else False)
        return False
    def count_removed_rows(self, plan):
        'Count Removed Rows.'
        # Sum every "Rows Removed by ..." counter present in the node.
        _rr = 0
        for _i in ('Rows Removed by Filter', 'Rows Removed by Index Recheck', 'Rows Removed by Join Filter', 'Rows Removed by Conflict Filter'):
            if (_i in plan):
                _rr += plan[_i]
        return _rr
    def get_inputs(self, plan):
        'Get outer and inter actual rows.'
        def get_removed_rows(plan, num):
            # Assumes removed-rows values are lists here (grouped plans store
            # lists of samples) — TODO confirm; when none are present, a zero
            # list of length `num` is returned. Note only the LAST matching
            # key wins when several removed-rows keys exist.
            _rr = []
            for r in ('Rows Removed by Filter', 'Rows Removed by Index Recheck', 'Rows Removed by Join Filter', 'Rows Removed by Conflict Filter'):
                if (r in plan):
                    _rr = plan[r]
            if (len(_rr) == 0):
                _rr = ([0] * num)
            return _rr
        def get_children_actual_rows(plan):
            # Split the node's two children into outer (slot 0) and inner (slot 1).
            _X = [[], []]
            for i in range(0, 2):
                p = plan['Plans'][i]
                k = (0 if (p['Parent Relationship'] == 'Outer') else 1)
                _X[k] += p['Actual Rows']
            return (_X[0], _X[1])
        (_Xouter, _Xinner) = get_children_actual_rows(plan)
        _RR = get_removed_rows(plan, len(_Xouter))
        return (_Xouter, _Xinner, _RR)
|
class Database(Repository):
    """Open psycopg2 connections using per-serverId settings from hosts.conf."""
    def __init__(self, base_dir='.', log_level=Log.info):
        self.set_base_dir(base_dir)
        self.LogLevel = log_level
    '\n Public methods\n '
    def get_connection_param(self, serverId, database='postgres'):
        'Return a connection param using the values in the hosts.conf.'
        # Exit when the serverId has no section in hosts.conf.
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        _conn = ''
        _config = configparser.ConfigParser()
        _config.read(self.get_conf_file_path())
        for _section in _config.sections():
            if (_section == serverId):
                # Build a libpq keyword/value connection string.
                _conn = ((('host=' + str(_config[_section]['host'])) + ' port=') + str(_config[_section]['port']))
                _conn += (((' dbname=' + database) + ' user=') + str(_config[_section]['username']))
                "\n If 'input_password' is True, prompt to enter the password.\n "
                if ('input_password' in _config[_section]):
                    if (str.lower(_config[_section]['input_password']) == 'true'):
                        _password = getpass.getpass()
                        _conn += (' password=' + _password)
                        # Prompted password takes precedence; return immediately.
                        return _conn
                '\n If there is a password value, append the password to\n the connection param.\n '
                if ('password' in _config[_section]):
                    if _config[_section]['password']:
                        _conn += (' password=' + str(_config[_section]['password']))
        return _conn
    def connect(self, serverId, database='postgres'):
        'Connect to the database server specified by serverId.'
        _conn = self.get_connection_param(serverId, database)
        try:
            _connection = psycopg2.connect(_conn)
        except psycopg2.OperationalError as e:
            print("Error: Could not connect to '{}'".format(serverId))
            sys.exit(1)
        # Autocommit so callers do not need explicit transaction handling.
        _connection.autocommit = True
        return _connection
|
class GetTables(MergePlan):
    """Fetch new rows from the query_plan.log table into the local repository."""
    def __init__(self, base_dir='.', log_level=Log.info):
        self.set_base_dir(base_dir)
        self.ServerId = ''
        self.LogLevel = log_level
    def __set_serverId(self, serverId):
        # Remember the serverId for subsequent repository path lookups.
        self.ServerId = serverId
    def __exec_select_cmd(self, connection, sql):
        # Execute a SELECT and return the open cursor, or None when no rows
        # matched. Exits the whole process on SQL errors.
        _cur = connection.cursor()
        try:
            _cur.execute(sql)
        except Exception as err:
            _cur.close()
            if (Log.error <= self.LogLevel):
                print("SQL Error:'{}'".format(sql))
            sys.exit(1)
        if (_cur.rowcount == 0):
            _cur.close()
            if (Log.info <= self.LogLevel):
                print("Info: the number of result is 0:'{}'".format(sql))
            return None
        return _cur
    def __get_max_seqid(self, connection):
        # Return the largest seqid stored in query_plan.log (0 for an empty table).
        _max_seqid = (- 1)
        _sql = ((('SELECT max(seqid) FROM ' + self.SCHEMA) + '.') + self.LOG_TABLE)
        _cur = self.__exec_select_cmd(connection, _sql)
        for _row in _cur:
            _max_seqid = _row[0]
        _cur.close()
        return (_max_seqid if (isinstance(_max_seqid, int) == True) else 0)
    def __get_log(self, connection, current_seqid, max_seqid):
        '\n This function performs the main processing of public method get_tables().\n That is, it gets new log data from query_plan.log table, and stores\n the getting data into appropriate directories.\n '
        def store_log(seqid, dirpath, data, postfix=''):
            # Write `data` to dirpath/<seqid><postfix>, creating the directory
            # on first use.
            if (os.path.exists(dirpath) == False):
                os.makedirs(dirpath)
            _path = self.path(dirpath, (str(seqid) + postfix))
            _qfp = open(_path, mode='w')
            _qfp.write('{}'.format(data))
            _qfp.close()
        if (current_seqid >= max_seqid):
            # Nothing new to fetch.
            return 0
        _sql = 'SELECT seqid, starttime, endtime, database, pid,'
        _sql += ' nested_level, queryid, query, planid, plan, plan_json'
        _sql += (((' FROM ' + self.SCHEMA) + '.') + self.LOG_TABLE)
        _sql += ((' WHERE ' + str(current_seqid)) + ' < seqid AND seqid <= ')
        _sql += (str(max_seqid) + ' ORDER BY seqid')
        _cur = self.__exec_select_cmd(connection, _sql)
        if (_cur is None):
            return 0
        _num_rows = _cur.rowcount
        # Append one CSV line per fetched log row.
        _logfp = open(self.get_log_csv_path(self.ServerId), mode='a')
        for _row in _cur:
            _seqid = _row[0]
            _starttime = _row[1]
            _endtime = _row[2]
            _database = _row[3]
            _pid = _row[4]
            _nested_level = _row[5]
            _queryid = int(_row[6])
            _query = _row[7]
            if (_row[8] is not None):
                _planid = int(_row[8])
            else:
                # Skip rows without a planid.
                continue
            _plan = _row[9]
            _plan_json = _row[10]
            _logfp.write('{},{},{},{},{},{},{},{}\n'.format(_seqid, _starttime, _endtime, _database, _pid, _nested_level, _queryid, _planid))
            'Store query.'
            store_log(_seqid, self.get_query_dir_path(self.ServerId, _queryid), _query)
            'Store plan.'
            store_log(_seqid, self.get_plan_dir_path(self.ServerId, _queryid, _planid), _plan)
            'Store plan_json.'
            # plan_json is stored with a '.tmp' suffix; AddRows.add_rows()
            # later finalizes it under the plain seqid name.
            store_log(_seqid, self.get_plan_json_dir_path(self.ServerId, _queryid, _planid), _plan_json, '.tmp')
        _logfp.close()
        _cur.close()
        return _num_rows
    '\n Public method\n '
    def get_tables(self, serverId):
        '\n Get new log data from query_plan.log table, and store the data\n into appropriate directories.\n\n Parameters\n ----------\n serverId : str\n The serverId of the database server that is described in the hosts.conf.\n\n Returns\n -------\n _num_rows : int\n Return the number of rows of the getting result;\n return 0 if there is no new rows.\n '
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        self.__set_serverId(serverId)
        'Connect to DB server.'
        db = Database(self.base_dir)
        _conn = db.connect(serverId)
        if (Log.info <= self.LogLevel):
            print("Info: Connection established to '{}'.".format(self.ServerId))
        'Get max seqid.'
        _max_seqid = self.__get_max_seqid(_conn)
        'Get current seqid.'
        self.check_tables_dir(serverId)
        _current_seqid = self.get_seqid_from_tables_stat(serverId)
        'Get data from log table and write to tables directory.'
        if (Log.info <= self.LogLevel):
            print('Info: Getting query_plan.log table data.')
        _num_rows = self.__get_log(_conn, _current_seqid, _max_seqid)
        if (_num_rows > 0):
            # Fold parallel workers' rows into each fetched plan, then record
            # the new high-water mark.
            self.add_workers_rows(serverId, _current_seqid, _max_seqid)
            'Update the stat file.'
            self.update_tables_stat_file(serverId, _max_seqid)
        elif (Log.info <= self.LogLevel):
            print('Info: data is already updated.')
        'Commit transaction and Close database connection.'
        _conn.commit()
        _conn.close()
        if (Log.info <= self.LogLevel):
            print('Info: Connection closed.')
        del db
        return _num_rows
|
class Grouping(Repository):
    """Combine json plans with the same queryid+planid into one grouped plan.

    Grouped plans store each statistic as a list of per-execution samples.
    """
    def __init__(self, base_dir='.', log_level=Log.info):
        self.ServerId = ''
        self.is_parallel = False
        self.numWorkers = 1
        self.numPlanWorkers = 1
        self.set_base_dir(base_dir)
        self.LogLevel = log_level
    def __set_serverId(self, serverId):
        # Remember the serverId for subsequent repository path lookups.
        self.ServerId = serverId
    # Objects removed from every node before grouping (timing/buffer noise
    # that is not needed for regression).
    UNNECESSARY_OBJECTS = ('I/O Read Time', 'I/O Write Time', 'Planning Time', 'Execution Time', 'Actual Startup Time', 'Actual Total Time', 'Time', 'Actual duration Time', 'BufferUsage_Start', 'WalUsage_Start', 'BufferUsage', 'WalUsage', 'Triggers', 'JIT')
    # Objects whose values are accumulated into lists across grouped plans.
    GROUPING_OBJECTS = ('Workers', 'NormalizePlanParam', 'NormalizeParam', 'Plan Rows', 'Actual Rows', 'Actual Loops', 'Rows Removed by Filter', 'Rows Removed by Index Recheck', 'Rows Removed by Join Filter', 'Rows Removed by Conflict Filter', 'Workers Planned', 'Workers Launched', 'Worker', 'Worker Number', 'Heap Fetches', 'Conflicting Tuples', 'Tuples Inserted', 'Group Count', 'Startup Cost', 'Total Cost', 'Plan Width', 'Shared Hit Blocks', 'Shared Read Blocks', 'Shared Dirtied Blocks', 'Shared Written Blocks', 'Local Hit Blocks', 'Local Read Blocks', 'Local Dirtied Blocks', 'Local Written Blocks', 'Temp Read Blocks', 'Temp Written Blocks', 'Sort Space Used', 'Sort Space Type', 'Peak Memory Usage', 'Original Hash Batches', 'Original Hash Buckets', 'Hash Batches', 'Hash Buckets', 'Sort Methods Used', 'Sort Space Memory', 'Average Sort Space Used', 'Peak Sort Space Used', 'Exact Heap Blocks', 'Lossy Heap Blocks', 'Function Call', 'Calls', 'Target Tables', 'Conflict Resolution', 'Conflict Arbiter Indexes', 'Sampling Method', 'Sampling Parameters', 'Repeatable Seed', 'Table Function Name', 'Presorted Key', 'Full-sort Groups', 'Pre-sorted Groups', 'Subplans Removed', 'Index Cond', 'Recheck Cond', 'TID Cond', 'Merge Cond', 'Hash Cond', 'Filter', 'Join Filter')
    def __delete_objects(self, node):
        'Delete unnecessary objects from node.'
        for _i in self.UNNECESSARY_OBJECTS:
            if (_i in node):
                del node[_i]
        return node
    def __convert_to_list(self, Plans):
        'Covert to the list type in order to append the object.'
        def to_list(plan):
            # Wrap each grouping value in a one-element list.
            for _go in self.GROUPING_OBJECTS:
                if (_go in plan):
                    plan.update({_go: [plan[_go]]})
        if isinstance(Plans, list):
            for _plan in Plans:
                to_list(_plan)
                if ('Plans' in _plan):
                    self.__convert_to_list(_plan['Plans'])
            return
        else:
            to_list(Plans)
            if ('Plan' in Plans):
                self.__convert_to_list(Plans['Plan'])
            if ('Plans' in Plans):
                self.__convert_to_list(Plans['Plans'])
            return
    def __append_objects(self, target_Plans, Plans):
        # Append each grouping value of `Plans` onto the matching node of
        # `target_Plans`; both trees are assumed to have identical shape.
        def append_value_to_list(targetplan, plan):
            for _go in self.GROUPING_OBJECTS:
                if (_go in targetplan):
                    targetplan[_go] += plan[_go]
        if isinstance(target_Plans, list):
            for _i in range(0, len(target_Plans)):
                append_value_to_list(target_Plans[_i], Plans[_i])
                if ('Plans' in target_Plans[_i]):
                    self.__append_objects(target_Plans[_i]['Plans'], Plans[_i]['Plans'])
            return
        else:
            append_value_to_list(target_Plans, Plans)
            if ('Plan' in target_Plans):
                self.__append_objects(target_Plans['Plan'], Plans['Plan'])
            if ('Plans' in target_Plans):
                self.__append_objects(target_Plans['Plans'], Plans['Plans'])
            return
    def __combine_plan(self, planpath, logpath):
        'Combine the plan (planpath) with the combined plan (logpath).'
        _json_dict = self.read_plan_json(logpath)
        self.delete_unnecessary_objects(self.__delete_objects, _json_dict)
        self.__convert_to_list(_json_dict)
        if os.path.exists(planpath):
            # Append onto the already-grouped plan.
            _target_json_dict = self.read_plan_json(planpath)
            self.__append_objects(_target_json_dict, _json_dict)
            self.write_plan_json(_target_json_dict, planpath)
        else:
            # First sample of this group: store it as-is.
            self.write_plan_json(_json_dict, planpath)
    '\n Public method\n '
    def grouping(self, serverId):
        '\n Combine the json plans with the same queryId+planId, which are stored\n in the Tables directory, into one json plan, and store it under the\n Grouping directory.\n '
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        self.__set_serverId(serverId)
        if (Log.info <= self.LogLevel):
            print('Info: Grouping json formated plans.')
        'Get max_seqid from tables stat.'
        _max_seqid = self.get_seqid_from_tables_stat(self.ServerId)
        'Check the grouping dir and get the current seqid.'
        self.check_grouping_dir(self.ServerId)
        _current_seqid = self.get_seqid_from_grouping_stat(self.ServerId)
        if (_current_seqid >= _max_seqid):
            # Already up to date.
            return
        '\n Read log.csv to get the queryid and planid between current_seqid\n and max_seqid.\n '
        with open(self.get_log_csv_path(self.ServerId), newline='') as f:
            _reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
            for _row in _reader:
                _seqid = int(_row[0])
                _queryid = int(_row[6])
                _planid = int(_row[7])
                if ((_current_seqid < _seqid) and (_seqid <= _max_seqid)):
                    '\n Get the path of the combined plan (_queryid and _planid),\n stored in the Grouping dir.\n '
                    _logpath = self.get_plan_json_path(self.ServerId, _seqid, _queryid, _planid)
                    if (os.path.isfile(_logpath) == False):
                        if (Log.debug1 <= self.LogLevel):
                            print('Debug1: seqid({}) is not found.)'.format(_seqid))
                        continue
                    '\n Get the path of the plan (_queryid and _planid) that is stored\n in the Tables dir.\n '
                    _plandirpath = self.get_grouping_plan_dir_path(self.ServerId, _planid)
                    _planpath = self.get_grouping_plan_path(self.ServerId, _queryid, _planid)
                    if (os.path.exists(_plandirpath) == False):
                        os.mkdir(_plandirpath)
                    '\n Combine the plan (_planpath) with the combined plan (_logpath).\n '
                    self.__combine_plan(_planpath, _logpath)
                    if (Log.debug3 <= self.LogLevel):
                        print('Debug3: planpath={}'.format(_planpath))
                        print('Debug3: logpath={}'.format(_logpath))
        'Update grouping/stat.dat.'
        self.update_grouping_stat_file(self.ServerId, _max_seqid)
|
class PrepareMergeRows():
    """Annotate plan-tree nodes with MergeFlag / Normalize(Plan)Param helpers.

    These helpers tell later merge steps which nodes must be adjusted for
    the effect of parallel workers.
    """
    def __init__(self, base_dir='.', log_level=Log.info):
        # NOTE(review): set_base_dir() is not defined in this class; it is
        # provided by the Repository class this one is mixed in with
        # (see MergeRows / AddRows) — confirm standalone use is unsupported.
        self.set_base_dir(base_dir)
        self.LogLevel = log_level
    def __init_val(self):
        # Reset the per-plan worker bookkeeping before each traversal.
        self.is_parallel = False
        self.numWorkers = 1
        self.numPlanWorkers = 1
    def __add_helper_objects(self, Plans, is_outer):
        '\n Add three helper objects in each node to prepare\n to execute AddRows.__add_rows().\n '
        def add_helper_object(plan, is_outer):
            if ('Workers Planned' in plan):
                self.is_parallel = True
                # Values may already be grouped into lists (see Grouping);
                # use the first sample in that case.
                if isinstance(plan['Workers Planned'], list):
                    self.numPlanWorkers += plan['Workers Planned'][0]
                else:
                    self.numPlanWorkers += plan['Workers Planned']
                if ('Workers Launched' in plan):
                    if isinstance(plan['Workers Planned'], list):
                        self.numWorkers += plan['Workers Launched'][0]
                    else:
                        self.numWorkers += plan['Workers Launched']
            if ('Actual Rows' in plan):
                '\n To adjust the effect of parallel workers, the "NormalizeParam"\n and "NormalizePlanParam" objects are added in each node.\n Also, one object "MergeFlag" is added to set whether the node\n needs to adjust the effect of parallel workers or not.\n\n If `plan` is working in parallel and if the node and all the nodes\n connected to this node are outer, the values "Plan Rows" should be\n adjusted by both numbers of "plan workers" and "launched workers".\n\n An example is shown below.\n\n Example:\n Finalize Aggregate <-- No Need to adjust\n -> Gather <-- No Need to adjust\n -> Partial Aggregate <-- Need to adjust\n -> Nested Loop <-- Need to adjust\n -> Nested Loop <-- Need to adjust\n -> Parallel Seq Scan <-- Need to adjust\n -> Index Only Scan <-- No Need\n -> Seq Scan <-- No Need\n\n The numbers of the plan workers and launched workers are stored in\n the "NormalizeParam" and "NormalyzePlanParm" objects in each node,\n respectively, and these are used when calculating the regression\n parameters, merging plans, and so on.\n '
                if (self.is_parallel and is_outer):
                    if (plan['Node Type'] == 'Aggregate'):
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: NodeType={} MergeFlag = False'.format(plan['Node Type']))
                        plan.update({'MergeFlag': 'False'})
                        plan.update({'NormalizeParam': 1})
                        plan.update({'NormalizePlanParam': 1})
                    else:
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: NodeType={} MergeFlag = True'.format(plan['Node Type']))
                        plan.update({'MergeFlag': 'True'})
                        plan.update({'NormalizeParam': self.numWorkers})
                        plan.update({'NormalizePlanParam': self.numPlanWorkers})
                else:
                    if (Log.debug3 <= self.LogLevel):
                        print('Debug3: NodeType={} MergeFlag = False'.format(plan['Node Type']))
                    plan.update({'MergeFlag': 'False'})
                    plan.update({'NormalizeParam': 1})
                    plan.update({'NormalizePlanParam': 1})
        if isinstance(Plans, list):
            for _i in range(0, len(Plans)):
                plan = Plans[_i]
                '\n If this node is working in parallel but is not outer\n (i: 0 = outer, 1 = inner), set False to is_parallel.\n '
                if (self.is_parallel and (_i != 0)):
                    is_outer = False
                add_helper_object(plan, is_outer)
                if ('Plans' in plan):
                    self.__add_helper_objects(plan['Plans'], is_outer)
            return
        else:
            add_helper_object(Plans, is_outer)
            if ('Plan' in Plans):
                self.__add_helper_objects(Plans['Plan'], is_outer)
            if ('Plans' in Plans):
                self.__add_helper_objects(Plans['Plans'], is_outer)
            return
    '\n Public methods\n '
    def prepare_merge_rows(self, Plans):
        '\n Add three helper objects in each node to prepare to execute\n AddRows.__add_rows(), and Return numbers of workers of both\n planned and launched.\n '
        self.__init_val()
        self.__add_helper_objects(Plans, True)
        return (self.numPlanWorkers, self.numWorkers)
    def delete_flags(self, plan):
        '\n Use with delete_unnecessary_objects().\n '
        if ('MergeFlag' in plan):
            del plan['MergeFlag']
        return plan
|
class MergeRows(Repository, PrepareMergeRows):
    """Merge parallel workers' row counts into the leader's plan tree."""
    def __init__(self, log_level=Log.info):
        self.ServerId = ''
        self.LogLevel = log_level
    def __set_serverId(self, serverId):
        # Remember the serverId for subsequent repository path lookups.
        self.ServerId = serverId
    def __get_actual_rows(self, Plans, depth):
        '\n Return the "Actual Rows" of the node whose depth is `depth` of `Plans`.\n\n Parameters\n ----------\n Plans : dict\n depth : int\n\n Returns\n -------\n _rows : int\n '
        def incr(plan):
            # Count only real plan nodes so `depth` addresses nodes in the
            # same visit order as Common.count_nodes().
            if ('Node Type' in plan):
                self._count += 1
        def op(Plans, depth):
            if isinstance(Plans, list):
                for plan in Plans:
                    incr(plan)
                    if (depth == self._count):
                        self._rows = plan['Actual Rows']
                        return
                    if ('Plans' in plan):
                        op(plan['Plans'], depth)
                return
            else:
                incr(Plans)
                if (depth == self._count):
                    self._rows = Plans['Actual Rows']
                    return
                if ('Plan' in Plans):
                    op(Plans['Plan'], depth)
                if ('Plans' in Plans):
                    op(Plans['Plans'], depth)
                return
        self._count = 0
        self._rows = 0
        op(Plans, depth)
        return self._rows
    def __add_rows(self, Plans, rows, depth):
        '\n Add `rows` to "Actual Rows" of the node whose depth is `depth`,\n and Adjust "Plan Rows" using "NormalizePlanParam" if necessary.\n\n Parameters\n ----------\n Plans : dict\n rows : int\n depth : int\n '
        def incr(plan):
            if ('Node Type' in plan):
                self._count += 1
        def add(plan, rows):
            if ('MergeFlag' in plan):
                if (plan['MergeFlag'] == 'True'):
                    # Undo the planner's per-worker division of Plan Rows.
                    plan['Plan Rows'] *= plan['NormalizePlanParam']
                plan['Actual Rows'] += rows
        def op(Plans, rows, depth):
            if isinstance(Plans, list):
                for plan in Plans:
                    incr(plan)
                    if (depth == self._count):
                        add(plan, rows)
                        return
                    elif ('Plans' in plan):
                        op(plan['Plans'], rows, depth)
                return
            else:
                incr(Plans)
                if (depth == self._count):
                    add(Plans, rows)
                    return
                if ('Plan' in Plans):
                    op(Plans['Plan'], rows, depth)
                elif ('Plans' in Plans):
                    op(Plans['Plans'], rows, depth)
                return
        self._count = 0
        op(Plans, rows, depth)
    '\n Public methods\n '
    def merge_rows(self, leader_plan, worker_plans):
        "\n Merge the 'Plan Rows' and 'Actual Rows' of all parallel worker'\n plans(workerplans) into the leader's plan(leader_plan) if necessary.\n\n Parameters\n ----------\n leader_plan : dict\n worker_plans : [dict, ...]\n "
        _num_worker_node = self.count_nodes(worker_plans[0])
        _num_leader_node = self.count_nodes(leader_plan)
        # Walk both trees from the deepest node upward; worker node _i
        # corresponds to leader node _j (the leader tree has extra nodes
        # above the parallel fragment).
        _i = _num_worker_node
        _j = _num_leader_node
        while (0 < _i):
            rows = 0
            for _k in range(0, len(worker_plans)):
                rows += self.__get_actual_rows(worker_plans[_k], _i)
            self.__add_rows(leader_plan, rows, _j)
            _i -= 1
            _j -= 1
    def extrapolate_rows(self, leader_plan, num_actual_workers, num_workers):
        '\n When the number of `num_workers` processes were launched but the number of\n `num_actual workers` processes are currently running because the processing\n of some parallel worker processes was completed, we must extrapolate the\n "Actual Rows" that is accumulated by merge_rows().\n This method extrapolates them if necessary.\n\n Parameters\n ----------\n leader_plan : dict\n num_actual_workers : int\n The number of the sum of launched workers and a leader process.\n num_workers : int\n The number of the sum of running workers and a leader process.\n '
        def change_rows(plan, num_actual_workers, num_workers):
            if ('MergeFlag' in plan):
                if (plan['MergeFlag'] == 'True'):
                    # Scale up to account for workers that already finished.
                    plan['Actual Rows'] *= (float(num_workers) / float(num_actual_workers))
        def op(Plans, num_actual_workers, num_workers):
            if isinstance(Plans, list):
                for plan in Plans:
                    change_rows(plan, num_actual_workers, num_workers)
                    if ('Plans' in plan):
                        op(plan['Plans'], num_actual_workers, num_workers)
                return
            else:
                change_rows(Plans, num_actual_workers, num_workers)
                if ('Plan' in Plans):
                    # BUG FIX: the original recursed into Plans['Plans'] here
                    # although the guard checks for the 'Plan' key; every
                    # sibling traversal in this file uses Plans['Plan'].
                    op(Plans['Plan'], num_actual_workers, num_workers)
                if ('Plans' in Plans):
                    op(Plans['Plans'], num_actual_workers, num_workers)
                return
        op(leader_plan['Plan'], num_actual_workers, num_workers)
|
class AddRows(Repository, PrepareMergeRows):
    """Fold per-worker statistics stored in each node's 'Workers' list into
    the leader's values, finalizing '.tmp' json plans on disk."""
    def __init__(self, log_level=Log.info):
        self.ServerId = ''
        self.LogLevel = log_level
    def __set_serverId(self, serverId):
        # Remember the serverId for subsequent repository path lookups.
        self.ServerId = serverId
    def __add_rows(self, Plans):
        def add_rows_and_loops(plan):
            # Add every worker's 'Actual Rows'/'Actual Loops' to the leader's
            # values; de-normalize 'Plan Rows' for merge-flagged nodes.
            if ('Workers' in plan):
                _lr = plan['Actual Rows']
                _ll = plan['Actual Loops']
                _wr = 0
                _wl = 0
                if (plan['MergeFlag'] == 'True'):
                    _npr = (plan['Plan Rows'] * plan['NormalizePlanParam'])
                    plan.update({'Plan Rows': _npr})
                for _wdata in plan['Workers']:
                    _wr += _wdata['Actual Rows']
                    _wl += _wdata['Actual Loops']
                plan.update({'Actual Rows': (_lr + _wr)})
                plan.update({'Actual Loops': (_ll + _wl)})
        if isinstance(Plans, list):
            for _plan in Plans:
                add_rows_and_loops(_plan)
                if ('Plans' in _plan):
                    self.__add_rows(_plan['Plans'])
            return
        else:
            add_rows_and_loops(Plans)
            if ('Plan' in Plans):
                self.__add_rows(Plans['Plan'])
            if ('Plans' in Plans):
                self.__add_rows(Plans['Plans'])
            return
    '\n Public method\n '
    def add_rows(self, seqid, queryid, planid):
        '\n Add the parallel worker\'s values("Plan Rows", "Actual Rows",\n and "Actual Loops") to the leader\'s corresponding values in\n the plan whose seqid is `seqid`.\n '
        _jdirpath = self.get_plan_json_dir_path(self.ServerId, queryid, planid)
        _jpath = self.path(_jdirpath, str(seqid))
        if os.path.isfile(_jpath):
            # Final file already exists: nothing to do.
            if (Log.notice <= self.LogLevel):
                print('Notice: seqid({}) already exists.'.format(str(seqid)))
            return
        _jpath = self.path(_jdirpath, (str(seqid) + '.tmp'))
        if (os.path.isfile(_jpath) == False):
            if (Log.warning <= self.LogLevel):
                print("Warning: '{}' not found.".format(_jpath))
            return
        else:
            _dict0 = self.read_plan_json(_jpath)
        _plan = _dict0.copy()
        '\n Add three objects in each node of `_plan` to prepare to execute\n AddRows.__add_rows(), and Return numbers of workers of both\n planned and launched.\n '
        (_numPlanWorkers, _numWorkers) = self.prepare_merge_rows(_plan)
        '\n Add the values of "Plan Rows", "Actual Rows" and "Actual Loops"\n of all parallel worker\'s plan to the leader\'s corresponding values,\n respectively.\n '
        if (_numWorkers > 1):
            self.__add_rows(_plan)
        # Write the finalized plan under the plain seqid name, then remove
        # the '.tmp' source file.
        _jpath = self.path(_jdirpath, str(seqid))
        self.write_plan_json(_plan, _jpath)
        if os.path.isfile(_jpath):
            _jpath = self.path(_jdirpath, (str(seqid) + '.tmp'))
            os.remove(_jpath)
|
class MergePlan(AddRows, MergeRows):
    """Combine a leader's plan with its parallel workers' plans."""
    def __init__(self, log_level=Log.info):
        self.ServerId = ''
        self.LogLevel = log_level
    def __set_serverId(self, serverId):
        # Remember the serverId for subsequent repository path lookups.
        self.ServerId = serverId
    '\n Public methods\n '
    def merge_plans(self, leader_plan, worker_plans):
        '\n Merge the parallel worker\'s plans into the leader\'s plan.\n This is called by QueryProgress.query_progress().\n\n Parameters\n ----------\n leader_plan : dict\n worker_plans : [dict, ...]\n\n Returns\n -------\n _leader_plan : dict\n Leader\'s plan whose "Plan rows" and "Actual Rows" are added\n with parallel workers\' rows.\n '
        _leader_plan = leader_plan.copy()
        # Annotate nodes with MergeFlag / normalization params first.
        (_numPlanWorkers, _numWorkers) = self.prepare_merge_rows(_leader_plan)
        if (len(worker_plans) > 0):
            "\n Merge the 'Plan Rows' and 'Actual Rows' of all parallel worker's\n plans(worker_plans) into the leader's plan(_leader_plan) if necessary.\n "
            self.merge_rows(_leader_plan, worker_plans)
        '\n If some parallel workers complete the processing, we extrapolate the\n "Actual Rows" of `_leader_plan` because the values of the terminated\n processes are not added by merge_rows().\n '
        if ((len(worker_plans) + 1) < _numWorkers):
            self.extrapolate_rows(_leader_plan, (len(worker_plans) + 1), _numWorkers)
        return _leader_plan
    def add_workers_rows(self, serverId, current_seqid, max_seqid):
        '\n For the plans with seqid `current_seqid` to `max_seqid`, add the parallel\n worker\'s values("Plan Rows", "Actual Rows", and "Actual Loops") to the\n leader\'s corresponding values in each plan.\n\n This is called by GetTables.get_tables().\n\n Parameters\n ----------\n serverId : str\n The serverId of the database server that is described in the hosts.conf.\n current_seqid : int\n The maximum seqid for plans already stored in the repository.\n max_seqid : int\n The maximum seqid for plans stored in the database.\n\n Returns\n -------\n None\n '
        self.__set_serverId(serverId)
        '\n Read log.csv to get the json plans whose seqid are between current_seqid\n and max_seqid.\n '
        # FIX: the original called f.close() as well — redundant, since the
        # with-statement already closes the file when the block exits.
        with open(self.get_log_csv_path(self.ServerId), newline='') as f:
            _reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
            for row in _reader:
                _seqid = int(row[0])
                _queryid = int(row[6])
                _planid = int(row[7])
                if ((current_seqid < _seqid) and (_seqid <= max_seqid)):
                    self.add_rows(_seqid, _queryid, _planid)
|
class PushParam(Database, Repository):
    """Push stored regression parameters into each target database.

    Reads queryid/planid pairs from log.csv, serializes each stored
    regression-parameter plan tree into a compact one-line text format,
    and inserts the result into <SCHEMA>.<REG_PARAMS_TABLE> on every
    database named in log.csv, creating the schema/table when missing.
    """

    def __init__(self, base_dir='.', log_level=Log.info):
        # base_dir: repository root; log_level: verbosity threshold (Log.*).
        self.set_base_dir(base_dir)
        self.LogLevel = log_level

    def __trans(self, Plans, depth):
        # Serialize the `depth`-th node of the plan tree `Plans` (counted
        # in pre-order over nodes that carry a 'Node Type') into a
        # one-line "{...}" item string. Uses self._depth/_count/_params as
        # walk state; returns None when the selected node is not visible.

        def merge(schemas, relations):
            # Pair up schema and relation names as "schema.relation".
            # NOTE(review): assumes len(schemas) == len(relations) after
            # list-normalization — confirm against the plan JSON format.
            result = []
            if (type(schemas) is not list):
                schemas = [schemas]
            if (type(relations) is not list):
                relations = [relations]
            for i in range(0, len(schemas)):
                result.append(((str(schemas[i]) + '.') + str(relations[i])))
            return result

        def flatten(lists):
            # Recursively flatten arbitrarily nested lists into a flat
            # list of strings.
            result = []
            if (type(lists) is not list):
                lists = [lists]
            for i in lists:
                if isinstance(i, string_types):
                    result.append(i)
                else:
                    result.extend(flatten(i))
            return result

        def get_items(plan):
            '\n\n            Format: { "Node Type" : ("rtable" [, ...]) : ("outer_rtable" [, ...]) : ("inner_rtable" [, ...]) : [Coeffcient [, ...]] : [Coefficient2] : [Intercept] : "MergeFlag"}\n\n            '
            _visible = False
            _items = '{'
            if ('Node Type' in plan):
                # Only join nodes are visible by default; leaves become
                # visible in the no-'Plans' branch below.
                if ((plan['Node Type'] == 'Nested Loop') or (plan['Node Type'] == 'Merge Join') or (plan['Node Type'] == 'Hash Join')):
                    _visible = True
                _items = (((_items + "'") + str(plan['Node Type'])) + "':")
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: Node Type={}'.format(plan['Node Type']))
            if (('Schema' in plan) and ('Relation Name' in plan)):
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: rtable={}'.format(merge(flatten(plan['Schema']), flatten(plan['Relation Name']))))
                # Render the rtable list with ()-style brackets instead of [].
                _items = ((_items + str(str(merge(flatten(plan['Schema']), flatten(plan['Relation Name']))).replace('[', '(')).replace(']', ')')) + ':')
            if ('Plans' in plan):
                __plan = plan['Plans']
                if isinstance(__plan, list):
                    # First child is the outer plan; the optional second
                    # child is the inner plan.
                    __outer_plan = __plan[0]
                    __inner_plan = (__plan[1] if (2 <= len(__plan)) else None)
                    if (('Schema' in __outer_plan) and ('Relation Name' in __outer_plan)):
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: outer_rtable={}'.format(merge(flatten(__outer_plan['Schema']), flatten(__outer_plan['Relation Name']))))
                        _items = ((_items + str(str(merge(flatten(__outer_plan['Schema']), flatten(__outer_plan['Relation Name']))).replace('[', '(')).replace(']', ')')) + ':')
                    if ((__inner_plan is not None) and ('Schema' in __inner_plan) and ('Relation Name' in __inner_plan)):
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: inner_rtable={}'.format(merge(flatten(__inner_plan['Schema']), flatten(__inner_plan['Relation Name']))))
                        _items = ((_items + str(str(merge(flatten(__inner_plan['Schema']), flatten(__inner_plan['Relation Name']))).replace('[', '(')).replace(']', ')')) + ':')
                    elif (__inner_plan is None):
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: inner_rtable=NULL')
                elif (('Schema' in __plan) and ('Relation Name' in __plan)):
                    # NOTE(review): this branch only logs; nothing is
                    # appended to _items for a single-dict child — confirm
                    # whether that is intentional.
                    if (Log.debug3 <= self.LogLevel):
                        print('Debug3: outer_rtable={}'.format(merge(flatten(__plan['Schema']), flatten(__plan['Relation Name']))))
                        print('Debug3: inner_rtable=NULL')
            else:
                # Leaf node (no children): emit empty outer/inner
                # placeholders and force visibility so scans appear too.
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: outer_rtable=NULL, inner_rtable=NULL')
                _items = (_items + '():():')
                _visible = True
            if ('Coefficient' in plan):
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: Coefficient={}'.format(plan['Coefficient']))
                _items = ((_items + str(plan['Coefficient'])) + ':')
            if ('Coefficient2' in plan):
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: Coefficient2={}'.format(plan['Coefficient2']))
                _items = ((_items + str(plan['Coefficient2'])) + ':')
            else:
                # Missing optional fields are serialized as empty lists.
                _items = (_items + '[]:')
            if ('Intercept' in plan):
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: Intercept={}'.format(plan['Intercept']))
                _items = ((_items + str(plan['Intercept'])) + ':')
            else:
                _items = (_items + '[]:')
            if ('MergeFlag' in plan):
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: MergeFlag={}'.format(plan['MergeFlag']))
                _items = (((_items + '"') + str(plan['MergeFlag'])) + '"')
            else:
                _items = (_items + '[]')
            _items = (_items + '}')
            if (_visible == True):
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: items={}'.format(_items))
                return _items
            else:
                return None

        def incr(plan):
            # Count every node that carries a 'Node Type'.
            if ('Node Type' in plan):
                self._count += 1

        def op(Plans):
            # Pre-order walk; when the running count reaches self._depth,
            # capture that node's serialized items into self._params.
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    incr(Plans[i])
                    if (self._depth == self._count):
                        self._params = get_items(Plans[i])
                        return
                    elif ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'])
            else:
                incr(Plans)
                if (self._depth == self._count):
                    self._params = get_items(Plans)
                    return
                elif ('Plans' in Plans):
                    op(Plans['Plans'])

        self._depth = depth
        self._count = 0
        self._params = ''
        op(Plans)
        return self._params

    def __transform(self, reg_path):
        # Serialize the plan's nodes from index count_nodes(...) down to 1
        # and join the visible item strings with ';'.
        _result = ''
        i = self.count_nodes(reg_path)
        while (0 < i):
            _params = self.__trans(reg_path['Plan'], i)
            if (_params != None):
                if (not _result):
                    _result = _params
                else:
                    _result = ((_result + ';') + _params)
            i -= 1
        return _result

    def __get_database_list(self, serverId):
        'Get database list.'
        # Collect the distinct database names (column 3) from log.csv,
        # preserving first-seen order.
        _ret = []
        with open(self.get_log_csv_path(serverId), newline='') as f:
            _reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
            for _row in _reader:
                _database = _row[3]
                if (_database not in _ret):
                    _ret.append(_database)
        return _ret

    def __get_queryid_and_param_list(self, serverId, db):
        # Map each queryid seen for database `db` to its planid (as str);
        # later log.csv rows overwrite earlier ones, so the latest wins.
        _dict = {}
        with open(self.get_log_csv_path(serverId), newline='') as f:
            _reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
            for _row in _reader:
                _seqid = int(_row[0])
                _database = _row[3]
                _queryid = int(_row[6])
                _planid = int(_row[7])
                if (_database == db):
                    _dict[_queryid] = str(_planid)
        return _dict

    def __check_object(self, connection, sql):
        # Run a "SELECT count(*)" existence query; return True iff the
        # count is exactly 1. Exits the whole process on SQL failure or
        # on an unexpected row count.
        _cur = connection.cursor()
        try:
            _cur.execute(sql)
        except Exception as err:
            _cur.close()
            print('Error! Could not execute sql:{}.'.format(sql))
            sys.exit(1)
        if (_cur.rowcount != 1):
            _cur.close()
            print('Error! Got invalid result.')
            sys.exit(1)
        for _row in _cur:
            _num = _row[0]
            _cur.close()
            # Returns on the first (only) row.
            return (True if (_num == 1) else False)
        return False

    def __check_schema(self, connection):
        # True iff schema self.SCHEMA exists (pg_namespace lookup).
        _sql = (("SELECT count(*) FROM pg_namespace WHERE nspname = '" + self.SCHEMA) + "';")
        return self.__check_object(connection, _sql)

    def __check_table(self, connection):
        # True iff table self.SCHEMA.self.REG_PARAMS_TABLE exists
        # (pg_class joined with pg_namespace).
        _sql = (((((((('SELECT count(*) FROM pg_class AS c, pg_namespace AS n WHERE n.oid = c.relnamespace AND c.relname = ' + "'") + self.REG_PARAMS_TABLE) + "'") + ' AND n.nspname = ') + "'") + self.SCHEMA) + "'") + ';')
        return self.__check_object(connection, _sql)

    def __execute_sql(self, connection, sql):
        # Execute a statement for its side effect; exits the process on
        # failure. NOTE(review): the cursor is not closed on success —
        # psycopg2 frees it on GC, but an explicit close would be tidier.
        _cur = connection.cursor()
        try:
            _cur.execute(sql)
        except Exception as err:
            _cur.close()
            print('Error! Could not execute sql:{}.'.format(sql))
            sys.exit(1)

    def __create_schema(self, connection):
        _sql = (('CREATE SCHEMA IF NOT EXISTS ' + self.SCHEMA) + ';')
        self.__execute_sql(connection, _sql)

    def __create_table(self, connection, work_mem):
        # queryid is the primary key; sort_space_used is only present
        # when work_mem is True.
        _sql = (((('CREATE TABLE IF NOT EXISTS ' + self.SCHEMA) + '.') + self.REG_PARAMS_TABLE) + ' (')
        _sql += 'queryid TEXT PRIMARY KEY,'
        if (work_mem == True):
            _sql += 'sort_space_used INT,'
        _sql += 'params TEXT NOT NULL'
        _sql += ');'
        self.__execute_sql(connection, _sql)

    def __truncate_table(self, connection):
        _sql = (((('TRUNCATE ' + self.SCHEMA) + '.') + self.REG_PARAMS_TABLE) + ';')
        self.__execute_sql(connection, _sql)

    def __insert_reg_params(self, connection, serverId, queryid_list, work_mem):
        # Insert one row per queryid into SCHEMA.REG_PARAMS_TABLE inside a
        # single explicit transaction (START TRANSACTION ... COMMIT), and
        # mirror each row into the formatted-regression-params files.
        # NOTE(review): SQL is built by string concatenation; the values
        # are internal (ints and transformed params), but parameterized
        # queries would be safer.
        _cur = connection.cursor()
        try:
            _cur.execute('START TRANSACTION;')
        except Exception as err:
            _cur.close()
            print('Error! Could not execute sql:{}.'.format('START TRANSACTION'))
            sys.exit(1)
        for _queryid in queryid_list:
            _planid = int(queryid_list[_queryid])
            _reg_path = self.get_regression_param(serverId, _queryid, _planid)
            _result = self.__transform(_reg_path)
            _sort_space_used = None
            if (work_mem == True):
                if ('SortSpaceUsed' in _reg_path):
                    _sort_space_used = _reg_path['SortSpaceUsed']
            if (Log.debug3 <= self.LogLevel):
                print('Debug3: _queryid={} planid={} _result={}'.format(_queryid, queryid_list[_queryid], _result))
            _sql = ((('INSERT INTO ' + self.SCHEMA) + '.') + self.REG_PARAMS_TABLE)
            if (_sort_space_used is None):
                _sql += ' (queryid, params) VALUES ('
            else:
                _sql += ' (queryid, sort_space_used, params) VALUES ('
            _sql += (("'" + str(_queryid)) + "',")
            if (_sort_space_used is not None):
                _sql += (str(int(_sort_space_used)) + ',')
            # Single quotes in params are replaced with double quotes so
            # the value stays a valid SQL string literal.
            _sql += (("'" + str(_result).replace("'", '"')) + "'")
            _sql += ');'
            try:
                _cur.execute(_sql)
            except Exception as err:
                _cur.close()
                print('Error! Could not execute sql:{}.'.format(_sql))
                sys.exit(1)
            self.write_formatted_regression_params(serverId, str(_queryid), str(_result).replace("'", '"'))
        try:
            _cur.execute('COMMIT;')
        except Exception as err:
            _cur.close()
            print('Error! Could not execute sql:{}.'.format('COMMIT'))
            sys.exit(1)
        _cur.close()

    '\n    Public method\n    '
    def push_param(self, serverId, work_mem=True):
        """Push the regression params of `serverId` to every database.

        For each database found in log.csv: connect, ensure the params
        schema/table exist (truncating an existing table), and insert the
        transformed regression parameters. Databases that cannot be
        connected to are skipped. Exits the process when log.csv holds
        no data.
        """
        '\n        Check formatted regression params subdir, and create it if not exists.\n        '
        self.check_formatted_regression_params_dir(serverId)
        '\n        Get database list from log.csv.\n        '
        _database_list = self.__get_database_list(serverId)
        if (len(_database_list) == 0):
            if (Log.info <= self.LogLevel):
                print('Info: There is no data.')
            sys.exit(0)
        if (Log.debug3 <= self.LogLevel):
            print('Debug3: _database_list={}'.format(_database_list))
        '\n        Main loop\n        '
        db = Database(self.base_dir, self.LogLevel)
        for _db in _database_list:
            '\n            Get queryid and its latest param list in each database.\n            '
            _queryid_list = self.__get_queryid_and_param_list(serverId, _db)
            if (Log.debug3 <= self.LogLevel):
                print('Debug3: queryid_list={}'.format(_queryid_list))
            '\n            Get connection param of each database\n            '
            _conn = db.get_connection_param(serverId, _db)
            '\n            Connect to each database\n            '
            try:
                _connection = psycopg2.connect(_conn)
            except psycopg2.OperationalError as e:
                # Best-effort: skip unreachable databases.
                print('Could not connect to {}'.format(_db))
                continue
            _connection.autocommit = True
            '\n            Check schema and table; create them if necessary\n            '
            if (self.__check_schema(_connection) == False):
                self.__create_schema(_connection)
            if self.__check_table(_connection):
                self.__truncate_table(_connection)
            else:
                self.__create_table(_connection, work_mem)
            '\n            Insert reg_param\n            '
            self.truncate_formatted_regression_params(serverId)
            self.__insert_reg_params(_connection, serverId, _queryid_list, work_mem)
            _connection.close()
        del db
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.