code
stringlengths
17
6.64M
def init_datasets() -> None:
    """Run every dataset preprocessing step once (Enron, Ling, SMS, SpamAssassin)."""
    for preprocess in (
        preprocess_enron,
        preprocess_ling,
        preprocess_sms,
        preprocess_spamassassin,
    ):
        preprocess()
def train_val_test_split(df, train_size=0.8, has_val=True):
    """Return a tuple (DataFrames, DatasetDict) with a custom train/val/test split.

    Arguments:
        df: input DataFrame; must contain a 'label' column for stratification.
        train_size: fraction (float) or absolute sample count (int) for training.
        has_val: if True the held-out part is split evenly into val and test.
    """
    if isinstance(train_size, int):
        # Convert an absolute sample count into a fraction of the dataset.
        train_size = train_size / len(df)
    # Deterministic shuffle so repeated runs see rows in the same order.
    df = df.sample(frac=1, random_state=0)
    # Bug fix: also seed train_test_split — it shuffles again internally, so
    # the seeded shuffle above alone did not make the split reproducible.
    df_train, df_test = train_test_split(
        df, test_size=1 - train_size, stratify=df['label'], random_state=0
    )
    if not has_val:
        return (
            (df_train, df_test),
            datasets.DatasetDict({
                'train': datasets.Dataset.from_pandas(df_train),
                'test': datasets.Dataset.from_pandas(df_test),
            }),
        )
    # Split the held-out half evenly, keeping label proportions in both parts.
    df_test, df_val = train_test_split(
        df_test, test_size=0.5, stratify=df_test['label'], random_state=0
    )
    return (
        (df_train, df_val, df_test),
        datasets.DatasetDict({
            'train': datasets.Dataset.from_pandas(df_train),
            'val': datasets.Dataset.from_pandas(df_val),
            'test': datasets.Dataset.from_pandas(df_test),
        }),
    )
class EvalOnTrainCallback(TrainerCallback):
    """Custom callback that also evaluates on the training set during training."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # Only act on epochs where the trainer would evaluate anyway.
        if not control.should_evaluate:
            return None
        # Snapshot the control flags before evaluate() mutates them, and
        # return the snapshot so the normal eval still runs afterwards.
        control_snapshot = copy.deepcopy(control)
        self._trainer.evaluate(
            eval_dataset=self._trainer.train_dataset, metric_key_prefix='train'
        )
        return control_snapshot
def get_trainer(model, dataset, tokenizer=None):
    """Return a trainer object for transformer models.

    Dispatches on the model's class name: SetFit models get a SetFitTrainer,
    T5/FLAN seq2seq models get a Seq2SeqTrainer, and everything else (e.g.
    BERT/RoBERTa classifiers) gets a plain Trainer.

    Arguments:
        model: the model instance to train.
        dataset: a DatasetDict with at least 'train' and 'val' splits.
        tokenizer: required for T5-like models to decode generated labels.
    """

    def compute_metrics(y_pred):
        'Compute metrics during training.'
        (logits, labels) = y_pred
        predictions = np.argmax(logits, axis=(- 1))
        # Macro F1 weighs ham/spam equally regardless of class imbalance.
        return evaluate.load('f1').compute(predictions=predictions, references=labels, average='macro')
    if (type(model).__name__ == 'SetFitModel'):
        trainer = SetFitTrainer(model=model, train_dataset=dataset['train'], eval_dataset=dataset['val'], loss_class=CosineSimilarityLoss, metric='f1', batch_size=16, num_iterations=20, num_epochs=3)
        return trainer
    elif (('T5' in type(model).__name__) or ('FLAN' in type(model).__name__)):

        def compute_metrics_t5(y_pred, verbose=0):
            'Compute metrics during training for T5-like models.'
            (predictions, labels) = y_pred
            predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True)
            # Replace the ignored-label sentinel (-100) with the pad token id
            # so batch_decode does not choke on it.
            labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
            labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
            # Map decoded text back to binary labels: 1 iff 'spam' appears.
            predictions = [(1 if ('spam' in predictions[i]) else 0) for i in range(len(predictions))]
            labels = [(1 if ('spam' in labels[i]) else 0) for i in range(len(labels))]
            result = evaluate.load('f1').compute(predictions=predictions, references=labels, average='macro')
            return result
        # Dynamic padding per batch; -100 labels are excluded from the loss.
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=(- 100), pad_to_multiple_of=8)
        training_args = Seq2SeqTrainingArguments(output_dir='experiments', per_device_train_batch_size=8, per_device_eval_batch_size=8, learning_rate=5e-05, num_train_epochs=5, predict_with_generate=True, fp16=False, evaluation_strategy='epoch', save_strategy='epoch', load_best_model_at_end=True, save_total_limit=5)
        trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=dataset['train'], eval_dataset=dataset['val'], data_collator=data_collator, compute_metrics=compute_metrics_t5)
        # Also evaluate on the training set each epoch to monitor overfitting.
        trainer.add_callback(EvalOnTrainCallback(trainer))
        return trainer
    else:
        training_args = TrainingArguments(output_dir='experiments', per_device_train_batch_size=16, per_device_eval_batch_size=8, learning_rate=5e-05, num_train_epochs=10, evaluation_strategy='epoch', save_strategy='epoch', load_best_model_at_end=True, save_total_limit=10)
        trainer = Trainer(model=model, args=training_args, train_dataset=dataset['train'], eval_dataset=dataset['val'], compute_metrics=compute_metrics)
        # Also evaluate on the training set each epoch to monitor overfitting.
        trainer.add_callback(EvalOnTrainCallback(trainer))
        return trainer
def predict(trainer, model, dataset, tokenizer=None):
    'Convert the predict function to specific classes to unify the API.'
    model_cls = type(model).__name__
    if model_cls == 'SetFitModel':
        # SetFit models are directly callable on raw text.
        return model(dataset['text'])
    if 'T5' in model_cls:
        # Seq2seq models generate text: decode it, then map to binary labels.
        raw = trainer.predict(dataset)
        decoded = tokenizer.batch_decode(raw.predictions, skip_special_tokens=True)
        return [1 if 'spam' in text else 0 for text in decoded]
    # Standard classifiers: argmax over the output logits.
    return trainer.predict(dataset).predictions.argmax(axis=-1)
def train_llms(seeds, datasets, train_sizes, test_set='test'):
    """Train all the large language models.

    Runs every (seed, dataset, train_size) combination: trains each model in
    the LLMS registry, scores it on ``test_set``, and writes scores, loss
    logs and plots under outputs/.
    """
    for seed in list(seeds):
        set_seed(seed)
        for dataset_name in list(datasets):
            for train_size in train_sizes:
                # One row per model; columns are the metrics plus timings.
                scores = pd.DataFrame(index=list(LLMS.keys()), columns=(list(SCORING.keys()) + ['training_time', 'inference_time']))
                df = get_dataset(dataset_name)
                (_, dataset) = train_val_test_split(df, train_size=train_size, has_val=True)
                experiment = f'llm_{dataset_name}_{test_set}_{train_size}_train_seed_{seed}'
                for (model_name, (model, tokenizer)) in LLMS.items():
                    tokenized_dataset = tokenize(dataset, tokenizer)
                    trainer = get_trainer(model, tokenized_dataset, tokenizer)
                    start = time.time()
                    train_result = trainer.train()
                    end = time.time()
                    scores.loc[model_name]['training_time'] = (end - start)
                    if ('SetFit' not in model_name):
                        # SetFit trainers expose no log history; skip the loss plot for them.
                        log = pd.DataFrame(trainer.state.log_history)
                        log.to_csv(f'outputs/csv/loss_{model_name}_{experiment}.csv')
                        plot_loss(experiment, dataset_name, model_name)
                    start = time.time()
                    predictions = predict(trainer, model, tokenized_dataset[test_set], tokenizer)
                    end = time.time()
                    # Scorers are called as score_fn(y_true, y_pred).
                    for (score_name, score_fn) in SCORING.items():
                        scores.loc[model_name][score_name] = score_fn(dataset[test_set]['label'], predictions)
                    scores.loc[model_name]['inference_time'] = (end - start)
                    save_scores(experiment, model_name, scores.loc[model_name].to_dict())
                plot_scores(experiment, dataset_name)
                print(scores)
def train_baselines(seeds, datasets, train_sizes, test_set='test'):
    """Train all the baseline (classical ML) models.

    For each (seed, dataset, train_size) combination: preprocesses the text,
    TF-IDF encodes it, then either cross-validates (test_set='val') or
    fits/predicts on the test split (test_set='test'), saving scores to
    outputs/.
    """
    init_nltk()
    for seed in list(seeds):
        set_seed(seed)
        for dataset_name in list(datasets):
            for train_size in train_sizes:
                # One row per model; columns are the metrics plus timings.
                scores = pd.DataFrame(
                    index=list(MODELS.keys()),
                    columns=list(SCORING.keys()) + ['training_time', 'inference_time'],
                )
                df = get_dataset(dataset_name)
                df = transform_df(df)
                (df_train, df_val, df_test), _ = train_val_test_split(
                    df, train_size=train_size, has_val=True
                )
                experiment = f'ml_{dataset_name}_{test_set}_{train_size}_train_seed_{seed}'
                for model_name, (model, max_iter) in MODELS.items():
                    # NOTE(review): max_iter is used as the TF-IDF vocabulary
                    # size here, not as an iteration count — confirm naming.
                    encoder = TfidfVectorizer(max_features=max_iter)
                    X_train, y_train, encoder = encode_df(df_train, encoder)
                    X_test, y_test, encoder = encode_df(df_test, encoder)
                    if test_set == 'val':
                        cv = cross_validate(model, X_train, y_train, scoring=list(SCORING.keys()), cv=5, n_jobs=-1)
                        for score_name, score_fn in SCORING.items():
                            scores.loc[model_name][score_name] = cv[f'test_{score_name}'].mean()
                    if test_set == 'test':
                        start = time.time()
                        model.fit(X_train, y_train)
                        end = time.time()
                        scores.loc[model_name]['training_time'] = end - start
                        start = time.time()
                        y_pred = model.predict(X_test)
                        end = time.time()
                        scores.loc[model_name]['inference_time'] = end - start
                        for score_name, score_fn in SCORING.items():
                            # Bug fix: call score_fn(y_true, y_pred) as in
                            # train_llms — the original passed (y_pred, y_test),
                            # which swaps precision and recall.
                            scores.loc[model_name][score_name] = score_fn(y_test, y_pred)
                    save_scores(experiment, model_name, scores.loc[model_name].to_dict())
                plot_scores(experiment, dataset_name)
                print(scores)
def init_nltk():
    """Download the NLTK resources required by the preprocessing pipeline."""
    for resource in ('punkt', 'stopwords'):
        nltk.download(resource)
def tokenize_words(text):
    """Lowercase and word-tokenize the text, dropping punctuation tokens."""
    tokens = word_tokenize(str(text).lower())
    # Keep alphanumeric tokens only; punctuation tokens fail isalnum().
    return [tok for tok in tokens if tok.isalnum()]
def remove_stopwords(text):
    """Remove English stopwords from a list of tokens.

    Performance fix: build the stopword set once per call. The original
    evaluated ``stopwords.words('english')`` for every token, re-reading
    the corpus list and doing an O(n) membership scan each time.
    """
    stop_words = set(stopwords.words('english'))
    return [token for token in text if token not in stop_words]
def stem(text):
    """Stem each token (e.g. 'originate' -> 'origin') with the global stemmer."""
    return [ps.stem(tok) for tok in text]
def transform(text):
    """Run the full text pipeline: tokenize, drop stopwords, stem, re-join."""
    tokens = stem(remove_stopwords(tokenize_words(text)))
    return ' '.join(tokens)
def transform_df(df):
    """Apply the text-preprocessing pipeline to every row of the dataframe.

    NOTE: mutates ``df`` in place (adds a 'transformed_text' column) and also
    returns it for convenience.
    """
    transformed = df['text'].apply(transform)
    df['transformed_text'] = transformed
    return df
def encode_df(df, encoder=None):
    """Turn a dataframe into (X, y, encoder) arrays for scikit-learn models.

    An encoder that already has a ``vocabulary_`` attribute is treated as
    fitted and only used to transform; otherwise it is fitted on this
    dataframe first (i.e. fit on train, transform-only on test).
    """
    already_fitted = hasattr(encoder, 'vocabulary_')
    if already_fitted:
        X = encoder.transform(df['transformed_text']).toarray()
    else:
        X = encoder.fit_transform(df['transformed_text']).toarray()
    y = df['label'].values
    return (X, y, encoder)
def tokenize(dataset, tokenizer):
    """Tokenize a DatasetDict for the given tokenizer.

    Returns the dataset unchanged when no tokenizer is supplied (SetFit
    models tokenize internally), applies a text-to-text scheme for T5
    tokenizers, and plain max-length padding otherwise.
    """

    def tokenization(examples):
        # Standard encoder-only tokenization (BERT/RoBERTa style).
        return tokenizer(examples['text'], padding='max_length', truncation=True)

    def tokenization_t5(examples, padding='max_length'):
        # Prefix the task instruction so T5 treats this as text-to-text.
        text = [('classify as ham or spam: ' + item) for item in examples['text']]
        inputs = tokenizer(text, max_length=tokenizer.model_max_length, padding=padding, truncation=True)
        # max_label_length is captured from the enclosing scope (computed below).
        labels = tokenizer(text_target=examples['label'], max_length=max_label_length, padding=True, truncation=True)
        # Replace pad token ids in labels with -100 so they are ignored by the loss.
        inputs['labels'] = [[(x if (x != tokenizer.pad_token_id) else (- 100)) for x in label] for label in labels['input_ids']]
        return inputs
    if (tokenizer is None):
        # SetFit-style models: nothing to do here.
        return dataset
    elif ('T5' in type(tokenizer).__name__):
        # Convert numeric labels into the literal target words 'ham'/'spam'.
        dataset = dataset.map((lambda x: {'label': ('ham' if (x['label'] == 0) else 'spam')}))
        # Tokenize the labels once to find the longest one, bounding target length.
        tokenized_label = dataset['train'].map((lambda x: tokenizer(x['label'], truncation=True)), batched=True)
        max_label_length = max([len(x) for x in tokenized_label['input_ids']])
        return dataset.map(tokenization_t5, batched=True, remove_columns=['label'])
    else:
        return dataset.map(tokenization, batched=True)
def set_seed(seed) -> None:
    """Seed every RNG the project uses (python, numpy, torch, and CUDA)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # CUDA has its own per-device generators; seed them all when available.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def plot_loss(experiment: str, dataset_name: str, model_name: str) -> None:
    """Plot training and evaluation loss curves for one LLM run."""
    log = pd.read_csv(f'outputs/csv/loss_{model_name}_{experiment}.csv')
    # Drop the final row (the trainer's end-of-run summary entry).
    log = pd.DataFrame(log).iloc[:-1]
    train_losses = log['train_loss'].dropna().values
    eval_losses = log['eval_loss'].dropna().values
    x = np.arange(1, len(train_losses) + 1, step=1)
    with plt.style.context(['science', 'high-vis']):
        fig, ax = plt.subplots()
        plt.plot(x, train_losses, label='Training loss')
        plt.plot(x, eval_losses, label='Evaluation loss')
        ax.set_title(f'{model_name} ({dataset_name.upper()})')
        ax.set_xticks(x, labels=range(1, len(x) + 1))
        ax.set_xlabel('Epochs')
        ax.set_ylabel('Loss')
        ax.legend(loc='upper right')
        for fmt in ('pdf', 'png'):
            Path(f'outputs/{fmt}/').mkdir(parents=True, exist_ok=True)
        plt.savefig(f'outputs/pdf/loss_{model_name}_{experiment}.pdf', format='pdf')
        plt.savefig(f'outputs/png/loss_{model_name}_{experiment}.png', format='png', dpi=300)
        plt.show()
def plot_scores(experiment: str, dataset_name: str) -> None:
    """Plot F1/precision/recall per model as a grouped bar chart."""
    scores = pd.read_csv(f'outputs/csv/{experiment}.csv', index_col=0)
    x = np.arange(len(scores))
    width = 0.2
    fig, ax = plt.subplots(figsize=(9, 3))
    # Three bars per model, offset left/center/right of each tick.
    rects1 = ax.bar(x=x - width, height=scores['f1'], width=width, label='F1 score')
    rects2 = ax.bar(x=x, height=scores['precision'], width=width, label='Precision')
    rects3 = ax.bar(x=x + width, height=scores['recall'], width=width, label='Recall')
    ax.set_title(f'{dataset_name.upper()}')
    ax.set_ylabel('Score')
    ax.set_xticks(x, labels=scores.index, fontsize=10)
    plt.legend(bbox_to_anchor=(0.5, -0.25), loc='lower center', ncol=4)
    fig.tight_layout()
    for fmt in ('pdf', 'png'):
        Path(f'outputs/{fmt}/').mkdir(parents=True, exist_ok=True)
    plt.savefig(f'outputs/pdf/{experiment}.pdf', format='pdf')
    plt.savefig(f'outputs/png/{experiment}.png', format='png', dpi=300)
    plt.show()
def plot_pie_charts() -> None:
    """Plot the ham/spam class distribution of each dataset as donut charts."""
    dataset_names = ['ling', 'sms', 'spamassassin', 'enron']
    fig, axs = plt.subplots(1, 4, figsize=(16, 4))
    for i, dataset_name in enumerate(dataset_names):
        df = get_dataset(dataset_name)
        counts = df['label'].value_counts().to_numpy()
        # width < 1 turns the pie into a donut; the percent label sits inside.
        axs[i].pie(counts, autopct='%1.2f\\%%', pctdistance=0.35, startangle=-30, wedgeprops={'width': 0.3}, textprops={'fontsize': 22})
        axs[i].set_title(f'''{dataset_name.upper()} ({len(df):,} samples)''', fontsize=24)
    fig.legend(['spam', 'ham'], bbox_to_anchor=(0.5, -0.1), loc='lower center', ncol=2, prop={'size': 22})
    fig.tight_layout()
    # Negative spacing pulls the four donuts closer together.
    plt.subplots_adjust(wspace=-0.3)
    plt.savefig('outputs/pdf/pie_charts.pdf', format='pdf')
    plt.savefig('outputs/png/pie_charts.png', format='png', dpi=300)
    plt.show()
def save_scores(experiment: str, index: str, values: dict) -> None:
    """Log scores for one model into the experiment's csv file.

    Creates the file with the full model-family index on first write, then
    updates the matching row in place on later calls.
    """
    llms = ['BERT', 'RoBERTa', 'SetFit-MiniLM', 'SetFit-mpnet', 'FLAN-T5-small', 'FLAN-T5-base']
    models = ['NB', 'LR', 'KNN', 'SVM', 'XGBoost', 'LightGBM']
    Path('outputs/csv/').mkdir(parents=True, exist_ok=True)
    file = Path(f'outputs/csv/{experiment}.csv')
    if file.is_file():
        scores = pd.read_csv(f'outputs/csv/{experiment}.csv', index_col=0)
    else:
        # First write: choose the row index for this model family.
        row_index = llms if index in llms else models
        scores = pd.DataFrame(
            index=row_index,
            columns=list(SCORING.keys()) + ['training_time', 'inference_time'],
        )
    scores.loc[index] = values
    scores.to_csv(f'outputs/csv/{experiment}.csv')
def run(config: Dict[str, Any], log_dir: str='', kernel_seed: int=0, kernel_random_state: Optional[np.random.RandomState]=None) -> Dict[str, Any]:
    """Wrapper function that enables running one simulation.

    Steps:
      - instantiate the kernel
      - run the simulation
      - return the end_state object

    Arguments:
        config: configuration file for the specific simulation
        log_dir: directory where log files are stored
        kernel_seed: simulation seed (used only when no random state is given)
        kernel_random_state: simulation random state
    """
    coloredlogs.install(
        level=config['stdout_log_level'],
        fmt='[%(process)d] %(levelname)s %(name)s %(message)s',
    )
    rng = kernel_random_state or np.random.RandomState(seed=kernel_seed)
    # Only the kernel-relevant keys of the config are forwarded.
    kernel_kwargs = subdict(
        config,
        ['start_time', 'stop_time', 'agents', 'agent_latency_model',
         'default_computation_delay', 'custom_properties'],
    )
    kernel = Kernel(random_state=rng, log_dir=log_dir, **kernel_kwargs)
    sim_start_time = dt.datetime.now()
    logger.info(f'Simulation Start Time: {sim_start_time}')
    end_state = kernel.run()
    sim_end_time = dt.datetime.now()
    logger.info(f'Simulation End Time: {sim_end_time}')
    logger.info(f'Time taken to run simulation: {sim_end_time - sim_start_time}')
    return end_state
class Agent():
    """
    Base Agent class.

    Attributes:
        id: Must be a unique number (usually autoincremented).
        name: For human consumption, should be unique (often type + number).
        type: For machine aggregation of results; should be the same for all
            agents following the same strategy (incl. parameter settings).
        random_state: an np.random.RandomState object, already seeded. Every
            agent is given a random state to use for any stochastic needs.
        log_events: flag to log or not the events during the simulation.
        log_to_file: flag to write or not the logged events to disk.
    """

    def __init__(self, id: int, name: Optional[str]=None, type: Optional[str]=None, random_state: Optional[np.random.RandomState]=None, log_events: bool=True, log_to_file: bool=True) -> None:
        self.id: int = id
        # Default the type to the concrete class name, and the name to "<type>_<id>".
        self.type: str = (type or self.__class__.__name__)
        self.name: str = (name or f'{self.type}_{self.id}')
        # Each agent gets its own seeded RNG for any stochastic behaviour.
        self.random_state: np.random.RandomState = (random_state or np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64')))
        self.log_events: bool = log_events
        # Writing to file only makes sense when event logging is on at all.
        self.log_to_file: bool = (log_to_file & log_events)
        # Set by the kernel in kernel_initializing().
        self.kernel = None
        self.current_time: NanosecondTime = 0
        # In-memory event log of (time, event_type, event) tuples.
        self.log: List[Tuple[(NanosecondTime, str, Any)]] = []
        self.logEvent('AGENT_TYPE', type)

    def kernel_initializing(self, kernel) -> None:
        """
        Called by the kernel one time when the simulation first begins.

        No other agents are guaranteed to exist at this time. The kernel
        reference must be retained, as this is the only time the agent can
        "see" it.

        Arguments:
            kernel: The Kernel instance running the experiment.
        """
        self.kernel = kernel
        logger.debug('{} exists!'.format(self.name))

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        """
        Called by the kernel one time after simulationInitializing.

        All other agents are guaranteed to exist at this time. The base Agent
        schedules a wakeup call for the first available timestamp; subclass
        agents may override this behavior as needed.

        Arguments:
            start_time: The earliest time for which the agent can schedule a
                wakeup call (or could receive a message).
        """
        assert (self.kernel is not None)
        logger.debug('Agent {} ({}) requesting kernel wakeup at time {}'.format(self.id, self.name, fmt_ts(start_time)))
        self.set_wakeup(start_time)

    def kernel_stopping(self) -> None:
        """
        Called by the kernel one time before simulationTerminating.

        All other agents are guaranteed to exist at this time.
        """
        pass

    def kernel_terminating(self) -> None:
        """
        Called by the kernel one time when the simulation terminates.

        No other agents are guaranteed to exist at this time. Flushes the
        in-memory event log to disk if file logging is enabled.
        """
        if (self.log and self.log_to_file):
            df_log = pd.DataFrame(self.log, columns=('EventTime', 'EventType', 'Event'))
            df_log.set_index('EventTime', inplace=True)
            self.write_log(df_log)

    def logEvent(self, event_type: str, event: Any='', append_summary_log: bool=False, deepcopy_event: bool=True) -> None:
        """
        Adds an event to this agent's log.

        The deepcopy of the event field, often an object, ensures later state
        changes to the object will not retroactively update the logged event.

        Arguments:
            event_type: label of the event (e.g., order submitted, order
                accepted, last trade, etc.)
            event: actual event to be logged
            append_summary_log: if True, also forward the event to the
                kernel's summary log
            deepcopy_event: Set to False to skip deepcopying the event object.
        """
        if (not self.log_events):
            return
        if deepcopy_event:
            # Snapshot the event so later mutations don't rewrite history.
            event = deepcopy(event)
        self.log.append((self.current_time, event_type, event))
        if append_summary_log:
            assert (self.kernel is not None)
            self.kernel.append_summary_log(self.id, event_type, event)

    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """
        Called each time a message destined for this agent reaches the front
        of the kernel's priority queue.

        Arguments:
            current_time: The simulation time at which the kernel is delivering
                this message -- the agent should treat this as "now".
            sender_id: The ID of the agent who sent the message.
            message: An object guaranteed to inherit from the message.Message class.
        """
        assert (self.kernel is not None)
        self.current_time = current_time
        # Guard the debug log to avoid formatting cost when disabled.
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('At {}, agent {} ({}) received: {}'.format(fmt_ts(current_time), self.id, self.name, message))

    def wakeup(self, current_time: NanosecondTime) -> None:
        """
        Agents can request a wakeup call at a future simulation time using
        ``Agent.set_wakeup()``. This is the method called when that time arrives.

        Arguments:
            current_time: The simulation time at which the kernel is delivering
                this wakeup -- the agent should treat this as "now".
        """
        assert (self.kernel is not None)
        self.current_time = current_time
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('At {}, agent {} ({}) received wakeup.'.format(fmt_ts(current_time), self.id, self.name))

    def send_message(self, recipient_id: int, message: Message, delay: int=0) -> None:
        """
        Sends a message to another Agent.

        Arguments:
            recipient_id: ID of the agent receiving the message.
            message: The ``Message`` class instance to send.
            delay: Represents an agent's request for ADDITIONAL delay (beyond
                the Kernel's mandatory computation + latency delays).
                Represents parallel pipeline processing delays (that should
                delay the transmission of messages but do not make the agent
                "busy" and unable to respond to new messages).
        """
        assert (self.kernel is not None)
        self.kernel.send_message(self.id, recipient_id, message, delay=delay)

    def send_message_batch(self, recipient_id: int, messages: List[Message], delay: NanosecondTime=0) -> None:
        """
        Sends a batch of messages to another Agent.

        Arguments:
            recipient_id: ID of the agent receiving the messages.
            messages: A list of ``Message`` class instances to send.
            delay: Represents an agent's request for ADDITIONAL delay (beyond
                the Kernel's mandatory computation + latency delays), as for
                ``send_message``.
        """
        assert (self.kernel is not None)
        # The whole batch is wrapped in one MessageBatch and delivered together.
        self.kernel.send_message(self.id, recipient_id, MessageBatch(messages), delay=delay)

    def set_wakeup(self, requested_time: NanosecondTime) -> None:
        """
        Called to receive a "wakeup call" from the kernel at some requested
        future time.

        Arguments:
            requested_time: Defaults to the next possible timestamp. Wakeup
                time cannot be the current time or a past time.
        """
        assert (self.kernel is not None)
        self.kernel.set_wakeup(self.id, requested_time)

    def get_computation_delay(self):
        """Queries the agent's current computation delay from the kernel."""
        return self.kernel.get_agent_compute_delay(sender_id=self.id)

    def set_computation_delay(self, requested_delay: int) -> None:
        """
        Calls the kernel to update the agent's computation delay.

        This does not initiate a global delay, nor an immediate delay for the
        agent. Rather it sets the new default delay for the calling agent. The
        delay will be applied upon every return from wakeup or recvMsg.

        Note that this delay IS applied to any messages sent by the agent
        during the current wake cycle (simulating the messages popping out at
        the end of its "thinking" time).

        Also note that we DO permit a computation delay of zero, but this
        should really only be used for special or massively parallel agents.

        Arguments:
            requested_delay: delay given in nanoseconds.
        """
        assert (self.kernel is not None)
        self.kernel.set_agent_compute_delay(sender_id=self.id, requested_delay=requested_delay)

    def delay(self, additional_delay: int) -> None:
        """
        Accumulates a temporary delay for the current wake cycle for this agent.

        This will apply the total delay (at time of send_message) to each
        message, and will modify the agent's next available time slot. These
        happen on top of the agent's compute delay BUT DO NOT ALTER IT
        (i.e. effects are transient). Mostly useful for staggering outbound
        messages.

        Arguments:
            additional_delay: additional delay given in nanoseconds.
        """
        assert (self.kernel is not None)
        self.kernel.delay_agent(sender_id=self.id, additional_delay=additional_delay)

    def write_log(self, df_log: pd.DataFrame, filename: Optional[str]=None) -> None:
        """
        Called by the agent, usually at the very end of the simulation just
        before kernel shutdown, to write to disk any log dataframe it has been
        accumulating during simulation.

        The format can be decided by the agent, although changes will require
        a special tool to read and parse the logs. The Kernel places the log
        in a unique directory per run, with one filename per agent, also
        decided by the Kernel using agent type, id, etc.

        If filename is None the Kernel will construct a filename based on the
        name of the Agent requesting log archival.

        Arguments:
            df_log: dataframe that contains all the logged events during the simulation
            filename: Location on disk to write the log to.
        """
        assert (self.kernel is not None)
        self.kernel.write_log(self.id, df_log, filename)

    def update_agent_state(self, state: Any) -> None:
        """
        Agents should use this method to replace their custom state in the
        dictionary the Kernel will return to the experimental config file at
        the end of the simulation.

        This is intended to be write-only, and agents should not use it to
        store information for their own later use.

        Arguments:
            state: The new state.
        """
        assert (self.kernel is not None)
        self.kernel.update_agent_state(self.id, state)

    def __lt__(self, other) -> bool:
        # Compare by stringified id — NOTE(review): this gives lexicographic
        # ordering ('10' < '9'), presumably only used for tie-breaking in
        # priority queues; confirm callers don't rely on numeric order.
        return (f'{self.id}' < f'{other.id}')
class BaseGenerator(ABC, Generic[T]):
    """
    Abstract base class defining the interface for Generator objects in ABIDES.

    This class is not used directly and is instead inherited from by child
    classes. Generators should produce an infinite amount of values.
    """

    @abstractmethod
    def next(self) -> T:
        """
        Produces the next value from the generator.
        """
        raise NotImplementedError

    @abstractmethod
    def mean(self) -> T:
        """
        Returns the average of the distribution of values generated.
        """
        raise NotImplementedError
class InterArrivalTimeGenerator(BaseGenerator[float], ABC):
    """
    General class for inter-arrival time generation. These generators are
    used to generate a delta time between the current time and the next
    wakeup of the agent.
    """
    pass
class ConstantTimeGenerator(InterArrivalTimeGenerator):
    """Generates a constant delta time of length ``step_duration``.

    Arguments:
        step_duration: length of the delta time in ns
    """

    def __init__(self, step_duration: float) -> None:
        self.step_duration: float = step_duration

    def next(self) -> float:
        """Return the (constant) time delta until the next wakeup."""
        return self.step_duration

    def mean(self) -> float:
        """The delta is constant, so the mean equals any single draw."""
        return self.next()
class PoissonTimeGenerator(InterArrivalTimeGenerator):
    """Generates Poisson-process inter-arrival times.

    Lambda must be specified either in seconds^-1 through ``lambda_freq`` or
    in seconds through ``lambda_time`` — exactly one of the two.

    Arguments:
        random_generator: configuration random generator
        lambda_freq: frequency (in s^-1)
        lambda_time: period (in seconds)
    """

    def __init__(self, random_generator: np.random.RandomState, lambda_freq: Optional[float]=None, lambda_time: Optional[float]=None) -> None:
        self.random_generator: np.random.RandomState = random_generator
        assert (((lambda_freq is None) and (lambda_time is not None)) or ((lambda_time is None) and (lambda_freq is not None))), 'specify lambda in frequency OR in time'
        # Normalize to a rate in s^-1 whichever way it was given.
        self.lambda_s: float = lambda_freq or (1 / lambda_time)

    def next(self) -> Optional[float]:
        """Return the delta (in ns) until the next wakeup, drawn from Exp(lambda)."""
        seconds = self.random_generator.exponential(1 / self.lambda_s)
        # Convert seconds to nanoseconds.
        return seconds * 1_000_000_000 if seconds is not None else None

    def mean(self) -> float:
        """Mean of an exponential with rate lambda, i.e. 1/lambda."""
        return 1 / self.lambda_s
class Kernel():
    """
    ABIDES simulation kernel: owns the global event queue, the simulation
    clock, and the delivery of messages and wakeups between agents.

    Arguments:
        agents: List of agents to include in the simulation.
        start_time: Timestamp giving the start time of the simulation.
        stop_time: Timestamp giving the end time of the simulation.
        default_computation_delay: time penalty applied to an agent each time it
            is awakened (wakeup or recvMsg).
        default_latency: latency imposed on each communication; models physical
            latency and avoids an infinite loop of events happening at the same
            exact time (in ns).
        agent_latency: legacy pairwise latency matrix, used when
            agent_latency_model is not defined.
        latency_noise: legacy noise distribution, used when agent_latency_model
            is not defined.
        agent_latency_model: Model of latency used for the network of agents.
        skip_log: if True, no log is saved on disk.
        seed: seed of the simulation.
        log_dir: directory where data is stored.
        custom_properties: extra attributes attached directly to the kernel
            instance (e.g. the oracle).
        random_state: kernel-level numpy RandomState.
    """

    def __init__(self, agents: List[Agent], start_time: NanosecondTime=str_to_ns('09:30:00'), stop_time: NanosecondTime=str_to_ns('16:00:00'), default_computation_delay: int=1, default_latency: float=1, agent_latency: Optional[List[List[float]]]=None, latency_noise: List[float]=[1.0], agent_latency_model: Optional[LatencyModel]=None, skip_log: bool=True, seed: Optional[int]=None, log_dir: Optional[str]=None, custom_properties: Optional[Dict[(str, Any)]]=None, random_state: Optional[np.random.RandomState]=None) -> None:
        custom_properties = (custom_properties or {})
        self.random_state: np.random.RandomState = (random_state or np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64')))
        # Global event queue ordered by delivery time; entries are
        # (time, (sender_id, recipient_id, message)).
        self.messages: queue.PriorityQueue[(int, str, Message)] = queue.PriorityQueue()
        self.kernel_wall_clock_start: datetime = datetime.now()
        self.mean_result_by_agent_type: Dict[(str, Any)] = {}
        self.agent_count_by_type: Dict[(str, int)] = {}
        self.summary_log: List[Dict[(str, Any)]] = []
        self.has_run = False
        # Attach arbitrary configuration attributes (e.g. an oracle)
        # directly onto the kernel instance.
        for (key, value) in custom_properties.items():
            setattr(self, key, value)
        self.agents: List[Agent] = agents
        # Detect gym experimental agents by base-class name.
        # NOTE(review): only direct bases are inspected — assumes
        # CoreGymAgent is always a direct parent; confirm against agent code.
        self.gym_agents: List[Agent] = list(filter((lambda agent: ('CoreGymAgent' in [c.__name__ for c in agent.__class__.__bases__])), agents))
        assert (len(self.gym_agents) <= 1), 'ABIDES-gym currently only supports using one gym agent'
        logger.debug(f'Detected {len(self.gym_agents)} ABIDES-gym agents')
        self.custom_state: Dict[(str, Any)] = {}
        self.start_time: NanosecondTime = start_time
        self.stop_time: NanosecondTime = stop_time
        self.current_time: NanosecondTime = start_time
        self.seed: Optional[int] = seed
        self.skip_log: bool = skip_log
        # Default log dir is the unix timestamp of kernel creation.
        self.log_dir: str = (log_dir or str(int(self.kernel_wall_clock_start.timestamp())))
        # Earliest simulation time each agent may next act; advanced by
        # computation delays as the agent processes events.
        self.agent_current_times: List[NanosecondTime] = ([self.start_time] * len(self.agents))
        self.agent_computation_delays: List[int] = ([default_computation_delay] * len(self.agents))
        self.agent_latency_model = agent_latency_model
        # Legacy latency matrix fallback when no latency model is supplied.
        # NOTE(review): all rows share one inner list object; mutating a single
        # cell would affect every row — harmless while it stays read-only.
        if (agent_latency is None):
            self.agent_latency: List[List[float]] = ([([default_latency] * len(self.agents))] * len(self.agents))
        else:
            self.agent_latency = agent_latency
        self.latency_noise: List[float] = latency_noise
        # Transient extra delay accumulated during the current wake cycle
        # (reset before each event is dispatched; see runner()).
        self.current_agent_additional_delay: int = 0
        self.show_trace_messages: bool = False
        logger.debug(f'Kernel initialized')

    def run(self) -> Dict[(str, Any)]:
        """
        Wrapper to run the entire simulation (when not running in ABIDES-Gym mode).

        3 steps:
            - simulation instantiation (``initialize``)
            - simulation run (``runner``)
            - simulation termination (``terminate``)

        Returns:
            The ``custom_state`` dictionary holding all end-of-simulation objects.
        """
        self.initialize()
        self.runner()
        return self.terminate()

    def initialize(self) -> None:
        """
        Instantiation of the simulation:
            - calls ``kernel_initializing`` then ``kernel_starting`` on every agent
            - starts the kernel clock at ``start_time``
            - resets wall-clock and message counters used for reporting
        """
        logger.debug('Kernel started')
        logger.debug('Simulation started!')
        logger.debug('--- Agent.kernel_initializing() ---')
        for agent in self.agents:
            agent.kernel_initializing(self)
        logger.debug('--- Agent.kernel_starting() ---')
        for agent in self.agents:
            agent.kernel_starting(self.start_time)
        self.current_time = self.start_time
        logger.debug('--- Kernel Clock started ---')
        logger.debug('Kernel.current_time is now {}'.format(fmt_ts(self.current_time)))
        logger.debug('--- Kernel Event Queue begins ---')
        logger.debug('Kernel will start processing messages. Queue length: {}'.format(len(self.messages.queue)))
        # Wall-clock bookkeeping for throughput reporting in terminate().
        self.event_queue_wall_clock_start = datetime.now()
        self.ttl_messages = 0

    def runner(self, agent_actions: Optional[Tuple[(Agent, List[Dict[(str, Any)]])]]=None) -> Dict[(str, Any)]:
        """
        Start the simulation and processing of the message queue.

        Optionally first applies ``agent_actions``, a (gym agent, action list)
        pair where the action list holds one dictionary per action to perform.

        Returns:
            A dictionary with two entries:
                - "done": True if the simulation is finished (stop time reached
                  or message queue empty), else False.
                - "result": the raw_state returned by the gym experimental
                  agent (formatted into state/reward/info by the gym
                  environment), or None when there is no gym agent.
        """
        if (agent_actions is not None):
            (exp_agent, action_list) = agent_actions
            exp_agent.apply_actions(action_list)
        # Main event loop: pop events in time order until the queue drains or
        # the clock passes stop_time.
        while ((not self.messages.empty()) and self.current_time and (self.current_time <= self.stop_time)):
            (self.current_time, event) = self.messages.get()
            assert (self.current_time is not None)
            (sender_id, recipient_id, message) = event
            if ((self.ttl_messages % 100000) == 0):
                logger.info('--- Simulation time: {}, messages processed: {:,}, wallclock elapsed: {:.2f}s ---'.format(fmt_ts(self.current_time), self.ttl_messages, (datetime.now() - self.event_queue_wall_clock_start).total_seconds()))
            if self.show_trace_messages:
                logger.debug('--- Kernel Event Queue pop ---')
                logger.debug('Kernel handling {} message for agent {} at time {}'.format(message.type(), recipient_id, self.current_time))
            self.ttl_messages += 1
            # Reset the per-wake-cycle transient delay before dispatching.
            self.current_agent_additional_delay = 0
            if isinstance(message, WakeupMsg):
                # If the agent is still "busy" (its next-available time lies in
                # the future), requeue the wakeup for that time instead.
                if (self.agent_current_times[recipient_id] > self.current_time):
                    self.messages.put((self.agent_current_times[recipient_id], (sender_id, recipient_id, message)))
                    if self.show_trace_messages:
                        logger.debug('After wakeup return, agent {} delayed from {} to {}'.format(recipient_id, fmt_ts(self.current_time), fmt_ts(self.agent_current_times[recipient_id])))
                    continue
                self.agent_current_times[recipient_id] = self.current_time
                wakeup_result = self.agents[recipient_id].wakeup(self.current_time)
                # Charge the agent its computation delay plus any transient
                # delay it accumulated during this wake cycle.
                self.agent_current_times[recipient_id] += (self.agent_computation_delays[recipient_id] + self.current_agent_additional_delay)
                if self.show_trace_messages:
                    logger.debug('After wakeup return, agent {} delayed from {} to {}'.format(recipient_id, fmt_ts(self.current_time), fmt_ts(self.agent_current_times[recipient_id])))
                # A non-None wakeup result means a gym agent wants control
                # returned to the gym environment mid-simulation.
                if (wakeup_result != None):
                    return {'done': False, 'result': wakeup_result}
            else:
                # Non-wakeup message: same "busy agent" requeue rule applies.
                if (self.agent_current_times[recipient_id] > self.current_time):
                    self.messages.put((self.agent_current_times[recipient_id], (sender_id, recipient_id, message)))
                    if self.show_trace_messages:
                        logger.debug('Agent in future: message requeued for {}'.format(fmt_ts(self.agent_current_times[recipient_id])))
                    continue
                self.agent_current_times[recipient_id] = self.current_time
                # Unpack batched messages; each one is delivered (and charged)
                # individually.
                if isinstance(message, MessageBatch):
                    messages = message.messages
                else:
                    messages = [message]
                for message in messages:
                    self.agent_current_times[recipient_id] += (self.agent_computation_delays[recipient_id] + self.current_agent_additional_delay)
                    if self.show_trace_messages:
                        logger.debug('After receive_message return, agent {} delayed from {} to {}'.format(recipient_id, fmt_ts(self.current_time), fmt_ts(self.agent_current_times[recipient_id])))
                    self.agents[recipient_id].receive_message(self.current_time, sender_id, message)
        if self.messages.empty():
            logger.debug('--- Kernel Event Queue empty ---')
        if (self.current_time and (self.current_time > self.stop_time)):
            logger.debug('--- Kernel Stop Time surpassed ---')
        # Simulation finished: hand the gym agent's raw state back, if any.
        if (len(self.gym_agents) > 0):
            self.gym_agents[0].update_raw_state()
            return {'done': True, 'result': self.gym_agents[0].get_raw_state()}
        else:
            return {'done': True, 'result': None}

    def terminate(self) -> Dict[(str, Any)]:
        """
        Termination of the simulation. Called once the queue is empty, or the
        gym environment is done, or the simulation reached kernel stop time:
            - calls ``kernel_stopping`` then ``kernel_terminating`` on every agent
            - writes the summary log and reports throughput statistics

        Returns:
            custom_state: an object that contains everything in the simulation;
            in particular useful to retrieve agents and/or logs after the
            simulation to proceed to analysis.
        """
        event_queue_wall_clock_stop = datetime.now()
        event_queue_wall_clock_elapsed = (event_queue_wall_clock_stop - self.event_queue_wall_clock_start)
        logger.debug('--- Agent.kernel_stopping() ---')
        for agent in self.agents:
            agent.kernel_stopping()
        logger.debug('\n--- Agent.kernel_terminating() ---')
        for agent in self.agents:
            agent.kernel_terminating()
        logger.info('Event Queue elapsed: {}, messages: {:,}, messages per second: {:0.1f}'.format(event_queue_wall_clock_elapsed, self.ttl_messages, (self.ttl_messages / event_queue_wall_clock_elapsed.total_seconds())))
        self.custom_state['kernel_event_queue_elapsed_wallclock'] = event_queue_wall_clock_elapsed
        self.custom_state['kernel_slowest_agent_finish_time'] = max(self.agent_current_times)
        self.custom_state['agents'] = self.agents
        self.write_summary_log()
        # Report per-agent-type means accumulated by agents during shutdown.
        logger.info('Mean ending value by agent type:')
        for a in self.mean_result_by_agent_type:
            value = self.mean_result_by_agent_type[a]
            count = self.agent_count_by_type[a]
            logger.info(f'{a}: {int(round((value / count))):d}')
        logger.info('Simulation ending!')
        return self.custom_state

    def reset(self) -> None:
        """
        Used in the gym core environment:
            - first terminates the kernel, closing any previous simulation
            - then initializes a new simulation
            - then runs the simulation (not specifying any action this time)
        """
        if self.has_run:
            self.terminate()
        self.initialize()
        self.runner()

    def send_message(self, sender_id: int, recipient_id: int, message: Message, delay: int=0) -> None:
        """
        Called by an agent to send a message to another agent.

        The kernel supplies its own current_time (i.e. "now") to prevent
        possible abuse by agents. The kernel will handle computational delay
        penalties and/or network latency.

        Arguments:
            sender_id: ID of the agent sending the message.
            recipient_id: ID of the agent receiving the message.
            message: The ``Message`` class instance to send.
            delay: Represents an agent's request for ADDITIONAL delay (beyond
                the kernel's mandatory computation + latency delays).
                Represents parallel pipeline processing delays (that should
                delay the transmission of messages but do not make the agent
                "busy" and unable to respond to new messages).
        """
        # Time the message leaves the sender: now + computation delay +
        # accumulated transient delay + the requested one-off delay.
        sent_time = (((self.current_time + self.agent_computation_delays[sender_id]) + self.current_agent_additional_delay) + delay)
        if (self.agent_latency_model is not None):
            # Preferred path: latency drawn from the configured model.
            latency: float = self.agent_latency_model.get_latency(sender_id=sender_id, recipient_id=recipient_id)
            deliver_at = (sent_time + int(latency))
            if self.show_trace_messages:
                logger.debug('Kernel applied latency {}, accumulated delay {}, one-time delay {} on send_message from: {} to {}, scheduled for {}'.format(latency, self.current_agent_additional_delay, delay, self.agents[sender_id].name, self.agents[recipient_id].name, fmt_ts(deliver_at)))
        else:
            # Legacy path: static pairwise latency plus sampled integer noise.
            latency = self.agent_latency[sender_id][recipient_id]
            noise = self.random_state.choice(len(self.latency_noise), p=self.latency_noise)
            deliver_at = (sent_time + int((latency + noise)))
            if self.show_trace_messages:
                logger.debug('Kernel applied latency {}, noise {}, accumulated delay {}, one-time delay {} on send_message from: {} to {}, scheduled for {}'.format(latency, noise, self.current_agent_additional_delay, delay, self.agents[sender_id].name, self.agents[recipient_id].name, fmt_ts(deliver_at)))
        self.messages.put((deliver_at, (sender_id, recipient_id, message)))
        if self.show_trace_messages:
            logger.debug('Sent time: {}, current time {}, computation delay {}'.format(sent_time, fmt_ts(self.current_time), self.agent_computation_delays[sender_id]))
            logger.debug('Message queued: {}'.format(message))

    def set_wakeup(self, sender_id: int, requested_time: Optional[NanosecondTime]=None) -> None:
        """
        Called by an agent to receive a "wakeup call" from the kernel at some
        requested future time.

        NOTE: The agent is responsible for maintaining any required state; the
        kernel will not supply any parameters to the ``wakeup()`` call.

        Arguments:
            sender_id: The ID of the agent making the call.
            requested_time: Defaults to the next possible timestamp. Wakeup
                time cannot be in the past.
        """
        if (requested_time is None):
            requested_time = (self.current_time + 1)
        if (self.current_time and (requested_time < self.current_time)):
            raise ValueError('set_wakeup() called with requested time not in future', 'current_time:', self.current_time, 'requested_time:', requested_time)
        if self.show_trace_messages:
            logger.debug('Kernel adding wakeup for agent {} at time {}'.format(sender_id, fmt_ts(requested_time)))
        # Wakeups are modeled as messages an agent sends to itself.
        self.messages.put((requested_time, (sender_id, sender_id, WakeupMsg())))

    def get_agent_compute_delay(self, sender_id: int) -> int:
        """
        Allows an agent to query its current computation delay (in ns).

        Arguments:
            sender_id: The ID of the agent to get the computational delay for.
        """
        return self.agent_computation_delays[sender_id]

    def set_agent_compute_delay(self, sender_id: int, requested_delay: int) -> None:
        """
        Called by an agent to update its computation delay.

        This does not initiate a global delay, nor an immediate delay for the
        agent. Rather it sets the new default delay for the calling agent. The
        delay will be applied upon every return from wakeup or recvMsg. Note
        that this delay IS applied to any messages sent by the agent during the
        current wake cycle (simulating the messages popping out at the end of
        its "thinking" time).

        Also note that we DO permit a computation delay of zero, but this
        should really only be used for special or massively parallel agents.

        Arguments:
            sender_id: The ID of the agent making the call.
            requested_delay: delay given in nanoseconds.

        Raises:
            ValueError: if the delay is not a non-negative integer.
        """
        if (not isinstance(requested_delay, int)):
            raise ValueError('Requested computation delay must be whole nanoseconds.', 'requested_delay:', requested_delay)
        if (requested_delay < 0):
            raise ValueError('Requested computation delay must be non-negative nanoseconds.', 'requested_delay:', requested_delay)
        self.agent_computation_delays[sender_id] = requested_delay

    def delay_agent(self, sender_id: int, additional_delay: int) -> None:
        """
        Called by an agent to accumulate temporary delay for the current wake
        cycle.

        This will apply the total delay (at time of send_message) to each
        message, and will modify the agent's next available time slot. These
        happen on top of the agent's compute delay BUT DO NOT ALTER IT (i.e.
        effects are transient). Mostly useful for staggering outbound messages.

        Arguments:
            sender_id: The ID of the agent making the call.
            additional_delay: additional delay given in nanoseconds.

        Raises:
            ValueError: if the delay is not a non-negative integer.
        """
        if (not isinstance(additional_delay, int)):
            raise ValueError('Additional delay must be whole nanoseconds.', 'additional_delay:', additional_delay)
        if (additional_delay < 0):
            raise ValueError('Additional delay must be non-negative nanoseconds.', 'additional_delay:', additional_delay)
        self.current_agent_additional_delay += additional_delay

    def find_agents_by_type(self, agent_type: Type[Agent]) -> List[int]:
        """
        Returns the IDs of any agents that are instances of the given type.

        Arguments:
            agent_type: The agent type to search for.

        Returns:
            A list of agent IDs that are instances of the type.
        """
        return [agent.id for agent in self.agents if isinstance(agent, agent_type)]

    def write_log(self, sender_id: int, df_log: pd.DataFrame, filename: Optional[str]=None) -> None:
        """
        Called by any agent, usually at the very end of the simulation just
        before kernel shutdown, to write to disk any log dataframe it has been
        accumulating during simulation.

        The format can be decided by the agent, although changes will require a
        special tool to read and parse the logs. The kernel places the log in a
        unique directory per run, with one filename per agent, also decided by
        the kernel using agent type, id, etc.

        If filename is not None, it will be used as the filename. Otherwise,
        the kernel will construct a filename based on the name of the agent
        requesting log archival.

        Arguments:
            sender_id: The ID of the agent making the call.
            df_log: dataframe representation of the log that contains all the
                events logged during the simulation.
            filename: Location on disk to write the log to.
        """
        if self.skip_log:
            return
        path = os.path.join('.', 'log', self.log_dir)
        if filename:
            file = '{}.bz2'.format(filename)
        else:
            # Default filename: agent name with spaces stripped.
            file = '{}.bz2'.format(self.agents[sender_id].name.replace(' ', ''))
        if (not os.path.exists(path)):
            os.makedirs(path)
        df_log.to_pickle(os.path.join(path, file), compression='bz2')

    def append_summary_log(self, sender_id: int, event_type: str, event: Any) -> None:
        """
        Append a one-time-only summary event (e.g. starting cash, ending cash)
        to the kernel-wide summary log. No timestamp is recorded.

        Arguments:
            sender_id: The ID of the agent making the call.
            event_type: The type of the event.
            event: The event to append to the log.
        """
        self.summary_log.append({'AgentID': sender_id, 'AgentStrategy': self.agents[sender_id].type, 'EventType': event_type, 'Event': event})

    def write_summary_log(self) -> None:
        """Write the accumulated summary log to disk as a bz2-compressed pickle."""
        path = os.path.join('.', 'log', self.log_dir)
        file = 'summary_log.bz2'
        if (not os.path.exists(path)):
            os.makedirs(path)
        df_log = pd.DataFrame(self.summary_log)
        df_log.to_pickle(os.path.join(path, file), compression='bz2')

    def update_agent_state(self, agent_id: int, state: Any) -> None:
        """
        Called by an agent that wishes to replace its custom state in the
        dictionary the kernel will return at the end of simulation. Shared
        state must be set directly, and agents should coordinate that
        non-destructively.

        Note that it is never necessary to use this kernel state dictionary for
        an agent to remember information about itself, only to report it back
        to the config file.

        Arguments:
            agent_id: The agent to update state for.
            state: The new state.
        """
        if ('agent_state' not in self.custom_state):
            self.custom_state['agent_state'] = {}
        self.custom_state['agent_state'][agent_id] = state
class LatencyModel:
    """Network latency model for messages between ABIDES agents.

    Two models are supported:

    * ``'deterministic'``: the latency is always exactly ``min_latency``.
    * ``'cubic'`` (default): the latency is
      ``min_latency + (jitter / x**3) * (min_latency / jitter_unit)``, where
      ``x`` is drawn uniformly from ``(jitter_clip, 1]``. The cubic curve
      yields mostly-small noise with a heavy tail of rare large delays.

    Arguments:
        random_state: An initialized ``np.random.RandomState`` object.
        min_latency: 2-D numpy array of pairwise minimum latency, integer
            nanoseconds (row = sender id, column = recipient id).
        latency_model: Either 'cubic' or 'deterministic'.
        connected: Scalar True or a per-pair array; a False entry prohibits
            communication regardless of other parameters (latency -1).
        jitter: The 'a' coefficient of the cubic noise curve. Float in [0, 1].
        jitter_clip: Exclusive lower bound of the uniform range from which 'x'
            is drawn; higher values clip the noise tail. Float in [0, 1].
        jitter_unit: Fraction of min_latency treated as the unit of measurement
            for jitter (e.g. 10 scales noise by min_latency / 10).

    Every parameter except ``min_latency`` may be a scalar (applies to all
    pairs), a 1-D array indexed by sender id, or a 2-D array indexed by
    (sender id, recipient id); the 2-D form need not be symmetric.
    """

    def __init__(self, random_state: np.random.RandomState, min_latency: np.ndarray, latency_model: str='cubic', connected: bool=True, jitter: float=0.5, jitter_clip: float=0.1, jitter_unit: float=10.0) -> None:
        self.latency_model: str = latency_model.lower()
        self.random_state: np.random.RandomState = random_state
        self.min_latency: np.ndarray = min_latency
        if self.latency_model not in ['cubic', 'deterministic']:
            raise Exception(f'Config error: unknown latency model requested ({self.latency_model})')
        # The jitter parameters are only meaningful for the cubic model.
        if self.latency_model == 'cubic':
            self.connected = connected
            self.jitter = jitter
            self.jitter_clip = jitter_clip
            self.jitter_unit = jitter_unit

    def get_latency(self, sender_id: int, recipient_id: int) -> float:
        """Sample and return the final latency for a single message.

        Arguments:
            sender_id: Simulation agent_id for the agent sending the message.
            recipient_id: Simulation agent_id for the agent receiving the message.
        """
        base = self._extract(self.min_latency, sender_id, recipient_id)
        if self.latency_model != 'cubic':
            # Deterministic model: no noise at all.
            return base
        if not self._extract(self.connected, sender_id, recipient_id):
            return -1  # this pair may not communicate
        a = self._extract(self.jitter, sender_id, recipient_id)
        clip = self._extract(self.jitter_clip, sender_id, recipient_id)
        unit = self._extract(self.jitter_unit, sender_id, recipient_id)
        # One uniform draw per message; the cubic term produces the noise tail.
        x = self.random_state.uniform(low=clip, high=1.0)
        return base + (a / x ** 3) * (base / unit)

    def _extract(self, param: Union[float, np.ndarray], sid: int, rid: int):
        """Resolve a scalar / 1-D / 2-D parameter for a (sender, recipient) pair.

        Arguments:
            param: The parameter (not parameter name) from which to extract a value.
            sid: The simulation sender agent id.
            rid: The simulation recipient agent id.
        """
        if np.isscalar(param):
            return param
        if isinstance(param, np.ndarray):
            if param.ndim == 1:
                return param[sid]
            if param.ndim == 2:
                return param[sid, rid]
        raise Exception('Config error: LatencyModel parameter is not scalar, 1-D ndarray, or 2-D ndarray.')
@dataclass
class Message:
    """Base class for all kernel messages.

    The message itself carries no envelope/header information (delivery
    metadata is handled outside the message); any desired payload lives in the
    arbitrary body of subclasses. Each instance receives a unique, strictly
    increasing ``message_id``, which also provides a total ordering so messages
    can act as priority-queue tie-breakers.
    """

    # Class-wide counter used to hand out sequential ids, starting at 1.
    __message_id_counter: ClassVar[int] = 1
    # Unique id assigned in __post_init__; not an __init__ parameter.
    message_id: int = field(init=False)

    def __post_init__(self):
        # Grab the next sequential id and advance the shared counter.
        self.message_id = Message.__message_id_counter
        Message.__message_id_counter += 1

    def __lt__(self, other: "Message") -> bool:
        # Older messages (smaller ids) sort first.
        return self.message_id < other.message_id

    def type(self) -> str:
        """Return the concrete message class name."""
        return type(self).__name__
@dataclass
class MessageBatch(Message):
    """Bundles several messages from one sender to one destination.

    When very large numbers of messages are sent between the same pair of
    agents, batching them with this class can help performance.
    """

    messages: List[Message]
@dataclass
class WakeupMsg(Message):
    """Empty marker message delivered to an agent when it is woken up."""
def subdict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
    """Return a copy of ``d`` containing only the entries whose key is in ``keys``.

    Arguments:
        d: original dictionary
        keys: list of keys to keep

    Returns:
        A new dictionary restricted to the requested keys (original insertion
        order of ``d`` is preserved).
    """
    wanted = set(keys)
    return {key: value for key, value in d.items() if key in wanted}
def restrictdict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
    """Return ``d`` restricted to the intersection of its keys with ``keys``.

    Arguments:
        d: original dictionary
        keys: list of keys to keep

    Returns:
        A new dictionary holding only keys present in both ``d`` and ``keys``,
        in ``d``'s insertion order.
    """
    # Equivalent to subdict(d, intersection) with the helper inlined.
    allowed = set(keys)
    return {key: value for key, value in d.items() if key in allowed}
def custom_eq(a: Any, b: Any) -> bool:
    """Return a == b, also treating two NaN (null) values as equal.

    Uses bitwise ``|`` / ``&`` (not ``or`` / ``and``) so the comparison also
    works element-wise on numpy/pandas inputs; ``x != x`` is the standard
    NaN test.
    """
    equal = (a == b)
    both_nan = (a != a) & (b != b)
    return equal | both_nan
def get_wake_time(open_time, close_time, a=0, b=1, random_state=None):
    """Draw a time U-quadratically distributed between open_time and close_time.

    For details on the U-quadratic distribution see
    https://en.wikipedia.org/wiki/U-quadratic_distribution.

    Arguments:
        open_time: start of the interval.
        close_time: end of the interval.
        a, b: support of the underlying U-quadratic distribution.
        random_state: optional ``np.random.RandomState`` for reproducible
            draws; when None the global numpy RNG is used (original behavior).
    """

    def _real_cbrt(n: float) -> float:
        """Real cube root of a float (``n ** (1/3)`` fails for negative n)."""
        if n < 0:
            return -((-n) ** (1.0 / 3.0))
        return n ** (1.0 / 3.0)

    def _u_quadratic_inverse_cdf(y: float) -> float:
        # Inverse CDF of the U-quadratic distribution on [a, b].
        alpha = 12 / ((b - a) ** 3)
        beta = (b + a) / 2
        return _real_cbrt((3 / alpha) * y - (beta - a) ** 3) + beta

    uniform_0_1 = random_state.rand() if random_state is not None else np.random.rand()
    random_multiplier = _u_quadratic_inverse_cdf(uniform_0_1)
    return open_time + random_multiplier * (close_time - open_time)
def fmt_ts(timestamp: NanosecondTime) -> str:
    """Render a nanosecond unix timestamp as a human-readable string.

    Format: ``YYYY-mm-dd HH:MM:SS``.
    """
    ts = pd.Timestamp(timestamp, unit='ns')
    return ts.strftime('%Y-%m-%d %H:%M:%S')
def str_to_ns(string: str) -> NanosecondTime:
    """Convert a human-readable time-delta string into nanoseconds.

    Parsing is delegated to pandas.

    Examples:
        - "1s" -> 1e9 ns
        - "1min" -> 6e10 ns
        - "00:00:30" -> 3e10 ns
    """
    delta = pd.to_timedelta(string)
    return delta.to_timedelta64().astype(int)
def datetime_str_to_ns(string: str) -> NanosecondTime:
    """Convert a datetime string into a nanosecond unix timestamp.

    Parsing is delegated to pandas.
    """
    ts = pd.Timestamp(string)
    return ts.value
def ns_date(ns_datetime: NanosecondTime) -> NanosecondTime:
    """Round a nanosecond unix timestamp down to midnight of the same day."""
    ns_per_day = (24 * 3600) * 1_000_000_000
    return ns_datetime - ns_datetime % ns_per_day
def parse_logs_df(end_state: dict) -> pd.DataFrame:
    """Flatten all agents' logs from an ABIDES ``end_state`` into one DataFrame.

    Goes through every agent in ``end_state['agents']``, extracts its log and
    un-nests each entry's payload into flat columns.

    Warning: this is meant to be used for debugging and exploration only.

    Arguments:
        end_state: the dictionary returned by an ABIDES simulation; must hold a
            non-empty ``'agents'`` list where each agent exposes ``log``
            (iterable of (time, type, payload) triples), ``id`` and ``type``.

    Returns:
        A DataFrame with one row per log entry across all agents.
    """
    agents = end_state['agents']
    dfs = []
    for agent in agents:
        messages = []
        for entry in agent.log:
            # Each log entry is a (time, type, payload) triple; non-integer
            # times are normalized to 0.
            m = {
                'EventTime': entry[0] if isinstance(entry[0], (int, np.int64)) else 0,
                'EventType': entry[1],
                'Event': entry[2],
            }
            # Un-nest the payload into flat columns. Fixed: use `is None`
            # instead of `== None` (the latter misbehaves on array-like
            # payloads), and pop() instead of a bare try/except around del.
            event = m.pop('Event')
            if event is None:
                event = {'EmptyEvent': True}
            elif not isinstance(event, dict):
                event = {'ScalarEventValue': event}
            m.update(event)
            # Tag each row with its agent unless the payload already did.
            if m.get('agent_id') is None:
                m['agent_id'] = agent.id
            m['agent_type'] = agent.type
            messages.append(m)
        dfs.append(pd.DataFrame(messages))
    return pd.concat(dfs)
def input_sha_wrapper(func: Callable) -> Callable:
    """Decorator computing a deterministic SHA-1 key for a function call.

    The key combines the function name with the positional arguments and the
    keyword arguments (the function's own defaults merged with those the
    caller supplied), so equal calls hash equally.

    NOTE: the wrapped function is never actually invoked — only the call
    signature is hashed and returned as ``{'input_sha': '<name>_<hexdigest>'}``.
    """

    def inner(*args, **kvargs):
        argspec = inspect.getfullargspec(func)
        # Fixed: argspec.defaults is None for functions without default
        # values, which previously crashed zip() with a TypeError.
        defaults = argspec.defaults or ()
        index_first_kv = len(argspec.args) - len(defaults)
        # Start from the declared defaults, then overlay the caller's kwargs.
        total_kvargs = dict(zip(argspec.args[index_first_kv:], defaults))
        total_kvargs.update(kvargs)
        input_sha = (
            func.__name__
            + '_'
            + hashlib.sha1(str.encode(str((args, total_kvargs)))).hexdigest()
        )
        return {'input_sha': input_sha}

    return inner
def cache_wrapper(func: Callable, cache_dir='cache/', force_recompute=False) -> Callable:
    """Local on-disk caching decorator.

    Results are keyed by ``input_sha_wrapper`` (function name + arguments) and
    stored as pickle files under ``cache_dir``. On a cache hit the stored
    result is returned without calling ``func``; set ``force_recompute=True``
    to always re-run and overwrite the cached value.

    SECURITY NOTE: cached results are deserialized with pickle — only point
    ``cache_dir`` at a directory you trust.
    """

    def inner(*args, **kvargs):
        # Fixed: os.mkdir failed when parent directories were missing.
        os.makedirs(cache_dir, exist_ok=True)
        sha_call = input_sha_wrapper(func)(*args, **kvargs)
        # Fixed: plain string concatenation wrote outside cache_dir whenever
        # the caller passed a directory without a trailing separator.
        cache_path = os.path.join(cache_dir, sha_call['input_sha'] + '.pkl')
        if os.path.isfile(cache_path) and not force_recompute:
            with open(cache_path, 'rb') as handle:
                return pickle.load(handle)
        result = func(*args, **kvargs)
        with open(cache_path, 'wb') as handle:
            pickle.dump(result, handle)
        return result

    return inner
def test_constant_time_generator():
    """ConstantTimeGenerator must return its fixed step from next() and mean()."""
    gen = ConstantTimeGenerator(10)
    assert gen.next() == 10
    assert gen.mean() == 10
def test_poisson_time_generator():
    """Check the sample mean of Poisson inter-arrival times for both parameterizations."""
    gen = PoissonTimeGenerator(np.random.RandomState(), lambda_freq=10)
    assert gen.mean() == 0.1
    samples = [gen.next() for _ in range(100000)]
    assert abs(np.mean(samples) / 1e9 - 0.1) < 0.001

    gen = PoissonTimeGenerator(np.random.RandomState(), lambda_time=10)
    assert gen.mean() == 10
    samples = [gen.next() for _ in range(100000)]
    assert abs(np.mean(samples) / 1e9 - 10) < 0.1
def test_str_to_ns():
    """str_to_ns must handle bare numbers, unit suffixes and HH:MM:SS strings."""
    second = 1_000_000_000
    minute = 60 * second
    hour = 60 * minute
    expected = {
        '0': 0,
        '1': 1,
        '1us': 1_000,
        '1ms': 1_000_000,
        '1s': second,
        '1sec': second,
        '1second': second,
        '1m': minute,
        '1min': minute,
        '1minute': minute,
        '1h': hour,
        '1hr': hour,
        '1hour': hour,
        '1d': 24 * hour,
        '1day': 24 * hour,
        '00:00:00': 0,
        '00:00:01': second,
        '00:01:00': minute,
        '01:00:00': hour,
    }
    for text, ns in expected.items():
        assert str_to_ns(text) == ns
class AbidesGymCoreEnv(gym.Env, ABC):
    """
    Abstract core gym environment for ABIDES.

    Subclasses supply a background agent configuration and a gym experimental
    agent constructor, and implement the ``raw_state_to_*`` methods that map
    raw simulation output to the MDP's state / reward / done / info.

    Arguments:
        - background_config_pair: tuple (config builder callable, kvargs dict or None)
        - wakeup_interval_generator: generator of delta wakeup times for the gym agent
        - state_buffer_length: length of the raw state buffer
        - first_interval: how long the simulation runs before the gym agent's first wake up
        - gymAgentConstructor: constructor used to build the gym experimental agent
    """

    def __init__(self, background_config_pair: Tuple[Callable, Optional[Dict[str, Any]]], wakeup_interval_generator: InterArrivalTimeGenerator, state_buffer_length: int, first_interval: Optional[NanosecondTime]=None, gymAgentConstructor=None) -> None:
        # BUG FIX: the original assigned background_config_pair[1] = {} in
        # place, which raises TypeError when a tuple (as annotated) is passed
        # with a None kvargs entry. Normalize before storing instead.
        if background_config_pair[1] is None:
            background_config_pair = (background_config_pair[0], {})
        self.background_config_pair: Tuple[Callable, Optional[Dict[str, Any]]] = background_config_pair
        self.wakeup_interval_generator: InterArrivalTimeGenerator = wakeup_interval_generator
        self.first_interval = first_interval
        self.state_buffer_length: int = state_buffer_length
        self.gymAgentConstructor = gymAgentConstructor
        # Robustness: default the subclass extension hooks read by reset() so
        # a subclass that does not set them cannot trigger AttributeError.
        # (AbidesGymMarketsEnv overwrites these right after super().__init__.)
        self.extra_background_config_kvargs: Dict[str, Any] = {}
        self.extra_gym_agent_kvargs: Dict[str, Any] = {}
        self.seed()
        self.state: Optional[np.ndarray] = None
        self.reward: Optional[float] = None
        self.done: Optional[bool] = None
        self.info: Optional[Dict[str, Any]] = None

    def reset(self):
        """
        Reset the state of the environment and returns an initial observation.

        Returns
        -------
        observation (object): the initial observation of the space.
        """
        # Fresh seed per episode so background agents differ across resets.
        seed = self.np_random.randint(low=0, high=(2 ** 32), dtype='uint64')
        background_config_args = self.background_config_pair[1]
        background_config_args.update({'seed': seed, **self.extra_background_config_kvargs})
        background_config_state = self.background_config_pair[0](**background_config_args)
        # The gym agent takes the next free agent id after the background agents.
        nextid = len(background_config_state['agents'])
        gym_agent = self.gymAgentConstructor(nextid, 'ABM', first_interval=self.first_interval, wakeup_interval_generator=self.wakeup_interval_generator, state_buffer_length=self.state_buffer_length, **self.extra_gym_agent_kvargs)
        config_state = config_add_agents(background_config_state, [gym_agent])
        self.gym_agent = config_state['agents'][-1]
        kernel = Kernel(random_state=np.random.RandomState(seed=seed), **subdict(config_state, ['start_time', 'stop_time', 'agents', 'agent_latency_model', 'default_computation_delay', 'custom_properties']))
        kernel.initialize()
        # Run until the gym agent's first wake-up to obtain the initial raw state.
        raw_state = kernel.runner()
        state = self.raw_state_to_state(deepcopy(raw_state['result']))
        self.kernel = kernel
        return state

    def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
        """
        The agent takes a step in the environment.

        Parameters
        ----------
        action : Discrete

        Returns
        -------
        observation, reward, done, info : tuple
            observation (object): environment-specific observation of the environment.
            reward (float): amount of reward achieved by the previous action.
            done (bool): whether it's time to reset the environment again.
            info (dict): diagnostic information useful for debugging; official
                evaluations of the agent are not allowed to use this for learning.
        """
        assert self.action_space.contains(action), f'Action {action} is not contained in Action Space'
        abides_action = self._map_action_space_to_ABIDES_SIMULATOR_SPACE(action)
        raw_state = self.kernel.runner((self.gym_agent, abides_action))
        # Each raw_state_to_* hook gets its own deepcopy so none of them can
        # observe mutations made by another.
        self.state = self.raw_state_to_state(deepcopy(raw_state['result']))
        assert self.observation_space.contains(self.state), f'INVALID STATE {self.state}'
        self.reward = self.raw_state_to_reward(deepcopy(raw_state['result']))
        self.done = raw_state['done'] or self.raw_state_to_done(deepcopy(raw_state['result']))
        if self.done:
            # Terminal step: add the episode-end reward correction (e.g. sparse reward).
            self.reward += self.raw_state_to_update_reward(deepcopy(raw_state['result']))
        self.info = self.raw_state_to_info(deepcopy(raw_state['result']))
        return (self.state, self.reward, self.done, self.info)

    def render(self, mode: str='human') -> None:
        """Renders the environment by printing the current state, reward and info.

        Args:
            mode (str): the mode to render with (only human-readable printing
                is implemented here).
        """
        print(self.state, self.reward, self.info)

    def seed(self, seed: Optional[int]=None) -> List[Any]:
        """Sets the seed for this env's random number generator(s).

        Returns:
            list<bigint>: the list of seeds used by this env's random number
            generators; the first value is the "main" seed a reproducer
            should pass to 'seed'.
        """
        (self.np_random, seed) = seeding.np_random(seed)
        return [seed]

    def close(self) -> None:
        """Override close in your subclass to perform any necessary cleanup.
        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """

    @abstractmethod
    def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
        """
        Abstract method that transforms a raw state into a state representation.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - state: state representation defining the MDP
        """
        raise NotImplementedError

    @abstractmethod
    def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
        """
        Abstract method that transforms a raw state into the reward obtained during the step.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - reward: immediate reward computed at each step
        """
        raise NotImplementedError

    @abstractmethod
    def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
        # annotation fixed: this hook returns the episode-termination flag
        # (the original declared -> float, swapped with raw_state_to_update_reward)
        """
        Abstract method that transforms a raw state into the flag if an episode is done.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - done: flag that describes if the episode is terminated or not
        """
        raise NotImplementedError

    @abstractmethod
    def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
        # annotation fixed: this hook returns a reward correction, not a bool
        """
        Abstract method that transforms a raw state into the final step reward update (if needed).

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - reward: update reward computed at the end of the episode
        """
        raise NotImplementedError

    @abstractmethod
    def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
        """
        Abstract method that transforms a raw state into an info dictionary.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - info: info dictionary computed at each step
        """
        raise NotImplementedError
class SubGymMarketsDailyInvestorEnv_v0(AbidesGymMarketsEnv):
    """
    Daily Investor V0 environment. It defines one of the ABIDES-Gym-markets environments.
    This environment presents an example of the classic problem where an investor tries
    to make money buying and selling a stock throughout a single day. The investor starts
    the day with cash but no position, then repeatedly buys and sells the stock in order
    to maximize its marked-to-market value at the end of the day (i.e. cash plus holdings
    valued at the market price).

    Arguments:
        - background_config: the handcrafted agents configuration used for the environment
        - mkt_close: time the market day ends
        - timestep_duration: how long between 2 wake-ups of the gym experimental agent
        - starting_cash: cash of the agents at the beginning of the simulation
        - order_fixed_size: size of the order placed by the experimental gym agent
        - state_history_length: length of the raw state buffer
        - market_data_buffer_length: length of the market data buffer
        - first_interval: how long the simulation is run before the first wake-up of the gym experimental agent
        - reward_mode: can use a dense or sparse reward formulation
        - done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done
          (if agent has lost too much mark-to-market value)
        - debug_mode: changes the info dictionary (lighter version if performance is an issue)

    Daily Investor V0:
        - Action Space:
            - MKT buy order_fixed_size
            - Hold
            - MKT sell order_fixed_size
        - State Space:
            - Holdings
            - Imbalance
            - Spread
            - DirectionFeature
            - padded_returns
    """

    # Decorators that strip the raw-state buffers before the methods below see them.
    raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
    raw_state_to_state_pre_process = markets_agent_utils.ignore_mkt_data_buffer_decorator

    def __init__(self, background_config: str='rmsc04', mkt_close: str='16:00:00', timestep_duration: str='60s', starting_cash: int=1000000, order_fixed_size: int=10, state_history_length: int=4, market_data_buffer_length: int=5, first_interval: str='00:05:00', reward_mode: str='dense', done_ratio: float=0.3, debug_mode: bool=False, background_config_extra_kvargs={}) -> None:
        self.background_config: Any = importlib.import_module('abides_markets.configs.{}'.format(background_config), package=None)
        self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
        self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
        self.starting_cash: int = starting_cash
        self.order_fixed_size: int = order_fixed_size
        self.state_history_length: int = state_history_length
        self.market_data_buffer_length: int = market_data_buffer_length
        self.first_interval: NanosecondTime = str_to_ns(first_interval)
        self.reward_mode: str = reward_mode
        self.done_ratio: float = done_ratio
        self.debug_mode: bool = debug_mode
        # Episode terminates when marked-to-market drops to this level.
        self.down_done_condition: float = self.done_ratio * starting_cash
        assert background_config in ['rmsc03', 'rmsc04', 'smc_01'], 'Select rmsc03, rmsc04 or smc_01 as config'
        assert (self.first_interval <= str_to_ns('16:00:00')) & (self.first_interval >= str_to_ns('00:00:00')), 'Select authorized FIRST_INTERVAL delay'
        assert (self.mkt_close <= str_to_ns('16:00:00')) & (self.mkt_close >= str_to_ns('09:30:00')), 'Select authorized market hours'
        assert reward_mode in ['sparse', 'dense'], 'reward_mode needs to be dense or sparse'
        assert (self.timestep_duration <= str_to_ns('06:30:00')) & (self.timestep_duration >= str_to_ns('00:00:00')), 'Select authorized timestep_duration'
        assert (type(self.starting_cash) == int) & (self.starting_cash >= 0), 'Select positive integer value for starting_cash'
        assert (type(self.order_fixed_size) == int) & (self.order_fixed_size >= 0), 'Select positive integer value for order_fixed_size'
        # BUG FIX: the following assert messages were copy-pasted from
        # order_fixed_size and named the wrong parameter.
        assert (type(self.state_history_length) == int) & (self.state_history_length >= 0), 'Select positive integer value for state_history_length'
        assert (type(self.market_data_buffer_length) == int) & (self.market_data_buffer_length >= 0), 'Select positive integer value for market_data_buffer_length'
        assert (type(self.done_ratio) == float) & (self.done_ratio >= 0) & (self.done_ratio < 1), 'Select float value for done_ratio between 0 and 1'
        assert debug_mode in [True, False], 'debug_mode needs to be True or False'
        background_config_args = {'end_time': self.mkt_close}
        background_config_args.update(background_config_extra_kvargs)
        super().__init__(background_config_pair=(self.background_config.build_config, background_config_args), wakeup_interval_generator=ConstantTimeGenerator(step_duration=self.timestep_duration), starting_cash=self.starting_cash, state_buffer_length=self.state_history_length, market_data_buffer_length=self.market_data_buffer_length, first_interval=self.first_interval)
        # Action space: 0 = MKT buy, 1 = hold, 2 = MKT sell.
        self.num_actions: int = 3
        self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
        # State: holdings, imbalance, spread, direction feature, plus
        # (state_history_length - 1) padded returns.
        self.num_state_features: int = (4 + self.state_history_length) - 1
        self.state_highs: np.ndarray = np.array([np.finfo(np.float32).max, 1.0, np.finfo(np.float32).max, np.finfo(np.float32).max] + ((self.state_history_length - 1) * [np.finfo(np.float32).max]), dtype=np.float32).reshape(self.num_state_features, 1)
        self.state_lows: np.ndarray = np.array([np.finfo(np.float32).min, 0.0, np.finfo(np.float32).min, np.finfo(np.float32).min] + ((self.state_history_length - 1) * [np.finfo(np.float32).min]), dtype=np.float32).reshape(self.num_state_features, 1)
        self.observation_space: gym.Space = gym.spaces.Box(self.state_lows, self.state_highs, shape=(self.num_state_features, 1), dtype=np.float32)
        # Baseline for the dense reward delta at the first step.
        self.previous_marked_to_market = self.starting_cash

    def _map_action_space_to_ABIDES_SIMULATOR_SPACE(self, action: int) -> List[Dict[str, Any]]:
        """
        Utility function that maps OpenAI action definition (integers) to environment
        API action definition (list of dictionaries). The action space ranges [0, 1, 2] where:
            - `0` MKT buy order_fixed_size
            - `1` Hold (i.e. do nothing)
            - `2` MKT sell order_fixed_size

        Arguments:
            - action: integer representation of the different actions

        Returns:
            - action_list: list of the corresponding series of actions mapped into abides env apis
        """
        if action == 0:
            return [{'type': 'MKT', 'direction': 'BUY', 'size': self.order_fixed_size}]
        elif action == 1:
            return []
        elif action == 2:
            return [{'type': 'MKT', 'direction': 'SELL', 'size': self.order_fixed_size}]
        else:
            raise ValueError(f'Action {action} is not part of the actions supported by the function.')

    @raw_state_to_state_pre_process
    def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
        """
        Method that transforms a raw state into a state representation.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - state: state representation defining the MDP for the daily investor v0 environment
        """
        # Buffered market data: one entry per recorded wake-up.
        bids = raw_state['parsed_mkt_data']['bids']
        asks = raw_state['parsed_mkt_data']['asks']
        last_transactions = raw_state['parsed_mkt_data']['last_transaction']
        holdings = raw_state['internal_data']['holdings']
        imbalances = [markets_agent_utils.get_imbalance(b, a, depth=3) for (b, a) in zip(bids, asks)]
        mid_prices = [markets_agent_utils.get_mid_price(b, a, lt) for (b, a, lt) in zip(bids, asks, last_transactions)]
        # Left-pad the mid-price returns so the state vector has fixed length.
        returns = np.diff(mid_prices)
        padded_returns = np.zeros(self.state_history_length - 1)
        padded_returns[-len(returns):] = returns if len(returns) > 0 else padded_returns
        # Fall back to the mid price when a book side is empty.
        best_bids = [book[0][0] if len(book) > 0 else mid for (book, mid) in zip(bids, mid_prices)]
        best_asks = [book[0][0] if len(book) > 0 else mid for (book, mid) in zip(asks, mid_prices)]
        spreads = np.array(best_asks) - np.array(best_bids)
        direction_features = np.array(mid_prices) - np.array(last_transactions)
        computed_state = np.array([holdings[-1], imbalances[-1], spreads[-1], direction_features[-1]] + padded_returns.tolist(), dtype=np.float32)
        return computed_state.reshape(self.num_state_features, 1)

    @raw_state_pre_process
    def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
        """
        Method that transforms a raw state into the reward obtained during the step.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - reward: immediate reward computed at each step for the daily investor v0 environment
        """
        if self.reward_mode == 'dense':
            holdings = raw_state['internal_data']['holdings']
            cash = raw_state['internal_data']['cash']
            last_transaction = raw_state['parsed_mkt_data']['last_transaction']
            marked_to_market = cash + holdings * last_transaction
            reward = marked_to_market - self.previous_marked_to_market
            # Normalize by order size and by the number of steps in a trading day.
            reward = reward / self.order_fixed_size
            num_ns_day = (16 - 9.5) * 60 * 60 * 1000000000.0
            step_length = self.timestep_duration
            num_steps_per_episode = num_ns_day / step_length
            reward = reward / num_steps_per_episode
            self.previous_marked_to_market = marked_to_market
            return reward
        elif self.reward_mode == 'sparse':
            # Sparse mode only rewards at episode end (raw_state_to_update_reward).
            return 0

    @raw_state_pre_process
    def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
        """
        Method that transforms a raw state into the final step reward update (if needed).

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - reward: update reward computed at the end of the episode for the daily investor v0 environment
        """
        if self.reward_mode == 'dense':
            # Dense mode already paid out incrementally at each step.
            return 0
        elif self.reward_mode == 'sparse':
            holdings = raw_state['internal_data']['holdings']
            cash = raw_state['internal_data']['cash']
            last_transaction = raw_state['parsed_mkt_data']['last_transaction']
            marked_to_market = cash + holdings * last_transaction
            reward = marked_to_market - self.starting_cash
            reward = reward / self.order_fixed_size
            num_ns_day = (16 - 9.5) * 60 * 60 * 1000000000.0
            step_length = self.timestep_duration
            num_steps_per_episode = num_ns_day / step_length
            reward = reward / num_steps_per_episode
            return reward

    @raw_state_pre_process
    def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
        """
        Method that transforms a raw state into the flag if an episode is done.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - done: flag that describes if the episode is terminated or not for the daily investor v0 environment
        """
        holdings = raw_state['internal_data']['holdings']
        cash = raw_state['internal_data']['cash']
        last_transaction = raw_state['parsed_mkt_data']['last_transaction']
        marked_to_market = cash + holdings * last_transaction
        # Episode ends early if the agent lost too much mark-to-market value.
        done = marked_to_market <= self.down_done_condition
        return done

    @raw_state_pre_process
    def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
        """
        Method that transforms a raw state into an info dictionary.

        Arguments:
            - raw_state: dictionary that contains raw simulation information
              obtained from the gym experimental agent

        Returns:
            - info: info dictionary computed at each step for the daily investor v0 environment
        """
        last_transaction = raw_state['parsed_mkt_data']['last_transaction']
        bids = raw_state['parsed_mkt_data']['bids']
        best_bid = bids[0][0] if len(bids) > 0 else last_transaction
        asks = raw_state['parsed_mkt_data']['asks']
        best_ask = asks[0][0] if len(asks) > 0 else last_transaction
        cash = raw_state['internal_data']['cash']
        current_time = raw_state['internal_data']['current_time']
        holdings = raw_state['internal_data']['holdings']
        spread = best_ask - best_bid
        orderbook = {'asks': {'price': {}, 'volume': {}}, 'bids': {'price': {}, 'volume': {}}}
        for (book, book_name) in [(bids, 'bids'), (asks, 'asks')]:
            for level in [0, 1, 2]:
                # BUG FIX: the original called get_val(bids, level) for both
                # sides, filling the asks entries with bid-side data.
                (price, volume) = markets_agent_utils.get_val(book, level)
                orderbook[book_name]['price'][level] = np.array([price]).reshape(-1)
                orderbook[book_name]['volume'][level] = np.array([volume]).reshape(-1)
        order_status = raw_state['internal_data']['order_status']
        mkt_open = raw_state['internal_data']['mkt_open']
        mkt_close = raw_state['internal_data']['mkt_close']
        last_bid = markets_agent_utils.get_last_val(bids, last_transaction)
        last_ask = markets_agent_utils.get_last_val(asks, last_transaction)
        wide_spread = last_ask - last_bid
        ask_spread = last_ask - best_ask
        bid_spread = best_bid - last_bid
        marked_to_market = cash + holdings * last_transaction
        if self.debug_mode == True:
            return {'last_transaction': last_transaction, 'best_bid': best_bid, 'best_ask': best_ask, 'spread': spread, 'bids': bids, 'asks': asks, 'cash': cash, 'current_time': current_time, 'holdings': holdings, 'orderbook': orderbook, 'order_status': order_status, 'mkt_open': mkt_open, 'mkt_close': mkt_close, 'last_bid': last_bid, 'last_ask': last_ask, 'wide_spread': wide_spread, 'ask_spread': ask_spread, 'bid_spread': bid_spread, 'marked_to_market': marked_to_market}
        else:
            # Lighter info dict when debug_mode is off (performance).
            return {}
class AbidesGymMarketsEnv(AbidesGymCoreEnv, ABC):
    """
    Abstract class for markets gyms to inherit from to create usable specific ABIDES Gyms.

    Builds on AbidesGymCoreEnv by fixing the gym agent constructor to
    FinancialGymAgent and by passing market-specific extra kvargs (starting
    cash, market data buffer) to the gym agent, plus logging-suppression
    kvargs to the background config builder.

    Arguments:
        - background_config_pair: tuple consisting in the background builder function and the inputs to use
        - wakeup_interval_generator: generator used to compute delta time wakeup for the gym experimental agent
        - starting_cash: cash of the agents at the beginning of the simulation
        - state_buffer_length: length of the raw state buffer
        - market_data_buffer_length: length of the market data buffer
        - first_interval: how long the simulation is run before the first wake up of the gym experimental agent
        - raw_state_pre_process: decorator used to pre-process raw_state
    """

    # Default raw-state pre-processing: pass-through (subclasses override).
    raw_state_pre_process = markets_agent_utils.identity_decorator

    def __init__(self, background_config_pair: Tuple[Callable, Optional[Dict[str, Any]]], wakeup_interval_generator: InterArrivalTimeGenerator, starting_cash: int, state_buffer_length: int, market_data_buffer_length: int, first_interval: Optional[NanosecondTime]=None, raw_state_pre_process=markets_agent_utils.identity_decorator) -> None:
        # NOTE(review): the raw_state_pre_process parameter is accepted but
        # never stored or used here — only the class attribute above takes
        # effect; confirm whether the parameter should be removed or wired in.
        super().__init__(background_config_pair, wakeup_interval_generator, state_buffer_length, first_interval=first_interval, gymAgentConstructor=FinancialGymAgent)
        self.starting_cash: int = starting_cash
        self.market_data_buffer_length: int = market_data_buffer_length
        # Extra constructor kvargs forwarded to the FinancialGymAgent in reset().
        self.extra_gym_agent_kvargs = {'starting_cash': self.starting_cash, 'market_data_buffer_length': self.market_data_buffer_length}
        # Extra kvargs forwarded to the background config builder in reset():
        # disable exchange/book/order logging for speed.
        self.extra_background_config_kvargs = {'exchange_log_orders': False, 'book_logging': False, 'log_orders': None}
class MyCallbacks(DefaultCallbacks):
    """
    RLlib callbacks that collect custom per-episode metrics for the execution environment.

    Per-step info-dict values from agent0 are accumulated in episode.user_data
    and summarized into episode.custom_metrics when the episode ends.
    """

    def on_episode_start(self, *, worker: RolloutWorker, base_env: BaseEnv, policies: Dict[str, Policy], episode: MultiAgentEpisode, env_index: int, **kwargs):
        """Callback run on the rollout worker before each episode starts.

        Args:
            worker (RolloutWorker): Reference to the current rollout worker.
            base_env (BaseEnv): BaseEnv running the episode.
            policies (dict): Mapping of policy id to policy objects.
            episode (MultiAgentEpisode): Episode object; `episode.user_data`
                stores temporary data, `episode.custom_metrics` custom metrics.
            env_index (EnvID): Obsoleted: the ID of the environment.
            kwargs: Forward compatibility placeholder.
        """
        assert episode.length == 0, 'ERROR: `on_episode_start()` callback should be called right after env reset!'
        # BUG FIX: defaultdict takes the factory as a positional argument.
        # defaultdict(default_factory=list) is swallowed by dict's kwargs
        # constructor, yielding a mapping {'default_factory': list} with NO
        # default factory, so on_episode_step crashed with KeyError.
        episode.user_data = defaultdict(list)

    def on_episode_step(self, *, worker: RolloutWorker, base_env: BaseEnv, episode: MultiAgentEpisode, env_index: int, **kwargs):
        """Runs on each episode step: append agent0's info-dict values to user_data.

        Args:
            worker (RolloutWorker): Reference to the current rollout worker.
            base_env (BaseEnv): BaseEnv running the episode.
            episode (MultiAgentEpisode): Episode object holding episode state.
            env_index (EnvID): Obsoleted: the ID of the environment.
            kwargs: Forward compatibility placeholder.
        """
        assert episode.length > 0, 'ERROR: `on_episode_step()` callback should not be called right after env reset!'
        agent0_info = episode._agent_to_last_info['agent0']
        for (k, v) in agent0_info.items():
            episode.user_data[k].append(v)

    def on_episode_end(self, *, worker: RolloutWorker, base_env: BaseEnv, policies: Dict[str, Policy], episode: MultiAgentEpisode, env_index: int, **kwargs):
        """Runs when an episode is done: summarize user_data into custom_metrics.

        Emits episode totals for the reward/quantity metrics, plus action-mix
        and state-feature snapshots at evenly spaced episode milestones.

        Args:
            worker (RolloutWorker): Reference to the current rollout worker.
            base_env (BaseEnv): BaseEnv running the episode.
            policies (Dict[PolicyID, Policy]): Mapping of policy id to policy objects.
            episode (MultiAgentEpisode): Episode object holding episode state.
            env_index (EnvID): Obsoleted: the ID of the environment.
            kwargs: Forward compatibility placeholder.
        """
        for metric_name in ['slippage_reward', 'late_penalty_reward', 'executed_quantity', 'remaining_quantity']:
            episode.custom_metrics[metric_name] = np.sum(episode.user_data[metric_name])
        # NOTE(review): this first pass intentionally(?) uses i = None and the
        # final buffer entry, emitting keys suffixed '_None' for end-of-episode
        # values before the milestone loop below — confirm the key naming.
        i = None
        milestone_index = -1
        action_counter = episode.user_data['action_counter'][milestone_index]
        tot_actions = 0
        for (key, val) in action_counter.items():
            tot_actions += val
        for (key, val) in action_counter.items():
            episode.custom_metrics[f'pct_action_counter_{key}_{i}'] = val / tot_actions
        metrics = ['holdings_pct', 'time_pct', 'diff_pct', 'imbalance_all', 'imbalance_5', 'price_impact', 'spread', 'direction_feature']
        for metric in metrics:
            episode.custom_metrics[f'{metric}_{i}'] = episode.user_data[metric][milestone_index]
        # Snapshot the same metrics at evenly spaced milestones of the episode.
        num_max_steps_per_episode = episode.user_data['num_max_steps_per_episode'][-1]
        num_milestone = 4
        len_milestone = num_max_steps_per_episode / num_milestone
        for i in range(num_milestone + 1):
            milestone_index = int(i * len_milestone)
            if milestone_index >= len(episode.user_data['action_counter']):
                break
            action_counter = episode.user_data['action_counter'][milestone_index]
            tot_actions = 0
            for (key, val) in action_counter.items():
                tot_actions += val
            for (key, val) in action_counter.items():
                episode.custom_metrics[f'pct_action_counter_{key}_{i}'] = val / tot_actions
            for metric in metrics:
                episode.custom_metrics[f'{metric}_{i}'] = episode.user_data[metric][milestone_index]

    def on_sample_end(self, *, worker: RolloutWorker, samples: SampleBatch, **kwargs):
        """Called at the end of RolloutWorker.sample(). No-op here.

        Args:
            worker (RolloutWorker): Reference to the current rollout worker.
            samples (SampleBatch): Batch to be returned; may be mutated.
            kwargs: Forward compatibility placeholder.
        """
        pass

    def on_train_result(self, *, trainer, result: dict, **kwargs):
        """Called at the end of Trainable.train(). No-op here.

        Args:
            trainer (Trainer): Current trainer instance.
            result (dict): Results returned from trainer.train(); may be mutated.
            kwargs: Forward compatibility placeholder.
        """
        pass

    def on_learn_on_batch(self, *, policy: Policy, train_batch: SampleBatch, result: dict, **kwargs) -> None:
        """Called at the beginning of Policy.learn_on_batch(). No-op here.

        Note: This is called before 0-padding via
        `pad_batch_to_sequences_of_same_size`.

        Args:
            policy (Policy): Reference to the current Policy object.
            train_batch (SampleBatch): SampleBatch to be trained on; may be mutated.
            result (dict): A results dict to add custom metrics to.
            kwargs: Forward compatibility placeholder.
        """
        pass

    def on_postprocess_trajectory(self, *, worker: RolloutWorker, episode: MultiAgentEpisode, agent_id: str, policy_id: str, policies: Dict[str, Policy], postprocessed_batch: SampleBatch, original_batches: Dict[str, SampleBatch], **kwargs):
        """Called immediately after a policy's postprocess_fn is called.

        Counts the number of postprocessed batches per episode.

        Args:
            worker (RolloutWorker): Reference to the current rollout worker.
            episode (MultiAgentEpisode): Episode object.
            agent_id (str): Id of the current agent.
            policy_id (str): Id of the current policy for the agent.
            policies (dict): Mapping of policy id to policy objects.
            postprocessed_batch (SampleBatch): The postprocessed sample batch
                for this agent; may be mutated.
            original_batches (dict): Unpostprocessed trajectory data; do not mutate.
            kwargs: Forward compatibility placeholder.
        """
        if 'num_batches' not in episode.custom_metrics:
            episode.custom_metrics['num_batches'] = 0
        episode.custom_metrics['num_batches'] += 1
class SubGymMarketsExecutionEnv_v0(AbidesGymMarketsEnv):
    """ABIDES-Gym-markets environment for the algorithmic order-execution problem.

    The experimental agent must execute a parent order of ``parent_order_size``
    shares (``direction`` BUY or SELL) within ``execution_window`` by splitting
    it into smaller child orders, while minimizing slippage against the entry
    price observed at the first step.

    Arguments:
        background_config: name of the handcrafted background-agent config module
            (one of 'rmsc03', 'rmsc04', 'smc_01') imported from abides_markets.configs
        mkt_close: time the market day ends (string, e.g. '16:00:00')
        timestep_duration: wall-clock interval between two wake-ups of the gym agent
        starting_cash: agent cash at the beginning of the simulation
        order_fixed_size: size of each child order placed by the gym agent
        state_history_length: length of the raw state buffer
        market_data_buffer_length: length of the market data buffer
        first_interval: simulated time elapsed before the gym agent's first wake-up
        parent_order_size: total quantity the agent has to execute
        execution_window: time the agent is given to execute the parent order
        direction: direction of the parent order ('BUY' or 'SELL')
        not_enough_reward_update: per-share penalty for unexecuted quantity at episode end
        too_much_reward_update: per-share penalty for over-execution at episode end
        just_quantity_reward_update: reward update when exactly the parent order is completed
        debug_mode: if True, step info dict contains raw market/holdings data
            (heavier); otherwise only the custom metrics tracker
        background_config_extra_kvargs: extra key/value arguments forwarded to the
            background config builder function

    Action space (Discrete(3)):
        0 - cancel all open orders, then MKT order of order_fixed_size
        1 - cancel all open orders, then LMT order of order_fixed_size at the near touch
        2 - do nothing

    State space (num_state_features x 1 column vector):
        holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5,
        price_impact, spread, direction_feature, padded mid-price returns
    """

    # Decorators (from markets_agent_utils) that strip buffered raw-state data
    # before it reaches the reward/done/info and state computations respectively.
    raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
    raw_state_to_state_pre_process = markets_agent_utils.ignore_mkt_data_buffer_decorator

    @dataclass
    class CustomMetricsTracker(ABC):
        """Data class used to track custom metrics that are output to RLlib."""

        slippage_reward: float = 0
        late_penalty_reward: float = 0
        executed_quantity: int = 0
        remaining_quantity: int = 0
        action_counter: Dict[(str, int)] = field(default_factory=dict)
        holdings_pct: float = 0
        time_pct: float = 0
        diff_pct: float = 0
        imbalance_all: float = 0
        imbalance_5: float = 0
        price_impact: int = 0
        spread: int = 0
        direction_feature: float = 0
        num_max_steps_per_episode: float = 0

    def __init__(self, background_config: Any='rmsc04', mkt_close: str='16:00:00', timestep_duration: str='60s', starting_cash: int=1000000, order_fixed_size: int=10, state_history_length: int=4, market_data_buffer_length: int=5, first_interval: str='00:00:30', parent_order_size: int=1000, execution_window: str='00:10:00', direction: str='BUY', not_enough_reward_update: int=(- 1000), too_much_reward_update: int=(- 100), just_quantity_reward_update: int=0, debug_mode: bool=False, background_config_extra_kvargs: Dict[(str, Any)]={}) -> None:
        # NOTE(review): mutable default `{}` for background_config_extra_kvargs is
        # shared across calls; safe only because it is never mutated here.
        self.background_config: Any = importlib.import_module('abides_markets.configs.{}'.format(background_config), package=None)
        self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
        self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
        self.starting_cash: int = starting_cash
        self.order_fixed_size: int = order_fixed_size
        self.state_history_length: int = state_history_length
        self.market_data_buffer_length: int = market_data_buffer_length
        self.first_interval: NanosecondTime = str_to_ns(first_interval)
        self.parent_order_size: int = parent_order_size
        self.execution_window: NanosecondTime = str_to_ns(execution_window)
        self.direction: str = direction
        self.debug_mode: bool = debug_mode
        self.too_much_reward_update: int = too_much_reward_update
        self.not_enough_reward_update: int = not_enough_reward_update
        self.just_quantity_reward_update: int = just_quantity_reward_update
        # Placeholder prices; entry_price / near_touch are overwritten from market
        # data in raw_state_to_state before they are used for decisions.
        self.entry_price: int = 1
        self.far_touch: int = 1
        self.near_touch: int = 1
        self.step_index: int = 0
        self.custom_metrics_tracker = self.CustomMetricsTracker()
        assert (background_config in ['rmsc03', 'rmsc04', 'smc_01']), 'Select rmsc03 or rmsc04 as config'
        assert ((self.first_interval <= str_to_ns('16:00:00')) & (self.first_interval >= str_to_ns('00:00:00'))), 'Select authorized FIRST_INTERVAL delay'
        assert ((self.mkt_close <= str_to_ns('16:00:00')) & (self.mkt_close >= str_to_ns('09:30:00'))), 'Select authorized market hours'
        assert ((self.timestep_duration <= str_to_ns('06:30:00')) & (self.timestep_duration >= str_to_ns('00:00:00'))), 'Select authorized timestep_duration'
        assert ((type(self.starting_cash) == int) & (self.starting_cash >= 0)), 'Select positive integer value for starting_cash'
        assert ((type(self.order_fixed_size) == int) & (self.order_fixed_size >= 0)), 'Select positive integer value for order_fixed_size'
        # NOTE(review): the two messages below look copy-pasted from the
        # order_fixed_size assert; they actually validate state_history_length
        # and market_data_buffer_length.
        assert ((type(self.state_history_length) == int) & (self.state_history_length >= 0)), 'Select positive integer value for order_fixed_size'
        assert ((type(self.market_data_buffer_length) == int) & (self.market_data_buffer_length >= 0)), 'Select positive integer value for order_fixed_size'
        assert (self.debug_mode in [True, False]), 'debug_mode needs to be True or False'
        assert (self.direction in ['BUY', 'SELL']), 'direction needs to be BUY or SELL'
        # NOTE(review): second conjunct checks order_fixed_size, not
        # parent_order_size — likely a copy-paste slip; confirm intent.
        assert ((type(self.parent_order_size) == int) & (self.order_fixed_size >= 0)), 'Select positive integer value for parent_order_size'
        assert ((self.execution_window <= str_to_ns('06:30:00')) & (self.execution_window >= str_to_ns('00:00:00'))), 'Select authorized execution_window'
        assert (type(self.too_much_reward_update) == int), 'Select integer value for too_much_reward_update'
        assert (type(self.not_enough_reward_update) == int), 'Select integer value for not_enough_reward_update'
        assert (type(self.just_quantity_reward_update) == int), 'Select integer value for just_quantity_reward_update'
        background_config_args = {'end_time': self.mkt_close}
        background_config_args.update(background_config_extra_kvargs)
        super().__init__(background_config_pair=(self.background_config.build_config, background_config_args), wakeup_interval_generator=ConstantTimeGenerator(step_duration=self.timestep_duration), starting_cash=self.starting_cash, state_buffer_length=self.state_history_length, market_data_buffer_length=self.market_data_buffer_length, first_interval=self.first_interval)
        # Action space: 0 = MKT child order, 1 = LMT child order, 2 = hold.
        self.num_actions: int = 3
        self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
        for i in range(self.num_actions):
            self.custom_metrics_tracker.action_counter[f'action_{i}'] = 0
        # Upper bound on steps per episode, exported as an RLlib custom metric.
        num_ns_episode = (self.first_interval + self.execution_window)
        step_length = self.timestep_duration
        num_max_steps_per_episode = (num_ns_episode / step_length)
        self.custom_metrics_tracker.num_max_steps_per_episode = num_max_steps_per_episode
        # 8 scalar features + (state_history_length - 1) padded returns.
        self.num_state_features: int = ((8 + self.state_history_length) - 1)
        self.state_highs: np.ndarray = np.array(([2, 2, 4, 1, 1, np.finfo(np.float32).max, np.finfo(np.float32).max, np.finfo(np.float32).max] + ((self.state_history_length - 1) * [np.finfo(np.float32).max])), dtype=np.float32).reshape(self.num_state_features, 1)
        self.state_lows: np.ndarray = np.array(([(- 2), (- 2), (- 4), 0, 0, np.finfo(np.float32).min, np.finfo(np.float32).min, np.finfo(np.float32).min] + ((self.state_history_length - 1) * [np.finfo(np.float32).min])), dtype=np.float32).reshape(self.num_state_features, 1)
        self.observation_space: gym.Space = gym.spaces.Box(self.state_lows, self.state_highs, shape=(self.num_state_features, 1), dtype=np.float32)
        self.previous_marked_to_market: int = self.starting_cash

    def _map_action_space_to_ABIDES_SIMULATOR_SPACE(self, action: int) -> List[Dict[(str, Any)]]:
        """Map a gym action (integer) to the ABIDES environment API action list.

        The action space ranges [0, 1, 2] where:
            0 - cancel all, then MKT ``direction`` order of ``order_fixed_size``
            1 - cancel all, then LMT ``direction`` order of ``order_fixed_size``
                at the current near touch
            2 - do nothing

        Arguments:
            action: integer representation of the chosen action

        Returns:
            List of ABIDES-API action dictionaries.

        Raises:
            ValueError: if ``action`` is outside the supported range.
        """
        self.custom_metrics_tracker.action_counter[f'action_{action}'] += 1
        if (action == 0):
            return [{'type': 'CCL_ALL'}, {'type': 'MKT', 'direction': self.direction, 'size': self.order_fixed_size}]
        elif (action == 1):
            return [{'type': 'CCL_ALL'}, {'type': 'LMT', 'direction': self.direction, 'size': self.order_fixed_size, 'limit_price': self.near_touch}]
        elif (action == 2):
            return []
        else:
            raise ValueError(f'Action {action} is not part of the actions supported by the function.')

    @raw_state_to_state_pre_process
    def raw_state_to_state(self, raw_state: Dict[(str, Any)]) -> np.ndarray:
        """Transform a raw simulation state into the MDP state representation.

        Arguments:
            raw_state: dictionary of raw simulation information from the gym agent
                (buffered market data and internal data).

        Returns:
            np.ndarray of shape (num_state_features, 1) with features
            [holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5,
            price_impact, spread, direction_feature, padded returns...].
        """
        bids = raw_state['parsed_mkt_data']['bids']
        asks = raw_state['parsed_mkt_data']['asks']
        last_transactions = raw_state['parsed_mkt_data']['last_transaction']
        # 1) fraction of the parent order already executed
        holdings = raw_state['internal_data']['holdings']
        holdings_pct = (holdings[(- 1)] / self.parent_order_size)
        # 2) fraction of the execution window elapsed since parent-order arrival
        mkt_open = raw_state['internal_data']['mkt_open'][(- 1)]
        current_time = raw_state['internal_data']['current_time'][(- 1)]
        time_from_parent_arrival = ((current_time - mkt_open) - self.first_interval)
        assert (current_time >= (mkt_open + self.first_interval)), 'Agent has woken up earlier than its first interval'
        time_limit = self.execution_window
        time_pct = (time_from_parent_arrival / time_limit)
        # 3) execution progress relative to elapsed time (schedule deviation)
        diff_pct = (holdings_pct - time_pct)
        # 4-5) order book imbalance at full depth and at depth 5
        imbalances_all = [markets_agent_utils.get_imbalance(b, a, depth=None) for (b, a) in zip(bids, asks)]
        imbalance_all = imbalances_all[(- 1)]
        imbalances_5 = [markets_agent_utils.get_imbalance(b, a, depth=5) for (b, a) in zip(bids, asks)]
        imbalance_5 = imbalances_5[(- 1)]
        mid_prices = [markets_agent_utils.get_mid_price(b, a, lt) for (b, a, lt) in zip(bids, asks, last_transactions)]
        mid_price = mid_prices[(- 1)]
        # Lock in the entry price on the very first step of the episode.
        if (self.step_index == 0):
            self.entry_price = mid_price
        entry_price = self.entry_price
        # Near touch = best price on the agent's own side; falls back to the
        # last transaction when that side of the book is empty.
        book = (raw_state['parsed_mkt_data']['bids'][(- 1)] if (self.direction == 'BUY') else raw_state['parsed_mkt_data']['asks'][(- 1)])
        self.near_touch = (book[0][0] if (len(book) > 0) else last_transactions[(- 1)])
        # 6) log price move against the entry price, signed by trade direction
        price_impact = (np.log((mid_price / entry_price)) if (self.direction == 'BUY') else np.log((entry_price / mid_price)))
        # 7) bid-ask spread (empty book sides fall back to the mid price)
        best_bids = [(bids[0][0] if (len(bids) > 0) else mid) for (bids, mid) in zip(bids, mid_prices)]
        best_asks = [(asks[0][0] if (len(asks) > 0) else mid) for (asks, mid) in zip(asks, mid_prices)]
        spreads = (np.array(best_asks) - np.array(best_bids))
        spread = spreads[(- 1)]
        # 8) mid-price vs last transaction
        direction_features = (np.array(mid_prices) - np.array(last_transactions))
        direction_feature = direction_features[(- 1)]
        # 9) mid-price returns, left-padded with zeros to a fixed length
        mid_prices = [markets_agent_utils.get_mid_price(b, a, lt) for (b, a, lt) in zip(bids, asks, last_transactions)]
        returns = np.diff(mid_prices)
        padded_returns = np.zeros((self.state_history_length - 1))
        padded_returns[(- len(returns)):] = (returns if (len(returns) > 0) else padded_returns)
        # Mirror the features into the custom metrics tracker for RLlib logging.
        self.custom_metrics_tracker.holdings_pct = holdings_pct
        self.custom_metrics_tracker.time_pct = time_pct
        self.custom_metrics_tracker.diff_pct = diff_pct
        self.custom_metrics_tracker.imbalance_all = imbalance_all
        self.custom_metrics_tracker.imbalance_5 = imbalance_5
        self.custom_metrics_tracker.price_impact = price_impact
        self.custom_metrics_tracker.spread = spread
        self.custom_metrics_tracker.direction_feature = direction_feature
        computed_state = np.array(([holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction_feature] + padded_returns.tolist()), dtype=np.float32)
        self.step_index += 1
        return computed_state.reshape(self.num_state_features, 1)

    @raw_state_pre_process
    def raw_state_to_reward(self, raw_state: Dict[(str, Any)]) -> float:
        """Compute the per-step (dense) reward: slippage PnL of the fills since
        the last wake-up, normalized by the parent order size.

        Arguments:
            raw_state: dictionary of raw simulation information from the gym agent.

        Returns:
            Immediate reward for this step.
        """
        entry_price = self.entry_price
        inter_wakeup_executed_orders = raw_state['internal_data']['inter_wakeup_executed_orders']
        if (len(inter_wakeup_executed_orders) == 0):
            pnl = 0
        else:
            # BUY: profit when filled below entry; SELL: profit when filled above.
            pnl = (sum((((entry_price - order.fill_price) * order.quantity) for order in inter_wakeup_executed_orders)) if (self.direction == 'BUY') else sum((((order.fill_price - entry_price) * order.quantity) for order in inter_wakeup_executed_orders)))
        self.pnl = pnl
        reward = (pnl / self.parent_order_size)
        self.custom_metrics_tracker.slippage_reward = reward
        return reward

    @raw_state_pre_process
    def raw_state_to_update_reward(self, raw_state: Dict[(str, Any)]) -> float:
        """Compute the end-of-episode reward update: penalize over- or
        under-execution of the parent order, normalized by its size.

        Arguments:
            raw_state: dictionary of raw simulation information from the gym agent.

        Returns:
            Terminal reward adjustment.
        """
        holdings = raw_state['internal_data']['holdings']
        parent_order_size = self.parent_order_size
        if ((self.direction == 'BUY') and (holdings >= parent_order_size)):
            update_reward = (abs((holdings - parent_order_size)) * self.too_much_reward_update)
        elif ((self.direction == 'BUY') and (holdings < parent_order_size)):
            update_reward = (abs((holdings - parent_order_size)) * self.not_enough_reward_update)
        elif ((self.direction == 'SELL') and (holdings <= (- parent_order_size))):
            # NOTE(review): for SELL, holdings is negative, so
            # abs(holdings - parent_order_size) measures distance from
            # +parent_order_size rather than from -parent_order_size —
            # confirm whether abs(holdings + parent_order_size) was intended.
            update_reward = (abs((holdings - parent_order_size)) * self.too_much_reward_update)
        elif ((self.direction == 'SELL') and (holdings > (- parent_order_size))):
            update_reward = (abs((holdings - parent_order_size)) * self.not_enough_reward_update)
        else:
            update_reward = self.just_quantity_reward_update
        update_reward = (update_reward / self.parent_order_size)
        self.custom_metrics_tracker.late_penalty_reward = update_reward
        return update_reward

    @raw_state_pre_process
    def raw_state_to_done(self, raw_state: Dict[(str, Any)]) -> bool:
        """Decide whether the episode is finished.

        The episode ends when the parent order is fully executed (in either
        direction) or when the execution window has elapsed.

        Arguments:
            raw_state: dictionary of raw simulation information from the gym agent.

        Returns:
            True if the episode is terminated, False otherwise.
        """
        holdings = raw_state['internal_data']['holdings']
        parent_order_size = self.parent_order_size
        current_time = raw_state['internal_data']['current_time']
        mkt_open = raw_state['internal_data']['mkt_open']
        time_limit = ((mkt_open + self.first_interval) + self.execution_window)
        if ((self.direction == 'BUY') and (holdings >= parent_order_size)):
            done = True
        elif ((self.direction == 'SELL') and (holdings <= (- parent_order_size))):
            done = True
        elif (current_time >= time_limit):
            done = True
        else:
            done = False
        # Executed quantity is reported as a positive number in both directions.
        self.custom_metrics_tracker.executed_quantity = (holdings if (self.direction == 'BUY') else (- holdings))
        self.custom_metrics_tracker.remaining_quantity = (parent_order_size - self.custom_metrics_tracker.executed_quantity)
        return done

    @raw_state_pre_process
    def raw_state_to_info(self, raw_state: Dict[(str, Any)]) -> Dict[(str, Any)]:
        """Build the per-step info dictionary.

        In debug mode the dict exposes raw market/holdings data; otherwise only
        the custom metrics tracker (as a plain dict) is returned, which keeps
        the step lightweight.

        Arguments:
            raw_state: dictionary of raw simulation information from the gym agent.

        Returns:
            Info dictionary for this step.
        """
        last_transaction = raw_state['parsed_mkt_data']['last_transaction']
        bids = raw_state['parsed_mkt_data']['bids']
        best_bid = (bids[0][0] if (len(bids) > 0) else last_transaction)
        asks = raw_state['parsed_mkt_data']['asks']
        best_ask = (asks[0][0] if (len(asks) > 0) else last_transaction)
        current_time = raw_state['internal_data']['current_time']
        holdings = raw_state['internal_data']['holdings']
        if (self.debug_mode == True):
            return {'last_transaction': last_transaction, 'best_bid': best_bid, 'best_ask': best_ask, 'current_time': current_time, 'holdings': holdings, 'parent_size': self.parent_order_size, 'pnl': self.pnl, 'reward': (self.pnl / self.parent_order_size)}
        else:
            return asdict(self.custom_metrics_tracker)
class CoreGymAgent(Agent, ABC):
    """Abstract base for ABIDES Gym experimental agents.

    Concrete subclasses must maintain a buffer of raw simulation states and
    expose it through the two accessors declared below.
    """

    @abstractmethod
    def update_raw_state(self) -> None:
        """Snapshot the current simulation data into the raw-state buffer."""
        raise NotImplementedError

    @abstractmethod
    def get_raw_state(self) -> deque:
        """Return the buffer of raw states accumulated so far."""
        raise NotImplementedError
class FinancialGymAgent(CoreBackgroundAgent, CoreGymAgent):
    """Gym experimental agent: the interface between the ABIDES simulation and
    the ABIDES Gym environments.

    Arguments:
        id: agent's id in the simulation
        symbol: ticker of the traded asset
        starting_cash: agent's cash at the beginning of the simulation
        subscribe_freq: frequency at which the agent receives market data from the exchange
        subscribe: flag whether the agent subscribes to market data
        subscribe_num_levels: order-book depth the agent subscribes to
        wakeup_interval_generator: inter-wakeup generator for the agent's next wakeup
        state_buffer_length: length of the buffer of the agent's raw states
        market_data_buffer_length: length of the buffer for received market data
        first_interval: delay before the agent's first wakeup (None = use generator)
        log_orders: whether to log order activity
        name: printable agent name
        type: agent type string
        random_state: seeded RandomState for stochasticity
    """

    def __init__(self, id: int, symbol: str, starting_cash: int, subscribe_freq: int=int(100000000.0), subscribe: bool=True, subscribe_num_levels: int=10, wakeup_interval_generator: InterArrivalTimeGenerator=ConstantTimeGenerator(step_duration=str_to_ns('1min')), state_buffer_length: int=2, market_data_buffer_length: int=5, first_interval: Optional[NanosecondTime]=None, log_orders: bool=False, name: Optional[str]=None, type: Optional[str]=None, random_state: Optional[np.random.RandomState]=None) -> None:
        super().__init__(id, symbol=symbol, starting_cash=starting_cash, log_orders=log_orders, name=name, type=type, random_state=random_state, wakeup_interval_generator=wakeup_interval_generator, state_buffer_length=state_buffer_length, market_data_buffer_length=market_data_buffer_length, first_interval=first_interval, subscribe=subscribe, subscribe_num_levels=subscribe_num_levels, subscribe_freq=subscribe_freq)
        self.symbol: str = symbol
        self.subscribe_freq: int = subscribe_freq
        self.subscribe: bool = subscribe
        self.subscribe_num_levels: int = subscribe_num_levels
        self.wakeup_interval_generator: InterArrivalTimeGenerator = wakeup_interval_generator
        # Lookback window for transacted-volume queries = mean inter-wakeup time.
        self.lookback_period: NanosecondTime = self.wakeup_interval_generator.mean()
        # Tie the generator's randomness to the agent's own seeded random state.
        if hasattr(self.wakeup_interval_generator, 'random_generator'):
            self.wakeup_interval_generator.random_generator = self.random_state
        self.state_buffer_length: int = state_buffer_length
        self.market_data_buffer_length: int = market_data_buffer_length
        self.first_interval: Optional[NanosecondTime] = first_interval
        self.has_subscribed: bool = False
        # Fill bookkeeping: per-step ("inter_wakeup") and per-episode order lists,
        # plus their (quantity, price) parsed counterparts.
        self.episode_executed_orders: List[Order] = []
        self.inter_wakeup_executed_orders: List[Order] = []
        self.parsed_episode_executed_orders: List[Tuple[(int, int)]] = []
        self.parsed_inter_wakeup_executed_orders: List[Tuple[(int, int)]] = []
        self.parsed_mkt_data: Dict[(str, Any)] = {}
        self.parsed_mkt_data_buffer = deque(maxlen=self.market_data_buffer_length)
        self.parsed_volume_data = {}
        self.parsed_volume_data_buffer = deque(maxlen=self.market_data_buffer_length)
        self.raw_state = deque(maxlen=self.state_buffer_length)
        self.order_status: Dict[(int, Dict[(str, Any)])] = {}

    def act_on_wakeup(self) -> Dict:
        """Schedule the next wakeup, refresh the raw state, and clear the
        per-step buffers.

        Returns the raw state to the ABIDES gym environment (outside of the
        ABIDES simulation) where the next action will be selected.

        Returns:
            The raw-state dictionary processed by the gym sub-environment.
        """
        wake_time = (self.current_time + self.wakeup_interval_generator.next())
        self.set_wakeup(wake_time)
        self.update_raw_state()
        # Deep copy so the environment cannot mutate the agent's buffers.
        raw_state = deepcopy(self.get_raw_state())
        self.new_step_reset()
        return raw_state
class CoreBackgroundAgent(TradingAgent):
    """Base trading agent that buffers market data and its own execution
    activity into a raw state consumed by the gym layer.

    It subscribes to L2 and transacted-volume market data, tracks order status
    and fills per step and per episode, and translates abstract gym actions
    (MKT / LMT / CCL_ALL dictionaries) into exchange orders.
    """

    def __init__(self, id: int, symbol: str, starting_cash: int, subscribe_freq: int=int(100000000.0), lookback_period: Optional[int]=None, subscribe: bool=True, subscribe_num_levels: Optional[int]=None, wakeup_interval_generator: InterArrivalTimeGenerator=ConstantTimeGenerator(step_duration=str_to_ns('1min')), order_size_generator=None, state_buffer_length: int=2, market_data_buffer_length: int=5, first_interval: Optional[NanosecondTime]=None, log_orders: bool=False, name: Optional[str]=None, type: Optional[str]=None, random_state: Optional[np.random.RandomState]=None) -> None:
        super().__init__(id, starting_cash=starting_cash, log_orders=log_orders, name=name, type=type, random_state=random_state)
        self.symbol: str = symbol
        self.subscribe_freq: int = subscribe_freq
        self.subscribe: bool = subscribe
        self.subscribe_num_levels: int = subscribe_num_levels
        self.first_interval: Optional[NanosecondTime] = first_interval
        self.wakeup_interval_generator: InterArrivalTimeGenerator = wakeup_interval_generator
        self.order_size_generator = order_size_generator
        # Tie the generators' randomness to the agent's own seeded random state.
        if hasattr(self.wakeup_interval_generator, 'random_generator'):
            self.wakeup_interval_generator.random_generator = self.random_state
        self.state_buffer_length: int = state_buffer_length
        self.market_data_buffer_length: int = market_data_buffer_length
        self.first_interval: Optional[NanosecondTime] = first_interval
        if (self.order_size_generator != None):
            self.order_size_generator.random_generator = self.random_state
        # Lookback window for transacted-volume queries = mean inter-wakeup time.
        self.lookback_period: NanosecondTime = self.wakeup_interval_generator.mean()
        self.has_subscribed: bool = False
        # Fill bookkeeping: per-step ("inter_wakeup") and per-episode order lists,
        # plus their (quantity, price) parsed counterparts.
        self.episode_executed_orders: List[Order] = []
        self.inter_wakeup_executed_orders: List[Order] = []
        self.parsed_episode_executed_orders: List[Tuple[(int, int)]] = []
        self.parsed_inter_wakeup_executed_orders: List[Tuple[(int, int)]] = []
        self.parsed_mkt_data: Dict[(str, Any)] = {}
        self.parsed_mkt_data_buffer = deque(maxlen=self.market_data_buffer_length)
        self.parsed_volume_data = {}
        self.parsed_volume_data_buffer: Deque[Dict[(str, Any)]] = deque(maxlen=self.market_data_buffer_length)
        self.raw_state: Deque[Dict[(str, Any)]] = deque(maxlen=self.state_buffer_length)
        self.order_status: Dict[(int, Dict[(str, Any)])] = {}

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        """Standard kernel-start hook; no extra behavior beyond the base class."""
        super().kernel_starting(start_time)

    def wakeup(self, current_time: NanosecondTime) -> bool:
        """Agent interarrival wake up times are determined by wakeup_interval_generator.

        On first wakeup, registers the L2 and transacted-volume subscriptions.
        Once the market is open, delegates to act_on_wakeup and returns its
        raw-state result.
        """
        super().wakeup(current_time)
        if (not self.has_subscribed):
            super().request_data_subscription(L2SubReqMsg(symbol=self.symbol, freq=self.subscribe_freq, depth=self.subscribe_num_levels))
            super().request_data_subscription(TransactedVolSubReqMsg(symbol=self.symbol, freq=self.subscribe_freq, lookback=self.lookback_period))
            self.has_subscribed = True
        if ((self.mkt_open != None) and (current_time >= self.mkt_open)):
            raw_state = self.act_on_wakeup()
            return raw_state

    def act_on_wakeup(self):
        """Hook implemented by subclasses (e.g. FinancialGymAgent)."""
        raise NotImplementedError

    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """Process a message from the exchange; buffers parsed L2 and
        transacted-volume market data when subscribed.

        :param current_time: simulation current time
        :param sender_id: id of the sending agent
        :param message: message received from the ExchangeAgent
        """
        super().receive_message(current_time, sender_id, message)
        if self.subscribe:
            if isinstance(message, MarketDataMsg):
                if isinstance(message, L2DataMsg):
                    self.parsed_mkt_data = self.get_parsed_mkt_data(message)
                    self.parsed_mkt_data_buffer.append(self.parsed_mkt_data)
                elif isinstance(message, TransactedVolDataMsg):
                    self.parsed_volume_data = self.get_parsed_volume_data(message)
                    self.parsed_volume_data_buffer.append(self.parsed_volume_data)

    def get_wake_frequency(self) -> NanosecondTime:
        """Return the delay before the first wakeup: the configured
        first_interval if set, otherwise one draw from the interval generator."""
        time_first_wakeup = (self.first_interval if (self.first_interval != None) else self.wakeup_interval_generator.next())
        return time_first_wakeup

    def apply_actions(self, actions: List[Dict[(str, Any)]]) -> None:
        """Translate a list of abstract action dicts (type MKT / LMT / CCL_ALL)
        into exchange orders.

        Raises:
            ValueError: for an unsupported action type.
        """
        for action in actions:
            if (action['type'] == 'MKT'):
                side = (Side.BID if (action['direction'] == 'BUY') else Side.ASK)
                self.place_market_order(self.symbol, action['size'], side)
            elif (action['type'] == 'LMT'):
                side = (Side.BID if (action['direction'] == 'BUY') else Side.ASK)
                self.place_limit_order(self.symbol, action['size'], side, action['limit_price'])
            elif (action['type'] == 'CCL_ALL'):
                self.cancel_all_orders()
            else:
                raise ValueError(f"Action Type {action['type']} is not supported")

    def update_raw_state(self) -> None:
        """Append a snapshot (market data buffer, internal data, volume data
        buffer) to the raw-state deque."""
        # Deep copies so later mutation of the live buffers cannot change
        # already-recorded snapshots.
        parsed_mkt_data_buffer = deepcopy(self.parsed_mkt_data_buffer)
        internal_data = self.get_internal_data()
        parsed_volume_data_buffer = deepcopy(self.parsed_volume_data_buffer)
        new = {'parsed_mkt_data': parsed_mkt_data_buffer, 'internal_data': internal_data, 'parsed_volume_data': parsed_volume_data_buffer}
        self.raw_state.append(new)

    def get_raw_state(self) -> Dict:
        """Return the raw-state deque (most recent snapshots)."""
        return self.raw_state

    def get_parsed_mkt_data(self, message: L2DataMsg) -> Dict[(str, Any)]:
        """Extract bids/asks/last transaction/exchange timestamp from an L2 message."""
        bids = message.bids
        asks = message.asks
        last_transaction = message.last_transaction
        exchange_ts = message.exchange_ts
        mkt_data = {'bids': bids, 'asks': asks, 'last_transaction': last_transaction, 'exchange_ts': exchange_ts}
        return mkt_data

    def get_parsed_volume_data(self, message: TransactedVolDataMsg) -> Dict[(str, Any)]:
        """Extract bid/ask/total transacted volume from a volume-data message."""
        last_transaction = message.last_transaction
        exchange_ts = message.exchange_ts
        bid_volume = message.bid_volume
        ask_volume = message.ask_volume
        total_volume = (bid_volume + ask_volume)
        volume_data = {'last_transaction': last_transaction, 'exchange_ts': exchange_ts, 'bid_volume': bid_volume, 'ask_volume': ask_volume, 'total_volume': total_volume}
        return volume_data

    def get_internal_data(self) -> Dict[(str, Any)]:
        """Collect the agent's own state (holdings, cash, fills, order status,
        times) into a dictionary used for the raw state."""
        holdings = self.get_holdings(self.symbol)
        cash = self.get_holdings('CASH')
        inter_wakeup_executed_orders = self.inter_wakeup_executed_orders
        episode_executed_orders = self.episode_executed_orders
        parsed_episode_executed_orders = self.parsed_episode_executed_orders
        parsed_inter_wakeup_executed_orders = self.parsed_inter_wakeup_executed_orders
        current_time = self.current_time
        order_status = self.order_status
        mkt_open = self.mkt_open
        mkt_close = self.mkt_close
        internal_data = {'holdings': holdings, 'cash': cash, 'inter_wakeup_executed_orders': inter_wakeup_executed_orders, 'episode_executed_orders': episode_executed_orders, 'parsed_episode_executed_orders': parsed_episode_executed_orders, 'parsed_inter_wakeup_executed_orders': parsed_inter_wakeup_executed_orders, 'starting_cash': self.starting_cash, 'current_time': current_time, 'order_status': order_status, 'mkt_open': mkt_open, 'mkt_close': mkt_close}
        return internal_data

    def order_executed(self, order: Order) -> None:
        """Record a fill in the per-step and per-episode buffers and update the
        tracked order status.

        Orders filled before an acceptance was recorded (e.g. market orders
        filled immediately) get status 'mkt_immediately_filled'.
        """
        super().order_executed(order)
        executed_qty = order.quantity
        executed_price = order.fill_price
        assert (executed_price is not None)
        order_id = order.order_id
        self.inter_wakeup_executed_orders.append(order)
        self.parsed_inter_wakeup_executed_orders.append((executed_qty, executed_price))
        self.episode_executed_orders.append(order)
        self.parsed_episode_executed_orders.append((executed_qty, executed_price))
        # Membership probe: has this order already been registered (accepted)?
        try:
            self.order_status[order_id]
            flag = True
        except KeyError:
            flag = False
        if flag:
            self.order_status[order_id]['executed_qty'] += executed_qty
            self.order_status[order_id]['active_qty'] -= executed_qty
            if (self.order_status[order_id]['active_qty'] <= 0):
                self.order_status[order_id]['status'] = 'executed'
        else:
            self.order_status[order_id] = {'status': 'mkt_immediately_filled', 'order': order, 'active_qty': 0, 'executed_qty': executed_qty, 'cancelled_qty': 0}

    def order_accepted(self, order: Order) -> None:
        """Register a newly accepted order as fully active."""
        super().order_accepted(order)
        self.order_status[order.order_id] = {'status': 'active', 'order': order, 'active_qty': order.quantity, 'executed_qty': 0, 'cancelled_qty': 0}

    def order_cancelled(self, order: Order) -> None:
        """Mark an order as cancelled with its remaining quantity."""
        super().order_cancelled(order)
        order_id = order.order_id
        quantity = order.quantity
        self.order_status[order_id] = {'status': 'cancelled', 'order': order, 'cancelled_qty': quantity}

    def new_inter_wakeup_reset(self) -> None:
        """Clear the per-step fill buffers (between two wakeups)."""
        self.inter_wakeup_executed_orders = []
        self.parsed_inter_wakeup_executed_orders = []

    def act(self, raw_state):
        """Hook implemented by subclasses: choose actions from a raw state."""
        raise NotImplementedError

    def new_step_reset(self) -> None:
        """Clear the per-step fill buffers at the start of a new gym step."""
        self.inter_wakeup_executed_orders = []
        self.parsed_inter_wakeup_executed_orders = []
class MomentumAgent(TradingAgent):
    """Simple momentum trading agent.

    Compares the moving average of the last 20 mid-price observations with the
    moving average of the last 50, and places a buy limit order if the 20-MA >=
    50-MA, or a sell limit order otherwise.
    """

    def __init__(self, id: int, symbol, starting_cash, name: Optional[str]=None, type: Optional[str]=None, random_state: Optional[np.random.RandomState]=None, min_size=20, max_size=50, wake_up_freq: NanosecondTime=str_to_ns('60s'), poisson_arrival=True, order_size_model=None, subscribe=False, log_orders=False) -> None:
        super().__init__(id, name, type, random_state, starting_cash, log_orders)
        self.symbol = symbol
        self.min_size = min_size
        self.max_size = max_size
        # Fixed random size when no order-size model is supplied; otherwise the
        # size is drawn per order in place_orders.
        self.size = (self.random_state.randint(self.min_size, self.max_size) if (order_size_model is None) else None)
        self.order_size_model = order_size_model
        self.wake_up_freq = wake_up_freq
        self.poisson_arrival = poisson_arrival
        # With Poisson arrivals, wake_up_freq is used as the mean of the
        # exponential inter-arrival distribution.
        if self.poisson_arrival:
            self.arrival_rate = self.wake_up_freq
        self.subscribe = subscribe
        self.subscription_requested = False
        self.mid_list: List[float] = []
        self.avg_20_list: List[float] = []
        self.avg_50_list: List[float] = []
        self.log_orders = log_orders
        self.state = 'AWAITING_WAKEUP'

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        """Standard kernel-start hook; no extra behavior beyond the base class."""
        super().kernel_starting(start_time)

    def wakeup(self, current_time: NanosecondTime) -> None:
        """Agent wakeup is determined by self.wake_up_freq.

        In subscription mode, registers a depth-1 L2 subscription once; in
        polling mode, queries the current spread.
        """
        can_trade = super().wakeup(current_time)
        if (self.subscribe and (not self.subscription_requested)):
            super().request_data_subscription(L2SubReqMsg(symbol=self.symbol, freq=int(10000000000.0), depth=1))
            self.subscription_requested = True
            self.state = 'AWAITING_MARKET_DATA'
        elif (can_trade and (not self.subscribe)):
            self.get_current_spread(self.symbol)
            self.state = 'AWAITING_SPREAD'

    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """Momentum agent actions are determined after obtaining the best bid
        and ask in the LOB (either from a spread query or from subscribed
        market data)."""
        super().receive_message(current_time, sender_id, message)
        if ((not self.subscribe) and (self.state == 'AWAITING_SPREAD') and isinstance(message, QuerySpreadResponseMsg)):
            (bid, _, ask, _) = self.get_known_bid_ask(self.symbol)
            self.place_orders(bid, ask)
            self.set_wakeup((current_time + self.get_wake_frequency()))
            self.state = 'AWAITING_WAKEUP'
        elif (self.subscribe and (self.state == 'AWAITING_MARKET_DATA') and isinstance(message, MarketDataMsg)):
            (bids, asks) = (self.known_bids[self.symbol], self.known_asks[self.symbol])
            if (bids and asks):
                self.place_orders(bids[0][0], asks[0][0])
            self.state = 'AWAITING_MARKET_DATA'

    def place_orders(self, bid: int, ask: int) -> None:
        """Momentum agent actions logic: update moving averages with the new
        mid price and place a crossing limit order based on the MA signal."""
        if (bid and ask):
            self.mid_list.append(((bid + ask) / 2))
            if (len(self.mid_list) > 20):
                self.avg_20_list.append(MomentumAgent.ma(self.mid_list, n=20)[(- 1)].round(2))
            if (len(self.mid_list) > 50):
                self.avg_50_list.append(MomentumAgent.ma(self.mid_list, n=50)[(- 1)].round(2))
            if ((len(self.avg_20_list) > 0) and (len(self.avg_50_list) > 0)):
                if (self.order_size_model is not None):
                    self.size = self.order_size_model.sample(random_state=self.random_state)
                if (self.size > 0):
                    # Buy at the ask / sell at the bid: crossing limit orders
                    # that are expected to fill immediately.
                    if (self.avg_20_list[(- 1)] >= self.avg_50_list[(- 1)]):
                        self.place_limit_order(self.symbol, quantity=self.size, side=Side.BID, limit_price=ask)
                    else:
                        self.place_limit_order(self.symbol, quantity=self.size, side=Side.ASK, limit_price=bid)

    def get_wake_frequency(self) -> NanosecondTime:
        """Return the next inter-wakeup delay: fixed, or exponential when
        poisson_arrival is enabled."""
        if (not self.poisson_arrival):
            return self.wake_up_freq
        else:
            delta_time = self.random_state.exponential(scale=self.arrival_rate)
            return int(round(delta_time))

    @staticmethod
    def ma(a, n=20):
        """Return the n-period simple moving average of sequence `a`
        (cumulative-sum formulation; output has len(a) - n + 1 entries)."""
        ret = np.cumsum(a, dtype=float)
        ret[n:] = (ret[n:] - ret[:(- n)])
        return (ret[(n - 1):] / n)
class ExchangeAgent(FinancialAgent):
    """
    The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp
    to open and close trading, a list of equity symbols for which it should create order
    books, a frequency at which to archive snapshots of its order books, a pipeline
    delay (in ns) for order activity, the exchange computation delay (in ns), the levels
    of order stream history to maintain per symbol (maintains all orders that led to the
    last N trades), whether to log all order activity to the agent log, and a random
    state object (already seeded) to use for stochasticity.
    """

    @dataclass
    class MetricTracker(ABC):
        # Per-symbol liquidity/volume statistics, populated at simulation end.
        total_time_no_liquidity_asks: int = 0
        total_time_no_liquidity_bids: int = 0
        pct_time_no_liquidity_asks: float = 0
        pct_time_no_liquidity_bids: float = 0
        total_exchanged_volume: int = 0
        # last observed trade price for the symbol
        last_trade: Optional[int] = 0

    @dataclass
    class BaseDataSubscription(ABC):
        """
        Base class for all types of data subscription registered with this agent.
        """

        agent_id: int
        last_update_ts: int

    @dataclass
    class FrequencyBasedSubscription(BaseDataSubscription, ABC):
        """
        Base class for all types of data subscription that are sent from this agent
        at a fixed, regular frequency.
        """

        freq: int

    @dataclass
    class L1DataSubscription(FrequencyBasedSubscription):
        pass

    @dataclass
    class L2DataSubscription(FrequencyBasedSubscription):
        depth: int

    @dataclass
    class L3DataSubscription(FrequencyBasedSubscription):
        depth: int

    @dataclass
    class TransactedVolDataSubscription(FrequencyBasedSubscription):
        lookback: str

    @dataclass
    class EventBasedSubscription(BaseDataSubscription, ABC):
        """
        Base class for all types of data subscription that are sent from this agent
        when triggered by an event or specific circumstance.
        """

        event_in_progress: bool

    @dataclass
    class BookImbalanceDataSubscription(EventBasedSubscription):
        # Configuration: threshold that triggers an imbalance event.
        min_imbalance: float
        # State of the currently running event (None when no event in progress).
        imbalance: Optional[float] = None
        side: Optional[Side] = None

    def __init__(
        self,
        id: int,
        mkt_open: NanosecondTime,
        mkt_close: NanosecondTime,
        symbols: List[str],
        name: Optional[str] = None,
        type: Optional[str] = None,
        random_state: Optional[np.random.RandomState] = None,
        book_logging: bool = True,
        book_log_depth: int = 10,
        pipeline_delay: int = 40000,
        computation_delay: int = 1,
        stream_history: int = 0,
        log_orders: bool = False,
        use_metric_tracker: bool = True,
    ) -> None:
        super().__init__(id, name, type, random_state)

        self.symbols = symbols
        # Do not request repeated wakeup calls; the only scheduled wakeup is at close.
        self.reschedule: bool = False
        self.mkt_open: NanosecondTime = mkt_open
        self.mkt_close: NanosecondTime = mkt_close
        # Delay (ns) applied to outbound order-activity notifications.
        self.pipeline_delay: int = pipeline_delay
        self.computation_delay: int = computation_delay
        self.stream_history: int = stream_history
        self.book_logging: bool = book_logging
        self.book_log_depth: int = book_log_depth
        self.log_orders: bool = log_orders
        # One order book per traded symbol.
        self.order_books: Dict[str, OrderBook] = {
            symbol: OrderBook(self, symbol) for symbol in symbols
        }
        # Metric tracking is optional; when disabled the attribute does not exist
        # and users of it must guard with hasattr (see kernel_terminating).
        if use_metric_tracker:
            self.metric_trackers: Dict[str, ExchangeAgent.MetricTracker] = {
                symbol: self.MetricTracker() for symbol in symbols
            }
        # Registered market-data subscriptions, keyed by symbol.
        self.data_subscriptions: DefaultDict[
            str, List[ExchangeAgent.BaseDataSubscription]
        ] = defaultdict(list)
        # Agent ids to notify with closing prices at market close.
        self.market_close_price_subscriptions: List[int] = []

    def kernel_initializing(self, kernel: "Kernel") -> None:
        """
        The exchange agent overrides this to obtain a reference to an oracle.

        This is needed to establish a "last trade price" at open (i.e. an opening
        price) in case agents query last trade before any simulated trades are made.
        This can probably go away once we code the opening cross auction.

        Arguments:
            kernel: The ABIDES kernel that this agent instance belongs to.
        """
        super().kernel_initializing(kernel)

        assert self.kernel is not None

        self.oracle = self.kernel.oracle

        # Seed each book's last trade with the oracle's opening price, when the
        # oracle supports it (not all oracles implement get_daily_open_price).
        for symbol in self.order_books:
            try:
                self.order_books[symbol].last_trade = self.oracle.get_daily_open_price(
                    symbol, self.mkt_open
                )
                logger.debug(
                    "Opening price for {} is {}".format(
                        symbol, self.order_books[symbol].last_trade
                    )
                )
            except AttributeError as e:
                logger.debug(str(e))

        # Single scheduled wakeup: market close.
        self.set_wakeup(self.mkt_close)

    def kernel_terminating(self) -> None:
        """
        The exchange agent overrides this to additionally log the full depth of its
        order books for the entire day.
        """
        super().kernel_terminating()

        # FIX: the "ABM" benchmark-symbol lookup was unguarded and raised
        # KeyError for exchanges configured without that symbol.
        if "ABM" in self.order_books:
            bid_volume, ask_volume = self.order_books["ABM"].get_transacted_volume(
                self.current_time - self.mkt_open
            )
            self.total_exchanged_volume = bid_volume + ask_volume

        # FIX: metric_trackers only exists when use_metric_tracker=True.
        if hasattr(self, "metric_trackers"):
            for symbol in self.symbols:
                self.analyse_order_book(symbol)
            for symbol in self.symbols:
                bid_volume, ask_volume = self.order_books[symbol].get_transacted_volume(
                    self.current_time - self.mkt_open
                )
                self.metric_trackers[symbol].total_exchanged_volume = (
                    bid_volume + ask_volume
                )
                self.metric_trackers[symbol].last_trade = self.order_books[
                    symbol
                ].last_trade

        if self.log_orders is None:
            return

        # If the oracle maintains a fundamental series, archive it to the log.
        if hasattr(self.oracle, "f_log"):
            for symbol in self.oracle.f_log:
                dfFund = pd.DataFrame(self.oracle.f_log[symbol])
                if not dfFund.empty:
                    dfFund.set_index("FundamentalTime", inplace=True)
                    self.write_log(dfFund, filename="fundamental_{}".format(symbol))
            logger.debug("Fundamental archival complete.")

    def wakeup(self, current_time: NanosecondTime):
        super().wakeup(current_time)

        # The only wakeup the exchange schedules is market close: broadcast the
        # closing prices to every agent that subscribed for them.
        if current_time >= self.mkt_close:
            message = MarketClosePriceMsg(
                {symbol: book.last_trade for symbol, book in self.order_books.items()}
            )
            for agent in self.market_close_price_subscriptions:
                self.send_message(agent, message)

    def receive_message(
        self, current_time: NanosecondTime, sender_id: int, message: Message
    ) -> None:
        """
        Arguments:
            current_time:
            sender_id:
            message:
        """
        super().receive_message(current_time, sender_id, message)

        # Unlike other agents, the exchange charges its computation delay even
        # when the message is discarded.
        self.set_computation_delay(self.computation_delay)

        # After market close, order-related messages are rejected; queries are
        # still serviced so agents can obtain final prices.
        if current_time > self.mkt_close:
            if isinstance(message, OrderMsg):
                if isinstance(message, ModifyOrderMsg):
                    logger.debug(
                        "{} received {}: OLD: {} NEW: {}".format(
                            self.name, message.type(), message.old_order, message.new_order
                        )
                    )
                else:
                    logger.debug(
                        "{} received {}: {}".format(
                            self.name, message.type(), message.order
                        )
                    )
                self.send_message(sender_id, MarketClosedMsg())
                return
            elif isinstance(message, QueryMsg):
                # Queries fall through to the handling below.
                pass
            else:
                logger.debug(
                    "{} received {}, discarded: market is closed.".format(
                        self.name, message.type()
                    )
                )
                self.send_message(sender_id, MarketClosedMsg())
                return

        if isinstance(message, OrderMsg):
            if self.log_orders:
                if isinstance(message, (ModifyOrderMsg, ReplaceOrderMsg)):
                    self.logEvent(
                        message.type(), message.new_order.to_dict(), deepcopy_event=False
                    )
                else:
                    self.logEvent(
                        message.type(), message.order.to_dict(), deepcopy_event=False
                    )
        else:
            self.logEvent(message.type(), message)

        if isinstance(message, MarketDataSubReqMsg):
            # Register or cancel a market-data subscription.
            if message.symbol not in self.order_books:
                return
            if message.cancel:
                logger.debug(
                    "{} received MarketDataSubscriptionCancellation request from agent {}".format(
                        self.name, sender_id
                    )
                )
                # FIX: iterate over a copy — the original removed entries from
                # the list it was iterating, which can skip subscriptions.
                # NOTE(review): data_sub and message belong to different class
                # hierarchies; confirm the __class__ comparison matches as intended.
                for data_sub in self.data_subscriptions[message.symbol][:]:
                    if (
                        data_sub.agent_id == sender_id
                        and data_sub.freq == message.freq
                        and data_sub.depth == message.depth
                        and data_sub.__class__ == message.__class__
                    ):
                        self.data_subscriptions[message.symbol].remove(data_sub)
            else:
                logger.debug(
                    "{} received MarketDataSubscriptionRequest request from agent {}".format(
                        self.name, sender_id
                    )
                )
                if isinstance(message, L1SubReqMsg):
                    sub: "ExchangeAgent.BaseDataSubscription" = self.L1DataSubscription(
                        sender_id, current_time, message.freq
                    )
                elif isinstance(message, L2SubReqMsg):
                    sub = self.L2DataSubscription(
                        sender_id, current_time, message.freq, message.depth
                    )
                elif isinstance(message, L3SubReqMsg):
                    sub = self.L3DataSubscription(
                        sender_id, current_time, message.freq, message.depth
                    )
                elif isinstance(message, TransactedVolSubReqMsg):
                    sub = self.TransactedVolDataSubscription(
                        sender_id, current_time, message.freq, message.lookback
                    )
                elif isinstance(message, BookImbalanceSubReqMsg):
                    sub = self.BookImbalanceDataSubscription(
                        sender_id, current_time, False, message.min_imbalance
                    )
                else:
                    raise Exception
                self.data_subscriptions[message.symbol].append(sub)

        if isinstance(message, MarketHoursRequestMsg):
            logger.debug(
                "{} received market hours request from agent {}".format(
                    self.name, sender_id
                )
            )
            # Market-hours requests are answered with no computation delay so
            # all agents can hard-code their schedules against the same times.
            self.set_computation_delay(0)
            self.send_message(sender_id, MarketHoursMsg(self.mkt_open, self.mkt_close))

        elif isinstance(message, MarketClosePriceRequestMsg):
            self.market_close_price_subscriptions.append(sender_id)

        elif isinstance(message, QueryLastTradeMsg):
            symbol = message.symbol
            if symbol not in self.order_books:
                warnings.warn(f"Last trade request discarded. Unknown symbol: {symbol}")
            else:
                logger.debug(
                    "{} received QUERY_LAST_TRADE ({}) request from agent {}".format(
                        self.name, symbol, sender_id
                    )
                )
                self.send_message(
                    sender_id,
                    QueryLastTradeResponseMsg(
                        symbol=symbol,
                        last_trade=self.order_books[symbol].last_trade,
                        mkt_closed=current_time > self.mkt_close,
                    ),
                )

        elif isinstance(message, QuerySpreadMsg):
            symbol = message.symbol
            depth = message.depth
            if symbol not in self.order_books:
                warnings.warn(
                    f"Bid-ask spread request discarded. Unknown symbol: {symbol}"
                )
            else:
                logger.debug(
                    "{} received QUERY_SPREAD ({}:{}) request from agent {}".format(
                        self.name, symbol, depth, sender_id
                    )
                )
                self.send_message(
                    sender_id,
                    QuerySpreadResponseMsg(
                        symbol=symbol,
                        depth=depth,
                        bids=self.order_books[symbol].get_l2_bid_data(depth),
                        asks=self.order_books[symbol].get_l2_ask_data(depth),
                        last_trade=self.order_books[symbol].last_trade,
                        mkt_closed=current_time > self.mkt_close,
                    ),
                )

        elif isinstance(message, QueryOrderStreamMsg):
            symbol = message.symbol
            length = message.length
            if symbol not in self.order_books:
                warnings.warn(
                    f"Order stream request discarded. Unknown symbol: {symbol}"
                )
            else:
                logger.debug(
                    "{} received QUERY_ORDER_STREAM ({}:{}) request from agent {}".format(
                        self.name, symbol, length, sender_id
                    )
                )
                # History element 0 is the in-progress trade, so return 1..length.
                self.send_message(
                    sender_id,
                    QueryOrderStreamResponseMsg(
                        symbol=symbol,
                        length=length,
                        orders=self.order_books[symbol].history[1 : length + 1],
                        mkt_closed=current_time > self.mkt_close,
                    ),
                )

        elif isinstance(message, QueryTransactedVolMsg):
            symbol = message.symbol
            lookback_period = message.lookback_period
            if symbol not in self.order_books:
                warnings.warn(
                    f"Order stream request discarded. Unknown symbol: {symbol}"
                )
            else:
                logger.debug(
                    "{} received QUERY_TRANSACTED_VOLUME ({}:{}) request from agent {}".format(
                        self.name, symbol, lookback_period, sender_id
                    )
                )
                bid_volume, ask_volume = self.order_books[symbol].get_transacted_volume(
                    lookback_period
                )
                self.send_message(
                    sender_id,
                    QueryTransactedVolResponseMsg(
                        symbol=symbol,
                        bid_volume=bid_volume,
                        ask_volume=ask_volume,
                        mkt_closed=current_time > self.mkt_close,
                    ),
                )

        elif isinstance(message, LimitOrderMsg):
            logger.debug("{} received LIMIT_ORDER: {}".format(self.name, message.order))
            if message.order.symbol not in self.order_books:
                warnings.warn(
                    f"Limit Order discarded. Unknown symbol: {message.order.symbol}"
                )
            else:
                # Hand the order to the book; deepcopy so the book's copy cannot
                # be mutated by the sending agent afterwards.
                self.order_books[message.order.symbol].handle_limit_order(
                    deepcopy(message.order)
                )
                self.publish_order_book_data()

        elif isinstance(message, MarketOrderMsg):
            logger.debug(
                "{} received MARKET_ORDER: {}".format(self.name, message.order)
            )
            if message.order.symbol not in self.order_books:
                warnings.warn(
                    f"Market Order discarded. Unknown symbol: {message.order.symbol}"
                )
            else:
                self.order_books[message.order.symbol].handle_market_order(
                    deepcopy(message.order)
                )
                self.publish_order_book_data()

        elif isinstance(message, CancelOrderMsg):
            tag = message.tag
            metadata = message.metadata
            logger.debug(
                "{} received CANCEL_ORDER: {}".format(self.name, message.order)
            )
            if message.order.symbol not in self.order_books:
                warnings.warn(
                    f"Cancellation request discarded. Unknown symbol: {message.order.symbol}"
                )
            else:
                self.order_books[message.order.symbol].cancel_order(
                    deepcopy(message.order), tag, metadata
                )
                self.publish_order_book_data()

        elif isinstance(message, PartialCancelOrderMsg):
            tag = message.tag
            metadata = message.metadata
            logger.debug(
                "{} received PARTIAL_CANCEL_ORDER: {}, new order: {}".format(
                    self.name, message.order, message.quantity
                )
            )
            if message.order.symbol not in self.order_books:
                warnings.warn(
                    f"Modification request discarded. Unknown symbol: {message.order.symbol}"
                )
            else:
                self.order_books[message.order.symbol].partial_cancel_order(
                    deepcopy(message.order), message.quantity, tag, metadata
                )
                self.publish_order_book_data()

        elif isinstance(message, ModifyOrderMsg):
            old_order = message.old_order
            new_order = message.new_order
            logger.debug(
                "{} received MODIFY_ORDER: {}, new order: {}".format(
                    self.name, old_order, new_order
                )
            )
            if old_order.symbol not in self.order_books:
                warnings.warn(
                    f"Modification request discarded. Unknown symbol: {old_order.symbol}"
                )
            else:
                self.order_books[old_order.symbol].modify_order(
                    deepcopy(old_order), deepcopy(new_order)
                )
                self.publish_order_book_data()

        elif isinstance(message, ReplaceOrderMsg):
            agent_id = message.agent_id
            order = message.old_order
            new_order = message.new_order
            logger.debug(
                "{} received REPLACE_ORDER: {}, new order: {}".format(
                    self.name, order, new_order
                )
            )
            if order.symbol not in self.order_books:
                warnings.warn(
                    f"Replacement request discarded. Unknown symbol: {order.symbol}"
                )
            else:
                self.order_books[order.symbol].replace_order(
                    agent_id, deepcopy(order), deepcopy(new_order)
                )
                self.publish_order_book_data()

    def publish_order_book_data(self) -> None:
        """
        The exchange agents sends an order book update to the agents using the
        subscription API if one of the following conditions are met:

        1) agent requests ALL order book updates (freq == 0)
        2) order book update timestamp > last time agent was updated AND the orderbook
           update time stamp is greater than the last agent update time stamp by a
           period more than that specified in the freq parameter.
        """
        for symbol, data_subs in self.data_subscriptions.items():
            book = self.order_books[symbol]
            for data_sub in data_subs:
                if isinstance(data_sub, self.FrequencyBasedSubscription):
                    messages = self.handle_frequency_based_data_subscription(
                        symbol, data_sub
                    )
                elif isinstance(data_sub, self.EventBasedSubscription):
                    messages = self.handle_event_based_data_subscription(
                        symbol, data_sub
                    )
                else:
                    raise Exception("Got invalid data subscription object")

                for message in messages:
                    self.send_message(data_sub.agent_id, message)

                # Only advance the subscriber's clock when something was sent.
                if len(messages) > 0:
                    data_sub.last_update_ts = book.last_update_ts

    def handle_frequency_based_data_subscription(
        self, symbol: str, data_sub: "ExchangeAgent.FrequencyBasedSubscription"
    ) -> List[Message]:
        """Build the outgoing messages (if due) for one frequency-based subscription.

        Returns an empty list when fewer than ``data_sub.freq`` ns have elapsed
        since the subscriber was last updated.
        """
        book = self.order_books[symbol]
        if (book.last_update_ts - data_sub.last_update_ts) < data_sub.freq:
            return []

        messages = []

        if isinstance(data_sub, self.L1DataSubscription):
            bid = book.get_l1_bid_data()
            ask = book.get_l1_ask_data()
            messages.append(
                L1DataMsg(symbol, book.last_trade, self.current_time, bid, ask)
            )
        elif isinstance(data_sub, self.L2DataSubscription):
            bids = book.get_l2_bid_data(data_sub.depth)
            asks = book.get_l2_ask_data(data_sub.depth)
            messages.append(
                L2DataMsg(symbol, book.last_trade, self.current_time, bids, asks)
            )
        elif isinstance(data_sub, self.L3DataSubscription):
            # FIX: the original contained a second, identical (unreachable)
            # L3DataSubscription branch here; it has been removed.
            bids = book.get_l3_bid_data(data_sub.depth)
            asks = book.get_l3_ask_data(data_sub.depth)
            messages.append(
                L3DataMsg(symbol, book.last_trade, self.current_time, bids, asks)
            )
        elif isinstance(data_sub, self.TransactedVolDataSubscription):
            bid_volume, ask_volume = book.get_transacted_volume(data_sub.lookback)
            messages.append(
                TransactedVolDataMsg(
                    symbol, book.last_trade, self.current_time, bid_volume, ask_volume
                )
            )
        else:
            raise Exception("Got invalid data subscription object")

        return messages

    def handle_event_based_data_subscription(
        self, symbol: str, data_sub: "ExchangeAgent.EventBasedSubscription"
    ) -> List[Message]:
        """Build the outgoing messages (if any) for one event-based subscription."""
        book = self.order_books[symbol]
        messages = []

        if isinstance(data_sub, self.BookImbalanceDataSubscription):
            imbalance, side = book.get_imbalance()
            event_in_progress = imbalance > data_sub.min_imbalance

            if data_sub.event_in_progress and event_in_progress:
                # Event ongoing: only act if the imbalance flipped side, in
                # which case finish the old event and start a new one.
                if side != data_sub.side:
                    messages.append(
                        BookImbalanceDataMsg(
                            symbol,
                            book.last_trade,
                            self.current_time,
                            MarketDataEventMsg.Stage.FINISH,
                            data_sub.imbalance,
                            data_sub.side,
                        )
                    )
                    data_sub.event_in_progress = True
                    data_sub.side = side
                    data_sub.imbalance = imbalance
                    messages.append(
                        BookImbalanceDataMsg(
                            symbol,
                            book.last_trade,
                            self.current_time,
                            MarketDataEventMsg.Stage.START,
                            imbalance,
                            side,
                        )
                    )
            elif data_sub.event_in_progress and not event_in_progress:
                # Event just ended.
                data_sub.event_in_progress = False
                data_sub.side = None
                data_sub.imbalance = None
                messages.append(
                    BookImbalanceDataMsg(
                        symbol,
                        book.last_trade,
                        self.current_time,
                        MarketDataEventMsg.Stage.FINISH,
                        imbalance,
                        side,
                    )
                )
            elif not data_sub.event_in_progress and event_in_progress:
                # Event just started.
                data_sub.event_in_progress = True
                data_sub.side = side
                data_sub.imbalance = imbalance
                messages.append(
                    BookImbalanceDataMsg(
                        symbol,
                        book.last_trade,
                        self.current_time,
                        MarketDataEventMsg.Stage.START,
                        imbalance,
                        side,
                    )
                )
            elif not data_sub.event_in_progress and not event_in_progress:
                # No event, nothing to report.
                pass
        else:
            raise Exception("Got invalid data subscription object")

        return messages

    def logL2style(self, symbol: str) -> Optional[Tuple[List, List]]:
        """Return (times, [[bids, asks], ...]) snapshots for a symbol, or None."""
        book = self.order_books[symbol]
        if not book.book_log2:
            return None
        tmp = book.book_log2
        times = []
        booktop = []
        for t in tmp:
            times.append(t["QuoteTime"])
            booktop.append([t["bids"], t["asks"]])
        return (times, booktop)

    def send_message(self, recipient_id: int, message: Message) -> None:
        """
        Arguments:
            recipient_id:
            message:
        """
        # Order activity notifications incur the parallel pipeline delay; all
        # other traffic (e.g. query responses) goes out with no added delay.
        if isinstance(message, (OrderAcceptedMsg, OrderCancelledMsg, OrderExecutedMsg)):
            super().send_message(recipient_id, message, delay=self.pipeline_delay)
            if self.log_orders:
                self.logEvent(message.type(), message.order.to_dict())
        else:
            super().send_message(recipient_id, message)

    def analyse_order_book(self, symbol: str):
        """Compute end-of-day liquidity metrics for one symbol's order book."""
        book = self.order_books[symbol].book_log2
        self.get_time_dropout(book, symbol)

    def get_time_dropout(self, book: List[Dict[str, Any]], symbol: str):
        """Measure how long the book had no liquidity on each side.

        Stores a fresh MetricTracker for the symbol with total and percentage
        time without bid/ask liquidity.
        """
        if len(book) == 0:
            return

        df = pd.DataFrame(book)

        total_time = df["QuoteTime"].iloc[-1] - df["QuoteTime"].iloc[0]
        # FIX: guard against a single-snapshot book (total_time == 0), which
        # previously raised ZeroDivisionError.
        if total_time == 0:
            return

        is_null_bids = False
        t_null_bids_first = 0
        T_null_bids = 0

        is_null_asks = False
        t_null_asks_first = 0
        T_null_asks = 0

        for _, row in df.iterrows():
            # Accumulate the span of each "no bids" episode.
            if len(row["bids"]) == 0 and not is_null_bids:
                t_null_bids_first = row["QuoteTime"]
                is_null_bids = True
            elif len(row["bids"]) != 0 and is_null_bids:
                T_null_bids += row["QuoteTime"] - t_null_bids_first
                is_null_bids = False

            # Accumulate the span of each "no asks" episode.
            if len(row["asks"]) == 0 and not is_null_asks:
                t_null_asks_first = row["QuoteTime"]
                is_null_asks = True
            elif len(row["asks"]) != 0 and is_null_asks:
                T_null_asks += row["QuoteTime"] - t_null_asks_first
                is_null_asks = False

        self.metric_trackers[symbol] = self.MetricTracker(
            total_time_no_liquidity_asks=T_null_asks / 1e9,
            total_time_no_liquidity_bids=T_null_bids / 1e9,
            pct_time_no_liquidity_asks=100 * T_null_asks / total_time,
            pct_time_no_liquidity_bids=100 * T_null_bids / total_time,
        )
class FinancialAgent(Agent):
    """
    Shared base class for every agent in a financial market simulation
    (traders, exchanges, etc.).

    It exists mainly because the generic ``Agent`` base should stay free of
    finance-specific functionality, while ``ExchangeAgent`` should not have to
    inherit from ``TradingAgent``. More common trader/exchange functionality
    may migrate here as it is identified.
    """

    def __init__(
        self,
        id: int,
        name: Optional[str] = None,
        type: Optional[str] = None,
        random_state: Optional[np.random.RandomState] = None,
    ) -> None:
        super().__init__(id, name, type, random_state)

    def dollarize(self, cents: Union[List[int], int]) -> Union[List[str], str]:
        """Convert an integer cent price (or a list of them) into a printable
        dollar string, delegating to the module-level helper of the same name."""
        return dollarize(cents)
class AdaptiveMarketMakerAgent(TradingAgent):
    """This class implements a modification of the Chakraborty-Kearns `ladder` market-making strategy, wherein the
    the size of order placed at each level is set as a fraction of measured transacted volume in the previous time
    period.

    Can skew orders to size of current inventory using beta parameter, whence beta == 0 represents inventory being
    ignored and beta == infinity represents all liquidity placed on one side of book.

    ADAPTIVE SPREAD: the market maker's spread can be set either as a fixed or value or can be adaptive,
    """

    def __init__(
        self,
        id: int,
        symbol: str,
        starting_cash: int,
        name: Optional[str] = None,
        type: Optional[str] = None,
        random_state: Optional[np.random.RandomState] = None,
        pov: float = 0.05,
        min_order_size: int = 20,
        window_size: float = 5,
        anchor: str = ANCHOR_MIDDLE_STR,
        num_ticks: int = 20,
        level_spacing: float = 0.5,
        wake_up_freq: NanosecondTime = 1_000_000_000,  # 1 second
        poisson_arrival: bool = True,
        subscribe: bool = False,
        subscribe_freq: float = 10e9,
        subscribe_num_levels: int = 1,
        cancel_limit_delay: int = 50,
        skew_beta=0,
        price_skew_param=None,
        spread_alpha: float = 0.85,
        backstop_quantity: int = 0,
        log_orders: bool = False,
        min_imbalance=0.9,
    ) -> None:
        super().__init__(id, name, type, random_state, starting_cash, log_orders)

        self.is_adaptive: bool = False
        self.symbol: str = symbol
        # Fraction of transacted volume this agent wants to capture per period.
        self.pov: float = pov
        self.min_order_size: int = min_order_size
        self.anchor: str = self.validate_anchor(anchor)
        # NOTE: validate_window_size may flip is_adaptive and reset the anchor.
        self.window_size: Optional[int] = self.validate_window_size(window_size)
        self.num_ticks: int = num_ticks
        self.level_spacing: float = level_spacing
        # FIX: annotation was `str`; this is a nanosecond duration.
        self.wake_up_freq: NanosecondTime = wake_up_freq
        self.poisson_arrival: bool = poisson_arrival
        if self.poisson_arrival:
            self.arrival_rate = self.wake_up_freq
        self.subscribe: bool = subscribe
        self.subscribe_freq: float = subscribe_freq
        self.min_imbalance = min_imbalance
        self.subscribe_num_levels: int = subscribe_num_levels
        # Delay applied only to order cancellation, not new orders.
        self.cancel_limit_delay: int = cancel_limit_delay
        self.skew_beta = skew_beta
        self.price_skew_param = price_skew_param
        self.spread_alpha: float = spread_alpha
        self.backstop_quantity: int = backstop_quantity
        # FIX: annotation was `float`; this is a flag.
        self.log_orders: bool = log_orders
        self.has_subscribed = False
        self.subscription_requested: bool = False
        self.state: Dict[str, bool] = self.initialise_state()
        self.buy_order_size: int = self.min_order_size
        self.sell_order_size: int = self.min_order_size
        self.last_mid: Optional[int] = None  # last observed mid price
        self.last_spread: float = INITIAL_SPREAD_VALUE  # last observed spread moving average
        self.tick_size: Optional[int] = (
            None if self.is_adaptive else ceil(self.last_spread * self.level_spacing)
        )
        self.LIQUIDITY_DROPOUT_WARNING: str = (
            f"Liquidity dropout for agent {self.name}."
        )
        self.two_side: bool = False if self.price_skew_param is None else True

    def initialise_state(self) -> Dict[str, bool]:
        """Returns variables that keep track of whether spread and transacted volume have been observed."""
        if self.subscribe:
            return {"AWAITING_MARKET_DATA": True, "AWAITING_TRANSACTED_VOLUME": True}
        else:
            return {"AWAITING_SPREAD": True, "AWAITING_TRANSACTED_VOLUME": True}

    def validate_anchor(self, anchor: str) -> str:
        """Checks that input parameter anchor takes allowed value, raises ``ValueError`` if not.

        Arguments:
            anchor:

        Returns:
            The anchor if validated.
        """
        if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR, ANCHOR_MIDDLE_STR]:
            raise ValueError(
                f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}`, `{ANCHOR_MIDDLE_STR}` or `{ANCHOR_TOP_STR}`"
            )
        else:
            return anchor

    def validate_window_size(self, window_size: float) -> Optional[int]:
        """Checks that input parameter window_size takes allowed value, raises ``ValueError`` if not.

        Arguments:
            window_size:

        Returns:
            The window_size if validated
        """
        try:
            # Fixed-spread mode: any value coercible to int.
            return int(window_size)
        except (ValueError, TypeError):
            # FIX: was a bare `except:`. Non-numeric input is only valid if it
            # is the "adaptive" keyword, which enables the adaptive spread.
            if str(window_size).lower() == "adaptive":
                self.is_adaptive = True
                self.anchor = ANCHOR_MIDDLE_STR
                return None
            else:
                raise ValueError(
                    f"Variable window_size must be of type int or string {ADAPTIVE_SPREAD_STR}."
                )

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        super().kernel_starting(start_time)

    def wakeup(self, current_time: NanosecondTime):
        """Agent wakeup is determined by self.wake_up_freq."""
        can_trade = super().wakeup(current_time)

        if not self.has_subscribed:
            # One-off: subscribe to book imbalance events.
            super().request_data_subscription(
                BookImbalanceSubReqMsg(
                    symbol=self.symbol, min_imbalance=self.min_imbalance
                )
            )
            self.last_time_book_order = current_time
            self.has_subscribed = True

        if self.subscribe and not self.subscription_requested:
            super().request_data_subscription(
                L2SubReqMsg(
                    symbol=self.symbol,
                    freq=self.subscribe_freq,
                    depth=self.subscribe_num_levels,
                )
            )
            self.subscription_requested = True
            self.get_transacted_volume(self.symbol, lookback_period=self.subscribe_freq)
            self.state = self.initialise_state()
        elif can_trade and not self.subscribe:
            self.cancel_all_orders()
            self.delay(self.cancel_limit_delay)
            self.get_current_spread(self.symbol, depth=self.subscribe_num_levels)
            self.get_transacted_volume(self.symbol, lookback_period=self.wake_up_freq)
            # FIX: the original called initialise_state() and discarded the
            # result, so the awaiting-flags were never reset here.
            self.state = self.initialise_state()

    def receive_message(
        self, current_time: NanosecondTime, sender_id: int, message: Message
    ) -> None:
        """Processes message from exchange.

        Main function is to update orders in orderbook relative to mid-price.

        Arguments:
            current_time: Simulation current time.
            message: Message received by self from ExchangeAgent.
        """
        super().receive_message(current_time, sender_id, message)

        mid = None
        if self.last_mid is not None:
            mid = self.last_mid

        if self.last_spread is not None and self.is_adaptive:
            self._adaptive_update_window_and_tick_size()

        if (
            isinstance(message, QueryTransactedVolResponseMsg)
            and self.state["AWAITING_TRANSACTED_VOLUME"] is True
        ):
            self.update_order_size()
            self.state["AWAITING_TRANSACTED_VOLUME"] = False

        if isinstance(message, BookImbalanceDataMsg):
            if message.stage == MarketDataEventMsg.Stage.START:
                # FIX: was `try: ... except: pass`, which silently swallowed all
                # errors. The only expected failure is having no mid price yet.
                if mid is not None:
                    try:
                        self.place_orders(mid)
                        self.last_time_book_order = current_time
                    except Exception:
                        logger.debug(
                            "{} failed to place orders on book imbalance", self.name
                        )

        if not self.subscribe:
            if (
                isinstance(message, QuerySpreadResponseMsg)
                and self.state["AWAITING_SPREAD"] is True
            ):
                bid, _, ask, _ = self.get_known_bid_ask(self.symbol)
                if bid and ask:
                    mid = int((ask + bid) / 2)
                    self.last_mid = mid
                    if self.is_adaptive:
                        spread = int(ask - bid)
                        self._adaptive_update_spread(spread)
                    self.state["AWAITING_SPREAD"] = False
                else:
                    logger.debug("SPREAD MISSING at time {}", current_time)
                    # Use the previous mid price and carry on.
                    self.state["AWAITING_SPREAD"] = False

            if (
                self.state["AWAITING_SPREAD"] is False
                and self.state["AWAITING_TRANSACTED_VOLUME"] is False
                and mid is not None
            ):
                self.place_orders(mid)
                self.state = self.initialise_state()
                self.set_wakeup(current_time + self.get_wake_frequency())
        else:
            if (
                isinstance(message, MarketDataMsg)
                and self.state["AWAITING_MARKET_DATA"] is True
            ):
                bid = (
                    self.known_bids[self.symbol][0][0]
                    if self.known_bids[self.symbol]
                    else None
                )
                ask = (
                    self.known_asks[self.symbol][0][0]
                    if self.known_asks[self.symbol]
                    else None
                )
                if bid and ask:
                    mid = int((ask + bid) / 2)
                    self.last_mid = mid
                    if self.is_adaptive:
                        spread = int(ask - bid)
                        self._adaptive_update_spread(spread)
                    self.state["AWAITING_MARKET_DATA"] = False
                else:
                    logger.debug("SPREAD MISSING at time {}", current_time)
                    self.state["AWAITING_MARKET_DATA"] = False

            # FIX: the original checked self.state["MARKET_DATA"], a key that is
            # never created (initialise_state uses "AWAITING_MARKET_DATA"),
            # causing a KeyError in subscription mode. Also guard mid against
            # None before placing.
            if (
                self.state["AWAITING_MARKET_DATA"] is False
                and self.state["AWAITING_TRANSACTED_VOLUME"] is False
                and mid is not None
            ):
                self.place_orders(mid)
                self.state = self.initialise_state()

    def _adaptive_update_spread(self, spread) -> None:
        """Update internal spread estimate with exponentially weighted moving average.

        Arguments:
            spread
        """
        spread_ewma = (
            self.spread_alpha * spread + (1 - self.spread_alpha) * self.last_spread
        )
        self.window_size = spread_ewma
        self.last_spread = spread_ewma

    def _adaptive_update_window_and_tick_size(self) -> None:
        """Update window size and tick size relative to internal spread estimate."""
        self.window_size = self.last_spread
        self.tick_size = round(self.level_spacing * self.window_size)
        if self.tick_size == 0:
            self.tick_size = 1

    def update_order_size(self) -> None:
        """Updates size of order to be placed."""
        buy_transacted_volume = self.transacted_volume[self.symbol][0]
        sell_transacted_volume = self.transacted_volume[self.symbol][1]
        total_transacted_volume = buy_transacted_volume + sell_transacted_volume

        qty = round(self.pov * total_transacted_volume)

        if self.skew_beta == 0:
            # Inventory ignored: same size both sides, floored at the minimum.
            self.buy_order_size = qty if qty >= self.min_order_size else self.min_order_size
            self.sell_order_size = qty if qty >= self.min_order_size else self.min_order_size
        else:
            # Skew toward reducing inventory: sigmoid of holdings decides the
            # sell/buy split of the total quantity.
            holdings = self.get_holdings(self.symbol)
            proportion_sell = sigmoid(holdings, self.skew_beta)
            sell_size = ceil(proportion_sell * qty)
            buy_size = floor((1 - proportion_sell) * qty)
            self.buy_order_size = (
                buy_size if buy_size >= self.min_order_size else self.min_order_size
            )
            self.sell_order_size = (
                sell_size if sell_size >= self.min_order_size else self.min_order_size
            )

    def compute_orders_to_place(self, mid: int) -> Tuple[List[int], List[int]]:
        """Given a mid price, computes the orders that need to be removed from
        orderbook, and adds these orders to bid and ask deques.

        Arguments:
            mid: Mid price.
        """
        if self.price_skew_param is None:
            mid_point = mid
        else:
            buy_transacted_volume = self.transacted_volume[self.symbol][0]
            sell_transacted_volume = self.transacted_volume[self.symbol][1]
            if buy_transacted_volume == 0 and sell_transacted_volume == 0:
                mid_point = mid
            else:
                # Shift the ladder toward recent trade flow: trade_imbalance is
                # in [-1, 1], scaled by price_skew_param.
                trade_imbalance = (
                    2 * buy_transacted_volume / (buy_transacted_volume + sell_transacted_volume)
                ) - 1
                mid_point = int(mid + (trade_imbalance * self.price_skew_param))

        if self.anchor == ANCHOR_MIDDLE_STR:
            highest_bid = int(mid_point) - floor(0.5 * self.window_size)
            lowest_ask = int(mid_point) + ceil(0.5 * self.window_size)
        elif self.anchor == ANCHOR_BOTTOM_STR:
            highest_bid = int(mid_point - 1)
            lowest_ask = int(mid_point + self.window_size)
        elif self.anchor == ANCHOR_TOP_STR:
            highest_bid = int(mid_point - self.window_size)
            lowest_ask = int(mid_point + 1)

        lowest_bid = highest_bid - (self.num_ticks - 1) * self.tick_size
        highest_ask = lowest_ask + (self.num_ticks - 1) * self.tick_size

        bids_to_place = [
            price
            for price in range(lowest_bid, highest_bid + self.tick_size, self.tick_size)
        ]
        asks_to_place = [
            price
            for price in range(lowest_ask, highest_ask + self.tick_size, self.tick_size)
        ]

        return bids_to_place, asks_to_place

    def place_orders(self, mid: int) -> None:
        """Given a mid-price, compute new orders that need to be placed, then
        send the orders to the Exchange.

        Arguments:
            mid: Mid price.
        """
        bid_orders, ask_orders = self.compute_orders_to_place(mid)

        orders = []

        if self.backstop_quantity != 0:
            # Oversized orders at the extreme levels to backstop liquidity.
            bid_price = bid_orders[0]
            logger.debug(
                "{}: Placing BUY limit order of size {} @ price {}",
                self.name,
                self.backstop_quantity,
                bid_price,
            )
            orders.append(
                self.create_limit_order(
                    self.symbol, self.backstop_quantity, Side.BID, bid_price
                )
            )
            bid_orders = bid_orders[1:]

            ask_price = ask_orders[-1]
            logger.debug(
                "{}: Placing SELL limit order of size {} @ price {}",
                self.name,
                self.backstop_quantity,
                ask_price,
            )
            orders.append(
                self.create_limit_order(
                    self.symbol, self.backstop_quantity, Side.ASK, ask_price
                )
            )
            ask_orders = ask_orders[:-1]

        for bid_price in bid_orders:
            logger.debug(
                "{}: Placing BUY limit order of size {} @ price {}",
                self.name,
                self.buy_order_size,
                bid_price,
            )
            orders.append(
                self.create_limit_order(
                    self.symbol, self.buy_order_size, Side.BID, bid_price
                )
            )

        for ask_price in ask_orders:
            logger.debug(
                "{}: Placing SELL limit order of size {} @ price {}",
                self.name,
                self.sell_order_size,
                ask_price,
            )
            orders.append(
                self.create_limit_order(
                    self.symbol, self.sell_order_size, Side.ASK, ask_price
                )
            )

        self.place_multiple_orders(orders)

    def get_wake_frequency(self) -> NanosecondTime:
        """Next inter-wakeup delay: fixed, or exponentially distributed."""
        if not self.poisson_arrival:
            return self.wake_up_freq
        else:
            delta_time = self.random_state.exponential(scale=self.arrival_rate)
            return int(round(delta_time))
class NoiseAgent(TradingAgent):
    """
    Noise agent implementing a simple strategy: the agent wakes up once and
    places a single order on one side of the book, chosen uniformly at random.
    """

    def __init__(
        self,
        id: int,
        name: Optional[str] = None,
        type: Optional[str] = None,
        random_state: Optional[np.random.RandomState] = None,
        symbol: str = 'IBM',
        starting_cash: int = 100000,
        log_orders: bool = False,
        order_size_model: Optional[OrderSizeGenerator] = None,
        wakeup_time: Optional[NanosecondTime] = None,
    ) -> None:
        """
        Arguments:
            id: Unique agent id.
            name: Optional human-readable agent name.
            type: Optional agent type string.
            random_state: Source of randomness for order side and size.
            symbol: The single symbol this agent trades.
            starting_cash: Initial cash holdings in cents.
            log_orders: Whether order events should be logged.
            order_size_model: Optional generator used to sample order sizes;
                when given, it overrides the fixed random size drawn below.
            wakeup_time: The one time at which this agent intends to trade.
        """
        super().__init__(id, name, type, random_state, starting_cash, log_orders)
        # The single scheduled trading time for this agent.
        self.wakeup_time: NanosecondTime = wakeup_time
        self.symbol: str = symbol
        # Becomes True once market hours are known and trading can begin.
        self.trading: bool = False
        # Simple state machine: AWAITING_WAKEUP -> AWAITING_SPREAD -> AWAITING_WAKEUP.
        self.state: str = 'AWAITING_WAKEUP'
        self.prev_wake_time: Optional[NanosecondTime] = None
        # Fixed random size in [20, 50) unless an order-size model is supplied,
        # in which case the size is sampled per order in placeOrder().
        self.size: Optional[int] = (self.random_state.randint(20, 50) if (order_size_model is None) else None)
        self.order_size_model = order_size_model

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        """Cache the kernel's oracle in addition to base-class startup."""
        super().kernel_starting(start_time)
        self.oracle = self.kernel.oracle

    def kernel_stopping(self) -> None:
        """Log a final valuation (normalized surplus) at simulation end."""
        super().kernel_stopping()
        try:
            (bid, bid_vol, ask, ask_vol) = self.get_known_bid_ask(self.symbol)
        except KeyError:
            # No quote was ever cached for the symbol: report starting cash.
            self.logEvent('FINAL_VALUATION', self.starting_cash, True)
        else:
            # Holdings rounded to the nearest hundred shares, in round lots.
            H = int((round(self.get_holdings(self.symbol), (- 2)) / 100))
            if (bid and ask):
                # Use the mid-quote as the terminal valuation price.
                rT = (int((bid + ask)) / 2)
            else:
                rT = self.last_trade[self.symbol]
            # Surplus = terminal value of holdings plus realized cash gain,
            # normalized by starting cash.
            surplus = (rT * H)
            logger.debug('Surplus after holdings: {}', surplus)
            surplus += (self.holdings['CASH'] - self.starting_cash)
            surplus = (float(surplus) / self.starting_cash)
            self.logEvent('FINAL_VALUATION', surplus, True)
            logger.debug('{} final report. Holdings: {}, end cash: {}, start cash: {}, final fundamental: {}, surplus: {}', self.name, H, self.holdings['CASH'], self.starting_cash, rT, surplus)

    def wakeup(self, current_time: NanosecondTime) -> None:
        """
        Fires once market hours are known; requests the current spread when the
        scheduled wakeup_time has been reached, otherwise re-schedules itself.

        Arguments:
            current_time: The simulation time of this wakeup.
        """
        super().wakeup(current_time)
        self.state = 'INACTIVE'
        if ((not self.mkt_open) or (not self.mkt_close)):
            # Market hours not yet received; base class has requested them.
            return
        elif (not self.trading):
            self.trading = True
            logger.debug('{} is ready to start trading now.', self.name)
        if (self.mkt_closed and (self.symbol in self.daily_close_price)):
            # Market closed and the close price is already known: nothing to do.
            return
        if (self.wakeup_time > current_time):
            # Too early: sleep until the scheduled trading time.
            self.set_wakeup(self.wakeup_time)
            return
        if (self.mkt_closed and (self.symbol not in self.daily_close_price)):
            # Market closed but close price unknown: query once more to learn it.
            self.get_current_spread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return
        if (type(self) == NoiseAgent):
            self.get_current_spread(self.symbol)
            self.state = 'AWAITING_SPREAD'
        else:
            # Subclasses take over from here.
            self.state = 'ACTIVE'

    def placeOrder(self) -> None:
        """Place a single limit order, buying at the ask or selling at the bid
        with equal probability."""
        # randint(0, 2) draws 0 (sell) or 1 (buy); high bound is exclusive.
        buy_indicator = self.random_state.randint(0, (1 + 1))
        (bid, bid_vol, ask, ask_vol) = self.get_known_bid_ask(self.symbol)
        if (self.order_size_model is not None):
            self.size = self.order_size_model.sample(random_state=self.random_state)
        if (self.size > 0):
            # Cross the spread: a buy is priced at the ask, a sell at the bid,
            # so the order executes immediately when the quote is still valid.
            if ((buy_indicator == 1) and ask):
                self.place_limit_order(self.symbol, self.size, Side.BID, ask)
            elif ((not buy_indicator) and bid):
                self.place_limit_order(self.symbol, self.size, Side.ASK, bid)

    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """
        Place the order as soon as the awaited spread response arrives.

        Arguments:
            current_time: The simulation time the message was received.
            sender_id: Id of the sending agent.
            message: The message contents.
        """
        super().receive_message(current_time, sender_id, message)
        if (self.state == 'AWAITING_SPREAD'):
            if isinstance(message, QuerySpreadResponseMsg):
                if self.mkt_closed:
                    return
                self.placeOrder()
                self.state = 'AWAITING_WAKEUP'

    def get_wake_frequency(self) -> NanosecondTime:
        # Small random offset so agents do not all wake at the same instant.
        return self.random_state.randint(low=0, high=100)
class TradingAgent(FinancialAgent):
    """
    The TradingAgent class (via FinancialAgent, via Agent) is intended as the
    base class for all trading agents (i.e. not things like exchanges) in a
    market simulation.

    It handles a lot of messaging (inbound and outbound) and state maintenance
    automatically, so subclasses can focus just on implementing a strategy without
    too much bookkeeping.
    """

    def __init__(
        self,
        id: int,
        name: Optional[str] = None,
        type: Optional[str] = None,
        random_state: Optional[np.random.RandomState] = None,
        starting_cash: int = 100000,
        log_orders: bool = False,
    ) -> None:
        """
        Arguments:
            id: Unique agent id.
            name: Optional human-readable agent name.
            type: Optional agent type string.
            random_state: Source of randomness for the agent.
            starting_cash: Initial cash holdings in cents.
            log_orders: Whether order lifecycle events should be logged.
        """
        super().__init__(id, name, type, random_state)
        # Market hours are unknown until the exchange responds to a
        # MarketHoursRequestMsg (see wakeup / receive_message).
        self.mkt_open: Optional[NanosecondTime] = None
        self.mkt_close: Optional[NanosecondTime] = None
        self.log_orders: bool = log_orders
        if (log_orders is None):
            # Explicit None disables both order logging and file logging.
            self.log_orders = False
            self.log_to_file = False
        self.starting_cash: int = starting_cash
        # Sentinel "prices" used to express marketable orders.
        self.MKT_BUY = sys.maxsize
        self.MKT_SELL = 0
        # Holdings by symbol; 'CASH' is tracked as just another key, in cents.
        self.holdings: Dict[str, int] = {'CASH': starting_cash}
        # Open orders by order_id.
        self.orders: Dict[int, Order] = {}
        # Last known trade price per symbol.
        self.last_trade: Dict[str, int] = {}
        # Exchange timestamp of the latest market-data update per symbol.
        self.exchange_ts: Dict[str, NanosecondTime] = {}
        # When the market closes, the last trade is recorded here per symbol.
        self.daily_close_price: Dict[str, int] = {}
        self.nav_diff: int = 0
        self.basket_size: int = 0
        # Cached order-book snapshots per symbol (from queries/subscriptions).
        self.known_bids: Dict = {}
        self.known_asks: Dict = {}
        # Recent order stream per symbol (see query_order_stream).
        self.stream_history: Dict[str, Any] = {}
        # (bid_volume, ask_volume) per symbol (see query_transacted_volume).
        self.transacted_volume: Dict = {}
        self.executed_orders: List = []
        self.first_wake: bool = True
        self.mkt_closed: bool = False

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        """
        Arguments:
            start_time: The time that the simulation started.
        """
        assert (self.kernel is not None)
        self.logEvent('STARTING_CASH', self.starting_cash, True)
        # All trading agents talk to the first ExchangeAgent found by the kernel.
        self.exchange_id: int = self.kernel.find_agents_by_type(ExchangeAgent)[0]
        logger.debug(f'Agent {self.id} requested agent of type Agent.ExchangeAgent. Given Agent ID: {self.exchange_id}')
        super().kernel_starting(start_time)

    def kernel_stopping(self) -> None:
        """Log final holdings and aggregate this agent's gain into the kernel's
        per-agent-type summary statistics."""
        super().kernel_stopping()
        assert (self.kernel is not None)
        self.logEvent('FINAL_HOLDINGS', self.fmt_holdings(self.holdings), deepcopy_event=False)
        self.logEvent('FINAL_CASH_POSITION', self.holdings['CASH'], True)
        cash = self.mark_to_market(self.holdings)
        self.logEvent('ENDING_CASH', cash, True)
        logger.debug('Final holdings for {}: {}. Marked to market: {}'.format(self.name, self.fmt_holdings(self.holdings), cash))
        mytype = self.type
        gain = (cash - self.starting_cash)
        # NOTE(review): despite the name, mean_result_by_agent_type accumulates
        # the SUM of gains here; presumably the kernel divides by
        # agent_count_by_type later — confirm against the kernel implementation.
        if (mytype in self.kernel.mean_result_by_agent_type):
            self.kernel.mean_result_by_agent_type[mytype] += gain
            self.kernel.agent_count_by_type[mytype] += 1
        else:
            self.kernel.mean_result_by_agent_type[mytype] = gain
            self.kernel.agent_count_by_type[mytype] = 1

    def wakeup(self, current_time: NanosecondTime) -> bool:
        """
        Arguments:
            current_time: The time that this agent was woken up by the kernel.

        Returns:
            For the sake of subclasses, TradingAgent now returns a boolean
            indicating whether the agent is "ready to trade" -- has it received
            the market open and closed times, and is the market not already closed.
        """
        super().wakeup(current_time)
        if self.first_wake:
            # Log initial holdings and ask the exchange for close prices once.
            self.logEvent('HOLDINGS_UPDATED', self.holdings)
            self.first_wake = False
            self.send_message(self.exchange_id, MarketClosePriceRequestMsg())
        if (self.mkt_open is None):
            # Market hours are not yet known; request them from the exchange.
            self.send_message(self.exchange_id, MarketHoursRequestMsg())
        # NOTE(review): with `and`, this can evaluate to None (not strictly bool)
        # when mkt_open/mkt_close are unset — truthiness is what callers rely on.
        return ((self.mkt_open and self.mkt_close) and (not self.mkt_closed))

    def request_data_subscription(self, subscription_message: MarketDataSubReqMsg) -> None:
        """
        Used by any Trading Agent subclass to create a subscription to market data from
        the Exchange Agent.

        Arguments:
            subscription_message: An instance of a MarketDataSubReqMessage.
        """
        subscription_message.cancel = False
        self.send_message(recipient_id=self.exchange_id, message=subscription_message)

    def cancel_data_subscription(self, subscription_message: MarketDataSubReqMsg) -> None:
        """
        Used by any Trading Agent subclass to cancel subscription to market data from
        the Exchange Agent.

        Arguments:
            subscription_message: An instance of a MarketDataSubReqMessage.
        """
        subscription_message.cancel = True
        self.send_message(recipient_id=self.exchange_id, message=subscription_message)

    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """
        Dispatches every inbound message type to the corresponding handler
        method and schedules the first wakeup once market hours become known.

        Arguments:
            current_time: The time that this agent received the message.
            sender_id: The ID of the agent who sent the message.
            message: The message contents.
        """
        assert (self.kernel is not None)
        super().receive_message(current_time, sender_id, message)
        # Remember whether market hours were already known, so we can detect
        # the transition and schedule the first strategy wakeup below.
        had_mkt_hours = ((self.mkt_open is not None) and (self.mkt_close is not None))
        if isinstance(message, MarketHoursMsg):
            self.mkt_open = message.mkt_open
            self.mkt_close = message.mkt_close
            logger.debug('Recorded market open: {}'.format(fmt_ts(self.mkt_open)))
            logger.debug('Recorded market close: {}'.format(fmt_ts(self.mkt_close)))
        elif isinstance(message, MarketClosePriceMsg):
            # Used to marked to market agent's portfolio at the end of day.
            for (symbol, close_price) in message.close_prices.items():
                self.last_trade[symbol] = close_price
        elif isinstance(message, MarketClosedMsg):
            self.market_closed()
        elif isinstance(message, OrderExecutedMsg):
            self.order_executed(message.order)
        elif isinstance(message, OrderAcceptedMsg):
            self.order_accepted(message.order)
        elif isinstance(message, OrderCancelledMsg):
            self.order_cancelled(message.order)
        elif isinstance(message, OrderPartialCancelledMsg):
            self.order_partial_cancelled(message.new_order)
        elif isinstance(message, OrderModifiedMsg):
            self.order_modified(message.new_order)
        elif isinstance(message, OrderReplacedMsg):
            self.order_replaced(message.old_order, message.new_order)
        elif isinstance(message, QueryLastTradeResponseMsg):
            # Query responses also carry the market-closed flag.
            if message.mkt_closed:
                self.mkt_closed = True
            self.query_last_trade(message.symbol, message.last_trade)
        elif isinstance(message, QuerySpreadResponseMsg):
            if message.mkt_closed:
                self.mkt_closed = True
            self.query_spread(message.symbol, message.last_trade, message.bids, message.asks, '')
        elif isinstance(message, QueryOrderStreamResponseMsg):
            if message.mkt_closed:
                self.mkt_closed = True
            self.query_order_stream(message.symbol, message.orders)
        elif isinstance(message, QueryTransactedVolResponseMsg):
            if message.mkt_closed:
                self.mkt_closed = True
            self.query_transacted_volume(message.symbol, message.bid_volume, message.ask_volume)
        elif isinstance(message, MarketDataMsg):
            self.handle_market_data(message)
        # Once market hours first become known, schedule the initial wakeup
        # shortly after the open (offset chosen by the subclass).
        have_mkt_hours = ((self.mkt_open is not None) and (self.mkt_close is not None))
        if (have_mkt_hours and (not had_mkt_hours)):
            ns_offset = self.get_wake_frequency()
            self.set_wakeup((self.mkt_open + ns_offset))

    def get_last_trade(self, symbol: str) -> None:
        """
        Used by any Trading Agent subclass to query the last trade price for a symbol.

        This activity is not logged.

        Arguments:
            symbol: The symbol to query.
        """
        self.send_message(self.exchange_id, QueryLastTradeMsg(symbol))

    def get_current_spread(self, symbol: str, depth: int = 1) -> None:
        """
        Used by any Trading Agent subclass to query the current spread for a symbol.

        This activity is not logged.

        Arguments:
            symbol: The symbol to query.
            depth: Number of book levels requested.
        """
        self.send_message(self.exchange_id, QuerySpreadMsg(symbol, depth))

    def get_order_stream(self, symbol: str, length: int = 1) -> None:
        """
        Used by any Trading Agent subclass to query the recent order stream for a symbol.

        Arguments:
            symbol: The symbol to query.
            length: Number of recent orders requested.
        """
        self.send_message(self.exchange_id, QueryOrderStreamMsg(symbol, length))

    def get_transacted_volume(self, symbol: str, lookback_period: str = '10min') -> None:
        """
        Used by any trading agent subclass to query the total transacted volume in a
        given lookback period.

        Arguments:
            symbol: The symbol to query.
            lookback_period: The length of time to consider when calculating the volume.
        """
        self.send_message(self.exchange_id, QueryTransactedVolMsg(symbol, lookback_period))

    def create_limit_order(
        self,
        symbol: str,
        quantity: int,
        side: Side,
        limit_price: int,
        order_id: Optional[int] = None,
        is_hidden: bool = False,
        is_price_to_comply: bool = False,
        insert_by_id: bool = False,
        is_post_only: bool = False,
        ignore_risk: bool = True,
        tag: Any = None,
    ) -> LimitOrder:
        """
        Used by any Trading Agent subclass to create a limit order.

        Arguments:
            symbol: A valid symbol.
            quantity: Positive share quantity.
            side: Side.BID or Side.ASK.
            limit_price: Price in cents.
            order_id: An optional order id (otherwise global autoincrement is used).
            is_hidden:
            is_price_to_comply:
            insert_by_id:
            is_post_only:
            ignore_risk: Whether cash or risk limits should be enforced or ignored for
                the order.
            tag:

        Returns:
            The created LimitOrder, or None when quantity is zero or the order
            would violate the at-risk constraint (with ignore_risk=False).
        """
        order = LimitOrder(agent_id=self.id, time_placed=self.current_time, symbol=symbol, quantity=quantity, side=side, limit_price=limit_price, is_hidden=is_hidden, is_price_to_comply=is_price_to_comply, insert_by_id=insert_by_id, is_post_only=is_post_only, order_id=order_id, tag=tag)
        if (quantity > 0):
            # Simulate the post-fill holdings to evaluate the risk constraint.
            new_holdings = self.holdings.copy()
            q = (order.quantity if order.side.is_bid() else (- order.quantity))
            if (order.symbol in new_holdings):
                new_holdings[order.symbol] += q
            else:
                new_holdings[order.symbol] = q
            if (not ignore_risk):
                # Compare the at-risk (non-cash) value before and after the order.
                at_risk = (self.mark_to_market(self.holdings) - self.holdings['CASH'])
                new_at_risk = (self.mark_to_market(new_holdings) - new_holdings['CASH'])
                # Reject orders that INCREASE at-risk value beyond starting cash;
                # risk-reducing orders are always allowed.
                if ((new_at_risk > at_risk) and (new_at_risk > self.starting_cash)):
                    logger.debug('TradingAgent ignored limit order due to at-risk constraints: {}\n{}'.format(order, self.fmt_holdings(self.holdings)))
                    return
            return order
        else:
            warnings.warn(f'TradingAgent ignored limit order of quantity zero: {order}')

    def place_limit_order(
        self,
        symbol: str,
        quantity: int,
        side: Side,
        limit_price: int,
        order_id: Optional[int] = None,
        is_hidden: bool = False,
        is_price_to_comply: bool = False,
        insert_by_id: bool = False,
        is_post_only: bool = False,
        ignore_risk: bool = True,
        tag: Any = None,
    ) -> None:
        """
        Used by any Trading Agent subclass to place a limit order.

        Arguments:
            symbol: A valid symbol.
            quantity: Positive share quantity.
            side: Side.BID or Side.ASK.
            limit_price: Price in cents.
            order_id: An optional order id (otherwise global autoincrement is used).
            is_hidden:
            is_price_to_comply:
            insert_by_id:
            is_post_only:
            ignore_risk: Whether cash or risk limits should be enforced or ignored for
                the order.
            tag:
        """
        order = self.create_limit_order(symbol, quantity, side, limit_price, order_id, is_hidden, is_price_to_comply, insert_by_id, is_post_only, ignore_risk, tag)
        if (order is not None):
            # Keep a private copy so exchange-side mutations cannot alter it.
            self.orders[order.order_id] = deepcopy(order)
            self.send_message(self.exchange_id, LimitOrderMsg(order))
            if self.log_orders:
                self.logEvent('ORDER_SUBMITTED', order.to_dict(), deepcopy_event=False)

    def place_market_order(
        self,
        symbol: str,
        quantity: int,
        side: Side,
        order_id: Optional[int] = None,
        ignore_risk: bool = True,
        tag: Any = None,
    ) -> None:
        """
        Used by any Trading Agent subclass to place a market order.

        The market order is created as multiple limit orders crossing the spread
        walking the book until all the quantities are matched.

        Arguments:
            symbol: Name of the stock traded.
            quantity: Order quantity.
            side: Side.BID or Side.ASK.
            order_id: Order ID for market replay.
            ignore_risk: Whether cash or risk limits should be enforced or ignored for
                the order.
            tag:
        """
        order = MarketOrder(self.id, self.current_time, symbol, quantity, side, order_id, tag)
        if (quantity > 0):
            # Simulate the post-fill holdings to evaluate the risk constraint.
            new_holdings = self.holdings.copy()
            q = (order.quantity if order.side.is_bid() else (- order.quantity))
            if (order.symbol in new_holdings):
                new_holdings[order.symbol] += q
            else:
                new_holdings[order.symbol] = q
            if (not ignore_risk):
                at_risk = (self.mark_to_market(self.holdings) - self.holdings['CASH'])
                new_at_risk = (self.mark_to_market(new_holdings) - new_holdings['CASH'])
                if ((new_at_risk > at_risk) and (new_at_risk > self.starting_cash)):
                    logger.debug('TradingAgent ignored market order due to at-risk constraints: {}\n{}'.format(order, self.fmt_holdings(self.holdings)))
                    return
            self.orders[order.order_id] = deepcopy(order)
            self.send_message(self.exchange_id, MarketOrderMsg(order))
            if self.log_orders:
                self.logEvent('ORDER_SUBMITTED', order.to_dict(), deepcopy_event=False)
        else:
            # NOTE(review): '{}' is never formatted here and `order` is passed as
            # warnings.warn's second (category) argument — likely a bug; compare
            # the f-string used in create_limit_order.
            warnings.warn('TradingAgent ignored market order of quantity zero: {}', order)

    def place_multiple_orders(self, orders: List[Union[LimitOrder, MarketOrder]]) -> None:
        """
        Used by any Trading Agent subclass to place multiple orders at the same time.

        Arguments:
            orders: A list of Orders to place with the exchange as a single batch.
        """
        messages = []
        for order in orders:
            if isinstance(order, LimitOrder):
                messages.append(LimitOrderMsg(order))
            elif isinstance(order, MarketOrder):
                messages.append(MarketOrderMsg(order))
            else:
                raise Exception('Expected LimitOrder or MarketOrder')
            self.orders[order.order_id] = deepcopy(order)
            if self.log_orders:
                self.logEvent('ORDER_SUBMITTED', order.to_dict(), deepcopy_event=False)
        if (len(messages) > 0):
            self.send_message_batch(self.exchange_id, messages)

    # NOTE(review): `metadata: dict = {}` is a mutable default argument shared
    # across calls (here and in partial_cancel_order) — safe only if callees
    # never mutate it; consider `None` + per-call dict.
    def cancel_order(self, order: LimitOrder, tag: Optional[str] = None, metadata: dict = {}) -> None:
        """
        Used by derived classes of TradingAgent to cancel a limit order.

        The order must currently appear in the agent's open orders list.

        Arguments:
            order: The limit order to cancel.
            tag:
            metadata:
        """
        if isinstance(order, LimitOrder):
            self.send_message(self.exchange_id, CancelOrderMsg(order, tag, metadata))
            if self.log_orders:
                self.logEvent('CANCEL_SUBMITTED', order.to_dict(), deepcopy_event=False)
        else:
            warnings.warn(f'Order {order} of type, {type(order)} cannot be cancelled')

    def cancel_all_orders(self):
        """
        Cancels all current limit orders held by this agent.
        """
        for order in self.orders.values():
            if isinstance(order, LimitOrder):
                self.cancel_order(order)

    def partial_cancel_order(self, order: LimitOrder, quantity: int, tag: Optional[str] = None, metadata: dict = {}) -> None:
        """
        Used by any Trading Agent subclass to modify any existing limit order.

        The order must currently appear in the agent's open orders list.

        Arguments:
            order: The limit order to partially cancel.
            quantity:
            tag:
            metadata:
        """
        self.send_message(self.exchange_id, PartialCancelOrderMsg(order, quantity, tag, metadata))
        if self.log_orders:
            self.logEvent('CANCEL_PARTIAL_ORDER', order.to_dict(), deepcopy_event=False)

    def modify_order(self, order: LimitOrder, new_order: LimitOrder) -> None:
        """
        Used by any Trading Agent subclass to modify any existing limit order.

        The order must currently appear in the agent's open orders list. Some
        additional tests might be useful here to ensure the old and new orders are
        the same in some way.

        Arguments:
            order: The existing limit order.
            new_order: The limit order to update the existing order with.
        """
        self.send_message(self.exchange_id, ModifyOrderMsg(order, new_order))
        if self.log_orders:
            self.logEvent('MODIFY_ORDER', order.to_dict(), deepcopy_event=False)

    def replace_order(self, order: LimitOrder, new_order: LimitOrder) -> None:
        """
        Used by any Trading Agent subclass to replace any existing limit order.

        The order must currently appear in the agent's open orders list. Some
        additional tests might be useful here to ensure the old and new orders are
        the same in some way.

        Arguments:
            order: The existing limit order.
            new_order: The new limit order to replace the existing order with.
        """
        self.send_message(self.exchange_id, ReplaceOrderMsg(self.id, order, new_order))
        if self.log_orders:
            self.logEvent('REPLACE_ORDER', order.to_dict(), deepcopy_event=False)

    def order_executed(self, order: Order) -> None:
        """
        Handles OrderExecuted messages from an exchange agent.

        Subclasses may wish to extend, but should still call parent method for basic
        portfolio/returns tracking.

        Arguments:
            order: The order that has been executed by the exchange.
        """
        logger.debug(f'Received notification of execution for: {order}')
        if self.log_orders:
            self.logEvent('ORDER_EXECUTED', order.to_dict(), deepcopy_event=False)
        # Adjust holdings: a bid fill adds shares, an ask fill removes them.
        qty = (order.quantity if order.side.is_bid() else ((- 1) * order.quantity))
        sym = order.symbol
        if (sym in self.holdings):
            self.holdings[sym] += qty
        else:
            self.holdings[sym] = qty
        if (self.holdings[sym] == 0):
            # Drop flat positions from the holdings map.
            del self.holdings[sym]
        # Cash moves opposite to shares.
        self.holdings['CASH'] -= (qty * order.fill_price)
        # Shrink or remove the tracked open order by the executed quantity.
        if (order.order_id in self.orders):
            o = self.orders[order.order_id]
            if (order.quantity >= o.quantity):
                del self.orders[order.order_id]
            else:
                o.quantity -= order.quantity
        else:
            warnings.warn(f'Execution received for order not in orders list: {order}')
        logger.debug(f'After order execution, agent open orders: {self.orders}')
        self.logEvent('HOLDINGS_UPDATED', self.holdings)

    def order_accepted(self, order: LimitOrder) -> None:
        """
        Handles OrderAccepted messages from an exchange agent.

        Subclasses may wish to extend.

        Arguments:
            order: The order that has been accepted from the exchange.
        """
        logger.debug(f'Received notification of acceptance for: {order}')
        if self.log_orders:
            self.logEvent('ORDER_ACCEPTED', order.to_dict(), deepcopy_event=False)

    def order_cancelled(self, order: LimitOrder) -> None:
        """
        Handles OrderCancelled messages from an exchange agent.

        Subclasses may wish to extend.

        Arguments:
            order: The order that has been cancelled by the exchange.
        """
        logger.debug(f'Received notification of cancellation for: {order}')
        if self.log_orders:
            self.logEvent('ORDER_CANCELLED', order.to_dict(), deepcopy_event=False)
        if (order.order_id in self.orders):
            del self.orders[order.order_id]
        else:
            warnings.warn(f'Cancellation received for order not in orders list: {order}')

    def order_partial_cancelled(self, order: LimitOrder) -> None:
        """
        Handles OrderCancelled messages from an exchange agent.

        Subclasses may wish to extend.

        Arguments:
            order: The order that has been partially cancelled by the exchange.
        """
        logger.debug(f'Received notification of partial cancellation for: {order}')
        if self.log_orders:
            self.logEvent('PARTIAL_CANCELLED', order.to_dict())
        if (order.order_id in self.orders):
            # Replace the tracked order with its reduced-quantity version.
            self.orders[order.order_id] = order
        else:
            warnings.warn(f'partial cancellation received for order not in orders list: {order}')
        logger.debug(f'After order partial cancellation, agent open orders: {self.orders}')
        self.logEvent('HOLDINGS_UPDATED', self.holdings)

    def order_modified(self, order: LimitOrder) -> None:
        """
        Handles OrderModified messages from an exchange agent.

        Subclasses may wish to extend.

        Arguments:
            order: The order that has been modified at the exchange.
        """
        logger.debug(f'Received notification of modification for: {order}')
        if self.log_orders:
            self.logEvent('ORDER_MODIFIED', order.to_dict())
        if (order.order_id in self.orders):
            self.orders[order.order_id] = order
        else:
            # NOTE(review): missing f-prefix — '{order}' is emitted literally.
            warnings.warn('Execution received for order not in orders list: {order}')
        logger.debug(f'After order modification, agent open orders: {self.orders}')
        self.logEvent('HOLDINGS_UPDATED', self.holdings)

    def order_replaced(self, old_order: LimitOrder, new_order: LimitOrder) -> None:
        """
        Handles OrderReplaced messages from an exchange agent.

        Subclasses may wish to extend.

        Arguments:
            old_order: The order that has been replaced at the exchange.
            new_order: The replacement order now resting at the exchange.
        """
        logger.debug(f'Received notification of replacement for: {old_order}')
        if self.log_orders:
            self.logEvent('ORDER_REPLACED', old_order.to_dict())
        if (old_order.order_id in self.orders):
            del self.orders[old_order.order_id]
        else:
            warnings.warn(f'Execution received for order not in orders list: {old_order}')
        self.orders[new_order.order_id] = new_order
        logger.debug(f'After order replacement, agent open orders: {self.orders}')
        self.logEvent('HOLDINGS_UPDATED', self.holdings)

    def market_closed(self) -> None:
        """
        Handles MarketClosedMsg messages from an exchange agent.

        Subclasses may wish to extend.
        """
        logger.debug('Received notification of market closure.')
        self.logEvent('MKT_CLOSED')
        self.mkt_closed = True

    def query_last_trade(self, symbol: str, price: int) -> None:
        """
        Handles QueryLastTradeResponseMsg messages from an exchange agent.

        Arguments:
            symbol: The symbol that was queried.
            price: The price at which the last trade executed at.
        """
        self.last_trade[symbol] = price
        logger.debug('Received last trade price of {} for {}.'.format(self.last_trade[symbol], symbol))
        if self.mkt_closed:
            # The last trade after close is treated as the daily close price.
            self.daily_close_price[symbol] = self.last_trade[symbol]
            logger.debug('Received daily close price of {} for {}.'.format(self.last_trade[symbol], symbol))

    def query_spread(
        self,
        symbol: str,
        price: int,
        bids: List[List[Tuple[int, int]]],
        asks: List[List[Tuple[int, int]]],
        book: str,
    ) -> None:
        """
        Handles QuerySpreadResponseMsg messages from an exchange agent.

        Arguments:
            symbol: The symbol that was queried.
            price: Last trade price accompanying the spread response.
            bids: Bid side of the book, best level first.
            asks: Ask side of the book, best level first.
            book: Printable representation of the book (may be empty).
        """
        # The spread message now also includes last price for free.
        self.query_last_trade(symbol, price)
        self.known_bids[symbol] = bids
        self.known_asks[symbol] = asks
        if bids:
            (best_bid, best_bid_qty) = (bids[0][0], bids[0][1])
        else:
            (best_bid, best_bid_qty) = ('No bids', 0)
        if asks:
            (best_ask, best_ask_qty) = (asks[0][0], asks[0][1])
        else:
            (best_ask, best_ask_qty) = ('No asks', 0)
        logger.debug('Received spread of {} @ {} / {} @ {} for {}'.format(best_bid_qty, best_bid, best_ask_qty, best_ask, symbol))
        self.logEvent('BID_DEPTH', bids)
        self.logEvent('ASK_DEPTH', asks)
        self.logEvent('IMBALANCE', [sum([x[1] for x in bids]), sum([x[1] for x in asks])])
        self.book = book

    def handle_market_data(self, message: MarketDataMsg) -> None:
        """
        Handles Market Data messages for agents using subscription mechanism.

        Arguments:
            message: The market data message,
        """
        if isinstance(message, L2DataMsg):
            symbol = message.symbol
            self.known_asks[symbol] = message.asks
            self.known_bids[symbol] = message.bids
            self.last_trade[symbol] = message.last_transaction
            self.exchange_ts[symbol] = message.exchange_ts

    def query_order_stream(self, symbol: str, orders) -> None:
        """
        Handles QueryOrderStreamResponseMsg messages from an exchange agent.

        It is up to the requesting agent to do something with the data, which is a list
        of dictionaries keyed by order id. The list index is 0 for orders since the most
        recent trade, 1 for orders that led up to the most recent trade, and so on.
        Agents are not given index 0 (orders more recent than the last trade).

        Arguments:
            symbol: The symbol that was queried.
            orders:
        """
        self.stream_history[symbol] = orders

    def query_transacted_volume(self, symbol: str, bid_volume: int, ask_volume: int) -> None:
        """
        Handles the QueryTransactedVolResponseMsg messages from the exchange agent.

        Arguments:
            symbol: The symbol that was queried.
            bid_volume: The volume that has transacted on the bid side for the queried period.
            ask_volume: The volume that has transacted on the ask side for the queried period.
        """
        self.transacted_volume[symbol] = (bid_volume, ask_volume)

    def get_known_bid_ask(self, symbol: str, best: bool = True):
        """
        Extract the current known bid and asks.

        This does NOT request new information.

        Arguments:
            symbol: The symbol to query.
            best: When True return (bid, bid_vol, ask, ask_vol) of the top of
                book; otherwise return the full (bids, asks) depth snapshots.
        """
        if best:
            bid = (self.known_bids[symbol][0][0] if self.known_bids[symbol] else None)
            ask = (self.known_asks[symbol][0][0] if self.known_asks[symbol] else None)
            bid_vol = (self.known_bids[symbol][0][1] if self.known_bids[symbol] else 0)
            ask_vol = (self.known_asks[symbol][0][1] if self.known_asks[symbol] else 0)
            return (bid, bid_vol, ask, ask_vol)
        else:
            bids = (self.known_bids[symbol] if self.known_bids[symbol] else None)
            asks = (self.known_asks[symbol] if self.known_asks[symbol] else None)
            return (bids, asks)

    def get_known_liquidity(self, symbol: str, within: float = 0.0) -> Tuple[int, int]:
        """
        Extract the current bid and ask liquidity within a certain proportion of the
        inside bid and ask. (i.e. within=0.01 means to report total BID shares
        within 1% of the best bid price, and total ASK shares within 1% of the best
        ask price)

        Arguments:
            symbol: The symbol to query.
            within:

        Returns:
            (bid_liquidity, ask_liquidity). Note that this is from the order book
            perspective, not the agent perspective. (The agent would be selling into
            the bid liquidity, etc.)
        """
        bid_liq = self.get_book_liquidity(self.known_bids[symbol], within)
        ask_liq = self.get_book_liquidity(self.known_asks[symbol], within)
        logger.debug('Bid/ask liq: {}, {}'.format(bid_liq, ask_liq))
        # NOTE(review): these debug lines read self.symbol, which TradingAgent
        # never sets (only some subclasses do) — AttributeError if debug logging
        # is enabled on an agent without a `symbol` attribute; should be `symbol`.
        logger.debug('Known bids: {}'.format(self.known_bids[self.symbol]))
        logger.debug('Known asks: {}'.format(self.known_asks[self.symbol]))
        return (bid_liq, ask_liq)

    def get_book_liquidity(self, book: Iterable[Tuple[int, int]], within: float) -> int:
        """
        Helper function for the above. Checks one side of the known order book.

        Arguments:
            book: One side of the order book, best level first.
            within: Proportional distance from the best price to include.
        """
        liq = 0
        for (i, (price, shares)) in enumerate(book):
            if (i == 0):
                # The first level is the best price; all others are measured
                # against it.
                best = price
            if (abs((best - price)) <= int(round((best * within)))):
                logger.debug('Within {} of {}: {} with {} shares'.format(within, best, price, shares))
                liq += shares
        return liq

    def mark_to_market(self, holdings: Mapping[str, int], use_midpoint: bool = False) -> int:
        """
        Marks holdings to market (including cash).

        Arguments:
            holdings: Holdings map (symbol -> shares, plus 'CASH').
            use_midpoint: Value positions at the known bid/ask midpoint when
                available, instead of the last trade price.
        """
        cash = holdings['CASH']
        cash += (self.basket_size * self.nav_diff)
        for (symbol, shares) in holdings.items():
            if (symbol == 'CASH'):
                continue
            if use_midpoint:
                (bid, ask, midpoint) = self.get_known_bid_ask_midpoint(symbol)
                if ((bid is None) or (ask is None) or (midpoint is None)):
                    # Fall back to last trade when a full quote is unavailable.
                    value = (self.last_trade[symbol] * shares)
                else:
                    value = (midpoint * shares)
            else:
                value = (self.last_trade[symbol] * shares)
            cash += value
            self.logEvent('MARK_TO_MARKET', '{} {} @ {} == {}'.format(shares, symbol, self.last_trade[symbol], value))
        self.logEvent('MARKED_TO_MARKET', cash)
        return cash

    def get_holdings(self, symbol: str) -> int:
        """
        Gets holdings. Returns zero for any symbol not held.

        Arguments:
            symbol: The symbol to query.
        """
        return (self.holdings[symbol] if (symbol in self.holdings) else 0)

    def get_known_bid_ask_midpoint(self, symbol: str) -> Tuple[Optional[int], Optional[int], Optional[int]]:
        """
        Get the known best bid, ask, and bid/ask midpoint from cached data. No volume.

        Arguments:
            symbol: The symbol to query.
        """
        bid = (self.known_bids[symbol][0][0] if self.known_bids[symbol] else None)
        ask = (self.known_asks[symbol][0][0] if self.known_asks[symbol] else None)
        midpoint = (int(round(((bid + ask) / 2))) if ((bid is not None) and (ask is not None)) else None)
        return (bid, ask, midpoint)

    def get_average_transaction_price(self) -> float:
        """Calculates the average price paid (weighted by the order size)."""
        # NOTE(review): raises ZeroDivisionError when no orders have executed.
        return round((sum(((executed_order.quantity * executed_order.fill_price) for executed_order in self.executed_orders)) / sum((executed_order.quantity for executed_order in self.executed_orders))), 2)

    def fmt_holdings(self, holdings: Mapping[str, int]) -> str:
        """
        Prints holdings.

        Standard dictionary->string representation is almost fine, but it is less
        confusing to see the CASH holdings in dollars and cents, instead of just integer
        cents. We could change to a Holdings object that knows to print CASH "special".

        Arguments:
            holdings: Holdings map (symbol -> shares, plus 'CASH').
        """
        h = ''
        for (k, v) in sorted(holdings.items()):
            if (k == 'CASH'):
                continue
            h += '{}: {}, '.format(k, v)
        # CASH is always printed last.
        h += '{}: {}'.format('CASH', holdings['CASH'])
        h = (('{ ' + h) + ' }')
        return h
def list_dict_flip(ld: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
    """
    Flip a list of dictionaries into a dictionary of lists.

    All entries are assumed to share the keys of the first element.

    Arguments:
        ld: List of dictionaries with identical keys.

    Returns:
        Dictionary mapping each key to the list of its values, in input order.
        An empty input yields an empty dictionary (the original raised
        IndexError on empty input).

    Example:
        >>> list_dict_flip([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
        {'a': [1, 3], 'b': [2, 4]}
    """
    if not ld:
        # Guard: the flip of an empty list is an empty mapping.
        return {}
    # Keys are taken from the first element; a missing key in a later element
    # raises KeyError, matching the original behavior.
    return {k: [rs[k] for rs in ld] for k in ld[0]}
def identity_decorator(func):
    """
    Identity for decorators: take a function and return an equivalent wrapper.

    Arguments:
        func: Function to wrap.

    Returns:
        wrapper_identity_decorator: A wrapper forwarding every call to ``func``
        unchanged.
    """
    from functools import wraps  # local import keeps the helper self-contained

    # wraps() preserves the wrapped function's name/docstring, which the
    # original wrapper lost.
    @wraps(func)
    def wrapper_identity_decorator(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper_identity_decorator
def ignore_mkt_data_buffer_decorator(func):
    """
    Decorator for a method taking (self, raw_state) that strips market-data
    buffering: only the last element of each market-data and volume-data buffer
    is kept, after which the per-step states are flipped into a dictionary of
    lists (and each inner value flipped likewise) before ``func`` is applied.

    Arguments:
        func: Function to wrap.

    Returns:
        wrapper_mkt_data_buffer_decorator: The wrapped function.
    """

    def wrapper_mkt_data_buffer_decorator(self, raw_state):
        # Snapshot first, then overwrite each entry in place with the freshest
        # element of its buffered fields (the input list is mutated, as before).
        snapshot = deepcopy(raw_state)
        for entry, copied in zip(raw_state, snapshot):
            entry['parsed_mkt_data'] = copied['parsed_mkt_data'][-1]
            entry['parsed_volume_data'] = copied['parsed_volume_data'][-1]
        outer_flip = list_dict_flip(raw_state)
        nested_flip = {key: list_dict_flip(value) for key, value in outer_flip.items()}
        return func(self, nested_flip)

    return wrapper_mkt_data_buffer_decorator
def ignore_buffers_decorator(func):
    """
    Decorator for a method that takes (self, raw_state).

    Applies the wrapped function while ignoring the buffering in both the
    market data and the general raw state: only the last elements are kept.

    Arguments:
        - func: function
    Returns:
        - wrapper_ignore_buffers_decorator: function
    """
    def wrapper_ignore_buffers_decorator(self, raw_state):
        # Keep only the most recent overall state from the outer buffer.
        raw_state = raw_state[(- 1)]
        if (len(raw_state['parsed_mkt_data']) == 0):
            # Nothing buffered yet — leave the (empty) market data untouched.
            pass
        else:
            raw_state['parsed_mkt_data'] = raw_state['parsed_mkt_data'][(- 1)]
            # Volume data may legitimately be absent/empty; only unwrap when present.
            if raw_state['parsed_volume_data']:
                raw_state['parsed_volume_data'] = raw_state['parsed_volume_data'][(- 1)]
        return func(self, raw_state)
    return wrapper_ignore_buffers_decorator
def get_mid_price(bids: List[PriceLevel], asks: List[PriceLevel], last_transaction: int) -> int:
    """
    Compute the mid price from snapshots of the bid and ask sides.

    Arguments:
        - bids: list of (price, volume) levels for the bid side
        - asks: list of (price, volume) levels for the ask side
        - last_transaction: last transaction price, used when one or both
          sides of the order book are empty
    Returns:
        - mid price value (average of best bid and best ask when both exist)
    """
    # Guard clauses for the empty-side corner cases.
    if not bids and not asks:
        return last_transaction
    if not bids:
        return asks[0][0]
    if not asks:
        return bids[0][0]
    best_bid = bids[0][0]
    best_ask = asks[0][0]
    return (best_bid + best_ask) / 2
def get_val(book: List[PriceLevel], level: int) -> Tuple[(int, int)]:
    """
    Compute the price and volume at the level-th level of one order book side.

    Arguments:
        - book: side of the order book (bid or ask)
        - level: level of interest (index starts at 0 for best bid/ask)

    Returns:
        - (price, volume) tuple for the level-th entry, or (0, 0) when the
          side has fewer than level + 1 levels (including an empty side)
    """
    # Fix: the original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit and masks genuine bugs. Only a missing
    # level (IndexError) should map to the (0, 0) sentinel; the explicit
    # empty-book check is subsumed by the same path.
    try:
        entry = book[level]
    except IndexError:
        return (0, 0)
    return (entry[0], entry[1])
def get_last_val(book: List[PriceLevel], mid_price: int) -> int:
    """
    Price of the deepest placed order on one side of the order book.

    Arguments:
        - book: side of the order book (bid or ask)
        - mid_price: current mid price, returned when the side is empty

    Returns:
        - price of the last (deepest) level, or mid_price for an empty side
    """
    return book[(- 1)][0] if book else mid_price
def get_volume(book: List[PriceLevel], depth: Optional[int]=None) -> int:
    """
    Total volume resting between the top of the book (depth 0) and `depth`.

    Arguments:
        - book: side of the order book (bid or ask)
        - depth: number of levels to sum over; None means the whole side

    Returns:
        - total volume placed over the selected levels
    """
    # book[:None] is the whole list, so one slice covers both cases.
    selected = book[:depth]
    return sum(level[1] for level in selected)
def get_imbalance(bids: List[PriceLevel], asks: List[PriceLevel], direction: str='BUY', depth: Optional[int]=None) -> float:
    """
    Compute the order book imbalance between the top of the book and `depth`.

    Arguments:
        - bids: list of (price, volume) levels for the bid side
        - asks: list of (price, volume) levels for the ask side
        - direction: side used for the numerator of the ratio ('BUY' or other)
        - depth: number of levels to sum volume over; None means the whole side

    Returns:
        - imbalance in [0, 1]; 0.5 when both sides are empty, and 0 or 1 when
          exactly one side is empty (depending on `direction`)
    """
    # Corner cases: one or both sides empty.
    if not bids and not asks:
        return 0.5
    if not bids:
        return 0 if direction == 'BUY' else 1
    if not asks:
        return 1 if direction == 'BUY' else 0
    # Fix: original tested `depth == None` (PEP 8 E711); `is None` is the
    # correct identity test. Since book[:None] is the whole book, a single
    # slice handles both the bounded and unbounded cases.
    bid_vol = sum(level[1] for level in bids[:depth])
    ask_vol = sum(level[1] for level in asks[:depth])
    if direction == 'BUY':
        return bid_vol / (bid_vol + ask_vol)
    return ask_vol / (bid_vol + ask_vol)
class ValueAgent(TradingAgent):
    """
    Trading agent that trades on noisy observations of an exogenous
    mean-reverting fundamental provided by the kernel's oracle.

    It maintains a Bayesian estimate (r_t, sigma_t) of the fundamental,
    projects it to the market close (r_T), and places a limit order on the
    side suggested by the difference between r_T and the current mid price.
    """

    def __init__(self, id: int, name: Optional[str]=None, type: Optional[str]=None, random_state: Optional[np.random.RandomState]=None, symbol: str='IBM', starting_cash: int=100000, sigma_n: float=10000, r_bar: int=100000, kappa: float=0.05, sigma_s: float=100000, order_size_model=None, lambda_a: float=0.005, log_orders: bool=False) -> None:
        super().__init__(id, name, type, random_state, starting_cash, log_orders)
        self.symbol: str = symbol            # security traded by this agent
        self.sigma_n: float = sigma_n        # variance of observation noise
        self.r_bar: int = r_bar              # long-run fundamental mean
        self.kappa: float = kappa            # mean-reversion parameter of the agent's belief
        self.sigma_s: float = sigma_s        # fundamental shock variance
        self.lambda_a: float = lambda_a      # arrival rate: wakeups are exponential with scale 1/lambda_a
        self.trading: bool = False           # becomes True once the market is open
        self.state: str = 'AWAITING_WAKEUP'  # simple FSM: AWAITING_WAKEUP / AWAITING_SPREAD / ACTIVE / INACTIVE
        # Belief state about the fundamental, updated in updateEstimates().
        self.r_t: int = r_bar
        self.sigma_t: float = 0
        self.prev_wake_time: Optional[NanosecondTime] = None
        self.percent_aggr: float = 0.1       # fraction of orders placed aggressively at the inside quote
        # Fixed random size in [20, 50) unless an order size model is supplied.
        self.size: Optional[int] = (self.random_state.randint(20, 50) if (order_size_model is None) else None)
        self.order_size_model = order_size_model
        self.depth_spread: int = 2           # max price offset, in units of the current spread

    def kernel_starting(self, start_time: NanosecondTime) -> None:
        """Cache the kernel's oracle before the simulation starts."""
        super().kernel_starting(start_time)
        self.oracle = self.kernel.oracle

    def kernel_stopping(self) -> None:
        """Log the agent's final surplus (normalized by starting cash) at shutdown."""
        super().kernel_stopping()
        # Round holdings to the nearest round lot, expressed in lots of 100.
        H = int((round(self.get_holdings(self.symbol), (- 2)) / 100))
        # Noise-free terminal fundamental from the oracle.
        rT = self.oracle.observe_price(self.symbol, self.current_time, sigma_n=0, random_state=self.random_state)
        surplus = (rT * H)
        logger.debug('Surplus after holdings: {}', surplus)
        surplus += (self.holdings['CASH'] - self.starting_cash)
        surplus = (float(surplus) / self.starting_cash)
        self.logEvent('FINAL_VALUATION', surplus, True)
        logger.debug('{} final report. \nHoldings: {}, end cash: {}, start cash: {}, final fundamental: {}, surplus: {}', self.name, H, self.holdings['CASH'], self.starting_cash, rT, surplus)

    def wakeup(self, current_time: NanosecondTime) -> None:
        """Handle a scheduled wakeup: reschedule, then request the spread to trade on."""
        super().wakeup(current_time)
        self.state = 'INACTIVE'
        if ((not self.mkt_open) or (not self.mkt_close)):
            # Market hours not yet known; wait for the exchange's reply.
            return
        elif (not self.trading):
            self.trading = True
            logger.debug('{} is ready to start trading now.', self.name)
        if (self.mkt_closed and (self.symbol in self.daily_close_price)):
            # Market closed and final price already recorded: nothing left to do.
            return
        # Schedule the next wakeup with exponential inter-arrival time.
        delta_time = self.random_state.exponential(scale=(1.0 / self.lambda_a))
        self.set_wakeup((current_time + int(round(delta_time))))
        if (self.mkt_closed and (not (self.symbol in self.daily_close_price))):
            # Market just closed: one last spread query to learn the close price.
            self.get_current_spread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return
        self.cancel_all_orders()
        if (type(self) == ValueAgent):
            # Base behavior: query the spread, then place an order on reply.
            self.get_current_spread(self.symbol)
            self.state = 'AWAITING_SPREAD'
        else:
            # Subclasses drive their own trading from the ACTIVE state.
            self.state = 'ACTIVE'

    def updateEstimates(self) -> int:
        """
        Update the belief (r_t, sigma_t) with a fresh noisy oracle observation
        and return r_T, the fundamental projected forward to market close.
        """
        obs_t = self.oracle.observe_price(self.symbol, self.current_time, sigma_n=self.sigma_n, random_state=self.random_state)
        logger.debug('{} observed {} at {}', self.name, obs_t, self.current_time)
        if (self.prev_wake_time is None):
            self.prev_wake_time = self.mkt_open
        # Time (ns) since the last belief update.
        delta = (self.current_time - self.prev_wake_time)
        # Mean-revert the prior estimate toward r_bar over the elapsed time.
        r_tprime = ((1 - ((1 - self.kappa) ** delta)) * self.r_bar)
        r_tprime += (((1 - self.kappa) ** delta) * self.r_t)
        # Grow the prior variance with accumulated shock variance.
        sigma_tprime = (((1 - self.kappa) ** (2 * delta)) * self.sigma_t)
        sigma_tprime += (((1 - ((1 - self.kappa) ** (2 * delta))) / (1 - ((1 - self.kappa) ** 2))) * self.sigma_s)
        # Bayesian update: blend prior and observation by relative precision.
        self.r_t = ((self.sigma_n / (self.sigma_n + sigma_tprime)) * r_tprime)
        self.r_t += ((sigma_tprime / (self.sigma_n + sigma_tprime)) * obs_t)
        self.sigma_t = ((self.sigma_n * self.sigma_t) / (self.sigma_n + self.sigma_t))
        # Project the estimate to the market close.
        delta = max(0, (self.mkt_close - self.current_time))
        r_T = ((1 - ((1 - self.kappa) ** delta)) * self.r_bar)
        r_T += (((1 - self.kappa) ** delta) * self.r_t)
        r_T = int(round(r_T))
        self.prev_wake_time = self.current_time
        logger.debug('{} estimates r_T = {} as of {}', self.name, r_T, self.current_time)
        return r_T

    def placeOrder(self) -> None:
        """Place a limit order on the side implied by r_T versus the current mid."""
        r_T = self.updateEstimates()
        (bid, bid_vol, ask, ask_vol) = self.get_known_bid_ask(self.symbol)
        if (bid and ask):
            mid = int(((ask + bid) / 2))
            spread = abs((ask - bid))
            if (self.random_state.rand() < self.percent_aggr):
                # Aggressive order: price exactly at the inside quote.
                adjust_int = 0
            else:
                # Passive order: random offset up to depth_spread * spread,
                # capped at sys.maxsize - 1 to keep randint's bound valid.
                adjust_int = self.random_state.randint(0, min((9223372036854775807 - 1), (self.depth_spread * spread)))
            if (r_T < mid):
                # Fundamental below mid: the agent expects a drop, so it sells.
                buy = False
                p = (bid + adjust_int)
            elif (r_T >= mid):
                # Fundamental at or above mid: the agent expects a rise, so it buys.
                buy = True
                p = (ask - adjust_int)
        else:
            # No two-sided quote: pick a side at random and price at r_T.
            buy = self.random_state.randint(0, (1 + 1))
            p = r_T
        if (self.order_size_model is not None):
            self.size = self.order_size_model.sample(random_state=self.random_state)
        side = (Side.BID if (buy == 1) else Side.ASK)
        if (self.size > 0):
            self.place_limit_order(self.symbol, self.size, side, p)

    def receive_message(self, current_time: NanosecondTime, sender_id: int, message: Message) -> None:
        """On a spread reply (and only while awaiting one), place an order."""
        super().receive_message(current_time, sender_id, message)
        if (self.state == 'AWAITING_SPREAD'):
            if isinstance(message, QuerySpreadResponseMsg):
                if self.mkt_closed:
                    # The query was only for the closing price; do not trade.
                    return
                self.placeOrder()
                self.state = 'AWAITING_WAKEUP'

    def get_wake_frequency(self) -> NanosecondTime:
        """Initial wakeup delay, drawn from the same exponential arrival process."""
        delta_time = self.random_state.exponential(scale=(1.0 / self.lambda_a))
        return int(round(delta_time))
def build_config(ticker='ABM', historical_date='20200603', start_time='09:30:00', end_time='16:00:00', exchange_log_orders=True, log_orders=True, book_logging=True, book_log_depth=10, seed=1, stdout_log_level='INFO', num_momentum_agents=25, num_noise_agents=5000, num_value_agents=100, execution_agents=True, execution_pov=0.1, mm_pov=0.025, mm_window_size='adaptive', mm_min_order_size=1, mm_num_ticks=10, mm_wake_up_freq=str_to_ns('10S'), mm_skew_beta=0, mm_level_spacing=5, mm_spread_alpha=0.75, mm_backstop_quantity=50000, fund_r_bar=100000, fund_kappa=1.67e-16, fund_sigma_s=0, fund_vol=0.001, fund_megashock_lambda_a=2.77778e-18, fund_megashock_mean=1000, fund_megashock_var=50000, val_r_bar=100000, val_kappa=1.67e-15, val_vol=1e-08, val_lambda_a=7e-11):
    """
    Build a simulation kernel configuration: one exchange, noise agents,
    value agents, two adaptive market makers, momentum agents and an
    optional POV execution agent, all driven by a sparse mean-reverting
    fundamental oracle.

    Returns a dict of kernel keyword arguments (start/stop time, agents,
    latency model, oracle, kernel random state, logging level).
    """
    # Observation noise for the value agents scales with the fundamental mean.
    fund_sigma_n = (fund_r_bar / 10)
    val_sigma_n = (val_r_bar / 10)
    symbol = ticker
    np.random.seed(seed)
    # Session boundaries as nanosecond timestamps.
    historical_date = datetime_str_to_ns(historical_date)
    mkt_open = (historical_date + str_to_ns(start_time))
    mkt_close = (historical_date + str_to_ns(end_time))
    (agent_count, agents, agent_types) = (0, [], [])
    starting_cash = 10000000  # in cents
    # Fundamental process: sparse mean-reverting with rare "megashock" jumps.
    symbols = {symbol: {'r_bar': fund_r_bar, 'kappa': fund_kappa, 'sigma_s': fund_sigma_s, 'fund_vol': fund_vol, 'megashock_lambda_a': fund_megashock_lambda_a, 'megashock_mean': fund_megashock_mean, 'megashock_var': fund_megashock_var, 'random_state': np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))}}
    oracle = SparseMeanRevertingOracle(mkt_open, mkt_close, symbols)
    # --- Exchange agent (always id 0) ---
    agents.extend([ExchangeAgent(id=0, name='EXCHANGE_AGENT', mkt_open=mkt_open, mkt_close=mkt_close, symbols=[symbol], book_logging=book_logging, book_log_depth=book_log_depth, log_orders=exchange_log_orders, pipeline_delay=0, computation_delay=0, stream_history=25000)])
    # NOTE(review): extend() on a bare string appends one element per
    # character; probably intended agent_types.extend(['ExchangeAgent']).
    # Left unchanged here; same pattern recurs below.
    agent_types.extend('ExchangeAgent')
    agent_count += 1
    # --- Noise agents: each wakes once, uniformly over an extended session ---
    num_noise = num_noise_agents
    noise_mkt_open = (historical_date + str_to_ns('09:00:00'))
    noise_mkt_close = (historical_date + str_to_ns('16:00:00'))
    agents.extend([NoiseAgent(id=j, symbol=symbol, starting_cash=starting_cash, wakeup_time=get_wake_time(noise_mkt_open, noise_mkt_close), log_orders=log_orders) for j in range(agent_count, (agent_count + num_noise))])
    agent_count += num_noise
    agent_types.extend(['NoiseAgent'])
    # --- Value agents: trade on noisy observations of the oracle fundamental ---
    num_value = num_value_agents
    agents.extend([ValueAgent(id=j, name='Value Agent {}'.format(j), symbol=symbol, starting_cash=starting_cash, sigma_n=val_sigma_n, r_bar=val_r_bar, kappa=val_kappa, lambda_a=val_lambda_a, log_orders=log_orders) for j in range(agent_count, (agent_count + num_value))])
    agent_count += num_value
    agent_types.extend(['ValueAgent'])
    """
    window_size == Spread of market maker (in ticks) around the mid price
    pov == Percentage of transacted volume seen in previous `mm_wake_up_freq` that
           the market maker places at each level
    num_ticks == Number of levels to place orders in around the spread
    wake_up_freq == How often the market maker wakes up
    """
    # --- Two identically-parameterized adaptive market makers ---
    mm_params = (2 * [(mm_window_size, mm_pov, mm_num_ticks, mm_wake_up_freq, mm_min_order_size)])
    num_mm_agents = len(mm_params)
    mm_cancel_limit_delay = 50  # 50 ns delay when canceling limit orders
    agents.extend([AdaptiveMarketMakerAgent(id=j, name='ADAPTIVE_POV_MARKET_MAKER_AGENT_{}'.format(j), type='AdaptivePOVMarketMakerAgent', symbol=symbol, starting_cash=starting_cash, pov=mm_params[idx][1], min_order_size=mm_params[idx][4], window_size=mm_params[idx][0], num_ticks=mm_params[idx][2], wake_up_freq=mm_params[idx][3], cancel_limit_delay=mm_cancel_limit_delay, skew_beta=mm_skew_beta, level_spacing=mm_level_spacing, spread_alpha=mm_spread_alpha, backstop_quantity=mm_backstop_quantity, log_orders=log_orders) for (idx, j) in enumerate(range(agent_count, (agent_count + num_mm_agents)))])
    agent_count += num_mm_agents
    agent_types.extend('POVMarketMakerAgent')
    # --- Momentum agents ---
    num_momentum_agents = num_momentum_agents
    agents.extend([MomentumAgent(id=j, name='MOMENTUM_AGENT_{}'.format(j), symbol=symbol, starting_cash=starting_cash, min_size=1, max_size=10, wake_up_freq=str_to_ns('20s'), log_orders=log_orders) for j in range(agent_count, (agent_count + num_momentum_agents))])
    agent_count += num_momentum_agents
    agent_types.extend('MomentumAgent')
    # --- Optional POV execution agent (buys pov_quantity over the session) ---
    trade = (True if execution_agents else False)
    pov_agent_start_time = (mkt_open + str_to_ns('00:30:00'))
    pov_agent_end_time = (mkt_close - str_to_ns('00:30:00'))
    pov_proportion_of_volume = execution_pov
    pov_quantity = 1200000.0
    pov_frequency = str_to_ns('1min')
    pov_direction = Side.BID
    pov_agent = POVExecutionAgent(id=agent_count, name='POV_EXECUTION_AGENT', type='ExecutionAgent', symbol=symbol, starting_cash=starting_cash, start_time=pov_agent_start_time, end_time=pov_agent_end_time, freq=pov_frequency, lookback_period=pov_frequency, pov=pov_proportion_of_volume, direction=pov_direction, quantity=pov_quantity, trade=trade, log_orders=True)
    execution_agents = [pov_agent]
    agents.extend(execution_agents)
    agent_types.extend('ExecutionAgent')
    agent_count += 1
    # --- Kernel-level configuration ---
    random_state_kernel = np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))
    latency_model = generate_latency_model(agent_count)
    default_computation_delay = 50  # ns per agent action
    kernelStartTime = historical_date
    kernelStopTime = (mkt_close + str_to_ns('00:01:00'))
    return {'start_time': kernelStartTime, 'stop_time': kernelStopTime, 'agents': agents, 'agent_latency_model': latency_model, 'default_computation_delay': default_computation_delay, 'custom_properties': {'oracle': oracle}, 'random_state_kernel': random_state_kernel, 'stdout_log_level': stdout_log_level}
def build_config(seed=(int((datetime.now().timestamp() * 1000000)) % ((2 ** 32) - 1)), date='20210205', end_time='10:00:00', stdout_log_level='INFO', ticker='ABM', starting_cash=10000000, log_orders=True, book_logging=True, book_log_depth=10, stream_history_length=500, exchange_log_orders=None, num_noise_agents=1000, num_value_agents=102, r_bar=100000, kappa=1.67e-15, lambda_a=5.7e-12, kappa_oracle=1.67e-16, sigma_s=0, fund_vol=5e-05, megashock_lambda_a=2.77778e-18, megashock_mean=1000, megashock_var=50000, mm_window_size='adaptive', mm_pov=0.025, mm_num_ticks=10, mm_wake_up_freq='60S', mm_min_order_size=1, mm_skew_beta=0, mm_price_skew=4, mm_level_spacing=5, mm_spread_alpha=0.75, mm_backstop_quantity=0, mm_cancel_limit_delay=50, num_momentum_agents=12):
    """
    Create the background configuration for rmsc04.

    These are all the non-learning agents that will run in the simulation.

    NOTE: the `seed` default is evaluated once, at import time, so all calls
    that omit `seed` within one process share the same seed.

    :param seed: seed of the experiment
    :type seed: int
    :param log_orders: debug mode to print more
    :return: kernel configuration holding all agents of the config
    :rtype: dict
    """
    np.random.seed(seed)

    def path_wrapper(pomegranate_model_json):
        """
        Temporary solution to manage calls from abides-gym or from the rest
        of the code base.
        TODO: find more general solution
        :return: path to the pomegranate model json file
        :rtype: str
        """
        # When called from the abides_gym working directory, the model file
        # lives one level up.
        path = os.getcwd()
        if (path.split('/')[(- 1)] == 'abides_gym'):
            return ('../' + pomegranate_model_json)
        else:
            return pomegranate_model_json

    mm_wake_up_freq = str_to_ns(mm_wake_up_freq)
    # Shared empirical order size model for noise/value/momentum agents.
    ORDER_SIZE_MODEL = OrderSizeModel()
    # Two identically-parameterized adaptive market makers.
    MM_PARAMS = [(mm_window_size, mm_pov, mm_num_ticks, mm_wake_up_freq, mm_min_order_size), (mm_window_size, mm_pov, mm_num_ticks, mm_wake_up_freq, mm_min_order_size)]
    NUM_MM = len(MM_PARAMS)
    # Observation noise for value agents scales with the fundamental mean.
    SIGMA_N = (r_bar / 100)
    # Session boundaries as nanosecond timestamps.
    DATE = int(pd.to_datetime(date).to_datetime64())
    MKT_OPEN = (DATE + str_to_ns('09:30:00'))
    MKT_CLOSE = (DATE + str_to_ns(end_time))
    # Noise agents wake over a wider window than the traded session.
    NOISE_MKT_OPEN = (MKT_OPEN - str_to_ns('00:30:00'))
    NOISE_MKT_CLOSE = (DATE + str_to_ns('16:00:00'))
    # Fundamental process: sparse mean-reverting with rare "megashock" jumps.
    symbols = {ticker: {'r_bar': r_bar, 'kappa': kappa_oracle, 'sigma_s': sigma_s, 'fund_vol': fund_vol, 'megashock_lambda_a': megashock_lambda_a, 'megashock_mean': megashock_mean, 'megashock_var': megashock_var, 'random_state': np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32)))}}
    oracle = SparseMeanRevertingOracle(MKT_OPEN, NOISE_MKT_CLOSE, symbols)
    (agent_count, agents, agent_types) = (0, [], [])
    # --- Exchange agent (always id 0) ---
    agents.extend([ExchangeAgent(id=0, name='EXCHANGE_AGENT', type='ExchangeAgent', mkt_open=MKT_OPEN, mkt_close=MKT_CLOSE, symbols=[ticker], book_logging=book_logging, book_log_depth=book_log_depth, log_orders=exchange_log_orders, pipeline_delay=0, computation_delay=0, stream_history=stream_history_length, random_state=np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64')))])
    # NOTE(review): extend() on a bare string appends one element per
    # character; probably intended agent_types.extend(['ExchangeAgent']).
    # Left unchanged here; same pattern recurs below.
    agent_types.extend('ExchangeAgent')
    agent_count += 1
    # --- Noise agents ---
    agents.extend([NoiseAgent(id=j, name='NoiseAgent {}'.format(j), type='NoiseAgent', symbol=ticker, starting_cash=starting_cash, wakeup_time=get_wake_time(NOISE_MKT_OPEN, NOISE_MKT_CLOSE), log_orders=log_orders, order_size_model=ORDER_SIZE_MODEL, random_state=np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))) for j in range(agent_count, (agent_count + num_noise_agents))])
    agent_count += num_noise_agents
    agent_types.extend(['NoiseAgent'])
    # --- Value agents: trade on noisy observations of the oracle fundamental ---
    agents.extend([ValueAgent(id=j, name='Value Agent {}'.format(j), type='ValueAgent', symbol=ticker, starting_cash=starting_cash, sigma_n=SIGMA_N, r_bar=r_bar, kappa=kappa, lambda_a=lambda_a, log_orders=log_orders, order_size_model=ORDER_SIZE_MODEL, random_state=np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))) for j in range(agent_count, (agent_count + num_value_agents))])
    agent_count += num_value_agents
    agent_types.extend(['ValueAgent'])
    # --- Adaptive market makers ---
    agents.extend([AdaptiveMarketMakerAgent(id=j, name='ADAPTIVE_POV_MARKET_MAKER_AGENT_{}'.format(j), type='AdaptivePOVMarketMakerAgent', symbol=ticker, starting_cash=starting_cash, pov=MM_PARAMS[idx][1], min_order_size=MM_PARAMS[idx][4], window_size=MM_PARAMS[idx][0], num_ticks=MM_PARAMS[idx][2], wake_up_freq=MM_PARAMS[idx][3], poisson_arrival=True, cancel_limit_delay=mm_cancel_limit_delay, skew_beta=mm_skew_beta, price_skew_param=mm_price_skew, level_spacing=mm_level_spacing, spread_alpha=mm_spread_alpha, backstop_quantity=mm_backstop_quantity, log_orders=log_orders, random_state=np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))) for (idx, j) in enumerate(range(agent_count, (agent_count + NUM_MM)))])
    agent_count += NUM_MM
    agent_types.extend('POVMarketMakerAgent')
    # --- Momentum agents ---
    agents.extend([MomentumAgent(id=j, name='MOMENTUM_AGENT_{}'.format(j), type='MomentumAgent', symbol=ticker, starting_cash=starting_cash, min_size=1, max_size=10, wake_up_freq=str_to_ns('37s'), poisson_arrival=True, log_orders=log_orders, order_size_model=ORDER_SIZE_MODEL, random_state=np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))) for j in range(agent_count, (agent_count + num_momentum_agents))])
    agent_count += num_momentum_agents
    agent_types.extend('MomentumAgent')
    # --- Kernel-level configuration ---
    random_state_kernel = np.random.RandomState(seed=np.random.randint(low=0, high=(2 ** 32), dtype='uint64'))
    latency_model = generate_latency_model(agent_count)
    default_computation_delay = 50  # ns per agent action
    kernelStartTime = DATE
    kernelStopTime = (MKT_CLOSE + str_to_ns('1s'))
    return {'seed': seed, 'start_time': kernelStartTime, 'stop_time': kernelStopTime, 'agents': agents, 'agent_latency_model': latency_model, 'default_computation_delay': default_computation_delay, 'custom_properties': {'oracle': oracle}, 'random_state_kernel': random_state_kernel, 'stdout_log_level': stdout_log_level}
class OrderSizeGenerator(BaseGenerator[int], ABC):
    """Abstract base class for generators that produce integer order sizes."""
    pass
class ConstantOrderSizeGenerator(OrderSizeGenerator):
    """Order size generator that always produces one fixed size."""

    def __init__(self, order_size: int) -> None:
        # The single size returned by every call to next().
        self.order_size: int = order_size

    def mean(self) -> int:
        """The mean of a degenerate distribution is its only value."""
        return self.order_size

    def next(self) -> int:
        """Draw the next order size (always the same constant)."""
        return self.order_size
class UniformOrderSizeGenerator(OrderSizeGenerator):
    """
    Order sizes drawn uniformly from the closed interval
    [order_size_min, order_size_max].
    """

    def __init__(self, order_size_min: int, order_size_max: int, random_generator: np.random.RandomState) -> None:
        self.order_size_min: int = order_size_min
        # Stored as an exclusive upper bound, as RandomState.randint expects.
        self.order_size_max: int = (order_size_max + 1)
        self.random_generator: np.random.RandomState = random_generator

    def next(self) -> int:
        """Draw one order size uniformly from [min, max] inclusive."""
        return self.random_generator.randint(self.order_size_min, self.order_size_max)

    def mean(self) -> float:
        """
        True mean of the discrete uniform distribution on [min, max].

        Fix: previously returned (max - min) / 2, which is only correct when
        order_size_min == 0; the mean of U{min, ..., max} is (min + max) / 2.
        """
        # self.order_size_max holds max + 1, hence the - 1.
        return ((self.order_size_min + (self.order_size_max - 1)) / 2)
class OrderDepthGenerator(BaseGenerator[int], ABC):
    """Abstract base class for generators that produce integer order book depths."""
    pass
class ConstantDepthGenerator(OrderDepthGenerator):
    """Depth generator that always produces one fixed order book depth."""

    def __init__(self, order_depth: int) -> None:
        # The single depth returned by every call to next().
        self.order_depth: int = order_depth

    def mean(self) -> int:
        """The mean of a degenerate distribution is its only value."""
        return self.order_depth

    def next(self) -> int:
        """Draw the next depth (always the same constant)."""
        return self.order_depth
class UniformDepthGenerator(OrderDepthGenerator):
    """
    Order book depths drawn uniformly from the closed interval
    [order_depth_min, order_depth_max].
    """

    def __init__(self, order_depth_min: int, order_depth_max: int, random_generator: np.random.RandomState) -> None:
        self.random_generator: np.random.RandomState = random_generator
        self.order_depth_min: int = order_depth_min
        # Stored as an exclusive upper bound, as RandomState.randint expects.
        self.order_depth_max: int = (order_depth_max + 1)

    def next(self) -> int:
        """Draw one depth uniformly from [min, max] inclusive."""
        return self.random_generator.randint(self.order_depth_min, self.order_depth_max)

    def mean(self) -> float:
        """
        True mean of the discrete uniform distribution on [min, max].

        Fix: previously returned (max - min) / 2, which is only correct when
        order_depth_min == 0; the mean of U{min, ..., max} is (min + max) / 2.
        """
        # self.order_depth_max holds max + 1, hence the - 1.
        return ((self.order_depth_min + (self.order_depth_max - 1)) / 2)
@dataclass
class MarketClosedMsg(Message):
    """
    This message is sent from an ``ExchangeAgent`` to a ``TradingAgent`` when a
    ``TradingAgent`` has made a request that cannot be completed because the
    market the ``ExchangeAgent`` trades is closed.
    """

    pass
@dataclass
class MarketHoursRequestMsg(Message):
    """
    This message can be sent to an ``ExchangeAgent`` to query the opening hours
    of the market it trades. A ``MarketHoursMsg`` is sent in response.
    """

    pass
@dataclass
class MarketHoursMsg(Message):
    """
    This message is sent by an ``ExchangeAgent`` in response to a
    ``MarketHoursRequestMsg`` message sent from a ``TradingAgent``.

    Attributes:
        mkt_open: The time that the market traded by the ``ExchangeAgent`` opens.
        mkt_close: The time that the market traded by the ``ExchangeAgent`` closes.
    """

    mkt_open: NanosecondTime
    mkt_close: NanosecondTime
@dataclass
class MarketClosePriceRequestMsg(Message):
    """
    This message can be sent to an ``ExchangeAgent`` to request that the close
    price of the market is sent when the exchange closes. This is used to
    accurately calculate the agent's final mark-to-market value.
    """
@dataclass
class MarketClosePriceMsg(Message):
    """
    This message is sent by an ``ExchangeAgent`` when the exchange closes to all
    agents that have requested this message. The value is used to accurately
    calculate the agent's final mark-to-market value.

    Attributes:
        close_prices: A mapping of symbols to closing prices.
    """

    close_prices: Dict[str, Optional[int]]
@dataclass
class MarketDataSubReqMsg(Message, ABC):
    """
    Base class for creating or cancelling market data subscriptions with an
    ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
    """

    symbol: str
    cancel: bool = False
@dataclass
class MarketDataFreqBasedSubReqMsg(MarketDataSubReqMsg, ABC):
    """
    Base class for creating or cancelling frequency-based market data
    subscriptions with an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
        freq: The frequency in nanoseconds^-1 at which to receive market updates.
    """

    freq: int = 1
@dataclass
class MarketDataEventBasedSubReqMsg(MarketDataSubReqMsg, ABC):
    """
    Base class for creating or cancelling event-based market data subscriptions
    with an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
    """
@dataclass
class L1SubReqMsg(MarketDataFreqBasedSubReqMsg):
    """
    This message requests the creation or cancellation of a subscription to L1
    order book data from an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
        freq: The frequency in nanoseconds^-1 at which to receive market updates.
    """

    pass
@dataclass
class L2SubReqMsg(MarketDataFreqBasedSubReqMsg):
    """
    This message requests the creation or cancellation of a subscription to L2
    order book data from an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
        freq: The frequency in nanoseconds^-1 at which to receive market updates.
        depth: The maximum number of price levels on both sides of the order
            book to return data for. Defaults to the entire book.
    """

    depth: int = sys.maxsize
@dataclass
class L3SubReqMsg(MarketDataFreqBasedSubReqMsg):
    """
    This message requests the creation or cancellation of a subscription to L3
    order book data from an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
        freq: The frequency in nanoseconds^-1 at which to receive market updates.
        depth: The maximum number of price levels on both sides of the order
            book to return data for. Defaults to the entire book.
    """

    depth: int = sys.maxsize
@dataclass
class TransactedVolSubReqMsg(MarketDataFreqBasedSubReqMsg):
    """
    This message requests the creation or cancellation of a subscription to
    transacted volume order book data from an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
        freq: The frequency in nanoseconds^-1 at which to receive market updates.
        lookback: The period in time backwards from the present to sum the
            transacted volume for.
    """

    lookback: str = '1min'
@dataclass
class BookImbalanceSubReqMsg(MarketDataEventBasedSubReqMsg):
    """
    This message requests the creation or cancellation of a subscription to
    book imbalance events.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If True attempts to create a new subscription, if False attempts
            to cancel an existing subscription.
        min_imbalance: The minimum book imbalance needed to trigger this
            subscription.

            0.0 is no imbalance.
            1.0 is full imbalance (ie. liquidity drop).
    """

    min_imbalance: float = 1.0
@dataclass
class MarketDataMsg(Message, ABC):
    """
    Base class for returning market data subscription results from an
    ``ExchangeAgent``.

    The ``last_transaction`` and ``exchange_ts`` fields are not directly related
    to the subscription data but are included for bookkeeping purposes.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
    """

    symbol: str
    last_transaction: int
    exchange_ts: NanosecondTime
@dataclass
class MarketDataEventMsg(MarketDataMsg, ABC):
    """
    Base class for returning event-based market data subscription results from
    an ``ExchangeAgent``.

    The ``last_transaction`` and ``exchange_ts`` fields are not directly related
    to the subscription data but are included for bookkeeping purposes.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
        stage: The stage of this event (start or finish).
    """

    class Stage(Enum):
        # Whether the event's trigger condition has just begun or just ended.
        START = 'START'
        FINISH = 'FINISH'

    stage: Stage
@dataclass
class L1DataMsg(MarketDataMsg):
    """
    This message returns L1 order book data as part of an L1 data subscription.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
        bid: The best bid price and the available volume at that price.
        ask: The best ask price and the available volume at that price.
    """

    bid: Tuple[int, int]
    ask: Tuple[int, int]
@dataclass
class L2DataMsg(MarketDataMsg):
    """
    This message returns L2 order book data as part of an L2 data subscription.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
        bids: A list of tuples containing the price and available volume at
            each bid price level.
        asks: A list of tuples containing the price and available volume at
            each ask price level.
    """

    bids: List[Tuple[int, int]]
    asks: List[Tuple[int, int]]
@dataclass
class L3DataMsg(MarketDataMsg):
    """
    This message returns L3 order book data as part of an L3 data subscription.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
        bids: A list of tuples containing the price and a list of order sizes
            at each bid price level.
        asks: A list of tuples containing the price and a list of order sizes
            at each ask price level.
    """

    bids: List[Tuple[int, List[int]]]
    asks: List[Tuple[int, List[int]]]
@dataclass
class TransactedVolDataMsg(MarketDataMsg):
    """
    This message returns order book transacted volume data as part of a
    transacted volume data subscription.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
        bid_volume: The total transacted volume of bid orders for the given lookback period.
        ask_volume: The total transacted volume of ask orders for the given lookback period.
    """

    bid_volume: int
    ask_volume: int
@dataclass
class BookImbalanceDataMsg(MarketDataEventMsg):
    """
    Sent when the book imbalance reaches a certain threshold dictated in the
    subscription request message.

    Attributes:
        symbol: The symbol of the security this data is for.
        last_transaction: The time of the last transaction that happened on the exchange.
        exchange_ts: The time that the message was sent from the exchange.
        stage: The stage of this event (start or finish).
        imbalance: Proportional size of the imbalance.
        side: Side of the book that the imbalance is towards.
    """

    imbalance: float
    side: Side
@dataclass
class OrderMsg(Message, ABC):
    """Base class for all order-related messages sent to an ``ExchangeAgent``."""

    pass
@dataclass
class LimitOrderMsg(OrderMsg):
    """
    Carries a new limit order to be placed on the exchange.

    Attributes:
        order: The limit order to place.
    """

    order: LimitOrder
@dataclass
class MarketOrderMsg(OrderMsg):
    """
    Carries a new market order to be placed on the exchange.

    Attributes:
        order: The market order to place.
    """

    order: MarketOrder