docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Insert or update R-Value in `self.r_df`.
Args:
state_key: The key of state.
r_value: R-Value(Reward).
action_key: The key of action if it is necessary for the parameter of the value function.
Exceptions:
TypeError: If the type of `r_value` is not float.
|
def save_r_df(self, state_key, r_value, action_key=None):
    """Insert or update an R-Value (reward) row in `self.r_df`.

    Args:
        state_key:  The key of the state.
        r_value:    R-Value (reward).
        action_key: Optional key of the action, when the value function
                    is parameterized by actions as well.
    """
    if action_key is None:
        columns = ["state_key", "r_value"]
        row = (state_key, r_value)
    else:
        columns = ["state_key", "action_key", "r_value"]
        row = (state_key, action_key, r_value)
    new_row_df = pd.DataFrame([row], columns=columns)
    if self.r_df is None:
        self.r_df = new_row_df
        return
    # Prepend the new row so that `drop_duplicates` (keep='first') keeps it
    # in preference to any stale row with the same key.
    merged_df = pd.concat([new_row_df, self.r_df])
    subset = ["state_key"] if action_key is None else ["state_key", "action_key"]
    self.r_df = merged_df.drop_duplicates(subset)
| 368,866
|
Learning and searching the optimal solution.
Args:
state_key: Initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
|
def learn(self, state_key, limit=1000):
    """Learn and search the optimal solution by iterative Q-Value updates.

    Args:
        state_key: Initial state.
        limit:     The maximum number of iterative updates based on
                   value iteration algorithms.
    """
    self.t = 1
    while self.t <= limit:
        next_action_list = self.extract_possible_actions(state_key)
        # NOTE(review): when no action is possible, `state_key` never changes
        # and the loop spins until `limit` or the end flag — confirm intent.
        if len(next_action_list):
            action_key = self.select_action(
                state_key=state_key,
                next_action_list=next_action_list
            )
            reward_value = self.observe_reward_value(state_key, action_key)
        if len(next_action_list):
            # Max-Q-Value in next action time.
            next_state_key = self.update_state(
                state_key=state_key,
                action_key=action_key
            )
            next_next_action_list = self.extract_possible_actions(next_state_key)
            next_action_key = self.predict_next_action(next_state_key, next_next_action_list)
            next_max_q = self.extract_q_df(next_state_key, next_action_key)
            # Update Q-Value.
            self.update_q(
                state_key=state_key,
                action_key=action_key,
                reward_value=reward_value,
                next_max_q=next_max_q
            )
            # Update State.
            state_key = next_state_key
        # Normalize Q-Values and R-Values after each step.
        self.normalize_q_value()
        self.normalize_r_value()
        # Visualization hook.
        self.visualize_learning_result(state_key)
        # Check whether a terminal state was reached.
        if self.check_the_end_flag(state_key) is True:
            break
        # Episode counter.
        self.t += 1
| 368,869
|
Update Q-Value.
Args:
state_key: The key of state.
action_key: The key of action.
reward_value: R-Value(Reward).
next_max_q: Maximum Q-Value.
|
def update_q(self, state_key, action_key, reward_value, next_max_q):
    """Apply one Q-learning update for `(state_key, action_key)`.

    Args:
        state_key:    The key of the state.
        action_key:   The key of the action.
        reward_value: R-Value (reward) observed for this transition.
        next_max_q:   Maximum Q-Value attainable from the next state.
    """
    current_q = self.extract_q_df(state_key, action_key)
    # Temporal-difference target and the standard TD update rule.
    td_target = reward_value + self.gamma_value * next_max_q
    updated_q = current_q + self.alpha_value * (td_target - current_q)
    self.save_q_df(state_key, action_key, updated_q)
| 368,870
|
Predict next action by Q-Learning.
Args:
state_key: The key of state in `self.t+1`.
next_action_list: The possible action in `self.t+1`.
Returns:
The key of action.
|
def predict_next_action(self, state_key, next_action_list):
    """Predict the next action greedily from learned Q-Values.

    Args:
        state_key:        The key of the state in `self.t+1`.
        next_action_list: The possible actions in `self.t+1`.

    Returns:
        The key of the action with the highest Q-Value, or a uniformly
        random choice when no Q-Value has been learned for this state yet.
    """
    if self.q_df is None:
        return random.choice(next_action_list)
    candidate_df = self.q_df[self.q_df.state_key == state_key]
    candidate_df = candidate_df[candidate_df.action_key.isin(next_action_list)]
    row_n = candidate_df.shape[0]
    if row_n == 0:
        # Nothing learned for this state: explore.
        return random.choice(next_action_list)
    if row_n == 1:
        return candidate_df["action_key"].values[0]
    ranked_df = candidate_df.sort_values(by=["q_value"], ascending=False)
    return ranked_df.iloc[0, :]["action_key"]
| 368,871
|
Initialization
Args:
arm_id_list: List of arms Master id.
|
def __init__(self, arm_id_list):
    """Initialize one Beta distribution per arm.

    Args:
        arm_id_list: List of arm master ids.
    """
    # Idiom fix: a plain `for` loop instead of a list comprehension that was
    # evaluated only for its side effects (and built a throwaway list).
    for arm_id in arm_id_list:
        self.__beta_dist_dict.setdefault(arm_id, BetaDist())
| 368,873
|
Pull arms.
Args:
arm_id: Arms master id.
success: The number of success.
failure: The number of failure.
|
def pull(self, arm_id, success, failure):
    """Pull an arm: feed new observations into its Beta distribution.

    Args:
        arm_id:  Arm master id.
        success: The number of successes.
        failure: The number of failures.
    """
    beta_dist = self.__beta_dist_dict[arm_id]
    beta_dist.observe(success, failure)
| 368,874
|
Listup arms and expected value.
Args:
limit: Length of the list.
Returns:
[Tuple(`Arms master id`, `expected value`)]
|
def recommend(self, limit=10):
    """List arms by expected value, best first.

    Args:
        limit: Maximum length of the returned list.

    Returns:
        [Tuple(`Arm master id`, `expected value`)] in descending order.
    """
    ranking = sorted(
        ((arm_id, dist.expected_value()) for arm_id, dist in self.__beta_dist_dict.items()),
        key=lambda pair: pair[1],
        reverse=True
    )
    return ranking[:limit]
| 368,875
|
Calculate similarity with the Tanimoto coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
|
def calculate(self, token_list_x, token_list_y):
    """Calculate similarity with the Tanimoto coefficient.

    Concrete method.

    Args:
        token_list_x: [token, token, token, ...]
        token_list_y: [token, token, token, ...]

    Returns:
        Similarity; 0.0 when both token lists are empty.
    """
    # Set membership makes the scan O(len(x)) instead of O(len(x) * len(y)).
    token_set_y = set(token_list_y)
    match_list = [token for token in token_list_x if token in token_set_y]
    denominator = len(token_list_x) + len(token_list_y) - len(match_list)
    if denominator == 0:
        # Both lists empty: define similarity as 0.0 instead of raising
        # ZeroDivisionError (robustness fix).
        return 0.0
    return float(len(match_list)) / denominator
| 368,876
|
Select action by Q(state, action).
Concrete method for the Boltzmann distribution.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
The key of action.
|
def select_action(self, state_key, next_action_list):
    """Select an action via Boltzmann (softmax) exploration.

    Args:
        state_key:        The key of the state.
        next_action_list: The possible actions in `self.t+1`.

    Returns:
        The key of the selected action.
    """
    # No Q-Values learned yet: explore uniformly at random.
    if self.q_df is None or self.q_df.shape[0] == 0:
        return random.choice(next_action_list)
    next_action_b_df = self.__calculate_boltzmann_factor(state_key, next_action_list)
    if next_action_b_df.shape[0] == 1:
        return next_action_b_df["action_key"].values[0]
    # Sample a threshold, then walk the factors sorted in ascending order.
    prob = np.random.random()
    next_action_b_df = next_action_b_df.sort_values(by=["boltzmann_factor"])
    i = 0
    # NOTE(review): this compares `prob` against the sum of two *adjacent*
    # factors rather than a running cumulative sum, so the walk is not a
    # faithful inverse-CDF sample of the Boltzmann distribution — confirm intent.
    while prob > next_action_b_df.iloc[i, :]["boltzmann_factor"] + next_action_b_df.iloc[i + 1, :]["boltzmann_factor"]:
        i += 1
        if i + 1 >= next_action_b_df.shape[0]:
            break
    max_b_action_key = next_action_b_df.iloc[i, :]["action_key"]
    return max_b_action_key
| 368,880
|
Calculate boltzmann factor.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
[(`The key of action`, `boltzmann probability`)]
|
def __calculate_boltzmann_factor(self, state_key, next_action_list):
    """Calculate Boltzmann factors (softmax probabilities) per action.

    Args:
        state_key:        The key of the state.
        next_action_list: The possible actions in `self.t+1`.
                          If the length of this list is 0, all actions
                          are considered possible.

    Returns:
        `pd.DataFrame` of Q-Value rows with a `boltzmann_factor` column.
    """
    sigmoid = self.__calculate_sigmoid()
    q_df = self.q_df[self.q_df.state_key == state_key]
    # Bug fix: filter rows whose *action_key* is in `next_action_list`
    # (same pattern as `predict_next_action`). The previous
    # `q_df[q_df.isin(next_action_list)]` masked every cell against the
    # list and turned non-matching values into NaN instead of dropping rows.
    q_df = q_df[q_df.action_key.isin(next_action_list)]
    # Work on a copy to avoid pandas SettingWithCopy warnings.
    q_df = q_df.copy()
    q_df["boltzmann_factor"] = q_df["q_value"] / sigmoid
    q_df["boltzmann_factor"] = q_df["boltzmann_factor"].apply(np.exp)
    q_df["boltzmann_factor"] = q_df["boltzmann_factor"] / q_df["boltzmann_factor"].sum()
    return q_df
| 368,882
|
Select action by Q(state, action).
Concrete method.
ε-greedy.
Args:
state_key: The key of state.
next_action_list: The possible action in `self.t+1`.
If the length of this list is 0, all action should be possible.
Returns:
The key of action.
|
def select_action(self, state_key, next_action_list):
    """Select an action with the ε-greedy policy.

    Concrete method.

    Args:
        state_key:        The key of the state.
        next_action_list: The possible actions in `self.t+1`.

    Returns:
        The key of the selected action.
    """
    # One Bernoulli draw: exploit (greedy) with probability
    # `epsilon_greedy_rate`, otherwise explore uniformly at random.
    exploit_flag = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
    if exploit_flag is False:
        return random.choice(next_action_list)
    return self.predict_next_action(state_key, next_action_list)
| 368,888
|
Init.
Args:
boltzmann_q_learning: is-a `BoltzmannQLearning`.
init_state_key: First state key.
|
def __init__(
    self,
    boltzmann_q_learning,
    init_state_key
):
    """Init.

    Args:
        boltzmann_q_learning: is-a `BoltzmannQLearning`.
        init_state_key:       First state key.

    Raises:
        TypeError: If `boltzmann_q_learning` is not a `BoltzmannQLearning`.
    """
    # Bug fix: the parameter was named `greedy_q_learning` while the body
    # (and the docstring) referenced `boltzmann_q_learning`, so every call
    # raised NameError. The parameter now matches the documented contract.
    if isinstance(boltzmann_q_learning, BoltzmannQLearning):
        self.__boltzmann_q_learning = boltzmann_q_learning
    else:
        raise TypeError()
    self.__init_state_key = init_state_key
| 368,889
|
Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
|
def inference(self, observed_arr):
    """Draw samples from the `fake` distribution.

    Args:
        observed_arr: `np.ndarray` of observed data points.

    Returns:
        `np.ndarray` of inferenced feature points.
    """
    # The forward pass is run for its side effects only; the generated
    # sample is read back from the model's feature points.
    self.__lstm_model.inference(observed_arr)
    return self.__lstm_model.get_feature_points()
| 368,891
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr):
    """Update this Discriminator by ascending its stochastic gradient.

    Args:
        grad_arr: `np.ndarray` of gradients.

    Returns:
        `np.ndarray` of delta or gradients.
    """
    # Collapse anything above rank 3 to (batch, seq, features), then keep
    # only the last time step's gradient.
    if grad_arr.ndim > 3:
        batch_n, seq_n = grad_arr.shape[0], grad_arr.shape[1]
        grad_arr = grad_arr.reshape((batch_n, seq_n, -1))
        grad_arr = grad_arr[:, -1]
    elif grad_arr.ndim == 3:
        grad_arr = grad_arr[:, -1]
    delta_arr, _, grads_list = self.__lstm_model.hidden_back_propagate(grad_arr)
    # Two placeholders are prepended — presumably slots for output-layer
    # gradients that this model does not update; confirm against `optimize`.
    grads_list.insert(0, None)
    grads_list.insert(0, None)
    self.__lstm_model.optimize(
        grads_list,
        self.__learning_rate,
        1
    )
    return delta_arr
| 368,892
|
Learn Q-Value.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
real_q_arr: `np.ndarray` of real Q-Values.
|
def learn_q(self, predicted_q_arr, real_q_arr):
    """Learn Q-Values: backpropagate the loss between predicted and real Q.

    Args:
        predicted_q_arr: `np.ndarray` of predicted Q-Values.
        real_q_arr:      `np.ndarray` of real (target) Q-Values.
    """
    # Keep a rolling window of the last `seq_len` predicted batches:
    # trim the oldest when too long, repeat the newest when too short.
    self.__predicted_q_arr_list.append(predicted_q_arr)
    while len(self.__predicted_q_arr_list) > self.__seq_len:
        self.__predicted_q_arr_list = self.__predicted_q_arr_list[1:]
    while len(self.__predicted_q_arr_list) < self.__seq_len:
        self.__predicted_q_arr_list.append(self.__predicted_q_arr_list[-1])
    # Stack to (seq, batch, dim), then reorder to (batch, seq, dim).
    predicted_q_arr = np.array(self.__predicted_q_arr_list)
    predicted_q_arr = predicted_q_arr.transpose((1, 0, 2))
    # Same rolling window for the real (target) Q-Values.
    self.__real_q_arr_list.append(real_q_arr)
    while len(self.__real_q_arr_list) > self.__seq_len:
        self.__real_q_arr_list = self.__real_q_arr_list[1:]
    while len(self.__real_q_arr_list) < self.__seq_len:
        self.__real_q_arr_list.append(self.__real_q_arr_list[-1])
    real_q_arr = np.array(self.__real_q_arr_list)
    real_q_arr = real_q_arr.transpose((1, 0, 2))
    # Loss and its gradient w.r.t. the prediction.
    loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)
    delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)
    # Backpropagate through the output layer, then (last time step only)
    # through the hidden layers.
    delta_arr, lstm_output_grads_list = self.__lstm_model.output_back_propagate(
        predicted_q_arr,
        delta_arr
    )
    delta_arr, _, lstm_hidden_grads_list = self.__lstm_model.hidden_back_propagate(
        delta_arr[:, -1]
    )
    # Output-layer gradients first, then hidden-layer gradients.
    lstm_grads_list = lstm_output_grads_list
    lstm_grads_list.extend(lstm_hidden_grads_list)
    self.__lstm_model.optimize(lstm_grads_list, self.__learning_rate, 1)
    # Track loss for monitoring.
    self.__loss_list.append(loss)
| 368,894
|
Inference of Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values.
|
def inference_q(self, next_action_arr):
    """Infer Q-Values for candidate actions.

    Args:
        next_action_arr: `np.ndarray` of actions.

    Returns:
        `np.ndarray` of Q-Values, shape (batch, 1).
    """
    flat_arr = next_action_arr.reshape((next_action_arr.shape[0], -1))
    self.__q_arr_list.append(flat_arr)
    # Keep a rolling window of exactly `seq_len` frames: trim the oldest
    # when too long, repeat the newest when too short.
    while len(self.__q_arr_list) > self.__seq_len:
        self.__q_arr_list = self.__q_arr_list[1:]
    while len(self.__q_arr_list) < self.__seq_len:
        self.__q_arr_list.append(self.__q_arr_list[-1])
    # (seq, batch, dim) -> (batch, seq, dim), then read the last step.
    seq_arr = np.array(self.__q_arr_list).transpose((1, 0, 2))
    pred_arr = self.__lstm_model.inference(seq_arr)
    return pred_arr[:, -1].reshape((pred_arr.shape[0], 1))
| 368,895
|
Init.
Args:
batch_size: Batch size.
seq_len: The length of sequences.
The length corresponds to the number of `time` units split by `time_fraction`.
min_pitch: The minimum of note number.
max_pitch: The maximum of note number.
|
def __init__(
    self,
    batch_size=20,
    seq_len=10,
    min_pitch=24,
    max_pitch=108
):
    """Init.

    Args:
        batch_size: Batch size.
        seq_len:    The length of sequences, i.e. the number of `time`
                    units split by `time_fraction`.
        min_pitch:  The minimum note number.
        max_pitch:  The maximum note number.
    """
    # The observable dimension equals the size of the pitch range.
    self.__dim = max_pitch - min_pitch
    self.__batch_size = batch_size
    self.__seq_len = seq_len
| 368,897
|
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
|
def compute(self, x_arr, y_arr):
    """Compute Euclidean distances along the last axis.

    Args:
        x_arr: `np.ndarray` of vectors.
        y_arr: `np.ndarray` of vectors.

    Returns:
        `np.ndarray` of distances.
    """
    diff_arr = x_arr - y_arr
    return np.sqrt(np.sum(np.square(diff_arr), axis=-1))
| 368,899
|
Tokenize token list.
Args:
token_list: The list of tokens.
Returns:
[vector of token, vector of token, vector of token, ...]
|
def vectorize(self, token_list):
    """Vectorize a token list.

    Args:
        token_list: The list of tokens.

    Returns:
        [vector of token, vector of token, vector of token, ...]
    """
    observed_arr = self.__setup_dataset([token_list], self.__token_master_list)
    # The forward pass is run for its side effects only; the vectors are
    # read back from the controller's feature points.
    _ = self.__controller.inference(observed_arr)
    return self.__controller.get_feature_points()
| 368,900
|
Learning.
Args:
iter_n: The number of training iterations.
k_step: The number of learning of the `discriminator`.
|
def learn(self, iter_n=500, k_step=10):
    """Run adversarial training.

    Args:
        iter_n: The number of training iterations.
        k_step: The number of learning steps of the `discriminator`.
    """
    trained_pair = self.__GAN.train(
        self.__true_sampler,
        self.__generative_model,
        self.__discriminative_model,
        iter_n=iter_n,
        k_step=k_step
    )
    # Keep the trained models for later use.
    self.__generative_model, self.__discriminative_model = trained_pair
| 368,902
|
Entry Point.
Args:
url: target url.
|
def Main(url):
    """Entry point: scrape `url`, learn sentence vectors, print features.

    Args:
        url: target url.
    """
    # Web-scraping.
    scraper = WebScraping()
    document = scraper.scrape(url)
    # NLP with a Japanese MeCab tokenizer.
    nlp_base = NlpBase()
    nlp_base.tokenizable_doc = MeCabTokenizer()
    sentence_list = nlp_base.listup_sentence(document)
    batch_size = 10
    if len(sentence_list) < batch_size:
        raise ValueError("The number of extracted sentences is insufficient.")
    # Tokenize every sentence in place; collect the global token master list.
    all_token_list = []
    for i in range(len(sentence_list)):
        nlp_base.tokenize(sentence_list[i])
        all_token_list.extend(nlp_base.token)
        sentence_list[i] = nlp_base.token
    # Learn sentence vectors with an LSTM-RTRBM.
    vectorizable_sentence = LSTMRTRBM()
    vectorizable_sentence.learn(
        sentence_list=sentence_list,
        token_master_list=list(set(all_token_list)),
        hidden_neuron_count=1000,
        batch_size=batch_size,
        learning_rate=1e-03,
        seq_len=5
    )
    test_list = sentence_list[:batch_size]
    feature_points_arr = vectorizable_sentence.vectorize(test_list)
    print("Feature points (Top 5 sentences):")
    print(feature_points_arr)
| 368,904
|
Filtering with std.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered result.
|
def filter(self, scored_list):
    """Filter, keeping entries whose score exceeds mean + 0.5 * std.

    Args:
        scored_list: The list of (sentence index, score) tuples.

    Returns:
        The list of filtered (sentence index, score) tuples.
    """
    if len(scored_list) > 0:
        # Extract the scores once instead of re-scanning the list
        # separately for the mean and the standard deviation.
        score_list = [score for _, score in scored_list]
        avg = np.mean(score_list)
        std = np.std(score_list)
    else:
        avg = 0
        std = 0
    limiter = avg + 0.5 * std
    mean_scored = [(sent_idx, score) for (sent_idx, score) in scored_list if score > limiter]
    return mean_scored
| 368,908
|
Init.
Args:
function_approximator: is-a `FunctionApproximator`.
map_size: Size of map.
memory_num: The number of step of agent's memory.
repeating_penalty: The value of penalty in the case that agent revisit.
enemy_num: The number of enemies.
enemy_init_dist: Minimum euclid distance of initial position of agent and enemies.
|
def __init__(
    self,
    function_approximator,
    batch_size=4,
    map_size=(10, 10),
    memory_num=4,
    repeating_penalty=0.5,
    enemy_num=2,
    enemy_init_dist=5
):
    """Init.

    Args:
        function_approximator: is-a `FunctionApproximator`.
        batch_size:            Batch size (used when padding the possible-action array).
        map_size:              Size of map.
        memory_num:            The number of steps of the agent's memory.
        repeating_penalty:     The value of the penalty in the case that the agent revisits.
        enemy_num:             The number of enemies.
        enemy_init_dist:       Minimum euclid distance of initial positions of agent and enemies.
    """
    # Build the map and place the agent at the fixed start position.
    self.__map_arr = self.__create_map(map_size)
    self.__agent_pos = self.START_POS
    # Enemy bookkeeping; positions are filled in by `__create_enemy`.
    self.__enemy_num = enemy_num
    self.__enemy_pos_list = [None] * enemy_num
    self.__enemy_init_dist = enemy_init_dist
    self.__create_enemy(self.__map_arr)
    self.__reward_list = []
    self.__route_memory_list = []
    self.__memory_num = memory_num
    self.__repeating_penalty = repeating_penalty
    self.__batch_size = batch_size
    super().__init__(function_approximator)
    # Learning mode by default; `inference` switches this flag on.
    self.__inferencing_flag = False
| 368,910
|
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The number of inferencing.
Returns:
`list` of `np.ndarray` of an optimal route.
|
def inference(self, state_arr, limit=1000):
    """Infer an optimal route with the learned policy.

    Args:
        state_arr: `np.ndarray` of state.
        limit:     The maximum number of inference steps.

    Returns:
        `list` of tuples describing the route:
        (agent_x, agent_y, enemy positions..., q-value).
    """
    self.__inferencing_flag = True
    # Locate the agent in channel 0 of the state.
    agent_x, agent_y = np.where(state_arr[0] == 1)
    agent_x, agent_y = agent_x[0], agent_y[0]
    self.__create_enemy(self.__map_arr)
    # NOTE(review): the first entry has 3 fields while all later entries
    # have 2 + 2 * enemy_num + 1 fields — confirm consumers tolerate this.
    result_list = [(agent_x, agent_y, 0.0)]
    result_val_list = [agent_x, agent_y]
    for e in range(self.__enemy_num):
        result_val_list.append(self.__enemy_pos_list[e][0])
        result_val_list.append(self.__enemy_pos_list[e][1])
    result_val_list.append(0.0)
    result_list.append(tuple(result_val_list))
    self.t = 0
    while self.t < limit:
        next_action_arr = self.extract_possible_actions(state_arr)
        next_q_arr = self.function_approximator.inference_q(next_action_arr)
        action_arr, q = self.select_action(next_action_arr, next_q_arr)
        self.__move_enemy(action_arr)
        # Agent position after the chosen action.
        agent_x, agent_y = np.where(action_arr[0] == 1)
        agent_x, agent_y = agent_x[0], agent_y[0]
        result_val_list = [agent_x, agent_y]
        for e in range(self.__enemy_num):
            result_val_list.append(self.__enemy_pos_list[e][0])
            result_val_list.append(self.__enemy_pos_list[e][1])
        # `q` may be an array or a scalar depending on the approximator.
        try:
            result_val_list.append(q[0])
        except IndexError:
            result_val_list.append(q)
        result_list.append(tuple(result_val_list))
        # Update State.
        state_arr = self.update_state(state_arr, action_arr)
        # Episode counter.
        self.t += 1
        # Check whether a terminal state (goal or crash) was reached.
        end_flag = self.check_the_end_flag(state_arr)
        if end_flag is True:
            break
    return result_list
| 368,911
|
Extract possible actions.
Args:
state_arr: `np.ndarray` of state.
Returns:
`np.ndarray` of actions.
The shape is:(
`batch size corresponded to each action key`,
`channel that is 1`,
`feature points1`,
`feature points2`
)
|
def extract_possible_actions(self, state_arr):
    """Extract possible actions.

    Args:
        state_arr: `np.ndarray` of state.

    Returns:
        `np.ndarray` of actions.
        The shape is: (
            `batch size corresponded to each action key`,
            `channel that is 1`,
            `feature points1`,
            `feature points2`
        )
    """
    # The agent's current position is marked with 1 in the last channel.
    agent_x, agent_y = np.where(state_arr[-1] == 1)
    agent_x, agent_y = agent_x[0], agent_y[0]
    possible_action_arr = None
    # Candidate moves: up, down, left, right, and staying in place.
    for x, y in [
        (-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)
    ]:
        next_x = agent_x + x
        if next_x < 0 or next_x >= state_arr[-1].shape[1]:
            continue
        next_y = agent_y + y
        if next_y < 0 or next_y >= state_arr[-1].shape[0]:
            continue
        # Check intermediate cells along the x-direction for walls.
        # NOTE(review): for single-step moves (|x| == 1) these ranges are
        # empty, so only the destination-cell wall check below applies.
        wall_flag = False
        if x > 0:
            for add_x in range(1, x):
                if self.__map_arr[agent_x + add_x, next_y] == self.WALL:
                    wall_flag = True
        elif x < 0:
            for add_x in range(x, 0):
                if self.__map_arr[agent_x + add_x, next_y] == self.WALL:
                    wall_flag = True
        if wall_flag is True:
            continue
        # Same wall check along the y-direction.
        if y > 0:
            for add_y in range(1, y):
                if self.__map_arr[next_x, agent_y + add_y] == self.WALL:
                    wall_flag = True
        elif y < 0:
            for add_y in range(y, 0):
                if self.__map_arr[next_x, agent_y + add_y] == self.WALL:
                    wall_flag = True
        if wall_flag is True:
            continue
        if self.__map_arr[next_x, next_y] == self.WALL:
            continue
        # Skip positions the agent recently visited.
        if (next_x, next_y) in self.__route_memory_list:
            continue
        # Channels: 0 = current agent position, 1 = map,
        # 2..(1 + enemy_num) = enemy positions, -1 = next agent position.
        next_action_arr = np.zeros((
            3 + self.__enemy_num,
            state_arr[-1].shape[0],
            state_arr[-1].shape[1]
        ))
        next_action_arr[0][agent_x, agent_y] = 1
        next_action_arr[1] = self.__map_arr
        next_action_arr[-1][next_x, next_y] = 1
        for e in range(self.__enemy_num):
            enemy_state_arr = np.zeros(state_arr[0].shape)
            enemy_state_arr[self.__enemy_pos_list[e][0], self.__enemy_pos_list[e][1]] = 1
            next_action_arr[2 + e] = enemy_state_arr
        next_action_arr = np.expand_dims(next_action_arr, axis=0)
        if possible_action_arr is None:
            possible_action_arr = next_action_arr
        else:
            possible_action_arr = np.r_[possible_action_arr, next_action_arr]
    if possible_action_arr is not None:
        # Pad with randomly repeated actions up to the batch size.
        while possible_action_arr.shape[0] < self.__batch_size:
            key = np.random.randint(low=0, high=possible_action_arr.shape[0])
            possible_action_arr = np.r_[
                possible_action_arr,
                np.expand_dims(possible_action_arr[key], axis=0)
            ]
    else:
        # Dead end: forget the oldest route memory and retry recursively,
        # which eventually frees up a previously visited cell.
        self.__route_memory_list = self.__route_memory_list[1:]
        possible_action_arr = self.extract_possible_actions(state_arr)
    return possible_action_arr
| 368,912
|
Compute the reward value.
Args:
state_arr: `np.ndarray` of state.
action_arr: `np.ndarray` of action.
Returns:
Reward value.
|
def observe_reward_value(self, state_arr, action_arr):
    """Compute the reward value.

    Args:
        state_arr:  `np.ndarray` of state.
        action_arr: `np.ndarray` of action.

    Returns:
        Reward value.
    """
    if self.__check_goal_flag(action_arr) is True:
        return 1.0
    else:
        # NOTE(review): enemies are also moved in `inference`; when called
        # from there this moves them twice per step — confirm intent.
        self.__move_enemy(action_arr)
        x, y = np.where(action_arr[-1] == 1)
        x, y = x[0], y[0]
        # Mean euclid distance to the enemies.
        e_dist_sum = 0.0
        for e in range(self.__enemy_num):
            e_dist = np.sqrt(
                ((x - self.__enemy_pos_list[e][0]) ** 2) + ((y - self.__enemy_pos_list[e][1]) ** 2)
            )
            e_dist_sum += e_dist
        # NOTE(review): despite its name, this term is *added* to the
        # reward below, i.e. it is a bonus for staying far from enemies.
        e_dist_penalty = e_dist_sum / self.__enemy_num
        goal_x, goal_y = self.__goal_pos
        if x == goal_x and y == goal_y:
            distance = 0.0
        else:
            distance = np.sqrt(((x - goal_x) ** 2) + (y - goal_y) ** 2)
        # Penalize revisiting positions in long-term route memory.
        if (x, y) in self.__route_long_memory_list:
            repeating_penalty = self.__repeating_penalty
        else:
            repeating_penalty = 0.0
        return 1.0 - distance - repeating_penalty + e_dist_penalty
| 368,913
|
Check the end flag.
If this return value is `True`, the learning is end.
As a rule, the learning can not be stopped.
This method should be overridden for concrete use cases.
Args:
state_arr: `np.ndarray` of state in `self.t`.
Returns:
bool
|
def check_the_end_flag(self, state_arr):
    """Check the end flag.

    If this return value is `True`, the learning is end.

    Args:
        state_arr: `np.ndarray` of state in `self.t`.

    Returns:
        bool
    """
    # Goal check short-circuits before the crash check, as before.
    if self.__check_goal_flag(state_arr) is True:
        return True
    return bool(self.__check_crash_flag(state_arr))
| 368,916
|
Calculate similarity with the Dice coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
|
def calculate(self, token_list_x, token_list_y):
    """Calculate similarity with the Dice coefficient.

    Concrete method.

    Args:
        token_list_x: [token, token, token, ...]
        token_list_y: [token, token, token, ...]

    Returns:
        Similarity; 0.0 when both unique sets are empty.
    """
    x, y = self.unique(token_list_x, token_list_y)
    total = float(len(x) + len(y))
    try:
        return (2 * len(x & y)) / total
    except ZeroDivisionError:
        return 0.0
| 368,919
|
Summarize input document.
Args:
test_arr: `np.ndarray` of observed data points.
vectorizable_token: is-a `VectorizableToken`.
sentence_list: `list` of all sentences.
limit: The number of selected abstract sentence.
Returns:
`np.ndarray` of scores.
|
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):
    """Summarize the input document by reconstruction-error ranking.

    Args:
        test_arr:           `np.ndarray` of observed data points.
        vectorizable_token: is-a `VectorizableToken`.
        sentence_list:      `list` of all sentences.
        limit:              The number of selected abstract sentences.

    Returns:
        `list` of selected sentences.

    Raises:
        TypeError: If `vectorizable_token` is not a `VectorizableToken`.
    """
    if isinstance(vectorizable_token, VectorizableToken) is False:
        raise TypeError()
    # Forward pass for its side effects: populates the reconstruction error.
    _ = self.inference(test_arr)
    score_arr = self.__encoder_decoder_controller.get_reconstruction_error()
    # Mean reconstruction error per sequence.
    score_arr = score_arr.reshape((
        score_arr.shape[0],
        -1
    )).mean(axis=1)
    score_list = score_arr.tolist()
    abstract_list = []
    for i in range(limit):
        # Normal prior: lowest error is most representative; otherwise highest.
        if self.__normal_prior_flag is True:
            key = score_arr.argmin()
        else:
            key = score_arr.argmax()
        score = score_list.pop(key)
        score_arr = np.array(score_list)
        # NOTE(review): after the first pop, `key` indexes the shrunken
        # score array but `test_arr` is never shrunk, so `test_arr[key]`
        # may address the wrong sequence from the 2nd iteration on — confirm.
        seq_arr = test_arr[key]
        token_arr = vectorizable_token.tokenize(seq_arr.tolist())
        # Match both spaced and unspaced joins against raw sentences.
        s = " ".join(token_arr.tolist())
        _s = "".join(token_arr.tolist())
        for sentence in sentence_list:
            if s in sentence or _s in sentence:
                abstract_list.append(sentence)
                abstract_list = list(set(abstract_list))
        if len(abstract_list) >= limit:
            break
    return abstract_list
| 368,922
|
Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
|
def inference(self, observed_arr):
    """Draw samples from the `fake` distribution.

    Args:
        observed_arr: `np.ndarray` of observed data points.

    Returns:
        `np.ndarray` of inferenced data.
    """
    # The feed-forward net expects a rank-2 design matrix.
    if observed_arr.ndim != 2:
        observed_arr = observed_arr.reshape((observed_arr.shape[0], -1))
    return self.__nn.inference(observed_arr)
| 368,924
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr, fix_opt_flag=False):
    """Update this Discriminator by ascending its stochastic gradient.

    Args:
        grad_arr:     `np.ndarray` of gradients.
        fix_opt_flag: If `True`, the parameters are left fixed and no
                      optimization step is performed.

    Returns:
        `np.ndarray` of delta or gradients.
    """
    # Flatten higher-rank gradients to (batch, features).
    if grad_arr.ndim != 2:
        grad_arr = grad_arr.reshape((grad_arr.shape[0], -1))
    delta_arr = self.__nn.back_propagation(grad_arr)
    if fix_opt_flag is False:
        self.__nn.optimize(self.__learning_rate, 1)
    return delta_arr
| 368,925
|
Init.
Args:
bar_gram: is-a `BarGram`.
midi_df_list: `list` of paths to MIDI data extracted by `MidiController`.
batch_size: Batch size.
seq_len: The length of sequences.
The length corresponds to the number of `time` units split by `time_fraction`.
time_fraction: Time fraction which means the length of bars.
|
def __init__(
    self,
    bar_gram,
    midi_df_list,
    batch_size=20,
    seq_len=10,
    time_fraction=0.1,
    conditional_flag=True
):
    """Init.

    Args:
        bar_gram:         is-a `BarGram`.
        midi_df_list:     `list` of MIDI data extracted by `MidiController`.
        batch_size:       Batch size.
        seq_len:          The length of sequences, i.e. the number of
                          `time` units split by `time_fraction`.
        time_fraction:    Time fraction which means the length of bars.
        conditional_flag: Presumably toggles conditional generation —
                          confirm against the sampler that consumes it.

    Raises:
        TypeError: If `bar_gram` is not a `BarGram`.
    """
    if not isinstance(bar_gram, BarGram):
        raise TypeError()
    self.__bar_gram = bar_gram
    self.__midi_df_list = midi_df_list
    # Collect the distinct MIDI programs (channels) over all data frames.
    program_list = []
    for midi_df in self.__midi_df_list:
        program_list.extend(midi_df["program"].drop_duplicates().values.tolist())
    program_list = list(set(program_list))
    self.__program_list = program_list
    self.__channel = len(program_list)
    self.__batch_size = batch_size
    self.__seq_len = seq_len
    self.__time_fraction = time_fraction
    self.__dim = self.__bar_gram.dim
    self.__conditional_flag = conditional_flag
| 368,926
|
Multi-Agent Learning.
Override.
Args:
initial_state_key: Initial state.
limit: Limit of the number of learning.
game_n: The number of games.
|
def learn(self, initial_state_key, limit=1000, game_n=1):
    """Multi-Agent Learning.

    Override.

    Args:
        initial_state_key: Initial state.
        limit:             Limit of the number of learning steps.
        game_n:            The number of games.
    """
    end_flag_list = [False] * len(self.q_learning_list)
    for game in range(game_n):
        # Every game restarts from the same initial state.
        state_key = copy.copy(initial_state_key)
        self.t = 1
        while self.t <= limit:
            for i in range(len(self.q_learning_list)):
                # Record the state trajectory only during the final game.
                if game + 1 == game_n:
                    self.state_key_list.append((i, copy.copy(state_key)))
                self.q_learning_list[i].t = self.t
                next_action_list = self.q_learning_list[i].extract_possible_actions(state_key)
                if len(next_action_list):
                    action_key = self.q_learning_list[i].select_action(
                        state_key=state_key,
                        next_action_list=next_action_list
                    )
                    reward_value = self.q_learning_list[i].observe_reward_value(state_key, action_key)
                    # Check whether this agent reached a terminal state.
                    if self.q_learning_list[i].check_the_end_flag(state_key) is True:
                        end_flag_list[i] = True
                    # Max-Q-Value in next action time.
                    next_state_key = self.q_learning_list[i].update_state(
                        state_key=state_key,
                        action_key=action_key
                    )
                    next_next_action_list = self.q_learning_list[i].extract_possible_actions(next_state_key)
                    if len(next_next_action_list):
                        next_action_key = self.q_learning_list[i].predict_next_action(
                            next_state_key,
                            next_next_action_list
                        )
                        next_max_q = self.q_learning_list[i].extract_q_df(next_state_key, next_action_key)
                        # Update Q-Value.
                        self.q_learning_list[i].update_q(
                            state_key=state_key,
                            action_key=action_key,
                            reward_value=reward_value,
                            next_max_q=next_max_q
                        )
                    # Update State: the next agent acts on this agent's successor state.
                    state_key = next_state_key
                # Episode counter.
                # NOTE(review): `self.t` advances once per *agent turn*, not
                # once per sweep over all agents, so `limit` bounds agent
                # turns rather than rounds — confirm intent.
                self.t += 1
                self.q_learning_list[i].t = self.t
            # Stop once every agent has raised its end flag.
            if False not in end_flag_list:
                break
| 368,929
|
Init for Adaptive Simulated Annealing.
Args:
reannealing_per: How often this model reanneals, in units of cycles.
thermostat: Thermostat.
t_min: The minimum temperature.
t_default: The default temperature.
|
def adaptive_set(
    self,
    reannealing_per=50,
    thermostat=0.9,
    t_min=0.001,
    t_default=1.0
):
    """Configure Adaptive Simulated Annealing.

    Args:
        reannealing_per: How often this model reanneals, in cycles.
        thermostat:      Thermostat (cooling multiplier).
        t_min:           The minimum temperature.
        t_default:       The default temperature restored on reannealing.
    """
    self.__t_default = t_default
    self.__t_min = t_min
    self.__thermostat = thermostat
    self.__reannealing_per = reannealing_per
| 368,934
|
Change temperature.
Override.
Args:
t: Now temperature.
Returns:
Next temperature.
|
def change_t(self, t):
    """Change temperature.

    Override.

    Args:
        t: Current temperature.

    Returns:
        Next temperature.
    """
    t = super().change_t(t)
    self.__now_cycles += 1
    # Outside a reannealing boundary the base schedule stands.
    if self.__now_cycles % self.__reannealing_per != 0:
        return t
    # Cool by the thermostat; re-anneal back to the default temperature
    # once the minimum is undershot.
    t *= self.__thermostat
    if t < self.__t_min:
        t = self.__t_default
    return t
| 368,935
|
Training the model.
Args:
observed_arr: `np.ndarray` of observed data points.
target_arr: `np.ndarray` of target labeled data.
|
def learn(self, observed_arr, target_arr):
    """Training the model.

    Args:
        observed_arr: `np.ndarray` of observed data points.
        target_arr:   `np.ndarray` of target labeled data. May be empty
                      (row count 0), in which case `observed_arr` is its
                      own target (autoencoder-style).

    Raises:
        ValueError: If `target_arr` is non-empty but its row count differs
                    from `observed_arr`'s.
    """
    # Pre-learning.
    if self.__pre_learning_epochs > 0:
        self.__encoder_decoder_controller.learn(observed_arr, observed_arr)
    learning_rate = self.__learning_rate
    row_o = observed_arr.shape[0]
    row_t = target_arr.shape[0]
    if row_t != 0 and row_t != row_o:
        raise ValueError("The row of `target_arr` must be equivalent to the row of `observed_arr`.")
    if row_t == 0:
        # No explicit targets: reconstruct the observations.
        target_arr = observed_arr.copy()
    else:
        if target_arr.ndim == 2:
            # Insert a sequence axis: (batch, dim) -> (batch, 1, dim).
            target_arr = target_arr.reshape((target_arr.shape[0], 1, target_arr.shape[1]))
    # Optional train/test split.
    if self.__test_size_rate > 0:
        train_index = np.random.choice(observed_arr.shape[0], round((1 - self.__test_size_rate) * observed_arr.shape[0]), replace=False)
        test_index = np.array(list(set(range(observed_arr.shape[0])) - set(train_index)))
        train_observed_arr = observed_arr[train_index]
        test_observed_arr = observed_arr[test_index]
        train_target_arr = target_arr[train_index]
        test_target_arr = target_arr[test_index]
    else:
        train_observed_arr = observed_arr
        train_target_arr = target_arr
    # Best parameter snapshots for the remember/restore strategy.
    encoder_best_params_list = []
    decoder_best_params_list = []
    re_encoder_best_params_list = []
    try:
        self.__change_inferencing_mode(False)
        self.__memory_tuple_list = []
        eary_stop_flag = False
        loss_list = []
        min_loss = None
        for epoch in range(self.__epochs):
            # Periodic learning-rate attenuation.
            if ((epoch + 1) % self.__attenuate_epoch == 0):
                learning_rate = learning_rate / self.__learning_attenuate_rate
            # Sample a mini-batch (with replacement).
            rand_index = np.random.choice(train_observed_arr.shape[0], size=self.__batch_size)
            batch_observed_arr = train_observed_arr[rand_index]
            batch_target_arr = train_target_arr[rand_index]
            try:
                _ = self.inference(batch_observed_arr)
                delta_arr, _, loss = self.compute_retrospective_loss()
                # If the loss jumps beyond the tolerance `tld` relative to
                # the running mean, restore the best params and re-try once.
                remember_flag = False
                if len(loss_list) > 0:
                    if abs(loss - (sum(loss_list)/len(loss_list))) > self.__tld:
                        remember_flag = True
                if remember_flag is True:
                    self.__remember_best_params(
                        encoder_best_params_list,
                        decoder_best_params_list,
                        re_encoder_best_params_list
                    )
                    # Re-try.
                    _ = self.inference(batch_observed_arr)
                    delta_arr, _, loss = self.compute_retrospective_loss()
                re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.back_propagation(delta_arr)
                self.optimize(
                    re_encoder_grads_list,
                    decoder_grads_list,
                    encoder_grads_list,
                    learning_rate,
                    epoch
                )
                # Snapshot the parameters whenever the loss improves.
                if min_loss is None or min_loss > loss:
                    min_loss = loss
                    encoder_best_params_list = [
                        self.__encoder_decoder_controller.encoder.graph.weights_lstm_hidden_arr,
                        self.__encoder_decoder_controller.encoder.graph.weights_lstm_observed_arr,
                        self.__encoder_decoder_controller.encoder.graph.lstm_bias_arr
                    ]
                    decoder_best_params_list = [
                        self.__encoder_decoder_controller.decoder.graph.weights_lstm_hidden_arr,
                        self.__encoder_decoder_controller.decoder.graph.weights_lstm_observed_arr,
                        self.__encoder_decoder_controller.decoder.graph.lstm_bias_arr
                    ]
                    re_encoder_best_params_list = [
                        self.__retrospective_encoder.graph.weights_lstm_hidden_arr,
                        self.__retrospective_encoder.graph.weights_lstm_observed_arr,
                        self.__retrospective_encoder.graph.lstm_bias_arr
                    ]
                    self.__logger.debug("Best params are updated.")
                # Reset recurrent activities between epochs.
                self.__encoder_decoder_controller.encoder.graph.hidden_activity_arr = np.array([])
                self.__encoder_decoder_controller.encoder.graph.cec_activity_arr = np.array([])
                self.__encoder_decoder_controller.decoder.graph.hidden_activity_arr = np.array([])
                self.__encoder_decoder_controller.decoder.graph.cec_activity_arr = np.array([])
                self.__retrospective_encoder.graph.hidden_activity_arr = np.array([])
                self.__retrospective_encoder.graph.cec_activity_arr = np.array([])
            except FloatingPointError:
                # Late-stage underflow is treated as early stopping;
                # early-stage underflow is a genuine failure.
                if epoch > int(self.__epochs * 0.7):
                    self.__logger.debug(
                        "Underflow occurred when the parameters are being updated. Because of early stopping, this error is catched and the parameter is not updated."
                    )
                    eary_stop_flag = True
                    break
                else:
                    self.__logger.debug(
                        "Underflow occurred when the parameters are being updated."
                    )
                    raise
            # Held-out evaluation with the same remember/re-try strategy.
            if self.__test_size_rate > 0:
                rand_index = np.random.choice(test_observed_arr.shape[0], size=self.__batch_size)
                test_batch_observed_arr = test_observed_arr[rand_index]
                test_batch_target_arr = test_target_arr[rand_index]
                self.__change_inferencing_mode(True)
                _ = self.inference(test_batch_observed_arr)
                _, _, test_loss = self.compute_retrospective_loss()
                remember_flag = False
                if len(loss_list) > 0:
                    if abs(test_loss - (sum(loss_list)/len(loss_list))) > self.__tld:
                        remember_flag = True
                if remember_flag is True:
                    self.__remember_best_params(
                        encoder_best_params_list,
                        decoder_best_params_list,
                        re_encoder_best_params_list
                    )
                    # Re-try.
                    _ = self.inference(test_batch_observed_arr)
                    _, _, test_loss = self.compute_retrospective_loss()
                self.__change_inferencing_mode(False)
                self.__verificate_retrospective_loss(loss, test_loss)
                self.__encoder_decoder_controller.encoder.graph.hidden_activity_arr = np.array([])
                self.__encoder_decoder_controller.encoder.graph.cec_activity_arr = np.array([])
                self.__encoder_decoder_controller.decoder.graph.hidden_activity_arr = np.array([])
                self.__encoder_decoder_controller.decoder.graph.cec_activity_arr = np.array([])
            # Convergence check against the previous epoch's loss.
            # NOTE(review): if early FloatingPointErrors prevented appends,
            # `loss_list` can still be empty when `epoch > 1`, making
            # `loss_list[-1]` raise IndexError — confirm.
            if epoch > 1 and abs(loss - loss_list[-1]) < self.__tol:
                eary_stop_flag = True
                break
            loss_list.append(loss)
    except KeyboardInterrupt:
        self.__logger.debug("Interrupt.")
    if eary_stop_flag is True:
        self.__logger.debug("Early stopping.")
        eary_stop_flag = False
    # Restore the best snapshot and switch to inference mode.
    self.__remember_best_params(
        encoder_best_params_list,
        decoder_best_params_list,
        re_encoder_best_params_list
    )
    self.__change_inferencing_mode(True)
    self.__logger.debug("end. ")
| 368,938
|
Learn features generated by `FeatureGenerator`.
Args:
feature_generator: is-a `FeatureGenerator`.
|
def learn_generated(self, feature_generator):
    '''
    Learn features generated by `FeatureGenerator`.

    Runs (optional) pre-learning of the Encoder/Decoder, then iterates
    over generated mini-batches: inference, retrospective loss,
    back propagation, and optimization. Keeps the lowest-loss
    parameters and restores them on early stopping.

    Args:
        feature_generator:  is-a `FeatureGenerator` yielding
                            (train observed, train target, test observed, test target) batches.

    Raises:
        TypeError:           If `feature_generator` is not a `FeatureGenerator`.
        FloatingPointError:  Re-raised if underflow occurs before 70% of `epochs`.
    '''
    if isinstance(feature_generator, FeatureGenerator) is False:
        raise TypeError("The type of `feature_generator` must be `FeatureGenerator`.")
    # Pre-learning.
    if self.__pre_learning_epochs > 0:
        self.__encoder_decoder_controller.learn_generated(feature_generator)
    learning_rate = self.__learning_rate
    # Snapshots of the best (lowest-loss) parameters seen so far.
    encoder_best_params_list = []
    decoder_best_params_list = []
    re_encoder_best_params_list = []
    try:
        # Train mode: dropout and other train-only behavior enabled.
        self.__change_inferencing_mode(False)
        self.__memory_tuple_list = []
        eary_stop_flag = False
        loss_list = []
        min_loss = None
        epoch = 0
        for batch_observed_arr, batch_target_arr, test_batch_observed_arr, test_batch_target_arr in feature_generator.generate():
            epoch += 1
            # Attenuate the learning rate every `__attenuate_epoch` epochs.
            if ((epoch + 1) % self.__attenuate_epoch == 0):
                learning_rate = learning_rate / self.__learning_attenuate_rate
            try:
                _ = self.inference(batch_observed_arr)
                delta_arr, _, loss = self.compute_retrospective_loss()
                # If the loss deviates from the running mean by more than
                # `__tld`, roll back to the best params and re-infer.
                remember_flag = False
                if len(loss_list) > 0:
                    if abs(loss - (sum(loss_list)/len(loss_list))) > self.__tld:
                        remember_flag = True
                if remember_flag is True:
                    self.__remember_best_params(
                        encoder_best_params_list,
                        decoder_best_params_list,
                        re_encoder_best_params_list
                    )
                    # Re-try.
                    _ = self.inference(batch_observed_arr)
                    delta_arr, _, loss = self.compute_retrospective_loss()
                re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.back_propagation(delta_arr)
                self.optimize(
                    re_encoder_grads_list,
                    decoder_grads_list,
                    encoder_grads_list,
                    learning_rate,
                    epoch
                )
                # Snapshot the parameters whenever a new minimum loss is hit.
                if min_loss is None or min_loss > loss:
                    min_loss = loss
                    encoder_best_params_list = [
                        self.__encoder_decoder_controller.encoder.graph.weights_lstm_hidden_arr,
                        self.__encoder_decoder_controller.encoder.graph.weights_lstm_observed_arr,
                        self.__encoder_decoder_controller.encoder.graph.lstm_bias_arr
                    ]
                    decoder_best_params_list = [
                        self.__encoder_decoder_controller.decoder.graph.weights_lstm_hidden_arr,
                        self.__encoder_decoder_controller.decoder.graph.weights_lstm_observed_arr,
                        self.__encoder_decoder_controller.decoder.graph.lstm_bias_arr
                    ]
                    re_encoder_best_params_list = [
                        self.__retrospective_encoder.graph.weights_lstm_hidden_arr,
                        self.__retrospective_encoder.graph.weights_lstm_observed_arr,
                        self.__retrospective_encoder.graph.lstm_bias_arr
                    ]
                    self.__logger.debug("Best params are updated.")
                # Reset recurrent state between batches.
                self.__encoder_decoder_controller.encoder.graph.hidden_activity_arr = np.array([])
                self.__encoder_decoder_controller.encoder.graph.cec_activity_arr = np.array([])
                self.__encoder_decoder_controller.decoder.graph.hidden_activity_arr = np.array([])
                self.__encoder_decoder_controller.decoder.graph.cec_activity_arr = np.array([])
                self.__retrospective_encoder.graph.hidden_activity_arr = np.array([])
                self.__retrospective_encoder.graph.cec_activity_arr = np.array([])
            except FloatingPointError:
                if epoch > int(self.__epochs * 0.7):
                    self.__logger.debug(
                        "Underflow occurred when the parameters are being updated. Because of early stopping, this error is catched and the parameter is not updated."
                    )
                    eary_stop_flag = True
                    break
                else:
                    self.__logger.debug(
                        "Underflow occurred when the parameters are being updated."
                    )
                    raise
            if self.__test_size_rate > 0:
                # Validation pass in inferencing mode on the test batch.
                self.__change_inferencing_mode(True)
                _ = self.inference(test_batch_observed_arr)
                _, _, test_loss = self.compute_retrospective_loss()
                remember_flag = False
                if len(loss_list) > 0:
                    if abs(test_loss - (sum(loss_list)/len(loss_list))) > self.__tld:
                        remember_flag = True
                if remember_flag is True:
                    self.__remember_best_params(
                        encoder_best_params_list,
                        decoder_best_params_list,
                        re_encoder_best_params_list
                    )
                    # Re-try.
                    _ = self.inference(test_batch_observed_arr)
                    _, _, test_loss = self.compute_retrospective_loss()
                self.__change_inferencing_mode(False)
                self.__verificate_retrospective_loss(loss, test_loss)
            self.__encoder_decoder_controller.encoder.graph.hidden_activity_arr = np.array([])
            self.__encoder_decoder_controller.encoder.graph.cec_activity_arr = np.array([])
            self.__encoder_decoder_controller.decoder.graph.hidden_activity_arr = np.array([])
            self.__encoder_decoder_controller.decoder.graph.cec_activity_arr = np.array([])
            # Converged: stop when the loss change falls below `__tol`.
            if epoch > 1 and abs(loss - loss_list[-1]) < self.__tol:
                eary_stop_flag = True
                break
            loss_list.append(loss)
    except KeyboardInterrupt:
        self.__logger.debug("Interrupt.")
    if eary_stop_flag is True:
        self.__logger.debug("Early stopping.")
        eary_stop_flag = False
    # Restore the best parameters and switch to inferencing mode.
    self.__remember_best_params(
        encoder_best_params_list,
        decoder_best_params_list,
        re_encoder_best_params_list
    )
    self.__change_inferencing_mode(True)
    self.__logger.debug("end. ")
| 368,939
|
    Inference by the model.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced feature points.
|
def inference(self, observed_arr):
    '''
    Inference by the model.

    Runs the Encoder/Decoder on the observed data, then re-encodes the
    decoded sequence with the retrospective encoder. All intermediate
    arrays are remembered for the retrospective loss computation.

    Args:
        observed_arr:   `np.ndarray` of observed data points.

    Returns:
        `np.ndarray` of re-encoded feature points.
    '''
    decoded_arr = self.__encoder_decoder_controller.inference(observed_arr)
    encoded_arr = self.__encoder_decoder_controller.get_feature_points()
    _ = self.__retrospective_encoder.inference(decoded_arr)
    re_encoded_arr = self.__retrospective_encoder.get_feature_points()
    # Remembered for `compute_retrospective_loss` / `back_propagation`.
    self.__inferenced_tuple = (observed_arr, encoded_arr, decoded_arr, re_encoded_arr)
    return re_encoded_arr
| 368,940
|
Summarize input document.
Args:
    test_arr: `np.ndarray` of observed data points.
vectorizable_token: is-a `VectorizableToken`.
sentence_list: `list` of all sentences.
limit: The number of selected abstract sentence.
Returns:
`list` of `str` of abstract sentences.
|
def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):
    '''
    Summarize input document.

    Args:
        test_arr:               `np.ndarray` of observed data points.
        vectorizable_token:     is-a `VectorizableToken`.
        sentence_list:          `list` of all sentences.
        limit:                  The number of selected abstract sentences.

    Returns:
        `list` of `str` of abstract sentences.

    Raises:
        TypeError:  If `vectorizable_token` is not a `VectorizableToken`.
    '''
    if isinstance(vectorizable_token, VectorizableToken) is False:
        raise TypeError()

    _ = self.inference(test_arr)
    _, loss_arr, _ = self.compute_retrospective_loss()

    # BUG FIX: the previous implementation popped entries from a copy of
    # the losses and re-ran `argmin` on the shrunken array, so every key
    # after the first no longer lined up with `test_arr`. Sorting the
    # original losses once keeps each key a valid index into `test_arr`.
    abstract_list = []
    for key in loss_arr.argsort()[:limit]:
        seq_arr = test_arr[key]
        token_arr = vectorizable_token.tokenize(seq_arr.tolist())
        # Match both the space-joined and the concatenated token string.
        s = " ".join(token_arr.tolist())
        _s = "".join(token_arr.tolist())
        for sentence in sentence_list:
            if s in sentence or _s in sentence:
                abstract_list.append(sentence)
        abstract_list = list(set(abstract_list))
        if len(abstract_list) >= limit:
            break
    return abstract_list
| 368,941
|
    Back propagation.
    Args:
        delta_arr: Delta.
    Returns:
        Tuple data.
        - re-encoder's `list` of gradients,
        - decoder's `list` of gradients,
        - encoder's `np.ndarray` of Delta,
        - encoder's `list` of gradients.
|
def back_propagation(self, delta_arr):
    '''
    Back propagation through re-encoder, decoder, and encoder.

    Args:
        delta_arr:  `np.ndarray` of delta from the retrospective loss.

    Returns:
        Tuple data. The shape is...
        - re-encoder's `list` of gradients,
        - decoder's `list` of gradients,
        - encoder's `np.ndarray` of delta,
        - encoder's `list` of gradients.
    '''
    # Propagate the delta of the last time step through the retrospective encoder.
    re_encoder_delta_arr, delta_hidden_arr, re_encoder_grads_list = self.__retrospective_encoder.hidden_back_propagate(
        delta_arr[:, -1]
    )
    # Prepend `None` placeholders — presumably to align the gradients
    # list with the optimizer's expected layout; TODO confirm.
    re_encoder_grads_list.insert(0, None)
    re_encoder_grads_list.insert(0, None)
    observed_arr, encoded_arr, decoded_arr, re_encoded_arr = self.__inferenced_tuple
    # Reconstruction delta between the decoded output and the observation.
    delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta(
        decoded_arr,
        observed_arr
    )
    # Add the retrospective delta to the last time step only.
    delta_arr[:, -1] += re_encoder_delta_arr[:, -1]
    decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation(
        delta_arr
    )
    return re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list
| 368,942
|
    Optimization: update parameters with the computed gradients.
    Args:
        re_encoder_grads_list: re-encoder's `list` of gradients.
        decoder_grads_list: decoder's `list` of gradients.
        encoder_grads_list: encoder's `list` of gradients.
learning_rate: Learning rate.
epoch: Now epoch.
|
def optimize(
    self,
    re_encoder_grads_list,
    decoder_grads_list,
    encoder_grads_list,
    learning_rate,
    epoch
):
    '''
    Update parameters with the computed gradients.

    Delegates to the retrospective encoder's and the Encoder/Decoder
    controller's own optimizers.

    Args:
        re_encoder_grads_list:  re-encoder's `list` of gradients.
        decoder_grads_list:     decoder's `list` of gradients.
        encoder_grads_list:     encoder's `list` of gradients.
        learning_rate:          Learning rate.
        epoch:                  Now epoch.
    '''
    self.__retrospective_encoder.optimize(re_encoder_grads_list, learning_rate, epoch)
    self.__encoder_decoder_controller.optimize(
        decoder_grads_list,
        encoder_grads_list,
        learning_rate,
        epoch
    )
| 368,943
|
    Change inferencing mode in Encoder/Decoder and the retrospective encoder.
    Args:
        inferencing_mode: `bool`. If `True`, train-only behavior such as dropout is disabled.
|
def __change_inferencing_mode(self, inferencing_mode):
    '''
    Toggle inferencing mode on encoder, decoder, and retrospective encoder.

    Args:
        inferencing_mode:   `bool`. `True` switches the optimizers into
                            inference mode; `False` restores training mode.
    '''
    self.__encoder_decoder_controller.decoder.opt_params.inferencing_mode = inferencing_mode
    self.__encoder_decoder_controller.encoder.opt_params.inferencing_mode = inferencing_mode
    self.__retrospective_encoder.opt_params.inferencing_mode = inferencing_mode
| 368,945
|
Remember best parameters.
Args:
encoder_best_params_list: `list` of encoder's parameters.
decoder_best_params_list: `list` of decoder's parameters.
    re_encoder_best_params_list: `list` of re-encoder's parameters.
|
def __remember_best_params(self, encoder_best_params_list, decoder_best_params_list, re_encoder_best_params_list):
    '''
    Restore the best (lowest-loss) parameters remembered during training.

    Does nothing if any of the snapshots is still empty.

    Args:
        encoder_best_params_list:       `list` of encoder's parameters
                                        (hidden weights, observed weights, bias).
        decoder_best_params_list:       `list` of decoder's parameters.
        re_encoder_best_params_list:    `list` of re-encoder's parameters.
    '''
    # BUG FIX: the guard previously ignored `re_encoder_best_params_list`,
    # raising `IndexError` when it was still empty while the other two
    # snapshots were populated.
    if len(encoder_best_params_list) > 0 and len(decoder_best_params_list) > 0 and len(re_encoder_best_params_list) > 0:
        self.__encoder_decoder_controller.encoder.graph.weights_lstm_hidden_arr = encoder_best_params_list[0]
        self.__encoder_decoder_controller.encoder.graph.weights_lstm_observed_arr = encoder_best_params_list[1]
        self.__encoder_decoder_controller.encoder.graph.lstm_bias_arr = encoder_best_params_list[2]
        self.__encoder_decoder_controller.decoder.graph.weights_lstm_hidden_arr = decoder_best_params_list[0]
        self.__encoder_decoder_controller.decoder.graph.weights_lstm_observed_arr = decoder_best_params_list[1]
        self.__encoder_decoder_controller.decoder.graph.lstm_bias_arr = decoder_best_params_list[2]
        self.__retrospective_encoder.graph.weights_lstm_hidden_arr = re_encoder_best_params_list[0]
        self.__retrospective_encoder.graph.weights_lstm_observed_arr = re_encoder_best_params_list[1]
        self.__retrospective_encoder.graph.lstm_bias_arr = re_encoder_best_params_list[2]
        self.__logger.debug("Best params are saved.")
| 368,946
|
Entry Point.
Args:
url: PDF url.
|
def Main(url, similarity_mode="TfIdfCosine", similarity_limit=0.75):
    '''
    Entry Point: scrape a PDF and print up to 3 summary sentences.

    Args:
        url:                PDF url.
        similarity_mode:    Name of the `Similarity Filter` to use. One of
                            "TfIdfCosine", "Dice", "Jaccard", "Simpson".
        similarity_limit:   If the similarity exceeds this value,
                            the sentence will be cut off.

    Raises:
        ValueError: If `similarity_mode` is not a supported mode name.
    '''
    # The object of Web-scraping.
    web_scrape = WebScraping()
    # Set the object of reading PDF files.
    web_scrape.readable_web_pdf = WebPDFReading()
    # Execute Web-scraping.
    document = web_scrape.scrape(url)

    # Dispatch table mapping each mode name to its `Similarity Filter` class:
    # - TfIdfCosine: cosine similarity of Tf-Idf vectors.
    # - Dice/Jaccard/Simpson: the respective set-overlap coefficients.
    similarity_filter_dict = {
        "TfIdfCosine": TfIdfCosine,
        "Dice": Dice,
        "Jaccard": Jaccard,
        "Simpson": Simpson,
    }
    if similarity_mode not in similarity_filter_dict:
        raise ValueError()
    similarity_filter = similarity_filter_dict[similarity_mode]()

    # The object of the NLP.
    nlp_base = NlpBase()
    # Set tokenizer. This is japanese tokenizer with MeCab.
    nlp_base.tokenizable_doc = MeCabTokenizer()
    # Set the object of NLP.
    similarity_filter.nlp_base = nlp_base
    # If the similarity exceeds this value, the sentence will be cut off.
    similarity_filter.similarity_limit = similarity_limit

    # The object of automatic summarization.
    auto_abstractor = AutoAbstractor()
    # Set tokenizer. This is japanese tokenizer with MeCab.
    auto_abstractor.tokenizable_doc = MeCabTokenizer()
    # Object of abstracting and filtering document.
    abstractable_doc = TopNRankAbstractor()
    # Execute summarization.
    result_dict = auto_abstractor.summarize(document, abstractable_doc, similarity_filter)
    # Output the first 3 summarized sentences. (A plain loop — a list
    # comprehension must not be used for its print side effects.)
    for sentence in result_dict["summarize_result"][:3]:
        print(sentence)
| 368,948
|
Entry Point.
Args:
url: target url.
|
def Main(url):
    '''
    Entry Point: learn sentence vectors from a web page and print features.

    Args:
        url: target url.
    '''
    # The object of Web-Scraping.
    web_scrape = WebScraping()
    # Execute Web-Scraping.
    document = web_scrape.scrape(url)

    # The object of NLP.
    nlp_base = NlpBase()
    # Set tokenizer. This is japanese tokenizer with MeCab.
    nlp_base.tokenizable_doc = MeCabTokenizer()

    sentence_list = nlp_base.listup_sentence(document)

    # Tokenize each sentence in place while collecting the token vocabulary.
    # (`enumerate` instead of the `range(len(...))` anti-idiom.)
    all_token_list = []
    for i, sentence in enumerate(sentence_list):
        nlp_base.tokenize(sentence)
        all_token_list.extend(nlp_base.token)
        sentence_list[i] = nlp_base.token

    vectorlizable_sentence = EncoderDecoder()
    vectorlizable_sentence.learn(
        sentence_list=sentence_list,
        token_master_list=list(set(all_token_list)),
        epochs=60
    )
    test_list = sentence_list[:5]
    feature_points_arr = vectorlizable_sentence.vectorize(test_list)
    reconstruction_error_arr = vectorlizable_sentence.controller.get_reconstruction_error().mean()

    print("Feature points (Top 5 sentences):")
    print(feature_points_arr)
    print("Reconstruction error(MSE):")
    print(reconstruction_error_arr)
| 368,950
|
Tokenize vector.
Args:
vector_list: The list of vector of one token.
Returns:
token
|
def tokenize(self, vector_list):
    '''
    Tokenize vectors by nearest-feature-point lookup.

    Args:
        vector_list:    The list of vectors, one per token.

    Returns:
        `np.ndarray` of tokens from `self.token_arr` whose feature points
        are closest (by `self.computable_distance`) to each input vector.
    '''
    if self.computable_distance is None:
        # Default metric.
        self.computable_distance = EuclidDistance()
    vector_arr = np.array(vector_list)
    # NOTE: the former `distance_arr = np.empty_like(vector_arr)`
    # pre-allocation was dead code (immediately rebound in the loop
    # below, and with the wrong shape) — removed.
    feature_arr = self.__dbm.get_feature_point(layer_number=0)
    key_arr = np.empty(vector_arr.shape[0], dtype=int)
    for i in range(vector_arr.shape[0]):
        # Distance from vector i to every feature point.
        distance_arr = self.computable_distance.compute(
            np.expand_dims(vector_arr[i], axis=0).repeat(feature_arr.shape[0], axis=0),
            feature_arr
        )
        key_arr[i] = distance_arr.argmin(axis=0)
    return self.token_arr[key_arr]
| 368,960
|
具象メソッド
モノラルビートを生成する
Args:
stream: PyAudioのストリーム
left_chunk: 左音源に対応するチャンク
right_chunk: 右音源に対応するチャンク
volume: 音量
Returns:
void
|
def write_stream(self, stream, left_chunk, right_chunk, volume):
    '''
    Generate a monaural beat and write it to a PyAudio stream.

    Both stereo channels receive the same mixed sample
    `(left + right) * volume`, packed as two 32-bit floats.

    Args:
        stream:         PyAudio stream.
        left_chunk:     Chunk of the left sound source.
        right_chunk:    Chunk of the right sound source.
        volume:         Volume.

    Raises:
        ValueError: If the two chunks differ in length.
    '''
    if len(left_chunk) != len(right_chunk):
        raise ValueError()
    for left_sample, right_sample in zip(left_chunk, right_chunk):
        mixed_sample = (left_sample + right_sample) * volume
        stream.write(struct.pack("2f", mixed_sample, mixed_sample))
| 368,963
|
具象メソッド
wavファイルに保存するモノラルビートを読み込む
Args:
left_chunk: 左音源に対応するチャンク
right_chunk: 右音源に対応するチャンク
volume: 音量
bit16: 整数化の条件
Returns:
フレームのlist
|
def read_stream(self, left_chunk, right_chunk, volume, bit16=32767.0):
    '''
    Build the frames of a monaural beat for saving to a wav file.

    Each frame duplicates the integerized mixed sample
    `int((left + right) * bit16 * volume)` on both channels,
    packed as two 16-bit integers.

    Args:
        left_chunk:     Chunk of the left sound source.
        right_chunk:    Chunk of the right sound source.
        volume:         Volume.
        bit16:          Scale factor for 16-bit integerization.

    Returns:
        `list` of packed frames.

    Raises:
        ValueError: If the two chunks differ in length.
    '''
    if len(left_chunk) != len(right_chunk):
        raise ValueError()
    frame_list = []
    for left_sample, right_sample in zip(left_chunk, right_chunk):
        sample = int((left_sample + right_sample) * bit16 * volume)
        frame_list.append(struct.pack("2h", sample, sample))
    return frame_list
| 368,964
|
Tokenize str.
Args:
sentence_str: tokenized string.
Returns:
[token, token, token, ...]
|
def tokenize(self, sentence_str):
    '''
    Tokenize a string into words with MeCab in wakati (word-split) mode.

    Args:
        sentence_str:   The string to tokenize.

    Returns:
        [token, token, token, ...]
    '''
    tagger = MeCab.Tagger("-Owakati")
    parsed_str = tagger.parse(sentence_str)
    return parsed_str.rstrip(" \n").split(" ")
| 368,965
|
Init.
Args:
gans_value_function: is-a `GANsValueFunction`.
|
def __init__(self, gans_value_function=None):
    '''
    Init.

    Args:
        gans_value_function:    is-a `GANsValueFunction`.
                                Defaults to `MiniMax()` when omitted.

    Raises:
        TypeError: If `gans_value_function` is not a `GANsValueFunction`.
    '''
    if gans_value_function is None:
        # Fall back to the mini-max value function.
        gans_value_function = MiniMax()
    if not isinstance(gans_value_function, GANsValueFunction):
        raise TypeError("The type of `gans_value_function` must be `GANsValueFunction`.")
    self.__gans_value_function = gans_value_function
    self.__logger = getLogger("pygan")
    super().__init__(gans_value_function)
| 368,966
|
Train the generative model as the Auto-Encoder.
Args:
generative_model: Generator which draws samples from the `fake` distribution.
a_logs_list: `list` of the reconstruction errors.
Returns:
The tuple data. The shape is...
- Generator which draws samples from the `fake` distribution.
- `list` of the reconstruction errors.
|
def train_auto_encoder(self, generative_model, a_logs_list):
    '''
    Train the generative model as the Auto-Encoder.

    Args:
        generative_model:   Generator which draws samples from the `fake` distribution.
        a_logs_list:        `list` of the reconstruction errors.

    Returns:
        The tuple data. The shape is...
        - Generator which draws samples from the `fake` distribution.
        - `list` of the reconstruction errors, with the newest appended.
    '''
    reconstruction_error = generative_model.update()
    if reconstruction_error.ndim > 1:
        # Reduce a multi-dimensional error array to its scalar mean.
        reconstruction_error = reconstruction_error.mean()
    a_logs_list.append(reconstruction_error)
    self.__logger.debug("The reconstruction error (mean): " + str(reconstruction_error))
    return generative_model, a_logs_list
| 368,968
|
Compute discriminator's reward.
Args:
true_posterior_arr: `np.ndarray` of `true` posterior inferenced by the discriminator.
generated_posterior_arr: `np.ndarray` of `fake` posterior inferenced by the discriminator.
Returns:
`np.ndarray` of Gradients.
|
def compute_discriminator_reward(
    self,
    true_posterior_arr,
    generated_posterior_arr
):
    '''
    Compute discriminator's reward.

    Implements the mini-max objective `log(D(x)) + log(1 - D(G(z)))`
    with a small epsilon for numerical stability of the logarithms.

    Args:
        true_posterior_arr:         `np.ndarray` of `true` posterior inferenced by the discriminator.
        generated_posterior_arr:    `np.ndarray` of `fake` posterior inferenced by the discriminator.

    Returns:
        `np.ndarray` of gradients.
    '''
    true_term = np.log(true_posterior_arr + 1e-08)
    fake_term = np.log(1 - generated_posterior_arr + 1e-08)
    return true_term + fake_term
| 368,969
|
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
    Returns:
Tuple(`np.ndarray` of action., Q-Value)
|
def select_action(self, next_action_arr, next_q_arr):
    '''
    Select action by Q(state, action).

    Args:
        next_action_arr:    `np.ndarray` of actions.
        next_q_arr:         `np.ndarray` of Q-Values.

    Returns:
        Tuple(`np.ndarray` of action, Q-Value).
    '''
    chosen_key = self.select_action_key(next_action_arr, next_q_arr)
    chosen_action = next_action_arr[chosen_key]
    chosen_q = next_q_arr[chosen_key]
    return chosen_action, chosen_q
| 368,974
|
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
    Returns:
`np.ndarray` of keys.
|
def select_action_key(self, next_action_arr, next_q_arr):
    '''
    Select the key of an action by epsilon-greedy Q(state, action).

    Args:
        next_action_arr:    `np.ndarray` of actions.
        next_q_arr:         `np.ndarray` of Q-Values.

    Returns:
        Key of the selected action.
    '''
    greedy = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))
    if greedy:
        # Exploit: the action with the highest Q-Value.
        return next_q_arr.argmax()
    # Explore: a uniformly random action.
    return np.random.randint(low=0, high=next_action_arr.shape[0])
| 368,975
|
Return metadata for 1 ticker
Use TiingoClient.list_tickers() to get available options
Args:
ticker (str) : Unique identifier for stock
|
def get_ticker_metadata(self, ticker, fmt='json'):
    '''
    Return metadata for 1 ticker.

    Use TiingoClient.list_tickers() to get available options.

    Args:
        ticker (str):   Unique identifier for stock.
        fmt (str):      'json' for the raw dict, 'object' for a Ticker object.
    '''
    response = self._request('GET', "tiingo/daily/{}".format(ticker))
    data = response.json()
    if fmt == 'object':
        return dict_to_object(data, "Ticker")
    elif fmt == 'json':
        return data
| 369,040
|
Base class for interacting with RESTful APIs
Child class MUST have a ._base_url property!
Args:
config (dict): Arbitrary options that child classes can access
|
def __init__(self, config=None):
    '''
    Base class for interacting with RESTful APIs.

    Child class MUST have a ._base_url property!

    Args:
        config (dict): Arbitrary options that child classes can access.
            Defaults to an empty dict. (BUG FIX: the former `config={}`
            default was a mutable default argument shared across all
            instances; `None` is now the sentinel and a fresh dict is
            created per instance.)
    '''
    self._config = {} if config is None else config
    # The child class should override these properties or else the
    # restclient won't work. Reevalute whether to do this as an abstract
    # base class so it doesn't get used by itself.
    self._headers = {}
    self._base_url = ""
    if self._config.get('session'):
        self._session = requests.Session()
    else:
        self._session = requests
| 369,047
|
Make HTTP request and return response object
Args:
method (str): GET, POST, PUT, DELETE
url (str): path appended to the base_url to create request
**kwargs: passed directly to a requests.request object
|
def _request(self, method, url, **kwargs):
    '''
    Make HTTP request and return response object.

    Args:
        method (str):   GET, POST, PUT, DELETE.
        url (str):      path appended to the base_url to create request.
        **kwargs:       passed directly to a requests.request object.

    Returns:
        The `requests` response object on success (2xx/3xx).

    Raises:
        RestClientError: wrapping the `HTTPError` on 4xx/5xx status.
    '''
    resp = self._session.request(method,
                                 '{}/{}'.format(self._base_url, url),
                                 headers=self._headers,
                                 **kwargs)
    try:
        resp.raise_for_status()
    except HTTPError as e:
        # Log the response body for debugging, then wrap the error
        # in the client's own exception type.
        logging.error(resp.content)
        raise RestClientError(e)
    return resp
| 369,048
|
Retrieves information provided by the API root endpoint
``'/api/v1'``.
Args:
headers (dict): Optional headers to pass to the request.
Returns:
dict: Details of the HTTP API provided by the BigchainDB
server.
|
def api_info(self, headers=None):
    '''Retrieves information provided by the API root endpoint
    ``'/api/v1'``.

    Args:
        headers (dict): Optional headers to pass to the request.

    Returns:
        dict: Details of the HTTP API provided by the BigchainDB
        server.
    '''
    return self.transport.forward_request(
        method='GET',
        path=self.api_prefix,
        headers=headers,
    )
| 370,049
|
Submit a transaction to the Federation with the mode `async`.
Args:
transaction (dict): the transaction to be sent
to the Federation node(s).
headers (dict): Optional headers to pass to the request.
Returns:
dict: The transaction sent to the Federation node(s).
|
def send_async(self, transaction, headers=None):
    '''Submit a transaction to the Federation with the mode `async`.

    Args:
        transaction (dict): the transaction to be sent
            to the Federation node(s).
        headers (dict): Optional headers to pass to the request.

    Returns:
        dict: The transaction sent to the Federation node(s).
    '''
    return self.transport.forward_request(
        method='POST',
        path=self.path,
        json=transaction,
        params={'mode': 'async'},
        headers=headers)
| 370,052
|
Retrieves the transaction with the given id.
Args:
txid (str): Id of the transaction to retrieve.
headers (dict): Optional headers to pass to the request.
Returns:
dict: The transaction with the given id.
|
def retrieve(self, txid, headers=None):
    '''Retrieves the transaction with the given id.

    Args:
        txid (str): Id of the transaction to retrieve.
        headers (dict): Optional headers to pass to the request.

    Returns:
        dict: The transaction with the given id.
    '''
    path = self.path + txid
    # BUG FIX: `headers=None` was hard-coded, silently dropping the
    # caller-supplied `headers` argument.
    return self.transport.forward_request(
        method='GET', path=path, headers=headers)
| 370,053
|
Get the block that contains the given transaction id (``txid``)
else return ``None``
Args:
txid (str): Transaction id.
headers (dict): Optional headers to pass to the request.
Returns:
:obj:`list` of :obj:`int`: List of block heights.
|
def get(self, *, txid, headers=None):
    '''Get the block that contains the given transaction id (``txid``),
    else return ``None``.

    Args:
        txid (str): Transaction id.
        headers (dict): Optional headers to pass to the request.

    Returns:
        :obj:`list` of :obj:`int`: List of block heights.
    '''
    block_list = self.transport.forward_request(
        method='GET',
        path=self.path,
        params={'transaction_id': txid},
        headers=headers,
    )
    if not block_list:
        return None
    return block_list[0]
| 370,055
|
Retrieves the block with the given ``block_height``.
Args:
block_height (str): height of the block to retrieve.
headers (dict): Optional headers to pass to the request.
Returns:
dict: The block with the given ``block_height``.
|
def retrieve(self, block_height, headers=None):
    '''Retrieves the block with the given ``block_height``.

    Args:
        block_height (str): height of the block to retrieve.
        headers (dict): Optional headers to pass to the request.

    Returns:
        dict: The block with the given ``block_height``.
    '''
    path = self.path + block_height
    # BUG FIX: `headers=None` was hard-coded, silently dropping the
    # caller-supplied `headers` argument.
    return self.transport.forward_request(
        method='GET', path=path, headers=headers)
| 370,056
|
Retrieves the assets that match a given text search string.
Args:
search (str): Text search string.
limit (int): Limit the number of returned documents. Defaults to
zero meaning that it returns all the matching assets.
headers (dict): Optional headers to pass to the request.
Returns:
:obj:`list` of :obj:`dict`: List of assets that match the query.
|
def get(self, *, search, limit=0, headers=None):
    '''Retrieves the assets that match a given text search string.

    Args:
        search (str): Text search string.
        limit (int): Limit the number of returned documents. Defaults to
            zero meaning that it returns all the matching assets.
        headers (dict): Optional headers to pass to the request.

    Returns:
        :obj:`list` of :obj:`dict`: List of assets that match the query.
    '''
    query_params = {'search': search, 'limit': limit}
    return self.transport.forward_request(
        method='GET',
        path=self.path,
        params=query_params,
        headers=headers
    )
| 370,057
|
Fulfills the given transaction.
Args:
transaction (dict): The transaction to be fulfilled.
private_keys (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
more private keys to be used for fulfilling the
transaction.
Returns:
dict: The fulfilled transaction payload, ready to be sent to a
BigchainDB federation.
Raises:
:exc:`~.exceptions.MissingPrivateKeyError`: If a private
key is missing.
|
def fulfill_transaction(transaction, *, private_keys):
    '''Fulfills the given transaction.

    Args:
        transaction (dict): The transaction to be fulfilled.
        private_keys (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
            more private keys to be used for fulfilling the
            transaction.

    Returns:
        dict: The fulfilled transaction payload, ready to be sent to a
        BigchainDB federation.

    Raises:
        :exc:`~.exceptions.MissingPrivateKeyError`: If a private
            key is missing.
    '''
    # Normalize a single key to a one-element list.
    if not isinstance(private_keys, (list, tuple)):
        private_keys = [private_keys]
    # NOTE: Needed for the time being. See
    # https://github.com/bigchaindb/bigchaindb/issues/797
    if isinstance(private_keys, tuple):
        private_keys = list(private_keys)
    transaction_obj = Transaction.from_dict(transaction)
    try:
        signed_transaction = transaction_obj.sign(private_keys)
    except KeypairMismatchException as exc:
        # Translate the low-level keypair error to the driver's own type,
        # preserving the cause chain.
        raise MissingPrivateKeyError('A private key is missing!') from exc
    return signed_transaction.to_dict()
| 370,061
|
Initializes a :class:`~bigchaindb_driver.connection.Connection`
instance.
Args:
node_url (str): Url of the node to connect to.
headers (dict): Optional headers to send with each request.
|
def __init__(self, *, node_url, headers=None):
    '''Initializes a :class:`~bigchaindb_driver.connection.Connection`
    instance.

    Args:
        node_url (str): Url of the node to connect to.
        headers (dict): Optional headers to send with each request.
    '''
    self.node_url = node_url
    self.session = Session()
    if headers:
        self.session.headers.update(headers)
    # Retry/backoff bookkeeping used by the connection pool's picker.
    self._retries = 0
    self.backoff_time = None
| 370,066
|
Picks a connection with the earliest backoff time.
As a result, the first connection is picked
for as long as it has no backoff time.
Otherwise, the connections are tried in a round robin fashion.
Args:
connections (:obj:list): List of
:class:`~bigchaindb_driver.connection.Connection` instances.
|
def pick(self, connections):
    '''Picks a connection with the earliest backoff time.

    As a result, the first connection is picked
    for as long as it has no backoff time.
    Otherwise, the connections are tried in a round robin fashion.

    Args:
        connections (:obj:list): List of
            :class:`~bigchaindb_driver.connection.Connection` instances.
    '''
    if len(connections) == 1:
        return connections[0]

    def backoff_key(connection):
        # A connection without a backoff time sorts earliest,
        # so fresh connections win over throttled ones.
        if connection.backoff_time is None:
            return datetime.min
        return connection.backoff_time

    return min(*connections, key=backoff_key)
| 370,070
|
Initializes a :class:`~bigchaindb_driver.pool.Pool` instance.
Args:
connections (list): List of
:class:`~bigchaindb_driver.connection.Connection` instances.
|
def __init__(self, connections, picker_class=RoundRobinPicker):
    '''Initializes a :class:`~bigchaindb_driver.pool.Pool` instance.

    Args:
        connections (list): List of
            :class:`~bigchaindb_driver.connection.Connection` instances.
        picker_class: Class used to pick a connection from the pool.
            Defaults to :class:`RoundRobinPicker`.
    '''
    self.connections = connections
    self.picker = picker_class()
| 370,071
|
Initializes an instance of
:class:`~bigchaindb_driver.transport.Transport`.
Args:
nodes: each node is a dictionary with the keys `endpoint` and
`headers`
timeout (int): Optional timeout in seconds.
|
def __init__(self, *nodes, timeout=None):
    '''Initializes an instance of
    :class:`~bigchaindb_driver.transport.Transport`.

    Args:
        nodes: each node is a dictionary with the keys `endpoint` and
            `headers`.
        timeout (int): Optional timeout in seconds.
    '''
    self.nodes = nodes
    self.timeout = timeout
    # One pooled connection per configured node.
    self.connection_pool = Pool([Connection(node_url=node['endpoint'],
                                            headers=node['headers'])
                                 for node in nodes])
| 370,072
|
Validate all (nested) keys in `obj` by using `validation_fun`.
Args:
obj_name (str): name for `obj` being validated.
obj (dict): dictionary object.
validation_fun (function): function used to validate the value
of `key`.
Returns:
None: indicates validation successful
Raises:
ValidationError: `validation_fun` will raise this error on failure
|
def validate_all_keys(obj_name, obj, validation_fun):
    '''Validate all (nested) keys in `obj` by using `validation_fun`.

    Args:
        obj_name (str): name for `obj` being validated.
        obj (dict): dictionary object.
        validation_fun (function): function used to validate the value
            of `key`.

    Returns:
        None: indicates validation successful.

    Raises:
        ValidationError: `validation_fun` will raise this error on failure.
    '''
    for key in obj:
        validation_fun(obj_name, key)
        nested_value = obj[key]
        if isinstance(nested_value, dict):
            # Recurse into nested dictionaries.
            validate_all_keys(obj_name, nested_value, validation_fun)
| 370,074
|
Validate value for all (nested) occurrence of `key` in `obj`
using `validation_fun`.
Args:
obj (dict): dictionary object.
key (str): key whose value is to be validated.
validation_fun (function): function used to validate the value
of `key`.
Raises:
ValidationError: `validation_fun` will raise this error on failure
|
def validate_all_values_for_key(obj, key, validation_fun):
    '''Validate the value for all (nested) occurrences of `key` in `obj`
    using `validation_fun`.

    Args:
        obj (dict): dictionary object.
        key (str): key whose value is to be validated.
        validation_fun (function): function used to validate the value
            of `key`.

    Raises:
        ValidationError: `validation_fun` will raise this error on failure.
    '''
    for current_key, current_value in obj.items():
        if current_key == key:
            validation_fun(current_value)
        elif isinstance(current_value, dict):
            # Recurse into nested dicts, but not into matched values.
            validate_all_values_for_key(current_value, key, validation_fun)
| 370,075
|
Validate the transaction ID of a transaction
Args:
tx_body (dict): The Transaction to be transformed.
|
def validate_id(tx_body):
    '''Validate the transaction ID of a transaction.

    The id must equal the hash of the transaction body serialized with
    the id blanked out.

    Args:
        tx_body (dict): The transaction body whose ``'id'`` is validated.

    Raises:
        InvalidHash: if there is no ``'id'`` key, or if the id does not
            match the computed hash.
    '''
    # NOTE: Remove reference to avoid side effects
    tx_body = deepcopy(tx_body)
    try:
        proposed_tx_id = tx_body['id']
    except KeyError:
        raise InvalidHash('No transaction id found!')
    # The id is excluded from its own hash computation.
    tx_body['id'] = None
    tx_body_serialized = Transaction._to_str(tx_body)
    valid_tx_id = Transaction._to_hash(tx_body_serialized)
    if proposed_tx_id != valid_tx_id:
        err_msg = ("The transaction's id '{}' isn't equal to "
                   "the hash of its body, i.e. it's not valid.")
        raise InvalidHash(err_msg.format(proposed_tx_id))
| 370,081
|
Transforms a Python dictionary to a Transaction object.
Args:
tx_body (dict): The Transaction to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
def from_dict(cls, tx):
    '''Transforms a Python dictionary to a Transaction object.

    Args:
        tx (dict): The Transaction to be transformed.

    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`
    '''
    inputs = [Input.from_dict(input_) for input_ in tx['inputs']]
    outputs = [Output.from_dict(output) for output in tx['outputs']]
    return cls(tx['operation'], tx['asset'], inputs, outputs,
               tx['metadata'], tx['version'], hash_id=tx['id'])
| 370,082
|
Initialize the dir entry with unset values.
Args:
filesystem: the fake filesystem used for implementation.
|
def __init__(self, filesystem):
    '''Initialize the dir entry with unset values.

    Args:
        filesystem: the fake filesystem used for implementation.
    '''
    self._filesystem = filesystem
    self.name = ''
    self.path = ''
    # Cached inode, link/dir flags and stat results — filled lazily
    # (e.g. by `stat()`).
    self._inode = None
    self._islink = False
    self._isdir = False
    self._statresult = None
    self._statresult_symlink = None
| 370,190
|
Return a stat_result object for this entry.
Args:
follow_symlinks: If False and the entry is a symlink, return the
result for the symlink, otherwise for the object it points to.
|
def stat(self, follow_symlinks=True):
    '''Return a stat_result object for this entry.

    The result is computed once per mode and cached.

    Args:
        follow_symlinks: If False and the entry is a symlink, return the
            result for the symlink, otherwise for the object it points to.
    '''
    if follow_symlinks:
        if self._statresult_symlink is None:
            # `resolve` follows symlinks to the target object.
            file_object = self._filesystem.resolve(self.path)
            if self._filesystem.is_windows_fs:
                # st_nlink is not reported under Windows.
                file_object.st_nlink = 0
            self._statresult_symlink = file_object.stat_result.copy()
        return self._statresult_symlink
    if self._statresult is None:
        # `lresolve` does not follow the final symlink.
        file_object = self._filesystem.lresolve(self.path)
        self._inode = file_object.st_ino
        if self._filesystem.is_windows_fs:
            file_object.st_nlink = 0
        self._statresult = file_object.stat_result.copy()
    return self._statresult
| 370,192
|
Add the deprecated version of a member function to the given class.
Gives a deprecation warning on usage.
Args:
clss: the class where the deprecated function is to be added
func: the actual function that is called by the deprecated version
deprecated_name: the deprecated name of the function
|
def add(clss, func, deprecated_name):
    '''Add the deprecated version of a member function to the given class.

    The added function gives a deprecation warning on usage, via the
    `Deprecator` decorator, and then delegates to `func`.

    Args:
        clss: the class where the deprecated function is to be added.
        func: the actual function that is called by the deprecated version.
        deprecated_name: the deprecated name of the function.
    '''
    @Deprecator(func.__name__, deprecated_name)
    def _old_function(*args, **kwargs):
        return func(*args, **kwargs)
    setattr(clss, deprecated_name, _old_function)
| 370,198
|
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes
under Windows).
Args:
strict: If False (default) no exception is raised if the path
does not exist.
New in Python 3.6.
Raises:
IOError: if the path doesn't exist (strict=True or Python < 3.6)
|
def resolve(self, strict=None):
    '''Make the path absolute, resolving all symlinks on the way and also
    normalizing it (for example turning slashes into backslashes
    under Windows).

    Args:
        strict: If False (default) no exception is raised if the path
            does not exist.
            New in Python 3.6.

    Raises:
        IOError: if the path doesn't exist (strict=True or Python < 3.6).
        TypeError: if `strict` is passed on Python < 3.6 without pathlib2.
    '''
    if sys.version_info >= (3, 6) or pathlib2:
        # Python >= 3.6 (or the pathlib2 backport) supports `strict`;
        # default to non-strict resolution.
        if strict is None:
            strict = False
    else:
        # Older Python has no `strict` argument and is always strict.
        if strict is not None:
            raise TypeError(
                "resolve() got an unexpected keyword argument 'strict'")
        strict = True
    if self._closed:
        self._raise_closed()
    path = self._flavour.resolve(self, strict=strict)
    if path is None:
        # Resolution yielded nothing: stat() raises if the path is
        # missing; otherwise fall back to the absolute path.
        self.stat()
        path = str(self.absolute())
    path = self.filesystem.absnormpath(path)
    return FakePath(path)
| 370,212
|
Sets the file contents and size.
Called internally after initial file creation.
Args:
contents: string, new content of file.
Returns:
True if the contents have been changed.
Raises:
IOError: if the st_size is not a non-negative integer,
or if st_size exceeds the available file system space
|
def _set_initial_contents(self, contents):
    '''Sets the file contents and size.
    Called internally after initial file creation.

    Args:
        contents: string, new content of file.

    Returns:
        True if the contents have been changed.

    Raises:
        IOError: if the st_size is not a non-negative integer,
            or if st_size exceeds the available file system space.
    '''
    contents = self._encode_contents(contents)
    changed = self._byte_contents != contents
    st_size = len(contents)
    if self._byte_contents:
        # Release the previously used space before accounting
        # for the new size below.
        self.size = 0
    current_size = self.st_size or 0
    self.filesystem.change_disk_usage(
        st_size - current_size, self.name, self.st_dev)
    self._byte_contents = contents
    self.st_size = st_size
    # epoch marks content changes — presumably observed by readers
    # of the fake file; TODO confirm.
    self.epoch += 1
    return changed
| 370,225
|
Resizes file content, padding with nulls if new size exceeds the
old size.
Args:
st_size: The desired size for the file.
Raises:
IOError: if the st_size arg is not a non-negative integer
or if st_size exceeds the available file system space
|
def size(self, st_size):
    '''Resizes file content, padding with nulls if new size exceeds the
    old size.

    Args:
        st_size: The desired size for the file.

    Raises:
        IOError: if the st_size arg is not a non-negative integer
            or if st_size exceeds the available file system space.
    '''
    self._check_positive_int(st_size)
    current_size = self.st_size or 0
    # Account for the size delta against the file system device.
    self.filesystem.change_disk_usage(
        st_size - current_size, self.name, self.st_dev)
    if self._byte_contents:
        if st_size < current_size:
            # Shrink: truncate the contents.
            self._byte_contents = self._byte_contents[:st_size]
        else:
            # Grow: pad with null bytes (str under Python 2, bytes otherwise).
            if IS_PY2:
                self._byte_contents = '%s%s' % (
                    self._byte_contents, '\0' * (st_size - current_size))
            else:
                self._byte_contents += b'\0' * (st_size - current_size)
    self.st_size = st_size
    self.epoch += 1
| 370,228
|
Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
|
def add_entry(self, path_object):
    '''Adds a child FakeFile to this directory.

    Args:
        path_object: FakeFile instance to add as a child of this directory.

    Raises:
        OSError: if the directory has no write permission (Posix only).
        OSError: if the file or directory to be added already exists.
    '''
    if (not is_root() and not self.st_mode & PERM_WRITE and
            not self.filesystem.is_windows_fs):
        # Python 2 raises IOError where Python 3 raises OSError here.
        exception = IOError if IS_PY2 else OSError
        raise exception(errno.EACCES, 'Permission Denied', self.path)
    if path_object.name in self.contents:
        self.filesystem.raise_os_error(errno.EEXIST, self.path)
    self.contents[path_object.name] = path_object
    path_object.parent_dir = self
    # Both the directory and the new entry gain a hard link.
    self.st_nlink += 1
    path_object.st_nlink += 1
    path_object.st_dev = self.st_dev
    if path_object.st_nlink == 1:
        # First link to this object: account for its disk usage.
        self.filesystem.change_disk_usage(
            path_object.size, path_object.name, self.st_dev)
| 370,238
|
Retrieves the specified child file or directory entry.
Args:
pathname_name: The basename of the child object to retrieve.
Returns:
The fake file or directory object.
Raises:
KeyError: if no child exists by the specified name.
|
def get_entry(self, pathname_name):
    '''Retrieves the specified child file or directory entry.

    Args:
        pathname_name: The basename of the child object to retrieve.

    Returns:
        The fake file or directory object.

    Raises:
        KeyError: if no child exists by the specified name.
    '''
    pathname_name = self._normalized_entryname(pathname_name)
    return self.contents[pathname_name]
| 370,239
|
Raises IOError.
The error message is constructed from the given error code and shall
start with the error in the real system.
Args:
errno: A numeric error code from the C variable errno.
filename: The name of the affected file, if any.
|
def raise_io_error(self, errno, filename=None):
    '''Raises IOError.
    The error message is constructed from the given error code and shall
    start with the error in the real system.

    Args:
        errno: A numeric error code from the C variable errno.
        filename: The name of the affected file, if any.
    '''
    raise IOError(errno, self._error_message(errno), filename)
| 370,249
|
Add a new mount point for a filesystem device.
The mount point gets a new unique device number.
Args:
path: The root path for the new mount path.
total_size: The new total size of the added filesystem device
in bytes. Defaults to infinite size.
Returns:
The newly created mount point dict.
Raises:
OSError: if trying to mount an existing mount point again.
|
def add_mount_point(self, path, total_size=None):
    '''Add a new mount point for a filesystem device.
    The mount point gets a new unique device number.

    Args:
        path: The root path for the new mount path.
        total_size: The new total size of the added filesystem device
            in bytes. Defaults to infinite size.

    Returns:
        The newly created mount point dict.

    Raises:
        OSError: if trying to mount an existing mount point again.
    '''
    path = self.absnormpath(path)
    if path in self.mount_points:
        self.raise_os_error(errno.EEXIST, path)
    # Assign the next unique device number.
    self._last_dev += 1
    self.mount_points[path] = {
        'idev': self._last_dev, 'total_size': total_size, 'used_size': 0
    }
    # special handling for root path: has been created before
    root_dir = (self.root if path == self.root.name
                else self.create_dir(path))
    root_dir.st_dev = self._last_dev
    return self.mount_points[path]
| 370,251
|
Return the total, used and free disk space in bytes as named tuple,
or placeholder values simulating unlimited space if not set.
.. note:: This matches the return value of shutil.disk_usage().
Args:
path: The disk space is returned for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems).
|
def get_disk_usage(self, path=None):
    '''Return the total, used and free disk space in bytes as named tuple,
    or placeholder values simulating unlimited space if not set.

    .. note:: This matches the return value of shutil.disk_usage().

    Args:
        path: The disk space is returned for the file system device where
            `path` resides.
            Defaults to the root path (e.g. '/' on Unix systems).
    '''
    DiskUsage = namedtuple('usage', 'total, used, free')
    if path is None:
        mount_point = self.mount_points[self.root.name]
    else:
        mount_point = self._mount_point_for_path(path)
    if mount_point and mount_point['total_size'] is not None:
        total = mount_point['total_size']
        used = mount_point['used_size']
        return DiskUsage(total, used, total - used)
    # No size limit configured: simulate a 1 TiB device with nothing used.
    unlimited = 1024 * 1024 * 1024 * 1024
    return DiskUsage(unlimited, 0, unlimited)
| 370,255
|
Changes the total size of the file system, preserving the used space.
Example usage: set the size of an auto-mounted Windows drive.
Args:
total_size: The new total size of the filesystem in bytes.
path: The disk space is changed for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems).
Raises:
IOError: if the new space is smaller than the used size.
|
def set_disk_usage(self, total_size, path=None):
    """Change the total size of the file system, preserving used space.

    Example usage: set the size of an auto-mounted Windows drive.

    Args:
        total_size: The new total size of the filesystem in bytes.
        path: The disk space is changed for the file system device
            where `path` resides; defaults to the root path.

    Raises:
        IOError: if the new space is smaller than the used size.
    """
    if path is None:
        path = self.root.name
    mount_point = self._mount_point_for_path(path)
    # Refuse to shrink below the space that is already in use.
    if (mount_point['total_size'] is not None and
            mount_point['used_size'] > total_size):
        self.raise_io_error(errno.ENOSPC, path)
    mount_point['total_size'] = total_size
| 370,256
|
Change the used disk space by the given amount.
Args:
usage_change: Number of bytes added to the used space.
If negative, the used space will be decreased.
file_path: The path of the object needing the disk space.
st_dev: The device ID for the respective file system.
Raises:
IOError: if usage_change exceeds the free file system space
|
def change_disk_usage(self, usage_change, file_path, st_dev):
    """Change the used disk space by the given amount.

    Args:
        usage_change: Number of bytes added to the used space;
            if negative, the used space will be decreased.
        file_path: The path of the object needing the disk space,
            used in the error message.
        st_dev: The device ID for the respective file system.

    Raises:
        IOError: if `usage_change` exceeds the free file system space.
    """
    mount_point = self._mount_point_for_device(st_dev)
    if not mount_point:
        return
    total_size = mount_point['total_size']
    if total_size is None:
        # Unlimited device: usage is not tracked.
        return
    if total_size - mount_point['used_size'] < usage_change:
        self.raise_io_error(errno.ENOSPC, file_path)
    mount_point['used_size'] += usage_change
| 370,257
|
Return the os.stat-like tuple for the FakeFile object of entry_path.
Args:
entry_path: Path to filesystem object to retrieve.
follow_symlinks: If False and entry_path points to a symlink,
the link itself is inspected instead of the linked object.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist.
|
def stat(self, entry_path, follow_symlinks=True):
    """Return the os.stat-like tuple for the FakeFile of `entry_path`.

    Args:
        entry_path: Path to the filesystem object to retrieve.
        follow_symlinks: If False and entry_path points to a symlink,
            the link itself is inspected instead of the linked object.

    Returns:
        The FakeStatResult object corresponding to `entry_path`.

    Raises:
        OSError: if the filesystem object doesn't exist.
    """
    try:
        file_object = self.resolve(
            entry_path, follow_symlinks, allow_fd=True)
        self.raise_for_filepath_ending_with_separator(
            entry_path, file_object, follow_symlinks)
        return file_object.stat_result.copy()
    except IOError as io_error:
        # Preserve the Windows-specific error code if one is attached.
        winerror = getattr(io_error, 'winerror', io_error.errno)
        self.raise_os_error(io_error.errno, entry_path, winerror=winerror)
| 370,258
|
Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
follow_symlinks: If `False` and `path` points to a symlink,
the link itself is affected instead of the linked object.
|
def chmod(self, path, mode, follow_symlinks=True):
    """Change the permissions of a file as encoded in integer mode.

    Args:
        path: (str) Path to the file.
        mode: (int) Permissions.
        follow_symlinks: If `False` and `path` points to a symlink,
            the link itself is affected instead of the linked object.
    """
    try:
        file_object = self.resolve(path, follow_symlinks, allow_fd=True)
    except IOError as io_error:
        if io_error.errno == errno.ENOENT:
            self.raise_os_error(errno.ENOENT, path)
        raise
    if self.is_windows_fs:
        # Windows only toggles the write bits; all other bits stay set.
        if mode & PERM_WRITE:
            file_object.st_mode |= 0o222
        else:
            file_object.st_mode &= 0o777555
    else:
        new_mode = (file_object.st_mode & ~PERM_ALL) | (mode & PERM_ALL)
        file_object.st_mode = new_mode
    file_object.st_ctime = time.time()
| 370,260
|
Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
|
def _add_open_file(self, file_obj):
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1
| 370,263
|
Remove file object with given descriptor from the list
of open files.
Sets the entry in open_files to None.
Args:
file_des: Descriptor of file object to be removed from
open files list.
|
def _close_open_file(self, file_des):
self.open_files[file_des] = None
heapq.heappush(self._free_fd_heap, file_des)
| 370,264
|
Return an open file.
Args:
file_des: File descriptor of the open file.
Raises:
OSError: an invalid file descriptor.
TypeError: filedes is not an integer.
Returns:
Open file object.
|
def get_open_file(self, file_des):
    """Return an open file.

    Args:
        file_des: File descriptor of the open file.

    Raises:
        OSError: an invalid file descriptor.
        TypeError: filedes is not an integer.

    Returns:
        Open file object.
    """
    if not is_int_type(file_des):
        raise TypeError('an integer is required')
    valid = (file_des < len(self.open_files) and
             self.open_files[file_des] is not None)
    if not valid:
        self.raise_os_error(errno.EBADF, str(file_des))
    # Each entry is a list of wrappers; the first one is the file.
    return self.open_files[file_des][0]
| 370,265
|
Return True if the given file object is in the list of open files.
Args:
file_object: The FakeFile object to be checked.
Returns:
`True` if the file is open.
|
def has_open_file(self, file_object):
    """Return True if the given file object is in the list of open files.

    Args:
        file_object: The FakeFile object to be checked.

    Returns:
        `True` if the file is open.
    """
    open_objects = (wrapper[0].get_object()
                    for wrapper in self.open_files if wrapper)
    return file_object in open_objects
| 370,266
|
Return a normalized case version of the given path for
case-insensitive file systems. For case-sensitive file systems,
return path unchanged.
Args:
path: the file path to be transformed
Returns:
A version of path matching the case of existing path elements.
|
def _original_path(self, path):
    """Return a normalized case version of the given path for
    case-insensitive file systems; for case-sensitive file systems,
    return `path` unchanged.

    Args:
        path: the file path to be transformed.

    Returns:
        A version of `path` matching the case of existing path elements.
    """
    def components_to_path():
        # Re-join the case-corrected components; components that could
        # not be matched below are appended in their original form.
        if len(path_components) > len(normalized_components):
            normalized_components.extend(
                path_components[len(normalized_components):])
        sep = self._path_separator(path)
        normalized_path = sep.join(normalized_components)
        if path.startswith(sep) and not normalized_path.startswith(sep):
            normalized_path = sep + normalized_path
        return normalized_path
    # Nothing to normalize on case-sensitive systems or for empty paths.
    if self.is_case_sensitive or not path:
        return path
    path_components = self._path_components(path)
    normalized_components = []
    current_dir = self.root
    for component in path_components:
        if not isinstance(current_dir, FakeDirectory):
            # Cannot descend into a non-directory; stop matching here.
            return components_to_path()
        dir_name, current_dir = self._directory_content(
            current_dir, component)
        if current_dir is None or (
                isinstance(current_dir, FakeDirectory) and
                current_dir._byte_contents is None and
                current_dir.st_size == 0):
            # Component not found (or an empty placeholder entry);
            # stop matching here.
            return components_to_path()
        normalized_components.append(dir_name)
    return components_to_path()
| 370,269
|
Absolutize and minimalize the given path.
Forces all relative paths to be absolute, and normalizes the path to
eliminate dot and empty components.
Args:
path: Path to normalize.
Returns:
The normalized path relative to the current working directory,
or the root directory if path is empty.
|
def absnormpath(self, path):
    """Absolutize and minimalize the given path.

    Forces all relative paths to be absolute, and normalizes the path
    to eliminate dot and empty components.

    Args:
        path: Path to normalize.

    Returns:
        The normalized path relative to the current working directory,
        or the root directory if `path` is empty.
    """
    path = self.normcase(path)
    cwd = self._matching_string(path, self.cwd)
    if not path:
        # Empty path maps to the root directory.
        path = self.path_separator
    elif not self._starts_with_root_path(path):
        # Prefix relative paths with cwd, if cwd is not root.
        root_name = self._matching_string(path, self.root.name)
        empty = self._matching_string(path, '')
        prefix = cwd != root_name and cwd or empty
        path = self._path_separator(path).join((prefix, path))
    if path == self._matching_string(path, '.'):
        path = cwd
    return self.normpath(path)
| 370,270
|
Mimic os.path.split using the specified path_separator.
Mimics os.path.split using the path_separator that was specified
for this FakeFilesystem.
Args:
path: (str) The path to split.
Returns:
(str) A tuple (pathname, basename) for which pathname does not
end with a slash, and basename does not contain a slash.
|
def splitpath(self, path):
    """Mimic os.path.split using the path_separator that was specified
    for this FakeFilesystem.

    Args:
        path: (str) The path to split.

    Returns:
        (str) A tuple (pathname, basename) for which pathname does not
        end with a slash, and basename does not contain a slash.
    """
    path = self.normcase(path)
    sep = self._path_separator(path)
    path_components = path.split(sep)
    if not path_components:
        return ('', '')
    starts_with_drive = self._starts_with_drive_letter(path)
    basename = path_components.pop()
    colon = self._matching_string(path, ':')
    if not path_components:
        # Single component only; it may still contain a drive letter.
        if starts_with_drive:
            components = basename.split(colon)
            return (components[0] + colon, components[1])
        return ('', basename)
    for component in path_components:
        if component:
            # The path is not the root; it contains a non-separator
            # component. Strip all trailing separators.
            while not path_components[-1]:
                path_components.pop()
            if starts_with_drive:
                if not path_components:
                    # Only the drive letter remains before the basename.
                    components = basename.split(colon)
                    return (components[0] + colon, components[1])
                if (len(path_components) == 1 and
                        path_components[0].endswith(colon)):
                    # Keep the separator after a bare drive letter.
                    return (path_components[0] + sep, basename)
            return (sep.join(path_components), basename)
    # Root path. Collapse all leading separators.
    return (sep, basename)
| 370,271
|
Splits the path into the drive part and the rest of the path.
Taken from Windows specific implementation in Python 3.5
and slightly adapted.
Args:
path: the full path to be split.
Returns:
A tuple of the drive part and the rest of the path, or of
an empty string and the full path if drive letters are
not supported or no drive is present.
|
def splitdrive(self, path):
    """Split the path into the drive part and the rest of the path.

    Taken from the Windows specific implementation in Python 3.5
    and slightly adapted.

    Args:
        path: the full path to be split.

    Returns:
        A tuple of the drive part and the rest of the path, or of
        an empty string and the full path if drive letters are
        not supported or no drive is present.
    """
    path = make_string_path(path)
    if self.is_windows_fs:
        if len(path) >= 2:
            path = self.normcase(path)
            sep = self._path_separator(path)
            # UNC path handling is here since Python 2.7.8,
            # back-ported from Python 3
            if sys.version_info >= (2, 7, 8):
                if (path[0:2] == sep * 2) and (
                        path[2:3] != sep):
                    # UNC path handling - splits off the mount point
                    # instead of the drive
                    sep_index = path.find(sep, 2)
                    if sep_index == -1:
                        # No separator after the host name: no mount point.
                        return path[:0], path
                    sep_index2 = path.find(sep, sep_index + 1)
                    if sep_index2 == sep_index + 1:
                        # Empty share name ("//host//...") - no mount point.
                        return path[:0], path
                    if sep_index2 == -1:
                        sep_index2 = len(path)
                    return path[:sep_index2], path[sep_index2:]
            if path[1:2] == self._matching_string(path, ':'):
                return path[:2], path[2:]
    # Not a Windows path, or no drive/mount point present.
    return path[:0], path
| 370,272
|
Mimic os.path.join using the specified path_separator.
Args:
*paths: (str) Zero or more paths to join.
Returns:
(str) The paths joined by the path separator, starting with
the last absolute path in paths.
|
def joinpaths(self, *paths):
    """Mimic os.path.join using the specified path_separator.

    Args:
        *paths: (str) Zero or more paths to join.

    Returns:
        (str) The paths joined by the path separator, starting with
        the last absolute path in `paths`.
    """
    if sys.version_info >= (3, 6):
        paths = [os.fspath(path) for path in paths]
    if len(paths) == 1:
        return paths[0]
    if self.is_windows_fs:
        return self._join_paths_with_drive_support(*paths)
    sep = self._path_separator(paths[0])
    segments = []
    for segment in paths:
        if self._starts_with_root_path(segment):
            # An absolute path discards everything joined so far.
            segments = [segment]
            continue
        if segments and not segments[-1].endswith(sep):
            segments.append(sep)
        if segment:
            segments.append(segment)
    return self._matching_string(paths[0], '').join(segments)
| 370,274
|
Return True if file_path starts with a drive letter.
Args:
file_path: the full path to be examined.
Returns:
`True` if drive letter support is enabled in the filesystem and
the path starts with a drive letter.
|
def _starts_with_drive_letter(self, file_path):
colon = self._matching_string(file_path, ':')
return (self.is_windows_fs and len(file_path) >= 2 and
file_path[:1].isalpha and (file_path[1:2]) == colon)
| 370,276
|
Return true if a path points to an existing file system object.
Args:
file_path: The path to examine.
Returns:
(bool) True if the corresponding object exists.
Raises:
TypeError: if file_path is None.
|
def exists(self, file_path, check_link=False):
    """Return true if a path points to an existing file system object.

    Args:
        file_path: The path to examine.
        check_link: If `True`, the path counts as existing if it is a
            symlink, without following the link.

    Returns:
        (bool) True if the corresponding object exists.

    Raises:
        TypeError: if file_path is None.
    """
    if check_link and self.islink(file_path):
        return True
    file_path = make_string_path(file_path)
    if file_path is None:
        raise TypeError
    if not file_path:
        return False
    if file_path == self.dev_null.name:
        # The null device does not exist under Windows emulation.
        return not self.is_windows_fs
    try:
        if self.is_filepath_ending_with_separator(file_path):
            return False
        file_path = self.resolve_path(file_path)
    except (IOError, OSError):
        # Any resolution failure means the object does not exist.
        return False
    if file_path == self.root.name:
        return True
    # Walk the resolved path from the root, component by component.
    path_components = self._path_components(file_path)
    current_dir = self.root
    for component in path_components:
        current_dir = self._directory_content(current_dir, component)[1]
        if not current_dir:
            return False
    return True
| 370,282
|
Search for the specified filesystem object within the fake
filesystem.
Args:
file_path: Specifies target FakeFile object to retrieve, with a
path that has already been normalized/resolved.
Returns:
The FakeFile object corresponding to file_path.
Raises:
IOError: if the object is not found.
|
def get_object_from_normpath(self, file_path):
    """Search for the specified filesystem object within the fake
    filesystem.

    Args:
        file_path: Specifies target FakeFile object to retrieve, with a
            path that has already been normalized/resolved.

    Returns:
        The FakeFile object corresponding to `file_path`.

    Raises:
        IOError: if the object is not found.
    """
    file_path = make_string_path(file_path)
    if file_path == self.root.name:
        return self.root
    if file_path == self.dev_null.name:
        return self.dev_null
    file_path = self._original_path(file_path)
    path_components = self._path_components(file_path)
    target_object = self.root
    try:
        for component in path_components:
            if S_ISLNK(target_object.st_mode):
                # Follow an intermediate symlink before descending.
                target_object = self.resolve(target_object.contents)
            if not S_ISDIR(target_object.st_mode):
                # Cannot descend into a non-directory; Windows reports
                # ENOENT where POSIX reports ENOTDIR.
                if not self.is_windows_fs:
                    self.raise_io_error(errno.ENOTDIR, file_path)
                self.raise_io_error(errno.ENOENT, file_path)
            target_object = target_object.get_entry(component)
    except KeyError:
        self.raise_io_error(errno.ENOENT, file_path)
    return target_object
| 370,289
|
Search for the specified filesystem object within the fake
filesystem.
Args:
file_path: Specifies the target FakeFile object to retrieve.
Returns:
The FakeFile object corresponding to `file_path`.
Raises:
IOError: if the object is not found.
|
def get_object(self, file_path):
    """Search for the specified filesystem object within the fake
    filesystem.

    Args:
        file_path: Specifies the target FakeFile object to retrieve.

    Returns:
        The FakeFile object corresponding to `file_path`.

    Raises:
        IOError: if the object is not found.
    """
    normalized = self.absnormpath(
        self._original_path(make_string_path(file_path)))
    return self.get_object_from_normpath(normalized)
| 370,290
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.