| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Synthetic per-person wealth values: 100 individuals, one feature each.
wealth_distribution = torch.randn(100, 1)

# Synthetic transfer-direction signal, one value per individual.
target_direction = torch.randn(100, 1)
|
|
| |
class WealthTransferModel(nn.Module):
    """Three-layer MLP mapping a (wealth, target-direction) pair to a new wealth value."""

    def __init__(self, input_size, hidden_size, output_size):
        super(WealthTransferModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x, target):
        # Join the two inputs on the feature axis, then run the MLP.
        combined = torch.cat((x, target), dim=1)
        hidden = self.relu(self.fc1(combined))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[1] + target_direction.shape[1]
hidden_size = 64
output_size = wealth_distribution.shape[1]

model = WealthTransferModel(input_size, hidden_size, output_size)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random stand-in for the desired post-transfer wealth of each individual.
target_wealth_state = torch.randn(100, 1)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| |
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Synthetic per-person wealth values: 100 individuals, one feature each.
wealth_distribution = torch.randn(100, 1)

# Synthetic transfer-direction signal, one value per individual.
target_direction = torch.randn(100, 1)
|
|
| |
class WealthTransferModelWithNerve(nn.Module):
    """MLP front-end followed by a single-step LSTM and a linear read-out."""

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size):
        super(WealthTransferModelWithNerve, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)

    def forward(self, x, target):
        features = torch.cat((x, target), dim=1)
        features = self.relu(self.fc1(features))
        # Treat each row as a length-1 sequence so the LSTM can consume it.
        seq = features.unsqueeze(1)
        seq, _ = self.lstm(seq)
        # Drop the dummy time axis before the final projection.
        return self.fc2(seq.squeeze(1))
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[1] + target_direction.shape[1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[1]

model = WealthTransferModelWithNerve(input_size, hidden_size, lstm_hidden_size, output_size)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random stand-in for the desired post-transfer wealth of each individual.
target_wealth_state = torch.randn(100, 1)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| |
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Sequence dimensions: 32 samples, 10 time steps, 1 feature per person.
batch_size = 32
seq_length = 10
feature_size = 1

# Wealth per (sample, step, person): 100 individuals tracked over time.
wealth_distribution = torch.randn(batch_size, seq_length, 100, feature_size)

# Matching transfer-direction signal for the same (sample, step, person) grid.
target_direction = torch.randn(batch_size, seq_length, 100, feature_size)
|
|
| |
class WealthTransferModelWithTimesteps(nn.Module):
    """Per-person MLP followed by an LSTM over the time axis.

    Inputs and outputs are (batch, seq, people, feat) tensors.

    Fixes vs. the previous version: the LSTM was declared with input size
    ``hidden_size`` but fed a ``(batch, seq, people * hidden_size)`` tensor
    (a runtime shape error), and the ``fc2`` output of size ``output_size``
    was viewed to ``(batch, seq, people, -1)``, which cannot hold the right
    number of elements.  The people axis is now folded into the batch axis
    so every person gets an independent LSTM pass over time.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size):
        super(WealthTransferModelWithTimesteps, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        # Input is the per-person hidden vector produced by fc1.
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)

    def forward(self, x, target):
        # x, target: (batch, seq, people, feat) — concatenated on features.
        x = torch.cat((x, target), dim=-1)
        batch_size, seq_length, num_people, _ = x.shape

        # nn.Linear operates on the trailing dim, so no flattening is needed.
        x = self.relu(self.fc1(x))

        # Fold people into the batch axis: (batch * people, seq, hidden).
        x = x.permute(0, 2, 1, 3).reshape(batch_size * num_people, seq_length, -1)
        x, _ = self.lstm(x)
        x = self.fc2(x)

        # Restore (batch, seq, people, output_size).
        x = x.reshape(batch_size, num_people, seq_length, -1).permute(0, 2, 1, 3)
        return x
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]

model = WealthTransferModelWithTimesteps(input_size, hidden_size, lstm_hidden_size, output_size)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random target wealth per (sample, step, person).
target_wealth_state = torch.randn(batch_size, seq_length, 100, feature_size)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| |
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Sequence dimensions: 32 samples, 10 time steps, 1 feature per person.
batch_size = 32
seq_length = 10
feature_size = 1

# Wealth per (sample, step, person): 100 individuals tracked over time.
wealth_distribution = torch.randn(batch_size, seq_length, 100, feature_size)

# Matching transfer-direction signal for the same (sample, step, person) grid.
target_direction = torch.randn(batch_size, seq_length, 100, feature_size)
|
|
| |
class WealthTransferModelWithTimesteps(nn.Module):
    """Per-person MLP followed by an LSTM over the time axis.

    Inputs and outputs are (batch, seq, people, feat) tensors.

    Fixes vs. the previous version: the LSTM input size was hard-coded to
    ``hidden_size * 100`` people, ``forward`` referenced the module-level
    ``hidden_size`` global, the final ``x.view()`` call had no arguments,
    and the method never returned — the forward pass could not run.  The
    people axis is now folded into the batch axis so the LSTM size is
    independent of the population count and the method returns a tensor of
    shape (batch, seq, people, output_size).
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size):
        super(WealthTransferModelWithTimesteps, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        # Input is the per-person hidden vector, not people * hidden.
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)

    def forward(self, x, target):
        # x, target: (batch, seq, people, feat) — concatenated on features.
        x = torch.cat((x, target), dim=-1)
        batch_size, seq_length, num_people, _ = x.shape

        # nn.Linear operates on the trailing dim, so no flattening is needed.
        x = self.relu(self.fc1(x))

        # Fold people into the batch axis: (batch * people, seq, hidden).
        x = x.permute(0, 2, 1, 3).reshape(batch_size * num_people, seq_length, -1)
        x, _ = self.lstm(x)
        x = self.fc2(x)

        # Restore (batch, seq, people, output_size).
        x = x.reshape(batch_size, num_people, seq_length, -1).permute(0, 2, 1, 3)
        return x
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Sequence dimensions: 32 samples, 10 time steps, 1 feature per person.
batch_size = 32
seq_length = 10
feature_size = 1

# Wealth per (sample, step, person): 100 individuals tracked over time.
wealth_distribution = torch.randn(batch_size, seq_length, 100, feature_size)

# Matching transfer-direction signal for the same (sample, step, person) grid.
target_direction = torch.randn(batch_size, seq_length, 100, feature_size)
|
|
| |
class WealthTransferModelWithVPN(nn.Module):
    """Per-person MLP + time-axis LSTM, followed by a sigmoid "VPN"
    bottleneck and a linear "decrypt" projection.

    Inputs and outputs are (batch, seq, people, feat) tensors.

    Fixes vs. the previous version: ``forward`` read the module-level
    ``hidden_size`` global, the LSTM was declared with input size
    ``hidden_size`` but fed ``people * hidden_size`` features (a runtime
    shape error), and the ``fc2`` output was viewed to an impossible
    ``(batch, seq, people, -1)`` shape.  The people axis is now folded into
    the batch axis so each person gets an independent LSTM pass over time.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size, vpn_size):
        super(WealthTransferModelWithVPN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        # Input is the per-person hidden vector produced by fc1.
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)
        # "VPN" obfuscation: project up, squash with sigmoid, project back.
        self.vpn_layer = nn.Linear(output_size, vpn_size)
        self.decrypt_layer = nn.Linear(vpn_size, output_size)

    def forward(self, x, target):
        # x, target: (batch, seq, people, feat) — concatenated on features.
        x = torch.cat((x, target), dim=-1)
        batch_size, seq_length, num_people, _ = x.shape

        # nn.Linear operates on the trailing dim, so no flattening is needed.
        x = self.relu(self.fc1(x))

        # Fold people into the batch axis: (batch * people, seq, hidden).
        x = x.permute(0, 2, 1, 3).reshape(batch_size * num_people, seq_length, -1)
        x, _ = self.lstm(x)
        x = self.fc2(x)

        # Restore (batch, seq, people, output_size).
        x = x.reshape(batch_size, num_people, seq_length, -1).permute(0, 2, 1, 3)

        # Pass through the "encrypt"/"decrypt" pair before returning.
        encrypted_output = torch.sigmoid(self.vpn_layer(x))
        decrypted_output = self.decrypt_layer(encrypted_output)
        return decrypted_output
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]
vpn_size = 128

model = WealthTransferModelWithVPN(input_size, hidden_size, lstm_hidden_size, output_size, vpn_size)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random target wealth per (sample, step, person).
target_wealth_state = torch.randn(batch_size, seq_length, 100, feature_size)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| |
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Synthetic per-person wealth values: 100 individuals, one feature each.
wealth_distribution = torch.randn(100, 1)

# Synthetic transfer-direction signal, one value per individual.
target_direction = torch.randn(100, 1)
|
|
| |
class WealthTransferModel(nn.Module):
    """Two-layer MLP mapping a (wealth, target-direction) pair to a new wealth value."""

    def __init__(self, input_size, hidden_size, output_size):
        super(WealthTransferModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x, target):
        # Join the two inputs on the feature axis, then run the MLP.
        combined = torch.cat((x, target), dim=1)
        hidden = self.relu(self.fc1(combined))
        return self.fc2(hidden)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[1] + target_direction.shape[1]
hidden_size = 64
output_size = wealth_distribution.shape[1]

model = WealthTransferModel(input_size, hidden_size, output_size)

loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random stand-in for the desired post-transfer wealth of each individual.
target_wealth_state = torch.randn(100, 1)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Batched sequences: 32 samples, 100 steps, one wealth feature per step.
wealth_distribution = torch.randn(32, 100, 1)

# Matching transfer-direction signal with the same layout.
target_direction = torch.randn(32, 100, 1)
|
|
| |
class WealthTransferModelWithNerves(nn.Module):
    """Per-step MLP followed by an LSTM over the sequence axis and a linear head.

    Consumes and produces (batch, seq, feat) tensors.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size):
        super(WealthTransferModelWithNerves, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)

    def forward(self, x, target):
        # Concatenate on the feature axis, embed each step, then run the LSTM.
        combined = torch.cat((x, target), dim=-1)
        hidden = self.relu(self.fc1(combined))
        hidden, _ = self.lstm(hidden)
        return self.fc2(hidden)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]

model = WealthTransferModelWithNerves(input_size, hidden_size, lstm_hidden_size, output_size)

loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random target wealth with the same layout as the inputs.
target_wealth_state = torch.randn(32, 100, 1)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
|
|
| |
# Batched sequences: 32 samples, 100 steps, one wealth feature per step.
wealth_distribution = torch.randn(32, 100, 1)

# Matching transfer-direction signal with the same layout.
target_direction = torch.randn(32, 100, 1)
|
|
| |
class WealthTransferModelWithVPN(nn.Module):
    """Per-step MLP + sequence LSTM whose output is pushed through a sigmoid
    "VPN" bottleneck and a linear "decrypt" projection before being returned.

    Consumes and produces (batch, seq, feat) tensors.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size, vpn_size):
        super(WealthTransferModelWithVPN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)
        # "VPN" obfuscation: project up, squash with sigmoid, project back.
        self.vpn_layer = nn.Linear(output_size, vpn_size)
        self.decrypt_layer = nn.Linear(vpn_size, output_size)

    def forward(self, x, target):
        # Concatenate on the feature axis, embed each step, then run the LSTM.
        hidden = self.relu(self.fc1(torch.cat((x, target), dim=-1)))
        hidden, _ = self.lstm(hidden)
        hidden = self.fc2(hidden)

        # Encrypt/decrypt round trip before returning.
        encrypted_output = torch.sigmoid(self.vpn_layer(hidden))
        return self.decrypt_layer(encrypted_output)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]
vpn_size = 128

model = WealthTransferModelWithVPN(input_size, hidden_size, lstm_hidden_size, output_size, vpn_size)

loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Random target wealth with the same layout as the inputs.
target_wealth_state = torch.randn(32, 100, 1)

# Full-batch training loop; report the loss every 10 epochs.
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    output = model(wealth_distribution, target_direction)
    loss = loss_fn(output, target_wealth_state)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
| import matplotlib.pyplot as plt |
|
|
| |
# Batched sequences: 32 samples, 100 steps, one wealth feature per step.
wealth_distribution = torch.randn(32, 100, 1)

# Matching transfer-direction signal with the same layout.
target_direction = torch.randn(32, 100, 1)
|
|
| |
class WealthTransferModelWithVPN(nn.Module):
    """Per-step MLP + sequence LSTM whose output is pushed through a sigmoid
    "VPN" bottleneck and a linear "decrypt" projection before being returned.

    Consumes and produces (batch, seq, feat) tensors.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size, vpn_size):
        super(WealthTransferModelWithVPN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)
        # "VPN" obfuscation: project up, squash with sigmoid, project back.
        self.vpn_layer = nn.Linear(output_size, vpn_size)
        self.decrypt_layer = nn.Linear(vpn_size, output_size)

    def forward(self, x, target):
        # Concatenate on the feature axis, embed each step, then run the LSTM.
        hidden = self.relu(self.fc1(torch.cat((x, target), dim=-1)))
        hidden, _ = self.lstm(hidden)
        hidden = self.fc2(hidden)

        # Encrypt/decrypt round trip before returning.
        encrypted_output = torch.sigmoid(self.vpn_layer(hidden))
        return self.decrypt_layer(encrypted_output)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]
vpn_size = 128

model = WealthTransferModelWithVPN(input_size, hidden_size, lstm_hidden_size, output_size, vpn_size)

# Single forward pass of the (untrained) model, no gradients needed.
with torch.no_grad():
    output_signal = model(wealth_distribution, target_direction)

# First batch element flattened to a 1-D numpy array for plotting.
wealth_waveform = output_signal[0].squeeze().numpy()

# Line plot of the raw signal.
plt.figure(figsize=(10, 5))
plt.plot(wealth_waveform, label='Wealth Transfer Signal')
plt.title('Wealth Transfer Signal Waveform')
plt.xlabel('Individual (or Time Step)')
plt.ylabel('Wealth Signal Intensity')
plt.legend()
plt.grid(True)
plt.show()
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
| import matplotlib.pyplot as plt |
|
|
| |
| |
# Batched 24-hour sequences: 32 samples, one wealth feature per hour.
wealth_distribution = torch.randn(32, 24, 1)

# Matching transfer-direction signal with the same layout.
target_direction = torch.randn(32, 24, 1)
|
|
| |
class WealthTransferModelWithVPN(nn.Module):
    """Per-step MLP + sequence LSTM whose output is pushed through a sigmoid
    "VPN" bottleneck and a linear "decrypt" projection before being returned.

    Consumes and produces (batch, seq, feat) tensors.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size, vpn_size):
        super(WealthTransferModelWithVPN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)
        # "VPN" obfuscation: project up, squash with sigmoid, project back.
        self.vpn_layer = nn.Linear(output_size, vpn_size)
        self.decrypt_layer = nn.Linear(vpn_size, output_size)

    def forward(self, x, target):
        # Concatenate on the feature axis, embed each step, then run the LSTM.
        hidden = self.relu(self.fc1(torch.cat((x, target), dim=-1)))
        hidden, _ = self.lstm(hidden)
        hidden = self.fc2(hidden)

        # Encrypt/decrypt round trip before returning.
        encrypted_output = torch.sigmoid(self.vpn_layer(hidden))
        return self.decrypt_layer(encrypted_output)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]
vpn_size = 128

model = WealthTransferModelWithVPN(input_size, hidden_size, lstm_hidden_size, output_size, vpn_size)

# Single forward pass of the (untrained) model, no gradients needed.
with torch.no_grad():
    output_signal = model(wealth_distribution, target_direction)

# First batch element flattened: one value per hour of the day.
wealth_waveform = output_signal[0].squeeze().numpy()

hours = list(range(24))

# Hourly line plot with point markers.
plt.figure(figsize=(10, 5))
plt.plot(hours, wealth_waveform, label='Wealth Transfer Signal over 24 Hours', marker='o')
plt.title('Wealth Transfer Signal in 24-Hour Intervals')
plt.xlabel('Hour of the Day')
plt.ylabel('Wealth Signal Intensity')
plt.xticks(hours)
plt.grid(True)
plt.legend()
plt.show()
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
| import matplotlib.pyplot as plt |
| import numpy as np |
|
|
| |
# Batched 24-hour sequences: 32 samples, one wealth feature per hour.
wealth_distribution = torch.randn(32, 24, 1)

# Matching transfer-direction signal with the same layout.
target_direction = torch.randn(32, 24, 1)
|
|
| |
class WealthTransferModelWithVPN(nn.Module):
    """Per-step MLP + sequence LSTM whose output is pushed through a sigmoid
    "VPN" bottleneck and a linear "decrypt" projection before being returned.

    Consumes and produces (batch, seq, feat) tensors.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size, vpn_size):
        super(WealthTransferModelWithVPN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)
        # "VPN" obfuscation: project up, squash with sigmoid, project back.
        self.vpn_layer = nn.Linear(output_size, vpn_size)
        self.decrypt_layer = nn.Linear(vpn_size, output_size)

    def forward(self, x, target):
        # Concatenate on the feature axis, embed each step, then run the LSTM.
        hidden = self.relu(self.fc1(torch.cat((x, target), dim=-1)))
        hidden, _ = self.lstm(hidden)
        hidden = self.fc2(hidden)

        # Encrypt/decrypt round trip before returning.
        encrypted_output = torch.sigmoid(self.vpn_layer(hidden))
        return self.decrypt_layer(encrypted_output)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]
vpn_size = 128

model = WealthTransferModelWithVPN(input_size, hidden_size, lstm_hidden_size, output_size, vpn_size)

# Single forward pass of the (untrained) model, no gradients needed.
with torch.no_grad():
    output_signal = model(wealth_distribution, target_direction)

# First batch element flattened: one value per hour of the day.
wealth_waveform = output_signal[0].squeeze().numpy()

# Keep only values above 0.5; everything else is zeroed by the mask.
mask = wealth_waveform > 0.5
masked_signal = wealth_waveform * mask

hours = list(range(24))

plt.figure(figsize=(10, 5))

# Scatter the masked signal, coloring each point by its intensity.
scatter = plt.scatter(hours, masked_signal, c=masked_signal, cmap='viridis', s=100, edgecolor='k', marker='o')
plt.colorbar(scatter, label="Wealth Signal Intensity")

plt.title('Masked Wealth Transfer Signal in 24-Hour Intervals (Colorful Waveform)')
plt.xlabel('Hour of the Day')
plt.ylabel('Wealth Signal Intensity')
plt.xticks(hours)
plt.grid(True)
plt.show()
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
| import matplotlib.pyplot as plt |
| import numpy as np |
|
|
| |
# Batched 24-hour sequences: 32 samples, one wealth feature per hour.
wealth_distribution = torch.randn(32, 24, 1)

# Matching transfer-direction signal with the same layout.
target_direction = torch.randn(32, 24, 1)
|
|
| |
class WealthTransferModelWithVPN(nn.Module):
    """Per-step MLP + sequence LSTM whose output is pushed through a sigmoid
    "VPN" bottleneck and a linear "decrypt" projection before being returned.

    Consumes and produces (batch, seq, feat) tensors.
    """

    def __init__(self, input_size, hidden_size, lstm_hidden_size, output_size, vpn_size):
        super(WealthTransferModelWithVPN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_size, lstm_hidden_size, batch_first=True)
        self.fc2 = nn.Linear(lstm_hidden_size, output_size)
        # "VPN" obfuscation: project up, squash with sigmoid, project back.
        self.vpn_layer = nn.Linear(output_size, vpn_size)
        self.decrypt_layer = nn.Linear(vpn_size, output_size)

    def forward(self, x, target):
        # Concatenate on the feature axis, embed each step, then run the LSTM.
        hidden = self.relu(self.fc1(torch.cat((x, target), dim=-1)))
        hidden, _ = self.lstm(hidden)
        hidden = self.fc2(hidden)

        # Encrypt/decrypt round trip before returning.
        encrypted_output = torch.sigmoid(self.vpn_layer(hidden))
        return self.decrypt_layer(encrypted_output)
|
|
| |
# Model dimensions: the input is the concatenated (wealth, direction) pair.
input_size = wealth_distribution.shape[-1] + target_direction.shape[-1]
hidden_size = 64
lstm_hidden_size = 32
output_size = wealth_distribution.shape[-1]
vpn_size = 128

model = WealthTransferModelWithVPN(input_size, hidden_size, lstm_hidden_size, output_size, vpn_size)

# Single forward pass of the (untrained) model, no gradients needed.
with torch.no_grad():
    output_signal = model(wealth_distribution, target_direction)

# First batch element flattened: one value per hour of the day.
wealth_waveform = output_signal[0].squeeze().numpy()

# High-pass mask: keep values above 0.5.
mask1 = wealth_waveform > 0.5
masked_signal1 = wealth_waveform * mask1

# Low-pass mask: keep values below 0.2.
mask2 = wealth_waveform < 0.2
masked_signal2 = wealth_waveform * mask2

# Union of the two bands (values in [0.2, 0.5] are zeroed out).
combined_masked_signal = masked_signal1 + masked_signal2

hours = list(range(24))

plt.figure(figsize=(10, 5))

# Scatter the combined signal, coloring each point by its intensity.
scatter = plt.scatter(hours, combined_masked_signal, c=combined_masked_signal, cmap='plasma', s=100, edgecolor='k', marker='o')
plt.colorbar(scatter, label="Wealth Signal Intensity")

plt.title('Combined Masked Wealth Transfer Signal in 24-Hour Intervals (Colorful Waveform)')
plt.xlabel('Hour of the Day')
plt.ylabel('Wealth Signal Intensity')
plt.xticks(hours)
plt.grid(True)
plt.show()