Spaces: Sleeping
Commit · 17fd5e3 (1 parent)
Parent(s): 8ff38d6
Fix bug 1
Browse files

Changed files:
- Dockerfile +1 -1
- requirements.txt +9 -0
- src/coroutines/env_loop.py +1 -1
- src/data/dataset.py +1 -1
- src/envs/world_model_env.py +3 -3
- src/models/actor_critic.py +3 -3
- src/models/diffusion/denoiser.py +3 -3
- src/models/rew_end_model.py +2 -2
Dockerfile (CHANGED):
@@ -24,7 +24,7 @@ RUN mkdir -p csgo/spawn config checkpoints cache
 # Set environment variables
 ENV PYTHONPATH=/app/src:/app
 ENV CUDA_VISIBLE_DEVICES=""
-ENV OMP_NUM_THREADS=
+ENV OMP_NUM_THREADS=4
 
 # Expose port
 EXPOSE 7860
requirements.txt (CHANGED):
@@ -28,6 +28,15 @@ h5py>=3.7.0
 ale_py>=0.8.0
 gymnasium>=0.28.0
 
+# Experiment tracking (required by utils.py)
+wandb>=0.13.0
+
+# Metrics (required by rew_end_model.py)
+torcheval>=0.0.6
+
+# Progress bars (may be used by various components)
+tqdm>=4.64.0
+
 # Optional: for better performance
 # torch-audio # if needed for audio processing
 
src/coroutines/env_loop.py (CHANGED):
@@ -6,7 +6,7 @@ import torch.nn as nn
 from torch.distributions.categorical import Categorical
 
 from . import coroutine
-from envs import TorchEnv, WorldModelEnv
+from ..envs import TorchEnv, WorldModelEnv
 
 
 @coroutine
src/data/dataset.py (CHANGED):
@@ -13,7 +13,7 @@ from torch.utils.data import Dataset as TorchDataset
 from .episode import Episode
 from .segment import Segment, SegmentId
 from .utils import make_segment
-from utils import StateDictMixin
+from ..utils import StateDictMixin
 
 
 class Dataset(StateDictMixin, TorchDataset):
src/envs/world_model_env.py (CHANGED):
@@ -9,9 +9,9 @@ from torch import Tensor
 from torch.distributions.categorical import Categorical
 import torch.nn.functional as F
 
-from coroutines import coroutine
-from models.diffusion import Denoiser, DiffusionSampler, DiffusionSamplerConfig
-from models.rew_end_model import RewEndModel
+from ..coroutines import coroutine
+from ..models.diffusion import Denoiser, DiffusionSampler, DiffusionSamplerConfig
+from ..models.rew_end_model import RewEndModel
 
 ResetOutput = Tuple[torch.FloatTensor, Dict[str, Any]]
 StepOutput = Tuple[Tensor, Tensor, Tensor, Tensor, Dict[str, Any]]
src/models/actor_critic.py (CHANGED):
@@ -10,9 +10,9 @@ from torch.distributions.categorical import Categorical
 import torch.nn.functional as F
 
 from .blocks import Conv3x3, SmallResBlock
-from coroutines.env_loop import make_env_loop
-from envs import TorchEnv, WorldModelEnv
-from utils import init_lstm, LossAndLogs
+from ..coroutines.env_loop import make_env_loop
+from ..envs import TorchEnv, WorldModelEnv
+from ..utils import init_lstm, LossAndLogs
 
 
 ActorCriticOutput = namedtuple("ActorCriticOutput", "logits_act val hx_cx")
src/models/diffusion/denoiser.py (CHANGED):
@@ -10,11 +10,11 @@ from PIL import Image
 import numpy as np
 import cv2
 
-from data import Batch
+from ...data import Batch
 from .inner_model import InnerModel, InnerModelConfig
-from utils import LossAndLogs
+from ...utils import LossAndLogs
 
-from
+from ..contour_detection_model import ContourDetectionModel
 
 def add_dims(input: Tensor, n: int) -> Tensor:
     return input.reshape(input.shape + (1,) * (n - input.ndim))
src/models/rew_end_model.py (CHANGED):
@@ -8,8 +8,8 @@ import torch.nn.functional as F
 from torcheval.metrics.functional import multiclass_confusion_matrix
 
 from .blocks import Conv3x3, Downsample, ResBlocks
-from data import Batch
-from utils import init_lstm, LossAndLogs
+from ..data import Batch
+from ..utils import init_lstm, LossAndLogs
 
 
 @dataclass