medmekk (HF Staff) committed
Commit b9597c9 · Parent: 5d4178a

Add Builds

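Every file below carries the same one-line fix: the input to `ops.dropout_add_ln_fwd` is now flattened to one row per token (`view(-1, hidden_states.shape[-1])`) instead of one row per batch element (`view(hidden_states.shape[0], -1)`). For a typical `(batch, seq_len, hidden_size)` input, the old reshape produced rows of width `seq_len * hidden_size`, which no longer match the `hidden_size`-wide `gamma`; the new reshape keeps each row at `hidden_size`, the shape a fused per-row norm kernel expects. A minimal sketch of the difference (the shapes here are illustrative assumptions, not taken from the commit):

```python
import torch

# Illustrative shapes only: (batch, seq_len, hidden_size).
batch, seq_len, hidden = 2, 4, 8
hidden_states = torch.randn(batch, seq_len, hidden)

# Old reshape: one row per batch element, width seq_len * hidden.
# A gamma/weight of size `hidden` cannot scale rows of width 32.
old = hidden_states.view(hidden_states.shape[0], -1)
print(old.shape)  # torch.Size([2, 32])

# New reshape: one row per token, width hidden -- matches gamma and
# the per-row normalization the fused kernel performs.
new = hidden_states.view(-1, hidden_states.shape[-1])
print(new.shape)  # torch.Size([8, 8])
```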
build/torch27-cxx11-cu118-x86_64-linux/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
build/torch27-cxx11-cu126-x86_64-linux/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
build/torch27-cxx11-cu128-x86_64-linux/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
build/torch28-cxx11-cu126-x86_64-linux/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
build/torch28-cxx11-cu128-x86_64-linux/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
build/torch28-cxx11-cu129-x86_64-linux/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
torch-ext/layer_norm/layers.py CHANGED
@@ -10,7 +10,7 @@ class LayerNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,
@@ -33,7 +33,7 @@ class LlamaRMSNorm(nn.Module):
 
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         output = ops.dropout_add_ln_fwd(
-            hidden_states.view(hidden_states.shape[0], -1),
+            hidden_states.view(-1, hidden_states.shape[-1]),
             gamma = self.weight,
             beta = None,
             rowscale = None,