repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_blob_container_legal_hold_test.go | provider/pkg/resources/customresources/custom_blob_container_legal_hold_test.go | package customresources
import (
"context"
"encoding/json"
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/azure"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Verbatim from https://learn.microsoft.com/en-us/rest/api/storagerp/blob-containers/get?view=rest-storagerp-2023-01-01&tabs=HTTP
// Sample Azure GET response for a blob container's legal hold properties.
// Used as the mock Azure response in TestRead below; the content must stay
// verbatim so the tests exercise the real response shape.
const legalHoldPropertiesForContainer = `
{
"properties": {
"legalHold": {
"hasLegalHold": true,
"protectedAppendWritesHistory": {
"allowProtectedAppendWritesAll": true,
"timestamp": "2022-09-01T01:58:44.5044483Z"
},
"tags": [
{
"tag": "tag1",
"timestamp": "2018-03-26T05:06:09.6964643Z",
"objectIdentifier": "ce7cd28a-fc25-4bf1-8fb9-e1b9833ffd4b",
"tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47"
},
{
"tag": "tag2",
"timestamp": "2018-03-26T05:06:09.6964643Z",
"objectIdentifier": "ce7cd28a-fc25-4bf1-8fb9-e1b9833ffd4b",
"tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47"
},
{
"tag": "tag3",
"timestamp": "2018-03-26T05:06:09.6964643Z",
"objectIdentifier": "ce7cd28a-fc25-4bf1-8fb9-e1b9833ffd4b",
"tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47"
}
]
}
}
}`
// TestCreate verifies that creating a blob-container legal hold issues a POST
// to the container's setLegalHold endpoint with the expected request body.
//
// Fix: testify's assert.Equal signature is (t, expected, actual); the original
// calls passed (actual, expected), which produces misleading failure messages.
func TestCreate(t *testing.T) {
	containerId := "/subscriptions/123-456/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/accountName/blobServices/default/containers/containerName"

	t.Run("Basic Create", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		inputs := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
			}),
		}
		_, err := custom.Create(context.Background(), containerId+"/legalHold", inputs)
		require.NoError(t, err)
		require.Len(t, m.PostIds, 1)
		assert.Equal(t, containerId+"/setLegalHold", m.PostIds[0])
		require.Len(t, m.PostBodies, 1)
		assert.Contains(t, m.PostBodies[0], "tags")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag1")
		// allowProtectedAppendWritesAll was not specified, so it must not be sent.
		assert.NotContains(t, m.PostBodies[0], "allowProtectedAppendWritesAll")
	})

	t.Run("Create with allowProtectedAppendWritesAll", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		inputs := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
			}),
			resource.PropertyKey("allowProtectedAppendWritesAll"): resource.NewBoolProperty(true),
		}
		_, err := custom.Create(context.Background(), containerId+"/legalHold", inputs)
		require.NoError(t, err)
		require.Len(t, m.PostIds, 1)
		assert.Equal(t, containerId+"/setLegalHold", m.PostIds[0])
		require.Len(t, m.PostBodies, 1)
		assert.Contains(t, m.PostBodies[0], "tags")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag1")
		assert.Contains(t, m.PostBodies[0], "allowProtectedAppendWritesAll")
		assert.True(t, m.PostBodies[0]["allowProtectedAppendWritesAll"].(bool))
	})

	t.Run("Create without tags fails", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		inputs := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{}),
		}
		_, err := custom.Create(context.Background(), containerId+"/legalHold", inputs)
		// The error should tell the user which property is missing.
		if assert.Error(t, err) {
			assert.Contains(t, err.Error(), "'tags'")
		}
	})
}
// TestRead verifies that reading a legal hold maps the Azure response (the
// verbatim sample above) to exactly two SDK outputs: tags and
// allowProtectedAppendWritesAll.
//
// Fixes: the unchecked type assertion on res["tags"] would panic the test run
// instead of failing cleanly; assert.Equal's (expected, actual) order was
// swapped for the allowProtectedAppendWritesAll check.
func TestRead(t *testing.T) {
	id := "/subscriptions/123-456/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/accountName/blobServices/default/containers/containerName/legalHold"
	azureResponse := map[string]any{}
	err := json.Unmarshal([]byte(legalHoldPropertiesForContainer), &azureResponse)
	require.NoError(t, err)
	m := azure.MockAzureClient{
		GetResponse: azureResponse,
	}
	custom := blobContainerLegalHold(&m)
	res, found, err := custom.Read(context.Background(), id, nil)
	require.NoError(t, err)
	require.True(t, found)
	require.NotNil(t, res)
	require.Len(t, res, 2)
	require.Contains(t, res, "tags")
	// Guard the type assertion so a shape change fails the test instead of panicking.
	tags, ok := res["tags"].([]string)
	require.True(t, ok, "expected res[\"tags\"] to be []string, got %T", res["tags"])
	assert.Equal(t, []string{"tag1", "tag2", "tag3"}, tags)
	require.Contains(t, res, "allowProtectedAppendWritesAll")
	assert.Equal(t, true, res["allowProtectedAppendWritesAll"])
}
// TestUpdate verifies the update logic for legal holds: added tags are POSTed
// to setLegalHold, removed tags to clearLegalHold, a combined change issues
// both calls, and a pure reordering of tags is a no-op.
//
// Fix: testify's assert.Equal signature is (t, expected, actual); the original
// calls passed (actual, expected), which produces misleading failure messages.
func TestUpdate(t *testing.T) {
	containerId := "/subscriptions/123-456/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/accountName/blobServices/default/containers/containerName"

	t.Run("Basic Update: add a tag", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		olds := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
			}),
		}
		news := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
				resource.NewStringProperty("tag2"),
			}),
		}
		_, err := custom.Update(context.Background(), containerId+"/legalHold", news, olds)
		require.NoError(t, err)
		require.Len(t, m.PostIds, 1)
		assert.Equal(t, containerId+"/setLegalHold", m.PostIds[0])
		require.Len(t, m.PostBodies, 1)
		// Only the newly added tag should be sent to setLegalHold.
		assert.Contains(t, m.PostBodies[0], "tags")
		assert.NotContains(t, m.PostBodies[0]["tags"], "tag1")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag2")
	})

	t.Run("Basic Update: remove a tag", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		olds := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
				resource.NewStringProperty("tag2"),
			}),
		}
		news := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
			}),
		}
		_, err := custom.Update(context.Background(), containerId+"/legalHold", news, olds)
		require.NoError(t, err)
		require.Len(t, m.PostIds, 1)
		require.Len(t, m.PostBodies, 1)
		assert.Equal(t, containerId+"/clearLegalHold", m.PostIds[0])
		// Only the removed tag should be sent to clearLegalHold.
		assert.Contains(t, m.PostBodies[0], "tags")
		assert.NotContains(t, m.PostBodies[0]["tags"], "tag1")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag2")
	})

	t.Run("Full update", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		olds := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
				resource.NewStringProperty("tag2"),
			}),
			resource.PropertyKey("allowProtectedAppendWritesAll"): resource.NewBoolProperty(true),
		}
		news := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag7"),
			}),
			resource.PropertyKey("allowProtectedAppendWritesAll"): resource.NewBoolProperty(false),
		}
		_, err := custom.Update(context.Background(), containerId+"/legalHold", news, olds)
		require.NoError(t, err)
		// Changing tags and the flag at once requires a set call and a clear call.
		require.Len(t, m.PostIds, 2)
		require.Len(t, m.PostBodies, 2)
		assert.Equal(t, containerId+"/setLegalHold", m.PostIds[0])
		assert.Contains(t, m.PostBodies[0], "tags")
		assert.NotContains(t, m.PostBodies[0]["tags"], "tag1")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag7")
		assert.Contains(t, m.PostBodies[0], "allowProtectedAppendWritesAll")
		assert.Equal(t, false, m.PostBodies[0]["allowProtectedAppendWritesAll"])
		assert.Equal(t, containerId+"/clearLegalHold", m.PostIds[1])
		assert.Contains(t, m.PostBodies[1], "tags")
		assert.Contains(t, m.PostBodies[1]["tags"], "tag1")
		assert.Contains(t, m.PostBodies[1]["tags"], "tag2")
	})

	t.Run("No-op update for changed tag order", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		olds := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
				resource.NewStringProperty("tag2"),
			}),
			resource.PropertyKey("allowProtectedAppendWritesAll"): resource.NewBoolProperty(true),
		}
		news := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag2"),
				resource.NewStringProperty("tag1"),
			}),
			resource.PropertyKey("allowProtectedAppendWritesAll"): resource.NewBoolProperty(true),
		}
		_, err := custom.Update(context.Background(), containerId+"/legalHold", news, olds)
		require.NoError(t, err)
		// Tag sets are equal, so no Azure call should be made.
		assert.Len(t, m.PostIds, 0)
	})
}
// TestDelete verifies that deleting a legal hold clears every previously-set
// tag via a single POST to clearLegalHold.
//
// Fix: testify's assert.Equal signature is (t, expected, actual); the original
// call passed (actual, expected), which produces misleading failure messages.
func TestDelete(t *testing.T) {
	containerId := "/subscriptions/123-456/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/accountName/blobServices/default/containers/containerName"

	t.Run("Delete removes all tags", func(t *testing.T) {
		m := azure.MockAzureClient{}
		custom := blobContainerLegalHold(&m)
		olds := resource.PropertyMap{
			resource.PropertyKey("tags"): resource.NewArrayProperty([]resource.PropertyValue{
				resource.NewStringProperty("tag1"),
				resource.NewStringProperty("tag2"),
			}),
		}
		err := custom.Delete(context.Background(), containerId+"/legalHold", olds, nil)
		require.NoError(t, err)
		require.Len(t, m.PostIds, 1)
		require.Len(t, m.PostBodies, 1)
		assert.Equal(t, containerId+"/clearLegalHold", m.PostIds[0])
		// All previously-held tags must be included in the clear request.
		assert.Contains(t, m.PostBodies[0], "tags")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag1")
		assert.Contains(t, m.PostBodies[0]["tags"], "tag2")
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_storage_test.go | provider/pkg/resources/customresources/custom_storage_test.go | // Copyright 2016-2023, Pulumi Corporation.
package customresources
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseBlobIdProperties checks that a fully-qualified blob resource ID is
// decomposed into its five path properties, and that a URL-encoded blob name
// is passed through unchanged.
func TestParseBlobIdProperties(t *testing.T) {
	const blobId = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myrg/providers/Microsoft.Storage/storageAccounts/mysa/blobServices/default/containers/myc/blobs/folder%2Flog.txt"
	props, ok := parseBlobIdProperties(blobId)
	assert.True(t, ok)
	assert.Len(t, props, 5)
	got := []string{
		props[subscriptionId].StringValue(),
		props[resourceGroupName].StringValue(),
		props[accountName].StringValue(),
		props[containerName].StringValue(),
		props[blobName].StringValue(),
	}
	want := []string{"00000000-0000-0000-0000-000000000000", "myrg", "mysa", "myc", "folder%2Flog.txt"}
	assert.Equal(t, want, got)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_pim.go | provider/pkg/resources/customresources/custom_pim.go | package customresources
import (
"cmp"
"context"
"fmt"
"slices"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider/crud"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
)
// Rule identifies a single rule of a role management policy by its ID.
// NOTE(review): not referenced anywhere in this file's visible code —
// presumably consumed elsewhere (or by tests); confirm before removing.
type Rule struct {
	Id string
}

// PolicyWithRules models a role management policy as the list of its rules.
// NOTE(review): not referenced in this file's visible code — confirm usage.
type PolicyWithRules struct {
	Rules []Rule
}
// roleManagementPolicyClient is a helper struct containing dependencies and methods for CRUD operations on PIM Role Management Policies.
type roleManagementPolicyClient struct {
	client           crud.ResourceCrudClient    // generic Azure CRUD client for this resource
	resourceMetadata resources.AzureAPIResource // metadata (path, API version) of the policy resource
}
// create implements Create for a PIM Role Management Policy.
//
// Azure does not support creating these policies — every scope/role pair
// already has a default one. We therefore read the existing policy first,
// apply the user's configuration via CreateOrUpdate, and stash the original
// state in the outputs under OriginalStateKey so delete can restore it later.
func (c *roleManagementPolicyClient) create(ctx context.Context, id string, inputs resource.PropertyMap) (map[string]any, error) {
	// Snapshot the pre-existing (default) policy before modifying it.
	originalState, err := c.client.Read(ctx, id, "")
	if err != nil {
		return nil, err
	}
	bodyParams, err := c.client.PrepareAzureRESTBody(id, inputs)
	if err != nil {
		return nil, err
	}
	queryParams := map[string]any{"api-version": c.client.ApiVersion()}
	// We could skip this if bodyParams = originalState, i.e., the user adds a policy
	// in its default configuration to their program, but we don't have a diff function.
	resp, _, err := c.client.CreateOrUpdate(ctx, id, bodyParams, queryParams)
	if err != nil {
		return nil, err
	}
	outputs := c.client.ResponseBodyToSdkOutputs(resp)
	// Preserve the original policy so delete() can reset to it.
	outputs[OriginalStateKey] = originalState
	return outputs, nil
}
// update implements Update for a PIM Role Management Policy.
//
// Rules the user removed from their program must be reset to their original
// (default) values rather than simply omitted — omitting them would leave
// them in their current, possibly customized, state on the Azure side.
// restoreDefaultsForDeletedRules handles that using the original state
// captured at create time.
func (c *roleManagementPolicyClient) update(ctx context.Context, id string, news, olds resource.PropertyMap) (map[string]any, error) {
	if !olds.HasValue(OriginalStateKey) {
		// Without the original snapshot we can still apply the new rules, but
		// deleted rules cannot be reset to their defaults.
		logging.V(3).Infof("Warning: no original state found for %s, cannot reset deleted rules", id)
	} else {
		restoreDefaultsForDeletedRules(olds, news)
	}
	bodyParams, err := c.client.PrepareAzureRESTBody(id, news)
	if err != nil {
		return nil, err
	}
	queryParams := map[string]any{"api-version": c.client.ApiVersion()}
	resp, _, err := c.client.CreateOrUpdate(ctx, id, bodyParams, queryParams)
	if err != nil {
		return nil, err
	}
	outputs := c.client.ResponseBodyToSdkOutputs(resp)
	// Carry the original state forward so future updates/deletes can use it.
	if olds.HasValue(OriginalStateKey) {
		outputs[OriginalStateKey] = olds[OriginalStateKey].Mappable()
	}
	return outputs, nil
}
// delete implements Delete for a PIM Role Management Policy.
//
// Azure does not support deleting these policies; instead we restore the
// original (default) policy captured in the state at create time. If no
// original state is available, the delete is a no-op.
func (c *roleManagementPolicyClient) delete(ctx context.Context, id string, previousInputs, state resource.PropertyMap) error {
	if !state.HasValue(OriginalStateKey) {
		logging.V(3).Infof("Warning: no original state found for %s, cannot reset", id)
		return nil
	}
	origState := state[OriginalStateKey].Mappable().(map[string]any)
	// Re-derive the request body for the original policy from the stored state.
	pathItems, err := resources.ParseResourceID(id, c.resourceMetadata.Path)
	if err != nil {
		return err
	}
	origSdkInputs := c.client.ResponseToSdkInputs(pathItems, origState)
	origRequest, err := c.client.SdkInputsToRequestBody(origSdkInputs, id)
	if err != nil {
		return err
	}
	queryParams := map[string]any{"api-version": c.client.ApiVersion()}
	// "Delete" by writing the original policy back.
	_, _, err = c.client.CreateOrUpdate(ctx, id, origRequest, queryParams)
	return err
}
// pimRoleManagementPolicy creates a CustomResource for PIM Role Management Policies. See #2455 and
// https://learn.microsoft.com/en-us/rest/api/authorization/privileged-role-policy-rest-sample.
//
// Returns an error if the underlying RoleManagementPolicy resource metadata
// cannot be looked up.
func pimRoleManagementPolicy(lookupResource resources.ResourceLookupFunc, crudClientFactory crud.ResourceCrudClientFactory) (*CustomResource, error) {
	// A bit of a hack to initialize some resource. This func's parameters are all nil when the
	// function is called for the first time, for customresources.featureLookup, which is ok but
	// would break our initialization here.
	var client crud.ResourceCrudClient
	var res resources.AzureAPIResource
	if lookupResource != nil && crudClientFactory != nil {
		var found bool
		var err error
		res, found, err = lookupResource("azure-native:authorization:RoleManagementPolicy")
		if err != nil {
			return nil, err
		}
		if !found {
			return nil, fmt.Errorf("resource %q not found", "azure-native:authorization:RoleManagementPolicy")
		}
		client = crudClientFactory(&res)
	}
	// Note: client/res may be zero-valued here (featureLookup case); the
	// CRUD callbacks below are only invoked when they are fully initialized.
	policyClient := &roleManagementPolicyClient{
		client:           client,
		resourceMetadata: res,
	}
	return &CustomResource{
		path: "/{scope}/providers/Microsoft.Authorization/roleManagementPolicies/{roleManagementPolicyName}",
		// PIM Role Management Policies cannot be created. Instead, each resource has a default policy.
		// But we need to allow the user to create the Pulumi resource, so we return true here and then
		// let Create just return the existing policy.
		CanCreate: func(ctx context.Context, id string) error {
			return nil
		},
		// PIM Role Management Policies cannot be created. Instead, each resource has a default policy.
		// Simply look up the default policy and return it.
		Create: policyClient.create,
		// Tricky because rules can be removed from the list of rules of the policy, but simply removing them from the
		// request will leave them in their current state, not the default state.
		Update: policyClient.update,
		// PIM Role Management Policies cannot be deleted. Instead, we reset the policy to its default.
		Delete: policyClient.delete,
	}, nil
}
// restoreDefaultsForDeletedRules restores the original values for rules that were deleted from the policy.
// For each rule in the original state that's not in news, it adds the original rule to news.
// news is modified in place: its "rules" entry is replaced by the merged, sorted list.
// Precondition: olds must contain OriginalStateKey (callers check this).
func restoreDefaultsForDeletedRules(olds, news resource.PropertyMap) {
	newRules := mapRulesById(news)
	origState := olds[OriginalStateKey].ObjectValue()
	if !origState.HasValue("properties") {
		logging.V(3).Infof("Warning: restoreDefaultsForDeletedRules: no 'properties' in original state")
		return
	}
	origRules := mapRulesById(origState["properties"].ObjectValue())
	if len(origRules) == 0 {
		return
	}
	newRulesList := []resource.PropertyValue{}
	if len(newRules) > 0 {
		// NOTE(review): mapRulesById also returns nil when news HAS rules but one
		// of them is malformed (missing "id"). In that case this branch is skipped
		// and the user's rules would be replaced entirely by the original ones —
		// confirm that is the intended behavior.
		newRulesList = news["rules"].ArrayValue()
	}
	// For each rule in the original state, if it's not in the new rules (i.e., user didn't specify it),
	// add it back from the original state to preserve it.
	for id, origRule := range origRules {
		if _, ok := newRules[id]; !ok {
			newRulesList = append(newRulesList, origRule)
		}
	}
	// Sort for deterministic ordering; the map iteration above is randomized.
	sortRules(newRulesList)
	news["rules"] = resource.NewArrayProperty(newRulesList)
}
// sortRules orders the given policy rules in place, lexicographically by
// their "id" property, to make the resulting rule list deterministic.
func sortRules(rules []resource.PropertyValue) {
	ruleId := func(v resource.PropertyValue) string {
		return v.ObjectValue()["id"].StringValue()
	}
	slices.SortFunc(rules, func(left, right resource.PropertyValue) int {
		return cmp.Compare(ruleId(left), ruleId(right))
	})
}
// mapRulesById indexes the "rules" array of a management policy by rule id.
// It returns nil when the policy has no rules, or when any rule is malformed
// (not an object, or missing an "id" property).
func mapRulesById(managementPolicy resource.PropertyMap) map[string]resource.PropertyValue {
	if !managementPolicy.HasValue("rules") {
		return nil
	}
	byId := map[string]resource.PropertyValue{}
	for _, r := range managementPolicy["rules"].ArrayValue() {
		if !r.IsObject() || !r.ObjectValue().HasValue("id") {
			logging.V(3).Infof("Warning: mapRulesById: rule has no id: %v", r)
			return nil
		}
		byId[r.ObjectValue()["id"].StringValue()] = r
	}
	return byId
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_roleassignment.go | provider/pkg/resources/customresources/custom_roleassignment.go | // Copyright 2025, Pulumi Corporation. All rights reserved.
package customresources
import (
"context"
"fmt"
"regexp"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/azure"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider/crud"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
)
const (
	// Pulumi type token and Azure resource path of the RoleAssignment resource.
	roleAssignmentTok  = "azure-native:authorization:RoleAssignment"
	roleAssignmentPath = "/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}"
)

// subscriptionIdRegex captures the subscription ID segment of an Azure resource ID.
var subscriptionIdRegex = regexp.MustCompile(`/subscriptions/([^/]+)`)

// roleAssignmentClient defines the interface for role assignment operations needed for testing.
type roleAssignmentClient interface {
	// getTenantId retrieves the tenant ID for a given subscription.
	getTenantId(ctx context.Context, subscriptionId string) (string, error)
	// convertResponseToOutputs converts an Azure API response to SDK outputs.
	convertResponseToOutputs(response map[string]any) map[string]any
}
// roleAssignmentClientImpl implements roleAssignmentClient using real Azure SDK clients.
type roleAssignmentClientImpl struct {
	subsClient *armsubscriptions.Client // resolves a subscription's owning tenant
	crudClient crud.ResourceCrudClient  // converts Azure responses to SDK outputs
}
// getTenantId looks up the tenant that owns the given subscription via the
// ARM subscriptions API. It fails when the client is uninitialized, the
// subscription cannot be fetched, or the subscription carries no tenant ID.
func (c *roleAssignmentClientImpl) getTenantId(ctx context.Context, subscriptionId string) (string, error) {
	if c.subsClient == nil {
		return "", fmt.Errorf("subscriptions client is nil")
	}
	resp, err := c.subsClient.Get(ctx, subscriptionId, nil)
	if err != nil {
		return "", fmt.Errorf("failed to get subscription %s: %w", subscriptionId, err)
	}
	tenantId := resp.Subscription.TenantID
	if tenantId == nil {
		return "", fmt.Errorf("subscription %s has no tenant ID", subscriptionId)
	}
	return *tenantId, nil
}
// convertResponseToOutputs converts a raw Azure API response body into the
// resource's SDK-shaped outputs by delegating to the CRUD client.
func (c *roleAssignmentClientImpl) convertResponseToOutputs(response map[string]any) map[string]any {
	return c.crudClient.ResponseBodyToSdkOutputs(response)
}
// roleAssignment returns a custom resource for RoleAssignment that handles cross-tenant scenarios.
//
// Cross-tenant role assignments (those with delegatedManagedIdentityResourceId) require a special
// tenantId query parameter during Read operations. This custom resource extracts the tenant ID from
// the subscription that owns the delegated managed identity and includes it when reading the assignment.
//
// Returns an error if the RoleAssignment resource metadata cannot be looked up
// or the subscriptions client cannot be constructed.
func roleAssignment(
	lookupResource resources.ResourceLookupFunc,
	crudClientFactory crud.ResourceCrudClientFactory,
	azureClient azure.AzureClient,
	tokenCred azcore.TokenCredential,
) (*CustomResource, error) {
	// This func's parameters are all nil when the function is called for the first time, for
	// `customresources.featureLookup`, so we initialize the objects we need conditionally
	var client roleAssignmentClient
	var res resources.AzureAPIResource
	if lookupResource != nil && crudClientFactory != nil && tokenCred != nil {
		var found bool
		var err error
		res, found, err = lookupResource(roleAssignmentTok)
		if err != nil {
			return nil, err
		}
		if !found {
			return nil, fmt.Errorf("resource %q not found", roleAssignmentTok)
		}
		crudClient := crudClientFactory(&res)
		subsClient, err := armsubscriptions.NewClient(tokenCred, nil)
		if err != nil {
			return nil, err
		}
		client = &roleAssignmentClientImpl{
			subsClient: subsClient,
			crudClient: crudClient,
		}
	}
	// Note: client/res may be nil/zero in the featureLookup case; Read is only
	// invoked once they are fully initialized.
	return &CustomResource{
		tok:  roleAssignmentTok,
		path: roleAssignmentPath,
		Read: func(ctx context.Context, id string, inputs resource.PropertyMap) (map[string]any, bool, error) {
			return readRoleAssignment(ctx, id, inputs, client, &res, azureClient)
		},
	}, nil
}
// readRoleAssignment implements the Read operation for RoleAssignment,
// handling cross-tenant scenarios. When the inputs carry a
// delegatedManagedIdentityResourceId, the tenant owning that identity's
// subscription is resolved and passed as a tenantId query parameter.
func readRoleAssignment(
	ctx context.Context,
	id string,
	inputs resource.PropertyMap,
	client roleAssignmentClient,
	res *resources.AzureAPIResource,
	azureClient azure.AzureClient,
) (map[string]any, bool, error) {
	// A non-empty delegatedManagedIdentityResourceId marks a cross-tenant assignment.
	var delegatedId string
	if prop, ok := inputs["delegatedManagedIdentityResourceId"]; ok && !prop.IsNull() {
		delegatedId = prop.StringValue()
	}

	// Resolve the tenant ID of the delegated identity's subscription, if any.
	tenantId := ""
	if delegatedId != "" {
		logging.V(9).Infof("Cross-tenant role assignment detected with delegatedManagedIdentityResourceId: %s", delegatedId)
		subscription, err := extractSubscriptionId(delegatedId)
		if err != nil {
			return nil, false, fmt.Errorf("failed to extract subscription ID from delegatedManagedIdentityResourceId: %w", err)
		}
		tenantId, err = client.getTenantId(ctx, subscription)
		if err != nil {
			return nil, false, fmt.Errorf("failed to get tenant ID for subscription %s: %w", subscription, err)
		}
		logging.V(9).Infof("Using tenant ID %s for cross-tenant role assignment read", tenantId)
	}

	// Build the query parameters, appending tenantId for the cross-tenant case.
	queryParams := make(map[string]any, len(res.ReadQueryParams)+1)
	for key, value := range res.ReadQueryParams {
		queryParams[key] = value
	}
	if tenantId != "" {
		queryParams["tenantId"] = tenantId
	}

	// Issue the GET against Azure directly so we control the query parameters.
	response, err := azureClient.Get(ctx, id+res.ReadPath, res.APIVersion, queryParams)
	if err != nil {
		return nil, false, err
	}
	return client.convertResponseToOutputs(response), true, nil
}
// extractSubscriptionId extracts the subscription ID from an Azure resource ID.
// Format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/...
func extractSubscriptionId(resourceId string) (string, error) {
	if m := subscriptionIdRegex.FindStringSubmatch(resourceId); len(m) >= 2 {
		return m[1], nil
	}
	return "", fmt.Errorf("could not extract subscription ID from resource ID: %s", resourceId)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_keyvault_accesspolicy_test.go | provider/pkg/resources/customresources/custom_keyvault_accesspolicy_test.go | package customresources
import (
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParsePathParams covers parsing of Key Vault access-policy resource IDs:
// the legacy form without an object ID, the full form with one, and a set of
// malformed IDs that must all be rejected.
func TestParsePathParams(t *testing.T) {
	t.Run("Valid legacy path", func(t *testing.T) {
		const accessPolicyAzureId = "/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups/pulumi-dev-shared/providers/Microsoft.KeyVault/vaults/pulumi-testing/accessPolicy"
		got, err := parseKeyVaultPathParams(accessPolicyAzureId)
		require.NoError(t, err)
		assert.Equal(t, "pulumi-testing", got.VaultName)
		assert.Equal(t, "pulumi-dev-shared", got.ResourceGroup)
		// The legacy form carries no per-principal object ID.
		assert.Nil(t, got.ObjectId)
	})

	t.Run("Valid path", func(t *testing.T) {
		const accessPolicyAzureId = "/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups/pulumi-dev-shared/providers/Microsoft.KeyVault/vaults/pulumi-testing/accessPolicy/a5907b7f-6627-4a74-b831-b40bc5ceefdc"
		got, err := parseKeyVaultPathParams(accessPolicyAzureId)
		require.NoError(t, err)
		assert.Equal(t, "pulumi-testing", got.VaultName)
		assert.Equal(t, "pulumi-dev-shared", got.ResourceGroup)
		assert.Equal(t, "a5907b7f-6627-4a74-b831-b40bc5ceefdc", *got.ObjectId)
	})

	t.Run("Invalid paths", func(t *testing.T) {
		invalidIds := []string{
			"",
			"/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups/pulumi-dev-shared",
			"/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups/pulumi-dev-shared/providers/Microsoft.KeyVault",
			"/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups/pulumi-dev-shared/providers/Microsoft.KeyVault/vaults/pulumi-testing",
			"/subscriptions//resourceGroups/pulumi-dev-shared/providers/Microsoft.KeyVault/vaults/pulumi-testing/accessPolicy",
			"/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups//providers/Microsoft.KeyVault/vaults/pulumi-testing/accessPolicy",
			"/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/resourceGroups/pulumi-dev-shared/providers/Microsoft.KeyVault/vaults//accessPolicy",
		}
		for _, id := range invalidIds {
			_, err := parseKeyVaultPathParams(id)
			assert.Error(t, err, id)
		}
	})
}
// TestAzureIdFromProperties checks that building the access-policy Azure ID
// requires both the resource group and the vault name, and that a complete
// property map yields the expected ID.
func TestAzureIdFromProperties(t *testing.T) {
	// Missing either required property must produce an error.
	incompleteCases := []resource.PropertyMap{
		{
			resource.PropertyKey(resourceGroupName): resource.NewStringProperty("rg"),
		},
		{
			resource.PropertyKey(vaultName): resource.NewStringProperty("vault"),
		},
	}
	for _, props := range incompleteCases {
		_, err := azureIdFromProperties(props)
		assert.Error(t, err)
	}

	complete := resource.PropertyMap{
		resource.PropertyKey(resourceGroupName): resource.NewStringProperty("rg"),
		resource.PropertyKey(vaultName):         resource.NewStringProperty("vault"),
	}
	id, err := azureIdFromProperties(complete)
	require.NoError(t, err)
	assert.Equal(t, "/subscriptions/{subscription}/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/vault/accessPolicy", id)
}
// TestSdkParamsFromProperties checks that converting an access-policy property
// map to SDK parameters requires both objectId and tenantId, and that a
// complete policy produces a single SDK access policy with all IDs populated.
func TestSdkParamsFromProperties(t *testing.T) {
	// Policies missing objectId and/or tenantId must be rejected.
	incompleteCases := []resource.PropertyMap{
		{},
		{
			policy: resource.NewObjectProperty(resource.PropertyMap{
				"objectId": resource.NewStringProperty("objectId"),
			}),
		},
		{
			policy: resource.NewObjectProperty(resource.PropertyMap{
				"tenantId": resource.NewStringProperty("tenantId"),
			}),
		},
	}
	for _, props := range incompleteCases {
		_, err := sdkPolicyParamsFromProperties(props)
		assert.Error(t, err)
	}

	got, err := sdkPolicyParamsFromProperties(resource.PropertyMap{
		policy: resource.NewObjectProperty(resource.PropertyMap{
			"objectId":      resource.NewStringProperty("objectId"),
			"tenantId":      resource.NewStringProperty("tenantId"),
			"applicationId": resource.NewStringProperty("applicationId"),
		}),
	})
	require.NoError(t, err)
	assert.Len(t, got.Properties.AccessPolicies, 1)
	assert.Equal(t, "objectId", *got.Properties.AccessPolicies[0].ObjectID)
	assert.Equal(t, "tenantId", *got.Properties.AccessPolicies[0].TenantID)
	assert.Equal(t, "applicationId", *got.Properties.AccessPolicies[0].ApplicationID)
}
// TestPropertyPermissionsToSdk checks that Pulumi permission arrays for keys,
// certificates, secrets, and storage are each mapped to the corresponding
// armkeyvault permission enums.
func TestPropertyPermissionsToSdk(t *testing.T) {
	// strArray builds a Pulumi array property from plain strings.
	strArray := func(values ...string) resource.PropertyValue {
		elems := make([]resource.PropertyValue, 0, len(values))
		for _, v := range values {
			elems = append(elems, resource.NewStringProperty(v))
		}
		return resource.NewArrayProperty(elems)
	}

	input := resource.PropertyMap{
		"keys":         strArray("get"),
		"certificates": strArray("get", "list"),
		"secrets":      strArray("purge"),
		"storage":      strArray("deletesas"),
	}

	got := propertyPermissionsToSdk(input)
	assert.Len(t, got.Keys, 1)
	assert.Equal(t, armkeyvault.KeyPermissionsGet, *got.Keys[0])
	assert.Len(t, got.Certificates, 2)
	assert.Equal(t, armkeyvault.CertificatePermissionsGet, *got.Certificates[0])
	assert.Equal(t, armkeyvault.CertificatePermissionsList, *got.Certificates[1])
	assert.Len(t, got.Secrets, 1)
	assert.Equal(t, armkeyvault.SecretPermissionsPurge, *got.Secrets[0])
	assert.Len(t, got.Storage, 1)
	assert.Equal(t, armkeyvault.StoragePermissionsDeletesas, *got.Storage[0])
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_pim_eligibility.go | provider/pkg/resources/customresources/custom_pim_eligibility.go | // Copyright 2025, Pulumi Corporation. All rights reserved.
package customresources
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider/crud"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/util"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v3"
"github.com/google/uuid"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
const (
	// User-facing description of the PimRoleEligibilitySchedule custom resource
	// (verbatim; surfaced in the generated schema).
	pimRoleEligibilityScheduleResourceDescription = `A PIM (Privileged Identity Management) Role Eligibility Schedule.
Role Eligibility Schedules are used to limit standing administrator access to privileged roles in Azure PIM. See
[here](https://learn.microsoft.com/en-us/rest/api/authorization/privileged-role-eligibility-rest-sample) for details.
A Role Eligibility Schedule is uniquely defined by scope, principal, and role. At present, only one instance of this
resource can exist for a given scope|principal|role tuple.
Note that this resource cannot be updated. Each change leads to a recreation.
Internally, this resource uses the
[Role Eligibility Schedule Requests](https://learn.microsoft.com/en-us/rest/api/authorization/role-eligibility-schedule-requests?view=rest-authorization-2020-10-01)
API to create and delete the schedules.
`
	// Pulumi name/token of the custom resource, and of the underlying
	// RoleEligibilityScheduleRequest resource it uses internally.
	pimRoleEligibilityScheduleResourceName        = "PimRoleEligibilitySchedule"
	pimRoleEligibilityScheduleTok                 = "azure-native:authorization:" + pimRoleEligibilityScheduleResourceName
	pimRoleEligibilityScheduleRequestResourceName = "RoleEligibilityScheduleRequest"
	pimRoleEligibilityScheduleRequestTok          = "azure-native:authorization:" + pimRoleEligibilityScheduleRequestResourceName
	PimRoleEligibilityScheduleRequestPath         = "/{scope}/providers/Microsoft.Authorization/roleEligibilityScheduleRequests/{roleEligibilityScheduleRequestName}"
	// Upper bounds on how long we poll Azure for create/delete operations to settle.
	pimRoleEligibilityScheduleMaxWaitForCreate        = 10 * time.Minute
	pimRoleEligibilityScheduleMaxWaitForDelete        = 10 * time.Minute
	pimRoleEligibilityScheduleMaxWaitForDeleteRequest = 2 * time.Minute
)

// Tests are allowed to change this to a smaller value.
var pimRoleEligibilityScheduleTickerInterval = 10 * time.Second
// pimRoleEligibilitySchedule returns a custom resource for Role Eligibility Schedules.
//
// See the docs in the constant `pimRoleEligibilityScheduleResourceDescription` for details.
//
// The APIs used are Microsoft.Authorization/roleEligibilitySchedules and Microsoft.Authorization/
// roleEligibilityScheduleRequests.
func pimRoleEligibilitySchedule(
	lookupResource resources.ResourceLookupFunc,
	crudClientFactory crud.ResourceCrudClientFactory,
	token azcore.TokenCredential,
) (*CustomResource, error) {
	// This func's parameters are all nil when the function is called for the first time, for
	// `customresources.featureLookup`, so we initialize the objects we need conditionally
	var crudClient crud.ResourceCrudClient
	var res resources.AzureAPIResource
	var schedulesClient *armauthorization.RoleEligibilitySchedulesClient
	var requestsClient *armauthorization.RoleEligibilityScheduleRequestsClient
	if lookupResource != nil && crudClientFactory != nil {
		var found bool
		var err error
		// Metadata is looked up under the schedule token, which is the user-facing resource.
		res, found, err = lookupResource(pimRoleEligibilityScheduleTok)
		if err != nil {
			return nil, err
		}
		if !found {
			return nil, fmt.Errorf("resource %q not found", pimRoleEligibilityScheduleTok)
		}
		crudClient = crudClientFactory(&res)
		schedulesClient, err = armauthorization.NewRoleEligibilitySchedulesClient(token, nil)
		if err != nil {
			return nil, err
		}
		requestsClient, err = armauthorization.NewRoleEligibilityScheduleRequestsClient(token, nil)
		if err != nil {
			return nil, err
		}
	}
	client := &pimEligibilityScheduleClientImpl{
		crudClient:      crudClient,
		schedulesClient: schedulesClient,
		requestsClient:  requestsClient,
	}
	return &CustomResource{
		// NOTE(review): the schema/metadata above is looked up by pimRoleEligibilityScheduleTok,
		// but the token and path here are those of the underlying request resource — presumably
		// intentional since the schema is derived from the request spec; confirm before changing.
		tok:                pimRoleEligibilityScheduleRequestTok,
		CustomResourceName: pimRoleEligibilityScheduleResourceName,
		path:               PimRoleEligibilityScheduleRequestPath,
		apiVersion:         pulumi.StringRef("2020-10-01"),
		Schema:             genSchema,
		Read: func(ctx context.Context, id string, inputs resource.PropertyMap) (map[string]any, bool, error) {
			return read(ctx, id, inputs, client)
		},
		Create: func(ctx context.Context, id string, inputs resource.PropertyMap) (map[string]any, error) {
			return createPimEligibilitySchedule(ctx, id, inputs, client, pimRoleEligibilityScheduleMaxWaitForCreate)
		},
		Delete: func(ctx context.Context, id string, _, state resource.PropertyMap) error {
			return deletePimEligibilitySchedule(ctx, id, state, client, pimRoleEligibilityScheduleMaxWaitForDelete)
		},
	}, nil
}
// genSchema modifies the schema extracted from the Azure spec for the PIM Role Eligibility
// Schedule resource: it drops the inputs the provider manages internally and marks all remaining
// inputs as replace-on-change, because the underlying API does not support updates.
//
// The parameter is named rd (not "resource") to avoid shadowing the imported resource package.
// Returns an error if the definition is nil, i.e., not found in the spec.
func genSchema(rd *ResourceDefinition) (*ResourceDefinition, error) {
	if rd == nil {
		return nil, fmt.Errorf("resource %q not found in the spec", pimRoleEligibilityScheduleRequestTok)
	}
	// remove requestType, we're only supporting one so far (like TF)
	removePimInputProperty(rd, "requestType")
	// remove the name, it's a random GUID so we generate it ourselves
	removePimInputProperty(rd, "roleEligibilityScheduleRequestName")
	// All properties cause replacement because this resource doesn't support updates.
	for name, p := range rd.Resource.InputProperties {
		p.ReplaceOnChanges = true
		rd.Resource.InputProperties[name] = p
	}
	updateResourceDescription(rd, pimRoleEligibilityScheduleResourceDescription)
	return rd, nil
}

// removePimInputProperty deletes the named input property from the resource definition and
// removes it from the required inputs. Logs a warning if the property is not in the spec.
func removePimInputProperty(rd *ResourceDefinition, name string) {
	if _, ok := rd.Resource.InputProperties[name]; ok {
		delete(rd.Resource.InputProperties, name)
		rd.makePropertyNotRequired(name)
	} else {
		logging.V(5).Infof("warning: expected property '%s' not found in the spec for %s", name, pimRoleEligibilityScheduleRequestTok)
	}
}
// updateResourceDescription swaps in newDescription as the resource's description, preserving
// any trailing "Azure REST API version:" documentation present in the original description.
func updateResourceDescription(rd *ResourceDefinition, newDescription string) {
	const apiVersionMarker = "Azure REST API version:"
	current := rd.Resource.Description
	if idx := strings.Index(current, apiVersionMarker); idx >= 0 {
		newDescription = newDescription + "\n\n" + current[idx:]
	}
	rd.Resource.Description = newDescription
}
// pimEligibilityScheduleClient is a client for the PIM Role Eligibility Schedule API. The interface allows to use fake
// clients in tests.
type pimEligibilityScheduleClient interface {
	// findSchedule looks up a schedule by the scope|principal|role tuple. A nil schedule with a
	// nil error means "not found".
	findSchedule(ctx context.Context, scope, principalId, roleDefinitionId string) (*armauthorization.RoleEligibilitySchedule, error)
	// getSchedule retrieves a schedule by its name within the given scope.
	getSchedule(ctx context.Context, scope, scheduleName string) (*armauthorization.RoleEligibilitySchedule, error)
	// Matches armauthorization.RoleEligibilityScheduleRequestsClient.Create
	createSchedule(ctx context.Context, scope string, roleEligibilityScheduleRequestName string,
		parameters armauthorization.RoleEligibilityScheduleRequest,
	) (armauthorization.RoleEligibilityScheduleRequestsClientCreateResponse, error)
	// cancelSchedule cancels a pending schedule request within the given scope.
	cancelSchedule(ctx context.Context, scope, scheduleName string) (armauthorization.RoleEligibilityScheduleRequestsClientCancelResponse, error)
	// Map an Azure SDK schedule object to Pulumi SDK shape output.
	mapScheduleToOutputs(schedule *armauthorization.RoleEligibilitySchedule) (map[string]any, error)
}
// pimEligibilityScheduleClientImpl is the production implementation of
// pimEligibilityScheduleClient, backed by the Azure SDK clients and the provider's CRUD client.
type pimEligibilityScheduleClientImpl struct {
	crudClient      crud.ResourceCrudClient
	schedulesClient *armauthorization.RoleEligibilitySchedulesClient
	requestsClient  *armauthorization.RoleEligibilityScheduleRequestsClient
}
// findSchedule finds a role eligibility schedule by scope, principalId, and roleDefinitionId. It considers only
// schedules with MemberType=Direct (there are also "Group" and "Inherited").
// A (nil, nil) return means no matching schedule exists.
func (c *pimEligibilityScheduleClientImpl) findSchedule(ctx context.Context, scope, principalId, roleDefinitionId string,
) (*armauthorization.RoleEligibilitySchedule, error) {
	// The server-side filter narrows by principal; we still compare all fields below.
	pager := c.schedulesClient.NewListForScopePager(scope, &armauthorization.RoleEligibilitySchedulesClientListForScopeOptions{
		Filter: pulumi.StringRef(fmt.Sprintf("principalId eq '%s'", principalId)),
	})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("listing role eligibility schedules: %w", err)
		}
		for _, schedule := range page.Value {
			props := schedule.Properties
			// Defensively skip incomplete entries to avoid nil dereferences on API responses.
			if props == nil || props.PrincipalID == nil || props.RoleDefinitionID == nil || props.MemberType == nil {
				continue
			}
			if strings.EqualFold(*props.PrincipalID, principalId) &&
				strings.EqualFold(*props.RoleDefinitionID, roleDefinitionId) &&
				*props.MemberType == armauthorization.MemberTypeDirect {
				return schedule, nil
			}
		}
	}
	return nil, nil
}
// getSchedule retrieves a single role eligibility schedule by name within the given scope.
func (c *pimEligibilityScheduleClientImpl) getSchedule(ctx context.Context, scope, scheduleName string) (*armauthorization.RoleEligibilitySchedule, error) {
	resp, err := c.schedulesClient.Get(ctx, scope, scheduleName, nil)
	if err != nil {
		return nil, fmt.Errorf("retrieving role eligibility schedule: %w", err)
	}
	schedule := resp.RoleEligibilitySchedule
	return &schedule, nil
}
// createSchedule submits a Role Eligibility Schedule Request. Thin wrapper over the SDK's
// RoleEligibilityScheduleRequestsClient.Create.
func (c *pimEligibilityScheduleClientImpl) createSchedule(ctx context.Context, scope string, roleEligibilityScheduleRequestName string,
	parameters armauthorization.RoleEligibilityScheduleRequest,
) (armauthorization.RoleEligibilityScheduleRequestsClientCreateResponse, error) {
	return c.requestsClient.Create(ctx, scope, roleEligibilityScheduleRequestName, parameters, nil)
}
// cancelSchedule cancels a pending Role Eligibility Schedule Request. Thin wrapper over the
// SDK's RoleEligibilityScheduleRequestsClient.Cancel.
func (c *pimEligibilityScheduleClientImpl) cancelSchedule(ctx context.Context, scope, scheduleName string) (armauthorization.RoleEligibilityScheduleRequestsClientCancelResponse, error) {
	return c.requestsClient.Cancel(ctx, scope, scheduleName, nil)
}
// mapScheduleToOutputs maps an Azure SDK schedule object to Pulumi SDK shape output. Since the
// SDK uses the same API, it's almost in the right shape, except the SDK ignores the
// `"x-ms-client-flatten": true` annotation in the API spec, so we run it through our converter
// to remove the extra layer of nesting.
func (c *pimEligibilityScheduleClientImpl) mapScheduleToOutputs(schedule *armauthorization.RoleEligibilitySchedule) (map[string]any, error) {
	raw, err := json.Marshal(schedule)
	if err != nil {
		return nil, fmt.Errorf("converting role eligibility schedule from SDK to JSON: %w", err)
	}
	var body map[string]any
	if err := json.Unmarshal(raw, &body); err != nil {
		return nil, fmt.Errorf("unmarshaling role eligibility schedule from JSON: %w", err)
	}
	// We don't need the "Microsoft.Authorization/roleEligibilitySchedules" type that the SDK adds.
	delete(body, "type")
	return c.crudClient.ResponseBodyToSdkOutputs(body), nil
}
// read looks up the schedule matching the scope|principal|role tuple from the inputs and, when
// found, maps it to Pulumi outputs. The boolean result reports whether the schedule exists.
func read(ctx context.Context, id string, inputs resource.PropertyMap, client pimEligibilityScheduleClient) (map[string]any, bool, error) {
	sdkInputs, err := inputsToSdk(inputs)
	if err != nil {
		return nil, false, fmt.Errorf("converting inputs to SDK shape: %w", err)
	}
	props := sdkInputs.Properties
	schedule, err := client.findSchedule(ctx, *props.Scope, *props.PrincipalID, *props.RoleDefinitionID)
	if err != nil {
		return nil, false, fmt.Errorf("looking up role eligibility schedule: %w", err)
	}
	if schedule == nil {
		// No matching schedule: the resource doesn't exist.
		return nil, false, nil
	}
	outputs, err := client.mapScheduleToOutputs(schedule)
	if err != nil {
		return nil, true, fmt.Errorf("mapping role eligibility schedule to outputs: %w", err)
	}
	return outputs, true, nil
}
// createPimEligibilitySchedule submits a Role Eligibility Schedule Request to the PIM service.
// It then waits and polls for the request to be in a terminal state like approved or denied. All
// possible states are defined
// [here](https://learn.microsoft.com/en-us/rest/api/authorization/role-eligibility-schedules/get?view=rest-authorization-2020-10-01&tabs=HTTP#status).
func createPimEligibilitySchedule(ctx context.Context, id string, inputs resource.PropertyMap,
	client pimEligibilityScheduleClient, maxWait time.Duration,
) (map[string]any, error) {
	req, err := inputsToSdk(inputs)
	if err != nil {
		return nil, fmt.Errorf("converting inputs to SDK shape: %w", err)
	}
	// The request name is a freshly generated GUID; the server names the schedule itself.
	requestName := uuid.New().String()
	req.Name = nil
	requestType := armauthorization.RequestTypeAdminAssign
	req.Properties.RequestType = &requestType
	// Submit the schedule request.
	if _, err := client.createSchedule(ctx, *req.Properties.Scope, requestName, *req); err != nil {
		return nil, fmt.Errorf("creating role eligibility schedule request: %w", err)
	}
	// Poll for completion. Success is when we find a schedule with the matching
	// scope|principal|role tuple.
	var schedule *armauthorization.RoleEligibilitySchedule
	err = util.RetryOperation(maxWait, pimRoleEligibilityScheduleTickerInterval,
		"waiting for role eligibility schedule to be created",
		func() (bool, error) {
			found, findErr := client.findSchedule(ctx, *req.Properties.Scope,
				*req.Properties.PrincipalID, *req.Properties.RoleDefinitionID)
			if findErr != nil {
				return true, fmt.Errorf("looking up role eligibility schedule: %w", findErr)
			}
			schedule = found
			return found != nil, nil
		})
	if err != nil {
		return nil, err
	}
	return client.mapScheduleToOutputs(schedule)
}
// deletePimEligibilitySchedule checks if the Role Eligibility Schedule Request for this schedule
// is still active, i.e., in a non-terminal state. If so, it cancels the request. Otherwise, it
// submits a new Schedule Request with RequestType=AdminRemove to delete the schedule and polls
// until the schedule is gone (GET returns 404) or maxWait elapses.
func deletePimEligibilitySchedule(ctx context.Context,
	id string,
	state resource.PropertyMap,
	client pimEligibilityScheduleClient,
	maxWait time.Duration,
) error {
	sdkState, err := inputsToSdk(state)
	if err != nil {
		return fmt.Errorf("converting inputs to SDK shape: %w", err)
	}
	scheduleName := *sdkState.Name
	scope := *sdkState.Properties.Scope
	// Check the current status of the schedule.
	schedule, err := client.getSchedule(ctx, scope, scheduleName)
	if err != nil {
		var responseErr *azcore.ResponseError
		if errors.As(err, &responseErr) && responseErr.StatusCode == 404 {
			// Schedule not found, nothing to delete.
			return nil
		}
		return fmt.Errorf("retrieving role eligibility schedule: %w", err)
	}
	// If the schedule is active, cancel it. Guard against nil Properties/Status in the API
	// response to avoid a panic; a missing status is treated as a terminal state.
	if schedule.Properties != nil && schedule.Properties.Status != nil && statusIsPending(schedule.Properties.Status) {
		// NOTE(review): this passes the Pulumi resource id (a full path), not scheduleName, as
		// the request name to cancel — preserved as-is; verify against the Cancel API contract.
		_, err := client.cancelSchedule(ctx, *sdkState.Properties.Scope, id)
		if err != nil {
			return fmt.Errorf("canceling role eligibility schedule: %w", err)
		}
		return nil
	}
	// If the schedule is not active, submit a new request with RequestType=AdminRemove.
	// Generate a new GUID for the removal request.
	removeRequestName := uuid.New().String()
	typeRemove := armauthorization.RequestTypeAdminRemove
	payload := armauthorization.RoleEligibilityScheduleRequest{
		Name: pulumi.StringRef(removeRequestName),
		Properties: &armauthorization.RoleEligibilityScheduleRequestProperties{
			PrincipalID:      sdkState.Properties.PrincipalID,
			RoleDefinitionID: sdkState.Properties.RoleDefinitionID,
			RequestType:      &typeRemove,
			Justification:    pulumi.StringRef("Removed by Pulumi"),
		},
	}
	// Use the named constant instead of a hard-coded 2*time.Minute for consistency.
	err = util.RetryOperation(pimRoleEligibilityScheduleMaxWaitForDeleteRequest, pimRoleEligibilityScheduleTickerInterval,
		"requesting role eligibility schedule removal", func() (bool, error) {
			_, createErr := client.createSchedule(ctx, scope, removeRequestName, payload)
			if createErr != nil {
				var responseErr *azcore.ResponseError
				if errors.As(createErr, &responseErr) {
					// occasional intermittent error, retry
					if responseErr.ErrorCode == "ActiveDurationTooShort" {
						return false, nil
					}
					// already deleted, we're done
					if responseErr.ErrorCode == "RoleAssignmentDoesNotExist" {
						return true, nil
					}
				}
				return true, fmt.Errorf("creating role eligibility schedule removal request: %w", createErr)
			}
			return true, nil
		})
	if err != nil {
		return err
	}
	// Poll for the schedule to be gone.
	return util.RetryOperation(maxWait, pimRoleEligibilityScheduleTickerInterval, "role eligibility schedule deletion", func() (bool, error) {
		_, getErr := client.getSchedule(ctx, scope, scheduleName)
		if getErr != nil {
			var responseErr *azcore.ResponseError
			// getSchedule wraps with %w, so errors.As still finds the response error.
			if errors.As(getErr, &responseErr) && responseErr.StatusCode == 404 {
				return true, nil
			}
			return false, fmt.Errorf("retrieving role eligibility schedule: %w", getErr)
		}
		return false, nil
	})
}
// statusIsPending reports whether a Role Eligibility Schedule is still in a non-terminal state:
// Granted or any of the Pending* statuses (e.g., PendingApproval, PendingProvisioning).
// A nil status is treated as not pending, avoiding a nil-pointer dereference.
func statusIsPending(status *armauthorization.Status) bool {
	if status == nil {
		return false
	}
	return *status == armauthorization.StatusGranted ||
		strings.HasPrefix(string(*status), "Pending")
}
// inputsToSdk converts the Pulumi SDK shape inputs to the Azure SDK shape.
//
// scope, principalId, and roleDefinitionId are required; all other properties are optional.
// The function is used for both user inputs and recorded state, which is why the name may come
// either from "roleEligibilityScheduleRequestName" or from the state's "name" property.
func inputsToSdk(inputs resource.PropertyMap) (*armauthorization.RoleEligibilityScheduleRequest, error) {
	result := &armauthorization.RoleEligibilityScheduleRequest{
		Properties: &armauthorization.RoleEligibilityScheduleRequestProperties{},
	}
	if name, ok := inputs["roleEligibilityScheduleRequestName"]; ok {
		result.Name = pulumi.StringRef(name.StringValue())
	} else if name, ok = inputs["name"]; ok {
		result.Name = pulumi.StringRef(name.StringValue())
	}
	if scope, ok := inputs["scope"]; ok {
		result.Properties.Scope = pulumi.StringRef(scope.StringValue())
	} else {
		return nil, fmt.Errorf("scope is required")
	}
	if principalId, ok := inputs["principalId"]; ok {
		result.Properties.PrincipalID = pulumi.StringRef(principalId.StringValue())
	} else {
		return nil, fmt.Errorf("principalId is required")
	}
	if roleDefinitionId, ok := inputs["roleDefinitionId"]; ok {
		result.Properties.RoleDefinitionID = pulumi.StringRef(roleDefinitionId.StringValue())
	} else {
		return nil, fmt.Errorf("roleDefinitionId is required")
	}
	if justification, ok := inputs["justification"]; ok {
		result.Properties.Justification = pulumi.StringRef(justification.StringValue())
	}
	if status, ok := inputs["status"]; ok {
		statusVal := armauthorization.Status(status.StringValue())
		result.Properties.Status = &statusVal
	}
	if scheduleInfo, ok := inputs["scheduleInfo"]; ok {
		info := scheduleInfo.ObjectValue()
		result.Properties.ScheduleInfo = &armauthorization.RoleEligibilityScheduleRequestPropertiesScheduleInfo{
			Expiration: &armauthorization.RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration{},
		}
		if startDateTime, ok := info["startDateTime"]; ok {
			startTime, err := time.Parse(time.RFC3339, startDateTime.StringValue())
			if err != nil {
				return nil, fmt.Errorf("invalid start time: %w", err)
			}
			result.Properties.ScheduleInfo.StartDateTime = &startTime
		}
		if expiration, ok := info["expiration"]; ok {
			exp := expiration.ObjectValue()
			// duration is optional - only present for AfterDuration type
			if duration, ok := exp["duration"]; ok {
				result.Properties.ScheduleInfo.Expiration.Duration = pulumi.StringRef(duration.StringValue())
			}
			// endDateTime is optional - only present for AfterDateTime type
			if endDateTime, ok := exp["endDateTime"]; ok {
				endTime, err := time.Parse(time.RFC3339, endDateTime.StringValue())
				if err != nil {
					return nil, fmt.Errorf("invalid end time: %w", err)
				}
				result.Properties.ScheduleInfo.Expiration.EndDateTime = &endTime
			}
			// type is required; return a clear error instead of panicking on a missing key.
			typeValue, ok := exp["type"]
			if !ok {
				return nil, fmt.Errorf("scheduleInfo.expiration.type is required")
			}
			expirationType := armauthorization.Type(typeValue.StringValue())
			result.Properties.ScheduleInfo.Expiration.Type = &expirationType
		}
	}
	if ticketInfo, ok := inputs["ticketInfo"]; ok {
		info := ticketInfo.ObjectValue()
		result.Properties.TicketInfo = &armauthorization.RoleEligibilityScheduleRequestPropertiesTicketInfo{}
		if ticketNumber, ok := info["ticketNumber"]; ok {
			result.Properties.TicketInfo.TicketNumber = pulumi.StringRef(ticketNumber.StringValue())
		}
		if ticketSystem, ok := info["ticketSystem"]; ok {
			result.Properties.TicketInfo.TicketSystem = pulumi.StringRef(ticketSystem.StringValue())
		}
	}
	return result, nil
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_webapp_delete_test.go | provider/pkg/resources/customresources/custom_webapp_delete_test.go | package customresources
import (
"context"
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/azure"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider/crud"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mockResourceLookup always reports success with an empty resource definition; the tests in
// this file only care about the HTTP requests issued, not the resource metadata.
var mockResourceLookup resources.ResourceLookupFunc = func(resourceType string) (resources.AzureAPIResource, bool, error) {
	return resources.AzureAPIResource{}, true, nil
}
// TestSetsDeleteParam verifies that deleting a web app sends exactly one query parameter,
// deleteEmptyServerFarm.
func TestSetsDeleteParam(t *testing.T) {
	mockClient := &azure.MockAzureClient{}
	custom, err := webApp(nil, mockClient, mockResourceLookup)
	require.NoError(t, err)
	custom.Delete(context.Background(), "id", nil, nil)
	params := mockClient.QueryParamsOfLastDelete
	assert.Len(t, params, 1)
	assert.Contains(t, params, "deleteEmptyServerFarm")
}
// TestReadsSiteConfig verifies that reading a web app issues GETs for both the site itself and
// its config/web sub-resource.
func TestReadsSiteConfig(t *testing.T) {
	const appId = "/subscriptions/123/resourceGroups/rg123/providers/Microsoft.Web/sites/app123"
	mockClient := &azure.MockAzureClient{}
	factory := crud.ResourceCrudClientFactory(func(res *resources.AzureAPIResource) crud.ResourceCrudClient {
		return crud.NewResourceCrudClient(mockClient, nil, nil, "123", res)
	})
	custom, err := webApp(factory, mockClient, mockResourceLookup)
	require.NoError(t, err)
	// Returns an error because the responses for both GETs are empty, but we only care about the requests.
	_, _, _ = custom.Read(context.Background(), appId, resource.PropertyMap{})
	require.Contains(t, mockClient.GetIds, appId)
	require.Contains(t, mockClient.GetIds, appId+"/config/web")
}
func TestMergeWebAppSiteConfig(t *testing.T) {
webApp := map[string]any{
"id": "/subscriptions/123/resourceGroups/rg123/providers/Microsoft.Web/sites/app123",
"properties": map[string]any{
"enabled": true,
"siteConfig": map[string]any{
"defaultDocuments": nil,
"ipSecurityRestrictions": nil,
},
},
}
siteConfig := map[string]any{
"id": "/subscriptions/123/resourceGroups/rg123/providers/Microsoft.Web/sites/app123/config/web",
"properties": map[string]any{
"defaultDocuments": []any{
"pulumi.html",
},
"ipSecurityRestrictions": []any{
map[string]any{
"action": "Allow",
"ipAddress": "198.51.100.0/22",
"name": "pulumi",
"priority": float64(100),
"tag": "Default",
},
},
},
}
err := mergeWebAppSiteConfig(webApp, siteConfig)
require.NoError(t, err)
assert.Equal(t, webApp["properties"].(map[string]any)["siteConfig"], siteConfig["properties"])
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_storage.go | provider/pkg/resources/customresources/custom_storage.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package customresources
import (
"regexp"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
)
// Static website resource path and property names.
const (
	staticWebsitePath = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/staticWebsite"
	subscriptionId    = "subscriptionId"
	resourceGroupName = "resourceGroupName"
	accountName       = "accountName"
	containerName     = "containerName"
	indexDocument     = "indexDocument"
	error404Document  = "error404Document"
)

// Blob resource path and property names.
const (
	blobPath   = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/blobs/{blobName}"
	accessTier = "accessTier"
	blobName   = "blobName"
	contentMd5 = "contentMd5"
	contentType = "contentType"
	metadata   = "metadata"
	nameProp   = "name"
	source     = "source"
	typeProp   = "type"
	url        = "url"
)

// storageAccountPathRegexStr matches the storage-account prefix of an Azure resource ID
// (case-insensitive), capturing subscription, resource group, and account name.
var storageAccountPathRegexStr = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/Microsoft.Storage/storageAccounts/(.+?)/`
var storageAccountPathRegex = regexp.MustCompile(storageAccountPathRegexStr)

// blobIDPattern extends the account prefix to a full blob ID, additionally capturing the
// container name and blob name (5 capture groups total).
var blobIDPattern = regexp.MustCompile(storageAccountPathRegexStr + `blobServices/default/containers/(.+)/blobs/(.+)$`)
// parseBlobIdProperties parses an ID of a Blob resource to its identified properties.
// For instance, it will convert
// /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myrg/providers/Microsoft.Storage/storageAccounts/mysa/blobServices/default/containers/myc/blobs/log.txt
// to a map of
// resourceGroupName=myrg,accountName=mysa,containerName=myc,blobName=log.txt.
// The boolean result reports whether the ID matched the blob ID pattern.
func parseBlobIdProperties(id string) (resource.PropertyMap, bool) {
	parts := blobIDPattern.FindStringSubmatch(id)
	// The pattern has 5 capture groups; a full match yields 6 elements.
	if len(parts) != 6 {
		return nil, false
	}
	props := resource.PropertyMap{
		subscriptionId:    resource.NewStringProperty(parts[1]),
		resourceGroupName: resource.NewStringProperty(parts[2]),
		accountName:       resource.NewStringProperty(parts[3]),
		containerName:     resource.NewStringProperty(parts[4]),
		blobName:          resource.NewStringProperty(parts[5]),
	}
	return props, true
}
// readAssetBytes extracts the raw bytes of a Pulumi asset or archive property value. Archives
// are serialized in ZIP format. Any other value kind is rejected with an error.
func readAssetBytes(s resource.PropertyValue) ([]byte, error) {
	if s.IsAsset() {
		data, err := s.AssetValue().Bytes()
		if err != nil {
			return nil, errors.Wrap(err, "reading asset bytes")
		}
		return data, nil
	}
	if s.IsArchive() {
		data, err := s.ArchiveValue().Bytes(resource.ZIPArchive)
		if err != nil {
			return nil, errors.Wrap(err, "reading archive bytes")
		}
		return data, nil
	}
	return nil, errors.Errorf("unknown asset type %q", s.TypeString())
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/resources/customresources/custom_keyvault_accesspolicy.go | provider/pkg/resources/customresources/custom_keyvault_accesspolicy.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package customresources
import (
"context"
"errors"
"fmt"
"regexp"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault"
"github.com/pulumi/pulumi/pkg/v3/codegen/schema"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
)
// Frequently used resource property names.
const (
	vaultName = "vaultName"
	policy    = "policy"
)

// This used to be "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/accessPolicy"
// This was updated to include the objectId in the path so it accurately represents the logical resource so we can perform imports by ID.
// However, it's not possible to update an existing id, so we have to ensure we can still work with the old id format - where we fetch the objectId from the properties.
// The objectId is prefixed with `policy.` because it's filled in from the `objectId` within the `policy` property.
const path = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/accessPolicy/{policy.objectId}"

// keyVaultAccessPolicyProperties defines the schema (both outputs and inputs) of the custom
// AccessPolicy resource; the policy entry type is reused from the generated Vault schema.
var keyVaultAccessPolicyProperties = map[string]schema.PropertySpec{
	resourceGroupName: {
		TypeSpec:    schema.TypeSpec{Type: "string"},
		Description: "Name of the resource group that contains the vault.",
	},
	vaultName: {
		TypeSpec:    schema.TypeSpec{Type: "string"},
		Description: "Name of the Key Vault.",
	},
	policy: {
		// This type is generated from the Azure spec because the Vault resource references it.
		TypeSpec:    schema.TypeSpec{Ref: "#/types/azure-native:keyvault:AccessPolicyEntry"},
		Description: "The definition of the access policy.",
	},
}
// keyVaultAccessPolicy builds the custom AccessPolicy resource for managing access policies on
// existing Key Vaults. CRUD is implemented via the vault's UpdateAccessPolicy API (see
// accessPolicyClient); the schema is declared manually (LegacySchema/Meta) rather than generated.
func keyVaultAccessPolicy(client *armkeyvault.VaultsClient) *CustomResource {
	c := &accessPolicyClient{client: client}
	return &CustomResource{
		tok:  "azure-native:keyvault:AccessPolicy",
		path: path,
		LegacySchema: &schema.ResourceSpec{
			ObjectTypeSpec: schema.ObjectTypeSpec{
				Description: "Key Vault Access Policy for managing policies on existing vaults.",
				Type:        "object",
				Properties:  keyVaultAccessPolicyProperties,
			},
			InputProperties: keyVaultAccessPolicyProperties,
			RequiredInputs:  []string{resourceGroupName, vaultName, policy},
		},
		Meta: &resources.AzureAPIResource{
			Path: path,
			PutParameters: []resources.AzureAPIParameter{
				{Name: subscriptionId, Location: "path", IsRequired: true, Value: &resources.AzureAPIProperty{Type: "string"}},
				{Name: resourceGroupName, Location: "path", IsRequired: true, Value: &resources.AzureAPIProperty{Type: "string"}},
				{Name: vaultName, Location: "path", IsRequired: true, Value: &resources.AzureAPIProperty{Type: "string"}},
				// policy.objectId is optional in the path for backward compatibility with old IDs.
				{Name: "policy.objectId", Location: "path", Value: &resources.AzureAPIProperty{Type: "string"}},
				{
					Name:     "properties",
					Location: "body",
					Body: &resources.AzureAPIType{
						Properties: map[string]resources.AzureAPIProperty{
							policy: {Type: "#/types/azure-native:keyvault:AccessPolicyEntry"},
						},
						RequiredProperties: []string{resourceGroupName, vaultName, policy},
					},
				},
			},
		},
		Read: c.read,
		Create: func(ctx context.Context, id string, properties resource.PropertyMap) (map[string]interface{}, error) {
			return c.write(ctx, properties, false /* shouldExist */)
		},
		Update: func(ctx context.Context, id string, properties, olds resource.PropertyMap) (map[string]interface{}, error) {
			return c.write(ctx, properties, true /* shouldExist */)
		},
		Delete: func(ctx context.Context, id string, inputs, state resource.PropertyMap) error {
			return c.modify(ctx, inputs, armkeyvault.AccessPolicyUpdateKindRemove)
		},
	}
}
// accessPolicyClient implements CRUD for Key Vault access policies on top of the vault client's
// Get and UpdateAccessPolicy operations.
type accessPolicyClient struct {
	client *armkeyvault.VaultsClient
}
// read fetches the vault and scans its access policies for the one matching the objectId taken
// from the id (new-style ids) or from the policy property (legacy ids). The boolean result
// reports whether the policy was found; a 404 on the vault itself means "doesn't exist".
func (c *accessPolicyClient) read(ctx context.Context, id string, properties resource.PropertyMap) (map[string]interface{}, bool, error) {
	// input from path
	parsedId, err := parseKeyVaultPathParams(id)
	if err != nil {
		return nil, false, err
	}
	// The old id format doesn't have the objectId in the path, so we fetch it from the properties instead.
	var objectId string
	if parsedId.ObjectId != nil {
		objectId = *parsedId.ObjectId
	} else {
		// input from body
		policyObj := properties[policy].ObjectValue()
		objectId = policyObj["objectId"].StringValue()
	}
	vaultResult, err := c.client.Get(ctx, parsedId.ResourceGroup, parsedId.VaultName, &armkeyvault.VaultsClientGetOptions{})
	if err != nil {
		var respErr *azcore.ResponseError
		if errors.As(err, &respErr) && respErr.StatusCode == 404 {
			// Vault not found: the policy cannot exist either.
			return nil, false, nil
		}
		return nil, false, err
	}
	for _, ap := range vaultResult.Properties.AccessPolicies {
		if *ap.ObjectID == objectId {
			ape := map[string]interface{}{
				"tenantId":      ap.TenantID,
				"objectId":      ap.ObjectID,
				"applicationId": ap.ApplicationID,
				"permissions":   sdkPermissionsToMap(ap.Permissions),
			}
			return map[string]interface{}{
				resourceGroupName: parsedId.ResourceGroup,
				vaultName:         vaultResult.Name,
				policy:            ape,
			}, true, nil
		}
	}
	// Not found: return the inputs as-is with found=false.
	// NOTE(review): callers (write) only look at the boolean; the returned map appears unused
	// in the not-found case — verify before relying on it.
	return properties.Mappable(), false, nil
}
// azureIdFromProperties builds a vault access-policy Azure ID from the resource group and vault
// name in the given properties. The subscription segment is left as a placeholder since read
// only needs the group and vault names. Returns an error when either property is missing.
func azureIdFromProperties(properties resource.PropertyMap) (string, error) {
	if !properties.HasValue(resourceGroupName) || !properties.HasValue(vaultName) {
		return "", fmt.Errorf("missing required property %s or %s", resourceGroupName, vaultName)
	}
	group := properties[resourceGroupName].StringValue()
	vault := properties[vaultName].StringValue()
	return fmt.Sprintf("/subscriptions/{subscription}/resourceGroups/%s/providers/Microsoft.KeyVault/vaults/%s/accessPolicy",
		group, vault), nil
}
// readFromProperties reads the access policy identified by the resourceGroupName/vaultName in
// the given properties rather than by an explicit Azure ID.
func (c *accessPolicyClient) readFromProperties(ctx context.Context, properties resource.PropertyMap) (map[string]interface{}, bool, error) {
	id, err := azureIdFromProperties(properties)
	if err != nil {
		return nil, false, err
	}
	return c.read(ctx, id, properties)
}
// write creates or replaces the access policy described by properties. shouldExist guards
// against duplicate creation (Create, shouldExist=false) and against updating a missing policy
// (Update, shouldExist=true). On success it reads the policy back and returns its state.
func (c *accessPolicyClient) write(ctx context.Context, properties resource.PropertyMap, shouldExist bool) (map[string]interface{}, error) {
	_, found, err := c.readFromProperties(ctx, properties)
	if err != nil {
		return nil, err
	}
	if (found && !shouldExist) || (!found && shouldExist) {
		policyObj := properties[policy].ObjectValue()
		objectId := policyObj["objectId"].StringValue()
		msg := "access policy for %s already exists"
		if shouldExist {
			msg = "access policy for %s does not exist"
		}
		return nil, fmt.Errorf(msg, objectId)
	}
	if err := c.modify(ctx, properties, armkeyvault.AccessPolicyUpdateKindReplace); err != nil {
		return nil, err
	}
	// Read it back. Check the error before the found flag, so a read failure is reported as
	// itself rather than masked by a misleading "not found" message.
	state, found, err := c.readFromProperties(ctx, properties)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, errors.New("newly written access policy not found")
	}
	return state, nil
}
// sdkPolicyParamsFromProperties converts the Pulumi policy property into the SDK's access
// policy parameters with a single entry. objectId and tenantId are required; applicationId and
// permissions are optional (an absent applicationId yields a pointer to the empty string).
func sdkPolicyParamsFromProperties(properties resource.PropertyMap) (*armkeyvault.VaultAccessPolicyParameters, error) {
	if !properties.HasValue(policy) {
		return nil, fmt.Errorf("missing required property %s", policy)
	}
	policyObj := properties[policy].ObjectValue()
	if !policyObj.HasValue("objectId") || !policyObj.HasValue("tenantId") {
		return nil, fmt.Errorf("missing required property objectId or tenantId")
	}
	objectId := policyObj["objectId"].StringValue()
	tenantId := policyObj["tenantId"].StringValue()
	applicationId := ""
	if policyObj.HasValue("applicationId") {
		applicationId = policyObj["applicationId"].StringValue()
	}
	var permissions armkeyvault.Permissions
	if permissionsValue, ok := policyObj["permissions"]; ok {
		permissions = propertyPermissionsToSdk(permissionsValue.ObjectValue())
	}
	entry := &armkeyvault.AccessPolicyEntry{
		ObjectID:      &objectId,
		TenantID:      &tenantId,
		ApplicationID: &applicationId,
		Permissions:   &permissions,
	}
	return &armkeyvault.VaultAccessPolicyParameters{
		Properties: &armkeyvault.VaultAccessPolicyProperties{
			AccessPolicies: []*armkeyvault.AccessPolicyEntry{entry},
		},
	}, nil
}
// modify applies a single access-policy entry built from properties to the vault using the
// given update kind: it creates, updates, or deletes depending on the op parameter.
func (c *accessPolicyClient) modify(ctx context.Context, properties resource.PropertyMap, op armkeyvault.AccessPolicyUpdateKind) error {
	group := properties[resourceGroupName].StringValue()
	vault := properties[vaultName].StringValue()
	params, err := sdkPolicyParamsFromProperties(properties)
	if err != nil {
		return err
	}
	_, err = c.client.UpdateAccessPolicy(ctx, group, vault, op, *params,
		&armkeyvault.VaultsClientUpdateAccessPolicyOptions{})
	return err
}
// vaultPathParams holds the components of a Key Vault access policy ID.
type vaultPathParams struct {
	ResourceGroup string
	VaultName     string
	// ObjectId is nil for legacy IDs that don't carry the objectId path segment.
	ObjectId *string
}

// keyVaultAccessPolicyIDPattern matches both the legacy access policy ID (.../accessPolicy) and
// the current one (.../accessPolicy/{objectId}), case-insensitively. Compiled once at package
// scope so parsing doesn't recompile the pattern on every call.
var keyVaultAccessPolicyIDPattern = regexp.MustCompile(`(?i)^/subscriptions/.+?/resourceGroups/(.+?)/providers/Microsoft.KeyVault/vaults/(.+?)/accessPolicy/?(.*)$`)

// parseKeyVaultPathParams extracts the resource group, vault name, and (optionally) objectId
// from a Key Vault access policy ID. It returns an error if the ID doesn't match.
func parseKeyVaultPathParams(id string) (vaultPathParams, error) {
	matches := keyVaultAccessPolicyIDPattern.FindStringSubmatch(id)
	if len(matches) < 3 {
		return vaultPathParams{}, fmt.Errorf("unable to parse key vault access policy id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/accessPolicy/{objectId}: %s", id)
	}
	// Old ids don't have the objectId in the path, so it's optional.
	// If it's not present, we'll fetch it from the properties.
	var objectId *string
	if len(matches) == 4 && matches[3] != "" {
		objectId = &matches[3]
	}
	return vaultPathParams{
		ResourceGroup: matches[1],
		VaultName:     matches[2],
		ObjectId:      objectId,
	}, nil
}
// sdkPermissionsToMap converts SDK permissions into a plain map keyed by
// category ("certificates", "keys", "secrets", "storage"). Empty categories
// are omitted from the result.
// NOTE(review): assumes no nil elements inside the permission slices — verify
// against the SDK contract.
func sdkPermissionsToMap(permissions *armkeyvault.Permissions) map[string]interface{} {
 out := map[string]interface{}{}
 if n := len(permissions.Certificates); n > 0 {
  vals := make([]string, n)
  for i, p := range permissions.Certificates {
   vals[i] = string(*p)
  }
  out["certificates"] = vals
 }
 if n := len(permissions.Keys); n > 0 {
  vals := make([]string, n)
  for i, p := range permissions.Keys {
   vals[i] = string(*p)
  }
  out["keys"] = vals
 }
 if n := len(permissions.Secrets); n > 0 {
  vals := make([]string, n)
  for i, p := range permissions.Secrets {
   vals[i] = string(*p)
  }
  out["secrets"] = vals
 }
 if n := len(permissions.Storage); n > 0 {
  vals := make([]string, n)
  for i, p := range permissions.Storage {
   vals[i] = string(*p)
  }
  out["storage"] = vals
 }
 return out
}
// propertyPermissionsToSdk converts a Pulumi property map of permission
// categories into the SDK's Permissions struct. A category that is present in
// the map always yields a non-nil (possibly empty) slice; absent categories
// stay nil.
func propertyPermissionsToSdk(permissions resource.PropertyMap) armkeyvault.Permissions {
 var result armkeyvault.Permissions
 if v, ok := permissions["keys"]; ok {
  result.Keys = []*armkeyvault.KeyPermissions{}
  for _, item := range v.ArrayValue() {
   p := armkeyvault.KeyPermissions(item.StringValue())
   result.Keys = append(result.Keys, &p)
  }
 }
 if v, ok := permissions["certificates"]; ok {
  result.Certificates = []*armkeyvault.CertificatePermissions{}
  for _, item := range v.ArrayValue() {
   p := armkeyvault.CertificatePermissions(item.StringValue())
   result.Certificates = append(result.Certificates, &p)
  }
 }
 if v, ok := permissions["secrets"]; ok {
  result.Secrets = []*armkeyvault.SecretPermissions{}
  for _, item := range v.ArrayValue() {
   p := armkeyvault.SecretPermissions(item.StringValue())
   result.Secrets = append(result.Secrets, &p)
  }
 }
 if v, ok := permissions["storage"]; ok {
  result.Storage = []*armkeyvault.StoragePermissions{}
  for _, item := range v.ArrayValue() {
   p := armkeyvault.StoragePermissions(item.StringValue())
   result.Storage = append(result.Storage, &p)
  }
 }
 return result
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/debug/debug.go | provider/pkg/debug/debug.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package debug
import "fmt"
// TODO This is a hack. Consider using glog or another logging library instead.

// Debug determines if debug logging is turned on
var Debug *bool

// Log logs debug info to stdout if debug.Debug is set to true
func Log(format string, args ...interface{}) {
 // Guard clause: bail out unless the flag has been wired up and enabled.
 if Debug == nil || !*Debug {
  return
 }
 fmt.Printf(format, args...)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/responseBodyToSdkOutputs_test.go | provider/pkg/convert/responseBodyToSdkOutputs_test.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
// TestResponseBodyToSdkOutputs covers conversion of raw Azure API response
// bodies into SDK-shaped outputs for top-level body parameters: untyped and
// "any" values, property renames (SdkName), container flattening, const
// mismatches, collections, string sets, and unresolved type references.
func TestResponseBodyToSdkOutputs(t *testing.T) {
 t.Run("nil body returns empty map", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   body: nil,
  })
  expected := map[string]interface{}{}
  assert.Equal(t, expected, actual)
 })
 // Property-based check: values with an empty schema entry pass through as-is.
 t.Run("untyped non-empty values remain unchanged", rapid.MakeCheck(func(t *rapid.T) {
  value := propNestedComplex().Draw(t, "value")
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "untyped": {},
   },
   body: map[string]interface{}{
    "untyped": value,
   },
  })
  var expected = map[string]interface{}{
   "untyped": value,
  }
  assert.Equal(t, expected, actual)
 }))
 // Properties referencing the special "any" type also pass through unchanged.
 t.Run("any type values", rapid.MakeCheck(func(t *rapid.T) {
  value := propNestedComplex().Draw(t, "value")
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "untyped": {
     Ref: TypeAny,
    },
   },
   body: map[string]interface{}{
    "untyped": value,
   },
  })
  var expected = map[string]interface{}{
   "untyped": value,
  }
  assert.Equal(t, expected, actual)
 }))
 // API wire names are mapped back to SDK names via SdkName.
 t.Run("renamed", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "x-threshold": {
     SdkName: "threshold",
    },
   },
   body: map[string]interface{}{
    "x-threshold": 123,
   },
  })
  var expected = map[string]interface{}{
   "threshold": 123,
  }
  assert.Equal(t, expected, actual)
 })
 // Values wrapped in a container object are lifted to the top level.
 t.Run("containers", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "prop": {
     Containers: []string{"container"},
    },
   },
   body: map[string]interface{}{
    "container": map[string]interface{}{
     "prop": "value",
    },
   },
  })
  var expected = map[string]interface{}{
   "prop": "value",
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("nested containers", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "prop": {
     Containers: []string{"a", "b", "c"},
    },
   },
   body: map[string]interface{}{
    "a": map[string]interface{}{
     "b": map[string]interface{}{
      "c": map[string]interface{}{
       "prop": "value",
      },
     },
    },
   },
  })
  var expected = map[string]interface{}{
   "prop": "value",
  }
  assert.Equal(t, expected, actual)
 })
 // A const property whose value disagrees with the schema makes the whole
 // top-level conversion yield nil.
 t.Run("mismatched const returns nil", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "const": {
     Const: "value",
    },
   },
   body: map[string]interface{}{
    "const": "other",
   },
  })
  var expected map[string]interface{} = nil
  assert.Equal(t, expected, actual)
 })
 t.Run("array of empties not changed", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "emptyArray": {
     Type: "array",
    },
   },
   body: map[string]interface{}{
    "emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
   },
  })
  var expected = map[string]interface{}{
   "emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("map of empties unchanged", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "emptyDict": {
     Type: "object",
    },
   },
   body: map[string]interface{}{
    "emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
   },
  })
  var expected = map[string]interface{}{
   "emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
  }
  assert.Equal(t, expected, actual)
 })
 // Primitive item types require no per-item conversion (even if ill-typed).
 t.Run("typed array doesn't change items", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "typedArray": {
     Type: "array",
     Items: &resources.AzureAPIProperty{
      Type: "string",
     },
    },
   },
   body: map[string]interface{}{
    "typedArray": []interface{}{"a", "b", 3},
   },
  })
  var expected = map[string]interface{}{
   "typedArray": []interface{}{"a", "b", 3},
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("typed map doesn't change items", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "typedMap": {
     Type: "object",
     AdditionalProperties: &resources.AzureAPIProperty{
      Type: "string",
     },
    },
   },
   body: map[string]interface{}{
    "typedMap": map[string]interface{}{"a": "b", "c": 3},
   },
  })
  var expected = map[string]interface{}{
   "typedMap": map[string]interface{}{"a": "b", "c": 3},
  }
  assert.Equal(t, expected, actual)
 })
 // IsStringSet only affects the input direction; outputs stay as returned.
 t.Run("string set unchanged for outputs", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "userAssignedIdentities": {
     Type: "object",
     IsStringSet: true,
    },
   },
   body: map[string]interface{}{
    "userAssignedIdentities": map[string]interface{}{
     "a": "b",
     "c": map[string]interface{}{
      "d": "e",
     },
    },
   },
  })
  var expected = map[string]interface{}{
   "userAssignedIdentities": map[string]interface{}{
    "a": "b",
    "c": map[string]interface{}{
     "d": "e",
    },
   },
  }
  assert.Equal(t, expected, actual)
 })
 // References to types the converter doesn't know about degrade gracefully.
 t.Run("missing ref type continues with no change", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "p": {
     Ref: "#/types/azure-native:testing:Type1",
    },
   },
   body: map[string]interface{}{
    "p": map[string]interface{}{
     "k": "v",
    },
   },
  })
  expected := map[string]interface{}{
   "p": map[string]interface{}{
    "k": "v",
   },
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("missing oneOf type continues with no change", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   bodyParameters: map[string]resources.AzureAPIProperty{
    "oneOf": {
     OneOf: []string{"#types/azure-native:testing:Type1", "#types/azure-native:testing:Type2"},
    },
   },
   body: map[string]interface{}{
    "oneOf": map[string]interface{}{
     "prop1": "value",
    },
   },
  })
  expected := map[string]interface{}{
   "oneOf": map[string]interface{}{
    "prop1": "value",
   },
  }
  assert.Equal(t, expected, actual)
 })
}
// TestResponseBodyToSdkOutputsNestedTypes covers the same conversions as
// TestResponseBodyToSdkOutputs but for properties inside a referenced named
// type: every scenario wraps its payload in a "nested" property whose schema
// points at azure-native:testing:SubType.
func TestResponseBodyToSdkOutputsNestedTypes(t *testing.T) {
 // Shared top-level schema: a single "nested" property of type SubType.
 bodyParams := map[string]resources.AzureAPIProperty{
  "nested": {
   Ref: "#/types/azure-native:testing:SubType",
  },
 }
 t.Run("nil property values are dropped", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "name": {},
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{
     "name": nil,
    },
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}{},
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("untyped simple value", rapid.MakeCheck(func(t *rapid.T) {
  value := propNestedComplex().Draw(t, "value")
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "value": {},
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{
     "value": value,
    },
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}{
    "value": value,
   },
  }
  assert.Equal(t, expected, actual)
 }))
 t.Run("empty object unchanged", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "name": {},
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{},
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}{},
  }
  assert.Equal(t, expected, actual)
 })
 // "id" inside a nested type is an ordinary property, not the resource ID.
 t.Run("sub-id not ignored", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "id": {},
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{
     "id": "id-value",
    },
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}{
    "id": "id-value",
   },
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("renamed", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "x-renamed": {
      SdkName: "renamed",
     },
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{
     "x-renamed": "value",
    },
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}{
    "renamed": "value",
   },
  }
  assert.Equal(t, expected, actual)
 })
 t.Run("containered", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "containered": {
      Containers: []string{"props"},
     },
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{
     "props": map[string]interface{}{
      "containered": true,
     },
    },
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}{
    "containered": true,
   },
  }
  assert.Equal(t, expected, actual)
 })
 // A const mismatch inside a nested type nils out only the nested object,
 // not the whole result (contrast with the top-level const test).
 t.Run("mismatched const ignored", func(t *testing.T) {
  actual := testResponseBodyToSdkOutputs(responseBodyToSdkOutputsTestCase{
   types: map[string]map[string]resources.AzureAPIProperty{
    "azure-native:testing:SubType": {
     "const": {
      Const: "value",
     },
    },
   },
   bodyParameters: bodyParams,
   body: map[string]interface{}{
    "nested": map[string]interface{}{
     "const": "other",
    },
   },
  })
  var expected = map[string]interface{}{
   "nested": map[string]interface{}(nil),
  }
  assert.Equal(t, expected, actual)
 })
}
// responseBodyToSdkOutputsTestCase describes a single conversion scenario for
// testResponseBodyToSdkOutputs.
type responseBodyToSdkOutputsTestCase struct {
 body map[string]interface{} // raw API response body to convert
 bodyParameters map[string]resources.AzureAPIProperty // schema of the top-level body properties
 types map[string]map[string]resources.AzureAPIProperty // named types keyed by token, each a map of property name to schema
}
// testResponseBodyToSdkOutputs runs one conversion scenario through a freshly
// built SdkShapeConverter and returns the resulting SDK outputs. A nil
// bodyParameters map is replaced with an empty one before conversion.
func testResponseBodyToSdkOutputs(testCase responseBodyToSdkOutputsTestCase) map[string]interface{} {
 registry := map[string]resources.AzureAPIType{}
 for name, props := range testCase.types {
  registry[name] = resources.AzureAPIType{
   Properties: props,
  }
 }
 params := testCase.bodyParameters
 if params == nil {
  params = map[string]resources.AzureAPIProperty{}
 }
 converter := NewSdkShapeConverterFull(registry)
 return converter.ResponseBodyToSdkOutputs(params, testCase.body)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/isDefaultResponse_test.go | provider/pkg/convert/isDefaultResponse_test.go | package convert
import (
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/stretchr/testify/assert"
)
// TestIsDefaultResponseTrue verifies that a response matching the default body
// is recognized as default: false booleans always count as default, and extra
// keys in the default body that are absent from the response are tolerated.
func TestIsDefaultResponseTrue(t *testing.T) {
 c := getMockConverter()
 response := map[string]interface{}{
  "p1": true,
  "p2": false, // false bools are always considered default
  "name": "foo",
  "irrelevantBool": false,
  "untypedDict": map[string]interface{}{
   "flag": true,
   "string": "bar",
   "map": map[string]interface{}{},
  },
 }
 defaultBody := map[string]interface{}{
  "p1": true,
  "name": "foo",
  "untypedDict": map[string]interface{}{
   "flag": true,
   "string": "bar",
   "map": map[string]interface{}{},
   "extra": true, // extra elements in the default body need not appear in the response
  },
 }
 actual := c.IsDefaultResponse(resourcePutParameters(), response, defaultBody)
 assert.True(t, actual)
}
// TestIsDefaultResponseFalse covers four ways a response can diverge from the
// default body: an extra true boolean, an array absent from the default, a
// differing nested string, and a non-empty nested map where the default is empty.
func TestIsDefaultResponseFalse(t *testing.T) {
 c := getMockConverter()
 defaultBody := map[string]interface{}{}
 // Scenario 1: true boolean missing from an empty default body.
 response1 := map[string]interface{}{
  "p1": true,
 }
 actual1 := c.IsDefaultResponse(resourcePutParameters(), response1, defaultBody)
 assert.False(t, actual1)
 // Scenario 2: array value missing from an empty default body.
 response2 := map[string]interface{}{
  "untypedArray": []string{"1", "2"},
 }
 actual2 := c.IsDefaultResponse(resourcePutParameters(), response2, defaultBody)
 assert.False(t, actual2)
 // Scenario 3: nested string differs from the default ("buzz" vs "bar").
 response3 := map[string]interface{}{
  "untypedDict": map[string]interface{}{
   "flag": true,
   "string": "buzz",
  },
 }
 defaultBody = map[string]interface{}{
  "untypedDict": map[string]interface{}{
   "flag": true,
   "string": "bar",
  },
 }
 actual3 := c.IsDefaultResponse(resourcePutParameters(), response3, defaultBody)
 assert.False(t, actual3)
 // Scenario 4: non-empty nested map where the default has an empty map.
 response4 := map[string]interface{}{
  "untypedDict": map[string]interface{}{
   "map": map[string]interface{}{
    "key1": "value1",
   },
  },
 }
 defaultBody = map[string]interface{}{
  "untypedDict": map[string]interface{}{
   "map": map[string]interface{}{},
  },
 }
 actual4 := c.IsDefaultResponse(resourcePutParameters(), response4, defaultBody)
 assert.False(t, actual4)
}
// resourcePutParameters builds a fixture of PUT parameters for the tests: one
// body parameter whose schema exercises renames, containers, refs, oneOf, and
// untyped properties, plus three path parameters (one with an SDK rename).
func resourcePutParameters() []resources.AzureAPIParameter {
 return []resources.AzureAPIParameter{
  {
   Location: "body",
   Body: &resources.AzureAPIType{
    Properties: map[string]resources.AzureAPIProperty{
     "name": {},
     "x-threshold": {
      SdkName: "threshold",
     },
     "structure": {
      Ref: "#/types/azure-native:testing:Structure",
     },
     "p1": {
      Containers: []string{"properties"},
     },
     "p2": {
      Containers: []string{"properties"},
     },
     "p3": {
      Containers: []string{"properties", "document", "body"},
     },
     "more": {
      Containers: []string{"properties"},
      Ref: "#/types/azure-native:testing:More",
     },
     "union": {
      OneOf: []string{"#/types/azure-native:testing:OptionA", "#/types/azure-native:testing:OptionB"},
     },
     "tags": {},
     "untypedArray": {},
     "untypedDict": {
      Ref: TypeAny,
     },
    },
   },
  },
  {
   Location: "path",
   Name: "subscriptionId",
  },
  {
   Location: "path",
   Name: "resourceGroupName",
  },
  {
   Location: "path",
   Name: "NetworkInterfaceName",
   Value: &resources.AzureAPIProperty{
    SdkName: "networkInterfaceName",
   },
  },
 }
}
// getMockConverter builds an SdkShapeConverter preloaded with the named test
// types used by the fixtures: a Structure (and its Response variant), nested
// More/MoreItem types, two oneOf options discriminated by a const "type"
// property, and a SubResource with just an id.
func getMockConverter() SdkShapeConverter {
 return NewSdkShapeConverterFull(map[string]resources.AzureAPIType{
  "azure-native:testing:Structure": {
   Properties: map[string]resources.AzureAPIProperty{
    "v1": {},
    "v2": {},
    "v3-odd": {
     SdkName: "v3",
    },
    "v4-nested": {
     SdkName: "v4",
     Containers: []string{"props"},
    },
    "v5": {
     Ref: "#/types/azure-native:testing:SubResource",
    },
   },
  },
  "azure-native:testing:StructureResponse": {
   Properties: map[string]resources.AzureAPIProperty{
    "v1": {},
    "v2": {},
    "v3-odd": {
     SdkName: "v3",
    },
    "v4-nested": {
     SdkName: "v4",
     Containers: []string{"props"},
    },
    "v5ReadOnly": {},
   },
  },
  "azure-native:testing:More": {
   Properties: map[string]resources.AzureAPIProperty{
    "items": {
     Items: &resources.AzureAPIProperty{
      Ref: "#/types/azure-native:testing:MoreItem",
     },
    },
    "itemsMap": {
     Type: "object",
     AdditionalProperties: &resources.AzureAPIProperty{
      Ref: "#/types/azure-native:testing:MoreItem",
     },
    },
   },
  },
  "azure-native:testing:MoreItem": {
   Properties: map[string]resources.AzureAPIProperty{
    "aaa": {
     SdkName: "Aaa",
    },
    "bbb": {
     Containers: []string{"ccc"},
    },
   },
  },
  "azure-native:testing:OptionA": {
   Properties: map[string]resources.AzureAPIProperty{
    "type": {
     Const: "AAA",
    },
    "a": {
     Containers: []string{"aa"},
    },
   },
  },
  "azure-native:testing:OptionB": {
   Properties: map[string]resources.AzureAPIProperty{
    "type": {
     Const: "BBB",
    },
    "b": {
     Containers: []string{"bb"},
    },
   },
  },
  "azure-native:testing:SubResource": {
   Properties: map[string]resources.AzureAPIProperty{
    "id": {
     Type: "string",
    },
   },
  },
 })
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/sdkInputsToRequestBody_test.go | provider/pkg/convert/sdkInputsToRequestBody_test.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"bufio"
"bytes"
"fmt"
"os"
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
// TestSdkInputsToRequestBodySubResource verifies that a "$self"-prefixed
// relative ID inside a SubResource is expanded to an absolute ID rooted at the
// resource's own ID (the third argument of SdkInputsToRequestBody).
func TestSdkInputsToRequestBodySubResource(t *testing.T) {
 c := NewSdkShapeConverterFull(map[string]resources.AzureAPIType{
  "azure-native:testing:Structure": {
   Properties: map[string]resources.AzureAPIProperty{
    "v5": {
     Ref: "#/types/azure-native:testing:SubResource",
    },
   },
  },
  "azure-native:testing:SubResource": {
   Properties: map[string]resources.AzureAPIProperty{
    "id": {
     Type: "string",
    },
   },
  },
 })
 // "$self" stands for the ID of the resource being created/updated.
 var sdkData = map[string]interface{}{
  "structure": map[string]interface{}{
   "v5": map[string]interface{}{
    "id": "$self/relative/id/123",
   },
  },
 }
 var expectedBody = map[string]interface{}{
  "structure": map[string]interface{}{
   "v5": map[string]interface{}{
    "id": "/sub/456/rg/my/network/abc/relative/id/123",
   },
  },
 }
 bodyProperties := map[string]resources.AzureAPIProperty{
  "structure": {
   Ref: "#/types/azure-native:testing:Structure",
  },
 }
 actualBody, err := c.SdkInputsToRequestBody(bodyProperties, sdkData, "/sub/456/rg/my/network/abc")
 assert.Nil(t, err)
 assert.Equal(t, expectedBody, actualBody)
}
func captureStderr(f func()) string {
// Create a pipe
r, w, _ := os.Pipe()
// Save the original stderr
originalStderr := os.Stderr
// Redirect stderr to the write end of the pipe
os.Stderr = w
// Run the target function
f()
// Close the write end of the pipe and restore stderr
w.Close()
os.Stderr = originalStderr
// Read the output from the read end of the pipe
var buf bytes.Buffer
reader := bufio.NewReader(r)
buf.ReadFrom(reader)
return buf.String()
}
func TestSdkInputsToRequestBody(t *testing.T) {
type testCaseArgs struct {
id string
types map[string]map[string]resources.AzureAPIProperty
props map[string]resources.AzureAPIProperty
inputs map[string]interface{}
}
convertWithError := func(args testCaseArgs) (map[string]interface{}, error) {
types := map[string]resources.AzureAPIType{}
if args.types != nil {
for typeName, typeProperties := range args.types {
types[typeName] = resources.AzureAPIType{
Properties: typeProperties,
}
}
}
c := NewSdkShapeConverterFull(types)
return c.SdkInputsToRequestBody(args.props, args.inputs, args.id)
}
convert := func(args testCaseArgs) map[string]interface{} {
body, err := convertWithError(args)
assert.Nil(t, err)
return body
}
t.Run("nil inputs", func(t *testing.T) {
actual := convert(testCaseArgs{
inputs: nil,
})
expected := map[string]interface{}{}
assert.Equal(t, expected, actual)
})
t.Run("unmatched inputs are reported", func(t *testing.T) {
prevLogToStderr := logging.LogToStderr
prevVerbose := logging.Verbose
prevLogFlow := logging.LogFlow
logging.InitLogging(true, 9, true)
defer func() {
logging.InitLogging(prevLogToStderr, prevVerbose, prevLogFlow)
}()
var actual map[string]any
stderr := captureStderr(func() {
actual = convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"propA": {
Type: "string",
},
},
inputs: map[string]interface{}{
"propA": "a",
"propB": "b",
},
})
})
assert.Equal(t, map[string]interface{}{"propA": "a"}, actual)
assert.Contains(t, stderr, "Unrecognized properties in SdkInputsToRequestBody: [propB]")
})
t.Run("untyped non-empty values remain unchanged", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"untyped": {},
},
inputs: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"untyped": value,
}
assert.Equal(t, expected, actual)
}))
t.Run("any type values", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"untyped": {
Ref: TypeAny,
},
},
inputs: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"untyped": value,
}
assert.Equal(t, expected, actual)
}))
t.Run("renamed", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"x-threshold": {
SdkName: "threshold",
},
},
inputs: map[string]interface{}{
"threshold": 123,
},
})
var expected = map[string]interface{}{
"x-threshold": 123,
}
assert.Equal(t, expected, actual)
})
t.Run("containers", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"prop": {
Containers: []string{"container"},
},
},
inputs: map[string]interface{}{
"prop": "value",
},
})
var expected = map[string]interface{}{
"container": map[string]interface{}{
"prop": "value",
},
}
assert.Equal(t, expected, actual)
})
t.Run("nested containers", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"prop": {
Containers: []string{"a", "b", "c"},
},
},
inputs: map[string]interface{}{
"prop": "value",
},
})
var expected = map[string]interface{}{
"a": map[string]interface{}{
"b": map[string]interface{}{
"c": map[string]interface{}{
"prop": "value",
},
},
},
}
assert.Equal(t, expected, actual)
})
t.Run("mismatched const returns nil", func(t *testing.T) {
actual, err := convertWithError(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"const": {
Const: "value",
},
},
inputs: map[string]interface{}{
"const": "other",
},
})
assert.Nil(t, actual)
assert.Equal(t, err, fmt.Errorf("property const is a constant of value \"value\" and cannot be modified to be \"other\""))
})
t.Run("array of empties not changed", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"emptyArray": {
Type: "array",
},
},
inputs: map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
},
})
var expected = map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
}
assert.Equal(t, expected, actual)
})
t.Run("map of empties unchanged", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"emptyDict": {
Type: "object",
},
},
inputs: map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
},
})
var expected = map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
}
assert.Equal(t, expected, actual)
})
t.Run("typed array doesn't change items", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"typedArray": {
Type: "array",
Items: &resources.AzureAPIProperty{
Type: "string",
},
},
},
inputs: map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
},
})
var expected = map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
}
assert.Equal(t, expected, actual)
})
t.Run("typed map doesn't change items", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"typedMap": {
Type: "object",
AdditionalProperties: &resources.AzureAPIProperty{
Type: "string",
},
},
},
inputs: map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
},
})
var expected = map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
}
assert.Equal(t, expected, actual)
})
t.Run("string set", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"userAssignedIdentities": {
IsStringSet: true,
Type: "object",
},
},
inputs: map[string]interface{}{
"userAssignedIdentities": []interface{}{
"a", "b",
},
},
})
expected := map[string]interface{}{
"userAssignedIdentities": map[string]interface{}{
"a": struct{}{},
"b": struct{}{},
},
}
assert.Equal(t, expected, actual)
})
t.Run("string set with non-strings", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"userAssignedIdentities": {
IsStringSet: true,
Type: "object",
},
},
inputs: map[string]interface{}{
"userAssignedIdentities": []interface{}{
"a", "b", 3,
},
},
})
expected := map[string]interface{}{
"userAssignedIdentities": map[string]interface{}{
"a": struct{}{},
"b": struct{}{},
// 3 gets ignored
},
}
assert.Equal(t, expected, actual)
})
t.Run("missing ref type continues with no change", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"p": {
Ref: "#/types/azure-native:testing:Type1",
},
},
inputs: map[string]interface{}{
"p": map[string]interface{}{
"k": "v",
},
},
})
expected := map[string]interface{}{
"p": map[string]interface{}{
"k": "v",
},
}
assert.Equal(t, expected, actual)
})
t.Run("missing oneOf type continues with no change", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"oneOf": {
OneOf: []string{"#types/azure-native:testing:Type1", "#types/azure-native:testing:Type2"},
},
},
inputs: map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "value",
},
},
})
expected := map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "value",
},
}
assert.Equal(t, expected, actual)
})
t.Run("oneOf type", func(t *testing.T) {
actual := convert(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"oneOf": {
OneOf: []string{"#types/azure-native:testing:Type1", "#types/azure-native:testing:Type2"},
},
},
inputs: map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "type1",
},
},
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:Type1": {
"prop1": {
Const: "type1",
},
},
},
})
expected := map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "type1",
},
}
assert.Equal(t, expected, actual)
})
t.Run("mismatching oneOf type", func(t *testing.T) {
actual := convert(testCaseArgs{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:Type1": {
"prop1": {
Const: "type1",
},
},
"azure-native:testing:Type2": {
"prop1": {
Const: "type2",
},
},
},
props: map[string]resources.AzureAPIProperty{
"oneOf": {
OneOf: []string{"#types/azure-native:testing:Type1", "#types/azure-native:testing:Type2"},
},
},
inputs: map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "foo",
},
},
})
expected := map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "foo",
},
}
assert.Equal(t, expected, actual)
})
convertNested := func(args testCaseArgs) map[string]interface{} {
props := map[string]resources.AzureAPIProperty{
"nested": {
Ref: "#/types/azure-native:testing:SubType",
},
}
types := map[string]resources.AzureAPIType{
"azure-native:testing:SubType": {
Properties: args.props,
},
}
if args.types != nil {
for typeName, typeProperties := range args.types {
types[typeName] = resources.AzureAPIType{
Properties: typeProperties,
}
}
}
inputs := map[string]interface{}{
"nested": args.inputs,
}
c := NewSdkShapeConverterFull(types)
body, err := c.SdkInputsToRequestBody(props, inputs, args.id)
assert.Nil(t, err)
return body
}
t.Run("nil inputs", func(t *testing.T) {
actual := convertNested(testCaseArgs{
inputs: nil,
})
expected := map[string]interface{}{
"nested": map[string]interface{}{},
}
assert.Equal(t, expected, actual)
})
t.Run("untyped non-empty values remain unchanged", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"untyped": {},
},
inputs: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"untyped": value,
},
}
assert.Equal(t, expected, actual)
}))
t.Run("any type values", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"untyped": {
Ref: TypeAny,
},
},
inputs: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"untyped": value,
},
}
assert.Equal(t, expected, actual)
}))
t.Run("renamed", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"x-threshold": {
SdkName: "threshold",
},
},
inputs: map[string]interface{}{
"threshold": 123,
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"x-threshold": 123,
},
}
assert.Equal(t, expected, actual)
})
t.Run("containers", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"prop": {
Containers: []string{"container"},
},
},
inputs: map[string]interface{}{
"prop": "value",
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"container": map[string]interface{}{
"prop": "value",
},
},
}
assert.Equal(t, expected, actual)
})
t.Run("nested containers", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"prop": {
Containers: []string{"a", "b", "c"},
},
},
inputs: map[string]interface{}{
"prop": "value",
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"a": map[string]interface{}{
"b": map[string]interface{}{
"c": map[string]interface{}{
"prop": "value",
},
},
},
},
}
assert.Equal(t, expected, actual)
})
t.Run("mismatched const returns nil", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"const": {
Const: "value",
},
},
inputs: map[string]interface{}{
"const": "other",
},
})
expected := map[string]interface{}{
"nested": map[string]interface{}(nil),
}
assert.Equal(t, expected, actual)
})
t.Run("array of empties not changed", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"emptyArray": {
Type: "array",
},
},
inputs: map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
},
}
assert.Equal(t, expected, actual)
})
t.Run("map of empties unchanged", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"emptyDict": {
Type: "object",
},
},
inputs: map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
},
}
assert.Equal(t, expected, actual)
})
t.Run("typed array doesn't change items", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"typedArray": {
Type: "array",
Items: &resources.AzureAPIProperty{
Type: "string",
},
},
},
inputs: map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
},
}
assert.Equal(t, expected, actual)
})
t.Run("typed map doesn't change items", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"typedMap": {
Type: "object",
AdditionalProperties: &resources.AzureAPIProperty{
Type: "string",
},
},
},
inputs: map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
},
}
assert.Equal(t, expected, actual)
})
t.Run("string set", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"userAssignedIdentities": {
IsStringSet: true,
Type: "object",
},
},
inputs: map[string]interface{}{
"userAssignedIdentities": []interface{}{
"a", "b",
},
},
})
expected := map[string]interface{}{
"nested": map[string]interface{}{
"userAssignedIdentities": map[string]interface{}{
"a": struct{}{},
"b": struct{}{},
},
},
}
assert.Equal(t, expected, actual)
})
t.Run("missing ref type continues with no change", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"p": {
Ref: "#/types/azure-native:testing:Type1",
},
},
inputs: map[string]interface{}{
"p": map[string]interface{}{
"k": "v",
},
},
})
expected := map[string]interface{}{
"nested": map[string]interface{}{
"p": map[string]interface{}{
"k": "v",
},
},
}
assert.Equal(t, expected, actual)
})
t.Run("missing oneOf type continues with no change", func(t *testing.T) {
actual := convertNested(testCaseArgs{
props: map[string]resources.AzureAPIProperty{
"oneOf": {
OneOf: []string{"#types/azure-native:testing:Type1", "#types/azure-native:testing:Type2"},
},
},
inputs: map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "value",
},
},
})
expected := map[string]interface{}{
"nested": map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "value",
},
},
}
assert.Equal(t, expected, actual)
})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/responseBodyToSdkOutputs.go | provider/pkg/convert/responseBodyToSdkOutputs.go | package convert
import (
"reflect"
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
)
// ResponseBodyToSdkOutputs converts a JSON request- or response body to the SDK shape:
// properties are un-nested from their API container objects and surfaced under their
// SDK names. Returns nil when a constant property's value contradicts the metadata
// (i.e. the body is not of the expected type).
func (k *SdkShapeConverter) ResponseBodyToSdkOutputs(props map[string]resources.AzureAPIProperty,
	response map[string]interface{}) map[string]interface{} {
	outputs := map[string]interface{}{}
	for apiName, apiProp := range props {
		apiProp := apiProp // per-iteration copy: its address is taken below
		targetName := apiName
		if apiProp.SdkName != "" {
			targetName = apiProp.SdkName
		}
		// Walk down through the wrapper objects that hold this property in the
		// API shape; the SDK shape is flattened.
		source := response
		for _, container := range apiProp.Containers {
			wrapper, found := source[container]
			if !found {
				break
			}
			if asMap, ok := wrapper.(map[string]interface{}); ok {
				source = asMap
			}
		}
		value, found := source[apiName]
		if !found {
			continue
		}
		if apiProp.Const != nil && value != apiProp.Const {
			// Returning nil indicates that the given value is not of the expected type.
			return nil
		}
		if value != nil {
			outputs[targetName] = k.convertBodyPropToSdkPropValue(&apiProp, value)
		}
	}
	return outputs
}
// convertBodyPropToSdkPropValue converts a single body value to its SDK shape,
// recursing into typed objects via ResponseBodyToSdkOutputs.
func (k *SdkShapeConverter) convertBodyPropToSdkPropValue(prop *resources.AzureAPIProperty, value interface{}) interface{} {
	// The type name is not needed to map an object's properties, only its property metadata.
	toOutputs := func(_ string, props map[string]resources.AzureAPIProperty, values map[string]interface{}) map[string]interface{} {
		return k.ResponseBodyToSdkOutputs(props, values)
	}
	return k.convertTypedResponseBodyObjectsToSdkOutputs(prop, value, toOutputs)
}
// convertTypedResponseBodyObjectsToSdkOutputs recursively finds map values with a known
// type and calls convertObject on them; untyped values are passed through unchanged.
func (k *SdkShapeConverter) convertTypedResponseBodyObjectsToSdkOutputs(prop *resources.AzureAPIProperty, value interface{}, convertObject convertTypedObject) interface{} {
	// Guard against nil: reflect.TypeOf(nil) returns a nil reflect.Type and calling
	// Kind() on it would panic. Nested map/array entries can be nil (e.g. an explicit
	// JSON null), so this is reachable through the recursive calls below.
	if value == nil {
		return nil
	}
	switch reflect.TypeOf(value).Kind() {
	case reflect.Map:
		// JSON objects are unmarshalled as map[string]interface{}. Check the assertion
		// once up front (instead of asserting unchecked inside the oneOf loop) so an
		// unexpected map type is passed through unchanged rather than panicking.
		valueMap, ok := value.(map[string]interface{})
		if !ok {
			return value
		}
		// For union types, iterate through types and find the first one that matches the shape.
		for _, t := range prop.OneOf {
			typeName := strings.TrimPrefix(t, "#/types/")
			typ, ok, err := k.GetType(typeName)
			if !ok || err != nil {
				continue
			}
			request := convertObject(typeName, typ.Properties, valueMap)
			if request != nil {
				return request
			}
		}
		// Objects of a single known type are converted via convertObject.
		if strings.HasPrefix(prop.Ref, "#/types/") {
			typeName := strings.TrimPrefix(prop.Ref, "#/types/")
			typ, ok, err := k.GetType(typeName)
			if !ok || err != nil {
				// Unknown type: continue with no change.
				return value
			}
			return convertObject(typeName, typ.Properties, valueMap)
		}
		// Dictionaries: convert each value against the element metadata.
		if prop.AdditionalProperties != nil {
			result := map[string]interface{}{}
			for key, item := range valueMap {
				result[key] = k.convertTypedResponseBodyObjectsToSdkOutputs(prop.AdditionalProperties, item, convertObject)
			}
			return result
		}
		return value
	case reflect.Slice, reflect.Array:
		// Arrays without item metadata are passed through unchanged.
		if prop.Items == nil {
			return value
		}
		result := make([]interface{}, 0)
		s := reflect.ValueOf(value)
		for i := 0; i < s.Len(); i++ {
			result = append(result, k.convertTypedResponseBodyObjectsToSdkOutputs(prop.Items, s.Index(i).Interface(), convertObject))
		}
		return result
	}
	return value
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/responseToSdkInputs.go | provider/pkg/convert/responseToSdkInputs.go | package convert
import (
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
)
// ResponseToSdkInputs calculates a map of input values that would produce the given resource path and
// response. This is useful when we need to import an existing resource based on its current properties.
func (k *SdkShapeConverter) ResponseToSdkInputs(parameters []resources.AzureAPIParameter,
	pathValues map[string]string, responseBody map[string]interface{}) map[string]interface{} {
	result := map[string]interface{}{}
	for _, param := range parameters {
		switch {
		case strings.EqualFold(param.Name, "subscriptionId"):
			// Ignore: the subscription is not a resource input.
		case param.Location == "path":
			name := param.Name
			sdkName := name
			if param.Value != nil && param.Value.SdkName != "" {
				sdkName = param.Value.SdkName
			}
			result[sdkName] = pathValues[name]
		case param.Location == body:
			outputs := k.ResponseBodyToSdkOutputs(param.Body.Properties, responseBody)
			inputs := k.SdkOutputsToSdkInputs([]resources.AzureAPIParameter{param}, outputs)
			// Loop variable renamed from `k` to `key` to avoid shadowing the receiver.
			for key, v := range inputs {
				if key == "id" {
					// Some resources have a top-level `id` property which is (probably incorrectly) marked as
					// non-readonly. `id` is a special property to Pulumi and will always cause diffs if set in
					// the result of a Read operation and block import. So, don't copy it to inputs.
					continue
				}
				// Attempt to exclude insignificant properties from the inputs. A resource response would
				// contain a lot of default values, e.g. empty arrays when no values were specified, empty
				// strings, or false booleans. The decision to remove them is somewhat arbitrary but it
				// seems to make the practical import experience smoother.
				result[key] = removeEmptyCollections(v)
			}
		}
	}
	return result
}
// removeEmptyCollections returns nil if the given value is a map or array with no
// (significant) values; nested empty collections are removed recursively. All other
// values are returned unchanged.
func removeEmptyCollections(value interface{}) interface{} {
	switch typed := value.(type) {
	case map[string]interface{}:
		cleaned := map[string]interface{}{}
		for key, item := range typed {
			if kept := removeEmptyCollections(item); kept != nil {
				cleaned[key] = kept
			}
		}
		if len(cleaned) > 0 {
			return cleaned
		}
		return nil
	case []interface{}:
		var cleaned []interface{}
		for _, item := range typed {
			if kept := removeEmptyCollections(item); kept != nil {
				cleaned = append(cleaned, kept)
			}
		}
		if len(cleaned) > 0 {
			return cleaned
		}
		return nil
	default:
		return value
	}
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/previewOutputs.go | provider/pkg/convert/previewOutputs.go | package convert
import (
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
)
// TypeAny is the schema reference used for properties that accept any JSON value.
const TypeAny = "pulumi.json#/Any"
// PreviewOutputs calculates a map of outputs at the time of initial resource creation. It takes the provided resource
// inputs and maps them to the outputs shape, adding unknowns for all properties that are not defined in inputs.
func (k *SdkShapeConverter) PreviewOutputs(inputs resource.PropertyMap,
	props map[string]resources.AzureAPIProperty) resource.PropertyMap {
	outputs := resource.PropertyMap{}
	for propName, apiProp := range props {
		apiProp := apiProp // per-iteration copy: its address is taken below
		sdkName := propName
		if apiProp.SdkName != "" {
			sdkName = apiProp.SdkName
		}
		key := resource.PropertyKey(sdkName)
		inputValue, known := inputs[key]
		if !known {
			// No input provided: start from an unknown placeholder.
			inputValue = resource.MakeComputed(resource.NewStringProperty(""))
		}
		outputs[key] = k.previewOutputValue(inputValue, &apiProp)
	}
	return outputs
}
// previewOutputValue computes the preview (pre-create) output for a single property.
// Known input values whose type matches the metadata pass through unchanged; absent
// or type-mismatched inputs become a computed (unknown) placeholder. NOTE: the order
// of the switch cases below is significant.
func (k *SdkShapeConverter) previewOutputValue(inputValue resource.PropertyValue,
	prop *resources.AzureAPIProperty) resource.PropertyValue {
	// No metadata: nothing to validate against, so mark as unknown.
	if prop == nil {
		return resource.MakeComputed(resource.NewStringProperty(""))
	}
	// A constant string property always previews to its known constant value,
	// regardless of the input (see the tests' "constant property" cases).
	if prop.Const != nil {
		if asString, ok := prop.Const.(string); ok {
			return resource.NewStringProperty(asString)
		}
	}
	switch {
	// Scalars pass through only when the input matches the declared type.
	case prop.Type == "boolean" && inputValue.IsBool():
		return inputValue
	case prop.Type == "integer" && inputValue.IsNumber():
		return inputValue
	case prop.Type == "number" && inputValue.IsNumber():
		return inputValue
	case prop.Type == "string" && inputValue.IsString():
		return inputValue
	// Arrays: preview each element against the item metadata.
	case (prop.Type == "array" || prop.Items != nil) && inputValue.IsArray():
		var items []resource.PropertyValue
		for _, item := range inputValue.ArrayValue() {
			items = append(items, k.previewOutputValue(item, prop.Items))
		}
		return resource.NewArrayProperty(items)
	// Named object types: recurse into the type's properties.
	// Lookup errors are deliberately ignored; a missing type yields an empty
	// property set and hence an empty object preview.
	case strings.HasPrefix(prop.Ref, "#/types/") && inputValue.IsObject():
		typeName := strings.TrimPrefix(prop.Ref, "#/types/")
		typ, _, _ := k.GetType(typeName)
		v := k.PreviewOutputs(inputValue.ObjectValue(), typ.Properties)
		return resource.NewObjectProperty(v)
	// Dictionaries: preview each value against the element metadata.
	case prop.AdditionalProperties != nil && inputValue.IsObject():
		inputObject := inputValue.ObjectValue()
		result := resource.PropertyMap{}
		for name, value := range inputObject {
			p := value
			result[name] = k.previewOutputValue(p, prop.AdditionalProperties)
		}
		return resource.NewObjectProperty(result)
	case prop.OneOf != nil && inputValue.IsObject():
		// TODO: It would be nice to do something smart here and pick the right oneOf branch based on the input.
		// The challenge is differentiating between a mis-match and valid unknowns in the input.
		// inputObject := inputValue.ObjectValue()
		// for _, oneOf := range prop.OneOf {
		// 	typeName := strings.TrimPrefix(oneOf, "#/types/")
		// 	typ, _, _ := k.GetType(typeName)
		// 	v := k.PreviewOutputs(inputObject, typ.Properties)
		// 	v.Mappable()
		// 	if !v.ContainsUnknowns() {
		// 		return resource.NewObjectProperty(v)
		// 	}
		// }
		// Fallback to legacy behaviour - assuming the input is same as output
		return inputValue
	// "Any"-typed properties are passed through untouched.
	case prop.Ref == TypeAny:
		return inputValue
	case prop.Type == "" && prop.Items == nil && prop.AdditionalProperties == nil && prop.OneOf == nil && prop.Ref == "":
		// Untyped property
		if inputValue.IsString() || inputValue.IsNumber() || inputValue.IsBool() {
			// Assume simple types with no type information remain unchanged
			return inputValue
		}
	}
	// Everything else (type mismatch, unknown input) is marked as computed.
	return resource.MakeComputed(k.makeComputedValue(prop))
}
// makeComputedValue returns the placeholder value to wrap in MakeComputed for an
// unknown property. Constant string properties keep their known constant value.
func (k *SdkShapeConverter) makeComputedValue(prop *resources.AzureAPIProperty) resource.PropertyValue {
	if prop != nil && prop.Const != nil {
		// Use a checked assertion, consistent with previewOutputValue: a non-string
		// constant must fall through to the empty-string placeholder, not panic.
		if asString, ok := prop.Const.(string); ok {
			return resource.NewStringProperty(asString)
		}
	}
	// To mark something as computed, we always use a string property with an
	// empty string, regardless of the type.
	return resource.NewStringProperty("")
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/previewOutputs_test.go | provider/pkg/convert/previewOutputs_test.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/stretchr/testify/assert"
)
// TestPreviewOutputs exercises SdkShapeConverter.PreviewOutputs across the property
// kinds that appear in resource metadata: renamed, constant, string, number, bool,
// union (oneOf), array, map, complex (ref) and identity-map properties. Each kind is
// checked with a valid input, an absent input ("unknown"), and a type-mismatched
// input; unknowns and mismatches are expected to collapse to a computed placeholder.
func TestPreviewOutputs(t *testing.T) {
	// previewTestCase bundles the raw inputs, property metadata, and named types
	// for a single invocation of PreviewOutputs.
	type previewTestCase struct {
		inputs   map[string]interface{}
		metadata map[string]resources.AzureAPIProperty
		types    map[string]resources.AzureAPIType
	}
	// preview converts the raw inputs to a PropertyMap and runs PreviewOutputs.
	preview := func(testCase previewTestCase) resource.PropertyMap {
		inputMap := resource.NewPropertyMapFromMap(testCase.inputs)
		converter := NewSdkShapeConverterFull(testCase.types)
		return converter.PreviewOutputs(inputMap, testCase.metadata)
	}
	// A property with an SdkName is surfaced under its SDK name.
	t.Run("sdk renamed property", func(t *testing.T) {
		actual := preview(previewTestCase{
			inputs: map[string]interface{}{
				"prop1": "value",
			},
			metadata: map[string]resources.AzureAPIProperty{
				"x-renamed": {
					SdkName: "prop1",
				},
			},
		})
		expected := resource.PropertyMap{
			"prop1": resource.NewStringProperty("value"),
		}
		assert.Equal(t, expected, actual)
	})
	// A constant property always previews to its constant value, even when the
	// input is absent or of a mismatched type.
	t.Run("constant property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"type": {
				Const: "AAA",
			},
		}
		expected := resource.PropertyMap{
			"type": resource.NewStringProperty("AAA"),
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"type": "AAA",
				},
				metadata: metadata,
			})
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"type": 123,
				},
				metadata: metadata,
			})
			assert.Equal(t, expected, actual)
		})
	})
	// Matching string inputs pass through; unknowns and mismatches become computed.
	t.Run("string property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"name": {
				Type: "string",
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"name": "MyResource",
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"name": resource.NewStringProperty("MyResource"),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"name": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"name": 123,
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"name": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
	})
	// Same pattern for numeric properties.
	t.Run("number property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"threshold": {
				Type: "number",
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"threshold": 123,
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"threshold": resource.NewNumberProperty(123),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"threshold": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"threshold": "123",
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"threshold": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
	})
	// Same pattern for boolean properties.
	t.Run("bool property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"enabled": {
				Type: "boolean",
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"enabled": true,
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"enabled": resource.NewBoolProperty(true),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"enabled": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"enabled": 123,
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"enabled": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
	})
	// Union (oneOf) object inputs fall back to legacy pass-through behaviour;
	// non-object inputs become computed.
	t.Run("union property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"union": {
				OneOf: []string{"#/types/azure-native:testing:OptionA", "#/types/azure-native:testing:OptionB"},
			},
		}
		types := map[string]resources.AzureAPIType{
			"azure-native:testing:OptionA": {
				Properties: map[string]resources.AzureAPIProperty{
					"type": {
						Const: "AAA",
					},
					"a": {
						Containers: []string{"aa"},
					},
				},
			},
			"azure-native:testing:OptionB": {
				Properties: map[string]resources.AzureAPIProperty{
					"type": {
						Const: "BBB",
					},
					"b": {
						Containers: []string{"bb"},
					},
				},
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"union": map[string]interface{}{
						"type": "AAA",
						"a":    "a",
					},
				},
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"union": resource.NewObjectProperty(resource.PropertyMap{
					"type": resource.NewStringProperty("AAA"),
					"a":    resource.NewStringProperty("a"),
				}),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"union": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"union": 123,
				},
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"union": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
	})
	// Array elements are previewed individually: a mismatched element becomes
	// computed while valid siblings pass through.
	t.Run("string array property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"array": {
				Type: "array",
				Items: &resources.AzureAPIProperty{
					Type: "string",
				},
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"array": []interface{}{
						"hello",
						"world",
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"array": resource.NewArrayProperty([]resource.PropertyValue{
					resource.NewStringProperty("hello"),
					resource.NewStringProperty("world"),
				}),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"array": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"array": 123,
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"array": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch element", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"array": []interface{}{
						"hello",
						123,
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"array": resource.NewArrayProperty([]resource.PropertyValue{
					resource.NewStringProperty("hello"),
					resource.MakeComputed(resource.NewStringProperty("")),
				})}
			assert.Equal(t, expected, actual)
		})
	})
	// Map values are previewed individually against the additional-properties type.
	t.Run("string map property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"object": {
				Type: "object",
				AdditionalProperties: &resources.AzureAPIProperty{
					Type: "string",
				},
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"object": map[string]interface{}{
						"key1": "value1",
						"key2": "value2",
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"object": resource.NewObjectProperty(resource.PropertyMap{
					"key1": resource.NewStringProperty("value1"),
					"key2": resource.NewStringProperty("value2"),
				}),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"object": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"object": 123,
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"object": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch element", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"object": map[string]interface{}{
						"key1": "value1",
						"key2": 123,
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				"object": resource.NewObjectProperty(resource.PropertyMap{
					"key1": resource.NewStringProperty("value1"),
					"key2": resource.MakeComputed(resource.NewStringProperty("")),
				}),
			}
			assert.Equal(t, expected, actual)
		})
	})
	// Ref-typed ("complex") objects recurse into the named type's properties.
	t.Run("complex property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"complex": {
				Ref: "#/types/azure-native:testing:StructureResponse",
			},
		}
		types := map[string]resources.AzureAPIType{
			"azure-native:testing:StructureResponse": {
				Properties: map[string]resources.AzureAPIProperty{
					"v1": {
						Type: "string",
					},
				},
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"complex": map[string]interface{}{
						"v1": "value1",
					},
				},
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"complex": resource.NewObjectProperty(resource.PropertyMap{
					"v1": resource.NewStringProperty("value1"),
				}),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"complex": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"complex": 123,
				},
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"complex": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch element", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"complex": map[string]interface{}{
						"v1": 123,
					},
				},
				metadata: metadata,
				types:    types,
			})
			expected := resource.PropertyMap{
				"complex": resource.NewObjectProperty(resource.PropertyMap{
					"v1": resource.MakeComputed(resource.NewStringProperty("")),
				}),
			}
			assert.Equal(t, expected, actual)
		})
	})
	// An array input against an object-typed property is never valid, so the
	// whole value is marked as computed in every case.
	t.Run("string set property", func(t *testing.T) {
		metadata := map[string]resources.AzureAPIProperty{
			"userAssignedIdentities": {
				Type: "object",
				AdditionalProperties: &resources.AzureAPIProperty{
					Type: "string",
				},
			},
		}
		t.Run("valid", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"userAssignedIdentities": []interface{}{
						"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-name/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity-name",
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				// Whole object just marked as computed
				"userAssignedIdentities": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("unknown", func(t *testing.T) {
			actual := preview(previewTestCase{
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				// Whole object just marked as computed
				"userAssignedIdentities": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"userAssignedIdentities": []interface{}{
						123,
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				// Whole object just marked as computed
				"userAssignedIdentities": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
		t.Run("mismatch element", func(t *testing.T) {
			actual := preview(previewTestCase{
				inputs: map[string]interface{}{
					"userAssignedIdentities": []interface{}{
						"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-name/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity-name",
						123,
					},
				},
				metadata: metadata,
			})
			expected := resource.PropertyMap{
				// Whole object just marked as computed
				"userAssignedIdentities": resource.MakeComputed(resource.NewStringProperty("")),
			}
			assert.Equal(t, expected, actual)
		})
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/propGenerators_test.go | provider/pkg/convert/propGenerators_test.go | package convert
import (
"pgregory.net/rapid"
)
// propSimpleValue generates a single scalar value: a string, int, float64, or bool.
func propSimpleValue() *rapid.Generator[interface{}] {
	scalars := []*rapid.Generator[interface{}]{
		rapid.String().AsAny(),
		rapid.Int().AsAny(),
		rapid.Float64().AsAny(),
		rapid.Bool().AsAny(),
	}
	return rapid.OneOf(scalars...)
}
// propNonRecursiveMap generates a non-empty map of simple (scalar) values.
func propNonRecursiveMap() *rapid.Generator[map[string]interface{}] {
	return rapid.MapOfN(rapid.String(), propSimpleValue(), 1, -1)
}
// propNonRecursiveArray generates a non-empty slice of simple (scalar) values.
func propNonRecursiveArray() *rapid.Generator[[]interface{}] {
	return rapid.SliceOfN(propSimpleValue(), 1, -1)
}
// propComplex generates a scalar, or a flat (one level deep) map or slice of scalars.
func propComplex() *rapid.Generator[any] {
	options := []*rapid.Generator[interface{}]{
		propSimpleValue(),
		propNonRecursiveMap().AsAny(),
		propNonRecursiveArray().AsAny(),
	}
	return rapid.OneOf(options...)
}
// propComplexArray generates a non-empty slice of complex values.
func propComplexArray() *rapid.Generator[[]interface{}] {
	return rapid.SliceOfN(propComplex(), 1, -1)
}
// propComplexMap generates a non-empty map of complex values.
func propComplexMap() *rapid.Generator[map[string]interface{}] {
	keys := rapid.String()
	values := propComplex()
	return rapid.MapOfN(keys, values, 1, -1)
}
// propNestedComplex generates any complex value, including nested arrays and maps.
func propNestedComplex() *rapid.Generator[any] {
	options := []*rapid.Generator[interface{}]{
		propComplex(),
		propComplexArray().AsAny(),
		propComplexMap().AsAny(),
	}
	return rapid.OneOf(options...)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/isDefaultResponse.go | provider/pkg/convert/isDefaultResponse.go | package convert
import (
"reflect"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
)
// IsDefaultResponse returns true if the shape of the HTTP response matches the expected shape.
// The following comparison rules apply:
//   - response is converted to the SDK shape of inputs (so, the structure is flattened and read-only props are removed)
//   - A boolean 'false' in the response is equivalent to a no-value in the expected map
//   - Any non-empty map or slice leads to the 'false' result (may need to revise if any API endpoints have default
//     non-empty collections, but none are found yet)
//   - The response need not contain all properties that are present in the default body.
func (k *SdkShapeConverter) IsDefaultResponse(putParameters []resources.AzureAPIParameter, response map[string]interface{},
	defaultBody map[string]interface{}) bool {
	for _, param := range putParameters {
		if param.Location != body {
			continue
		}
		for key, value := range k.ResponseBodyToSdkOutputs(param.Body.Properties, response) {
			// A plain index is sufficient: a missing key yields nil, which
			// isDefaultValue treats as an absent default. (The previous
			// `defaultValue, _ :=` form is flagged by staticcheck S1005.)
			if !isDefaultValue(value, defaultBody[key]) {
				return false
			}
		}
	}
	return true
}
// isDefaultValue reports whether a response value matches its expected default
// under the rules documented on IsDefaultResponse: collections must be empty or
// match the default map key by key, a boolean false (like a nil) is equivalent
// to a no-value and matches anything, and other values must equal the default
// unless the default is the "*" wildcard.
func isDefaultValue(value any, defaultValue any) bool {
	// reflect.TypeOf(nil) returns a nil reflect.Type, and calling Kind() on it
	// panics. Nested map entries can be nil (e.g. explicit JSON nulls), so guard
	// here: a nil value is equivalent to a no-value and matches any default,
	// consistent with the boolean-false rule above.
	if value == nil {
		return true
	}
	switch reflect.TypeOf(value).Kind() {
	case reflect.Slice, reflect.Array:
		collection := reflect.ValueOf(value)
		// TODO: Add support for non-empty collections in the case that provider/pkg/openapi/defaults/defaultResourcesState has non-empty arrays added.
		if collection.Len() > 0 {
			return false
		}
	case reflect.Map:
		iter := reflect.ValueOf(value).MapRange()
		for iter.Next() {
			mk := iter.Key().String()
			mv := iter.Value().Interface()
			defaultMap, ok := defaultValue.(map[string]interface{})
			if !ok {
				return false
			}
			// Renamed from `defaultValue` to avoid shadowing the parameter.
			entryDefault, has := defaultMap[mk]
			if !has || !isDefaultValue(mv, entryDefault) {
				return false
			}
		}
	case reflect.Bool:
		// A false boolean is equivalent to a no-value and matches any default.
		b := value.(bool)
		if b && defaultValue != value {
			return false
		}
	default:
		// `*` default body means that we want to accept any value there.
		// It's used for values that are determined dynamically by Azure API.
		if defaultValue != value && defaultValue != "*" {
			return false
		}
	}
	return true
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/sdkOutputsToSdkInputs.go | provider/pkg/convert/sdkOutputsToSdkInputs.go | package convert
import (
"reflect"
"sort"
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
)
// SdkOutputsToSdkInputs converts resource outputs (not a response body, but already valid outputs) to corresponding
// resource inputs, excluding the read-only properties from the map.
func (k *SdkShapeConverter) SdkOutputsToSdkInputs(parameters []resources.AzureAPIParameter, outputs map[string]interface{}) map[string]interface{} {
for _, param := range parameters {
if param.Location == body {
return k.sdkOutputsToSDKInputs(param.Body.Properties, outputs)
}
}
return nil
}
func (k *SdkShapeConverter) sdkOutputsToSDKInputs(props map[string]resources.AzureAPIProperty, outputs map[string]interface{}) map[string]interface{} {
result := map[string]interface{}{}
for name, prop := range props {
sdkName := name
if prop.SdkName != "" {
sdkName = prop.SdkName
}
if value, ok := outputs[sdkName]; ok {
if prop.Const != nil && value != prop.Const {
return nil
}
p := prop // https://go.dev/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
result[sdkName] = k.convertOutputToInputPropValue(&p, value)
}
}
return result
}
// convertOutputToInputPropValue converts an output value back to an input value.
func (k *SdkShapeConverter) convertOutputToInputPropValue(prop *resources.AzureAPIProperty, value interface{}) interface{} {
return k.convertTypedSdkOutputObjectsToSdkInput(prop, value, func(typeName string, props map[string]resources.AzureAPIProperty, values map[string]interface{}) map[string]interface{} {
return k.sdkOutputsToSDKInputs(props, values)
})
}
// convertTypedSdkOutputObjectsToSdkInput recursively finds map types with a known type and calls convertMap on them.
func (k *SdkShapeConverter) convertTypedSdkOutputObjectsToSdkInput(prop *resources.AzureAPIProperty, value interface{}, convertObject convertTypedObject) interface{} {
// This line is unreachable in the current implementation, but good to have for safety to prevent Kind() of a nil type from causing a nil pointer dereference.
if value == nil {
return nil
}
switch reflect.TypeOf(value).Kind() {
case reflect.Map:
// For union types, iterate through types and find the first one that matches the shape.
for _, t := range prop.OneOf {
typeName := strings.TrimPrefix(t, "#/types/")
typ, ok, err := k.GetType(typeName)
if !ok || err != nil {
continue
}
request := convertObject(typeName, typ.Properties, value.(map[string]interface{}))
if request != nil {
return request
}
}
valueMap, ok := value.(map[string]interface{})
if !ok {
return value
}
if strings.HasPrefix(prop.Ref, "#/types/") {
typeName := strings.TrimPrefix(prop.Ref, "#/types/")
typ, ok, err := k.GetType(typeName)
if !ok || err != nil {
return value
}
return convertObject(typeName, typ.Properties, valueMap)
}
if prop.AdditionalProperties != nil {
result := map[string]interface{}{}
for key, item := range valueMap {
result[key] = k.convertTypedSdkOutputObjectsToSdkInput(prop.AdditionalProperties, item, convertObject)
}
return result
}
if prop.IsStringSet {
result := make([]interface{}, 0)
for key := range valueMap {
result = append(result, key)
}
sort.SliceStable(result, func(i, j int) bool {
return result[i].(string) < result[j].(string)
})
return result
}
return value
case reflect.Slice, reflect.Array:
if prop.Items == nil {
return value
}
result := make([]interface{}, 0)
s := reflect.ValueOf(value)
for i := 0; i < s.Len(); i++ {
result = append(result, k.convertTypedSdkOutputObjectsToSdkInput(prop.Items, s.Index(i).Interface(), convertObject))
}
return result
}
return value
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/sdkOutputsToSDKInputs_test.go | provider/pkg/convert/sdkOutputsToSDKInputs_test.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
func TestSdkOutputsToSdkInputs(t *testing.T) {
t.Run("nil outputs returns nil", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
outputs: nil,
})
var expected map[string]interface{} = nil
assert.Equal(t, expected, actual)
})
t.Run("untyped non-empty values remain unchanged", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"untyped": {},
},
outputs: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"untyped": value,
}
assert.Equal(t, expected, actual)
}))
t.Run("any type values", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"untyped": {
Ref: TypeAny,
},
},
outputs: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"untyped": value,
}
assert.Equal(t, expected, actual)
}))
t.Run("renamed", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"x-threshold": {
SdkName: "threshold",
},
},
outputs: map[string]interface{}{
"threshold": 123,
},
})
var expected = map[string]interface{}{
"threshold": 123,
}
assert.Equal(t, expected, actual)
})
t.Run("mismatched const returns nil", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"const": {
Const: "value",
},
},
outputs: map[string]interface{}{
"const": "other",
},
})
var expected map[string]interface{} = nil
assert.Equal(t, expected, actual)
})
t.Run("array of empties not changed", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"emptyArray": {
Type: "array",
},
},
outputs: map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
},
})
var expected = map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
}
assert.Equal(t, expected, actual)
})
t.Run("map of empties unchanged", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"emptyDict": {
Type: "object",
},
},
outputs: map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
},
})
var expected = map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
}
assert.Equal(t, expected, actual)
})
t.Run("typed array doesn't change items", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"typedArray": {
Type: "array",
Items: &resources.AzureAPIProperty{
Type: "string",
},
},
},
outputs: map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
},
})
var expected = map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
}
assert.Equal(t, expected, actual)
})
t.Run("typed map doesn't change items", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"typedMap": {
Type: "object",
AdditionalProperties: &resources.AzureAPIProperty{
Type: "string",
},
},
},
outputs: map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
},
})
var expected = map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
}
assert.Equal(t, expected, actual)
})
t.Run("string set mapped back to string list", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"userAssignedIdentities": {
Type: "object",
IsStringSet: true,
},
},
outputs: map[string]interface{}{
"userAssignedIdentities": map[string]interface{}{
"a": "b",
"c": map[string]interface{}{
"d": "e",
},
},
},
})
var expected = map[string]interface{}{
"userAssignedIdentities": []interface{}{"a", "c"},
}
assert.Equal(t, expected, actual)
})
t.Run("missing ref type continues with no change", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"p": {
Ref: "#/types/azure-native:testing:Type1",
},
},
outputs: map[string]interface{}{
"p": map[string]interface{}{
"k": "v",
},
},
})
expected := map[string]interface{}{
"p": map[string]interface{}{
"k": "v",
},
}
assert.Equal(t, expected, actual)
})
t.Run("missing oneOf type continues with no change", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"oneOf": {
OneOf: []string{"#types/azure-native:testing:Type1", "#types/azure-native:testing:Type2"},
},
},
outputs: map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "value",
},
},
})
expected := map[string]interface{}{
"oneOf": map[string]interface{}{
"prop1": "value",
},
}
assert.Equal(t, expected, actual)
})
}
func TestSdkOutputsToSdkInputsNestedTypes(t *testing.T) {
// Use the same body shape for all sub-tests
// but change the definition of SubType in each test.
bodyParams := map[string]resources.AzureAPIProperty{
"nested": {
Ref: "#/types/azure-native:testing:SubType",
},
}
t.Run("untyped simple value", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"value": {},
},
},
bodyParameters: bodyParams,
outputs: map[string]interface{}{
"nested": map[string]interface{}{
"value": value,
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"value": value,
},
}
assert.Equal(t, expected, actual)
}))
t.Run("empty object unchanged", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"name": {},
},
},
bodyParameters: bodyParams,
outputs: map[string]interface{}{
"nested": map[string]interface{}{},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{},
}
assert.Equal(t, expected, actual)
})
t.Run("sub-id not ignored", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"id": {},
},
},
bodyParameters: bodyParams,
outputs: map[string]interface{}{
"nested": map[string]interface{}{
"id": "id-value",
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"id": "id-value",
},
}
assert.Equal(t, expected, actual)
})
t.Run("renamed", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"x-renamed": {
SdkName: "renamed",
},
},
},
bodyParameters: bodyParams,
outputs: map[string]interface{}{
"nested": map[string]interface{}{
"renamed": "value",
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"renamed": "value",
},
}
assert.Equal(t, expected, actual)
})
t.Run("mismatched const ignored", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"const": {
Const: "value",
},
},
},
bodyParameters: bodyParams,
outputs: map[string]interface{}{
"nested": map[string]interface{}{
"const": "other",
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}(nil),
}
assert.Equal(t, expected, actual)
})
t.Run("string set mapped back to string list", func(t *testing.T) {
actual := testSdkOutputsToSDKInputs(sdkOutputsToSDKInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"userAssignedIdentities": {
Type: "object",
IsStringSet: true,
},
},
},
bodyParameters: bodyParams,
outputs: map[string]interface{}{
"nested": map[string]interface{}{
"userAssignedIdentities": map[string]interface{}{
"a": "b",
"c": map[string]interface{}{
"d": "e",
},
},
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"userAssignedIdentities": []interface{}{"a", "c"},
},
}
assert.Equal(t, expected, actual)
})
}
type sdkOutputsToSDKInputsTestCase struct {
outputs map[string]interface{}
bodyParameters map[string]resources.AzureAPIProperty
extraParameters []resources.AzureAPIParameter
types map[string]map[string]resources.AzureAPIProperty
}
func testSdkOutputsToSDKInputs(testCase sdkOutputsToSDKInputsTestCase) map[string]interface{} {
types := map[string]resources.AzureAPIType{}
if testCase.types != nil {
for typeName, typeProperties := range testCase.types {
types[typeName] = resources.AzureAPIType{
Properties: typeProperties,
}
}
}
c := NewSdkShapeConverterFull(types)
parameters := testCase.extraParameters
if testCase.bodyParameters != nil {
parameters = append(parameters, resources.AzureAPIParameter{
Location: body,
Body: &resources.AzureAPIType{
Properties: testCase.bodyParameters,
},
})
}
return c.SdkOutputsToSdkInputs(parameters, testCase.outputs)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/sdkInputsToRequestBody.go | provider/pkg/convert/sdkInputsToRequestBody.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"fmt"
"reflect"
"sort"
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
)
// SdkInputsToRequestBody converts a map of SDK properties to JSON request body to be sent to an HTTP API.
// Returns a map of request body properties and a map of unmapped values.
func (k *SdkShapeConverter) SdkInputsToRequestBody(props map[string]resources.AzureAPIProperty,
values map[string]interface{}, id string) (map[string]interface{}, error) {
result := map[string]interface{}{}
unusedValues := map[string]interface{}{}
for name, value := range values {
if _, ok := props[name]; !ok {
unusedValues[name] = value
}
}
for name, prop := range props {
p := prop // https://go.dev/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
sdkName := name
if prop.SdkName != "" {
sdkName = prop.SdkName
}
if value, has := values[sdkName]; has {
delete(unusedValues, sdkName)
if prop.Const != nil && value != prop.Const {
return nil, fmt.Errorf("property %s is a constant of value %q and cannot be modified to be %q", name, prop.Const, value)
}
container := k.buildContainer(result, prop.Containers)
container[name] = k.convertSdkPropToRequestBodyPropValue(id, &p, value)
}
}
if len(unusedValues) > 0 {
unusedKeys := make([]string, 0, len(unusedValues))
for k := range unusedValues {
unusedKeys = append(unusedKeys, k)
}
sort.Strings(unusedKeys)
logging.V(9).Infof("Unrecognized properties in SdkInputsToRequestBody: %v", unusedKeys)
}
return result, nil
}
// convertSdkPropToRequestBodyPropValue converts an SDK property to a value to be used in a request body.
func (k *SdkShapeConverter) convertSdkPropToRequestBodyPropValue(id string, prop *resources.AzureAPIProperty, value interface{}) interface{} {
return k.convertTypedSdkInputObjectsToRequestBody(prop, value, func(typeName string, props map[string]resources.AzureAPIProperty, values map[string]interface{}) map[string]interface{} {
// Detect if we are dealing with a special case of a SubResource type with an ID property.
// These properties reference a sub-ID of the currently modified resource (e.g.
// an ID of a backend pool in a load balancer while creating the load balancer).
// In that case, we allow users to specify a relative ID ($self/backendPool/abc) instead of
// specifying the full Azure resource ID explicitly.
// The block below takes care of resolving those relative IDs to absolute IDs.
if _, _, resourceName, err := resources.ParseToken(typeName); err == nil && resourceName == "SubResource" {
if relId, ok := values["id"].(string); ok && strings.HasPrefix(relId, "$self/") {
values["id"] = strings.Replace(relId, "$self", id, 1)
}
}
// Otherwise, delegate to the normal map conversion flow.
converted, _ := k.SdkInputsToRequestBody(props, values, id)
// We ignore the error here as we haven't previously handled these errors recursively through convertTypedSdkInputObjectsToRequestBody.
// A difficulty is that the error is treated as a warning if we got a valid result, so should only be thrown if `converted` is nil.
return converted
})
}
// buildContainer creates a nested container for each item in 'path' and returns that inner-most container.
// For instance, a 'path' of ["top", "bottom"] would return a map, which is assigned to a key "bottom" in another
// map, which is assigned to a key "top" in the 'parent' map.
func (k *SdkShapeConverter) buildContainer(parent map[string]interface{}, path []string) map[string]interface{} {
for _, containerName := range path {
container := map[string]interface{}{}
if v, ok := parent[containerName]; ok {
if v, ok := v.(map[string]interface{}); ok {
container = v
}
}
parent[containerName] = container
parent = container
}
return parent
}
// convertTypedSdkInputObjectsToRequestBody recursively finds map types with a known type and calls convertMap on them.
func (k *SdkShapeConverter) convertTypedSdkInputObjectsToRequestBody(prop *resources.AzureAPIProperty, value interface{}, convertObject convertTypedObject) interface{} {
if value == nil {
return nil
}
switch reflect.TypeOf(value).Kind() {
case reflect.Map:
// For union types, iterate through types and find the first one that matches the shape.
for _, t := range prop.OneOf {
typeName := strings.TrimPrefix(t, "#/types/")
typ, ok, err := k.GetType(typeName)
if !ok || err != nil {
continue
}
request := convertObject(typeName, typ.Properties, value.(map[string]interface{}))
if request != nil {
return request
}
}
valueMap, ok := value.(map[string]interface{})
if !ok {
return value
}
if strings.HasPrefix(prop.Ref, "#/types/") {
typeName := strings.TrimPrefix(prop.Ref, "#/types/")
typ, ok, err := k.GetType(typeName)
if !ok || err != nil {
return value
}
return convertObject(typeName, typ.Properties, valueMap)
}
if prop.AdditionalProperties != nil {
result := map[string]interface{}{}
for key, item := range valueMap {
result[key] = k.convertTypedSdkInputObjectsToRequestBody(prop.AdditionalProperties, item, convertObject)
}
return result
}
return value
case reflect.Slice, reflect.Array:
if prop.IsStringSet {
emptyValue := struct{}{}
setResult := map[string]interface{}{}
for _, setItem := range value.([]interface{}) {
if reflect.TypeOf(setItem).Kind() != reflect.String {
// This should have been handled by validation
continue
}
setResult[setItem.(string)] = emptyValue
}
return setResult
}
if prop.Items == nil {
return value
}
result := make([]interface{}, 0)
s := reflect.ValueOf(value)
for i := 0; i < s.Len(); i++ {
result = append(result, k.convertTypedSdkInputObjectsToRequestBody(prop.Items, s.Index(i).Interface(), convertObject))
}
return result
}
return value
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/convertTypedObjects.go | provider/pkg/convert/convertTypedObjects.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
)
// convertTypedObject is a callback that performs some kind of arbitrary conversion of an object for which we know its property types.
// Returning nil indicates that the given value is not of the expected type.
type convertTypedObject func(typeName string, props map[string]resources.AzureAPIProperty, values map[string]interface{}) map[string]interface{}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/responseToSdkInputs_test.go | provider/pkg/convert/responseToSdkInputs_test.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
func TestResponseToSdkInputsPathParams(t *testing.T) {
t.Run("simple", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
pathParameters: []resources.AzureAPIParameter{
pathParam("resourceGroupName"),
},
pathValues: map[string]string{
"resourceGroupName": "rg-name",
},
})
// SubscriptionId is not included in the inputs
var expected = map[string]interface{}{
"resourceGroupName": "rg-name",
}
assert.Equal(t, expected, actual)
})
t.Run("subscription is removed", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
pathParameters: []resources.AzureAPIParameter{
pathParam("subscriptionId"),
},
pathValues: map[string]string{
"subscriptionID": "0282681f-7a9e-123b-40b2-96babd57a8a1",
},
})
var expected = map[string]interface{}{}
assert.Equal(t, expected, actual)
})
t.Run("renamed", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
pathParameters: []resources.AzureAPIParameter{
{
Name: "x-threshold",
Location: "path",
Value: &resources.AzureAPIProperty{
SdkName: "threshold",
},
},
},
pathValues: map[string]string{
"x-threshold": "123",
},
})
var expected = map[string]interface{}{
"threshold": "123",
}
assert.Equal(t, expected, actual)
})
}
func TestResponseToSdkInputsBodyProps(t *testing.T) {
t.Run("untyped non-empty values remain unchanged", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"untyped": {},
},
body: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"untyped": value,
}
assert.Equal(t, expected, actual)
}))
t.Run("any type values", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"untyped": {
Ref: TypeAny,
},
},
body: map[string]interface{}{
"untyped": value,
},
})
var expected = map[string]interface{}{
"untyped": value,
}
assert.Equal(t, expected, actual)
}))
t.Run("id-ignored", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"id": {},
},
body: map[string]interface{}{
"id": "123",
},
})
// Top-level SubscriptionId is not included in the inputs
var expected = map[string]interface{}{}
assert.Equal(t, expected, actual)
})
t.Run("renamed", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"x-threshold": {
SdkName: "threshold",
},
},
body: map[string]interface{}{
"x-threshold": 123,
},
})
var expected = map[string]interface{}{
"threshold": 123,
}
assert.Equal(t, expected, actual)
})
t.Run("containers", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"prop": {
Containers: []string{"container"},
},
},
body: map[string]interface{}{
"container": map[string]interface{}{
"prop": "value",
},
},
})
var expected = map[string]interface{}{
"prop": "value",
}
assert.Equal(t, expected, actual)
})
t.Run("nested containers", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"prop": {
Containers: []string{"a", "b", "c"},
},
},
body: map[string]interface{}{
"a": map[string]interface{}{
"b": map[string]interface{}{
"c": map[string]interface{}{
"prop": "value",
},
},
},
},
})
var expected = map[string]interface{}{
"prop": "value",
}
assert.Equal(t, expected, actual)
})
t.Run("mismatched const ignored", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"const": {
Const: "value",
},
},
body: map[string]interface{}{
"const": "other",
},
})
var expected = map[string]interface{}{}
assert.Equal(t, expected, actual)
})
t.Run("array of empties replaced with nil", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"emptyArray": {
Type: "array",
},
},
body: map[string]interface{}{
"emptyArray": []interface{}{nil, []interface{}{}, map[string]interface{}{}},
},
})
var expected = map[string]interface{}{
"emptyArray": nil,
}
assert.Equal(t, expected, actual)
})
t.Run("map of empties replaced with nil", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"emptyDict": {
Type: "object",
},
},
body: map[string]interface{}{
"emptyDict": map[string]interface{}{"a": nil, "b": map[string]interface{}{}, "c": []interface{}{}},
},
})
var expected = map[string]interface{}{
"emptyDict": nil,
}
assert.Equal(t, expected, actual)
})
t.Run("typed array doesn't change items", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"typedArray": {
Type: "array",
Items: &resources.AzureAPIProperty{
Type: "string",
},
},
},
body: map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
},
})
var expected = map[string]interface{}{
"typedArray": []interface{}{"a", "b", 3},
}
assert.Equal(t, expected, actual)
})
t.Run("typed map doesn't change items", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"typedMap": {
Type: "object",
AdditionalProperties: &resources.AzureAPIProperty{
Type: "string",
},
},
},
body: map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
},
})
var expected = map[string]interface{}{
"typedMap": map[string]interface{}{"a": "b", "c": 3},
}
assert.Equal(t, expected, actual)
})
t.Run("string set", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
bodyParameters: map[string]resources.AzureAPIProperty{
"userAssignedIdentities": {
Type: "object",
IsStringSet: true,
},
},
body: map[string]interface{}{
"userAssignedIdentities": map[string]interface{}{
"a": "b",
"c": map[string]interface{}{
"d": "e",
},
},
},
})
var expected = map[string]interface{}{
"userAssignedIdentities": []interface{}{"a", "c"},
}
assert.Equal(t, expected, actual)
})
}
func TestResponseToSdkInputsNestedTypes(t *testing.T) {
bodyParams := map[string]resources.AzureAPIProperty{
"nested": {
Ref: "#/types/azure-native:testing:SubType",
},
}
t.Run("untyped simple value", rapid.MakeCheck(func(t *rapid.T) {
value := propNestedComplex().Draw(t, "value")
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"value": {},
},
},
bodyParameters: bodyParams,
body: map[string]interface{}{
"nested": map[string]interface{}{
"value": value,
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"value": value,
},
}
assert.Equal(t, expected, actual)
}))
t.Run("empty object replaced with nil", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"name": {},
},
},
bodyParameters: bodyParams,
body: map[string]interface{}{
"nested": map[string]interface{}{},
},
})
var expected = map[string]interface{}{
"nested": nil,
}
assert.Equal(t, expected, actual)
})
t.Run("sub-id not ignored", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"id": {},
},
},
bodyParameters: bodyParams,
body: map[string]interface{}{
"nested": map[string]interface{}{
"id": "id-value",
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"id": "id-value",
},
}
assert.Equal(t, expected, actual)
})
t.Run("renamed", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"x-renamed": {
SdkName: "renamed",
},
},
},
bodyParameters: bodyParams,
body: map[string]interface{}{
"nested": map[string]interface{}{
"x-renamed": "value",
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"renamed": "value",
},
}
assert.Equal(t, expected, actual)
})
t.Run("containered", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"containered": {
Containers: []string{"props"},
},
},
},
bodyParameters: bodyParams,
body: map[string]interface{}{
"nested": map[string]interface{}{
"props": map[string]interface{}{
"containered": true,
},
},
},
})
var expected = map[string]interface{}{
"nested": map[string]interface{}{
"containered": true,
},
}
assert.Equal(t, expected, actual)
})
t.Run("mismatched const ignored", func(t *testing.T) {
actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
types: map[string]map[string]resources.AzureAPIProperty{
"azure-native:testing:SubType": {
"const": {
Const: "value",
},
},
},
bodyParameters: bodyParams,
body: map[string]interface{}{
"nested": map[string]interface{}{
"const": "other",
},
},
})
var expected = map[string]interface{}{
"nested": nil,
}
assert.Equal(t, expected, actual)
})
}
// TestResponseToSdkInputsUnionTypes verifies that ResponseToSdkInputs resolves
// oneOf unions: the member whose "type" const matches the response body is
// selected, and that member's container-nested properties are un-nested into
// the flat SDK shape.
func TestResponseToSdkInputsUnionTypes(t *testing.T) {
	// Two union members, each discriminated by the const value of "type", and
	// each with one property wrapped in a container ("aa"/"bb") in the API shape.
	types := map[string]map[string]resources.AzureAPIProperty{
		"azure-native:testing:OptionA": {
			"type": {
				Const: "AAA",
			},
			"a": {
				Containers: []string{"aa"},
			},
		},
		"azure-native:testing:OptionB": {
			"type": {
				Const: "BBB",
			},
			"b": {
				Containers: []string{"bb"},
			},
		},
	}
	bodyParams := map[string]resources.AzureAPIProperty{
		"union": {
			OneOf: []string{"#/types/azure-native:testing:OptionA", "#/types/azure-native:testing:OptionB"},
		},
	}
	// No union value in the response body: nothing to resolve.
	t.Run("neither", func(t *testing.T) {
		actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
			types:          types,
			bodyParameters: bodyParams,
			body:           map[string]interface{}{},
		})
		expected := map[string]interface{}{}
		assert.Equal(t, expected, actual)
	})
	// Discriminator "AAA" selects OptionA; "a" is lifted out of its "aa" container.
	t.Run("option a", func(t *testing.T) {
		actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
			types:          types,
			bodyParameters: bodyParams,
			body: map[string]interface{}{
				"union": map[string]interface{}{
					"type": "AAA",
					"aa": map[string]interface{}{
						"a": "value",
					},
				},
			},
		})
		expected := map[string]interface{}{
			"union": map[string]interface{}{
				"type": "AAA",
				"a":    "value",
			},
		}
		assert.Equal(t, expected, actual)
	})
	// Discriminator "BBB" selects OptionB; "b" is lifted out of its "bb" container.
	t.Run("option b", func(t *testing.T) {
		actual := testResponseToSdkInputs(responseToSdkInputsTestCase{
			types:          types,
			bodyParameters: bodyParams,
			body: map[string]interface{}{
				"union": map[string]interface{}{
					"type": "BBB",
					"bb": map[string]interface{}{
						"b": "value",
					},
				},
			},
		})
		expected := map[string]interface{}{
			"union": map[string]interface{}{
				"type": "BBB",
				"b":    "value",
			},
		}
		assert.Equal(t, expected, actual)
	})
}
// responseToSdkInputsTestCase bundles the inputs for a single
// ResponseToSdkInputs test: the raw HTTP response body, the API parameter
// metadata, and the type environment used to resolve references.
type responseToSdkInputsTestCase struct {
	// body is the raw response body as decoded JSON.
	body map[string]interface{}
	// bodyParameters describes the properties of the body parameter's schema.
	bodyParameters map[string]resources.AzureAPIProperty
	// pathParameters are extra path-located parameters (see pathParam).
	pathParameters []resources.AzureAPIParameter
	// pathValues maps path parameter names to their concrete values.
	pathValues map[string]string
	// types maps type tokens to their property maps; wrapped into
	// AzureAPIType values by testResponseToSdkInputs.
	types map[string]map[string]resources.AzureAPIProperty
}
// testResponseToSdkInputs runs ResponseToSdkInputs over the given test case,
// filling in empty defaults for the optional pieces.
func testResponseToSdkInputs(testCase responseToSdkInputsTestCase) map[string]interface{} {
	// Wrap the raw property maps into full AzureAPIType values.
	apiTypes := make(map[string]resources.AzureAPIType, len(testCase.types))
	for name, props := range testCase.types {
		apiTypes[name] = resources.AzureAPIType{
			Properties: props,
		}
	}
	converter := NewSdkShapeConverterFull(apiTypes)

	bodyProps := testCase.bodyParameters
	if bodyProps == nil {
		bodyProps = map[string]resources.AzureAPIProperty{}
	}
	// The body parameter always comes first, followed by any path parameters.
	params := append([]resources.AzureAPIParameter{bodyParam(bodyProps)}, testCase.pathParameters...)

	pathValues := testCase.pathValues
	if pathValues == nil {
		pathValues = map[string]string{}
	}
	return converter.ResponseToSdkInputs(params, pathValues, testCase.body)
}
// pathParam builds a minimal path-located API parameter with the given name.
func pathParam(name string) resources.AzureAPIParameter {
	param := resources.AzureAPIParameter{}
	param.Location = "path"
	param.Name = name
	return param
}
// bodyParam builds a body-located API parameter whose schema has the given properties.
func bodyParam(properties map[string]resources.AzureAPIProperty) resources.AzureAPIParameter {
	schema := resources.AzureAPIType{Properties: properties}
	return resources.AzureAPIParameter{
		Location: "body",
		Body:     &schema,
	}
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/convert/sdkShapeConverter.go | provider/pkg/convert/sdkShapeConverter.go | // Copyright 2016-2020, Pulumi Corporation.
package convert
import (
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
)
// body is the parameter location value identifying request-body parameters.
const body = "body"
// SdkShapeConverter provides functions to convert between HTTP request/response shapes and
// Pulumi SDK shapes (with flattening, renaming, etc.).
type SdkShapeConverter struct {
	// Types resolves fully qualified type tokens to their API type definitions.
	Types resources.MapLike[resources.AzureAPIType]
}
// NewSdkShapeConverterPartial builds a converter from any MapLike type source,
// e.g. a lazy/partial lookup rather than a fully materialized map.
func NewSdkShapeConverterPartial(ptypes resources.MapLike[resources.AzureAPIType]) SdkShapeConverter {
	return SdkShapeConverter{
		Types: ptypes,
	}
}
// NewSdkShapeConverterFull builds a converter from a fully materialized
// token-to-type map, wrapping it in the MapLike adapter.
func NewSdkShapeConverterFull(types map[string]resources.AzureAPIType) SdkShapeConverter {
	return SdkShapeConverter{
		Types: resources.GoMap[resources.AzureAPIType](types),
	}
}
// GetType looks up a type definition by its fully qualified token. The boolean
// reports whether the type was found; a nil Types source yields not-found
// without an error.
func (k *SdkShapeConverter) GetType(name string) (resources.AzureAPIType, bool, error) {
	if k.Types == nil {
		return resources.AzureAPIType{}, false, nil
	}
	return k.Types.Get(name)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/additionalInvokes.go | provider/pkg/openapi/additionalInvokes.go | package openapi
import (
"strings"
"github.com/go-openapi/spec"
)
// We only create getFoo or listFoo functions in combination with a Foo resource. We don't turn all
// GET endpoints to functions. The assumption is that most of those aren't very useful. If we do want
// to include specific ones, we add them here. See, e.g., #2419.
func shouldIncludeInvoke(path string, op *spec.Operation) bool {
	if op == nil || op.Deprecated {
		return false
	}
	// #2419: listing of diagnostic settings categories.
	if !strings.HasSuffix(path, "/providers/Microsoft.Insights/diagnosticSettingsCategories") {
		return false
	}
	return op.OperationProps.ID == "DiagnosticSettingsCategory_List"
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/versioner_test.go | provider/pkg/openapi/versioner_test.go | // Copyright 2016-2020, Pulumi Corporation.
package openapi
import (
"testing"
"github.com/go-openapi/spec"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/collections"
"github.com/stretchr/testify/assert"
)
// Test cases:
// - Res 1 defined across all API versions
// - Res 2 defined in two API versions
// - Res 3 has been renamed but its path is the same, so we consider it the same resource
// (e.g., happened in Web module for ServerFarm -> AppServicePlan)
// - Res 4 is named consistently, but the path has changed over time
// (e.g., happened with several resources in ApiManagement)
var versionMap = map[ApiVersion]VersionResources{
	// Oldest version: Res4 doesn't exist yet.
	"2020-01-01": {
		Resources: map[string]*ResourceSpec{
			"Res1": makeResource("/someprefix/Microsoft.Foo/res1/{res1Name}", "Res 1 v1"),
			"Res2": makeResource("/someprefix/Microsoft.Foo/res2/{res2Name}", "Res 2 v1"),
			"Res3": makeResource("/someprefix/Microsoft.Foo/res3/{res3Name}", "Res 3 v1"),
		},
	},
	// Res3 renamed to Res3Renamed (same path); Res4 appears; Res2 last seen here.
	"2020-02-01": {
		Resources: map[string]*ResourceSpec{
			"Res1":        makeResource("/someprefix/Microsoft.Foo/res1/{res1Name}", "Res 1 v2"),
			"Res2":        makeResource("/someprefix/Microsoft.Foo/res2/{res2Name}", "Res 2 v2"),
			"Res3Renamed": makeResource("/someprefix/Microsoft.Foo/res3/{res3Name}", "Res 3 v2"),
			"Res4":        makeResource("/someprefix/Microsoft.Foo/res4/{res4Name}", "Res 4 v1"),
		},
	},
	// Res4 keeps its name but moves to a new path (Res-4) with a new parameter name.
	// Path normalization (lowercasing, {} for parameters) makes it the same path.
	"2020-03-01": {
		Resources: map[string]*ResourceSpec{
			"Res1":        makeResource("/someprefix/Microsoft.Foo/res1/{res1Name}", "Res 1 v3"),
			"Res3Renamed": makeResource("/someprefix/Microsoft.Foo/res3/{res3Name}", "Res 3 v3"),
			"Res4":        makeResource("/someprefix/Microsoft.Foo/Res-4/{res4AnotherName}", "Res 4 v2"),
		},
	},
	// The next version is "unknown" yet.
	"2020-04-01": {
		Resources: map[string]*ResourceSpec{
			"Res1": makeResource("/someprefix/Microsoft.Foo/res1/{res1Name}", "Res 1 v4"),
			"Res4": makeResource("/someprefix/Microsoft.Foo/Res-4/{res4AnotherName}", "Res 4 v3"),
		},
	},
}
// TestFindingPathVersions checks that calculatePathVersions groups API versions
// by normalized resource path (lowercased, parameter names collapsed to {}),
// so renames and parameter-name changes map to the same path key.
func TestFindingPathVersions(t *testing.T) {
	expected := map[string]*collections.OrderableSet[ApiVersion]{
		"/someprefix/microsoft.foo/res1/{}": collections.NewOrderableSet[ApiVersion]("2020-01-01", "2020-02-01", "2020-03-01", "2020-04-01"),
		"/someprefix/microsoft.foo/res2/{}": collections.NewOrderableSet[ApiVersion]("2020-01-01", "2020-02-01"),
		"/someprefix/microsoft.foo/res3/{}": collections.NewOrderableSet[ApiVersion]("2020-01-01", "2020-02-01", "2020-03-01"),
		"/someprefix/microsoft.foo/res4/{}": collections.NewOrderableSet[ApiVersion]("2020-02-01", "2020-03-01", "2020-04-01"),
	}
	actual := calculatePathVersions(versionMap)
	assert.Equal(t, expected, actual)
}
// TestSqueezeSimple checks RemoveResources: removing version1's resourceA must
// also drop invokeA (whose path sits under resourceA's path), while resourceB,
// invokeB, and all of version2 stay untouched.
func TestSqueezeSimple(t *testing.T) {
	modules := AzureModules{
		"module": {
			"version1": {
				Resources: map[ResourceName]*ResourceSpec{
					"resourceA": {
						Path: "/someprefix/Microsoft.Foo/res1/{res1Name}",
					},
					"resourceB": {
						Path: "/someprefix/Microsoft.Foo/res2/{res2Name}",
					},
				},
				Invokes: map[InvokeName]*ResourceSpec{
					"invokeA": {
						// Sits under resourceA.
						// Fixed fixture typo: the parameter was {res2Name}; it should
						// be {res1Name} to mirror resourceA's path. (Normalization
						// collapses parameters to {}, so the test outcome is the same.)
						Path: "/someprefix/Microsoft.Foo/res1/{res1Name}/invokeA",
					},
					"invokeB": {
						// Sits under resourceB
						Path: "/someprefix/Microsoft.Foo/res2/{res2Name}/invokeB",
					},
				},
			},
			"version2": {
				Resources: map[ResourceName]*ResourceSpec{
					"resourceA": {
						Path: "/someprefix/Microsoft.Foo/res1/{res1Name}",
					},
					"resourceB": {
						Path: "/someprefix/Microsoft.Foo/res2/{res2Name}",
					},
				},
				Invokes: map[InvokeName]*ResourceSpec{},
			},
		},
	}
	// Only version1's resourceA is removable; version2's copy replaces it.
	squeeze := RemovableResources{
		"azure-native:module/version1:resourceA": "azure-native:module/version2:resourceA",
	}
	filteredSpec := RemoveResources(modules, squeeze)
	expected := AzureModules{
		"module": {
			"version1": {
				Resources: map[ResourceName]*ResourceSpec{
					"resourceB": {
						Path: "/someprefix/Microsoft.Foo/res2/{res2Name}",
					},
				},
				Invokes: map[InvokeName]*ResourceSpec{
					"invokeB": {
						Path: "/someprefix/Microsoft.Foo/res2/{res2Name}/invokeB",
					},
				},
			},
			"version2": {
				Resources: map[ResourceName]*ResourceSpec{
					"resourceA": {
						Path: "/someprefix/Microsoft.Foo/res1/{res1Name}",
					},
					"resourceB": {
						Path: "/someprefix/Microsoft.Foo/res2/{res2Name}",
					},
				},
				Invokes: map[InvokeName]*ResourceSpec{},
			},
		},
	}
	assert.Equal(t, expected, filteredSpec)
}
// makeResource constructs a minimal ResourceSpec whose PUT operation carries
// the given description, used to distinguish resource versions in tests.
func makeResource(path, description string) *ResourceSpec {
	put := spec.Operation{}
	put.Description = description
	item := spec.PathItem{}
	item.Put = &put
	return &ResourceSpec{
		Path:     path,
		PathItem: &item,
	}
}
// TestSdkToApiVersion covers the conversion from SDK version strings
// (v20200101[suffix]) back to API versions (2020-01-01[-suffix]),
// including malformed inputs that must error.
func TestSdkToApiVersion(t *testing.T) {
	// testConvert asserts that input converts without error to expected.
	testConvert := func(t *testing.T, input SdkVersion, expected ApiVersion) {
		t.Helper()
		actual, err := SdkToApiVersion(input)
		assert.Nil(t, err)
		assert.Equal(t, expected, actual)
	}
	t.Run("stable", func(t *testing.T) {
		testConvert(t, "v20200101", "2020-01-01")
	})
	t.Run("preview", func(t *testing.T) {
		testConvert(t, "v20200101preview", "2020-01-01-preview")
	})
	t.Run("privatepreview", func(t *testing.T) {
		testConvert(t, "v20200101privatepreview", "2020-01-01-privatepreview")
	})
	t.Run("beta", func(t *testing.T) {
		testConvert(t, "v20200101beta", "2020-01-01-beta")
	})
	// Error cases: each violates one part of the v<yyyymmdd>[suffix] shape.
	t.Run("missing leading v", func(t *testing.T) {
		_, err := SdkToApiVersion("20200101privatepreview")
		assert.Error(t, err)
	})
	t.Run("date too short", func(t *testing.T) {
		_, err := SdkToApiVersion("v2020011")
		assert.Error(t, err)
	})
	t.Run("unknown suffix", func(t *testing.T) {
		_, err := SdkToApiVersion("v20200101foo")
		assert.Error(t, err)
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/resolver.go | provider/pkg/openapi/resolver.go | // Copyright 2016-2020, Pulumi Corporation.
package openapi
import (
"fmt"
"net/url"
"reflect"
"regexp"
"github.com/go-openapi/jsonreference"
"github.com/go-openapi/spec"
"github.com/go-openapi/swag"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/util"
)
// ReferenceContext contains a pointer to a swagger schema and can navigate references from that schema.
// A swagger specification may consist of multiple files with definitions pointing between those files. In order to
// resolve those definitions, we need to keep track of the context where a given reference was defined.
type ReferenceContext struct {
	// ReferenceName is the last token of the JSON pointer that led here
	// (e.g. the definition name); empty for a root document context.
	ReferenceName string
	// swagger is the parsed document that local ("#/...") references resolve against.
	swagger *spec.Swagger
	// url is the location of that document, used to resolve relative remote references.
	url *url.URL
}
// Spec is a swagger specification with reference context. The embedded
// ReferenceContext allows resolving references relative to the spec's own URL.
type Spec struct {
	*ReferenceContext
	spec.Swagger
}
// NewSpec loads a swagger specification from a given location (file path or URL)
// and wraps it with a reference context rooted at that location.
func NewSpec(swaggerLocation string) (*Spec, error) {
	base, err := url.Parse(swaggerLocation)
	if err != nil {
		return nil, err
	}
	// loadSwaggerSpec caches parsed documents by location.
	swagger, err := loadSwaggerSpec(swaggerLocation)
	if err != nil {
		return nil, err
	}
	ctx := &ReferenceContext{swagger: swagger, url: base}
	return &Spec{ctx, *swagger}, nil
}
// Parameter contains a fully resolved swagger parameter and can navigate references from its schema source.
type Parameter struct {
	*ReferenceContext
	*spec.Parameter
}
// Response contains a fully resolved swagger response and can navigate references from its schema source.
type Response struct {
	*ReferenceContext
	*spec.Response
}
// Schema contains a fully resolved swagger schema and can navigate references from its schema source.
type Schema struct {
	*ReferenceContext
	*spec.Schema
}
// ResolveParameter resolves a given swagger parameter. If needed, it navigates to the source of the parameter reference
// and returns the referenced parameter and its context.
func (ctx *ReferenceContext) ResolveParameter(param spec.Parameter) (*Parameter, error) {
	ptr, ok, err := ctx.resolveReference(param.Ref)
	if err != nil {
		return nil, err
	}
	// Not a $ref: the parameter is defined inline, keep the current context.
	if !ok {
		return &Parameter{ctx, &param}, nil
	}
	parameter := ptr.value.(spec.Parameter)
	return &Parameter{ptr.ReferenceContext, &parameter}, nil
}
// ResolveResponse resolves a given swagger response. If needed, it navigates to the source of the response reference
// and returns the referenced response and its context.
func (ctx *ReferenceContext) ResolveResponse(resp spec.Response) (*Response, error) {
	ptr, ok, err := ctx.resolveReference(resp.Ref)
	if err != nil {
		return nil, err
	}
	// Not a $ref: the response is defined inline, keep the current context.
	if !ok {
		return &Response{ctx, &resp}, nil
	}
	response := ptr.value.(spec.Response)
	return &Response{ptr.ReferenceContext, &response}, nil
}
// ResolveSchema resolves a given swagger schema. If needed, it navigates to the source of the schema reference
// and returns the referenced schema and its context. Selected attributes that
// Azure specs commonly place as siblings of a $ref (Default, ReadOnly, Required,
// Extensions, AllOf) are merged into the resolved schema; other sibling
// attributes are rejected with an error so they can be handled deliberately.
func (ctx *ReferenceContext) ResolveSchema(s *spec.Schema) (*Schema, error) {
	ptr, ok, err := ctx.resolveReference(s.Ref)
	if err != nil {
		return nil, err
	}
	// Not a $ref: the schema is defined inline, keep the current context.
	if !ok {
		return &Schema{ctx, s}, nil
	}
	// Note: resolvedSchema is a copy, so the merges below don't mutate the cached document.
	resolvedSchema := ptr.value.(spec.Schema)
	// JSON Reference spec demands that all attributes sibling to a $ref are ignored:
	// > Any members other than "$ref" in a JSON Reference object SHALL be ignored.
	// https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03#section-3
	// ---------
	// Open API Spec v3 demands the same:
	// > Any sibling elements of a $ref are ignored. This is because $ref works by replacing itself and
	// > everything on its level with the definition it is pointing at.
	// https://swagger.io/docs/specification/using-ref/#sibling
	// ---------
	// However, Open API Spec v2 doesn't seem to define this rule. Azure API specs use a lot
	// of overrides with sibling attributes, so we should take those into account.
	// This group contains the attributes that we know are commonly overridden:
	// Default, ReadOnly, Required, and Extensions; and in one case, AllOf.
	// If the source spec has a value, we merge that value in the resulting specs.
	if s.Default != nil {
		resolvedSchema.Default = s.Default
	}
	resolvedSchema.ReadOnly = resolvedSchema.ReadOnly || s.ReadOnly
	if len(s.Required) > 0 {
		resolvedSchema.Required = s.Required
	}
	if len(s.Extensions) > 0 {
		if resolvedSchema.Extensions == nil {
			resolvedSchema.Extensions = s.Extensions
		} else {
			// Sibling extensions win over the resolved schema's extensions, key by key.
			for k, v := range s.Extensions {
				resolvedSchema.Extensions[k] = v
			}
		}
	}
	if len(s.AllOf) > 0 {
		resolvedSchema.AllOf = s.AllOf
	}
	// All the other properties aren't currently overridden. We add an assertion, so that
	// if a new specification does override a value, we can catch this and decide what to do further.
	if s.Maximum != nil {
		return nil, errors.New("'Maximum' defined as a sibling to a $ref")
	}
	if s.Minimum != nil {
		return nil, errors.New("'Minimum' defined as a sibling to a $ref")
	}
	if s.MaxLength != nil {
		return nil, errors.New("'MaxLength' defined as a sibling to a $ref")
	}
	if s.MinLength != nil {
		return nil, errors.New("'MinLength' defined as a sibling to a $ref")
	}
	if len(s.Pattern) > 0 {
		return nil, errors.New("'Pattern' defined as a sibling to a $ref")
	}
	if len(s.Discriminator) > 0 {
		return nil, errors.New("'Discriminator' defined as a sibling to a $ref")
	}
	// Don't error if the sibling enum matches the resolved schema's enum.
	if len(s.Enum) > 0 && !reflect.DeepEqual(s.Enum, resolvedSchema.Enum) {
		return nil, errors.New("'Enum' defined as a sibling to a $ref")
	}
	if s.Items != nil {
		return nil, errors.New("'Items' defined as a sibling to a $ref")
	}
	if s.Properties != nil {
		return nil, errors.New("'Properties' defined as a sibling to a $ref")
	}
	if s.AdditionalProperties != nil {
		return nil, errors.New("'AdditionalProperties' defined as a sibling to a $ref")
	}
	// Note that many other Open API schema properties aren't validated above
	// because those aren't used in our code generation, or in Azure specs in general.
	return &Schema{ptr.ReferenceContext, &resolvedSchema}, nil
}
// FindSubtypes returns a slice of schemas, each schema is a reference to
// a type definition that is a subtype of a given type.
// The following rules apply:
// - All subtypes reside in the same Open API specification.
// - A subtype is defined as `allOf` of the base type.
// - The search is applied recursively and all types along the `allOf` hierarchy are returned.
func (ctx *ReferenceContext) FindSubtypes() ([]*spec.Schema, error) {
	var schemas []*spec.Schema
	// Scan every definition in the current document in a deterministic order.
	for name, def := range util.MapOrdered(ctx.swagger.Definitions) {
		subTypes, err := ctx.recursiveAllOf(&def)
		if err != nil {
			return nil, err
		}
		for _, schema := range subTypes {
			// A definition is a subtype of the current type if any schema in its
			// allOf chain resolves to the type named by this context.
			if resolved, ok, _ := ctx.resolveReference(schema.Ref); ok {
				if resolved.ReferenceName == ctx.ReferenceName {
					// Return a local reference to the subtype's own definition.
					ref := spec.Schema{
						SchemaProps: spec.SchemaProps{
							Ref: spec.Ref{
								Ref: jsonreference.MustCreateRef("#/definitions/" + name),
							},
						},
					}
					schemas = append(schemas, &ref)
				}
			}
		}
	}
	return schemas, nil
}
// recursiveAllOf collects the schema's direct allOf entries plus, transitively,
// the allOf entries of every schema they resolve to.
func (ctx *ReferenceContext) recursiveAllOf(def *spec.Schema) ([]spec.Schema, error) {
	var result []spec.Schema
	for _, ref := range def.AllOf {
		result = append(result, ref)
		schema, err := ctx.ResolveSchema(&ref)
		if err != nil {
			return nil, err
		}
		// Recurse in the resolved schema's own context: its allOf refs are
		// relative to the document it was defined in.
		children, err := schema.ReferenceContext.recursiveAllOf(schema.Schema)
		if err != nil {
			return nil, err
		}
		result = append(result, children...)
	}
	return result, nil
}
// MergeParameters combines the Path Item parameters with Operation parameters.
// Operation parameters take precedence; a path-item parameter is included only
// when no operation parameter shares its (name, location) identity.
// NOTE(review): a failure to resolve a parameter reference panics rather than
// returning an error — confirm that unresolvable refs are unrecoverable here.
func (ctx *ReferenceContext) MergeParameters(operation []spec.Parameter, pathItem []spec.Parameter) []spec.Parameter {
	// Open API spec for operations:
	// > If a parameter is already defined at the Path Item, the new definition will override it.
	// > A unique parameter is defined by a combination of a name and location.
	var result []spec.Parameter
	seen := map[string]bool{}
	for _, p := range operation {
		// Resolve $refs so the identity key uses the actual name/location.
		schema, err := ctx.ResolveParameter(p)
		if err != nil {
			panic(err)
		}
		key := fmt.Sprintf("%s@%s", schema.Name, schema.In)
		seen[key] = true
		result = append(result, p)
	}
	for _, p := range pathItem {
		schema, err := ctx.ResolveParameter(p)
		if err != nil {
			panic(err)
		}
		key := fmt.Sprintf("%s@%s", schema.Name, schema.In)
		if _, ok := seen[key]; !ok {
			result = append(result, p)
		}
	}
	return result
}
// ResolveReference resolves a relative reference against the current swagger
// spec's URL and returns the absolute location as a string.
func (ctx *ReferenceContext) ResolveReference(ref string) (string, error) {
	parsed, err := url.Parse(ref)
	if err != nil {
		return "", err
	}
	resolved := ctx.url.ResolveReference(parsed)
	return resolved.String(), nil
}
// reference is a resolved $ref target: the raw value (spec.Parameter,
// spec.Response, or spec.Schema) plus the context of the document it came from.
type reference struct {
	*ReferenceContext
	value interface{}
}
// leadingDotDot matches the leading "../" segments of a relative path; see the
// HACK in resolveReference for why these must be preserved.
var leadingDotDot = regexp.MustCompile(`^(\.\./)+`)
// resolveReference follows a $ref pointer. It returns (nil, false, nil) when the
// pointer is empty (i.e. the value is defined inline). For remote refs, the
// target document is loaded (and cached by loadSwaggerSpec) and the returned
// context points at that document.
func (ctx *ReferenceContext) resolveReference(ref spec.Ref) (*reference, bool, error) {
	ptr := ref.GetPointer()
	if ptr == nil || ptr.IsEmpty() {
		return nil, false, nil
	}
	// The last pointer token is the name of the referenced definition.
	referenceName := ptr.DecodedTokens()[len(ptr.DecodedTokens())-1]
	relative, err := url.Parse(ref.RemoteURI())
	// NOTE(review): a URL parse error is silently treated as "not remote" and the
	// pointer is resolved against the current document — confirm this is intended.
	if err == nil && ref.RemoteURI() != "" {
		// HACK: url.ResolveReference drops leading "../" so we need to re-add these after resolving
		urlPrefix := ""
		if !ctx.url.IsAbs() {
			matches := leadingDotDot.FindStringSubmatch(ctx.url.Path)
			if len(matches) > 0 {
				urlPrefix = matches[0]
			}
		}
		finalURL := ctx.url.ResolveReference(relative)
		finalURL.Path = urlPrefix + finalURL.Path
		swagger, err := loadSwaggerSpec(finalURL.String())
		if err != nil {
			return nil, false, errors.Wrapf(err, "load Swagger spec")
		}
		// Switch context to the remote document before following the pointer.
		ctx = &ReferenceContext{swagger: swagger, ReferenceName: referenceName, url: finalURL}
	}
	value, _, err := ptr.Get(ctx.swagger)
	if err != nil {
		return nil, false, errors.Wrapf(err, "get pointer")
	}
	newCtx := &ReferenceContext{swagger: ctx.swagger, ReferenceName: referenceName, url: ctx.url}
	return &reference{newCtx, value}, true, nil
}
// Cache of parsed Swagger specifications for a location.
// NOTE(review): this map is not synchronized; it assumes spec loading happens
// from a single goroutine — confirm before parallelizing discovery.
var specCache = map[string]*spec.Swagger{}
// loadSwaggerSpec loads and parses a Swagger document from a file path or HTTP
// URL, caching the parsed result per location.
func loadSwaggerSpec(path string) (*spec.Swagger, error) {
	if cached, ok := specCache[path]; ok {
		return cached, nil
	}
	byts, err := swag.LoadFromFileOrHTTP(path)
	if err != nil {
		return nil, err
	}
	swagger := spec.Swagger{}
	err = swagger.UnmarshalJSON(byts)
	if err != nil {
		return nil, err
	}
	specCache[path] = &swagger
	return &swagger, nil
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/versioner.go | provider/pkg/openapi/versioner.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package openapi
import (
"fmt"
"io"
"log"
"os"
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/collections"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/paths"
"github.com/segmentio/encoding/json"
"gopkg.in/yaml.v2"
)
// ReadModuleVersionList reads and parses the curated module-to-versions list
// from the JSON file at path.
func ReadModuleVersionList(path string) (ModuleVersionList, error) {
	// os.ReadFile replaces the previous Open + io.ReadAll + Close sequence.
	byteValue, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var curatedVersion ModuleVersionList
	if err := json.Unmarshal(byteValue, &curatedVersion); err != nil {
		return nil, err
	}
	return curatedVersion, nil
}
// ReadDefaultVersions reads and parses the curated default-versions document
// from the YAML file at path.
func ReadDefaultVersions(path string) (DefaultVersions, error) {
	// os.ReadFile replaces the previous Open + io.ReadAll + Close sequence.
	// (The old local was misleadingly named jsonFile although the file is YAML.)
	byteValue, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var defaultVersions DefaultVersions
	if err := yaml.Unmarshal(byteValue, &defaultVersions); err != nil {
		return nil, err
	}
	return defaultVersions, nil
}
// calculatePathVersions builds a map of all versions defined for an API path from a map of all versions of a module.
// The result is a map from a normalized path (lowercased, parameter names collapsed) to a set of versions for that path.
func calculatePathVersions(versionMap ModuleVersions) map[string]*collections.OrderableSet[ApiVersion] {
	pathVersions := map[string]*collections.OrderableSet[ApiVersion]{}
	for version, items := range versionMap {
		for _, r := range items.Resources {
			// Normalization makes renamed parameters and case changes map to the same key.
			normalizedPath := paths.NormalizePath(r.Path)
			versions, ok := pathVersions[normalizedPath]
			if !ok {
				versions = collections.NewOrderableSet[ApiVersion]()
				pathVersions[normalizedPath] = versions
			}
			versions.Add(version)
		}
	}
	return pathVersions
}
// ApiToSdkVersion converts an API version to its SDK form by stripping dashes
// and prefixing "v", e.g. 2022-02-02-preview -> v20220202preview.
// The empty (default) version maps to the empty string.
func ApiToSdkVersion(apiVersion ApiVersion) SdkVersion {
	if apiVersion == "" {
		return ""
	}
	compact := strings.ReplaceAll(string(apiVersion), "-", "")
	return SdkVersion("v" + compact)
}
// SdkToApiVersion converts an SDK version back to its API form,
// e.g. v20220202preview -> 2022-02-02-preview. It returns an error for inputs
// that don't match the v<yyyymmdd>[preview|privatepreview|beta] shape.
func SdkToApiVersion(v SdkVersion) (ApiVersion, error) {
	if !strings.HasPrefix(string(v), "v") || len(v) < len("v20220202") || len(v) > len("v20220202privatepreview") {
		return "", fmt.Errorf("invalid sdk version: %s", v)
	}
	// The eight characters after the leading "v" must be the yyyymmdd date.
	// (Previously unvalidated: e.g. "vabcdefgh" converted to "abcd-ef-gh".)
	for _, c := range v[1:9] {
		if c < '0' || c > '9' {
			return "", fmt.Errorf("invalid sdk version: %s", v)
		}
	}
	res := v[1:5] + "-" + v[5:7] + "-" + v[7:9]
	suffix := v[9:]
	switch suffix {
	case "preview", "privatepreview", "beta":
		res += "-" + suffix
	case "":
		// Stable version: no suffix.
	default:
		return "", fmt.Errorf("invalid sdk version suffix: %s", v)
	}
	return ApiVersion(res), nil
}
// RemovableResources represents removable resources mapped to the resource that can replace them since the two are
// schema-compatible. Both are represented as fully qualified names like azure-native:azuread/v20210301:DomainService.
type RemovableResources map[string]string
// ToFullyQualifiedName returns azure-native:azureModule/version:resource.
// Version can be either ApiVersion (2021-03-01) or SdkVersion (v20210301); an
// ApiVersion is converted to SdkVersion form first.
// TODO tkappler version should be optional
func ToFullyQualifiedName(moduleName ModuleName, resource, version string) string {
	// construct fully qualified name like azure-native:aad/v20210301:DomainService
	const fqnFmt = "azure-native:%s/%s:%s"
	// SdkVersions start with "v"; anything else is treated as an ApiVersion.
	if !strings.HasPrefix(version, "v") {
		version = string(ApiToSdkVersion(ApiVersion(version)))
	}
	return fmt.Sprintf(fqnFmt, moduleName.Lowered(), version, resource)
}
// CanBeRemoved reports whether the given resource at the given version is in
// the removable set. Version can be either ApiVersion or SdkVersion.
func (s RemovableResources) CanBeRemoved(moduleName ModuleName, resource, version string) bool {
	_, found := s[ToFullyQualifiedName(moduleName, resource, version)]
	return found
}
// RemoveResources returns a copy of modules with all removable resources
// dropped. Invokes are dropped when named in the removable set or when their
// path sits under a removed resource's path. A non-default version left with
// no resources at all is dropped entirely, discarding its remaining invokes.
func RemoveResources(modules AzureModules, removable RemovableResources) AzureModules {
	result := AzureModules{}
	removedResourceCount := 0
	removedInvokeCount := 0
	for moduleName, versions := range modules {
		newVersions := ModuleVersions{}
		for version, resources := range versions {
			filteredResources := NewVersionResources()
			// Normalized paths of removed resources, used to drop their invokes below.
			removedResourcePaths := []string{}
			for resourceName, resource := range resources.Resources {
				if removable.CanBeRemoved(moduleName, resourceName, string(version)) {
					removedResourceCount++
					removedResourcePaths = append(removedResourcePaths, paths.NormalizePath(resource.Path))
					continue
				}
				filteredResources.Resources[resourceName] = resource
			}
			for invokeName, invoke := range resources.Invokes {
				if removable.CanBeRemoved(moduleName, invokeName, string(version)) {
					removedInvokeCount++
					continue
				}
				invokePath := paths.NormalizePath(invoke.Path)
				// If we can't match on name, we try to match on the path.
				found := false
				for _, resourcePath := range removedResourcePaths {
					if strings.HasPrefix(invokePath, resourcePath) {
						found = true
						break
					}
				}
				if found {
					removedInvokeCount++
					continue
				}
				filteredResources.Invokes[invokeName] = invoke
			}
			// A non-default version whose resources were all removed is dropped
			// entirely; its leftover invokes are logged and counted as removed.
			// (A version with neither resources nor invokes is kept as empty.)
			if version != "" && len(filteredResources.Resources) == 0 && len(filteredResources.Invokes) > 0 {
				removedInvokeCount += len(filteredResources.Invokes)
				for invokeName := range filteredResources.Invokes {
					log.Printf("Removable invoke: azure-native:%s/%s:%s", moduleName.Lowered(), version, invokeName)
				}
				continue
			}
			newVersions[version] = filteredResources
		}
		result[moduleName] = newVersions
	}
	log.Printf("Removed %d resources and %d invokes from the spec", removedResourceCount, removedInvokeCount)
	return result
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/apiVersion_test.go | provider/pkg/openapi/apiVersion_test.go | package openapi
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestApiVersionToDate checks that the date component of an API version is
// parsed, with any suffix (e.g. -preview) ignored.
func TestApiVersionToDate(t *testing.T) {
	t.Run("simple", func(t *testing.T) {
		date, err := ApiVersionToDate("2020-01-01")
		assert.NoError(t, err)
		actual := date.Format("2006-01-02")
		assert.Equal(t, "2020-01-01", actual)
	})
	t.Run("preview", func(t *testing.T) {
		date, err := ApiVersionToDate("2020-01-01-preview")
		assert.NoError(t, err)
		actual := date.Format("2006-01-02")
		assert.Equal(t, "2020-01-01", actual)
	})
}
// TestSortApiVersions checks the version ordering: chronological by date, with
// privatepreview < preview < stable for the same date.
func TestSortApiVersions(t *testing.T) {
	t.Run("already ordered", func(t *testing.T) {
		versions := []ApiVersion{"2020-01-01", "2021-02-02"}
		SortApiVersions(versions)
		expected := []ApiVersion{"2020-01-01", "2021-02-02"}
		assert.Equal(t, expected, versions)
	})
	t.Run("reversed", func(t *testing.T) {
		versions := []ApiVersion{"2021-02-02", "2020-01-01"}
		SortApiVersions(versions)
		expected := []ApiVersion{"2020-01-01", "2021-02-02"}
		assert.Equal(t, expected, versions)
	})
	t.Run("preview comes before stable", func(t *testing.T) {
		versions := []ApiVersion{"2020-01-01", "2020-01-01-preview"}
		SortApiVersions(versions)
		expected := []ApiVersion{"2020-01-01-preview", "2020-01-01"}
		assert.Equal(t, expected, versions)
	})
	t.Run("private comes before preview", func(t *testing.T) {
		versions := []ApiVersion{"2020-01-01-preview", "2020-01-01-privatepreview"}
		SortApiVersions(versions)
		expected := []ApiVersion{"2020-01-01-privatepreview", "2020-01-01-preview"}
		assert.Equal(t, expected, versions)
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/discover.go | provider/pkg/openapi/discover.go | // Copyright 2016-2020, Pulumi Corporation.
package openapi
import (
"fmt"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"github.com/go-openapi/spec"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/defaults"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/paths"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources/customresources"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/util"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/version"
"github.com/pulumi/pulumi/pkg/v3/codegen"
)
// ModuleName is an alias for the resources package's module name type (e.g. Compute).
type ModuleName = resources.ModuleName
// ApiVersion e.g. 2020-01-30
// Occasionally we use empty string to represent the default version or no version.
// Sometimes this is also used as a query and can include a wildcard.
type ApiVersion string
// IsDefault reports whether this is the empty/default version.
func (v ApiVersion) IsDefault() bool {
	return v == ""
}
// IsWildcard reports whether the version contains a "*" wildcard (query form).
func (v ApiVersion) IsWildcard() bool {
	return strings.Contains(string(v), "*")
}
// ToSdkVersion converts the API version to its SDK form, e.g. 2020-01-30 -> v20200130.
func (v ApiVersion) ToSdkVersion() SdkVersion {
	return ApiToSdkVersion(v)
}
// DefinitionName is the name of either an 'invoke' or a resource (e.g. listBuckets or Bucket)
type DefinitionName = string
// ResourceName e.g. Bucket
type ResourceName = string
// InvokeName e.g. listBuckets
type InvokeName = string
// SdkVersion e.g. v20200130
type SdkVersion string
// AzureModules maps module names (e.g. Compute) to versions in that module and resources therein.
type AzureModules map[ModuleName]ModuleVersions
// ModuleVersions maps API versions (e.g. 2020-08-01) to resources and invokes in that version.
type ModuleVersions = map[ApiVersion]VersionResources
// ModuleNameError represents a failed attempt to determine the module name for a given path within a spec.
// This results in the path being skipped and not considered for resource or invoke generation.
type ModuleNameError struct {
	// FilePath is the spec file in which the problematic path was found.
	FilePath string
	// Path is the API path whose module could not be determined.
	Path string
	// Error is the human-readable reason for the failure.
	Error string
}
// DiscoveryDiagnostics accumulates non-fatal findings produced while walking
// the Azure Open API specs: naming conflicts, skipped endpoints, and the full
// endpoint inventory.
type DiscoveryDiagnostics struct {
	// NamingDisambiguations records name conflicts that had to be resolved.
	NamingDisambiguations []resources.NameDisambiguation
	// POST endpoints defined in the Azure spec that we don't include because they don't belong to a resource.
	// Map is module -> operation id -> path.
	SkippedPOSTEndpoints map[ModuleName]map[string]string
	// module -> resource/type name -> path -> Endpoints
	Endpoints Endpoints
	// Errors where we can't determine the module name for a given path within a spec.
	ModuleNameErrors []ModuleNameError
}
// Endpoints is the endpoint inventory: module -> resource/type name -> path -> Endpoint
type Endpoints map[ModuleName]map[ResourceName]map[string]*Endpoint
// merge combines e2 into e, which is modified in-place.
// When both maps contain an entry for the same (module, type, path), the
// existing entry's PostOperations and HttpVerbs are unioned and kept sorted.
func (e Endpoints) merge(e2 Endpoints) {
	for moduleName, moduleEndpoints := range e2 {
		if _, ok := e[moduleName]; !ok {
			e[moduleName] = map[string]map[string]*Endpoint{}
		}
		for typeName, byPath := range moduleEndpoints {
			if _, ok := e[moduleName][typeName]; !ok {
				e[moduleName][typeName] = map[string]*Endpoint{}
			}
			for path, things := range byPath {
				if existing, ok := e[moduleName][typeName][path]; ok {
					// the POST endpoints are unique per version, but we don't track versions here, so check for duplicates
					ops := codegen.NewStringSet(existing.PostOperations...)
					for _, op := range things.PostOperations {
						ops.Add(op)
					}
					existing.PostOperations = ops.SortedValues()
					// Union the HTTP verbs the same way, deduplicated and sorted.
					verbs := codegen.NewStringSet(existing.HttpVerbs...)
					for _, verb := range things.HttpVerbs {
						verbs.Add(verb)
					}
					existing.HttpVerbs = verbs.SortedValues()
				} else {
					e[moduleName][typeName][path] = things
				}
			}
		}
	}
}
// add records the HTTP verbs available on a Swagger path item under the given
// module/type/path. Deprecated operations are ignored. For POST operations,
// the last path segment is treated as the operation name and the stored path
// is truncated to the parent, so it lines up with the resource's own path.
func (e Endpoints) add(pathItem spec.PathItem, moduleName ModuleName, typeName, path, filePath string, addedResourceOrInvoke bool) {
	if _, ok := e[moduleName]; !ok {
		e[moduleName] = map[string]map[string]*Endpoint{}
	}
	endpoint := &Endpoint{
		Path:     path,
		FilePath: filePath,
		Added:    addedResourceOrInvoke,
	}
	// Verbs are appended in a fixed alphabetical order.
	if pathItem.Delete != nil && !pathItem.Delete.Deprecated {
		endpoint.HttpVerbs = append(endpoint.HttpVerbs, "DELETE")
	}
	if pathItem.Get != nil && !pathItem.Get.Deprecated {
		endpoint.HttpVerbs = append(endpoint.HttpVerbs, "GET")
	}
	if pathItem.Head != nil && !pathItem.Head.Deprecated {
		endpoint.HttpVerbs = append(endpoint.HttpVerbs, "HEAD")
	}
	if pathItem.Patch != nil && !pathItem.Patch.Deprecated {
		endpoint.HttpVerbs = append(endpoint.HttpVerbs, "PATCH")
	}
	if pathItem.Post != nil && !pathItem.Post.Deprecated {
		endpoint.HttpVerbs = append(endpoint.HttpVerbs, "POST")
		lastSlash := strings.LastIndex(path, "/")
		endpoint.PostOperations = append(endpoint.PostOperations, path[lastSlash+1:])
		endpoint.Path = path[:lastSlash] // normalize path to not include the POST operation, to match the resource path
	}
	if pathItem.Put != nil && !pathItem.Put.Deprecated {
		endpoint.HttpVerbs = append(endpoint.HttpVerbs, "PUT")
	}
	if _, ok := e[moduleName][typeName]; !ok {
		e[moduleName][typeName] = map[string]*Endpoint{}
	}
	// Keyed by the (possibly POST-normalized) path.
	e[moduleName][typeName][endpoint.Path] = endpoint
}
// Endpoint describes one discovered API path and the HTTP verbs it supports.
type Endpoint struct {
	// Path is the API path; for POST operation paths it is truncated to the parent resource path.
	Path string
	// FilePath is the spec file the path was found in.
	FilePath string
	// HttpVerbs lists the non-deprecated verbs defined on the path, in alphabetical order.
	HttpVerbs []string
	Get, Put, Delete, Patch, Head string `json:",omitempty"` // operation id
	PostOperations []string `json:",omitempty"` // path suffixes
	// Added indicates whether a resource or invoke was generated for this path.
	Added bool `json:",omitempty"`
}
// addSkippedPOSTEndpoint records a POST endpoint that was not turned into a
// resource or invoke, keyed by module name and operation id.
func (d *DiscoveryDiagnostics) addSkippedPOSTEndpoint(moduleName ModuleName, operation, path string) {
	byOperation, ok := d.SkippedPOSTEndpoints[moduleName]
	if !ok {
		byOperation = map[string]string{}
		d.SkippedPOSTEndpoints[moduleName] = byOperation
	}
	byOperation[operation] = path
}
// addPathItem registers a single Swagger path item in the diagnostics' endpoint
// report, lazily initializing the Endpoints map on first use.
func (d *DiscoveryDiagnostics) addPathItem(pathItem spec.PathItem, moduleName ModuleName, typeName, path, filePath string, addedResourceOrInvoke bool) {
	if d.Endpoints == nil {
		d.Endpoints = Endpoints{}
	}
	d.Endpoints.add(pathItem, moduleName, typeName, path, filePath, addedResourceOrInvoke)
}
// VersionResources contains all resources and invokes in a given API version.
type VersionResources struct {
	Resources map[ResourceName]*ResourceSpec // resource definitions, by type name
	Invokes   map[InvokeName]*ResourceSpec   // function/invoke definitions, by name
}
// NewVersionResources returns a VersionResources whose maps are empty but
// non-nil, so callers can insert into them immediately.
func NewVersionResources() VersionResources {
	var v VersionResources
	v.Resources = map[ResourceName]*ResourceSpec{}
	v.Invokes = map[InvokeName]*ResourceSpec{}
	return v
}
// ModuleVersionList maps a module name to a list of its API versions.
type ModuleVersionList = map[ModuleName][]ApiVersion

// DefinitionVersion is a resource or invoke version and its source information.
type DefinitionVersion struct {
	ApiVersion   ApiVersion `yaml:"ApiVersion,omitempty"`
	SpecFilePath string     `yaml:"SpecFilePath,omitempty"`
	ResourceUri  string     `yaml:"ResourceUri,omitempty"`
	RpNamespace  string     `yaml:"RpNamespace,omitempty"`
}
// DefaultVersions is an amalgamation of multiple API versions, generated from a specification.
// It maps module -> definition (resource or invoke) name -> its chosen default version.
type DefaultVersions map[ModuleName]map[DefinitionName]DefinitionVersion
// IsAtVersion reports whether the default version recorded for the given
// module and definition equals version. Unknown modules or definitions
// yield false.
func (defaultVersions DefaultVersions) IsAtVersion(moduleName ModuleName, typeName DefinitionName, version ApiVersion) bool {
	module, ok := defaultVersions[moduleName]
	if !ok {
		return false
	}
	def, ok := module[typeName]
	return ok && def.ApiVersion == version
}
// All merges invokes and resources into one map keyed by definition name.
// Resources are copied second, so they win if a name occurs in both maps.
func (v VersionResources) All() map[string]*ResourceSpec {
	merged := make(map[string]*ResourceSpec, len(v.Invokes)+len(v.Resources))
	for name, invokeSpec := range v.Invokes {
		merged[name] = invokeSpec
	}
	for name, resSpec := range v.Resources {
		merged[name] = resSpec
	}
	return merged
}
// ResourceSpec contains a pointer in an Open API Spec that defines a resource and related metadata.
type ResourceSpec struct {
	// FileLocation is the path to the Open API Spec file.
	FileLocation string
	// API version of the resource
	ApiVersion ApiVersion
	// SDK version of the resource
	SdkVersion SdkVersion
	// Path is the API path in the Open API Spec.
	Path string
	// PathItem holds the operations (GET/PUT/DELETE/...) defined at Path.
	PathItem *spec.PathItem
	// PathItemList is the sibling "<Path>/list" path item, set when the
	// resource's state is read via a list operation instead of GET.
	PathItemList *spec.PathItem
	// Swagger is the parsed spec containing this path.
	Swagger *Spec
	// CompatibleVersions is a list of other API versions that are compatible with this resource.
	// These versions can be the default version, indicated by an empty string.
	CompatibleVersions []ApiVersion
	// DefaultBody is the default state registered for this path in openapi/defaults, if any.
	DefaultBody map[string]interface{}
	// DeprecationMessage, when non-empty, marks this version as deprecated.
	DeprecationMessage string
	// PreviousVersion is the API version that was previously the default for this definition.
	PreviousVersion ApiVersion
	// ModuleNaming records how the module name for this resource was resolved.
	ModuleNaming resources.ModuleNaming
}
// ApplyTransformations adds the default version for each module and deprecates and removes specified API versions.
// Order matters: removals run first so removed versions cannot be picked up as
// defaults, then default versions are computed, then deprecation messages are
// stamped on. modules is mutated in place and returned for convenience.
func ApplyTransformations(modules AzureModules, defaultVersions DefaultVersions, previousDefaultVersions DefaultVersions, deprecated, removed ModuleVersionList) AzureModules {
	ApplyRemovals(modules, removed)
	AddDefaultVersion(modules, defaultVersions, previousDefaultVersions)
	ApplyDeprecations(modules, deprecated)
	return modules
}
// ApplyRemovals deletes the API versions listed in removed from each module's
// version map, in place.
func ApplyRemovals(modules map[ModuleName]ModuleVersions, removed ModuleVersionList) {
	for moduleName, versionMap := range util.MapOrdered(modules) {
		versionsToRemove, ok := removed[moduleName]
		if !ok {
			continue
		}
		for _, version := range versionsToRemove {
			delete(versionMap, version)
		}
	}
}
// AddDefaultVersion inserts, for each module, a synthetic "" (default) version
// assembled from each definition's chosen default API version, and fills in
// CompatibleVersions on every resource with all other versions that define the
// same normalized API path. Because ModuleVersions is a map alias, writing
// versionMap[""] mutates the map stored in modules directly.
func AddDefaultVersion(modules AzureModules, defaultVersions DefaultVersions, previousDefaultVersions DefaultVersions) {
	for moduleName, versionMap := range util.MapOrdered(modules) {
		// Add a default version for each resource and invoke.
		defaultResourceVersions := defaultVersions[moduleName]
		versionMap[""] = buildDefaultVersion(versionMap, defaultResourceVersions, previousDefaultVersions[moduleName])
		// Set compatible versions to all other versions of the resource with the same normalized API path.
		pathVersions := calculatePathVersions(versionMap)
		for version, items := range util.MapOrdered(versionMap) {
			for _, resource := range util.MapOrdered(items.Resources) {
				var otherVersions []ApiVersion
				normalisedPath := paths.NormalizePath(resource.Path)
				otherVersionsSorted := pathVersions[normalisedPath].SortedValues()
				for _, otherVersion := range otherVersionsSorted {
					// Exclude the resource's own version from the compatibility list.
					if otherVersion != version {
						otherVersions = append(otherVersions, otherVersion)
					}
				}
				resource.CompatibleVersions = otherVersions
			}
		}
	}
}
// ApplyDeprecations stamps every resource and invoke of the listed API versions
// with a deprecation message. modules is modified in place and also returned.
func ApplyDeprecations(modules AzureModules, deprecated ModuleVersionList) AzureModules {
	for moduleName, versionMap := range util.MapOrdered(modules) {
		deprecatedVersions, ok := deprecated[moduleName]
		if !ok {
			continue
		}
		// A listed version absent from versionMap yields zero-value
		// VersionResources, which deprecateAll treats as a no-op.
		for _, apiVersion := range deprecatedVersions {
			versionResources := versionMap[apiVersion]
			deprecateAll(versionResources.All(), apiVersion)
		}
	}
	return modules
}
// buildDefaultVersion assembles the top-level (version-less) resources and
// invokes for a module from the per-definition default API versions. Each
// entry is a shallow copy of the versioned spec, annotated with the previous
// default version when one is known. Definitions whose default version is not
// present in versionMap are skipped.
func buildDefaultVersion(versionMap ModuleVersions, defaultResourceVersions map[ResourceName]DefinitionVersion, previousResourceVersions map[ResourceName]DefinitionVersion) VersionResources {
	defaultResources := map[string]*ResourceSpec{}
	defaultInvokes := map[string]*ResourceSpec{}
	// withPrevious copies the spec and records the previous default version, if any.
	withPrevious := func(specCopy ResourceSpec, name ResourceName) *ResourceSpec {
		if prev, ok := previousResourceVersions[name]; ok {
			specCopy.PreviousVersion = prev.ApiVersion
		}
		return &specCopy
	}
	for name, def := range util.MapOrdered(defaultResourceVersions) {
		versionResources, ok := versionMap[def.ApiVersion]
		if !ok {
			continue
		}
		// A name is either a resource or an invoke; resources are checked first.
		if res, ok := versionResources.Resources[name]; ok {
			defaultResources[name] = withPrevious(*res, name)
		} else if inv, ok := versionResources.Invokes[name]; ok {
			defaultInvokes[name] = withPrevious(*inv, name)
		}
	}
	return VersionResources{
		Resources: defaultResources,
		Invokes:   defaultInvokes,
	}
}
// ReadAzureModules finds Azure Open API specs on disk, parses them, and creates in-memory representation of resources,
// collected per Azure Module and API Version - for all API versions.
// Use the namespace "*" to load all available namespaces, or a specific namespace to filter, e.g. "Compute".
// Use apiVersions with a wildcard to filter versions, e.g. "2022*preview", or leave it blank to use the default of "20*".
// Diagnostics gathered along the way (skipped endpoints, naming disambiguations,
// module-name errors) are returned even when an error is also returned.
func ReadAzureModules(specsDir, namespace, apiVersions string) (AzureModules, DiscoveryDiagnostics, error) {
	diagnostics := DiscoveryDiagnostics{
		SkippedPOSTEndpoints: map[ModuleName]map[string]string{},
		Endpoints:            map[ModuleName]map[string]map[string]*Endpoint{},
	}
	swaggerSpecLocations, err := swaggerLocations(specsDir, namespace, apiVersions)
	if err != nil {
		return nil, diagnostics, err
	}
	// Collect all versions for each path in the API across all Swagger files.
	modules := AzureModules{}
	for _, location := range swaggerSpecLocations {
		relLocation, err := filepath.Rel(specsDir, location)
		if err != nil {
			return nil, diagnostics, errors.Wrapf(err, "failed to get relative path for %q", location)
		}
		// Skip spec files that are known to break codegen.
		if exclude(relLocation) {
			continue
		}
		swagger, err := NewSpec(location)
		if err != nil {
			return nil, diagnostics, errors.Wrapf(err, "failed to parse %q", location)
		}
		// Sort paths for deterministic processing order across runs.
		orderedPaths := make([]string, 0, len(swagger.Paths.Paths))
		for path := range swagger.Paths.Paths {
			orderedPaths = append(orderedPaths, path)
		}
		sort.Strings(orderedPaths)
		for _, path := range orderedPaths {
			moduleDiagnostics := modules.addAPIPath(specsDir, relLocation, path, swagger)
			// Update reports
			diagnostics.NamingDisambiguations = append(diagnostics.NamingDisambiguations, moduleDiagnostics.NamingDisambiguations...)
			for moduleName, operations := range moduleDiagnostics.SkippedPOSTEndpoints {
				for op, path := range operations {
					diagnostics.addSkippedPOSTEndpoint(moduleName, op, path)
				}
			}
			diagnostics.Endpoints.merge(moduleDiagnostics.Endpoints)
			diagnostics.ModuleNameErrors = append(diagnostics.ModuleNameErrors, moduleDiagnostics.ModuleNameErrors...)
		}
	}
	return modules, diagnostics, nil
}
// deprecateAll stamps every given resource spec with a deprecation message for
// the given API version.
func deprecateAll(resourceSpecs map[string]*ResourceSpec, version ApiVersion) {
	// The message depends only on the version, so format it once instead of on
	// every loop iteration.
	deprecationMessage := fmt.Sprintf(
		"Version %s will be removed in v2 of the provider.",
		version)
	for _, resourceSpec := range resourceSpecs {
		resourceSpec.DeprecationMessage = deprecationMessage
	}
}
// SingleVersion returns only a single (latest or preview) version of each
// resource from the full list of resource versions, i.e. it keeps just the
// default ("") entry of every module.
func SingleVersion(modules AzureModules) AzureModules {
	result := make(AzureModules, len(modules))
	for moduleName, versions := range modules {
		result[moduleName] = ModuleVersions{"": versions[""]}
	}
	return result
}
// IsPreview returns true for API versions that aren't considered stable, i.e.
// those whose name contains "preview" or "beta" (case-insensitive).
func IsPreview(apiVersion string) bool {
	v := strings.ToLower(apiVersion)
	if strings.Contains(v, "preview") {
		return true
	}
	return strings.Contains(v, "beta")
}
// ApiVersionToDate parses the leading YYYY-MM-DD portion of an API version,
// ignoring suffixes such as "-preview". Versions shorter than a full date are
// rejected with an error.
func ApiVersionToDate(apiVersion ApiVersion) (time.Time, error) {
	const layout = "2006-01-02"
	s := string(apiVersion)
	if len(s) < len(layout) {
		return time.Time{}, fmt.Errorf("invalid API version %q", apiVersion)
	}
	return time.Parse(layout, s[:len(layout)])
}
// IsPrivate reports whether the version string contains the word "private",
// ignoring case.
func IsPrivate(apiVersion string) bool {
	return strings.Contains(strings.ToLower(apiVersion), "private")
}
// CompareApiVersions orders two API versions. Both are compared as dates when
// possible, falling back to plain string comparison if either fails to parse.
// Versions with equal dates are ranked private first, preview second, stable
// last. The result is 0 if a == b, -1 if a < b, and +1 if a > b.
func CompareApiVersions(a, b ApiVersion) int {
	dateA, errA := ApiVersionToDate(a)
	dateB, errB := ApiVersionToDate(b)
	if errA != nil || errB != nil {
		return strings.Compare(string(a), string(b))
	}
	if c := dateA.Compare(dateB); c != 0 {
		return c
	}
	// flagOrder sorts a flagged version before an unflagged one.
	flagOrder := func(aFlag, bFlag bool) int {
		switch {
		case aFlag == bFlag:
			return 0
		case aFlag:
			return -1
		default:
			return 1
		}
	}
	// Same date: private sorts first, then preview, then stable.
	if c := flagOrder(IsPrivate(string(a)), IsPrivate(string(b))); c != 0 {
		return c
	}
	return flagOrder(IsPreview(string(a)), IsPreview(string(b)))
}
// SortApiVersions sorts versions in place, oldest first, using
// CompareApiVersions. The sort is stable, so equal versions keep their
// original relative order.
func SortApiVersions(versions []ApiVersion) {
	less := func(a, b int) bool {
		return CompareApiVersions(versions[a], versions[b]) < 0
	}
	sort.SliceStable(versions, less)
}
// swaggerLocations returns a slice of paths of all known Azure Resource
// Manager swagger files under specsDir, sorted alphabetically so the schemas
// with the latest API version come last. namespace and apiVersions can be
// blank (treated as wildcards) or can be used to filter the results.
func swaggerLocations(specsDir, namespace, apiVersions string) ([]string, error) {
	if namespace == "" {
		namespace = "*"
	}
	if apiVersions == "" {
		apiVersions = "20*"
	}
	// The three directory layouts differ only in their glob pattern, so keep
	// them in one list instead of repeating the glob-and-check sequence.
	patterns := []string{
		filepath.Join(specsDir, "specification", "*", "resource-manager", "Microsoft."+namespace, "*", apiVersions, "*.json"),
		filepath.Join(specsDir, "specification", "*", "resource-manager", "Microsoft."+namespace, "*", "*", apiVersions, "*.json"),
		filepath.Join(specsDir, "specification", "*", "resource-manager", "PaloAltoNetworks."+namespace, "*", apiVersions, "*.json"),
	}
	fileSet := codegen.NewStringSet()
	for _, pattern := range patterns {
		files, err := filepath.Glob(pattern)
		if err != nil {
			return nil, err
		}
		for _, file := range files {
			// In December 2022, Azure started authoring some API specs in https://github.com/microsoft/cadl.
			// The second pattern above matches some of these folders, like
			// voiceservices/resource-manager/Microsoft.VoiceServices/cadl/examples/2023-01-31, so we exclude
			// them, along with example payload folders.
			if strings.Contains(file, "/cadl/") || strings.Contains(file, "/examples/") {
				continue
			}
			fileSet.Add(file)
		}
	}
	// Sorting alphabetically means the schemas with the latest API version are the last.
	return fileSet.SortedValues(), nil
}
// excludeRegexes matches spec file paths that must be skipped entirely; each
// entry documents why the corresponding spec version is excluded.
var excludeRegexes = []*regexp.Regexp{
	// This preview version defines two types with the same name (one enum, one object) which fails to pass our codegen.
	// It's old, preview, and not important - so exclude the files of this version.
	regexp.MustCompile(".*frontdoor/resource-manager/Microsoft.Network/preview/2018-08-01-preview.*"),
	// This version conflicts with the managed folder version:
	// servicefabricmanagedclusters/resource-manager/Microsoft.ServiceFabric/ServiceFabric/preview/2023-11-01-preview
	// This causes a conflict in the version-specific folder, not the default version folder, so we have to completely exclude it.
	regexp.MustCompile(".*servicefabric/resource-manager/Microsoft.ServiceFabric/ServiceFabric/preview/2023-11-01-preview.*"),
	// This preview version is invalid OpenAPI JSON, reading it fails with encoding/json.UnmarshalTypeError in field "definitions".
	regexp.MustCompile(".*network/resource-manager/Microsoft.Network/preview/2023-03-01-preview.*"),
	// This version defines two "CustomRule" types so we ignore it.
	regexp.MustCompile(".*cdn/resource-manager/Microsoft.Cdn/Cdn/preview/2025-05-01-preview.*"),
	// Includes a type with an invalid reference.
	regexp.MustCompile(".*azurearcdata/resource-manager/Microsoft.AzureArcData/preview/2025-06-01-preview.*"),
}

// exclude reports whether filePath matches any of the exclusion patterns above.
func exclude(filePath string) bool {
	for _, pattern := range excludeRegexes {
		if pattern.MatchString(filePath) {
			return true
		}
	}
	return false
}
// addAPIPath considers whether an API path contains resources and/or invokes and adds corresponding entries to the
// module map. Modules are mutated in-place. Diagnostics from the discovery of
// this single path are returned.
func (modules AzureModules) addAPIPath(specsDir, fileLocation, path string, swagger *Spec) DiscoveryDiagnostics {
	moduleNaming, err := resources.GetModuleName(version.GetVersion().Major, filepath.Join(specsDir, fileLocation), path)
	if err != nil {
		// Record the failure so the path is reported as skipped.
		return DiscoveryDiagnostics{
			ModuleNameErrors: []ModuleNameError{{
				FilePath: fileLocation,
				Path:     path,
				Error:    err.Error(),
			}},
		}
	}
	moduleName := moduleNaming.ResolvedName
	// Find (or create) the version map for this module.
	versionMap, ok := modules[moduleName]
	if !ok {
		versionMap = ModuleVersions{}
		modules[moduleName] = versionMap
	}
	// Find (or create) the resources for this API version. Named
	// versionResources rather than "version" to avoid shadowing the version
	// package used above.
	apiVersion := ApiVersion(swagger.Info.Version)
	versionResources, ok := versionMap[apiVersion]
	if !ok {
		versionResources = NewVersionResources()
		versionMap[apiVersion] = versionResources
	}
	return addResourcesAndInvokes(versionResources, fileLocation, path, moduleNaming, swagger)
}
// getTypeName returns the type name for a given operation and path. A custom
// resource name registered for the path takes precedence; otherwise the name
// is derived from the operation id.
func getTypeName(op *spec.Operation, path string) (string, *resources.NameDisambiguation) {
	if custom, ok := customresources.GetCustomResourceName(path); ok {
		return custom, nil
	}
	return resources.ResourceName(op.ID, path)
}
// addResourcesAndInvokes inspects a single Swagger path item and registers the
// resources and invokes it defines into version (mutated in place). It returns
// diagnostics: naming disambiguations, skipped POST endpoints, and the
// endpoint inventory for this path.
//
// Discovery rules, in the order applied below:
//   - PUT + GET: a regular resource (requires DELETE or a registered default state).
//   - PUT + HEAD + DELETE: a resource whose existence is read via HEAD.
//   - PUT + sibling "<path>/list": a resource whose state is read via the list endpoint.
//   - PATCH + GET: a resource only for custom resources or registered default states.
//   - POST: an invoke when the operation looks like a list/get; otherwise reported as skipped.
//   - GET alone: a get/list invoke, but only if nothing else was added for this path.
func addResourcesAndInvokes(version VersionResources, fileLocation, path string, moduleNaming resources.ModuleNaming, swagger *Spec) DiscoveryDiagnostics {
	apiVersion := ApiVersion(swagger.Info.Version)
	sdkVersion := ApiToSdkVersion(apiVersion)
	pathItem := swagger.Paths.Paths[path]
	pathItemList, hasList := swagger.Paths.Paths[path+"/list"]
	if ok := customresources.HasCustomDelete(path); ok {
		// Synthesize a DELETE operation so the path qualifies as a deletable resource.
		pathItem.Delete = &spec.Operation{
			OperationProps: spec.OperationProps{
				Description: "Custom implementation of this operation is available",
			},
		}
	}
	diagnostics := DiscoveryDiagnostics{
		SkippedPOSTEndpoints: map[ModuleName]map[string]string{},
	}
	// recordDisambiguation stamps the file location onto a naming disambiguation
	// (if any) and appends it to the diagnostics.
	recordDisambiguation := func(disambiguation *resources.NameDisambiguation) {
		if disambiguation != nil {
			disambiguation.FileLocation = fileLocation
			diagnostics.NamingDisambiguations = append(diagnostics.NamingDisambiguations, *disambiguation)
		}
	}
	// Add a resource entry, if appropriate HTTP endpoints are defined.
	foundResourceOrInvoke := false
	addResource := func(typeName string, defaultBody map[string]interface{}, pathItemList *spec.PathItem) {
		version.Resources[typeName] = &ResourceSpec{
			FileLocation: fileLocation,
			ApiVersion:   apiVersion,
			SdkVersion:   sdkVersion,
			Path:         path,
			PathItem:     &pathItem,
			Swagger:      swagger,
			DefaultBody:  defaultBody,
			PathItemList: pathItemList,
			ModuleNaming: moduleNaming,
		}
		foundResourceOrInvoke = true
	}
	addInvoke := func(typeName string) {
		version.Invokes[typeName] = &ResourceSpec{
			FileLocation: fileLocation,
			ApiVersion:   apiVersion,
			SdkVersion:   sdkVersion,
			Path:         path,
			PathItem:     &pathItem,
			Swagger:      swagger,
			ModuleNaming: moduleNaming,
		}
		foundResourceOrInvoke = true
	}
	// The base name is the prefix of the first non-deprecated operation id
	// (POST, then PUT, then GET), e.g. "Foo" in "Foo_Get". Used for endpoint reporting.
	var resourceBaseName string
	for _, spec := range []*spec.Operation{pathItem.Post, pathItem.Put, pathItem.Get} {
		if spec != nil && !spec.Deprecated {
			resourceBaseName = strings.Split(spec.ID, "_")[0]
			break
		}
	}
	if pathItem.Put != nil && !pathItem.Put.Deprecated {
		hasDelete := pathItem.Delete != nil && !pathItem.Delete.Deprecated
		switch {
		case pathItem.Get != nil && !pathItem.Get.Deprecated:
			defaultState := defaults.GetDefaultResourceState(path, swagger.Info.Version)
			if defaultState != nil && hasDelete && defaultState.HasNonEmptyCollections {
				// See the limitation in `SdkShapeConverter.isDefaultResponse()`
				panic(fmt.Sprintf("invalid defaultResourcesState '%s': non-empty collections aren't supported for deletable resources", path))
			}
			typeName, disambiguation := getTypeName(pathItem.Get, path)
			recordDisambiguation(disambiguation)
			isCustom, includeCustom := customresources.IncludeCustomResource(path, string(apiVersion))
			// A resource must be deletable, either via DELETE or a registered default state.
			canBeDeleted := hasDelete || defaultState != nil
			if typeName != "" && ((isCustom && includeCustom) || (!isCustom && canBeDeleted)) {
				if _, ok := version.Resources[typeName]; ok && version.Resources[typeName].Path != path {
					fmt.Printf("warning: duplicate resource %s/%s at paths:\n - %s\n - %s\n", sdkVersion, typeName, path, version.Resources[typeName].Path)
				}
				var defaultBody map[string]any
				if defaultState != nil {
					defaultBody = defaultState.State
				}
				addResource(typeName, defaultBody, nil /* pathItemList */)
			}
		case pathItem.Head != nil && !pathItem.Head.Deprecated:
			typeName, disambiguation := getTypeName(pathItem.Head, path)
			recordDisambiguation(disambiguation)
			if typeName != "" && hasDelete {
				if _, ok := version.Resources[typeName]; ok && version.Resources[typeName].Path != path {
					fmt.Printf("warning: duplicate resource %s/%s at paths:\n - %s\n - %s\n", sdkVersion, typeName, path, version.Resources[typeName].Path)
				}
				addResource(typeName, nil /* defaultBody */, nil /* pathItemList */)
			}
		case hasList:
			// The resource's state is read via the sibling "<path>/list" endpoint.
			var typeName string
			var disambiguation *resources.NameDisambiguation
			switch {
			case pathItemList.Get != nil && !pathItemList.Get.Deprecated:
				typeName, disambiguation = getTypeName(pathItemList.Get, path)
			case pathItemList.Post != nil && !pathItemList.Post.Deprecated:
				typeName, disambiguation = getTypeName(pathItemList.Post, path)
			}
			recordDisambiguation(disambiguation)
			if typeName != "" {
				var defaultBody map[string]interface{}
				defaultState := defaults.GetDefaultResourceState(path, swagger.Info.Version)
				if defaultState != nil {
					defaultBody = defaultState.State
				} else if !hasDelete {
					// The /list pattern that we handle here seems to (almost) universally have this shape of the default body.
					// Instead of maintaining the resources in defaultResourcesState, we can hard-code it here.
					defaultBody = map[string]interface{}{
						"properties": map[string]interface{}{},
					}
				}
				if _, ok := version.Resources[typeName]; ok && version.Resources[typeName].Path != path {
					fmt.Printf("warning: duplicate resource %s/%s at paths:\n - %s\n - %s\n", sdkVersion, typeName, path, version.Resources[typeName].Path)
				}
				addResource(typeName, defaultBody, &pathItemList)
			}
		}
	}
	// Add an entry for PATCH-based resources.
	if pathItem.Patch != nil && !pathItem.Patch.Deprecated && pathItem.Get != nil && !pathItem.Get.Deprecated {
		defaultState := defaults.GetDefaultResourceState(path, swagger.Info.Version)
		typeName, disambiguation := getTypeName(pathItem.Get, path)
		recordDisambiguation(disambiguation)
		isCustom, includeCustom := customresources.IncludeCustomResource(path, string(apiVersion))
		if typeName != "" && ((isCustom && includeCustom) || (!isCustom && defaultState != nil)) {
			if _, ok := version.Resources[typeName]; ok && version.Resources[typeName].Path != path {
				fmt.Printf("warning: duplicate resource %s/%s at paths:\n - %s\n - %s\n", sdkVersion, typeName, path, version.Resources[typeName].Path)
			}
			var defaultBody map[string]any
			if defaultState != nil {
				defaultBody = defaultState.State
			}
			addResource(typeName, defaultBody, nil /* pathItemList */)
		}
	}
	// Add a POST invoke entry.
	if pathItem.Post != nil && !pathItem.Post.Deprecated {
		parts := strings.Split(path, "/")
		operationName := strings.ToLower(parts[len(parts)-1])
		operationId := operationFromOperationID(pathItem.Post.OperationProps.ID)
		prefix := ""
		switch {
		case strings.HasPrefix(operationName, "list"):
			prefix = "list"
		case strings.HasPrefix(operationName, "get"):
			prefix = "get"
		case (strings.HasPrefix(operationId, "get") || strings.HasPrefix(operationId, "retrieve")) &&
			pathItem.Put == nil &&
			(strings.Contains(operationName, "key") ||
				strings.Contains(operationName, "token") ||
				strings.Contains(operationName, "credential")):
			// Operation ID-based selection is a bit tricky, so we apply the following heuristic:
			// - Called according to the pattern `Foo_GetBar`,
			// - It's not a resource (ensured by lack of a PUT operation),
			// - It's about a key, a token, or credentials.
			prefix = "get"
		default:
			// The POST doesn't look like a list/get, so report it as skipped.
			diagnostics.addSkippedPOSTEndpoint(moduleNaming.ResolvedName, pathItem.Post.ID, path)
		}
		if prefix != "" {
			typeName, disambiguation := getTypeName(pathItem.Post, path)
			if typeName != "" {
				addInvoke(prefix + typeName)
				recordDisambiguation(disambiguation)
			}
		}
	}
	// Add an invoke if a GET endpoint is defined, but only if we haven't added a resource for this path yet.
	// Resources can be read through the Pulumi resource model without a dedicated invoke.
	if !foundResourceOrInvoke && pathItem.Get != nil && shouldIncludeInvoke(path, pathItem.Get) {
		typeName, disambiguation := resources.ResourceName(pathItem.Get.ID, path)
		if typeName != "" {
			operation := operationFromOperationID(pathItem.Get.OperationProps.ID)
			prefix := "get"
			if operation == "list" {
				prefix = "list"
			}
			addInvoke(prefix + typeName)
			recordDisambiguation(disambiguation)
		}
	}
	diagnostics.addPathItem(pathItem, moduleNaming.ResolvedName, resourceBaseName, path, fileLocation, foundResourceOrInvoke)
	return diagnostics
}
// operationFromOperationID extracts the operation part of an operation id and
// lowercases it, e.g. "DiagnosticSettingsCategory_List" -> "list". An id with
// no underscore is lowercased whole.
func operationFromOperationID(opID string) string {
	op := opID
	if i := strings.LastIndex(opID, "_"); i >= 0 {
		op = opID[i+1:]
	}
	return strings.ToLower(op)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/discover_test.go | provider/pkg/openapi/discover_test.go | package openapi
import (
"fmt"
"net/http"
"testing"
"github.com/go-openapi/spec"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/defaults"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources/customresources"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
)
// createTestSwaggerForInvoke builds a minimal Spec containing a single path
// with one operation (GET or POST) carrying the given operation id, suitable
// for exercising invoke discovery. Panics on any other HTTP method.
func createTestSwaggerForInvoke(path, operationId string, httpMethod string) Spec {
	op := &spec.Operation{
		OperationProps: spec.OperationProps{
			ID: operationId,
		},
	}
	props := spec.PathItemProps{}
	switch httpMethod {
	case http.MethodGet:
		props.Get = op
	case http.MethodPost:
		props.Post = op
	default:
		panic("unsupported HTTP method " + httpMethod)
	}
	return Spec{
		Swagger: spec.Swagger{
			SwaggerProps: spec.SwaggerProps{
				Info: &spec.Info{
					InfoProps: spec.InfoProps{
						Version: "2020-01-01",
					},
				},
				Paths: &spec.Paths{
					Paths: map[string]spec.PathItem{
						path: {
							PathItemProps: props,
						},
					},
				},
			},
		},
	}
}
// TestAddInvokes verifies that a GET list endpoint and a POST token-retrieval
// endpoint are each discovered as invokes under their expected names.
func TestAddInvokes(t *testing.T) {
	cases := map[string]struct {
		path        string
		operationId string
		httpMethod  string
		expected    string
	}{
		"listDiagnosticSettingsCategory": {
			path:        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/diagnosticSettingsCategories",
			operationId: "DiagnosticSettingsCategory_List",
			httpMethod:  http.MethodGet,
			expected:    "listDiagnosticSettingsCategory",
		},
		"retrieveRegistrationToken": {
			path:        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DesktopVirtualization/hostPools/{hostPoolName}/retrieveRegistrationToken",
			operationId: "HostPools_RetrieveRegistrationToken",
			httpMethod:  http.MethodPost,
			expected:    "getHostPoolRegistrationToken",
		},
	}
	for tcName, tc := range cases {
		versionResources := NewVersionResources()
		// Named swagger (not "spec") to avoid shadowing the spec package.
		swagger := createTestSwaggerForInvoke(tc.path, tc.operationId, tc.httpMethod)
		addResourcesAndInvokes(versionResources, "/file/path", tc.path, resources.ModuleNaming{ResolvedName: "foo"}, &swagger)
		assert.Empty(t, versionResources.Resources, tcName)
		assert.NotEmpty(t, versionResources.Invokes, tcName)
		invoke, ok := versionResources.Invokes[tc.expected]
		require.True(t, ok, fmt.Sprintf("%s: found invokes: %v", tcName, maps.Keys(versionResources.Invokes)))
		assert.Equal(t, tc.path, invoke.Path, tcName)
	}
}
// TestDefaultState verifies that discovery attaches a DefaultBody to a
// resource exactly when a default state is registered for its path in
// openapi/defaults.
func TestDefaultState(t *testing.T) {
	// makeSwagger builds a minimal spec with GET/PUT/DELETE on path so the
	// path is discovered as a regular resource.
	makeSwagger := func(path string) Spec {
		return Spec{
			Swagger: spec.Swagger{
				SwaggerProps: spec.SwaggerProps{
					Info: &spec.Info{
						InfoProps: spec.InfoProps{
							Version: "2020-01-01",
						},
					},
					Paths: &spec.Paths{
						Paths: map[string]spec.PathItem{
							path: {
								// Needs GET, DELETE, PUT to be discovered as a resource
								PathItemProps: spec.PathItemProps{
									Get: &spec.Operation{
										OperationProps: spec.OperationProps{
											ID: "DiagnosticSettingsCategory_Get",
										},
									},
									Put: &spec.Operation{
										OperationProps: spec.OperationProps{
											ID: "DiagnosticSettingsCategory_Put",
										},
									},
									Delete: &spec.Operation{
										OperationProps: spec.OperationProps{
											ID: "DiagnosticSettingsCategory_Delete",
										},
									},
								},
							},
						},
					},
				},
			},
		}
	}
	// parseSwagger runs discovery on the path and returns the single resource it produces.
	parseSwagger := func(t *testing.T, path string) *ResourceSpec {
		swagger := makeSwagger(path)
		version := NewVersionResources()
		addResourcesAndInvokes(version, "/file/path", path, resources.ModuleNaming{ResolvedName: "insights"}, &swagger)
		require.NotEmpty(t, version.Resources)
		res, ok := version.Resources["DiagnosticSettingsCategory"]
		require.True(t, ok)
		return res
	}
	t.Run("No default", func(t *testing.T) {
		path := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/diagnosticSettingsCategories/{categoryName}"
		def := defaults.GetDefaultResourceState(path, "2020-01-01")
		require.Nil(t, def)
		res := parseSwagger(t, path)
		assert.Nil(t, res.DefaultBody)
	})
	t.Run("With default", func(t *testing.T) {
		path := "/{resourceId}/providers/Microsoft.Security/advancedThreatProtectionSettings/{settingName}"
		def := defaults.GetDefaultResourceState(path, "2020-01-01")
		require.NotNil(t, def)
		res := parseSwagger(t, path)
		assert.Equal(t, def.State, res.DefaultBody)
	})
}
// TestGetTypeName covers the three naming paths of getTypeName: the standard
// operation-id derivation, a disambiguated name, and a custom resource name
// that overrides the operation id entirely.
func TestGetTypeName(t *testing.T) {
	t.Run("Standard name", func(t *testing.T) {
		op := &spec.Operation{
			OperationProps: spec.OperationProps{
				ID: "DiagnosticSettingsCategory_Get",
			},
		}
		typeName, disambiguation := getTypeName(op, "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/diagnosticSettingsCategories/{categoryName}")
		assert.Equal(t, "DiagnosticSettingsCategory", typeName)
		assert.Nil(t, disambiguation)
	})
	t.Run("Custom name", func(t *testing.T) {
		op := &spec.Operation{
			OperationProps: spec.OperationProps{
				ID: "Foo_Get",
			},
		}
		typeName, disambiguation := getTypeName(op, "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.netapp/backupvaults/{vault}")
		assert.Equal(t, "BackupVaultFoo", typeName)
		assert.NotNil(t, disambiguation)
	})
	t.Run("Custom resource", func(t *testing.T) {
		// The operation id is ignored for paths with a registered custom name.
		op := &spec.Operation{
			OperationProps: spec.OperationProps{
				ID: "Unused",
			},
		}
		typeName, disambiguation := getTypeName(op, customresources.PimRoleEligibilityScheduleRequestPath)
		assert.Equal(t, "PimRoleEligibilitySchedule", typeName)
		assert.Nil(t, disambiguation)
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/defaults/defaultResourcesState_test.go | provider/pkg/openapi/defaults/defaultResourcesState_test.go | package defaults
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNotFound verifies GetDefaultResourceState returns nil for a path with no
// registered default state.
func TestNotFound(t *testing.T) {
	actual := GetDefaultResourceState("notapath", "2020-01-01")
	assert.Nil(t, actual)
}
// TestSkipDelete verifies that a path registered with SkipDelete returns a
// default state carrying only that flag.
func TestSkipDelete(t *testing.T) {
	actual := GetDefaultResourceState("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/{tdeName}", "2020-01-01")
	expected := DefaultResourceState{
		SkipDelete: true,
	}
	assert.Equal(t, &expected, actual)
}
// TestPathNormalisation verifies the lookup is insensitive to path parameter
// names and casing.
func TestPathNormalisation(t *testing.T) {
	// Same as TestSkipDelete, but with a different casing and parameter naming.
	actual := GetDefaultResourceState("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/{transparentDataEncryptionName}", "2020-01-01")
	expected := DefaultResourceState{
		SkipDelete: true,
	}
	assert.Equal(t, &expected, actual)
}
// TestEmptyBody verifies a path registered with an empty default body yields a
// non-nil but empty State map.
func TestEmptyBody(t *testing.T) {
	actual := GetDefaultResourceState("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}", "2020-01-01")
	expected := DefaultResourceState{
		State: map[string]interface{}{},
	}
	assert.Equal(t, &expected, actual)
}
// TestNonEmptyCollection verifies a default state with non-empty collection
// values is returned with HasNonEmptyCollections set.
func TestNonEmptyCollection(t *testing.T) {
	actual := GetDefaultResourceState("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/currentbillingfeatures", "2020-01-01")
	expected := DefaultResourceState{
		State: map[string]interface{}{
			"currentBillingFeatures": []string{"Basic"},
			"dataVolumeCap":          map[string]string{},
		},
		HasNonEmptyCollections: true,
	}
	assert.Equal(t, &expected, actual)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/defaults/defaultResourceStateConvert_test.go | provider/pkg/openapi/defaults/defaultResourceStateConvert_test.go | package defaults_test
import (
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/convert"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/defaults"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/paths"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider/crud"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAllDefaultStatesConvertable checks that every registered default
// resource state can be converted into an Azure REST request body via the
// resource's PUT parameters, using the compact metadata built into bin/.
func TestAllDefaultStatesConvertable(t *testing.T) {
	metadataBytes, err := os.ReadFile(filepath.Join("..", "..", "..", "..", "bin", "metadata-compact.json"))
	require.Nil(t, err)
	metadata, err := provider.LoadMetadata(metadataBytes)
	require.Nil(t, err)

	// Group resource tokens by the normalised form of their API path so each
	// default-state path can be matched with every resource version it covers.
	resourcesByNormalisedPath := map[string][]string{}
	for resourceToken, resource := range metadata.Resources {
		normalisedPath := paths.NormalizePath(resource.Path)
		resourcesByNormalisedPath[normalisedPath] = append(resourcesByNormalisedPath[normalisedPath], resourceToken)
	}

	// Extracts the SDK version segment (e.g. "v20200101") from a resource token.
	resourceTokenVersionMatcher := regexp.MustCompile(`/(v[^:]+)`)

	for _, path := range defaults.ListPathsWithDefaults() {
		cleaned := strings.ReplaceAll(path, "/", "_")
		// Remove version query parameter
		pathWithoutVersion := path
		if idx := strings.Index(path, "?"); idx != -1 {
			pathWithoutVersion = path[:idx]
		}
		t.Run(cleaned, func(t *testing.T) {
			resourceTokens, found := resourcesByNormalisedPath[pathWithoutVersion]
			require.Truef(t, found, "Resource not found in test data: %s", pathWithoutVersion)
			for _, resourceToken := range resourceTokens {
				var apiVersion openapi.ApiVersion
				sdkVersionMatch := resourceTokenVersionMatcher.FindStringSubmatch(resourceToken)
				if len(sdkVersionMatch) > 1 {
					v, err := openapi.SdkToApiVersion(openapi.SdkVersion(sdkVersionMatch[1]))
					require.Nil(t, err, "Failed to convert SDK version to API version: %s", sdkVersionMatch[1])
					apiVersion = v
				}
				defaultState := defaults.GetDefaultResourceState(path, string(apiVersion))
				if defaultState == nil || defaultState.SkipDelete {
					// Fix: this used to `return`, which aborted the subtest on the
					// first token without a convertible default state and silently
					// skipped all remaining resource versions for this path.
					continue
				}
				resource, found := metadata.Resources[resourceToken]
				require.Truef(t, found, "Resource not found in metadata: %s", resourceToken)
				converted, err := crud.PrepareAzureRESTBody("", resource.PutParameters, [][]string{}, defaultState.State, &convert.SdkShapeConverter{})
				assert.Nil(t, err, "Failed to prepare body for %s", resourceToken)
				assert.NotNil(t, converted, "No body returned for %s", resourceToken)
			}
		})
	}
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/defaults/defaultResourcesState.go | provider/pkg/openapi/defaults/defaultResourcesState.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package defaults
import (
"fmt"
"reflect"
"sort"
"strings"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/openapi/paths"
)
// defaultResourcesStateRaw maps non-normalized resource paths to the default state of the resource. It's handy to
// have paths as they are in the Open API spec's latest version for easy search. This map shouldn't be used for
// lookups: use 'defaultResourcesStateNormalized' instead. A key may carry a "?version=<apiVersion>" suffix when the
// default state applies only to that specific API version.
var defaultResourcesStateRaw = map[string]map[string]interface{}{
	"/{resourceId}/providers/Microsoft.Security/advancedThreatProtectionSettings/{settingName}": {
		"isEnabled": false,
	},
	"/{scope}/providers/Microsoft.Resources/tags/default": {
		"properties": map[string]interface{}{
			"tags": map[string]interface{}{},
		},
	},
	// https://github.com/pulumi/pulumi-azure-native/issues/1729
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/policies/{policyId}": {
		"format": "xml",
		"value": "<!--\r\n IMPORTANT:\r\n - Policy elements can appear only within the <inbound>, <outbound>, <backend> section elements.\r\n - Only the <forward-request> policy element can appear within the <backend> section element.\r\n - To apply a policy to the incoming request (before it is forwarded to the backend service), place a corresponding policy element within the <inbound> section element.\r\n - To apply a policy to the outgoing response (before it is sent back to the caller), place a corresponding policy element within the <outbound> section element.\r\n - To add a policy position the cursor at the desired insertion point and click on the round button associated with the policy.\r\n - To remove a policy, delete the corresponding policy statement from the policy document.\r\n - Policies are applied in the order of their appearance, from the top down.\r\n-->\r\n<policies>\r\n\t<inbound></inbound>\r\n\t<backend>\r\n\t\t<forward-request />\r\n\t</backend>\r\n\t<outbound></outbound>\r\n</policies>",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/policies/{policyId}?version=2018-06-01-preview": {
		"contentFormat": "xml",
		"policyContent": "",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default": {
		"properties": map[string]interface{}{
			"traceEnabled":                  false,
			"appInsightsInstrumentationKey": nil,
			"appInsightsSamplingRate":       10.0,
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/configServers/default": {
		"properties": map[string]interface{}{
			"configServer": nil,
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}": {
		"properties": map[string]interface{}{
			"poolSize": map[string]interface{}{
				"name": "S1",
			},
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/configurations/{configurationName}": {
		"source": "system-default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/flexibleServers/{serverName}/configurations/{configurationName}": {
		"source": "system-default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMySQL/servers/{serverName}/configurations/{configurationName}": {
		"source": "system-default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/configurations/{configurationName}": {
		"source": "system-default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets/default": {
		"defaultAction":       "Deny",
		"publicNetworkAccess": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/currentbillingfeatures": {
		"currentBillingFeatures": []string{"Basic"},
		"dataVolumeCap":          map[string]string{},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}": {
		"enabled": false,
	},
	"/{resourceId}/providers/Microsoft.Security/defenderForStorageSettings/{settingName}": {
		"properties": map[string]interface{}{
			"isEnabled": false,
			// https://learn.microsoft.com/en-us/azure/storage/common/azure-defender-storage-configure?tabs=enable-subscription#rest-api-1
			"overrideSubscriptionLevelSettings": true,
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/networkRuleSets/default": {
		"defaultAction": "Deny",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors/{advisorName}": {
		"autoExecuteStatus": "Default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors/{advisorName}?version=2014-04-01": {
		"autoExecuteValue": "Default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/azureADOnlyAuthentications/{authenticationName}": {
		"azureADOnlyAuthentication": false,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}?version=2014-04-01": {
		"autoExecuteValue": "Default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/advisors/{advisorName}": {
		"autoExecuteStatus": "Default",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/auditingSettings/{blobAuditingPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/backupLongTermRetentionPolicies/{policyName}": {
		"weeklyRetention":  "PT0S",
		"monthlyRetention": "PT0S",
		"yearlyRetention":  "PT0S",
		"weekOfYear":       1,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/backupShortTermRetentionPolicies/{policyName}": {
		"retentionDays": 7,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/dataMaskingPolicies/{dataMaskingPolicyName}": {
		"dataMaskingState": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extendedAuditingSettings/{blobAuditingPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/geoBackupPolicies/{geoBackupPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/securityAlertPolicies/{securityAlertPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/auditingSettings/{blobAuditingPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}": {
		"serverKeyType": "ServiceManaged",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/extendedAuditingSettings/{blobAuditingPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}": {
		"recurringScans": map[string]interface{}{
			"isEnabled":               false,
			"emailSubscriptionAdmins": true,
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}": {},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}": {
		"immutabilityPeriodSinceCreationInDays": 0,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}": {
		"state": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}": {},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}": {},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}": {},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/transparentDataEncryption/{transparentDataEncryptionName}": {
		"status": "Disabled",
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/authsettings": {
		"enabled": false,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/authsettingsV2": {
		"platform": map[string]interface{}{
			"enabled": false,
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettingsV2": {
		"platform": map[string]interface{}{
			"enabled": false,
		},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/logs": {
		"applicationLogs": map[string]string{},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/logs": {
		"applicationLogs": map[string]string{},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/slotConfigNames": {
		"appSettingNames": []string{},
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkConfig/virtualNetwork": {
		"swiftSupported": true,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkConfig/virtualNetwork": {
		"subnetResourceId": "*", // This is going to be a resource ID, so we choose accept any value here.
		"swiftSupported":   true,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web": {},
	// https://learn.microsoft.com/en-us/azure/templates/microsoft.web/sites/basicpublishingcredentialspolicies-ftp?pivots=deployment-language-arm-template#csmpublishingcredentialspoliciesentityproperties-1
	// We set the default value to true, as this is the observed behavior when the resource is created.
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/basicPublishingCredentialsPolicies/ftp": {
		"allow": true,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/basicPublishingCredentialsPolicies/ftp": {
		"allow": true,
	},
	// https://learn.microsoft.com/en-us/azure/templates/microsoft.web/sites/basicpublishingcredentialspolicies-ftp?pivots=deployment-language-arm-template#csmpublishingcredentialspoliciesentityproperties-1
	// We set the default value to true, as this is the observed behavior when the resource is created.
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/basicPublishingCredentialsPolicies/scm": {
		"allow": true,
	},
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/basicPublishingCredentialsPolicies/scm": {
		"allow": true,
	},
	// https://github.com/pulumi/pulumi-azure-native/issues/3934
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/sqlVulnerabilityAssessments/{vulnerabilityAssessmentName}": {
		"state": "Disabled",
	},
}
// skipDeleteResources lists non-normalized paths of auto-created resources for
// which the delete step must be skipped entirely (no PUT of a default state).
var skipDeleteResources = map[string]bool{
	// https://learn.microsoft.com/en-us/azure/azure-sql/database/transparent-data-encryption-tde-overview
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/transparentDataEncryption/{tdeName}": true,
}
// defaultResourcesStateNormalized maps normalized resource paths (optionally
// suffixed with "?version=<apiVersion>") to the default state of a resource:
// the configuration of that resource after its parent is created and before
// any explicit PUT operations on its endpoint. Azure creates these resources
// automatically with their parents, so the provider can expect to observe
// this state at the time of the Create operation.
var defaultResourcesStateNormalized map[string]DefaultResourceState

func init() {
	defaultResourcesStateNormalized = map[string]DefaultResourceState{}
	// Entries that only suppress the delete-time PUT.
	for path := range skipDeleteResources {
		addNormalisedState(path, DefaultResourceState{SkipDelete: true})
	}
	// Entries that carry an actual default body to restore on delete.
	for path, state := range defaultResourcesStateRaw {
		addNormalisedState(path, DefaultResourceState{
			State:                  state,
			HasNonEmptyCollections: containsNonEmptyCollections(state),
		})
	}
}
// addNormalisedState registers state under the normalised form of path,
// panicking on duplicate registrations (a programming error in the tables).
func addNormalisedState(path string, state DefaultResourceState) {
	// Only the path portion is normalised; a "?version=..." suffix must stay verbatim.
	rawPath, args := splitPathAndArguments(path)
	key := paths.NormalizePath(rawPath) + args
	if _, dup := defaultResourcesStateNormalized[key]; dup {
		panic(fmt.Errorf("FATAL: default state for %s is already set", key))
	}
	defaultResourcesStateNormalized[key] = state
}
// splitPathAndArguments splits a path at the first "?" into the path proper
// and the query-argument suffix. The suffix, if present, retains its leading
// "?"; otherwise it is the empty string.
func splitPathAndArguments(path string) (string, string) {
	// strings.Cut is the idiomatic "split once" replacement for Index + slicing.
	if before, after, found := strings.Cut(path, "?"); found {
		return before, "?" + after
	}
	return path, ""
}
// containsNonEmptyCollections reports whether any top-level property of value
// is a non-empty slice or array. Nil values are skipped; maps are not treated
// as collections, and nested structures are not inspected.
func containsNonEmptyCollections(value map[string]interface{}) bool {
	for _, prop := range value {
		if prop == nil {
			continue
		}
		rv := reflect.ValueOf(prop)
		if k := rv.Kind(); (k == reflect.Slice || k == reflect.Array) && rv.Len() > 0 {
			return true
		}
	}
	return false
}
// DefaultResourceState describes the Azure-defined state of an auto-created
// child resource, plus flags controlling how the provider handles its lifecycle.
type DefaultResourceState struct {
	// State is the default state of a resource.
	State map[string]interface{}
	// SkipDelete is true if the PUT operation should be skipped during the delete step.
	SkipDelete bool
	// HasNonEmptyCollections is true if any property of the default state is a non-empty collection.
	HasNonEmptyCollections bool
}
// GetDefaultResourceState returns the registered default state for the given
// resource path, preferring a version-specific entry ("<path>?version=<v>")
// over the version-independent one. It returns nil when no entry exists.
func GetDefaultResourceState(path, version string) *DefaultResourceState {
	normalized := paths.NormalizePath(path)
	candidates := []string{
		fmt.Sprintf("%s?version=%s", normalized, version),
		normalized,
	}
	for _, key := range candidates {
		if state, ok := defaultResourcesStateNormalized[key]; ok {
			// Return a pointer to a copy so callers cannot mutate the table entry.
			return &state
		}
	}
	return nil
}
// SkipDeleteOperation reports whether the delete step must be skipped for the
// resource identified by path and version.
func SkipDeleteOperation(path, version string) bool {
	if state := GetDefaultResourceState(path, version); state != nil {
		return state.SkipDelete
	}
	return false
}
// ListPathsWithDefaults returns every normalised path that has a registered
// default state, sorted for deterministic iteration.
func ListPathsWithDefaults() []string {
	result := make([]string, 0, len(defaultResourcesStateNormalized))
	for p := range defaultResourcesStateNormalized {
		result = append(result, p)
	}
	sort.Strings(result)
	return result
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/pkg/openapi/paths/normalisePath.go | provider/pkg/openapi/paths/normalisePath.go | package paths
import "strings"
// Sometimes, Azure resources change the API paths between API versions. Most of the time, we can detect that based
// on operation names. However, in a number of cases, the operation names change at the same time.
// legacyPathMappings provides a manual map to help our codegen discover the old "aliases" of new resource paths
// and group them under the same Pulumi resource, including proper top-level resource calculation and aliasing.
// Keys are the legacy (old) paths; values are the current canonical paths they map to.
var legacyPathMappings = map[string]string{
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/buildTasks/{buildTaskName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/tasks/{taskName}",
	"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.CostManagement/connectors/{connectorName}": "/providers/Microsoft.CostManagement/cloudConnectors/{connectorName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/cassandra/keyspaces/{keyspaceName}/tables/{tableName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/gremlin/databases/{databaseName}/graphs/{graphName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/mongodb/databases/{databaseName}/collections/{collectionName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/sql/databases/{databaseName}/containers/{containerName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/apis/table/tables/{tableName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/tables/{tableName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/interfaceEndpoints/{interfaceEndpointName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}",
	"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/listKeys": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Solutions/appliances/{applianceName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Solutions/applications/{applicationName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Solutions/applianceDefinitions/{applianceDefinitionName}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Solutions/applicationDefinitions/{applicationDefinitionName}",
	"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}",
}
// legacyPathMappingNormalized is legacyPathMappings with both keys and values
// normalised, populated once at package initialisation for fast lookups.
var legacyPathMappingNormalized = map[string]string{}

func init() {
	for legacy, current := range legacyPathMappings {
		legacyPathMappingNormalized[NormalizePath(legacy)] = NormalizePath(current)
	}
}
// NormalizePath converts an API path to its canonical form: lowercase, with
// dashes removed and every "{placeholder}" segment collapsed to "{}". Paths
// that normalise to the same canonical string are considered to represent the
// same resource. Known legacy paths are additionally redirected to their
// current canonical equivalent.
func NormalizePath(path string) string {
	canonical := strings.ReplaceAll(strings.ToLower(strings.TrimSuffix(path, "/")), "-", "")
	// Work around an odd version v2019-01-01-preview of SecurityInsights where they have a parameter for the provider.
	// This breaks all path matching for that version which includes quite a lot of resources. Instead of providing
	// a value per each resource, let's replace this path segment while normalizing.
	canonical = strings.ReplaceAll(canonical, "providers/{operationalinsightsresourceprovider}", "providers/microsoft.operationalinsights")

	segments := strings.Split(canonical, "/")
	for i, segment := range segments {
		// Collapse parameter placeholders so differently-named parameters compare equal.
		if strings.HasPrefix(segment, "{") && strings.HasSuffix(segment, "}") {
			segments[i] = "{}"
		}
	}
	result := strings.Join(segments, "/")

	if mapped, ok := legacyPathMappingNormalized[result]; ok {
		return mapped
	}
	return result
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/cmd/tools/regressed_versions.go | provider/cmd/tools/regressed_versions.go | package main
import (
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"gopkg.in/yaml.v3"
)
// ModuleData represents one spec file entry like
//
//	Aad:
//	  tracking: "2022-12-01"
type ModuleData struct {
	// Tracking is the API version string (e.g. "2022-12-01") this module tracks;
	// empty when the spec file omits it.
	Tracking string `yaml:"tracking,omitempty"`
}
// main compares the tracked API versions of two spec YAML files and reports
// modules whose version regressed from the first file to the second.
func main() {
	if len(os.Args) != 3 {
		fmt.Println("Usage: program vA-spec.yaml vB-spec.yaml")
		os.Exit(1)
	}
	pathA, pathB := os.Args[1], os.Args[2]

	// Read and parse both YAML files.
	servicesA, err := parseYamlFile(pathA)
	if err != nil {
		fmt.Printf("Error parsing file %s: %v\n", pathA, err)
		os.Exit(1)
	}
	servicesB, err := parseYamlFile(pathB)
	if err != nil {
		fmt.Printf("Error parsing file %s: %v\n", pathB, err)
		os.Exit(1)
	}

	// Compare tracking values.
	compareTrackingValues(servicesA, servicesB)
}
// parseYamlFile reads the YAML spec at filePath and decodes it into a map of
// module name to ModuleData.
func parseYamlFile(filePath string) (map[string]ModuleData, error) {
	raw, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, err
	}
	var services map[string]ModuleData
	if err := yaml.Unmarshal(raw, &services); err != nil {
		return nil, err
	}
	return services, nil
}
// compareTrackingValues walks the modules of servicesA in sorted order and
// prints any whose tracking version in servicesB is older than in servicesA.
// Modules present only in servicesB are not reported.
func compareTrackingValues(servicesA, servicesB map[string]ModuleData) {
	keys := make([]string, 0, len(servicesA))
	for k := range servicesA {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, key := range keys {
		fmt.Println(key)
		serviceA := servicesA[key]
		serviceB, exists := servicesB[key]
		if !exists {
			continue
		}
		// Skip if either service doesn't have a tracking value.
		if serviceA.Tracking == "" || serviceB.Tracking == "" {
			continue
		}
		// Compare tracking values (as strings).
		fmt.Printf(" comparing %q to %q in %s\n", serviceA.Tracking, serviceB.Tracking, key)
		if compareVersions(serviceB.Tracking, serviceA.Tracking) < 0 {
			fmt.Printf("%s: %q to %q\n", key, serviceA.Tracking, serviceB.Tracking)
		}
	}
}
// compareVersions orders two Azure API version strings (e.g. "2022-12-01" or
// "2022-12-01-preview"). It returns -1 if version1 < version2, 0 if they are
// equal, and 1 if version1 > version2. The date portions compare
// lexicographically; for equal dates, a preview version ranks below a stable one.
func compareVersions(version1, version2 string) int {
	date1 := strings.Split(version1, "-preview")[0]
	date2 := strings.Split(version2, "-preview")[0]
	switch {
	case date1 < date2:
		return -1
	case date1 > date2:
		return 1
	}

	// Dates are equal: break the tie on preview status.
	preview1 := strings.Contains(version1, "preview")
	preview2 := strings.Contains(version2, "preview")
	switch {
	case preview1 && !preview2:
		return -1
	case preview2 && !preview1:
		return 1
	default:
		return 0
	}
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/cmd/pulumi-resource-azure-native/main.go | provider/cmd/pulumi-resource-azure-native/main.go | // Copyright 2016-2020, Pulumi Corporation.
package main
import (
_ "embed"
"reflect"
"unsafe"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/provider"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/version"
)
// providerName is the registered name of this Pulumi provider plugin.
var providerName = "azure-native"

// azureApiResources is the compact Azure resource metadata embedded into the
// binary at build time.
//
//go:embed metadata-compact.json
var azureApiResources string

// pulumiSchema is the full Pulumi package schema embedded into the binary at
// build time.
//
//go:embed schema-full.json
var pulumiSchema string
// unsafeStringToBytes returns a []byte view over data's bytes without copying.
// The result aliases the string's backing memory, so it must never be mutated.
// NOTE(review): reflect.StringHeader is deprecated in newer Go releases;
// unsafe.StringData would be the modern replacement — confirm the module's Go
// version before switching.
func unsafeStringToBytes(data string) []byte {
	header := (*reflect.StringHeader)(unsafe.Pointer(&data))
	return unsafe.Slice((*byte)(unsafe.Pointer(header.Data)), header.Len)
}
// main serves the azure-native provider plugin, passing the embedded schema
// and resource metadata (aliased as []byte without copying).
func main() {
	provider.Serve(providerName, version.Version, unsafeStringToBytes(pulumiSchema), unsafeStringToBytes(azureApiResources))
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/provider/cmd/pulumi-gen-azure-native/main.go | provider/cmd/pulumi-gen-azure-native/main.go | // Copyright 2016-2020, Pulumi Corporation.
package main
import (
"bytes"
"fmt"
"os"
"path"
"path/filepath"
"text/template"
"github.com/segmentio/encoding/json"
"github.com/pkg/errors"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/debug"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/gen"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/resources"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/squeeze"
embeddedVersion "github.com/pulumi/pulumi-azure-native/v2/provider/pkg/version"
"github.com/pulumi/pulumi-azure-native/v2/provider/pkg/versioning"
gogen "github.com/pulumi/pulumi/pkg/v3/codegen/go"
"github.com/pulumi/pulumi/pkg/v3/codegen/schema"
)
// main drives code generation for the azure-native provider. os.Args[1]
// selects a mode: "schema" (full + default-version schemas and resource
// metadata), "docs" (docs schema with multi-language examples), "squeeze"
// (removed resources/invokes across API versions), or "go" (split Go SDK
// from an existing schema file). Failures are reported by panicking, which
// is acceptable for a build-time tool.
func main() {
	var debugEnabled bool
	debugEnv := os.Getenv("DEBUG_CODEGEN")
	if debugEnabled = debugEnv == "true"; debugEnabled {
		debug.Debug = &debugEnabled
	}
	languages := os.Args[1]
	// The third argument is the optional version.
	// Exactly one of the CLI arg or embedded version must be set.
	if len(os.Args) == 3 {
		if embeddedVersion.Version != "" {
			panic(fmt.Sprintf("embedded version already set to %s", embeddedVersion.Version))
		}
		embeddedVersion.Version = os.Args[2]
	} else if embeddedVersion.Version == "" {
		panic("no version provided via CLI argument or ldflags")
	}
	// Use the one true version everywhere.
	version := embeddedVersion.GetVersion()
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// Optional fourth argument overrides the schema path (used by the "go" mode).
	schemaPath := filepath.Join(wd, "bin", "schema-full.json")
	if len(os.Args) == 4 {
		schemaPath = os.Args[3]
		if !filepath.IsAbs(schemaPath) {
			schemaPath = filepath.Join(wd, schemaPath)
		}
	}
	fmt.Printf("Generating with version %s and schema path %s\n", version, schemaPath)
	// Use DEBUG_CODEGEN_NAMESPACES to just generate a single namespace (e.g. "Compute") for quick testing
	namespaces := os.Getenv("DEBUG_CODEGEN_NAMESPACES")
	if namespaces == "" {
		namespaces = "*"
	}
	// Use DEBUG_CODEGEN_APIVERSIONS to just generate certain versions (e.g. "2019-09-01", "2019*") for quick testing,
	// likely in combination with DEBUG_CODEGEN_NAMESPACES
	apiVersions := os.Getenv("DEBUG_CODEGEN_APIVERSIONS")
	codegenSchemaOutputPath := os.Getenv("CODEGEN_SCHEMA_OUTPUT_PATH")
	codegenMetadataOutputPath := os.Getenv("CODEGEN_METADATA_OUTPUT_PATH")
	buildSchemaArgs := versioning.BuildSchemaArgs{
		Specs: versioning.ReadSpecsArgs{
			SpecsDir:        os.Getenv("CODEGEN_SPECS_DIR"),
			NamespaceFilter: namespaces,
			VersionsFilter:  apiVersions,
		},
		RootDir: wd,
		Version: version.String(),
	}
	switch languages {
	case "schema":
		buildSchemaResult, buildSchemaErr := versioning.BuildSchema(buildSchemaArgs)
		// Attempt to write out the version metadata before failing as this might help us diagnose the issue.
		if namespaces == "*" && apiVersions == "" && buildSchemaResult != nil {
			written, err := buildSchemaResult.Version.WriteTo("versions")
			if err != nil {
				panic(err)
			}
			for _, v := range written {
				fmt.Printf("Emitted %s\n", v)
			}
			written, err = buildSchemaResult.Reports.WriteTo("reports")
			if err != nil {
				panic(err)
			}
			for _, v := range written {
				fmt.Printf("Emitted %s\n", v)
			}
		} else {
			// NOTE(review): this message also prints when buildSchemaResult is
			// nil even though no DEBUG_CODEGEN_* variable is set — confirm
			// whether that case deserves its own message.
			fmt.Println("Note: skipping writing version metadata and reports because DEBUG_CODEGEN_NAMESPACES or DEBUG_CODEGEN_APIVERSIONS is set.")
		}
		if buildSchemaErr != nil {
			panic(buildSchemaErr)
		}
		if codegenSchemaOutputPath == "" {
			codegenSchemaOutputPath = path.Join("bin", "schema-full.json")
		}
		if err = emitSchema(buildSchemaResult.PackageSpec, version.String(), codegenSchemaOutputPath); err != nil {
			panic(err)
		}
		fmt.Printf("Emitted %s.\n", codegenSchemaOutputPath)
		// Default versions only
		buildSchemaArgs.ExcludeExplicitVersions = true
		buildDefaultVersionsSchemaResult, err := versioning.BuildSchema(buildSchemaArgs)
		if err != nil {
			panic(err)
		}
		codegenDefaultVersionsSchemaOutputPath := filepath.Join(filepath.Dir(codegenSchemaOutputPath), "schema-default-versions.json")
		if err = emitSchema(buildDefaultVersionsSchemaResult.PackageSpec, version.String(), codegenDefaultVersionsSchemaOutputPath); err != nil {
			panic(err)
		}
		fmt.Printf("Emitted %s.\n", codegenDefaultVersionsSchemaOutputPath)
		// We can't generate schema.json every time because it's slow and isn't reproducible.
		// So we warn in case someone's expecting to see changes to schema.json after running this.
		fmt.Println("Note: provider/cmd/pulumi-resource-azure-native/schema.json is generated by the `generate_docs` target.")
		// Also, emit the resource metadata for the provider.
		if codegenMetadataOutputPath == "" {
			codegenMetadataOutputPath = path.Join("bin", "metadata-compact.json")
		}
		if err = emitMetadata(&buildSchemaResult.Metadata, codegenMetadataOutputPath); err != nil {
			panic(err)
		}
		fmt.Printf("Emitted %s.\n", codegenMetadataOutputPath)
	case "docs":
		// Docs use default versions only, with code examples in every SDK language.
		buildSchemaArgs.ExcludeExplicitVersions = true
		buildSchemaArgs.ExampleLanguages = []string{"nodejs", "dotnet", "python", "go", "java", "yaml"}
		buildSchemaResult, err := versioning.BuildSchema(buildSchemaArgs)
		if err != nil {
			panic(err)
		}
		if codegenSchemaOutputPath == "" {
			codegenSchemaOutputPath = path.Join(".", "provider", "cmd", "pulumi-resource-azure-native", "schema.json")
		}
		err = emitDocsSchema(&buildSchemaResult.PackageSpec, codegenSchemaOutputPath)
		if err != nil {
			panic(err)
		}
	case "squeeze":
		// Squeeze compares explicit API versions to find removable duplicates.
		buildSchemaArgs.OnlyExplicitVersions = true
		buildSchemaResult, err := versioning.BuildSchema(buildSchemaArgs)
		if err != nil {
			panic(err)
		}
		squeezedResources, err := squeeze.CompareAll(&buildSchemaResult.PackageSpec)
		if err != nil {
			panic(err)
		}
		squeezedInvokes := versioning.FindRemovedInvokesFromResources(buildSchemaResult.Modules, squeezedResources)
		majorVersion := version.Major
		err = gen.EmitFile(path.Join("versions", fmt.Sprintf("v%d-removed-resources.json", majorVersion)), squeezedResources)
		if err != nil {
			panic(err)
		}
		err = gen.EmitFile(path.Join("versions", fmt.Sprintf("v%d-removed-invokes.yaml", majorVersion)), squeezedInvokes)
		if err != nil {
			panic(err)
		}
	case "go":
		// Just read existing schema if we're not re-generating
		schemaBytes, err := os.ReadFile(schemaPath)
		if err != nil {
			panic(err)
		}
		var pkgSpec schema.PackageSpec
		err = json.Unmarshal([]byte(schemaBytes), &pkgSpec)
		if err != nil {
			panic(err)
		}
		outdir := path.Join(".", "sdk", "pulumi-azure-native-sdk")
		pkgSpec.Version = version.String()
		err = emitSplitPackage(&pkgSpec, "go", outdir)
		if err != nil {
			panic(err)
		}
	default:
		panic(fmt.Sprintf("unknown language %s", languages))
	}
}
// emitSchema stamps version onto the package spec, serializes it to compact
// JSON, and writes the result to outputPath.
func emitSchema(pkgSpec schema.PackageSpec, version, outputPath string) error {
	pkgSpec.Version = version
	data, err := json.Marshal(pkgSpec)
	if err != nil {
		return errors.Wrap(err, "marshaling Pulumi schema")
	}
	return gen.EmitFile(outputPath, data)
}
// emitDocsSchema serializes the package spec to indented, human-readable
// JSON (for the docs build) and writes it to outputPath.
func emitDocsSchema(pkgSpec *schema.PackageSpec, outputPath string) error {
	data, err := json.MarshalIndent(pkgSpec, "", " ")
	if err != nil {
		return errors.Wrap(err, "marshaling Pulumi schema")
	}
	return gen.EmitFile(outputPath, data)
}
// emitMetadata serializes the Azure API metadata to JSON (with the trailing
// newline that json.Encoder produces) and writes it to outputPath.
func emitMetadata(metadata *resources.AzureAPIMetadata, outputPath string) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(metadata); err != nil {
		return errors.Wrap(err, "marshaling metadata")
	}
	return gen.EmitFile(outputPath, buf.Bytes())
}
// emitSplitPackage generates the split Go SDK under outDir: the root module
// plus one nested Go module per generated service directory, each with its
// own go.mod and a copy of pulumi-plugin.json.
func emitSplitPackage(pkgSpec *schema.PackageSpec, language, outDir string) error {
	moduleVersionPath := gen.GoModulePathVersion(pkgSpec.Version)
	ppkg, err := schema.ImportSpec(*pkgSpec, nil, schema.ValidationOptions{
		AllowDanglingReferences: true,
	})
	if err != nil {
		return errors.Wrap(err, "reading schema")
	}
	version := gen.GoModVersion(ppkg.Version)
	fmt.Printf("Go version %s calculated from %s\n", version, ppkg.Version)
	files, err := gogen.GeneratePackage("the Pulumi SDK Generator", ppkg, nil)
	if err != nil {
		return errors.Wrapf(err, "generating %s package", language)
	}
	// Root-module files: the SDK version marker and the root go.mod.
	files["version.txt"] = []byte(version)
	files["go.mod"] = []byte(goModTemplate(GoMod{
		ModuleVersionPath: moduleVersionPath,
	}))
	for f, contents := range files {
		if err := gen.EmitFile(path.Join(outDir, f), contents); err != nil {
			return err
		}
		// Special case for identifying where we need modules in subdirectories.
		// Every directory directly under the root that contains an init.go
		// becomes its own Go submodule.
		matched, err := filepath.Match("*/init.go", f)
		if err != nil {
			return err
		}
		if matched {
			dir := filepath.Dir(f)
			module := filepath.Base(dir)
			modPath := filepath.Join(dir, "go.mod")
			modContent := goModTemplate(GoMod{
				Version:           version,
				SubmoduleName:     module,
				ModuleVersionPath: moduleVersionPath,
			})
			if err := gen.EmitFile(path.Join(outDir, modPath), []byte(modContent)); err != nil {
				return err
			}
			// Each submodule carries a copy of the root pulumi-plugin.json.
			pluginPath := filepath.Join(dir, "pulumi-plugin.json")
			pluginContent := files["pulumi-plugin.json"]
			if err := gen.EmitFile(path.Join(outDir, pluginPath), pluginContent); err != nil {
				return err
			}
		}
	}
	return nil
}
// GoMod captures the values interpolated into generated go.mod files.
type GoMod struct {
	// Version is the root SDK module version that submodules depend on.
	Version string
	// SubmoduleName is empty for the root module, otherwise the submodule
	// directory name (e.g. "storage").
	SubmoduleName string
	// ModuleVersionPath is the major-version path suffix (e.g. "/v2"),
	// possibly empty.
	ModuleVersionPath string
}

// goModTemplateCache is parsed once at package initialization.
// template.Must surfaces a malformed template immediately at startup
// instead of on the first call of goModTemplate, and avoids the data race
// of lazily initializing a package-level variable on demand.
var goModTemplateCache = template.Must(template.New("go-mod").Parse(`
{{ if eq .SubmoduleName "" }}
module github.com/pulumi/pulumi-azure-native-sdk{{ .ModuleVersionPath }}
{{ else }}
module github.com/pulumi/pulumi-azure-native-sdk/{{ .SubmoduleName }}{{ .ModuleVersionPath }}
{{ end }}
go 1.18
require (
github.com/blang/semver v3.5.1+incompatible
github.com/pkg/errors v0.9.1
{{ if ne .SubmoduleName "" }}
github.com/pulumi/pulumi-azure-native-sdk{{ .ModuleVersionPath }} {{ .Version }}
{{ end }}
github.com/pulumi/pulumi/sdk/v3 v3.37.2
)
{{ if ne .SubmoduleName "" }}
replace github.com/pulumi/pulumi-azure-native-sdk{{ .ModuleVersionPath }} => ../
{{ end }}
`))

// goModTemplate renders a go.mod for the root SDK module (SubmoduleName ==
// "") or for a submodule that requires and replaces the root module. It
// panics on execution errors, which can only arise from a programming
// mistake in the template or the GoMod struct.
func goModTemplate(goMod GoMod) string {
	var result bytes.Buffer
	if err := goModTemplateCache.Execute(&result, goMod); err != nil {
		panic(err)
	}
	return result.String()
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/examples_py_test.go | examples/examples_py_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
//go:build python || all
// +build python all
package examples
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAccSimplePython deploys the py-simple example program end to end.
func TestAccSimplePython(t *testing.T) {
	test := getPythonBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "py-simple"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccLoadBalancer deploys the py-loadbalancer example program.
func TestAccLoadBalancer(t *testing.T) {
	test := getPythonBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "py-loadbalancer"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccLoadBalancerSubResource deploys the py-loadbalancer-subresource
// example program.
func TestAccLoadBalancerSubResource(t *testing.T) {
	test := getPythonBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "py-loadbalancer-subresource"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccNSG deploys the py-nsg example program.
func TestAccNSG(t *testing.T) {
	test := getPythonBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "py-nsg"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccUserAssignedIdentityPython deploys the py-user-assigned-identity
// example program.
func TestAccUserAssignedIdentityPython(t *testing.T) {
	test := getPythonBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "py-user-assigned-identity"),
		})
	integration.ProgramTest(t, &test)
}
// TestAccWebAppWithSiteConfigPython deploys py-webapp and verifies that both
// the WebApp and the WebAppSlot in the resulting stack expose the expected
// siteConfig outputs: the configured IP security restriction plus the
// default one, and the custom default document.
func TestAccWebAppWithSiteConfigPython(t *testing.T) {
	test := getPythonBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "py-webapp"),
			ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
				require.NotNil(t, stackInfo.Deployment)
				require.NotNil(t, stackInfo.Deployment.Resources)
				// Tracks which of the two resource kinds have been validated.
				found := map[string]struct{}{}
				for _, resource := range stackInfo.Deployment.Resources {
					typeStr := string(resource.Type)
					if typeStr != "azure-native:web:WebApp" && typeStr != "azure-native:web:WebAppSlot" {
						continue
					}
					webApp := &resource
					kind := strings.TrimPrefix(typeStr, "azure-native:web:")
					found[kind] = struct{}{}
					require.Contains(t, webApp.Outputs, "siteConfig", kind)
					siteConfig, ok := webApp.Outputs["siteConfig"].(map[string]any)
					require.True(t, ok, kind)
					require.Contains(t, siteConfig, "ipSecurityRestrictions", kind)
					restrictions, ok := siteConfig["ipSecurityRestrictions"].([]any)
					require.True(t, ok, kind)
					require.Len(t, restrictions, 2, kind) // one from our program, one default "deny all"
					restriction, ok := restrictions[0].(map[string]any)
					require.True(t, ok, kind)
					require.Contains(t, restriction, "ipAddress", kind)
					assert.Equal(t, "198.51.100.0/22", restriction["ipAddress"], kind)
					assert.Equal(t, "pulumi", restriction["name"], kind)
					require.Contains(t, siteConfig, "defaultDocuments", kind)
					defaultDocs, ok := siteConfig["defaultDocuments"].([]any)
					require.True(t, ok, kind)
					assert.Contains(t, defaultDocs, "pulumi.html", kind)
				}
				// Both resource kinds must have been present in the deployment.
				assert.Contains(t, found, "WebApp")
				assert.Contains(t, found, "WebAppSlot")
			},
		})
	integration.ProgramTest(t, &test)
}
// getPythonBaseOptions returns the shared options for Python example tests,
// wiring in the locally built Python SDK; it fails fast if the SDK has not
// been built yet.
func getPythonBaseOptions(t *testing.T) integration.ProgramTestOptions {
	opts := getBaseOptions(t)
	sdkPath := filepath.Join("..", "sdk", "python", "bin")
	if _, err := os.Stat(sdkPath); os.IsNotExist(err) {
		t.Fatalf("python SDK not found at %s, run `make build_python` first", sdkPath)
	}
	return opts.With(integration.ProgramTestOptions{
		Dependencies: []string{sdkPath},
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/examples_dotnet_test.go | examples/examples_dotnet_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
//go:build dotnet || all
// +build dotnet all
package examples
import (
"os"
"path/filepath"
"testing"
pexamples "github.com/pulumi/examples/misc/test/definitions"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/stretchr/testify/require"
)
// TestAccAppServiceDockerDotnet deploys the cs-appservice-docker example
// (currently skipped).
func TestAccAppServiceDockerDotnet(t *testing.T) {
	t.Skip("temporarily skipping until a compatible docker release for 3.0 has been made")
	test := getCsharpBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cs-appservice-docker"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccSimpleDotnet deploys the cs-simple example.
func TestAccSimpleDotnet(t *testing.T) {
	test := getCsharpBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cs-simple"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccSql deploys the cs-sql example.
func TestAccSql(t *testing.T) {
	test := getCsharpBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cs-sql"),
		})
	integration.ProgramTest(t, &test)
}

// TestPortalDashboardDotnet deploys the cs-dashboard example.
func TestPortalDashboardDotnet(t *testing.T) {
	test := getCsharpBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cs-dashboard"),
		})
	integration.ProgramTest(t, &test)
}

// TestGetCustomDomainVerificationId deploys the
// cs-getcustomdomainverificationid example.
func TestGetCustomDomainVerificationId(t *testing.T) {
	test := getCsharpBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cs-getcustomdomainverificationid"),
		})
	integration.ProgramTest(t, &test)
}

// TestDeletePostgresConfiguration deploys cs-postgres-configuration and then
// applies the additive step2 edit.
func TestDeletePostgresConfiguration(t *testing.T) {
	test := getCsharpBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cs-postgres-configuration"),
			Config: map[string]string{
				// Postgres was region-restricted at the time of writing.
				"azure-native:location": "North Europe",
			},
			EditDirs: []integration.EditDir{
				{
					Dir:      filepath.Join("cs-postgres-configuration", "step2"),
					Additive: true,
				},
			},
		})
	integration.ProgramTest(t, &test)
}
// TestPulumiExamples runs every C#-tagged azure-native example from a local
// checkout of the pulumi/examples repository. A missing checkout is an
// error on CI but only a skip locally.
func TestPulumiExamples(t *testing.T) {
	if _, err := os.Stat(pulumiExamplesPath); os.IsNotExist(err) {
		if os.Getenv("CI") != "" {
			t.Errorf("pulumi examples not found at %q", pulumiExamplesPath)
		}
		t.Skipf("skipping: pulumi examples not found at %q", pulumiExamplesPath)
	}
	for _, example := range pexamples.GetTestsByTags(pexamples.AzureNativeProvider, pexamples.CS) {
		t.Run(example.Dir, func(t *testing.T) {
			test := getCsharpBaseOptions(t).
				With(example.Options).
				With(integration.ProgramTestOptions{
					Dir: filepath.Join(getCwd(t), pulumiExamplesPath, example.Dir),
				})
			integration.ProgramTest(t, &test)
		})
	}
}
// getCsharpBaseOptions returns the shared options for C# example tests,
// pointing PULUMI_LOCAL_NUGET at the locally built packages unless the
// caller already set it.
func getCsharpBaseOptions(t *testing.T) integration.ProgramTestOptions {
	if os.Getenv("PULUMI_LOCAL_NUGET") == "" {
		nugetPath, err := filepath.Abs("../nuget")
		require.NoErrorf(t, err, "could not resolve nuget path")
		// The Setenv error was previously discarded; fail the test if the
		// environment cannot be configured.
		require.NoErrorf(t, os.Setenv("PULUMI_LOCAL_NUGET", nugetPath), "could not set PULUMI_LOCAL_NUGET")
	}
	base := getBaseOptions(t)
	baseCsharp := base.With(integration.ProgramTestOptions{
		Dependencies: []string{
			"Pulumi.AzureNative",
		},
	})
	return baseCsharp
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/examples_test.go | examples/examples_test.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package examples
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
)
// pulumiExamplesPath points at a local checkout of the pulumi/examples repo.
const pulumiExamplesPath = "../p-examples"

// getLocation returns the Azure region to deploy test resources into,
// honoring ARM_LOCATION and defaulting to westus2.
func getLocation(t *testing.T) string {
	location := os.Getenv("ARM_LOCATION")
	if location != "" {
		return location
	}
	fmt.Println("Defaulting location to 'westus2'. You can override using the ARM_LOCATION variable.")
	return "westus2"
}
// getBaseOptions returns the ProgramTestOptions shared by all example tests:
// the configured Azure location, the locally built provider from ../bin, and
// refresh expectations suited to Azure's eventually consistent APIs.
func getBaseOptions(t *testing.T) integration.ProgramTestOptions {
	azureLocation := getLocation(t)
	binPath, err := filepath.Abs("../bin")
	if err != nil {
		t.Fatal(err)
	}
	fmt.Printf("Using binPath %s\n", binPath)
	return integration.ProgramTestOptions{
		ExpectRefreshChanges:            true,
		RequireEmptyPreviewAfterRefresh: true,
		Config: map[string]string{
			"azure-native:location": azureLocation,
		},
		LocalProviders: []integration.LocalDependency{
			{
				Package: "azure-native",
				Path:    binPath,
			},
		},
	}
}
// getCwd returns the current working directory, failing the test with the
// underlying error (the original implementation called t.FailNow and
// discarded the error, leaving no diagnostic).
func getCwd(t *testing.T) string {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getting working directory: %v", err)
	}
	return cwd
}
// skipIfShort skips the calling test when `go test -short` is in effect,
// since these integration tests deploy real cloud resources.
func skipIfShort(t *testing.T) {
	if !testing.Short() {
		return
	}
	t.Skip("skipping long-running test in short mode")
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/examples_go_sdk_test.go | examples/examples_go_sdk_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
//go:build go || all
// +build go all
package examples
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"testing"
"github.com/stretchr/testify/require"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
)
// sdkPath is the relative location of the generated Go SDK root module.
const sdkPath = "../sdk/pulumi-azure-native-sdk"

// testDir builds the path to a test program under the azure-native-sdk-v2
// examples directory, appending any further path segments.
func testDir(t *testing.T, testCaseDirs ...string) string {
	parts := append([]string{getCwd(t), "azure-native-sdk-v2"}, testCaseDirs...)
	return filepath.Join(parts...)
}
// TestAccSimpleGoSdk deploys the go-simple example using the generated Go SDK.
func TestAccSimpleGoSdk(t *testing.T) {
	test := getGoBaseOptionsSdk(t, testDir(t, "go-simple"))
	integration.ProgramTest(t, &test)
}

// TestFunctionScmFtpDeletion deploys the go-function-scm-ftp-deletion example.
func TestFunctionScmFtpDeletion(t *testing.T) {
	skipIfShort(t)
	test := getGoBaseOptionsSdk(t, testDir(t, "go-function-scm-ftp-deletion"))
	integration.ProgramTest(t, &test)
}

// TestAccClientConfigGoSdk deploys the go-clientconfig example.
func TestAccClientConfigGoSdk(t *testing.T) {
	test := getGoBaseOptionsSdk(t, testDir(t, "go-clientconfig"))
	integration.ProgramTest(t, &test)
}

// TestAccUserAssignedIdentitySdk deploys the go-user-assigned-identity
// example with the update re-run disabled.
func TestAccUserAssignedIdentitySdk(t *testing.T) {
	test := getGoBaseOptionsSdk(t, testDir(t, "go-user-assigned-identity")).
		With(integration.ProgramTestOptions{
			RunUpdateTest: false,
		})
	integration.ProgramTest(t, &test)
}

// TestAccAksGoSdk deploys the go-aks example and applies two additive edit
// steps.
func TestAccAksGoSdk(t *testing.T) {
	skipIfShort(t)
	test := getGoBaseOptionsSdk(t, testDir(t, "go-aks")).
		With(integration.ProgramTestOptions{
			EditDirs: []integration.EditDir{
				{
					Dir:      testDir(t, "go-aks", "step2"),
					Additive: true,
				},
				{
					Dir:      testDir(t, "go-aks", "step3"),
					Additive: true,
				},
			},
		})
	integration.ProgramTest(t, &test)
}

// TestServicebusRecreateSdk deploys go-servicebus-recreate step1 and then
// applies the additive step2 edit.
func TestServicebusRecreateSdk(t *testing.T) {
	skipIfShort(t)
	test := getGoBaseOptionsSdk(t, testDir(t, "go-servicebus-recreate", "step1")).
		With(integration.ProgramTestOptions{
			EditDirs: []integration.EditDir{
				{
					Dir:      testDir(t, "go-servicebus-recreate", "step2"),
					Additive: true,
				},
			},
		})
	// TODO: disable once #3361 is fixed
	test.RequireEmptyPreviewAfterRefresh = false
	integration.ProgramTest(t, &test)
}

// TestAzureInAzureWithSystemManagedIdentity deploys the go-azure-in-azure
// example with its default settings.
func TestAzureInAzureWithSystemManagedIdentity(t *testing.T) {
	test := getGoBaseOptionsSdk(t, testDir(t, "go-azure-in-azure"))
	integration.ProgramTest(t, &test)
}

// TestAzureInAzureWithUserManagedIdentity runs the same example with
// PULUMI_TEST_USER_IDENTITY=true set in the program's environment.
func TestAzureInAzureWithUserManagedIdentity(t *testing.T) {
	skipIfShort(t)
	test := getGoBaseOptionsSdk(t, testDir(t, "go-azure-in-azure")).
		With(integration.ProgramTestOptions{
			Env: []string{"PULUMI_TEST_USER_IDENTITY=true"},
		})
	integration.ProgramTest(t, &test)
}

// TestGenericResource deploys the go-generic-resource example with verbose
// diff previews for easier debugging.
func TestGenericResource(t *testing.T) {
	test := getGoBaseOptionsSdk(t, testDir(t, "go-generic-resource")).
		With(integration.ProgramTestOptions{
			Verbose:                 true,
			PreviewCommandlineFlags: []string{"--diff"},
		})
	integration.ProgramTest(t, &test)
}
// getGoBaseOptionsSdk returns test options for a Go SDK example in dir,
// replacing every required pulumi-azure-native-sdk module with the locally
// built SDK and pointing PULUMI_GO_DEP_ROOT at the repository root (unless
// already set in the environment).
func getGoBaseOptionsSdk(t *testing.T, dir string) integration.ProgramTestOptions {
	base := getBaseOptions(t)
	rootSdkPath, err := filepath.Abs(sdkPath)
	require.NoError(t, err)
	replacements, err := getSdkReplacements(dir, rootSdkPath)
	require.NoError(t, err)
	goDepRoot := os.Getenv("PULUMI_GO_DEP_ROOT")
	if goDepRoot == "" {
		goDepRoot, err = filepath.Abs("../..")
		if err != nil {
			t.Fatal(err)
		}
	}
	baseGo := base.With(integration.ProgramTestOptions{
		Dir:          dir,
		Dependencies: replacements,
		Env: []string{
			fmt.Sprintf("PULUMI_GO_DEP_ROOT=%s", goDepRoot),
		},
	})
	return baseGo
}
// getSdkReplacements computes `go mod` replacement directives mapping every
// pulumi-azure-native-sdk module required by the go.mod in dir (the root
// module or a sub-package, with or without a major-version path suffix) to
// its locally built counterpart under rootSdkPath.
func getSdkReplacements(dir, rootSdkPath string) ([]string, error) {
	required, err := getRequiredPathsFromGoMod(dir)
	if err != nil {
		return nil, err
	}
	// Find pulumi-azure-native-sdk packages - ignoring the /vX suffix.
	// Match repo root or sub-packages with an optional version suffix.
	// (The suffix group was previously mandatory, contradicting this
	// comment and excluding suffix-less module paths.)
	matchAzureNativePackage := regexp.MustCompile(`^github\.com\/pulumi\/pulumi-azure-native-sdk\/?([^\/]*)(\/v\d+)?$`)
	var replacements []string
	for _, pkg := range required {
		matches := matchAzureNativePackage.FindStringSubmatch(pkg)
		if len(matches) < 2 {
			continue
		}
		// matches[1] is the submodule directory name, empty for the root.
		module := matches[1]
		modulePath := path.Join(rootSdkPath, module)
		replacement := fmt.Sprintf("%s=%s", pkg, modulePath)
		replacements = append(replacements, replacement)
	}
	return replacements, nil
}
// getRequiredPathsFromGoMod returns the module paths required by the go.mod
// in dir, as reported by `go mod edit --json`.
func getRequiredPathsFromGoMod(dir string) ([]string, error) {
	cmd := exec.Command("go", "mod", "edit", "--json")
	cmd.Dir = dir
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}
	var goMod GoMod
	if err := json.Unmarshal(out, &goMod); err != nil {
		return nil, err
	}
	var required []string
	for _, req := range goMod.Require {
		required = append(required, req.Path)
	}
	return required, nil
}

// Module identifies a Go module by path and version.
type Module struct {
	Path    string
	Version string
}

// GoMod mirrors the JSON document emitted by `go mod edit --json`.
type GoMod struct {
	Module  ModPath
	Go      string
	Require []Require
	Exclude []Module
	Replace []Replace
	Retract []Retract
}

// ModPath describes the declaring module and any deprecation notice.
type ModPath struct {
	Path       string
	Deprecated string
}

// Require is a single require directive.
type Require struct {
	Path     string
	Version  string
	Indirect bool
}

// Replace is a single replace directive.
type Replace struct {
	Old Module
	New Module
}

// Retract is a single retract directive.
type Retract struct {
	Low       string
	High      string
	Rationale string
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/examples_nodejs_keyvault_test.go | examples/examples_nodejs_keyvault_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
//go:build nodejs || all
package examples
import (
"os"
"path/filepath"
"testing"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
)
// TestAccKeyVaultTs deploys the keyvault example with default authentication.
func TestAccKeyVaultTs(t *testing.T) {
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "keyvault"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccKeyVaultTs_OIDC runs the keyvault example authenticating via OIDC;
// it requires OIDC_ARM_CLIENT_ID to be set.
func TestAccKeyVaultTs_OIDC(t *testing.T) {
	oidcClientId := os.Getenv("OIDC_ARM_CLIENT_ID")
	if oidcClientId == "" {
		t.Skip("Skipping OIDC test without OIDC_ARM_CLIENT_ID")
	}
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "keyvault"),
			Env: []string{
				"ARM_USE_OIDC=true",
				"ARM_CLIENT_ID=" + oidcClientId,
				// not strictly necessary but making sure we test the OIDC path
				"ARM_CLIENT_SECRET=",
			},
		})
	integration.ProgramTest(t, &test)
}

// This test is almost like TestAccKeyVaultTs_OIDC but uses an explicit provider.
// We want to test configuring the provider via its arguments, not the environment.
func TestAccKeyVaultTs_OIDCExplicit(t *testing.T) {
	skipIfShort(t)
	oidcClientId := os.Getenv("OIDC_ARM_CLIENT_ID")
	if oidcClientId == "" {
		t.Skip("Skipping OIDC test without OIDC_ARM_CLIENT_ID")
	}
	// These variables are set by GH. The provider reads them automatically, so we unset
	// them and use custom ones instead to be sure we're actually testing the explicit provider path.
	oidcToken := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN")
	oidcUrl := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL")
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "keyvault-explicit-provider"),
			Env: []string{
				"OIDC_TOKEN_TEST=" + oidcToken,
				"OIDC_URL_TEST=" + oidcUrl,
				// unset to make sure we test the right code path
				"ACTIONS_ID_TOKEN_REQUEST_TOKEN=",
				"ACTIONS_ID_TOKEN_REQUEST_URL=",
				"ARM_CLIENT_SECRET="},
		})
	integration.ProgramTest(t, &test)
}

// TestAccKeyVaultTs_ClientCert runs the keyvault example authenticating with
// a client certificate taken from the *_FOR_TEST environment variables.
func TestAccKeyVaultTs_ClientCert(t *testing.T) {
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "keyvault"),
			Env: []string{
				"ARM_CLIENT_CERTIFICATE_PATH=" + os.Getenv("ARM_CLIENT_CERTIFICATE_PATH_FOR_TEST"),
				"ARM_CLIENT_CERTIFICATE_PASSWORD=" + os.Getenv("ARM_CLIENT_CERTIFICATE_PASSWORD_FOR_TEST"),
				// Make sure we test the client cert path
				"ACTIONS_ID_TOKEN_REQUEST_TOKEN=",
				"ACTIONS_ID_TOKEN_REQUEST_URL=",
				"ARM_CLIENT_SECRET=",
			},
		})
	integration.ProgramTest(t, &test)
}

// TestAccKeyVaultTs_CLI runs the keyvault example authenticating through the
// Azure CLI, using an isolated AZURE_CONFIG_DIR prepared by CI.
func TestAccKeyVaultTs_CLI(t *testing.T) {
	skipIfShort(t)
	// AZURE_CONFIG_DIR_FOR_TEST is set by the GH workflow build-test.yml
	// to provide an isolated configuration directory for the Azure CLI.
	configDir := os.Getenv("AZURE_CONFIG_DIR_FOR_TEST")
	if configDir == "" {
		t.Skip("Skipping CLI test without AZURE_CONFIG_DIR_FOR_TEST")
	}
	t.Setenv("AZURE_CONFIG_DIR", configDir)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "keyvault"),
			Env: []string{
				// Unset auth variables to make sure we're testing the CLI auth path
				"ARM_CLIENT_SECRET=",
				"ARM_CLIENT_CERTIFICATE_PATH=",
				"ARM_USE_MSI=false",
				"ARM_USE_OIDC=false",
			},
			NoParallel: true,
		})
	integration.ProgramTest(t, &test)
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/examples_nodejs_test.go | examples/examples_nodejs_test.go | // Copyright 2016-2020, Pulumi Corporation. All rights reserved.
//go:build nodejs || all
package examples
import (
"context"
"encoding/json"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/pulumi/providertest/pulumitest"
"github.com/pulumi/providertest/pulumitest/opttest"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/pulumi/pulumi/sdk/v3/go/auto/debug"
"github.com/pulumi/pulumi/sdk/v3/go/auto/optrefresh"
"github.com/pulumi/pulumi/sdk/v3/go/auto/optup"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2"
)
// TestAccAppServiceTs deploys the appservice example.
func TestAccAppServiceTs(t *testing.T) {
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "appservice"),
			// due to WebApp.SiteConfig that's modified by other WebApp* resources
			ExpectRefreshChanges: true,
		})
	integration.ProgramTest(t, &test)
}

// TestAccWebApp429SerializationTs deploys the webapp-429-serialization example.
func TestAccWebApp429SerializationTs(t *testing.T) {
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "webapp-429-serialization"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccCosmosDBTs deploys the cosmosdb example (currently skipped).
func TestAccCosmosDBTs(t *testing.T) {
	t.Skip("Skipping due to CosmosDB failing with ServiceUnavailable due to high demand")
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "cosmosdb"),
		})
	integration.ProgramTest(t, &test)
}

// TestAccSimpleTs deploys the simple example with diff previews.
func TestAccSimpleTs(t *testing.T) {
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "simple"),
			// due to WebApp.SiteConfig that's modified by other WebApp* resources
			ExpectRefreshChanges:    true,
			PreviewCommandlineFlags: []string{"--diff"},
		})
	integration.ProgramTest(t, &test)
}

// TestStaticWebsiteDestroyTs deploys the static-website example.
func TestStaticWebsiteDestroyTs(t *testing.T) {
	// Tests eventually-consistent deletion handling.
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "static-website"),
		})
	integration.ProgramTest(t, &test)
}

// TestImportTs deploys the import example.
func TestImportTs(t *testing.T) {
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "import"),
		})
	integration.ProgramTest(t, &test)
}

// TestPostgresTs deploys the postgres example.
func TestPostgresTs(t *testing.T) {
	// t.Skip("takes longer than 10 minutes and can fail with 'unexpected error', issue #898")
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "postgres"),
		})
	integration.ProgramTest(t, &test)
}

// TestMySqlTs deploys the mysql example (currently skipped as unreliable).
func TestMySqlTs(t *testing.T) {
	t.Skip("looks unreliable, getting errors with Code=ResourceNotFound Code=InternalServerError")
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "mysql"),
		})
	integration.ProgramTest(t, &test)
}

// TestMessagingTs deploys the messaging example.
func TestMessagingTs(t *testing.T) {
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "messaging"),
		})
	// TODO: disable once #3361 is fixed
	test.RequireEmptyPreviewAfterRefresh = false
	integration.ProgramTest(t, &test)
}

// TestSecretsTs deploys the secrets example and verifies the secret config
// value never appears in plain text in the serialized deployment.
func TestSecretsTs(t *testing.T) {
	secretMessage := "secret message for testing"
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "secrets"),
			Config: map[string]string{
				"message": secretMessage,
			},
			ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
				assert.NotNil(t, stackInfo.Deployment)
				state, err := json.Marshal(stackInfo.Deployment)
				assert.NoError(t, err)
				assert.NotContains(t, string(state), secretMessage)
			},
		})
	integration.ProgramTest(t, &test)
}
// TestTimeSeriesTs deploys the timeseries example (currently skipped).
func TestTimeSeriesTs(t *testing.T) {
	t.Skip("Disabled due to server-side issue tracked by #3453")
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "timeseries"),
		})
	integration.ProgramTest(t, &test)
}

// TestPublicIpUpdateTs deploys public-ip-update and applies the additive
// step2 edit, skipping the refresh step.
func TestPublicIpUpdateTs(t *testing.T) {
	skipIfShort(t)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir:         filepath.Join(getCwd(t), "public-ip-update"),
			SkipRefresh: true,
			EditDirs: []integration.EditDir{
				{
					Dir:      filepath.Join("public-ip-update", "step2"),
					Additive: true,
				},
			},
		})
	integration.ProgramTest(t, &test)
}

// TestVnetSubnetsResolution deploys vnet-subnets-resolution and applies the
// additive step2 edit.
func TestVnetSubnetsResolution(t *testing.T) {
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "vnet-subnets-resolution"),
			EditDirs: []integration.EditDir{
				{
					Dir:      filepath.Join("vnet-subnets-resolution", "step2"),
					Additive: true,
				},
			},
		})
	integration.ProgramTest(t, &test)
}

// TestStorageAccountNetworkRule deploys storageaccount-networkrule and, after
// the step2 edit, asserts that the storage account's network rule set has
// defaultAction "Allow" and no IP rules.
func TestStorageAccountNetworkRule(t *testing.T) {
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "storageaccount-networkrule"),
			EditDirs: []integration.EditDir{
				{
					Dir:      filepath.Join("storageaccount-networkrule", "step2"),
					Additive: true,
					ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
						assert.NotNil(t, stackInfo.Deployment)
						assert.NotNil(t, stackInfo.Deployment.Resources)
						found := false
						for _, resource := range stackInfo.Deployment.Resources {
							if resource.Type == "azure-native:storage:StorageAccount" {
								found = true
								networkRuleSet, ok := resource.Outputs["networkRuleSet"]
								assert.True(t, ok)
								networkRuleSetMap, ok := networkRuleSet.(map[string]interface{})
								assert.True(t, ok)
								assert.Equal(t, "Allow", networkRuleSetMap["defaultAction"])
								assert.Empty(t, networkRuleSetMap["ipRules"])
								break
							}
						}
						assert.True(t, found, "no storage account found in deployed resources")
					},
				},
			},
			Verbose: true,
		})
	integration.ProgramTest(t, &test)
}
// TestAccKeyVaultAccessPoliciesTs verifies the interaction between a Key Vault and
// stand-alone AccessPolicy resources across three steps: create, update the Vault
// (the stand-alone policies must survive), then update one policy and delete the other.
func TestAccKeyVaultAccessPoliciesTs(t *testing.T) {
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			ExpectRefreshChanges: false,
			Dir:                  filepath.Join(getCwd(t), "keyvault-accesspolicies"),
			EditDirs: []integration.EditDir{
				{
					Dir:      filepath.Join("keyvault-accesspolicies", "2-update-keyvault"),
					Additive: true,
					// Check that the stand-alone AccessPolicies are still there, not deleted by the Vault update.
					ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
						require.NotNil(t, stackInfo.Deployment)
						require.NotNil(t, stackInfo.Deployment.Resources)
						// Count the AccessPolicy resources recorded in the deployment state.
						accessPolicies := 0
						for _, resource := range stackInfo.Deployment.Resources {
							if resource.Type == "azure-native:keyvault:AccessPolicy" {
								accessPolicies++
							}
						}
						assert.Equal(t, 2, accessPolicies)
						// check the number of policies as returned by Azure directly via invoke
						// (stack outputs are decoded as float64, hence the 2.0 comparison).
						numberOfAPs, ok := stackInfo.Outputs["numberOfAPs"].(float64)
						require.True(t, ok)
						assert.Equal(t, 2.0, numberOfAPs)
					},
				},
				{
					Dir:      filepath.Join("keyvault-accesspolicies", "3-update-accesspolicies"),
					Additive: true,
					// Check that the stand-alone AccessPolicies were updated resp. deleted.
					ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
						require.NotNil(t, stackInfo.Deployment)
						require.NotNil(t, stackInfo.Deployment.Resources)
						ap1Found := false
						for _, resource := range stackInfo.Deployment.Resources {
							urn := string(resource.URN)
							if strings.HasSuffix(urn, "keyvault:AccessPolicy::ap1") {
								ap1Found = true
								// Drill into outputs: policy.permissions.keys must be exactly ["get"].
								accessPolicy, ok := resource.Outputs["policy"]
								require.True(t, ok, "Property 'policy' not found")
								accessPolicyObj, ok := accessPolicy.(map[string]interface{})
								require.True(t, ok, "Property 'policy' is not an object")
								permissions, ok := accessPolicyObj["permissions"]
								require.True(t, ok, "Property 'policy.permissions' not found")
								permissionsObj, ok := permissions.(map[string]interface{})
								require.True(t, ok, "Property 'policy.permissions' is not an object")
								keyPermissions, ok := permissionsObj["keys"]
								require.True(t, ok, "Property 'policy.permissions.keys' not found")
								keyPermissionsArray, ok := keyPermissions.([]any)
								require.True(t, ok, "Property 'policy.permissions.keys' is not an array")
								require.Equal(t, 1, len(keyPermissionsArray))
								assert.Equal(t, "get", keyPermissionsArray[0].(string))
							} else if strings.HasSuffix(urn, "keyvault:AccessPolicy::ap2") {
								// ap2 is removed in this step; seeing it in state is a failure.
								t.Errorf("AccessPolicy ap2 should have been deleted")
							}
						}
						assert.True(t, ap1Found, "AccessPolicy ap1 not found")
						// Check the number of policies as returned by Azure directly via invoke.
						// This doesn't work here because we have no way of waiting for the deletion of ap2.
						// numberOfAPs, ok := stackInfo.Outputs["numberOfAPs"].(float64)
						// assert.True(t, ok)
						// assert.Equal(t, 1.0, numberOfAPs)
					},
				},
			},
		})
	integration.ProgramTest(t, &test)
}
// TestAccBlobContainerLegalHold deploys a blob container with a legal hold, then
// updates the hold in a second step; refresh must report no changes.
func TestAccBlobContainerLegalHold(t *testing.T) {
	skipIfShort(t)
	opts := integration.ProgramTestOptions{
		Dir:                  filepath.Join(getCwd(t), "blobcontainer-legalhold"),
		ExpectRefreshChanges: false,
		EditDirs: []integration.EditDir{{
			Dir:      filepath.Join("blobcontainer-legalhold", "2-update-legalhold"),
			Additive: true,
		}},
	}
	test := getJSBaseOptions(t).With(opts)
	integration.ProgramTest(t, &test)
}
func TestPortalDashboardTs(t *testing.T) {
test := getJSBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: filepath.Join(getCwd(t), "dashboard"),
})
integration.ProgramTest(t, &test)
}
func TestRecoveryServicesProtectedItemTs(t *testing.T) {
t.Skip("Skipping due to #3832")
test := getJSBaseOptions(t).
With(integration.ProgramTestOptions{
Dir: filepath.Join(getCwd(t), "recoveryservices-protecteditem"),
// Backing up protected items increases `protectedItemsCount` in policy and container,
// and adds `AzureBackupProtected` to the item.
ExpectRefreshChanges: true,
})
integration.ProgramTest(t, &test)
}
// TestPIMRoleEligibilitySchedule deploys the pim-roleeligibilityschedules program.
// Permanently skipped: see the linked Stack Overflow thread for why each run
// notifies real users by email.
func TestPIMRoleEligibilitySchedule(t *testing.T) {
	t.Skip(`Skipping because each test run triggers an email notification to everyone. See
	https://stackoverflow.com/questions/79454225/turn-off-notifications-like-pim-test-user-has-the-data-box-reader-role?noredirect=1#comment140124389_79454225.`)
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Dir: filepath.Join(getCwd(t), "pim-roleeligibilityschedules"),
		})
	integration.ProgramTest(t, &test)
}
// TestRoleAssignmentsTs deploys the roleassignments program.
func TestRoleAssignmentsTs(t *testing.T) {
	skipIfShort(t)
	opts := integration.ProgramTestOptions{
		Dir: filepath.Join(getCwd(t), "roleassignments"),
	}
	test := getJSBaseOptions(t).With(opts)
	integration.ProgramTest(t, &test)
}
// TestAccPIMRoleManagementPolicies exercises updates to a fixed, pre-existing PIM
// Role Management Policy. It reads the policy's Expiration_Admin_Eligibility rule
// duration directly from Azure (independent of Pulumi) before the test, asserts the
// expected durations after each edit step, and verifies the original duration is
// restored once the rule is removed from the program.
func TestAccPIMRoleManagementPolicies(t *testing.T) {
	skipIfShort(t)
	// A randomly chosen Role Management Policy, from the list obtained by
	// az rest --method get --url https://management.azure.com/subscriptions/0282681f-7a9e-424b-80b2-96babd57a8a1/providers/Microsoft.Authorization/roleManagementPolicies\?api-version\=2020-10-01
	const policyId = "7ed63469-c833-4fba-9032-803ce289eabc"
	// Retrieve the `maximumDuration` property of the randomly chosen Expiration_Admin_Eligibility rule.
	// Used in ExtraRuntimeValidation to assert that the rule has the expected duration.
	// Uses the Azure SDK to be able to retrieve the actual value from Azure, independent of Pulumi.
	get_Expiration_Admin_Eligibility_RuleDuration := func() string {
		// Authenticate with the same ARM_* service-principal credentials the provider uses.
		cred, err := azidentity.NewClientSecretCredential(
			os.Getenv("ARM_TENANT_ID"),
			os.Getenv("ARM_CLIENT_ID"),
			os.Getenv("ARM_CLIENT_SECRET"),
			nil)
		require.NoError(t, err)
		sub := os.Getenv("ARM_SUBSCRIPTION_ID")
		clientFactory, err := armauthorization.NewClientFactory(sub, cred, nil)
		require.NoError(t, err)
		client := clientFactory.NewRoleManagementPoliciesClient()
		resp, err := client.Get(context.Background(), "subscriptions/"+sub, policyId, nil)
		require.NoError(t, err)
		// Find the expiration rule among the policy's rules and downcast it to the concrete type.
		var rule *armauthorization.RoleManagementPolicyExpirationRule
		for _, r := range resp.RoleManagementPolicy.Properties.Rules {
			if *r.GetRoleManagementPolicyRule().ID != "Expiration_Admin_Eligibility" {
				continue
			}
			var castOk bool
			rule, castOk = r.(*armauthorization.RoleManagementPolicyExpirationRule)
			require.True(t, castOk, "%T", r)
			break
		}
		assert.NotNil(t, rule)
		return *(rule.MaximumDuration)
	}
	// Capture the pre-test duration so the final step can verify it was restored.
	initialDuration := get_Expiration_Admin_Eligibility_RuleDuration()
	test := getJSBaseOptions(t).
		With(integration.ProgramTestOptions{
			Verbose:              true,
			Dir:                  filepath.Join(getCwd(t), "pim-rolemanagementpolicies"),
			Config:               map[string]string{"policy": policyId},
			ExpectRefreshChanges: false,
			ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
				// Step 1: the program sets the rule's duration to P365D.
				assert.Equal(t, "P365D", get_Expiration_Admin_Eligibility_RuleDuration())
			},
			EditDirs: []integration.EditDir{
				{
					Dir:      filepath.Join("pim-rolemanagementpolicies", "2-update-rule"),
					Additive: true,
					ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
						// Step 2 changes the duration to P90D.
						assert.Equal(t, "P90D", get_Expiration_Admin_Eligibility_RuleDuration())
					},
				},
				{
					Dir:      filepath.Join("pim-rolemanagementpolicies", "3-remove-rule"),
					Additive: true,
					ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
						// Step 3 removes the rule; Azure should report the pre-test duration again.
						assert.Equal(t, initialDuration, get_Expiration_Admin_Eligibility_RuleDuration())
					},
				},
			},
		})
	integration.ProgramTest(t, &test)
}
// This test uses pulumitest instead of integration.ProgramTest because it needs to parameterize the provider in between
// steps. The high-level test sequence is:
// 1. Run `pulumi up` on the program with the default provider and default API version.
// 2. Parameterize with a different API version to generate the new SDK and install it in package.json.
// 3. The program using the new parameterized provider already exists in folder "2-..."; update the test source with it.
// 4. Run `pulumi refresh` and `pulumi up` on the program with the new provider and new API version.
// 5. Copy the default version of the program back and run refresh+up again.
// There should be no changes at any point if the default and the parameterized resources are aliased correctly.
func TestParameterizeApiVersion(t *testing.T) {
	cwd, location := getCwd(t), getLocation(t)
	pt := pulumitest.NewPulumiTest(t,
		filepath.Join(cwd, "parameterize"),
		opttest.YarnLink("@pulumi/azure-native"),
	)
	pt.SetConfig(t, "azure-native:location", location)
	pt.Up(t)
	// Generate the parameterized SDK for storage v20240101 and wire it into package.json.
	pulumiPackageAdd(t, pt, "storage", "v20240101")
	pt.UpdateSource(t, cwd, "parameterize", "2-explicit-version")
	pt.Refresh(t, optrefresh.ExpectNoChanges())
	pt.Up(t, optup.ExpectNoChanges())
	// Pending #4019
	// pt.UpdateSource(t, cwd, "parameterize", "3-back-to-default")
	// pt.Refresh(t, optrefresh.ExpectNoChanges())
	// pt.Up(t, optup.ExpectNoChanges())
	pt.Destroy(t)
}
// A helper to use with optup.DebugLogging() and friends to get verbose logging.
func debugLogging() debug.LoggingOptions {
var level uint = 9
return debug.LoggingOptions{
LogLevel: &level,
Debug: true,
FlowToPlugins: true,
LogToStdErr: true,
}
}
// pulumiPackageAdd runs `pulumi package add` against the provider under test. In
// PULUMI_DEBUG_PROVIDERS mode the engine resolves the provider by name; otherwise
// the absolute path of the pulumi-resource-azure-native binary on PATH is used.
func pulumiPackageAdd(t *testing.T, pt *pulumitest.PulumiTest, args ...string) {
	provider := "azure-native"
	if _, debugMode := os.LookupEnv("PULUMI_DEBUG_PROVIDERS"); !debugMode {
		binary, err := exec.LookPath("pulumi-resource-azure-native")
		require.NoError(t, err)
		provider, err = filepath.Abs(binary)
		require.NoError(t, err)
	}
	runPulumiPackageAdd(t, pt, provider, args...)
}
// runPulumiPackageAdd runs `pulumi package add` with the given args. `provider` can be the name of a provider for the
// engine to resolve, or a path to a provider binary.
// Fails the test if the command errors, exits non-zero, or the expected SDK directory is missing.
func runPulumiPackageAdd(
	t *testing.T,
	pt *pulumitest.PulumiTest,
	provider string,
	args ...string,
) {
	ctx := context.Background()
	allArgs := append([]string{"package", "add", provider}, args...)
	stdout, stderr, exitCode, err := pt.CurrentStack().Workspace().PulumiCommand().Run(
		ctx,
		pt.WorkingDir(),
		nil, /* reader */
		nil, /* additionalOutput */
		nil, /* additionalErrorOutput */
		nil, /* additionalEnv */
		allArgs...,
	)
	if err != nil || exitCode != 0 {
		t.Fatalf("Failed to run pulumi package add\nExit code: %d\nError: %v\n%s\n%s",
			exitCode, err, stdout, stderr)
	}
	// NOTE(review): this path is specific to the "storage v20240101" invocation made by
	// TestParameterizeApiVersion; if this helper gains other callers, the expected path
	// should be derived from args instead — confirm before reusing.
	sdkPath := filepath.Join(pt.WorkingDir(), "sdks", "azure-native_storage_v20240101")
	if _, err := os.Stat(sdkPath); os.IsNotExist(err) {
		t.Fatalf("generated SDK directory not found at path: %s", sdkPath)
	}
}
// TestParallelFederatedIdentityCredentials deploys the
// parallel-federated-identity-credentials program.
func TestParallelFederatedIdentityCredentials(t *testing.T) {
	skipIfShort(t)
	opts := integration.ProgramTestOptions{
		Dir: filepath.Join(getCwd(t), "parallel-federated-identity-credentials"),
	}
	test := getJSBaseOptions(t).With(opts)
	integration.ProgramTest(t, &test)
}
// getJSBaseOptions returns the shared ProgramTestOptions for the Node.js-based
// tests: the common base options plus the @pulumi/azure-native dependency and a
// --diff preview flag.
func getJSBaseOptions(t *testing.T) integration.ProgramTestOptions {
	return getBaseOptions(t).With(integration.ProgramTestOptions{
		Dependencies: []string{"@pulumi/azure-native"},
		// Show the diff instead of just the non-actionable error "no changes were expected but changes were proposed"
		PreviewCommandlineFlags: []string{"--diff"},
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-clientconfig/main.go | examples/azure-native-sdk-v2/go-clientconfig/main.go | // Copyright 2022, Pulumi Corporation. All rights reserved.
package main
import (
"fmt"
"github.com/pulumi/pulumi-azure-native-sdk/authorization/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main resolves the provider's client configuration and fails if any of the
// identity fields is empty, proving that authentication supplied all of them.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		config, err := authorization.GetClientConfig(ctx)
		if err != nil {
			return err
		}
		// The managed identity (MSI) authentication method will not have an object id, but it's
		// unlikely our tests run on Azure VMs with this configuration.
		if config.ClientId == "" || config.ObjectId == "" || config.SubscriptionId == "" || config.TenantId == "" {
			// Error strings are lowercase per Go convention (staticcheck ST1005).
			return fmt.Errorf("expected client, object, subscription, and tenant ids in auth config %v", config)
		}
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-azure-in-azure/main.go | examples/azure-native-sdk-v2/go-azure-in-azure/main.go | package main
import (
"fmt"
"os"
"os/exec"
"github.com/pulumi/pulumi-azure-native-sdk/authorization/v3"
"github.com/pulumi/pulumi-azure-native-sdk/compute/v3"
"github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v3"
"github.com/pulumi/pulumi-azure-native-sdk/network/v3"
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi-command/sdk/go/command/remote"
"github.com/pulumi/pulumi-tls/sdk/v5/go/tls"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
const innerProgram = "yaml-simple"
// This program creates a VM that's open to SSH connections. It then copies a separate, "inner"
// Pulumi program to the VM and runs `pulumi up` and `down` on it. The intent is to prove that
// managed identity authentication, which needs to happen on an Azure resource, works as expected.
// The VM parts are mostly based on
// https://learn.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-template.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Client config supplies the subscription id passed into the inner program below.
		clientConf, err := authorization.GetClientConfig(ctx)
		if err != nil {
			return err
		}
		rg, err := resources.NewResourceGroup(ctx, "rg", &resources.ResourceGroupArgs{})
		if err != nil {
			return err
		}
		// Generate a new RSA key pair
		privateKey, err := tls.NewPrivateKey(ctx, "mySshKey", &tls.PrivateKeyArgs{
			Algorithm: pulumi.String("RSA"),
			RsaBits:   pulumi.Int(4096),
		})
		if err != nil {
			return err
		}
		// Virtual network with a single subnet for the VM's NIC.
		vnet, err := network.NewVirtualNetwork(ctx, "vnet", &network.VirtualNetworkArgs{
			ResourceGroupName: rg.Name,
			AddressSpace: network.AddressSpaceArgs{
				AddressPrefixes: pulumi.StringArray{pulumi.String("10.1.0.0/16")},
			},
			Subnets: network.SubnetTypeArray{network.SubnetTypeArgs{
				Name:                              pulumi.String("subnet"),
				AddressPrefix:                     pulumi.String("10.1.0.0/24"),
				PrivateEndpointNetworkPolicies:    pulumi.String("Enabled"),
				PrivateLinkServiceNetworkPolicies: pulumi.String("Enabled"),
			}},
		})
		if err != nil {
			return err
		}
		publicIp, err := network.NewPublicIPAddress(ctx, "publicIp", &network.PublicIPAddressArgs{
			ResourceGroupName:        rg.Name,
			PublicIPAllocationMethod: pulumi.String("Dynamic"),
			PublicIPAddressVersion:   pulumi.String("IPv4"),
			IdleTimeoutInMinutes:     pulumi.IntPtr(5),
			// The actual IP address is not known until after the resource is created, so we ignore
			// changes to the property and look the IP up later via LookupPublicIPAddress.
		}, pulumi.IgnoreChanges([]string{"ipAddress"}))
		if err != nil {
			return err
		}
		// Security group allowing inbound SSH from anywhere, so the test runner can connect.
		nsg, err := network.NewNetworkSecurityGroup(ctx, "nsg", &network.NetworkSecurityGroupArgs{
			ResourceGroupName: rg.Name,
			SecurityRules: network.SecurityRuleTypeArray{
				network.SecurityRuleTypeArgs{
					Name:                     pulumi.String("SSH"),
					Priority:                 pulumi.IntPtr(1000),
					Protocol:                 pulumi.String("TCP"),
					Access:                   pulumi.String("Allow"),
					Direction:                pulumi.String("Inbound"),
					SourceAddressPrefix:      pulumi.String("*"),
					SourcePortRange:          pulumi.String("*"),
					DestinationAddressPrefix: pulumi.String("*"),
					DestinationPortRange:     pulumi.String("22"),
				},
			},
		})
		if err != nil {
			return err
		}
		// NIC tying together the subnet, the public IP, and the security group.
		nic, err := network.NewNetworkInterface(ctx, "networkInterface", &network.NetworkInterfaceArgs{
			ResourceGroupName: rg.Name,
			IpConfigurations: network.NetworkInterfaceIPConfigurationArray{
				&network.NetworkInterfaceIPConfigurationArgs{
					Name: pulumi.String("ipconfig1"),
					Subnet: &network.SubnetTypeArgs{
						Id: vnet.Subnets.Index(pulumi.Int(0)).Id(),
					},
					PrivateIPAllocationMethod: pulumi.String("Dynamic"),
					PublicIPAddress: network.PublicIPAddressTypeArgs{
						Id: publicIp.ID(),
					},
				},
			},
			NetworkSecurityGroup: network.NetworkSecurityGroupTypeArgs{
				Id: nsg.ID(),
			},
		})
		if err != nil {
			return err
		}
		// clientId is required for user-assigned identity to disambiguate between several identities.
		var clientId pulumi.StringOutput = pulumi.String("").ToStringOutput()
		// Default: system-assigned identity. Overridden below when PULUMI_TEST_USER_IDENTITY is set.
		vmIdentity := &compute.VirtualMachineIdentityArgs{Type: compute.ResourceIdentityTypeSystemAssigned}
		var umi *managedidentity.UserAssignedIdentity
		if os.Getenv("PULUMI_TEST_USER_IDENTITY") == "true" {
			fmt.Printf("go-azure-in-azure: using user-assigned identity\n")
			umi, err = managedidentity.NewUserAssignedIdentity(ctx, "umi", &managedidentity.UserAssignedIdentityArgs{
				ResourceGroupName: rg.Name,
			})
			if err != nil {
				return err
			}
			clientId = umi.ClientId
			// Create a second user-assigned identity to test multiple identities. With multiple identities, the one to
			// use needs to be specified via clientId.
			umi2, err := managedidentity.NewUserAssignedIdentity(ctx, "umi2", &managedidentity.UserAssignedIdentityArgs{
				ResourceGroupName: rg.Name,
			})
			if err != nil {
				return err
			}
			vmIdentity = &compute.VirtualMachineIdentityArgs{
				Type:                   compute.ResourceIdentityTypeUserAssigned,
				UserAssignedIdentities: pulumi.StringArray{umi.ID(), umi2.ID()},
			}
		}
		// The VM that will run the inner Pulumi program, with SSH key-only login.
		vm, err := compute.NewVirtualMachine(ctx, "virtualMachine", &compute.VirtualMachineArgs{
			ResourceGroupName: rg.Name,
			HardwareProfile: &compute.HardwareProfileArgs{
				VmSize: pulumi.String(compute.VirtualMachineSizeTypes_Standard_A2_v2),
			},
			NetworkProfile: &compute.NetworkProfileArgs{
				NetworkInterfaces: compute.NetworkInterfaceReferenceArray{
					&compute.NetworkInterfaceReferenceArgs{
						Id:      nic.ID(),
						Primary: pulumi.Bool(true),
					},
				},
			},
			OsProfile: &compute.OSProfileArgs{
				AdminUsername: pulumi.String("pulumi"),
				ComputerName:  pulumi.String("myVM"),
				LinuxConfiguration: &compute.LinuxConfigurationArgs{
					DisablePasswordAuthentication: pulumi.Bool(true),
					Ssh: &compute.SshConfigurationArgs{
						PublicKeys: compute.SshPublicKeyTypeArray{
							&compute.SshPublicKeyTypeArgs{
								KeyData: privateKey.PublicKeyOpenssh,
								Path:    pulumi.String("/home/pulumi/.ssh/authorized_keys"),
							},
						},
					},
				},
			},
			StorageProfile: &compute.StorageProfileArgs{
				ImageReference: &compute.ImageReferenceArgs{
					Offer:     pulumi.String("ubuntu-24_04-lts"),
					Publisher: pulumi.String("canonical"),
					Sku:       pulumi.String("server-gen1"),
					Version:   pulumi.String("latest"),
				},
				OsDisk: &compute.OSDiskArgs{
					Caching:      compute.CachingTypesReadWrite,
					CreateOption: pulumi.String(compute.DiskCreateOptionTypesFromImage),
					ManagedDisk: &compute.ManagedDiskParametersArgs{
						StorageAccountType: pulumi.String(compute.StorageAccountTypes_Standard_LRS),
					},
					Name: pulumi.String("myVMosdisk"),
				},
			},
			VmName:   pulumi.String("myVM"),
			Identity: vmIdentity,
		})
		if err != nil {
			return err
		}
		// Principal to grant access to: the user-assigned identity when enabled,
		// otherwise the VM's system-assigned identity.
		var principalId pulumi.StringOutput
		if umi != nil {
			principalId = umi.PrincipalId
		} else {
			principalId = vm.Identity.Elem().PrincipalId()
		}
		// Grant the new VM identity access to the resource group
		roleAssignment, err := authorization.NewRoleAssignment(ctx, "rgAccess",
			&authorization.RoleAssignmentArgs{
				PrincipalId:      principalId,
				PrincipalType:    authorization.PrincipalTypeServicePrincipal,
				RoleDefinitionId: pulumi.String("/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c"), // Contributor
				Scope:            rg.ID(),
			}, pulumi.DeleteBeforeReplace(true))
		if err != nil {
			return err
		}
		// The dynamic public IP is only populated once the VM exists, so gate the lookup on vm.ID().
		ipLookup := vm.ID().ApplyT(func(_ pulumi.ID) network.LookupPublicIPAddressResultOutput {
			return network.LookupPublicIPAddressOutput(ctx, network.LookupPublicIPAddressOutputArgs{
				ResourceGroupName:   rg.Name,
				PublicIpAddressName: publicIp.Name,
			})
		}).(network.LookupPublicIPAddressResultOutput)
		// Poll the server until it responds. Because other commands depend on this command, they
		// are guaranteed to hit an already booted server.
		poll, err := remote.NewCommand(ctx, "poll", &remote.CommandArgs{
			Connection: remote.ConnectionArgs{
				Host:           ipLookup.IpAddress().Elem(),
				User:           pulumi.String("pulumi"),
				PrivateKey:     privateKey.PrivateKeyOpenssh,
				DialErrorLimit: pulumi.IntPtr(-1),
			},
			Create: pulumi.String("echo 'Connection established'"),
		}, pulumi.Timeouts(&pulumi.CustomTimeouts{Create: "10m"}), pulumi.DependsOn([]pulumi.Resource{publicIp, vm}))
		if err != nil {
			return err
		}
		// Reused SSH connection settings for all subsequent remote commands/copies.
		sshConn := remote.ConnectionArgs{
			Host:       ipLookup.IpAddress().Elem(),
			User:       pulumi.String("pulumi"),
			PrivateKey: privateKey.PrivateKeyOpenssh,
		}
		// Copy the inner Pulumi program to the VM.
		// NOTE(review): the name "copy" shadows the builtin copy; consider renaming in a follow-up.
		copy, err := remote.NewCopyToRemote(ctx, "copy", &remote.CopyToRemoteArgs{
			Connection: sshConn,
			Source:     pulumi.NewFileArchive(innerProgram + "/"),
			RemotePath: pulumi.String(innerProgram),
			Triggers:   pulumi.ToArray([]any{vm.ID()}),
		}, pulumi.DependsOn([]pulumi.Resource{poll}))
		if err != nil {
			return err
		}
		installPulumi, err := remote.NewCommand(ctx, "installPulumi", &remote.CommandArgs{
			Connection: sshConn,
			Create:     pulumi.String("curl -fsSL https://get.pulumi.com | sh"),
			Triggers:   pulumi.ToArray([]any{vm.ID()}),
		}, pulumi.DependsOn([]pulumi.Resource{poll}))
		if err != nil {
			return err
		}
		// Copy the provider binary under test (the one on PATH) to the VM.
		// We put it in the same directory as the pulumi binary which is on the PATH.
		// Note that this can only work if the VM has the same architecture as the local machine.
		providerBinaryPath, err := exec.LookPath("pulumi-resource-azure-native")
		if err != nil {
			return err
		}
		copyProvider, err := remote.NewCopyToRemote(ctx, "copyProvider", &remote.CopyToRemoteArgs{
			Connection: sshConn,
			Source:     pulumi.NewFileAsset(providerBinaryPath),
			RemotePath: pulumi.String("/home/pulumi/.pulumi/bin/"),
			Triggers:   pulumi.ToArray([]any{vm.ID()}),
		}, pulumi.DependsOn([]pulumi.Resource{poll, installPulumi}))
		if err != nil {
			return err
		}
		chmodProvider, err := remote.NewCommand(ctx, "chmodProvider", &remote.CommandArgs{
			Connection: sshConn,
			Create:     pulumi.String("chmod +x /home/pulumi/.pulumi/bin/pulumi-resource-azure-native"),
			Triggers:   pulumi.ToArray([]any{vm.ID()}),
		}, pulumi.DependsOn([]pulumi.Resource{copyProvider}))
		if err != nil {
			return err
		}
		// Pass feature flags into the VM.
		var tenantId pulumi.StringOutput = pulumi.String(os.Getenv("ARM_TENANT_ID")).ToStringOutput()
		// We pass the resource group's ID into the inner program via config so the program can
		// create a resource in the resource group.
		create := pulumi.Sprintf(`cd %s && \
		set -euxo pipefail && \
		export ARM_USE_MSI=true && \
		export ARM_SUBSCRIPTION_ID=%s && \
		export PATH="$HOME/.pulumi/bin:$PATH" && \
		export PULUMI_CONFIG_PASSPHRASE=pass && \
		rand=$(openssl rand -hex 4) && \
		stackname="%s-$rand" && \
		pulumi login --local && \
		pulumi stack init $stackname && \
		pulumi config set azure-native:clientId "%s" -s $stackname && \
		pulumi config set azure-native:tenantId "%s" -s $stackname && \
		pulumi config set objectId "%s" -s $stackname && \
		pulumi config set rgId "%s" -s $stackname && \
		pulumi config -s $stackname && \
		pulumi up -s $stackname --skip-preview --logtostderr --logflow -v=9 && \
		pulumi down -s $stackname --skip-preview --logtostderr --logflow -v=9 && \
		pulumi stack rm --yes $stackname && \
		pulumi logout --local`, innerProgram, clientConf.SubscriptionId, innerProgram, clientId, tenantId, principalId, rg.ID())
		// Run the inner program's full up/down lifecycle on the VM under MSI authentication.
		pulumiPreview, err := remote.NewCommand(ctx, "pulumiUpDown", &remote.CommandArgs{
			Connection: sshConn,
			Triggers:   pulumi.ToArray([]any{vm.ID(), principalId, roleAssignment.ID()}),
			Create:     create,
		}, pulumi.DependsOn([]pulumi.Resource{roleAssignment, copy, copyProvider, chmodProvider, installPulumi}))
		if err != nil {
			return err
		}
		// Exports surface the remote command output for debugging failed runs.
		ctx.Export("rg", rg.ID())
		ctx.Export("vm", vm.Name)
		ctx.Export("principal", principalId)
		ctx.Export("publicIpAddress", ipLookup.IpAddress().Elem())
		ctx.Export("installPulumi", installPulumi.Stdout)
		ctx.Export("installPulumiStderr", installPulumi.Stderr)
		ctx.Export("providerBinary", copyProvider.Source)
		ctx.Export("pulumiStdout", pulumiPreview.Stdout)
		ctx.Export("pulumiStderr", pulumiPreview.Stderr)
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-simple/main.go | examples/azure-native-sdk-v2/go-simple/main.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package main
import (
resources "github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
storage "github.com/pulumi/pulumi-azure-native-sdk/storage/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main creates a resource group and a StorageV2 account, and exports the
// account's primary storage key.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group to hold the storage account.
		rg, err := resources.NewResourceGroup(ctx, "resourceGroup", nil)
		if err != nil {
			return err
		}
		// Standard locally-redundant StorageV2 account.
		account, err := storage.NewStorageAccount(ctx, "sa", &storage.StorageAccountArgs{
			ResourceGroupName: rg.Name,
			Sku: &storage.SkuArgs{
				Name: pulumi.String("Standard_LRS"),
			},
			Kind: pulumi.String("StorageV2"),
		})
		if err != nil {
			return err
		}
		// Resolve the primary key once both names are known.
		primaryKey := pulumi.All(rg.Name, account.Name).ApplyT(
			func(args []interface{}) (string, error) {
				groupName := args[0].(string)
				accountName := args[1].(string)
				keys, err := storage.ListStorageAccountKeys(ctx, &storage.ListStorageAccountKeysArgs{
					ResourceGroupName: groupName,
					AccountName:       accountName,
				})
				if err != nil {
					return "", err
				}
				return keys.Keys[0].Value, nil
			},
		)
		// Export the primary key of the Storage Account
		ctx.Export("primaryStorageKey", primaryKey)
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-function-scm-ftp-deletion/main.go | examples/azure-native-sdk-v2/go-function-scm-ftp-deletion/main.go | package main
import (
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi-azure-native-sdk/storage/v3"
web "github.com/pulumi/pulumi-azure-native-sdk/web/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main deploys an App Service web app backed by a storage account, then disables
// SCM (Kudu) and FTP basic-auth publishing on it via the WebAppScmAllowed and
// WebAppFtpAllowed resources.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		rg, err := resources.NewResourceGroup(ctx, "rg", nil)
		if err != nil {
			return err
		}
		// Storage account whose connection string is embedded in the app settings below.
		sta, err := storage.NewStorageAccount(ctx, "storage", &storage.StorageAccountArgs{
			ResourceGroupName: rg.Name,
			Kind:              pulumi.String("StorageV2"),
			Sku: &storage.SkuArgs{
				Name: pulumi.String("Standard_LRS"),
			},
		})
		if err != nil {
			return err
		}
		// Basic-tier App Service plan to host the app.
		plan, err := web.NewAppServicePlan(ctx, "plan", &web.AppServicePlanArgs{
			ResourceGroupName: rg.Name,
			Sku: &web.SkuDescriptionArgs{
				Name: pulumi.String("B1"),
				Tier: pulumi.String("Basic"),
			},
		})
		if err != nil {
			return err
		}
		// Resolve the storage account's primary key for the connection strings.
		storageAccountKeys := storage.ListStorageAccountKeysOutput(ctx, storage.ListStorageAccountKeysOutputArgs{
			ResourceGroupName: rg.Name,
			AccountName:       sta.Name,
		})
		primaryStorageKey := storageAccountKeys.ApplyT(func(keys storage.ListStorageAccountKeysResult) string {
			return keys.Keys[0].Value
		}).(pulumi.StringOutput)
		app, err := web.NewWebApp(ctx, "app", &web.WebAppArgs{
			ResourceGroupName:     rg.Name,
			HttpsOnly:             pulumi.Bool(true),
			ServerFarmId:          plan.ID(),
			ClientAffinityEnabled: pulumi.Bool(true),
			SiteConfig: &web.SiteConfigArgs{
				AppSettings: web.NameValuePairArray{
					&web.NameValuePairArgs{
						Name:  pulumi.String("AzureWebJobsStorage"),
						Value: pulumi.Sprintf("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s", sta.Name, primaryStorageKey),
					},
					&web.NameValuePairArgs{
						Name:  pulumi.String("FUNCTIONS_EXTENSION_VERSION"),
						Value: pulumi.String("~3"),
					},
					&web.NameValuePairArgs{
						Name:  pulumi.String("FUNCTIONS_WORKER_RUNTIME"),
						Value: pulumi.String("dotnet"),
					},
					&web.NameValuePairArgs{
						Name:  pulumi.String("WEBSITE_CONTENTAZUREFILECONNECTIONSTRING"),
						Value: pulumi.Sprintf("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s", sta.Name, primaryStorageKey),
					},
					&web.NameValuePairArgs{
						Name:  pulumi.String("WEBSITE_CONTENTSHARE"),
						Value: pulumi.String("devpod"),
					},
				},
			},
			// SiteConfig is modified outside of this resource via the WebApp* resources.
		}, pulumi.IgnoreChanges([]string{"siteConfig", "siteConfig.*"}))
		if err != nil {
			return err
		}
		ctx.Export("endpoint", pulumi.Sprintf("https://%s.azurewebsites.net", app.Name))
		// Disable SCM (Kudu) basic-auth publishing on the app.
		_, err = web.NewWebAppScmAllowed(ctx, "scm", &web.WebAppScmAllowedArgs{
			Allow:             pulumi.Bool(false),
			ResourceGroupName: rg.Name,
			Name:              app.Name,
		})
		if err != nil {
			return err
		}
		// Disable FTP basic-auth publishing on the app.
		_, err = web.NewWebAppFtpAllowed(ctx, "ftp", &web.WebAppFtpAllowedArgs{
			Allow:             pulumi.Bool(false),
			ResourceGroupName: rg.Name,
			Name:              app.Name,
		})
		if err != nil {
			return err
		}
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-generic-resource/main.go | examples/azure-native-sdk-v2/go-generic-resource/main.go | // Copyright 2025, Pulumi Corporation. All rights reserved.
package main
import (
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions a storage account through the untyped, generic Resource type
// rather than the strongly-typed storage module.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that will contain the generic resource.
		rg, err := resources.NewResourceGroup(ctx, "resourceGroup", nil)
		if err != nil {
			return err
		}
		// Describe a StorageV2 account purely via provider namespace / type / API version.
		accountArgs := &resources.ResourceArgs{
			ResourceProviderNamespace: pulumi.String("Microsoft.Storage"),
			ResourceType:              pulumi.String("storageAccounts"),
			ApiVersion:                pulumi.String("2024-01-01"),
			ParentResourcePath:        pulumi.String(""),
			ResourceGroupName:         rg.Name,
			Kind:                      pulumi.String("StorageV2"),
			Sku: &resources.SkuArgs{
				Name: pulumi.String("Standard_LRS"),
			},
		}
		if _, err := resources.NewResource(ctx, "acc", accountArgs); err != nil {
			return err
		}
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-aks/k8sVersion.go | examples/azure-native-sdk-v2/go-aks/k8sVersion.go | package main
// k8sVersion is the Kubernetes version requested for the AKS managed cluster in main.go.
const k8sVersion = "1.32.9"
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-aks/main.go | examples/azure-native-sdk-v2/go-aks/main.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package main
import (
"encoding/base64"
"time"
"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v3"
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi-azuread/sdk/v5/go/azuread"
"github.com/pulumi/pulumi-random/sdk/v4/go/random"
"github.com/pulumi/pulumi-tls/sdk/v4/go/tls"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions an AKS cluster: a resource group, an AAD application and
// service principal with a password, an SSH key for the Linux nodes, and the
// managed cluster itself. It exports the cluster's kubeconfig, decoded from the
// ListManagedClusterUserCredentials invoke.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create an Azure Resource Group
		resourceGroup, err := resources.NewResourceGroup(ctx, "aksRG", nil)
		if err != nil {
			return err
		}
		// Create an AD service principal.
		adApp, err := azuread.NewApplication(ctx, "aks", &azuread.ApplicationArgs{
			DisplayName: pulumi.String("aks-app"),
		})
		if err != nil {
			return err
		}
		adSp, err := azuread.NewServicePrincipal(ctx, "aksSp", &azuread.ServicePrincipalArgs{
			ClientId: adApp.ClientId,
		})
		if err != nil {
			return err
		}
		// Create the auto-generated Service Principal password.
		adSpPassword, err := azuread.NewServicePrincipalPassword(ctx, "aksSpPassword", &azuread.ServicePrincipalPasswordArgs{
			ServicePrincipalId: adSp.ID(),
			EndDate:            pulumi.String("2099-01-01T00:00:00Z"),
		})
		if err != nil {
			return err
		}
		// Generate an SSH key for the cluster's Linux profile.
		sshArgs := tls.PrivateKeyArgs{
			Algorithm: pulumi.String("RSA"),
			RsaBits:   pulumi.Int(4096),
		}
		sshKey, err := tls.NewPrivateKey(ctx, "ssh-key", &sshArgs)
		if err != nil {
			return err
		}
		// Random lowercase name, also used as the DNS prefix.
		randomClusterName, err := random.NewRandomString(ctx, "randomClusterName", &random.RandomStringArgs{
			Length:  pulumi.Int(12),
			Special: pulumi.Bool(false),
			Upper:   pulumi.Bool(false),
		})
		if err != nil {
			return err
		}
		// Although Azure returns success on creation of the Service Principal, we would often see
		// "Searching service principal failed. Details: service principal is not found" errors without this sleep.
		time.Sleep(20 * time.Second)
		cluster, err := containerservice.NewManagedCluster(ctx, "cluster", &containerservice.ManagedClusterArgs{
			ResourceName:      randomClusterName.Result,
			ResourceGroupName: resourceGroup.Name,
			DnsPrefix:         randomClusterName.Result,
			AgentPoolProfiles: containerservice.ManagedClusterAgentPoolProfileArray{
				&containerservice.ManagedClusterAgentPoolProfileArgs{
					Name:         pulumi.String("agentpool"),
					Mode:         pulumi.String("System"),
					OsDiskSizeGB: pulumi.Int(30),
					Count:        pulumi.Int(3),
					VmSize:       pulumi.String("Standard_DS2_v2"),
					OsType:       pulumi.String("Linux"),
				},
			},
			LinuxProfile: &containerservice.ContainerServiceLinuxProfileArgs{
				AdminUsername: pulumi.String("testuser"),
				Ssh: containerservice.ContainerServiceSshConfigurationArgs{
					PublicKeys: containerservice.ContainerServiceSshPublicKeyArray{
						containerservice.ContainerServiceSshPublicKeyArgs{
							KeyData: sshKey.PublicKeyOpenssh,
						},
					},
				},
			},
			ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfileArgs{
				ClientId: adApp.ClientId,
				Secret:   adSpPassword.Value,
			},
			KubernetesVersion: pulumi.String(k8sVersion),
		})
		if err != nil {
			return err
		}
		// The kubeconfig is only retrievable once the cluster exists; resolve it via the
		// user-credentials invoke and base64-decode it. The resource group ID is included
		// in All only to sequence the lookup after the group is fully created.
		ctx.Export("kubeconfig", pulumi.All(cluster.Name, resourceGroup.Name, resourceGroup.ID()).ApplyT(func(args interface{}) (string, error) {
			// Assert the slice once instead of per element.
			vals := args.([]interface{})
			clusterName := vals[0].(string)
			resourceGroupName := vals[1].(string) // renamed: was misspelled resourceGroupNAme
			creds, err := containerservice.ListManagedClusterUserCredentials(ctx, &containerservice.ListManagedClusterUserCredentialsArgs{
				ResourceGroupName: resourceGroupName,
				ResourceName:      clusterName,
			})
			if err != nil {
				return "", err
			}
			// Kubeconfig values come back base64-encoded.
			encoded := creds.Kubeconfigs[0].Value
			kubeconfig, err := base64.StdEncoding.DecodeString(encoded)
			if err != nil {
				return "", err
			}
			return string(kubeconfig), nil
		}).(pulumi.StringOutput))
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-aks/step3/main.go | examples/azure-native-sdk-v2/go-aks/step3/main.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package main
import (
"encoding/base64"
"github.com/pulumi/pulumi-azure-native-sdk/containerservice/v3"
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi-azuread/sdk/v5/go/azuread"
"github.com/pulumi/pulumi-random/sdk/v4/go/random"
"github.com/pulumi/pulumi-tls/sdk/v4/go/tls"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Only difference to steps 1 and 2 is that we add `OrchestratorVersion: pulumi.String(k8sVersion)`
// to the agent pool profile.
// main stands up an AKS cluster whose agent pool pins OrchestratorVersion
// (the only difference from steps 1 and 2 of this example) and exports the
// decoded kubeconfig.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that will own everything below.
		rg, err := resources.NewResourceGroup(ctx, "aksRG", nil)
		if err != nil {
			return err
		}
		// Azure AD application + service principal acting as the cluster identity.
		app, err := azuread.NewApplication(ctx, "aks", &azuread.ApplicationArgs{
			DisplayName: pulumi.String("aks-app"),
		})
		if err != nil {
			return err
		}
		sp, err := azuread.NewServicePrincipal(ctx, "aksSp", &azuread.ServicePrincipalArgs{
			ClientId: app.ClientId,
		})
		if err != nil {
			return err
		}
		// Auto-generated password for the service principal.
		spPassword, err := azuread.NewServicePrincipalPassword(ctx, "aksSpPassword", &azuread.ServicePrincipalPasswordArgs{
			ServicePrincipalId: sp.ID(),
			EndDate:            pulumi.String("2099-01-01T00:00:00Z"),
		})
		if err != nil {
			return err
		}
		// SSH key pair for the Linux admin user on the nodes.
		key, err := tls.NewPrivateKey(ctx, "ssh-key", &tls.PrivateKeyArgs{
			Algorithm: pulumi.String("RSA"),
			RsaBits:   pulumi.Int(4096),
		})
		if err != nil {
			return err
		}
		// Lowercase alphanumeric cluster name to satisfy AKS naming rules.
		name, err := random.NewRandomString(ctx, "randomClusterName", &random.RandomStringArgs{
			Length:  pulumi.Int(12),
			Special: pulumi.Bool(false),
			Upper:   pulumi.Bool(false),
		})
		if err != nil {
			return err
		}
		cluster, err := containerservice.NewManagedCluster(ctx, "cluster", &containerservice.ManagedClusterArgs{
			ResourceName:      name.Result,
			ResourceGroupName: rg.Name,
			DnsPrefix:         name.Result,
			AgentPoolProfiles: containerservice.ManagedClusterAgentPoolProfileArray{
				&containerservice.ManagedClusterAgentPoolProfileArgs{
					Name:                pulumi.String("agentpool"),
					Mode:                pulumi.String("System"),
					OsDiskSizeGB:        pulumi.Int(30),
					Count:               pulumi.Int(3),
					VmSize:              pulumi.String("Standard_DS2_v2"),
					OsType:              pulumi.String("Linux"),
					OrchestratorVersion: pulumi.String(k8sVersion),
				},
			},
			LinuxProfile: &containerservice.ContainerServiceLinuxProfileArgs{
				AdminUsername: pulumi.String("testuser"),
				Ssh: containerservice.ContainerServiceSshConfigurationArgs{
					PublicKeys: containerservice.ContainerServiceSshPublicKeyArray{
						containerservice.ContainerServiceSshPublicKeyArgs{
							KeyData: key.PublicKeyOpenssh,
						},
					},
				},
			},
			ServicePrincipalProfile: &containerservice.ManagedClusterServicePrincipalProfileArgs{
				ClientId: app.ClientId,
				Secret:   spPassword.Value,
			},
			KubernetesVersion: pulumi.String(k8sVersion),
		})
		if err != nil {
			return err
		}
		// Export the kubeconfig: list the cluster's user credentials and
		// base64-decode the first entry.
		ctx.Export("kubeconfig", pulumi.All(cluster.Name, rg.Name, rg.ID()).ApplyT(func(args interface{}) (string, error) {
			vals := args.([]interface{})
			creds, err := containerservice.ListManagedClusterUserCredentials(ctx, &containerservice.ListManagedClusterUserCredentialsArgs{
				ResourceGroupName: vals[1].(string),
				ResourceName:      vals[0].(string),
			})
			if err != nil {
				return "", err
			}
			kubeconfig, err := base64.StdEncoding.DecodeString(creds.Kubeconfigs[0].Value)
			if err != nil {
				return "", err
			}
			return string(kubeconfig), nil
		}).(pulumi.StringOutput))
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-aks/step2/k8sVersion.go | examples/azure-native-sdk-v2/go-aks/step2/k8sVersion.go | package main
// k8sVersion pins the Kubernetes version used by the AKS example clusters.
const k8sVersion = "1.33.5"
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-user-assigned-identity/main.go | examples/azure-native-sdk-v2/go-user-assigned-identity/main.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package main
import (
managedidentity "github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v3"
resources "github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
storage "github.com/pulumi/pulumi-azure-native-sdk/storage/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main demonstrates attaching a user-assigned managed identity to a storage
// account: a resource group, an identity, and a StorageV2 account that
// references that identity.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group owning both resources.
		rg, err := resources.NewResourceGroup(ctx, "resourceGroup", nil)
		if err != nil {
			return err
		}
		// The user-assigned identity to attach to the account.
		identity, err := managedidentity.NewUserAssignedIdentity(ctx, "ua-id", &managedidentity.UserAssignedIdentityArgs{
			ResourceGroupName: rg.Name,
		})
		if err != nil {
			return err
		}
		// Storage account configured to use the identity above.
		if _, err := storage.NewStorageAccount(ctx, "sa", &storage.StorageAccountArgs{
			ResourceGroupName: rg.Name,
			Sku: &storage.SkuArgs{
				Name: pulumi.String("Standard_LRS"),
			},
			Kind: pulumi.String("StorageV2"),
			Identity: &storage.IdentityArgs{
				Type:                   pulumi.String("UserAssigned"),
				UserAssignedIdentities: pulumi.StringArray{identity.ID()},
			},
		}); err != nil {
			return err
		}
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-servicebus-recreate/step1/main.go | examples/azure-native-sdk-v2/go-servicebus-recreate/step1/main.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package main
import (
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi-azure-native-sdk/servicebus/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main creates a Service Bus namespace plus a topic with duplicate detection
// disabled, and exports the generated topic name.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		rg, err := resources.NewResourceGroup(ctx, "resourceGroup", nil)
		if err != nil {
			return err
		}
		ns, err := servicebus.NewNamespace(ctx, "ns", &servicebus.NamespaceArgs{
			ResourceGroupName: rg.Name,
		})
		if err != nil {
			return err
		}
		topic, err := servicebus.NewTopic(ctx, "topic", &servicebus.TopicArgs{
			ResourceGroupName:          rg.Name,
			NamespaceName:              ns.Name,
			RequiresDuplicateDetection: pulumi.Bool(false),
		})
		if err != nil {
			return err
		}
		// Export the topic name so later steps can observe whether it changed.
		ctx.Export("topicName", topic.Name)
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/examples/azure-native-sdk-v2/go-servicebus-recreate/step2/main.go | examples/azure-native-sdk-v2/go-servicebus-recreate/step2/main.go | // Copyright 2021, Pulumi Corporation. All rights reserved.
package main
import (
"github.com/pulumi/pulumi-azure-native-sdk/resources/v3"
"github.com/pulumi/pulumi-azure-native-sdk/servicebus/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main is step 2 of the recreate test: identical to step 1 except that
// RequiresDuplicateDetection flips to true.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		rg, err := resources.NewResourceGroup(ctx, "resourceGroup", nil)
		if err != nil {
			return err
		}
		ns, err := servicebus.NewNamespace(ctx, "ns", &servicebus.NamespaceArgs{
			ResourceGroupName: rg.Name,
		})
		if err != nil {
			return err
		}
		// Changing this property should trigger recreation; otherwise the
		// in-place update will be rejected.
		topic, err := servicebus.NewTopic(ctx, "topic", &servicebus.TopicArgs{
			ResourceGroupName:          rg.Name,
			NamespaceName:              ns.Name,
			RequiresDuplicateDetection: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// Export the topic name.
		ctx.Export("topicName", topic.Name)
		return nil
	})
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
pulumi/pulumi-azure-native | https://github.com/pulumi/pulumi-azure-native/blob/1f14b038c133f406184984d9980dfcacb7141599/versions/tools/compare_major_versions.go | versions/tools/compare_major_versions.go | // Copyright 2025, Pulumi Corporation.
package main
import (
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"gopkg.in/yaml.v3"
)
// ResourceInfo represents the common structure for each resource.
type ResourceInfo struct {
	ApiVersion  string `yaml:"ApiVersion"`
	ResourceUri string `yaml:"ResourceUri"`
}

// Config represents the root YAML structure: each service maps resource
// names to their info.
type Config map[string]map[string]*ResourceInfo

// Resource identifies one resource by module, name, and REST API version.
type Resource struct {
	Module       string
	ResourceName string
	ApiVersion   string
}

// ResourceComparison pairs the previous and next definition of the resource
// found at Path; either side may be nil.
type ResourceComparison struct {
	Previous *Resource `json:"previous,omitempty"`
	Next     *Resource `json:"next,omitempty"`
	Path     string
}

type ResourceComparisons []ResourceComparison

// toChanges buckets comparisons into added (next only), removed (previous
// only), and changed (present on both sides but under a different module —
// compared case-insensitively — or a different resource name). All three
// buckets start as empty non-nil slices so they serialize as [] rather than
// null.
func (cc ResourceComparisons) toChanges() Changes {
	var ch Changes
	ch.Added = []ResourceComparison{}
	ch.Removed = []ResourceComparison{}
	ch.Changed = []ResourceComparison{}
	for _, c := range cc {
		switch {
		case c.Previous == nil && c.Next == nil:
			// Nothing on either side; ignore.
		case c.Next == nil:
			ch.Removed = append(ch.Removed, c)
		case c.Previous == nil:
			ch.Added = append(ch.Added, c)
		case !strings.EqualFold(c.Previous.Module, c.Next.Module) || c.Previous.ResourceName != c.Next.ResourceName:
			ch.Changed = append(ch.Changed, c)
		}
	}
	return ch
}

// Changes groups the outcome of comparing two config versions.
type Changes struct {
	Added, Removed, Changed ResourceComparisons
}

// mapConfigByPath flattens a Config into a lookup keyed by lowercased
// resource URI.
func mapConfigByPath(config Config) map[string]*Resource {
	out := make(map[string]*Resource)
	for service, byName := range config {
		for name, info := range byName {
			out[strings.ToLower(info.ResourceUri)] = &Resource{service, name, info.ApiVersion}
		}
	}
	return out
}
func main() {
// Define command-line flag for version number
baseVersion := flag.Int("olderMajorVersion", 2, "Base version N to compare vN.yaml with v(N+1).yaml")
mode := flag.String("out", "changes", "Output mode: `changes` (in JSON) or `table` for a Markdown table of all resources")
flag.Parse()
if *mode != "changes" && *mode != "table" {
fmt.Printf("Invalid output mode: %s\n", *mode)
flag.Usage()
os.Exit(1)
}
// Construct file paths
currentFile := filepath.Join("..", fmt.Sprintf("v%d.yaml", *baseVersion))
nextFile := filepath.Join("..", fmt.Sprintf("v%d.yaml", *baseVersion+1))
// Read YAML files
currentData, err := os.ReadFile(currentFile)
if err != nil {
fmt.Printf("Error reading %s: %v\n", currentFile, err)
return
}
nextData, err := os.ReadFile(nextFile)
if err != nil {
fmt.Printf("Error reading %s: %v\n", nextFile, err)
return
}
// Parse YAML files
var currentConfig Config
var nextConfig Config
if err := yaml.Unmarshal(currentData, ¤tConfig); err != nil {
fmt.Printf("Error parsing %s: %v\n", currentFile, err)
return
}
if err := yaml.Unmarshal(nextData, &nextConfig); err != nil {
fmt.Printf("Error parsing %s: %v\n", nextFile, err)
return
}
if *mode == "changes" {
currentConfigByPath := mapConfigByPath(currentConfig)
nextConfigByPath := mapConfigByPath(nextConfig)
allPaths := make([]string, 0, len(currentConfigByPath)+len(nextConfigByPath))
for path := range currentConfigByPath {
allPaths = append(allPaths, path)
}
for path := range nextConfigByPath {
allPaths = append(allPaths, path)
}
sort.Strings(allPaths)
var comparisons ResourceComparisons = make([]ResourceComparison, 0, len(allPaths))
for _, path := range allPaths {
cur := currentConfigByPath[path]
next := nextConfigByPath[path]
comparisons = append(comparisons, ResourceComparison{cur, next, path})
}
changes := comparisons.toChanges()
changesJson, err := json.MarshalIndent(changes, "", " ")
if err != nil {
fmt.Printf("Error marshalling changes: %v\n", err)
return
}
fmt.Println(string(changesJson))
}
if *mode == "table" {
// Collect all services
services := make(map[string]bool)
for service := range currentConfig {
services[service] = true
}
for service := range nextConfig {
services[service] = true
}
// Convert to sorted slice
servicesList := make([]string, 0, len(services))
for service := range services {
servicesList = append(servicesList, service)
}
sort.Strings(servicesList)
// Print markdown table
fmt.Printf("| Service | Resource | REST version in v%d | REST version in v%d |\n", *baseVersion, *baseVersion+1)
fmt.Println("|---|---|---|---|")
// Print rows
for _, service := range servicesList {
// Collect all resources for this service
resources := make(map[string]bool)
if currentResources, ok := currentConfig[service]; ok {
for resource := range currentResources {
resources[resource] = true
}
}
if nextResources, ok := nextConfig[service]; ok {
for resource := range nextResources {
resources[resource] = true
}
}
// Convert to sorted slice
resourcesList := make([]string, 0, len(resources))
for resource := range resources {
resourcesList = append(resourcesList, resource)
}
sort.Strings(resourcesList)
// Print each resource
for _, resource := range resourcesList {
currentVersion := "not present"
nextVersion := "not present"
if currentResources, ok := currentConfig[service]; ok {
if currentResource := currentResources[resource]; currentResource != nil {
currentVersion = currentResource.ApiVersion
}
}
if nextResources, ok := nextConfig[service]; ok {
if nextResource := nextResources[resource]; nextResource != nil {
nextVersion = nextResource.ApiVersion
}
}
fmt.Printf("| %s | %s | %s | %s |\n", service, resource, currentVersion, nextVersion)
}
}
}
}
| go | Apache-2.0 | 1f14b038c133f406184984d9980dfcacb7141599 | 2026-01-07T09:42:26.479506Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/verify.go | pkg/hfdownloader/verify.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"os"
"strings"
)
// verifySHA256 computes the SHA256 of a file and compares it to expected.
func verifySHA256(path string, expected string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
h := sha256.New()
if _, err := io.Copy(h, f); err != nil {
return err
}
sum := hex.EncodeToString(h.Sum(nil))
if !strings.EqualFold(sum, expected) {
return fmt.Errorf("sha256 mismatch: expected %s got %s", expected, sum)
}
return nil
}
// shouldSkipLocal checks if a file already exists and matches expected hash/size.
// Returns (skip, reason, error); reason is only set when skip is true.
//
// Decision ladder (order matters):
//  1. file missing            -> download
//  2. known size that differs -> download
//  3. LFS with known sha256   -> skip only on digest match
//  4. otherwise               -> skip when sizes match
//
// NOTE(review): any os.Stat error — including permission errors — is treated
// as "no file", which forces a re-download attempt; confirm that is intended.
func shouldSkipLocal(it PlanItem, dst string) (bool, string, error) {
	fi, err := os.Stat(dst)
	if err != nil {
		// no file
		return false, "", nil
	}
	// Quick size check first: if known and different, don't skip
	// (avoids hashing a file that cannot possibly match).
	if it.Size > 0 && fi.Size() != it.Size {
		return false, "", nil
	}
	// LFS with known sha: compute and compare
	if it.LFS && it.SHA256 != "" {
		if err := verifySHA256(dst, it.SHA256); err == nil {
			return true, "sha256 match", nil
		}
		// size matched but sha mismatched -> re-download
		return false, "", nil
	}
	// Non-LFS (or unknown sha): size match is sufficient
	if it.Size > 0 && fi.Size() == it.Size {
		return true, "size match", nil
	}
	// Unknown size and no usable sha: nothing to compare, so download.
	return false, "", nil
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/plan.go | pkg/hfdownloader/plan.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import (
"context"
"net/http"
"path/filepath"
"strings"
"time"
)
// PlanItem represents a single file in the download plan.
type PlanItem struct {
	RelativePath string `json:"path"`             // path within the repo tree
	URL          string `json:"url"`              // raw (non-LFS) or resolve (LFS) URL
	LFS          bool   `json:"lfs"`              // true when the file is stored via Git LFS
	SHA256       string `json:"sha256,omitempty"` // expected digest when the Hub reports one
	Size         int64  `json:"size"`             // blob size (LFS pointer sizes are resolved to the real size)
	AcceptRanges bool   `json:"acceptRanges"`     // whether ranged/multipart download is assumed possible
	// Subdir holds the matched filter (if any) used when --append-filter-subdir is set.
	Subdir string `json:"subdir,omitempty"`
}

// Plan contains the list of files to download.
type Plan struct {
	Items []PlanItem `json:"items"`
}
// PlanRepo builds the file list without downloading anything.
// A nil ctx is tolerated (replaced with context.Background()) and an empty
// revision defaults to "main".
func PlanRepo(ctx context.Context, job Job, cfg Settings) (*Plan, error) {
	if ctx == nil {
		ctx = context.Background()
	}
	if err := validate(job, cfg); err != nil {
		return nil, err
	}
	if job.Revision == "" {
		job.Revision = "main"
	}
	return scanRepo(ctx, buildHTTPClient(), cfg.Token, job, cfg)
}
// scanRepo walks the repo tree and builds a download plan.
// For every file node it applies, in order: de-duplication by relative path,
// exclude patterns, then (for LFS files only) the include filters; survivors
// become PlanItems carrying URL, resolved size, and expected sha256.
// All pattern matching is case-insensitive substring matching.
func scanRepo(ctx context.Context, httpc *http.Client, token string, job Job, cfg Settings) (*Plan, error) {
	var items []PlanItem
	seen := make(map[string]struct{}) // ensure each relative path appears once in the plan
	err := walkTree(ctx, httpc, token, cfg.Endpoint, job, "", func(n hfNode) error {
		// Only file-ish nodes make it into the plan; directories are handled
		// by walkTree's recursion.
		if n.Type != "file" && n.Type != "blob" {
			return nil
		}
		rel := n.Path
		// Deduplicate by relative path
		if _, ok := seen[rel]; ok {
			return nil
		}
		seen[rel] = struct{}{}
		name := filepath.Base(rel)
		nameLower := strings.ToLower(name)
		relLower := strings.ToLower(rel)
		isLFS := n.LFS != nil
		// Check excludes first - if file matches any exclude pattern, skip it.
		// Excludes match against both the bare file name and the full path.
		// Credits: Exclude feature suggested by jeroenkroese (#41)
		for _, ex := range job.Excludes {
			exLower := strings.ToLower(ex)
			if strings.Contains(nameLower, exLower) || strings.Contains(relLower, exLower) {
				return nil // excluded
			}
		}
		// Determine which filter (if any) matches this file name, prefer the longest match
		// Filter matching is case-insensitive (e.g., q4_0 matches Q4_0)
		matchedFilter := ""
		if isLFS && len(job.Filters) > 0 {
			for _, f := range job.Filters {
				fLower := strings.ToLower(f)
				if strings.Contains(nameLower, fLower) {
					if len(f) > len(matchedFilter) {
						matchedFilter = f
					}
				}
			}
			// If filters provided and none matched, skip typical large LFS blobs
			// (weight files); other unmatched LFS files are still downloaded.
			if matchedFilter == "" {
				ln := strings.ToLower(name)
				ext := strings.ToLower(filepath.Ext(name))
				if ext == ".bin" || ext == ".act" || ext == ".safetensors" || ext == ".zip" || strings.HasSuffix(ln, ".gguf") || strings.HasSuffix(ln, ".ggml") {
					return nil
				}
			}
		}
		// Build URL and file size
		var urlStr string
		if isLFS {
			urlStr = lfsURL(cfg.Endpoint, job, rel)
		} else {
			urlStr = rawURL(cfg.Endpoint, job, rel)
		}
		// For LFS files, ALWAYS use LFS.Size (n.Size is the pointer file size, not actual)
		var size int64
		if n.LFS != nil && n.LFS.Size > 0 {
			size = n.LFS.Size
		} else {
			size = n.Size
		}
		// Assume LFS files support range requests (HuggingFace always does)
		// Don't block with HEAD requests during planning - too slow for large repos
		acceptRanges := isLFS
		// Prefer the node-level sha256; fall back to the LFS metadata.
		sha := n.Sha256
		if sha == "" && n.LFS != nil {
			sha = n.LFS.Sha256
		}
		items = append(items, PlanItem{
			RelativePath: rel,
			URL:          urlStr,
			LFS:          isLFS,
			SHA256:       sha,
			Size:         size,
			AcceptRanges: acceptRanges,
			Subdir:       matchedFilter, // empty when no filter matched
		})
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &Plan{Items: items}, nil
}
// destinationBase returns the base output directory for a job.
// job.Repo contains a literal "/" ("owner/name"), so this nests the owner
// directory under OutputDir.
func destinationBase(job Job, cfg Settings) string {
	// Always OutputDir/<repo>; per-file filter subdirs are applied in Download().
	return filepath.Join(cfg.OutputDir, job.Repo)
}
// ScanPlan scans a repository and emits one plan_item event per file via the
// progress callback, without downloading anything.
// This is useful for dry-run/preview functionality.
func ScanPlan(ctx context.Context, job Job, cfg Settings, progress ProgressFunc) error {
	plan, err := PlanRepo(ctx, job, cfg)
	if err != nil {
		return err
	}
	if progress == nil {
		return nil
	}
	for _, it := range plan.Items {
		progress(ProgressEvent{
			Time:     time.Now().UTC(),
			Event:    "plan_item",
			Repo:     job.Repo,
			Revision: job.Revision,
			Path:     it.RelativePath,
			Total:    it.Size,
			IsLFS:    it.LFS,
		})
	}
	return nil
}
// Run is an alias for Download, kept for API compatibility with earlier
// releases; it forwards all arguments unchanged.
func Run(ctx context.Context, job Job, cfg Settings, progress ProgressFunc) error {
	return Download(ctx, job, cfg, progress)
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/client.go | pkg/hfdownloader/client.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"time"
)
// DefaultEndpoint is the default HuggingFace Hub URL.
// Can be overridden via Settings.Endpoint for mirrors or enterprise deployments.
// Credits: Custom endpoint feature suggested by windtail (#38)
const DefaultEndpoint = "https://huggingface.co"

// getEndpoint normalizes a configured endpoint: an empty value falls back to
// DefaultEndpoint, and a single trailing slash is stripped.
func getEndpoint(endpoint string) string {
	if endpoint != "" {
		return strings.TrimSuffix(endpoint, "/")
	}
	return DefaultEndpoint
}
// hfNode represents a file or directory in the HuggingFace repo tree.
type hfNode struct {
	Type string `json:"type"` // "file"|"directory" (sometimes "blob"|"tree")
	Path string `json:"path"` // repo-relative path
	// For LFS entries this is the pointer file size, not the blob size;
	// the real size lives in LFS.Size.
	Size   int64      `json:"size,omitempty"`
	LFS    *hfLfsInfo `json:"lfs,omitempty"` // non-nil when the entry is stored via Git LFS
	Sha256 string     `json:"sha256,omitempty"`
}

// hfLfsInfo contains LFS metadata for large files.
type hfLfsInfo struct {
	Oid    string `json:"oid,omitempty"`
	Size   int64  `json:"size,omitempty"` // actual blob size in bytes
	Sha256 string `json:"sha256,omitempty"`
}
// buildHTTPClient creates an HTTP client with sensible defaults.
func buildHTTPClient() *http.Client {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
MaxIdleConns: 64,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
return &http.Client{Transport: tr}
}
// addAuth adds authentication and user-agent headers to a request.
func addAuth(req *http.Request, token string) {
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
req.Header.Set("User-Agent", "hfdownloader/2")
}
// quickHeadAcceptRanges checks if a URL supports range requests.
// The first return value reports whether the HEAD request succeeded at all;
// the second whether the Accept-Ranges header advertises "bytes".
// The probe is bounded to 5s so callers never stall on a slow endpoint.
func quickHeadAcceptRanges(ctx context.Context, httpc *http.Client, token string, urlStr string) (bool, bool) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	req, _ := http.NewRequestWithContext(ctx, "HEAD", urlStr, nil)
	addAuth(req, token)
	resp, err := httpc.Do(req)
	if err != nil {
		return false, false
	}
	defer resp.Body.Close()
	// Header comparison is case-insensitive on the value as well.
	return true, strings.Contains(strings.ToLower(resp.Header.Get("Accept-Ranges")), "bytes")
}
// headForETag fetches ETag and SHA256 headers for a file via a HEAD request
// bounded to 10s. Values are returned as-is; empty strings mean "unknown".
// NOTE(review): the response status code is not checked, so a 4xx/5xx reply
// yields empty headers rather than an error — callers currently treat empty
// values as "unknown", but confirm that is intended.
func headForETag(ctx context.Context, httpc *http.Client, token string, it PlanItem) (etag string, remoteSha string, _ error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	req, _ := http.NewRequestWithContext(ctx, "HEAD", it.URL, nil)
	addAuth(req, token)
	resp, err := httpc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	return resp.Header.Get("ETag"), resp.Header.Get("x-amz-meta-sha256"), nil
}
// walkTree recursively walks the HuggingFace repo tree rooted at prefix
// ("" = repo root), invoking fn for every non-directory node.
// Directory nodes ("directory"/"tree") trigger recursion; any error from the
// API, from decoding, or from fn aborts the walk immediately.
func walkTree(ctx context.Context, httpc *http.Client, token, endpoint string, job Job, prefix string, fn func(hfNode) error) error {
	reqURL := treeURL(endpoint, job, prefix)
	req, _ := http.NewRequestWithContext(ctx, "GET", reqURL, nil)
	addAuth(req, token)
	resp, err := httpc.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Gated/private repos surface as 401/403; point the user at the repo page
	// where access is requested / terms are accepted.
	if resp.StatusCode == 401 {
		return fmt.Errorf("401 unauthorized: repo requires token or you do not have access (visit %s)", agreementURL(endpoint, job))
	}
	if resp.StatusCode == 403 {
		return fmt.Errorf("403 forbidden: please accept the repository terms: %s", agreementURL(endpoint, job))
	}
	if resp.StatusCode != 200 {
		return fmt.Errorf("tree API failed: %s", resp.Status)
	}
	var nodes []hfNode
	dec := json.NewDecoder(resp.Body)
	if err := dec.Decode(&nodes); err != nil {
		return err
	}
	for _, n := range nodes {
		switch n.Type {
		case "directory", "tree":
			// Recurse into subdirectories.
			if err := walkTree(ctx, httpc, token, endpoint, job, n.Path, fn); err != nil {
				return err
			}
		default:
			// Files (and any other non-directory node) go to the callback.
			if err := fn(n); err != nil {
				return err
			}
		}
	}
	return nil
}
// URL builders - all accept endpoint to support custom mirrors.

// repoRootURL returns "<endpoint>[/datasets]/<repo>", the base under which
// raw/resolve paths live. job.Repo contains a literal "/" which HuggingFace
// requires to stay unescaped.
func repoRootURL(endpoint string, job Job) string {
	ep := getEndpoint(endpoint)
	if job.IsDataset {
		return fmt.Sprintf("%s/datasets/%s", ep, job.Repo)
	}
	return fmt.Sprintf("%s/%s", ep, job.Repo)
}

// fileURL builds a per-file URL at the job's revision; mode is "raw" or
// "resolve".
func fileURL(endpoint string, job Job, mode, path string) string {
	return fmt.Sprintf("%s/%s/%s/%s", repoRootURL(endpoint, job), mode, url.PathEscape(job.Revision), pathEscapeAll(path))
}

// rawURL points at the small-file (non-LFS) content endpoint.
func rawURL(endpoint string, job Job, path string) string {
	return fileURL(endpoint, job, "raw", path)
}

// lfsURL points at the LFS "resolve" endpoint, which redirects to blob
// storage.
func lfsURL(endpoint string, job Job, path string) string {
	return fileURL(endpoint, job, "resolve", path)
}

// treeURL builds the tree-listing API URL; a non-empty prefix narrows the
// listing to a subdirectory and is omitted entirely (no trailing slash) when
// empty.
func treeURL(endpoint string, job Job, prefix string) string {
	kind := "models"
	if job.IsDataset {
		kind = "datasets"
	}
	u := fmt.Sprintf("%s/api/%s/%s/tree/%s", getEndpoint(endpoint), kind, job.Repo, url.PathEscape(job.Revision))
	if prefix != "" {
		u += "/" + pathEscapeAll(prefix)
	}
	return u
}

// agreementURL is the human-facing repo page where gated-repo terms are
// accepted.
func agreementURL(endpoint string, job Job) string {
	return repoRootURL(endpoint, job)
}
// pathEscapeAll escapes every segment of a slash-separated path while
// keeping the slashes themselves literal.
func pathEscapeAll(p string) string {
	parts := strings.Split(p, "/")
	out := make([]string, len(parts))
	for i, seg := range parts {
		out[i] = url.PathEscape(seg)
	}
	return strings.Join(out, "/")
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/downloader.go | pkg/hfdownloader/downloader.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import (
"context"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"time"
)
// progressReader wraps an io.Reader and emits progress events during reads.
type progressReader struct {
	reader     io.Reader
	total      int64  // expected total bytes for the file being read
	downloaded int64  // bytes read so far through this wrapper
	path       string // repo-relative path reported in events
	emit       func(ProgressEvent)
	lastEmit   time.Time
	interval   time.Duration // minimum spacing between emitted events
}

// newProgressReader builds a progressReader that reports "file_progress"
// events for path, throttled to at most one event per 200ms.
func newProgressReader(r io.Reader, total int64, path string, emit func(ProgressEvent)) *progressReader {
	return &progressReader{
		reader:   r,
		total:    total,
		path:     path,
		emit:     emit,
		lastEmit: time.Now(),
		interval: 200 * time.Millisecond, // Emit at most 5 times per second
	}
}

// Read forwards to the wrapped reader, accumulates the byte count, and emits
// a throttled file_progress event. An emit is forced when a read returns
// data together with io.EOF; a final (0, io.EOF) read emits nothing, so the
// last throttled event may undercount — the downloader emits its own
// file_done event on completion.
func (pr *progressReader) Read(p []byte) (n int, err error) {
	n, err = pr.reader.Read(p)
	if n > 0 {
		pr.downloaded += int64(n)
		// Throttle emissions to avoid flooding
		if time.Since(pr.lastEmit) >= pr.interval || err == io.EOF {
			pr.emit(ProgressEvent{
				Event:      "file_progress",
				Path:       pr.path,
				Downloaded: pr.downloaded,
				Total:      pr.total,
			})
			pr.lastEmit = time.Now()
		}
	}
	return n, err
}
// Download scans and downloads files from a HuggingFace repo.
// Resume is always ON—skip decisions rely ONLY on the filesystem:
// - LFS files: sha256 comparison when SHA is available.
// - non-LFS files: size comparison.
//
// Cancellation: all loops/sleeps/requests are tied to ctx for fast abort.
func Download(ctx context.Context, job Job, cfg Settings, progress ProgressFunc) error {
if ctx == nil {
ctx = context.Background()
}
if err := validate(job, cfg); err != nil {
return err
}
// Apply defaults
if job.Revision == "" {
job.Revision = "main"
}
if cfg.OutputDir == "" {
cfg.OutputDir = "Storage"
}
if cfg.Concurrency <= 0 {
cfg.Concurrency = 8
}
if cfg.MaxActiveDownloads <= 0 {
cfg.MaxActiveDownloads = runtime.GOMAXPROCS(0)
}
thresholdBytes, err := parseSizeString(cfg.MultipartThreshold, 256<<20)
if err != nil {
return fmt.Errorf("invalid multipart-threshold: %w", err)
}
httpc := buildHTTPClient()
emit := func(ev ProgressEvent) {
if progress != nil {
if ev.Time.IsZero() {
ev.Time = time.Now()
}
if ev.Repo == "" {
ev.Repo = job.Repo
}
if ev.Revision == "" {
ev.Revision = job.Revision
}
progress(ev)
}
}
emit(ProgressEvent{Event: "scan_start", Message: "scanning repo"})
plan, err := scanRepo(ctx, httpc, cfg.Token, job, cfg)
if err != nil {
return err
}
// Emit ALL plan_item events upfront so TUI knows total size immediately
for _, item := range plan.Items {
displayRel := item.RelativePath
if job.AppendFilterSubdir && item.Subdir != "" {
displayRel = filepath.ToSlash(filepath.Join(item.Subdir, item.RelativePath))
}
emit(ProgressEvent{Event: "plan_item", Path: displayRel, Total: item.Size})
}
// Ensure destination root exists
if err := os.MkdirAll(destinationBase(job, cfg), 0o755); err != nil {
return err
}
// Overall concurrency limiter (ctx-aware acquisition)
type token struct{}
lim := make(chan token, cfg.MaxActiveDownloads)
var wg sync.WaitGroup
errCh := make(chan error, len(plan.Items))
// To print "skip" only once per final path per run
var skipOnce sync.Map
var skippedCount int64
var downloadedCount int64
LOOP:
for _, item := range plan.Items {
// Stop scheduling more work once canceled
select {
case <-ctx.Done():
break LOOP
default:
}
it := item // capture for goroutine
// Acquire a slot or abort if canceled
select {
case lim <- token{}:
case <-ctx.Done():
break LOOP
}
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-lim }()
// Per-file context; ensures all inner loops stop on cancellation
fileCtx, fileCancel := context.WithCancel(ctx)
defer fileCancel()
// Final destination path
base := destinationBase(job, cfg)
finalRel := it.RelativePath
if job.AppendFilterSubdir && it.Subdir != "" {
finalRel = filepath.ToSlash(filepath.Join(it.Subdir, it.RelativePath))
}
dst := filepath.Join(base, finalRel)
if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
select {
case errCh <- err:
default:
}
return
}
// Filesystem-based skip/resume
alreadyOK, reason, err := shouldSkipLocal(it, dst)
if err != nil {
select {
case errCh <- err:
default:
}
return
}
if alreadyOK {
if _, loaded := skipOnce.LoadOrStore(finalRel, struct{}{}); !loaded {
emit(ProgressEvent{Event: "file_done", Path: finalRel, Message: "skip (" + reason + ")"})
atomic.AddInt64(&skippedCount, 1)
}
return
}
emit(ProgressEvent{Event: "file_start", Path: finalRel, Total: it.Size})
// Create a copy with updated RelativePath for progress display
itForIO := it
itForIO.RelativePath = finalRel
// Choose single/multipart path
var dlErr error
if it.Size >= thresholdBytes && it.AcceptRanges {
dlErr = downloadMultipart(fileCtx, httpc, cfg.Token, job, cfg, itForIO, dst, emit)
} else {
dlErr = downloadSingle(fileCtx, httpc, cfg.Token, job, cfg, itForIO, dst, emit)
}
if dlErr != nil {
select {
case errCh <- fmt.Errorf("download %s: %w", finalRel, dlErr):
default:
}
return
}
// Verify after download
if it.LFS && it.SHA256 != "" {
if err := verifySHA256(dst, it.SHA256); err != nil {
select {
case errCh <- fmt.Errorf("sha256 verify failed: %s: %w", finalRel, err):
default:
}
return
}
} else if cfg.Verify == "size" && it.Size > 0 {
fi, err := os.Stat(dst)
if err != nil || fi.Size() != it.Size {
select {
case errCh <- fmt.Errorf("size mismatch for %s", finalRel):
default:
}
return
}
} else if cfg.Verify == "sha256" {
_, remoteSha, _ := headForETag(fileCtx, httpc, cfg.Token, itForIO)
if remoteSha != "" {
if err := verifySHA256(dst, remoteSha); err != nil {
select {
case errCh <- fmt.Errorf("sha256 verify failed: %s: %w", finalRel, err):
default:
}
return
}
}
}
emit(ProgressEvent{Event: "file_done", Path: finalRel})
atomic.AddInt64(&downloadedCount, 1)
}()
}
wg.Wait()
close(errCh)
// Drain errors
var firstErr error
for e := range errCh {
if e != nil {
firstErr = e
break
}
}
if firstErr != nil {
emit(ProgressEvent{Level: "error", Event: "error", Message: firstErr.Error()})
return firstErr
}
if ctx.Err() != nil {
return ctx.Err()
}
emit(ProgressEvent{
Event: "done",
Message: fmt.Sprintf("download complete (downloaded %d, skipped %d)", downloadedCount, skippedCount),
})
return nil
}
// downloadSingle downloads a file in a single HTTP GET request, writing to a
// temporary "<dst>.part" file that is renamed into place only on success.
//
// The request is retried up to cfg.Retries times with exponential backoff
// (see newRetry). Fix: the temp file is truncated and rewound before every
// attempt so a retry can never append after the partial output of a failed
// attempt — previously that produced silently corrupt files.
func downloadSingle(ctx context.Context, httpc *http.Client, token string, job Job, cfg Settings, it PlanItem, dst string, emit func(ProgressEvent)) error {
	tmp := dst + ".part"
	out, err := os.Create(tmp)
	if err != nil {
		return err
	}
	// The deferred Close only matters on early-exit paths; the success path
	// closes explicitly (and checks the error) before renaming.
	defer out.Close()
	retry := newRetry(cfg)
	var lastErr error
	for attempt := 0; attempt <= cfg.Retries; attempt++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		// Start each attempt from a clean slate so failed attempts cannot
		// leave stale bytes ahead of the write position.
		if err := out.Truncate(0); err != nil {
			return err
		}
		if _, err := out.Seek(0, io.SeekStart); err != nil {
			return err
		}
		req, _ := http.NewRequestWithContext(ctx, "GET", it.URL, nil)
		addAuth(req, token)
		resp, err := httpc.Do(req)
		if err != nil {
			lastErr = err
		} else if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			lastErr = fmt.Errorf("bad status: %s", resp.Status)
			resp.Body.Close()
		} else {
			// Stream through a progress reader so the UI receives periodic
			// file_progress events.
			pr := newProgressReader(resp.Body, it.Size, it.RelativePath, emit)
			_, cerr := io.Copy(out, pr)
			resp.Body.Close()
			if cerr == nil {
				// Close before rename; a deferred write error surfaces here.
				if err := out.Close(); err != nil {
					return err
				}
				return os.Rename(tmp, dst)
			}
			lastErr = cerr
		}
		if attempt < cfg.Retries {
			emit(ProgressEvent{Event: "retry", Path: it.RelativePath, Attempt: attempt + 1, Message: lastErr.Error()})
			if d := retry.Next(); !sleepCtx(ctx, d) {
				return ctx.Err()
			}
		}
	}
	return lastErr
}
// downloadMultipart downloads a file with cfg.Concurrency parallel HTTP range
// requests. Each range lands in "<dst>.part-NN"; on success the parts are
// concatenated into "<dst>.part", which is renamed to dst, and the part files
// are removed. A part whose on-disk size already matches is reused (resume).
//
// Fixes over the previous revision:
//   - the Content-Length of a non-2xx HEAD response is no longer trusted;
//   - cancellation is checked after the workers finish, so a canceled run can
//     no longer assemble incomplete parts into the final file;
//   - the progress-ticker goroutine is stopped when this function returns
//     instead of leaking until the surrounding context ends.
func downloadMultipart(ctx context.Context, httpc *http.Client, token string, job Job, cfg Settings, it PlanItem, dst string, emit func(ProgressEvent)) error {
	// HEAD to resolve the size when the plan did not provide one.
	req, _ := http.NewRequestWithContext(ctx, "HEAD", it.URL, nil)
	addAuth(req, token)
	resp, err := httpc.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if it.Size == 0 && resp.StatusCode >= 200 && resp.StatusCode < 300 {
		if clen := resp.Header.Get("Content-Length"); clen != "" {
			var n int64
			fmt.Sscan(clen, &n)
			it.Size = n
		}
	}
	if it.Size == 0 {
		// Size unknown: ranges cannot be planned; fall back to a single GET.
		return downloadSingle(ctx, httpc, token, job, cfg, it, dst, emit)
	}
	// Plan the byte ranges: n roughly equal chunks, the last taking the remainder.
	n := cfg.Concurrency
	chunk := it.Size / int64(n)
	if chunk <= 0 {
		chunk = it.Size
		n = 1
	}
	tmpParts := make([]string, n)
	for i := 0; i < n; i++ {
		tmpParts[i] = fmt.Sprintf("%s.part-%02d", dst, i)
	}
	// Download parts in parallel.
	var wg sync.WaitGroup
	errCh := make(chan error, n)
	for i := 0; i < n; i++ {
		i := i
		start := int64(i) * chunk
		end := start + chunk - 1
		if i == n-1 {
			end = it.Size - 1
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			tmp := tmpParts[i]
			// Resume: reuse a part that is already complete.
			if fi, err := os.Stat(tmp); err == nil && fi.Size() == (end-start+1) {
				return
			}
			retry := newRetry(cfg)
			var lastErr error
			for attempt := 0; attempt <= cfg.Retries; attempt++ {
				select {
				case <-ctx.Done():
					return
				default:
				}
				rq, _ := http.NewRequestWithContext(ctx, "GET", it.URL, nil)
				addAuth(rq, token)
				rq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
				rs, err := httpc.Do(rq)
				if err != nil {
					lastErr = err
				} else if rs.StatusCode != 206 {
					lastErr = fmt.Errorf("range not supported (status %s)", rs.Status)
					rs.Body.Close()
				} else {
					// os.Create truncates, so a failed earlier attempt cannot
					// leave stale bytes in the part file.
					out, err := os.Create(tmp)
					if err != nil {
						lastErr = err
						rs.Body.Close()
					} else {
						_, lastErr = io.Copy(out, rs.Body)
						out.Close()
						rs.Body.Close()
						if lastErr == nil {
							return
						}
					}
				}
				if attempt < cfg.Retries {
					emit(ProgressEvent{Event: "retry", Path: it.RelativePath, Attempt: attempt + 1, Message: lastErr.Error()})
					if d := retry.Next(); !sleepCtx(ctx, d) {
						return
					}
				}
			}
			// Retries exhausted: report the last failure (non-blocking; the
			// channel has capacity n, so this never drops the first error).
			select {
			case errCh <- lastErr:
			default:
			}
		}()
	}
	// Periodic progress: sum the on-disk part sizes until the workers finish.
	progressDone := make(chan struct{})
	go func() {
		t := time.NewTicker(200 * time.Millisecond) // frequent updates for responsive UI
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-progressDone:
				return
			case <-t.C:
				var downloaded int64
				for _, p := range tmpParts {
					if fi, err := os.Stat(p); err == nil {
						downloaded += fi.Size()
					}
				}
				emit(ProgressEvent{Event: "file_progress", Path: it.RelativePath, Downloaded: downloaded, Total: it.Size})
			}
		}
	}()
	wg.Wait()
	close(progressDone)
	// Workers return silently on cancellation, so verify the context before
	// trusting the parts on disk.
	if err := ctx.Err(); err != nil {
		return err
	}
	select {
	case e := <-errCh:
		return e
	default:
	}
	// Assemble the parts into the final file via a temp name + rename.
	out, err := os.Create(dst + ".part")
	if err != nil {
		return err
	}
	for i := 0; i < n; i++ {
		in, err := os.Open(tmpParts[i])
		if err != nil {
			out.Close()
			return err
		}
		if _, err := io.Copy(out, in); err != nil {
			in.Close()
			out.Close()
			return err
		}
		in.Close()
	}
	if err := out.Close(); err != nil {
		return err
	}
	if err := os.Rename(dst+".part", dst); err != nil {
		return err
	}
	for _, p := range tmpParts {
		_ = os.Remove(p)
	}
	return nil
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/types.go | pkg/hfdownloader/types.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import "time"
// Job defines what to download from the HuggingFace Hub.
//
// A Job specifies the repository, revision, and optional filters for selecting
// which files to download. The Repo field is required and must be in
// "owner/name" format (e.g., "TheBloke/Mistral-7B-GGUF").
//
// Example:
//
//	job := hfdownloader.Job{
//		Repo:     "TheBloke/Mistral-7B-GGUF",
//		Revision: "main",
//		Filters:  []string{"q4_k_m"},
//	}
type Job struct {
	// Repo is the repository ID in "owner/name" format.
	// This field is required.
	//
	// Examples:
	//   - "TheBloke/Mistral-7B-GGUF"
	//   - "meta-llama/Llama-2-7b"
	//   - "facebook/flores" (dataset)
	Repo string
	// IsDataset indicates this is a dataset repo, not a model.
	// When true, the HuggingFace datasets API is used instead of the models API.
	IsDataset bool
	// Revision is the branch, tag, or commit SHA to download.
	// If empty, Download defaults it to "main".
	//
	// Examples:
	//   - "main" (default branch)
	//   - "v1.0" (tag)
	//   - "abc123" (commit SHA)
	Revision string
	// Filters specify which LFS files to download, matched case-insensitively
	// as substrings of the file name. If empty, all files are downloaded.
	// A file is included if it contains any of the filter strings.
	//
	// Examples:
	//   - []string{"q4_0"} matches "model.Q4_0.gguf"
	//   - []string{"q4_k_m", "q5_k_m"} matches either quantization
	//   - []string{"gguf"} matches all GGUF files
	Filters []string
	// Excludes specify patterns to exclude from download, matched
	// case-insensitively and applied after Filters: a file matching any
	// exclude pattern is skipped even if it matched a filter.
	//
	// Examples:
	//   - []string{".md"} excludes all markdown files
	//   - []string{"fp16", "fp32"} excludes full precision models
	//   - []string{"onnx"} excludes ONNX format files
	//
	// Credits: Feature suggested by jeroenkroese (#41)
	Excludes []string
	// AppendFilterSubdir saves each filter's matched files in a subdirectory
	// named after the filter — useful for organizing multiple quantizations.
	//
	// When true, a file matching filter "q4_0" is saved as
	// <output>/<repo>/q4_0/<filename> instead of <output>/<repo>/<filename>.
	AppendFilterSubdir bool
}
// Settings configures download behavior.
//
// All fields have sensible defaults; at minimum set OutputDir for where files
// should be saved. See DefaultSettings for a pre-filled value.
//
// Example with full configuration:
//
//	cfg := hfdownloader.Settings{
//		OutputDir:          "./Models",
//		Concurrency:        8,
//		MaxActiveDownloads: 4,
//		MultipartThreshold: "32MiB",
//		Verify:             "sha256",
//		Retries:            4,
//		Token:              os.Getenv("HF_TOKEN"),
//	}
type Settings struct {
	// OutputDir is the base directory for downloads.
	// Files are saved as: <OutputDir>/<owner>/<repo>/<path>
	// If empty, defaults to "Storage".
	OutputDir string
	// Concurrency is the number of parallel HTTP connections per file
	// when using multipart downloads. Higher values can improve speed
	// on fast networks but increase memory usage.
	// If <= 0, defaults to 8.
	Concurrency int
	// MaxActiveDownloads limits how many files download simultaneously —
	// overall parallelism across all files in a job.
	// If <= 0, Download defaults it to runtime.GOMAXPROCS(0).
	MaxActiveDownloads int
	// MultipartThreshold is the minimum file size to use multipart downloads;
	// smaller files are downloaded in a single request.
	// Accepts human-readable sizes: "32MiB", "256MB", "1GiB", etc.
	// If empty, defaults to "256MiB".
	MultipartThreshold string
	// Verify specifies how to verify non-LFS files after download.
	// LFS files are always verified by SHA-256 when the hash is available.
	//
	// Options:
	//   - "none":   no verification (fastest)
	//   - "size":   verify file size matches expected (default, fast)
	//   - "etag":   compare ETag header from server
	//   - "sha256": full SHA-256 hash verification (most secure, slower)
	Verify string
	// Retries is the maximum number of retry attempts per HTTP request.
	// Each retry uses exponential backoff with jitter.
	// If <= 0, defaults to 4.
	Retries int
	// BackoffInitial is the initial delay before the first retry.
	// Accepts duration strings: "400ms", "1s", "2s", etc.
	// If empty, defaults to "400ms".
	BackoffInitial string
	// BackoffMax caps the exponentially growing delay between retries.
	// If empty, defaults to "10s".
	BackoffMax string
	// Token is the HuggingFace access token for private or gated repos.
	// Get yours at: https://huggingface.co/settings/tokens
	// Can also be set via the HF_TOKEN environment variable.
	Token string
	// Endpoint is the base URL for the HuggingFace Hub API.
	// Use this to specify a custom mirror or enterprise endpoint.
	// If empty, defaults to "https://huggingface.co".
	//
	// Examples:
	//   - "https://huggingface.co" (default)
	//   - "https://hf-mirror.com" (China mirror)
	//   - "https://your-enterprise.com/hf" (enterprise)
	//
	// Credits: Feature suggested by windtail (#38)
	Endpoint string
}
// ProgressEvent represents a progress update during download.
//
// Events are emitted throughout the download process to allow for progress
// display, logging, or integration with other systems. The Event field
// identifies the type:
//   - "scan_start":    repository scanning has begun
//   - "plan_item":     a file has been added to the download plan
//   - "file_start":    download of a file has started
//   - "file_progress": periodic progress update during download
//   - "file_done":     file download complete (Message carries "skip (...)" info)
//   - "retry":         a retry attempt is being made
//   - "error":         an error occurred
//   - "done":          all downloads complete
type ProgressEvent struct {
	// Time is when the event occurred (UTC).
	Time time.Time `json:"time"`
	// Level is the log level: "debug", "info", "warn", "error".
	// Empty defaults to "info".
	Level string `json:"level,omitempty"`
	// Event is the event type identifier (see the list above).
	Event string `json:"event"`
	// Repo is the repository being processed.
	Repo string `json:"repo,omitempty"`
	// Revision is the branch/tag/commit being downloaded.
	Revision string `json:"revision,omitempty"`
	// Path is the relative file path within the repository.
	Path string `json:"path,omitempty"`
	// Bytes is the byte count carried by a "file_progress" event.
	// NOTE(review): consumers treat Downloaded as the authoritative cumulative
	// count and fall back to Bytes — confirm Bytes is truly legacy before use.
	Bytes int64 `json:"bytes,omitempty"`
	// Total is the total expected size in bytes.
	Total int64 `json:"total,omitempty"`
	// Downloaded is the cumulative bytes downloaded so far.
	Downloaded int64 `json:"downloaded,omitempty"`
	// Attempt is the retry attempt number (1-based).
	// Only set in "retry" events.
	Attempt int `json:"attempt,omitempty"`
	// Message contains additional context or error details.
	// For "file_done" events, may contain "skip (reason)" if skipped.
	Message string `json:"message,omitempty"`
	// IsLFS indicates whether this file is stored in Git LFS.
	IsLFS bool `json:"isLfs,omitempty"`
}
// ProgressFunc is a callback for receiving progress events.
//
// Implement this to display progress in a UI, log events, or track downloads.
// The callback is invoked from multiple goroutines and must be thread-safe;
// it is called synchronously from download workers, so it should also return
// quickly to avoid stalling transfers.
//
// Example:
//
//	progress := func(e hfdownloader.ProgressEvent) {
//		switch e.Event {
//		case "file_start":
//			fmt.Printf("Downloading: %s\n", e.Path)
//		case "file_done":
//			fmt.Printf("Complete: %s\n", e.Path)
//		case "error":
//			fmt.Printf("Error: %s\n", e.Message)
//		}
//	}
type ProgressFunc func(ProgressEvent)
// DefaultSettings returns Settings pre-filled with the library defaults.
//
// Use it as a starting point and override specific fields:
//
//	cfg := hfdownloader.DefaultSettings()
//	cfg.OutputDir = "./MyModels"
//	cfg.Token = os.Getenv("HF_TOKEN")
//
// NOTE(review): MaxActiveDownloads here is 4, while a zero value passed to
// Download defaults to runtime.GOMAXPROCS(0) — confirm which default is
// intended to be canonical.
func DefaultSettings() Settings {
	var s Settings
	s.OutputDir = "Storage"
	s.Concurrency = 8
	s.MaxActiveDownloads = 4
	s.MultipartThreshold = "256MiB"
	s.Verify = "size"
	s.Retries = 4
	s.BackoffInitial = "400ms"
	s.BackoffMax = "10s"
	return s
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/errors.go | pkg/hfdownloader/errors.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import (
"errors"
"fmt"
)
// Common sentinel errors returned by the library; match them with errors.Is.
var (
	// ErrInvalidRepo is returned when the repository ID is not in "owner/name" format.
	ErrInvalidRepo = errors.New("invalid repository ID: expected owner/name format")
	// ErrMissingRepo is returned when no repository is specified.
	ErrMissingRepo = errors.New("missing repository ID")
	// ErrUnauthorized is returned when authentication is required but not provided.
	// APIError.Is maps HTTP 401/403 onto this sentinel.
	ErrUnauthorized = errors.New("unauthorized: this repository requires authentication")
	// ErrNotFound is returned when the repository or revision does not exist (HTTP 404).
	ErrNotFound = errors.New("repository or revision not found")
	// ErrRateLimited is returned when the API rate limit is exceeded (HTTP 429).
	ErrRateLimited = errors.New("rate limited: too many requests")
)
// DownloadError decorates an underlying error with the repository-relative
// path of the file whose download failed. It unwraps to the inner error so
// errors.Is / errors.As keep working on the chain.
type DownloadError struct {
	Path string
	Err  error
}

// Error implements the error interface.
func (e *DownloadError) Error() string {
	return "download " + e.Path + ": " + fmt.Sprint(e.Err)
}

// Unwrap exposes the wrapped error.
func (e *DownloadError) Unwrap() error {
	return e.Err
}
// VerificationError reports a post-download integrity check failure for a file.
type VerificationError struct {
	Path     string
	Expected string
	Actual   string
	Method   string // "sha256", "size", "etag"
}

// Error implements the error interface.
func (e *VerificationError) Error() string {
	return fmt.Sprintf("verification failed for %s: %s mismatch (expected %s, got %s)", e.Path, e.Method, e.Expected, e.Actual)
}
// APIError carries the HTTP status (and optional server message) of a failed
// HuggingFace API call.
type APIError struct {
	StatusCode int
	Status     string
	Message    string
	URL        string
}

// Error implements the error interface, including the server-supplied
// message when one was present.
func (e *APIError) Error() string {
	if e.Message == "" {
		return fmt.Sprintf("API error %d: %s", e.StatusCode, e.Status)
	}
	return fmt.Sprintf("API error %d (%s): %s", e.StatusCode, e.Status, e.Message)
}

// IsRetryable reports whether a repeat of the request may succeed
// (rate limiting or transient server failures).
func (e *APIError) IsRetryable() bool {
	switch e.StatusCode {
	case 429, 500, 502, 503, 504:
		return true
	}
	return false
}
// Is maps HTTP status codes onto the package sentinel errors so callers can
// write errors.Is(err, ErrNotFound) and friends against an *APIError.
func (e *APIError) Is(target error) bool {
	var sentinel error
	switch e.StatusCode {
	case 401, 403:
		sentinel = ErrUnauthorized
	case 404:
		sentinel = ErrNotFound
	case 429:
		sentinel = ErrRateLimited
	default:
		return false
	}
	return errors.Is(target, sentinel)
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/utils.go | pkg/hfdownloader/utils.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader
import (
"context"
"errors"
"fmt"
"strings"
"time"
)
// IsValidModelName reports whether modelName looks like a HuggingFace repo ID:
// exactly one "/" separating a non-empty owner from a non-empty name.
func IsValidModelName(modelName string) bool {
	owner, name, found := strings.Cut(modelName, "/")
	return found && owner != "" && name != "" && !strings.Contains(name, "/")
}
// validate checks that the job is well-formed before any network activity.
// cfg is currently unused but kept so the signature can grow without churn.
//
// NOTE(review): ErrMissingRepo / ErrInvalidRepo sentinels exist in errors.go;
// consider returning them here so callers can use errors.Is (kept as inline
// errors for now to preserve existing behavior/messages).
func validate(job Job, cfg Settings) error {
	switch {
	case job.Repo == "":
		return errors.New("missing repo")
	case !IsValidModelName(job.Repo):
		return fmt.Errorf("invalid repo id %q (expected owner/name)", job.Repo)
	default:
		return nil
	}
}
// backoff implements exponential backoff with a small amount of jitter.
type backoff struct {
	next   time.Duration // delay to hand out for the upcoming attempt
	max    time.Duration // upper bound the growing delay is clamped to
	mult   float64       // growth factor applied after each attempt
	jitter time.Duration // maximum extra delay added per attempt
}
// newRetry builds a backoff schedule from cfg, falling back to a 400ms
// initial delay and a 10s cap when the configured durations are absent or
// fail to parse.
func newRetry(cfg Settings) *backoff {
	initial := 400 * time.Millisecond
	ceiling := 10 * time.Second
	if d, err := time.ParseDuration(defaultString(cfg.BackoffInitial, "400ms")); err == nil {
		initial = d
	}
	if d, err := time.ParseDuration(defaultString(cfg.BackoffMax, "10s")); err == nil {
		ceiling = d
	}
	return &backoff{
		next:   initial,
		max:    ceiling,
		mult:   1.6,
		jitter: 120 * time.Millisecond,
	}
}
// Next returns the delay to wait before the next attempt and advances the
// schedule: the base delay grows by b.mult, clamped to b.max.
//
// The jitter term is pseudo-random without an RNG: time.Now().UnixNano()%3
// yields 0, 1, or 2, so 0, half, or all of b.jitter is added on top.
func (b *backoff) Next() time.Duration {
	d := b.next + time.Duration(int64(b.jitter)*int64(time.Now().UnixNano()%3)/2)
	b.next = time.Duration(float64(b.next) * b.mult)
	if b.next > b.max {
		b.next = b.max
	}
	return d
}
// sleepCtx waits for d or returns false if ctx is canceled first.
func sleepCtx(ctx context.Context, d time.Duration) bool {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return false
case <-timer.C:
return true
}
}
// parseSizeString parses a human-readable size string (e.g. "32MiB", "256MB",
// "1024") into a byte count, ignoring case and surrounding whitespace.
//
// An empty string returns def. Decimal units (KB/MB/GB/TB) are powers of
// 1000; binary units (KiB/MiB/GiB/TiB) are powers of 1024; a bare number or
// a "B" suffix is taken as bytes. TB/TiB support is new (backward-compatible
// generalization — previously they were rejected as unknown units).
func parseSizeString(s string, def int64) (int64, error) {
	s = strings.TrimSpace(s)
	if s == "" {
		return def, nil
	}
	var n float64
	var unit string
	if _, err := fmt.Sscanf(strings.ToUpper(s), "%f%s", &n, &unit); err != nil {
		// No unit suffix: accept a plain integer byte count.
		var nn int64
		if _, e2 := fmt.Sscanf(s, "%d", &nn); e2 == nil {
			return nn, nil
		}
		return 0, err
	}
	switch unit {
	case "B", "":
		return int64(n), nil
	case "KB":
		return int64(n * 1000), nil
	case "MB":
		return int64(n * 1000 * 1000), nil
	case "GB":
		return int64(n * 1000 * 1000 * 1000), nil
	case "TB":
		return int64(n * 1000 * 1000 * 1000 * 1000), nil
	case "KIB":
		return int64(n * 1024), nil
	case "MIB":
		return int64(n * 1024 * 1024), nil
	case "GIB":
		return int64(n * 1024 * 1024 * 1024), nil
	case "TIB":
		return int64(n * 1024 * 1024 * 1024 * 1024), nil
	default:
		return 0, fmt.Errorf("unknown unit %q", unit)
	}
}
// defaultString returns def when s is empty, and s otherwise.
func defaultString(s string, def string) string {
	if s != "" {
		return s
	}
	return def
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/example_test.go | pkg/hfdownloader/example_test.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package hfdownloader_test
import (
"context"
"fmt"
"os"
"github.com/bodaay/HuggingFaceModelDownloader/pkg/hfdownloader"
)
func ExampleDownload() {
job := hfdownloader.Job{
Repo: "hf-internal-testing/tiny-random-gpt2",
Revision: "main",
}
cfg := hfdownloader.Settings{
OutputDir: "./example_output",
Concurrency: 4,
MaxActiveDownloads: 2,
}
// Progress callback
progress := func(e hfdownloader.ProgressEvent) {
switch e.Event {
case "scan_start":
fmt.Println("Scanning repository...")
case "file_done":
fmt.Printf("Downloaded: %s\n", e.Path)
case "done":
fmt.Println("Complete!")
}
}
ctx := context.Background()
err := hfdownloader.Download(ctx, job, cfg, progress)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
// Cleanup
os.RemoveAll("./example_output")
}
func ExampleDownload_withFilters() {
// Download only specific quantizations
job := hfdownloader.Job{
Repo: "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
Filters: []string{"q4_k_m", "q5_k_m"}, // Case-insensitive matching
}
cfg := hfdownloader.Settings{
OutputDir: "./Models",
}
err := hfdownloader.Download(context.Background(), job, cfg, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func ExampleDownload_dataset() {
// Download a dataset instead of a model
job := hfdownloader.Job{
Repo: "facebook/flores",
IsDataset: true,
}
cfg := hfdownloader.Settings{
OutputDir: "./Datasets",
}
err := hfdownloader.Download(context.Background(), job, cfg, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func ExamplePlanRepo() {
job := hfdownloader.Job{
Repo: "hf-internal-testing/tiny-random-gpt2",
}
cfg := hfdownloader.Settings{}
plan, err := hfdownloader.PlanRepo(context.Background(), job, cfg)
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Printf("Found %d files:\n", len(plan.Items))
for _, item := range plan.Items {
lfsTag := ""
if item.LFS {
lfsTag = " [LFS]"
}
fmt.Printf(" %s (%d bytes)%s\n", item.RelativePath, item.Size, lfsTag)
}
}
func ExampleIsValidModelName() {
// Valid names
fmt.Println(hfdownloader.IsValidModelName("TheBloke/Mistral-7B-GGUF")) // true
fmt.Println(hfdownloader.IsValidModelName("facebook/opt-1.3b")) // true
fmt.Println(hfdownloader.IsValidModelName("hf-internal-testing/tiny-gpt")) // true
// Invalid names
fmt.Println(hfdownloader.IsValidModelName("Mistral-7B-GGUF")) // false (no owner)
fmt.Println(hfdownloader.IsValidModelName("")) // false (empty)
fmt.Println(hfdownloader.IsValidModelName("/model")) // false (empty owner)
// Output:
// true
// true
// true
// false
// false
// false
}
func ExampleJob_filterSubdirs() {
// Organize downloaded files by filter match
job := hfdownloader.Job{
Repo: "TheBloke/Llama-2-7B-GGUF",
Filters: []string{"q4_0", "q5_0"},
AppendFilterSubdir: true, // Creates separate subdirectories
}
// This will create:
// ./Models/TheBloke/Llama-2-7B-GGUF/q4_0/llama-2-7b.Q4_0.gguf
// ./Models/TheBloke/Llama-2-7B-GGUF/q5_0/llama-2-7b.Q5_0.gguf
cfg := hfdownloader.Settings{
OutputDir: "./Models",
}
_ = hfdownloader.Download(context.Background(), job, cfg, nil)
}
func ExampleSettings_withAuth() {
// For private or gated repositories
cfg := hfdownloader.Settings{
OutputDir: "./Models",
Token: os.Getenv("HF_TOKEN"), // Use environment variable
}
job := hfdownloader.Job{
Repo: "meta-llama/Llama-2-7b", // Requires authentication
}
err := hfdownloader.Download(context.Background(), job, cfg, nil)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
}
func ExampleSettings_performance() {
// High-performance settings for fast networks
cfg := hfdownloader.Settings{
OutputDir: "./Models",
Concurrency: 16, // 16 parallel connections per file
MaxActiveDownloads: 4, // 4 files at once
MultipartThreshold: "16MiB", // Use multipart for files >= 16MiB
Retries: 6, // More retries for unstable connections
BackoffInitial: "200ms", // Faster retry
BackoffMax: "30s", // Longer max for rate limiting
Verify: "sha256", // Full verification
}
_ = cfg // Use in Download()
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/pkg/hfdownloader/doc.go | pkg/hfdownloader/doc.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
/*
Package hfdownloader provides a Go library for downloading models and datasets
from the HuggingFace Hub with resume support, multipart downloads, and verification.
# Features
- Resumable downloads: Interrupted downloads automatically resume from where they left off
- Multipart downloads: Large files are downloaded in parallel chunks for faster speeds
- LFS support: Handles Git LFS files transparently
- Filtering: Download only specific files matching patterns (e.g., "q4_0", "gguf")
- Verification: SHA-256, ETag, or size-based integrity verification
- Progress events: Real-time progress callbacks for UI integration
- Context cancellation: Full support for graceful shutdown via context
# Quick Start
Download a model with default settings:
package main
import (
"context"
"fmt"
"log"
"github.com/bodaay/HuggingFaceModelDownloader/pkg/hfdownloader"
)
func main() {
job := hfdownloader.Job{
Repo: "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
Revision: "main",
Filters: []string{"q4_0"}, // Only download q4_0 quantization
}
cfg := hfdownloader.Settings{
OutputDir: "./Models",
Concurrency: 8,
Token: "", // Set for private repos
}
err := hfdownloader.Download(context.Background(), job, cfg, func(e hfdownloader.ProgressEvent) {
fmt.Printf("[%s] %s: %s\n", e.Event, e.Path, e.Message)
})
if err != nil {
log.Fatal(err)
}
}
# Downloading Datasets
Set IsDataset to true for dataset repositories:
job := hfdownloader.Job{
Repo: "facebook/flores",
IsDataset: true,
}
# Dry-Run / Planning
Get the file list without downloading:
plan, err := hfdownloader.PlanRepo(ctx, job, cfg)
if err != nil {
log.Fatal(err)
}
for _, item := range plan.Items {
fmt.Printf("%s (%d bytes, LFS=%v)\n", item.RelativePath, item.Size, item.LFS)
}
# Progress Events
The ProgressFunc callback receives events throughout the download:
- scan_start: Repository scanning has begun
- plan_item: A file has been added to the download plan
- file_start: Download of a file has started
- file_progress: Periodic progress update during download
- file_done: File download complete (or skipped)
- retry: A retry attempt is being made
- error: An error occurred
- done: All downloads complete
# Filter Matching
Filters are matched case-insensitively against LFS file names. Multiple filters
can be specified; a file matches if it contains any of the filter strings:
job := hfdownloader.Job{
Repo: "TheBloke/Llama-2-7B-GGUF",
Filters: []string{"q4_0", "q5_0", "q8_0"},
}
With AppendFilterSubdir, matched files are organized into subdirectories:
job := hfdownloader.Job{
Repo: "TheBloke/Llama-2-7B-GGUF",
Filters: []string{"q4_0", "q5_0"},
AppendFilterSubdir: true, // Creates q4_0/ and q5_0/ subdirectories
}
# Resume Behavior
Resume is always enabled. Skip decisions are filesystem-based:
- LFS files: Compared by SHA-256 hash (if available in metadata)
- Non-LFS files: Compared by file size
No external metadata files are created or required.
# Verification Options
The Settings.Verify field controls post-download verification:
- "none": No verification (fastest)
- "size": Verify file size matches expected (default)
- "etag": Compare ETag header from server
- "sha256": Full SHA-256 hash verification (most secure, slower)
# Concurrency
Two levels of concurrency are configurable:
- Concurrency: Number of parallel HTTP connections per file (for multipart downloads)
- MaxActiveDownloads: Maximum number of files downloading simultaneously
# Error Handling
Download returns the first error encountered. The context can be used for
cancellation, and canceled downloads can be resumed on the next run.
# Authentication
For private or gated repositories, set the Token field in Settings:
cfg := hfdownloader.Settings{
Token: "hf_xxxxxxxxxxxxx", // Your HuggingFace access token
}
Tokens can be generated at: https://huggingface.co/settings/tokens
*/
package hfdownloader
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/cmd/hfdownloader/main.go | cmd/hfdownloader/main.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package main
import (
"os"
"github.com/bodaay/HuggingFaceModelDownloader/internal/cli"
)
// Version is set at build time via ldflags
var Version = "2.3.0-dev"
// main delegates to the internal CLI package and maps any returned error to a
// non-zero exit code. Error reporting to the user is left to cli.Execute.
func main() {
	if err := cli.Execute(Version); err != nil {
		os.Exit(1)
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/tui/progress.go | internal/tui/progress.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package tui
import (
"fmt"
"os"
"runtime"
"sort"
"strings"
"sync"
"time"
"unicode/utf8"
"golang.org/x/term"
"github.com/bodaay/HuggingFaceModelDownloader/pkg/hfdownloader"
)
// LiveRenderer renders a cross-platform, adaptive, colorful progress table.
// - Uses ANSI when available; plain text fallback otherwise.
// - Adapts to terminal width/height.
// - Shows job header + totals + active file rows with progress bars.
//
// Mutable state is guarded by mu; progress events arrive on the buffered
// events channel and are consumed by a single goroutine (see loop).
type LiveRenderer struct {
	job hfdownloader.Job      // job being rendered (header information)
	cfg hfdownloader.Settings // settings the job runs with

	mu     sync.Mutex
	start  time.Time                       // construction time of the renderer
	events chan hfdownloader.ProgressEvent // inbound events; overflow is dropped by Handler
	done   chan struct{}                   // closed by Close to stop loop

	stopped bool // set once Close has run
	hideCur bool // cursor was hidden at startup and must be restored by Close

	supports bool // ANSI + interactive
	noColor  bool // honors the NO_COLOR environment convention

	lastRedraw time.Time

	// aggregate
	totalFiles int
	totalBytes int64

	// per-file state
	files map[string]*fileState

	// overall rolling speed (EMA smoothed)
	lastTotalBytes int64
	lastTick       time.Time
	smoothedSpeed  float64 // EMA smoothed overall speed
}
// fileState is the renderer-side view of a single file transfer.
type fileState struct {
	path   string
	total  int64  // expected size in bytes (0 when unknown)
	bytes  int64  // bytes downloaded so far
	status string // "queued","downloading","done","skip","error"
	err    string
	// rolling speed (EMA smoothed)
	lastBytes     int64
	lastTime      time.Time
	smoothedSpeed float64 // EMA smoothed per-file speed
	// metrics
	started time.Time // set on the first file_start event for this path
}
// EMA smoothing factor (0.1 = very smooth, 0.5 = responsive)
const speedSmoothingFactor = 0.3

// smoothSpeed folds the current instantaneous speed into an exponential
// moving average; the very first sample (previous == 0) passes through as-is.
func smoothSpeed(current, previous float64) float64 {
	if previous != 0 {
		return speedSmoothingFactor*current + (1-speedSmoothingFactor)*previous
	}
	return current
}
// NewLiveRenderer creates a new live TUI renderer and starts its render loop
// goroutine. When the terminal is interactive with ANSI support and NO_COLOR
// is unset, the cursor is hidden until Close restores it.
func NewLiveRenderer(job hfdownloader.Job, cfg hfdownloader.Settings) *LiveRenderer {
	lr := &LiveRenderer{
		job:     job,
		cfg:     cfg,
		start:   time.Now(),
		events:  make(chan hfdownloader.ProgressEvent, 2048),
		done:    make(chan struct{}),
		files:   map[string]*fileState{},
		noColor: os.Getenv("NO_COLOR") != "",
	}
	// Detect interactive + ANSI support
	lr.supports = isInteractive() && ansiOkay()
	if lr.supports && !lr.noColor {
		// Hide cursor (ANSI escape); Close emits the matching "show cursor".
		fmt.Fprint(os.Stdout, "\x1b[?25l")
		lr.hideCur = true
	}
	go lr.loop()
	return lr
}
// Close stops the renderer and restores the terminal. It is safe to call
// more than once; only the first call has any effect.
func (lr *LiveRenderer) Close() {
	lr.mu.Lock()
	if lr.stopped {
		lr.mu.Unlock()
		return
	}
	lr.stopped = true
	close(lr.done)
	lr.mu.Unlock()
	// Wait a tick so loop() can paint its final frame.
	// NOTE(review): this is a fixed sleep, not a real join — a WaitGroup
	// would guarantee the final render finished; confirm if flicker or a
	// truncated last frame is ever observed.
	time.Sleep(60 * time.Millisecond)
	if lr.hideCur {
		fmt.Fprint(os.Stdout, "\x1b[?25h") // show cursor
	}
	// Final newline to separate from prompt
	fmt.Fprintln(os.Stdout)
}
// Handler returns a ProgressFunc that feeds events to the renderer.
// The send is non-blocking: when the buffered events channel is full the
// event is dropped so the downloader never stalls on the UI.
func (lr *LiveRenderer) Handler() hfdownloader.ProgressFunc {
	return func(ev hfdownloader.ProgressEvent) {
		select {
		case lr.events <- ev:
		default:
			// Drop events if UI is congested; we keep rendering smoothly.
		}
	}
}
// loop is the renderer's event pump: it applies incoming progress events,
// repaints on a ~150ms ticker, and paints one final frame when done is
// closed by Close().
func (lr *LiveRenderer) loop() {
	ticker := time.NewTicker(150 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-lr.done:
			lr.render(true)
			return
		case ev := <-lr.events:
			lr.apply(ev)
		case <-ticker.C:
			lr.render(false)
		}
	}
}
// apply folds a single progress event into renderer state. It is called
// only from loop(), but takes lr.mu because render() reads the same state
// from the ticker path.
func (lr *LiveRenderer) apply(ev hfdownloader.ProgressEvent) {
	lr.mu.Lock()
	defer lr.mu.Unlock()
	switch ev.Event {
	case "plan_item":
		// One event per planned file: seed the aggregate totals.
		fs := lr.ensure(ev.Path)
		fs.total = ev.Total
		fs.status = "queued"
		lr.totalFiles++
		lr.totalBytes += ev.Total
	case "file_start":
		fs := lr.ensure(ev.Path)
		fs.total = ev.Total
		fs.status = "downloading"
		if fs.started.IsZero() {
			fs.started = time.Now()
		}
	case "file_progress":
		fs := lr.ensure(ev.Path)
		// Only update total if it's provided and reasonable
		if ev.Total > 0 {
			fs.total = ev.Total
		}
		// Prefer Downloaded (cumulative) over Bytes (legacy/delta)
		if ev.Downloaded > 0 {
			fs.bytes = ev.Downloaded
		} else if ev.Bytes > 0 {
			fs.bytes = ev.Bytes
		}
		// Seed the speed window on the first progress sample.
		if fs.lastTime.IsZero() {
			fs.lastTime = time.Now()
			fs.lastBytes = fs.bytes
		}
	case "file_done":
		fs := lr.ensure(ev.Path)
		// A message starting with "skip" marks an already-present file.
		if strings.HasPrefix(strings.ToLower(ev.Message), "skip") {
			fs.status = "skip"
		} else {
			fs.status = "done"
		}
		fs.bytes = fs.total
	case "retry":
		// Could record attempts if you want a column
	case "error":
		fs := lr.ensure(ev.Path)
		fs.status = "error"
		fs.err = ev.Message
	case "done":
		// mark all as done if any left
	}
}
// ensure returns the tracked state for path, creating a fresh entry on
// first sight. The caller must hold lr.mu.
func (lr *LiveRenderer) ensure(path string) *fileState {
	state, known := lr.files[path]
	if !known {
		state = &fileState{path: path}
		lr.files[path] = state
	}
	return state
}
// render paints one frame: job header, overall totals bar, and a table of
// per-file rows. With ANSI support the screen is cleared and repainted in
// place; otherwise the frame is appended as plain text. Called from
// loop() on the ticker and once more on shutdown.
//
// NOTE(review): the final parameter is currently unused — confirm whether
// a distinct last-frame rendering was intended.
func (lr *LiveRenderer) render(final bool) {
	lr.mu.Lock()
	defer lr.mu.Unlock()
	// compute size
	w, h := termSize()
	minW := 70
	if w < minW {
		w = minW
	}
	if h < 12 {
		h = 12
	}
	// aggregate totals - compute from individual file states (more accurate)
	var aggBytes int64
	var aggTotal int64
	var active []*fileState
	var doneCnt, skipCnt, errCnt int
	for _, fs := range lr.files {
		if fs.status == "downloading" {
			active = append(active, fs)
		}
		if fs.status == "done" {
			doneCnt++
		}
		if fs.status == "skip" {
			skipCnt++
		}
		if fs.status == "error" {
			errCnt++
		}
		// Accumulate total from each file's known total (more accurate than plan_item accumulation)
		aggTotal += fs.total
		if fs.bytes > 0 {
			aggBytes += fs.bytes
		} else if fs.status == "done" || fs.status == "skip" {
			aggBytes += fs.total
		}
	}
	// Use computed total if we have files, otherwise fall back to plan_item total
	if aggTotal > 0 {
		lr.totalBytes = aggTotal
	}
	queued := lr.totalFiles - (len(active) + doneCnt + skipCnt + errCnt)
	if queued < 0 {
		queued = 0
	}
	// overall speed (EMA smoothed)
	now := time.Now()
	if !lr.lastTick.IsZero() && now.After(lr.lastTick) {
		deltaB := aggBytes - lr.lastTotalBytes
		deltaT := now.Sub(lr.lastTick).Seconds()
		if deltaT > 0.05 { // Only update if enough time passed (50ms min)
			instantSpeed := float64(deltaB) / deltaT
			if instantSpeed >= 0 { // Ignore negative deltas (can happen with rounding)
				lr.smoothedSpeed = smoothSpeed(instantSpeed, lr.smoothedSpeed)
			}
			lr.lastTick = now
			lr.lastTotalBytes = aggBytes
		}
	} else if lr.lastTick.IsZero() {
		// First frame: seed the speed window without producing a sample.
		lr.lastTick = now
		lr.lastTotalBytes = aggBytes
	}
	speed := lr.smoothedSpeed
	// overall ETA
	var etaStr string
	if speed > 0 && lr.totalBytes > 0 && aggBytes < lr.totalBytes {
		rem := float64(lr.totalBytes-aggBytes) / speed
		etaStr = fmtDuration(time.Duration(rem) * time.Second)
	} else {
		etaStr = "—"
	}
	// Clear + render (ANSI) or plain
	if lr.supports {
		// Clear screen and go home
		fmt.Fprint(os.Stdout, "\x1b[H\x1b[2J")
	}
	// Header
	rev := lr.job.Revision
	if rev == "" {
		rev = "main"
	}
	jobline := fmt.Sprintf("Repo: %s Rev: %s Dataset: %v", lr.job.Repo, rev, lr.job.IsDataset)
	fmt.Fprintln(os.Stdout, colorize(bold(jobline), "fg=cyan", lr))
	cfgline := fmt.Sprintf("Out: %s Conns: %d MaxActive: %d Verify: %s Retries: %d Threshold: %s",
		lr.cfg.OutputDir, lr.cfg.Concurrency, lr.cfg.MaxActiveDownloads, lr.cfg.Verify, lr.cfg.Retries, lr.cfg.MultipartThreshold)
	fmt.Fprintln(os.Stdout, dim(cfgline))
	// Totals line with bar
	prog := float64(0)
	if lr.totalBytes > 0 {
		prog = float64(aggBytes) / float64(lr.totalBytes)
		if prog < 0 {
			prog = 0
		}
		if prog > 1 {
			prog = 1
		}
	}
	bar := renderBar(int(float64(w)*0.4), prog, lr) // 40% of width
	speedStr := humanBytes(int64(speed)) + "/s"
	fmt.Fprintf(os.Stdout, "%s %s %s/%s %s ETA %s\n",
		colorize(bar, "fg=green", lr),
		percent(prog),
		humanBytes(aggBytes), humanBytes(lr.totalBytes),
		speedStr, etaStr,
	)
	// Table header
	fmt.Fprintln(os.Stdout)
	cols := []string{"Status", "File", "Progress", "Speed", "ETA"}
	fmt.Fprintln(os.Stdout, headerRow(cols, w))
	// Determine rows to show
	maxRows := h - 8 // header+totals+footer allowance
	if maxRows < 3 {
		maxRows = 3
	}
	// Sort active by bytes desc (more movement first)
	sort.Slice(active, func(i, j int) bool { return active[i].bytes > active[j].bytes })
	// Compose rows
	shown := 0
	for _, fs := range active {
		if shown >= maxRows {
			break
		}
		shown++
		fmt.Fprintln(os.Stdout, renderFileRow(fs, w, lr))
	}
	// If space remains, show recently finished or queued small set
	if shown < maxRows {
		var rest []*fileState
		for _, fs := range lr.files {
			if fs.status == "done" || fs.status == "skip" || fs.status == "error" {
				rest = append(rest, fs)
			}
		}
		sort.Slice(rest, func(i, j int) bool { return rest[i].started.After(rest[j].started) })
		for _, fs := range rest {
			if shown >= maxRows {
				break
			}
			fmt.Fprintln(os.Stdout, renderFileRow(fs, w, lr))
			shown++
		}
	}
	// Footer hint
	if lr.supports {
		fmt.Fprintln(os.Stdout, dim(fmt.Sprintf("Press Ctrl+C to cancel • %s %s",
			runtime.GOOS, runtime.GOARCH)))
	}
}
// renderFileRow formats one table row (status, filename, progress bar,
// speed, ETA) fitted to terminal width w. It also advances the per-file
// EMA speed bookkeeping on fs; the caller (render) holds lr.mu, which is
// what makes that mutation safe.
func renderFileRow(fs *fileState, w int, lr *LiveRenderer) string {
	// column widths (adaptive)
	statusW := 9
	speedW := 10
	etaW := 9
	// remaining for filename + progress
	remain := w - (statusW + speedW + etaW + 8) // gutters
	if remain < 20 {
		remain = 20
	}
	// split for file/progress
	fileW := int(float64(remain) * 0.50)
	if fileW < 18 {
		fileW = 18
	}
	progressW := remain - fileW
	// status glyph + color per state
	var st, col string
	switch fs.status {
	case "downloading":
		st, col = "▶", "fg=yellow"
	case "done":
		st, col = "✓", "fg=green"
	case "skip":
		st, col = "•", "fg=blue"
	case "error":
		st, col = "×", "fg=red"
	default:
		st, col = "…", "fg=magenta"
	}
	status := pad(colorize(st+" "+fs.status, col, lr), statusW)
	// filename
	name := ellipsizeMiddle(fs.path, fileW)
	// progress fraction, clamped to [0,1]
	var p float64
	if fs.total > 0 {
		p = float64(fs.bytes) / float64(fs.total)
		if p < 0 {
			p = 0
		}
		if p > 1 {
			p = 1
		}
	}
	bar := renderBar(progressW-18, p, lr) // leave room for numbers
	progTxt := fmt.Sprintf(" %s/%s %s", humanBytes(fs.bytes), humanBytes(fs.total), percent(p))
	progress := bar + progTxt
	if utf8.RuneCountInString(progress) > progressW {
		// simple cut if needed
		runes := []rune(progress)
		progress = string(runes[:progressW])
	}
	// speed (per-file, EMA smoothed)
	now := time.Now()
	if !fs.lastTime.IsZero() {
		dt := now.Sub(fs.lastTime).Seconds()
		if dt > 0.05 { // Only update if enough time passed (50ms min)
			delta := fs.bytes - fs.lastBytes
			instantSpeed := float64(delta) / dt
			if instantSpeed >= 0 {
				fs.smoothedSpeed = smoothSpeed(instantSpeed, fs.smoothedSpeed)
			}
			fs.lastTime = now
			fs.lastBytes = fs.bytes
		}
	} else {
		// First sighting: seed the window without emitting a sample.
		fs.lastTime = now
		fs.lastBytes = fs.bytes
	}
	speed := fs.smoothedSpeed
	speedTxt := pad(humanBytes(int64(speed))+"/s", speedW)
	// eta (use smoothed speed for stable ETA)
	eta := "—"
	if speed > 0 && fs.total > 0 && fs.bytes < fs.total {
		rem := float64(fs.total-fs.bytes) / speed
		eta = fmtDuration(time.Duration(rem) * time.Second)
	}
	etaTxt := pad(eta, etaW)
	return fmt.Sprintf("%s %s %s %s %s", status, pad(name, fileW), progress, speedTxt, etaTxt)
}
// headerRow renders the bold column headers, truncated to the terminal
// width. Width is measured in runes, so the embedded ANSI bold escapes
// count toward the limit; with the wide minimum width enforced by render
// this is harmless in practice.
func headerRow(cols []string, w int) string {
	styled := make([]string, 0, len(cols))
	for _, col := range cols {
		styled = append(styled, bold(col))
	}
	joined := strings.Join(styled, " ")
	if utf8.RuneCountInString(joined) <= w {
		return joined
	}
	return string([]rune(joined)[:w])
}
// ellipsizeMiddle fits s into a w-rune column: short strings are padded,
// long ones are shortened to "head...tail". Fixes the original behavior
// where a long string with w <= 3 was returned untruncated and overflowed
// the column; such widths now hard-truncate instead. The old
// "2*half+3 > len(runes)" guard was unreachable (it can only trigger when
// the string already fits) and has been dropped.
func ellipsizeMiddle(s string, w int) string {
	if utf8.RuneCountInString(s) <= w {
		return pad(s, w)
	}
	if w <= 3 {
		// No room for an ellipsis; cut so the column never overflows.
		if w < 0 {
			w = 0
		}
		return string([]rune(s)[:w])
	}
	runes := []rune(s)
	half := (w - 3) / 2
	return pad(string(runes[:half])+"..."+string(runes[len(runes)-half:]), w)
}
// pad right-pads s with spaces to width w, measured in runes.
// Strings already at or beyond the width are returned unchanged.
func pad(s string, w int) string {
	if missing := w - utf8.RuneCountInString(s); missing > 0 {
		return s + strings.Repeat(" ", missing)
	}
	return s
}
// renderBar draws a width-cell progress bar of filled/unfilled blocks.
// p is the completion fraction and is clamped to [0, 1] here so a bad
// caller value can never drive strings.Repeat to panic on a negative
// count (the original relied on every caller pre-clamping).
// lr is currently unused but kept so call sites stay stable if styling
// ever becomes renderer-dependent.
func renderBar(width int, p float64, lr *LiveRenderer) string {
	if width < 3 {
		width = 3
	}
	if p < 0 {
		p = 0
	} else if p > 1 {
		p = 1
	}
	filled := int(p * float64(width))
	if filled > width {
		filled = width
	}
	return strings.Repeat("█", filled) + strings.Repeat("░", width-filled)
}
// percent formats a 0..1 fraction as a right-aligned percentage, e.g. " 50%".
func percent(p float64) string {
	return fmt.Sprintf("%3.0f", p*100) + "%"
}
// humanBytes renders a byte count with binary (1024-based) units,
// e.g. "512 B", "1.5 KiB", "2.0 GiB".
func humanBytes(n int64) string {
	const unit = 1024
	if n < unit {
		return fmt.Sprintf("%d B", n)
	}
	value := float64(n) / unit
	suffix := 0
	// Dividing by powers of two is exact in float64, so this matches the
	// single-division formulation bit for bit.
	for value >= unit && suffix < 5 {
		value /= unit
		suffix++
	}
	return fmt.Sprintf("%.1f %ciB", value, "KMGTPE"[suffix])
}
func fmtDuration(d time.Duration) string {
if d < 0 {
d = 0
}
h := int(d.Hours())
m := int(d.Minutes()) % 60
s := int(d.Seconds()) % 60
if h > 0 {
return fmt.Sprintf("%02d:%02d:%02d", h, m, s)
}
return fmt.Sprintf("%02d:%02d", m, s)
}
// termSize returns the terminal's (width, height) in cells, falling back
// to 100x30 when stdout is not a terminal or the size cannot be queried.
func termSize() (int, int) {
	w, h, err := term.GetSize(int(os.Stdout.Fd()))
	if err != nil || w <= 0 || h <= 0 {
		return 100, 30
	}
	return w, h
}
// isInteractive reports whether stdout is attached to a terminal.
func isInteractive() bool {
	return term.IsTerminal(int(os.Stdout.Fd()))
}
// ansiOkay reports whether emitting ANSI escape sequences is acceptable.
// TERM=dumb (any case) disables them; everything else — including modern
// Windows 10+ terminals — is assumed to cope. Color suppression via
// NO_COLOR is handled separately by the renderer (see NewLiveRenderer).
// The original body contained an empty, dead `if runtime.GOOS == "windows"`
// branch; it has been removed.
func ansiOkay() bool {
	return !strings.EqualFold(os.Getenv("TERM"), "dumb")
}
// colorize wraps s in the ANSI SGR sequence for the given style name
// (e.g. "fg=green"). Unknown styles, NO_COLOR, and non-ANSI terminals
// pass the string through untouched.
func colorize(s, style string, lr *LiveRenderer) string {
	if lr.noColor || !lr.supports {
		return s
	}
	codes := map[string]string{
		"fg=red":     "31",
		"fg=green":   "32",
		"fg=yellow":  "33",
		"fg=blue":    "34",
		"fg=magenta": "35",
		"fg=cyan":    "36",
	}
	code, known := codes[style]
	if !known {
		return s
	}
	return "\x1b[" + code + "m" + s + "\x1b[0m"
}
// bold wraps s in the ANSI bold attribute.
func bold(s string) string { return "\x1b[1m" + s + "\x1b[0m" }

// dim wraps s in the ANSI faint attribute.
func dim(s string) string { return "\x1b[2m" + s + "\x1b[0m" }
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/assets/embed.go | internal/assets/embed.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
// Package assets provides embedded static files for the web UI.
package assets
import (
"embed"
"io/fs"
)
// staticFiles contains the embedded static files for the web interface.
//
//go:embed static/*
var staticFiles embed.FS
// StaticFS returns the filesystem for serving static files.
// Use with http.FileServer(http.FS(assets.StaticFS()))
func StaticFS() fs.FS {
	// fs.Sub cannot fail here: "static" is embedded at compile time, so
	// the error is deliberately discarded.
	sub, _ := fs.Sub(staticFiles, "static")
	return sub
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/api.go | internal/server/api.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package server
import (
"context"
"encoding/json"
"net/http"
"strings"
"time"
"github.com/bodaay/HuggingFaceModelDownloader/pkg/hfdownloader"
)
// DownloadRequest is the request body for starting a download.
// Repo may carry inline filters as "owner/name:f1,f2"; the handlers split
// these out when Filters is empty.
// Note: Output path is NOT configurable via API for security reasons.
// The server uses its configured OutputDir (Models/ for models, Datasets/ for datasets).
type DownloadRequest struct {
	Repo               string   `json:"repo"`
	Revision           string   `json:"revision,omitempty"` // defaults to "main" when empty
	Dataset            bool     `json:"dataset,omitempty"`
	Filters            []string `json:"filters,omitempty"`
	Excludes           []string `json:"excludes,omitempty"`
	AppendFilterSubdir bool     `json:"appendFilterSubdir,omitempty"`
	DryRun             bool     `json:"dryRun,omitempty"` // plan only, no download
}
// PlanResponse is the response for a dry-run/plan request.
type PlanResponse struct {
	Repo       string     `json:"repo"`
	Revision   string     `json:"revision"`
	Files      []PlanFile `json:"files"`
	TotalSize  int64      `json:"totalSize"`  // sum of all file sizes in bytes
	TotalFiles int        `json:"totalFiles"` // len(Files)
}

// PlanFile represents a file in the plan.
type PlanFile struct {
	Path string `json:"path"`
	Size int64  `json:"size"`
	LFS  bool   `json:"lfs"` // stored via Git LFS
}
// SettingsResponse represents current settings. Token is masked by the
// handler before being placed here (see handleGetSettings).
type SettingsResponse struct {
	Token              string `json:"token,omitempty"`
	ModelsDir          string `json:"modelsDir"`
	DatasetsDir        string `json:"datasetsDir"`
	Concurrency        int    `json:"connections"`
	MaxActive          int    `json:"maxActive"`
	MultipartThreshold string `json:"multipartThreshold"`
	Verify             string `json:"verify"`
	Retries            int    `json:"retries"`
	Endpoint           string `json:"endpoint,omitempty"`
}

// ErrorResponse represents an API error.
type ErrorResponse struct {
	Error   string `json:"error"`
	Details string `json:"details,omitempty"`
}

// SuccessResponse represents a simple success message.
type SuccessResponse struct {
	Success bool   `json:"success"`
	Message string `json:"message,omitempty"`
}
// --- Handlers ---
// handleHealth returns server health status.
// NOTE(review): the version string here ("2.3.3") disagrees with the
// "2.3.0" reported by sendInitialState — confirm which is current and
// centralize it in one constant.
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
	writeJSON(w, http.StatusOK, map[string]any{
		"status":  "ok",
		"version": "2.3.3",
		"time":    time.Now().UTC().Format(time.RFC3339),
	})
}
// handleStartDownload starts a new download job.
//
// It parses optional inline filters from "owner/name:f1,f2", validates
// the repo, answers dry-run requests with the plan, and otherwise creates
// (or re-uses) a job: a duplicate request for an in-flight repo returns
// the existing job with 200 rather than 202.
// NOTE(review): the inline-filter parsing is duplicated verbatim in
// handlePlanInternal — consider extracting a shared helper.
func (s *Server) handleStartDownload(w http.ResponseWriter, r *http.Request) {
	var req DownloadRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body", err.Error())
		return
	}
	// Validate
	if req.Repo == "" {
		writeError(w, http.StatusBadRequest, "Missing required field: repo", "")
		return
	}
	// Parse filters from repo:filter syntax
	if strings.Contains(req.Repo, ":") && len(req.Filters) == 0 {
		parts := strings.SplitN(req.Repo, ":", 2)
		req.Repo = parts[0]
		if parts[1] != "" {
			for _, f := range strings.Split(parts[1], ",") {
				f = strings.TrimSpace(f)
				if f != "" {
					req.Filters = append(req.Filters, f)
				}
			}
		}
	}
	if !hfdownloader.IsValidModelName(req.Repo) {
		writeError(w, http.StatusBadRequest, "Invalid repo format", "Expected owner/name")
		return
	}
	// If dry-run, return the plan
	if req.DryRun {
		s.handlePlanInternal(w, req)
		return
	}
	// Create and start the job (or return existing if duplicate)
	job, wasExisting, err := s.jobs.CreateJob(req)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to create job", err.Error())
		return
	}
	// Return appropriate status
	if wasExisting {
		// Job already exists for this repo - return it with 200
		writeJSON(w, http.StatusOK, map[string]any{
			"job":     job,
			"message": "Download already in progress",
		})
	} else {
		// New job created
		writeJSON(w, http.StatusAccepted, job)
	}
}
// handlePlan returns a download plan without starting the download.
// It is a thin wrapper that forces DryRun and delegates.
func (s *Server) handlePlan(w http.ResponseWriter, r *http.Request) {
	var req DownloadRequest
	err := json.NewDecoder(r.Body).Decode(&req)
	if err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body", err.Error())
		return
	}
	// A plan request is just a forced dry-run.
	req.DryRun = true
	s.handlePlanInternal(w, req)
}
// handlePlanInternal scans the repository and responds with the list of
// files (path/size/LFS) that a real download would fetch, without
// downloading anything. The output directory always comes from server
// config, never from the request. The scan is bounded by a 60s timeout.
func (s *Server) handlePlanInternal(w http.ResponseWriter, req DownloadRequest) {
	if req.Repo == "" {
		writeError(w, http.StatusBadRequest, "Missing required field: repo", "")
		return
	}
	// Parse filters from repo:filter syntax
	// NOTE(review): duplicated from handleStartDownload — consider a shared helper.
	if strings.Contains(req.Repo, ":") && len(req.Filters) == 0 {
		parts := strings.SplitN(req.Repo, ":", 2)
		req.Repo = parts[0]
		if parts[1] != "" {
			for _, f := range strings.Split(parts[1], ",") {
				f = strings.TrimSpace(f)
				if f != "" {
					req.Filters = append(req.Filters, f)
				}
			}
		}
	}
	revision := req.Revision
	if revision == "" {
		revision = "main"
	}
	// Create job for scanning
	dlJob := hfdownloader.Job{
		Repo:               req.Repo,
		Revision:           revision,
		IsDataset:          req.Dataset,
		Filters:            req.Filters,
		Excludes:           req.Excludes,
		AppendFilterSubdir: req.AppendFilterSubdir,
	}
	// Use server-configured output directory (not from request for security)
	outputDir := s.config.ModelsDir
	if req.Dataset {
		outputDir = s.config.DatasetsDir
	}
	settings := hfdownloader.Settings{
		OutputDir: outputDir,
		Token:     s.config.Token,
		Endpoint:  s.config.Endpoint,
	}
	// Collect plan items emitted by the scanner as they arrive.
	var files []PlanFile
	var totalSize int64
	progressFunc := func(evt hfdownloader.ProgressEvent) {
		if evt.Event == "plan_item" {
			files = append(files, PlanFile{
				Path: evt.Path,
				Size: evt.Total,
				LFS:  evt.IsLFS,
			})
			totalSize += evt.Total
		}
	}
	// Run in dry-run mode (plan only)
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	// We need to get the plan - use a modified Run that returns early
	// For now, we'll scan the repo manually
	err := hfdownloader.ScanPlan(ctx, dlJob, settings, progressFunc)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to scan repository", err.Error())
		return
	}
	resp := PlanResponse{
		Repo:       req.Repo,
		Revision:   revision,
		Files:      files,
		TotalSize:  totalSize,
		TotalFiles: len(files),
	}
	writeJSON(w, http.StatusOK, resp)
}
// handleListJobs returns every known job plus a count.
func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) {
	all := s.jobs.ListJobs()
	payload := map[string]any{
		"jobs":  all,
		"count": len(all),
	}
	writeJSON(w, http.StatusOK, payload)
}
// handleGetJob returns a single job looked up by its path ID.
func (s *Server) handleGetJob(w http.ResponseWriter, r *http.Request) {
	id := r.PathValue("id")
	if id == "" {
		writeError(w, http.StatusBadRequest, "Missing job ID", "")
		return
	}
	job, found := s.jobs.GetJob(id)
	if !found {
		writeError(w, http.StatusNotFound, "Job not found", "")
		return
	}
	writeJSON(w, http.StatusOK, job)
}
// handleCancelJob cancels the job identified by the path ID.
func (s *Server) handleCancelJob(w http.ResponseWriter, r *http.Request) {
	id := r.PathValue("id")
	if id == "" {
		writeError(w, http.StatusBadRequest, "Missing job ID", "")
		return
	}
	if !s.jobs.CancelJob(id) {
		writeError(w, http.StatusNotFound, "Job not found or already completed", "")
		return
	}
	writeJSON(w, http.StatusOK, SuccessResponse{
		Success: true,
		Message: "Job cancelled",
	})
}
// handleGetSettings returns current settings. The API token is never
// returned in full: only a masked suffix (last 4 characters) is exposed
// so the UI can show that a token is configured.
func (s *Server) handleGetSettings(w http.ResponseWriter, r *http.Request) {
	// Don't expose full token, just indicate if set
	tokenStatus := ""
	if s.config.Token != "" {
		tokenStatus = "********" + s.config.Token[max(0, len(s.config.Token)-4):]
	}
	resp := SettingsResponse{
		Token:              tokenStatus,
		ModelsDir:          s.config.ModelsDir,
		DatasetsDir:        s.config.DatasetsDir,
		Concurrency:        s.config.Concurrency,
		MaxActive:          s.config.MaxActive,
		MultipartThreshold: s.config.MultipartThreshold,
		Verify:             s.config.Verify,
		Retries:            s.config.Retries,
		Endpoint:           s.config.Endpoint,
	}
	writeJSON(w, http.StatusOK, resp)
}
// handleUpdateSettings updates settings. Pointer fields distinguish
// "absent" from zero values, so only fields present in the request are
// changed; numeric fields must be positive to take effect.
// Note: Output directories cannot be changed via API for security.
// NOTE(review): s.config and s.jobs.config are written without any lock
// while handlers may read them concurrently — confirm whether a mutex is
// needed here.
func (s *Server) handleUpdateSettings(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Token              *string `json:"token,omitempty"`
		Concurrency        *int    `json:"connections,omitempty"`
		MaxActive          *int    `json:"maxActive,omitempty"`
		MultipartThreshold *string `json:"multipartThreshold,omitempty"`
		Verify             *string `json:"verify,omitempty"`
		Retries            *int    `json:"retries,omitempty"`
		// Note: ModelsDir and DatasetsDir are NOT updatable via API for security
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body", err.Error())
		return
	}
	// Update config (only safe fields)
	if req.Token != nil {
		s.config.Token = *req.Token
	}
	if req.Concurrency != nil && *req.Concurrency > 0 {
		s.config.Concurrency = *req.Concurrency
	}
	if req.MaxActive != nil && *req.MaxActive > 0 {
		s.config.MaxActive = *req.MaxActive
	}
	if req.MultipartThreshold != nil && *req.MultipartThreshold != "" {
		s.config.MultipartThreshold = *req.MultipartThreshold
	}
	if req.Verify != nil && *req.Verify != "" {
		s.config.Verify = *req.Verify
	}
	if req.Retries != nil && *req.Retries > 0 {
		s.config.Retries = *req.Retries
	}
	// Also update job manager config
	s.jobs.config = s.config
	writeJSON(w, http.StatusOK, SuccessResponse{
		Success: true,
		Message: "Settings updated",
	})
}
// --- Helpers ---
func writeJSON(w http.ResponseWriter, status int, data any) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
json.NewEncoder(w).Encode(data)
}
func writeError(w http.ResponseWriter, status int, message, details string) {
writeJSON(w, status, ErrorResponse{
Error: message,
Details: details,
})
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/websocket.go | internal/server/websocket.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package server
import (
"encoding/json"
"log"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
)
// upgrader configures the HTTP → WebSocket upgrade for /ws connections.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool {
		// Allow all origins for development
		// In production, you'd want to check the Origin header
		// NOTE(review): accepting every Origin disables cross-site
		// protection for the socket — confirm before exposing the
		// server beyond localhost.
		return true
	},
}
// WSMessage represents a message sent over WebSocket.
// Type is one of "init", "job_update", or "event" (see the Broadcast*
// helpers and sendInitialState).
type WSMessage struct {
	Type string `json:"type"`
	Data any    `json:"data"`
}
// WSClient represents a connected WebSocket client.
type WSClient struct {
	conn   *websocket.Conn
	send   chan []byte // outgoing frames, drained only by writePump
	hub    *WSHub
	closed bool       // set by readPump on disconnect
	mu     sync.Mutex // guards closed (checked by sendInitialState)
}
// WSHub manages WebSocket clients and broadcasts.
// The clients map is mutated by Run(); mu guards it for readers such as
// ClientCount.
type WSHub struct {
	clients    map[*WSClient]bool
	broadcast  chan []byte    // fan-out queue; full queue drops messages
	register   chan *WSClient // new connections from handleWebSocket
	unregister chan *WSClient // departing connections from readPump
	mu         sync.RWMutex
}
// NewWSHub creates a new WebSocket hub with a buffered broadcast queue.
func NewWSHub() *WSHub {
	hub := &WSHub{
		clients:    map[*WSClient]bool{},
		broadcast:  make(chan []byte, 256),
		register:   make(chan *WSClient),
		unregister: make(chan *WSClient),
	}
	return hub
}
// Run starts the hub's main loop and owns all mutation of h.clients.
// It must run in its own goroutine for the lifetime of the hub (there is
// no shutdown signal).
//
// Fixes two locking bugs in the original: the broadcast branch deleted
// map entries and closed channels while holding only the read lock (a
// data race that can panic with "concurrent map writes"), and the client
// count was logged after the lock was released.
func (h *WSHub) Run() {
	for {
		select {
		case client := <-h.register:
			h.mu.Lock()
			h.clients[client] = true
			total := len(h.clients)
			h.mu.Unlock()
			log.Printf("[WS] Client connected (%d total)", total)
		case client := <-h.unregister:
			h.mu.Lock()
			if _, ok := h.clients[client]; ok {
				delete(h.clients, client)
				close(client.send)
			}
			total := len(h.clients)
			h.mu.Unlock()
			log.Printf("[WS] Client disconnected (%d total)", total)
		case message := <-h.broadcast:
			// Full write lock: slow clients are evicted inline, and map
			// mutation is never legal under RLock. Deleting during range
			// is permitted by the Go spec.
			h.mu.Lock()
			for client := range h.clients {
				select {
				case client.send <- message:
				default:
					// Client's buffer is full, disconnect
					close(client.send)
					delete(h.clients, client)
				}
			}
			h.mu.Unlock()
		}
	}
}
// Broadcast sends a typed message to all connected clients. The message
// is dropped (with a log line) when the broadcast queue is saturated.
func (h *WSHub) Broadcast(msgType string, data any) {
	payload, err := json.Marshal(WSMessage{Type: msgType, Data: data})
	if err != nil {
		log.Printf("[WS] Failed to marshal message: %v", err)
		return
	}
	select {
	case h.broadcast <- payload:
	default:
		log.Printf("[WS] Broadcast channel full, dropping message")
	}
}
// BroadcastJob sends a job update to all clients as a "job_update" message.
func (h *WSHub) BroadcastJob(job *Job) {
	h.Broadcast("job_update", job)
}

// BroadcastEvent sends a progress event to all clients as an "event" message.
func (h *WSHub) BroadcastEvent(event any) {
	h.Broadcast("event", event)
}
// ClientCount returns the number of currently connected clients.
func (h *WSHub) ClientCount() int {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return len(h.clients)
}
// handleWebSocket upgrades the HTTP request to a WebSocket, registers the
// client with the hub, starts its read/write pumps, and pushes the
// current job list as the first message.
func (s *Server) handleWebSocket(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade already wrote an HTTP error response.
		log.Printf("[WS] Upgrade failed: %v", err)
		return
	}
	client := &WSClient{
		conn: conn,
		send: make(chan []byte, 256),
		hub:  s.wsHub,
	}
	s.wsHub.register <- client
	// Start read/write pumps
	go client.writePump()
	go client.readPump()
	// Send initial state
	s.sendInitialState(client)
}
// sendInitialState sends the current job list to a newly connected client
// as an "init" message. The send is non-blocking and skipped entirely if
// the client already disconnected.
// NOTE(review): the "2.3.0" version here disagrees with the "2.3.3" in
// handleHealth — confirm which is current and centralize the constant.
func (s *Server) sendInitialState(client *WSClient) {
	jobs := s.jobs.ListJobs()
	msg := WSMessage{
		Type: "init",
		Data: map[string]any{
			"jobs":    jobs,
			"version": "2.3.0",
		},
	}
	data, err := json.Marshal(msg)
	if err != nil {
		return
	}
	client.mu.Lock()
	defer client.mu.Unlock()
	if !client.closed {
		select {
		case client.send <- data:
		default:
		}
	}
}
// writePump pumps messages from the hub to the WebSocket connection.
// It is the single writer for c.conn (gorilla/websocket allows at most
// one concurrent writer per connection). It also pings every 30s so the
// peer's pongs keep extending readPump's 60s read deadline.
func (c *WSClient) writePump() {
	ticker := time.NewTicker(30 * time.Second)
	defer func() {
		ticker.Stop()
		c.conn.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if !ok {
				// Hub closed the channel
				c.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			w, err := c.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				return
			}
			w.Write(message)
			// Batch any queued messages into the same frame, newline-separated.
			n := len(c.send)
			for i := 0; i < n; i++ {
				w.Write([]byte("\n"))
				w.Write(<-c.send)
			}
			if err := w.Close(); err != nil {
				return
			}
		case <-ticker.C:
			c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}
// readPump drains inbound frames from the WebSocket (currently discarded)
// and detects disconnects. On exit it marks the client closed and hands
// it to the hub for unregistration. The read deadline is refreshed by
// pongs answering writePump's pings.
func (c *WSClient) readPump() {
	defer func() {
		c.mu.Lock()
		c.closed = true
		c.mu.Unlock()
		c.hub.unregister <- c
		c.conn.Close()
	}()
	c.conn.SetReadLimit(512 * 1024) // 512KB
	c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	c.conn.SetPongHandler(func(string) error {
		c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
		return nil
	})
	for {
		_, message, err := c.conn.ReadMessage()
		if err != nil {
			// Only log abnormal closures; normal going-away is expected.
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				log.Printf("[WS] Read error: %v", err)
			}
			break
		}
		// Handle incoming messages (for future use)
		_ = message
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/api_test.go | internal/server/api_test.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package server
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
)
// newTestServer builds a Server with throwaway directories and a random
// port for handler-level tests.
func newTestServer() *Server {
	cfg := Config{
		Addr:        "127.0.0.1",
		Port:        0, // Random port
		ModelsDir:   "./test_models",
		DatasetsDir: "./test_datasets",
		Concurrency: 2,
		MaxActive:   1,
	}
	return New(cfg)
}
// TestAPI_Health checks the health endpoint's status and version fields.
// The expected version was stale ("2.3.0") while handleHealth reports
// "2.3.3", so this test always failed; the expectation is updated and the
// JSON decode error is now checked.
func TestAPI_Health(t *testing.T) {
	srv := newTestServer()
	req := httptest.NewRequest("GET", "/api/health", nil)
	w := httptest.NewRecorder()
	srv.handleHealth(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", w.Code)
	}
	var resp map[string]any
	if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
		t.Fatalf("invalid JSON response: %v", err)
	}
	if resp["status"] != "ok" {
		t.Errorf("Expected status ok, got %v", resp["status"])
	}
	// Keep in sync with the version string in handleHealth.
	if resp["version"] != "2.3.3" {
		t.Errorf("Expected version 2.3.3, got %v", resp["version"])
	}
}
// TestAPI_GetSettings verifies the configured directories are echoed back.
func TestAPI_GetSettings(t *testing.T) {
	srv := newTestServer()
	req := httptest.NewRequest("GET", "/api/settings", nil)
	w := httptest.NewRecorder()
	srv.handleGetSettings(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", w.Code)
	}
	var resp SettingsResponse
	json.Unmarshal(w.Body.Bytes(), &resp)
	if resp.ModelsDir != "./test_models" {
		t.Errorf("Expected modelsDir ./test_models, got %s", resp.ModelsDir)
	}
	if resp.DatasetsDir != "./test_datasets" {
		t.Errorf("Expected datasetsDir ./test_datasets, got %s", resp.DatasetsDir)
	}
}
// TestAPI_GetSettings_TokenMasked ensures the API token is never returned
// in full — only a masked suffix.
func TestAPI_GetSettings_TokenMasked(t *testing.T) {
	cfg := Config{
		ModelsDir: "./test",
		Token:     "hf_abcdefghijklmnop",
	}
	srv := New(cfg)
	req := httptest.NewRequest("GET", "/api/settings", nil)
	w := httptest.NewRecorder()
	srv.handleGetSettings(w, req)
	var resp SettingsResponse
	json.Unmarshal(w.Body.Bytes(), &resp)
	// Token should be masked, not exposed
	if resp.Token == "hf_abcdefghijklmnop" {
		t.Error("Token should be masked, not exposed in full")
	}
	if resp.Token != "********mnop" {
		t.Errorf("Expected masked token ********mnop, got %s", resp.Token)
	}
}
// TestAPI_UpdateSettings verifies safe numeric settings are applied.
func TestAPI_UpdateSettings(t *testing.T) {
	srv := newTestServer()
	// Update concurrency
	body := `{"connections": 16, "maxActive": 8}`
	req := httptest.NewRequest("POST", "/api/settings", bytes.NewBufferString(body))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	srv.handleUpdateSettings(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", w.Code)
	}
	// Verify changes applied
	if srv.config.Concurrency != 16 {
		t.Errorf("Expected concurrency 16, got %d", srv.config.Concurrency)
	}
	if srv.config.MaxActive != 8 {
		t.Errorf("Expected maxActive 8, got %d", srv.config.MaxActive)
	}
}
// TestAPI_UpdateSettings_CantChangeOutputDir asserts output directories
// are not updatable through the settings endpoint.
func TestAPI_UpdateSettings_CantChangeOutputDir(t *testing.T) {
	srv := newTestServer()
	originalModels := srv.config.ModelsDir
	// Try to inject a different output path (should be ignored)
	body := `{"modelsDir": "/etc/passwd", "datasetsDir": "/tmp/evil"}`
	req := httptest.NewRequest("POST", "/api/settings", bytes.NewBufferString(body))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	srv.handleUpdateSettings(w, req)
	// Paths should NOT have changed
	if srv.config.ModelsDir != originalModels {
		t.Errorf("ModelsDir should not be changeable via API! Got %s", srv.config.ModelsDir)
	}
}
// TestAPI_StartDownload_ValidatesRepo table-tests repo validation:
// missing and malformed repos are rejected, valid ones are accepted.
func TestAPI_StartDownload_ValidatesRepo(t *testing.T) {
	srv := newTestServer()
	tests := []struct {
		name     string
		body     string
		wantCode int
	}{
		{
			name:     "missing repo",
			body:     `{}`,
			wantCode: http.StatusBadRequest,
		},
		{
			name:     "invalid repo format",
			body:     `{"repo": "invalid"}`,
			wantCode: http.StatusBadRequest,
		},
		{
			name:     "valid repo",
			body:     `{"repo": "owner/name"}`,
			wantCode: http.StatusAccepted,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(tt.body))
			req.Header.Set("Content-Type", "application/json")
			w := httptest.NewRecorder()
			srv.handleStartDownload(w, req)
			if w.Code != tt.wantCode {
				t.Errorf("Expected %d, got %d. Body: %s", tt.wantCode, w.Code, w.Body.String())
			}
		})
	}
}
// TestAPI_StartDownload_OutputIgnored proves a client-supplied output
// path is ignored in favor of the server-configured directory.
func TestAPI_StartDownload_OutputIgnored(t *testing.T) {
	srv := newTestServer()
	// Try to specify custom output path
	body := `{"repo": "test/model", "output": "/etc/evil"}`
	req := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(body))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	srv.handleStartDownload(w, req)
	if w.Code != http.StatusAccepted {
		t.Fatalf("Expected 202, got %d", w.Code)
	}
	var resp Job
	json.Unmarshal(w.Body.Bytes(), &resp)
	// Output should be server-controlled, not from request
	if resp.OutputDir == "/etc/evil" {
		t.Error("Output path from request should be ignored!")
	}
	if resp.OutputDir != "./test_models" {
		t.Errorf("Expected server-controlled output, got %s", resp.OutputDir)
	}
}
// TestAPI_StartDownload_DatasetUsesDatasetDir checks dataset jobs land in
// the configured datasets directory rather than the models directory.
func TestAPI_StartDownload_DatasetUsesDatasetDir(t *testing.T) {
	srv := newTestServer()
	body := `{"repo": "test/dataset", "dataset": true}`
	req := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(body))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	srv.handleStartDownload(w, req)
	var resp Job
	json.Unmarshal(w.Body.Bytes(), &resp)
	if resp.OutputDir != "./test_datasets" {
		t.Errorf("Dataset should use datasets dir, got %s", resp.OutputDir)
	}
}
// TestAPI_StartDownload_DuplicateReturnsExisting ensures a second request
// for an in-flight repo returns the same job with 200 instead of 202.
func TestAPI_StartDownload_DuplicateReturnsExisting(t *testing.T) {
	srv := newTestServer()
	body := `{"repo": "dup/test"}`
	// First request
	req1 := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(body))
	req1.Header.Set("Content-Type", "application/json")
	w1 := httptest.NewRecorder()
	srv.handleStartDownload(w1, req1)
	if w1.Code != http.StatusAccepted {
		t.Fatalf("First request should return 202, got %d", w1.Code)
	}
	var job1 Job
	json.Unmarshal(w1.Body.Bytes(), &job1)
	// Second request (duplicate)
	req2 := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(body))
	req2.Header.Set("Content-Type", "application/json")
	w2 := httptest.NewRecorder()
	srv.handleStartDownload(w2, req2)
	if w2.Code != http.StatusOK {
		t.Fatalf("Duplicate request should return 200, got %d", w2.Code)
	}
	var resp map[string]any
	json.Unmarshal(w2.Body.Bytes(), &resp)
	if resp["message"] != "Download already in progress" {
		t.Errorf("Expected duplicate message, got %v", resp["message"])
	}
	jobMap := resp["job"].(map[string]any)
	if jobMap["id"] != job1.ID {
		t.Error("Duplicate should return same job ID")
	}
}
// TestAPI_ListJobs verifies that GET /api/jobs reports at least the job
// that was just created.
func TestAPI_ListJobs(t *testing.T) {
	srv := newTestServer()
	// Create a job first
	body := `{"repo": "list/test"}`
	req := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(body))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	srv.handleStartDownload(w, req)
	// List jobs
	listReq := httptest.NewRequest("GET", "/api/jobs", nil)
	listW := httptest.NewRecorder()
	srv.handleListJobs(listW, listReq)
	if listW.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", listW.Code)
	}
	var resp map[string]any
	if err := json.Unmarshal(listW.Body.Bytes(), &resp); err != nil {
		t.Fatalf("invalid JSON response: %v", err)
	}
	// Fix: `resp["count"].(float64)` was unchecked and would panic if the
	// field were missing or not numeric; fail the test cleanly instead.
	countVal, ok := resp["count"].(float64)
	if !ok {
		t.Fatalf("Expected numeric count, got %T", resp["count"])
	}
	if int(countVal) < 1 {
		t.Error("Expected at least 1 job")
	}
}
// TestAPI_ParseFiltersFromRepo verifies the "owner/name:f1,f2" shorthand:
// the filters are split off and the bare repo ID is stored on the job.
func TestAPI_ParseFiltersFromRepo(t *testing.T) {
	srv := newTestServer()
	body := `{"repo": "owner/model:q4_0,q5_0"}`
	req := httptest.NewRequest("POST", "/api/download", bytes.NewBufferString(body))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	srv.handleStartDownload(w, req)
	// Fix: previously neither the status code nor the decode error was
	// checked, so a rejected request would still "pass".
	if w.Code != http.StatusAccepted {
		t.Fatalf("Expected 202, got %d", w.Code)
	}
	var resp Job
	if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
		t.Fatalf("invalid JSON response: %v", err)
	}
	if resp.Repo != "owner/model" {
		t.Errorf("Repo should be parsed without filters, got %s", resp.Repo)
	}
	if len(resp.Filters) != 2 {
		t.Errorf("Expected 2 filters, got %d", len(resp.Filters))
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/websocket_test.go | internal/server/websocket_test.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package server
import (
"testing"
"time"
)
// TestWSHub_Broadcast is a smoke test: all broadcast variants must not
// panic or block when no WebSocket clients are connected.
// NOTE(review): hub.Run has no stop mechanism here, so the hub goroutine
// lives until the test binary exits — acceptable in tests, but confirm
// WSHub offers no shutdown hook worth exercising.
func TestWSHub_Broadcast(t *testing.T) {
	hub := NewWSHub()
	go hub.Run()
	// Give hub time to start
	time.Sleep(10 * time.Millisecond)
	// Test broadcast doesn't panic with no clients
	hub.Broadcast("test", map[string]string{"key": "value"})
	// Test BroadcastJob
	job := &Job{
		ID:     "test123",
		Repo:   "test/repo",
		Status: JobStatusRunning,
	}
	hub.BroadcastJob(job)
	// Test BroadcastEvent
	hub.BroadcastEvent(map[string]string{"event": "test"})
}

// TestWSHub_ClientCount verifies a freshly started hub reports zero
// connected clients.
func TestWSHub_ClientCount(t *testing.T) {
	hub := NewWSHub()
	go hub.Run()
	time.Sleep(10 * time.Millisecond)
	count := hub.ClientCount()
	if count != 0 {
		t.Errorf("Expected 0 clients, got %d", count)
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/server.go | internal/server/server.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
// Package server provides the HTTP server for the web UI and REST API.
package server
import (
	"context"
	"fmt"
	"io/fs"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/bodaay/HuggingFaceModelDownloader/internal/assets"
)
// Config holds server configuration.
//
// ModelsDir and DatasetsDir are deliberately NOT settable through the
// REST API: every download lands in one of these two server-controlled
// directories regardless of what clients request.
type Config struct {
	Addr               string   // listen address, e.g. "0.0.0.0"
	Port               int      // listen port
	Token              string   // HuggingFace token
	ModelsDir          string   // Output directory for models (not configurable via API)
	DatasetsDir        string   // Output directory for datasets (not configurable via API)
	Concurrency        int      // per-file concurrent connections
	MaxActive          int      // maximum files downloading at once
	MultipartThreshold string   // Minimum size for multipart download
	Verify             string   // Verification mode: none, size, sha256
	Retries            int      // Number of retry attempts
	AllowedOrigins     []string // CORS origins; empty means permissive (see corsMiddleware)
	Endpoint           string   // Custom HuggingFace endpoint (e.g., for mirrors)
}
// DefaultConfig returns the baseline server configuration: listen on all
// interfaces at port 8080, download into ./Models and ./Datasets, and use
// moderate concurrency with size-based verification.
func DefaultConfig() Config {
	var cfg Config
	cfg.Addr = "0.0.0.0"
	cfg.Port = 8080
	cfg.ModelsDir = "./Models"
	cfg.DatasetsDir = "./Datasets"
	cfg.Concurrency = 8
	cfg.MaxActive = 3
	cfg.MultipartThreshold = "32MiB"
	cfg.Verify = "size"
	cfg.Retries = 4
	return cfg
}
// Server is the HTTP server for hfdownloader.
// It owns the job manager (download lifecycle) and the WebSocket hub
// (live progress fan-out to connected browser clients).
type Server struct {
	config     Config
	httpServer *http.Server // created lazily in ListenAndServe
	jobs       *JobManager
	wsHub      *WSHub
}
// New creates a new server with the given configuration. One WebSocket
// hub is shared between the server and its job manager so that job
// updates reach connected clients.
func New(cfg Config) *Server {
	hub := NewWSHub()
	return &Server{
		config: cfg,
		jobs:   NewJobManager(cfg, hub),
		wsHub:  hub,
	}
}
// ListenAndServe starts the HTTP server and blocks until the listener
// fails or ctx is cancelled; on cancellation the server drains gracefully
// for up to 10 seconds. http.ErrServerClosed is translated to nil since
// it signals a clean shutdown.
func (s *Server) ListenAndServe(ctx context.Context) error {
	// Start WebSocket hub
	go s.wsHub.Run()

	mux := http.NewServeMux()

	// API routes
	s.registerAPIRoutes(mux)

	// Static files (embedded)
	staticFS := assets.StaticFS()
	fileServer := http.FileServer(http.FS(staticFS))

	// Serve embedded assets, falling back to index.html for SPA routes.
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		if path == "/" {
			path = "/index.html"
		}
		// Serve the asset directly when it exists in the embedded FS.
		if f, err := staticFS.(fs.ReadFileFS).ReadFile(path[1:]); err == nil {
			w.Header().Set("Content-Type", contentTypeFor(path))
			_, _ = w.Write(f) // best effort; client may already be gone
			return
		}
		// Fix: unknown paths were previously delegated to fileServer,
		// which 404s — breaking refresh/deep-links on client-side routes.
		// Serve index.html instead, as the SPA fallback comment intended.
		if idx, err := staticFS.(fs.ReadFileFS).ReadFile("index.html"); err == nil {
			w.Header().Set("Content-Type", "text/html; charset=utf-8")
			_, _ = w.Write(idx)
			return
		}
		fileServer.ServeHTTP(w, r)
	})

	addr := fmt.Sprintf("%s:%d", s.config.Addr, s.config.Port)
	s.httpServer = &http.Server{
		Addr:         addr,
		Handler:      s.corsMiddleware(s.loggingMiddleware(mux)),
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
		IdleTimeout:  120 * time.Second,
	}

	// Graceful shutdown once ctx is cancelled.
	go func() {
		<-ctx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		s.httpServer.Shutdown(shutdownCtx)
	}()

	log.Printf("🚀 Server starting on http://%s", addr)
	log.Printf(" Dashboard: http://localhost:%d", s.config.Port)
	log.Printf(" API: http://localhost:%d/api", s.config.Port)

	err := s.httpServer.ListenAndServe()
	if err == http.ErrServerClosed {
		return nil
	}
	return err
}

// contentTypeFor maps a request path to a Content-Type by file extension;
// HTML is the default for unknown extensions (matches the old behavior).
func contentTypeFor(path string) string {
	switch {
	case strings.HasSuffix(path, ".css"):
		return "text/css; charset=utf-8"
	case strings.HasSuffix(path, ".js"):
		return "application/javascript; charset=utf-8"
	case strings.HasSuffix(path, ".json"):
		return "application/json; charset=utf-8"
	case strings.HasSuffix(path, ".svg"):
		return "image/svg+xml"
	default:
		return "text/html; charset=utf-8"
	}
}
// registerAPIRoutes wires every REST and WebSocket endpoint onto mux,
// using Go 1.22+ method-qualified ServeMux patterns.
func (s *Server) registerAPIRoutes(mux *http.ServeMux) {
	routes := map[string]http.HandlerFunc{
		"GET /api/health":       s.handleHealth,         // health check
		"POST /api/download":    s.handleStartDownload,  // downloads
		"GET /api/jobs":         s.handleListJobs,
		"GET /api/jobs/{id}":    s.handleGetJob,
		"DELETE /api/jobs/{id}": s.handleCancelJob,
		"GET /api/settings":     s.handleGetSettings,    // settings
		"POST /api/settings":    s.handleUpdateSettings,
		"POST /api/plan":        s.handlePlan,           // plan (dry-run)
		"GET /api/ws":           s.handleWebSocket,      // websocket
	}
	for pattern, handler := range routes {
		mux.HandleFunc(pattern, handler)
	}
}
// Middleware

// loggingMiddleware logs the method, path, and elapsed time (rounded to
// milliseconds) of each handled request.
func (s *Server) loggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		began := time.Now()
		next.ServeHTTP(w, r)
		elapsed := time.Since(began).Round(time.Millisecond)
		log.Printf("%s %s %s", r.Method, r.URL.Path, elapsed)
	})
}
// corsMiddleware applies CORS response headers and short-circuits OPTIONS
// preflight requests with 204 No Content.
//
// NOTE(review): when AllowedOrigins is empty, ANY non-empty Origin is
// reflected back (`allowed = true` below) — not just the same host.
// Combined with Allow-Methods including DELETE, this is quite permissive;
// confirm whether arbitrary cross-origin pages should be able to call
// this API by default.
func (s *Server) corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		origin := r.Header.Get("Origin")
		// Decide whether this Origin may use the API cross-origin.
		if origin != "" {
			allowed := false
			if len(s.config.AllowedOrigins) == 0 {
				// No allow-list configured: every origin is accepted.
				allowed = true
			} else {
				for _, o := range s.config.AllowedOrigins {
					if o == "*" || o == origin {
						allowed = true
						break
					}
				}
			}
			if allowed {
				w.Header().Set("Access-Control-Allow-Origin", origin)
				w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
				w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
				w.Header().Set("Access-Control-Max-Age", "86400")
			}
		}
		// Preflight requests need no body; answer immediately.
		if r.Method == "OPTIONS" {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		next.ServeHTTP(w, r)
	})
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/integration_test.go | internal/server/integration_test.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
//go:build integration
package server
import (
"bytes"
"context"
"encoding/json"
"net"
"net/http"
"strconv"
"testing"
"time"
)
// getFreePort asks the kernel for a free TCP port on the loopback
// interface by binding to port 0 and reading back the assigned port.
// Fix: the original ignored the Listen error, which would have crashed
// with a nil-pointer dereference instead of a useful message.
func getFreePort() int {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic("getFreePort: " + err.Error())
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port
}
// These tests require network access and actually download from HuggingFace.
// Run with: go test -tags=integration -v ./internal/server/

// TestIntegration_FullDownloadFlow boots a real server on a free port,
// starts a download of a tiny test model, and polls the jobs API until
// the job completes or fails. Timing-sensitive: the 200ms sleep gives the
// listener time to come up before the first request.
func TestIntegration_FullDownloadFlow(t *testing.T) {
	port := getFreePort()
	cfg := Config{
		Addr:        "127.0.0.1",
		Port:        port,
		ModelsDir:   t.TempDir(),
		DatasetsDir: t.TempDir(),
		Concurrency: 4,
		MaxActive:   2,
	}
	srv := New(cfg)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Start server in background; cancel() above triggers its shutdown.
	go srv.ListenAndServe(ctx)
	time.Sleep(200 * time.Millisecond)
	baseURL := "http://127.0.0.1:" + strconv.Itoa(port)
	t.Run("health check", func(t *testing.T) {
		resp, err := http.Get(baseURL + "/api/health")
		if err != nil {
			t.Fatalf("Health check failed: %v", err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != 200 {
			t.Errorf("Expected 200, got %d", resp.StatusCode)
		}
	})
	t.Run("start download and track progress", func(t *testing.T) {
		// Start a tiny model download
		body := `{"repo": "hf-internal-testing/tiny-random-gpt2"}`
		resp, err := http.Post(
			baseURL+"/api/download",
			"application/json",
			bytes.NewBufferString(body),
		)
		if err != nil {
			t.Fatalf("Start download failed: %v", err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != 202 {
			t.Fatalf("Expected 202, got %d", resp.StatusCode)
		}
		var job Job
		// NOTE(review): decode errors are ignored here; a bad body would
		// surface as the "Job ID should not be empty" failure below.
		json.NewDecoder(resp.Body).Decode(&job)
		if job.ID == "" {
			t.Error("Job ID should not be empty")
		}
		// Poll for completion: 60s budget, checking every 500ms.
		timeout := time.After(60 * time.Second)
		ticker := time.NewTicker(500 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-timeout:
				t.Fatal("Download timed out")
			case <-ticker.C:
				jobResp, _ := http.Get(baseURL + "/api/jobs/" + job.ID)
				var current Job
				json.NewDecoder(jobResp.Body).Decode(&current)
				jobResp.Body.Close()
				t.Logf("Job status: %s, progress: %d/%d files",
					current.Status, current.Progress.CompletedFiles, current.Progress.TotalFiles)
				if current.Status == JobStatusCompleted {
					t.Log("Download completed successfully!")
					return
				}
				if current.Status == JobStatusFailed {
					t.Fatalf("Download failed: %s", current.Error)
				}
			}
		}
	})
}

// TestIntegration_DryRun exercises POST /api/plan against a real server:
// it must return a non-empty file plan without downloading anything.
func TestIntegration_DryRun(t *testing.T) {
	port := getFreePort()
	cfg := Config{
		Addr:        "127.0.0.1",
		Port:        port,
		ModelsDir:   t.TempDir(),
		DatasetsDir: t.TempDir(),
	}
	srv := New(cfg)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go srv.ListenAndServe(ctx)
	time.Sleep(200 * time.Millisecond)
	baseURL := "http://127.0.0.1:" + strconv.Itoa(port)
	body := `{"repo": "hf-internal-testing/tiny-random-gpt2"}`
	resp, err := http.Post(
		baseURL+"/api/plan",
		"application/json",
		bytes.NewBufferString(body),
	)
	if err != nil {
		t.Fatalf("Plan request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		t.Fatalf("Expected 200, got %d", resp.StatusCode)
	}
	var plan PlanResponse
	json.NewDecoder(resp.Body).Decode(&plan)
	if plan.TotalFiles == 0 {
		t.Error("Expected files in plan")
	}
	t.Logf("Plan: %d files, %d bytes", plan.TotalFiles, plan.TotalSize)
	for _, f := range plan.Files {
		t.Logf(" %s (%d bytes, LFS=%v)", f.Path, f.Size, f.LFS)
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/jobs.go | internal/server/jobs.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package server
import (
"context"
"crypto/rand"
"encoding/hex"
"sync"
"time"
"github.com/bodaay/HuggingFaceModelDownloader/pkg/hfdownloader"
)
// JobStatus represents the state of a download job.
type JobStatus string

// Job lifecycle: queued -> running -> completed | failed | cancelled.
const (
	JobStatusQueued    JobStatus = "queued"
	JobStatusRunning   JobStatus = "running"
	JobStatusCompleted JobStatus = "completed"
	JobStatusFailed    JobStatus = "failed"
	JobStatusCancelled JobStatus = "cancelled"
)

// Job represents a download job. Instances are shared between the job
// manager, its worker goroutine, and the API handlers; mutable fields are
// updated under JobManager.mu.
type Job struct {
	ID        string      `json:"id"`
	Repo      string      `json:"repo"`
	Revision  string      `json:"revision"`
	IsDataset bool        `json:"isDataset,omitempty"`
	Filters   []string    `json:"filters,omitempty"`
	Excludes  []string    `json:"excludes,omitempty"`
	OutputDir string      `json:"outputDir"` // server-controlled, never from the request
	Status    JobStatus   `json:"status"`
	Progress  JobProgress `json:"progress"`
	Error     string      `json:"error,omitempty"`
	CreatedAt time.Time   `json:"createdAt"`
	StartedAt *time.Time  `json:"startedAt,omitempty"`
	EndedAt   *time.Time  `json:"endedAt,omitempty"`
	Files     []JobFileProgress `json:"files,omitempty"`
	// cancel aborts the in-flight download; excluded from JSON output.
	cancel context.CancelFunc `json:"-"`
}

// JobProgress holds aggregate progress info.
type JobProgress struct {
	TotalFiles      int   `json:"totalFiles"`
	CompletedFiles  int   `json:"completedFiles"`
	TotalBytes      int64 `json:"totalBytes"`
	DownloadedBytes int64 `json:"downloadedBytes"`
	BytesPerSecond  int64 `json:"bytesPerSecond"`
}

// JobFileProgress holds per-file progress.
type JobFileProgress struct {
	Path       string `json:"path"`
	TotalBytes int64  `json:"totalBytes"`
	Downloaded int64  `json:"downloaded"`
	Status     string `json:"status"` // pending, active, complete, skipped, error
}
// JobManager manages download jobs: creation with deduplication, lookup,
// cancellation, and fan-out of progress updates to subscribers and the
// WebSocket hub.
type JobManager struct {
	mu         sync.RWMutex    // guards jobs and the mutable fields of each *Job
	jobs       map[string]*Job // keyed by Job.ID
	config     Config
	listeners  []chan *Job // update subscribers (see Subscribe/Unsubscribe)
	listenerMu sync.RWMutex
	wsHub      *WSHub
}

// NewJobManager creates a new job manager.
// wsHub may be nil; notifyListeners checks before broadcasting.
func NewJobManager(cfg Config, wsHub *WSHub) *JobManager {
	return &JobManager{
		jobs:   make(map[string]*Job),
		config: cfg,
		wsHub:  wsHub,
	}
}
// generateID creates a short random ID.
func generateID() string {
b := make([]byte, 6)
rand.Read(b)
return hex.EncodeToString(b)
}
// CreateJob creates a new download job and starts it in the background.
// Returns existing job if same repo+revision+dataset is already in
// progress (second result is true in that case).
//
// Locking: dedup scan, insertion, and the unlock all happen before the
// worker goroutine starts, so runJob never observes a half-registered job.
func (m *JobManager) CreateJob(req DownloadRequest) (*Job, bool, error) {
	revision := req.Revision
	if revision == "" {
		revision = "main"
	}
	// Determine output directory based on type (NOT from request for security)
	outputDir := m.config.ModelsDir
	if req.Dataset {
		outputDir = m.config.DatasetsDir
	}
	// Check for existing active job with same repo+revision+type
	m.mu.Lock()
	for _, existing := range m.jobs {
		if existing.Repo == req.Repo &&
			existing.Revision == revision &&
			existing.IsDataset == req.Dataset &&
			(existing.Status == JobStatusQueued || existing.Status == JobStatusRunning) {
			m.mu.Unlock()
			return existing, true, nil // Return existing, wasExisting=true
		}
	}
	job := &Job{
		ID:        generateID(),
		Repo:      req.Repo,
		Revision:  revision,
		IsDataset: req.Dataset,
		Filters:   req.Filters,
		Excludes:  req.Excludes,
		OutputDir: outputDir, // Server-controlled, not from request
		Status:    JobStatusQueued,
		CreatedAt: time.Now(),
		Progress:  JobProgress{},
	}
	m.jobs[job.ID] = job
	m.mu.Unlock()
	// Start the job (after unlock, so the worker can take the lock itself).
	go m.runJob(job)
	return job, false, nil // New job, wasExisting=false
}
// GetJob looks up a job by ID; the second result reports whether the job
// exists.
func (m *JobManager) GetJob(id string) (*Job, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	j, found := m.jobs[id]
	return j, found
}
// ListJobs returns a snapshot slice of all known jobs, in unspecified
// (map iteration) order.
func (m *JobManager) ListJobs() []*Job {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]*Job, 0, len(m.jobs))
	for _, j := range m.jobs {
		out = append(out, j)
	}
	return out
}
// CancelJob cancels a running or queued job.
// Returns false when the job doesn't exist or has already finished.
//
// NOTE(review): job.cancel is reads here under m.mu; confirm runJob also
// assigns it while holding m.mu, otherwise this read races with that write.
func (m *JobManager) CancelJob(id string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	job, ok := m.jobs[id]
	if !ok {
		return false
	}
	if job.Status == JobStatusQueued || job.Status == JobStatusRunning {
		// cancel may still be nil if the worker goroutine hasn't started yet.
		if job.cancel != nil {
			job.cancel()
		}
		job.Status = JobStatusCancelled
		now := time.Now()
		job.EndedAt = &now
		m.notifyListeners(job)
		return true
	}
	return false
}
// DeleteJob removes the job with the given ID, first cancelling it if it
// is still queued or running. Returns false when no such job exists.
func (m *JobManager) DeleteJob(id string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	j, found := m.jobs[id]
	if !found {
		return false
	}
	active := j.Status == JobStatusQueued || j.Status == JobStatusRunning
	if active && j.cancel != nil {
		j.cancel()
	}
	delete(m.jobs, id)
	return true
}
// Subscribe registers and returns a buffered channel (capacity 100) that
// receives job updates. Release it with Unsubscribe.
func (m *JobManager) Subscribe() chan *Job {
	updates := make(chan *Job, 100)
	m.listenerMu.Lock()
	defer m.listenerMu.Unlock()
	m.listeners = append(m.listeners, updates)
	return updates
}
// Unsubscribe removes a previously subscribed channel and closes it.
// Unknown channels are ignored.
func (m *JobManager) Unsubscribe(ch chan *Job) {
	m.listenerMu.Lock()
	defer m.listenerMu.Unlock()
	for i := range m.listeners {
		if m.listeners[i] != ch {
			continue
		}
		m.listeners = append(m.listeners[:i], m.listeners[i+1:]...)
		close(ch)
		return
	}
}
// notifyListeners fans a job update out to all subscribed channels and to
// connected WebSocket clients. Channel sends are non-blocking: a listener
// whose buffer is full simply misses this update.
func (m *JobManager) notifyListeners(job *Job) {
	// Notify channel listeners
	m.listenerMu.RLock()
	for _, ch := range m.listeners {
		select {
		case ch <- job:
		default:
			// Listener is slow, skip
		}
	}
	m.listenerMu.RUnlock()
	// Broadcast to WebSocket clients (wsHub may be nil in tests)
	if m.wsHub != nil {
		m.wsHub.BroadcastJob(job)
	}
}
// runJob executes the download job; it runs in its own goroutine, started
// by CreateJob. It drives hfdownloader.Run, translating progress events
// into Job/JobProgress updates and broadcasting each change.
func (m *JobManager) runJob(job *Job) {
	ctx, cancel := context.WithCancel(context.Background())

	// Transition to running. Fix: job.cancel is now published while
	// holding m.mu — CancelJob and DeleteJob read it under the same mutex,
	// so the previous unlocked assignment was a data race.
	m.mu.Lock()
	job.cancel = cancel
	job.Status = JobStatusRunning
	now := time.Now()
	job.StartedAt = &now
	m.mu.Unlock()
	m.notifyListeners(job)

	// Create hfdownloader job and settings from the server config.
	dlJob := hfdownloader.Job{
		Repo:               job.Repo,
		Revision:           job.Revision,
		IsDataset:          job.IsDataset,
		Filters:            job.Filters,
		Excludes:           job.Excludes,
		AppendFilterSubdir: false,
	}
	settings := hfdownloader.Settings{
		OutputDir:          job.OutputDir,
		Concurrency:        m.config.Concurrency,
		MaxActiveDownloads: m.config.MaxActive,
		Token:              m.config.Token,
		MultipartThreshold: m.config.MultipartThreshold,
		Verify:             m.config.Verify,
		Retries:            m.config.Retries,
		BackoffInitial:     "400ms",
		BackoffMax:         "10s",
		Endpoint:           m.config.Endpoint,
	}

	// Progress callback - NOTE: must not hold lock when calling notifyListeners
	progressFunc := func(evt hfdownloader.ProgressEvent) {
		m.mu.Lock()
		switch evt.Event {
		case "plan_item":
			// One event per planned file: grow totals and the file table.
			job.Progress.TotalFiles++
			job.Progress.TotalBytes += evt.Total
			job.Files = append(job.Files, JobFileProgress{
				Path:       evt.Path,
				TotalBytes: evt.Total,
				Status:     "pending",
			})
		case "file_start":
			for i := range job.Files {
				if job.Files[i].Path == evt.Path {
					job.Files[i].Status = "active"
					break
				}
			}
		case "file_progress":
			for i := range job.Files {
				if job.Files[i].Path == evt.Path {
					job.Files[i].Downloaded = evt.Downloaded
					break
				}
			}
			// Update aggregate downloaded bytes from the per-file table.
			var total int64
			for _, f := range job.Files {
				total += f.Downloaded
			}
			job.Progress.DownloadedBytes = total
		case "file_done":
			for i := range job.Files {
				if job.Files[i].Path == evt.Path {
					job.Files[i].Status = "complete"
					job.Files[i].Downloaded = job.Files[i].TotalBytes
					break
				}
			}
			job.Progress.CompletedFiles++
			// Recalculate total downloaded
			var total int64
			for _, f := range job.Files {
				total += f.Downloaded
			}
			job.Progress.DownloadedBytes = total
		}
		m.mu.Unlock() // Unlock BEFORE notifying to avoid deadlock
		m.notifyListeners(job)
	}

	// Run the download (blocks until done, failed, or cancelled).
	err := hfdownloader.Run(ctx, dlJob, settings, progressFunc)

	// Update final status. Cancellation takes precedence over the error,
	// since Run typically returns a context error after cancel.
	m.mu.Lock()
	endTime := time.Now()
	job.EndedAt = &endTime
	if ctx.Err() != nil {
		job.Status = JobStatusCancelled
	} else if err != nil {
		job.Status = JobStatusFailed
		job.Error = err.Error()
	} else {
		job.Status = JobStatusCompleted
	}
	m.mu.Unlock()
	m.notifyListeners(job)
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/server/jobs_test.go | internal/server/jobs_test.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package server
import (
"testing"
"time"
)
// TestJobManager_CreateJob covers job creation basics: the output
// directory is always server-controlled (Models vs Datasets), and the
// revision defaults to "main" when omitted.
func TestJobManager_CreateJob(t *testing.T) {
	cfg := Config{
		ModelsDir:   "./test_models",
		DatasetsDir: "./test_datasets",
		Concurrency: 2,
		MaxActive:   1,
	}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	t.Run("creates model job with server-controlled output", func(t *testing.T) {
		req := DownloadRequest{
			Repo:     "test/model",
			Revision: "main",
			Dataset:  false,
		}
		job, wasExisting, err := mgr.CreateJob(req)
		if err != nil {
			t.Fatalf("CreateJob failed: %v", err)
		}
		if wasExisting {
			t.Error("Expected new job, got existing")
		}
		if job.OutputDir != "./test_models" {
			t.Errorf("Expected output ./test_models, got %s", job.OutputDir)
		}
		if job.IsDataset {
			t.Error("Expected model, got dataset")
		}
	})
	t.Run("creates dataset job with server-controlled output", func(t *testing.T) {
		req := DownloadRequest{
			Repo:    "test/dataset",
			Dataset: true,
		}
		job, _, err := mgr.CreateJob(req)
		if err != nil {
			t.Fatalf("CreateJob failed: %v", err)
		}
		if job.OutputDir != "./test_datasets" {
			t.Errorf("Expected output ./test_datasets, got %s", job.OutputDir)
		}
		if !job.IsDataset {
			t.Error("Expected dataset, got model")
		}
	})
	t.Run("defaults revision to main", func(t *testing.T) {
		req := DownloadRequest{
			Repo: "test/no-revision",
		}
		job, _, _ := mgr.CreateJob(req)
		if job.Revision != "main" {
			t.Errorf("Expected revision main, got %s", job.Revision)
		}
	})
}
// TestJobManager_Deduplication: an identical repo+revision request while
// the first job is still active must return the existing job.
func TestJobManager_Deduplication(t *testing.T) {
	cfg := Config{
		ModelsDir: "./test_models",
	}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	// Create first job
	req := DownloadRequest{
		Repo:     "dedup/test",
		Revision: "main",
	}
	job1, wasExisting1, _ := mgr.CreateJob(req)
	if wasExisting1 {
		t.Error("First job should not be existing")
	}
	// Try to create same job again
	job2, wasExisting2, _ := mgr.CreateJob(req)
	if !wasExisting2 {
		t.Error("Second job should be detected as existing")
	}
	if job1.ID != job2.ID {
		t.Errorf("Expected same job ID, got %s vs %s", job1.ID, job2.ID)
	}
}

// TestJobManager_DifferentRevisionsNotDeduplicated: dedup keys on
// repo+revision+type, so differing revisions yield distinct jobs.
func TestJobManager_DifferentRevisionsNotDeduplicated(t *testing.T) {
	cfg := Config{
		ModelsDir: "./test_models",
	}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	job1, _, _ := mgr.CreateJob(DownloadRequest{
		Repo:     "revision/test",
		Revision: "v1",
	})
	job2, wasExisting, _ := mgr.CreateJob(DownloadRequest{
		Repo:     "revision/test",
		Revision: "v2",
	})
	if wasExisting {
		t.Error("Different revisions should create different jobs")
	}
	if job1.ID == job2.ID {
		t.Error("Different revisions should have different IDs")
	}
}

// TestJobManager_ModelVsDatasetNotDeduplicated: a model and a dataset
// with the same repo ID are distinct jobs.
func TestJobManager_ModelVsDatasetNotDeduplicated(t *testing.T) {
	cfg := Config{
		ModelsDir:   "./test_models",
		DatasetsDir: "./test_datasets",
	}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	job1, _, _ := mgr.CreateJob(DownloadRequest{
		Repo:    "type/test",
		Dataset: false,
	})
	job2, wasExisting, _ := mgr.CreateJob(DownloadRequest{
		Repo:    "type/test",
		Dataset: true,
	})
	if wasExisting {
		t.Error("Model and dataset with same repo should be different jobs")
	}
	if job1.ID == job2.ID {
		t.Error("Model and dataset should have different IDs")
	}
}
// TestJobManager_GetJob covers lookup by ID for both present and missing
// jobs.
func TestJobManager_GetJob(t *testing.T) {
	cfg := Config{ModelsDir: "./test"}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	job, _, _ := mgr.CreateJob(DownloadRequest{Repo: "get/test"})
	t.Run("returns existing job", func(t *testing.T) {
		found, ok := mgr.GetJob(job.ID)
		if !ok {
			t.Error("Expected to find job")
		}
		if found.ID != job.ID {
			t.Error("Wrong job returned")
		}
	})
	t.Run("returns false for missing job", func(t *testing.T) {
		_, ok := mgr.GetJob("nonexistent")
		if ok {
			t.Error("Should not find nonexistent job")
		}
	})
}

// TestJobManager_ListJobs checks that every created job shows up in the
// listing.
func TestJobManager_ListJobs(t *testing.T) {
	cfg := Config{ModelsDir: "./test"}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	// Create multiple jobs with unique repos
	mgr.CreateJob(DownloadRequest{Repo: "list/test1"})
	mgr.CreateJob(DownloadRequest{Repo: "list/test2"})
	mgr.CreateJob(DownloadRequest{Repo: "list/test3"})
	jobs := mgr.ListJobs()
	if len(jobs) < 3 {
		t.Errorf("Expected at least 3 jobs, got %d", len(jobs))
	}
}

// TestJobManager_CancelJob: cancelling an active job flips it to
// cancelled; cancelling an unknown ID fails. The 50ms sleep gives the
// worker goroutine a chance to start first.
func TestJobManager_CancelJob(t *testing.T) {
	cfg := Config{ModelsDir: "./test"}
	hub := NewWSHub()
	go hub.Run()
	mgr := NewJobManager(cfg, hub)
	job, _, _ := mgr.CreateJob(DownloadRequest{Repo: "cancel/test"})
	// Wait a bit for job to start
	time.Sleep(50 * time.Millisecond)
	t.Run("cancels running job", func(t *testing.T) {
		ok := mgr.CancelJob(job.ID)
		if !ok {
			t.Error("Cancel should succeed")
		}
		found, _ := mgr.GetJob(job.ID)
		if found.Status != JobStatusCancelled {
			t.Errorf("Expected cancelled status, got %s", found.Status)
		}
	})
	t.Run("returns false for nonexistent job", func(t *testing.T) {
		ok := mgr.CancelJob("nonexistent")
		if ok {
			t.Error("Cancel should fail for nonexistent job")
		}
	})
}

// TestJobStatus_Values is a sanity check that no status constant is the
// empty string (which JSON consumers would misread as "unset").
func TestJobStatus_Values(t *testing.T) {
	statuses := []JobStatus{
		JobStatusQueued,
		JobStatusRunning,
		JobStatusCompleted,
		JobStatusFailed,
		JobStatusCancelled,
	}
	for _, s := range statuses {
		if s == "" {
			t.Error("Status should not be empty")
		}
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/cli/root.go | internal/cli/root.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"github.com/bodaay/HuggingFaceModelDownloader/internal/tui"
"github.com/bodaay/HuggingFaceModelDownloader/pkg/hfdownloader"
)
// RootOpts holds global CLI options, exposed as persistent flags on the
// root command and therefore shared by every subcommand.
type RootOpts struct {
	Token    string // HF access token (flag; HF_TOKEN env read later in finalize)
	JSONOut  bool   // emit machine-readable JSON events
	Quiet    bool   // minimal logging
	Verbose  bool   // debug logging
	Config   string // path to JSON/YAML config file
	LogFile  string // optional log file, in addition to stderr
	LogLevel string // debug, info, warn, error
}
// Execute runs the CLI with the given version string. It builds the
// cobra command tree, installs SIGINT/SIGTERM cancellation, and runs the
// root command. The download command doubles as the default action when
// no subcommand is named.
func Execute(version string) error {
	ro := &RootOpts{}
	ctx, cancel := signalContext(context.Background())
	defer cancel()
	root := &cobra.Command{
		Use:           "hfdownloader",
		Short:         "Fast, resumable downloader for Hugging Face models & datasets",
		SilenceUsage:  true,
		SilenceErrors: true, // we print the error ourselves below
		Version:       version,
	}
	// Global flags
	root.PersistentFlags().StringVarP(&ro.Token, "token", "t", "", "Hugging Face access token (also reads HF_TOKEN env)")
	root.PersistentFlags().BoolVar(&ro.JSONOut, "json", false, "Emit machine-readable JSON events (progress, plan, results)")
	root.PersistentFlags().BoolVarP(&ro.Quiet, "quiet", "q", false, "Quiet mode (minimal logs)")
	root.PersistentFlags().BoolVarP(&ro.Verbose, "verbose", "v", false, "Verbose logs (debug details)")
	root.PersistentFlags().StringVar(&ro.Config, "config", "", "Path to config file (JSON or YAML)")
	root.PersistentFlags().StringVar(&ro.LogFile, "log-file", "", "Write logs to file (in addition to stderr)")
	root.PersistentFlags().StringVar(&ro.LogLevel, "log-level", "info", "Log level: debug, info, warn, error")
	// Add commands
	downloadCmd := newDownloadCmd(ctx, ro)
	root.AddCommand(downloadCmd)
	root.AddCommand(newVersionCmd(version))
	root.AddCommand(newServeCmd(ro))
	root.AddCommand(newConfigCmd())
	// Make download the default command when no subcommand is given.
	// NOTE: only RunE is aliased — downloadCmd's PreRunE/flags still
	// belong to the subcommand itself.
	root.RunE = downloadCmd.RunE
	root.SetHelpCommand(&cobra.Command{Use: "help", Hidden: true})
	if err := root.ExecuteContext(ctx); err != nil {
		fmt.Fprintln(os.Stderr, "error:", err)
		return err
	}
	return nil
}
// newDownloadCmd builds the "download" subcommand. Flag values are bound
// directly into a hfdownloader.Job and Settings; PreRunE layers config
// file defaults under any explicitly set flags, and RunE finalizes the
// job (token, repo parsing, output dir) before planning or downloading.
func newDownloadCmd(ctx context.Context, ro *RootOpts) *cobra.Command {
	job := &hfdownloader.Job{}
	cfg := &hfdownloader.Settings{}
	var dryRun bool
	var planFmt string
	cmd := &cobra.Command{
		Use:     "download [REPO]",
		Short:   "Download a model or dataset from the Hugging Face Hub",
		Args:    cobra.MaximumNArgs(1),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return applySettingsDefaults(cmd, ro, cfg)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			finalJob, finalCfg, err := finalize(cmd, ro, args, job, cfg)
			if err != nil {
				return err
			}
			// Plan-only mode: print the file list and exit.
			if dryRun {
				p, err := hfdownloader.PlanRepo(ctx, finalJob, finalCfg)
				if err != nil {
					return err
				}
				if strings.ToLower(planFmt) == "json" || ro.JSONOut {
					enc := json.NewEncoder(os.Stdout)
					enc.SetIndent("", " ")
					return enc.Encode(p)
				}
				rev := finalJob.Revision
				if rev == "" {
					rev = "main"
				}
				fmt.Printf("Plan for %s@%s (%d files):\n", finalJob.Repo, rev, len(p.Items))
				for _, it := range p.Items {
					fmt.Printf(" %s %8d lfs=%t\n", it.RelativePath, it.Size, it.LFS)
				}
				return nil
			}
			// Progress mode selection: JSON events > quiet CLI > live TUI.
			var progress hfdownloader.ProgressFunc
			if ro.JSONOut {
				progress = jsonProgress(os.Stdout)
			} else if ro.Quiet {
				progress = cliProgress(ro, finalJob)
			} else {
				// Live TUI
				ui := tui.NewLiveRenderer(finalJob, finalCfg)
				defer ui.Close()
				progress = ui.Handler()
			}
			return hfdownloader.Download(ctx, finalJob, finalCfg, progress)
		},
	}
	// Job flags
	cmd.Flags().StringVarP(&job.Repo, "repo", "r", "", "Repository ID (owner/name). If omitted, positional REPO is used")
	cmd.Flags().BoolVar(&job.IsDataset, "dataset", false, "Treat repo as a dataset")
	cmd.Flags().StringVarP(&job.Revision, "revision", "b", "main", "Revision/branch to download (e.g. main, refs/pr/1)")
	cmd.Flags().StringSliceVarP(&job.Filters, "filters", "F", nil, "Comma-separated filters to match LFS artifacts (e.g. q4_0,q5_0)")
	cmd.Flags().StringSliceVarP(&job.Excludes, "exclude", "E", nil, "Comma-separated patterns to exclude (e.g. .md,fp16)")
	cmd.Flags().BoolVar(&job.AppendFilterSubdir, "append-filter-subdir", false, "Append each filter as a subdirectory")
	// Settings flags
	cmd.Flags().StringVarP(&cfg.OutputDir, "output", "o", "", "Destination base directory (default: Models or Datasets)")
	cmd.Flags().IntVarP(&cfg.Concurrency, "connections", "c", 8, "Per-file concurrent connections for LFS range requests")
	cmd.Flags().IntVar(&cfg.MaxActiveDownloads, "max-active", 3, "Maximum number of files downloading at once")
	cmd.Flags().StringVar(&cfg.MultipartThreshold, "multipart-threshold", "32MiB", "Use multipart/range downloads only for files >= this size")
	cmd.Flags().StringVar(&cfg.Verify, "verify", "size", "Verification for non-LFS files: none|size|etag|sha256")
	cmd.Flags().IntVar(&cfg.Retries, "retries", 4, "Max retry attempts per HTTP request/part")
	cmd.Flags().StringVar(&cfg.BackoffInitial, "backoff-initial", "400ms", "Initial retry backoff duration")
	cmd.Flags().StringVar(&cfg.BackoffMax, "backoff-max", "10s", "Maximum retry backoff duration")
	cmd.Flags().StringVar(&cfg.Endpoint, "endpoint", "", "Custom HuggingFace endpoint URL (e.g. https://hf-mirror.com)")
	// CLI-only flags
	cmd.Flags().BoolVar(&dryRun, "dry-run", false, "Plan only: print the file list and exit")
	cmd.Flags().StringVar(&planFmt, "plan-format", "table", "Plan output format for --dry-run: table|json")
	return cmd
}
func signalContext(parent context.Context) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(parent)
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
go func() {
select {
case <-ch:
cancel()
case <-ctx.Done():
}
}()
return ctx, cancel
}
// finalize merges flag/arg/env inputs into the effective Job and Settings:
// token resolution (flag then HF_TOKEN), positional repo, the
// "repo:filter1,filter2" shorthand, repo validation, and the default
// output directory (Models or Datasets) when --output was not given.
func finalize(cmd *cobra.Command, ro *RootOpts, args []string, job *hfdownloader.Job, cfg *hfdownloader.Settings) (hfdownloader.Job, hfdownloader.Settings, error) {
	j := *job
	c := *cfg

	// Token: flag wins, then the HF_TOKEN environment variable.
	tok := strings.TrimSpace(ro.Token)
	if tok == "" {
		tok = strings.TrimSpace(os.Getenv("HF_TOKEN"))
	}
	c.Token = tok

	// Repo may come from the positional argument when --repo is unset.
	if j.Repo == "" && len(args) > 0 {
		j.Repo = args[0]
	}

	// Support the repo:filter1,filter2 shorthand unless --filters was
	// given explicitly. strings.Cut replaces the old Contains+SplitN pair.
	if repo, rawFilters, found := strings.Cut(j.Repo, ":"); found && len(j.Filters) == 0 {
		j.Repo = repo
		if strings.TrimSpace(rawFilters) != "" {
			j.Filters = splitComma(rawFilters)
		}
	}
	if j.Repo == "" {
		return j, c, fmt.Errorf("missing REPO (owner/name). Pass as positional arg or --repo")
	}
	if !hfdownloader.IsValidModelName(j.Repo) {
		return j, c, fmt.Errorf("invalid repo id %q (expected owner/name)", j.Repo)
	}

	// Set default output directory based on type (Models or Datasets),
	// only if the user didn't explicitly set --output.
	if c.OutputDir == "" {
		if j.IsDataset {
			c.OutputDir = "Datasets"
		} else {
			c.OutputDir = "Models"
		}
	}
	return j, c, nil
}
// applySettingsDefaults overlays values from an optional config file onto
// dst. Precedence: explicitly-set CLI flags win over config-file values,
// which win over compiled-in defaults. With no --config, it probes
// ~/.config/hfdownloader.{json,yaml,yml} in that order.
func applySettingsDefaults(cmd *cobra.Command, ro *RootOpts, dst *hfdownloader.Settings) error {
	path := ro.Config
	if path == "" {
		home, _ := os.UserHomeDir()
		// Try JSON first, then YAML variants.
		jsonPath := filepath.Join(home, ".config", "hfdownloader.json")
		yamlPath := filepath.Join(home, ".config", "hfdownloader.yaml")
		ymlPath := filepath.Join(home, ".config", "hfdownloader.yml")
		if _, err := os.Stat(jsonPath); err == nil {
			path = jsonPath
		} else if _, err := os.Stat(yamlPath); err == nil {
			path = yamlPath
		} else if _, err := os.Stat(ymlPath); err == nil {
			path = ymlPath
		}
	}
	if path == "" {
		// No config file anywhere: compiled-in defaults stand.
		return nil
	}
	b, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var cfg map[string]any
	// Choose the decoder by file extension; unknown extensions assume JSON.
	ext := strings.ToLower(filepath.Ext(path))
	switch ext {
	case ".yaml", ".yml":
		if err := yaml.Unmarshal(b, &cfg); err != nil {
			return fmt.Errorf("invalid YAML config file: %w", err)
		}
	default: // .json or unknown
		if err := json.Unmarshal(b, &cfg); err != nil {
			return fmt.Errorf("invalid JSON config file: %w", err)
		}
	}
	// setStr/setInt apply a config value only when the corresponding flag
	// was NOT set on the command line.
	setStr := func(flagName string, set func(string)) {
		if cmd.Flags().Changed(flagName) {
			return
		}
		if v, ok := cfg[flagName]; ok && v != nil {
			set(fmt.Sprint(v))
		}
	}
	setInt := func(flagName string, set func(int)) {
		if cmd.Flags().Changed(flagName) {
			return
		}
		if v, ok := cfg[flagName]; ok && v != nil {
			var x int
			// Only apply the value when it parses as an integer. The old
			// code ignored the Sscan error, so a malformed entry silently
			// overrode the default with 0.
			if _, err := fmt.Sscan(fmt.Sprint(v), &x); err == nil {
				set(x)
			}
		}
	}
	// Note: We don't load "output" from config - it's now dynamic based on model/dataset type
	// setStr("output", func(v string) { dst.OutputDir = v })
	setInt("connections", func(v int) { dst.Concurrency = v })
	setInt("max-active", func(v int) { dst.MaxActiveDownloads = v })
	setStr("multipart-threshold", func(v string) { dst.MultipartThreshold = v })
	setStr("verify", func(v string) { dst.Verify = v })
	setInt("retries", func(v int) { dst.Retries = v })
	setStr("backoff-initial", func(v string) { dst.BackoffInitial = v })
	setStr("backoff-max", func(v string) { dst.BackoffMax = v })
	setStr("endpoint", func(v string) { dst.Endpoint = v })
	// The token is special: flag beats env var beats config file.
	if !cmd.Flags().Changed("token") && os.Getenv("HF_TOKEN") == "" {
		if v, ok := cfg["token"]; ok && v != nil {
			ro.Token = fmt.Sprint(v)
		}
	}
	return nil
}
// splitComma splits s on commas, trims whitespace around each piece, and
// drops empty pieces. An empty input yields nil.
func splitComma(s string) []string {
	if s == "" {
		return nil
	}
	pieces := strings.Split(s, ",")
	result := make([]string, 0, len(pieces))
	for _, piece := range pieces {
		if trimmed := strings.TrimSpace(piece); trimmed != "" {
			result = append(result, trimmed)
		}
	}
	return result
}
// cliProgress returns a simple text-based progress handler.
func cliProgress(ro *RootOpts, job hfdownloader.Job) hfdownloader.ProgressFunc {
	// The revision label is fixed for the whole job; compute it once up
	// front instead of on every event.
	rev := job.Revision
	if rev == "" {
		rev = "main"
	}
	return func(ev hfdownloader.ProgressEvent) {
		switch ev.Event {
		case "scan_start":
			fmt.Printf("Scanning %s@%s ...\n", job.Repo, rev)
		case "retry":
			fmt.Printf("retry %s (attempt %d): %s\n", ev.Path, ev.Attempt, ev.Message)
		case "file_start":
			fmt.Printf("downloading: %s (%d bytes)\n", ev.Path, ev.Total)
		case "file_done":
			// "skip..." messages mean the file was already present/verified.
			if strings.HasPrefix(ev.Message, "skip") {
				fmt.Printf("skip: %s %s\n", ev.Path, ev.Message)
			} else {
				fmt.Printf("done: %s\n", ev.Path)
			}
		case "error":
			fmt.Fprintf(os.Stderr, "error: %s\n", ev.Message)
		case "done":
			fmt.Println(ev.Message)
		}
	}
}
// jsonProgress returns a JSON-lines progress handler: each event is encoded
// as one JSON object per line on w. A mutex keeps concurrent events from
// interleaving on the writer.
func jsonProgress(w io.Writer) hfdownloader.ProgressFunc {
	var mu sync.Mutex
	enc := json.NewEncoder(w)
	enc.SetEscapeHTML(false)
	return func(ev hfdownloader.ProgressEvent) {
		mu.Lock()
		defer mu.Unlock()
		_ = enc.Encode(ev) // best-effort: progress output must not abort a download
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/cli/config.go | internal/cli/config.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
)
// DefaultConfig returns the default configuration.
func DefaultConfig() map[string]any {
	cfg := make(map[string]any, 9)
	// Download layout and parallelism.
	cfg["output"] = "Storage"
	cfg["connections"] = 8
	cfg["max-active"] = 3
	cfg["multipart-threshold"] = "32MiB"
	// Integrity and retry policy.
	cfg["verify"] = "size"
	cfg["retries"] = 4
	cfg["backoff-initial"] = "400ms"
	cfg["backoff-max"] = "10s"
	// Credentials: empty means anonymous (or HF_TOKEN from the environment).
	cfg["token"] = ""
	return cfg
}
// newConfigCmd builds the "config" command group (init / show / path).
func newConfigCmd() *cobra.Command {
	root := &cobra.Command{
		Use:   "config",
		Short: "Manage configuration",
	}
	for _, sub := range []*cobra.Command{
		newConfigInitCmd(),
		newConfigShowCmd(),
		newConfigPathCmd(),
	} {
		root.AddCommand(sub)
	}
	return root
}
// newConfigInitCmd builds "config init", which writes a default config file
// to ~/.config/hfdownloader.json (or .yaml with --yaml). Refuses to
// overwrite an existing file unless --force is given.
func newConfigInitCmd() *cobra.Command {
	var (
		force   bool
		useYAML bool
	)
	cmd := &cobra.Command{
		Use:   "init",
		Short: "Create a default configuration file",
		Long: `Creates a default configuration file at ~/.config/hfdownloader.json (or .yaml)
The configuration file sets default values for all command flags.
CLI flags always override config file values.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			home, err := os.UserHomeDir()
			if err != nil {
				return fmt.Errorf("could not find home directory: %w", err)
			}
			configDir := filepath.Join(home, ".config")
			ext := ".json"
			if useYAML {
				ext = ".yaml"
			}
			configPath := filepath.Join(configDir, "hfdownloader"+ext)
			// Refuse to clobber an existing config unless --force.
			if _, err := os.Stat(configPath); err == nil && !force {
				return fmt.Errorf("config file already exists: %s\nUse --force to overwrite", configPath)
			}
			// Create config directory if needed
			if err := os.MkdirAll(configDir, 0o755); err != nil {
				return fmt.Errorf("could not create config directory: %w", err)
			}
			// Serialize the defaults in the requested format.
			cfg := DefaultConfig()
			var data []byte
			if useYAML {
				data, err = yaml.Marshal(cfg)
			} else {
				data, err = json.MarshalIndent(cfg, "", " ")
			}
			if err != nil {
				return err
			}
			// 0o600 (was 0o644): the config can hold the HuggingFace token,
			// so keep it readable by the owner only.
			if err := os.WriteFile(configPath, data, 0o600); err != nil {
				return fmt.Errorf("could not write config file: %w", err)
			}
			fmt.Printf("✓ Created config file: %s\n", configPath)
			fmt.Println()
			fmt.Println("Edit this file to set your defaults. For example:")
			fmt.Println(" - Set your HuggingFace token")
			fmt.Println(" - Change default output directory")
			fmt.Println(" - Adjust connection settings")
			return nil
		},
	}
	cmd.Flags().BoolVarP(&force, "force", "f", false, "Overwrite existing config file")
	cmd.Flags().BoolVar(&useYAML, "yaml", false, "Create YAML config instead of JSON")
	return cmd
}
// newConfigShowCmd builds "config show", which prints the current config
// file. It probes the same candidates the loader (applySettingsDefaults)
// accepts — .json, .yaml, .yml — instead of only the JSON path, so a YAML
// config is no longer reported as missing.
func newConfigShowCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "show",
		Short: "Show current configuration",
		RunE: func(cmd *cobra.Command, args []string) error {
			home, _ := os.UserHomeDir()
			dir := filepath.Join(home, ".config")
			// Probe in loader order: JSON first, then YAML variants.
			var configPath string
			for _, name := range []string{"hfdownloader.json", "hfdownloader.yaml", "hfdownloader.yml"} {
				p := filepath.Join(dir, name)
				if _, err := os.Stat(p); err == nil {
					configPath = p
					break
				}
			}
			if configPath == "" {
				fmt.Println("No config file found.")
				fmt.Printf("Run 'hfdownloader config init' to create one at:\n %s\n", filepath.Join(dir, "hfdownloader.json"))
				return nil
			}
			data, err := os.ReadFile(configPath)
			if err != nil {
				return err
			}
			fmt.Printf("Config file: %s\n\n", configPath)
			fmt.Println(string(data))
			return nil
		},
	}
}
// newConfigPathCmd builds "config path", which prints the config file
// location. If a config already exists in any supported format (.json,
// .yaml, .yml — the same set applySettingsDefaults loads), its actual path
// is printed; otherwise the default JSON location is shown.
func newConfigPathCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "path",
		Short: "Print the config file path",
		Run: func(cmd *cobra.Command, args []string) {
			home, _ := os.UserHomeDir()
			dir := filepath.Join(home, ".config")
			// Probe in loader order: JSON first, then YAML variants.
			for _, name := range []string{"hfdownloader.json", "hfdownloader.yaml", "hfdownloader.yml"} {
				p := filepath.Join(dir, name)
				if _, err := os.Stat(p); err == nil {
					fmt.Println(p)
					return
				}
			}
			fmt.Println(filepath.Join(dir, "hfdownloader.json"))
		},
	}
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/cli/serve.go | internal/cli/serve.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"context"
"fmt"
"os"
"os/signal"
"strings"
"syscall"
"github.com/spf13/cobra"
"github.com/bodaay/HuggingFaceModelDownloader/internal/server"
)
// newServeCmd builds the "serve" subcommand: REST API + WebSocket + web UI
// for browser-based downloads. Output directories are fixed server-side via
// flags (never via the HTTP API), and the token comes from --token or the
// HF_TOKEN environment variable.
func newServeCmd(ro *RootOpts) *cobra.Command {
	var (
		addr               string
		port               int
		modelsDir          string
		datasetsDir        string
		conns              int
		active             int
		multipartThreshold string
		verify             string
		retries            int
		endpoint           string
	)
	cmd := &cobra.Command{
		Use:   "serve",
		Short: "Start HTTP server for web-based downloads",
		Long: `Start an HTTP server that provides:
- REST API for download management
- WebSocket for live progress updates
- Web UI for browser-based downloads
Output paths are configured server-side only (not via API) for security.
Example:
hfdownloader serve
hfdownloader serve --port 3000
hfdownloader serve --models-dir ./Models --datasets-dir ./Datasets
hfdownloader serve --endpoint https://hf-mirror.com`,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Build server config from the flag values.
			cfg := server.Config{
				Addr:               addr,
				Port:               port,
				ModelsDir:          modelsDir,
				DatasetsDir:        datasetsDir,
				Concurrency:        conns,
				MaxActive:          active,
				MultipartThreshold: multipartThreshold,
				Verify:             verify,
				Retries:            retries,
				Endpoint:           endpoint,
			}
			// Token: --token flag wins, then the HF_TOKEN env var.
			token := strings.TrimSpace(ro.Token)
			if token == "" {
				token = strings.TrimSpace(os.Getenv("HF_TOKEN"))
			}
			cfg.Token = token
			// Create and start server
			srv := server.New(cfg)
			// Cancel the context — and thus shut the server down — on
			// Ctrl-C / SIGTERM.
			ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
			defer cancel()
			fmt.Println()
			fmt.Println("╭────────────────────────────────────────────────────────────╮")
			fmt.Println("│ 🤗 HuggingFace Downloader │")
			fmt.Println("│ Web Server Mode │")
			fmt.Println("╰────────────────────────────────────────────────────────────╯")
			fmt.Println()
			return srv.ListenAndServe(ctx)
		},
	}
	cmd.Flags().StringVar(&addr, "addr", "0.0.0.0", "Address to bind to")
	cmd.Flags().IntVarP(&port, "port", "p", 8080, "Port to listen on")
	cmd.Flags().StringVar(&modelsDir, "models-dir", "./Models", "Output directory for models")
	cmd.Flags().StringVar(&datasetsDir, "datasets-dir", "./Datasets", "Output directory for datasets")
	cmd.Flags().IntVarP(&conns, "connections", "c", 8, "Connections per file")
	cmd.Flags().IntVar(&active, "max-active", 3, "Max concurrent file downloads")
	cmd.Flags().StringVar(&multipartThreshold, "multipart-threshold", "32MiB", "Use multipart for files >= this size")
	cmd.Flags().StringVar(&verify, "verify", "size", "Verification mode: none|size|sha256")
	cmd.Flags().IntVar(&retries, "retries", 4, "Max retry attempts per HTTP request")
	cmd.Flags().StringVar(&endpoint, "endpoint", "", "Custom HuggingFace endpoint URL (e.g., https://hf-mirror.com)")
	return cmd
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
bodaay/HuggingFaceModelDownloader | https://github.com/bodaay/HuggingFaceModelDownloader/blob/fd209ac449f0237f98b89e97915b9c250ff70c46/internal/cli/version.go | internal/cli/version.go | // Copyright 2025
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"fmt"
"runtime"
"runtime/debug"
"github.com/spf13/cobra"
)
// BuildInfo holds version and build information.
type BuildInfo struct {
Version string
GoVersion string
OS string
Arch string
Commit string
BuildTime string
}
// GetBuildInfo returns the current build information.
func GetBuildInfo(version string) BuildInfo {
info := BuildInfo{
Version: version,
GoVersion: runtime.Version(),
OS: runtime.GOOS,
Arch: runtime.GOARCH,
Commit: "unknown",
BuildTime: "unknown",
}
// Try to get VCS info from debug.BuildInfo
if bi, ok := debug.ReadBuildInfo(); ok {
for _, setting := range bi.Settings {
switch setting.Key {
case "vcs.revision":
if len(setting.Value) >= 7 {
info.Commit = setting.Value[:7]
} else {
info.Commit = setting.Value
}
case "vcs.time":
info.BuildTime = setting.Value
}
}
}
return info
}
// newVersionCmd builds the "version" subcommand. With --short it prints only
// the bare version string (handy for scripting); otherwise it prints the
// full build report from GetBuildInfo.
func newVersionCmd(version string) *cobra.Command {
	var short bool
	cmd := &cobra.Command{
		Use:   "version",
		Short: "Show version and build information",
		Run: func(cmd *cobra.Command, args []string) {
			info := GetBuildInfo(version)
			if short {
				fmt.Println(info.Version)
				return
			}
			fmt.Printf("hfdownloader %s\n", info.Version)
			fmt.Printf(" Go: %s\n", info.GoVersion)
			fmt.Printf(" OS/Arch: %s/%s\n", info.OS, info.Arch)
			fmt.Printf(" Commit: %s\n", info.Commit)
			fmt.Printf(" Built: %s\n", info.BuildTime)
		},
	}
	cmd.Flags().BoolVarP(&short, "short", "s", false, "Print only the version number")
	return cmd
}
| go | Apache-2.0 | fd209ac449f0237f98b89e97915b9c250ff70c46 | 2026-01-07T10:05:21.869383Z | false |
gocardless/logjam | https://github.com/gocardless/logjam/blob/8831e6e12043885c7f8181a8e092aa5f26497194/entry.go | entry.go | package main
import (
"encoding/json"
"os"
)
var Hostname string = "unknown"
type Entry struct {
Host string `json:"host"`
Message string `json:"message"`
}
func (e *Entry) ToJSON() []byte {
e.Host = Hostname
dump, _ := json.Marshal(e)
return dump
}
func init() {
Hostname, _ = os.Hostname()
}
| go | MIT | 8831e6e12043885c7f8181a8e092aa5f26497194 | 2026-01-07T10:05:21.970057Z | false |
gocardless/logjam | https://github.com/gocardless/logjam/blob/8831e6e12043885c7f8181a8e092aa5f26497194/receiver.go | receiver.go | package main
import (
"bufio"
"log"
"net"
"os"
"regexp"
"github.com/ActiveState/tail"
)
var (
	// ValidJSON is a cheap sanity filter: it only requires a '{' ... '}'
	// span somewhere in the payload — it does NOT validate that the
	// payload is well-formed JSON.
	ValidJSON = regexp.MustCompile("\\{.*\\}")
)
// Receiver accepts log entries from a UDP socket and from tailed files and
// funnels them into a buffered in-memory channel.
type Receiver struct {
	Host     string      // listen address
	Port     int         // listen port
	messages chan []byte // incomming messages
}

// NewReceiver creates a receiver whose message queue holds up to bufferSize
// entries before producers block.
func NewReceiver(host string, port, bufferSize int) Receiver {
	r := Receiver{Host: host, Port: port}
	r.messages = make(chan []byte, bufferSize)
	return r
}
// ListenAndServe binds a UDP socket on Host:Port and forwards every
// newline-delimited payload that passes the ValidJSON filter onto the
// messages channel. Blocks until the socket or scanner errors.
//
// NOTE(review): bufio.Scanner splits on newlines, not datagram boundaries,
// and its default buffer caps line length (~64KiB) — confirm senders
// terminate each entry with '\n'.
func (r *Receiver) ListenAndServe() error {
	addr := net.UDPAddr{Port: r.Port, IP: net.ParseIP(r.Host)}
	conn, err := net.ListenUDP("udp", &addr)
	if err != nil {
		return err
	}
	defer conn.Close()
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		b := scanner.Bytes()
		if !ValidJSON.Match(b) {
			log.Printf("Receiver: Error: Invalid Message\n")
			continue
		}
		// scanner.Bytes returns a slice into the scanner's internal buffer,
		// which the next Scan overwrites. The old code sent that aliased
		// slice across the channel to the writer goroutine, corrupting
		// queued messages — copy before handing it off.
		msg := make([]byte, len(b))
		copy(msg, b)
		r.messages <- msg
	}
	if err = scanner.Err(); err != nil {
		return err
	}
	return nil
}
// TailFile follows path on disk (re-opening it on rotation) and returns the
// tail handle whose Lines channel streams new lines.
func (r *Receiver) TailFile(path string) (*tail.Tail, error) {
	return tail.TailFile(path, tail.Config{Follow: true, ReOpen: true})
}
// ListenToTail converts tail events into entries on the messages channel,
// exactly as if they had arrived on the network socket.
func (r *Receiver) ListenToTail(t *tail.Tail) {
	for line := range t.Lines {
		entry := Entry{Message: line.Text}
		r.messages <- entry.ToJSON()
	}
}
// WriteToFile drains the messages channel into filename, one JSON entry per
// line. Runs until a write fails; intended to be launched as a goroutine.
// O_TRUNC resets the buffer file once at startup; O_APPEND governs the
// writes that follow.
func (r *Receiver) WriteToFile(filename string) {
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0644)
	if err != nil {
		log.Printf("Writer: Error: %s\n", err)
		return
	}
	defer file.Close()
	for {
		entry := <-r.messages
		// Surface write failures instead of silently dropping every entry:
		// the old code ignored both Write errors, so a full or removed disk
		// buffer lost data without any log line.
		if _, err := file.Write(entry); err != nil {
			log.Printf("Writer: Error: %s\n", err)
			return
		}
		if _, err := file.Write([]byte("\n")); err != nil {
			log.Printf("Writer: Error: %s\n", err)
			return
		}
	}
}
| go | MIT | 8831e6e12043885c7f8181a8e092aa5f26497194 | 2026-01-07T10:05:21.970057Z | false |
gocardless/logjam | https://github.com/gocardless/logjam/blob/8831e6e12043885c7f8181a8e092aa5f26497194/config.go | config.go | package main
import (
"encoding/json"
"io/ioutil"
)
// Config is the on-disk JSON configuration for the log shipper.
type Config struct {
	Bind           string   `json:"bind"`        // interface to bind to (0.0.0.0 for all)
	Port           int      `json:"port"`        // listen port
	Server         string   `json:"server"`      // remote server to publish logs to
	DiskBufferPath string   `json:"buffer"`      // path for disk buffer
	BufferSize     int      `json:"buffer_size"` // queue length of memory buffer
	TruncatePeriod int      `json:"truncate"`    // cleanup buffer every x seconds
	Files          []string `json:"files"`       // files to include in publish
}
// ReadConfigFile parses the JSON config at path. On unmarshal failure the
// returned *Config is non-nil but only partially populated, alongside the
// error (preserving the original contract).
func ReadConfigFile(path string) (*Config, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := new(Config)
	err = json.Unmarshal(data, cfg)
	return cfg, err
}
| go | MIT | 8831e6e12043885c7f8181a8e092aa5f26497194 | 2026-01-07T10:05:21.970057Z | false |
gocardless/logjam | https://github.com/gocardless/logjam/blob/8831e6e12043885c7f8181a8e092aa5f26497194/shipper_test.go | shipper_test.go | package main
import (
"bytes"
"net"
"testing"
)
// StubConn is a net.Conn test double whose Write captures payloads into an
// in-memory buffer; all other net.Conn methods come from the embedded
// (nil) interface and must not be called.
type StubConn struct {
	net.Conn
	buffer *bytes.Buffer
}

// Write records p in the buffer instead of sending it anywhere.
func (sc StubConn) Write(p []byte) (int, error) {
	return sc.buffer.Write(p)
}
// TestWriteWithBackoff verifies a successful write lands in the connection
// unchanged (the stub never fails, so no backoff is exercised).
func TestWriteWithBackoff(t *testing.T) {
	conn := StubConn{buffer: new(bytes.Buffer)}
	s := Shipper{conn}
	s.WriteWithBackoff([]byte("hello"), 125)
	// bytes.Equal is the idiomatic equality check (staticcheck S1004),
	// replacing bytes.Compare(...) != 0.
	if !bytes.Equal(conn.buffer.Bytes(), []byte("hello")) {
		t.Fatal("Write Mismatch")
	}
}
| go | MIT | 8831e6e12043885c7f8181a8e092aa5f26497194 | 2026-01-07T10:05:21.970057Z | false |
gocardless/logjam | https://github.com/gocardless/logjam/blob/8831e6e12043885c7f8181a8e092aa5f26497194/main.go | main.go | package main
import (
"flag"
"log"
"time"
)
var (
	// ConfigPath is the -config flag: location of the JSON config file.
	ConfigPath = flag.String("config", "/etc/log-stream.json", "Config File Path")
)

// main wires the pipeline: receiver (UDP socket + tailed files) -> disk
// buffer -> shipper -> remote log server, with periodic truncation keeping
// the disk buffer bounded.
func main() {
	flag.Parse()
	config, err := ReadConfigFile(*ConfigPath)
	if err != nil {
		log.Fatalf("Config: Error: %s\n", err)
	}
	r := NewReceiver(config.Bind, config.Port, config.BufferSize)
	s, err := NewShipper("udp", config.Server)
	if err != nil {
		log.Fatalf("Shipper: Error: %s\n", err)
	}
	log.Printf("Shipper: Connected: %s\n", config.Server)
	// Buffer incoming entries to disk, ship them out, trim periodically.
	go r.WriteToFile(config.DiskBufferPath)
	go s.Ship(config.DiskBufferPath)
	go s.TruncateEvery(config.DiskBufferPath, time.Duration(config.TruncatePeriod)*time.Second)
	// Ship Files
	for _, path := range config.Files {
		t, err := r.TailFile(path)
		if err != nil {
			log.Fatalf("Tail: Error: %s\n", err)
		}
		go r.ListenToTail(t)
	}
	// Ship Socket (blocks until the UDP listener fails)
	err = r.ListenAndServe()
	if err != nil {
		log.Fatalf("Receiver: Error: %s\n", err)
	}
}
| go | MIT | 8831e6e12043885c7f8181a8e092aa5f26497194 | 2026-01-07T10:05:21.970057Z | false |
gocardless/logjam | https://github.com/gocardless/logjam/blob/8831e6e12043885c7f8181a8e092aa5f26497194/shipper.go | shipper.go | package main
import (
"log"
"net"
"os"
"time"
"github.com/ActiveState/tail"
)
type Shipper struct {
net.Conn
}
// create a new shipper client
func NewShipper(proto string, addr string) (*Shipper, error) {
conn, err := net.Dial(proto, addr)
if err != nil {
return nil, err
}
return &Shipper{conn}, nil
}
// write to socket with exponential backoff in milliseconds
func (s *Shipper) WriteWithBackoff(p []byte, initial int) {
var timeout time.Duration = time.Duration(initial) * time.Millisecond
for {
_, err := s.Write(p)
if err != nil {
timeout = timeout * 2
time.Sleep(timeout)
continue
}
return
}
}
// Ship tails the disk buffer at filename and forwards each line to the
// remote log server, retrying with backoff until every write succeeds.
// Runs until the tail stream ends; intended to be launched as a goroutine.
func (s *Shipper) Ship(filename string) {
	t, err := tail.TailFile(filename, tail.Config{Follow: true, ReOpen: true})
	if err != nil {
		log.Printf("Shipper: Error: %s\n", err)
		return
	}
	for line := range t.Lines {
		// line.Text has the trailing newline stripped; each write is sent
		// as one UDP datagram, i.e. one record on the wire.
		s.WriteWithBackoff([]byte(line.Text), 125)
	}
}
// TruncateEvery wipes filename once per period so the disk buffer stays
// bounded. Runs forever; intended to be launched as a goroutine. Open
// failures are logged and the next cycle is attempted anyway.
func (s *Shipper) TruncateEvery(filename string, period time.Duration) {
	for {
		time.Sleep(period)
		f, err := os.OpenFile(filename, os.O_WRONLY|os.O_TRUNC, 0644)
		if err != nil {
			log.Printf("Shipper: Truncate: Error: %s\n", err)
			continue
		}
		f.Close()
		log.Printf("Shipper: Truncate: %s\n", filename)
	}
}
| go | MIT | 8831e6e12043885c7f8181a8e092aa5f26497194 | 2026-01-07T10:05:21.970057Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/interface.go | pkg/infra/interface.go | package infra
import (
"time"
"github.com/hyperledger-twgc/tape/internal/fabric/protoutil"
"github.com/hyperledger/fabric-protos-go-apiv2/common"
)
// Workflow mode and filter selectors used across tape.
const (
	FULLPROCESS = 6 // end-to-end run: endorsement, ordering and commit
	TRAFFIC     = 7 // traffic generation only
	OBSERVER    = 0 // observation only
	ENDORSEMENT = 4 // endorsement phase
	COMMIT      = 3 // commit phase
	// NOTE(review): the *FILTER values alias the phase values above
	// (PROPOSALFILTER == ENDORSEMENT == 4, COMMITFILTER == COMMIT == 3) —
	// confirm the overlap is intentional before relying on distinctness.
	PROPOSALFILTER = 4
	COMMITFILTER   = 3
	QUERYFILTER    = 2
)
/*
to do for #127 SM crypto
just need to do an impl for this interface and replace
and impl a function for func (c Config) LoadCrypto() (*CryptoImpl, error) {
as generator
*/

// Crypto is the signing identity used for proposals, transactions and
// deliver requests. It extends protoutil.Signer with the ability to build
// a fresh signature header.
type Crypto interface {
	protoutil.Signer
	NewSignatureHeader() (*common.SignatureHeader, error)
	/*Serialize() ([]byte, error)
	Sign(message []byte) ([]byte, error)*/
}
/*
as Tape major as Producer and Consumer pattern
define an interface here as Worker with start here
as for #56 and #174,in cli imp adjust sequence of P&C impl to control workflow.
*/

// Worker is any pipeline stage tape can launch.
type Worker interface {
	Start()
}

// ObserverWorker is a Worker that also reports when observation started.
type ObserverWorker interface {
	Worker
	GetTime() time.Time
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/infra_suite_test.go | pkg/infra/infra_suite_test.go | package infra_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestInfra wires the Ginkgo spec suite into the standard go-test runner
// for this package.
func TestInfra(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Infra Suite")
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/observer_test.go | pkg/infra/observer/observer_test.go | package observer_test
import (
"context"
"os"
"sync"
"time"
"github.com/hyperledger-twgc/tape/e2e"
"github.com/hyperledger-twgc/tape/e2e/mock"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/hyperledger-twgc/tape/pkg/infra/observer"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
// Ginkgo specs for the per-peer block observers: one happy-path run against
// a single mock peer, and one threshold run proving that 2-of-3 committed
// peers is sufficient (and necessary) to finish.
var _ = Describe("Observer", func() {
	var (
		tmpDir                                string
		logger                                *log.Logger
		PolicyFile, mtlsCertFile, mtlsKeyFile *os.File
	)
	// Private context-key type so the start timestamp cannot collide with
	// other context values.
	type key string
	const start key = "start"
	BeforeEach(func() {
		// Fresh temp dir with generated mTLS material and an endorsement
		// policy file before every spec.
		logger = log.New()
		var err error
		tmpDir, err = os.MkdirTemp("", "tape-")
		Expect(err).NotTo(HaveOccurred())
		mtlsCertFile, err = os.CreateTemp(tmpDir, "mtls-*.crt")
		Expect(err).NotTo(HaveOccurred())
		mtlsKeyFile, err = os.CreateTemp(tmpDir, "mtls-*.key")
		Expect(err).NotTo(HaveOccurred())
		PolicyFile, err = os.CreateTemp(tmpDir, "policy")
		Expect(err).NotTo(HaveOccurred())
		err = e2e.GenerateCertAndKeys(mtlsKeyFile, mtlsCertFile)
		Expect(err).NotTo(HaveOccurred())
		err = e2e.GeneratePolicy(PolicyFile)
		Expect(err).NotTo(HaveOccurred())
		PolicyFile.Close()
		mtlsCertFile.Close()
		mtlsKeyFile.Close()
	})
	AfterEach(func() {
		os.RemoveAll(tmpDir)
	})
	It("It should work with mock", func() {
		// Single mock peer: every mock tx must flow through the observer
		// into the block collector and close finishCh.
		txC := make(chan struct{}, mock.MockTxSize)
		mpeer, err := mock.NewPeer(txC, nil)
		Expect(err).NotTo(HaveOccurred())
		go mpeer.Start()
		defer mpeer.Stop()
		configFile, err := os.CreateTemp(tmpDir, "config*.yaml")
		Expect(err).NotTo(HaveOccurred())
		paddrs := make([]string, 0)
		paddrs = append(paddrs, mpeer.Addrs())
		configValue := e2e.Values{
			PrivSk:          mtlsKeyFile.Name(),
			SignCert:        mtlsCertFile.Name(),
			Mtls:            false,
			PeersAddrs:      paddrs,
			OrdererAddr:     "",
			CommitThreshold: 1,
			PolicyFile:      PolicyFile.Name(),
		}
		e2e.GenerateConfigFile(configFile.Name(), configValue)
		config, err := basic.LoadConfig(configFile.Name())
		Expect(err).NotTo(HaveOccurred())
		crypto, err := config.LoadCrypto()
		Expect(err).NotTo(HaveOccurred())
		ctx, cancel := context.WithCancel(context.Background())
		ctx = context.WithValue(ctx, start, time.Now())
		defer cancel()
		errorCh := make(chan error, 10)
		blockCh := make(chan *observer.AddressedBlock)
		observers, err := observer.CreateObservers(ctx, crypto, errorCh, blockCh, config, logger)
		Expect(err).NotTo(HaveOccurred())
		finishCh := make(chan struct{})
		var once sync.Once
		blockCollector, err := observer.NewBlockCollector(config.CommitThreshold, len(config.Committers), ctx, blockCh, finishCh, mock.MockTxSize, false, logger, &once, true)
		Expect(err).NotTo(HaveOccurred())
		go blockCollector.Start()
		go observers.Start()
		go func() {
			for i := 0; i < mock.MockTxSize; i++ {
				txC <- struct{}{}
			}
		}()
		Eventually(finishCh).Should(BeClosed())
		completed := time.Now()
		Expect(ctx.Value(start).(time.Time).Sub(completed)).Should(BeNumerically("<", 0.002), "observer with mock shouldn't take too long.")
	})
	It("It should work as 2 committed of 3 peers", func() {
		// Three mock peers, threshold two: pausing two peers must stall
		// completion; unpausing them must let the run finish.
		TotalPeers := 3
		CommitThreshold := 2
		paddrs := make([]string, 0)
		txCs := make([]chan struct{}, 0)
		var mpeers []*mock.Peer
		for i := 0; i < TotalPeers; i++ {
			txC := make(chan struct{}, mock.MockTxSize)
			mpeer, err := mock.NewPeer(txC, nil)
			Expect(err).NotTo(HaveOccurred())
			go mpeer.Start()
			defer mpeer.Stop()
			paddrs = append(paddrs, mpeer.Addrs())
			mpeers = append(mpeers, mpeer)
			txCs = append(txCs, txC)
		}
		configFile, err := os.CreateTemp(tmpDir, "config*.yaml")
		Expect(err).NotTo(HaveOccurred())
		configValue := e2e.Values{
			PrivSk:          mtlsKeyFile.Name(),
			SignCert:        mtlsCertFile.Name(),
			Mtls:            false,
			PeersAddrs:      paddrs,
			OrdererAddr:     "",
			CommitThreshold: CommitThreshold,
			PolicyFile:      PolicyFile.Name(),
		}
		e2e.GenerateConfigFile(configFile.Name(), configValue)
		config, err := basic.LoadConfig(configFile.Name())
		Expect(err).NotTo(HaveOccurred())
		crypto, err := config.LoadCrypto()
		Expect(err).NotTo(HaveOccurred())
		ctx, cancel := context.WithCancel(context.Background())
		ctx = context.WithValue(ctx, start, time.Now())
		defer cancel()
		blockCh := make(chan *observer.AddressedBlock)
		errorCh := make(chan error, 10)
		observers, err := observer.CreateObservers(ctx, crypto, errorCh, blockCh, config, logger)
		Expect(err).NotTo(HaveOccurred())
		finishCh := make(chan struct{})
		var once sync.Once
		blockCollector, err := observer.NewBlockCollector(config.CommitThreshold, len(config.Committers), ctx, blockCh, finishCh, mock.MockTxSize, true, logger, &once, true)
		Expect(err).NotTo(HaveOccurred())
		go blockCollector.Start()
		go observers.Start()
		for i := 0; i < TotalPeers; i++ {
			go func(k int) {
				for j := 0; j < mock.MockTxSize; j++ {
					txCs[k] <- struct{}{}
				}
			}(i)
		}
		for i := 0; i < CommitThreshold; i++ {
			mpeers[i].Pause()
		}
		Consistently(finishCh).ShouldNot(Receive())
		for i := 0; i < CommitThreshold; i++ {
			mpeers[i].Unpause()
		}
		Eventually(finishCh).Should(BeClosed())
	})
})
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/observer.go | pkg/infra/observer/observer.go | package observer
import (
"context"
"time"
"github.com/hyperledger-twgc/tape/pkg/infra"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/hyperledger-twgc/tape/pkg/infra/trafficGenerator"
"github.com/hyperledger/fabric-protos-go-apiv2/peer"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// Observers fans a set of per-peer block observers out over one shared
// context and channel pair.
type Observers struct {
	workers []*Observer          // one observer per committer peer
	errorCh chan error           // stream errors from any worker
	blockCh chan *AddressedBlock // filtered blocks tagged with peer index
	ctx     context.Context
}

// Observer follows a single peer's filtered-block deliver stream.
type Observer struct {
	index   int    // position of this peer in config.Committers
	Address string // peer address, used in logs and latency spans
	d       peer.Deliver_DeliverFilteredClient
	logger  *log.Logger
}

// key is a private context-key type to avoid collisions in context values.
type key string

// start keys the observation start time stored in the context.
const start key = "start"
// CreateObservers builds one block observer per committer in config and
// bundles them so Start can launch them together.
func CreateObservers(ctx context.Context, crypto infra.Crypto, errorCh chan error, blockCh chan *AddressedBlock, config basic.Config, logger *log.Logger) (*Observers, error) {
	var workers []*Observer
	for idx, committer := range config.Committers {
		w, err := CreateObserver(ctx, config.Channel, committer, crypto, logger)
		if err != nil {
			return nil, err
		}
		// Each worker remembers its position so blocks can be attributed
		// to the right peer by the collector.
		w.index = idx
		workers = append(workers, w)
	}
	obs := &Observers{
		ctx:     ctx,
		errorCh: errorCh,
		blockCh: blockCh,
		workers: workers,
	}
	return obs, nil
}
// Start stamps the start time into the context and launches every worker
// in its own goroutine.
func (o *Observers) Start() {
	//o.StartTime = time.Now()
	o.ctx = context.WithValue(o.ctx, start, time.Now())
	for i := 0; i < len(o.workers); i++ {
		go o.workers[i].Start(o.errorCh, o.blockCh, o.ctx.Value(start).(time.Time))
	}
}

// GetTime returns the observation start time recorded by Start.
// NOTE(review): if called before Start, the context value is absent and the
// type assertion panics — confirm callers always Start first.
func (o *Observers) GetTime() time.Time {
	return o.ctx.Value(start).(time.Time)
}
// CreateObserver opens a filtered-block deliver stream to node on channel,
// sends a signed "seek newest" request, and drains the initial status
// response so the returned Observer starts at the current chain tip.
func CreateObserver(ctx context.Context, channel string, node basic.Node, crypto infra.Crypto, logger *log.Logger) (*Observer, error) {
	seek, err := trafficGenerator.CreateSignedDeliverNewestEnv(channel, crypto)
	if err != nil {
		return nil, err
	}
	deliverer, err := basic.CreateDeliverFilteredClient(ctx, node, logger)
	if err != nil {
		return nil, err
	}
	if err = deliverer.Send(seek); err != nil {
		return nil, err
	}
	// drain first response (the deliver status ack, not a block)
	if _, err = deliverer.Recv(); err != nil {
		return nil, err
	}
	return &Observer{Address: node.Addr, d: deliverer, logger: logger}, nil
}
// Start consumes the peer's filtered-block stream, records a commit span
// per transaction, and forwards each block — tagged with this observer's
// peer index and the elapsed time since now — to blockCh. Runs until the
// stream errors.
func (o *Observer) Start(errorCh chan error, blockCh chan<- *AddressedBlock, now time.Time) {
	o.logger.Debugf("start observer for peer %s", o.Address)
	for {
		r, err := o.d.Recv()
		if err != nil {
			// The stream is dead after a Recv error (r is nil): report the
			// real error once and stop. The old code kept going and then
			// also emitted the misleading "received nil message" error for
			// the same failure.
			errorCh <- err
			return
		}
		if r == nil {
			errorCh <- errors.Errorf("received nil message, but expect a valid block instead. You could look into your peer logs for more info")
			return
		}
		fb := r.Type.(*peer.DeliverResponse_FilteredBlock)
		for _, b := range fb.FilteredBlock.FilteredTransactions {
			// Close the per-transaction latency span at commit time.
			basic.LogEvent(o.logger, b.Txid, "CommitAtPeer")
			tapeSpan := basic.GetGlobalSpan()
			tapeSpan.FinishWithMap(b.Txid, o.Address, basic.COMMIT_AT_PEER)
		}
		o.logger.Debugf("receivedTime %8.2fs\tBlock %6d\tTx %6d\t Address %s\n", time.Since(now).Seconds(), fb.FilteredBlock.Number, len(fb.FilteredBlock.FilteredTransactions), o.Address)
		blockCh <- &AddressedBlock{fb.FilteredBlock, o.index, time.Since(now)}
	}
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/commitObserver.go | pkg/infra/observer/commitObserver.go | package observer
import (
"fmt"
"math"
"sync"
"time"
"github.com/opentracing/opentracing-go"
"github.com/hyperledger-twgc/tape/internal/fabric/protoutil"
"github.com/hyperledger-twgc/tape/pkg/infra"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/hyperledger/fabric-protos-go-apiv2/common"
"github.com/hyperledger/fabric-protos-go-apiv2/orderer"
log "github.com/sirupsen/logrus"
)
// CommitObserver follows the orderer's deliver stream, counting committed
// transactions and recording a tracing span for each one.
type CommitObserver struct {
	d          orderer.AtomicBroadcast_DeliverClient
	n          int // transactions expected; <= 0 disables the finish check
	logger     *log.Logger
	Now        time.Time // when Start began receiving
	errorCh    chan error
	finishCh   chan struct{}
	once       *sync.Once // guards closing finishCh exactly once
	addresses  []string   // committer peer addresses, for per-peer spans
	finishflag bool       // when false, finishCh is never closed (open-ended run)
}
// CreateCommitObserver connects to the orderer's deliver service, seeks the
// newest block, drains the initial status response, and returns an observer
// that will count n transactions across config's committers.
// Returns (nil, nil) when node.Addr is empty (no orderer configured).
func CreateCommitObserver(
	channel string,
	node basic.Node,
	crypto *basic.CryptoImpl,
	logger *log.Logger,
	n int,
	config basic.Config,
	errorCh chan error,
	finishCh chan struct{},
	once *sync.Once,
	finishflag bool) (*CommitObserver, error) {
	if len(node.Addr) == 0 {
		return nil, nil
	}
	deliverer, err := basic.CreateDeliverClient(node)
	if err != nil {
		return nil, err
	}
	seek, err := CreateSignedDeliverNewestEnv(channel, crypto)
	if err != nil {
		return nil, err
	}
	if err = deliverer.Send(seek); err != nil {
		return nil, err
	}
	// drain first response (the deliver status ack, not a block)
	_, err = deliverer.Recv()
	if err != nil {
		return nil, err
	}
	// Collect committer addresses so commit spans can be attributed per peer.
	addresses := make([]string, 0)
	for _, v := range config.Committers {
		addresses = append(addresses, v.Addr)
	}
	return &CommitObserver{d: deliverer,
		n:          n,
		logger:     logger,
		errorCh:    errorCh,
		finishCh:   finishCh,
		addresses:  addresses,
		once:       once,
		finishflag: finishflag,
	}, nil
}
// Start consumes blocks from the orderer, prints per-block stats, records
// tracing spans for every endorser transaction, and closes finishCh once
// o.n transactions have been seen (when finishflag is set).
func (o *CommitObserver) Start() {
	o.Now = time.Now()
	o.logger.Debugf("start observer for orderer")
	n := 0
	for {
		r, err := o.d.Recv()
		if err != nil {
			// After a Recv error the stream is broken and r is nil: report
			// the error and stop. The old code fell through to the nil
			// check and panicked instead of surfacing the real failure.
			o.errorCh <- err
			return
		}
		if r == nil {
			panic("Received nil message, but expect a valid block instead. You could look into your peer logs for more info")
		}
		block := r.GetBlock()
		tx := len(block.Data.Data)
		n += tx
		fmt.Printf("From Orderer Time %8.2fs\tBlock %6d\t Tx %6d\n", time.Since(o.Now).Seconds(), block.Header.Number, tx)
		for _, data := range block.Data.Data {
			txID := ""
			// Peel envelope -> payload -> channel header to find the tx id;
			// malformed entries are skipped rather than failing the run.
			env, err := protoutil.GetEnvelopeFromBlock(data)
			if err != nil {
				continue
			}
			payload, err := protoutil.UnmarshalPayload(env.Payload)
			if err != nil {
				continue
			}
			chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader)
			if err != nil {
				continue
			}
			if common.HeaderType(chdr.Type) == common.HeaderType_ENDORSER_TRANSACTION {
				txID = chdr.TxId
			}
			if txID != "" {
				tapeSpan := basic.GetGlobalSpan()
				tapeSpan.FinishWithMap(txID, "", basic.CONSESUS)
				var span opentracing.Span
				if basic.GetMod() == infra.FULLPROCESS {
					// Full-process mode: nest commit spans under the
					// end-to-end transaction span.
					Global_Span := tapeSpan.GetSpan(txID, "", basic.TRANSACTION)
					span = tapeSpan.SpanIntoMap(txID, "", basic.COMMIT_AT_ALL_PEERS, Global_Span)
				} else {
					span = tapeSpan.SpanIntoMap(txID, "", basic.COMMIT_AT_ALL_PEERS, nil)
				}
				tapeSpan.SpanIntoMap(txID, "", basic.COMMIT_AT_NETWORK, span)
				if basic.GetMod() != infra.COMMIT {
					for _, v := range o.addresses {
						tapeSpan.SpanIntoMap(txID, v, basic.COMMIT_AT_PEER, span)
					}
				}
				basic.LogEvent(o.logger, txID, "BlockFromOrderer")
			}
		}
		if o.n > 0 && o.finishflag {
			if n >= o.n {
				// consider with multiple threads need close this channel, need a once here to avoid channel been closed in multiple times
				o.once.Do(func() {
					close(o.finishCh)
				})
				return
			}
		}
	}
}
// GetTime returns when Start began receiving blocks (zero before Start).
func (o *CommitObserver) GetTime() time.Time {
	return o.Now
}
// CreateSignedDeliverNewestEnv builds and signs a DELIVER_SEEK_INFO envelope
// for channel ch that asks the orderer for the newest block and then keeps
// streaming (stop position is effectively "forever"), blocking until blocks
// become ready.
func CreateSignedDeliverNewestEnv(ch string, signer *basic.CryptoImpl) (*common.Envelope, error) {
	seekInfo := &orderer.SeekInfo{
		Start: &orderer.SeekPosition{
			Type: &orderer.SeekPosition_Newest{Newest: &orderer.SeekNewest{}},
		},
		Stop: &orderer.SeekPosition{
			Type: &orderer.SeekPosition_Specified{
				Specified: &orderer.SeekSpecified{Number: math.MaxUint64},
			},
		},
		Behavior: orderer.SeekInfo_BLOCK_UNTIL_READY,
	}
	return protoutil.CreateSignedEnvelope(
		common.HeaderType_DELIVER_SEEK_INFO,
		ch,
		signer,
		seekInfo,
		0,
		0,
	)
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/block_collector.go | pkg/infra/observer/block_collector.go | package observer
import (
"context"
"fmt"
"sync"
"time"
"github.com/hyperledger-twgc/tape/pkg/infra"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/hyperledger-twgc/tape/pkg/infra/bitmap"
"github.com/hyperledger/fabric-protos-go-apiv2/peer"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// BlockCollector keeps track of committed blocks on multiple peers.
// This is used when a block is considered confirmed only when committed
// on a certain number of peers within network.
type BlockCollector struct {
	sync.Mutex
	// thresholdP: peers required to confirm a block; totalP: peers observed;
	// totalTx: remaining transactions before the run counts as finished.
	thresholdP, totalP, totalTx int
	// registry maps block number -> bitmap of peers that delivered it.
	registry map[uint64]*bitmap.BitMap
	ctx      context.Context
	blockCh  chan *AddressedBlock
	finishCh chan struct{}
	logger   *log.Logger
	// once guards closing finishCh so it cannot be closed twice.
	once        *sync.Once
	printResult bool // controls whether to print block commit message. Tests set this to false to avoid polluting stdout.
	finishflag  bool // when false, the collector never closes finishCh itself
}
// AddressedBlock describe the source of block
type AddressedBlock struct {
	*peer.FilteredBlock
	Address int // source peer's number
	// Now is the elapsed time at which the block was received; printed in
	// the commit progress line.
	Now time.Duration
}
// NewBlockCollector creates a BlockCollector that confirms each block once
// `threshold` of the `totalP` observed peers have delivered it and, when
// finishflag is set, closes finishCh after totalTx transactions confirm.
// It rejects non-positive or inconsistent threshold/total combinations.
func NewBlockCollector(threshold int, totalP int,
	ctx context.Context,
	blockCh chan *AddressedBlock,
	finishCh chan struct{},
	totalTx int,
	printResult bool,
	logger *log.Logger,
	once *sync.Once, finishflag bool) (*BlockCollector, error) {
	// Validate the peer arithmetic before allocating anything.
	if threshold <= 0 || totalP <= 0 {
		return nil, errors.New("threshold and total must be greater than zero")
	}
	if threshold > totalP {
		return nil, errors.Errorf("threshold [%d] must be less than or equal to total [%d]", threshold, totalP)
	}
	collector := &BlockCollector{
		thresholdP:  threshold,
		totalP:      totalP,
		totalTx:     totalTx,
		registry:    make(map[uint64]*bitmap.BitMap),
		ctx:         ctx,
		blockCh:     blockCh,
		finishCh:    finishCh,
		printResult: printResult,
		logger:      logger,
		once:        once,
		finishflag:  finishflag,
	}
	return collector, nil
}
// Start drains blockCh, folding each incoming block into the collector,
// until the collector's context is cancelled.
func (bc *BlockCollector) Start() {
	for {
		select {
		case <-bc.ctx.Done():
			return
		case b := <-bc.blockCh:
			bc.commit(b)
		}
	}
}
// TODO This function contains too many functions and needs further optimization
// commit commits a block to collector.
// If the number of peers on which this block has been committed has satisfied thresholdP,
// adds the number to the totalTx.
//
// NOTE(review): BlockCollector embeds sync.Mutex but commit never locks it;
// commit appears to be invoked only from Start's single goroutine — confirm
// before calling it from elsewhere.
func (bc *BlockCollector) commit(block *AddressedBlock) {
	// Only count down toward completion when a positive totalTx was configured.
	breakbynumber := true
	if bc.totalTx <= 0 {
		breakbynumber = false
	}
	bitMap, ok := bc.registry[block.Number]
	if !ok {
		// The block with Number is received for the first time
		b, err := bitmap.NewBitMap(bc.totalP)
		if err != nil {
			panic("Can not make new bitmap for BlockCollector" + err.Error())
		}
		bc.registry[block.Number] = &b
		bitMap = &b
	}
	// When the block from Address has been received before, return directly.
	if bitMap.Has(block.Address) {
		return
	}
	bitMap.Set(block.Address)
	cnt := bitMap.Count()
	// newly committed block just hits threshold
	if cnt == bc.thresholdP {
		if bc.printResult {
			// todo: logging
			// receive tx over threshold
			fmt.Printf("Time %8.2fs\tBlock %6d\tTx %6d\t \n", block.Now.Seconds(), block.Number, len(block.FilteredTransactions))
			// Finish the network-level commit span for every tx in the block
			// and record its end-to-end latency.
			for _, b := range block.FilteredBlock.FilteredTransactions {
				basic.LogEvent(bc.logger, b.Txid, "CommitAtPeersOverThreshold")
				tapeSpan := basic.GetGlobalSpan()
				tapeSpan.FinishWithMap(b.Txid, "", basic.COMMIT_AT_NETWORK)
				// if prometheus
				// report transaction readlatency with peer in label
				basic.GetLatencyMap().TransactionLatency(b.Txid)
			}
		}
		if breakbynumber {
			// Deduct this block's transactions from the remaining total and
			// signal completion exactly once when it reaches zero.
			bc.totalTx -= len(block.FilteredTransactions)
			if bc.totalTx <= 0 && bc.finishflag {
				// consider with multiple threads need close this channel, need a once here to avoid channel been closed in multiple times
				bc.once.Do(func() {
					close(bc.finishCh)
				})
			}
		}
	}
	// TODO issue176
	if cnt == bc.totalP {
		// committed on all peers, remove from registry
		// todo: logging
		// end of from peers
		// end of transaction creation
		delete(bc.registry, block.Number)
		for _, b := range block.FilteredBlock.FilteredTransactions {
			basic.LogEvent(bc.logger, b.Txid, "CommitAtPeers")
			tapeSpan := basic.GetGlobalSpan()
			tapeSpan.FinishWithMap(b.Txid, "", basic.COMMIT_AT_ALL_PEERS)
			if basic.GetMod() == infra.FULLPROCESS {
				tapeSpan.FinishWithMap(b.Txid, "", basic.TRANSACTION)
			}
		}
	}
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/endorsementObersver.go | pkg/infra/observer/endorsementObersver.go | package observer
import (
"fmt"
"sync"
"time"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
log "github.com/sirupsen/logrus"
)
// EndorseObserver counts endorsed transaction envelopes as they arrive.
type EndorseObserver struct {
	Envs chan *basic.TracingEnvelope // endorsed envelopes to be counted
	// n is the number of envelopes after which finishCh is closed;
	// n <= 0 means count forever without finishing.
	n        int
	logger   *log.Logger
	Now      time.Time // observation start instant, set by Start
	once     *sync.Once
	finishCh chan struct{}
}
// CreateEndorseObserver constructs an EndorseObserver that counts endorsed
// envelopes arriving on Envs and closes finishCh after N of them (N <= 0
// disables the limit). The observation start time is set later, by Start.
func CreateEndorseObserver(Envs chan *basic.TracingEnvelope, N int, finishCh chan struct{}, once *sync.Once, logger *log.Logger) *EndorseObserver {
	observer := &EndorseObserver{
		finishCh: finishCh,
		once:     once,
		logger:   logger,
		n:        N,
		Envs:     Envs,
	}
	return observer
}
// Start loops forever reading endorsed envelopes, finishing each one's
// start-of-transaction tracing span and printing a progress line. When a
// positive limit o.n was configured, finishCh is closed (exactly once) as
// soon as that many envelopes have been seen, and Start returns.
func (o *EndorseObserver) Start() {
	o.Now = time.Now()
	o.logger.Debugf("start observer for endorsement")
	for count := 0; ; {
		e := <-o.Envs
		basic.GetGlobalSpan().FinishWithMap(e.TxId, "", basic.TRANSACTIONSTART)
		count++
		fmt.Printf("Time %8.2fs\tTx %6d Processed\n", time.Since(o.Now).Seconds(), count)
		if o.n > 0 && count == o.n {
			// Guarded by once: multiple observers may share finishCh.
			o.once.Do(func() {
				close(o.finishCh)
			})
			return
		}
	}
}
// GetTime returns the instant at which Start began observing (zero until
// Start has been called).
func (o *EndorseObserver) GetTime() time.Time {
	return o.Now
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/benchmark_test.go | pkg/infra/observer/benchmark_test.go | //go:build !race
// +build !race
package observer_test
import (
"context"
"sync"
"testing"
"github.com/opentracing/opentracing-go"
"github.com/hyperledger-twgc/tape/e2e/mock"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/hyperledger-twgc/tape/pkg/infra/observer"
"github.com/hyperledger-twgc/tape/pkg/infra/trafficGenerator"
"github.com/google/uuid"
"github.com/hyperledger/fabric-protos-go-apiv2/peer"
log "github.com/sirupsen/logrus"
)
// StartProposer creates a proposer for the peer at addr and starts it in a
// goroutine, streaming signed proposals from `signed` to `processed`.
//
// NOTE(review): the threshold parameter is accepted but never used in this
// body — confirm whether callers still need it.
// NOTE(review): closer.Close() runs when StartProposer returns, while
// Proposer.Start keeps running in its goroutine — verify the tracer remains
// usable after the closer is closed.
func StartProposer(ctx context.Context, signed, processed chan *basic.Elements, logger *log.Logger, threshold int, addr string) {
	peer := basic.Node{
		Addr: addr,
	}
	tr, closer := basic.Init("test")
	defer closer.Close()
	opentracing.SetGlobalTracer(tr)
	rule := `
package tape
default allow = false
allow {
1 == 1
}
`
	Proposer, _ := trafficGenerator.CreateProposer(peer, logger, rule)
	go Proposer.Start(ctx, signed, processed)
}
// benchmarkNPeer measures proposal endorsement throughput against
// `concurrency` mock peers, each fed by its own proposer goroutine, and
// waits until b.N proposals have come back on the shared processed channel.
func benchmarkNPeer(concurrency int, b *testing.B) {
	processed := make(chan *basic.Elements, 10)
	signeds := make([]chan *basic.Elements, concurrency)
	ctx, cancel := context.WithCancel(context.Background())
	logger := log.New()
	defer cancel()
	for i := 0; i < concurrency; i++ {
		signeds[i] = make(chan *basic.Elements, 10)
		mockpeer, err := mock.NewServer(1, nil)
		if err != nil {
			b.Fatal(err)
		}
		mockpeer.Start()
		// NOTE(review): defer inside a loop — every mock server stays up
		// until the whole benchmark function returns.
		defer mockpeer.Stop()
		StartProposer(ctx, signeds[i], processed, logger, concurrency, mockpeer.PeersAddresses()[0])
	}
	b.ReportAllocs()
	b.ResetTimer()
	// Producer: fan one signed proposal (with fresh tracing spans) out to
	// every proposer's input channel, b.N times.
	go func() {
		for i := 0; i < b.N; i++ {
			uuid, _ := uuid.NewRandom()
			span := opentracing.GlobalTracer().StartSpan("start transaction process", opentracing.Tag{Key: "txid", Value: uuid.String()})
			ed_span := opentracing.GlobalTracer().StartSpan("endorsement", opentracing.Tag{Key: "txid", Value: uuid.String()})
			data := &basic.Elements{SignedProp: &peer.SignedProposal{}, TxId: uuid.String(), Span: span, EndorsementSpan: ed_span}
			for _, s := range signeds {
				s <- data
			}
		}
	}()
	// Consumer: stop timing once b.N processed responses have been observed.
	var n int
	for n < b.N {
		<-processed
		n++
	}
	b.StopTimer()
}
// Endorsement benchmarks at increasing peer counts; each delegates to
// benchmarkNPeer with a different level of proposer concurrency.
func BenchmarkPeerEndorsement1(b *testing.B) { benchmarkNPeer(1, b) }
func BenchmarkPeerEndorsement2(b *testing.B) { benchmarkNPeer(2, b) }
func BenchmarkPeerEndorsement4(b *testing.B) { benchmarkNPeer(4, b) }
func BenchmarkPeerEndorsement8(b *testing.B) { benchmarkNPeer(8, b) }
// benchmarkAsyncCollector measures BlockCollector throughput with
// `concurrent` simulated peers, each delivering b.N single-tx blocks;
// the collector's threshold equals the peer count, so every block must be
// seen by all peers before it counts.
func benchmarkAsyncCollector(concurrent int, b *testing.B) {
	block := make(chan *observer.AddressedBlock, 100)
	done := make(chan struct{})
	logger := log.New()
	var once sync.Once
	// printResult=false keeps benchmark output clean; totalTx=b.N makes the
	// collector close `done` after all blocks confirm.
	instance, _ := observer.NewBlockCollector(concurrent, concurrent, context.Background(), block, done, b.N, false, logger, &once, true)
	go instance.Start()
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < concurrent; i++ {
		go func(idx int) {
			for j := 0; j < b.N; j++ {
				uuid, _ := uuid.NewRandom()
				FilteredTransactions := make([]*peer.FilteredTransaction, 0)
				FilteredTransactions = append(FilteredTransactions, &peer.FilteredTransaction{Txid: uuid.String()})
				data := &observer.AddressedBlock{Address: idx, FilteredBlock: &peer.FilteredBlock{Number: uint64(j), FilteredTransactions: FilteredTransactions}}
				block <- data
			}
		}(i)
	}
	<-done
	b.StopTimer()
}
// Collector benchmarks at increasing simulated peer counts; each delegates
// to benchmarkAsyncCollector.
func BenchmarkAsyncCollector1(b *testing.B)  { benchmarkAsyncCollector(1, b) }
func BenchmarkAsyncCollector2(b *testing.B)  { benchmarkAsyncCollector(2, b) }
func BenchmarkAsyncCollector4(b *testing.B)  { benchmarkAsyncCollector(4, b) }
func BenchmarkAsyncCollector8(b *testing.B)  { benchmarkAsyncCollector(8, b) }
func BenchmarkAsyncCollector16(b *testing.B) { benchmarkAsyncCollector(16, b) }
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/observerFactory.go | pkg/infra/observer/observerFactory.go | package observer
import (
"context"
"sync"
"github.com/hyperledger-twgc/tape/pkg/infra"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// ObserverFactory builds the observer workers appropriate for a given tape
// run mode (endorsement-only, commit-only, or full process).
type ObserverFactory struct {
	config   basic.Config
	crypto   infra.Crypto
	blockCh  chan *AddressedBlock
	logger   *log.Logger
	ctx      context.Context
	finishCh chan struct{}
	// num and parallel together determine the expected transaction total
	// (num * parallel) the observers wait for.
	num      int
	parallel int
	envs     chan *basic.TracingEnvelope
	errorCh  chan error
}
// NewObserverFactory wires together the inputs needed to build observer
// workers for the various tape run modes.
func NewObserverFactory(config basic.Config,
	crypto infra.Crypto,
	blockCh chan *AddressedBlock,
	logger *log.Logger,
	ctx context.Context,
	finishCh chan struct{},
	num, parallel int,
	envs chan *basic.TracingEnvelope,
	errorCh chan error) *ObserverFactory {
	// Use field names rather than a positional literal so that adding or
	// reordering ObserverFactory fields cannot silently mis-assign values.
	return &ObserverFactory{
		config:   config,
		crypto:   crypto,
		blockCh:  blockCh,
		logger:   logger,
		ctx:      ctx,
		finishCh: finishCh,
		num:      num,
		parallel: parallel,
		envs:     envs,
		errorCh:  errorCh,
	}
}
// CreateObserverWorkers dispatches to the mode-specific worker builder.
// Unknown modes fall back to the full-process worker set, same as
// infra.OBSERVER.
func (of *ObserverFactory) CreateObserverWorkers(mode int) ([]infra.Worker, infra.ObserverWorker, error) {
	switch mode {
	case infra.ENDORSEMENT:
		return of.CreateEndorsementObserverWorkers()
	case infra.COMMIT:
		return of.CreateCommitObserverWorkers()
	case infra.OBSERVER:
		fallthrough
	default:
		return of.CreateFullProcessObserverWorkers()
	}
}
// CreateFullProcessObserverWorkers assembles the full-process worker set:
// a block collector (which does close finishCh), the per-peer observers,
// and a commit observer on the orderer (finishflag=false, so it never
// closes finishCh itself). The "6" below is the original author's mode
// marker — presumably an infra mode constant value; confirm against the
// infra package.
// 6
func (of *ObserverFactory) CreateFullProcessObserverWorkers() ([]infra.Worker, infra.ObserverWorker, error) {
	observer_workers := make([]infra.Worker, 0)
	// Total expected transactions across all parallel senders.
	total := of.parallel * of.num
	// The same once is shared by the collector and the commit observer so
	// finishCh can only ever be closed a single time.
	var once sync.Once
	blockCollector, err := NewBlockCollector(of.config.CommitThreshold, len(of.config.Committers), of.ctx, of.blockCh, of.finishCh, total, true, of.logger, &once, true)
	if err != nil {
		return observer_workers, nil, errors.Wrap(err, "failed to create block collector")
	}
	observer_workers = append(observer_workers, blockCollector)
	observers, err := CreateObservers(of.ctx, of.crypto, of.errorCh, of.blockCh, of.config, of.logger)
	if err != nil {
		return observer_workers, observers, err
	}
	observer_workers = append(observer_workers, observers)
	cryptoImpl, err := of.config.LoadCrypto()
	if err != nil {
		return observer_workers, observers, err
	}
	EndorseObserverWorker, err := CreateCommitObserver(of.config.Channel,
		of.config.Orderer,
		cryptoImpl,
		of.logger,
		total,
		of.config,
		of.errorCh,
		of.finishCh,
		&once,
		false)
	if err != nil {
		// NOTE(review): this return drops the partially-built worker slice,
		// unlike the earlier error paths — confirm callers don't rely on it.
		return nil, nil, err
	}
	observer_workers = append(observer_workers, EndorseObserverWorker)
	return observer_workers, observers, nil
}
// CreateEndorsementObserverWorkers builds the worker set for
// endorsement-only mode: a single EndorseObserver that waits for
// num*parallel endorsed envelopes before closing finishCh.
func (of *ObserverFactory) CreateEndorsementObserverWorkers() ([]infra.Worker, infra.ObserverWorker, error) {
	var once sync.Once
	endorseObserver := CreateEndorseObserver(of.envs, of.num*of.parallel, of.finishCh, &once, of.logger)
	return []infra.Worker{endorseObserver}, endorseObserver, nil
}
// CreateCommitObserverWorkers builds the worker set for commit-only mode:
// a single CommitObserver watching the orderer, which closes finishCh
// (finishflag=true) once num*parallel transactions have been committed.
func (of *ObserverFactory) CreateCommitObserverWorkers() ([]infra.Worker, infra.ObserverWorker, error) {
	workers := make([]infra.Worker, 0)
	cryptoImpl, err := of.config.LoadCrypto()
	if err != nil {
		return workers, nil, err
	}
	var once sync.Once
	commitObserver, err := CreateCommitObserver(
		of.config.Channel,
		of.config.Orderer,
		cryptoImpl,
		of.logger,
		of.parallel*of.num,
		of.config,
		of.errorCh,
		of.finishCh,
		&once,
		true)
	if err != nil {
		return nil, nil, err
	}
	workers = append(workers, commitObserver)
	return workers, commitObserver, nil
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/endorsementObersver_test.go | pkg/infra/observer/endorsementObersver_test.go | package observer_test
import (
"sync"
"github.com/hyperledger-twgc/tape/pkg/infra/basic"
"github.com/hyperledger-twgc/tape/pkg/infra/observer"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
// Ginkgo specs for EndorseObserver: verify finishCh closes exactly when the
// configured envelope limit is reached, and never closes when the limit is 0.
// NOTE(review): the first two It descriptions are identical ("Should work
// with number limit") — consider renaming one for clearer failure output.
var _ = Describe("EndorsementObersver", func() {
	BeforeEach(func() {
		log.New()
	})
	It("Should work with number limit", func() {
		envs := make(chan *basic.TracingEnvelope, 1024)
		finishCh := make(chan struct{})
		logger := log.New()
		var once sync.Once
		// Limit of 2: one envelope must not finish, the second must.
		instance := observer.CreateEndorseObserver(envs, 2, finishCh, &once, logger)
		go instance.Start()
		envs <- &basic.TracingEnvelope{}
		Consistently(finishCh).ShouldNot(BeClosed())
		envs <- &basic.TracingEnvelope{}
		Eventually(finishCh).Should(BeClosed())
	})
	It("Should work with number limit", func() {
		envs := make(chan *basic.TracingEnvelope, 1024)
		finishCh := make(chan struct{})
		logger := log.New()
		var once sync.Once
		// Limit of 1: a single envelope finishes the observer.
		instance := observer.CreateEndorseObserver(envs, 1, finishCh, &once, logger)
		go instance.Start()
		envs <- &basic.TracingEnvelope{}
		Eventually(finishCh).Should(BeClosed())
	})
	It("Should work without number limit", func() {
		envs := make(chan *basic.TracingEnvelope, 1024)
		finishCh := make(chan struct{})
		logger := log.New()
		var once sync.Once
		// Limit of 0 disables finishing: finishCh must stay open.
		instance := observer.CreateEndorseObserver(envs, 0, finishCh, &once, logger)
		go instance.Start()
		envs <- &basic.TracingEnvelope{}
		Consistently(finishCh).ShouldNot(BeClosed())
		envs <- &basic.TracingEnvelope{}
		Eventually(finishCh).ShouldNot(BeClosed())
	})
})
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/block_collector_test.go | pkg/infra/observer/block_collector_test.go | package observer_test
import (
"context"
"sync"
"github.com/hyperledger-twgc/tape/pkg/infra/observer"
"github.com/google/uuid"
"github.com/hyperledger/fabric-protos-go-apiv2/peer"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
// newAddressedBlock fabricates a filtered block from peer `addr` with number
// `blockNum`, carrying a single transaction with a random txid.
func newAddressedBlock(addr int, blockNum uint64) *observer.AddressedBlock {
	txid, _ := uuid.NewRandom()
	txs := []*peer.FilteredTransaction{{Txid: txid.String()}}
	return &observer.AddressedBlock{
		Address:       addr,
		FilteredBlock: &peer.FilteredBlock{Number: blockNum, FilteredTransactions: txs},
	}
}
// Ginkgo specs for BlockCollector: threshold/observer combinations,
// duplicate-block handling, constructor validation, and concurrent delivery.
var _ = Describe("BlockCollector", func() {
	var logger *log.Logger
	BeforeEach(func() {
		logger = log.New()
	})
	Context("Async Commit", func() {
		It("should work with threshold 1 and observer 1", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			// totalTx=2: two distinct blocks (one tx each) are needed to finish.
			instance, err := observer.NewBlockCollector(1, 1, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			block <- newAddressedBlock(0, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(0, 1)
			Eventually(done).Should(BeClosed())
		})
		It("should work with threshold 1 and observer 2", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(1, 2, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			block <- newAddressedBlock(0, 0)
			Consistently(done).ShouldNot(BeClosed())
			// Same block from the second peer must not count again.
			block <- newAddressedBlock(1, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(0, 1)
			Eventually(done).Should(BeClosed())
			// The collector keeps draining blocks even after finishing.
			select {
			case block <- newAddressedBlock(1, 1):
			default:
				Fail("Block collector should still be able to consume blocks")
			}
		})
		It("should work with threshold 4 and observer 4", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(4, 4, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			// Each block only counts once all 4 peers have delivered it.
			block <- newAddressedBlock(0, 1)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(1, 1)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(2, 1)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(3, 1)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(0, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(1, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(2, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(3, 0)
			Eventually(done).Should(BeClosed())
		})
		It("should work with threshold 2 and observer 4", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(2, 4, context.Background(), block, done, 1, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			block <- newAddressedBlock(0, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(1, 0)
			Eventually(done).Should(BeClosed())
		})
		// Pending spec (PIt): documents intended dedup-counting behavior that
		// is not asserted yet.
		PIt("should not count tx for repeated block", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(1, 1, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			block <- newAddressedBlock(0, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(0, 0)
			Consistently(done).ShouldNot(BeClosed())
			block <- newAddressedBlock(0, 1)
			Eventually(done).Should(BeClosed())
		})
		It("should return err when threshold is greater than total", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(2, 1, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).Should(MatchError("threshold [2] must be less than or equal to total [1]"))
			Expect(instance).Should(BeNil())
		})
		It("should return err when threshold or total is zero", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(0, 1, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).Should(MatchError("threshold and total must be greater than zero"))
			Expect(instance).Should(BeNil())
			instance, err = observer.NewBlockCollector(1, 0, context.Background(), block, done, 2, false, logger, &once, true)
			Expect(err).Should(MatchError("threshold and total must be greater than zero"))
			Expect(instance).Should(BeNil())
		})
		It("Should supports parallel committers", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(100, 100, context.Background(), block, done, 1, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			// 100 goroutines deliver the same block number concurrently.
			var wg sync.WaitGroup
			wg.Add(100)
			for i := 0; i < 100; i++ {
				go func(idx int) {
					defer wg.Done()
					block <- newAddressedBlock(idx, 0)
				}(i)
			}
			wg.Wait()
			Eventually(done).Should(BeClosed())
		})
		It("Should supports threshold 3 and observer 5 as parallel committers", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(3, 5, context.Background(), block, done, 10, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			for i := 0; i < 3; i++ {
				go func(idx int) {
					for j := 0; j < 10; j++ {
						block <- newAddressedBlock(idx, uint64(j))
					}
				}(i)
			}
			Eventually(done).Should(BeClosed())
		})
		It("Should supports threshold 5 and observer 5 as parallel committers", func() {
			block := make(chan *observer.AddressedBlock)
			done := make(chan struct{})
			var once sync.Once
			instance, err := observer.NewBlockCollector(5, 5, context.Background(), block, done, 10, false, logger, &once, true)
			Expect(err).NotTo(HaveOccurred())
			go instance.Start()
			for i := 0; i < 5; i++ {
				go func(idx int) {
					for j := 0; j < 10; j++ {
						block <- newAddressedBlock(idx, uint64(j))
					}
				}(i)
			}
			Eventually(done).Should(BeClosed())
		})
	})
})
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Hyperledger-TWGC/tape | https://github.com/Hyperledger-TWGC/tape/blob/ef65cc6c14e3fdf5d47d919d2be880c1817efb56/pkg/infra/observer/observer_suite_test.go | pkg/infra/observer/observer_suite_test.go | package observer_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestObserver wires Ginkgo into the standard `go test` runner and runs the
// Observer suite.
func TestObserver(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Observer Suite")
}
| go | Apache-2.0 | ef65cc6c14e3fdf5d47d919d2be880c1817efb56 | 2026-01-07T10:05:21.922186Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.