Mirror of https://github.com/grafana/grafana.git (synced 2025-08-02 11:42:32 +08:00)
Provisioning: refactor and test migrate job (#103976)
* Rename and make folder migrator public
* Move skipRepo out of tree
* Move folder logic out of worker
* Use wrapper function for clone
* Introduce storage swapper
* Use RepositoryResources interface
* Add TODO
* Signature with option
* Use plural for folders
* Split more logic out of worker
* Refactor further
* Remove todo
* Refactor further
* Test worker
* Add test for unified storage
* Add mock for bulk store
* Clean line
* Test more about storage
* Happy path storage test
* Finish storage tests
* Add more interfaces
* Add some tests legacy
* Complete test coverage legacy
* Complete coverage legacy
* Fix compilation
* Add initial folders tests
* Finish coverage for folders
* Test namespace cleaner
* Fix typo
* Add some tests for resources
* Finish coverage migrate package
* Update tests
* Fix jobs tests
* Unit test users
* Convert user tests to table tests
* Fix linting in tests
* Fix typo
* Use PreloadAllUserInfo
* Add FIXME
Committed by GitHub
Parent: e93fc9c003
Commit: 08316103b5
@@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"

	"github.com/grafana/grafana/pkg/apimachinery/utils"
	provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
@@ -23,9 +24,17 @@ func ExportFolders(ctx context.Context, repoName string, options provisioning.Ex
		if tree.Count() >= resources.MaxNumberOfFolders {
			return errors.New("too many folders")
		}
		meta, err := utils.MetaAccessor(item)
		if err != nil {
			return fmt.Errorf("extract meta accessor: %w", err)
		}

		// FIXME: repoName should be part of skip folder export
		return tree.AddUnstructured(item, repoName)
		manager, _ := meta.GetManagerProperties()
		if manager.Identity == repoName {
			return nil // skip it... already in tree?
		}

		return tree.AddUnstructured(item)
	}); err != nil {
		return fmt.Errorf("load folder tree: %w", err)
	}
@@ -6,6 +6,7 @@ import (
	"fmt"
	"testing"

	"github.com/grafana/grafana/pkg/apimachinery/utils"
	v0alpha1 "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
@@ -13,8 +14,10 @@ import (
	mock "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	dynamicfake "k8s.io/client-go/dynamic/fake"
	k8testing "k8s.io/client-go/testing"
)
@@ -336,13 +339,11 @@ func TestExportFolders(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			scheme := runtime.NewScheme()
			require.NoError(t, metav1.AddMetaToScheme(scheme))
			listGVK := schema.GroupVersionKind{
				Group:   resources.FolderResource.Group,
				Version: resources.FolderResource.Version,
				Kind:    "FolderList",
			}

			scheme.AddKnownTypeWithName(listGVK, &metav1.PartialObjectMetadataList{})
			scheme.AddKnownTypeWithName(schema.GroupVersionKind{
				Group: resources.FolderResource.Group,
@@ -377,3 +378,135 @@ func TestExportFolders(t *testing.T) {
		})
	}
}

func TestFolderMetaAccessor(t *testing.T) {
	t.Run("should export folders from another manager", func(t *testing.T) {
		obj := &unstructured.Unstructured{
			Object: map[string]interface{}{
				"metadata": map[string]interface{}{
					"name": "test-folder",
					"annotations": map[string]interface{}{
						"folder.grafana.app/uid": "test-folder-uid",
					},
				},
			},
		}
		meta, err := utils.MetaAccessor(obj)
		require.NoError(t, err)
		meta.SetManagerProperties(utils.ManagerProperties{
			Kind:        utils.ManagerKindRepo,
			Identity:    "other-manager",
			AllowsEdits: true,
			Suspended:   false,
		})
		fakeFolderClient := &mockDynamicInterface{
			items: []unstructured.Unstructured{*obj},
		}

		mockRepoResources := resources.NewMockRepositoryResources(t)
		mockRepoResources.On("EnsureFolderTreeExists", mock.Anything, "feature/branch", "grafana", mock.MatchedBy(func(tree resources.FolderTree) bool {
			return tree.Count() == 1
		}), mock.MatchedBy(func(fn func(folder resources.Folder, created bool, err error) error) bool {
			require.NoError(t, fn(resources.Folder{ID: "test-folder-uid", Path: "grafana/test-folder"}, true, nil))
			return true
		})).Return(nil)

		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, mock.Anything).Return()
		progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
			return result.Action == repository.FileActionCreated &&
				result.Name == "test-folder-uid" &&
				result.Error == nil &&
				result.Path == "grafana/test-folder"
		})).Return()
		progress.On("TooManyErrors").Return(nil)
		err = ExportFolders(context.Background(), "test-repo", v0alpha1.ExportJobOptions{
			Path:   "grafana",
			Branch: "feature/branch",
		}, fakeFolderClient, mockRepoResources, progress)

		require.NoError(t, err)

		mockRepoResources.AssertExpectations(t)
		progress.AssertExpectations(t)
	})
	t.Run("should skip if repo is the manager", func(t *testing.T) {
		obj := &unstructured.Unstructured{
			Object: map[string]interface{}{
				"metadata": map[string]interface{}{
					"name": "test-folder",
					"annotations": map[string]interface{}{
						"folder.grafana.app/uid": "test-folder-uid",
					},
				},
			},
		}
		meta, err := utils.MetaAccessor(obj)
		require.NoError(t, err)
		meta.SetManagerProperties(utils.ManagerProperties{
			Kind:        utils.ManagerKindRepo,
			Identity:    "test-repo",
			AllowsEdits: true,
			Suspended:   false,
		})
		fakeFolderClient := &mockDynamicInterface{
			items: []unstructured.Unstructured{*obj},
		}

		mockRepoResources := resources.NewMockRepositoryResources(t)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, mock.Anything).Return().Twice()
		mockRepoResources.On("EnsureFolderTreeExists", mock.Anything, "feature/branch", "grafana", mock.Anything, mock.Anything).Return(nil)

		err = ExportFolders(context.Background(), "test-repo", v0alpha1.ExportJobOptions{
			Path:   "grafana",
			Branch: "feature/branch",
		}, fakeFolderClient, mockRepoResources, progress)

		require.NoError(t, err)
		mockRepoResources.AssertExpectations(t)
		progress.AssertExpectations(t)
	})
	t.Run("should fail with invalid meta accessor", func(t *testing.T) {
		t.Skip("skipping this test for now as we cannot make it invalid")

		obj := &unstructured.Unstructured{
			Object: map[string]interface{}{
				// make it invalid
			},
		}
		fakeFolderClient := &mockDynamicInterface{
			items: []unstructured.Unstructured{*obj},
		}

		mockRepoResources := resources.NewMockRepositoryResources(t)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, mock.Anything).Return().Twice()
		err := ExportFolders(context.Background(), "test-repo", v0alpha1.ExportJobOptions{
			Path:   "grafana",
			Branch: "feature/branch",
		}, fakeFolderClient, mockRepoResources, progress)

		require.Error(t, err)
		require.Contains(t, err.Error(), "extract meta accessor")
		mockRepoResources.AssertExpectations(t)
		progress.AssertExpectations(t)
	})
}

// mockDynamicInterface implements a simplified version of the dynamic.ResourceInterface
type mockDynamicInterface struct {
	dynamic.ResourceInterface
	items       []unstructured.Unstructured
	deleteError error
}

func (m *mockDynamicInterface) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	return &unstructured.UnstructuredList{
		Items: m.items,
	}, nil
}

func (m *mockDynamicInterface) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error {
	return m.deleteError
}
@@ -97,7 +97,7 @@ func (r *ExportWorker) Process(ctx context.Context, repo repository.Repository,
		return errors.New("export job submitted targeting repository that is not a ReaderWriter")
	}

	repositoryResources, err := r.repositoryResources.Client(ctx, rw)
	repositoryResources, err := r.repositoryResources.Client(ctx, rw, resources.RepositoryResourcesOptions{})
	if err != nil {
		return fmt.Errorf("create repository resource client: %w", err)
	}
@@ -96,6 +96,7 @@ func TestExportWorker_ProcessWriteNotAllowed(t *testing.T) {
	err := r.Process(context.Background(), mockRepo, job, nil)
	require.EqualError(t, err, "this repository is read only")
}

func TestExportWorker_ProcessBranchNotAllowedForLocal(t *testing.T) {
	job := v0alpha1.Job{
		Spec: v0alpha1.JobSpec{
@@ -248,7 +249,7 @@ func TestExportWorker_ProcessRepositoryResourcesError(t *testing.T) {
	mockClients.On("Clients", context.Background(), "test-namespace").Return(resourceClients, nil)

	mockRepoResources := resources.NewMockRepositoryResourcesFactory(t)
	mockRepoResources.On("Client", context.Background(), mockRepo).Return(nil, fmt.Errorf("failed to create repository resources client"))
	mockRepoResources.On("Client", context.Background(), mockRepo, resources.RepositoryResourcesOptions{}).Return(nil, fmt.Errorf("failed to create repository resources client"))

	mockProgress := jobs.NewMockJobProgressRecorder(t)
	mockCloneFn := NewMockWrapWithCloneFn(t)
@@ -291,7 +292,7 @@ func TestExportWorker_ProcessCloneAndPushOptions(t *testing.T) {

	mockRepoResources := resources.NewMockRepositoryResourcesFactory(t)
	mockRepoResourcesClient := resources.NewMockRepositoryResources(t)
	mockRepoResources.On("Client", mock.Anything, mock.Anything).Return(mockRepoResourcesClient, nil)
	mockRepoResources.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResourcesClient, nil)

	mockExportFn := NewMockExportFn(t)
	mockExportFn.On("Execute", mock.Anything, "test-repo", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
@@ -342,7 +343,7 @@ func TestExportWorker_ProcessExportFnError(t *testing.T) {

	mockRepoResources := resources.NewMockRepositoryResourcesFactory(t)
	mockRepoResourcesClient := resources.NewMockRepositoryResources(t)
	mockRepoResources.On("Client", mock.Anything, mock.Anything).Return(mockRepoResourcesClient, nil)
	mockRepoResources.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResourcesClient, nil)

	mockExportFn := NewMockExportFn(t)
	mockExportFn.On("Execute", mock.Anything, "test-repo", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("export failed"))
pkg/registry/apis/provisioning/jobs/migrate/clean.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package migrate

import (
	"context"
	"fmt"

	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

//go:generate mockery --name NamespaceCleaner --structname MockNamespaceCleaner --inpackage --filename mock_namespace_cleaner.go --with-expecter
type NamespaceCleaner interface {
	Clean(ctx context.Context, namespace string, progress jobs.JobProgressRecorder) error
}

type namespaceCleaner struct {
	clients resources.ClientFactory
}

func NewNamespaceCleaner(clients resources.ClientFactory) NamespaceCleaner {
	return &namespaceCleaner{clients: clients}
}

func (c *namespaceCleaner) Clean(ctx context.Context, namespace string, progress jobs.JobProgressRecorder) error {
	clients, err := c.clients.Clients(ctx, namespace)
	if err != nil {
		return fmt.Errorf("get clients: %w", err)
	}

	for _, kind := range resources.SupportedProvisioningResources {
		progress.SetMessage(ctx, fmt.Sprintf("remove unprovisioned %s", kind.Resource))
		client, _, err := clients.ForResource(kind)
		if err != nil {
			return fmt.Errorf("get resource client: %w", err)
		}

		if err = resources.ForEach(ctx, client, func(item *unstructured.Unstructured) error {
			result := jobs.JobResourceResult{
				Name:     item.GetName(),
				Resource: item.GetKind(),
				Group:    item.GroupVersionKind().Group,
				Action:   repository.FileActionDeleted,
			}

			if err := client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}); err != nil {
				result.Error = err
				progress.Record(ctx, result)
				return fmt.Errorf("delete resource: %w", err)
			}

			progress.Record(ctx, result)
			return nil
		}); err != nil {
			return err
		}
	}

	return nil
}
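For context, a minimal sketch of how this cleaner might be driven from the migrate job (the surrounding wiring is assumed here, not part of this diff): it walks every supported provisioning resource in the namespace and deletes whatever is left over before the import runs.

	// Hypothetical wiring; clientFactory and recorder stand in for the real
	// resources.ClientFactory and jobs.JobProgressRecorder owned by the worker.
	cleaner := NewNamespaceCleaner(clientFactory)
	if err := cleaner.Clean(ctx, "default", recorder); err != nil {
		return fmt.Errorf("clean namespace before migration: %w", err)
	}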
pkg/registry/apis/provisioning/jobs/migrate/clean_test.go (new file, 216 lines)
@@ -0,0 +1,216 @@
package migrate

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
)

type mockClients struct {
	mock.Mock
}

func (m *mockClients) ForResource(gvr schema.GroupVersionResource) (dynamic.ResourceInterface, schema.GroupVersionKind, error) {
	args := m.Called(gvr)
	var ri dynamic.ResourceInterface
	if args.Get(0) != nil {
		ri = args.Get(0).(dynamic.ResourceInterface)
	}
	return ri, args.Get(1).(schema.GroupVersionKind), args.Error(2)
}

func (m *mockClients) ForKind(gvk schema.GroupVersionKind) (dynamic.ResourceInterface, schema.GroupVersionResource, error) {
	args := m.Called(gvk)
	var ri dynamic.ResourceInterface
	if args.Get(0) != nil {
		ri = args.Get(0).(dynamic.ResourceInterface)
	}
	return ri, args.Get(1).(schema.GroupVersionResource), args.Error(2)
}

func (m *mockClients) Folder() (dynamic.ResourceInterface, error) {
	args := m.Called()
	var ri dynamic.ResourceInterface
	if args.Get(0) != nil {
		ri = args.Get(0).(dynamic.ResourceInterface)
	}
	return ri, args.Error(1)
}

func (m *mockClients) User() (dynamic.ResourceInterface, error) {
	args := m.Called()
	var ri dynamic.ResourceInterface
	if args.Get(0) != nil {
		ri = args.Get(0).(dynamic.ResourceInterface)
	}
	return ri, args.Error(1)
}

func TestNamespaceCleaner_Clean(t *testing.T) {
	t.Run("should fail when getting clients fails", func(t *testing.T) {
		mockClientFactory := resources.NewMockClientFactory(t)
		mockClientFactory.On("Clients", mock.Anything, "test-namespace").
			Return(nil, errors.New("failed to get clients"))

		cleaner := NewNamespaceCleaner(mockClientFactory)
		progress := jobs.NewMockJobProgressRecorder(t)

		err := cleaner.Clean(context.Background(), "test-namespace", progress)
		require.Error(t, err)
		require.Contains(t, err.Error(), "get clients: failed to get clients")

		mockClientFactory.AssertExpectations(t)
	})

	t.Run("should fail when getting resource client fails", func(t *testing.T) {
		clients := &mockClients{}
		clients.On("ForResource", resources.SupportedProvisioningResources[0]).
			Return(nil, schema.GroupVersionKind{}, errors.New("failed to get resource client"))

		mockClientFactory := resources.NewMockClientFactory(t)
		mockClientFactory.On("Clients", mock.Anything, "test-namespace").
			Return(clients, nil)

		cleaner := NewNamespaceCleaner(mockClientFactory)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, mock.Anything).Return()

		err := cleaner.Clean(context.Background(), "test-namespace", progress)
		require.Error(t, err)
		require.Contains(t, err.Error(), "get resource client: failed to get resource client")

		mockClientFactory.AssertExpectations(t)
		clients.AssertExpectations(t)
		progress.AssertExpectations(t)
	})

	t.Run("should fail when delete operation fails", func(t *testing.T) {
		// Create a mock dynamic client that returns a list with one item
		mockDynamicClient := &mockDynamicInterface{
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"apiVersion": "folder.grafana.app/v1alpha1",
						"kind": "Folder",
						"metadata": map[string]interface{}{
							"name": "test-folder",
						},
					},
				},
			},
			deleteError: errors.New("delete failed"),
		}

		clients := &mockClients{}
		clients.On("ForResource", mock.Anything).
			Return(mockDynamicClient, schema.GroupVersionKind{}, nil)

		mockClientFactory := resources.NewMockClientFactory(t)
		mockClientFactory.On("Clients", mock.Anything, "test-namespace").
			Return(clients, nil)

		cleaner := NewNamespaceCleaner(mockClientFactory)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, mock.Anything).Return()
		progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
			return result.Action == repository.FileActionDeleted &&
				result.Name == "test-folder" &&
				result.Error != nil &&
				result.Error.Error() == "delete failed"
		})).Return()

		err := cleaner.Clean(context.Background(), "test-namespace", progress)
		require.Error(t, err)
		require.Contains(t, err.Error(), "delete resource: delete failed")

		mockClientFactory.AssertExpectations(t)
		clients.AssertExpectations(t)
		progress.AssertExpectations(t)
	})

	t.Run("should successfully clean namespace", func(t *testing.T) {
		// Create a mock dynamic client that returns a list with multiple items
		mockDynamicClient := &mockDynamicInterface{
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"apiVersion": "folder.grafana.app/v1alpha1",
						"kind": "Folder",
						"metadata": map[string]interface{}{
							"name": "folder-1",
						},
					},
				},
				{
					Object: map[string]interface{}{
						"apiVersion": "dashboard.grafana.app/v1alpha1",
						"kind": "Dashboard",
						"metadata": map[string]interface{}{
							"name": "dashboard-1",
						},
					},
				},
			},
		}

		clients := &mockClients{}
		clients.On("ForResource", mock.Anything).
			Return(mockDynamicClient, schema.GroupVersionKind{}, nil)

		mockClientFactory := resources.NewMockClientFactory(t)
		mockClientFactory.On("Clients", mock.Anything, "test-namespace").
			Return(clients, nil)

		cleaner := NewNamespaceCleaner(mockClientFactory)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, "remove unprovisioned folders").Return()
		progress.On("SetMessage", mock.Anything, "remove unprovisioned dashboards").Return()

		// Expect two successful deletions
		progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
			return result.Action == repository.FileActionDeleted &&
				result.Name == "dashboard-1" &&
				result.Error == nil
		})).Return()
		progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
			return result.Action == repository.FileActionDeleted &&
				result.Name == "folder-1" &&
				result.Error == nil
		})).Return()

		err := cleaner.Clean(context.Background(), "test-namespace", progress)
		require.NoError(t, err)

		mockClientFactory.AssertExpectations(t)
		clients.AssertExpectations(t)
		progress.AssertExpectations(t)
	})
}

// mockDynamicInterface implements a simplified version of the dynamic.ResourceInterface
type mockDynamicInterface struct {
	dynamic.ResourceInterface
	items       []unstructured.Unstructured
	deleteError error
}

func (m *mockDynamicInterface) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	return &unstructured.UnstructuredList{
		Items: m.items,
	}, nil
}

func (m *mockDynamicInterface) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error {
	return m.deleteError
}
@@ -9,6 +9,8 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
	"github.com/grafana/grafana/pkg/storage/unified/parquet"
	"github.com/grafana/grafana/pkg/storage/unified/resource"
@@ -16,36 +18,36 @@ import (

const maxFolders = 10000

var _ resource.BulkResourceWriter = (*legacyFolderReader)(nil)

type legacyFolderReader struct {
	tree           resources.FolderTree
	repoName       string
	legacyMigrator legacy.LegacyMigrator
	namespace      string
//go:generate mockery --name LegacyFoldersMigrator --structname MockLegacyFoldersMigrator --inpackage --filename mock_legacy_folders_migrator.go --with-expecter
type LegacyFoldersMigrator interface {
	resource.BulkResourceWriter
	Migrate(ctx context.Context, namespace string, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder) error
}

func NewLegacyFolderReader(legacyMigrator legacy.LegacyMigrator, repoName, namespace string) *legacyFolderReader {
	return &legacyFolderReader{
type legacyFoldersMigrator struct {
	tree           resources.FolderTree
	legacyMigrator legacy.LegacyMigrator
}

func NewLegacyFoldersMigrator(legacyMigrator legacy.LegacyMigrator) LegacyFoldersMigrator {
	return &legacyFoldersMigrator{
		legacyMigrator: legacyMigrator,
		repoName:       repoName,
		namespace:      namespace,
		tree:           resources.NewEmptyFolderTree(),
	}
}

// Close implements resource.BulkResourceWrite.
func (f *legacyFolderReader) Close() error {
func (f *legacyFoldersMigrator) Close() error {
	return nil
}

// CloseWithResults implements resource.BulkResourceWrite.
func (f *legacyFolderReader) CloseWithResults() (*resource.BulkResponse, error) {
func (f *legacyFoldersMigrator) CloseWithResults() (*resource.BulkResponse, error) {
	return &resource.BulkResponse{}, nil
}

// Write implements resource.BulkResourceWrite.
func (f *legacyFolderReader) Write(ctx context.Context, key *resource.ResourceKey, value []byte) error {
func (f *legacyFoldersMigrator) Write(ctx context.Context, key *resource.ResourceKey, value []byte) error {
	item := &unstructured.Unstructured{}
	err := item.UnmarshalJSON(value)
	if err != nil {
@@ -56,18 +58,41 @@ func (f *legacyFolderReader) Write(ctx context.Context, key *resource.ResourceKe
		return errors.New("too many folders")
	}

	return f.tree.AddUnstructured(item, f.repoName)
	// TODO: should we check if managed already and abort migration?

	return f.tree.AddUnstructured(item)
}

func (f *legacyFolderReader) Read(ctx context.Context, legacyMigrator legacy.LegacyMigrator, name, namespace string) error {
	_, err := legacyMigrator.Migrate(ctx, legacy.MigrateOptions{
func (f *legacyFoldersMigrator) Migrate(ctx context.Context, namespace string, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder) error {
	progress.SetMessage(ctx, "read folders from SQL")
	if _, err := f.legacyMigrator.Migrate(ctx, legacy.MigrateOptions{
		Namespace: namespace,
		Resources: []schema.GroupResource{resources.FolderResource.GroupResource()},
		Store:     parquet.NewBulkResourceWriterClient(f),
	})
	return err
}
	}); err != nil {
		return fmt.Errorf("read folders from SQL: %w", err)
	}

func (f *legacyFolderReader) Tree() resources.FolderTree {
	return f.tree
	progress.SetMessage(ctx, "export folders from SQL")
	if err := repositoryResources.EnsureFolderTreeExists(ctx, "", "", f.tree, func(folder resources.Folder, created bool, err error) error {
		result := jobs.JobResourceResult{
			Action:   repository.FileActionCreated,
			Name:     folder.ID,
			Resource: resources.FolderResource.Resource,
			Group:    resources.FolderResource.Group,
			Path:     folder.Path,
			Error:    err,
		}

		if !created {
			result.Action = repository.FileActionIgnored
		}

		progress.Record(ctx, result)
		return nil
	}); err != nil {
		return fmt.Errorf("export folders from SQL: %w", err)
	}

	return nil
}
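The hunk above replaces the old legacyFolderReader with a LegacyFoldersMigrator that both implements resource.BulkResourceWriter and drives the folder export itself. A minimal sketch of how a caller might use it, with hypothetical names standing in for the real dependencies owned by the worker:

	// Hypothetical wiring; legacyMigrator, repoResources and recorder stand in
	// for the real legacy.LegacyMigrator, resources.RepositoryResources and
	// jobs.JobProgressRecorder instances.
	folders := NewLegacyFoldersMigrator(legacyMigrator)
	if err := folders.Migrate(ctx, "default", repoResources, recorder); err != nil {
		return fmt.Errorf("migrate folders: %w", err)
	}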
pkg/registry/apis/provisioning/jobs/migrate/folders_test.go (new file, 265 lines)
@@ -0,0 +1,265 @@
package migrate

import (
	"context"
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
	"github.com/grafana/grafana/pkg/storage/unified/resource"
)

func TestLegacyFoldersMigrator_Write(t *testing.T) {
	t.Run("should fail when json is invalid", func(t *testing.T) {
		migrator := NewLegacyFoldersMigrator(legacy.NewMockLegacyMigrator(t))
		err := migrator.Write(context.Background(), nil, []byte("invalid json"))
		require.Error(t, err)
		require.Contains(t, err.Error(), "unmarshal unstructured to JSON")
	})

	t.Run("should fail when too many folders", func(t *testing.T) {
		migrator := NewLegacyFoldersMigrator(legacy.NewMockLegacyMigrator(t))

		// Write more than maxFolders
		for i := 0; i <= maxFolders+1; i++ {
			folder := &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "folder.grafana.app/v1alpha1",
					"kind": "Folder",
					"metadata": map[string]interface{}{
						"name": fmt.Sprintf("test-folder-%d", i),
					},
				},
			}
			folder.SetKind("Folder")
			folder.SetAPIVersion("folder.grafana.app/v1alpha1")

			data, err := folder.MarshalJSON()
			require.NoError(t, err)
			if i == maxFolders+1 {
				err = migrator.Write(context.Background(), nil, data)
				require.Error(t, err)
				require.Equal(t, "too many folders", err.Error())
				return
			}
			err = migrator.Write(context.Background(), nil, data)
			require.NoError(t, err)
		}
	})

	t.Run("should add folder to tree", func(t *testing.T) {
		migrator := NewLegacyFoldersMigrator(legacy.NewMockLegacyMigrator(t))
		folder := &unstructured.Unstructured{
			Object: map[string]interface{}{
				"apiVersion": "folder.grafana.app/v1alpha1",
				"kind": "Folder",
				"metadata": map[string]interface{}{
					"name": "test-folder",
					"annotations": map[string]interface{}{
						"folder.grafana.app/uid": "test-folder-uid",
					},
				},
			},
		}
		folder.SetKind("Folder")
		folder.SetAPIVersion("folder.grafana.app/v1alpha1")

		data, err := folder.MarshalJSON()
		require.NoError(t, err)

		err = migrator.Write(context.Background(), nil, data)
		require.NoError(t, err)
	})
}

func TestLegacyFoldersMigrator_Migrate(t *testing.T) {
	t.Run("should fail when legacy migrator fails", func(t *testing.T) {
		mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
		mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
			return opts.Namespace == "test-namespace" &&
				len(opts.Resources) == 1 &&
				opts.Resources[0] == resources.FolderResource.GroupResource()
		}), mock.Anything).Return(nil, errors.New("migration failed"))

		migrator := NewLegacyFoldersMigrator(mockLegacyMigrator)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, "read folders from SQL").Return()

		err := migrator.Migrate(context.Background(), "test-namespace", nil, progress)
		require.Error(t, err)
		require.Contains(t, err.Error(), "read folders from SQL: migration failed")
		progress.AssertExpectations(t)
	})

	t.Run("should fail when folder tree creation fails", func(t *testing.T) {
		mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
		mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
			return opts.Namespace == "test-namespace" &&
				len(opts.Resources) == 1 &&
				opts.Resources[0] == resources.FolderResource.GroupResource()
		}), mock.Anything).Return(&resource.BulkResponse{}, nil)

		mockRepositoryResources := resources.NewMockRepositoryResources(t)
		mockRepositoryResources.On("EnsureFolderTreeExists", mock.Anything, "", "", mock.Anything, mock.Anything).
			Return(errors.New("folder tree creation failed"))

		migrator := NewLegacyFoldersMigrator(mockLegacyMigrator)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, "read folders from SQL").Return()
		progress.On("SetMessage", mock.Anything, "export folders from SQL").Return()

		err := migrator.Migrate(context.Background(), "test-namespace", mockRepositoryResources, progress)
		require.Error(t, err)
		require.Contains(t, err.Error(), "export folders from SQL: folder tree creation failed")

		progress.AssertExpectations(t)
	})

	t.Run("should successfully migrate folders", func(t *testing.T) {
		mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
		mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
			return opts.Namespace == "test-namespace" &&
				len(opts.Resources) == 1 &&
				opts.Resources[0] == resources.FolderResource.GroupResource()
		}), mock.Anything).Run(func(args mock.Arguments) {
			// Simulate writing a folder through the bulk writer
			opts := args.Get(1).(legacy.MigrateOptions)
			folder := &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "folder.grafana.app/v1alpha1",
					"kind": "Folder",
					"metadata": map[string]interface{}{
						"name": "test-folder",
						"annotations": map[string]interface{}{
							"folder.grafana.app/uid": "test-folder-uid",
						},
					},
				},
			}
			folder.SetKind("Folder")
			folder.SetAPIVersion("folder.grafana.app/v1alpha1")

			data, err := folder.MarshalJSON()
			require.NoError(t, err)
			client, err := opts.Store.BulkProcess(context.Background())
			require.NoError(t, err)
			require.NoError(t, client.Send(&resource.BulkRequest{
				Key:   &resource.ResourceKey{Namespace: "test-namespace", Name: "test-folder"},
				Value: data,
			}))
		}).Return(&resource.BulkResponse{}, nil)

		mockRepositoryResources := resources.NewMockRepositoryResources(t)
		mockRepositoryResources.On("EnsureFolderTreeExists", mock.Anything, "", "", mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				callback := args.Get(4).(func(folder resources.Folder, created bool, err error) error)
				err := callback(resources.Folder{
					ID:   "test-folder-uid",
					Path: "/test-folder",
				}, true, nil)
				require.NoError(t, err)
			}).Return(nil)

		migrator := NewLegacyFoldersMigrator(mockLegacyMigrator)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, "read folders from SQL").Return()
		progress.On("SetMessage", mock.Anything, "export folders from SQL").Return()
		progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
			return result.Action == repository.FileActionCreated &&
				result.Name == "test-folder-uid" &&
				result.Resource == resources.FolderResource.Resource &&
				result.Group == resources.FolderResource.Group &&
				result.Path == "/test-folder" &&
				result.Error == nil
		})).Return()

		err := migrator.Migrate(context.Background(), "test-namespace", mockRepositoryResources, progress)
		require.NoError(t, err)
		progress.AssertExpectations(t)
	})
	t.Run("should ignore folders that already exist", func(t *testing.T) {
		mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
		mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
			return opts.Namespace == "test-namespace" &&
				len(opts.Resources) == 1 &&
				opts.Resources[0] == resources.FolderResource.GroupResource()
		}), mock.Anything).Run(func(args mock.Arguments) {
			// Simulate writing a folder through the bulk writer
			opts := args.Get(1).(legacy.MigrateOptions)
			folder := &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "folder.grafana.app/v1alpha1",
					"kind": "Folder",
					"metadata": map[string]interface{}{
						"name": "test-folder",
						"annotations": map[string]interface{}{
							"folder.grafana.app/uid": "test-folder-uid",
						},
					},
				},
			}
			folder.SetKind("Folder")
			folder.SetAPIVersion("folder.grafana.app/v1alpha1")

			data, err := folder.MarshalJSON()
			require.NoError(t, err)
			client, err := opts.Store.BulkProcess(context.Background())
			require.NoError(t, err)
			require.NoError(t, client.Send(&resource.BulkRequest{
				Key:   &resource.ResourceKey{Namespace: "test-namespace", Name: "test-folder"},
				Value: data,
			}))
		}).Return(&resource.BulkResponse{}, nil)

		mockRepositoryResources := resources.NewMockRepositoryResources(t)
		mockRepositoryResources.On("EnsureFolderTreeExists", mock.Anything, "", "", mock.Anything, mock.Anything).
			Run(func(args mock.Arguments) {
				callback := args.Get(4).(func(folder resources.Folder, created bool, err error) error)
				err := callback(resources.Folder{
					ID:   "test-folder-uid",
					Path: "/test-folder",
				}, false, nil)
				require.NoError(t, err)
			}).Return(nil)

		migrator := NewLegacyFoldersMigrator(mockLegacyMigrator)
		progress := jobs.NewMockJobProgressRecorder(t)
		progress.On("SetMessage", mock.Anything, "read folders from SQL").Return()
		progress.On("SetMessage", mock.Anything, "export folders from SQL").Return()
		progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
			return result.Action == repository.FileActionIgnored &&
				result.Name == "test-folder-uid" &&
				result.Resource == resources.FolderResource.Resource &&
				result.Group == resources.FolderResource.Group &&
				result.Path == "/test-folder" &&
				result.Error == nil
		})).Return()

		err := migrator.Migrate(context.Background(), "test-namespace", mockRepositoryResources, progress)
		require.NoError(t, err)
		progress.AssertExpectations(t)
	})
}

func TestLegacyFoldersMigrator_Close(t *testing.T) {
	t.Run("should close without error", func(t *testing.T) {
		migrator := NewLegacyFoldersMigrator(legacy.NewMockLegacyMigrator(t))
		err := migrator.Close()
		require.NoError(t, err)
	})

	t.Run("should close with results without error", func(t *testing.T) {
		migrator := NewLegacyFoldersMigrator(legacy.NewMockLegacyMigrator(t))
		resp, err := migrator.CloseWithResults()
		require.NoError(t, err)
		require.NotNil(t, resp)
	})
}
pkg/registry/apis/provisioning/jobs/migrate/legacy.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package migrate

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/grafana/grafana-app-sdk/logging"
	provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
)

type LegacyMigrator struct {
	legacyMigrator  LegacyResourcesMigrator
	storageSwapper  StorageSwapper
	syncWorker      jobs.Worker
	wrapWithCloneFn WrapWithCloneFn
}

func NewLegacyMigrator(
	legacyMigrator LegacyResourcesMigrator,
	storageSwapper StorageSwapper,
	syncWorker jobs.Worker,
	wrapWithCloneFn WrapWithCloneFn,
) *LegacyMigrator {
	return &LegacyMigrator{
		legacyMigrator:  legacyMigrator,
		storageSwapper:  storageSwapper,
		syncWorker:      syncWorker,
		wrapWithCloneFn: wrapWithCloneFn,
	}
}

func (m *LegacyMigrator) Migrate(ctx context.Context, rw repository.ReaderWriter, options provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
	namespace := rw.Config().Namespace

	reader, writer := io.Pipe()
	go func() {
		scanner := bufio.NewScanner(reader)
		for scanner.Scan() {
			progress.SetMessage(ctx, scanner.Text())
		}
	}()

	cloneOptions := repository.CloneOptions{
		PushOnWrites: options.History,
		// TODO: make this configurable
		Timeout:  10 * time.Minute,
		Progress: writer,
		BeforeFn: func() error {
			progress.SetMessage(ctx, "clone repository")
			return nil
		},
	}
	pushOptions := repository.PushOptions{
		// TODO: make this configurable
		Timeout:  10 * time.Minute,
		Progress: writer,
		BeforeFn: func() error {
			progress.SetMessage(ctx, "push changes")
			return nil
		},
	}

	if err := m.wrapWithCloneFn(ctx, rw, cloneOptions, pushOptions, func(repo repository.Repository, cloned bool) error {
		rw, ok := repo.(repository.ReaderWriter)
		if !ok {
			return errors.New("migration job submitted targeting repository that is not a ReaderWriter")
		}

		return m.legacyMigrator.Migrate(ctx, rw, namespace, options, progress)
	}); err != nil {
		return fmt.Errorf("migrate from SQL: %w", err)
	}

	progress.SetMessage(ctx, "resetting unified storage")
	if err := m.storageSwapper.WipeUnifiedAndSetMigratedFlag(ctx, namespace); err != nil {
		return fmt.Errorf("unable to reset unified storage %w", err)
	}

	// Reset the results after the export as pull will operate on the same resources
	progress.ResetResults()

	// Delegate the import to a sync (from the already checked out go-git repository!)
	progress.SetMessage(ctx, "pulling resources")
	if err := m.syncWorker.Process(ctx, rw, provisioning.Job{
		Spec: provisioning.JobSpec{
			Pull: &provisioning.SyncJobOptions{
				Incremental: false,
			},
		},
	}, progress); err != nil { // this will have an error when too many errors exist
		progress.SetMessage(ctx, "error importing resources, reverting")
		if e2 := m.storageSwapper.StopReadingUnifiedStorage(ctx); e2 != nil {
			logger := logging.FromContext(ctx)
			logger.Warn("error trying to revert dual write settings after an error", "err", err)
		}
		return err
	}

	return nil
}
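Read together, the migrate flow is: export legacy resources into the cloned repository, wipe unified storage and set the migrated flag, then delegate the import to a full (non-incremental) pull; a sync failure triggers StopReadingUnifiedStorage as a best-effort revert. A minimal sketch of invoking it, with hypothetical names standing in for the real constructor arguments:

	// Hypothetical wiring; the arguments stand in for the real
	// LegacyResourcesMigrator, StorageSwapper, sync jobs.Worker and clone wrapper.
	migrator := NewLegacyMigrator(resourcesMigrator, storageSwapper, syncWorker, wrapWithClone)
	if err := migrator.Migrate(ctx, readerWriter, provisioning.MigrateJobOptions{History: true}, recorder); err != nil {
		// On failure after the storage swap, Migrate already attempted to
		// revert to reading from legacy storage.
		return fmt.Errorf("migrate repository: %w", err)
	}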
pkg/registry/apis/provisioning/jobs/migrate/legacy_test.go (new file, 447 lines)
@@ -0,0 +1,447 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
)
|
||||
|
||||
func TestWrapWithCloneFn(t *testing.T) {
|
||||
t.Run("should return error when repository is not a ReaderWriter", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
// Create the wrapper function that matches WrapWithCloneFn signature
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
// pass a reader to function call
|
||||
repo := repository.NewMockReader(t)
|
||||
return fn(repo, true)
|
||||
}
|
||||
|
||||
legacyFoldersMigrator := NewLegacyMigrator(
|
||||
NewMockLegacyResourcesMigrator(t),
|
||||
NewMockStorageSwapper(t),
|
||||
jobs.NewMockWorker(t),
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
err := legacyFoldersMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
// Assert
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "migration job submitted targeting repository that is not a ReaderWriter")
|
||||
})
|
||||
}
|
||||
func TestWrapWithCloneFn_Error(t *testing.T) {
|
||||
t.Run("should return error when wrapFn fails", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
expectedErr := errors.New("clone failed")
|
||||
|
||||
// Create the wrapper function that returns an error
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
return expectedErr
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
NewMockLegacyResourcesMigrator(t),
|
||||
NewMockStorageSwapper(t),
|
||||
jobs.NewMockWorker(t),
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
|
||||
// Assert
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "migrate from SQL: clone failed")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyMigrator_MigrateFails(t *testing.T) {
|
||||
t.Run("should return error when legacyMigrator.Migrate fails", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
expectedErr := errors.New("migration failed")
|
||||
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything).
|
||||
Return(expectedErr)
|
||||
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
return fn(rw, true)
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
|
||||
// Assert
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "migrate from SQL: migration failed")
|
||||
|
||||
// Storage swapper should not be called when migration fails
|
||||
mockStorageSwapper.AssertNotCalled(t, "WipeUnifiedAndSetMigratedFlag")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyMigrator_ResetUnifiedStorageFails(t *testing.T) {
|
||||
t.Run("should return error when storage reset fails", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
expectedErr := errors.New("reset failed")
|
||||
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockStorageSwapper.On("WipeUnifiedAndSetMigratedFlag", mock.Anything, "test-namespace").
|
||||
Return(expectedErr)
|
||||
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
return fn(rw, true)
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
|
||||
// Assert
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "unable to reset unified storage")
|
||||
|
||||
// Sync worker should not be called when reset fails
|
||||
mockWorker.AssertNotCalled(t, "Process")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyMigrator_SyncFails(t *testing.T) {
|
||||
t.Run("should revert storage settings when sync fails", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
expectedErr := errors.New("sync failed")
|
||||
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockStorageSwapper.On("WipeUnifiedAndSetMigratedFlag", mock.Anything, "test-namespace").
|
||||
Return(nil)
|
||||
mockStorageSwapper.On("StopReadingUnifiedStorage", mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
mockWorker.On("Process", mock.Anything, mock.Anything, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Pull != nil && !job.Spec.Pull.Incremental
|
||||
}), mock.Anything).Return(expectedErr)
|
||||
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
return fn(rw, true)
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
progress.On("ResetResults").Return()
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
|
||||
// Assert
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "sync failed")
|
||||
|
||||
// Verify storage settings were reverted
|
||||
mockStorageSwapper.AssertCalled(t, "StopReadingUnifiedStorage", mock.Anything)
|
||||
})
|
||||
|
||||
t.Run("should handle revert failure after sync failure", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
syncErr := errors.New("sync failed")
|
||||
revertErr := errors.New("revert failed")
|
||||
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockStorageSwapper.On("WipeUnifiedAndSetMigratedFlag", mock.Anything, "test-namespace").
|
||||
Return(nil)
|
||||
mockStorageSwapper.On("StopReadingUnifiedStorage", mock.Anything).
|
||||
Return(revertErr)
|
||||
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
mockWorker.On("Process", mock.Anything, mock.Anything, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Pull != nil && !job.Spec.Pull.Incremental
|
||||
}), mock.Anything).Return(syncErr)
|
||||
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
return fn(rw, true)
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
progress.On("ResetResults").Return()
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
|
||||
// Assert
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "sync failed")
|
||||
|
||||
// Verify both errors occurred
|
||||
mockStorageSwapper.AssertCalled(t, "StopReadingUnifiedStorage", mock.Anything)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyMigrator_Success(t *testing.T) {
|
||||
t.Run("should complete migration successfully", func(t *testing.T) {
|
||||
// Setup
|
||||
ctx := context.Background()
|
||||
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockStorageSwapper.On("WipeUnifiedAndSetMigratedFlag", mock.Anything, "test-namespace").
|
||||
Return(nil)
|
||||
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
mockWorker.On("Process", mock.Anything, mock.Anything, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Pull != nil && !job.Spec.Pull.Incremental
|
||||
}), mock.Anything).Return(nil)
|
||||
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
return fn(rw, true)
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
progress.On("ResetResults").Return()
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(ctx, repo, provisioning.MigrateJobOptions{}, progress)
|
||||
|
||||
// Assert
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify all expected operations were called in order
|
||||
mockLegacyMigrator.AssertCalled(t, "Migrate", mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything)
|
||||
mockStorageSwapper.AssertCalled(t, "WipeUnifiedAndSetMigratedFlag", mock.Anything, "test-namespace")
|
||||
mockWorker.AssertCalled(t, "Process", mock.Anything, mock.Anything, mock.Anything, mock.Anything)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyMigrator_BeforeFnExecution(t *testing.T) {
|
||||
t.Run("should execute beforeFn functions", func(t *testing.T) {
|
||||
// Setup
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
if clone.BeforeFn != nil {
|
||||
if err := clone.BeforeFn(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if push.BeforeFn != nil {
|
||||
if err := push.BeforeFn(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("abort test here")
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, "clone repository").Return()
|
||||
progress.On("SetMessage", mock.Anything, "push changes").Return()
|
||||
|
||||
// Execute
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(context.Background(), repo, provisioning.MigrateJobOptions{}, progress)
|
||||
require.EqualError(t, err, "migrate from SQL: abort test here")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyMigrator_ProgressScanner(t *testing.T) {
|
||||
t.Run("should update progress with scanner", func(t *testing.T) {
|
||||
mockLegacyMigrator := NewMockLegacyResourcesMigrator(t)
|
||||
mockStorageSwapper := NewMockStorageSwapper(t)
|
||||
mockWorker := jobs.NewMockWorker(t)
|
||||
|
||||
// Create a wrapper function that calls the provided function
|
||||
wrapFn := func(ctx context.Context, rw repository.Repository, clone repository.CloneOptions, push repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
if clone.Progress != nil {
|
||||
if _, err := clone.Progress.Write([]byte("clone repository\n")); err != nil {
|
||||
return fmt.Errorf("failed to write to clone progress in tests: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if push.Progress != nil {
|
||||
if _, err := push.Progress.Write([]byte("push changes\n")); err != nil {
|
||||
return fmt.Errorf("failed to write to push progress in tests: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("abort test here")
|
||||
}
|
||||
|
||||
legacyMigrator := NewLegacyMigrator(
|
||||
mockLegacyMigrator,
|
||||
mockStorageSwapper,
|
||||
mockWorker,
|
||||
wrapFn,
|
||||
)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, "clone repository").Return()
|
||||
progress.On("SetMessage", mock.Anything, "push changes").Return()
|
||||
|
||||
repo := repository.NewMockRepository(t)
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
|
||||
err := legacyMigrator.Migrate(context.Background(), repo, provisioning.MigrateJobOptions{}, progress)
|
||||
require.EqualError(t, err, "migrate from SQL: abort test here")
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
if len(progress.Calls) != 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
return progress.AssertExpectations(t)
|
||||
}, time.Second, 10*time.Millisecond)
|
||||
})
|
||||
}
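Each of the tests above declares its own in-line stand-in for the clone wrapper. The simplest variant, the pass-through used in the success case, could equally live as a named helper; a sketch of that (illustrative only, not part of this change):

// passThroughClone mirrors the wrapper signature stubbed in the tests above:
// it skips any real clone/push handling and simply invokes fn against the repo.
func passThroughClone(_ context.Context, rw repository.Repository, _ repository.CloneOptions, _ repository.PushOptions, fn func(repository.Repository, bool) error) error {
    return fn(rw, true)
}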
|
@ -0,0 +1,430 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
metadata "google.golang.org/grpc/metadata"
|
||||
|
||||
resource "github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
)
|
||||
|
||||
// BulkStore_BulkProcessClient is an autogenerated mock type for the BulkStore_BulkProcessClient type
|
||||
type BulkStore_BulkProcessClient struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type BulkStore_BulkProcessClient_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *BulkStore_BulkProcessClient) EXPECT() *BulkStore_BulkProcessClient_Expecter {
|
||||
return &BulkStore_BulkProcessClient_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// CloseAndRecv provides a mock function with no fields
|
||||
func (_m *BulkStore_BulkProcessClient) CloseAndRecv() (*resource.BulkResponse, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CloseAndRecv")
|
||||
}
|
||||
|
||||
var r0 *resource.BulkResponse
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func() (*resource.BulkResponse, error)); ok {
|
||||
return rf()
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func() *resource.BulkResponse); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*resource.BulkResponse)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_CloseAndRecv_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CloseAndRecv'
|
||||
type BulkStore_BulkProcessClient_CloseAndRecv_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// CloseAndRecv is a helper method to define mock.On call
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) CloseAndRecv() *BulkStore_BulkProcessClient_CloseAndRecv_Call {
|
||||
return &BulkStore_BulkProcessClient_CloseAndRecv_Call{Call: _e.mock.On("CloseAndRecv")}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_CloseAndRecv_Call) Run(run func()) *BulkStore_BulkProcessClient_CloseAndRecv_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_CloseAndRecv_Call) Return(_a0 *resource.BulkResponse, _a1 error) *BulkStore_BulkProcessClient_CloseAndRecv_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_CloseAndRecv_Call) RunAndReturn(run func() (*resource.BulkResponse, error)) *BulkStore_BulkProcessClient_CloseAndRecv_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// CloseSend provides a mock function with no fields
|
||||
func (_m *BulkStore_BulkProcessClient) CloseSend() error {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CloseSend")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_CloseSend_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CloseSend'
|
||||
type BulkStore_BulkProcessClient_CloseSend_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// CloseSend is a helper method to define mock.On call
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) CloseSend() *BulkStore_BulkProcessClient_CloseSend_Call {
|
||||
return &BulkStore_BulkProcessClient_CloseSend_Call{Call: _e.mock.On("CloseSend")}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_CloseSend_Call) Run(run func()) *BulkStore_BulkProcessClient_CloseSend_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_CloseSend_Call) Return(_a0 error) *BulkStore_BulkProcessClient_CloseSend_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_CloseSend_Call) RunAndReturn(run func() error) *BulkStore_BulkProcessClient_CloseSend_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Context provides a mock function with no fields
|
||||
func (_m *BulkStore_BulkProcessClient) Context() context.Context {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Context")
|
||||
}
|
||||
|
||||
var r0 context.Context
|
||||
if rf, ok := ret.Get(0).(func() context.Context); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(context.Context)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context'
|
||||
type BulkStore_BulkProcessClient_Context_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Context is a helper method to define mock.On call
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) Context() *BulkStore_BulkProcessClient_Context_Call {
|
||||
return &BulkStore_BulkProcessClient_Context_Call{Call: _e.mock.On("Context")}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Context_Call) Run(run func()) *BulkStore_BulkProcessClient_Context_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Context_Call) Return(_a0 context.Context) *BulkStore_BulkProcessClient_Context_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Context_Call) RunAndReturn(run func() context.Context) *BulkStore_BulkProcessClient_Context_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Header provides a mock function with no fields
|
||||
func (_m *BulkStore_BulkProcessClient) Header() (metadata.MD, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Header")
|
||||
}
|
||||
|
||||
var r0 metadata.MD
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func() (metadata.MD, error)); ok {
|
||||
return rf()
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func() metadata.MD); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(metadata.MD)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_Header_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Header'
|
||||
type BulkStore_BulkProcessClient_Header_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Header is a helper method to define mock.On call
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) Header() *BulkStore_BulkProcessClient_Header_Call {
|
||||
return &BulkStore_BulkProcessClient_Header_Call{Call: _e.mock.On("Header")}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Header_Call) Run(run func()) *BulkStore_BulkProcessClient_Header_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Header_Call) Return(_a0 metadata.MD, _a1 error) *BulkStore_BulkProcessClient_Header_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Header_Call) RunAndReturn(run func() (metadata.MD, error)) *BulkStore_BulkProcessClient_Header_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// RecvMsg provides a mock function with given fields: m
|
||||
func (_m *BulkStore_BulkProcessClient) RecvMsg(m interface{}) error {
|
||||
ret := _m.Called(m)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RecvMsg")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(interface{}) error); ok {
|
||||
r0 = rf(m)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_RecvMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecvMsg'
|
||||
type BulkStore_BulkProcessClient_RecvMsg_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// RecvMsg is a helper method to define mock.On call
|
||||
// - m interface{}
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) RecvMsg(m interface{}) *BulkStore_BulkProcessClient_RecvMsg_Call {
|
||||
return &BulkStore_BulkProcessClient_RecvMsg_Call{Call: _e.mock.On("RecvMsg", m)}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_RecvMsg_Call) Run(run func(m interface{})) *BulkStore_BulkProcessClient_RecvMsg_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(interface{}))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_RecvMsg_Call) Return(_a0 error) *BulkStore_BulkProcessClient_RecvMsg_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_RecvMsg_Call) RunAndReturn(run func(interface{}) error) *BulkStore_BulkProcessClient_RecvMsg_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Send provides a mock function with given fields: _a0
|
||||
func (_m *BulkStore_BulkProcessClient) Send(_a0 *resource.BulkRequest) error {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Send")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*resource.BulkRequest) error); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_Send_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Send'
|
||||
type BulkStore_BulkProcessClient_Send_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Send is a helper method to define mock.On call
|
||||
// - _a0 *resource.BulkRequest
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) Send(_a0 interface{}) *BulkStore_BulkProcessClient_Send_Call {
|
||||
return &BulkStore_BulkProcessClient_Send_Call{Call: _e.mock.On("Send", _a0)}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Send_Call) Run(run func(_a0 *resource.BulkRequest)) *BulkStore_BulkProcessClient_Send_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*resource.BulkRequest))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Send_Call) Return(_a0 error) *BulkStore_BulkProcessClient_Send_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Send_Call) RunAndReturn(run func(*resource.BulkRequest) error) *BulkStore_BulkProcessClient_Send_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SendMsg provides a mock function with given fields: m
|
||||
func (_m *BulkStore_BulkProcessClient) SendMsg(m interface{}) error {
|
||||
ret := _m.Called(m)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SendMsg")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(interface{}) error); ok {
|
||||
r0 = rf(m)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_SendMsg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendMsg'
|
||||
type BulkStore_BulkProcessClient_SendMsg_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SendMsg is a helper method to define mock.On call
|
||||
// - m interface{}
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) SendMsg(m interface{}) *BulkStore_BulkProcessClient_SendMsg_Call {
|
||||
return &BulkStore_BulkProcessClient_SendMsg_Call{Call: _e.mock.On("SendMsg", m)}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_SendMsg_Call) Run(run func(m interface{})) *BulkStore_BulkProcessClient_SendMsg_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(interface{}))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_SendMsg_Call) Return(_a0 error) *BulkStore_BulkProcessClient_SendMsg_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_SendMsg_Call) RunAndReturn(run func(interface{}) error) *BulkStore_BulkProcessClient_SendMsg_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Trailer provides a mock function with no fields
|
||||
func (_m *BulkStore_BulkProcessClient) Trailer() metadata.MD {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Trailer")
|
||||
}
|
||||
|
||||
var r0 metadata.MD
|
||||
if rf, ok := ret.Get(0).(func() metadata.MD); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(metadata.MD)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BulkStore_BulkProcessClient_Trailer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Trailer'
|
||||
type BulkStore_BulkProcessClient_Trailer_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Trailer is a helper method to define mock.On call
|
||||
func (_e *BulkStore_BulkProcessClient_Expecter) Trailer() *BulkStore_BulkProcessClient_Trailer_Call {
|
||||
return &BulkStore_BulkProcessClient_Trailer_Call{Call: _e.mock.On("Trailer")}
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Trailer_Call) Run(run func()) *BulkStore_BulkProcessClient_Trailer_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Trailer_Call) Return(_a0 metadata.MD) *BulkStore_BulkProcessClient_Trailer_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *BulkStore_BulkProcessClient_Trailer_Call) RunAndReturn(run func() metadata.MD) *BulkStore_BulkProcessClient_Trailer_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewBulkStore_BulkProcessClient creates a new instance of BulkStore_BulkProcessClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewBulkStore_BulkProcessClient(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *BulkStore_BulkProcessClient {
|
||||
mock := &BulkStore_BulkProcessClient{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
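A minimal sketch of priming this stream mock through the generated expecter API (the response value is an arbitrary example):

stream := NewBulkStore_BulkProcessClient(t)
stream.EXPECT().Send(mock.Anything).Return(nil)                      // accept any BulkRequest
stream.EXPECT().CloseAndRecv().Return(&resource.BulkResponse{}, nil) // finish with an empty response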
|
@ -0,0 +1,113 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
grpc "google.golang.org/grpc"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
resource "github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
)
|
||||
|
||||
// MockBulkStoreClient is an autogenerated mock type for the BulkStoreClient type
|
||||
type MockBulkStoreClient struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockBulkStoreClient_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockBulkStoreClient) EXPECT() *MockBulkStoreClient_Expecter {
|
||||
return &MockBulkStoreClient_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// BulkProcess provides a mock function with given fields: ctx, opts
|
||||
func (_m *MockBulkStoreClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error) {
|
||||
_va := make([]interface{}, len(opts))
|
||||
for _i := range opts {
|
||||
_va[_i] = opts[_i]
|
||||
}
|
||||
var _ca []interface{}
|
||||
_ca = append(_ca, ctx)
|
||||
_ca = append(_ca, _va...)
|
||||
ret := _m.Called(_ca...)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for BulkProcess")
|
||||
}
|
||||
|
||||
var r0 resource.BulkStore_BulkProcessClient
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error)); ok {
|
||||
return rf(ctx, opts...)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) resource.BulkStore_BulkProcessClient); ok {
|
||||
r0 = rf(ctx, opts...)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(resource.BulkStore_BulkProcessClient)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, ...grpc.CallOption) error); ok {
|
||||
r1 = rf(ctx, opts...)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockBulkStoreClient_BulkProcess_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BulkProcess'
|
||||
type MockBulkStoreClient_BulkProcess_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// BulkProcess is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - opts ...grpc.CallOption
|
||||
func (_e *MockBulkStoreClient_Expecter) BulkProcess(ctx interface{}, opts ...interface{}) *MockBulkStoreClient_BulkProcess_Call {
|
||||
return &MockBulkStoreClient_BulkProcess_Call{Call: _e.mock.On("BulkProcess",
|
||||
append([]interface{}{ctx}, opts...)...)}
|
||||
}
|
||||
|
||||
func (_c *MockBulkStoreClient_BulkProcess_Call) Run(run func(ctx context.Context, opts ...grpc.CallOption)) *MockBulkStoreClient_BulkProcess_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
variadicArgs := make([]grpc.CallOption, len(args)-1)
|
||||
for i, a := range args[1:] {
|
||||
if a != nil {
|
||||
variadicArgs[i] = a.(grpc.CallOption)
|
||||
}
|
||||
}
|
||||
run(args[0].(context.Context), variadicArgs...)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBulkStoreClient_BulkProcess_Call) Return(_a0 resource.BulkStore_BulkProcessClient, _a1 error) *MockBulkStoreClient_BulkProcess_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBulkStoreClient_BulkProcess_Call) RunAndReturn(run func(context.Context, ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error)) *MockBulkStoreClient_BulkProcess_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockBulkStoreClient creates a new instance of MockBulkStoreClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockBulkStoreClient(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockBulkStoreClient {
|
||||
mock := &MockBulkStoreClient{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
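The client mock pairs with the stream mock above; a hedged wiring example, assuming the code under test calls BulkProcess without extra call options:

client := NewMockBulkStoreClient(t)
client.EXPECT().BulkProcess(mock.Anything).Return(stream, nil) // hand back the primed stream mock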
|
@ -0,0 +1,240 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
jobs "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
resource "github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
|
||||
resources "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
)
|
||||
|
||||
// MockLegacyFoldersMigrator is an autogenerated mock type for the LegacyFoldersMigrator type
|
||||
type MockLegacyFoldersMigrator struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockLegacyFoldersMigrator_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockLegacyFoldersMigrator) EXPECT() *MockLegacyFoldersMigrator_Expecter {
|
||||
return &MockLegacyFoldersMigrator_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// Close provides a mock function with no fields
|
||||
func (_m *MockLegacyFoldersMigrator) Close() error {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Close")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockLegacyFoldersMigrator_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
|
||||
type MockLegacyFoldersMigrator_Close_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Close is a helper method to define mock.On call
|
||||
func (_e *MockLegacyFoldersMigrator_Expecter) Close() *MockLegacyFoldersMigrator_Close_Call {
|
||||
return &MockLegacyFoldersMigrator_Close_Call{Call: _e.mock.On("Close")}
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Close_Call) Run(run func()) *MockLegacyFoldersMigrator_Close_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Close_Call) Return(_a0 error) *MockLegacyFoldersMigrator_Close_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Close_Call) RunAndReturn(run func() error) *MockLegacyFoldersMigrator_Close_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// CloseWithResults provides a mock function with no fields
|
||||
func (_m *MockLegacyFoldersMigrator) CloseWithResults() (*resource.BulkResponse, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CloseWithResults")
|
||||
}
|
||||
|
||||
var r0 *resource.BulkResponse
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func() (*resource.BulkResponse, error)); ok {
|
||||
return rf()
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func() *resource.BulkResponse); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*resource.BulkResponse)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockLegacyFoldersMigrator_CloseWithResults_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CloseWithResults'
|
||||
type MockLegacyFoldersMigrator_CloseWithResults_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// CloseWithResults is a helper method to define mock.On call
|
||||
func (_e *MockLegacyFoldersMigrator_Expecter) CloseWithResults() *MockLegacyFoldersMigrator_CloseWithResults_Call {
|
||||
return &MockLegacyFoldersMigrator_CloseWithResults_Call{Call: _e.mock.On("CloseWithResults")}
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_CloseWithResults_Call) Run(run func()) *MockLegacyFoldersMigrator_CloseWithResults_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_CloseWithResults_Call) Return(_a0 *resource.BulkResponse, _a1 error) *MockLegacyFoldersMigrator_CloseWithResults_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_CloseWithResults_Call) RunAndReturn(run func() (*resource.BulkResponse, error)) *MockLegacyFoldersMigrator_CloseWithResults_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Migrate provides a mock function with given fields: ctx, namespace, repositoryResources, progress
|
||||
func (_m *MockLegacyFoldersMigrator) Migrate(ctx context.Context, namespace string, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder) error {
|
||||
ret := _m.Called(ctx, namespace, repositoryResources, progress)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Migrate")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, resources.RepositoryResources, jobs.JobProgressRecorder) error); ok {
|
||||
r0 = rf(ctx, namespace, repositoryResources, progress)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockLegacyFoldersMigrator_Migrate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Migrate'
|
||||
type MockLegacyFoldersMigrator_Migrate_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Migrate is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - namespace string
|
||||
// - repositoryResources resources.RepositoryResources
|
||||
// - progress jobs.JobProgressRecorder
|
||||
func (_e *MockLegacyFoldersMigrator_Expecter) Migrate(ctx interface{}, namespace interface{}, repositoryResources interface{}, progress interface{}) *MockLegacyFoldersMigrator_Migrate_Call {
|
||||
return &MockLegacyFoldersMigrator_Migrate_Call{Call: _e.mock.On("Migrate", ctx, namespace, repositoryResources, progress)}
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Migrate_Call) Run(run func(ctx context.Context, namespace string, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder)) *MockLegacyFoldersMigrator_Migrate_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string), args[2].(resources.RepositoryResources), args[3].(jobs.JobProgressRecorder))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Migrate_Call) Return(_a0 error) *MockLegacyFoldersMigrator_Migrate_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Migrate_Call) RunAndReturn(run func(context.Context, string, resources.RepositoryResources, jobs.JobProgressRecorder) error) *MockLegacyFoldersMigrator_Migrate_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Write provides a mock function with given fields: ctx, key, value
|
||||
func (_m *MockLegacyFoldersMigrator) Write(ctx context.Context, key *resource.ResourceKey, value []byte) error {
|
||||
ret := _m.Called(ctx, key, value)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Write")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *resource.ResourceKey, []byte) error); ok {
|
||||
r0 = rf(ctx, key, value)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockLegacyFoldersMigrator_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'
|
||||
type MockLegacyFoldersMigrator_Write_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Write is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - key *resource.ResourceKey
|
||||
// - value []byte
|
||||
func (_e *MockLegacyFoldersMigrator_Expecter) Write(ctx interface{}, key interface{}, value interface{}) *MockLegacyFoldersMigrator_Write_Call {
|
||||
return &MockLegacyFoldersMigrator_Write_Call{Call: _e.mock.On("Write", ctx, key, value)}
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Write_Call) Run(run func(ctx context.Context, key *resource.ResourceKey, value []byte)) *MockLegacyFoldersMigrator_Write_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*resource.ResourceKey), args[2].([]byte))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Write_Call) Return(_a0 error) *MockLegacyFoldersMigrator_Write_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyFoldersMigrator_Write_Call) RunAndReturn(run func(context.Context, *resource.ResourceKey, []byte) error) *MockLegacyFoldersMigrator_Write_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockLegacyFoldersMigrator creates a new instance of MockLegacyFoldersMigrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockLegacyFoldersMigrator(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockLegacyFoldersMigrator {
|
||||
mock := &MockLegacyFoldersMigrator{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
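In the resources tests below this mock is primed for the folder pass; the equivalent expecter form looks like this (the namespace is an example value):

folders := NewMockLegacyFoldersMigrator(t)
folders.EXPECT().
    Migrate(mock.Anything, "test-namespace", mock.Anything, mock.Anything).
    Return(nil)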
|
@ -0,0 +1,91 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
jobs "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
repository "github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
|
||||
v0alpha1 "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
)
|
||||
|
||||
// MockLegacyResourcesMigrator is an autogenerated mock type for the LegacyResourcesMigrator type
|
||||
type MockLegacyResourcesMigrator struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockLegacyResourcesMigrator_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockLegacyResourcesMigrator) EXPECT() *MockLegacyResourcesMigrator_Expecter {
|
||||
return &MockLegacyResourcesMigrator_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// Migrate provides a mock function with given fields: ctx, rw, namespace, opts, progress
|
||||
func (_m *MockLegacyResourcesMigrator) Migrate(ctx context.Context, rw repository.ReaderWriter, namespace string, opts v0alpha1.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
|
||||
ret := _m.Called(ctx, rw, namespace, opts, progress)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Migrate")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.ReaderWriter, string, v0alpha1.MigrateJobOptions, jobs.JobProgressRecorder) error); ok {
|
||||
r0 = rf(ctx, rw, namespace, opts, progress)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockLegacyResourcesMigrator_Migrate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Migrate'
|
||||
type MockLegacyResourcesMigrator_Migrate_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Migrate is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - rw repository.ReaderWriter
|
||||
// - namespace string
|
||||
// - opts v0alpha1.MigrateJobOptions
|
||||
// - progress jobs.JobProgressRecorder
|
||||
func (_e *MockLegacyResourcesMigrator_Expecter) Migrate(ctx interface{}, rw interface{}, namespace interface{}, opts interface{}, progress interface{}) *MockLegacyResourcesMigrator_Migrate_Call {
|
||||
return &MockLegacyResourcesMigrator_Migrate_Call{Call: _e.mock.On("Migrate", ctx, rw, namespace, opts, progress)}
|
||||
}
|
||||
|
||||
func (_c *MockLegacyResourcesMigrator_Migrate_Call) Run(run func(ctx context.Context, rw repository.ReaderWriter, namespace string, opts v0alpha1.MigrateJobOptions, progress jobs.JobProgressRecorder)) *MockLegacyResourcesMigrator_Migrate_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(repository.ReaderWriter), args[2].(string), args[3].(v0alpha1.MigrateJobOptions), args[4].(jobs.JobProgressRecorder))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyResourcesMigrator_Migrate_Call) Return(_a0 error) *MockLegacyResourcesMigrator_Migrate_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockLegacyResourcesMigrator_Migrate_Call) RunAndReturn(run func(context.Context, repository.ReaderWriter, string, v0alpha1.MigrateJobOptions, jobs.JobProgressRecorder) error) *MockLegacyResourcesMigrator_Migrate_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockLegacyResourcesMigrator creates a new instance of MockLegacyResourcesMigrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockLegacyResourcesMigrator(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockLegacyResourcesMigrator {
|
||||
mock := &MockLegacyResourcesMigrator{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
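The worker tests earlier in this change drive this mock through mock.On; the expecter API generated above offers a typed alternative:

legacyRes := NewMockLegacyResourcesMigrator(t)
legacyRes.EXPECT().
    Migrate(mock.Anything, mock.Anything, "test-namespace", mock.Anything, mock.Anything).
    Return(nil)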
|
90
pkg/registry/apis/provisioning/jobs/migrate/mock_migrator.go
Normal file
@ -0,0 +1,90 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
jobs "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
repository "github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
|
||||
v0alpha1 "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
)
|
||||
|
||||
// MockMigrator is an autogenerated mock type for the Migrator type
|
||||
type MockMigrator struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockMigrator_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockMigrator) EXPECT() *MockMigrator_Expecter {
|
||||
return &MockMigrator_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// Migrate provides a mock function with given fields: ctx, rw, opts, progress
|
||||
func (_m *MockMigrator) Migrate(ctx context.Context, rw repository.ReaderWriter, opts v0alpha1.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
|
||||
ret := _m.Called(ctx, rw, opts, progress)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Migrate")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.ReaderWriter, v0alpha1.MigrateJobOptions, jobs.JobProgressRecorder) error); ok {
|
||||
r0 = rf(ctx, rw, opts, progress)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockMigrator_Migrate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Migrate'
|
||||
type MockMigrator_Migrate_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Migrate is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - rw repository.ReaderWriter
|
||||
// - opts v0alpha1.MigrateJobOptions
|
||||
// - progress jobs.JobProgressRecorder
|
||||
func (_e *MockMigrator_Expecter) Migrate(ctx interface{}, rw interface{}, opts interface{}, progress interface{}) *MockMigrator_Migrate_Call {
|
||||
return &MockMigrator_Migrate_Call{Call: _e.mock.On("Migrate", ctx, rw, opts, progress)}
|
||||
}
|
||||
|
||||
func (_c *MockMigrator_Migrate_Call) Run(run func(ctx context.Context, rw repository.ReaderWriter, opts v0alpha1.MigrateJobOptions, progress jobs.JobProgressRecorder)) *MockMigrator_Migrate_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(repository.ReaderWriter), args[2].(v0alpha1.MigrateJobOptions), args[3].(jobs.JobProgressRecorder))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockMigrator_Migrate_Call) Return(_a0 error) *MockMigrator_Migrate_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockMigrator_Migrate_Call) RunAndReturn(run func(context.Context, repository.ReaderWriter, v0alpha1.MigrateJobOptions, jobs.JobProgressRecorder) error) *MockMigrator_Migrate_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockMigrator creates a new instance of MockMigrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockMigrator(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockMigrator {
|
||||
mock := &MockMigrator{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
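A short sketch of stubbing the high-level Migrator, for example in a driver test that only exercises the error path:

migrator := NewMockMigrator(t)
migrator.EXPECT().
    Migrate(mock.Anything, mock.Anything, mock.Anything, mock.Anything).
    Return(errors.New("migration failed")) // illustrative error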
|
@ -0,0 +1,85 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
jobs "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockNamespaceCleaner is an autogenerated mock type for the NamespaceCleaner type
|
||||
type MockNamespaceCleaner struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockNamespaceCleaner_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockNamespaceCleaner) EXPECT() *MockNamespaceCleaner_Expecter {
|
||||
return &MockNamespaceCleaner_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// Clean provides a mock function with given fields: ctx, namespace, progress
|
||||
func (_m *MockNamespaceCleaner) Clean(ctx context.Context, namespace string, progress jobs.JobProgressRecorder) error {
|
||||
ret := _m.Called(ctx, namespace, progress)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Clean")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, jobs.JobProgressRecorder) error); ok {
|
||||
r0 = rf(ctx, namespace, progress)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNamespaceCleaner_Clean_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Clean'
|
||||
type MockNamespaceCleaner_Clean_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Clean is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - namespace string
|
||||
// - progress jobs.JobProgressRecorder
|
||||
func (_e *MockNamespaceCleaner_Expecter) Clean(ctx interface{}, namespace interface{}, progress interface{}) *MockNamespaceCleaner_Clean_Call {
|
||||
return &MockNamespaceCleaner_Clean_Call{Call: _e.mock.On("Clean", ctx, namespace, progress)}
|
||||
}
|
||||
|
||||
func (_c *MockNamespaceCleaner_Clean_Call) Run(run func(ctx context.Context, namespace string, progress jobs.JobProgressRecorder)) *MockNamespaceCleaner_Clean_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string), args[2].(jobs.JobProgressRecorder))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNamespaceCleaner_Clean_Call) Return(_a0 error) *MockNamespaceCleaner_Clean_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNamespaceCleaner_Clean_Call) RunAndReturn(run func(context.Context, string, jobs.JobProgressRecorder) error) *MockNamespaceCleaner_Clean_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockNamespaceCleaner creates a new instance of MockNamespaceCleaner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockNamespaceCleaner(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockNamespaceCleaner {
|
||||
mock := &MockNamespaceCleaner{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
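Usage follows the same expecter pattern; for instance, asserting that cleanup targets the expected namespace:

cleaner := NewMockNamespaceCleaner(t)
cleaner.EXPECT().Clean(mock.Anything, "test-namespace", mock.Anything).Return(nil)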
|
@ -0,0 +1,129 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockStorageSwapper is an autogenerated mock type for the StorageSwapper type
|
||||
type MockStorageSwapper struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockStorageSwapper_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockStorageSwapper) EXPECT() *MockStorageSwapper_Expecter {
|
||||
return &MockStorageSwapper_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// StopReadingUnifiedStorage provides a mock function with given fields: ctx
|
||||
func (_m *MockStorageSwapper) StopReadingUnifiedStorage(ctx context.Context) error {
|
||||
ret := _m.Called(ctx)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for StopReadingUnifiedStorage")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
|
||||
r0 = rf(ctx)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStorageSwapper_StopReadingUnifiedStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StopReadingUnifiedStorage'
|
||||
type MockStorageSwapper_StopReadingUnifiedStorage_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// StopReadingUnifiedStorage is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
func (_e *MockStorageSwapper_Expecter) StopReadingUnifiedStorage(ctx interface{}) *MockStorageSwapper_StopReadingUnifiedStorage_Call {
|
||||
return &MockStorageSwapper_StopReadingUnifiedStorage_Call{Call: _e.mock.On("StopReadingUnifiedStorage", ctx)}
|
||||
}
|
||||
|
||||
func (_c *MockStorageSwapper_StopReadingUnifiedStorage_Call) Run(run func(ctx context.Context)) *MockStorageSwapper_StopReadingUnifiedStorage_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStorageSwapper_StopReadingUnifiedStorage_Call) Return(_a0 error) *MockStorageSwapper_StopReadingUnifiedStorage_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStorageSwapper_StopReadingUnifiedStorage_Call) RunAndReturn(run func(context.Context) error) *MockStorageSwapper_StopReadingUnifiedStorage_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// WipeUnifiedAndSetMigratedFlag provides a mock function with given fields: ctx, namespace
|
||||
func (_m *MockStorageSwapper) WipeUnifiedAndSetMigratedFlag(ctx context.Context, namespace string) error {
|
||||
ret := _m.Called(ctx, namespace)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for WipeUnifiedAndSetMigratedFlag")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
|
||||
r0 = rf(ctx, namespace)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WipeUnifiedAndSetMigratedFlag'
|
||||
type MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// WipeUnifiedAndSetMigratedFlag is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - namespace string
|
||||
func (_e *MockStorageSwapper_Expecter) WipeUnifiedAndSetMigratedFlag(ctx interface{}, namespace interface{}) *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call {
|
||||
return &MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call{Call: _e.mock.On("WipeUnifiedAndSetMigratedFlag", ctx, namespace)}
|
||||
}
|
||||
|
||||
func (_c *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call) Run(run func(ctx context.Context, namespace string)) *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call) Return(_a0 error) *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call) RunAndReturn(run func(context.Context, string) error) *MockStorageSwapper_WipeUnifiedAndSetMigratedFlag_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockStorageSwapper creates a new instance of MockStorageSwapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockStorageSwapper(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockStorageSwapper {
|
||||
mock := &MockStorageSwapper{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
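Both storage-swap steps can be primed the same way; a minimal sketch covering both methods (which step a given test needs depends on the path it exercises):

swapper := NewMockStorageSwapper(t)
swapper.EXPECT().StopReadingUnifiedStorage(mock.Anything).Return(nil)
swapper.EXPECT().WipeUnifiedAndSetMigratedFlag(mock.Anything, "test-namespace").Return(nil)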
|
@ -0,0 +1,87 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
repository "github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockWrapWithCloneFn is an autogenerated mock type for the WrapWithCloneFn type
|
||||
type MockWrapWithCloneFn struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockWrapWithCloneFn_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockWrapWithCloneFn) EXPECT() *MockWrapWithCloneFn_Expecter {
|
||||
return &MockWrapWithCloneFn_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// Execute provides a mock function with given fields: ctx, repo, cloneOptions, pushOptions, fn
|
||||
func (_m *MockWrapWithCloneFn) Execute(ctx context.Context, repo repository.Repository, cloneOptions repository.CloneOptions, pushOptions repository.PushOptions, fn func(repository.Repository, bool) error) error {
|
||||
ret := _m.Called(ctx, repo, cloneOptions, pushOptions, fn)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Execute")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.Repository, repository.CloneOptions, repository.PushOptions, func(repository.Repository, bool) error) error); ok {
|
||||
r0 = rf(ctx, repo, cloneOptions, pushOptions, fn)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockWrapWithCloneFn_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute'
|
||||
type MockWrapWithCloneFn_Execute_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Execute is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - repo repository.Repository
|
||||
// - cloneOptions repository.CloneOptions
|
||||
// - pushOptions repository.PushOptions
|
||||
// - fn func(repository.Repository , bool) error
|
||||
func (_e *MockWrapWithCloneFn_Expecter) Execute(ctx interface{}, repo interface{}, cloneOptions interface{}, pushOptions interface{}, fn interface{}) *MockWrapWithCloneFn_Execute_Call {
|
||||
return &MockWrapWithCloneFn_Execute_Call{Call: _e.mock.On("Execute", ctx, repo, cloneOptions, pushOptions, fn)}
|
||||
}
|
||||
|
||||
func (_c *MockWrapWithCloneFn_Execute_Call) Run(run func(ctx context.Context, repo repository.Repository, cloneOptions repository.CloneOptions, pushOptions repository.PushOptions, fn func(repository.Repository, bool) error)) *MockWrapWithCloneFn_Execute_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(repository.Repository), args[2].(repository.CloneOptions), args[3].(repository.PushOptions), args[4].(func(repository.Repository, bool) error))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockWrapWithCloneFn_Execute_Call) Return(_a0 error) *MockWrapWithCloneFn_Execute_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockWrapWithCloneFn_Execute_Call) RunAndReturn(run func(context.Context, repository.Repository, repository.CloneOptions, repository.PushOptions, func(repository.Repository, bool) error) error) *MockWrapWithCloneFn_Execute_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockWrapWithCloneFn creates a new instance of MockWrapWithCloneFn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockWrapWithCloneFn(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockWrapWithCloneFn {
|
||||
mock := &MockWrapWithCloneFn{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
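Because WrapWithCloneFn is a function type, a test can expose this mock's Execute method as the wrapper and forward to the callback; a sketch under that assumption:

wrap := NewMockWrapWithCloneFn(t)
wrap.EXPECT().
    Execute(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
    RunAndReturn(func(_ context.Context, repo repository.Repository, _ repository.CloneOptions, _ repository.PushOptions, fn func(repository.Repository, bool) error) error {
        return fn(repo, true) // behave like a pass-through clone wrapper
    })
// wrap.Execute is a method value and can be passed wherever a WrapWithCloneFn is accepted.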
|
@ -18,7 +18,67 @@ import (
|
||||
|
||||
var _ resource.BulkResourceWriter = (*legacyResourceResourceMigrator)(nil)
|
||||
|
||||
// TODO: can we use the same migrator for folders?
|
||||
//go:generate mockery --name LegacyResourcesMigrator --structname MockLegacyResourcesMigrator --inpackage --filename mock_legacy_resources_migrator.go --with-expecter
|
||||
type LegacyResourcesMigrator interface {
|
||||
Migrate(ctx context.Context, rw repository.ReaderWriter, namespace string, opts provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error
|
||||
}
|
||||
|
||||
type legacyResourcesMigrator struct {
|
||||
repositoryResources resources.RepositoryResourcesFactory
|
||||
parsers resources.ParserFactory
|
||||
legacyMigrator legacy.LegacyMigrator
|
||||
folderMigrator LegacyFoldersMigrator
|
||||
}
|
||||
|
||||
func NewLegacyResourcesMigrator(
|
||||
repositoryResources resources.RepositoryResourcesFactory,
|
||||
parsers resources.ParserFactory,
|
||||
legacyMigrator legacy.LegacyMigrator,
|
||||
folderMigrator LegacyFoldersMigrator,
|
||||
) LegacyResourcesMigrator {
|
||||
return &legacyResourcesMigrator{
|
||||
repositoryResources: repositoryResources,
|
||||
parsers: parsers,
|
||||
legacyMigrator: legacyMigrator,
|
||||
folderMigrator: folderMigrator,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *legacyResourcesMigrator) Migrate(ctx context.Context, rw repository.ReaderWriter, namespace string, opts provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
|
||||
parser, err := m.parsers.GetParser(ctx, rw)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get parser: %w", err)
|
||||
}
|
||||
|
||||
repoOpts := resources.RepositoryResourcesOptions{
|
||||
PreloadAllUserInfo: opts.History,
|
||||
}
|
||||
|
||||
repositoryResources, err := m.repositoryResources.Client(ctx, rw, repoOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get repository resources: %w", err)
|
||||
}
|
||||
|
||||
progress.SetMessage(ctx, "migrate folders from SQL")
|
||||
if err := m.folderMigrator.Migrate(ctx, namespace, repositoryResources, progress); err != nil {
|
||||
return fmt.Errorf("migrate folders from SQL: %w", err)
|
||||
}
|
||||
|
||||
progress.SetMessage(ctx, "migrate resources from SQL")
|
||||
for _, kind := range resources.SupportedProvisioningResources {
|
||||
if kind == resources.FolderResource {
|
||||
continue
|
||||
}
|
||||
|
||||
reader := NewLegacyResourceMigrator(m.legacyMigrator, parser, repositoryResources, progress, opts, namespace, kind.GroupResource())
|
||||
if err := reader.Migrate(ctx); err != nil {
|
||||
return fmt.Errorf("migrate resource %s: %w", kind, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
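Taken together, the constructor and Migrate define the call flow: folders are migrated first, then every supported non-folder resource. A minimal caller sketch, assuming the factories, reader/writer and progress recorder are already wired up elsewhere:

m := NewLegacyResourcesMigrator(repoResourcesFactory, parserFactory, legacyMigrator, foldersMigrator)
opts := provisioning.MigrateJobOptions{History: true} // History also triggers PreloadAllUserInfo above
if err := m.Migrate(ctx, readerWriter, "default", opts, progress); err != nil {
    return fmt.Errorf("migrate legacy resources: %w", err)
}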
|
||||
|
||||
type legacyResourceResourceMigrator struct {
|
||||
legacy legacy.LegacyMigrator
|
||||
parser resources.Parser
|
||||
@ -26,10 +86,10 @@ type legacyResourceResourceMigrator struct {
|
||||
namespace string
|
||||
kind schema.GroupResource
|
||||
options provisioning.MigrateJobOptions
|
||||
resources *resources.ResourcesManager
|
||||
resources resources.RepositoryResources
|
||||
}
|
||||
|
||||
func NewLegacyResourceMigrator(legacy legacy.LegacyMigrator, parser resources.Parser, resources *resources.ResourcesManager, progress jobs.JobProgressRecorder, options provisioning.MigrateJobOptions, namespace string, kind schema.GroupResource) *legacyResourceResourceMigrator {
|
||||
func NewLegacyResourceMigrator(legacy legacy.LegacyMigrator, parser resources.Parser, resources resources.RepositoryResources, progress jobs.JobProgressRecorder, options provisioning.MigrateJobOptions, namespace string, kind schema.GroupResource) *legacyResourceResourceMigrator {
|
||||
return &legacyResourceResourceMigrator{
|
||||
legacy: legacy,
|
||||
parser: parser,
|
||||
@ -59,7 +119,7 @@ func (r *legacyResourceResourceMigrator) Write(ctx context.Context, key *resourc
|
||||
Data: value,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal unstructured: %w", err)
|
||||
return fmt.Errorf("unmarshal unstructured: %w", err)
|
||||
}
|
||||
|
||||
// clear anything so it will get written
|
||||
@ -117,7 +177,7 @@ func (r *legacyResourceResourceMigrator) Migrate(ctx context.Context) error {
|
||||
opts.OnlyCount = false // this time actually write
|
||||
_, err = r.legacy.Migrate(ctx, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running legacy migrate (%s) %w", r.kind.Resource, err)
|
||||
return fmt.Errorf("migrate legacy %s: %w", r.kind.Resource, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
578
pkg/registry/apis/provisioning/jobs/migrate/resources_test.go
Normal file
@ -0,0 +1,578 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
)
|
||||
|
||||
func TestLegacyResourcesMigrator_Migrate(t *testing.T) {
|
||||
t.Run("should fail when parser factory fails", func(t *testing.T) {
|
||||
mockParserFactory := resources.NewMockParserFactory(t)
|
||||
mockParserFactory.On("GetParser", mock.Anything, mock.Anything).
|
||||
Return(nil, errors.New("parser factory error"))
|
||||
|
||||
migrator := NewLegacyResourcesMigrator(
|
||||
nil,
|
||||
mockParserFactory,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background(), nil, "test-namespace", provisioning.MigrateJobOptions{}, jobs.NewMockJobProgressRecorder(t))
|
||||
require.Error(t, err)
|
||||
require.EqualError(t, err, "get parser: parser factory error")
|
||||
|
||||
mockParserFactory.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should fail when repository resources factory fails", func(t *testing.T) {
|
||||
mockParserFactory := resources.NewMockParserFactory(t)
|
||||
mockParserFactory.On("GetParser", mock.Anything, mock.Anything).
|
||||
Return(resources.NewMockParser(t), nil)
|
||||
|
||||
mockRepoResourcesFactory := resources.NewMockRepositoryResourcesFactory(t)
|
||||
mockRepoResourcesFactory.On("Client", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(nil, errors.New("repo resources factory error"))
|
||||
|
||||
migrator := NewLegacyResourcesMigrator(
|
||||
mockRepoResourcesFactory,
|
||||
mockParserFactory,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background(), nil, "test-namespace", provisioning.MigrateJobOptions{}, jobs.NewMockJobProgressRecorder(t))
|
||||
require.Error(t, err)
|
||||
require.EqualError(t, err, "get repository resources: repo resources factory error")
|
||||
|
||||
mockParserFactory.AssertExpectations(t)
|
||||
mockRepoResourcesFactory.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should fail when folder migrator fails", func(t *testing.T) {
|
||||
mockParserFactory := resources.NewMockParserFactory(t)
|
||||
mockParserFactory.On("GetParser", mock.Anything, mock.Anything).
|
||||
Return(resources.NewMockParser(t), nil)
|
||||
|
||||
mockRepoResources := resources.NewMockRepositoryResources(t)
|
||||
mockRepoResourcesFactory := resources.NewMockRepositoryResourcesFactory(t)
|
||||
mockRepoResourcesFactory.On("Client", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(mockRepoResources, nil)
|
||||
|
||||
mockFolderMigrator := NewMockLegacyFoldersMigrator(t)
|
||||
mockFolderMigrator.On("Migrate", mock.Anything, "test-namespace", mockRepoResources, mock.Anything).
|
||||
Return(errors.New("folder migrator error"))
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
|
||||
migrator := NewLegacyResourcesMigrator(
|
||||
mockRepoResourcesFactory,
|
||||
mockParserFactory,
|
||||
nil,
|
||||
mockFolderMigrator,
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background(), nil, "test-namespace", provisioning.MigrateJobOptions{}, progress)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "migrate folders from SQL")
|
||||
|
||||
mockParserFactory.AssertExpectations(t)
|
||||
mockRepoResourcesFactory.AssertExpectations(t)
|
||||
mockFolderMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should fail when resource migration fails", func(t *testing.T) {
|
||||
mockParserFactory := resources.NewMockParserFactory(t)
|
||||
mockParserFactory.On("GetParser", mock.Anything, mock.Anything).
|
||||
Return(resources.NewMockParser(t), nil)
|
||||
|
||||
mockRepoResources := resources.NewMockRepositoryResources(t)
|
||||
mockRepoResourcesFactory := resources.NewMockRepositoryResourcesFactory(t)
|
||||
mockRepoResourcesFactory.On("Client", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(mockRepoResources, nil)
|
||||
|
||||
mockFolderMigrator := NewMockLegacyFoldersMigrator(t)
|
||||
mockFolderMigrator.On("Migrate", mock.Anything, "test-namespace", mockRepoResources, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, errors.New("legacy migrator error"))
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
|
||||
migrator := NewLegacyResourcesMigrator(
|
||||
mockRepoResourcesFactory,
|
||||
mockParserFactory,
|
||||
mockLegacyMigrator,
|
||||
mockFolderMigrator,
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background(), nil, "test-namespace", provisioning.MigrateJobOptions{}, progress)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "migrate resource")
|
||||
|
||||
mockParserFactory.AssertExpectations(t)
|
||||
mockRepoResourcesFactory.AssertExpectations(t)
|
||||
mockFolderMigrator.AssertExpectations(t)
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should successfully migrate all resources", func(t *testing.T) {
|
||||
mockParser := resources.NewMockParser(t)
|
||||
mockParserFactory := resources.NewMockParserFactory(t)
|
||||
mockParserFactory.On("GetParser", mock.Anything, mock.Anything).
|
||||
Return(mockParser, nil)
|
||||
|
||||
mockRepoResources := resources.NewMockRepositoryResources(t)
|
||||
mockRepoResourcesFactory := resources.NewMockRepositoryResourcesFactory(t)
|
||||
mockRepoResourcesFactory.On("Client", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(mockRepoResources, nil)
|
||||
|
||||
mockFolderMigrator := NewMockLegacyFoldersMigrator(t)
|
||||
mockFolderMigrator.On("Migrate", mock.Anything, "test-namespace", mockRepoResources, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, nil).Once() // Count phase
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return !opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{
|
||||
Summary: []*resource.BulkResponse_Summary{
|
||||
{
|
||||
Group: "test.grafana.app",
|
||||
Resource: "tests",
|
||||
Count: 10,
|
||||
History: 5,
|
||||
},
|
||||
},
|
||||
}, nil).Once() // Migration phase
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, "migrate folders from SQL").Return()
|
||||
progress.On("SetMessage", mock.Anything, "migrate resources from SQL").Return()
|
||||
progress.On("SetMessage", mock.Anything, "migrate dashboards resource").Return()
|
||||
|
||||
migrator := NewLegacyResourcesMigrator(
|
||||
mockRepoResourcesFactory,
|
||||
mockParserFactory,
|
||||
mockLegacyMigrator,
|
||||
mockFolderMigrator,
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background(), nil, "test-namespace", provisioning.MigrateJobOptions{}, progress)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockParserFactory.AssertExpectations(t)
|
||||
mockRepoResourcesFactory.AssertExpectations(t)
|
||||
mockFolderMigrator.AssertExpectations(t)
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyResourceResourceMigrator_Write(t *testing.T) {
|
||||
t.Run("should fail when parser fails", func(t *testing.T) {
|
||||
mockParser := resources.NewMockParser(t)
|
||||
mockParser.On("Parse", mock.Anything, mock.Anything).
|
||||
Return(nil, errors.New("parser error"))
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
nil,
|
||||
mockParser,
|
||||
nil,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err := migrator.Write(context.Background(), &resource.ResourceKey{}, []byte("test"))
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "unmarshal unstructured")
|
||||
|
||||
mockParser.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("records error when create resource file fails", func(t *testing.T) {
|
||||
mockParser := resources.NewMockParser(t)
|
||||
obj := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
meta, err := utils.MetaAccessor(obj)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockParser.On("Parse", mock.Anything, mock.Anything).
|
||||
Return(&resources.ParsedResource{
|
||||
Meta: meta,
|
||||
Obj: obj,
|
||||
}, nil)
|
||||
|
||||
mockRepoResources := resources.NewMockRepositoryResources(t)
|
||||
mockRepoResources.On("CreateResourceFileFromObject", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return("", errors.New("create file error"))
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
|
||||
return result.Action == repository.FileActionCreated &&
|
||||
result.Name == "test" &&
|
||||
result.Error != nil &&
|
||||
result.Error.Error() == "create file error"
|
||||
})).Return()
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
nil,
|
||||
mockParser,
|
||||
mockRepoResources,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err = migrator.Write(context.Background(), &resource.ResourceKey{}, []byte("test"))
|
||||
require.NoError(t, err) // Error is recorded but not returned
|
||||
|
||||
mockParser.AssertExpectations(t)
|
||||
mockRepoResources.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should successfully write resource", func(t *testing.T) {
|
||||
mockParser := resources.NewMockParser(t)
|
||||
obj := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
meta, err := utils.MetaAccessor(obj)
|
||||
require.NoError(t, err)
|
||||
meta.SetManagerProperties(utils.ManagerProperties{
|
||||
Kind: utils.ManagerKindRepo,
|
||||
Identity: "test",
|
||||
AllowsEdits: true,
|
||||
Suspended: false,
|
||||
})
|
||||
meta.SetSourceProperties(utils.SourceProperties{
|
||||
Path: "test",
|
||||
Checksum: "test",
|
||||
TimestampMillis: 1234567890,
|
||||
})
|
||||
|
||||
mockParser.On("Parse", mock.Anything, mock.MatchedBy(func(info *repository.FileInfo) bool {
|
||||
return info != nil && info.Path == "" && string(info.Data) == "test"
|
||||
})).
|
||||
Return(&resources.ParsedResource{
|
||||
Meta: meta,
|
||||
Obj: obj,
|
||||
}, nil)
|
||||
|
||||
mockRepoResources := resources.NewMockRepositoryResources(t)
|
||||
mockRepoResources.On("CreateResourceFileFromObject", mock.Anything, mock.MatchedBy(func(obj *unstructured.Unstructured) bool {
|
||||
if obj == nil {
|
||||
return false
|
||||
}
|
||||
if obj.GetName() != "test" {
|
||||
return false
|
||||
}
|
||||
|
||||
meta, err := utils.MetaAccessor(obj)
|
||||
require.NoError(t, err)
|
||||
managerProps, _ := meta.GetManagerProperties()
|
||||
sourceProps, _ := meta.GetSourceProperties()
|
||||
|
||||
return assert.Zero(t, sourceProps) && assert.Zero(t, managerProps)
|
||||
}), resources.WriteOptions{
|
||||
Path: "",
|
||||
Ref: "",
|
||||
}).
|
||||
Return("test/path", nil)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
|
||||
return result.Action == repository.FileActionCreated &&
|
||||
result.Name == "test" &&
|
||||
result.Error == nil &&
|
||||
result.Resource == "tests" &&
|
||||
result.Group == "test.grafana.app" &&
|
||||
result.Path == "test/path"
|
||||
})).Return()
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
nil,
|
||||
mockParser,
|
||||
mockRepoResources,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err = migrator.Write(context.Background(), &resource.ResourceKey{}, []byte("test"))
|
||||
require.NoError(t, err)
|
||||
|
||||
mockParser.AssertExpectations(t)
|
||||
mockRepoResources.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should fail when too many errors", func(t *testing.T) {
|
||||
mockParser := resources.NewMockParser(t)
|
||||
obj := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
meta, err := utils.MetaAccessor(obj)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockParser.On("Parse", mock.Anything, mock.Anything).
|
||||
Return(&resources.ParsedResource{
|
||||
Meta: meta,
|
||||
Obj: obj,
|
||||
}, nil)
|
||||
|
||||
mockRepoResources := resources.NewMockRepositoryResources(t)
|
||||
mockRepoResources.On("CreateResourceFileFromObject", mock.Anything, mock.Anything, resources.WriteOptions{}).
|
||||
Return("test/path", nil)
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("Record", mock.Anything, mock.Anything).Return()
|
||||
progress.On("TooManyErrors").Return(errors.New("too many errors"))
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
nil,
|
||||
mockParser,
|
||||
mockRepoResources,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err = migrator.Write(context.Background(), &resource.ResourceKey{}, []byte("test"))
|
||||
require.EqualError(t, err, "too many errors")
|
||||
|
||||
mockParser.AssertExpectations(t)
|
||||
mockRepoResources.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyResourceResourceMigrator_Migrate(t *testing.T) {
|
||||
t.Run("should fail when legacy migrate count fails", func(t *testing.T) {
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, errors.New("count error"))
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
mockLegacyMigrator,
|
||||
nil,
|
||||
nil,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background())
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "unable to count legacy items")
|
||||
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should fail when legacy migrate write fails", func(t *testing.T) {
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, nil).Once() // Count phase
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return !opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, errors.New("write error")).Once() // Write phase
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
mockLegacyMigrator,
|
||||
nil,
|
||||
nil,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "test-resources"},
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background())
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "migrate legacy test-resources: write error")
|
||||
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
|
||||
t.Run("should successfully migrate resource", func(t *testing.T) {
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, nil).Once() // Count phase
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return !opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, nil).Once() // Write phase
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
mockLegacyMigrator,
|
||||
nil,
|
||||
nil,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
t.Run("should set total to history if history is greater than count", func(t *testing.T) {
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{
|
||||
Summary: []*resource.BulkResponse_Summary{
|
||||
{
|
||||
Group: "test.grafana.app",
|
||||
Resource: "tests",
|
||||
Count: 1,
|
||||
History: 100,
|
||||
},
|
||||
},
|
||||
}, nil).Once() // Count phase
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return !opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, nil).Once() // Write phase
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
progress.On("SetTotal", mock.Anything, 100).Return()
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
mockLegacyMigrator,
|
||||
nil,
|
||||
nil,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
t.Run("should set total to count if history is less than count", func(t *testing.T) {
|
||||
mockLegacyMigrator := legacy.NewMockLegacyMigrator(t)
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{
|
||||
Summary: []*resource.BulkResponse_Summary{
|
||||
{
|
||||
Group: "test.grafana.app",
|
||||
Resource: "tests",
|
||||
Count: 200,
|
||||
History: 1,
|
||||
},
|
||||
},
|
||||
}, nil).Once() // Count phase
|
||||
mockLegacyMigrator.On("Migrate", mock.Anything, mock.MatchedBy(func(opts legacy.MigrateOptions) bool {
|
||||
return !opts.OnlyCount && opts.Namespace == "test-namespace"
|
||||
})).Return(&resource.BulkResponse{}, nil).Once() // Write phase
|
||||
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
progress.On("SetMessage", mock.Anything, mock.Anything).Return()
|
||||
progress.On("SetTotal", mock.Anything, 200).Return()
|
||||
|
||||
migrator := NewLegacyResourceMigrator(
|
||||
mockLegacyMigrator,
|
||||
nil,
|
||||
nil,
|
||||
progress,
|
||||
provisioning.MigrateJobOptions{},
|
||||
"test-namespace",
|
||||
schema.GroupResource{Group: "test.grafana.app", Resource: "tests"},
|
||||
)
|
||||
|
||||
err := migrator.Migrate(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
mockLegacyMigrator.AssertExpectations(t)
|
||||
progress.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyResourceResourceMigrator_Close(t *testing.T) {
|
||||
t.Run("should return nil error", func(t *testing.T) {
|
||||
migrator := &legacyResourceResourceMigrator{}
|
||||
err := migrator.Close()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyResourceResourceMigrator_CloseWithResults(t *testing.T) {
|
||||
t.Run("should return empty bulk response and nil error", func(t *testing.T) {
|
||||
migrator := &legacyResourceResourceMigrator{}
|
||||
response, err := migrator.CloseWithResults()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, response)
|
||||
require.IsType(t, &resource.BulkResponse{}, response)
|
||||
require.Empty(t, response.Summary)
|
||||
})
|
||||
}
|
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
@@ -13,13 +14,40 @@ import (
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
)
|
||||
|
||||
func stopReadingUnifiedStorage(ctx context.Context, dual dualwrite.Service) error {
|
||||
//go:generate mockery --name BulkStoreClient --structname MockBulkStoreClient --inpackage --filename mock_bulk_store_client.go --with-expecter
|
||||
//go:generate mockery --name=BulkStore_BulkProcessClient --srcpkg=github.com/grafana/grafana/pkg/storage/unified/resource --output=. --outpkg=migrate --filename=mock_bulk_process_client.go --with-expecter
|
||||
type BulkStoreClient interface {
|
||||
BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error)
|
||||
}
|
||||
|
||||
//go:generate mockery --name StorageSwapper --structname MockStorageSwapper --inpackage --filename mock_storage_swapper.go --with-expecter
|
||||
type StorageSwapper interface {
|
||||
StopReadingUnifiedStorage(ctx context.Context) error
|
||||
WipeUnifiedAndSetMigratedFlag(ctx context.Context, namespace string) error
|
||||
}
|
||||
|
||||
type storageSwapper struct {
|
||||
// Direct access to unified storage... use carefully!
|
||||
bulk BulkStoreClient
|
||||
dual dualwrite.Service
|
||||
}
|
||||
|
||||
func NewStorageSwapper(bulk BulkStoreClient, dual dualwrite.Service) StorageSwapper {
|
||||
return &storageSwapper{
|
||||
bulk: bulk,
|
||||
dual: dual,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *storageSwapper) StopReadingUnifiedStorage(ctx context.Context) error {
|
||||
// FIXME: dual writer is not namespaced which means that we would consider all namespaces migrated
|
||||
// after one migrates
|
||||
for _, gr := range resources.SupportedProvisioningResources {
|
||||
status, _ := dual.Status(ctx, gr.GroupResource())
|
||||
status, _ := s.dual.Status(ctx, gr.GroupResource())
|
||||
status.ReadUnified = false
|
||||
status.Migrated = 0
|
||||
status.Migrating = 0
|
||||
_, err := dual.Update(ctx, status)
|
||||
_, err := s.dual.Update(ctx, status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -28,9 +56,9 @@ func stopReadingUnifiedStorage(ctx context.Context, dual dualwrite.Service) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func wipeUnifiedAndSetMigratedFlag(ctx context.Context, dual dualwrite.Service, namespace string, batch resource.BulkStoreClient) error {
|
||||
func (s *storageSwapper) WipeUnifiedAndSetMigratedFlag(ctx context.Context, namespace string) error {
|
||||
for _, gr := range resources.SupportedProvisioningResources {
|
||||
status, _ := dual.Status(ctx, gr.GroupResource())
|
||||
status, _ := s.dual.Status(ctx, gr.GroupResource())
|
||||
if status.ReadUnified {
|
||||
return fmt.Errorf("unexpected state - already using unified storage for: %s", gr)
|
||||
}
|
||||
@@ -48,7 +76,7 @@ func wipeUnifiedAndSetMigratedFlag(ctx context.Context, dual dualwrite.Service,
|
||||
}},
|
||||
}
|
||||
ctx = metadata.NewOutgoingContext(ctx, settings.ToMD())
|
||||
stream, err := batch.BulkProcess(ctx)
|
||||
stream, err := s.bulk.BulkProcess(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error clearing unified %s / %w", gr, err)
|
||||
}
|
||||
@ -57,12 +85,12 @@ func wipeUnifiedAndSetMigratedFlag(ctx context.Context, dual dualwrite.Service,
|
||||
return fmt.Errorf("error clearing unified %s / %w", gr, err)
|
||||
}
|
||||
logger := logging.FromContext(ctx)
|
||||
logger.Error("cleared unified stoage", "stats", stats)
|
||||
logger.Error("cleared unified storage", "stats", stats)
|
||||
|
||||
status.Migrated = time.Now().UnixMilli() // but not really... since the sync is starting
|
||||
status.ReadUnified = true
|
||||
status.WriteLegacy = false // keep legacy "clean"
|
||||
_, err = dual.Update(ctx, status)
|
||||
_, err = s.dual.Update(ctx, status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
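A usage sketch, not part of this commit, for the StorageSwapper introduced above; bulkClient, dualService and the "default" namespace are placeholders for values wired up elsewhere:

	swapper := NewStorageSwapper(bulkClient, dualService)
	// After exporting legacy content, wipe unified storage and flip reads over to it.
	if err := swapper.WipeUnifiedAndSetMigratedFlag(ctx, "default"); err != nil {
		// One way to recover: revert so reads keep going to legacy storage.
		_ = swapper.StopReadingUnifiedStorage(ctx)
		return err
	}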
202 pkg/registry/apis/provisioning/jobs/migrate/storage_test.go Normal file
@@ -0,0 +1,202 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func TestStorageSwapper_StopReadingUnifiedStorage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupMocks func(*MockBulkStoreClient, *dualwrite.MockService)
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "should update status for all resources",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
for _, gr := range resources.SupportedProvisioningResources {
|
||||
status := dualwrite.StorageStatus{
|
||||
ReadUnified: true,
|
||||
Migrated: 123,
|
||||
Migrating: 456,
|
||||
}
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(status, nil)
|
||||
dual.On("Update", mock.Anything, mock.MatchedBy(func(status dualwrite.StorageStatus) bool {
|
||||
return !status.ReadUnified && status.Migrated == 0 && status.Migrating == 0
|
||||
})).Return(dualwrite.StorageStatus{}, nil)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "should fail if status update fails",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
gr := resources.SupportedProvisioningResources[0]
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(dualwrite.StorageStatus{}, nil)
|
||||
dual.On("Update", mock.Anything, mock.Anything).Return(dualwrite.StorageStatus{}, errors.New("update failed"))
|
||||
},
|
||||
expectedError: "update failed",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
bulk := NewMockBulkStoreClient(t)
|
||||
dual := dualwrite.NewMockService(t)
|
||||
|
||||
if tt.setupMocks != nil {
|
||||
tt.setupMocks(bulk, dual)
|
||||
}
|
||||
|
||||
swapper := NewStorageSwapper(bulk, dual)
|
||||
err := swapper.StopReadingUnifiedStorage(context.Background())
|
||||
|
||||
if tt.expectedError != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tt.expectedError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageSwapper_WipeUnifiedAndSetMigratedFlag(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupMocks func(*MockBulkStoreClient, *dualwrite.MockService)
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "should fail if already using unified storage",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
gr := resources.SupportedProvisioningResources[0]
|
||||
status := dualwrite.StorageStatus{
|
||||
ReadUnified: true,
|
||||
}
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(status, nil)
|
||||
},
|
||||
expectedError: "unexpected state - already using unified storage",
|
||||
},
|
||||
{
|
||||
name: "should fail if migration is in progress",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
gr := resources.SupportedProvisioningResources[0]
|
||||
status := dualwrite.StorageStatus{
|
||||
ReadUnified: false,
|
||||
Migrating: time.Now().UnixMilli(),
|
||||
}
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(status, nil)
|
||||
},
|
||||
expectedError: "another migration job is running",
|
||||
},
|
||||
{
|
||||
name: "should fail if bulk process fails",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
gr := resources.SupportedProvisioningResources[0]
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(dualwrite.StorageStatus{}, nil)
|
||||
bulk.On("BulkProcess", mock.Anything, mock.Anything).Return(nil, errors.New("bulk process failed"))
|
||||
},
|
||||
expectedError: "error clearing unified",
|
||||
},
|
||||
{
|
||||
name: "should fail if status update fails after bulk process",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
gr := resources.SupportedProvisioningResources[0]
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(dualwrite.StorageStatus{}, nil)
|
||||
|
||||
mockStream := NewBulkStore_BulkProcessClient(t)
|
||||
mockStream.On("CloseAndRecv").Return(&resource.BulkResponse{}, nil)
|
||||
bulk.On("BulkProcess", mock.Anything, mock.Anything).Return(mockStream, nil)
|
||||
|
||||
dual.On("Update", mock.Anything, mock.MatchedBy(func(status dualwrite.StorageStatus) bool {
|
||||
return status.ReadUnified && !status.WriteLegacy && status.Migrated > 0
|
||||
})).Return(dualwrite.StorageStatus{}, errors.New("update failed"))
|
||||
},
|
||||
expectedError: "update failed",
|
||||
},
|
||||
{
|
||||
name: "should fail if bulk process stream close fails",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
gr := resources.SupportedProvisioningResources[0]
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(dualwrite.StorageStatus{}, nil)
|
||||
|
||||
mockStream := NewBulkStore_BulkProcessClient(t)
|
||||
mockStream.On("CloseAndRecv").Return(nil, errors.New("stream close failed"))
|
||||
bulk.On("BulkProcess", mock.Anything, mock.Anything).Return(mockStream, nil)
|
||||
},
|
||||
expectedError: "error clearing unified",
|
||||
},
|
||||
{
|
||||
name: "should succeed with complete workflow",
|
||||
setupMocks: func(bulk *MockBulkStoreClient, dual *dualwrite.MockService) {
|
||||
for _, gr := range resources.SupportedProvisioningResources {
|
||||
dual.On("Status", mock.Anything, gr.GroupResource()).Return(dualwrite.StorageStatus{}, nil)
|
||||
|
||||
mockStream := NewBulkStore_BulkProcessClient(t)
|
||||
mockStream.On("CloseAndRecv").Return(&resource.BulkResponse{}, nil)
|
||||
bulk.On("BulkProcess", mock.MatchedBy(func(ctx context.Context) bool {
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
//nolint:errcheck // hits the err != nil gotcha
|
||||
settings, _ := resource.NewBulkSettings(md)
|
||||
if !settings.RebuildCollection {
|
||||
return false
|
||||
}
|
||||
if len(settings.Collection) != 1 {
|
||||
return false
|
||||
}
|
||||
if settings.Collection[0].Namespace != "test-namespace" {
|
||||
return false
|
||||
}
|
||||
if settings.Collection[0].Group != gr.Group {
|
||||
return false
|
||||
}
|
||||
if settings.Collection[0].Resource != gr.Resource {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}), mock.Anything).Return(mockStream, nil)
|
||||
|
||||
dual.On("Update", mock.Anything, mock.MatchedBy(func(status dualwrite.StorageStatus) bool {
|
||||
return status.ReadUnified && !status.WriteLegacy && status.Migrated > 0
|
||||
})).Return(dualwrite.StorageStatus{}, nil)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
bulk := NewMockBulkStoreClient(t)
|
||||
dual := dualwrite.NewMockService(t)
|
||||
|
||||
if tt.setupMocks != nil {
|
||||
tt.setupMocks(bulk, dual)
|
||||
}
|
||||
|
||||
swapper := NewStorageSwapper(bulk, dual)
|
||||
err := swapper.WipeUnifiedAndSetMigratedFlag(context.Background(), "test-namespace")
|
||||
|
||||
if tt.expectedError != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tt.expectedError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -0,0 +1,67 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
)
|
||||
|
||||
//go:generate mockery --name WrapWithCloneFn --structname MockWrapWithCloneFn --inpackage --filename mock_wrap_with_clone_fn.go --with-expecter
|
||||
type WrapWithCloneFn func(ctx context.Context, repo repository.Repository, cloneOptions repository.CloneOptions, pushOptions repository.PushOptions, fn func(repo repository.Repository, cloned bool) error) error
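// Illustrative sketch only, not part of this change: one function shape that satisfies
// WrapWithCloneFn, modelled on the clone/push flow the worker used to inline (see the code
// removed from the worker further below). The name and error wording are assumptions.
func wrapWithCloneIfPossible(ctx context.Context, repo repository.Repository, cloneOptions repository.CloneOptions, pushOptions repository.PushOptions, fn func(repo repository.Repository, cloned bool) error) error {
	clonable, ok := repo.(repository.ClonableRepository)
	if !ok {
		// Nothing to clone: run the callback against the repository directly.
		return fn(repo, false)
	}

	clone, err := clonable.Clone(ctx, cloneOptions)
	if err != nil {
		return fmt.Errorf("clone repository: %w", err)
	}
	defer func() { _ = clone.Remove(ctx) }()

	// Run the callback against the local clone, then push any accumulated writes.
	if err := fn(clone, true); err != nil {
		return err
	}

	return clone.Push(ctx, pushOptions)
}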
|
||||
|
||||
type UnifiedStorageMigrator struct {
|
||||
namespaceCleaner NamespaceCleaner
|
||||
exportWorker jobs.Worker
|
||||
syncWorker jobs.Worker
|
||||
}
|
||||
|
||||
func NewUnifiedStorageMigrator(
|
||||
namespaceCleaner NamespaceCleaner,
|
||||
exportWorker jobs.Worker,
|
||||
syncWorker jobs.Worker,
|
||||
) *UnifiedStorageMigrator {
|
||||
return &UnifiedStorageMigrator{
|
||||
namespaceCleaner: namespaceCleaner,
|
||||
exportWorker: exportWorker,
|
||||
syncWorker: syncWorker,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *UnifiedStorageMigrator) Migrate(ctx context.Context, repo repository.ReaderWriter, options provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
|
||||
namespace := repo.Config().GetNamespace()
|
||||
progress.SetMessage(ctx, "export resources")
|
||||
exportJob := provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Push: &provisioning.ExportJobOptions{},
|
||||
},
|
||||
}
|
||||
if err := m.exportWorker.Process(ctx, repo, exportJob, progress); err != nil {
|
||||
return fmt.Errorf("export resources: %w", err)
|
||||
}
|
||||
|
||||
// Reset the results after the export as pull will operate on the same resources
|
||||
progress.ResetResults()
|
||||
progress.SetMessage(ctx, "pull resources")
|
||||
|
||||
syncJob := provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Pull: &provisioning.SyncJobOptions{
|
||||
Incremental: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := m.syncWorker.Process(ctx, repo, syncJob, progress); err != nil {
|
||||
return fmt.Errorf("pull resources: %w", err)
|
||||
}
|
||||
|
||||
progress.SetMessage(ctx, "clean namespace")
|
||||
if err := m.namespaceCleaner.Clean(ctx, namespace, progress); err != nil {
|
||||
return fmt.Errorf("clean namespace: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
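A construction sketch, not part of this commit, for the unified-storage path defined above; namespaceCleaner, exportWorker, syncWorker, readerWriter and progress are placeholders for dependencies built elsewhere:

	unified := NewUnifiedStorageMigrator(namespaceCleaner, exportWorker, syncWorker)
	// Export to the repository, pull it back, then clean unprovisioned resources from the namespace.
	if err := unified.Migrate(ctx, readerWriter, provisioning.MigrateJobOptions{}, progress); err != nil {
		return fmt.Errorf("unified storage migration: %w", err)
	}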
|
@@ -0,0 +1,140 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestUnifiedStorageMigrator_Migrate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupMocks func(*MockNamespaceCleaner, *jobs.MockWorker, *jobs.MockWorker, *jobs.MockJobProgressRecorder, *repository.MockRepository)
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "should fail when export job fails",
|
||||
setupMocks: func(nc *MockNamespaceCleaner, ew *jobs.MockWorker, sw *jobs.MockWorker, pr *jobs.MockJobProgressRecorder, rw *repository.MockRepository) {
|
||||
rw.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
pr.On("SetMessage", mock.Anything, "export resources").Return()
|
||||
ew.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Push != nil
|
||||
}), pr).Return(errors.New("export failed"))
|
||||
},
|
||||
expectedError: "export resources: export failed",
|
||||
},
|
||||
{
|
||||
name: "should fail when sync job fails",
|
||||
setupMocks: func(nc *MockNamespaceCleaner, ew *jobs.MockWorker, sw *jobs.MockWorker, pr *jobs.MockJobProgressRecorder, rw *repository.MockRepository) {
|
||||
rw.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
pr.On("SetMessage", mock.Anything, "export resources").Return()
|
||||
ew.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Push != nil
|
||||
}), pr).Return(nil)
|
||||
pr.On("ResetResults").Return()
|
||||
pr.On("SetMessage", mock.Anything, "pull resources").Return()
|
||||
sw.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Pull != nil && !job.Spec.Pull.Incremental
|
||||
}), pr).Return(errors.New("sync failed"))
|
||||
},
|
||||
expectedError: "pull resources: sync failed",
|
||||
},
|
||||
{
|
||||
name: "should fail when resource cleanup fails",
|
||||
setupMocks: func(nc *MockNamespaceCleaner, ew *jobs.MockWorker, sw *jobs.MockWorker, pr *jobs.MockJobProgressRecorder, rw *repository.MockRepository) {
|
||||
rw.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
pr.On("SetMessage", mock.Anything, "export resources").Return()
|
||||
nc.On("Clean", mock.Anything, "test-namespace", pr).Return(errors.New("clean failed"))
|
||||
|
||||
// Export and sync jobs succeed
|
||||
ew.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Push != nil
|
||||
}), pr).Return(nil)
|
||||
pr.On("ResetResults").Return()
|
||||
pr.On("SetMessage", mock.Anything, "pull resources").Return()
|
||||
sw.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Pull != nil && !job.Spec.Pull.Incremental
|
||||
}), pr).Return(nil)
|
||||
pr.On("SetMessage", mock.Anything, "clean namespace").Return()
|
||||
},
|
||||
expectedError: "clean namespace: clean failed",
|
||||
},
|
||||
{
|
||||
name: "should succeed with complete workflow",
|
||||
setupMocks: func(nc *MockNamespaceCleaner, ew *jobs.MockWorker, sw *jobs.MockWorker, pr *jobs.MockJobProgressRecorder, rw *repository.MockRepository) {
|
||||
rw.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "test-namespace",
|
||||
},
|
||||
})
|
||||
pr.On("SetMessage", mock.Anything, "export resources").Return()
|
||||
// Export job succeeds
|
||||
ew.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Push != nil
|
||||
}), pr).Return(nil)
|
||||
|
||||
nc.On("Clean", mock.Anything, "test-namespace", pr).Return(nil)
|
||||
// Reset progress and sync job succeeds
|
||||
pr.On("ResetResults").Return()
|
||||
pr.On("SetMessage", mock.Anything, "pull resources").Return()
|
||||
sw.On("Process", mock.Anything, rw, mock.MatchedBy(func(job provisioning.Job) bool {
|
||||
return job.Spec.Pull != nil && !job.Spec.Pull.Incremental
|
||||
}), pr).Return(nil)
|
||||
|
||||
pr.On("SetMessage", mock.Anything, "clean namespace").Return()
|
||||
},
|
||||
expectedError: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
exportWorker := jobs.NewMockWorker(t)
|
||||
syncWorker := jobs.NewMockWorker(t)
|
||||
progressRecorder := jobs.NewMockJobProgressRecorder(t)
|
||||
readerWriter := repository.NewMockRepository(t)
|
||||
mockNamespaceCleaner := NewMockNamespaceCleaner(t)
|
||||
|
||||
if tt.setupMocks != nil {
|
||||
tt.setupMocks(mockNamespaceCleaner, exportWorker, syncWorker, progressRecorder, readerWriter)
|
||||
}
|
||||
|
||||
migrator := NewUnifiedStorageMigrator(mockNamespaceCleaner, exportWorker, syncWorker)
|
||||
|
||||
err := migrator.Migrate(context.Background(), readerWriter, provisioning.MigrateJobOptions{}, progressRecorder)
|
||||
|
||||
if tt.expectedError != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tt.expectedError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
mock.AssertExpectationsForObjects(t, mockNamespaceCleaner, exportWorker, syncWorker, progressRecorder, readerWriter)
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,65 +1,35 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
)
|
||||
|
||||
//go:generate mockery --name Migrator --structname MockMigrator --inpackage --filename mock_migrator.go --with-expecter
|
||||
type Migrator interface {
|
||||
Migrate(ctx context.Context, rw repository.ReaderWriter, opts provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error
|
||||
}
|
||||
|
||||
type MigrationWorker struct {
|
||||
// temporary... while we still do an import
|
||||
parsers resources.ParserFactory
|
||||
|
||||
clients resources.ClientFactory
|
||||
|
||||
// Check where values are currently saved
|
||||
storageStatus dualwrite.Service
|
||||
|
||||
// Support reading from history
|
||||
legacyMigrator legacy.LegacyMigrator
|
||||
|
||||
// Direct access to unified storage... use carefully!
|
||||
bulk resource.BulkStoreClient
|
||||
|
||||
// Delegate the export to the export worker
|
||||
exportWorker jobs.Worker
|
||||
|
||||
// Delegate the import to sync worker
|
||||
syncWorker jobs.Worker
|
||||
storageStatus dualwrite.Service
|
||||
legacyMigrator Migrator
|
||||
unifiedMigrator Migrator
|
||||
}
|
||||
|
||||
func NewMigrationWorker(
|
||||
legacyMigrator legacy.LegacyMigrator,
|
||||
parsers resources.ParserFactory, // should not be necessary!
|
||||
clients resources.ClientFactory,
|
||||
legacyMigrator Migrator,
|
||||
unifiedMigrator Migrator,
|
||||
storageStatus dualwrite.Service,
|
||||
batch resource.BulkStoreClient,
|
||||
exportWorker jobs.Worker,
|
||||
syncWorker jobs.Worker,
|
||||
) *MigrationWorker {
|
||||
return &MigrationWorker{
|
||||
parsers,
|
||||
clients,
|
||||
storageStatus,
|
||||
legacyMigrator,
|
||||
batch,
|
||||
exportWorker,
|
||||
syncWorker,
|
||||
unifiedMigrator: unifiedMigrator,
|
||||
legacyMigrator: legacyMigrator,
|
||||
storageStatus: storageStatus,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,7 +37,6 @@ func (w *MigrationWorker) IsSupported(ctx context.Context, job provisioning.Job)
|
||||
return job.Spec.Action == provisioning.JobActionMigrate
|
||||
}
|
||||
|
||||
// Process will start a job
|
||||
func (w *MigrationWorker) Process(ctx context.Context, repo repository.Repository, job provisioning.Job, progress jobs.JobProgressRecorder) error {
|
||||
options := job.Spec.Migrate
|
||||
if options == nil {
|
||||
@@ -79,216 +48,10 @@ func (w *MigrationWorker) Process(ctx context.Context, repo repository.Repositor
|
||||
if !ok {
|
||||
return errors.New("migration job submitted targeting repository that is not a ReaderWriter")
|
||||
}
|
||||
parser, err := w.parsers.GetParser(ctx, rw)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting parser: %w", err)
|
||||
}
|
||||
|
||||
clients, err := w.clients.Clients(ctx, rw.Config().Namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting clients: %w", err)
|
||||
}
|
||||
|
||||
if dualwrite.IsReadingLegacyDashboardsAndFolders(ctx, w.storageStatus) {
|
||||
return w.migrateFromLegacy(ctx, rw, parser, clients, *options, progress)
|
||||
return w.legacyMigrator.Migrate(ctx, rw, *options, progress)
|
||||
}
|
||||
|
||||
return w.migrateFromAPIServer(ctx, rw, clients, *options, progress)
|
||||
}
|
||||
|
||||
// migrateFromLegacy will export the resources from legacy storage and import them into the target repository
|
||||
func (w *MigrationWorker) migrateFromLegacy(ctx context.Context, rw repository.ReaderWriter, parser resources.Parser, clients resources.ResourceClients, options provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
|
||||
var (
|
||||
err error
|
||||
clone repository.ClonedRepository
|
||||
)
|
||||
|
||||
clonable, ok := rw.(repository.ClonableRepository)
|
||||
if ok {
|
||||
progress.SetMessage(ctx, "clone "+rw.Config().Spec.GitHub.URL)
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
progress.SetMessage(ctx, scanner.Text())
|
||||
}
|
||||
}()
|
||||
|
||||
clone, err = clonable.Clone(ctx, repository.CloneOptions{
|
||||
PushOnWrites: options.History,
|
||||
// TODO: make this configurable
|
||||
Timeout: 10 * time.Minute,
|
||||
Progress: writer,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to clone target: %w", err)
|
||||
}
|
||||
|
||||
rw = clone // send all writes to the buffered repo
|
||||
defer func() {
|
||||
if err := clone.Remove(ctx); err != nil {
|
||||
logging.FromContext(ctx).Error("failed to remove cloned repository after migrate", "err", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
var userInfo map[string]repository.CommitSignature
|
||||
if options.History {
|
||||
progress.SetMessage(ctx, "loading users")
|
||||
userInfo, err = loadUsers(ctx, clients)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading users: %w", err)
|
||||
}
|
||||
}
|
||||
namespace := rw.Config().Namespace
|
||||
|
||||
progress.SetMessage(ctx, "loading folders from SQL")
|
||||
reader := NewLegacyFolderReader(w.legacyMigrator, rw.Config().Name, namespace)
|
||||
if err = reader.Read(ctx, w.legacyMigrator, rw.Config().Name, namespace); err != nil {
|
||||
return fmt.Errorf("error loading folder tree: %w", err)
|
||||
}
|
||||
|
||||
folderClient, err := clients.Folder()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting folder client: %w", err)
|
||||
}
|
||||
|
||||
folders := resources.NewFolderManager(rw, folderClient, resources.NewEmptyFolderTree())
|
||||
progress.SetMessage(ctx, "exporting folders from SQL")
|
||||
err = folders.EnsureFolderTreeExists(ctx, "", "", reader.Tree(), func(folder resources.Folder, created bool, err error) error {
|
||||
result := jobs.JobResourceResult{
|
||||
Action: repository.FileActionCreated,
|
||||
Name: folder.ID,
|
||||
Resource: resources.FolderResource.Resource,
|
||||
Group: resources.FolderResource.Group,
|
||||
Path: folder.Path,
|
||||
Error: err,
|
||||
}
|
||||
|
||||
if !created {
|
||||
result.Action = repository.FileActionIgnored
|
||||
}
|
||||
|
||||
progress.Record(ctx, result)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error exporting legacy folders: %w", err)
|
||||
}
|
||||
|
||||
progress.SetMessage(ctx, "exporting resources from SQL")
|
||||
resourceManager := resources.NewResourcesManager(rw, folders, parser, clients, userInfo)
|
||||
for _, kind := range resources.SupportedProvisioningResources {
|
||||
if kind == resources.FolderResource {
|
||||
continue
|
||||
}
|
||||
|
||||
reader := NewLegacyResourceMigrator(w.legacyMigrator, parser, resourceManager, progress, options, namespace, kind.GroupResource())
|
||||
if err := reader.Migrate(ctx); err != nil {
|
||||
return fmt.Errorf("error migrating resource %s: %w", kind, err)
|
||||
}
|
||||
}
|
||||
|
||||
if clone != nil {
|
||||
progress.SetMessage(ctx, "pushing changes")
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
progress.SetMessage(ctx, scanner.Text())
|
||||
}
|
||||
}()
|
||||
|
||||
if err := clone.Push(ctx, repository.PushOptions{
|
||||
// TODO: make this configurable
|
||||
Timeout: 10 * time.Minute,
|
||||
Progress: writer,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error pushing changes: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
progress.SetMessage(ctx, "resetting unified storage")
|
||||
if err = wipeUnifiedAndSetMigratedFlag(ctx, w.storageStatus, namespace, w.bulk); err != nil {
|
||||
return fmt.Errorf("unable to reset unified storage %w", err)
|
||||
}
|
||||
|
||||
// Reset the results after the export as pull will operate on the same resources
|
||||
progress.ResetResults()
|
||||
|
||||
// Delegate the import to a sync (from the already checked out go-git repository!)
|
||||
progress.SetMessage(ctx, "pulling resources")
|
||||
err = w.syncWorker.Process(ctx, rw, provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Pull: &provisioning.SyncJobOptions{
|
||||
Incremental: false,
|
||||
},
|
||||
},
|
||||
}, progress)
|
||||
if err != nil { // this will have an error when too many errors exist
|
||||
progress.SetMessage(ctx, "error importing resources, reverting")
|
||||
if e2 := stopReadingUnifiedStorage(ctx, w.storageStatus); e2 != nil {
|
||||
logger := logging.FromContext(ctx)
|
||||
logger.Warn("error trying to revert dual write settings after an error", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// migrateFromAPIServer will export the resources from unified storage and import them into the target repository
|
||||
func (w *MigrationWorker) migrateFromAPIServer(ctx context.Context, repo repository.ReaderWriter, clients resources.ResourceClients, options provisioning.MigrateJobOptions, progress jobs.JobProgressRecorder) error {
|
||||
progress.SetMessage(ctx, "exporting unified storage resources")
|
||||
exportJob := provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Push: &provisioning.ExportJobOptions{},
|
||||
},
|
||||
}
|
||||
if err := w.exportWorker.Process(ctx, repo, exportJob, progress); err != nil {
|
||||
return fmt.Errorf("export resources: %w", err)
|
||||
}
|
||||
|
||||
// Reset the results after the export as pull will operate on the same resources
|
||||
progress.ResetResults()
|
||||
|
||||
progress.SetMessage(ctx, "pulling resources")
|
||||
syncJob := provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Pull: &provisioning.SyncJobOptions{
|
||||
Incremental: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := w.syncWorker.Process(ctx, repo, syncJob, progress); err != nil {
|
||||
return fmt.Errorf("pull resources: %w", err)
|
||||
}
|
||||
|
||||
for _, kind := range resources.SupportedProvisioningResources {
|
||||
progress.SetMessage(ctx, fmt.Sprintf("removing unprovisioned %s", kind.Resource))
|
||||
client, _, err := clients.ForResource(kind)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = resources.ForEach(ctx, client, func(item *unstructured.Unstructured) error {
|
||||
result := jobs.JobResourceResult{
|
||||
Name: item.GetName(),
|
||||
Resource: item.GetKind(),
|
||||
Group: item.GroupVersionKind().Group,
|
||||
Action: repository.FileActionDeleted,
|
||||
}
|
||||
|
||||
if err := client.Delete(ctx, item.GetName(), metav1.DeleteOptions{}); err != nil {
|
||||
result.Error = fmt.Errorf("failed to delete folder: %w", err)
|
||||
progress.Record(ctx, result)
|
||||
return result.Error
|
||||
}
|
||||
|
||||
progress.Record(ctx, result)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return w.unifiedMigrator.Migrate(ctx, rw, *options, progress)
|
||||
}
|
||||
|
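A wiring sketch, not part of this commit: after the refactor the worker only picks one of the two Migrator implementations, so construction reduces to three dependencies; legacyMigrator, unifiedMigrator, storageStatus, repo, job and progress are placeholders:

	worker := NewMigrationWorker(legacyMigrator, unifiedMigrator, storageStatus)
	if worker.IsSupported(ctx, job) {
		// Runs the legacy migrator while dual-write still reads legacy storage, otherwise the unified one.
		if err := worker.Process(ctx, repo, job, progress); err != nil {
			return err
		}
	}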
164 pkg/registry/apis/provisioning/jobs/migrate/worker_test.go Normal file
@@ -0,0 +1,164 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
|
||||
"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
|
||||
)
|
||||
|
||||
func TestMigrationWorker_IsSupported(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
job provisioning.Job
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "should support migrate action",
|
||||
job: provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: provisioning.JobActionMigrate,
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "should not support other actions",
|
||||
job: provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: "other",
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
worker := NewMigrationWorker(nil, nil, nil)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := worker.IsSupported(context.Background(), tt.job)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMigrationWorker_ProcessNotReaderWriter(t *testing.T) {
|
||||
worker := NewMigrationWorker(nil, nil, nil)
|
||||
job := provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: provisioning.JobActionMigrate,
|
||||
Migrate: &provisioning.MigrateJobOptions{},
|
||||
},
|
||||
}
|
||||
progressRecorder := jobs.NewMockJobProgressRecorder(t)
|
||||
progressRecorder.On("SetTotal", mock.Anything, 10).Return()
|
||||
|
||||
repo := repository.NewMockReader(t)
|
||||
err := worker.Process(context.Background(), repo, job, progressRecorder)
|
||||
require.EqualError(t, err, "migration job submitted targeting repository that is not a ReaderWriter")
|
||||
}
|
||||
|
||||
func TestMigrationWorker_Process(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupMocks func(*MockMigrator, *MockMigrator, *dualwrite.MockService, *jobs.MockJobProgressRecorder)
|
||||
job provisioning.Job
|
||||
expectedError string
|
||||
isLegacyActive bool
|
||||
}{
|
||||
{
|
||||
name: "should fail when migrate settings are missing",
|
||||
job: provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: provisioning.JobActionMigrate,
|
||||
Migrate: nil,
|
||||
},
|
||||
},
|
||||
setupMocks: func(lm *MockMigrator, um *MockMigrator, ds *dualwrite.MockService, pr *jobs.MockJobProgressRecorder) {
|
||||
},
|
||||
expectedError: "missing migrate settings",
|
||||
},
|
||||
{
|
||||
name: "should use legacy migrator when legacy storage is active",
|
||||
job: provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: provisioning.JobActionMigrate,
|
||||
Migrate: &provisioning.MigrateJobOptions{},
|
||||
},
|
||||
},
|
||||
isLegacyActive: true,
|
||||
setupMocks: func(lm *MockMigrator, um *MockMigrator, ds *dualwrite.MockService, pr *jobs.MockJobProgressRecorder) {
|
||||
pr.On("SetTotal", mock.Anything, 10).Return()
|
||||
ds.On("ReadFromUnified", mock.Anything, mock.Anything).Return(false, nil)
|
||||
lm.On("Migrate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "should use unified storage migrator when legacy storage is not active",
|
||||
job: provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: provisioning.JobActionMigrate,
|
||||
Migrate: &provisioning.MigrateJobOptions{},
|
||||
},
|
||||
},
|
||||
isLegacyActive: false,
|
||||
setupMocks: func(lm *MockMigrator, um *MockMigrator, ds *dualwrite.MockService, pr *jobs.MockJobProgressRecorder) {
|
||||
pr.On("SetTotal", mock.Anything, 10).Return()
|
||||
ds.On("ReadFromUnified", mock.Anything, mock.Anything).Return(true, nil)
|
||||
um.On("Migrate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "should propagate migrator errors",
|
||||
job: provisioning.Job{
|
||||
Spec: provisioning.JobSpec{
|
||||
Action: provisioning.JobActionMigrate,
|
||||
Migrate: &provisioning.MigrateJobOptions{},
|
||||
},
|
||||
},
|
||||
isLegacyActive: true,
|
||||
setupMocks: func(lm *MockMigrator, um *MockMigrator, ds *dualwrite.MockService, pr *jobs.MockJobProgressRecorder) {
|
||||
pr.On("SetTotal", mock.Anything, 10).Return()
|
||||
ds.On("ReadFromUnified", mock.Anything, mock.Anything).Return(false, nil)
|
||||
lm.On("Migrate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("migration failed"))
|
||||
},
|
||||
expectedError: "migration failed",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
legacyMigrator := NewMockMigrator(t)
|
||||
unifiedMigrator := NewMockMigrator(t)
|
||||
dualWriteService := dualwrite.NewMockService(t)
|
||||
progressRecorder := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
worker := NewMigrationWorker(legacyMigrator, unifiedMigrator, dualWriteService)
|
||||
|
||||
if tt.setupMocks != nil {
|
||||
tt.setupMocks(legacyMigrator, unifiedMigrator, dualWriteService, progressRecorder)
|
||||
}
|
||||
|
||||
rw := repository.NewMockRepository(t)
|
||||
err := worker.Process(context.Background(), rw, tt.job, progressRecorder)
|
||||
|
||||
if tt.expectedError != "" {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
mock.AssertExpectationsForObjects(t, legacyMigrator, unifiedMigrator, dualWriteService, progressRecorder, rw)
|
||||
})
|
||||
}
|
||||
}
|
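The tests above pin down the worker's dispatch without showing worker.go itself, so a short restatement may help. The sketch below is an illustration inferred from the mocked expectations, not the actual implementation: it assumes the package's Migrator interface matches the generated MockMigrator, takes the dualwrite.Service method from the mocks, and the group/resource key handed to ReadFromUnified plus the standalone function shape are assumptions.

package migrate

import (
	"context"
	"errors"

	"k8s.io/apimachinery/pkg/runtime/schema"

	provisioning "github.com/grafana/grafana/pkg/apis/provisioning/v0alpha1"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
)

// processMigration is a sketch of the dispatch the table tests encode, not the real
// MigrationWorker.Process: missing settings fail fast, non-ReaderWriter repositories
// are rejected, and the dual-write status picks the migrator.
func processMigration(
	ctx context.Context,
	repo repository.Repository,
	job provisioning.Job,
	progress jobs.JobProgressRecorder,
	legacy, unified Migrator,
	storageStatus dualwrite.Service,
) error {
	options := job.Spec.Migrate
	if options == nil {
		return errors.New("missing migrate settings")
	}

	progress.SetTotal(ctx, 10)

	rw, ok := repo.(repository.ReaderWriter)
	if !ok {
		return errors.New("migration job submitted targeting repository that is not a ReaderWriter")
	}

	// Assumption: the worker keys the dual-write check on a provisioned resource such
	// as folders; the exact group/resource passed here is illustrative.
	fromUnified, err := storageStatus.ReadFromUnified(ctx, schema.GroupResource{Group: "folder.grafana.app", Resource: "folders"})
	if err != nil {
		return err
	}
	if fromUnified {
		return unified.Migrate(ctx, rw, *options, progress)
	}
	return legacy.Migrate(ctx, rw, *options, progress)
}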
@ -86,7 +86,7 @@ func (r *SyncWorker) Process(ctx context.Context, repo repository.Repository, jo
		return fmt.Errorf("update repo with job status at start: %w", err)
	}

	repositoryResources, err := r.repositoryResources.Client(ctx, rw)
	repositoryResources, err := r.repositoryResources.Client(ctx, rw, resources.RepositoryResourcesOptions{})
	if err != nil {
		return fmt.Errorf("create repository resources client: %w", err)
	}

@ -157,7 +157,7 @@ func TestSyncWorker_Process(t *testing.T) {
				rpf.On("Execute", mock.Anything, repoConfig, mock.Anything).Return(nil)

				// Repository resources creation fails
				rrf.On("Client", mock.Anything, mock.Anything).Return(nil, errors.New("failed to create repository resources client"))
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(nil, errors.New("failed to create repository resources client"))
			},
			expectedError: "create repository resources client: failed to create repository resources client",
		},

@ -189,7 +189,7 @@ func TestSyncWorker_Process(t *testing.T) {
				rpf.On("Execute", mock.Anything, repoConfig, mock.Anything).Return(nil)

				// Repository resources creation succeeds
				rrf.On("Client", mock.Anything, mock.Anything).Return(&resources.MockRepositoryResources{}, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(&resources.MockRepositoryResources{}, nil)

				// Getting clients for namespace fails
				cf.On("Clients", mock.Anything, "test-namespace").Return(nil, errors.New("failed to get clients"))

@ -222,7 +222,7 @@ func TestSyncWorker_Process(t *testing.T) {
				// Setup resources and clients
				mockRepoResources := resources.NewMockRepositoryResources(t)
				mockRepoResources.On("Stats", mock.Anything).Return(nil, nil)
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				mockClients := resources.NewMockResourceClients(t)
				cf.On("Clients", mock.Anything, "test-namespace").Return(mockClients, nil)

@ -277,7 +277,7 @@ func TestSyncWorker_Process(t *testing.T) {
				// Setup resources and clients
				mockRepoResources := resources.NewMockRepositoryResources(t)
				mockRepoResources.On("Stats", mock.Anything).Return(nil, nil)
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				mockClients := resources.NewMockResourceClients(t)
				cf.On("Clients", mock.Anything, "test-namespace").Return(mockClients, nil)

@ -321,7 +321,7 @@ func TestSyncWorker_Process(t *testing.T) {

				mockRepoResources := resources.NewMockRepositoryResources(t)
				mockRepoResources.On("Stats", mock.Anything).Return(nil, errors.New("stats error"))
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				// Simple mocks for other calls
				mockClients := resources.NewMockResourceClients(t)

@ -347,7 +347,7 @@ func TestSyncWorker_Process(t *testing.T) {

				mockRepoResources := resources.NewMockRepositoryResources(t)
				mockRepoResources.On("Stats", mock.Anything).Return(nil, nil)
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				// Verify only sync status is patched
				rpf.On("Execute", mock.Anything, mock.Anything, mock.MatchedBy(func(patch []map[string]interface{}) bool {

@ -391,7 +391,7 @@ func TestSyncWorker_Process(t *testing.T) {
					},
				}
				mockRepoResources.On("Stats", mock.Anything).Return(stats, nil)
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				// Verify both sync status and stats are patched
				rpf.On("Execute", mock.Anything, mock.Anything, mock.MatchedBy(func(patch []map[string]interface{}) bool {

@ -463,7 +463,7 @@ func TestSyncWorker_Process(t *testing.T) {
					},
				}
				mockRepoResources.On("Stats", mock.Anything).Return(stats, nil)
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				// Verify only sync status is patched (multiple stats should be ignored)
				rpf.On("Execute", mock.Anything, mock.Anything, mock.MatchedBy(func(patch []map[string]interface{}) bool {

@ -497,7 +497,7 @@ func TestSyncWorker_Process(t *testing.T) {
				// Setup resources and clients
				mockRepoResources := resources.NewMockRepositoryResources(t)
				mockRepoResources.On("Stats", mock.Anything).Return(nil, nil)
				rrf.On("Client", mock.Anything, mock.Anything).Return(mockRepoResources, nil)
				rrf.On("Client", mock.Anything, mock.Anything, resources.RepositoryResourcesOptions{}).Return(mockRepoResources, nil)

				mockClients := resources.NewMockResourceClients(t)
				cf.On("Clients", mock.Anything, mock.Anything).Return(mockClients, nil)
@ -547,16 +547,34 @@ func (b *APIBuilder) GetPostStartHooks() (map[string]genericapiserver.PostStartH
		syncer,
	)

	migrationWorker := migrate.NewMigrationWorker(
		b.legacyMigrator,
	legacyFolders := migrate.NewLegacyFoldersMigrator(b.legacyMigrator)
	legacyResources := migrate.NewLegacyResourcesMigrator(
		b.repositoryResources,
		b.parsers,
		b.clients,
		b.storageStatus,
		b.unified,
		b.legacyMigrator,
		legacyFolders,
	)
	storageSwapper := migrate.NewStorageSwapper(b.unified, b.storageStatus)
	legacyMigrator := migrate.NewLegacyMigrator(
		legacyResources,
		storageSwapper,
		syncWorker,
		repository.WrapWithCloneAndPushIfPossible,
	)

	cleaner := migrate.NewNamespaceCleaner(b.clients)
	unifiedStorageMigrator := migrate.NewUnifiedStorageMigrator(
		cleaner,
		exportWorker,
		syncWorker,
	)

	migrationWorker := migrate.NewMigrationWorker(
		legacyMigrator,
		unifiedStorageMigrator,
		b.storageStatus,
	)

	// Pull request worker
	renderer := pullrequest.NewScreenshotRenderer(b.render, b.unified)
	evaluator := pullrequest.NewEvaluator(renderer, b.parsers, b.urlProvider)
@ -10,9 +10,15 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type RepositoryResourcesOptions struct {
	// FIXME: this is a temporary option to preload all user info
	// we should remove this once we have a better way to handle user info and commit signatures
	PreloadAllUserInfo bool
}

//go:generate mockery --name RepositoryResourcesFactory --structname MockRepositoryResourcesFactory --inpackage --filename repository_resources_factory_mock.go --with-expecter
type RepositoryResourcesFactory interface {
	Client(ctx context.Context, repo repository.ReaderWriter) (RepositoryResources, error)
	Client(ctx context.Context, repo repository.ReaderWriter, opts RepositoryResourcesOptions) (RepositoryResources, error)
}

//go:generate mockery --name RepositoryResources --structname MockRepositoryResources --inpackage --filename repository_resources_mock.go --with-expecter
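As a rough usage sketch (not part of this change set; the helper name and wiring are illustrative), a caller that needs commit attribution opts in through the new options struct, while callers such as the SyncWorker in the hunks above pass the zero value:

package example

import (
	"context"
	"fmt"

	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
)

// newMigrationResources is a hypothetical helper: it preloads user info so that
// exported commits can carry author signatures.
func newMigrationResources(ctx context.Context, factory resources.RepositoryResourcesFactory, rw repository.ReaderWriter) (resources.RepositoryResources, error) {
	repoResources, err := factory.Client(ctx, rw, resources.RepositoryResourcesOptions{
		PreloadAllUserInfo: true, // assumption: only migration-style jobs need this
	})
	if err != nil {
		return nil, fmt.Errorf("create repository resources client: %w", err)
	}
	return repoResources, nil
}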
@ -33,7 +39,7 @@ type RepositoryResources interface {
	List(ctx context.Context) (*provisioning.ResourceList, error)
}

type repositoryResourcesFactor struct {
type repositoryResourcesFactory struct {
	parsers ParserFactory
	clients ClientFactory
	lister  ResourceLister

@ -55,10 +61,10 @@ func (r *repositoryResources) List(ctx context.Context) (*provisioning.ResourceL
}

func NewRepositoryResourcesFactory(parsers ParserFactory, clients ClientFactory, lister ResourceLister) RepositoryResourcesFactory {
	return &repositoryResourcesFactor{parsers, clients, lister}
	return &repositoryResourcesFactory{parsers, clients, lister}
}

func (r *repositoryResourcesFactor) Client(ctx context.Context, repo repository.ReaderWriter) (RepositoryResources, error) {
func (r *repositoryResourcesFactory) Client(ctx context.Context, repo repository.ReaderWriter, opts RepositoryResourcesOptions) (RepositoryResources, error) {
	clients, err := r.clients.Clients(ctx, repo.Config().Namespace)
	if err != nil {
		return nil, fmt.Errorf("create clients: %w", err)

@ -73,8 +79,21 @@ func (r *repositoryResourcesFactor) Client(ctx context.Context, repo repository.
		return nil, fmt.Errorf("create parser: %w", err)
	}

	signatures := map[string]repository.CommitSignature{}
	if opts.PreloadAllUserInfo {
		userClient, err := clients.User()
		if err != nil {
			return nil, fmt.Errorf("create user client: %w", err)
		}

		signatures, err = loadUsers(ctx, userClient)
		if err != nil {
			return nil, fmt.Errorf("load users: %w", err)
		}
	}

	folders := NewFolderManager(repo, folderClient, NewEmptyFolderTree())
	resources := NewResourcesManager(repo, folders, parser, clients, map[string]repository.CommitSignature{})
	resources := NewResourcesManager(repo, folders, parser, clients, signatures)

	return &repositoryResources{
		FolderManager: folders,
@ -22,9 +22,9 @@ func (_m *MockRepositoryResourcesFactory) EXPECT() *MockRepositoryResourcesFacto
	return &MockRepositoryResourcesFactory_Expecter{mock: &_m.Mock}
}

// Client provides a mock function with given fields: ctx, repo
func (_m *MockRepositoryResourcesFactory) Client(ctx context.Context, repo repository.ReaderWriter) (RepositoryResources, error) {
	ret := _m.Called(ctx, repo)
// Client provides a mock function with given fields: ctx, repo, opts
func (_m *MockRepositoryResourcesFactory) Client(ctx context.Context, repo repository.ReaderWriter, opts RepositoryResourcesOptions) (RepositoryResources, error) {
	ret := _m.Called(ctx, repo, opts)

	if len(ret) == 0 {
		panic("no return value specified for Client")

@ -32,19 +32,19 @@ func (_m *MockRepositoryResourcesFactory) Client(ctx context.Context, repo repos

	var r0 RepositoryResources
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, repository.ReaderWriter) (RepositoryResources, error)); ok {
		return rf(ctx, repo)
	if rf, ok := ret.Get(0).(func(context.Context, repository.ReaderWriter, RepositoryResourcesOptions) (RepositoryResources, error)); ok {
		return rf(ctx, repo, opts)
	}
	if rf, ok := ret.Get(0).(func(context.Context, repository.ReaderWriter) RepositoryResources); ok {
		r0 = rf(ctx, repo)
	if rf, ok := ret.Get(0).(func(context.Context, repository.ReaderWriter, RepositoryResourcesOptions) RepositoryResources); ok {
		r0 = rf(ctx, repo, opts)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(RepositoryResources)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, repository.ReaderWriter) error); ok {
		r1 = rf(ctx, repo)
	if rf, ok := ret.Get(1).(func(context.Context, repository.ReaderWriter, RepositoryResourcesOptions) error); ok {
		r1 = rf(ctx, repo, opts)
	} else {
		r1 = ret.Error(1)
	}

@ -60,13 +60,14 @@ type MockRepositoryResourcesFactory_Client_Call struct {
// Client is a helper method to define mock.On call
//   - ctx context.Context
//   - repo repository.ReaderWriter
func (_e *MockRepositoryResourcesFactory_Expecter) Client(ctx interface{}, repo interface{}) *MockRepositoryResourcesFactory_Client_Call {
	return &MockRepositoryResourcesFactory_Client_Call{Call: _e.mock.On("Client", ctx, repo)}
//   - opts RepositoryResourcesOptions
func (_e *MockRepositoryResourcesFactory_Expecter) Client(ctx interface{}, repo interface{}, opts interface{}) *MockRepositoryResourcesFactory_Client_Call {
	return &MockRepositoryResourcesFactory_Client_Call{Call: _e.mock.On("Client", ctx, repo, opts)}
}

func (_c *MockRepositoryResourcesFactory_Client_Call) Run(run func(ctx context.Context, repo repository.ReaderWriter)) *MockRepositoryResourcesFactory_Client_Call {
func (_c *MockRepositoryResourcesFactory_Client_Call) Run(run func(ctx context.Context, repo repository.ReaderWriter, opts RepositoryResourcesOptions)) *MockRepositoryResourcesFactory_Client_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(context.Context), args[1].(repository.ReaderWriter))
		run(args[0].(context.Context), args[1].(repository.ReaderWriter), args[2].(RepositoryResourcesOptions))
	})
	return _c
}

@ -76,7 +77,7 @@ func (_c *MockRepositoryResourcesFactory_Client_Call) Return(_a0 RepositoryResou
	return _c
}

func (_c *MockRepositoryResourcesFactory_Client_Call) RunAndReturn(run func(context.Context, repository.ReaderWriter) (RepositoryResources, error)) *MockRepositoryResourcesFactory_Client_Call {
func (_c *MockRepositoryResourcesFactory_Client_Call) RunAndReturn(run func(context.Context, repository.ReaderWriter, RepositoryResourcesOptions) (RepositoryResources, error)) *MockRepositoryResourcesFactory_Client_Call {
	_c.Call.Return(run)
	return _c
}
@ -22,7 +22,7 @@ type FolderTree interface {
	In(folder string) bool
	DirPath(folder, baseFolder string) (Folder, bool)
	Add(folder Folder, parent string)
	AddUnstructured(item *unstructured.Unstructured, skipRepo string) error
	AddUnstructured(item *unstructured.Unstructured) error
	Count() int
	Walk(ctx context.Context, fn WalkFunc) error
}

@ -116,15 +116,12 @@ func NewEmptyFolderTree() FolderTree {
	}
}

func (t *folderTree) AddUnstructured(item *unstructured.Unstructured, skipRepo string) error {
func (t *folderTree) AddUnstructured(item *unstructured.Unstructured) error {
	meta, err := utils.MetaAccessor(item)
	if err != nil {
		return fmt.Errorf("extract meta accessor: %w", err)
	}
	manager, _ := meta.GetManagerProperties()
	if manager.Identity == skipRepo {
		return nil // skip it... already in tree?
	}

	folder := Folder{
		Title: meta.FindTitle(item.GetName()),
		ID:    item.GetName(),
@ -56,17 +56,17 @@ func (_c *MockFolderTree_Add_Call) RunAndReturn(run func(Folder, string)) *MockF
	return _c
}

// AddUnstructured provides a mock function with given fields: item, skipRepo
func (_m *MockFolderTree) AddUnstructured(item *unstructured.Unstructured, skipRepo string) error {
	ret := _m.Called(item, skipRepo)
// AddUnstructured provides a mock function with given fields: item
func (_m *MockFolderTree) AddUnstructured(item *unstructured.Unstructured) error {
	ret := _m.Called(item)

	if len(ret) == 0 {
		panic("no return value specified for AddUnstructured")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(*unstructured.Unstructured, string) error); ok {
		r0 = rf(item, skipRepo)
	if rf, ok := ret.Get(0).(func(*unstructured.Unstructured) error); ok {
		r0 = rf(item)
	} else {
		r0 = ret.Error(0)
	}

@ -81,14 +81,13 @@ type MockFolderTree_AddUnstructured_Call struct {

// AddUnstructured is a helper method to define mock.On call
//   - item *unstructured.Unstructured
//   - skipRepo string
func (_e *MockFolderTree_Expecter) AddUnstructured(item interface{}, skipRepo interface{}) *MockFolderTree_AddUnstructured_Call {
	return &MockFolderTree_AddUnstructured_Call{Call: _e.mock.On("AddUnstructured", item, skipRepo)}
func (_e *MockFolderTree_Expecter) AddUnstructured(item interface{}) *MockFolderTree_AddUnstructured_Call {
	return &MockFolderTree_AddUnstructured_Call{Call: _e.mock.On("AddUnstructured", item)}
}

func (_c *MockFolderTree_AddUnstructured_Call) Run(run func(item *unstructured.Unstructured, skipRepo string)) *MockFolderTree_AddUnstructured_Call {
func (_c *MockFolderTree_AddUnstructured_Call) Run(run func(item *unstructured.Unstructured)) *MockFolderTree_AddUnstructured_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(*unstructured.Unstructured), args[1].(string))
		run(args[0].(*unstructured.Unstructured))
	})
	return _c
}

@ -98,7 +97,7 @@ func (_c *MockFolderTree_AddUnstructured_Call) Return(_a0 error) *MockFolderTree
	return _c
}

func (_c *MockFolderTree_AddUnstructured_Call) RunAndReturn(run func(*unstructured.Unstructured, string) error) *MockFolderTree_AddUnstructured_Call {
func (_c *MockFolderTree_AddUnstructured_Call) RunAndReturn(run func(*unstructured.Unstructured) error) *MockFolderTree_AddUnstructured_Call {
	_c.Call.Return(run)
	return _c
}
@ -1,4 +1,4 @@
package migrate
package resources

import (
	"context"

@ -6,22 +6,17 @@ import (
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"

	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
	"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
)

const maxUsers = 10000

func loadUsers(ctx context.Context, clients resources.ResourceClients) (map[string]repository.CommitSignature, error) {
	client, err := clients.User()
	if err != nil {
		return nil, err
	}

func loadUsers(ctx context.Context, client dynamic.ResourceInterface) (map[string]repository.CommitSignature, error) {
	userInfo := make(map[string]repository.CommitSignature)
	var count int
	err = resources.ForEach(ctx, client, func(item *unstructured.Unstructured) error {
	err := ForEach(ctx, client, func(item *unstructured.Unstructured) error {
		count++
		if count > maxUsers {
			return errors.New("too many users")

@ -29,7 +24,11 @@ func loadUsers(ctx context.Context, clients resources.ResourceClients) (map[stri

		sig := repository.CommitSignature{}
		// FIXME: should we improve logging here?
		var ok bool
		var (
			ok  bool
			err error
		)

		sig.Name, ok, err = unstructured.NestedString(item.Object, "spec", "login")
		if !ok || err != nil {
			return nil

pkg/registry/apis/provisioning/resources/users_test.go (195 lines, Normal file)
@ -0,0 +1,195 @@
package resources

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"

	"github.com/grafana/grafana/pkg/registry/apis/provisioning/repository"
)

// mockDynamicInterface implements a simplified version of the dynamic.ResourceInterface
type mockDynamicInterface struct {
	dynamic.ResourceInterface
	items []unstructured.Unstructured
}

func (m *mockDynamicInterface) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
	return &unstructured.UnstructuredList{
		Items: m.items,
	}, nil
}

func TestLoadUsers(t *testing.T) {
	tests := []struct {
		name          string
		items         []unstructured.Unstructured
		expectedUsers map[string]repository.CommitSignature
		expectedError string
	}{
		{
			name: "should load users successfully",
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"metadata": map[string]interface{}{
							"name": "user1",
						},
						"spec": map[string]interface{}{
							"login": "johndoe",
							"email": "john@example.com",
						},
					},
				},
				{
					Object: map[string]interface{}{
						"metadata": map[string]interface{}{
							"name": "user2",
						},
						"spec": map[string]interface{}{
							"login": "janedoe",
							"email": "jane@example.com",
						},
					},
				},
			},
			expectedUsers: map[string]repository.CommitSignature{
				"user:user1": {
					Name:  "johndoe",
					Email: "john@example.com",
				},
				"user:user2": {
					Name:  "janedoe",
					Email: "jane@example.com",
				},
			},
		},
		{
			name: "should handle missing email",
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"metadata": map[string]interface{}{
							"name": "user1",
						},
						"spec": map[string]interface{}{
							"login": "johndoe",
							// email missing
						},
					},
				},
			},
			expectedUsers: map[string]repository.CommitSignature{},
		},
		{
			name: "should handle missing login",
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"metadata": map[string]interface{}{
							"name": "user1",
						},
						"spec": map[string]interface{}{
							// login missing
							"email": "john@example.com",
						},
					},
				},
			},
			expectedUsers: map[string]repository.CommitSignature{},
		},
		{
			name: "should handle same login and email",
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"metadata": map[string]interface{}{
							"name": "user1",
						},
						"spec": map[string]interface{}{
							"login": "john@example.com",
							"email": "john@example.com",
						},
					},
				},
			},
			expectedUsers: map[string]repository.CommitSignature{
				"user:user1": {
					Name:  "john@example.com",
					Email: "", // Email should be empty when same as login
				},
			},
		},
		{
			name: "should handle empty login and email",
			items: []unstructured.Unstructured{
				{
					Object: map[string]interface{}{
						"metadata": map[string]interface{}{
							"name": "user1",
						},
						"spec": map[string]interface{}{
							"login": "",
							"email": "",
						},
					},
				},
			},
			expectedUsers: map[string]repository.CommitSignature{
				"user:user1": {
					Name:  "user1", // Should use metadata name when login is empty
					Email: "",
				},
			},
		},
		{
			name: "should fail when too many users",
			items: func() []unstructured.Unstructured {
				items := make([]unstructured.Unstructured, maxUsers+1)
				for i := 0; i < maxUsers+1; i++ {
					items[i] = unstructured.Unstructured{
						Object: map[string]interface{}{
							"metadata": map[string]interface{}{
								"name": "user1",
							},
							"spec": map[string]interface{}{
								"login": "johndoe",
								"email": "john@example.com",
							},
						},
					}
				}
				return items
			}(),
			expectedError: "too many users",
		},
		{
			name:          "should handle empty user list",
			items:         []unstructured.Unstructured{},
			expectedUsers: map[string]repository.CommitSignature{},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := &mockDynamicInterface{
				items: tt.items,
			}

			userInfo, err := loadUsers(context.Background(), client)

			if tt.expectedError != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tt.expectedError)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tt.expectedUsers, userInfo)
		})
	}
}