Testing Guide
This guide covers the testing strategies and patterns used in Tenki Cloud, with a focus on writing effective tests for backend services, particularly those using Temporal workflows.
Overview
Tenki Cloud uses a comprehensive testing approach that includes:
- Unit Tests: Fast, isolated tests using mocks to verify business logic
- Integration Tests: End-to-end tests running in a real environment
- Table-Driven Tests: Systematic approach for testing multiple scenarios
- BDD-Style Tests: Behavior-driven tests using Ginkgo/Gomega
Testing Stack
Core Libraries
// Unit Testing
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/mock"
)
// Integration Testing
import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
// Temporal Testing
import (
"go.temporal.io/sdk/testsuite"
)
Project Structure
internal/domain/{domain}/
├── service/ # Business logic
├── db/ # Database queries (sqlc generated)
├── interface.go # Service interfaces
├── mock_*.go # Generated mocks
└── worker/ # Temporal workers
├── activities/ # Temporal activities
│ ├── *.go # Activity implementations
│ └── *_test.go # Activity unit tests
├── workflows/ # Temporal workflows
│ ├── *.go # Workflow implementations
│ └── *_test.go # Workflow unit tests
└── integration_*.go # Integration tests
Unit Testing
Activity Testing
Activities should be tested with mocked dependencies to ensure business logic correctness.
Basic Pattern
func TestActivities_GetRunnerInstallation(t *testing.T) {
t.Parallel()
tests := []struct {
name string
installationId int64
mockResponse *connect.Response[runnerproto.GetRunnerInstallationResponse]
mockError error
expectedResult *runnerproto.RunnerInstallation
expectErr bool
}{
{
name: "success",
installationId: 1234,
mockResponse: connect.NewResponse(&runnerproto.GetRunnerInstallationResponse{
RunnerInstallation: &runnerproto.RunnerInstallation{
Id: "abc123",
},
}),
expectedResult: &runnerproto.RunnerInstallation{Id: "abc123"},
},
{
name: "service error",
installationId: 1234,
mockError: connect.NewError(connect.CodeInternal, nil),
expectErr: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
// Setup mock
svc := &runner.MockService{}
svc.On("GetRunnerInstallation", mock.Anything, mock.Anything).
Return(tc.mockResponse, tc.mockError)
// Create activities with mock
a := newTestActivities(svc, t)
// Execute
result, err := a.GetRunnerInstallation(context.Background(), tc.installationId)
// Assert
if tc.expectErr {
assert.Error(t, err)
assert.Nil(t, result)
} else {
assert.NoError(t, err)
assert.Equal(t, tc.expectedResult, result)
}
})
}
}
Testing with Complex Arguments
// Use MatchedBy for complex argument validation
svc.On("UpdateRunners", mock.Anything,
mock.MatchedBy(func(req *connect.Request[runnerproto.UpdateRunnersRequest]) bool {
return assert.ElementsMatch(t, req.Msg.Ids, expectedIds) &&
assert.Equal(t, req.Msg.State, expectedState)
})).Return(nil, nil)
Test Helper Functions
Create reusable test helpers to reduce boilerplate:
func newTestActivities(svc runner.Service, t *testing.T) *activities {
logger := log.NewTestLogger(t)
sr := trace.NewSpanRecorder()
tracer, _ := trace.NewTestTracer(sr)
return &activities{
logger: logger,
svc: svc,
tracer: tracer,
}
}
Workflow Testing
Workflow tests mock the activities the workflow orchestrates, so they exercise only the workflow's own decision logic.
Basic Workflow Test
func TestGithubJobWorkflow(t *testing.T) {
var ts testsuite.WorkflowTestSuite
t.Run("happy path", func(t *testing.T) {
env := ts.NewTestWorkflowEnvironment()
// Register activities with stubs
env.RegisterActivityWithOptions(stubFunc, // any func matching the activity signature
	activity.RegisterOptions{Name: runner.GithubJobWorkflowActivity}) // activity = go.temporal.io/sdk/activity
// Mock activity responses
env.OnActivity(runner.GithubJobWorkflowActivity, mock.Anything, mock.Anything).
Return(nil, nil)
// Execute workflow
event := github.WorkflowJobEvent{
Action: github.String("completed"),
Installation: &github.Installation{ID: github.Int64(123)},
}
env.ExecuteWorkflow((&workflows{}).GithubJobWorkflow, event)
// Assert completion
require.True(t, env.IsWorkflowCompleted())
require.NoError(t, env.GetWorkflowError())
})
}
Testing Retry Logic
t.Run("retry on transient error", func(t *testing.T) {
env := ts.NewTestWorkflowEnvironment()
callCount := 0
env.OnActivity(runner.SomeActivity, mock.Anything, mock.Anything).
Return(func(context.Context, interface{}) error {
callCount++
if callCount < 3 {
return errors.New("transient error")
}
return nil
})
env.ExecuteWorkflow(workflow, input)
require.True(t, env.IsWorkflowCompleted())
require.NoError(t, env.GetWorkflowError())
assert.Equal(t, 3, callCount)
})
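The retries above happen because the test environment applies the retry policy from the workflow's activity options (Temporal applies a default policy when none is set). For reference, a minimal sketch of how the workflow side might configure that policy explicitly inside the workflow function; the values are illustrative:
ao := workflow.ActivityOptions{
	StartToCloseTimeout: time.Minute,
	RetryPolicy: &temporal.RetryPolicy{
		InitialInterval:    time.Second,
		BackoffCoefficient: 2.0,
		MaximumAttempts:    5, // must allow at least 3 attempts for the test above to pass
	},
}
ctx = workflow.WithActivityOptions(ctx, ao)
// The activity is re-executed on failure until it succeeds or the policy is exhausted.
err := workflow.ExecuteActivity(ctx, runner.SomeActivity, input).Get(ctx, nil)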
Integration Testing
Integration tests verify the entire system working together with real dependencies.
Setup with Ginkgo
Test Suite Entry Point
//go:build integration
func TestIntegration(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Runner Worker Integration Tests")
}
Suite Configuration
var _ = BeforeSuite(func() {
	// Start Temporal dev server
	cmd := exec.Command("temporal", "server", "start-dev",
		"--port", "7233",
		"--ui-port", "8233",
		"--db-filename", filepath.Join(tempDir, "temporal.db"))
	Expect(cmd.Start()).To(Succeed())
	// Initialize global dependencies
	initializeDatabase()
	initializeTracing()
})
var _ = AfterSuite(func() {
// Clean up
stopTemporalServer()
closeDatabase()
})
var _ = BeforeEach(func() {
// Start transaction for test isolation
tx = db.BeginTx()
// Create service instances
runnerService = createRunnerService(tx)
// Start worker
worker = temporal.NewWorker(client, taskQueue, temporal.WorkerOptions{})
temporal.RegisterWorkflows(worker)
temporal.RegisterActivities(worker, activities)
worker.Start()
})
var _ = AfterEach(func() {
// Rollback transaction
tx.Rollback()
// Stop worker
worker.Stop()
})
Writing Integration Tests
var _ = Describe("Runner Installation", func() {
Context("when installing runners", func() {
It("should install runner successfully", func() {
// Start workflow
workflowId := fmt.Sprintf("test-install-%s", uuid.New())
run, err := temporalClient.ExecuteWorkflow(
context.Background(),
client.StartWorkflowOptions{
ID: workflowId,
TaskQueue: runner.TaskQueue,
},
runner.RunnerInstallWorkflow,
installationId,
)
Expect(err).ToNot(HaveOccurred())
// Trigger installation via service
_, err = runnerService.InstallRunners(ctx, connect.NewRequest(
&runnerproto.InstallRunnersRequest{
InstallationId: installationId,
WorkspaceId: workspaceId,
},
))
Expect(err).ToNot(HaveOccurred())
// Send signal to workflow
err = temporalClient.SignalWorkflow(
context.Background(),
workflowId,
"",
runner.InstallSignal,
runner.InstallSignalPayload{},
)
Expect(err).ToNot(HaveOccurred())
// Wait for expected state
Eventually(func() string {
ins, err := runnerService.GetRunnerInstallation(ctx, req)
if err != nil || ins == nil {
return ""
}
return ins.Msg.RunnerInstallation.State
}, 30*time.Second, 1*time.Second).Should(Equal("active"))
// Verify final state
var result runner.RunnerInstallWorkflowResult
err = run.Get(context.Background(), &result)
Expect(err).ToNot(HaveOccurred())
Expect(result.Success).To(BeTrue())
})
})
})
Testing Patterns & Best Practices
1. Table-Driven Tests
Use table-driven tests to cover multiple scenarios systematically:
tests := []struct {
name string
input string
want string
wantErr bool
errMsg string
}{
{
name: "valid input",
input: "test",
want: "TEST",
},
{
name: "empty input",
input: "",
wantErr: true,
errMsg: "input cannot be empty",
},
}
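The loop that drives such a table is the same everywhere; a minimal sketch, assuming a hypothetical Transform function under test that upper-cases its input and rejects empty strings:
for _, tc := range tests {
	t.Run(tc.name, func(t *testing.T) {
		got, err := Transform(tc.input) // Transform is hypothetical; substitute the function under test
		if tc.wantErr {
			require.Error(t, err)
			assert.Contains(t, err.Error(), tc.errMsg)
			return
		}
		require.NoError(t, err)
		assert.Equal(t, tc.want, got)
	})
}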
2. Mock Best Practices
- Mock at interface boundaries
- Use mock.MatchedBy for complex argument matching
- Verify mock expectations when needed:
defer svc.AssertExpectations(t)
3. Test Isolation
- Each test should be independent
- Use database transactions with rollback (a helper sketch follows this list)
- Clean up created resources
- Reset global state between tests
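For plain testing.T tests (outside the Ginkgo BeforeEach/AfterEach shown earlier), the same transaction-based isolation fits in a small helper that rolls back via t.Cleanup. A minimal sketch, assuming a *sql.DB from database/sql; the helper name is illustrative:
// beginTestTx opens a transaction for the test and rolls it back when the
// test finishes, so no test leaves data behind. (Illustrative helper, not
// part of the existing codebase.)
func beginTestTx(t *testing.T, db *sql.DB) *sql.Tx {
	t.Helper()
	tx, err := db.BeginTx(context.Background(), nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		_ = tx.Rollback() // discard everything the test wrote
	})
	return tx
}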
4. Async Testing
Use Eventually for testing async operations:
Eventually(func() bool {
// Check condition
return conditionMet
}, timeout, interval).Should(BeTrue())
5. Error Testing
Always test both success and failure paths:
{
name: "network error",
mockError: errors.New("connection refused"),
expectErr: true,
},
{
name: "timeout error",
mockError: context.DeadlineExceeded,
expectErr: true,
},
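When the code under test wraps rather than replaces the underlying error, asserting on the specific error catches regressions where the wrong failure is surfaced. A minimal sketch of the assertion branch using testify's ErrorIs:
if tc.expectErr {
	require.Error(t, err)
	if tc.mockError != nil {
		// assert.ErrorIs unwraps the error chain, so wrapped errors still match.
		assert.ErrorIs(t, err, tc.mockError)
	}
	return
}
require.NoError(t, err)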
6. Test Naming
Use descriptive test names that explain the scenario:
t.Run("returns error when installation not found", func(t *testing.T) {
// test
})
7. Tracing in Tests
Verify tracing behavior when applicable:
sr := trace.NewSpanRecorder()
tracer, _ := trace.NewTestTracer(sr)
// After execution
spans := sr.Ended()
assert.Len(t, spans, 1)
assert.Equal(t, "OperationName", spans[0].Name())
assert.Equal(t, codes.Ok, spans[0].Status().Code)
Common Testing Scenarios
Testing Database Operations
func TestDatabaseOperation(t *testing.T) {
// Use test database
db := setupTestDatabase(t)
defer cleanupDatabase(db)
// Create queries
queries := runnerdb.New(db)
// Test operation
err := queries.CreateRunner(context.Background(), params)
require.NoError(t, err)
// Verify
runner, err := queries.GetRunner(context.Background(), id)
require.NoError(t, err)
assert.Equal(t, expectedName, runner.Name)
}
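setupTestDatabase above is project-specific. A minimal sketch of what such a helper might look like, assuming a Postgres DSN in a hypothetical TEST_DATABASE_URL environment variable and the pgx stdlib driver:
// setupTestDatabase connects to the test database and skips the test when no
// DSN is configured. (Illustrative sketch; adapt to the project's config.)
func setupTestDatabase(t *testing.T) *sql.DB {
	t.Helper()
	dsn := os.Getenv("TEST_DATABASE_URL")
	if dsn == "" {
		t.Skip("TEST_DATABASE_URL not set")
	}
	db, err := sql.Open("pgx", dsn) // requires a blank import of github.com/jackc/pgx/v5/stdlib
	require.NoError(t, err)
	require.NoError(t, db.Ping())
	return db
}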
Testing Kubernetes Operations
func TestKubernetesOperation(t *testing.T) {
// Create fake client
objects := []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
},
}
k8sClient := fake.NewSimpleClientset(objects...)
// Test operation
err := createDeployment(k8sClient, namespace, deployment)
require.NoError(t, err)
// Verify
deploy, err := k8sClient.AppsV1().Deployments(namespace).Get(
context.Background(), name, metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, expectedReplicas, *deploy.Spec.Replicas)
}
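createDeployment is a project helper; against the fake clientset it only needs the standard typed client calls. A minimal sketch of a possible implementation (hypothetical, shown to make the example self-contained):
func createDeployment(client kubernetes.Interface, namespace string, d *appsv1.Deployment) error {
	// The fake clientset implements kubernetes.Interface, so tests exercise
	// the same code path as production.
	_, err := client.AppsV1().Deployments(namespace).Create(
		context.Background(), d, metav1.CreateOptions{})
	return err
}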
Testing External API Calls
func TestExternalAPI(t *testing.T) {
// Create mock HTTP server
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/api/v1/resource", r.URL.Path)
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(expectedResponse)
}))
defer server.Close()
// Test with mock server URL
client := NewAPIClient(server.URL)
result, err := client.GetResource(context.Background(), "id")
require.NoError(t, err)
assert.Equal(t, expectedResponse, result)
}
Running Tests
Unit Tests
# Run all unit tests
gotest
# Run specific package tests
cd backend && go test ./internal/domain/runner/...
# Run with coverage
cd backend && go test -coverprofile=coverage.out ./...
go tool cover -html=coverage.out
# Run specific test
cd backend && go test -run TestActivities_GetRunnerInstallation ./...
Integration Tests
# Ensure services are running
dev up
# Run all integration tests
gotest-integration
# Run specific integration test suite
cd backend && ginkgo -v ./internal/domain/runner/worker/
Continuous Integration
Tests should be part of your CI pipeline:
test:
script:
- gotest
- gotest-integration
coverage: '/coverage: \d+\.\d+%/'
Debugging Tests
Verbose Output
go test -v ./...
Focus on Specific Tests (Ginkgo)
FIt("should focus on this test", func() {
// This test will run exclusively
})
Debug Logging
logger := log.NewTestLogger(t)
logger.Debug("test state", "value", someValue)
Test Timeouts
func TestLongRunning(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
// Use ctx for operations
}
Summary
Effective testing in Tenki Cloud requires:
- Clear separation between unit and integration tests
- Proper use of mocks for isolation
- Table-driven tests for comprehensive coverage
- Integration tests for end-to-end validation
- Consistent patterns across the codebase
Follow these patterns to ensure your code is well-tested, maintainable, and reliable.