# pprof Usage Examples

Source: https://go.dev/doc/diagnostics, https://go.dev/blog/profiling-go-programs

## Basic CPU Profiling

### Example 1: Profile a Command-Line Tool

```go
// main.go - A tool that processes data
package main

import (
    "flag"
    "log"
    "os"
    "runtime/pprof"
)

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")

func main() {
    flag.Parse()

    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            log.Fatal("could not create CPU profile: ", err)
        }
        defer f.Close()

        if err := pprof.StartCPUProfile(f); err != nil {
            log.Fatal("could not start CPU profile: ", err)
        }
        defer pprof.StopCPUProfile()
    }

    // Your application logic
    doWork()
}

func doWork() {
    // CPU-intensive work...
    sum := 0
    for i := 0; i < 1000000000; i++ {
        sum += i
    }
}
```

**Usage:**

```bash
# Run with CPU profiling
go run main.go -cpuprofile=cpu.prof

# Analyze the profile
go tool pprof cpu.prof

# Interactive commands
(pprof) top          # Show top functions
(pprof) web          # Generate graph
(pprof) list doWork  # Show source with costs
```

## Memory Profiling

### Example 2: Detect Memory Leaks

```go
package main

import (
    "fmt"
    "log"
    "os"
    "runtime/pprof"
    "time"
)

// Package-level reference keeps the "leaked" slices reachable, so they show
// up as in-use memory in the heap profile.
var data [][]int

func main() {
    // The heap profile is written after the allocation work completes
    f, err := os.Create("heap.prof")
    if err != nil {
        log.Fatal("could not create profile: ", err)
    }
    defer f.Close()

    // Do some memory allocation
    doMemoryWork()

    // Capture heap profile
    if err := pprof.WriteHeapProfile(f); err != nil {
        log.Fatal("could not write memory profile: ", err)
    }

    fmt.Println("Heap profile written to heap.prof")
}

func doMemoryWork() {
    // Grow the package-level slice
    for i := 0; i < 1000; i++ {
        // Each chunk is 256,000 ints (roughly 2 MB on 64-bit platforms)
        chunk := make([]int, 256000)
        data = append(data, chunk)

        if i%100 == 0 {
            fmt.Printf("Allocated ~%d MB\n", (i+1)*2)
        }
    }

    time.Sleep(1 * time.Second)
}
```

**Usage:**

```bash
# Run to generate profile
go run main.go

# Analyze heap profile
go tool pprof heap.prof

# Show top memory allocators
(pprof) top

# Switch sample type (heap profiles record both in-use and allocated data)
(pprof) sample_index=alloc_space    # Total bytes allocated
(pprof) sample_index=alloc_objects  # Number of objects allocated

# Show functions with large allocations
(pprof) list doMemoryWork
```

## HTTP-Based Profiling

### Example 3: Always-On Profiling in a Web Server

```go
package main

import (
    "fmt"
    "log"
    "net/http"
    _ "net/http/pprof"
    "time"
)

func main() {
    // Standard HTTP routes
    http.HandleFunc("/api/users", handleUsers)
    http.HandleFunc("/api/posts", handlePosts)

    // pprof endpoints automatically available at /debug/pprof/
    log.Println("Server running on :8080")
    log.Println("Profiles available at http://localhost:8080/debug/pprof/")
    log.Fatal(http.ListenAndServe(":8080", nil))
}

func handleUsers(w http.ResponseWriter, r *http.Request) {
    // Simulate database query
    time.Sleep(100 * time.Millisecond)

    w.Header().Set("Content-Type", "application/json")
    fmt.Fprint(w, `[{"id":1,"name":"Alice"},{"id":2,"name":"Bob"}]`)
}

func handlePosts(w http.ResponseWriter, r *http.Request) {
    // Simulate processing
    processData()

    w.Header().Set("Content-Type", "application/json")
    fmt.Fprint(w, `[{"id":1,"title":"Hello"}]`)
}

func processData() {
    sum := 0
    for i := 0; i < 100000000; i++ {
        sum += i * i
    }
}
```

**Usage:**

```bash
# Start server
go run main.go

# In another terminal, download heap profile
go tool pprof http://localhost:8080/debug/pprof/heap

# Download 30-second CPU profile
go tool pprof http://localhost:8080/debug/pprof/profile?seconds=30

# Download goroutine profile
go tool pprof http://localhost:8080/debug/pprof/goroutine

# View goroutines in plaintext
curl "http://localhost:8080/debug/pprof/goroutine?debug=2"

# Generate execution trace
curl -o trace.out http://localhost:8080/debug/pprof/trace?seconds=5
go tool trace trace.out
```
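
A CPU profile only records work the server performs while the profile is being collected, so drive some traffic from another terminal during the sampling window. A minimal load loop against the example's `/api/posts` endpoint might look like this:

```bash
# Generate load while `go tool pprof .../profile?seconds=30` is running
while true; do
  curl -s http://localhost:8080/api/posts > /dev/null
done
```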

## Testing with Profiling

### Example 4: Profile Your Tests

```go
// mypackage/compute_test.go
package mypackage

import (
    "testing"
)

func TestComputePerformance(t *testing.T) {
    result := compute(1000000)
    if result <= 0 {
        t.Fatal("expected positive result")
    }
}

func BenchmarkCompute(b *testing.B) {
    for i := 0; i < b.N; i++ {
        compute(1000000)
    }
}

func compute(n int) int {
    sum := 0
    for i := 0; i < n; i++ {
        sum += i
    }
    return sum
}
```

**Usage:**

```bash
# Run tests with CPU profiling (profile flags require a single test package)
go test -cpuprofile=cpu.prof -v .

# Run tests with memory profiling
go test -memprofile=mem.prof -v .

# Run benchmarks with profiling
go test -bench=. -cpuprofile=bench.prof -benchtime=10s .

# Analyze results
go tool pprof cpu.prof
(pprof) web
(pprof) top
```

## Custom Profiling

### Example 5: Profile Specific Operations

```go
package main

import (
    "fmt"
    "log"
    "os"
    "runtime/pprof"
    "time"
)

var requestsProfile = pprof.NewProfile("requests")

type Request struct {
    ID        int
    UserID    int
    Operation string
    StartTime time.Time
}

func main() {
    // Capture profile at end
    f, err := os.Create("requests.prof")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Process requests
    for i := 1; i <= 100; i++ {
        request := &Request{
            ID:        i,
            UserID:    i % 10,
            Operation: fmt.Sprintf("op-%d", i%5),
            StartTime: time.Now(),
        }
        processRequest(request)
    }

    // Write profile. Note: the snapshot contains only values that are still
    // in the profile (added but not yet removed) at this point.
    if err := requestsProfile.WriteTo(f, 0); err != nil {
        log.Fatal("could not write profile: ", err)
    }

    fmt.Println("Profile written to requests.prof")
}

func processRequest(req *Request) {
    requestsProfile.Add(req, 1)
    defer requestsProfile.Remove(req)

    // Simulate work
    time.Sleep(10 * time.Millisecond)

    duration := time.Since(req.StartTime)
    fmt.Printf("Processed request %d (%s) in %v\n", req.ID, req.Operation, duration)
}
```

## Label-Based Profiling

### Example 6: Correlate Profiles with Request Context (Go 1.9+)

```go
package main

import (
    "context"
    "fmt"
    "log"
    "net/http"
    _ "net/http/pprof"
    "runtime/pprof"
)

func main() {
    http.HandleFunc("/api/process", handleProcess)

    log.Println("Server on :8080")
    log.Println("Profile at http://localhost:8080/debug/pprof/profile?seconds=30")
    log.Fatal(http.ListenAndServe(":8080", nil))
}

func handleProcess(w http.ResponseWriter, r *http.Request) {
    // Extract request parameters
    userID := r.URL.Query().Get("user_id")
    if userID == "" {
        userID = "anonymous"
    }

    endpoint := r.URL.Path

    requestID := r.Header.Get("X-Request-ID")
    if requestID == "" {
        requestID = "unknown"
    }

    // Create labeled context
    ctx := context.Background()
    labels := pprof.Labels(
        "user_id", userID,
        "endpoint", endpoint,
        "request_id", requestID,
    )

    // Execute request with labels
    // All profiling samples are tagged with these labels
    pprof.Do(ctx, labels, func(ctx context.Context) {
        processRequest(ctx, r)
        w.Write([]byte("OK"))
    })
}

func processRequest(ctx context.Context, r *http.Request) {
    // CPU-intensive work here
    sum := 0
    for i := 0; i < 100000000; i++ {
        sum += i * i
    }
    fmt.Printf("Processed: %v\n", sum)
}
```

**Usage:**

```bash
# Start server
go run main.go

# Send some labeled traffic, then download a CPU profile
go tool pprof http://localhost:8080/debug/pprof/profile?seconds=30

# In pprof, inspect and filter by label
(pprof) tags                    # List label keys and values seen in samples
(pprof) tagfocus=user_id=123    # Restrict to samples carrying this label
(pprof) top
(pprof) web
```
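
For services with many routes, the same labeling can be applied once in middleware rather than in every handler. This is a minimal sketch under the same setup as Example 6; the `withProfilerLabels` wrapper is an illustrative name, not part of `net/http/pprof`:

```go
package main

import (
    "context"
    "log"
    "net/http"
    _ "net/http/pprof"
    "runtime/pprof"
)

// withProfilerLabels wraps an http.Handler so every request's profiling
// samples carry the request path and method as labels (illustrative helper).
func withProfilerLabels(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        labels := pprof.Labels("endpoint", r.URL.Path, "method", r.Method)
        pprof.Do(r.Context(), labels, func(ctx context.Context) {
            next.ServeHTTP(w, r.WithContext(ctx))
        })
    })
}

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/api/process", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("OK"))
    })

    // The blank import registers the pprof handlers on http.DefaultServeMux;
    // route /debug/pprof/ there so the endpoints stay reachable.
    mux.Handle("/debug/pprof/", http.DefaultServeMux)

    log.Fatal(http.ListenAndServe(":8080", withProfilerLabels(mux)))
}
```

Wrapping the mux keeps the labeling consistent across handlers and uses each request's own context as the base for `pprof.Do`.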
"fmt" "io" "log" "net/http" _ "net/http/pprof" "runtime" "time" ) func main() { http.HandleFunc("/leak", handleLeak) http.HandleFunc("/status", handleStatus) log.Println("Server on :8080") log.Println("Goroutines: http://localhost:8080/debug/pprof/goroutine?debug=1") log.Println("Status: http://localhost:8080/status") log.Fatal(http.ListenAndServe(":8080", nil)) } func handleLeak(w http.ResponseWriter, r *http.Request) { // Spawn goroutine that never exits (leak!) go func() { for { time.Sleep(1 * time.Second) } }() w.Write([]byte("Started background goroutine\n")) } func handleStatus(w http.ResponseWriter, r *http.Request) { count := runtime.NumGoroutine() w.Header().Set("Content-Type", "text/plain") fmt.Fprintf(w, "Active goroutines: %d\n", count) } ``` **Usage:** ```bash # Start server go run main.go # Check initial goroutine count curl http://localhost:8080/status # Output: Active goroutines: 2 # Trigger leak multiple times for i in {1..10}; do curl http://localhost:8080/leak done # Check goroutine count again curl http://localhost:8080/status # Output: Active goroutines: 12 (10 leaked + 2 initial) # Analyze goroutine profile curl -o goroutine.prof http://localhost:8080/debug/pprof/goroutine go tool pprof goroutine.prof (pprof) top ``` ## Mutex Contention Analysis ### Example 8: Detect Lock Contention ```go package main import ( "fmt" "log" "net/http" _ "net/http/pprof" "runtime" "sync" "time" ) var ( counter int mu sync.Mutex ) func main() { // Enable mutex profiling runtime.SetMutexProfileFraction(1) http.HandleFunc("/increment", handleIncrement) log.Println("Server on :8080") log.Println("Mutex profile: http://localhost:8080/debug/pprof/mutex") log.Fatal(http.ListenAndServe(":8080", nil)) } func handleIncrement(w http.ResponseWriter, r *http.Request) { // Spawn concurrent increments var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) go func() { defer wg.Done() incrementCounter() }() } wg.Wait() w.Write([]byte(fmt.Sprintf("Counter: %d\n", counter))) } func incrementCounter() { mu.Lock() defer mu.Unlock() counter++ // Simulate some work time.Sleep(1 * time.Millisecond) } ``` **Usage:** ```bash # Start server go run main.go # Download mutex profile go tool pprof http://localhost:8080/debug/pprof/mutex # Analyze contention (pprof) top # Shows lock holders and contention (pprof) list handleIncrement ``` ## Production Monitoring Script ### Example 9: Automated Profiling in Production ```bash #!/bin/bash # profile-production.sh - Collect profiles from production service SERVICE_URL="http://prod-service:8080" OUTPUT_DIR="./profiles/$(date +%Y%m%d-%H%M%S)" mkdir -p "$OUTPUT_DIR" echo "Collecting profiles from $SERVICE_URL" # Collect multiple profiles echo "Collecting heap profile..." curl -s "$SERVICE_URL/debug/pprof/heap" > "$OUTPUT_DIR/heap.prof" echo "Collecting 30-second CPU profile..." curl -s "$SERVICE_URL/debug/pprof/profile?seconds=30" > "$OUTPUT_DIR/cpu.prof" echo "Collecting goroutine profile..." curl -s "$SERVICE_URL/debug/pprof/goroutine" > "$OUTPUT_DIR/goroutine.prof" echo "Collecting mutex profile..." curl -s "$SERVICE_URL/debug/pprof/mutex" > "$OUTPUT_DIR/mutex.prof" echo "Collecting 5-second trace..." 
curl -s "$SERVICE_URL/debug/pprof/trace?seconds=5" > "$OUTPUT_DIR/trace.out" echo "Profiles collected to $OUTPUT_DIR" echo "" echo "Analyze with:" echo " go tool pprof $OUTPUT_DIR/heap.prof" echo " go tool pprof $OUTPUT_DIR/cpu.prof" echo " go tool trace $OUTPUT_DIR/trace.out" ``` **Usage:** ```bash chmod +x profile-production.sh ./profile-production.sh # Analyze results go tool pprof ./profiles/20240115-143022/heap.prof (pprof) top (pprof) web ``` ## Performance Analysis Workflow ### Step-by-Step: Identify Performance Bottleneck ```bash # 1. Collect CPU profile go tool pprof http://localhost:8080/debug/pprof/profile?seconds=60 # 2. View top functions (pprof) top # 3. Focus on problematic function (pprof) list functionName # 4. View as graph (pprof) web # 5. Compare with previous run (pprof) save baseline.prof # ... make optimization ... go tool pprof -base baseline.prof http://localhost:8080/debug/pprof/profile?seconds=60 (pprof) top ``` ## Memory Leak Diagnosis ```bash # 1. Take initial heap snapshot curl http://localhost:8080/debug/pprof/heap > heap1.prof # 2. Wait 5 minutes... sleep 300 # 3. Take second snapshot curl http://localhost:8080/debug/pprof/heap > heap2.prof # 4. Compare allocations go tool pprof -base heap1.prof heap2.prof (pprof) top # New allocations since first snapshot (pprof) list # Source lines ``` --- # net/http/pprof API Reference Source: https://pkg.go.dev/net/http/pprof ## Package Overview Package `pprof` serves runtime profiling data via HTTP in the format expected by the pprof visualization tool. Profiling endpoints are available at paths under `/debug/pprof/`. ## Installation ### Automatic Registration Simply import the package to register handlers with the default HTTP mux: ```go import _ "net/http/pprof" ``` This automatically installs handlers for all profiling endpoints. ### Custom Registration For advanced use cases, explicitly register handlers to a custom mux: ```go package main import ( "log" "net/http" "net/http/pprof" ) func main() { mux := http.NewServeMux() // Register specific handlers mux.HandleFunc("/debug/pprof/", pprof.Index) mux.HandleFunc("/debug/pprof/profile", pprof.Profile) mux.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) mux.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) mux.HandleFunc("/debug/pprof/trace", pprof.Trace) log.Fatal(http.ListenAndServe(":6060", mux)) } ``` ## Handler Functions ### Index ```go func Index(w http.ResponseWriter, r *http.Request) ``` Serves the pprof index page listing all available profiles or retrieves a named profile. **Behavior:** - When accessed without query parameter: Returns HTML page listing all available profiles - When called with profile name in query: Returns the named profile data **URL:** - `GET /debug/pprof/` - `GET /debug/pprof/` **Query Parameters:** - `debug=N` - `0` = binary format (default), `>0` = plaintext format **Returns:** - HTML list of profiles when accessed without parameters - Profile data in specified format when profile name is provided **Example:** ```bash # List all profiles curl http://localhost:6060/debug/pprof/ # Get goroutine profile (plaintext) curl "http://localhost:6060/debug/pprof/goroutine?debug=2" ``` ### Profile (CPU Profile Handler) ```go func Profile(w http.ResponseWriter, r *http.Request) ``` Responds with the pprof-formatted CPU profile. Profile data is collected for the duration specified by the `seconds` query parameter. 
**URL:** - `GET /debug/pprof/profile` **Query Parameters:** - `seconds=N` - Profiling duration in seconds (default: 30) **Returns:** - Binary pprof-formatted CPU profile data **Example:** ```bash # 30-second CPU profile (default) go tool pprof http://localhost:6060/debug/pprof/profile # 60-second CPU profile go tool pprof http://localhost:6060/debug/pprof/profile?seconds=60 # Download profile for offline analysis curl -o cpu.prof http://localhost:6060/debug/pprof/profile?seconds=30 go tool pprof cpu.prof ``` ### Trace ```go func Trace(w http.ResponseWriter, r *http.Request) ``` Responds with execution trace data in binary format. Available since Go 1.5. **URL:** - `GET /debug/pprof/trace` **Query Parameters:** - `seconds=N` - Trace duration in seconds (default: 1) **Returns:** - Binary trace format data **Example:** ```bash # 5-second execution trace curl -o trace.out http://localhost:6060/debug/pprof/trace?seconds=5 # Analyze with trace viewer go tool trace trace.out ``` ### Cmdline ```go func Cmdline(w http.ResponseWriter, r *http.Request) ``` Responds with the running program's command line with arguments separated by NUL bytes. **URL:** - `GET /debug/pprof/cmdline` **Returns:** - Command line arguments separated by NUL bytes **Example:** ```bash curl http://localhost:6060/debug/pprof/cmdline | tr '\0' '\n' ``` ### Symbol ```go func Symbol(w http.ResponseWriter, r *http.Request) ``` Maps program counters to function names. Used internally by pprof tools for symbol resolution. **URL:** - `GET /debug/pprof/symbol` **Returns:** - Symbol mapping in pprof format **Example:** ```bash go tool pprof http://localhost:6060/debug/pprof/profile?seconds=5 ``` The pprof tool automatically uses this endpoint for symbol resolution. ### Handler ```go func Handler(name string) http.Handler ``` Returns an HTTP handler for the named profile. Allows serving profiles with custom paths or settings. **Parameters:** - `name string` - Profile name (e.g., "heap", "goroutine", "mutex", "block", etc.) 
**Returns:** - http.Handler - Handler for the specified profile **Example:** ```go mux := http.NewServeMux() mux.Handle("/custom/heap", pprof.Handler("heap")) mux.Handle("/custom/goroutine", pprof.Handler("goroutine")) ``` ## Available Profiles The following profiles are automatically available through `pprof.Handler()`: | Profile | Description | Query Parameters | |---------|-------------|------------------| | `heap` | Memory allocations | `debug=N`, `gc=N` | | `allocs` | All past allocations | `debug=N`, `seconds=N` | | `goroutine` | Goroutine stack traces | `debug=N`, `seconds=N` | | `threadcreate` | OS thread creation | `debug=N`, `seconds=N` | | `block` | Synchronization blocking | `debug=N`, `seconds=N` | | `mutex` | Mutex contention | `debug=N`, `seconds=N` | | `profile` | CPU profile | `seconds=N` | | `trace` | Execution trace | `seconds=N` | ## Query Parameters Reference ### debug Parameter ``` debug=N ``` Controls output format: - `0` (default) - Binary pprof format - `>0` - Plaintext human-readable format **Example:** ```bash # Binary format (for pprof tool) curl http://localhost:6060/debug/pprof/heap > heap.prof # Plaintext format (human-readable) curl "http://localhost:6060/debug/pprof/heap?debug=1" ``` ### gc Parameter ``` gc=N ``` Run garbage collection before profiling (heap and allocs profiles only): - `0` (default) - Do not run GC - `>0` - Run GC before collecting profile **Example:** ```bash # Heap profile with GC run first curl "http://localhost:6060/debug/pprof/heap?gc=1" ``` ### seconds Parameter ``` seconds=N ``` Profiling duration in seconds (CPU, trace, and delta profiles): - Default: 30 for CPU, 1 for trace - Can be any positive integer **Example:** ```bash # 60-second CPU profile curl http://localhost:6060/debug/pprof/profile?seconds=60 # 5-second execution trace curl -o trace.out http://localhost:6060/debug/pprof/trace?seconds=5 ``` ## Common Usage Patterns ### Interactive Profiling ```bash # Start interactive pprof session with heap profile go tool pprof http://localhost:6060/debug/pprof/heap # Start interactive pprof session with CPU profile go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30 # View top CPU-consuming functions go tool pprof -top http://localhost:6060/debug/pprof/profile?seconds=30 ``` ### Goroutine Debugging ```bash # Get current goroutine count and stack traces go tool pprof http://localhost:6060/debug/pprof/goroutine # Export plaintext format curl "http://localhost:6060/debug/pprof/goroutine?debug=1" > goroutines.txt ``` ### Memory Analysis ```bash # Heap profile go tool pprof http://localhost:6060/debug/pprof/heap # All allocations go tool pprof http://localhost:6060/debug/pprof/allocs # With visualization go tool pprof -http=:8080 http://localhost:6060/debug/pprof/heap ``` ### Lock Contention ```bash # Mutex contention profile go tool pprof http://localhost:6060/debug/pprof/mutex # Block profile (synchronization blocking) go tool pprof http://localhost:6060/debug/pprof/block ``` ### Execution Tracing ```bash # Collect 5-second trace curl -o trace.out http://localhost:6060/debug/pprof/trace?seconds=5 # View in trace viewer go tool trace trace.out ``` ## Setup Examples ### Simple HTTP Server with pprof ```go package main import ( "log" "net/http" _ "net/http/pprof" ) func main() { // pprof handlers automatically registered http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("Hello World")) }) log.Println("Server running on :6060") log.Println("Profiles available at 
http://localhost:6060/debug/pprof/") log.Fatal(http.ListenAndServe(":6060", nil)) } ``` ### Custom Port and Path ```go package main import ( "log" "net/http" "net/http/pprof" ) func main() { mux := http.NewServeMux() // Serve application routes mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("Hello World")) }) // Register pprof handlers on custom port/path debugMux := http.NewServeMux() debugMux.HandleFunc("/", pprof.Index) debugMux.HandleFunc("/profile", pprof.Profile) debugMux.Handle("/heap", pprof.Handler("heap")) debugMux.Handle("/goroutine", pprof.Handler("goroutine")) debugMux.HandleFunc("/cmdline", pprof.Cmdline) debugMux.HandleFunc("/trace", pprof.Trace) // Serve public API on :8080 go func() { log.Fatal(http.ListenAndServe(":8080", mux)) }() // Serve debugging on :6060 log.Println("Profiler available at http://localhost:6060/") log.Fatal(http.ListenAndServe(":6060", debugMux)) } ``` ### Integrated with Default Mux ```go package main import ( "log" "net/http" _ "net/http/pprof" ) func init() { // pprof handlers automatically registered to DefaultMux // Available at: http://localhost:6060/debug/pprof/ } func main() { http.HandleFunc("/api/data", func(w http.ResponseWriter, r *http.Request) { // API handler w.Write([]byte("data")) }) log.Println("Server + pprof on :6060") log.Fatal(http.ListenAndServe(":6060", nil)) } ``` ## Combining with Production Monitoring ```go package main import ( "context" "log" "net/http" _ "net/http/pprof" "time" ) func main() { // Main application server (public) mux := http.NewServeMux() mux.HandleFunc("/api", handleAPI) // Debug server (pprof already registered to default mux) go func() { log.Println("Debug server on :6060") log.Fatal(http.ListenAndServe(":6060", nil)) }() // Main server log.Println("Server on :8080") log.Fatal(http.ListenAndServe(":8080", mux)) } func handleAPI(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) defer cancel() // API logic... w.Write([]byte("OK")) } ``` ## Performance Considerations - CPU profiling adds 2-5% overhead - Heap profiling adds minimal overhead (sampling-based) - Block and mutex profiles must be explicitly enabled - Only collect one profile at a time in production - Use fixed sampling windows (e.g., 30 seconds every 5 minutes) --- # runtime/pprof API Reference Source: https://pkg.go.dev/runtime/pprof ## Package Overview Package `pprof` writes runtime profiling data in the format expected by the pprof visualization tool. It enables CPU profiling, memory profiling, and custom profiling through stack traces. ## CPU Profiling Functions ### StartCPUProfile ```go func StartCPUProfile(w io.Writer) error ``` Enables CPU profiling, buffering profile output to the provided writer. Only one profile can be active at a time. Returns an error if profiling is already enabled. **Parameters:** - `w io.Writer` - Writer to buffer CPU profile output **Returns:** - Error if profiling is already enabled; nil on success **Example:** ```go f, err := os.Create("cpu.prof") if err != nil { log.Fatal(err) } defer f.Close() if err := pprof.StartCPUProfile(f); err != nil { log.Fatal("could not start CPU profile: ", err) } defer pprof.StopCPUProfile() // ... program runs and collects profile data ... ``` ### StopCPUProfile ```go func StopCPUProfile() ``` Stops the running CPU profile and waits for all writes to complete. Must be called after StartCPUProfile. 
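
A fixed sampling window can be implemented outside the process (as in the Example 9 shell script) or inside it. The following is a minimal in-process sketch that captures a 30-second CPU profile from the local debug endpoint every 5 minutes; the `captureCPUProfile` helper, the file-naming scheme, and the `:6060` address are illustrative, not part of either pprof package:

```go
package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
    _ "net/http/pprof"
    "os"
    "time"
)

// captureCPUProfile downloads one 30-second CPU profile from the local
// pprof endpoint and writes it to a timestamped file.
func captureCPUProfile() error {
    resp, err := http.Get("http://localhost:6060/debug/pprof/profile?seconds=30")
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    f, err := os.Create(fmt.Sprintf("cpu-%s.prof", time.Now().Format("20060102-150405")))
    if err != nil {
        return err
    }
    defer f.Close()

    _, err = io.Copy(f, resp.Body)
    return err
}

func main() {
    // Debug endpoints for this process
    go func() {
        log.Fatal(http.ListenAndServe(":6060", nil))
    }()

    // One 30-second window every 5 minutes
    for range time.Tick(5 * time.Minute) {
        if err := captureCPUProfile(); err != nil {
            log.Printf("profile capture failed: %v", err)
        }
    }
}
```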

---

# runtime/pprof API Reference

Source: https://pkg.go.dev/runtime/pprof

## Package Overview

Package `pprof` writes runtime profiling data in the format expected by the pprof visualization tool. It enables CPU profiling, memory profiling, and custom profiling through stack traces.

## CPU Profiling Functions

### StartCPUProfile

```go
func StartCPUProfile(w io.Writer) error
```

Enables CPU profiling, buffering profile output to the provided writer. Only one CPU profile can be active at a time; StartCPUProfile returns an error if profiling is already enabled.

**Parameters:**
- `w io.Writer` - Writer to buffer CPU profile output

**Returns:**
- Error if profiling is already enabled; nil on success

**Example:**

```go
f, err := os.Create("cpu.prof")
if err != nil {
    log.Fatal(err)
}
defer f.Close()

if err := pprof.StartCPUProfile(f); err != nil {
    log.Fatal("could not start CPU profile: ", err)
}
defer pprof.StopCPUProfile()

// ... program runs and collects profile data ...
```

### StopCPUProfile

```go
func StopCPUProfile()
```

Stops the running CPU profile and waits for all writes to complete. Must be called after StartCPUProfile.

**Example:**

```go
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
```

## Memory Profiling Functions

### WriteHeapProfile

```go
func WriteHeapProfile(w io.Writer) error
```

Shorthand for `Lookup("heap").WriteTo(w, 0)`. Writes a heap profile snapshot to the provided writer. Preserved for backwards compatibility.

**Parameters:**
- `w io.Writer` - Writer to buffer heap profile output

**Returns:**
- Error if write fails; nil on success

**Example:**

```go
f, err := os.Create("heap.prof")
if err != nil {
    log.Fatal(err)
}
defer f.Close()

if err := pprof.WriteHeapProfile(f); err != nil {
    log.Fatal("could not write heap profile: ", err)
}
```

## Profile Type

### Type Definition

```go
type Profile struct {
    // contains filtered or unexported fields
}
```

A Profile is a collection of stack traces showing the call sequences that led to instances of allocation/blocking/etc.

### Profile Functions

#### Lookup

```go
func Lookup(name string) *Profile
```

Returns the profile with the given name, or nil if no such profile exists.

**Predefined profile names:**
- `goroutine` - Stack traces of all current goroutines
- `heap` - Sampling of live object allocations
- `allocs` - Sampling of all past allocations
- `threadcreate` - Stack traces that led to the creation of new OS threads
- `block` - Stack traces that led to blocking on synchronization primitives
- `mutex` - Stack traces of holders of contended mutexes

**Parameters:**
- `name string` - Name of the profile

**Returns:**
- Pointer to Profile, or nil if name is not recognized

**Example:**

```go
heapProfile := pprof.Lookup("heap")
goroutineProfile := pprof.Lookup("goroutine")
```

#### NewProfile

```go
func NewProfile(name string) *Profile
```

Creates a new profile with the given name. Panics if a profile with that name already exists.

**Parameters:**
- `name string` - Unique name for the new profile

**Returns:**
- Pointer to the new Profile

**Panics:**
- If a profile with the given name already exists

**Example:**

```go
requestsProfile := pprof.NewProfile("requests")
```

#### Profiles

```go
func Profiles() []*Profile
```

Returns a slice of all known profiles, sorted by name.

**Returns:**
- Slice of pointers to all known profiles

**Example:**

```go
allProfiles := pprof.Profiles()
for _, profile := range allProfiles {
    fmt.Println(profile.Name())
}
```

### Profile Methods

#### Name

```go
func (p *Profile) Name() string
```

Returns the profile's name.

**Returns:**
- String name of the profile

**Example:**

```go
name := heapProfile.Name() // "heap"
```

#### Count

```go
func (p *Profile) Count() int
```

Returns the number of execution stacks currently in the profile.

**Returns:**
- Integer count of stacks

**Example:**

```go
count := goroutineProfile.Count()
fmt.Printf("Current goroutines: %d\n", count)
```

#### Add

```go
func (p *Profile) Add(value any, skip int)
```

Adds the current execution stack, associated with a value, to the profile. The caller can use `skip` to leave out frames of the profiling code itself.

**Parameters:**
- `value any` - Any value to associate with this stack
- `skip int` - Number of stack frames to skip; 0 begins the trace at the call to Add, 1 at the call site of the function that called Add

**Example:**

```go
type Request struct{}

var requestsProfile = pprof.NewProfile("requests")

func handleRequest(req *Request) {
    requestsProfile.Add(req, 1)
    defer requestsProfile.Remove(req)
    // ... handle request ...
}
```

#### Remove

```go
func (p *Profile) Remove(value any)
```

Removes the execution stack for a value from the profile.
Does nothing if the value is not in the profile. **Parameters:** - `value any` - The value to remove **Example:** ```go defer requestsProfile.Remove(req) ``` #### WriteTo ```go func (p *Profile) WriteTo(w io.Writer, debug int) error ``` Writes a pprof-formatted snapshot of the profile to the given writer. **Parameters:** - `w io.Writer` - Writer to output profile data - `debug int` - Debug level (0=binary format, >0=plaintext human-readable format) **Returns:** - Error if write fails; nil on success **Example:** ```go f, err := os.Create("goroutine.prof") if err != nil { log.Fatal(err) } defer f.Close() if err := goroutineProfile.WriteTo(f, 0); err != nil { log.Fatal("could not write profile: ", err) } ``` ## Label-Based Profiling (Go 1.9+) Labels allow associating profiling samples with additional context information, useful for grouping samples by request ID, user ID, etc. ### Label ```go func Label(ctx context.Context, key string) (string, bool) ``` Returns the label value for the given key in the context. The boolean indicates whether the label was present. **Parameters:** - `ctx context.Context` - Context to check for labels - `key string` - Label key to look up **Returns:** - `string` - Label value (empty if not present) - `bool` - True if label exists, false otherwise **Example:** ```go val, ok := pprof.Label(ctx, "user_id") if ok { fmt.Printf("User ID: %s\n", val) } ``` ### Labels ```go func Labels(args ...string) LabelSet ``` Creates a LabelSet from an even number of key-value string pairs. Panics if odd number of arguments. **Parameters:** - `args ...string` - Key-value pairs (k1, v1, k2, v2, ...) **Returns:** - LabelSet - New label set **Example:** ```go labels := pprof.Labels("user_id", "123", "request_id", "abc-def") ``` ### WithLabels ```go func WithLabels(ctx context.Context, labels LabelSet) context.Context ``` Returns a new context with the provided labels added. New goroutines spawned will inherit these labels. **Parameters:** - `ctx context.Context` - Base context - `labels LabelSet` - Labels to add **Returns:** - context.Context - New context with labels **Example:** ```go ctx := context.Background() labels := pprof.Labels("user_id", "123") newCtx := pprof.WithLabels(ctx, labels) ``` ### Do ```go func Do(ctx context.Context, labels LabelSet, f func(context.Context)) ``` Calls function f with the augmented label context. All profiling samples taken during f execution are tagged with the given labels. Any goroutines spawned from f will inherit the labels. **Parameters:** - `ctx context.Context` - Base context - `labels LabelSet` - Labels to apply - `f func(context.Context)` - Function to execute with labeled context **Example:** ```go labels := pprof.Labels("user_id", "123", "endpoint", "/api/users") pprof.Do(ctx, labels, func(ctx context.Context) { handleRequest(ctx, req) }) ``` ### ForLabels ```go func ForLabels(ctx context.Context, f func(key, value string) bool) ``` Invokes the provided function for each label in the context. The invoked function should return true to continue iteration or false to stop. **Parameters:** - `ctx context.Context` - Context containing labels - `f func(key, value string) bool` - Callback function for each label **Example:** ```go pprof.ForLabels(ctx, func(key, value string) bool { fmt.Printf("%s=%s\n", key, value) return true // continue }) ``` ### SetGoroutineLabels ```go func SetGoroutineLabels(ctx context.Context) ``` Sets the current goroutine's labels to match the labels in the context. 
Lower-level than Do(), used when you can't wrap the entire operation in Do(). **Parameters:** - `ctx context.Context` - Context with labels to apply **Example:** ```go ctx := pprof.WithLabels(context.Background(), pprof.Labels("request_id", "xyz")) pprof.SetGoroutineLabels(ctx) // Current goroutine now has "request_id" label ``` ## LabelSet Type ```go type LabelSet struct { // contains filtered or unexported fields } ``` A LabelSet is an immutable set of labels created by the Labels function. ## Complete Example: Custom Profiling with Labels ```go package main import ( "context" "fmt" "log" "os" "runtime/pprof" ) var customProfile = pprof.NewProfile("requests") func main() { // Write profile when done f, err := os.Create("custom.prof") if err != nil { log.Fatal(err) } defer f.Close() defer func() { if err := customProfile.WriteTo(f, 0); err != nil { log.Fatal("could not write profile: ", err) } }() // Process requests with labels ctx := context.Background() for i := 1; i <= 5; i++ { labels := pprof.Labels("request_id", fmt.Sprintf("req-%d", i)) pprof.Do(ctx, labels, func(ctx context.Context) { // Simulated request handling processRequest(ctx, i) }) } } func processRequest(ctx context.Context, id int) { // Request processing logic fmt.Printf("Processing request %d\n", id) } ``` Then analyze with pprof: ```bash go tool pprof custom.prof (pprof) top (pprof) list processRequest ```
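
Note that `pprof.Do` tags the samples collected by the runtime's own profilers (most visibly the CPU profile) with the labels; it does not add anything to the custom `requests` profile, so as written `custom.prof` contains no stacks because `Add` is never called. A minimal variation of `processRequest` that also records each request in the custom profile might look like this (the string key scheme is illustrative; any distinct value works):

```go
// Drop-in replacement for processRequest above: record every request in the
// custom profile. Entries stay until Remove is called, so the snapshot
// written when main exits contains one sample per request.
func processRequest(ctx context.Context, id int) {
    // skip=0 starts the recorded stack at this call site inside processRequest.
    customProfile.Add(fmt.Sprintf("req-%d", id), 0)
    fmt.Printf("Processing request %d\n", id)
}
```

With entries recorded this way, the `top` and `list processRequest` commands above have samples attributed to `processRequest` to show.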