[{"data":1,"prerenderedAt":11419},["ShallowReactive",2],{"search-sections-pipz":3,"nav-pipz":4414,"content-tree-pipz":4531,"footer-resources":4590,"content-/v1.0.7/guides/connector-selection":8716,"surround-/v1.0.7/guides/connector-selection":11416},[4,10,14,20,25,30,36,41,46,51,56,61,65,70,75,80,84,89,94,99,104,109,113,118,123,128,133,138,143,148,153,158,162,166,171,176,181,185,190,195,200,205,210,215,220,224,229,232,237,242,247,251,255,260,264,269,274,278,282,287,292,297,302,307,312,316,320,325,328,332,337,342,346,351,356,360,364,368,372,377,381,386,390,395,400,405,409,414,419,424,428,433,438,443,447,452,457,461,466,471,476,480,485,490,495,500,504,508,513,517,522,527,532,537,542,547,552,557,561,566,571,576,581,586,591,594,598,603,608,613,617,621,625,629,633,637,641,645,649,654,659,664,669,673,678,683,687,692,697,702,707,712,717,722,727,732,737,742,747,751,756,761,766,771,776,780,785,790,794,799,804,809,814,819,823,828,833,838,842,847,852,857,860,865,870,875,880,885,890,894,898,902,907,912,918,923,928,933,937,942,947,952,957,962,967,972,977,982,986,991,996,1000,1005,1010,1015,1020,1024,1029,1033,1038,1043,1048,1052,1057,1061,1066,1071,1076,1081,1086,1091,1096,1101,1106,1111,1116,1121,1126,1131,1136,1141,1146,1151,1156,1161,1166,1171,1176,1180,1185,1190,1195,1200,1205,1209,1214,1219,1224,1228,1233,1238,1243,1248,1253,1258,1263,1268,1272,1277,1281,1286,1291,1294,1299,1304,1309,1314,1319,1324,1328,1333,1338,1343,1348,1353,1358,1363,1367,1372,1376,1381,1386,1391,1396,1401,1406,1411,1416,1421,1426,1431,1435,1440,1445,1450,1455,1459,1464,1469,1474,1478,1483,1487,1492,1497,1501,1506,1511,1516,1521,1526,1531,1536,1541,1546,1551,1556,1560,1564,1568,1572,1577,1582,1586,1591,1596,1600,1605,1610,1614,1619,1623,1628,1633,1638,1642,1647,1651,1656,1660,1665,1670,1675,1679,1683,1688,1693,1697,1702,1707,1712,1717,1722,1727,1732,1737,1742,1747,1752,1757,1762,1767,1772,1776,1780,1785,1789,1794,1799,1804,1809,1814,1819,1822,1827,1832,1837,1842,1847,1852,1856,1861,1865,1869,1873,1876,1
881,1886,1891,1896,1901,1906,1910,1915,1919,1924,1927,1932,1937,1941,1946,1951,1956,1961,1966,1971,1975,1980,1985,1990,1995,2000,2003,2008,2013,2017,2022,2027,2031,2036,2041,2046,2051,2055,2060,2065,2070,2075,2080,2084,2089,2094,2098,2103,2108,2113,2118,2123,2127,2132,2136,2141,2146,2151,2156,2159,2164,2169,2174,2179,2183,2188,2193,2197,2201,2206,2211,2216,2221,2226,2231,2235,2240,2244,2249,2252,2257,2261,2266,2271,2276,2281,2286,2290,2295,2300,2304,2308,2311,2316,2321,2326,2331,2335,2340,2344,2348,2352,2356,2360,2364,2368,2372,2376,2380,2383,2388,2393,2398,2403,2407,2411,2416,2420,2424,2428,2432,2436,2440,2444,2448,2453,2457,2460,2465,2470,2475,2480,2484,2488,2492,2495,2500,2504,2508,2512,2516,2520,2524,2528,2532,2536,2540,2543,2548,2553,2558,2563,2568,2572,2577,2581,2585,2589,2592,2596,2600,2604,2608,2612,2616,2619,2624,2628,2632,2636,2640,2644,2648,2652,2656,2660,2664,2669,2674,2677,2682,2687,2692,2697,2702,2707,2712,2715,2720,2725,2730,2735,2739,2744,2749,2753,2757,2762,2767,2770,2775,2780,2785,2790,2795,2799,2803,2808,2812,2816,2820,2825,2829,2833,2837,2842,2845,2850,2855,2860,2864,2869,2874,2878,2882,2886,2891,2895,2898,2903,2908,2913,2918,2922,2927,2931,2935,2940,2944,2948,2952,2956,2960,2964,2968,2973,2978,2982,2986,2989,2994,2999,3003,3006,3011,3016,3019,3024,3029,3034,3039,3044,3049,3053,3058,3062,3066,3071,3076,3081,3086,3091,3095,3099,3103,3107,3112,3115,3120,3125,3130,3135,3140,3145,3149,3152,3157,3162,3167,3171,3175,3179,3183,3187,3191,3195,3199,3203,3207,3211,3214,3219,3224,3229,3234,3239,3244,3248,3253,3257,3261,3266,3270,3274,3278,3282,3286,3291,3296,3301,3306,3310,3315,3319,3322,3327,3332,3337,3342,3346,3351,3356,3361,3365,3368,3373,3378,3383,3388,3392,3397,3401,3405,3409,3413,3417,3421,3425,3429,3433,3438,3442,3447,3450,3455,3460,3465,3470,3474,3478,3482,3485,3490,3494,3498,3502,3506,3510,3515,3519,3523,3527,3532,3536,3540,3544,3549,3553,3557,3560,3564,3568,3572,3576,3580,3584,3588,3592,3596,3599,3604,3609,3614,3619,3624,3629,3633,3637,3641,3645,3
649,3653,3657,3660,3664,3667,3672,3677,3682,3686,3690,3694,3698,3702,3706,3710,3713,3718,3722,3727,3732,3737,3742,3747,3752,3756,3760,3764,3768,3772,3776,3780,3784,3787,3791,3794,3799,3804,3808,3812,3816,3819,3824,3829,3834,3839,3843,3847,3851,3855,3860,3864,3868,3871,3875,3879,3883,3887,3891,3895,3900,3904,3908,3912,3915,3920,3925,3930,3935,3940,3945,3949,3953,3958,3962,3967,3972,3977,3982,3987,3992,3997,4002,4005,4010,4015,4020,4025,4029,4034,4039,4043,4047,4052,4056,4060,4063,4067,4072,4076,4080,4084,4089,4094,4098,4102,4106,4110,4114,4118,4123,4127,4131,4134,4139,4144,4149,4154,4159,4164,4168,4172,4176,4180,4184,4188,4192,4195,4199,4203,4207,4211,4215,4220,4225,4229,4233,4236,4241,4246,4251,4256,4261,4266,4270,4274,4278,4282,4286,4289,4293,4297,4300,4304,4308,4312,4316,4320,4325,4329,4333,4337,4340,4345,4350,4355,4360,4363,4368,4373,4378,4383,4388,4393,4397,4400,4405,4410],{"id":5,"title":6,"titles":7,"content":8,"level":9},"/v1.0.7/overview","Overview",[],"Type-safe, composable data pipelines for Go",1,{"id":11,"title":6,"titles":12,"content":13,"level":9},"/v1.0.7/overview#overview",[],"Data processing in Go often means choosing between type safety and flexibility. pipz offers both: a single interface that everything implements, with zero reflection and full compile-time checking. type Chainable[T any] interface {\n    Process(context.Context, T) (T, error)\n    Identity() Identity\n    Schema() Node\n    Close() error\n} Implement it directly for custom processors. Use the provided wrappers for common patterns. Mix both approaches in the same pipeline. 
// Custom processor - full control\ntype RateLimiter[T any] struct {\n    limiter *rate.Limiter\n}\n\nfunc (r *RateLimiter[T]) Process(ctx context.Context, data T) (T, error) {\n    if err := r.limiter.Wait(ctx); err != nil {\n        return data, err\n    }\n    return data, nil\n}\n\n// Define identities upfront\nvar (\n    ValidateID  = pipz.NewIdentity(\"validate\", \"Validate order fields\")\n    EnrichID    = pipz.NewIdentity(\"enrich\", \"Add timestamp to order\")\n    OrderFlowID = pipz.NewIdentity(\"order-flow\", \"Process customer orders\")\n)\n\n// Built-in wrappers - convenience\nvalidate := pipz.Apply(ValidateID, validateOrder)\nenrich := pipz.Transform(EnrichID, addTimestamp)\n\n// Compose freely\npipeline := pipz.NewSequence(OrderFlowID,\n    validate,\n    &RateLimiter[Order]{limiter: limiter},\n    enrich,\n) Type-safe, minimal dependencies, panic-recovered by default.",{"id":15,"title":16,"titles":17,"content":18,"level":19},"/v1.0.7/overview#philosophy","Philosophy",[6],"pipz draws inspiration from functional composition: small, focused units that combine into complex behaviors. Any type implementing Chainable[T] can participate in a pipeline. This creates a uniform abstraction that spans your entire processing flow. 
// Define identities upfront\nvar (\n    ValidatePaymentID = pipz.NewIdentity(\"validate\", \"Validate payment details\")\n    ChargeCardID      = pipz.NewIdentity(\"charge\", \"Charge customer card\")\n    RetryID           = pipz.NewIdentity(\"retry\", \"Retry charge on failure\")\n    GatewayID         = pipz.NewIdentity(\"gateway\", \"Try multiple payment gateways\")\n    BoundedID         = pipz.NewIdentity(\"bounded\", \"Enforce processing timeout\")\n    PaymentID         = pipz.NewIdentity(\"payment\", \"Complete payment processing flow\")\n    ChargeTimeoutID   = pipz.NewIdentity(\"charge-timeout\", \"Bound total charge time\")\n    MultiGatewayID    = pipz.NewIdentity(\"multi-gateway\", \"Failover between gateways\")\n    PrimaryID         = pipz.NewIdentity(\"primary\", \"Retry primary gateway\")\n)\n\n// Processors transform data\nvalidate := pipz.Apply(ValidatePaymentID, validatePayment)\ncharge := pipz.Apply(ChargeCardID, chargeCard)\n\n// Connectors compose processors\nwithRetry := pipz.NewRetry(RetryID, charge, 3)\nwithFallback := pipz.NewFallback(GatewayID, primaryCharge, backupCharge)\nwithTimeout := pipz.NewTimeout(BoundedID, pipeline, 5*time.Second)\n\n// Everything composes the same way\nrobust := pipz.NewSequence(PaymentID,\n    validate,\n    pipz.NewTimeout(ChargeTimeoutID,\n        pipz.NewFallback(MultiGatewayID,\n            pipz.NewRetry(PrimaryID, primaryCharge, 3),\n            backupCharge,\n        ),\n        10*time.Second,\n    ),\n) Three levels of composition, one interface, complete type safety throughout.",2,{"id":21,"title":22,"titles":23,"content":24,"level":19},"/v1.0.7/overview#capabilities","Capabilities",[6],"A uniform interface opens possibilities: Resilience - Retry failed operations, fall back to alternatives, enforce timeouts. Layer these patterns without changing your processing logic. Parallelism - Run processors concurrently with Concurrent, bound parallelism with WorkerPool, race for first success with Race. 
Routing - Direct data through different paths with Switch. Filter conditionally with Filter. Contest for the first result meeting criteria. Observability - Emit typed signals on state changes. CircuitBreaker, RateLimiter, and WorkerPool broadcast their state for monitoring and alerting. pipz provides the composition layer. What you build on top is up to you.",{"id":26,"title":27,"titles":28,"content":29,"level":19},"/v1.0.7/overview#priorities","Priorities",[6],"",{"id":31,"title":32,"titles":33,"content":34,"level":35},"/v1.0.7/overview#type-safety","Type Safety",[6,27],"Generics eliminate runtime type assertions. Data flows through pipelines with compile-time checking at every step. // Define identities upfront\nvar (\n    ProcessID  = pipz.NewIdentity(\"process\", \"Process customer orders\")\n    ValidateID = pipz.NewIdentity(\"validate\", \"Validate order fields\")\n)\n\n// Compile-time type checking\npipeline := pipz.NewSequence[Order](ProcessID,\n    pipz.Apply(ValidateID, func(_ context.Context, o Order) (Order, error) {\n        // o is Order, not interface{}\n        return o, nil\n    }),\n)\n\n// Type mismatch caught at compile time, not runtime\n// pipeline.Process(ctx, \"not an order\") // Won't compile",3,{"id":37,"title":38,"titles":39,"content":40,"level":35},"/v1.0.7/overview#composability","Composability",[6,27],"Small processors combine into complex behaviors. Each connector serves one purpose and combines cleanly with others. // Define identities upfront\nvar (\n    BoundedID      = pipz.NewIdentity(\"bounded\", \"Enforce overall timeout\")\n    WithRetryID    = pipz.NewIdentity(\"with-retry\", \"Retry failed operations\")\n    WithFallbackID = pipz.NewIdentity(\"with-fallback\", \"Fallback on error\")\n)\n\n// Layer resilience patterns\nresilient := pipz.NewTimeout(BoundedID,\n    pipz.NewRetry(WithRetryID,\n        pipz.NewFallback(WithFallbackID, primary, backup),\n        3,\n    ),\n    5*time.Second,\n) No configuration objects. 
No builder patterns. Just composition.",{"id":42,"title":43,"titles":44,"content":45,"level":35},"/v1.0.7/overview#error-context","Error Context",[6,27],"Errors carry their full path through the pipeline. Know exactly where failures occurred, how long operations took, and what data was being processed. result, err := pipeline.Process(ctx, order)\nif err != nil {\n    var pipeErr *pipz.Error[Order]\n    if errors.As(err, &pipeErr) {\n        // Path is []Identity - extract names for display\n        var path []string\n        for _, id := range pipeErr.Path {\n            path = append(path, id.Name())\n        }\n        fmt.Printf(\"Failed at: %s\\n\", strings.Join(path, \" -> \"))\n        fmt.Printf(\"Duration: %v\\n\", pipeErr.Duration)\n        fmt.Printf(\"Input: %+v\\n\", pipeErr.InputData)\n    }\n}\n// Output: Failed at: payment -> gateway -> primary\n//         Duration: 2.3s\n//         Input: Order{ID: \"ORD-123\", Total: 99.99}",{"id":47,"title":48,"titles":49,"content":50,"level":35},"/v1.0.7/overview#errors-as-data","Errors as Data",[6,27],"Most frameworks treat errors as exceptions. pipz treats them as data that flows through pipelines. Build error recovery using the same tools you use for regular processing. 
// Define identities upfront\nvar (\n    RecoverID    = pipz.NewIdentity(\"recover\", \"Error recovery pipeline\")\n    CategorizeID = pipz.NewIdentity(\"categorize\", \"Categorize error type\")\n    RouteID      = pipz.NewIdentity(\"route\", \"Route by severity\")\n    OrderFlowID  = pipz.NewIdentity(\"order-flow\", \"Order processing with error handling\")\n)\n\n// Error recovery pipeline - same patterns, same composition\nerrorHandler := pipz.NewSequence[*pipz.Error[Order]](RecoverID,\n    pipz.Transform(CategorizeID, categorizeError),\n    pipz.NewSwitch(RouteID, routeBySeverity),\n)\n\n// Attach to any pipeline\nrobust := pipz.NewHandle(OrderFlowID, mainPipeline, errorHandler)",{"id":52,"title":53,"titles":54,"content":55,"level":35},"/v1.0.7/overview#safety","Safety",[6,27],"Panics are recovered automatically with security-focused sanitization. One misbehaving processor won't crash your system or leak sensitive data in error messages. // Panics become errors, automatically\nresult, err := pipeline.Process(ctx, data)\n// err contains sanitized panic info if a processor panicked html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: 
var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}",{"id":57,"title":58,"titles":59,"content":60,"level":9},"/v1.0.7/learn/quickstart","Getting Started with pipz",[],"Build your first type-safe data pipeline in 10 minutes with practical examples and best practices",{"id":62,"title":58,"titles":63,"content":64,"level":9},"/v1.0.7/learn/quickstart#getting-started-with-pipz",[],"Build your first data pipeline in 10 minutes.",{"id":66,"title":67,"titles":68,"content":69,"level":19},"/v1.0.7/learn/quickstart#what-is-pipz","What is pipz?",[58],"pipz is a Go library for building type-safe, composable data pipelines. 
Think of it as LEGO blocks for data processing - small, focused pieces that connect together to solve complex problems.",{"id":71,"title":72,"titles":73,"content":74,"level":19},"/v1.0.7/learn/quickstart#installation","Installation",[58],"go get github.com/zoobzio/pipz Requires Go 1.21+ for generics support.",{"id":76,"title":77,"titles":78,"content":79,"level":19},"/v1.0.7/learn/quickstart#your-first-pipeline","Your First Pipeline",[58],"Let's build a simple pipeline that processes user registration: package main\n\nimport (\n    \"context\"\n    \"errors\"\n    \"fmt\"\n    \"strings\"\n    \"time\"\n\n    \"github.com/zoobzio/pipz\"\n)\n\n// Our data structure\ntype User struct {\n    Email    string\n    Username string\n    Age      int\n    Verified bool\n}\n\nfunc main() {\n    // Define identities upfront\n    var (\n        RegistrationID = pipz.NewIdentity(\"registration\", \"User registration pipeline\")\n        ValidateID     = pipz.NewIdentity(\"validate\", \"Validates user input fields\")\n        NormalizeID    = pipz.NewIdentity(\"normalize\", \"Normalizes user data\")\n        LogID          = pipz.NewIdentity(\"log\", \"Logs successful user registration\")\n    )\n\n    // Create a pipeline with 3 steps\n    pipeline := pipz.NewSequence[User](RegistrationID,\n        // Step 1: Validate\n        pipz.Apply(ValidateID, validateUser),\n\n        // Step 2: Normalize\n        pipz.Transform(NormalizeID, normalizeUser),\n\n        // Step 3: Log\n        pipz.Effect(LogID, logUser),\n    )\n\n    // Process a user\n    user := User{\n        Email:    \"JOHN.DOE@EXAMPLE.COM\",\n        Username: \"johndoe123\",\n        Age:      25,\n    }\n\n    result, err := pipeline.Process(context.Background(), user)\n    if err != nil {\n        fmt.Printf(\"Pipeline failed: %v\\n\", err)\n        return\n    }\n\n    fmt.Printf(\"Processed user: %+v\\n\", result)\n}\n\n// Validation can fail\nfunc validateUser(ctx context.Context, u User) (User, error) {\n    
if u.Email == \"\" {\n        return u, errors.New(\"email required\")\n    }\n    if u.Age \u003C 18 {\n        return u, errors.New(\"must be 18 or older\")\n    }\n    return u, nil\n}\n\n// Transformation can't fail\nfunc normalizeUser(ctx context.Context, u User) User {\n    u.Email = strings.ToLower(u.Email)\n    u.Verified = true\n    return u\n}\n\n// Side effect (logging)\nfunc logUser(ctx context.Context, u User) error {\n    fmt.Printf(\"User registered: %s\\n\", u.Email)\n    return nil\n}",{"id":81,"title":82,"titles":83,"content":29,"level":19},"/v1.0.7/learn/quickstart#understanding-the-building-blocks","Understanding the Building Blocks",[58],{"id":85,"title":86,"titles":87,"content":88,"level":35},"/v1.0.7/learn/quickstart#processors-transform-your-data","Processors: Transform Your Data",[58,82],"pipz provides different processor types for different needs: // Define identities upfront\nvar (\n    UpperID = pipz.NewIdentity(\"upper\", \"Converts strings to uppercase\")\n    ParseID = pipz.NewIdentity(\"parse\", \"Parses JSON data\")\n    LogID   = pipz.NewIdentity(\"log\", \"Logs data processing\")\n)\n\n// Transform: Pure functions that can't fail\nupperCase := pipz.Transform(UpperID, func(ctx context.Context, s string) string {\n    return strings.ToUpper(s)\n})\n\n// Apply: Operations that can fail\nparse := pipz.Apply(ParseID, func(ctx context.Context, s string) (Data, error) {\n    var data Data\n    err := json.Unmarshal([]byte(s), &data)\n    return data, err\n})\n\n// Effect: Side effects without modifying data\nlog := pipz.Effect(LogID, func(ctx context.Context, d Data) error {\n    fmt.Printf(\"Processing: %+v\\n\", d)\n    return nil\n})",{"id":90,"title":91,"titles":92,"content":93,"level":35},"/v1.0.7/learn/quickstart#connectors-control-the-flow","Connectors: Control the Flow",[58,82],"Connectors determine how processors are executed: // Define identities upfront\nvar (\n    StepsID     = pipz.NewIdentity(\"steps\", \"Sequential 
processing\")\n    ParallelID  = pipz.NewIdentity(\"parallel\", \"Parallel processing\")\n    ResilientID = pipz.NewIdentity(\"resilient\", \"Resilient processing with fallback\")\n    RouterID    = pipz.NewIdentity(\"router\", \"Routes data by type\")\n)\n\n// Sequence: Run steps in order\nsequential := pipz.NewSequence[Data](StepsID, step1, step2, step3)\n\n// Concurrent: Run in parallel (requires Clone())\nparallel := pipz.NewConcurrent[Data](ParallelID, task1, task2, task3)\n\n// Fallback: Try primary, use backup on failure\nresilient := pipz.NewFallback[Data](ResilientID, primary, backup)\n\n// Switch: Route based on conditions\nrouter := pipz.NewSwitch[Data](RouterID, getType).\n    AddRoute(\"typeA\", processTypeA).\n    AddRoute(\"typeB\", processTypeB)",{"id":95,"title":96,"titles":97,"content":98,"level":19},"/v1.0.7/learn/quickstart#building-a-real-world-pipeline","Building a Real-World Pipeline",[58],"Let's create a more realistic example - an order processing pipeline with validation, enrichment, and error handling: package main\n\nimport (\n    \"context\"\n    \"errors\"\n    \"fmt\"\n    \"time\"\n\n    \"github.com/zoobzio/pipz\"\n)\n\ntype Order struct {\n    ID         string\n    CustomerID string\n    Items      []Item\n    Total      float64\n    Status     string\n    CreatedAt  time.Time\n}\n\ntype Item struct {\n    ProductID string\n    Quantity  int\n    Price     float64\n}\n\n// Implement Clone for concurrent processing\nfunc (o Order) Clone() Order {\n    items := make([]Item, len(o.Items))\n    copy(items, o.Items)\n    return Order{\n        ID:         o.ID,\n        CustomerID: o.CustomerID,\n        Items:      items,\n        Total:      o.Total,\n        Status:     o.Status,\n        CreatedAt:  o.CreatedAt,\n    }\n}\n\nfunc createOrderPipeline() pipz.Chainable[Order] {\n    // Define identities upfront\n    var (\n        OrderProcessingID = pipz.NewIdentity(\"order-processing\", \"Complete order processing pipeline\")\n      
  ValidateID        = pipz.NewIdentity(\"validate\", \"Validates order data\")\n        CalculateTotalID  = pipz.NewIdentity(\"calculate-total\", \"Calculates order total\")\n        AddCustomerDataID = pipz.NewIdentity(\"add-customer-data\", \"Enriches order with customer data\")\n        CheckInventoryID  = pipz.NewIdentity(\"check-inventory\", \"Checks product inventory\")\n        ProcessPaymentID  = pipz.NewIdentity(\"process-payment\", \"Processes payment\")\n        NotificationsID   = pipz.NewIdentity(\"notifications\", \"Sends notifications\")\n        EmailID           = pipz.NewIdentity(\"email\", \"Sends email confirmation\")\n        SMSID             = pipz.NewIdentity(\"sms\", \"Sends SMS notification\")\n        AnalyticsID       = pipz.NewIdentity(\"analytics\", \"Tracks order metrics\")\n        CompleteID        = pipz.NewIdentity(\"complete\", \"Marks order as completed\")\n    )\n\n    return pipz.NewSequence[Order](OrderProcessingID,\n        // Validation phase\n        pipz.Apply(ValidateID, validateOrder),\n\n        // Enrichment phase\n        pipz.Transform(CalculateTotalID, calculateTotal),\n        pipz.Enrich(AddCustomerDataID, enrichWithCustomerData),\n\n        // Processing phase\n        pipz.Apply(CheckInventoryID, checkInventory),\n        pipz.Apply(ProcessPaymentID, processPayment),\n\n        // Parallel notifications\n        pipz.NewConcurrent[Order](NotificationsID,\n            pipz.Effect(EmailID, sendEmailConfirmation),\n            pipz.Effect(SMSID, sendSMSNotification),\n            pipz.Effect(AnalyticsID, trackOrderMetrics),\n        ),\n\n        // Final status update\n        pipz.Transform(CompleteID, func(ctx context.Context, o Order) Order {\n            o.Status = \"completed\"\n            return o\n        }),\n    )\n}\n\nfunc validateOrder(ctx context.Context, o Order) (Order, error) {\n    if len(o.Items) == 0 {\n        return o, errors.New(\"order must have items\")\n    }\n    if o.CustomerID == \"\" 
{\n        return o, errors.New(\"customer ID required\")\n    }\n    return o, nil\n}\n\nfunc calculateTotal(ctx context.Context, o Order) Order {\n    total := 0.0\n    for _, item := range o.Items {\n        total += item.Price * float64(item.Quantity)\n    }\n    o.Total = total\n    return o\n}\n\nfunc enrichWithCustomerData(ctx context.Context, o Order) (Order, error) {\n    // This is optional - won't fail the pipeline\n    customer, err := fetchCustomer(o.CustomerID)\n    if err != nil {\n        // Log but continue\n        fmt.Printf(\"Could not enrich with customer data: %v\\n\", err)\n        return o, err // Enrich logs but doesn't fail\n    }\n    // Add customer tier for discounts, etc.\n    o.Status = fmt.Sprintf(\"processing-%s\", customer.Tier)\n    return o, nil\n}\n\nfunc checkInventory(ctx context.Context, o Order) (Order, error) {\n    for _, item := range o.Items {\n        available, err := getInventoryCount(item.ProductID)\n        if err != nil {\n            return o, fmt.Errorf(\"inventory check failed: %w\", err)\n        }\n        if available \u003C item.Quantity {\n            return o, fmt.Errorf(\"insufficient inventory for %s\", item.ProductID)\n        }\n    }\n    return o, nil\n}\n\nfunc processPayment(ctx context.Context, o Order) (Order, error) {\n    // Process payment...\n    fmt.Printf(\"Processing payment of $%.2f\\n\", o.Total)\n    return o, nil\n}\n\nfunc sendEmailConfirmation(ctx context.Context, o Order) error {\n    fmt.Printf(\"Sending email for order %s\\n\", o.ID)\n    return nil\n}\n\nfunc sendSMSNotification(ctx context.Context, o Order) error {\n    fmt.Printf(\"Sending SMS for order %s\\n\", o.ID)\n    return nil\n}\n\nfunc trackOrderMetrics(ctx context.Context, o Order) error {\n    fmt.Printf(\"Tracking metrics for order %s: $%.2f\\n\", o.ID, o.Total)\n    return nil\n}",{"id":100,"title":101,"titles":102,"content":103,"level":19},"/v1.0.7/learn/quickstart#adding-resilience","Adding 
Resilience",[58],"Real-world systems need error handling, retries, and timeouts: func createResilientPipeline() pipz.Chainable[Order] {\n    // Define identities upfront\n    var (\n        TimeoutProtectionID = pipz.NewIdentity(\"timeout-protection\", \"Protects against slow operations\")\n        RetryOnFailureID    = pipz.NewIdentity(\"retry-on-failure\", \"Retries failed operations\")\n        WithFallbackID      = pipz.NewIdentity(\"with-fallback\", \"Provides fallback processing\")\n        FallbackID          = pipz.NewIdentity(\"fallback\", \"Fallback order processing\")\n    )\n\n    // Basic pipeline\n    basicPipeline := createOrderPipeline()\n\n    // Add resilience layers\n    return pipz.NewTimeout(TimeoutProtectionID,\n        pipz.NewRetry(RetryOnFailureID,\n            pipz.NewFallback(WithFallbackID,\n                basicPipeline,\n                pipz.Apply(FallbackID, processFallbackOrder),\n            ),\n            3, // Retry up to 3 times\n        ),\n        30*time.Second, // Overall timeout\n    )\n}\n\nfunc processFallbackOrder(ctx context.Context, o Order) (Order, error) {\n    // Simplified processing for fallback\n    o.Status = \"pending-manual-review\"\n    fmt.Printf(\"Order %s sent for manual review\\n\", o.ID)\n    return o, nil\n}",{"id":105,"title":106,"titles":107,"content":108,"level":19},"/v1.0.7/learn/quickstart#error-handling","Error Handling",[58],"pipz provides rich error information: func handlePipelineError(err error) {\n    var pipeErr *pipz.Error[Order]\n    if errors.As(err, &pipeErr) {\n        // Path is []Identity - get the last stage name\n        if len(pipeErr.Path) > 0 {\n            fmt.Printf(\"Pipeline failed at: %s\\n\", pipeErr.Path[len(pipeErr.Path)-1].Name())\n        }\n        fmt.Printf(\"Error: %v\\n\", pipeErr.Err)\n        fmt.Printf(\"Order state at failure: %+v\\n\", pipeErr.InputData)\n\n        if pipeErr.Timeout {\n            fmt.Println(\"Failure was due to timeout\")\n        }\n    
}\n}\n\nfunc main() {\n    pipeline := createResilientPipeline()\n\n    order := Order{\n        ID:         \"ORD-123\",\n        CustomerID: \"CUST-456\",\n        Items: []Item{\n            {ProductID: \"PROD-1\", Quantity: 2, Price: 29.99},\n            {ProductID: \"PROD-2\", Quantity: 1, Price: 49.99},\n        },\n        CreatedAt: time.Now(),\n    }\n\n    result, err := pipeline.Process(context.Background(), order)\n    if err != nil {\n        handlePipelineError(err)\n        return\n    }\n\n    fmt.Printf(\"Order processed successfully: %+v\\n\", result)\n}",{"id":110,"title":111,"titles":112,"content":29,"level":19},"/v1.0.7/learn/quickstart#advanced-patterns","Advanced Patterns",[58],{"id":114,"title":115,"titles":116,"content":117,"level":35},"/v1.0.7/learn/quickstart#conditional-processing","Conditional Processing",[58,111],"// Define identities upfront\nvar (\n    ValueRouterID = pipz.NewIdentity(\"value-router\", \"Routes orders by value\")\n)\n\n// Route orders based on value\nvalueRouter := pipz.NewSwitch[Order](ValueRouterID,\n    func(ctx context.Context, o Order) string {\n        if o.Total > 1000 {\n            return \"high-value\"\n        }\n        if o.Total > 100 {\n            return \"standard\"\n        }\n        return \"low-value\"\n    },\n).\nAddRoute(\"high-value\", highValuePipeline).\nAddRoute(\"standard\", standardPipeline).\nAddRoute(\"low-value\", lowValuePipeline)",{"id":119,"title":120,"titles":121,"content":122,"level":35},"/v1.0.7/learn/quickstart#rate-limiting","Rate Limiting",[58,111],"// Define identities upfront\nvar (\n    APILimitID  = pipz.NewIdentity(\"api-limit\", \"Rate limits API calls\")\n    ProtectedID = pipz.NewIdentity(\"protected\", \"Protected API calls\")\n    APICallID   = pipz.NewIdentity(\"api-call\", \"Calls external API\")\n)\n\n// Protect external APIs\nvar apiLimiter = pipz.NewRateLimiter[Order](APILimitID, 100, 10)\n\nprotectedAPI := pipz.NewSequence[Order](ProtectedID,\n    
apiLimiter,\n    pipz.Apply(APICallID, callExternalAPI),\n)",{"id":124,"title":125,"titles":126,"content":127,"level":35},"/v1.0.7/learn/quickstart#circuit-breaking","Circuit Breaking",[58,111],"// Define identities upfront\nvar (\n    BreakerID = pipz.NewIdentity(\"breaker\", \"Protects against cascading failures\")\n)\n\n// Prevent cascading failures\ncircuitBreaker := pipz.NewCircuitBreaker[Order](BreakerID,\n    externalService,\n    5,                  // Open after 5 failures\n    30*time.Second,     // Try recovery after 30s\n)",{"id":129,"title":130,"titles":131,"content":132,"level":19},"/v1.0.7/learn/quickstart#testing-your-pipelines","Testing Your Pipelines",[58],"Pipelines are easy to test: func TestOrderPipeline(t *testing.T) {\n    pipeline := createOrderPipeline()\n\n    // Test valid order\n    validOrder := Order{\n        ID:         \"TEST-1\",\n        CustomerID: \"CUST-1\",\n        Items:      []Item{{ProductID: \"P1\", Quantity: 1, Price: 10.00}},\n    }\n\n    result, err := pipeline.Process(context.Background(), validOrder)\n    assert.NoError(t, err)\n    assert.Equal(t, \"completed\", result.Status)\n    assert.Equal(t, 10.00, result.Total)\n\n    // Test invalid order\n    invalidOrder := Order{ID: \"TEST-2\"} // Missing customer ID\n\n    _, err = pipeline.Process(context.Background(), invalidOrder)\n    assert.Error(t, err)\n    assert.Contains(t, err.Error(), \"customer ID required\")\n}",{"id":134,"title":135,"titles":136,"content":137,"level":19},"/v1.0.7/learn/quickstart#best-practices","Best Practices",[58],"Define identities upfront as package-level variables // Create reusable identity constants\nvar (\n    StageValidate = pipz.NewIdentity(\"validate\", \"Validates input data\")\n    StageEnrich   = pipz.NewIdentity(\"enrich\", \"Enriches with metadata\")\n    StageProcess  = pipz.NewIdentity(\"process\", \"Processes the data\")\n) Implement Clone() properly for concurrent processing func (d Data) Clone() Data {\n    // Deep 
copy all reference types\n    newSlice := make([]Item, len(d.Items))\n    copy(newSlice, d.Items)\n    return Data{Items: newSlice}\n} Respect context cancellation func slowOperation(ctx context.Context, data Data) (Data, error) {\n    select {\n    case \u003C-ctx.Done():\n        return data, ctx.Err()\n    case result := \u003C-doWork(data):\n        return result, nil\n    }\n} Create singletons for stateful connectors // Define identities upfront\nvar (\n    APIID = pipz.NewIdentity(\"api\", \"API rate limiter\")\n)\n\n// RIGHT - Shared instance\nvar rateLimiter = pipz.NewRateLimiter[Data](APIID, 100, 10)\n\n// WRONG - New instance each time\nfunc process(data Data) {\n    limiter := pipz.NewRateLimiter[Data](APIID, 100, 10) // Don't do this!\n}",{"id":139,"title":140,"titles":141,"content":142,"level":19},"/v1.0.7/learn/quickstart#next-steps","Next Steps",[58],"Now that you understand the basics: Explore the Core Concepts for deeper understandingCheck the Cookbook for real-world recipesBrowse the API Reference for detailed documentation",{"id":144,"title":145,"titles":146,"content":147,"level":19},"/v1.0.7/learn/quickstart#common-questions","Common Questions",[58],"Q: When should I use pipz instead of regular Go code?\nA: Use pipz when you need composable, reusable data processing with good error handling and built-in patterns like retry, circuit breaking, and rate limiting. Q: How does pipz compare to other pipeline libraries?\nA: pipz focuses on type safety, simplicity, and composability. It's lighter than workflow engines like Temporal but more structured than basic function chaining. Q: Can I use pipz for streaming data?\nA: pipz processes one item at a time. For streaming, wrap your stream processing in pipz processors. 
Q: How do I handle errors in the middle of a pipeline?\nA: Use Fallback for recovery, Handle for cleanup, or check the Safety and Reliability guide.",{"id":149,"title":150,"titles":151,"content":152,"level":19},"/v1.0.7/learn/quickstart#getting-help","Getting Help",[58],"Check the Troubleshooting guideOpen an Issue on GitHub Happy pipelining! html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html pre.shiki code .sSYET, html code.shiki 
.sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":154,"title":155,"titles":156,"content":157,"level":9},"/v1.0.7/learn/introduction","Introduction to pipz",[],"Type-safe, composable data pipelines with Go generics for robust and maintainable data processing",{"id":159,"title":155,"titles":160,"content":161,"level":9},"/v1.0.7/learn/introduction#introduction-to-pipz",[],"pipz is a Go library for building type-safe, composable data pipelines. It leverages Go's generics to provide compile-time type safety while offering a simple, functional API for complex data transformations.",{"id":163,"title":164,"titles":165,"content":29,"level":19},"/v1.0.7/learn/introduction#why-pipz","Why pipz?",[155],{"id":167,"title":168,"titles":169,"content":170,"level":35},"/v1.0.7/learn/introduction#the-problem","The Problem",[155,164],"Building data processing pipelines in Go often involves: Writing boilerplate code for error handlingManaging complex control flow (retries, timeouts, fallbacks)Dealing with interface{} and runtime type assertionsDifficulty in testing individual componentsHard-to-reuse processing logic",{"id":172,"title":173,"titles":174,"content":175,"level":35},"/v1.0.7/learn/introduction#the-solution","The Solution",[155,164],"pipz provides: Type Safety: Full compile-time type checking with genericsComposability: Small, reusable components that combine easilyError Handling: Built-in patterns for retries, fallbacks, and recoveryTestability: Each component is independently testablePerformance: Optimized design with minimal allocations",{"id":177,"title":178,"titles":179,"content":180,"level":19},"/v1.0.7/learn/introduction#core-philosophy","Core Philosophy",[155],"pipz follows these principles: Interface-First Design: Everything implements Chainable[T] - a single, simple interfaceYour Code, Your Way: Implement the interface directly or use our convenience wrappersComposition over 
Configuration: Build complex behavior by combining simple piecesType Safety First: No interface{}, no runtime type assertionsErrors are Values: Explicit error handling at every stepContext Awareness: Full support for cancellation and timeouts",{"id":182,"title":183,"titles":184,"content":29,"level":19},"/v1.0.7/learn/introduction#key-concepts","Key Concepts",[155],{"id":186,"title":187,"titles":188,"content":189,"level":35},"/v1.0.7/learn/introduction#the-chainable-interface","The Chainable Interface",[155,183],"The foundation of pipz - everything implements this simple interface: type Chainable[T any] interface {\n    Process(context.Context, T) (T, error)\n    Identity() Identity\n    Schema() Node\n    Close() error\n} Any type implementing this interface can be used in a pipeline. This gives you complete flexibility: Implement it directly for custom processorsUse the provided wrapper functions for common patternsMix both approaches in the same pipeline",{"id":191,"title":192,"titles":193,"content":194,"level":35},"/v1.0.7/learn/introduction#processors","Processors",[155,183],"The atomic units that transform data. You can create them by: Direct Implementation: Implement Chainable[T] for full controlWrapper Functions: Use Transform, Apply, Effect, etc. 
for convenience",{"id":196,"title":197,"titles":198,"content":199,"level":35},"/v1.0.7/learn/introduction#connectors","Connectors",[155,183],"Mutable components that combine any Chainable[T] implementations into more complex behaviors: NewSequence: Run processors in orderNewSwitch: Route to different processors based on conditionsNewConcurrent: Run multiple processors in parallelNewRace: Use the first successful resultAnd many more...",{"id":201,"title":202,"titles":203,"content":204,"level":35},"/v1.0.7/learn/introduction#pipelines","Pipelines",[155,183],"Managed sequences of processors with introspection and modification capabilities.",{"id":206,"title":207,"titles":208,"content":209,"level":19},"/v1.0.7/learn/introduction#use-cases","Use Cases",[155],"pipz excels at: ETL (Extract, Transform, Load) pipelinesAPI request/response processingEvent stream processingPayment processing with failoverContent moderation pipelinesData validation workflowsMicroservice orchestration",{"id":211,"title":212,"titles":213,"content":214,"level":19},"/v1.0.7/learn/introduction#what-makes-pipz-different","What Makes pipz Different?",[155],"Unlike traditional pipeline libraries, pipz: Uses Go generics for complete type safetyRequires no code generation or reflectionHas minimal external dependencies (clockz, capitan, uuid)Provides both functional and object-oriented APIsIncludes battle-tested patterns (retry, timeout, fallback)Returns rich error context showing the exact failure pathSupports both declarative and dynamic pipeline constructionTreats errors as data flowing through pipelines - use the same Switch, Concurrent, Sequence patterns for sophisticated error recovery",{"id":216,"title":217,"titles":218,"content":219,"level":19},"/v1.0.7/learn/introduction#a-unique-approach-to-error-handling","A Unique Approach to Error Handling",[155],"Most frameworks treat errors as exceptions or callbacks. pipz treats them as data that flows through pipelines. 
This means you can build sophisticated error recovery flows using the exact same tools you use for regular data processing: // Define identities as package-level variables\nvar (\n    ErrorRecoveryID   = pipz.NewIdentity(\"error-recovery\", \"Error recovery pipeline\")\n    CategorizeID      = pipz.NewIdentity(\"categorize\", \"Categorizes errors by type\")\n    SeverityRouterID  = pipz.NewIdentity(\"severity-router\", \"Routes errors by severity\")\n    ParallelRecoveryID = pipz.NewIdentity(\"parallel-recovery\", \"Parallel recovery actions\")\n    OrderProcessingID = pipz.NewIdentity(\"order-processing\", \"Order processing with error handling\")\n)\n\n// Error recovery pipeline - same tools, same patterns!\nerrorRecovery := pipz.NewSequence[*pipz.Error[Order]](ErrorRecoveryID,\n    pipz.Transform(CategorizeID, categorizeError),\n    pipz.NewSwitch(SeverityRouterID, routeBySeverity),\n    pipz.NewConcurrent(ParallelRecoveryID, notifyCustomer, updateInventory),\n)\n\n// Errors flow through this pipeline automatically\nrobustPipeline := pipz.NewHandle(OrderProcessingID, mainPipeline, errorRecovery) This pattern enables type-safe, composable, and testable error handling that scales with your application complexity. 
See Safety and Reliability for the full power of this approach.",{"id":221,"title":140,"titles":222,"content":223,"level":19},"/v1.0.7/learn/introduction#next-steps",[155],"Quick Start - Build your first pipeline in minutesCore Concepts - Deep dive into pipz concepts and composition patternsBuilding Pipelines - Complete production example html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}",{"id":225,"title":226,"titles":227,"content":228,"level":9},"/v1.0.7/learn/core-concepts","Core Concepts",[],"Understanding processors, connectors, and composition patterns for building data 
pipelines",{"id":230,"title":226,"titles":231,"content":29,"level":9},"/v1.0.7/learn/core-concepts#core-concepts",[],{"id":233,"title":234,"titles":235,"content":236,"level":19},"/v1.0.7/learn/core-concepts#the-pipeline-mental-model","The Pipeline Mental Model",[226],"Think of pipz as a conveyor belt for your data. Each processor is a station that transforms, validates, or enriches data as it passes through. Connectors determine how data flows between stations - sequentially, in parallel, or conditionally.",{"id":238,"title":239,"titles":240,"content":241,"level":35},"/v1.0.7/learn/core-concepts#pipeline-lifecycle-data-flow","Pipeline Lifecycle & Data Flow",[226,234],"┌──────────────────────────────────────────────────────────────────┐\n│                     Complete Pipeline Lifecycle                  │\n└──────────────────────────────────────────────────────────────────┘\n\n┌──────────┐      ┌──────────┐      ┌──────────┐      ┌──────────┐\n│  Input   │─────→│ Validate │─────→│Transform │─────→│  Output  │\n│   Data   │      │  Stage   │      │  Stage   │      │   Data   │\n└──────────┘      └──────────┘      └──────────┘      └──────────┘\n      │                 │                 │                 │\n      ▼                 ▼                 ▼                 ▼\n┌──────────┐      ┌──────────┐      ┌──────────┐      ┌──────────┐\n│ Context  │      │  Error   │      │  Error   │      │  Result  │\n│  State   │      │ Handling │      │ Handling │      │  State   │\n└──────────┘      └──────────┘      └──────────┘      └──────────┘\n\nLegend:\n─────→  Data flow\n  ▼     Context/Error propagation\n  [✓]   Success state\n  [✗]   Failure state",{"id":243,"title":244,"titles":245,"content":246,"level":35},"/v1.0.7/learn/core-concepts#data-flow-patterns","Data Flow Patterns",[226,234],"Sequential:  Input → [A] → [B] → [C] → Output\n                      ↓     ↓     ↓\n                    (T,e) (T,e) (T,e)\n\nParallel:    Input → ┌─[A]─┐\n                     ├─[B]─┼→ 
Output\n                     └─[C]─┘\n\nConditional: Input → [?] → [A] → Output\n                      ↓\n                     [B] → Output\n\nError Flow:  Input → [A] → [B] ✗ → Error[T]\n                            ↓\n                          Stop The power comes from composition: small, focused functions combine into complex workflows. This isn't just functional programming - it's a structured way to organize business logic that scales from simple validations to distributed systems.",{"id":248,"title":187,"titles":249,"content":250,"level":19},"/v1.0.7/learn/core-concepts#the-chainable-interface",[226],"Everything in pipz implements a single interface: type Chainable[T any] interface {\n    Process(context.Context, T) (T, error)\n    Identity() Identity\n    Schema() Node\n    Close() error\n} This simplicity is deliberate. Any type implementing this interface can be: Used in any pipelineComposed with any other ChainableTested in isolationReplaced at runtime You can implement Chainable directly for custom needs, or use the provided wrappers for common patterns.",{"id":252,"title":86,"titles":253,"content":254,"level":19},"/v1.0.7/learn/core-concepts#processors-transform-your-data",[226],"Processors are immutable functions that act on data. 
pipz provides wrappers for common patterns: ProcessorPurposeCan Fail?Modifies Data?Example UseTransformPure transformationNoYesFormatting, calculationsApplyFallible transformationYesYesParsing, validationEffectSide effectsYesNoLogging, metricsMutateConditional modificationNoYes/NoFeature flagsEnrichOptional enhancementLogs errorsYesAdding metadataHandleError observationYesNoCleanup, notifications",{"id":256,"title":257,"titles":258,"content":259,"level":35},"/v1.0.7/learn/core-concepts#example-building-a-validation-pipeline","Example: Building a Validation Pipeline",[226,86],"// Define identities upfront\nvar (\n    ValidateEmailID = pipz.NewIdentity(\"validate-email\", \"Validates email format\")\n    NormalizeDataID = pipz.NewIdentity(\"normalize-data\", \"Normalizes user data\")\n    AuditLogID      = pipz.NewIdentity(\"audit-log\", \"Logs validation events\")\n    UserFlowID      = pipz.NewIdentity(\"user-flow\", \"User validation flow\")\n)\n\n// Create processors\nvalidators := []pipz.Chainable[User]{\n    pipz.Apply(ValidateEmailID, func(ctx context.Context, u User) (User, error) {\n        if !strings.Contains(u.Email, \"@\") {\n            return u, errors.New(\"invalid email\")\n        }\n        return u, nil\n    }),\n    pipz.Transform(NormalizeDataID, func(ctx context.Context, u User) User {\n        u.Email = strings.ToLower(u.Email)\n        return u\n    }),\n    pipz.Effect(AuditLogID, func(ctx context.Context, u User) error {\n        log.Printf(\"User validated: %s\", u.Email)\n        return nil\n    }),\n}",{"id":261,"title":91,"titles":262,"content":263,"level":19},"/v1.0.7/learn/core-concepts#connectors-control-the-flow",[226],"Connectors compose processors and control execution flow:",{"id":265,"title":266,"titles":267,"content":268,"level":35},"/v1.0.7/learn/core-concepts#sequential-processing","Sequential Processing",[226,91],"Sequence - Process data through steps in order: pipeline := pipz.NewSequence(UserFlowID, 
validators...)",{"id":270,"title":271,"titles":272,"content":273,"level":35},"/v1.0.7/learn/core-concepts#parallel-processing","Parallel Processing",[226,91],"These require T to implement Cloner[T] for safe concurrent execution: Concurrent - Run all processors simultaneously: var NotifyID = pipz.NewIdentity(\"notify\", \"Sends notifications\")\n\nnotifications := pipz.NewConcurrent(NotifyID,\n    sendEmail,\n    sendSMS,\n    updateMetrics,\n) Race - Return first successful result: var FetchID = pipz.NewIdentity(\"fetch\", \"Fetches from multiple sources\")\n\nfetch := pipz.NewRace(FetchID,\n    primaryDB,\n    replicaDB,\n    cache,\n) Contest - Return first result meeting criteria: var (\n    QualityID     = pipz.NewIdentity(\"quality\", \"Finds best quality result\")\n    QualityCheckF = func(ctx context.Context, result Result) bool {\n        return result.Confidence > 0.9\n    }\n)\n\nquality := pipz.NewContest(QualityID, QualityCheckF,\n    aiModel1, aiModel2, aiModel3,\n)",{"id":275,"title":115,"titles":276,"content":277,"level":35},"/v1.0.7/learn/core-concepts#conditional-processing",[226,91],"Switch - Route based on conditions: var (\n    RouterID = pipz.NewIdentity(\"router\", \"Routes requests by tier\")\n    RouterF  = func(ctx context.Context, req Request) string {\n        if req.Premium {\n            return \"premium\"\n        }\n        return \"standard\"\n    }\n)\n\nrouter := pipz.NewSwitch(RouterID, RouterF).\n    AddRoute(\"premium\", premiumPipeline).\n    AddRoute(\"standard\", standardPipeline)",{"id":279,"title":106,"titles":280,"content":281,"level":35},"/v1.0.7/learn/core-concepts#error-handling",[226,91],"Fallback - Provide alternative on failure: var SafeID = pipz.NewIdentity(\"safe\", \"Safe operation with fallback\")\n\nsafe := pipz.NewFallback(SafeID, riskyOperation, safeDefault) Retry - Retry transient failures: var APIID = pipz.NewIdentity(\"api\", \"Retries API calls\")\n\nreliable := pipz.NewRetry(APIID, apiCall, 
3)",{"id":283,"title":284,"titles":285,"content":286,"level":35},"/v1.0.7/learn/core-concepts#resilience","Resilience",[226,91],"CircuitBreaker - Prevent cascading failures: var ServiceID = pipz.NewIdentity(\"service\", \"Protected service\")\n\nprotected := pipz.NewCircuitBreaker(ServiceID, processor, 5, 30*time.Second) RateLimiter - Control throughput: var RateLimitID = pipz.NewIdentity(\"api\", \"API rate limiter\")\n\nthrottled := pipz.NewRateLimiter[Request](RateLimitID, 100, 10) // 100/sec, burst 10 Timeout - Bound execution time: var SlowID = pipz.NewIdentity(\"slow\", \"Timeout protection\")\n\nbounded := pipz.NewTimeout(SlowID, processor, 5*time.Second)",{"id":288,"title":289,"titles":290,"content":291,"level":35},"/v1.0.7/learn/core-concepts#execution-context","Execution Context",[226,91],"Pipeline - Wrap pipelines with execution context for distributed tracing: var (\n    OrderPipelineID = pipz.NewIdentity(\"order-processing\", \"Main order flow\")\n    InternalSeqID   = pipz.NewIdentity(\"order-steps\", \"Processing sequence\")\n)\n\n// Build processing logic\nsequence := pipz.NewSequence(InternalSeqID, validate, enrich, save)\n\n// Wrap with Pipeline for execution context\npipeline := pipz.NewPipeline(OrderPipelineID, sequence)\n\n// Each Process() call gets a unique execution ID\nresult, err := pipeline.Process(ctx, order) Pipeline injects two correlation IDs into context: Execution ID - Unique per Process() call, for tracing individual requestsPipeline ID - Stable across executions, for grouping by pipeline Extract these in signal handlers or processors: if execID, ok := pipz.ExecutionIDFromContext(ctx); ok {\n    log.Printf(\"Execution: %s\", execID)\n}\nif pipeID, ok := pipz.PipelineIDFromContext(ctx); ok {\n    log.Printf(\"Pipeline: %s\", pipeID)\n}",{"id":293,"title":294,"titles":295,"content":296,"level":19},"/v1.0.7/learn/core-concepts#type-safety-through-generics","Type Safety Through Generics",[226],"pipz leverages Go generics for 
compile-time type safety: // Define identities upfront\nvar (\n    UserPipelineID = pipz.NewIdentity(\"user-pipeline\", \"User processing pipeline\")\n    ConvertID      = pipz.NewIdentity(\"convert\", \"Converts user to order\")\n)\n\n// Type is locked at pipeline creation\npipeline := pipz.NewSequence[User](UserPipelineID)\n\n// This won't compile - type mismatch\npipeline.Register(processOrder) // Error: expects Chainable[User], got Chainable[Order]\n\n// Transform between types explicitly\nconverter := pipz.Apply(ConvertID, func(ctx context.Context, u User) (Order, error) {\n    return u.CreateOrder()\n})",{"id":298,"title":299,"titles":300,"content":301,"level":35},"/v1.0.7/learn/core-concepts#the-cloner-constraint","The Cloner Constraint",[226,294],"For concurrent processing, your type must implement Cloner[T]: type Data struct {\n    Values []int\n}\n\nfunc (d Data) Clone() Data {\n    newValues := make([]int, len(d.Values))\n    copy(newValues, d.Values)\n    return Data{Values: newValues}\n}",{"id":303,"title":304,"titles":305,"content":306,"level":19},"/v1.0.7/learn/core-concepts#error-philosophy","Error Philosophy",[226],"Errors in pipz are first-class citizens with rich context: type Error[T any] struct {\n    Path      []Name        // Full path through pipeline\n    Err       error         // The underlying error\n    InputData T             // Data at failure\n    Timestamp time.Time     // When the error occurred\n    Duration  time.Duration // How long before failure\n    Timeout   bool          // Was it a timeout?\n    Canceled  bool          // Was it canceled?\n} This design enables: Precise debugging: Know exactly where and why failures occurError recovery: Access data state for compensationError pipelines: Process errors through their own pipelines",{"id":308,"title":309,"titles":310,"content":311,"level":35},"/v1.0.7/learn/core-concepts#error-pipeline-pattern","Error Pipeline Pattern",[226,304],"// Define identities upfront\nvar (\n    
ErrorHandlerID = pipz.NewIdentity(\"error-handler\", \"Error handling pipeline\")\n    LogID          = pipz.NewIdentity(\"log\", \"Logs errors\")\n    ClassifyID     = pipz.NewIdentity(\"classify\", \"Classifies error types\")\n    RecoveryID     = pipz.NewIdentity(\"recovery\", \"Selects recovery strategy\")\n    MainID         = pipz.NewIdentity(\"main\", \"Main processing pipeline\")\n    RecoverID      = pipz.NewIdentity(\"recover\", \"Error recovery\")\n)\n\n// Errors are just data - process them like anything else\nerrorPipeline := pipz.NewSequence[*pipz.Error[Order]](ErrorHandlerID,\n    pipz.Effect(LogID, logError),\n    pipz.Apply(ClassifyID, classifyError),\n    pipz.Switch(RecoveryID, selectRecoveryStrategy),\n)\n\n// Use with main pipeline\nmainPipeline := pipz.NewFallback(MainID,\n    orderProcessing,\n    pipz.Handle(RecoverID, errorPipeline),\n)",{"id":313,"title":135,"titles":314,"content":315,"level":19},"/v1.0.7/learn/core-concepts#best-practices",[226],"Name processors with identities - Use descriptive names and descriptionsKeep processors focused - Each should do one thing wellCompose, don't configure - Build complex behavior from simple partsTest in isolation - Each processor should be independently testableHandle context - Always respect cancellation and timeoutsClone properly - Deep copy slices and maps in Clone() methods",{"id":317,"title":140,"titles":318,"content":319,"level":19},"/v1.0.7/learn/core-concepts#next-steps",[226],"Architecture - System design and internalsQuickstart Tutorial - Build your first pipelineConnector Selection - Choose the right connectorAPI Reference - Complete API documentation html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki 
.sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":321,"title":322,"titles":323,"content":324,"level":9},"/v1.0.7/learn/architecture","Architecture",[],"System design, data flow patterns, and internal architecture of the pipz library",{"id":326,"title":322,"titles":327,"content":29,"level":9},"/v1.0.7/learn/architecture#architecture",[],{"id":329,"title":6,"titles":330,"content":331,"level":19},"/v1.0.7/learn/architecture#overview",[322],"pipz is designed as a composable data processing library built on a single, uniform interface. 
The architecture emphasizes type safety, immutability, and clean separation of concerns to enable developers to build maintainable and testable data pipelines.",{"id":333,"title":334,"titles":335,"content":336,"level":19},"/v1.0.7/learn/architecture#core-design-principles","Core Design Principles",[322],"Single Interface Pattern: Everything implements Chainable[T], enabling seamless compositionType Safety: Leverages Go generics (1.21+) for compile-time type checkingImmutable Processors: Adapter functions are immutable values ensuring thread safetyMutable Connectors: Container types that manage state and configurationFail-Fast Execution: Processing stops at the first error, simplifying error handlingContext-Aware: All operations support context for cancellation and timeout control",{"id":338,"title":339,"titles":340,"content":341,"level":19},"/v1.0.7/learn/architecture#system-architecture","System Architecture",[322],"┌──────────────────────────────────────────────────────────────┐\n│                         Application                          │\n└───────────────────────┬──────────────────────────────────────┘\n                        │\n                        ▼\n┌──────────────────────────────────────────────────────────────┐\n│                    Chainable[T] Interface                    │\n│                 Process(ctx, T) → (T, *Error)                │\n│                        Name() → Name                         │\n└────────────┬─────────────────────────────┬───────────────────┘\n             │                             │\n             ▼                             ▼\n┌────────────────────────────┐ ┌──────────────────────────────┐\n│      Processors (Values)   │ │    Connectors (Pointers)     │\n├────────────────────────────┤ ├──────────────────────────────┤\n│ • Transform - Pure function│ │ • Sequence - Sequential flow │\n│ • Apply - Can fail         │ │ • Concurrent - Parallel exec │\n│ • Effect - Side effects    │ │ • Race - First success       │\n│ • 
Mutate - Conditional     │ │ • Contest - First meeting    │\n│ • Enrich - Optional enhance│ │ • Switch - Conditional branch│\n│ • Filter - Pass/block data │ │ • Fallback - Error recovery │\n│ • Handle - Error transform │ │ • Retry - Retry on failure   │\n│ • Scaffold - Development   │ │ • CircuitBreaker - Fail fast │\n│                            │ │ • RateLimiter - Control flow │\n│                            │ │ • Timeout - Time boundaries  │\n└────────────────────────────┘ └──────────────────────────────┘",{"id":343,"title":344,"titles":345,"content":29,"level":19},"/v1.0.7/learn/architecture#component-relationships","Component Relationships",[322],{"id":347,"title":348,"titles":349,"content":350,"level":35},"/v1.0.7/learn/architecture#processors-adapters","Processors (Adapters)",[322,344],"Processors are lightweight wrappers around user functions that implement the Chainable interface: type processor[T any] struct {\n    name Name\n    fn   func(context.Context, T) (T, error)\n} Key characteristics: Immutable: Once created, cannot be modifiedStateless: No internal state, pure function wrappersThread-Safe: Can be used concurrently without synchronizationComposable: Can be combined using connectors",{"id":352,"title":353,"titles":354,"content":355,"level":35},"/v1.0.7/learn/architecture#connectors-composition","Connectors (Composition)",[322,344],"Connectors manage the composition and execution flow of multiple Chainables: type connector[T any] struct {\n    name       Name\n    processors []Chainable[T]\n    // Additional state (mutex, config, etc.)\n} Key characteristics: Mutable: Can be modified at runtime (add/remove processors)Stateful: May maintain internal state (circuit breaker state, rate limits)Configurable: Support runtime configuration changesOrchestrators: Control execution flow and error handling",{"id":357,"title":358,"titles":359,"content":29,"level":19},"/v1.0.7/learn/architecture#data-flow-architecture","Data Flow 
Architecture",[322],{"id":361,"title":266,"titles":362,"content":363,"level":35},"/v1.0.7/learn/architecture#sequential-processing",[322,358],"Input → [Processor 1] → [Processor 2] → [Processor 3] → Output\n         ↓ error          ↓ error          ↓ error\n         Return           Return           Return The Sequence connector processes data through each step sequentially, stopping at the first error.",{"id":365,"title":271,"titles":366,"content":367,"level":35},"/v1.0.7/learn/architecture#parallel-processing",[322,358],"┌→ [Processor 1] →┐\nInput →─┼→ [Processor 2] →┼→ Aggregation → Output\n        └→ [Processor 3] →┘ Parallel connectors (Concurrent, Race, Contest) require T to implement Cloner[T] for safe concurrent processing.",{"id":369,"title":115,"titles":370,"content":371,"level":35},"/v1.0.7/learn/architecture#conditional-processing",[322,358],"┌─[condition]─→ [Branch A] →┐\nInput →─┤                            ├→ Output\n         └─[default]──→ [Branch B] →┘ The Switch connector routes data based on conditions, similar to a switch statement.",{"id":373,"title":374,"titles":375,"content":376,"level":35},"/v1.0.7/learn/architecture#execution-flow-diagram","Execution Flow Diagram",[322,358],"┌─────────────┐     ┌─────────────┐     ┌─────────────┐\n│   Context   │────→│  Pipeline   │────→│  Processor  │\n└─────────────┘     └─────────────┘     └─────────────┘\n      │                    │                    │\n      │                    ▼                    ▼\n      │             ┌─────────────┐     ┌─────────────┐\n      └────────────→│   Router    │────→│  Transform  │\n                    └─────────────┘     └─────────────┘\n                           │                    │\n                           ▼                    ▼\n                    ┌─────────────┐     ┌─────────────┐\n                    │Error Handler│←────│   Result    │\n                    └─────────────┘     
└─────────────┘",{"id":378,"title":379,"titles":380,"content":29,"level":19},"/v1.0.7/learn/architecture#error-handling-architecture","Error Handling Architecture",[322],{"id":382,"title":383,"titles":384,"content":385,"level":35},"/v1.0.7/learn/architecture#error-type-hierarchy","Error Type Hierarchy",[322,379],"type Error[T any] struct {\n    Path      []Name        // Full path through pipeline where error occurred\n    Err       error         // The underlying error\n    InputData T             // Data state at error time\n    Timestamp time.Time     // When the error occurred\n    Duration  time.Duration // How long before failure\n    Timeout   bool          // Was it a timeout?\n    Canceled  bool          // Was it canceled?\n} Error handling patterns: Fail-Fast: Default behavior, stop on first errorRecovery: Use Fallback for error recovery with alternate processingTransformation: Use Handle to transform errors into valid dataResilience: Use Retry, CircuitBreaker, RateLimiter for fault tolerance",{"id":387,"title":388,"titles":389,"content":29,"level":19},"/v1.0.7/learn/architecture#memory-model","Memory Model",[322],{"id":391,"title":392,"titles":393,"content":394,"level":35},"/v1.0.7/learn/architecture#cloner-interface","Cloner Interface",[322,388],"For concurrent processing, data must be cloneable: type Cloner[T any] interface {\n    Clone() T\n} This ensures: Thread safety in parallel operationsData isolation between concurrent branchesPrevention of race conditions",{"id":396,"title":397,"titles":398,"content":399,"level":35},"/v1.0.7/learn/architecture#context-propagation","Context Propagation",[322,388],"Every operation receives a context, enabling: Request-scoped valuesCancellation propagationTimeout enforcementTracing and monitoring integration",{"id":401,"title":402,"titles":403,"content":404,"level":35},"/v1.0.7/learn/architecture#pipeline-execution-context","Pipeline Execution Context",[322,388],"The Pipeline connector provides semantic execution 
context for distributed tracing: ┌──────────────────────────────────────────────────────────────┐\n│                       Pipeline Wrapper                        │\n├──────────────────────────────────────────────────────────────┤\n│  Identity: \"order-processing\"                                │\n│  Pipeline ID: 550e8400-e29b-41d4-a716-446655440000 (stable)  │\n└───────────────────────┬──────────────────────────────────────┘\n                        │\n                        ▼ Process(ctx, data)\n┌──────────────────────────────────────────────────────────────┐\n│                    Context Injection                          │\n│  + Execution ID: a8b9c0d1-... (unique per call)              │\n│  + Pipeline ID:  550e8400-... (from Identity)                │\n└───────────────────────┬──────────────────────────────────────┘\n                        │\n                        ▼\n┌──────────────────────────────────────────────────────────────┐\n│                   Root Chainable                              │\n│  (Sequence, Concurrent, etc.)                                
│\n│                                                              │\n│  All nested connectors receive enriched context              │\n│  Signals emitted include correlation IDs                     │\n└──────────────────────────────────────────────────────────────┘ This enables: Request Correlation: Link all signals from a single executionPipeline Grouping: Aggregate metrics by pipeline identityDistributed Tracing: Propagate IDs to external systemsDebug Context: Know which pipeline and execution produced logs",{"id":406,"title":407,"titles":408,"content":29,"level":19},"/v1.0.7/learn/architecture#extension-points","Extension Points",[322],{"id":410,"title":411,"titles":412,"content":413,"level":35},"/v1.0.7/learn/architecture#custom-processors","Custom Processors",[322,407],"Create custom processors by wrapping functions: func CustomProcessor[T any](id Identity, fn func(context.Context, T) (T, error)) Chainable[T] {\n    return Apply(id, fn)\n}",{"id":415,"title":416,"titles":417,"content":418,"level":35},"/v1.0.7/learn/architecture#custom-connectors","Custom Connectors",[322,407],"Implement the Chainable interface for custom composition logic: type CustomConnector[T any] struct {\n    identity Identity\n    // Your fields\n}\n\nfunc (c *CustomConnector[T]) Process(ctx context.Context, data T) (T, error) {\n    // Your logic\n}\n\nfunc (c *CustomConnector[T]) Identity() Identity {\n    return c.identity\n}\n\nfunc (c *CustomConnector[T]) Schema() Node {\n    return Node{Identity: c.identity, Type: \"custom\"}\n}\n\nfunc (c *CustomConnector[T]) Close() error {\n    return nil\n}",{"id":420,"title":421,"titles":422,"content":423,"level":35},"/v1.0.7/learn/architecture#integration-points","Integration Points",[322,407],"Common integration patterns: HTTP Middleware: Wrap pipelines as HTTP handlersMessage Queue Consumers: Process messages through pipelinesBatch Processing: Use pipelines in batch job frameworksStream Processing: Integrate with streaming platformsService 
Mesh: Use as sidecar processing logic",{"id":425,"title":426,"titles":427,"content":29,"level":19},"/v1.0.7/learn/architecture#observability-architecture","Observability Architecture",[322],{"id":429,"title":430,"titles":431,"content":432,"level":35},"/v1.0.7/learn/architecture#hook-system","Hook System",[322,426],"Pipz integrates with capitan to provide type-safe event hooks for monitoring and debugging. Stateful connectors emit signals at critical decision points: ┌──────────────────────────────────────────────────────────────┐\n│                      Application Code                        │\n└───────────────────────┬──────────────────────────────────────┘\n                        │\n                        ▼\n┌──────────────────────────────────────────────────────────────┐\n│                   Stateful Connectors                        │\n│  ┌──────────────┐  ┌──────────────┐  ┌──────────────┐       │\n│  │CircuitBreaker│  │ RateLimiter  │  │ WorkerPool   │       │\n│  └──────┬───────┘  └──────┬───────┘  └──────┬───────┘       │\n└─────────┼──────────────────┼──────────────────┼──────────────┘\n          │                  │                  │\n          │ capitan.Emit()   │                  │\n          ▼                  ▼                  ▼\n┌──────────────────────────────────────────────────────────────┐\n│                    Capitan Event Bus                         │\n│              (Async, Per-Signal Workers)                     │\n└───────────────────────┬──────────────────────────────────────┘\n                        │\n                        ▼\n        ┌───────────────┴───────────────┐\n        ▼                               ▼\n┌──────────────┐              ┌──────────────────┐\n│  Observers   │              │  Hook Handlers   │\n├──────────────┤              ├──────────────────┤\n│ • Metrics    │              │ • Logging        │\n│ • Alerting   │              │ • Tracing        │\n│ • Debugging  │              │ • Custom Logic   
│\n└──────────────┘              └──────────────────┘",{"id":434,"title":435,"titles":436,"content":437,"level":35},"/v1.0.7/learn/architecture#signal-emission-points","Signal Emission Points",[322,426],"Signals are emitted at state transitions and decision points: CircuitBreaker: circuitbreaker.opened - Threshold reachedcircuitbreaker.closed - Recovery successfulcircuitbreaker.half-open - Testing recoverycircuitbreaker.rejected - Request blocked RateLimiter: ratelimiter.allowed - Token consumedratelimiter.throttled - Waiting for tokenratelimiter.dropped - Request dropped WorkerPool: workerpool.saturated - All workers busyworkerpool.acquired - Worker slot takenworkerpool.released - Worker slot freed",{"id":439,"title":440,"titles":441,"content":442,"level":35},"/v1.0.7/learn/architecture#asynchronous-processing","Asynchronous Processing",[322,426],"All events are processed asynchronously via per-signal worker goroutines: Emit() → Buffered Channel → Worker Goroutine → Handler\n  ↓           (size: 16)          ↓               ↓\nReturns                        Isolated       Panic Safe\nImmediately                   Execution This architecture ensures: Zero impact on pipeline performanceIsolation between different signal typesPanic safety with automatic recoveryBackpressure via configurable buffers See Hooks Documentation for detailed usage and examples.",{"id":444,"title":445,"titles":446,"content":29,"level":19},"/v1.0.7/learn/architecture#performance-considerations","Performance Considerations",[322],{"id":448,"title":449,"titles":450,"content":451,"level":35},"/v1.0.7/learn/architecture#optimization-strategies","Optimization Strategies",[322,445],"Processor Granularity: Balance between too many small processors (overhead) and large monolithic ones (reduced reusability)Parallel Execution: Use Concurrent for independent operationsEarly Filtering: Place Filter processors early to reduce downstream processingResource Pooling: Reuse expensive resources across pipeline 
executionsContext Timeouts: Set appropriate timeouts to prevent hanging operations",{"id":453,"title":454,"titles":455,"content":456,"level":35},"/v1.0.7/learn/architecture#benchmarking-results","Benchmarking Results",[322,445],"The library includes comprehensive benchmarks showing: Minimal overhead for processor wrapping (~2-5ns)Linear scaling for sequential processingNear-linear scaling for parallel processing with proper data isolation",{"id":458,"title":459,"titles":460,"content":29,"level":19},"/v1.0.7/learn/architecture#security-considerations","Security Considerations",[322],{"id":462,"title":463,"titles":464,"content":465,"level":35},"/v1.0.7/learn/architecture#input-validation","Input Validation",[322,459],"Always validate input at pipeline boundaries: // Define identities upfront\nvar (\n    SecureID   = pipz.NewIdentity(\"secure\", \"Secure processing pipeline\")\n    SanitizeID = pipz.NewIdentity(\"sanitize\", \"Sanitizes input\")\n    ValidateID = pipz.NewIdentity(\"validate\", \"Validates data\")\n)\n\npipeline := pipz.NewSequence(SecureID,\n    pipz.Transform(SanitizeID, sanitizeInput),\n    pipz.Apply(ValidateID, validateData),\n    // ... 
rest of pipeline\n)",{"id":467,"title":468,"titles":469,"content":470,"level":35},"/v1.0.7/learn/architecture#resource-limits","Resource Limits",[322,459],"Protect against resource exhaustion: Use Timeout for time boundariesUse RateLimiter for throughput controlUse CircuitBreaker for cascading failure prevention",{"id":472,"title":473,"titles":474,"content":475,"level":35},"/v1.0.7/learn/architecture#error-information","Error Information",[322,459],"Be careful with error details in production: The Error[T] type includes the data state at failureConsider sanitizing sensitive data in error statesUse structured logging for audit trails",{"id":477,"title":478,"titles":479,"content":29,"level":19},"/v1.0.7/learn/architecture#future-architecture-considerations","Future Architecture Considerations",[322],{"id":481,"title":482,"titles":483,"content":484,"level":35},"/v1.0.7/learn/architecture#planned-enhancements","Planned Enhancements",[322,478],"Distributed Execution: Support for distributed pipeline executionPersistent State: Durable state management for long-running pipelinesVisual Pipeline Builder: Tool for visual pipeline compositionSchema Evolution: Support for data schema versioning",{"id":486,"title":487,"titles":488,"content":489,"level":35},"/v1.0.7/learn/architecture#api-stability","API Stability",[322,478],"The core Chainable[T] interface is stable and will remain backward compatible. New features will be added through: New adapter functionsNew connector typesOptional interfaces for advanced featuresConfiguration options on existing types",{"id":491,"title":492,"titles":493,"content":494,"level":19},"/v1.0.7/learn/architecture#summary","Summary",[322],"The pipz architecture provides a clean, composable foundation for building data processing pipelines. 
By adhering to a single interface and clear separation between processors and connectors, it enables developers to build complex data flows from simple, testable components while maintaining type safety and performance. html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}",{"id":496,"title":497,"titles":498,"content":499,"level":9},"/v1.0.7/learn/hooks","Hooks and Observability",[],"Type-safe event hooks for monitoring, debugging, and observability with capitan integration",{"id":501,"title":497,"titles":502,"content":503,"level":9},"/v1.0.7/learn/hooks#hooks-and-observability",[],"Pipz integrates with capitan to provide type-safe event hooks for observability, monitoring, and debugging. 
Stateful connectors emit signals at critical state transitions and decision points, allowing you to observe system behavior without modifying your processing logic.",{"id":505,"title":6,"titles":506,"content":507,"level":19},"/v1.0.7/learn/hooks#overview",[497],"Hooks enable you to: Monitor circuit breaker state changes and timeout eventsTrack rate limiting behavior and backpressureObserve worker pool saturation, retry exhaustion, and backoff patternsDetect when fallback processors are being usedAlert on threshold violations and failure patternsCollect metrics for dashboardsDebug pipeline behavior in production All events are emitted asynchronously via per-signal worker goroutines, ensuring hooks don't impact pipeline performance.",{"id":509,"title":510,"titles":511,"content":512,"level":19},"/v1.0.7/learn/hooks#event-severity","Event Severity",[497],"As of capitan v0.0.5, all events include a severity level that indicates their importance: Error: System failures requiring immediate attention (circuit opened, requests rejected/dropped, all retries exhausted, timeouts)Warn: Degraded performance or fallback scenarios (circuit half-open, rate limiting throttled, pool saturated, individual retry failures, using fallback processors, backoff delays)Info: Normal operations (circuit closed, rate limiter allowed, worker acquired/released, retry attempts, using primary processor)Debug: Detailed operational information (currently unused, but available for verbose logging) Events can be filtered by severity in your hooks using e.Severity().",{"id":514,"title":515,"titles":516,"content":29,"level":19},"/v1.0.7/learn/hooks#available-signals","Available Signals",[497],{"id":518,"title":519,"titles":520,"content":521,"level":35},"/v1.0.7/learn/hooks#circuitbreaker","CircuitBreaker",[497,515],"SignalWhen EmittedKey Fieldscircuitbreaker.openedCircuit opens after failure threshold reachedname, state, failures, failure_thresholdcircuitbreaker.closedCircuit closes after successful 
recoveryname, state, successes, success_thresholdcircuitbreaker.half-openCircuit transitions to half-open for testingname, state, generationcircuitbreaker.rejectedRequest rejected while circuit is openname, state, generation",{"id":523,"title":524,"titles":525,"content":526,"level":35},"/v1.0.7/learn/hooks#ratelimiter","RateLimiter",[497,515],"SignalWhen EmittedKey Fieldsratelimiter.allowedRequest allowed, token consumedname, tokens, rate, burstratelimiter.throttledRequest waiting for tokens (wait mode)name, wait_time, tokens, rateratelimiter.droppedRequest dropped, no tokens available (drop mode)name, tokens, rate, burst, mode",{"id":528,"title":529,"titles":530,"content":531,"level":35},"/v1.0.7/learn/hooks#workerpool","WorkerPool",[497,515],"SignalWhen EmittedKey Fieldsworkerpool.saturatedAll worker slots occupied, task will blockname, worker_count, active_workersworkerpool.acquiredWorker slot acquired, task startingname, worker_count, active_workersworkerpool.releasedWorker slot released, task completedname, worker_count, active_workers",{"id":533,"title":534,"titles":535,"content":536,"level":35},"/v1.0.7/learn/hooks#retry","Retry",[497,515],"SignalWhen EmittedKey Fieldsretry.attempt-startStarting a retry attemptname, attempt, max_attemptsretry.attempt-failRetry attempt failedname, attempt, max_attempts, errorretry.exhaustedAll retry attempts exhaustedname, max_attempts, error",{"id":538,"title":539,"titles":540,"content":541,"level":35},"/v1.0.7/learn/hooks#fallback","Fallback",[497,515],"SignalWhen EmittedKey Fieldsfallback.attemptAttempting a fallback processorname, processor_index, processor_namefallback.failedAll fallback processors failedname, error",{"id":543,"title":544,"titles":545,"content":546,"level":35},"/v1.0.7/learn/hooks#timeout","Timeout",[497,515],"SignalWhen EmittedKey Fieldstimeout.triggeredOperation exceeded timeout durationname, 
duration",{"id":548,"title":549,"titles":550,"content":551,"level":35},"/v1.0.7/learn/hooks#backoff","Backoff",[497,515],"SignalWhen EmittedKey Fieldsbackoff.waitingEntering exponential backoff delayname, attempt, max_attempts, delay, next_delay",{"id":553,"title":554,"titles":555,"content":556,"level":19},"/v1.0.7/learn/hooks#field-reference","Field Reference",[497],"All fields use primitive types for easy integration with monitoring systems: Field KeyTypeDescriptionFieldNamestringConnector instance nameFieldErrorstringError messageFieldTimestampfloat64Unix timestampFieldStatestringCircuit state: \"closed\", \"open\", \"half-open\"FieldFailuresintCurrent failure countFieldSuccessesintCurrent success countFieldFailureThresholdintFailures needed to open circuitFieldSuccessThresholdintSuccesses needed to close from half-openFieldResetTimeoutfloat64Reset timeout in secondsFieldGenerationintCircuit generation numberFieldLastFailTimefloat64Last failure timestampFieldRatefloat64Requests per secondFieldBurstintMaximum burst capacityFieldTokensfloat64Current available tokensFieldModestringRate limiter mode: \"wait\" or \"drop\"FieldWaitTimefloat64Wait time in secondsFieldWorkerCountintTotal worker slotsFieldActiveWorkersintCurrently active workersFieldAttemptintCurrent retry attempt numberFieldMaxAttemptsintMaximum retry attemptsFieldProcessorIndexintFallback processor indexFieldProcessorNamestringFallback processor nameFieldDurationfloat64Timeout duration in secondsFieldDelayfloat64Current backoff delay in secondsFieldNextDelayfloat64Next backoff delay in seconds",{"id":558,"title":559,"titles":560,"content":29,"level":19},"/v1.0.7/learn/hooks#usage-examples","Usage Examples",[497],{"id":562,"title":563,"titles":564,"content":565,"level":35},"/v1.0.7/learn/hooks#basic-hook-registration","Basic Hook Registration",[497,559],"import (\n    \"context\"\n    \"fmt\"\n\n    \"github.com/zoobzio/capitan\"\n    \"github.com/zoobzio/pipz\"\n)\n\nfunc main() {\n    // Configure 
capitan (optional, before any hooks)\n    capitan.Configure(capitan.WithBufferSize(64))\n\n    // Hook circuit breaker signals\n    capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        failures, _ := pipz.FieldFailures.From(e)\n        threshold, _ := pipz.FieldFailureThreshold.From(e)\n\n        fmt.Printf(\"ALERT: Circuit %s opened (failures=%d, threshold=%d)\\n\",\n            name, failures, threshold)\n    })\n\n    // Your pipeline code...\n\n    // Shutdown capitan to drain pending events\n    defer capitan.Shutdown()\n}",{"id":567,"title":568,"titles":569,"content":570,"level":35},"/v1.0.7/learn/hooks#metrics-collection","Metrics Collection",[497,559],"import (\n    \"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n    circuitState = prometheus.NewGaugeVec(\n        prometheus.GaugeOpts{\n            Name: \"pipz_circuit_state\",\n            Help: \"Circuit breaker state (0=closed, 1=half-open, 2=open)\",\n        },\n        []string{\"name\"},\n    )\n\n    rateLimitDropped = prometheus.NewCounterVec(\n        prometheus.CounterOpts{\n            Name: \"pipz_ratelimit_dropped_total\",\n            Help: \"Total requests dropped by rate limiter\",\n        },\n        []string{\"name\"},\n    )\n)\n\nfunc init() {\n    prometheus.MustRegister(circuitState, rateLimitDropped)\n\n    // Track circuit state\n    capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        circuitState.WithLabelValues(name).Set(2) // open\n    })\n\n    capitan.Hook(pipz.SignalCircuitBreakerClosed, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        circuitState.WithLabelValues(name).Set(0) // closed\n    })\n\n    capitan.Hook(pipz.SignalCircuitBreakerHalfOpen, func(ctx context.Context, e *capitan.Event) {\n        name, _ := 
pipz.FieldName.From(e)\n        circuitState.WithLabelValues(name).Set(1) // half-open\n    })\n\n    // Track dropped requests\n    capitan.Hook(pipz.SignalRateLimiterDropped, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        rateLimitDropped.WithLabelValues(name).Inc()\n    })\n}",{"id":572,"title":573,"titles":574,"content":575,"level":35},"/v1.0.7/learn/hooks#structured-logging","Structured Logging",[497,559],"import (\n    \"log/slog\"\n)\n\nfunc setupHooks() {\n    // Log all rate limiter events\n    capitan.Hook(pipz.SignalRateLimiterThrottled, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        waitTime, _ := pipz.FieldWaitTime.From(e)\n        tokens, _ := pipz.FieldTokens.From(e)\n\n        slog.WarnContext(ctx, \"rate limiter throttled\",\n            \"connector\", name,\n            \"wait_seconds\", waitTime,\n            \"tokens_remaining\", tokens,\n        )\n    })\n\n    // Log worker pool saturation\n    capitan.Hook(pipz.SignalWorkerPoolSaturated, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        workers, _ := pipz.FieldWorkerCount.From(e)\n\n        slog.WarnContext(ctx, \"worker pool saturated\",\n            \"connector\", name,\n            \"worker_count\", workers,\n        )\n    })\n}",{"id":577,"title":578,"titles":579,"content":580,"level":35},"/v1.0.7/learn/hooks#alerting","Alerting",[497,559],"func setupAlerts() {\n    // Alert when circuit opens\n    capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        failures, _ := pipz.FieldFailures.From(e)\n\n        // Send to alerting system\n        sendAlert(Alert{\n            Severity: \"critical\",\n            Title:    fmt.Sprintf(\"Circuit Breaker Opened: %s\", name),\n            Message:  fmt.Sprintf(\"Failures reached threshold: %d\", failures),\n      
  })\n    })\n\n    // Alert when rate limiter starts dropping requests\n    capitan.Hook(pipz.SignalRateLimiterDropped, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        rate, _ := pipz.FieldRate.From(e)\n\n        sendAlert(Alert{\n            Severity: \"warning\",\n            Title:    fmt.Sprintf(\"Rate Limiter Dropping: %s\", name),\n            Message:  fmt.Sprintf(\"Capacity exceeded (rate=%.1f/s)\", rate),\n        })\n    })\n}",{"id":582,"title":583,"titles":584,"content":585,"level":35},"/v1.0.7/learn/hooks#severity-based-filtering","Severity-Based Filtering",[497,559],"// Only process error-level events\ncapitan.Observe(func(ctx context.Context, e *capitan.Event) {\n    if e.Severity() != capitan.SeverityError {\n        return\n    }\n\n    name, _ := pipz.FieldName.From(e)\n    log.Printf(\"ERROR event from %s: %s\", name, e.Signal())\n\n    // Send to error tracking system\n    sendToErrorTracker(e)\n})\n\n// Route events by severity\ncapitan.Observe(func(ctx context.Context, e *capitan.Event) {\n    switch e.Severity() {\n    case capitan.SeverityError:\n        sendToAlertingSystem(e)\n    case capitan.SeverityWarn:\n        sendToMonitoringDashboard(e)\n    case capitan.SeverityInfo:\n        sendToMetricsCollector(e)\n    case capitan.SeverityDebug:\n        sendToDebugLogs(e)\n    }\n})",{"id":587,"title":588,"titles":589,"content":590,"level":19},"/v1.0.7/learn/hooks#observer-pattern","Observer Pattern",[497],"Use Observe() to listen to multiple signals with a single handler: // Observe all circuit breaker events\ncapitan.Observe(func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n\n    switch e.Signal() {\n    case pipz.SignalCircuitBreakerOpened:\n        log.Printf(\"Circuit %s: OPENED\", name)\n    case pipz.SignalCircuitBreakerClosed:\n        log.Printf(\"Circuit %s: CLOSED\", name)\n    case pipz.SignalCircuitBreakerHalfOpen:\n        
log.Printf(\"Circuit %s: TESTING\", name)\n    }\n},\n    pipz.SignalCircuitBreakerOpened,\n    pipz.SignalCircuitBreakerClosed,\n    pipz.SignalCircuitBreakerHalfOpen,\n)\n\n// Or observe ALL signals\ncapitan.Observe(func(ctx context.Context, e *capitan.Event) {\n    // Log everything for debugging\n    log.Printf(\"Event: %s\", e.Signal())\n})",{"id":592,"title":445,"titles":593,"content":29,"level":19},"/v1.0.7/learn/hooks#performance-considerations",[497],{"id":595,"title":440,"titles":596,"content":597,"level":35},"/v1.0.7/learn/hooks#asynchronous-processing",[497,445],"All events are processed asynchronously in per-signal worker goroutines. This means: ✅ Hooks never block pipeline processing✅ Slow handlers don't impact throughput✅ Handler panics are recovered automatically❌ Events may be buffered if handlers are slow❌ No guaranteed delivery if process crashes",{"id":599,"title":600,"titles":601,"content":602,"level":35},"/v1.0.7/learn/hooks#buffer-sizing","Buffer Sizing",[497,445],"Configure buffer size based on emission rate: // Default: 16 events per signal\ncapitan.Configure(capitan.WithBufferSize(16))\n\n// High-volume: increase buffer\ncapitan.Configure(capitan.WithBufferSize(128))\n\n// Low-latency: smaller buffer (fails faster if handler is slow)\ncapitan.Configure(capitan.WithBufferSize(4)) If a signal's buffer fills, Emit() becomes blocking until the handler catches up.",{"id":604,"title":605,"titles":606,"content":607,"level":35},"/v1.0.7/learn/hooks#handler-best-practices","Handler Best Practices",[497,445],"Keep handlers fast - Emit to external queues/channels rather than doing heavy workDon't block - Avoid synchronous I/O in handlersHandle panics - Capitan recovers, but you should still be defensiveUse context - Respect cancellation in long-running handlers // ❌ Bad: Blocking I/O in handler\ncapitan.Hook(signal, func(ctx context.Context, e *capitan.Event) {\n    http.Post(\"https://alerting.com/api\", ...)  
// Blocks!\n})\n\n// ✅ Good: Queue for async processing\nvar alertQueue = make(chan Alert, 100)\n\ncapitan.Hook(signal, func(ctx context.Context, e *capitan.Event) {\n    select {\n    case alertQueue \u003C- buildAlert(e):\n    default:\n        // Queue full, drop (don't block pipeline)\n    }\n})",{"id":609,"title":610,"titles":611,"content":612,"level":19},"/v1.0.7/learn/hooks#shutdown","Shutdown",[497],"Always call Shutdown() to drain pending events: func main() {\n    // Setup hooks...\n\n    // Run application...\n\n    // Drain events before exit\n    capitan.Shutdown()\n} Without Shutdown(), buffered events may be lost on process exit.",{"id":614,"title":615,"titles":616,"content":29,"level":19},"/v1.0.7/learn/hooks#integration-with-connectors","Integration with Connectors",[497],{"id":618,"title":519,"titles":619,"content":620,"level":35},"/v1.0.7/learn/hooks#circuitbreaker-1",[497,615],"Emits signals on state transitions: var apiBreaker = pipz.NewCircuitBreaker(\n    pipz.NewIdentity(\"api-breaker\", \"Protects API from cascading failures\"),\n    apiProcessor,\n    5,                  // failureThreshold\n    30 * time.Second,   // resetTimeout\n)\n\n// Hook to track state\ncapitan.Hook(pipz.SignalCircuitBreakerOpened, trackCircuitState)\ncapitan.Hook(pipz.SignalCircuitBreakerClosed, trackCircuitState) See CircuitBreaker reference for details.",{"id":622,"title":524,"titles":623,"content":624,"level":35},"/v1.0.7/learn/hooks#ratelimiter-1",[497,615],"Emits signals for throttling and dropping: var apiLimiter = pipz.NewRateLimiter[Request](\n    pipz.NewIdentity(\"api-limiter\", \"Rate limits API requests\"),\n    100,    // rate per second\n    10,     // burst\n).SetMode(\"drop\")\n\n// Hook to track dropped requests\ncapitan.Hook(pipz.SignalRateLimiterDropped, trackDrops) See RateLimiter reference for details.",{"id":626,"title":529,"titles":627,"content":628,"level":35},"/v1.0.7/learn/hooks#workerpool-1",[497,615],"Emits signals for worker acquisition 
and saturation: var pool = pipz.NewWorkerPool[Task](\n    pipz.NewIdentity(\"worker-pool\", \"Limits concurrent task processing\"),\n    10,  // worker count\n    processors...,\n)\n\n// Hook to track saturation\ncapitan.Hook(pipz.SignalWorkerPoolSaturated, alertOnSaturation) See WorkerPool reference for details.",{"id":630,"title":534,"titles":631,"content":632,"level":35},"/v1.0.7/learn/hooks#retry-1",[497,615],"Emits signals for retry attempts and exhaustion: var retryProcessor = pipz.NewRetry(\n    pipz.NewIdentity(\"api-retry\", \"Retries failed API calls\"),\n    apiProcessor,\n    3,  // maxAttempts\n)\n\n// Hook to track retry exhaustion\ncapitan.Hook(pipz.SignalRetryExhausted, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    err, _ := pipz.FieldError.From(e)\n    log.Printf(\"ALERT: Retry exhausted for %s: %s\", name, err)\n}) See Retry reference for details.",{"id":634,"title":539,"titles":635,"content":636,"level":35},"/v1.0.7/learn/hooks#fallback-1",[497,615],"Emits signals when attempting fallback processors: var fallbackChain = pipz.NewFallback(\n    pipz.NewIdentity(\"payment-fallback\", \"Payment processing with fallback\"),\n    stripeProcessor,\n    paypalProcessor,\n    squareProcessor,\n)\n\n// Hook to track when fallbacks are used\ncapitan.Hook(pipz.SignalFallbackAttempt, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    procName, _ := pipz.FieldProcessorName.From(e)\n    index, _ := pipz.FieldProcessorIndex.From(e)\n\n    if index > 0 {\n        log.Printf(\"WARNING: Using fallback processor %s[%d]: %s\", name, index, procName)\n    }\n}) See Fallback reference for details.",{"id":638,"title":544,"titles":639,"content":640,"level":35},"/v1.0.7/learn/hooks#timeout-1",[497,615],"Emits signals when operations exceed timeout duration: var apiTimeout = pipz.NewTimeout(\n    pipz.NewIdentity(\"api-timeout\", \"Enforces API call timeout\"),\n    apiProcessor,\n    5 * 
time.Second,\n)\n\n// Hook to track timeout events\ncapitan.Hook(pipz.SignalTimeoutTriggered, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    duration, _ := pipz.FieldDuration.From(e)\n    log.Printf(\"ALERT: Operation %s timed out after %.2fs\", name, duration)\n}) See Timeout reference for details.",{"id":642,"title":549,"titles":643,"content":644,"level":35},"/v1.0.7/learn/hooks#backoff-1",[497,615],"Emits signals when entering exponential backoff delays: var backoffProcessor = pipz.NewBackoff(\n    pipz.NewIdentity(\"api-backoff\", \"API calls with exponential backoff\"),\n    apiProcessor,\n    5,                  // maxAttempts\n    1 * time.Second,    // baseDelay\n)\n\n// Hook to track backoff behavior\ncapitan.Hook(pipz.SignalBackoffWaiting, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    attempt, _ := pipz.FieldAttempt.From(e)\n    delay, _ := pipz.FieldDelay.From(e)\n    log.Printf(\"WARNING: %s backing off on attempt %d, waiting %.2fs\", name, attempt, delay)\n}) See Backoff reference for details.",{"id":646,"title":647,"titles":648,"content":29,"level":19},"/v1.0.7/learn/hooks#testing-with-hooks","Testing with Hooks",[497],{"id":650,"title":651,"titles":652,"content":653,"level":35},"/v1.0.7/learn/hooks#sync-mode-v002","Sync Mode (v0.0.2+)",[497,647],"Use WithSyncMode() for deterministic testing without timing dependencies: func TestCircuitBreakerHooks(t *testing.T) {\n    // Configure with sync mode before first use\n    capitan.Configure(capitan.WithSyncMode())\n\n    var opened bool\n\n    capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n        opened = true\n    })\n\n    // Trigger circuit opening...\n\n    // No waiting needed - sync mode processes immediately\n    if !opened {\n        t.Error(\"circuit should have opened\")\n    }\n\n    capitan.Shutdown()\n} Important: Configure() must be called before any other 
capitan operations. In tests, each test function should use a fresh process or the default instance will already be initialized.",{"id":655,"title":656,"titles":657,"content":658,"level":35},"/v1.0.7/learn/hooks#async-mode","Async Mode",[497,647],"For testing async behavior: func TestCircuitBreakerHooks(t *testing.T) {\n    var opened bool\n    var mu sync.Mutex\n\n    capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n        mu.Lock()\n        opened = true\n        mu.Unlock()\n    })\n\n    // Trigger circuit opening...\n\n    // Wait for async processing\n    time.Sleep(50 * time.Millisecond)\n\n    mu.Lock()\n    if !opened {\n        t.Error(\"circuit should have opened\")\n    }\n    mu.Unlock()\n\n    capitan.Shutdown()\n} For production code, hooks are for observability, not control flow.",{"id":660,"title":661,"titles":662,"content":663,"level":19},"/v1.0.7/learn/hooks#further-reading","Further Reading",[497],"Capitan DocumentationArchitecture OverviewCircuitBreaker ReferenceRateLimiter ReferenceWorkerPool ReferenceRetry ReferenceFallback ReferenceTimeout ReferenceBackoff Reference html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki 
.sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":665,"title":666,"titles":667,"content":668,"level":9},"/v1.0.7/guides/connector-selection","Connector Selection Guide",[],"Decision tree and comparison matrix for choosing the right connector for your use case",{"id":670,"title":666,"titles":671,"content":672,"level":9},"/v1.0.7/guides/connector-selection#connector-selection-guide",[],"Quick decisions for choosing the right connector.",{"id":674,"title":675,"titles":676,"content":677,"level":19},"/v1.0.7/guides/connector-selection#decision-tree","Decision Tree",[666],"What do you need?\n│\n├─ Sequential processing? → Sequence\n│\n├─ Parallel processing?\n│   ├─ Need all results? → Concurrent\n│   ├─ Bounded parallelism? → WorkerPool\n│   ├─ Fire and forget? → Scaffold\n│   ├─ Need fastest? → Race\n│   └─ Need best match? → Contest\n│\n├─ Conditional routing? → Switch\n│\n├─ Error handling?\n│   ├─ Have fallback? → Fallback\n│   └─ Transient errors? → Retry\n│\n└─ Resilience?\n    ├─ Prevent cascading failures? → CircuitBreaker\n    ├─ Control throughput? 
→ RateLimiter\n    └─ Bound execution time? → Timeout",{"id":679,"title":680,"titles":681,"content":682,"level":19},"/v1.0.7/guides/connector-selection#connector-comparison-matrix","Connector Comparison Matrix",[666],"┌────────────────┬──────────┬────────────┬──────────┬─────────────────────┐\n│   Connector    │ Parallel │ All Run?   │ Returns  │ Primary Use Case    │\n├────────────────┼──────────┼────────────┼──────────┼─────────────────────┤\n│ Sequence       │    No    │ Until fail │ Last     │ Step-by-step flow   │\n│ Concurrent     │   Yes    │    Yes     │ Original │ Side effects        │\n│ WorkerPool     │   Yes*   │    Yes     │ Original │ Bounded parallelism │\n│ Scaffold       │   Yes    │    Yes     │ Original │ Fire-and-forget     │\n│ Race           │   Yes    │ First wins │ First    │ Fastest response    │\n│ Contest        │   Yes    │ Until pass │ Matching │ Quality threshold   │\n│ Switch         │    No    │ One branch │ Selected │ Conditional routing │\n│ Fallback       │    No    │ On failure │ Primary  │ Error recovery      │\n│ Retry          │    No    │ Until pass │ Success  │ Transient failures  │\n│ CircuitBreaker │    No    │ If closed  │ Result   │ Cascade prevention  │\n│ RateLimiter    │    No    │ If allowed │ Result   │ Throughput control  │\n│ Timeout        │    No    │ Time bound │ Result   │ Execution limits    │\n└────────────────┴──────────┴────────────┴──────────┴─────────────────────┘\n\nLegend:\n• Parallel: Whether processors run concurrently (*WorkerPool limits concurrency)\n• All Run?: Whether all processors execute or stop early\n• Returns: What data is returned to caller\n• Primary Use Case: Main scenario for using this connector",{"id":684,"title":685,"titles":686,"content":29,"level":19},"/v1.0.7/guides/connector-selection#problem-solution-guide","Problem-Solution 
Guide",[666],{"id":688,"title":689,"titles":690,"content":691,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-process-data-through-multiple-steps-in-order","You need to: Process data through multiple steps in order",[666,685],"Solution: Sequence // Define identity upfront\nvar PipelineID = pipz.NewIdentity(\"pipeline\", \"Sequential processing pipeline\")\n\npipeline := pipz.NewSequence[T](PipelineID, step1, step2, step3) When to use: Order mattersEach step depends on previousBuilding up state through transformations Don't use when: Steps are independent (use Concurrent instead)",{"id":693,"title":694,"titles":695,"content":696,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-run-multiple-operations-in-parallel","You need to: Run multiple operations in parallel",[666,685],"Solution: Concurrent // Define identity upfront\nvar ParallelID = pipz.NewIdentity(\"parallel\", \"Executes operations in parallel\")\n\nconcurrent := pipz.NewConcurrent[T](ParallelID, proc1, proc2, proc3) When to use: Operations are independentRunning side effects (notifications, logging)Want to parallelize for performance Requirements: Type T must implement Cloner[T] Don't use when: Order mattersOperations depend on each other",{"id":698,"title":699,"titles":700,"content":701,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-run-parallel-operations-with-limited-resources","You need to: Run parallel operations with limited resources",[666,685],"Solution: WorkerPool // Define identity upfront\nvar LimitedID = pipz.NewIdentity(\"limited\", \"Worker pool with max 3 concurrent operations\")\n\npool := pipz.NewWorkerPool[T](LimitedID, 3, proc1, proc2, proc3, proc4, proc5) When to use: Resource-constrained environmentsRate-limited external servicesControlled database connectionsPreventing memory exhaustionManaging CPU-intensive operations Requirements: Type T must implement Cloner[T] Don't use when: Need unbounded parallelism (use Concurrent)Operations must complete in 
order (use Sequence)Fire-and-forget semantics needed (use Scaffold)",{"id":703,"title":704,"titles":705,"content":706,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-get-the-fastest-result-from-multiple-sources","You need to: Get the fastest result from multiple sources",[666,685],"Solution: Race // Define identity upfront\nvar FastestID = pipz.NewIdentity(\"fastest\", \"Returns first successful result\")\n\nrace := pipz.NewRace[T](FastestID, primary, backup1, backup2) When to use: Multiple sources for same dataWant lowest latencyHave fallback options Requirements: Type T must implement Cloner[T] Don't use when: Need all resultsSources have different costs",{"id":708,"title":709,"titles":710,"content":711,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-find-first-result-meeting-quality-criteria","You need to: Find first result meeting quality criteria",[666,685],"Solution: Contest // Define identity upfront\nvar BestID = pipz.NewIdentity(\"best\", \"Finds first result with score > 0.9\")\n\ncontest := pipz.NewContest[T](BestID,\n    func(ctx context.Context, result T) bool {\n        return result.Score > 0.9\n    },\n    model1, model2, model3,\n) When to use: Quality matters more than speedHave multiple approachesWant first acceptable result Requirements: Type T must implement Cloner[T]",{"id":713,"title":714,"titles":715,"content":716,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-route-data-based-on-conditions","You need to: Route data based on conditions",[666,685],"Solution: Switch // Define identity upfront\nvar RouterID = pipz.NewIdentity(\"router\", \"Routes data based on premium status\")\n\nswitch := pipz.NewSwitch[T](RouterID,\n    func(ctx context.Context, data T) string {\n        if data.Premium {\n            return \"premium\"\n        }\n        return \"standard\"\n    },\n).\nAddRoute(\"premium\", premiumPipeline).\nAddRoute(\"standard\", standardPipeline) When to use: Different processing for different data 
typesConditional logicA/B testing Don't use when: All data follows same path",{"id":718,"title":719,"titles":720,"content":721,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-recover-from-errors-gracefully","You need to: Recover from errors gracefully",[666,685],"Solution: Fallback // Define identity upfront\nvar SafeID = pipz.NewIdentity(\"safe\", \"Uses fallback on error\")\n\nfallback := pipz.NewFallback[T](SafeID, riskyOperation, safeDefault) When to use: Have a safe defaultWant graceful degradationErrors are expected Don't use when: Errors should stop processingNo reasonable fallback exists",{"id":723,"title":724,"titles":725,"content":726,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-retry-failed-operations","You need to: Retry failed operations",[666,685],"Solution: Retry // Define identity upfront\nvar ReliableID = pipz.NewIdentity(\"reliable\", \"Retries up to 3 times on failure\")\n\nretry := pipz.NewRetry[T](ReliableID, processor, 3) When to use: Transient errors (network, temporary unavailability)External service callsDatabase operations Don't use when: Errors are permanent (validation failures)No backoff needed (can overwhelm service)",{"id":728,"title":729,"titles":730,"content":731,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-prevent-cascading-failures","You need to: Prevent cascading failures",[666,685],"Solution: CircuitBreaker // Define identity upfront\nvar ProtectedID = pipz.NewIdentity(\"protected\", \"Circuit breaker with 5 failure threshold\")\n\nbreaker := pipz.NewCircuitBreaker[T](ProtectedID, processor,\n    pipz.WithCircuitBreakerThreshold(5),\n    pipz.WithCircuitBreakerWindow(time.Minute),\n) When to use: Calling external servicesProtecting downstream systemsFailing fast is acceptable Don't use when: Every request must be attemptedFailures are independent",{"id":733,"title":734,"titles":735,"content":736,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-control-processing-rate","You 
need to: Control processing rate",[666,685],"Solution: RateLimiter // Define identity upfront\nvar ThrottledID = pipz.NewIdentity(\"throttled\", \"Rate limited to 100 per second\")\n\nlimiter := pipz.NewRateLimiter[T](ThrottledID, processor,\n    pipz.WithRateLimiterRate(100),\n    pipz.WithRateLimiterPeriod(time.Second),\n) When to use: API rate limitsResource protectionCost control Important: Must use singleton instance (don't create per request)",{"id":738,"title":739,"titles":740,"content":741,"level":35},"/v1.0.7/guides/connector-selection#you-need-to-bound-execution-time","You need to: Bound execution time",[666,685],"Solution: Timeout // Define identity upfront\nvar BoundedID = pipz.NewIdentity(\"bounded\", \"Times out after 5 seconds\")\n\ntimeout := pipz.NewTimeout[T](BoundedID, processor, 5*time.Second) When to use: Network operationsUser-facing APIsSLA requirements Don't use when: Operations must completeTime is unpredictable",{"id":743,"title":744,"titles":745,"content":746,"level":19},"/v1.0.7/guides/connector-selection#quick-comparison","Quick Comparison",[666],"ConnectorParallel?Can Fail?Needs Clone?Stateful?SequenceNoYesNoNoConcurrentYesYesYesNoWorkerPoolLimitedYesYesNoScaffoldYesNo**YesNoRaceYesYesYesNoContestYesYesYesNoSwitchNoYesNoNoFallbackNoNo*NoNoRetryNoYesNoYesCircuitBreakerNoYesNoYesRateLimiterNoYesNoYesTimeoutNoYesNoNo *Fallback always returns a value (uses fallback on error)\n**Scaffold errors are not reported back",{"id":748,"title":749,"titles":750,"content":29,"level":19},"/v1.0.7/guides/connector-selection#common-combinations","Common Combinations",[666],{"id":752,"title":753,"titles":754,"content":755,"level":35},"/v1.0.7/guides/connector-selection#resilient-external-api-call","Resilient External API Call",[666,749],"// Define identities upfront\nvar (\n    RateID    = pipz.NewIdentity(\"rate\", \"Rate limit external API calls\")\n    BreakerID = pipz.NewIdentity(\"breaker\", \"Circuit breaker for API protection\")\n    TimeoutID = 
pipz.NewIdentity(\"timeout\", \"5 second timeout for API calls\")\n    RetryID   = pipz.NewIdentity(\"retry\", \"Retry API calls up to 3 times\")\n)\n\napi := pipz.NewRateLimiter(RateID,\n    pipz.NewCircuitBreaker(BreakerID,\n        pipz.NewTimeout(TimeoutID,\n            pipz.NewRetry(RetryID, apiCall, 3),\n            5*time.Second,\n        ),\n    ),\n)",{"id":757,"title":758,"titles":759,"content":760,"level":35},"/v1.0.7/guides/connector-selection#multi-source-with-fallback","Multi-Source with Fallback",[666,749],"// Define identities upfront\nvar (\n    FetchID   = pipz.NewIdentity(\"fetch\", \"Fetch with fallback to static default\")\n    SourcesID = pipz.NewIdentity(\"sources\", \"Race between primary and secondary sources\")\n)\n\nfetch := pipz.NewFallback(FetchID,\n    pipz.NewRace[T](SourcesID, primary, secondary),\n    staticDefault,\n)",{"id":762,"title":763,"titles":764,"content":765,"level":35},"/v1.0.7/guides/connector-selection#conditional-parallel-processing","Conditional Parallel Processing",[666,749],"// Define identities upfront\nvar (\n    RouterID     = pipz.NewIdentity(\"router\", \"Routes to batch or sequential processing\")\n    BatchID      = pipz.NewIdentity(\"batch\", \"Batch parallel processing\")\n    SequentialID = pipz.NewIdentity(\"seq\", \"Sequential processing\")\n)\n\nrouter := pipz.NewSwitch[T](RouterID, routeFunc).\n    AddRoute(\"batch\", pipz.NewConcurrent[T](BatchID, processors...)).\n    AddRoute(\"sequential\", pipz.NewSequence[T](SequentialID, processors...))",{"id":767,"title":768,"titles":769,"content":770,"level":35},"/v1.0.7/guides/connector-selection#resource-constrained-processing","Resource-Constrained Processing",[666,749],"// Define identities upfront\nvar (\n    APILimitedID = pipz.NewIdentity(\"api-limited\", \"Worker pool limited to 5 concurrent API calls\")\n    ServiceAID   = pipz.NewIdentity(\"service-a\", \"Calls service A\")\n    ServiceBID   = pipz.NewIdentity(\"service-b\", \"Calls service B\")\n    
ServiceCID   = pipz.NewIdentity(\"service-c\", \"Calls service C\")\n    ServiceDID   = pipz.NewIdentity(\"service-d\", \"Calls service D\")\n    ServiceEID   = pipz.NewIdentity(\"service-e\", \"Calls service E\")\n    ServiceFID   = pipz.NewIdentity(\"service-f\", \"Calls service F\")\n)\n\n// Limit concurrent API calls to avoid rate limits\napiCalls := pipz.NewWorkerPool[T](APILimitedID, 5,\n    pipz.Apply(ServiceAID, callServiceA),\n    pipz.Apply(ServiceBID, callServiceB),\n    pipz.Apply(ServiceCID, callServiceC),\n    pipz.Apply(ServiceDID, callServiceD),\n    pipz.Apply(ServiceEID, callServiceE),\n    pipz.Apply(ServiceFID, callServiceF),\n    // Only 5 will run concurrently\n) html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sW3Qg, html code.shiki 
.sW3Qg{--shiki-default:var(--shiki-operator)}",{"id":772,"title":773,"titles":774,"content":775,"level":9},"/v1.0.7/guides/cloning","Clone Implementation Guide",[],"Comprehensive guide to implementing the Clone() method correctly for safe concurrent processing",{"id":777,"title":773,"titles":778,"content":779,"level":9},"/v1.0.7/guides/cloning#clone-implementation-guide",[],"Understanding and implementing the Clone() method correctly is critical for safe concurrent processing in pipz pipelines.",{"id":781,"title":782,"titles":783,"content":784,"level":19},"/v1.0.7/guides/cloning#why-clone-matters","Why Clone() Matters",[773],"When using concurrent connectors (Concurrent, Race, Contest), pipz creates independent copies of your data for each parallel processor. This isolation prevents data races and ensures predictable behavior. Without proper cloning, concurrent processors can corrupt each other's data, leading to subtle bugs that are difficult to debug.",{"id":786,"title":787,"titles":788,"content":789,"level":19},"/v1.0.7/guides/cloning#the-cloner-interface","The Cloner Interface",[773],"type Cloner[T any] interface {\n    Clone() T\n} Your data types must implement this interface to work with concurrent connectors.",{"id":791,"title":792,"titles":793,"content":29,"level":19},"/v1.0.7/guides/cloning#implementation-patterns","Implementation Patterns",[773],{"id":795,"title":796,"titles":797,"content":798,"level":35},"/v1.0.7/guides/cloning#pattern-1-simple-value-types","Pattern 1: Simple Value Types",[773,792],"For types containing only value fields (no pointers, slices, or maps): type Config struct {\n    MaxRetries int\n    Timeout    time.Duration\n    Enabled    bool\n}\n\n// Simple copy is sufficient for value types\nfunc (c Config) Clone() Config {\n    return c  // All fields are copied by value\n}",{"id":800,"title":801,"titles":802,"content":803,"level":35},"/v1.0.7/guides/cloning#pattern-2-types-with-slices","Pattern 2: Types with 
Slices",[773,792],"Slices share underlying arrays, so they must be deep copied: ❌ WRONG: Shallow copy shares slice memory func (o Order) Clone() Order {\n    return Order{\n        ID:    o.ID,\n        Items: o.Items,  // DANGER: Shares underlying array!\n    }\n}\n// Concurrent processors will see each other's modifications! ✅ RIGHT: Deep copy creates independent slice func (o Order) Clone() Order {\n    // Create new slice with same capacity for efficiency\n    items := make([]Item, len(o.Items))\n    copy(items, o.Items)\n    \n    return Order{\n        ID:    o.ID,\n        Items: items,  // Independent copy\n    }\n}",{"id":805,"title":806,"titles":807,"content":808,"level":35},"/v1.0.7/guides/cloning#pattern-3-types-with-maps","Pattern 3: Types with Maps",[773,792],"Maps are reference types and must be copied explicitly: ❌ WRONG: Shallow copy shares map reference func (r Request) Clone() Request {\n    return Request{\n        ID:      r.ID,\n        Headers: r.Headers,  // DANGER: Same map instance!\n    }\n} ✅ RIGHT: Deep copy creates independent map func (r Request) Clone() Request {\n    // Create new map with same capacity\n    headers := make(map[string]string, len(r.Headers))\n    for k, v := range r.Headers {\n        headers[k] = v\n    }\n    \n    return Request{\n        ID:      r.ID,\n        Headers: headers,  // Independent copy\n    }\n}",{"id":810,"title":811,"titles":812,"content":813,"level":35},"/v1.0.7/guides/cloning#pattern-4-types-with-pointers","Pattern 4: Types with Pointers",[773,792],"Pointers require careful consideration - decide whether to share or copy the pointed-to value: type Document struct {\n    ID       string\n    Content  string\n    Metadata *Metadata  // Pointer field\n}\n\n// Option 1: Share the pointed-to value (if immutable)\nfunc (d Document) Clone() Document {\n    return Document{\n        ID:       d.ID,\n        Content:  d.Content,\n        Metadata: d.Metadata,  // Shares same Metadata instance\n    
}\n}\n\n// Option 2: Deep copy the pointed-to value (if mutable)\nfunc (d Document) Clone() Document {\n    var metadata *Metadata\n    if d.Metadata != nil {\n        // Create independent copy\n        metaCopy := *d.Metadata\n        metadata = &metaCopy\n    }\n    \n    return Document{\n        ID:       d.ID,\n        Content:  d.Content,\n        Metadata: metadata,  // Independent copy\n    }\n}",{"id":815,"title":816,"titles":817,"content":818,"level":35},"/v1.0.7/guides/cloning#pattern-5-nested-structures","Pattern 5: Nested Structures",[773,792],"For complex nested structures, implement Clone() recursively: type Order struct {\n    ID       string\n    Customer Customer\n    Items    []OrderItem\n    Metadata map[string]any\n}\n\ntype Customer struct {\n    ID        string\n    Name      string\n    Addresses []Address\n}\n\ntype OrderItem struct {\n    ProductID string\n    Quantity  int\n    Options   map[string]string\n}\n\n// Comprehensive deep clone\nfunc (o Order) Clone() Order {\n    // Clone nested struct (if it has reference types)\n    customer := o.Customer.Clone()\n    \n    // Clone slice of structs\n    items := make([]OrderItem, len(o.Items))\n    for i, item := range o.Items {\n        items[i] = item.Clone()\n    }\n    \n    // Clone map with interface{} values\n    metadata := make(map[string]any, len(o.Metadata))\n    for k, v := range o.Metadata {\n        // Handle different value types\n        switch val := v.(type) {\n        case []byte:\n            // Deep copy byte slices\n            b := make([]byte, len(val))\n            copy(b, val)\n            metadata[k] = b\n        default:\n            // Copy other values directly\n            metadata[k] = val\n        }\n    }\n    \n    return Order{\n        ID:       o.ID,\n        Customer: customer,\n        Items:    items,\n        Metadata: metadata,\n    }\n}\n\nfunc (c Customer) Clone() Customer {\n    addresses := make([]Address, len(c.Addresses))\n    
copy(addresses, c.Addresses)\n    \n    return Customer{\n        ID:        c.ID,\n        Name:      c.Name,\n        Addresses: addresses,\n    }\n}\n\nfunc (i OrderItem) Clone() OrderItem {\n    options := make(map[string]string, len(i.Options))\n    for k, v := range i.Options {\n        options[k] = v\n    }\n    \n    return OrderItem{\n        ProductID: i.ProductID,\n        Quantity:  i.Quantity,\n        Options:   options,\n    }\n}",{"id":820,"title":821,"titles":822,"content":29,"level":19},"/v1.0.7/guides/cloning#testing-clone-implementations","Testing Clone Implementations",[773],{"id":824,"title":825,"titles":826,"content":827,"level":35},"/v1.0.7/guides/cloning#test-1-independence-test","Test 1: Independence Test",[773,821],"Verify that modifications to the clone don't affect the original: func TestCloneIndependence(t *testing.T) {\n    original := Order{\n        ID:    \"order-1\",\n        Items: []Item{{ProductID: \"prod-1\", Quantity: 1}},\n        Metadata: map[string]any{\n            \"priority\": \"high\",\n        },\n    }\n    \n    // Create clone\n    cloned := original.Clone()\n    \n    // Modify clone\n    cloned.Items[0].Quantity = 5\n    cloned.Metadata[\"priority\"] = \"low\"\n    \n    // Verify original is unchanged\n    if original.Items[0].Quantity != 1 {\n        t.Error(\"Clone modification affected original slice\")\n    }\n    if original.Metadata[\"priority\"] != \"high\" {\n        t.Error(\"Clone modification affected original map\")\n    }\n}",{"id":829,"title":830,"titles":831,"content":832,"level":35},"/v1.0.7/guides/cloning#test-2-race-condition-detection","Test 2: Race Condition Detection",[773,821],"Use Go's race detector to catch sharing issues: func TestCloneConcurrency(t *testing.T) {\n    // Run with: go test -race\n    \n    original := Order{\n        ID:    \"order-1\",\n        Items: []Item{{ProductID: \"prod-1\", Quantity: 1}},\n    }\n    \n    // Simulate concurrent processing\n    var wg 
sync.WaitGroup\n    for i := 0; i \u003C 10; i++ {\n        wg.Add(1)\n        go func(n int) {\n            defer wg.Done()\n            \n            // Each goroutine gets its own clone\n            clone := original.Clone()\n            \n            // Modify clone independently\n            clone.Items[0].Quantity = n\n            \n            // Process...\n            time.Sleep(10 * time.Millisecond)\n            \n            // Verify our modifications\n            if clone.Items[0].Quantity != n {\n                t.Errorf(\"Unexpected quantity: got %d, want %d\", \n                    clone.Items[0].Quantity, n)\n            }\n        }(i)\n    }\n    wg.Wait()\n}",{"id":834,"title":835,"titles":836,"content":837,"level":35},"/v1.0.7/guides/cloning#test-3-benchmark-clone-performance","Test 3: Benchmark Clone Performance",[773,821],"Measure the overhead of cloning: func BenchmarkClone(b *testing.B) {\n    order := Order{\n        ID:       \"order-1\",\n        Items:    make([]Item, 100),\n        Metadata: make(map[string]any, 50),\n    }\n    \n    // Initialize test data\n    for i := range order.Items {\n        order.Items[i] = Item{\n            ProductID: fmt.Sprintf(\"prod-%d\", i),\n            Quantity:  i,\n        }\n    }\n    for i := 0; i \u003C 50; i++ {\n        order.Metadata[fmt.Sprintf(\"key-%d\", i)] = i\n    }\n    \n    b.ResetTimer()\n    for i := 0; i \u003C b.N; i++ {\n        _ = order.Clone()\n    }\n}",{"id":839,"title":840,"titles":841,"content":29,"level":19},"/v1.0.7/guides/cloning#common-pitfalls","Common Pitfalls",[773],{"id":843,"title":844,"titles":845,"content":846,"level":35},"/v1.0.7/guides/cloning#pitfall-1-forgetting-nested-slices","Pitfall 1: Forgetting Nested Slices",[773,840],"type Report struct {\n    Sections []Section\n}\n\ntype Section struct {\n    Title string\n    Data  []byte  // Easy to miss!\n}\n\n❌ **WRONG: Nested slices not cloned**\nfunc (r Report) Clone() Report {\n    sections := 
make([]Section, len(r.Sections))\n    copy(sections, r.Sections)  // Shallow copy of structs!\n    return Report{Sections: sections}\n}\n\n✅ **RIGHT: Clone nested slices too**\nfunc (r Report) Clone() Report {\n    sections := make([]Section, len(r.Sections))\n    for i, s := range r.Sections {\n        data := make([]byte, len(s.Data))\n        copy(data, s.Data)\n        sections[i] = Section{\n            Title: s.Title,\n            Data:  data,\n        }\n    }\n    return Report{Sections: sections}\n}",{"id":848,"title":849,"titles":850,"content":851,"level":35},"/v1.0.7/guides/cloning#pitfall-2-shared-channel-references","Pitfall 2: Shared Channel References",[773,840],"Channels should typically not be cloned: type Worker struct {\n    ID      string\n    Results chan Result  // Channels are for communication\n}\n\nfunc (w Worker) Clone() Worker {\n    return Worker{\n        ID:      w.ID,\n        Results: w.Results,  // Share the channel - usually correct\n    }\n}",{"id":853,"title":854,"titles":855,"content":856,"level":35},"/v1.0.7/guides/cloning#pitfall-3-time-and-sync-types","Pitfall 3: Time and Sync Types",[773,840],"Some standard library types have special considerations: type Task struct {\n    ID        string\n    StartTime time.Time    // Value type, safe to copy\n    mu        sync.Mutex   // NEVER copy a mutex!\n    data      []byte\n}\n\nfunc (t *Task) Clone() Task {\n    // Note: Returns value, not pointer\n    t.mu.Lock()\n    defer t.mu.Unlock()\n    \n    data := make([]byte, len(t.data))\n    copy(data, t.data)\n    \n    return Task{\n        ID:        t.ID,\n        StartTime: t.StartTime,\n        // mu: zero value (new mutex)\n        data: data,\n    }\n}",{"id":858,"title":445,"titles":859,"content":29,"level":19},"/v1.0.7/guides/cloning#performance-considerations",[773],{"id":861,"title":862,"titles":863,"content":864,"level":35},"/v1.0.7/guides/cloning#memory-allocation","Memory Allocation",[773,445],"Deep cloning allocates 
new memory. Consider the trade-offs: // Lightweight clone for mostly-immutable data\nfunc (d Document) CloneLightweight() Document {\n    // Only clone what might be modified\n    return Document{\n        ID:       d.ID,\n        Metadata: d.Metadata,  // Share if read-only\n        Content:  d.Content,   // Share large immutable data\n        Tags:     cloneSlice(d.Tags),  // Clone mutable slice\n    }\n}\n\n// Full deep clone for complete isolation\nfunc (d Document) CloneDeep() Document {\n    // Clone everything for total independence\n    content := make([]byte, len(d.Content))\n    copy(content, d.Content)\n    \n    return Document{\n        ID:       d.ID,\n        Metadata: cloneMap(d.Metadata),\n        Content:  content,\n        Tags:     cloneSlice(d.Tags),\n    }\n}",{"id":866,"title":867,"titles":868,"content":869,"level":35},"/v1.0.7/guides/cloning#clone-pools","Clone Pools",[773,445],"For high-frequency cloning, consider object pools: var orderPool = sync.Pool{\n    New: func() any {\n        return &Order{\n            Items:    make([]Item, 0, 10),      // Pre-allocate capacity\n            Metadata: make(map[string]any, 5),\n        }\n    },\n}\n\nfunc (o Order) CloneWithPool() Order {\n    // Get pooled object\n    clone := orderPool.Get().(*Order)\n    \n    // Reset and populate\n    clone.ID = o.ID\n    clone.Items = clone.Items[:0]  // Reuse slice backing\n    clone.Items = append(clone.Items, o.Items...)\n    \n    // Clear and repopulate map\n    for k := range clone.Metadata {\n        delete(clone.Metadata, k)\n    }\n    for k, v := range o.Metadata {\n        clone.Metadata[k] = v\n    }\n    \n    return *clone\n}",{"id":871,"title":872,"titles":873,"content":874,"level":19},"/v1.0.7/guides/cloning#when-clone-errors-occur","When Clone() Errors Occur",[773],"If you see panics or race conditions with concurrent connectors, check: Missing Clone() implementation: Type doesn't implement Cloner interfaceShallow copies: Slices/maps are 
being shared between goroutinesPointer fields: Pointed-to values are being modified concurrentlyInterface fields: Concrete types in interface{} fields need deep copying",{"id":876,"title":877,"titles":878,"content":879,"level":19},"/v1.0.7/guides/cloning#debugging-clone-issues","Debugging Clone Issues",[773],"Enable race detection during development: # Run tests with race detector\ngo test -race ./...\n\n# Run your application with race detector\ngo run -race main.go Common race detector output indicating clone issues: WARNING: DATA RACE\nWrite at 0x00c000180010 by goroutine 7:\n  main.processOrder()\n      /path/to/file.go:45 +0x64\n\nPrevious write at 0x00c000180010 by goroutine 6:\n  main.processOrder()\n      /path/to/file.go:45 +0x64 This indicates shared memory between goroutines - your Clone() is likely shallow copying.",{"id":881,"title":882,"titles":883,"content":884,"level":19},"/v1.0.7/guides/cloning#best-practices-summary","Best Practices Summary",[773],"Always deep copy reference types (slices, maps, pointers)Test with -race flag during developmentBenchmark Clone() performance for hot pathsDocument sharing decisions when intentionally sharing dataConsider immutability to avoid cloning altogetherUse code generation for complex types (see tools like deepcopy-gen)",{"id":886,"title":887,"titles":888,"content":889,"level":19},"/v1.0.7/guides/cloning#example-production-ready-clone","Example: Production-Ready Clone",[773],"Here's a complete example following all best practices: package main\n\nimport (\n    \"sync\"\n    \"time\"\n)\n\ntype Order struct {\n    // Immutable fields (safe to share)\n    ID        string\n    CreatedAt time.Time\n    \n    // Mutable value fields (copied by value)\n    Status string\n    Total  float64\n    \n    // Reference types (need deep copy)\n    Items      []OrderItem\n    Tags       []string\n    Attributes map[string]string\n    \n    // Pointer fields (decision needed)\n    Customer *Customer\n    \n    // Never 
copy\n    mu sync.RWMutex\n}\n\nfunc (o Order) Clone() Order {\n    // Deep copy slices\n    items := make([]OrderItem, len(o.Items))\n    for i, item := range o.Items {\n        items[i] = item.Clone()\n    }\n    \n    tags := make([]string, len(o.Tags))\n    copy(tags, o.Tags)\n    \n    // Deep copy map\n    attributes := make(map[string]string, len(o.Attributes))\n    for k, v := range o.Attributes {\n        attributes[k] = v\n    }\n    \n    // Deep copy pointer if needed\n    var customer *Customer\n    if o.Customer != nil {\n        custCopy := o.Customer.Clone()\n        customer = &custCopy\n    }\n    \n    return Order{\n        // Immutable fields\n        ID:        o.ID,\n        CreatedAt: o.CreatedAt,\n        \n        // Value fields\n        Status: o.Status,\n        Total:  o.Total,\n        \n        // Deep copied references\n        Items:      items,\n        Tags:       tags,\n        Attributes: attributes,\n        Customer:   customer,\n        \n        // mu gets zero value (new mutex)\n    }\n}\n\n// Helper for OrderItem\nfunc (i OrderItem) Clone() OrderItem {\n    // Implement based on OrderItem structure\n    return i\n}\n\n// Helper for Customer  \nfunc (c Customer) Clone() Customer {\n    // Implement based on Customer structure\n    return c\n} This implementation ensures complete isolation between concurrent processors while maintaining good performance characteristics. 
html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}",{"id":891,"title":135,"titles":892,"content":893,"level":9},"/v1.0.7/guides/best-practices",[],"Design principles, patterns, and production-ready guidelines for building robust pipelines with 
pipz",{"id":895,"title":135,"titles":896,"content":897,"level":9},"/v1.0.7/guides/best-practices#best-practices",[],"Guidelines for building production-ready pipelines with pipz.",{"id":899,"title":900,"titles":901,"content":29,"level":19},"/v1.0.7/guides/best-practices#design-principles","Design Principles",[135],{"id":903,"title":904,"titles":905,"content":906,"level":35},"/v1.0.7/guides/best-practices#_1-single-responsibility","1. Single Responsibility",[135,900],"Each processor should do one thing well: // Define identities as package-level variables\nvar (\n    ValidateEmailID = pipz.NewIdentity(\"validate_email\", \"Validates email format\")\n    NormalizeEmailID = pipz.NewIdentity(\"normalize_email\", \"Normalizes email to lowercase\")\n    ProcessEmailID = pipz.NewIdentity(\"process_email\", \"Processes email (does too much!)\")\n)\n\n// Good: Focused processors\nvalidateEmail := pipz.Apply(ValidateEmailID, func(ctx context.Context, u User) (User, error) {\n    if !isValidEmail(u.Email) {\n        return u, errors.New(\"invalid email format\")\n    }\n    return u, nil\n})\n\nnormalizeEmail := pipz.Transform(NormalizeEmailID, func(ctx context.Context, u User) User {\n    u.Email = strings.ToLower(strings.TrimSpace(u.Email))\n    return u\n})\n\n// Bad: Doing too much\nprocessEmail := pipz.Apply(ProcessEmailID, func(ctx context.Context, u User) (User, error) {\n    // Validating AND normalizing AND checking duplicates\n    u.Email = strings.ToLower(strings.TrimSpace(u.Email))\n    if !isValidEmail(u.Email) {\n        return u, errors.New(\"invalid email\")\n    }\n    if emailExists(u.Email) {\n        return u, errors.New(\"email exists\")\n    }\n    return u, nil\n})",{"id":908,"title":909,"titles":910,"content":911,"level":35},"/v1.0.7/guides/best-practices#_2-identity-driven-design","2. Identity-Driven Design",[135,900],"Store Identity objects as package-level variables. 
This isn't just good style - it's what makes your pipelines composable and modifiable at runtime.",{"id":913,"title":914,"titles":915,"content":916,"level":917},"/v1.0.7/guides/best-practices#why-identity-variables-are-critical","Why Identity Variables are Critical",[135,900,909],"Identity objects serve as keys that enable dynamic pipeline modification. Because Identity is a comparable struct, you must use the same Identity variable for both creation and lookup: // Define identities as package-level variables\nvar (\n    ValidateOrderID = pipz.NewIdentity(\"validate_order\", \"Validates order structure and totals\")\n    CalculatePriceID = pipz.NewIdentity(\"calculate_price\", \"Calculates order pricing\")\n    ProcessPaymentID = pipz.NewIdentity(\"process_payment\", \"Processes payment transaction\")\n)\n\n// Use the same Identity when creating processors\nvalidateOrder := pipz.Apply(ValidateOrderID, validateFunc)\ncalculatePrice := pipz.Transform(CalculatePriceID, priceFunc)\nprocessPayment := pipz.Apply(ProcessPaymentID, paymentFunc)\n\n// Later, use the same Identity for pipeline modification:\norderPipeline.After(ValidateOrderID, fraudDetection)\norderPipeline.Replace(ProcessPaymentID, testPaymentProcessor)\norderPipeline.Remove(CalculatePriceID) // For free orders Without stored Identity variables, you lose this composability - creating new Identity objects inline makes dynamic modification impossible since they won't match.",4,{"id":919,"title":920,"titles":921,"content":922,"level":917},"/v1.0.7/guides/best-practices#the-pattern","The Pattern",[135,900,909],"// Define all identities as package-level variables - these become your pipeline's \"keys\"\nvar (\n    // Processor identities\n    ValidateOrderID     = pipz.NewIdentity(\"validate_order\", \"Validates order structure and totals\")\n    CheckInventoryID    = pipz.NewIdentity(\"check_inventory\", \"Checks inventory availability\")\n    CalculatePriceID    = pipz.NewIdentity(\"calculate_price\", 
\"Calculates subtotal, tax, and total\")\n    ApplyDiscountsID    = pipz.NewIdentity(\"apply_discounts\", \"Applies applicable discounts\")\n    ProcessPaymentID    = pipz.NewIdentity(\"process_payment\", \"Processes payment transaction\")\n    UpdateInventoryID   = pipz.NewIdentity(\"update_inventory\", \"Updates inventory levels\")\n    SendConfirmationID  = pipz.NewIdentity(\"send_confirmation\", \"Sends order confirmation\")\n    UpdateCRMID         = pipz.NewIdentity(\"update_crm\", \"Updates CRM system\")\n    MarkPendingID       = pipz.NewIdentity(\"mark_pending\", \"Marks order as pending\")\n    ApplyBulkDiscountID = pipz.NewIdentity(\"apply_bulk_discount\", \"Applies bulk order discount\")\n\n    // Connector identities\n    OrderPipelineID      = pipz.NewIdentity(\"order-pipeline\", \"Complete order processing pipeline\")\n    PaymentFlowID        = pipz.NewIdentity(\"payment-flow\", \"Payment processing with retry\")\n    NotificationsID      = pipz.NewIdentity(\"notification-flow\", \"Notification with timeout\")\n    ExpressCheckoutID    = pipz.NewIdentity(\"express-checkout\", \"Express checkout pipeline\")\n    BulkOrdersID         = pipz.NewIdentity(\"bulk-orders\", \"Bulk order processing pipeline\")\n)\n\n// Use Identity variables when creating processors\nfunc createOrderProcessors() map[pipz.Identity]pipz.Chainable[Order] {\n    return map[pipz.Identity]pipz.Chainable[Order]{\n        ValidateOrderID: pipz.Apply(ValidateOrderID, func(ctx context.Context, o Order) (Order, error) {\n            if o.Total \u003C= 0 {\n                return o, errors.New(\"invalid order total\")\n            }\n            return o, nil\n        }),\n\n        CheckInventoryID: pipz.Apply(CheckInventoryID, func(ctx context.Context, o Order) (Order, error) {\n            for _, item := range o.Items {\n                if !inventory.Has(item.SKU, item.Quantity) {\n                    return o, fmt.Errorf(\"insufficient inventory for %s\", item.SKU)\n                
}\n            }\n            return o, nil\n        }),\n\n        CalculatePriceID: pipz.Transform(CalculatePriceID, func(ctx context.Context, o Order) Order {\n            o.Subtotal = calculateSubtotal(o.Items)\n            o.Tax = calculateTax(o.Subtotal, o.ShippingAddress)\n            o.Total = o.Subtotal + o.Tax + o.ShippingCost\n            return o\n        }),\n    }\n}\n\n// Build pipelines using Identity variables\nfunc createOrderPipeline() pipz.Chainable[Order] {\n    seq := pipz.NewSequence[Order](OrderPipelineID)\n    seq.Register(\n        pipz.Apply(ValidateOrderID, validateOrder),\n        pipz.Apply(CheckInventoryID, checkInventory),\n        pipz.Transform(CalculatePriceID, calculatePrice),\n        pipz.Transform(ApplyDiscountsID, applyDiscounts),\n\n        // Payment with retry\n        pipz.NewRetry(PaymentFlowID,\n            pipz.Apply(ProcessPaymentID, processPayment),\n            3,\n        ),\n\n        pipz.Apply(UpdateInventoryID, updateInventory),\n\n        // Notifications with timeout\n        pipz.NewTimeout(NotificationsID,\n            pipz.Effect(SendConfirmationID, sendConfirmation),\n            5*time.Second,\n        ),\n    )\n    return seq\n}\n\n// Example: Dynamic pipeline composition\nfunc createCustomPipeline(config Config) pipz.Chainable[Order] {\n    // For dynamic names, create Identity inline when name is runtime-determined\n    customPipelineID := pipz.NewIdentity(config.Name, \"Custom pipeline based on config\")\n    seq := pipz.NewSequence[Order](customPipelineID)\n\n    // Always validate - using the shared Identity variable\n    seq.Register(pipz.Apply(ValidateOrderID, validateOrder))\n\n    // Conditionally add processors\n    if config.CheckInventory {\n        seq.Register(pipz.Apply(CheckInventoryID, checkInventory))\n    }\n\n    seq.Register(pipz.Transform(CalculatePriceID, calculatePrice))\n\n    if config.DiscountsEnabled {\n        seq.Register(pipz.Transform(ApplyDiscountsID, applyDiscounts))\n 
   }\n\n    return seq\n} Benefits of Identity-driven design: Composability: Same Identity enables pipeline modification (After, Before, Remove, Replace)No Magic Strings: Variables prevent typos and runtime errorsIDE Support: Auto-completion and refactoring work perfectlyDebugging: Error messages show meaningful processor names with descriptionsSearchability: Easy to find all usages of a processorSelf-Documenting: Identity descriptions explain what each processor does // Example: Error messages are more helpful with Identity names\nresult, err := pipeline.Process(ctx, order)\nif err != nil {\n    // Error: \"failed at [order-pipeline -> validate_order]: invalid order total\"\n    // Much clearer than: \"failed at [pipeline1 -> proc1]: invalid order total\"\n}\n\n// Example: Easy to modify pipelines using Identity variables\nfunc addFraudCheck(seq *pipz.Sequence[Order]) {\n    // Use the same ValidateOrderID variable for lookup\n    var FraudCheckID = pipz.NewIdentity(\"fraud_check\", \"Checks for fraudulent orders\")\n    fraudCheck := pipz.Apply(FraudCheckID, checkFraud)\n    seq.After(ValidateOrderID, fraudCheck) // Insert after validation\n}",{"id":924,"title":925,"titles":926,"content":927,"level":35},"/v1.0.7/guides/best-practices#_3-explicit-error-handling","3. 
Explicit Error Handling",[135,900],"Make error handling visible and intentional: // Define identities as package-level variables\nvar (\n    PaymentFlowID     = pipz.NewIdentity(\"payment-flow\", \"Payment processing with error handling\")\n    ValidatePaymentID = pipz.NewIdentity(\"validate\", \"Validates payment details\")\n    ChargeCardID      = pipz.NewIdentity(\"charge\", \"Charges payment card\")\n    NotifyHandlerID   = pipz.NewIdentity(\"notify-with-logging\", \"Notification with error logging\")\n    NotifyID          = pipz.NewIdentity(\"notify\", \"Sends notification\")\n    LogNotifyErrorID  = pipz.NewIdentity(\"log_notification_error\", \"Logs notification errors\")\n    BadProcessID      = pipz.NewIdentity(\"process\", \"Processes payment (too much in one place)\")\n)\n\n// Good: Clear error strategy with Identity variables\npayment := pipz.NewSequence(PaymentFlowID,\n    pipz.Apply(ValidatePaymentID, validatePayment),\n\n    // Critical operation with retry\n    pipz.RetryWithBackoff(\n        pipz.Apply(ChargeCardID, chargeCard),\n        3,\n        time.Second,\n    ),\n\n    // Non-critical with error handler\n    pipz.NewHandle(NotifyHandlerID,\n        pipz.Effect(NotifyID, sendNotification),\n        pipz.Effect(LogNotifyErrorID, func(ctx context.Context, err *pipz.Error[Order]) error {\n            log.Printf(\"Notification failed for order %s: %v\", err.InputData.ID, err.Err)\n            return nil\n        }),\n    ),\n)\n\n// Bad: Hidden error handling\npayment := pipz.Apply(BadProcessID, func(ctx context.Context, p Payment) (Payment, error) {\n    // Validation, charging, and notification all mixed together\n    // Error handling buried in function\n})",{"id":929,"title":930,"titles":931,"content":932,"level":35},"/v1.0.7/guides/best-practices#_4-type-safe-routes","4. 
Type-Safe Routes",[135,900],"Use custom types for routing decisions: // Good: Type-safe enum\ntype OrderPriority int\nconst (\n    PriorityStandard OrderPriority = iota\n    PriorityExpress\n    PriorityOvernight\n)\n\nrouter := pipz.Switch(\n    func(ctx context.Context, o Order) OrderPriority {\n        if o.ShippingMethod == \"overnight\" {\n            return PriorityOvernight\n        }\n        // ...\n    },\n    map[OrderPriority]pipz.Chainable[Order]{\n        PriorityStandard:  standardFulfillment,\n        PriorityExpress:   expressFulfillment,\n        PriorityOvernight: overnightFulfillment,\n    },\n)\n\n// Bad: Magic strings\nrouter := pipz.Switch(\n    func(ctx context.Context, o Order) string {\n        return o.ShippingMethod // \"standard\", \"express\", etc.\n    },\n    map[string]pipz.Chainable[Order]{\n        \"standard\": standardFulfillment,\n        \"express\":  expressFulfillment,\n        // Easy to typo, no compile-time checking\n    },\n)",{"id":934,"title":935,"titles":936,"content":29,"level":19},"/v1.0.7/guides/best-practices#pipeline-patterns","Pipeline Patterns",[135],{"id":938,"title":939,"titles":940,"content":941,"level":35},"/v1.0.7/guides/best-practices#pattern-concurrent-vs-sequential","Pattern: Concurrent vs Sequential",[135,935],"Understanding when to use Concurrent versus Sequential is critical for performance:",{"id":943,"title":944,"titles":945,"content":946,"level":917},"/v1.0.7/guides/best-practices#use-concurrent-for-io-operations","Use Concurrent for I/O Operations",[135,935,939],"Concurrent is designed for operations with real latency where parallel execution provides benefit: // Define identities as package-level variables\nvar (\n    SendNotificationsID = pipz.NewIdentity(\"send-notifications\", \"Sends notifications in parallel\")\n    EnrichDataID        = pipz.NewIdentity(\"enrich-data\", \"Fetches enrichment data in parallel\")\n)\n\n// GOOD: Parallel API calls with actual network latency\nnotifications := 
pipz.NewConcurrent(SendNotificationsID,\n    nil, // No reducer needed for side effects\n    sendEmailNotification,      // API call to email service\n    sendSMSNotification,       // API call to SMS gateway\n    updateCRMSystem,           // API call to CRM\n    pushToAnalytics,           // API call to analytics service\n)\n\n// GOOD: Multiple database queries that don't depend on each other\nenrichment := pipz.NewConcurrent(EnrichDataID,\n    nil, // No reducer needed\n    fetchUserProfile,          // Database query\n    fetchAccountHistory,       // Database query\n    fetchPreferences,          // Database query\n)",{"id":948,"title":949,"titles":950,"content":951,"level":917},"/v1.0.7/guides/best-practices#use-sequential-for-fast-operations","Use Sequential for Fast Operations",[135,935,939],"Sequential is better for CPU-bound operations or fast validations: // Define identities as package-level variables\nvar ValidationsID = pipz.NewIdentity(\"validate\", \"Validates user data\")\n\n// BAD: Using Concurrent for simple validations\nvalidations := pipz.NewConcurrent(ValidationsID, nil,  // ❌ Don't do this!\n    validateEmail,     // Simple regex check\n    validateAge,       // Number comparison\n    validateCountry,   // String check\n)\n\n// GOOD: Sequential for fast operations\nvalidations := pipz.NewSequence(ValidationsID,     // ✅ Better!\n    validateEmail,\n    validateAge,\n    validateCountry,\n)",{"id":953,"title":954,"titles":955,"content":956,"level":917},"/v1.0.7/guides/best-practices#performance-implications","Performance Implications",[135,935,939],"Concurrent creates copies of your data (using the Cloner interface) for goroutine isolation: // This happens internally in Concurrent:\n// 1. data.Clone() called for each processor\n// 2. Goroutine spawned for each\n// 3. 
Original input returned (modifications discarded)\n\n// For simple operations, the overhead of:\n// - Cloning data\n// - Spawning goroutines\n// - Channel communication\n// ...exceeds the benefit of parallelism Rule of thumb: If an operation takes less than 10ms, use Sequential.",{"id":958,"title":959,"titles":960,"content":961,"level":35},"/v1.0.7/guides/best-practices#pattern-validation-first","Pattern: Validation First",[135,935],"Always validate early to fail fast: // Define identities as package-level variables\nvar (\n    ValidationPipelineID   = pipz.NewIdentity(\"validation-pipeline\", \"Validation-first processing\")\n    ValidateStructureID    = pipz.NewIdentity(\"validate_structure\", \"Validates data structure\")\n    ValidateBusinessRulesID = pipz.NewIdentity(\"validate_business_rules\", \"Validates business rules\")\n    EnrichFromAPIID        = pipz.NewIdentity(\"enrich_from_api\", \"Enriches data from API\")\n    CalculatePricingID     = pipz.NewIdentity(\"calculate_pricing\", \"Calculates pricing\")\n    SaveToDatabaseID       = pipz.NewIdentity(\"save_to_database\", \"Saves to database\")\n)\n\npipeline := pipz.NewSequence(ValidationPipelineID,\n    // Validate first - cheap and catches errors early\n    pipz.Apply(ValidateStructureID, validateStructure),\n    pipz.Apply(ValidateBusinessRulesID, validateBusinessRules),\n\n    // Then expensive operations\n    pipz.Apply(EnrichFromAPIID, enrichFromAPI),\n    pipz.Apply(CalculatePricingID, calculatePricing),\n    pipz.Apply(SaveToDatabaseID, saveToDatabase),\n)",{"id":963,"title":964,"titles":965,"content":966,"level":35},"/v1.0.7/guides/best-practices#pattern-graceful-degradation","Pattern: Graceful Degradation",[135,935],"Continue processing even when non-critical operations fail: // Define identities as package-level variables\nvar (\n    ProcessingPipelineID = pipz.NewIdentity(\"processing-pipeline\", \"Processing with graceful degradation\")\n    ValidateID           = 
pipz.NewIdentity(\"validate\", \"Validates input\")\n    ProcessID            = pipz.NewIdentity(\"process\", \"Processes data\")\n    SaveID               = pipz.NewIdentity(\"save\", \"Saves data\")\n    AddRecommendationsID = pipz.NewIdentity(\"add_recommendations\", \"Adds product recommendations\")\n    NotificationsID      = pipz.NewIdentity(\"notifications\", \"Sends notifications in parallel\")\n    EmailID              = pipz.NewIdentity(\"email\", \"Sends email\")\n    LogEmailErrorID      = pipz.NewIdentity(\"log_email_error\", \"Logs email errors\")\n    AnalyticsID          = pipz.NewIdentity(\"analytics\", \"Tracks analytics\")\n    EmailHandlerID       = pipz.NewIdentity(\"email-handler\", \"Email with error handling\")\n)\n\npipeline := pipz.NewSequence(ProcessingPipelineID,\n    // Critical path\n    pipz.Apply(ValidateID, validate),\n    pipz.Apply(ProcessID, process),\n    pipz.Apply(SaveID, save),\n\n    // Best-effort enrichments\n    pipz.Enrich(AddRecommendationsID, func(ctx context.Context, order Order) (Order, error) {\n        recs, err := recommendationService.Get(ctx, order.UserID)\n        if err != nil {\n            return order, err // Enrich will ignore this\n        }\n        order.Recommendations = recs\n        return order, nil\n    }),\n\n    // Non-blocking notifications\n    pipz.NewConcurrent(NotificationsID, nil,\n        pipz.NewHandle(EmailHandlerID,\n            pipz.Effect(EmailID, sendEmail),\n            pipz.Effect(LogEmailErrorID, logError),\n        ),\n        pipz.Effect(AnalyticsID, trackAnalytics),\n    ),\n)",{"id":968,"title":969,"titles":970,"content":971,"level":35},"/v1.0.7/guides/best-practices#pattern-bulkhead-isolation","Pattern: Bulkhead Isolation",[135,935],"Isolate failures to prevent cascade. 
The key insight is that bulkheads are simple to connect when operating on the same type: // Define identities as package-level variables\nvar (\n    OrderPipelineID        = pipz.NewIdentity(\"order-pipeline\", \"Core order processing\")\n    NotificationTimeoutID  = pipz.NewIdentity(\"notification-timeout\", \"Timeout for notifications\")\n    NotificationConcurrentID = pipz.NewIdentity(\"notifications\", \"Parallel notifications\")\n    FullPipelineID         = pipz.NewIdentity(\"full-pipeline\", \"Full order processing with notifications\")\n    LogNotifyFailureID     = pipz.NewIdentity(\"log_notification_failure\", \"Logs notification failures\")\n    NotificationHandlerID  = pipz.NewIdentity(\"notification-handler\", \"Handles notification errors\")\n)\n\n// Separate pipelines for different concerns\norderPipeline := pipz.NewSequence(OrderPipelineID,\n    validateOrder,\n    processPayment,\n    updateInventory,\n)\n\n// Isolated notification pipeline\nnotificationPipeline := pipz.NewTimeout(NotificationTimeoutID,\n    pipz.NewConcurrent(NotificationConcurrentID, nil,\n        sendEmail,\n        sendSMS,\n        updateCRM,\n    ),\n    5*time.Second, // Don't let notifications block orders\n)\n\n// Compose with isolation - easy because both operate on Order type\nfullPipeline := pipz.NewSequence(FullPipelineID,\n    orderPipeline,\n    pipz.NewHandle(NotificationHandlerID,\n        notificationPipeline,\n        pipz.Effect(LogNotifyFailureID, logError),\n    ),\n)",{"id":973,"title":974,"titles":975,"content":976,"level":35},"/v1.0.7/guides/best-practices#pattern-orchestration-pipeline","Pattern: Orchestration Pipeline",[135,935],"The ultimate pattern is to build workflows as individual pipelines for discrete types, then create an orchestration pipeline that coordinates them: // Step 1: Define all identities as package-level variables\nvar (\n    // Domain pipeline identities\n    OrderWorkflowID    = pipz.NewIdentity(\"order-workflow\", \"Order validation 
and pricing\")\n    CustomerWorkflowID = pipz.NewIdentity(\"customer-workflow\", \"Customer validation and credit check\")\n    InvoiceWorkflowID  = pipz.NewIdentity(\"invoice-workflow\", \"Invoice generation and formatting\")\n\n    // Orchestrator identities\n    OrchestratorID           = pipz.NewIdentity(\"orchestrator\", \"Orchestrates order workflow processing\")\n    ProcessOrderID           = pipz.NewIdentity(\"process_order\", \"Processes order through order pipeline\")\n    ProcessCustomerInvoiceID = pipz.NewIdentity(\"process_customer_invoice\", \"Processes customer and invoice in parallel\")\n    FinalizeID               = pipz.NewIdentity(\"finalize\", \"Final coordination and confirmation\")\n)\n\n// Step 2: Build discrete pipelines for each type\ntype OrderWorkflow struct {\n    Order    Order\n    Customer Customer\n    Invoice  Invoice\n    Config   WorkflowConfig\n}\n\n// Individual pipelines for each domain object\norderPipeline := pipz.NewSequence[Order](OrderWorkflowID,\n    validateOrder,\n    calculatePricing,\n    applyDiscounts,\n)\n\ncustomerPipeline := pipz.NewSequence[Customer](CustomerWorkflowID,\n    validateCustomer,\n    checkCreditLimit,\n    updateLoyaltyPoints,\n)\n\ninvoicePipeline := pipz.NewSequence[Invoice](InvoiceWorkflowID,\n    generateInvoice,\n    applyTaxes,\n    formatForDisplay,\n)\n\n// Step 3: Create orchestration pipeline that coordinates execution\norchestrator := pipz.NewSequence[OrderWorkflow](OrchestratorID,\n    // Process order first\n    pipz.Apply(ProcessOrderID, func(ctx context.Context, wf OrderWorkflow) (OrderWorkflow, error) {\n        processed, err := orderPipeline.Process(ctx, wf.Order)\n        if err != nil {\n            return wf, fmt.Errorf(\"order processing failed: %w\", err)\n        }\n        wf.Order = processed\n        return wf, nil\n    }),\n\n    // Process customer in parallel with invoice if order succeeded\n    pipz.Apply(ProcessCustomerInvoiceID, func(ctx context.Context, wf 
OrderWorkflow) (OrderWorkflow, error) {\n        var wg sync.WaitGroup\n        var custErr, invErr error\n\n        wg.Add(2)\n\n        // Process customer\n        go func() {\n            defer wg.Done()\n            processed, err := customerPipeline.Process(ctx, wf.Customer)\n            if err != nil {\n                custErr = err\n                return\n            }\n            wf.Customer = processed\n        }()\n\n        // Process invoice\n        go func() {\n            defer wg.Done()\n            // Invoice needs order data\n            wf.Invoice.OrderID = wf.Order.ID\n            wf.Invoice.Amount = wf.Order.Total\n\n            processed, err := invoicePipeline.Process(ctx, wf.Invoice)\n            if err != nil {\n                invErr = err\n                return\n            }\n            wf.Invoice = processed\n        }()\n\n        wg.Wait()\n\n        // Handle errors based on config\n        if custErr != nil && wf.Config.CustomerRequired {\n            return wf, fmt.Errorf(\"customer processing failed: %w\", custErr)\n        }\n        if invErr != nil && wf.Config.InvoiceRequired {\n            return wf, fmt.Errorf(\"invoice processing failed: %w\", invErr)\n        }\n\n        return wf, nil\n    }),\n\n    // Final coordination\n    pipz.Apply(FinalizeID, func(ctx context.Context, wf OrderWorkflow) (OrderWorkflow, error) {\n        // Any final coordination logic\n        if wf.Config.SendConfirmation {\n            // Send order confirmation with all processed data\n            sendOrderConfirmation(wf.Order, wf.Customer, wf.Invoice)\n        }\n        return wf, nil\n    }),\n)\n\n// Usage\nworkflow := OrderWorkflow{\n    Order:    order,\n    Customer: customer,\n    Invoice:  Invoice{},\n    Config: WorkflowConfig{\n        CustomerRequired: true,\n        InvoiceRequired:  false,\n        SendConfirmation: true,\n    },\n}\n\nresult, err := orchestrator.Process(ctx, workflow) This pattern provides: Type Safety: Each 
pipeline operates on its specific typeReusability: Individual pipelines can be reused in different workflowsTestability: Each pipeline can be tested independentlyFlexibility: Orchestration logic can change without modifying domain pipelinesClear Dependencies: The orchestrator explicitly manages data flow between pipelines",{"id":978,"title":979,"titles":980,"content":981,"level":35},"/v1.0.7/guides/best-practices#pattern-feature-flags","Pattern: Feature Flags",[135,935],"Support gradual rollouts and A/B testing: // Define identities as package-level variables\nvar FeatureFlagPipelineID = pipz.NewIdentity(\"feature-flag-pipeline\", \"Pipeline with feature flags\")\n\nfunc createPipeline(features FeatureFlags) pipz.Chainable[Order] {\n    seq := pipz.NewSequence[Order](FeatureFlagPipelineID)\n\n    // Core processors always included\n    seq.Register(validateOrder, calculatePricing)\n\n    // Conditionally add processors\n    if features.IsEnabled(\"fraud-detection-v2\") {\n        seq.Register(fraudDetectionV2)\n    } else {\n        seq.Register(fraudDetectionV1)\n    }\n\n    if features.IsEnabled(\"loyalty-points\") {\n        seq.Register(calculateLoyaltyPoints)\n    }\n\n    seq.Register(chargePayment, fulfillOrder)\n    return seq\n}",{"id":983,"title":984,"titles":985,"content":29,"level":19},"/v1.0.7/guides/best-practices#error-handling-strategies","Error Handling Strategies",[135],{"id":987,"title":988,"titles":989,"content":990,"level":35},"/v1.0.7/guides/best-practices#error-propagation-pattern","Error Propagation Pattern",[135,984],"Understanding how errors flow through your pipelines is critical for proper handling: ┌──────────────────────────────────────────────────────────────────┐\n│                    Error Propagation in Pipelines                │\n└──────────────────────────────────────────────────────────────────┘\n\nSequential Pipeline:\n═══════════════════\nPipeline A\n    ├─→ Processor 1 [✓] Success\n    │       ↓\n    ├─→ Processor 2 [✗] 
Failure ──→ Error[T]{Stage: \"proc2\", Cause: err}\n    │       ↓                            ↓\n    │    Pipeline Stops                  ▼\n    │                              Returns to caller\n    └─→ Processor 3 [⋯] Never runs\n\nNested Pipeline:\n════════════════\nMain Pipeline\n    ├─→ Validate [✓]\n    │       ↓\n    ├─→ Sub-Pipeline ──┐\n    │   ├─→ Step A [✓] │\n    │   ├─→ Step B [✗]─┼──→ Error wrapped with sub-pipeline context\n    │   └─→ Step C [⋯] │         ↓\n    │                  │         ▼\n    │                  └──→ Error[T]{Stage: \"sub-pipeline.step-b\"}\n    │                            ↓\n    └─→ Save [⋯] Never runs      ▼\n                           Main Pipeline Stops\n\nConcurrent Pipeline:\n════════════════════\nConcurrent Connector\n    ├─→ Service A [✓] Completes\n    ├─→ Service B [✗] Fails ──→ Error logged but doesn't stop others\n    ├─→ Service C [✓] Completes\n    └─→ Returns original input (errors don't propagate up)\n\nWith Error Handler:\n════════════════════\nPipeline with Recovery\n    ├─→ Process [✗] ──→ Error[T]\n    │       ↓               ↓\n    │   Fallback ←──────────┘\n    │       ↓\n    └─→ Continue [✓] Pipeline continues with fallback result Key Insights: Sequential stops on first error - Subsequent processors never runErrors carry context - Stage name, cause, and data state at failureConcurrent doesn't propagate errors - Failures are isolatedFallback enables recovery - Convert errors back to success path",{"id":992,"title":993,"titles":994,"content":995,"level":35},"/v1.0.7/guides/best-practices#strategy-categorize-and-route","Strategy: Categorize and Route",[135,984],"type ErrorCategory string\n\nconst (\n    CategoryValidation ErrorCategory = \"validation\"\n    CategoryTransient  ErrorCategory = \"transient\"\n    CategoryBusiness   ErrorCategory = \"business\"\n    CategorySystem     ErrorCategory = \"system\"\n)\n\nfunc handleError(ctx context.Context, failure FailedOrder) (FailedOrder, error) {\n    category := 
categorizeError(failure.Error)\n    \n    switch category {\n    case CategoryValidation:\n        // Don't retry bad input\n        return sendToDeadLetter(ctx, failure)\n        \n    case CategoryTransient:\n        // Retry with backoff\n        return pipz.RetryWithBackoff(\n            reprocessOrder,\n            5,\n            time.Second,\n        ).Process(ctx, failure)\n        \n    case CategoryBusiness:\n        // Needs human intervention\n        return sendToManualReview(ctx, failure)\n        \n    case CategorySystem:\n        // Alert ops team\n        alertOps(failure.Error)\n        return sendToDeadLetter(ctx, failure)\n    }\n    \n    return failure, failure.Error\n}",{"id":997,"title":998,"titles":999,"content":29,"level":35},"/v1.0.7/guides/best-practices#strategy-rate-limiting-and-circuit-breaking","Strategy: Rate Limiting and Circuit Breaking",[135,984],{"id":1001,"title":1002,"titles":1003,"content":1004,"level":917},"/v1.0.7/guides/best-practices#rate-limiting-best-practices","Rate Limiting Best Practices",[135,984,998],"Use RateLimiter to protect downstream services and respect API limits: // Define identities as package-level variables\nvar (\n    GlobalLimiterID   = pipz.NewIdentity(\"global-limit\", \"Global system rate limit\")\n    ServiceLimiterID  = pipz.NewIdentity(\"stripe-limit\", \"Stripe API rate limit\")\n    EndpointLimiterID = pipz.NewIdentity(\"charges-limit\", \"Charges endpoint rate limit\")\n    PaymentPipelineID = pipz.NewIdentity(\"payment-pipeline\", \"Payment processing pipeline\")\n    ChargeID          = pipz.NewIdentity(\"charge\", \"Processes payment charge\")\n)\n\n// Pattern: Layer rate limits (global -> service -> endpoint)\nglobalLimiter := pipz.NewRateLimiter(GlobalLimiterID, 10000, 1000)\nserviceLimiter := pipz.NewRateLimiter(ServiceLimiterID, 100, 20)  // Stripe's actual limits\nendpointLimiter := pipz.NewRateLimiter(EndpointLimiterID, 50, 10)\n\npipeline := pipz.NewSequence(PaymentPipelineID,\n    
globalLimiter,      // Global system limit\n    serviceLimiter,     // Per-service limit\n    endpointLimiter,    // Per-endpoint limit\n    pipz.Apply(ChargeID, makePayment),\n)\n\n// Pattern: Dynamic rate adjustment based on conditions\nfunc adjustRateLimit(limiter *pipz.RateLimiter[Request], config Config) {\n    if config.OffPeakHours {\n        limiter.SetRate(1000)  // Higher rate during off-peak\n    } else {\n        limiter.SetRate(100)   // Lower rate during peak\n    }\n\n    if config.PremiumTier {\n        limiter.SetMode(\"wait\")  // Wait for premium users\n    } else {\n        limiter.SetMode(\"drop\")  // Fail fast for basic users\n    }\n}\n\n// Pattern: Per-user rate limiting with Switch\n// Define identities as package-level variables\nvar (\n    UserRateLimitID   = pipz.NewIdentity(\"user-rate-limit\", \"Routes by user tier\")\n    PremiumRateID     = pipz.NewIdentity(\"premium-rate\", \"Premium tier rate limit\")\n    StandardRateID    = pipz.NewIdentity(\"standard-rate\", \"Standard tier rate limit\")\n    FreeRateID        = pipz.NewIdentity(\"free-rate\", \"Free tier rate limit\")\n)\n\nuserLimiter := pipz.NewSwitch(UserRateLimitID,\n    func(ctx context.Context, req Request) string {\n        return getUserTier(req.UserID)\n    },\n).\nAddRoute(\"premium\", pipz.NewRateLimiter(PremiumRateID, 1000, 100)).\nAddRoute(\"standard\", pipz.NewRateLimiter(StandardRateID, 100, 10)).\nAddRoute(\"free\", pipz.NewRateLimiter(FreeRateID, 10, 1))",{"id":1006,"title":1007,"titles":1008,"content":1009,"level":917},"/v1.0.7/guides/best-practices#circuit-breaker-best-practices","Circuit Breaker Best Practices",[135,984,998],"Use CircuitBreaker to prevent cascade failures and give services time to recover: // Define identities as package-level variables\nvar (\n    StripeBreakerID = pipz.NewIdentity(\"stripe-breaker\", \"Circuit breaker for Stripe API\")\n    StripeChargeID  = pipz.NewIdentity(\"stripe-charge\", \"Charges via Stripe\")\n    DBBreakerID     
= pipz.NewIdentity(\"db-breaker\", \"Circuit breaker for database\")\n    DBSaveID        = pipz.NewIdentity(\"db-save\", \"Saves to database\")\n)\n\n// Pattern: Circuit breaker per external dependency\nstripeBreaker := pipz.NewCircuitBreaker(StripeBreakerID,\n    pipz.Apply(StripeChargeID, chargeStripe),\n    5,                    // Open after 5 failures\n    30*time.Second,       // Try recovery after 30s\n)\n\ndbBreaker := pipz.NewCircuitBreaker(DBBreakerID,\n    pipz.Apply(DBSaveID, saveToDatabase),\n    10,                   // More tolerant for internal services\n    time.Minute,          // Longer recovery time\n)\n\n// Pattern: Combine with rate limiting for comprehensive protection\n// Define identities as package-level variables\nvar (\n    ResilientAPIID = pipz.NewIdentity(\"resilient-api\", \"Resilient API pipeline\")\n    APIRateID      = pipz.NewIdentity(\"api-rate\", \"API rate limiter\")\n    APIBreakerID   = pipz.NewIdentity(\"api-breaker\", \"API circuit breaker\")\n    APIRetryID     = pipz.NewIdentity(\"api-retry\", \"API retry handler\")\n    APICallID      = pipz.NewIdentity(\"api-call\", \"External API call\")\n)\n\nresilientAPI := pipz.NewSequence(ResilientAPIID,\n    pipz.NewRateLimiter(APIRateID, 100, 20),  // Rate limit first\n    pipz.NewCircuitBreaker(APIBreakerID,       // Then circuit break\n        pipz.NewRetry(APIRetryID,              // With retry inside\n            pipz.Apply(APICallID, callExternalAPI),\n            3,\n        ),\n        5, 30*time.Second,\n    ),\n)\n\n// Pattern: Graduated thresholds based on service type\nfunc createCircuitBreaker(service string, serviceType ServiceType, processor pipz.Chainable[Request]) *pipz.CircuitBreaker[Request] {\n    // Dynamic Identity when service name is determined at runtime\n    breakerID := pipz.NewIdentity(service+\"-breaker\", \"Circuit breaker for \"+service)\n    switch serviceType {\n    case ServiceTypeExternal:\n        // External services: fail fast, longer 
recovery\n        return pipz.NewCircuitBreaker(breakerID, processor, 3, 2*time.Minute)\n    case ServiceTypeInternal:\n        // Internal services: more tolerant, shorter recovery\n        return pipz.NewCircuitBreaker(breakerID, processor, 10, 30*time.Second)\n    case ServiceTypeCritical:\n        // Critical services: very tolerant, quick recovery attempts\n        return pipz.NewCircuitBreaker(breakerID, processor, 20, 10*time.Second)\n    default:\n        return pipz.NewCircuitBreaker(breakerID, processor, 5, time.Minute)\n    }\n}\n\n// Pattern: Monitor and adjust circuit breakers\nfunc monitorCircuits(breakers map[string]*pipz.CircuitBreaker[Request]) {\n    ticker := time.NewTicker(30 * time.Second)\n    defer ticker.Stop()\n    \n    for range ticker.C {\n        for name, breaker := range breakers {\n            state := breaker.GetState()\n            metrics.Gauge(\"circuit.state\", stateToValue(state), \"service\", name)\n            \n            switch state {\n            case \"open\":\n                log.Warn(\"Circuit breaker open\", \"service\", name)\n                // Consider alerting operations team\n            case \"half-open\":\n                log.Info(\"Circuit breaker testing recovery\", \"service\", name)\n            }\n        }\n    }\n}",{"id":1011,"title":1012,"titles":1013,"content":1014,"level":917},"/v1.0.7/guides/best-practices#combined-resilience-patterns","Combined Resilience Patterns",[135,984,998],"// Pattern: Complete resilience stack\nfunc createResilientProcessor(name string, processor pipz.Chainable[Request]) pipz.Chainable[Request] {\n    // Create dynamic identities when name is determined at runtime\n    resilientID := pipz.NewIdentity(name+\"-resilient\", \"Resilient wrapper for \"+name)\n    rateID := pipz.NewIdentity(name+\"-rate\", \"Rate limiter for \"+name)\n    timeoutID := pipz.NewIdentity(name+\"-timeout\", \"Timeout for \"+name)\n    breakerID := pipz.NewIdentity(name+\"-breaker\", \"Circuit breaker 
for \"+name)\n    retryID := pipz.NewIdentity(name+\"-retry\", \"Retry handler for \"+name)\n\n    return pipz.NewSequence(resilientID,\n        // 1. Rate limiting (protect downstream)\n        pipz.NewRateLimiter(rateID, 100, 20),\n\n        // 2. Timeout (bound operation time)\n        pipz.NewTimeout(timeoutID,\n\n            // 3. Circuit breaker (prevent cascade failures)\n            pipz.NewCircuitBreaker(breakerID,\n\n                // 4. Retry (handle transient failures)\n                pipz.NewRetry(retryID, processor, 3),\n\n                5, 30*time.Second,\n            ),\n            10*time.Second,\n        ),\n    )\n}\n\n// Pattern: Service mesh style protection\n// Define identities as package-level variables\nvar (\n    ServiceMeshID     = pipz.NewIdentity(\"service-mesh\", \"Service mesh with fallback\")\n    FallbackBreakerID = pipz.NewIdentity(\"fallback-breaker\", \"Fallback service circuit breaker\")\n    FallbackTimeoutID = pipz.NewIdentity(\"fallback-timeout\", \"Fallback service timeout\")\n)\n\nserviceCall := pipz.NewFallback(ServiceMeshID,\n    // Primary service with full protection\n    createResilientProcessor(\"primary\", primaryService),\n\n    // Fallback service with lighter protection\n    pipz.NewCircuitBreaker(FallbackBreakerID,\n        pipz.NewTimeout(FallbackTimeoutID, fallbackService, 5*time.Second),\n        3, 10*time.Second,\n    ),\n)",{"id":1016,"title":1017,"titles":1018,"content":1019,"level":917},"/v1.0.7/guides/best-practices#configuration-best-practices","Configuration Best Practices",[135,984,998],"// Pattern: Configuration-driven circuit breaker settings\ntype CircuitConfig struct {\n    FailureThreshold int           `yaml:\"failure_threshold\"`\n    SuccessThreshold int           `yaml:\"success_threshold\"`\n    ResetTimeout     time.Duration `yaml:\"reset_timeout\"`\n}\n\ntype RateConfig struct {\n    Rate float64 `yaml:\"rate\"`\n    Burst int    `yaml:\"burst\"`\n    Mode string  
`yaml:\"mode\"`\n}\n\nfunc configureConnectors(breaker *pipz.CircuitBreaker[Request], limiter *pipz.RateLimiter[Request], cfg Config) {\n    // Circuit breaker configuration\n    breaker.SetFailureThreshold(cfg.Circuit.FailureThreshold).\n            SetSuccessThreshold(cfg.Circuit.SuccessThreshold).\n            SetResetTimeout(cfg.Circuit.ResetTimeout)\n    \n    // Rate limiter configuration\n    limiter.SetRate(cfg.Rate.Rate).\n            SetBurst(cfg.Rate.Burst).\n            SetMode(cfg.Rate.Mode)\n}",{"id":1021,"title":1022,"titles":1023,"content":29,"level":19},"/v1.0.7/guides/best-practices#production-checklist","Production Checklist",[135],{"id":1025,"title":1026,"titles":1027,"content":1028,"level":35},"/v1.0.7/guides/best-practices#design","Design",[135,1022],"Each processor has single responsibility Error handling is explicit and appropriate Uses type-safe routing (no magic strings) Validates input early Non-critical operations don't block critical path",{"id":1030,"title":284,"titles":1031,"content":1032,"level":35},"/v1.0.7/guides/best-practices#resilience",[135,1022],"Timeouts on all external calls Retry logic for transient failures Circuit breakers for external dependencies Rate limiting to protect downstream services Graduated failure thresholds based on service type Graceful degradation for features Bulkhead isolation between components Combined resilience patterns (rate limit + circuit break + retry)",{"id":1034,"title":1035,"titles":1036,"content":1037,"level":35},"/v1.0.7/guides/best-practices#observability","Observability",[135,1022],"Metrics on all processors Distributed tracing enabled Structured logging throughout Error categorization and alerting Performance benchmarks",{"id":1039,"title":1040,"titles":1041,"content":1042,"level":35},"/v1.0.7/guides/best-practices#operations","Operations",[135,1022],"Feature flags for gradual rollout Dead letter queue for failed items Manual intervention workflow Runbooks for common issues Load testing 
completed",{"id":1044,"title":1045,"titles":1046,"content":1047,"level":19},"/v1.0.7/guides/best-practices#anti-patterns-to-avoid","Anti-Patterns to Avoid",[135],"God Processor: One processor doing everythingSilent Failures: Swallowing errors without loggingUnbounded Retries: Retrying forever without backoffMissing Timeouts: No time bounds on operationsShared Mutable State: Processors modifying shared dataMagic Strings: Using strings instead of typed constantsHidden Dependencies: Processors with side effects on external stateCircular Fallbacks: Creating recursive fallback chains that can cause stack overflow",{"id":1049,"title":140,"titles":1050,"content":1051,"level":19},"/v1.0.7/guides/best-practices#next-steps",[135],"Performance Guide - Optimize for productionError Recovery - Advanced error patterns html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: 
var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sfm-E, html code.shiki .sfm-E{--shiki-default:var(--shiki-variable)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":1053,"title":1054,"titles":1055,"content":1056,"level":9},"/v1.0.7/guides/testing","Testing pipz Pipelines",[],"Comprehensive testing guide including MockProcessor, ChaosProcessor, assertions, and three-tier testing strategy",{"id":1058,"title":1054,"titles":1059,"content":1060,"level":9},"/v1.0.7/guides/testing#testing-pipz-pipelines",[],"Comprehensive guide to testing pipelines with the pipz testing utilities, best practices, and three-tier testing strategy.",{"id":1062,"title":1063,"titles":1064,"content":1065,"level":19},"/v1.0.7/guides/testing#table-of-contents","Table of Contents",[1054],"Testing Package OverviewMockProcessor - Testing Pipeline BehaviorChaosProcessor - Resilience TestingAssertion HelpersTesting Time-Dependent ComponentsTest Organization StrategyTesting Best PracticesCommon Testing PatternsTesting Gotchas",{"id":1067,"title":1068,"titles":1069,"content":1070,"level":19},"/v1.0.7/guides/testing#testing-package-overview","Testing Package Overview",[1054],"The github.com/zoobzio/pipz/testing package provides comprehensive utilities for testing pipz pipelines: import pipztesting \"github.com/zoobzio/pipz/testing\"",{"id":1072,"title":1073,"titles":1074,"content":1075,"level":35},"/v1.0.7/guides/testing#core-testing-components","Core Testing Components",[1054,1068],"MockProcessor: Configurable mock implementation for testing pipeline behaviorChaosProcessor: Chaos engineering tool for resilience testingAssertion Helpers: Utilities 
for verifying processor calls and behaviorsHelper Functions: Timing, parallelization, and synchronization utilities",{"id":1077,"title":1078,"titles":1079,"content":1080,"level":19},"/v1.0.7/guides/testing#mockprocessor-testing-pipeline-behavior","MockProcessor - Testing Pipeline Behavior",[1054],"MockProcessor provides a fully configurable mock implementation of pipz.Chainable[T] with call tracking, configurable return values, delays, and panic simulation.",{"id":1082,"title":1083,"titles":1084,"content":1085,"level":35},"/v1.0.7/guides/testing#basic-mock-usage","Basic Mock Usage",[1054,1078],"func TestPipelineWithMock(t *testing.T) {\n    // Create a mock processor\n    mock := pipztesting.NewMockProcessor[string](t, \"data-processor\")\n\n    // Configure return values\n    mock.WithReturn(\"processed\", nil)\n\n    // Define identities upfront\n    var (\n        TestPipelineID = pipz.NewIdentity(\"test-pipeline\", \"Test pipeline with mock\")\n        PrepareID      = pipz.NewIdentity(\"prepare\", \"Prepare input\")\n        FinalizeID     = pipz.NewIdentity(\"finalize\", \"Finalize output\")\n    )\n\n    // Build pipeline with mock\n    pipeline := pipz.NewSequence[string](TestPipelineID,\n        pipz.Transform(PrepareID, strings.ToUpper),\n        mock,\n        pipz.Transform(FinalizeID, strings.TrimSpace),\n    )\n    \n    // Process data\n    result, err := pipeline.Process(context.Background(), \"  input  \")\n    \n    // Verify results\n    require.NoError(t, err)\n    assert.Equal(t, \"processed\", result)\n    \n    // Verify mock was called\n    pipztesting.AssertProcessed(t, mock, 1)\n    pipztesting.AssertProcessedWith(t, mock, \"INPUT\")\n}",{"id":1087,"title":1088,"titles":1089,"content":1090,"level":35},"/v1.0.7/guides/testing#testing-error-paths","Testing Error Paths",[1054,1078],"func TestErrorHandling(t *testing.T) {\n    // Create mock that returns error\n    mock := pipztesting.NewMockProcessor[Order](t, \"payment-processor\")\n    
mock.WithReturn(Order{}, errors.New(\"payment declined\"))\n\n    // Define identities upfront\n    var (\n        OrderPipelineID = pipz.NewIdentity(\"order-pipeline\", \"Order processing with error handling\")\n        ProcessID       = pipz.NewIdentity(\"process\", \"Order processing sequence\")\n        ErrorRecoveryID = pipz.NewIdentity(\"error-recovery\", \"Handle order processing errors\")\n    )\n\n    // Build pipeline with error handling\n    pipeline := pipz.NewHandle[Order](OrderPipelineID,\n        pipz.NewSequence[Order](ProcessID,\n            validateOrder,\n            mock, // Will fail here\n            shipOrder,\n        ),\n        pipz.Transform(ErrorRecoveryID, func(ctx context.Context, err *pipz.Error[Order]) *pipz.Error[Order] {\n            // Log error and mark order as failed\n            err.InputData.Status = \"payment_failed\"\n            return err\n        }),\n    )\n\n    order := Order{ID: \"123\", Amount: 99.99}\n    _, err := pipeline.Process(context.Background(), order)\n\n    // Verify error occurred\n    require.Error(t, err)\n\n    // Verify shipOrder was never called (pipeline stopped at error)\n    var pipeErr *pipz.Error[Order]\n    require.True(t, errors.As(err, &pipeErr))\n    assert.Equal(t, pipz.Name(\"payment-processor\"), pipeErr.Path[len(pipeErr.Path)-1])\n    assert.Equal(t, \"payment_failed\", pipeErr.InputData.Status)\n}",{"id":1092,"title":1093,"titles":1094,"content":1095,"level":35},"/v1.0.7/guides/testing#testing-delays-and-timeouts","Testing Delays and Timeouts",[1054,1078],"func TestTimeoutBehavior(t *testing.T) {\n    // Create mock with 200ms delay\n    slowMock := pipztesting.NewMockProcessor[string](t, \"slow-service\")\n    slowMock.WithReturn(\"result\", nil).WithDelay(200 * time.Millisecond)\n\n    // Define identity upfront\n    var FastTimeoutID = pipz.NewIdentity(\"fast-timeout\", \"Quick timeout wrapper\")\n\n    // Wrap with timeout\n    pipeline := pipz.NewTimeout[string](FastTimeoutID,\n   
     slowMock,\n        100*time.Millisecond, // Timeout before mock completes\n    )\n\n    // Process should timeout\n    _, err := pipeline.Process(context.Background(), \"data\")\n\n    // Verify timeout occurred\n    require.Error(t, err)\n    assert.True(t, errors.Is(err, context.DeadlineExceeded))\n\n    // Mock should still have been called\n    pipztesting.AssertProcessed(t, slowMock, 1)\n}",{"id":1097,"title":1098,"titles":1099,"content":1100,"level":35},"/v1.0.7/guides/testing#testing-panic-recovery","Testing Panic Recovery",[1054,1078],"func TestPanicRecovery(t *testing.T) {\n    // Create mock that panics\n    panicMock := pipztesting.NewMockProcessor[string](t, \"unstable-service\")\n    panicMock.WithPanic(\"database connection lost\")\n\n    // Define identities upfront\n    var (\n        SafePipelineID = pipz.NewIdentity(\"safe-pipeline\", \"Pipeline with panic recovery\")\n        RecoverID      = pipz.NewIdentity(\"recover\", \"Recover from panics\")\n    )\n\n    // Build pipeline with panic recovery\n    pipeline := pipz.NewHandle[string](SafePipelineID,\n        panicMock,\n        pipz.Transform(RecoverID, func(ctx context.Context, err *pipz.Error[string]) *pipz.Error[string] {\n            // Check if it was a panic by examining the error\n            if strings.Contains(err.Err.Error(), \"panic\") {\n                // Log recovered panic\n                log.Printf(\"recovered from panic: %v\", err.Err)\n            }\n            return err\n        }),\n    )\n\n    // Should recover from panic\n    _, err := pipeline.Process(context.Background(), \"test\")\n\n    // Verify error but not panic\n    require.Error(t, err)\n    assert.Contains(t, err.Error(), \"recovered from panic\")\n}",{"id":1102,"title":1103,"titles":1104,"content":1105,"level":35},"/v1.0.7/guides/testing#call-history-tracking","Call History Tracking",[1054,1078],"func TestCallHistory(t *testing.T) {\n    mock := pipztesting.NewMockProcessor[int](t, \"accumulator\")\n  
  mock.WithReturn(0, nil).WithHistorySize(10) // Keep last 10 calls\n    \n    // Process multiple values\n    for i := 0; i \u003C 5; i++ {\n        mock.Process(context.Background(), i)\n    }\n    \n    // Examine call history\n    history := mock.CallHistory()\n    require.Len(t, history, 5)\n    \n    // Verify call order and timing\n    for i, call := range history {\n        assert.Equal(t, i, call.Input)\n        if i > 0 {\n            assert.True(t, call.Timestamp.After(history[i-1].Timestamp))\n        }\n    }\n}",{"id":1107,"title":1108,"titles":1109,"content":1110,"level":19},"/v1.0.7/guides/testing#chaosprocessor-resilience-testing","ChaosProcessor - Resilience Testing",[1054],"ChaosProcessor enables chaos engineering for pipelines by randomly injecting failures, delays, timeouts, and panics. This helps verify resilience patterns work correctly under adverse conditions.",{"id":1112,"title":1113,"titles":1114,"content":1115,"level":35},"/v1.0.7/guides/testing#basic-chaos-testing","Basic Chaos Testing",[1054,1108],"func TestPipelineResilience(t *testing.T) {\n    // Define identities upfront\n    var (\n        ProcessID     = pipz.NewIdentity(\"process\", \"Data processing transform\")\n        ResilientID   = pipz.NewIdentity(\"resilient\", \"Resilient retry wrapper\")\n        RecoverID     = pipz.NewIdentity(\"recover\", \"Error recovery handler\")\n        HandleErrorID = pipz.NewIdentity(\"handle-error\", \"Handle processing errors\")\n    )\n\n    // Wrap a normal processor with chaos\n    normalProcessor := pipz.Transform(ProcessID, func(ctx context.Context, data string) string {\n        return data + \"_processed\"\n    })\n\n    // Configure chaos\n    chaosConfig := pipztesting.ChaosConfig{\n        FailureRate: 0.2,              // 20% failure rate\n        LatencyMin:  10 * time.Millisecond,\n        LatencyMax:  50 * time.Millisecond,\n        TimeoutRate: 0.1,               // 10% timeout rate\n        PanicRate:   0.05,              // 
5% panic rate\n        Seed:        42,                // Reproducible chaos\n    }\n\n    chaos := pipztesting.NewChaosProcessor(\"chaos-test\", normalProcessor, chaosConfig)\n\n    // Build resilient pipeline\n    pipeline := pipz.NewRetry[string](ResilientID,\n        pipz.NewHandle[string](RecoverID,\n            chaos,\n            pipz.Transform(HandleErrorID, func(ctx context.Context, err *pipz.Error[string]) *pipz.Error[string] {\n                // Log and recover from errors\n                return err\n            }),\n        ),\n        3, // Retry up to 3 times\n    )\n    \n    // Run many iterations to trigger chaos\n    successCount := 0\n    failureCount := 0\n    \n    for i := 0; i \u003C 100; i++ {\n        result, err := pipeline.Process(context.Background(), fmt.Sprintf(\"request_%d\", i))\n        if err == nil {\n            successCount++\n            assert.Contains(t, result, \"_processed\")\n        } else {\n            failureCount++\n        }\n    }\n    \n    // With retries, success rate should be higher than failure rate\n    stats := chaos.Stats()\n    t.Logf(\"Chaos Stats: %s\", stats)\n    t.Logf(\"Pipeline Success Rate: %.1f%%\", float64(successCount)/100*100)\n    \n    // Verify chaos was actually injected\n    assert.Greater(t, stats.FailedCalls+stats.TimeoutCalls+stats.PanicCalls, int64(0))\n}",{"id":1117,"title":1118,"titles":1119,"content":1120,"level":35},"/v1.0.7/guides/testing#testing-circuit-breaker-with-chaos","Testing Circuit Breaker with Chaos",[1054,1108],"func TestCircuitBreakerUnderChaos(t *testing.T) {\n    // Define identities upfront\n    var (\n        ExternalAPIID = pipz.NewIdentity(\"external-api\", \"External API call\")\n        APIBreakerID  = pipz.NewIdentity(\"api-breaker\", \"Circuit breaker for API\")\n    )\n\n    // Create service with intermittent failures\n    service := pipz.Apply(ExternalAPIID, func(ctx context.Context, req Request) (Response, error) {\n        // Actual API call\n        
return callAPI(req)\n    })\n\n    // Add chaos to simulate network issues\n    chaosService := pipztesting.NewChaosProcessor(\"chaos-api\", service,\n        pipztesting.ChaosConfig{\n            FailureRate: 0.3,  // 30% failures\n            TimeoutRate: 0.2,  // 20% timeouts\n            LatencyMin:  50 * time.Millisecond,\n            LatencyMax:  200 * time.Millisecond,\n        },\n    )\n\n    // Wrap with circuit breaker\n    circuitBreaker := pipz.NewCircuitBreaker[Request](APIBreakerID,\n        chaosService,\n        10,                     // Open after 10 failures\n        30*time.Second,         // Recovery time\n    )\n    \n    // Test that circuit breaker opens under chaos\n    var openedAt time.Time\n    failuresSinceOpen := 0\n    \n    for i := 0; i \u003C 50; i++ {\n        _, err := circuitBreaker.Process(context.Background(), Request{ID: i})\n        \n        var pipeErr *pipz.Error[Request]\n        if errors.As(err, &pipeErr) && pipeErr.Path[0] == \"api-breaker\" {\n            if openedAt.IsZero() {\n                openedAt = time.Now()\n                t.Logf(\"Circuit opened after %d requests\", i)\n            }\n            failuresSinceOpen++\n        }\n    }\n    \n    // Verify circuit breaker opened\n    assert.False(t, openedAt.IsZero(), \"Circuit breaker should have opened\")\n    \n    // Verify fast failures after opening\n    assert.Greater(t, failuresSinceOpen, 10, \"Should fail fast when open\")\n    \n    // Check chaos statistics\n    stats := chaosService.Stats()\n    t.Logf(\"Chaos injected: %d failures, %d timeouts out of %d calls\",\n        stats.FailedCalls, stats.TimeoutCalls, stats.TotalCalls)\n}",{"id":1122,"title":1123,"titles":1124,"content":1125,"level":35},"/v1.0.7/guides/testing#load-testing-with-chaos","Load Testing with Chaos",[1054,1108],"func TestLoadWithChaos(t *testing.T) {\n    // Define identities upfront\n    var (\n        CounterID  = pipz.NewIdentity(\"counter\", \"Count processed items\")\n   
     LoadTestID = pipz.NewIdentity(\"load-test\", \"Load testing pipeline\")\n        ThrottleID = pipz.NewIdentity(\"throttle\", \"Rate limit requests\")\n        RetryID    = pipz.NewIdentity(\"retry\", \"Retry on failure\")\n    )\n\n    // Create processor that tracks throughput\n    var processed atomic.Int64\n    processor := pipz.Effect(CounterID, func(ctx context.Context, data int) error {\n        processed.Add(1)\n        return nil\n    })\n\n    // Add variable chaos\n    chaos := pipztesting.NewChaosProcessor(\"variable-chaos\", processor,\n        pipztesting.ChaosConfig{\n            FailureRate: 0.1,\n            LatencyMin:  1 * time.Millisecond,\n            LatencyMax:  10 * time.Millisecond,\n            TimeoutRate: 0.05,\n        },\n    )\n\n    // Build pipeline with rate limiting and retries\n    pipeline := pipz.NewSequence[int](LoadTestID,\n        pipz.NewRateLimiter[int](ThrottleID, 100, 10), // 100 req/s, burst 10\n        pipz.NewRetry[int](RetryID, chaos, 2),\n    )\n    \n    // Generate load\n    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n    defer cancel()\n    \n    var wg sync.WaitGroup\n    for i := 0; i \u003C 10; i++ { // 10 concurrent workers\n        wg.Add(1)\n        go func(worker int) {\n            defer wg.Done()\n            for j := 0; ctx.Err() == nil; j++ {\n                pipeline.Process(ctx, worker*1000+j)\n            }\n        }(i)\n    }\n    \n    wg.Wait()\n    \n    // Analyze results\n    totalProcessed := processed.Load()\n    stats := chaos.Stats()\n    \n    t.Logf(\"Processed: %d requests in 5 seconds\", totalProcessed)\n    t.Logf(\"Chaos Stats: %s\", stats)\n    t.Logf(\"Effective throughput: %.1f req/s\", float64(totalProcessed)/5)\n    \n    // Verify system remained stable under chaos\n    assert.Greater(t, totalProcessed, int64(100), \"Should process reasonable amount despite 
chaos\")\n}",{"id":1127,"title":1128,"titles":1129,"content":1130,"level":19},"/v1.0.7/guides/testing#assertion-helpers","Assertion Helpers",[1054],"The testing package provides specialized assertions for verifying processor behavior:",{"id":1132,"title":1133,"titles":1134,"content":1135,"level":35},"/v1.0.7/guides/testing#basic-assertions","Basic Assertions",[1054,1128],"func TestProcessorAssertions(t *testing.T) {\n    mock := pipztesting.NewMockProcessor[string](t, \"test-processor\")\n    mock.WithReturn(\"result\", nil)\n\n    // Define identity upfront\n    var PipelineID = pipz.NewIdentity(\"pipeline\", \"Test pipeline\")\n\n    pipeline := pipz.NewSequence[string](PipelineID, mock)\n    \n    // Process some data\n    pipeline.Process(context.Background(), \"input1\")\n    pipeline.Process(context.Background(), \"input2\")\n    \n    // Assert exact call count\n    pipztesting.AssertProcessed(t, mock, 2)\n    \n    // Assert last input\n    pipztesting.AssertProcessedWith(t, mock, \"input2\")\n    \n    // Assert call count range\n    pipztesting.AssertProcessedBetween(t, mock, 1, 3)\n    \n    // Reset and verify no calls\n    mock.Reset()\n    pipztesting.AssertNotProcessed(t, mock)\n}",{"id":1137,"title":1138,"titles":1139,"content":1140,"level":35},"/v1.0.7/guides/testing#waiting-for-async-operations","Waiting for Async Operations",[1054,1128],"func TestAsyncProcessing(t *testing.T) {\n    mock := pipztesting.NewMockProcessor[string](t, \"async-processor\")\n    mock.WithReturn(\"done\", nil)\n    \n    // Process asynchronously\n    go func() {\n        time.Sleep(100 * time.Millisecond)\n        mock.Process(context.Background(), \"async-data\")\n    }()\n    \n    // Wait for processor to be called\n    success := pipztesting.WaitForCalls(mock, 1, 500*time.Millisecond)\n    require.True(t, success, \"Mock should have been called within timeout\")\n    \n    // Verify the call\n    pipztesting.AssertProcessedWith(t, mock, 
\"async-data\")\n}",{"id":1142,"title":1143,"titles":1144,"content":1145,"level":35},"/v1.0.7/guides/testing#parallel-testing-helper","Parallel Testing Helper",[1054,1128],"func TestConcurrentSafety(t *testing.T) {\n    // Define identities upfront\n    var (\n        CounterID  = pipz.NewIdentity(\"counter\", \"Count processing calls\")\n        ParallelID = pipz.NewIdentity(\"parallel\", \"Parallel execution\")\n    )\n\n    var counter atomic.Int64\n    processor := pipz.Effect(CounterID, func(ctx context.Context, n int) error {\n        counter.Add(1)\n        return nil\n    })\n\n    pipeline := pipz.NewConcurrent[int](ParallelID,\n        processor,\n        processor,\n        processor,\n    )\n    \n    // Run parallel test\n    pipztesting.ParallelTest(t, 10, func(workerID int) {\n        for i := 0; i \u003C 100; i++ {\n            pipeline.Process(context.Background(), workerID*1000+i)\n        }\n    })\n    \n    // Each of 10 workers processed 100 items through 3 processors\n    expected := int64(10 * 100 * 3)\n    assert.Equal(t, expected, counter.Load())\n}",{"id":1147,"title":1148,"titles":1149,"content":1150,"level":35},"/v1.0.7/guides/testing#latency-measurement","Latency Measurement",[1054,1128],"func TestProcessorPerformance(t *testing.T) {\n    // Define identity upfront\n    var SlowID = pipz.NewIdentity(\"slow\", \"Slow transform for testing\")\n\n    processor := pipz.Transform(SlowID, func(ctx context.Context, n int) int {\n        time.Sleep(10 * time.Millisecond)\n        return n * 2\n    })\n    \n    // Measure latency\n    latency := pipztesting.MeasureLatency(func() {\n        processor.Process(context.Background(), 42)\n    })\n    \n    assert.GreaterOrEqual(t, latency, 10*time.Millisecond)\n    assert.Less(t, latency, 20*time.Millisecond)\n    \n    // Measure with result\n    result, duration := pipztesting.MeasureLatencyWithResult(func() int {\n        res, _ := processor.Process(context.Background(), 21)\n        return 
res\n    })\n    \n    assert.Equal(t, 42, result)\n    assert.GreaterOrEqual(t, duration, 10*time.Millisecond)\n}",{"id":1152,"title":1153,"titles":1154,"content":1155,"level":19},"/v1.0.7/guides/testing#testing-time-dependent-components","Testing Time-Dependent Components",[1054],"For connectors that depend on time (Timeout, Backoff, CircuitBreaker, RateLimiter, WorkerPool), use clockz for deterministic testing: func TestTimeoutWithFakeClock(t *testing.T) {\n    clock := clockz.NewFakeClock()\n\n    // Define identity upfront\n    var TestTimeoutID = pipz.NewIdentity(\"test\", \"Timeout with fake clock\")\n\n    timeout := pipz.NewTimeout(TestTimeoutID, processor, 5*time.Second).\n        WithClock(clock)\n    \n    // Start processing in background\n    go timeout.Process(ctx, data)\n    \n    // Advance clock to trigger timeout\n    clock.Advance(6 * time.Second)\n    \n    // Verify timeout behavior\n} For detailed clockz usage, see: https://github.com/zoobzio/clockz",{"id":1157,"title":1158,"titles":1159,"content":1160,"level":19},"/v1.0.7/guides/testing#test-organization-strategy","Test Organization Strategy",[1054],"Pipz follows a three-tier testing strategy that separates concerns and enables comprehensive validation:",{"id":1162,"title":1163,"titles":1164,"content":1165,"level":35},"/v1.0.7/guides/testing#_1-unit-tests-package-level","1. Unit Tests (Package-Level)",[1054,1158],"Located alongside source code, testing individual processors and connectors in isolation. 
pipz/\n├── processor_test.go      # Tests individual processors\n├── connector_test.go      # Tests individual connectors\n└── error_test.go          # Tests error handling Example Unit Test: // processor_test.go\nfunc TestTransformProcessor(t *testing.T) {\n    // Define identity upfront\n    var DoubleID = NewIdentity(\"double\", \"Double the input value\")\n\n    processor := Transform(DoubleID, func(ctx context.Context, n int) int {\n        return n * 2\n    })\n\n    result, err := processor.Process(context.Background(), 21)\n    require.NoError(t, err)\n    assert.Equal(t, 42, result)\n}",{"id":1167,"title":1168,"titles":1169,"content":1170,"level":35},"/v1.0.7/guides/testing#_2-integration-tests","2. Integration Tests",[1054,1158],"Located in testing/integration/, testing complete pipelines and real-world scenarios. testing/integration/\n├── pipeline_flows_test.go       # End-to-end pipeline tests\n├── resilience_patterns_test.go  # Circuit breakers, retries, fallbacks\n└── real_world_test.go           # Business scenario tests Example Integration Test: // testing/integration/resilience_patterns_test.go\nfunc TestCircuitBreakerWithRetry(t *testing.T) {\n    // Define identities upfront\n    var (\n        FlakyID   = pipz.NewIdentity(\"flaky\", \"Flaky service that fails initially\")\n        BreakerID = pipz.NewIdentity(\"breaker\", \"Circuit breaker for flaky service\")\n        RetryID   = pipz.NewIdentity(\"retry\", \"Retry wrapper\")\n    )\n\n    var callCount int64\n\n    // Flaky service that fails initially\n    flakyService := pipz.Apply(FlakyID, func(ctx context.Context, data string) (string, error) {\n        count := atomic.AddInt64(&callCount, 1)\n        if count \u003C= 3 {\n            return \"\", errors.New(\"service unavailable\")\n        }\n        return data + \"_processed\", nil\n    })\n\n    // Build resilient pipeline\n    pipeline := pipz.NewCircuitBreaker[string](BreakerID,\n        pipz.NewRetry[string](RetryID, flakyService, 
3),\n        5,                      // Threshold\n        time.Second,           // Recovery\n    )\n\n    // Should succeed after retries\n    result, err := pipeline.Process(context.Background(), \"test\")\n    require.NoError(t, err)\n    assert.Equal(t, \"test_processed\", result)\n    assert.Equal(t, int64(4), callCount) // 3 failures + 1 success\n}",{"id":1172,"title":1173,"titles":1174,"content":1175,"level":35},"/v1.0.7/guides/testing#_3-benchmarks","3. Benchmarks",[1054,1158],"Located in testing/benchmarks/, measuring performance and comparing implementations. testing/benchmarks/\n├── core_performance_test.go      # Individual processor benchmarks\n├── composition_performance_test.go # Pipeline composition benchmarks\n└── comparison_test.go            # Comparative benchmarks Example Benchmark: // testing/benchmarks/core_performance_test.go\nfunc BenchmarkTransformProcessor(b *testing.B) {\n    // Define identity upfront\n    var DoubleID = pipz.NewIdentity(\"double\", \"Double the input value\")\n\n    processor := pipz.Transform(DoubleID, func(_ context.Context, n int) int {\n        return n * 2\n    })\n    \n    ctx := context.Background()\n    b.ResetTimer()\n    b.ReportAllocs()\n    \n    for i := 0; i \u003C b.N; i++ {\n        result, _ := processor.Process(ctx, 42)\n        _ = result // Prevent optimization\n    }\n}",{"id":1177,"title":1178,"titles":1179,"content":29,"level":19},"/v1.0.7/guides/testing#testing-best-practices","Testing Best Practices",[1054],{"id":1181,"title":1182,"titles":1183,"content":1184,"level":35},"/v1.0.7/guides/testing#_1-test-data-isolation-with-clone","1. 
Test Data Isolation with Clone()",[1054,1178],"Always implement proper Clone() for concurrent testing: type TestData struct {\n    ID     string\n    Values []int\n    Meta   map[string]any\n}\n\n// Proper deep clone implementation\nfunc (d TestData) Clone() TestData {\n    values := make([]int, len(d.Values))\n    copy(values, d.Values)\n    \n    meta := make(map[string]any, len(d.Meta))\n    for k, v := range d.Meta {\n        meta[k] = v\n    }\n    \n    return TestData{\n        ID:     d.ID,\n        Values: values,\n        Meta:   meta,\n    }\n}\n\nfunc TestConcurrentIsolation(t *testing.T) {\n    // Define identities upfront\n    var (\n        ParallelID = pipz.NewIdentity(\"parallel\", \"Parallel execution\")\n        Modify1ID  = pipz.NewIdentity(\"modify1\", \"Modify values slice\")\n        Modify2ID  = pipz.NewIdentity(\"modify2\", \"Modify metadata map\")\n    )\n\n    data := TestData{\n        ID:     \"test\",\n        Values: []int{1, 2, 3},\n        Meta:   map[string]any{\"key\": \"value\"},\n    }\n\n    // Concurrent processors should not affect each other\n    concurrent := pipz.NewConcurrent[TestData](ParallelID,\n        pipz.Effect(Modify1ID, func(ctx context.Context, d TestData) error {\n            d.Values[0] = 999\n            return nil\n        }),\n        pipz.Effect(Modify2ID, func(ctx context.Context, d TestData) error {\n            d.Meta[\"new\"] = \"data\"\n            return nil\n        }),\n    )\n    \n    original := data.Clone()\n    concurrent.Process(context.Background(), data)\n    \n    // Original must be unchanged\n    assert.Equal(t, original, data)\n}",{"id":1186,"title":1187,"titles":1188,"content":1189,"level":35},"/v1.0.7/guides/testing#_2-stateful-connector-testing","2. 
Stateful Connector Testing",[1054,1178],"Stateful connectors (RateLimiter, CircuitBreaker) must be singletons: func TestStatefulConnectorSharing(t *testing.T) {\n    // Define identity upfront\n    var APILimiterID = pipz.NewIdentity(\"api\", \"API rate limiter\")\n\n    // CORRECT: Shared instance maintains state\n    rateLimiter := pipz.NewRateLimiter[string](APILimiterID, 2, 1) // 2 req/s\n    \n    // Multiple goroutines share the same limiter\n    var wg sync.WaitGroup\n    for i := 0; i \u003C 5; i++ {\n        wg.Add(1)\n        go func(id int) {\n            defer wg.Done()\n            rateLimiter.Process(context.Background(), fmt.Sprintf(\"req_%d\", id))\n        }(i)\n    }\n    \n    start := time.Now()\n    wg.Wait()\n    elapsed := time.Since(start)\n    \n    // Should take ~2 seconds for 5 requests at 2 req/s\n    assert.GreaterOrEqual(t, elapsed, 2*time.Second)\n}",{"id":1191,"title":1192,"titles":1193,"content":1194,"level":35},"/v1.0.7/guides/testing#_3-error-path-testing","3. 
Error Path Testing",[1054,1178],"Test both success and failure paths: func TestCompleteErrorCoverage(t *testing.T) {\n    tests := []struct {\n        name        string\n        input       Order\n        shouldFail  bool\n        failureStage string\n    }{\n        {\n            name:       \"valid_order\",\n            input:      Order{ID: \"123\", Amount: 99.99},\n            shouldFail: false,\n        },\n        {\n            name:        \"invalid_amount\",\n            input:       Order{ID: \"456\", Amount: -10},\n            shouldFail:  true,\n            failureStage: \"validate\",\n        },\n        {\n            name:        \"payment_failure\",\n            input:       Order{ID: \"789\", Amount: 99999}, // Triggers payment failure\n            shouldFail:  true,\n            failureStage: \"payment\",\n        },\n    }\n    \n    for _, tt := range tests {\n        t.Run(tt.name, func(t *testing.T) {\n            pipeline := buildOrderPipeline()\n            _, err := pipeline.Process(context.Background(), tt.input)\n            \n            if tt.shouldFail {\n                require.Error(t, err)\n                var pipeErr *pipz.Error[Order]\n                require.True(t, errors.As(err, &pipeErr))\n                assert.Equal(t, pipz.Name(tt.failureStage), pipeErr.Path[len(pipeErr.Path)-1])\n            } else {\n                require.NoError(t, err)\n            }\n        })\n    }\n}",{"id":1196,"title":1197,"titles":1198,"content":1199,"level":35},"/v1.0.7/guides/testing#_4-context-cancellation-testing","4. 
Context Cancellation Testing",[1054,1178],"Ensure processors respect context: func TestContextPropagation(t *testing.T) {\n    // Define identity upfront\n    var BlockerID = pipz.NewIdentity(\"blocker\", \"Blocks until context cancelled\")\n\n    // Create a processor that blocks until context is cancelled\n    blockingProcessor := pipz.Apply(BlockerID, func(ctx context.Context, data string) (string, error) {\n        \u003C-ctx.Done()\n        return \"\", ctx.Err()\n    })\n    \n    ctx, cancel := context.WithCancel(context.Background())\n    \n    // Start processing in background\n    done := make(chan error, 1)\n    go func() {\n        _, err := blockingProcessor.Process(ctx, \"test\")\n        done \u003C- err\n    }()\n    \n    // Give it time to start\n    time.Sleep(10 * time.Millisecond)\n    \n    // Cancel context\n    cancel()\n    \n    // Should complete quickly with cancellation error\n    select {\n    case err := \u003C-done:\n        assert.ErrorIs(t, err, context.Canceled)\n    case \u003C-time.After(100 * time.Millisecond):\n        t.Fatal(\"Processor did not respect context cancellation\")\n    }\n}",{"id":1201,"title":1202,"titles":1203,"content":1204,"level":35},"/v1.0.7/guides/testing#_5-table-driven-tests","5. 
Table-Driven Tests",[1054,1178],"Use table-driven tests for comprehensive coverage: func TestPipelineVariations(t *testing.T) {\n    tests := []struct {\n        name     string\n        pipeline func() pipz.Chainable[int]\n        input    int\n        expected int\n        wantErr  bool\n    }{\n        {\n            name: \"simple_transform\",\n            pipeline: func() pipz.Chainable[int] {\n                var DoubleID = pipz.NewIdentity(\"double\", \"Double value\")\n                return pipz.Transform(DoubleID, func(_ context.Context, n int) int {\n                    return n * 2\n                })\n            },\n            input:    5,\n            expected: 10,\n        },\n        {\n            name: \"sequence_of_transforms\",\n            pipeline: func() pipz.Chainable[int] {\n                var (\n                    MathID   = pipz.NewIdentity(\"math\", \"Math operations\")\n                    DoubleID = pipz.NewIdentity(\"double\", \"Double value\")\n                    Add10ID  = pipz.NewIdentity(\"add10\", \"Add 10 to value\")\n                )\n                return pipz.NewSequence[int](MathID,\n                    pipz.Transform(DoubleID, func(_ context.Context, n int) int { return n * 2 }),\n                    pipz.Transform(Add10ID, func(_ context.Context, n int) int { return n + 10 }),\n                )\n            },\n            input:    5,\n            expected: 20, // (5 * 2) + 10\n        },\n        {\n            name: \"with_validation\",\n            pipeline: func() pipz.Chainable[int] {\n                var ValidateID = pipz.NewIdentity(\"validate\", \"Validate non-negative\")\n                return pipz.Apply(ValidateID, func(_ context.Context, n int) (int, error) {\n                    if n \u003C 0 {\n                        return 0, errors.New(\"negative not allowed\")\n                    }\n                    return n, nil\n                })\n            },\n            input:   -5,\n            
wantErr: true,\n        },\n    }\n    \n    for _, tt := range tests {\n        t.Run(tt.name, func(t *testing.T) {\n            pipeline := tt.pipeline()\n            result, err := pipeline.Process(context.Background(), tt.input)\n            \n            if tt.wantErr {\n                require.Error(t, err)\n            } else {\n                require.NoError(t, err)\n                assert.Equal(t, tt.expected, result)\n            }\n        })\n    }\n}",{"id":1206,"title":1207,"titles":1208,"content":29,"level":19},"/v1.0.7/guides/testing#common-testing-patterns","Common Testing Patterns",[1054],{"id":1210,"title":1211,"titles":1212,"content":1213,"level":35},"/v1.0.7/guides/testing#testing-dynamic-pipeline-modification","Testing Dynamic Pipeline Modification",[1054,1207],"func TestDynamicPipelineModification(t *testing.T) {\n    // Define identities upfront so we can reference them for lookup\n    var (\n        DynamicID = pipz.NewIdentity(\"dynamic\", \"Dynamic test pipeline\")\n        Step1ID   = pipz.NewIdentity(\"step1\", \"Uppercase transform\")\n        Step2ID   = pipz.NewIdentity(\"step2\", \"Add exclamation\")\n        Step15ID  = pipz.NewIdentity(\"step1.5\", \"Wrap in brackets\")\n    )\n\n    // Start with basic pipeline\n    seq := pipz.NewSequence[string](DynamicID)\n    seq.Register(\n        pipz.Transform(Step1ID, strings.ToUpper),\n    )\n\n    // Test initial configuration\n    result, _ := seq.Process(context.Background(), \"hello\")\n    assert.Equal(t, \"HELLO\", result)\n\n    // Add processor at runtime\n    seq.Push(pipz.Transform(Step2ID, func(_ context.Context, s string) string {\n        return s + \"!\"\n    }))\n\n    // Test modified pipeline\n    result, _ = seq.Process(context.Background(), \"hello\")\n    assert.Equal(t, \"HELLO!\", result)\n\n    // Insert processor in middle (use same Identity for lookup)\n    seq.After(Step1ID, pipz.Transform(Step15ID, func(_ context.Context, s string) string {\n        return 
\"[\" + s + \"]\"\n    }))\n\n    // Test final configuration\n    result, _ = seq.Process(context.Background(), \"hello\")\n    assert.Equal(t, \"[HELLO]!\", result)\n}",{"id":1215,"title":1216,"titles":1217,"content":1218,"level":35},"/v1.0.7/guides/testing#testing-pipeline-composition","Testing Pipeline Composition",[1054,1207],"func TestPipelineComposition(t *testing.T) {\n    // Define identities upfront\n    var (\n        ValidationID     = pipz.NewIdentity(\"validation\", \"Validate user data\")\n        ValidateEmailID  = pipz.NewIdentity(\"validate-email\", \"Validate email format\")\n        ValidateAgeID    = pipz.NewIdentity(\"validate-age\", \"Validate age range\")\n        EnrichmentID     = pipz.NewIdentity(\"enrichment\", \"Enrich user data\")\n        AddMetadataID    = pipz.NewIdentity(\"add-metadata\", \"Add user metadata\")\n        CalculateScoreID = pipz.NewIdentity(\"calculate-score\", \"Calculate user score\")\n        UserProcessingID = pipz.NewIdentity(\"user-processing\", \"Full user processing pipeline\")\n        SaveID           = pipz.NewIdentity(\"save\", \"Save user to database\")\n    )\n\n    // Build reusable sub-pipelines\n    validation := pipz.NewSequence[User](ValidationID,\n        pipz.Apply(ValidateEmailID, validateEmail),\n        pipz.Apply(ValidateAgeID, validateAge),\n    )\n\n    enrichment := pipz.NewSequence[User](EnrichmentID,\n        pipz.Transform(AddMetadataID, addMetadata),\n        pipz.Transform(CalculateScoreID, calculateScore),\n    )\n\n    // Compose into larger pipeline\n    fullPipeline := pipz.NewSequence[User](UserProcessingID,\n        validation,\n        enrichment,\n        pipz.Effect(SaveID, saveUser),\n    )\n\n    // Test composed pipeline\n    user := User{Email: \"test@example.com\", Age: 25}\n    result, err := fullPipeline.Process(context.Background(), user)\n\n    require.NoError(t, err)\n    assert.NotEmpty(t, result.Metadata)\n    assert.Greater(t, result.Score, 
0)\n}",{"id":1220,"title":1221,"titles":1222,"content":1223,"level":35},"/v1.0.7/guides/testing#testing-switch-routing","Testing Switch Routing",[1054,1207],"func TestSwitchRouting(t *testing.T) {\n    // Define identities upfront\n    var (\n        RouterID        = pipz.NewIdentity(\"request-router\", \"Route requests by type\")\n        HandleQueryID   = pipz.NewIdentity(\"handle-query\", \"Handle query requests\")\n        HandleCommandID = pipz.NewIdentity(\"handle-command\", \"Handle command requests\")\n        HandleEventID   = pipz.NewIdentity(\"handle-event\", \"Handle event requests\")\n        HandleUnknownID = pipz.NewIdentity(\"handle-unknown\", \"Handle unknown requests\")\n    )\n\n    // Create router that processes based on type\n    router := pipz.NewSwitch[Request](RouterID,\n        func(_ context.Context, req Request) string {\n            return req.Type\n        },\n    ).\n    AddRoute(\"query\", pipz.Transform(HandleQueryID, handleQuery)).\n    AddRoute(\"command\", pipz.Apply(HandleCommandID, handleCommand)).\n    AddRoute(\"event\", pipz.Effect(HandleEventID, handleEvent)).\n    Default(pipz.Transform(HandleUnknownID, handleUnknown))\n    \n    tests := []struct {\n        reqType  string\n        expected string\n    }{\n        {\"query\", \"query_result\"},\n        {\"command\", \"command_result\"},\n        {\"event\", \"event_result\"},\n        {\"unknown\", \"unknown_result\"},\n    }\n    \n    for _, tt := range tests {\n        t.Run(tt.reqType, func(t *testing.T) {\n            req := Request{Type: tt.reqType, Data: \"test\"}\n            result, err := router.Process(context.Background(), req)\n            require.NoError(t, err)\n            assert.Contains(t, result.Response, tt.expected)\n        })\n    }\n}",{"id":1225,"title":1226,"titles":1227,"content":29,"level":19},"/v1.0.7/guides/testing#testing-gotchas","Testing 
Gotchas",[1054],{"id":1229,"title":1230,"titles":1231,"content":1232,"level":35},"/v1.0.7/guides/testing#creating-connectors-per-request","❌ Creating Connectors Per Request",[1054,1226],"// WRONG - New rate limiter per request (useless!)\nfunc processRequest(req Request) Response {\n    limiterID := pipz.NewIdentity(\"api\", \"Rate limiter\")\n    limiter := pipz.NewRateLimiter(limiterID, 10, 1) // New instance\n    return limiter.Process(ctx, req)\n}",{"id":1234,"title":1235,"titles":1236,"content":1237,"level":35},"/v1.0.7/guides/testing#singleton-connectors","✅ Singleton Connectors",[1054,1226],"// RIGHT - Package-level Identity and limiter shared across requests\nvar APILimiterID = pipz.NewIdentity(\"api\", \"API rate limiter\")\nvar apiLimiter = pipz.NewRateLimiter(APILimiterID, 10, 1)\n\nfunc processRequest(req Request) Response {\n    return apiLimiter.Process(ctx, req)\n}",{"id":1239,"title":1240,"titles":1241,"content":1242,"level":35},"/v1.0.7/guides/testing#shallow-clone-implementation","❌ Shallow Clone Implementation",[1054,1226],"// WRONG - Shares memory between concurrent processors\nfunc (d Data) Clone() Data {\n    return Data{\n        ID:    d.ID,\n        Items: d.Items, // SHARES SLICE!\n        Meta:  d.Meta,  // SHARES MAP!\n    }\n}",{"id":1244,"title":1245,"titles":1246,"content":1247,"level":35},"/v1.0.7/guides/testing#deep-clone-implementation","✅ Deep Clone Implementation",[1054,1226],"// RIGHT - Complete isolation\nfunc (d Data) Clone() Data {\n    items := make([]Item, len(d.Items))\n    copy(items, d.Items)\n    \n    meta := make(map[string]any, len(d.Meta))\n    for k, v := range d.Meta {\n        meta[k] = v\n    }\n    \n    return Data{\n        ID:    d.ID,\n        Items: items,\n        Meta:  meta,\n    }\n}",{"id":1249,"title":1250,"titles":1251,"content":1252,"level":35},"/v1.0.7/guides/testing#not-testing-error-paths","❌ Not Testing Error Paths",[1054,1226],"// WRONG - Only tests happy path\nfunc TestPipeline(t *testing.T) 
{\n    pipeline := buildPipeline()\n    result, _ := pipeline.Process(ctx, validData)\n    assert.Equal(t, expected, result)\n}",{"id":1254,"title":1255,"titles":1256,"content":1257,"level":35},"/v1.0.7/guides/testing#complete-path-coverage","✅ Complete Path Coverage",[1054,1226],"// RIGHT - Tests success and failure\nfunc TestPipeline(t *testing.T) {\n    pipeline := buildPipeline()\n    \n    // Test success\n    result, err := pipeline.Process(ctx, validData)\n    require.NoError(t, err)\n    assert.Equal(t, expected, result)\n    \n    // Test failure\n    _, err = pipeline.Process(ctx, invalidData)\n    require.Error(t, err)\n    \n    var pipeErr *pipz.Error[Data]\n    require.True(t, errors.As(err, &pipeErr))\n    assert.Equal(t, pipz.Name(\"validation\"), pipeErr.Path[len(pipeErr.Path)-1])\n}",{"id":1259,"title":1260,"titles":1261,"content":1262,"level":35},"/v1.0.7/guides/testing#ignoring-context-cancellation","❌ Ignoring Context Cancellation",[1054,1226],"// WRONG - Doesn't respect context\nfunc (p *SlowProcessor) Process(ctx context.Context, data Data) (Data, error) {\n    time.Sleep(5 * time.Second) // Blocks regardless of context\n    return process(data)\n}",{"id":1264,"title":1265,"titles":1266,"content":1267,"level":35},"/v1.0.7/guides/testing#context-aware-processing","✅ Context-Aware Processing",[1054,1226],"// RIGHT - Respects cancellation\nfunc (p *SlowProcessor) Process(ctx context.Context, data Data) (Data, error) {\n    select {\n    case \u003C-time.After(5 * time.Second):\n        return process(data)\n    case \u003C-ctx.Done():\n        return data, ctx.Err()\n    }\n}",{"id":1269,"title":492,"titles":1270,"content":1271,"level":19},"/v1.0.7/guides/testing#summary",[1054],"The pipz testing package provides comprehensive tools for validating pipeline behavior: MockProcessor for controlled testing with configurable behaviorChaosProcessor for resilience and fault tolerance testingAssertion helpers for verifying processor 
interactionsThree-tier testing strategy separating unit, integration, and performance testsBest practices for avoiding common pitfalls Effective testing ensures your pipelines are robust, performant, and handle edge cases gracefully. Use mocks for isolation, chaos for resilience validation, and follow the testing patterns to build reliable data processing systems. html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html 
code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}",{"id":1273,"title":1274,"titles":1275,"content":1276,"level":9},"/v1.0.7/guides/performance","Performance Optimization Guide",[],"Techniques and strategies for building high-performance pipelines with minimal allocations and optimal resource usage",{"id":1278,"title":1274,"titles":1279,"content":1280,"level":9},"/v1.0.7/guides/performance#performance-optimization-guide",[],"Learn how to build high-performance pipelines with pipz.",{"id":1282,"title":1283,"titles":1284,"content":1285,"level":19},"/v1.0.7/guides/performance#performance-principles","Performance Principles",[1274],"pipz is designed for performance: Minimal allocations in core operationsNo reflection or runtime type assertionsLow interface call overheadEfficient error propagation without excessive wrapping",{"id":1287,"title":1288,"titles":1289,"content":1290,"level":19},"/v1.0.7/guides/performance#benchmarking-pipelines","Benchmarking Pipelines",[1274],"Always measure before optimizing: func BenchmarkPipeline(b *testing.B) {\n    pipeline := pipz.NewSequence[Order](\"benchmark-pipeline\",\n        validateOrder,\n        calculateTax,\n        applyDiscount,\n    )\n    \n    order := Order{\n        Items: []Item{{Price: 99.99}},\n        Country: \"US\",\n    }\n    \n    ctx := context.Background()\n    \n    b.ResetTimer()\n    for i := 0; i \u003C b.N; i++ {\n        _, err := pipeline.Process(ctx, order)\n        if err != nil {\n            b.Fatal(err)\n        }\n    }\n} Run with: go test -bench=BenchmarkPipeline -benchmem",{"id":1292,"title":449,"titles":1293,"content":29,"level":19},"/v1.0.7/guides/performance#optimization-strategies",[1274],{"id":1295,"title":1296,"titles":1297,"content":1298,"level":35},"/v1.0.7/guides/performance#_1-minimize-allocations","1. 
Minimize Allocations",[1274,449],"Reuse objects where possible: // Bad: Allocates on every call\nfunc enrichOrder(ctx context.Context, order Order) Order {\n    order.Metadata = make(map[string]string) // New allocation\n    order.Metadata[\"processed\"] = \"true\"\n    return order\n}\n\n// Good: Reuse existing map\nfunc enrichOrder(ctx context.Context, order Order) Order {\n    if order.Metadata == nil {\n        order.Metadata = make(map[string]string, 1)\n    }\n    order.Metadata[\"processed\"] = \"true\"\n    return order\n}\n\n// Better: Pre-size collections\nfunc processOrders(orders []Order) []Order {\n    result := make([]Order, 0, len(orders)) // Pre-sized\n    for _, order := range orders {\n        if order.Valid {\n            result = append(result, order)\n        }\n    }\n    return result\n}",{"id":1300,"title":1301,"titles":1302,"content":1303,"level":35},"/v1.0.7/guides/performance#_2-efficient-cloning","2. Efficient Cloning",[1274,449],"For concurrent processing, optimize your Clone implementation: // Inefficient clone\nfunc (o Order) Clone() Order {\n    var clone Order\n    json.Unmarshal(json.Marshal(o)) // Slow!\n    return clone\n}\n\n// Efficient clone\nfunc (o Order) Clone() Order {\n    // Copy slice with pre-allocation\n    items := make([]Item, len(o.Items))\n    copy(items, o.Items)\n    \n    // Copy map if needed\n    var metadata map[string]string\n    if o.Metadata != nil {\n        metadata = make(map[string]string, len(o.Metadata))\n        for k, v := range o.Metadata {\n            metadata[k] = v\n        }\n    }\n    \n    return Order{\n        ID:       o.ID,\n        Customer: o.Customer,\n        Items:    items,\n        Total:    o.Total,\n        Metadata: metadata,\n    }\n}",{"id":1305,"title":1306,"titles":1307,"content":1308,"level":35},"/v1.0.7/guides/performance#_3-batching-operations","3. 
Batching Operations",[1274,449],"Process multiple items together: // Individual processing (slow)\nfor _, order := range orders {\n    enriched, _ := enrichOrder(ctx, order)\n    saved, _ := saveOrder(ctx, enriched)\n    results = append(results, saved)\n}\n\n// Batch processing (fast)\ntype OrderBatch []Order\n\nfunc processBatch(ctx context.Context, batch OrderBatch) (OrderBatch, error) {\n    // Enrich all at once\n    for i := range batch {\n        batch[i] = enrichOrder(ctx, batch[i])\n    }\n    \n    // Save in bulk\n    if err := saveOrdersBulk(ctx, batch); err != nil {\n        return batch, err\n    }\n    \n    return batch, nil\n}",{"id":1310,"title":1311,"titles":1312,"content":1313,"level":35},"/v1.0.7/guides/performance#_4-parallel-processing","4. Parallel Processing",[1274,449],"Use concurrent processing wisely: // Process orders in parallel with controlled concurrency\nfunc processOrdersConcurrent(orders []Order) []Order {\n    // Limit concurrency to CPU count\n    workers := runtime.NumCPU()\n    \n    // Create work channel\n    work := make(chan Order, len(orders))\n    results := make(chan Order, len(orders))\n    \n    // Start workers\n    var wg sync.WaitGroup\n    for i := 0; i \u003C workers; i++ {\n        wg.Add(1)\n        go func() {\n            defer wg.Done()\n            \n            pipeline := createOrderPipeline()\n            ctx := context.Background()\n            \n            for order := range work {\n                result, err := pipeline.Process(ctx, order)\n                if err != nil {\n                    log.Printf(\"Failed to process order %s: %v\", order.ID, err)\n                    continue\n                }\n                results \u003C- result\n            }\n        }()\n    }\n    \n    // Send work\n    for _, order := range orders {\n        work \u003C- order\n    }\n    close(work)\n    \n    // Wait and collect\n    wg.Wait()\n    close(results)\n    \n    // Collect results\n    processed := 
make([]Order, 0, len(orders))\n    for result := range results {\n        processed = append(processed, result)\n    }\n    \n    return processed\n}",{"id":1315,"title":1316,"titles":1317,"content":1318,"level":35},"/v1.0.7/guides/performance#_5-caching","5. Caching",[1274,449],"Cache expensive operations: type CachedProcessor[T any, K comparable] struct {\n    processor pipz.Chainable[T]\n    keyFunc   func(T) K\n    cache     sync.Map\n    ttl       time.Duration\n}\n\ntype cacheEntry[T any] struct {\n    value     T\n    timestamp time.Time\n}\n\nfunc (cp *CachedProcessor[T, K]) Process(ctx context.Context, data T) (T, error) {\n    key := cp.keyFunc(data)\n    \n    // Check cache\n    if cached, ok := cp.cache.Load(key); ok {\n        entry := cached.(cacheEntry[T])\n        if time.Since(entry.timestamp) \u003C cp.ttl {\n            return entry.value, nil\n        }\n        cp.cache.Delete(key) // Expired\n    }\n    \n    // Process and cache\n    result, err := cp.processor.Process(ctx, data)\n    if err != nil {\n        return data, err\n    }\n    \n    cp.cache.Store(key, cacheEntry[T]{\n        value:     result,\n        timestamp: time.Now(),\n    })\n    \n    return result, nil\n}",{"id":1320,"title":1321,"titles":1322,"content":1323,"level":35},"/v1.0.7/guides/performance#_6-pipeline-complexity","6. 
Pipeline Complexity",[1274,449],"Simpler pipelines are faster: // Complex: Many small steps\ncomplex := pipz.NewSequence[Order](\"complex-pipeline\",\n    step1, step2, step3, step4, step5,\n    step6, step7, step8, step9, step10,\n)\n\n// Simple: Combine related operations\nsimple := pipz.NewSequence[Order](\"simple-pipeline\",\n    validateAndNormalize,  // Combined steps 1-3\n    enrichAndTransform,    // Combined steps 4-7\n    saveAndNotify,        // Combined steps 8-10\n)",{"id":1325,"title":1326,"titles":1327,"content":29,"level":19},"/v1.0.7/guides/performance#performance-patterns","Performance Patterns",[1274],{"id":1329,"title":1330,"titles":1331,"content":1332,"level":35},"/v1.0.7/guides/performance#pattern-zero-copy-processing","Pattern: Zero-Copy Processing",[1274,1326],"Modify data in-place when safe: // For types that don't need cloning\nfunc processInPlace(ctx context.Context, data *LargeData) (*LargeData, error) {\n    // Modify the pointer directly\n    data.Processed = true\n    data.Timestamp = time.Now()\n    return data, nil\n}",{"id":1334,"title":1335,"titles":1336,"content":1337,"level":35},"/v1.0.7/guides/performance#pattern-stream-processing","Pattern: Stream Processing",[1274,1326],"Process data as it arrives: func streamProcessor[T any](\n    input \u003C-chan T,\n    pipeline pipz.Chainable[T],\n) \u003C-chan T {\n    output := make(chan T)\n    \n    go func() {\n        defer close(output)\n        ctx := context.Background()\n        \n        for item := range input {\n            result, err := pipeline.Process(ctx, item)\n            if err != nil {\n                log.Printf(\"Processing error: %v\", err)\n                continue\n            }\n            output \u003C- result\n        }\n    }()\n    \n    return output\n}",{"id":1339,"title":1340,"titles":1341,"content":1342,"level":35},"/v1.0.7/guides/performance#pattern-memory-pools","Pattern: Memory Pools",[1274,1326],"Reuse objects to reduce GC pressure: var orderPool 
= sync.Pool{\n    New: func() interface{} {\n        return &Order{\n            Items:    make([]Item, 0, 10),\n            Metadata: make(map[string]string),\n        }\n    },\n}\n\nfunc processWithPool(ctx context.Context, data OrderData) (*Order, error) {\n    order := orderPool.Get().(*Order)\n    defer func() {\n        // Reset order\n        order.Items = order.Items[:0]\n        order.Metadata = make(map[string]string)\n        orderPool.Put(order)\n    }()\n    \n    // Use pooled order\n    order.ID = data.ID\n    order.Customer = data.Customer\n    // ...\n    \n    return order, nil\n}",{"id":1344,"title":1345,"titles":1346,"content":1347,"level":19},"/v1.0.7/guides/performance#profiling-pipelines","Profiling Pipelines",[1274],"Use Go's built-in profiler: import _ \"net/http/pprof\"\n\nfunc main() {\n    go func() {\n        log.Println(http.ListenAndServe(\"localhost:6060\", nil))\n    }()\n    \n    // Run your pipeline\n    runPipeline()\n}\n\n// Profile CPU:\n// go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30\n\n// Profile Memory:\n// go tool pprof http://localhost:6060/debug/pprof/heap",{"id":1349,"title":1350,"titles":1351,"content":1352,"level":19},"/v1.0.7/guides/performance#performance-checklist","Performance Checklist",[1274],"Benchmark before optimizing Profile to find bottlenecks Minimize allocations Optimize Clone() methods Use batching where possible Limit concurrent operations Cache expensive computations Combine related processors Use memory pools for high-frequency objects Monitor GC pressure",{"id":1354,"title":1355,"titles":1356,"content":1357,"level":19},"/v1.0.7/guides/performance#common-bottlenecks","Common Bottlenecks",[1274],"Excessive Cloning: Optimize Clone() or avoid Concurrent when not neededSmall Batches: Increase batch sizes for better throughputUnbounded Concurrency: Limit parallel operationsMissing Caches: Cache expensive external callsInterface Overhead: Combine processors to reduce 
calls",{"id":1359,"title":1360,"titles":1361,"content":1362,"level":19},"/v1.0.7/guides/performance#benchmark-results","Benchmark Results",[1274],"Example from the payment processing pipeline: BenchmarkSimplePipeline-8        1000000      1053 ns/op       0 B/op       0 allocs/op\nBenchmarkWithCloning-8            300000      4127 ns/op     896 B/op      12 allocs/op\nBenchmarkConcurrent-8             100000     10382 ns/op    1792 B/op      24 allocs/op\nBenchmarkWithCaching-8           5000000       237 ns/op       0 B/op       0 allocs/op",{"id":1364,"title":140,"titles":1365,"content":1366,"level":19},"/v1.0.7/guides/performance#next-steps",[1274],"Testing Guide - Performance testing strategiesBest Practices - Production optimizationError Recovery - Performance impact of error handling html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html 
.shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}",{"id":1368,"title":1369,"titles":1370,"content":1371,"level":9},"/v1.0.7/guides/safety-reliability","Safety and Reliability Guide",[],"Built-in safety features including automatic panic recovery, security sanitization, and resilience patterns",{"id":1373,"title":1369,"titles":1374,"content":1375,"level":9},"/v1.0.7/guides/safety-reliability#safety-and-reliability-guide",[],"pipz is designed with safety and reliability as core principles. This guide covers the built-in protections that make your pipelines robust in production environments.",{"id":1377,"title":1378,"titles":1379,"content":1380,"level":19},"/v1.0.7/guides/safety-reliability#automatic-panic-recovery","Automatic Panic Recovery",[1369],"Every processor and connector in pipz includes comprehensive panic recovery. 
This is not optional - it's always enabled to ensure your applications never crash due to unexpected panics in pipeline processors.",{"id":1382,"title":1383,"titles":1384,"content":1385,"level":35},"/v1.0.7/guides/safety-reliability#what-gets-protected","What Gets Protected",[1369,1378],"Complete Coverage: All processors: Apply, Transform, Effect, Mutate, EnrichAll connectors: Sequence, Concurrent, WorkerPool, Scaffold, Switch, Fallback, Race, Contest, Retry, Backoff, Timeout, Handle, Filter, RateLimiter, CircuitBreakerAll user-defined functions passed to processorsAll condition functions, transformation functions, and side-effect functions",{"id":1387,"title":1388,"titles":1389,"content":1390,"level":35},"/v1.0.7/guides/safety-reliability#how-it-works","How It Works",[1369,1378],"// Define identity upfront\nvar RiskyID = pipz.NewIdentity(\"risky\", \"Processor that may panic\")\n\n// ANY panic in this function will be automatically recovered\nriskyProcessor := pipz.Apply(RiskyID, func(ctx context.Context, data string) (string, error) {\n    // All of these panics are automatically handled:\n    if data == \"bounds\" {\n        arr := []int{1, 2, 3}\n        return fmt.Sprintf(\"%d\", arr[10]), nil // Index out of bounds - RECOVERED\n    }\n    if data == \"nil\" {\n        var ptr *string\n        return *ptr, nil // Nil pointer dereference - RECOVERED\n    }\n    if data == \"assert\" {\n        var val interface{} = \"not a number\"\n        num := val.(int) // Type assertion failure - RECOVERED\n        return fmt.Sprintf(\"%d\", num), nil\n    }\n    if data == \"explicit\" {\n        panic(\"something went wrong!\") // Explicit panic - RECOVERED\n    }\n    return data, nil\n})\n\n// Use it safely - panics become regular errors\nresult, err := riskyProcessor.Process(ctx, \"nil\")\nif err != nil {\n    fmt.Printf(\"Caught panic as error: %v\\n\", err)\n    // Output: risky failed after 123μs: panic in processor \"risky\": panic occurred: runtime error: invalid 
memory address or nil pointer dereference\n}",{"id":1392,"title":1393,"titles":1394,"content":1395,"level":35},"/v1.0.7/guides/safety-reliability#security-sanitization","Security Sanitization",[1369,1378],"Panic messages often contain sensitive information that shouldn't be exposed in logs or error responses. pipz automatically sanitizes panic messages to prevent information leakage: Memory Address Sanitization: // Original panic: \"invalid memory access at 0x1234567890abcdef\"\n// Sanitized: \"panic occurred: invalid memory access at 0x***\" File Path Sanitization: // Original panic: \"error in /sensitive/internal/path/file.go:123\"\n// Sanitized: \"panic occurred (file path sanitized)\" Stack Trace Sanitization: // Original panic: \"error\\ngoroutine 1 [running]:\\nruntime.main()\\n...\"\n// Sanitized: \"panic occurred (stack trace sanitized)\" Length Limiting: // Very long panic messages (>200 chars) are truncated:\n// \"panic occurred (message truncated for security)\" Sanitization Patterns: The sanitization process applies these specific patterns: Memory addresses: Replaces hex sequences after 0x with 0x***File paths: Detects / or \\ characters and replaces entire messageStack traces: Detects goroutine or runtime. 
keywords and replaces entire messageLength limit: Messages longer than 200 characters are truncatedNil panics: Converts nil panic values to \"unknown panic (nil value)\"",{"id":1397,"title":1398,"titles":1399,"content":1400,"level":35},"/v1.0.7/guides/safety-reliability#performance-impact","Performance Impact",[1369,1378],"Panic recovery is implemented with minimal performance overhead: Cost: ~20 nanoseconds per operation (measured via benchmarks)Implementation: Uses Go's built-in defer and recover() mechanismsZero allocation in the normal (no-panic) pathAlways enabled: No build tags or configuration options to minimize complexity",{"id":1402,"title":1403,"titles":1404,"content":1405,"level":35},"/v1.0.7/guides/safety-reliability#real-world-example","Real-World Example",[1369,1378],"// Processing user data that might come from untrusted sources\nprocessUserInput := pipz.Apply(\"parse-input\", func(ctx context.Context, input string) (UserData, error) {\n    // Third-party JSON library might panic on malformed input\n    var userData UserData\n    if err := someJSONLibrary.Unmarshal([]byte(input), &userData); err != nil {\n        return userData, err\n    }\n    \n    // Array access that might panic if data is unexpected\n    if len(userData.Scores) > 0 {\n        userData.AverageScore = userData.Scores[0] // Could panic if Scores is nil\n    }\n    \n    return userData, nil\n})\n\n// Define identity upfront\nvar UserPipelineID = pipz.NewIdentity(\"user-pipeline\", \"User input processing pipeline\")\n\n// Even with malicious or malformed input, your application won't crash\npipeline := pipz.NewSequence(UserPipelineID,\n    processUserInput,\n    validateUserData,\n    enrichWithDefaults,\n)\n\n// Malicious input that would normally crash your application\nmaliciousInput := `{\"scores\": null, \"data\": \"` + strings.Repeat(\"x\", 100000) + `\"}`\nresult, err := pipeline.Process(ctx, maliciousInput)\nif err != nil {\n    // The application continues running, panic 
is converted to error\n    log.Printf(\"Input processing failed safely: %v\", err)\n    return handleBadInput(err)\n}",{"id":1407,"title":1408,"titles":1409,"content":1410,"level":19},"/v1.0.7/guides/safety-reliability#error-handling-integration","Error Handling Integration",[1369],"Panic recovery integrates seamlessly with pipz's error handling system:",{"id":1412,"title":1413,"titles":1414,"content":1415,"level":35},"/v1.0.7/guides/safety-reliability#error-path-and-context","Error Path and Context",[1369,1408],"result, err := panickyPipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[DataType]\n    if errors.As(err, &pipeErr) {\n        fmt.Printf(\"Pipeline: %s\\n\", strings.Join(pipeErr.Path, \" -> \"))\n        fmt.Printf(\"Duration: %v\\n\", pipeErr.Duration)\n        fmt.Printf(\"Input data: %+v\\n\", pipeErr.InputData)\n        \n        // Check if the underlying error is a panic\n        if strings.Contains(pipeErr.Error(), \"panic in processor\") {\n            fmt.Println(\"This was a recovered panic\")\n        }\n    }\n}",{"id":1417,"title":1418,"titles":1419,"content":1420,"level":35},"/v1.0.7/guides/safety-reliability#error-recovery-patterns","Error Recovery Patterns",[1369,1408],"Use Handle to process panic errors specifically: // Define identities upfront\nvar (\n    ResilientID    = pipz.NewIdentity(\"resilient\", \"Resilient pipeline with panic handling\")\n    PanicHandlerID = pipz.NewIdentity(\"panic-handler\", \"Handles panics from risky processor\")\n    LogPanicID     = pipz.NewIdentity(\"log-panic\", \"Logs panic errors\")\n)\n\npipeline := pipz.NewSequence(ResilientID,\n    riskyProcessor,\n    pipz.NewHandle(PanicHandlerID, nextProcessor,\n        pipz.Effect(LogPanicID, func(ctx context.Context, err *pipz.Error[DataType]) error {\n            if strings.Contains(err.Error(), \"panic in processor\") {\n                log.Printf(\"ALERT: Panic recovered in %s: %v\",\n                    strings.Join(err.Path, 
\"->\"), err.Err)\n                // Send to monitoring system, etc.\n            }\n            return nil\n        }),\n    ),\n)",{"id":1422,"title":1423,"titles":1424,"content":1425,"level":19},"/v1.0.7/guides/safety-reliability#reliability-through-layered-protection","Reliability Through Layered Protection",[1369],"Combine panic recovery with other reliability patterns for maximum resilience: // Define identities upfront\nvar (\n    FortressID  = pipz.NewIdentity(\"fortress\", \"Multi-layered protection pipeline\")\n    ValidateID  = pipz.NewIdentity(\"validate\", \"Input validation\")\n    TimeoutID   = pipz.NewIdentity(\"timeout\", \"Timeout protection\")\n    RetryID     = pipz.NewIdentity(\"retry\", \"Retry on transient failures\")\n    CircuitID   = pipz.NewIdentity(\"circuit\", \"Circuit breaker for cascading failures\")\n    FallbackID  = pipz.NewIdentity(\"fallback\", \"Fallback for persistent failures\")\n)\n\n// Multi-layered protection\nresilientPipeline := pipz.NewSequence(FortressID,\n    pipz.Apply(ValidateID, validateInput), // Input validation\n    pipz.NewTimeout(TimeoutID, // Timeout protection\n        pipz.NewRetry(RetryID, // Retry on transient failures\n            pipz.NewCircuitBreaker(CircuitID, // Circuit breaker for cascading failures\n                pipz.NewFallback(FallbackID, // Fallback for persistent failures\n                    riskyMainProcessor,    // Main logic (protected by panic recovery)\n                    safeDefaultProcessor,  // Safe fallback (also protected by panic recovery)\n                ),\n            ),\n        ),\n        30*time.Second,\n    ),\n) In this architecture: Input validation prevents known bad dataTimeout prevents hanging operationsRetry handles transient failuresCircuit breaker prevents cascade failuresFallback provides alternate processing pathsPanic recovery (automatic) catches unexpected failuresRich error context helps with debugging and 
monitoring",{"id":1427,"title":1428,"titles":1429,"content":1430,"level":19},"/v1.0.7/guides/safety-reliability#monitoring-and-observability","Monitoring and Observability",[1369],"Track panic occurrences for operational insights: // Define identities upfront\nvar (\n    MonitoredID   = pipz.NewIdentity(\"monitored\", \"Pipeline with panic monitoring\")\n    MonitorID     = pipz.NewIdentity(\"monitor\", \"Monitor and track panics\")\n    TrackPanicsID = pipz.NewIdentity(\"track-panics\", \"Tracks panic occurrences\")\n)\n\nvar panicCounter int64\n\nmonitoredPipeline := pipz.NewSequence(MonitoredID,\n    riskyProcessor,\n    pipz.NewHandle(MonitorID, continueProcessor,\n        pipz.Effect(TrackPanicsID, func(ctx context.Context, err *pipz.Error[Data]) error {\n            if strings.Contains(err.Error(), \"panic in processor\") {\n                atomic.AddInt64(&panicCounter, 1)\n\n                // Extract processor name from panic error\n                parts := strings.Split(err.Error(), \"panic in processor \")\n                if len(parts) > 1 {\n                    processorName := strings.Split(parts[1], \":\")[0]\n                    metrics.IncrementCounter(\"pipz.panics\", map[string]string{\n                        \"processor\": processorName,\n                        \"pipeline\":  strings.Join(err.Path[:len(err.Path)-1], \"->\"),\n                    })\n                }\n            }\n            return nil\n        }),\n    ),\n)",{"id":1432,"title":1433,"titles":1434,"content":29,"level":19},"/v1.0.7/guides/safety-reliability#best-practices-for-safety","Best Practices for Safety",[1369],{"id":1436,"title":1437,"titles":1438,"content":1439,"level":35},"/v1.0.7/guides/safety-reliability#_1-trust-the-safety-net-but-dont-abuse-it","1. 
Trust the Safety Net, But Don't Abuse It",[1369,1433],"// Define identities upfront\nvar (\n    ParseID = pipz.NewIdentity(\"parse\", \"Parses input data\")\n    BadID   = pipz.NewIdentity(\"bad\", \"Bad processor using panic for flow control\")\n)\n\n// Good - Panic recovery handles unexpected failures\nprocessor := pipz.Apply(ParseID, func(ctx context.Context, data string) (Result, error) {\n    result, err := someLibrary.Parse(data)\n    return result, err // Library might panic, that's handled\n})\n\n// Bad - Don't use panic recovery as flow control\nbadProcessor := pipz.Apply(BadID, func(ctx context.Context, data string) (Result, error) {\n    if data == \"invalid\" {\n        panic(\"invalid data\") // Use proper error returns instead!\n    }\n    return Result{}, nil\n})",{"id":1441,"title":1442,"titles":1443,"content":1444,"level":35},"/v1.0.7/guides/safety-reliability#_2-combine-with-proper-error-handling","2. Combine with Proper Error Handling",[1369,1433],"// Define identity upfront\nvar GoodID = pipz.NewIdentity(\"good\", \"Good processor with proper error handling\")\n\n// Good - Handle expected errors properly, let panic recovery handle unexpected ones\ngoodProcessor := pipz.Apply(GoodID, func(ctx context.Context, data string) (Result, error) {\n    if data == \"\" {\n        return Result{}, errors.New(\"empty input\") // Expected error\n    }\n\n    // Unexpected panics from third-party code are automatically handled\n    return complexThirdPartyOperation(data), nil\n})",{"id":1446,"title":1447,"titles":1448,"content":1449,"level":35},"/v1.0.7/guides/safety-reliability#_3-log-and-monitor-panics","3. 
Log and Monitor Panics",[1369,1433],"// Define identity upfront\nvar PanicMonitorID = pipz.NewIdentity(\"panic-monitor\", \"Monitors and logs panics\")\n\n// Set up monitoring to track panic frequency\npanicMonitor := pipz.Effect(PanicMonitorID, func(ctx context.Context, err *pipz.Error[Data]) error {\n    if strings.Contains(err.Error(), \"panic in processor\") {\n        log.WithFields(log.Fields{\n            \"processor\": extractProcessorName(err),\n            \"path\":      strings.Join(err.Path, \"->\"),\n            \"duration\":  err.Duration,\n            \"input\":     fmt.Sprintf(\"%+v\", err.InputData),\n        }).Warn(\"Panic recovered in pipeline\")\n    }\n    return nil\n})",{"id":1451,"title":1452,"titles":1453,"content":1454,"level":35},"/v1.0.7/guides/safety-reliability#_4-test-panic-scenarios","4. Test Panic Scenarios",[1369,1433],"func TestPanicRecovery(t *testing.T) {\n    // Define identity upfront\n    var TestPanicID = pipz.NewIdentity(\"test-panic\", \"Test processor that panics\")\n\n    panicProcessor := pipz.Transform(TestPanicID, func(ctx context.Context, data string) string {\n        if data == \"panic\" {\n            panic(\"test panic\")\n        }\n        return data\n    })\n    \n    result, err := panicProcessor.Process(context.Background(), \"panic\")\n    \n    // Verify panic was recovered as error\n    assert.Error(t, err)\n    assert.Contains(t, err.Error(), \"panic in processor\")\n    assert.Equal(t, \"\", result) // Result should be zero value\n    \n    // Verify normal operation still works\n    result, err = panicProcessor.Process(context.Background(), \"normal\")\n    assert.NoError(t, err)\n    assert.Equal(t, \"normal\", result)\n}",{"id":1456,"title":1457,"titles":1458,"content":29,"level":19},"/v1.0.7/guides/safety-reliability#production-deployment-considerations","Production Deployment 
Considerations",[1369],{"id":1460,"title":1461,"titles":1462,"content":1463,"level":35},"/v1.0.7/guides/safety-reliability#security","Security",[1369,1457],"Information Leakage Prevention: Panic sanitization prevents accidental exposure of internal detailsSafe Defaults: Failed operations return zero values, preventing undefined behaviorAttack Surface Reduction: Malformed input cannot crash your application through panics",{"id":1465,"title":1466,"titles":1467,"content":1468,"level":35},"/v1.0.7/guides/safety-reliability#reliability","Reliability",[1369,1457],"Graceful Degradation: Panics become errors in the normal error handling flowService Continuity: One panicking operation doesn't crash the entire serviceError Context: Full path and input data for debugging (timing not tracked for panics)",{"id":1470,"title":1471,"titles":1472,"content":1473,"level":35},"/v1.0.7/guides/safety-reliability#performance","Performance",[1369,1457],"Minimal Overhead: ~20ns per operation is typically negligibleNo Allocations: Panic recovery path only allocates when panics actually occurPredictable Behavior: Same error handling patterns whether errors are panics or regular errors",{"id":1475,"title":1476,"titles":1477,"content":29,"level":19},"/v1.0.7/guides/safety-reliability#troubleshooting-panic-related-issues","Troubleshooting Panic-Related Issues",[1369],{"id":1479,"title":1480,"titles":1481,"content":1482,"level":35},"/v1.0.7/guides/safety-reliability#high-panic-frequency","High Panic Frequency",[1369,1476],"If you're seeing many panics in your logs: Identify the source: Use error path information to pinpoint problematic processorsReview input validation: Ensure data is properly validated before processingCheck third-party libraries: Some libraries may panic on edge casesConsider upstream changes: Has input data format changed recently?",{"id":1484,"title":1398,"titles":1485,"content":1486,"level":35},"/v1.0.7/guides/safety-reliability#performance-impact-1",[1369,1476],"If panic 
recovery is impacting performance: Benchmark without panics: ~20ns overhead should be negligible in most casesCheck panic frequency: High panic rates indicate underlying issuesProfile allocation: Ensure panics aren't causing excessive allocationsConsider alternatives: If panics are frequent, address root causes rather than relying on recovery",{"id":1488,"title":1489,"titles":1490,"content":1491,"level":35},"/v1.0.7/guides/safety-reliability#debugging-sanitized-messages","Debugging Sanitized Messages",[1369,1476],"If you need more details from sanitized panic messages for debugging: Use development logging: Log full errors in development environmentsAdd debug processors: Wrap risky operations with additional loggingStructured error handling: Return structured errors instead of relying on panicsUnit test edge cases: Identify and test specific panic scenarios The goal is to combine automatic safety with proper engineering practices for maximum reliability. html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .skxcq, html code.shiki 
.skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":1493,"title":1494,"titles":1495,"content":1496,"level":9},"/v1.0.7/guides/troubleshooting","Troubleshooting Guide",[],"Common issues, gotchas, and solutions for diagnosing and resolving problems in pipz pipelines",{"id":1498,"title":1494,"titles":1499,"content":1500,"level":9},"/v1.0.7/guides/troubleshooting#troubleshooting-guide",[],"This guide helps diagnose and resolve common issues when working with pipz pipelines. Start with the Common Gotchas section for the most frequent mistakes.",{"id":1502,"title":1503,"titles":1504,"content":1505,"level":19},"/v1.0.7/guides/troubleshooting#common-gotchas-quick-reference","Common Gotchas (Quick Reference)",[1494],"These are the most common mistakes that cause bugs in pipz pipelines:",{"id":1507,"title":1508,"titles":1509,"content":1510,"level":35},"/v1.0.7/guides/troubleshooting#_1-creating-rate-limiters-or-circuit-breakers-per-request","1. 
Creating Rate Limiters or Circuit Breakers Per Request",[1494,1503],"❌ WRONG: func handleRequest(req Request) Response {\n    limiterID := pipz.NewIdentity(\"api\", \"API rate limiter\")\n    limiter := pipz.NewRateLimiter(limiterID, 100, 10) // New instance each call!\n    return limiter.Process(ctx, req) // Useless!\n} ✅ RIGHT: // Package-level Identity and limiter - shared across all requests\nvar APILimiterID = pipz.NewIdentity(\"api\", \"API rate limiter\")\nvar apiLimiter = pipz.NewRateLimiter(APILimiterID, 100, 10)\n\nfunc handleRequest(req Request) Response {\n    return apiLimiter.Process(ctx, req)\n}",{"id":1512,"title":1513,"titles":1514,"content":1515,"level":35},"/v1.0.7/guides/troubleshooting#_2-forgetting-to-implement-clone-properly","2. Forgetting to Implement Clone() Properly",[1494,1503],"❌ WRONG: func (d Data) Clone() Data {\n    return Data{Items: d.Items} // Shares slice memory!\n} ✅ RIGHT: func (d Data) Clone() Data {\n    items := make([]Item, len(d.Items))\n    copy(items, d.Items)\n    return Data{Items: items}\n}",{"id":1517,"title":1518,"titles":1519,"content":1520,"level":35},"/v1.0.7/guides/troubleshooting#_3-using-transform-for-operations-that-can-fail","3. Using Transform for Operations That Can Fail",[1494,1503],"❌ WRONG: var ParseID = pipz.NewIdentity(\"parse\", \"Parse JSON data\")\ntransform := pipz.Transform(ParseID, func(ctx context.Context, s string) Data {\n    data, _ := json.Unmarshal([]byte(s), &Data{}) // Error ignored!\n    return data\n}) ✅ RIGHT: var ParseID = pipz.NewIdentity(\"parse\", \"Parse JSON data\")\napply := pipz.Apply(ParseID, func(ctx context.Context, s string) (Data, error) {\n    var data Data\n    err := json.Unmarshal([]byte(s), &data)\n    return data, err\n})",{"id":1522,"title":1523,"titles":1524,"content":1525,"level":35},"/v1.0.7/guides/troubleshooting#_4-not-respecting-context-cancellation","4. 
Not Respecting Context Cancellation",[1494,1503],"❌ WRONG: var SlowID = pipz.NewIdentity(\"slow\", \"Slow processor\")\nprocessor := pipz.Apply(SlowID, func(ctx context.Context, data Data) (Data, error) {\n    time.Sleep(10 * time.Second) // Ignores context!\n    return data, nil\n}) ✅ RIGHT: var SlowID = pipz.NewIdentity(\"slow\", \"Slow processor\")\nprocessor := pipz.Apply(SlowID, func(ctx context.Context, data Data) (Data, error) {\n    select {\n    case \u003C-time.After(10 * time.Second):\n        return data, nil\n    case \u003C-ctx.Done():\n        return data, ctx.Err()\n    }\n})",{"id":1527,"title":1528,"titles":1529,"content":1530,"level":35},"/v1.0.7/guides/troubleshooting#_5-using-racecontest-for-operations-with-side-effects","5. Using Race/Contest for Operations with Side Effects",[1494,1503],"❌ WRONG: var (\n    PaymentsRaceID = pipz.NewIdentity(\"payments\", \"Race for payments (wrong!)\")\n    StripeID       = pipz.NewIdentity(\"stripe\", \"Charge via Stripe\")\n    PayPalID       = pipz.NewIdentity(\"paypal\", \"Charge via PayPal\")\n    SquareID       = pipz.NewIdentity(\"square\", \"Charge via Square\")\n)\n\nrace := pipz.NewRace(PaymentsRaceID,\n    pipz.Apply(StripeID, chargeStripe),     // Charges!\n    pipz.Apply(PayPalID, chargePayPal),     // Also charges!\n    pipz.Apply(SquareID, chargeSquare),     // Triple charge!\n) ✅ RIGHT: var (\n    FetchRaceID = pipz.NewIdentity(\"fetch\", \"Race to fetch data\")\n    CacheID     = pipz.NewIdentity(\"cache\", \"Fetch from cache\")\n    DBID        = pipz.NewIdentity(\"db\", \"Fetch from database\")\n    APIID       = pipz.NewIdentity(\"api\", \"Fetch from API\")\n)\n\nrace := pipz.NewRace(FetchRaceID,\n    pipz.Apply(CacheID, fetchFromCache),\n    pipz.Apply(DBID, fetchFromDB),\n    pipz.Apply(APIID, fetchFromAPI),\n)",{"id":1532,"title":1533,"titles":1534,"content":1535,"level":35},"/v1.0.7/guides/troubleshooting#_6-expecting-results-from-concurrent","6. 
Expecting Results from Concurrent",[1494,1503],"❌ WRONG: var (\n    ModifyID = pipz.NewIdentity(\"modify\", \"Modify values (wrong!)\")\n    DoubleID = pipz.NewIdentity(\"double\", \"Double the value\")\n)\n\nconcurrent := pipz.NewConcurrent(ModifyID, nil,\n    pipz.Transform(DoubleID, func(ctx context.Context, n int) int {\n        return n * 2 // Result is discarded without reducer!\n    }),\n)\nresult, _ := concurrent.Process(ctx, 5)\n// result is still 5, not 10! ✅ RIGHT: var (\n    EffectsID  = pipz.NewIdentity(\"effects\", \"Side effects\")\n    LogID      = pipz.NewIdentity(\"log\", \"Log data\")\n    MetricsID  = pipz.NewIdentity(\"metrics\", \"Update metrics\")\n)\n\nconcurrent := pipz.NewConcurrent(EffectsID, nil,\n    pipz.Effect(LogID, logData),\n    pipz.Effect(MetricsID, updateMetrics),\n)",{"id":1537,"title":1538,"titles":1539,"content":1540,"level":35},"/v1.0.7/guides/troubleshooting#_7-using-switch-without-default-for-unknown-cases","7. Using Switch Without Default for Unknown Cases",[1494,1503],"❌ WRONG: var RouterID = pipz.NewIdentity(\"router\", \"Routes by type\")\n\nrouter := pipz.NewSwitch(RouterID,\n    func(ctx context.Context, data Data) string {\n        return data.Type // Could be anything!\n    },\n).\nAddRoute(\"A\", processA).\nAddRoute(\"B\", processB)\n// Missing routes will cause runtime errors! ✅ RIGHT: var RouterID = pipz.NewIdentity(\"router\", \"Routes by type\")\n\nrouter := pipz.NewSwitch(RouterID,\n    func(ctx context.Context, data Data) string {\n        return data.Type\n    },\n).\nAddRoute(\"A\", processA).\nAddRoute(\"B\", processB).\nDefault(processUnknown) // Safety net",{"id":1542,"title":1543,"titles":1544,"content":1545,"level":35},"/v1.0.7/guides/troubleshooting#_8-ignoring-pipeline-errors","8. Ignoring Pipeline Errors",[1494,1503],"❌ WRONG: result, _ := pipeline.Process(ctx, data) // Error ignored!\nprocessResult(result) // May be zero value! 
✅ RIGHT: result, err := pipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) {\n        // Path contains the chain of Identity values\n        if len(pipeErr.Path) > 0 {\n            log.Printf(\"Failed at %s: %v\", pipeErr.Path[len(pipeErr.Path)-1].Name(), pipeErr.Err)\n        }\n    }\n    return handleError(err)\n}\nprocessResult(result)",{"id":1547,"title":1548,"titles":1549,"content":1550,"level":35},"/v1.0.7/guides/troubleshooting#_9-nested-timeouts-with-wrong-duration","9. Nested Timeouts with Wrong Duration",[1494,1503],"❌ WRONG: var (\n    OuterTimeoutID = pipz.NewIdentity(\"outer\", \"Outer timeout\")\n    InnerTimeoutID = pipz.NewIdentity(\"inner\", \"Inner timeout\")\n)\n\ntimeout := pipz.NewTimeout(OuterTimeoutID,\n    pipz.NewTimeout(InnerTimeoutID, processor, 10*time.Second), // Longer!\n    5*time.Second, // Shorter - inner never gets full time\n) ✅ RIGHT: var (\n    OuterTimeoutID = pipz.NewIdentity(\"outer\", \"Outer timeout\")\n    StepsID        = pipz.NewIdentity(\"steps\", \"Sequential steps\")\n    Step1TimeoutID = pipz.NewIdentity(\"step1\", \"Step 1 timeout\")\n    Step2TimeoutID = pipz.NewIdentity(\"step2\", \"Step 2 timeout\")\n)\n\ntimeout := pipz.NewTimeout(OuterTimeoutID,\n    pipz.NewSequence(StepsID,\n        pipz.NewTimeout(Step1TimeoutID, step1, 5*time.Second),\n        pipz.NewTimeout(Step2TimeoutID, step2, 5*time.Second),\n    ),\n    12*time.Second, // Accommodates both with buffer\n)",{"id":1552,"title":1553,"titles":1554,"content":1555,"level":35},"/v1.0.7/guides/troubleshooting#_10-using-enrich-for-required-operations","10. 
Using Enrich for Required Operations",[1494,1503],"❌ WRONG: var ValidateID = pipz.NewIdentity(\"validate\", \"Validate user\")\n\nenrich := pipz.Enrich(ValidateID, func(ctx context.Context, user User) (User, error) {\n    if !isValid(user) {\n        return user, errors.New(\"invalid\") // Error is swallowed!\n    }\n    return user, nil\n}) ✅ RIGHT: var ValidateID = pipz.NewIdentity(\"validate\", \"Validate user\")\n\napply := pipz.Apply(ValidateID, func(ctx context.Context, user User) (User, error) {\n    if !isValid(user) {\n        return user, errors.New(\"invalid\") // Fails the pipeline\n    }\n    return user, nil\n})",{"id":1557,"title":1558,"titles":1559,"content":29,"level":19},"/v1.0.7/guides/troubleshooting#built-in-safety-features","Built-in Safety Features",[1494],{"id":1561,"title":1378,"titles":1562,"content":1563,"level":35},"/v1.0.7/guides/troubleshooting#automatic-panic-recovery",[1494,1558],"Every processor and connector in pipz includes built-in panic recovery. This means: No crashes: Panics are automatically caught and converted to errorsSecurity sanitization: Sensitive information (memory addresses, file paths, stack traces) is stripped from panic messagesRich error context: Panic errors include full pipeline path and input data (timing not tracked for panics)Always enabled: No configuration needed, minimal performance overhead (~20ns per operation)Complete coverage: All connectors (Sequence, Concurrent, WorkerPool, etc.) and all processors (Apply, Transform, Effect, etc.) 
are protected // This will NOT crash your application\nvar RiskyID = pipz.NewIdentity(\"risky\", \"Risky processor that may panic\")\npanickyProcessor := pipz.Transform(RiskyID, func(ctx context.Context, data string) string {\n    if data == \"boom\" {\n        panic(\"something went wrong!\") // Automatically recovered\n    }\n    return data\n})\n\n// The panic is converted to a proper error\nresult, err := panickyProcessor.Process(ctx, \"boom\")\nif err != nil {\n    // err will be a *pipz.Error with sanitized panic message\n    fmt.Printf(\"Caught panic: %v\\n\", err)\n    // Output: risky failed after 0s: panic in processor \"risky\": panic occurred: something went wrong!\n} Security Benefits: Prevents accidental leakage of internal memory addressesSanitizes file paths that might contain sensitive directory namesRemoves stack trace information that could expose internal structureTruncates excessively long panic messages When You'll See This: Third-party library panics in your processor functionsArray/slice bounds errorsNil pointer dereferencesType assertion failuresAny other unexpected panics in user code No Action Required: Panic recovery is automatic and always enabled. Your pipeline will continue running, treating panics as regular errors in the pipeline flow.",{"id":1565,"title":1566,"titles":1567,"content":29,"level":19},"/v1.0.7/guides/troubleshooting#common-issues-and-solutions","Common Issues and Solutions",[1494],{"id":1569,"title":1570,"titles":1571,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_1-pipeline-execution-issues","1. Pipeline Execution Issues",[1494,1566],{"id":1573,"title":1574,"titles":1575,"content":1576,"level":917},"/v1.0.7/guides/troubleshooting#pipeline-stops-unexpectedly","Pipeline Stops Unexpectedly",[1494,1566,1570],"Symptom: Pipeline execution halts without clear error message. 
Possible Causes: Context cancellation or timeoutPanic in a processor function (automatically recovered by pipz)Deadlock in concurrent operations Solutions: // Check for context issues\nctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\ndefer cancel()\n\nresult, err := pipeline.Process(ctx, data)\nif err != nil {\n    // Check if it's a context error\n    if errors.Is(err.Cause, context.DeadlineExceeded) {\n        log.Println(\"Pipeline timed out\")\n    } else if errors.Is(err.Cause, context.Canceled) {\n        log.Println(\"Pipeline was cancelled\")\n    }\n    \n    // Check if it's a panic that was automatically recovered\n    var panicErr *pipz.Error[T]\n    if errors.As(err, &panicErr) {\n        // Look for panic error in the chain\n        if strings.Contains(panicErr.Error(), \"panic in processor\") {\n            log.Printf(\"Processor panic was automatically recovered: %v\", panicErr.Err)\n        }\n    }\n}\n\n// NOTE: Manual panic recovery is unnecessary - pipz handles all panics automatically\n// All processors and connectors include built-in panic recovery with security sanitization",{"id":1578,"title":1579,"titles":1580,"content":1581,"level":917},"/v1.0.7/guides/troubleshooting#pipeline-returns-unexpected-results","Pipeline Returns Unexpected Results",[1494,1566,1570],"Symptom: Output doesn't match expected transformation. 
Possible Causes: Processors registered in wrong orderMutation of shared stateMissing Clone() implementation for concurrent processing Solutions: // Verify processor order\nvar PipelineID = pipz.NewIdentity(\"pipeline\", \"Data processing pipeline\")\nseq := pipz.NewSequence[Data](PipelineID)\nseq.Iterate(func(p pipz.Chainable[Data]) bool {\n    fmt.Printf(\"Processor: %s\\n\", p.Name())\n    return true // continue iteration\n})\n\n// Ensure proper cloning for concurrent operations\ntype Data struct {\n    Values []int\n}\n\nfunc (d Data) Clone() Data {\n    // Deep copy slice to prevent shared state\n    newValues := make([]int, len(d.Values))\n    copy(newValues, d.Values)\n    return Data{Values: newValues}\n}",{"id":1583,"title":1584,"titles":1585,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_2-memory-and-performance-issues","2. Memory and Performance Issues",[1494,1566],{"id":1587,"title":1588,"titles":1589,"content":1590,"level":917},"/v1.0.7/guides/troubleshooting#high-memory-usage","High Memory Usage",[1494,1566,1584],"Symptom: Memory consumption grows unexpectedly. 
Possible Causes: Large data structures not being garbage collectedGoroutine leaks in concurrent processorsBuffered channels not being drained Solutions: // Use streaming for large datasets\nfunc processLargeDataset(ctx context.Context, reader io.Reader) error {\n    scanner := bufio.NewScanner(reader)\n    pipeline := createPipeline()\n    \n    for scanner.Scan() {\n        data := parseData(scanner.Text())\n        result, err := pipeline.Process(ctx, data)\n        if err != nil {\n            return err\n        }\n        // Process result immediately, don't accumulate\n        handleResult(result)\n    }\n    return scanner.Err()\n}\n\n// Ensure goroutines are properly cleaned up\nvar ParallelID = pipz.NewIdentity(\"parallel\", \"Parallel processing\")\nconcurrent := pipz.NewConcurrent(ParallelID, nil, processors...)\n// Always use context with timeout/cancellation\nctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\ndefer cancel()\nresult, err := concurrent.Process(ctx, data)",{"id":1592,"title":1593,"titles":1594,"content":1595,"level":917},"/v1.0.7/guides/troubleshooting#slow-pipeline-execution","Slow Pipeline Execution",[1494,1566,1584],"Symptom: Pipeline takes longer than expected to complete. 
Possible Causes: Sequential processing of independent operationsInefficient processor implementationsExcessive context switching in concurrent operations Solutions: // Profile to identify bottlenecks\nimport _ \"net/http/pprof\"\n\n// In your main:\ngo func() {\n    log.Println(http.ListenAndServe(\"localhost:6060\", nil))\n}()\n\n// Use concurrent processing for independent operations\n// Instead of:\nvar SlowSeqID = pipz.NewIdentity(\"slow\", \"Sequential processing\")\nsequential := pipz.NewSequence(SlowSeqID,\n    fetchUserData,    // 100ms\n    fetchOrderData,   // 150ms\n    fetchInventory,   // 200ms\n)\n\n// Use:\nvar FastConcID = pipz.NewIdentity(\"fast\", \"Concurrent processing\")\nconcurrent := pipz.NewConcurrent(FastConcID, nil,\n    fetchUserData,\n    fetchOrderData,\n    fetchInventory,\n)\n// Total time: ~200ms instead of 450ms",{"id":1597,"title":1598,"titles":1599,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_3-error-handling-issues","3. Error Handling Issues",[1494,1566],{"id":1601,"title":1602,"titles":1603,"content":1604,"level":917},"/v1.0.7/guides/troubleshooting#errors-not-being-caught","Errors Not Being Caught",[1494,1566,1598],"Symptom: Errors pass through without being handled. 
Possible Causes: Using Transform instead of Apply for fallible operationsNot checking error returnsEnrich processor swallowing errors Solutions: // Use Apply for operations that can fail\nvar ParseID = pipz.NewIdentity(\"parse\", \"Parse JSON data\")\n\n// Wrong:\nprocessor := pipz.Transform(ParseID, func(ctx context.Context, s string) Data {\n    var d Data\n    json.Unmarshal([]byte(s), &d) // Error ignored!\n    return d\n})\n\n// Correct:\nprocessor := pipz.Apply(ParseID, func(ctx context.Context, s string) (Data, error) {\n    var d Data\n    err := json.Unmarshal([]byte(s), &d)\n    return d, err\n})\n\n// Always check pipeline errors\nresult, err := pipeline.Process(ctx, input)\nif err != nil {\n    log.Printf(\"Pipeline failed at stage '%s': %v\", err.Stage, err.Cause)\n    // Access the data state at failure\n    log.Printf(\"Data state at failure: %+v\", err.State)\n}",{"id":1606,"title":1607,"titles":1608,"content":1609,"level":917},"/v1.0.7/guides/troubleshooting#error-recovery-not-working","Error Recovery Not Working",[1494,1566,1598],"Symptom: Fallback processors not executing on error. Possible Causes: Incorrect Fallback configurationError occurring after fallback pointContext already cancelled Solutions: // Ensure fallback is properly configured\nvar (\n    RecoverID = pipz.NewIdentity(\"recover\", \"Recovery fallback\")\n    DefaultID = pipz.NewIdentity(\"default\", \"Default value provider\")\n)\n\nfallback := pipz.NewFallback(RecoverID,\n    primaryPipeline,\n    pipz.Transform(DefaultID, func(ctx context.Context, data T) T {\n        return getDefaultValue()\n    }),\n)\n\n// Check that context is still valid\nif ctx.Err() != nil {\n    log.Printf(\"Context already cancelled: %v\", ctx.Err())\n}",{"id":1611,"title":1612,"titles":1613,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_4-type-safety-issues","4. 
Type Safety Issues",[1494,1566],{"id":1615,"title":1616,"titles":1617,"content":1618,"level":917},"/v1.0.7/guides/troubleshooting#compilation-errors-with-generics","Compilation Errors with Generics",[1494,1566,1612],"Symptom: \"cannot infer T\" or type mismatch errors. Possible Causes: Type inference limitationsIncompatible processor types in sequenceMissing type parameters Solutions: // Explicitly specify type parameters when needed\n// Instead of:\nvar PipelineID = pipz.NewIdentity(\"pipeline\", \"My pipeline\")\nseq := pipz.NewSequence(PipelineID) // Error: cannot infer T\n\n// Use:\nseq := pipz.NewSequence[MyDataType](PipelineID)\n\n// Ensure all processors handle the same type\ntype User struct { /* fields */ }\ntype Order struct { /* fields */ }\n\n// This won't compile:\nvar MixedID = pipz.NewIdentity(\"mixed\", \"Mixed type pipeline\")\npipeline := pipz.NewSequence(MixedID,\n    processUser,  // Chainable[User]\n    processOrder, // Chainable[Order] - Type mismatch!\n)\n\n// Use transformation to change types:\nvar (\n    CorrectID = pipz.NewIdentity(\"correct\", \"Correct type pipeline\")\n    ConvertID = pipz.NewIdentity(\"convert\", \"Convert User to Order\")\n)\npipeline := pipz.NewSequence(CorrectID,\n    processUser,\n    pipz.Apply(ConvertID, func(ctx context.Context, u User) (Order, error) {\n        return convertUserToOrder(u)\n    }),\n    processOrder,\n)",{"id":1620,"title":1621,"titles":1622,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_5-concurrency-issues","5. Concurrency Issues",[1494,1566],{"id":1624,"title":1625,"titles":1626,"content":1627,"level":917},"/v1.0.7/guides/troubleshooting#race-conditions","Race Conditions",[1494,1566,1621],"Symptom: Inconsistent results, data corruption, panics. 
Possible Causes: Shared mutable stateMissing Clone() implementationUnsafe concurrent access Solutions: // Implement proper cloning\ntype Data struct {\n    mu     sync.Mutex // Don't include mutex in clone!\n    values map[string]int\n}\n\nfunc (d Data) Clone() Data {\n    newValues := make(map[string]int, len(d.values))\n    for k, v := range d.values {\n        newValues[k] = v\n    }\n    return Data{values: newValues}\n}\n\n// Use synchronization for shared resources\nvar (\n    counter int64\n)\n\nvar CountID = pipz.NewIdentity(\"count\", \"Count requests\")\nprocessor := pipz.Effect(CountID, func(ctx context.Context, data T) error {\n    atomic.AddInt64(&counter, 1)\n    return nil\n})",{"id":1629,"title":1630,"titles":1631,"content":1632,"level":917},"/v1.0.7/guides/troubleshooting#deadlocks","Deadlocks",[1494,1566,1621],"Symptom: Pipeline hangs indefinitely. Possible Causes: Circular dependenciesChannel deadlocksMutex ordering issues Solutions: // Always use timeouts\nctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\ndefer cancel()\n\n// Detect deadlocks with runtime checks\nimport _ \"runtime/debug\"\n\n// In development:\nruntime.SetBlockProfileRate(1)\n\n// Avoid circular dependencies\n// Bad:\nproc1 := createProcessor(proc2) // proc1 depends on proc2\nproc2 := createProcessor(proc1) // proc2 depends on proc1\n\n// Good:\nproc1 := createProcessor(nil)\nproc2 := createProcessor(nil)\n// Configure dependencies after creation if needed",{"id":1634,"title":1635,"titles":1636,"content":1637,"level":35},"/v1.0.7/guides/troubleshooting#_6-fallback-chain-stack-overflow","6. Fallback Chain Stack Overflow",[1494,1566],"Symptom: Stack overflow errors during error handling, infinite recursion in fallback chains. 
Example Stack Trace Pattern: goroutine 1 [running]:\nruntime.main()\n    fallback.Process(ctx, data)\n    fallback.Process(ctx, data) // Same fallback called recursively\n    fallback.Process(ctx, data)\n    ...\nruntime: goroutine stack exceeds 1000000000-byte limit Possible Causes: Circular references in fallback chainsFallback A points to Fallback B which points back to Fallback AComplex multi-level circular dependencies Solutions: // Identify the circular dependency\n// Look for patterns like:\nvar (\n    F1ID = pipz.NewIdentity(\"f1\", \"Fallback 1\")\n    F2ID = pipz.NewIdentity(\"f2\", \"Fallback 2\")\n    F3ID = pipz.NewIdentity(\"f3\", \"Fallback 3\")\n)\nfallback1 := pipz.NewFallback(F1ID, proc1, fallback2)\nfallback2 := pipz.NewFallback(F2ID, proc2, fallback3)\nfallback3 := pipz.NewFallback(F3ID, proc3, fallback1) // ← Creates circle\n\n// Fix: Use linear hierarchy instead\nvar (\n    PrimaryID   = pipz.NewIdentity(\"primary\", \"Primary fallback\")\n    SecondaryID = pipz.NewIdentity(\"secondary\", \"Secondary fallback\")\n)\nfallback := pipz.NewFallback(PrimaryID,\n    proc1,\n    pipz.NewFallback(SecondaryID,\n        proc2,\n        proc3, // Terminal processor - no further fallbacks\n    ),\n) Prevention: Design fallback chains as hierarchical trees, not circular graphs.",{"id":1639,"title":1640,"titles":1641,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_7-circuit-breaker-issues","7. Circuit Breaker Issues",[1494,1566],{"id":1643,"title":1644,"titles":1645,"content":1646,"level":917},"/v1.0.7/guides/troubleshooting#circuit-breaker-not-opening","Circuit Breaker Not Opening",[1494,1566,1640],"Symptom: Failed requests continue despite threshold. 
Possible Causes: Threshold too highWindow size too largeErrors not properly propagated Solutions: var (\n    CBApiID = pipz.NewIdentity(\"api\", \"API circuit breaker\")\n    ApiID   = pipz.NewIdentity(\"api\", \"API call\")\n)\n\ncb := pipz.NewCircuitBreaker(CBApiID,\n    apiProcessor,\n    5,           // Open after 5 failures\n    time.Minute, // Cooldown period\n)\n\n// Ensure errors are returned, not swallowed\napiProcessor := pipz.Apply(ApiID, func(ctx context.Context, data T) (T, error) {\n    resp, err := callAPI(data)\n    if err != nil {\n        return data, err // Must return error for circuit breaker to count\n    }\n    if resp.StatusCode >= 500 {\n        return data, fmt.Errorf(\"server error: %d\", resp.StatusCode)\n    }\n    return processResponse(resp)\n})",{"id":1648,"title":1649,"titles":1650,"content":29,"level":35},"/v1.0.7/guides/troubleshooting#_7-rate-limiter-issues","7. Rate Limiter Issues",[1494,1566],{"id":1652,"title":1653,"titles":1654,"content":1655,"level":917},"/v1.0.7/guides/troubleshooting#rate-limiting-not-working","Rate Limiting Not Working",[1494,1566,1649],"Symptom: Requests exceed configured rate. 
Possible Causes: Incorrect rate configurationMultiple rate limiter instancesTime synchronization issues Solutions: // Use a single rate limiter instance with package-level Identity\nvar RateLimiterID = pipz.NewIdentity(\"api\", \"API rate limiter\")\nvar rateLimiter = pipz.NewRateLimiter(RateLimiterID, 100, 10)\n\n// Don't create new instances per request\n// Wrong:\nfunc handleRequest(data T) (T, error) {\n    rlID := pipz.NewIdentity(\"api\", \"API rate limiter\")\n    rl := pipz.NewRateLimiter(rlID, 100, 10) // New instance each time!\n    return rl.Process(ctx, data)\n}\n\n// Correct:\nfunc handleRequest(data T) (T, error) {\n    return rateLimiter.Process(ctx, data) // Reuse single instance\n}",{"id":1657,"title":1658,"titles":1659,"content":29,"level":19},"/v1.0.7/guides/troubleshooting#debugging-techniques","Debugging Techniques",[1494],{"id":1661,"title":1662,"titles":1663,"content":1664,"level":35},"/v1.0.7/guides/troubleshooting#_1-pipeline-inspection","1. Pipeline Inspection",[1494,1658],"// Inspect pipeline structure\nfunc inspectPipeline[T any](seq *pipz.Sequence[T]) {\n    fmt.Printf(\"Pipeline: %s\\n\", seq.Name())\n    seq.Iterate(func(p pipz.Chainable[T]) bool {\n        fmt.Printf(\"  - %s\\n\", p.Name())\n        return true\n    })\n}\n\n// Add debug logging\nvar (\n    DebugPipelineID = pipz.NewIdentity(\"debug\", \"Debug pipeline\")\n    LogInputID      = pipz.NewIdentity(\"log-input\", \"Log input data\")\n    LogOutputID     = pipz.NewIdentity(\"log-output\", \"Log output data\")\n)\n\ndebugPipeline := pipz.NewSequence(DebugPipelineID,\n    pipz.Effect(LogInputID, func(ctx context.Context, data T) error {\n        log.Printf(\"Input: %+v\", data)\n        return nil\n    }),\n    actualProcessor,\n    pipz.Effect(LogOutputID, func(ctx context.Context, data T) error {\n        log.Printf(\"Output: %+v\", data)\n        return nil\n    
}),\n)",{"id":1666,"title":1667,"titles":1668,"content":1669,"level":35},"/v1.0.7/guides/troubleshooting#_2-error-analysis","2. Error Analysis",[1494,1658],"// Detailed error logging\nresult, err := pipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[T]\n    if errors.As(err, &pipeErr) {\n        log.Printf(\"Pipeline failed:\")\n        // Path is []Identity - show the full path\n        var pathNames []string\n        for _, id := range pipeErr.Path {\n            pathNames = append(pathNames, id.Name())\n        }\n        log.Printf(\"  Path: %s\", strings.Join(pathNames, \" -> \"))\n        log.Printf(\"  Error: %v\", pipeErr.Err)\n        log.Printf(\"  Type: %T\", pipeErr.Err)\n        log.Printf(\"  InputData: %+v\", pipeErr.InputData)\n\n        // Check for specific error types\n        var validationErr *ValidationError\n        if errors.As(pipeErr.Err, &validationErr) {\n            log.Printf(\"  Validation failed: %s\", validationErr.Field)\n        }\n    }\n}",{"id":1671,"title":1672,"titles":1673,"content":1674,"level":35},"/v1.0.7/guides/troubleshooting#_3-performance-profiling","3. 
Performance Profiling",[1494,1658],"// Time individual processors\nfunc timeProcessor[T any](id pipz.Identity, p pipz.Chainable[T]) pipz.Chainable[T] {\n    return pipz.Apply(id, func(ctx context.Context, data T) (T, error) {\n        start := time.Now()\n        result, err := p.Process(ctx, data)\n        duration := time.Since(start)\n        log.Printf(\"Processor %s took %v\", p.Name(), duration)\n        return result, err\n    })\n}\n\n// Memory profiling\nimport (\n    \"runtime\"\n    \"runtime/pprof\"\n)\n\nfunc profileMemory() {\n    f, _ := os.Create(\"mem.prof\")\n    defer f.Close()\n    runtime.GC()\n    pprof.WriteHeapProfile(f)\n}",{"id":1676,"title":150,"titles":1677,"content":1678,"level":19},"/v1.0.7/guides/troubleshooting#getting-help",[1494],"If you're still experiencing issues: Read the Tests: Test files often demonstrate edge cases and proper usageEnable Debug Logging: Set up detailed logging to trace executionCreate a Minimal Reproduction: Isolate the issue in a small, reproducible exampleFile an Issue: Report bugs at https://github.com/zoobzio/pipz/issues with:\nGo versionpipz versionMinimal code to reproduceExpected vs actual behaviorAny error messages or stack traces",{"id":1680,"title":1681,"titles":1682,"content":29,"level":19},"/v1.0.7/guides/troubleshooting#common-patterns-for-resilience","Common Patterns for Resilience",[1494],{"id":1684,"title":1685,"titles":1686,"content":1687,"level":35},"/v1.0.7/guides/troubleshooting#defensive-pipeline-construction","Defensive Pipeline Construction",[1494,1681],"// Define identities upfront\nvar (\n    ResilientPipelineID = pipz.NewIdentity(\"resilient\", \"Resilient pipeline\")\n    ValidateInputID     = pipz.NewIdentity(\"validate\", \"Validate input\")\n    RateLimitID         = pipz.NewIdentity(\"rate\", \"Rate limiter\")\n    CircuitBreakerID    = pipz.NewIdentity(\"circuit\", \"Circuit breaker\")\n    TimeoutID           = pipz.NewIdentity(\"timeout\", \"Timeout protection\")\n    RetryID 
            = pipz.NewIdentity(\"retry\", \"Retry logic\")\n    RecoverFallbackID   = pipz.NewIdentity(\"recover\", \"Error recovery fallback\")\n)\n\n// Combine multiple resilience patterns\nresilientPipeline := pipz.NewSequence(ResilientPipelineID,\n    // Input validation\n    pipz.Apply(ValidateInputID, validateInput),\n\n    // Rate limiting\n    pipz.NewRateLimiter(RateLimitID, 100, 10,\n        // Circuit breaker\n        pipz.NewCircuitBreaker(CircuitBreakerID,\n            // Timeout protection\n            pipz.NewTimeout(TimeoutID,\n                // Retry logic\n                pipz.NewRetry(RetryID, actualProcessor, 3),\n                5*time.Second,\n            ),\n            5, time.Minute,\n        ),\n    ),\n\n    // Error recovery\n    pipz.NewFallback(RecoverFallbackID,\n        riskyProcessor,\n        safeDefault,\n    ),\n) This layered approach provides multiple levels of protection against various failure modes. html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: 
var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":1689,"title":1690,"titles":1691,"content":1692,"level":9},"/v1.0.7/cookbook/building-pipelines","Recipe: Building Pipelines",[],"Complete user registration pipeline with validation, enrichment, resilience, and observability",{"id":1694,"title":1690,"titles":1695,"content":1696,"level":9},"/v1.0.7/cookbook/building-pipelines#recipe-building-pipelines",[],"A complete, production-ready pipeline example covering validation, enrichment, resilience patterns, and observability.",{"id":1698,"title":1699,"titles":1700,"content":1701,"level":19},"/v1.0.7/cookbook/building-pipelines#the-scenario","The Scenario",[1690],"We'll build a user registration pipeline that: Validates user inputChecks for existing accountsEnriches data with defaultsCreates the accountSends welcome emailsLogs for analytics",{"id":1703,"title":1704,"titles":1705,"content":1706,"level":19},"/v1.0.7/cookbook/building-pipelines#step-1-define-your-data-model","Step 1: Define Your Data Model",[1690],"package main\n\nimport (\n    \"context\"\n    \"errors\"\n    \"fmt\"\n    \"strings\"\n    \"time\"\n    \n    \"github.com/zoobzio/pipz\"\n)\n\ntype User struct {\n    ID           string\n    Email        string\n    Username     string\n    Password     string // hashed\n 
   FullName     string\n    Country      string\n    Verified     bool\n    CreatedAt    time.Time\n    Preferences  UserPreferences\n}\n\ntype UserPreferences struct {\n    Newsletter  bool\n    Language    string\n    Theme       string\n}",{"id":1708,"title":1709,"titles":1710,"content":1711,"level":19},"/v1.0.7/cookbook/building-pipelines#step-2-define-your-keys-constants","Step 2: Define Your Keys (Constants)",[1690],"Define all processor and connector names as constants - these are the \"keys\" to your system: // All names are constants - this is the key to the system\nconst (\n    // Validation processors\n    ProcessorValidate       = \"validate\"\n    ProcessorCheckDuplicate = \"check_duplicate\"\n    \n    // Transformation processors  \n    ProcessorEnrich = \"enrich\"\n    \n    // Persistence processors\n    ProcessorSave = \"save\"\n    \n    // Post-registration processors\n    ProcessorSendWelcome  = \"send_welcome\"\n    ProcessorLogAnalytics = \"log_analytics\"\n    \n    // Connector names\n    PipelineRegistration      = \"registration\"\n    ConnectorPostRegistration = \"post-registration\"\n    ConnectorEmailHandle      = \"email-with-error-handling\"\n    ConnectorSaveRetry        = \"save-with-retry\"\n)",{"id":1713,"title":1714,"titles":1715,"content":1716,"level":19},"/v1.0.7/cookbook/building-pipelines#step-3-define-your-business-logic","Step 3: Define Your Business Logic",[1690],"Write the core business logic as pure functions: // Validation logic\nfunc validateUser(ctx context.Context, user User) error {\n    if user.Email == \"\" {\n        return errors.New(\"email is required\")\n    }\n    if !strings.Contains(user.Email, \"@\") {\n        return errors.New(\"invalid email format\")\n    }\n    if len(user.Username) \u003C 3 {\n        return errors.New(\"username must be at least 3 characters\")\n    }\n    if len(user.Password) \u003C 8 {\n        return errors.New(\"password must be at least 8 characters\")\n    }\n    return 
nil\n}\n\n// Check for existing user\nfunc checkDuplicate(ctx context.Context, user User) error {\n    // Simulate database check\n    existingEmails := map[string]bool{\n        \"admin@example.com\": true,\n        \"test@example.com\":  true,\n    }\n    \n    if existingEmails[user.Email] {\n        return fmt.Errorf(\"email %s already registered\", user.Email)\n    }\n    return nil\n}\n\n// Normalize and enrich data\nfunc enrichUser(ctx context.Context, user User) User {\n    // Normalize email\n    user.Email = strings.ToLower(strings.TrimSpace(user.Email))\n    \n    // Set defaults\n    if user.Country == \"\" {\n        user.Country = detectCountry(ctx)\n    }\n    \n    if user.Preferences.Language == \"\" {\n        user.Preferences.Language = \"en\"\n    }\n    \n    if user.Preferences.Theme == \"\" {\n        user.Preferences.Theme = \"light\"\n    }\n    \n    // Set timestamps\n    user.CreatedAt = time.Now()\n    user.ID = generateID()\n    \n    return user\n}\n\n// Save to database\nfunc saveUser(ctx context.Context, user User) error {\n    // Simulate database save\n    fmt.Printf(\"Saving user: %s\\n\", user.Email)\n    \n    // In real app:\n    // return db.Save(ctx, user)\n    \n    return nil\n}\n\n// Send welcome email\nfunc sendWelcomeEmail(ctx context.Context, user User) error {\n    // Simulate email sending\n    fmt.Printf(\"Sending welcome email to: %s\\n\", user.Email)\n    \n    // In real app:\n    // return emailService.Send(ctx, WelcomeEmail{\n    //     To: user.Email,\n    //     Name: user.FullName,\n    // })\n    \n    return nil\n}\n\n// Log for analytics\nfunc logRegistration(ctx context.Context, user User) error {\n    fmt.Printf(\"[ANALYTICS] New user registered: %s from %s\\n\", \n        user.Username, user.Country)\n    return nil\n}\n\n// Error handlers\nfunc logEmailError(ctx context.Context, err *pipz.Error[User]) error {\n    fmt.Printf(\"Failed to send welcome email: %v\\n\", err.Err)\n    return nil\n}\n\n// 
Helper functions\nfunc detectCountry(ctx context.Context) string {\n    // In real app: use GeoIP or similar\n    return \"US\"\n}\n\nfunc generateID() string {\n    return fmt.Sprintf(\"user_%d\", time.Now().UnixNano())\n}",{"id":1718,"title":1719,"titles":1720,"content":1721,"level":19},"/v1.0.7/cookbook/building-pipelines#step-4-define-identities-and-processors","Step 4: Define Identities and Processors",[1690],"Define identities upfront, then wrap your business logic with pipz processors: // Define identities upfront\nvar (\n    // Processor identities\n    ValidateID        = pipz.NewIdentity(ProcessorValidate, \"Validates user input fields\")\n    CheckDuplicateID  = pipz.NewIdentity(ProcessorCheckDuplicate, \"Checks for duplicate user accounts\")\n    EnrichID          = pipz.NewIdentity(ProcessorEnrich, \"Enriches user data with defaults\")\n    SaveID            = pipz.NewIdentity(ProcessorSave, \"Saves user to database\")\n    SendWelcomeID     = pipz.NewIdentity(ProcessorSendWelcome, \"Sends welcome email to user\")\n    LogAnalyticsID    = pipz.NewIdentity(ProcessorLogAnalytics, \"Logs registration to analytics system\")\n    LogEmailErrorID   = pipz.NewIdentity(\"log_email_error\", \"Logs email sending errors\")\n\n    // Connector identities\n    RegistrationID        = pipz.NewIdentity(PipelineRegistration, \"Complete user registration pipeline\")\n    PostRegistrationID    = pipz.NewIdentity(ConnectorPostRegistration, \"Parallel post-registration tasks\")\n    EmailHandleID         = pipz.NewIdentity(ConnectorEmailHandle, \"Email sending with error recovery\")\n    SaveRetryID           = pipz.NewIdentity(ConnectorSaveRetry, \"Database save with exponential backoff\")\n    RobustRegistrationID  = pipz.NewIdentity(\"robust-registration\", \"Robust registration with retry and error handling\")\n)\n\n// Processors as reusable variables\nvar (\n    // Validation processors\n    ValidateUser    = pipz.Effect(ValidateID, validateUser)\n    CheckDuplicate  
= pipz.Effect(CheckDuplicateID, checkDuplicate)\n\n    // Transformation processors\n    EnrichUser = pipz.Transform(EnrichID, enrichUser)\n\n    // Persistence processors\n    SaveUser = pipz.Effect(SaveID, saveUser)\n\n    // Post-registration processors\n    SendWelcome  = pipz.Effect(SendWelcomeID, sendWelcomeEmail)\n    LogAnalytics = pipz.Effect(LogAnalyticsID, logRegistration)\n\n    // Error handling\n    LogEmailError = pipz.Effect(LogEmailErrorID, logEmailError)\n)\n\n// Note: Since Concurrent requires Cloner, implement it:\nfunc (u User) Clone() User {\n    // User has no pointer fields, so simple copy works\n    return u\n}",{"id":1723,"title":1724,"titles":1725,"content":1726,"level":19},"/v1.0.7/cookbook/building-pipelines#step-5-define-your-connectors","Step 5: Define Your Connectors",[1690],"Define connector identities upfront, then compose processors into sequences and connectors: // Composed connectors\nvar (\n    // Basic registration pipeline\n    RegistrationPipeline = pipz.NewSequence[User](RegistrationID,\n        // Validation phase\n        ValidateUser,\n        CheckDuplicate,\n\n        // Transformation phase\n        EnrichUser,\n\n        // Persistence phase\n        SaveUser,\n\n        // Post-registration phase (parallel)\n        pipz.NewConcurrent(PostRegistrationID,\n            SendWelcome,\n            LogAnalytics,\n        ),\n    )\n\n    // Robust email sending with error handling\n    EmailWithErrorHandling = pipz.NewHandle(EmailHandleID,\n        SendWelcome,\n        LogEmailError,\n    )\n\n    // Save with retry logic\n    SaveWithRetry = pipz.NewBackoff(SaveRetryID,\n        SaveUser,\n        3,\n        100*time.Millisecond,\n    )\n\n    // Robust registration pipeline\n    RobustRegistrationPipeline = pipz.NewSequence[User](RobustRegistrationID,\n        // Validation with early exit\n        ValidateUser,\n        CheckDuplicate,\n\n        // Enrich data\n        EnrichUser,\n\n        // Save with retry\n     
   SaveWithRetry,\n\n        // Non-critical operations shouldn't fail registration\n        pipz.NewConcurrent(PostRegistrationID,\n            EmailWithErrorHandling,\n            LogAnalytics,\n        ),\n    )\n)",{"id":1728,"title":1729,"titles":1730,"content":1731,"level":19},"/v1.0.7/cookbook/building-pipelines#step-6-create-functions-to-execute-pipelines","Step 6: Create Functions to Execute Pipelines",[1690],"// Simple registration\nfunc RegisterUser(ctx context.Context, user User) (User, error) {\n    return RegistrationPipeline.Process(ctx, user)\n}\n\n// Robust registration with error handling\nfunc RegisterUserRobust(ctx context.Context, user User) (User, error) {\n    return RobustRegistrationPipeline.Process(ctx, user)\n}\n\nfunc main() {\n    // Test with valid user\n    newUser := User{\n        Email:    \"john.doe@example.com\",\n        Username: \"johndoe\",\n        Password: \"securepassword123\", // Would be hashed in real app\n        FullName: \"John Doe\",\n    }\n    \n    ctx := context.Background()\n    registered, err := RegisterUser(ctx, newUser)\n    if err != nil {\n        var pipeErr *pipz.Error[User]\n        if errors.As(err, &pipeErr) {\n            fmt.Printf(\"Registration failed at %v: %v\\n\", pipeErr.Path, pipeErr.Err)\n        } else {\n            fmt.Printf(\"Registration failed: %v\\n\", err)\n        }\n        return\n    }\n    \n    fmt.Printf(\"Successfully registered: %+v\\n\", registered)\n}",{"id":1733,"title":1734,"titles":1735,"content":1736,"level":19},"/v1.0.7/cookbook/building-pipelines#step-7-dynamic-pipeline-modification","Step 7: Dynamic Pipeline Modification",[1690],"Pipelines can be modified at runtime: // Define identities for dynamic processors\nvar (\n    FraudCheckID       = pipz.NewIdentity(\"fraud_check\", \"Checks user against fraud detection database\")\n    NewEmailProviderID = pipz.NewIdentity(ProcessorSendWelcome, \"Sends welcome email using new provider\")\n)\n\n// Define processors\nvar 
(\n    FraudCheck = pipz.Effect(FraudCheckID, func(ctx context.Context, user User) error {\n        // Check against fraud database\n        fmt.Printf(\"Checking user %s for fraud indicators\\n\", user.Email)\n        return nil\n    })\n\n    NewEmailSender = pipz.Effect(NewEmailProviderID, func(ctx context.Context, user User) error {\n        fmt.Printf(\"[NEW PROVIDER] Sending welcome email to: %s\\n\", user.Email)\n        return nil\n    })\n)\n\n// Add fraud detection for high-risk domains\nfunc AddFraudDetection() {\n    // Insert after validation but before enrichment\n    RegistrationPipeline.After(ProcessorCheckDuplicate, FraudCheck)\n}\n\n// Replace email sender for A/B testing\nfunc UseNewEmailProvider() {\n    RegistrationPipeline.Replace(ProcessorSendWelcome, NewEmailSender)\n}",{"id":1738,"title":1739,"titles":1740,"content":1741,"level":19},"/v1.0.7/cookbook/building-pipelines#step-8-add-conditional-logic","Step 8: Add Conditional Logic",[1690],"Add premium user handling with the Switch connector: // Additional constants\nconst (\n    ProcessorRegularOnboarding  = \"regular_onboarding\"\n    ProcessorPremiumOnboarding  = \"premium_onboarding\"\n    ProcessorAssignManager      = \"assign_account_manager\"\n    RouterUserType              = \"user-type-router\"\n    PipelinePremiumFlow         = \"premium-flow\"\n    PipelineConditionalReg      = \"conditional-registration\"\n)\n\n// Route keys\ntype UserType string\n\nconst (\n    TypeRegular UserType = \"regular\"\n    TypePremium UserType = \"premium\"\n)\n\n// Business logic\nfunc detectUserType(ctx context.Context, user User) UserType {\n    // Premium domains get premium accounts\n    premiumDomains := []string{\"company.com\", \"enterprise.org\"}\n    \n    emailDomain := strings.Split(user.Email, \"@\")[1]\n    for _, domain := range premiumDomains {\n        if emailDomain == domain {\n            return TypePremium\n        }\n    }\n    return TypeRegular\n}\n\nfunc regularOnboarding(ctx 
context.Context, u User) error {\n    fmt.Println(\"Starting regular onboarding flow\")\n    return nil\n}\n\nfunc premiumOnboarding(ctx context.Context, u User) error {\n    fmt.Println(\"Starting premium onboarding flow\")\n    return nil\n}\n\nfunc assignAccountManager(ctx context.Context, u User) error {\n    fmt.Println(\"Assigning dedicated account manager\")\n    return nil\n}\n\n// Define identities upfront\nvar (\n    // Processor identities\n    RegularOnboardingID  = pipz.NewIdentity(ProcessorRegularOnboarding, \"Executes regular user onboarding flow\")\n    PremiumOnboardingID  = pipz.NewIdentity(ProcessorPremiumOnboarding, \"Executes premium user onboarding flow\")\n    AssignManagerID      = pipz.NewIdentity(ProcessorAssignManager, \"Assigns dedicated account manager to user\")\n\n    // Connector identities\n    PremiumFlowID        = pipz.NewIdentity(PipelinePremiumFlow, \"Premium user onboarding workflow\")\n    UserTypeRouterID     = pipz.NewIdentity(RouterUserType, \"Routes users to appropriate onboarding flow\")\n    ConditionalRegID     = pipz.NewIdentity(PipelineConditionalReg, \"Registration with conditional user type routing\")\n)\n\n// Processors\nvar (\n    RegularOnboarding  = pipz.Effect(RegularOnboardingID, regularOnboarding)\n    PremiumOnboarding  = pipz.Effect(PremiumOnboardingID, premiumOnboarding)\n    AssignManager      = pipz.Effect(AssignManagerID, assignAccountManager)\n)\n\n// Connectors\nvar (\n    // Premium user flow\n    PremiumFlow = pipz.NewSequence[User](PremiumFlowID,\n        PremiumOnboarding,\n        AssignManager,\n    )\n\n    // Router for user types\n    UserTypeRouter = pipz.NewSwitch(UserTypeRouterID, detectUserType).\n        AddRoute(TypeRegular, RegularOnboarding).\n        AddRoute(TypePremium, PremiumFlow)\n\n    // Conditional registration pipeline\n    ConditionalRegistrationPipeline = pipz.NewSequence[User](ConditionalRegID,\n        // Common steps\n        ValidateUser,\n        CheckDuplicate,\n    
    EnrichUser,\n        SaveUser,\n\n        // Route based on user type\n        UserTypeRouter,\n    )\n)\n\n// Function to execute\nfunc RegisterUserConditional(ctx context.Context, user User) (User, error) {\n    return ConditionalRegistrationPipeline.Process(ctx, user)\n}",{"id":1743,"title":1744,"titles":1745,"content":1746,"level":19},"/v1.0.7/cookbook/building-pipelines#step-9-test-your-pipeline","Step 9: Test Your Pipeline",[1690],"func TestRegistrationPipeline(t *testing.T) {\n    pipeline := createRegistrationPipeline()\n    \n    tests := []struct {\n        name    string\n        user    User\n        wantErr bool\n    }{\n        {\n            name: \"valid user\",\n            user: User{\n                Email:    \"valid@example.com\",\n                Username: \"validuser\",\n                Password: \"password123\",\n            },\n            wantErr: false,\n        },\n        {\n            name: \"invalid email\",\n            user: User{\n                Email:    \"invalid\",\n                Username: \"validuser\",\n                Password: \"password123\",\n            },\n            wantErr: true,\n        },\n        {\n            name: \"duplicate email\",\n            user: User{\n                Email:    \"admin@example.com\",\n                Username: \"newadmin\",\n                Password: \"password123\",\n            },\n            wantErr: true,\n        },\n    }\n    \n    for _, tt := range tests {\n        t.Run(tt.name, func(t *testing.T) {\n            _, err := pipeline.Process(context.Background(), tt.user)\n            if (err != nil) != tt.wantErr {\n                t.Errorf(\"Process() error = %v, wantErr %v\", err, tt.wantErr)\n            }\n        })\n    }\n}",{"id":1748,"title":1749,"titles":1750,"content":1751,"level":19},"/v1.0.7/cookbook/building-pipelines#step-10-production-resilience","Step 10: Production Resilience",[1690],"For production deployments with external service calls, layer 
additional resilience patterns: // Additional constants for resilience\nconst (\n    ConnectorAPIBreaker   = \"api-circuit-breaker\"\n    ConnectorAPIRateLimit = \"api-rate-limit\"\n    ConnectorAPITimeout   = \"api-timeout\"\n    PipelineProduction    = \"production-registration\"\n)\n\n// Define resilience identities\nvar (\n    APIBreakerID      = pipz.NewIdentity(ConnectorAPIBreaker, \"Prevents cascading failures to external services\")\n    APIRateLimitID    = pipz.NewIdentity(ConnectorAPIRateLimit, \"Limits request rate to external APIs\")\n    APITimeoutID      = pipz.NewIdentity(ConnectorAPITimeout, \"Enforces timeout on external calls\")\n    ProductionRegID   = pipz.NewIdentity(PipelineProduction, \"Production registration with full resilience\")\n)\n\n// Wrap external service calls with resilience stack\nvar (\n    // Layer: Timeout → CircuitBreaker → Backoff → Effect\n    ResilientSave = pipz.NewTimeout(APITimeoutID,\n        pipz.NewCircuitBreaker(APIBreakerID,\n            pipz.NewBackoff(SaveRetryID,\n                SaveUser,\n                3,\n                100*time.Millisecond,\n            ),\n            5,               // Open after 5 failures\n            30*time.Second,  // Try recovery after 30s\n        ),\n        5*time.Second, // 5 second timeout\n    )\n\n    // Rate limit external email API\n    RateLimitedEmail = pipz.NewRateLimiter(APIRateLimitID,\n        10,  // 10 requests per second\n        5,   // Burst of 5\n        EmailWithErrorHandling,\n    )\n\n    // Production pipeline with full resilience\n    ProductionRegistrationPipeline = pipz.NewSequence[User](ProductionRegID,\n        ValidateUser,\n        CheckDuplicate,\n        EnrichUser,\n        ResilientSave,\n        pipz.NewConcurrent(PostRegistrationID,\n            RateLimitedEmail,\n            LogAnalytics,\n        ),\n    
)\n)",{"id":1753,"title":1754,"titles":1755,"content":1756,"level":19},"/v1.0.7/cookbook/building-pipelines#step-11-observability-with-pipeline-wrapper","Step 11: Observability with Pipeline Wrapper",[1690],"Wrap your pipeline for distributed tracing and correlation: import \"github.com/zoobzio/capitan\"\n\n// Set up signal handlers before creating pipelines\nfunc init() {\n    // Monitor circuit breaker state\n    capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n        execID, _ := pipz.ExecutionIDFromContext(ctx)\n        name, _ := pipz.FieldName.From(e)\n        log.Printf(\"[ALERT] Circuit opened: %s (exec=%s)\", name, execID)\n    })\n\n    // Track retries\n    capitan.Hook(pipz.SignalRetryExhausted, func(ctx context.Context, e *capitan.Event) {\n        execID, _ := pipz.ExecutionIDFromContext(ctx)\n        name, _ := pipz.FieldName.From(e)\n        attempts, _ := pipz.FieldMaxAttempts.From(e)\n        log.Printf(\"[ERROR] Retry exhausted: %s after %d attempts (exec=%s)\", name, attempts, execID)\n    })\n\n    // Monitor rate limiting\n    capitan.Hook(pipz.SignalRateLimiterDropped, func(ctx context.Context, e *capitan.Event) {\n        name, _ := pipz.FieldName.From(e)\n        log.Printf(\"[WARN] Request dropped by rate limiter: %s\", name)\n    })\n}\n\n// Wrap with Pipeline for execution context\nvar ObservableRegistration = pipz.NewPipeline(ProductionRegID, ProductionRegistrationPipeline)\n\n// Each call gets unique execution ID for correlation\nfunc RegisterUserProduction(ctx context.Context, user User) (User, error) {\n    return ObservableRegistration.Process(ctx, user)\n}",{"id":1758,"title":1759,"titles":1760,"content":1761,"level":19},"/v1.0.7/cookbook/building-pipelines#key-takeaways","Key Takeaways",[1690],"Start Simple: Begin with basic processors and compose themAdd Robustness Gradually: Layer in retry, timeout, circuit breaker, and rate limitingUse the Right Connector: Sequence for steps, 
Concurrent for parallel workHandle Errors Appropriately: Critical vs non-critical operationsWrap for Observability: Use Pipeline wrapper for correlation IDsTest Each Component: Processors are independently testable",{"id":1763,"title":1764,"titles":1765,"content":1766,"level":19},"/v1.0.7/cookbook/building-pipelines#see-also","See Also",[1690],"Safety & Reliability - Error recovery patternsTesting Pipelines - Comprehensive testing strategiesPerformance - Optimization tipsLibrary Resilience - Expose resilience via With* APIExtensible Vocabulary - Create domain-specific APIs html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sfm-E, html code.shiki .sfm-E{--shiki-default:var(--shiki-variable)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sSYET, html code.shiki 
.sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}",{"id":1768,"title":1769,"titles":1770,"content":1771,"level":9},"/v1.0.7/cookbook/library-resilience","Recipe: Library Resilience",[],"Expose pipz resilience patterns to library consumers via functional options",{"id":1773,"title":1769,"titles":1774,"content":1775,"level":9},"/v1.0.7/cookbook/library-resilience#recipe-library-resilience",[],"When building a library where the domain type T is known, you can expose pipz resilience patterns to consumers via a clean With* functional options API.",{"id":1777,"title":920,"titles":1778,"content":1779,"level":19},"/v1.0.7/cookbook/library-resilience#the-pattern",[1769],"This recipe demonstrates how zyn (an LLM orchestration library) uses pipz to give consumers composable resilience without exposing pipz internals.",{"id":1781,"title":1782,"titles":1783,"content":1784,"level":19},"/v1.0.7/cookbook/library-resilience#core-concept","Core Concept",[1769],"// The Option type wraps a pipeline with additional behavior\ntype Option func(pipz.Chainable[*Request]) pipz.Chainable[*Request]\n\n// Each With* function returns an Option that wraps the pipeline\nfunc WithRetry(attempts int) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewRetry(retryID, p, attempts)\n    }\n} Consumers compose options at construction time: client, _ := mylib.NewClient(\n    mylib.WithRetry(3),\n    mylib.WithTimeout(10*time.Second),\n    mylib.WithCircuitBreaker(5, 30*time.Second),\n) The result is a pipeline: 
CircuitBreaker(Timeout(Retry(base)))",{"id":1786,"title":1787,"titles":1788,"content":29,"level":19},"/v1.0.7/cookbook/library-resilience#implementation","Implementation",[1769],{"id":1790,"title":1791,"titles":1792,"content":1793,"level":35},"/v1.0.7/cookbook/library-resilience#define-your-request-type","Define Your Request Type",[1769,1787],"// Your library's request type\ntype Request struct {\n    Input    interface{}\n    Output   interface{}\n    Metadata map[string]string\n}",{"id":1795,"title":1796,"titles":1797,"content":1798,"level":35},"/v1.0.7/cookbook/library-resilience#define-resilience-identities","Define Resilience Identities",[1769,1787],"// Package-level identities for each resilience pattern\nvar (\n    retryID          = pipz.NewIdentity(\"mylib:retry\", \"Retries failed requests\")\n    backoffID        = pipz.NewIdentity(\"mylib:backoff\", \"Retries with exponential backoff\")\n    timeoutID        = pipz.NewIdentity(\"mylib:timeout\", \"Enforces request timeout\")\n    circuitBreakerID = pipz.NewIdentity(\"mylib:circuit-breaker\", \"Prevents cascading failures\")\n    rateLimitID      = pipz.NewIdentity(\"mylib:rate-limit\", \"Limits request rate\")\n    fallbackID       = pipz.NewIdentity(\"mylib:fallback\", \"Falls back to alternative on failure\")\n    errorHandlerID   = pipz.NewIdentity(\"mylib:error-handler\", \"Handles request errors\")\n)",{"id":1800,"title":1801,"titles":1802,"content":1803,"level":35},"/v1.0.7/cookbook/library-resilience#define-the-option-type","Define the Option Type",[1769,1787],"// Option wraps a pipeline with additional behavior\ntype Option func(pipz.Chainable[*Request]) pipz.Chainable[*Request]",{"id":1805,"title":1806,"titles":1807,"content":1808,"level":35},"/v1.0.7/cookbook/library-resilience#implement-with-functions","Implement With* Functions",[1769,1787],"// WithRetry retries failed requests up to maxAttempts times\nfunc WithRetry(maxAttempts int) Option {\n    return func(p pipz.Chainable[*Request]) 
pipz.Chainable[*Request] {\n        return pipz.NewRetry(retryID, p, maxAttempts)\n    }\n}\n\n// WithBackoff retries with exponential backoff\nfunc WithBackoff(maxAttempts int, baseDelay time.Duration) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewBackoff(backoffID, p, maxAttempts, baseDelay)\n    }\n}\n\n// WithTimeout enforces a maximum request duration\nfunc WithTimeout(duration time.Duration) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewTimeout(timeoutID, p, duration)\n    }\n}\n\n// WithCircuitBreaker prevents cascading failures\nfunc WithCircuitBreaker(failures int, recovery time.Duration) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewCircuitBreaker(circuitBreakerID, p, failures, recovery)\n    }\n}\n\n// WithRateLimit limits request rate\nfunc WithRateLimit(rps float64, burst int) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewRateLimiter(rateLimitID, rps, burst, p)\n    }\n}\n\n// WithFallback uses an alternative client on failure\nfunc WithFallback(fallback Client) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewFallback(fallbackID, p, fallback.pipeline())\n    }\n}\n\n// WithErrorHandler provides custom error handling\nfunc WithErrorHandler(handler pipz.Chainable[*pipz.Error[*Request]]) Option {\n    return func(p pipz.Chainable[*Request]) pipz.Chainable[*Request] {\n        return pipz.NewHandle(errorHandlerID, p, handler)\n    }\n}",{"id":1810,"title":1811,"titles":1812,"content":1813,"level":35},"/v1.0.7/cookbook/library-resilience#apply-options-in-constructor","Apply Options in Constructor",[1769,1787],"// Client is your library's main type\ntype Client struct {\n    pipe pipz.Chainable[*Request]\n}\n\n// NewClient creates a client with optional resilience 
configuration\nfunc NewClient(backend Backend, opts ...Option) *Client {\n    // Create the base pipeline (your core logic)\n    base := newTerminal(backend)\n\n    // Apply each option, wrapping the pipeline\n    pipe := pipz.Chainable[*Request](base)\n    for _, opt := range opts {\n        pipe = opt(pipe)\n    }\n\n    return &Client{pipe: pipe}\n}\n\n// Execute processes a request through the configured pipeline\nfunc (c *Client) Execute(ctx context.Context, req *Request) (*Request, error) {\n    return c.pipe.Process(ctx, req)\n}\n\n// pipeline exposes the internal pipeline for composition (e.g., WithFallback)\nfunc (c *Client) pipeline() pipz.Chainable[*Request] {\n    return c.pipe\n}",{"id":1815,"title":1816,"titles":1817,"content":1818,"level":35},"/v1.0.7/cookbook/library-resilience#create-the-base-terminal","Create the Base Terminal",[1769,1787],"var terminalID = pipz.NewIdentity(\"mylib:terminal\", \"Executes request against backend\")\n\nfunc newTerminal(backend Backend) pipz.Chainable[*Request] {\n    return pipz.Apply(terminalID, func(ctx context.Context, req *Request) (*Request, error) {\n        result, err := backend.Call(ctx, req.Input)\n        if err != nil {\n            return req, err\n        }\n        req.Output = result\n        return req, nil\n    })\n}",{"id":1820,"title":559,"titles":1821,"content":29,"level":19},"/v1.0.7/cookbook/library-resilience#usage-examples",[1769],{"id":1823,"title":1824,"titles":1825,"content":1826,"level":35},"/v1.0.7/cookbook/library-resilience#basic-retry","Basic Retry",[1769,559],"client := mylib.NewClient(backend,\n    mylib.WithRetry(3),\n)",{"id":1828,"title":1829,"titles":1830,"content":1831,"level":35},"/v1.0.7/cookbook/library-resilience#production-configuration","Production Configuration",[1769,559],"client := mylib.NewClient(backend,\n    mylib.WithRetry(3),\n    mylib.WithTimeout(10*time.Second),\n    mylib.WithCircuitBreaker(5, 30*time.Second),\n    mylib.WithRateLimit(100, 10),\n) Options are 
applied in order, creating: RateLimit(CircuitBreaker(Timeout(Retry(terminal))))",{"id":1833,"title":1834,"titles":1835,"content":1836,"level":35},"/v1.0.7/cookbook/library-resilience#with-fallback","With Fallback",[1769,559],"primary := mylib.NewClient(primaryBackend,\n    mylib.WithTimeout(5*time.Second),\n)\n\nfallback := mylib.NewClient(fallbackBackend,\n    mylib.WithTimeout(10*time.Second),\n)\n\nresilient := mylib.NewClient(primaryBackend,\n    mylib.WithTimeout(5*time.Second),\n    mylib.WithFallback(fallback),\n)",{"id":1838,"title":1839,"titles":1840,"content":1841,"level":19},"/v1.0.7/cookbook/library-resilience#real-world-example-zyn","Real-World Example: zyn",[1769],"zyn implements this pattern for LLM operations: // From zyn - creating a classifier with resilience\nclassifier, _ := zyn.Classification(\n    \"What type of email is this?\",\n    []string{\"spam\", \"urgent\", \"newsletter\", \"personal\"},\n    provider,\n    zyn.WithRetry(3),\n    zyn.WithTimeout(10*time.Second),\n    zyn.WithCircuitBreaker(5, 30*time.Second),\n)\n\n// Execute with full resilience\ncategory, _ := classifier.Fire(ctx, session, \"URGENT: Your account suspended!\") zyn provides 8 synapse types (Binary, Classification, Extraction, Transform, etc.), each accepting the same ...Option parameter. 
The resilience layer is completely orthogonal to the LLM logic.",{"id":1843,"title":1844,"titles":1845,"content":1846,"level":19},"/v1.0.7/cookbook/library-resilience#benefits","Benefits",[1769],"Clean API — Consumers see WithRetry(3), not pipz.NewRetry(id, p, 3)Composable — Options combine naturally in any orderEncapsulated — pipz is an implementation detail, not a public dependencyType-Safe — The generic T is fixed to your domain typeTestable — Each option can be tested in isolation",{"id":1848,"title":1849,"titles":1850,"content":1851,"level":19},"/v1.0.7/cookbook/library-resilience#key-insight","Key Insight",[1769],"By fixing pipz.Chainable[T] to your domain type (*Request, *SynapseRequest, etc.), you create a resilience vocabulary specific to your library. Consumers get production-grade resilience without learning pipz.",{"id":1853,"title":1764,"titles":1854,"content":1855,"level":19},"/v1.0.7/cookbook/library-resilience#see-also",[1769],"zyn source — Complete implementation of this patternBuilding Pipelines — Application-level pipeline constructionExtensible Vocabulary — Creating domain-specific APIs html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: 
var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":1857,"title":1858,"titles":1859,"content":1860,"level":9},"/v1.0.7/cookbook/extensible-vocabulary","Recipe: Extensible Application Vocabulary",[],"Create domain-specific APIs where library and user code compose seamlessly",{"id":1862,"title":1858,"titles":1863,"content":1864,"level":9},"/v1.0.7/cookbook/extensible-vocabulary#recipe-extensible-application-vocabulary",[],"When you fix pipz.Chainable[T] to a domain type, you create an extensible vocabulary — a set of composable primitives where library code and user code are indistinguishable.",{"id":1866,"title":920,"titles":1867,"content":1868,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#the-pattern",[1858],"This recipe demonstrates how cogito (an LLM reasoning framework) creates a vocabulary of reasoning primitives that users can extend with their own.",{"id":1870,"title":1782,"titles":1871,"content":1872,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#core-concept",[1858],"// Library fixes the generic to a domain type\ntype Thought struct { /* reasoning context */ }\n\n// Library provides primitives — all implement Chainable[*Thought]\nfunc NewDecide(key, question string) *Decide { ... }\nfunc NewAnalyze[T any](key, prompt string) *Analyze[T] { ... }\nfunc NewCategorize(key, question string, categories []string) *Categorize { ... 
}\n\n// Users implement custom primitives — same interface, first-class citizen\ntype MyCustomStep struct { ... }\nfunc (m *MyCustomStep) Process(ctx context.Context, t *Thought) (*Thought, error) { ... }\nfunc (m *MyCustomStep) Identity() pipz.Identity { ... }\nfunc (m *MyCustomStep) Schema() pipz.Node { ... }\nfunc (m *MyCustomStep) Close() error { ... }\n\n// Everything composes — library and user code, indistinguishable\npipeline := cogito.Sequence(\"my-flow\",\n    cogito.NewAnalyze[Data](\"parse\", \"extract fields\"),\n    &MyCustomStep{...},  // User's custom primitive\n    cogito.NewDecide(\"approve\", \"should we approve?\"),\n)",{"id":1874,"title":1787,"titles":1875,"content":29,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#implementation",[1858],{"id":1877,"title":1878,"titles":1879,"content":1880,"level":35},"/v1.0.7/cookbook/extensible-vocabulary#define-your-domain-type","Define Your Domain Type",[1858,1787],"// Thought is the reasoning context passed through the pipeline\ntype Thought struct {\n    ID      string\n    Intent  string\n    notes   []Note  // Accumulated reasoning history\n    Session *Session // LLM conversation state\n}\n\n// Implement Cloner for parallel processing\nfunc (t *Thought) Clone() *Thought {\n    clone := &Thought{\n        ID:      uuid.New().String(),\n        Intent:  t.Intent,\n        notes:   make([]Note, len(t.notes)),\n        Session: t.Session.Clone(),\n    }\n    copy(clone.notes, t.notes)\n    return clone\n}",{"id":1882,"title":1883,"titles":1884,"content":1885,"level":35},"/v1.0.7/cookbook/extensible-vocabulary#create-library-primitives","Create Library Primitives",[1858,1787],"Each primitive implements pipz.Chainable[*Thought]: // Decide — binary yes/no decision\ntype Decide struct {\n    identity pipz.Identity\n    key      string\n    question string\n}\n\nfunc NewDecide(key, question string) *Decide {\n    return &Decide{\n        identity: pipz.NewIdentity(key, \"Binary decision: \"+question),\n    
    key:      key,\n        question: question,\n    }\n}\n\nfunc (d *Decide) Process(ctx context.Context, t *Thought) (*Thought, error) {\n    // Get context from thought's notes\n    context := t.RenderContext()\n\n    // Make LLM decision\n    result, err := d.synapse.Fire(ctx, t.Session, context)\n    if err != nil {\n        return t, err\n    }\n\n    // Record decision in thought\n    t.AddNote(d.key, result.Answer, \"decide\")\n    return t, nil\n}\n\nfunc (d *Decide) Identity() pipz.Identity { return d.identity }\nfunc (d *Decide) Schema() pipz.Node {\n    return pipz.Node{Identity: d.identity, Type: \"decide\"}\n}\nfunc (d *Decide) Close() error { return nil } // Analyze — extract structured data\ntype Analyze[T any] struct {\n    identity pipz.Identity\n    key      string\n    prompt   string\n}\n\nfunc NewAnalyze[T any](key, prompt string) *Analyze[T] {\n    return &Analyze[T]{\n        identity: pipz.NewIdentity(key, \"Extraction: \"+prompt),\n        key:      key,\n        prompt:   prompt,\n    }\n}\n\nfunc (a *Analyze[T]) Process(ctx context.Context, t *Thought) (*Thought, error) {\n    context := t.RenderContext()\n\n    var result T\n    if err := a.synapse.Extract(ctx, t.Session, context, &result); err != nil {\n        return t, err\n    }\n\n    t.AddNote(a.key, result, \"analyze\")\n    return t, nil\n}\n\nfunc (a *Analyze[T]) Identity() pipz.Identity { return a.identity }\nfunc (a *Analyze[T]) Schema() pipz.Node {\n    return pipz.Node{Identity: a.identity, Type: \"analyze\"}\n}\nfunc (a *Analyze[T]) Close() error { return nil }",{"id":1887,"title":1888,"titles":1889,"content":1890,"level":35},"/v1.0.7/cookbook/extensible-vocabulary#provide-composition-helpers","Provide Composition Helpers",[1858,1787],"Wrap pipz connectors with domain-specific names: // Sequence executes steps in order\nfunc Sequence(name string, steps ...pipz.Chainable[*Thought]) *pipz.Sequence[*Thought] {\n    id := pipz.NewIdentity(name, \"Sequential reasoning 
chain\")\n    return pipz.NewSequence(id, steps...)\n}\n\n// Converge runs steps in parallel and synthesizes results\nfunc Converge(name, synthesisPrompt string, steps ...pipz.Chainable[*Thought]) pipz.Chainable[*Thought] {\n    id := pipz.NewIdentity(name, \"Parallel reasoning with synthesis\")\n    return pipz.NewConcurrent(id, steps...).WithReducer(synthesize(synthesisPrompt))\n}\n\n// Filter conditionally executes a step\nfunc Filter(name string, predicate func(*Thought) bool, step pipz.Chainable[*Thought]) pipz.Chainable[*Thought] {\n    id := pipz.NewIdentity(name, \"Conditional reasoning step\")\n    return pipz.NewFilter(id, predicate, step)\n}\n\n// Resilience wrappers\nfunc Retry(name string, step pipz.Chainable[*Thought], attempts int) pipz.Chainable[*Thought] {\n    id := pipz.NewIdentity(name, \"Retry reasoning step\")\n    return pipz.NewRetry(id, step, attempts)\n}\n\nfunc Timeout(name string, step pipz.Chainable[*Thought], duration time.Duration) pipz.Chainable[*Thought] {\n    id := pipz.NewIdentity(name, \"Timeout reasoning step\")\n    return pipz.NewTimeout(id, step, duration)\n}",{"id":1892,"title":1893,"titles":1894,"content":1895,"level":35},"/v1.0.7/cookbook/extensible-vocabulary#users-extend-the-vocabulary","Users Extend the Vocabulary",[1858,1787],"Users create custom primitives that compose with library primitives: // User's domain-specific reasoning step\ntype RiskAssessment struct {\n    identity   pipz.Identity\n    key        string\n    riskEngine RiskEngine\n}\n\nfunc NewRiskAssessment(key string, engine RiskEngine) *RiskAssessment {\n    return &RiskAssessment{\n        identity:   pipz.NewIdentity(key, \"Assesses risk using custom engine\"),\n        key:        key,\n        riskEngine: engine,\n    }\n}\n\nfunc (r *RiskAssessment) Process(ctx context.Context, t *Thought) (*Thought, error) {\n    // Extract data from thought\n    data := t.GetNote(\"parsed-data\")\n\n    // Use custom risk engine\n    score, err := 
r.riskEngine.Evaluate(ctx, data)\n    if err != nil {\n        return t, err\n    }\n\n    // Record in thought like any library primitive\n    t.AddNote(r.key, fmt.Sprintf(\"Risk score: %.2f\", score), \"risk-assessment\")\n    return t, nil\n}\n\nfunc (r *RiskAssessment) Identity() pipz.Identity { return r.identity }\nfunc (r *RiskAssessment) Schema() pipz.Node {\n    return pipz.Node{Identity: r.identity, Type: \"risk-assessment\"}\n}\nfunc (r *RiskAssessment) Close() error { return nil }",{"id":1897,"title":1898,"titles":1899,"content":1900,"level":35},"/v1.0.7/cookbook/extensible-vocabulary#compose-library-and-user-code","Compose Library and User Code",[1858,1787],"// Library primitives and user primitives compose identically\npipeline := cogito.Sequence(\"loan-approval\",\n    // Library primitive: extract application data\n    cogito.NewAnalyze[LoanApplication](\"parse\", \"extract loan application fields\"),\n\n    // Library primitive: search for similar applications\n    cogito.NewSeek(\"history\", \"similar loan applications\").WithLimit(5),\n\n    // User primitive: custom risk assessment\n    NewRiskAssessment(\"risk\", myRiskEngine),\n\n    // Library primitive: make decision\n    cogito.NewDecide(\"approve\", \"should we approve this loan?\"),\n\n    // Conditional: only if approved\n    cogito.Filter(\"if-approved\",\n        func(t *cogito.Thought) bool {\n            return t.GetNote(\"approve\") == \"yes\"\n        },\n        cogito.Sequence(\"post-approval\",\n            cogito.NewAnalyze[Terms](\"terms\", \"generate loan terms\"),\n            &NotifyApplicant{...}, // Another user primitive\n        ),\n    ),\n)",{"id":1902,"title":1903,"titles":1904,"content":1905,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#real-world-example-cogito","Real-World Example: cogito",[1858],"cogito provides 15+ reasoning primitives: CategoryPrimitivesDecisionDecide, Categorize, Assess, PrioritizeExtractionAnalyzeMemorySeek, Survey, Recall, 
ReflectControl FlowSift (LLM gate), Discern (LLM router)SynthesisConverge, AmplifySessionCompress, Truncate Users extend this vocabulary with domain-specific reasoning steps. A legal document analyzer might add: pipeline := cogito.Sequence(\"contract-review\",\n    cogito.NewAnalyze[Contract](\"parse\", \"extract contract clauses\"),\n    &ClauseRiskScorer{...},        // User: score each clause\n    &RegulatoryChecker{...},       // User: check compliance\n    cogito.NewCategorize(\"type\", \"contract type\", contractTypes),\n    cogito.NewDecide(\"flag\", \"does this need legal review?\"),\n)",{"id":1907,"title":1844,"titles":1908,"content":1909,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#benefits",[1858],"No Privileged Code — Library primitives have no special status; user primitives are first-classUniform Interface — Everything is Chainable[*Thought]Full Composability — Sequence, parallel, conditional, resilience — all work with any primitiveObservable — All primitives emit signals through the same mechanismSchema Introspection — Pipeline structure is discoverable at runtime",{"id":1911,"title":1912,"titles":1913,"content":1914,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#the-key-insight","The Key Insight",[1858],"When you fix Chainable[T] to your domain type, you're not just using pipz — you're creating a domain-specific language for composing operations on that type. The library provides vocabulary; users extend it. 
pipz.Chainable[T]  →  fix T to *Thought  →  Reasoning vocabulary\n                  →  fix T to *File     →  File processing vocabulary\n                  →  fix T to *Request  →  API handling vocabulary Each vocabulary inherits pipz's composition, resilience, and observability — but speaks in domain terms.",{"id":1916,"title":1764,"titles":1917,"content":1918,"level":19},"/v1.0.7/cookbook/extensible-vocabulary#see-also",[1858],"cogito source — Complete implementation of this patternBuilding Pipelines — Application-level pipeline constructionLibrary Resilience — Expose resilience via With* API html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki 
.skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}",{"id":1920,"title":1921,"titles":1922,"content":1923,"level":9},"/v1.0.7/reference/cheatsheet","pipz Quick Reference",[],"Fast-reference cheatsheet for processor selection, connector decision trees, and common pipeline patterns",{"id":1925,"title":1921,"titles":1926,"content":29,"level":9},"/v1.0.7/reference/cheatsheet#pipz-quick-reference",[],{"id":1928,"title":1929,"titles":1930,"content":1931,"level":19},"/v1.0.7/reference/cheatsheet#processor-selection-10-seconds","Processor Selection (10 seconds)",[1921],"If you need to...Use...Can fail?ExampleTransform data (pure)TransformNostrings.ToUpperTransform data (can fail)ApplyYesParse JSON, validateSide effects onlyEffectYesLogging, metricsConditional changesMutateNoFeature flagsOptional enhancementEnrichLogs errorsAdd metadataPass/block dataFilterNoAccess control",{"id":1933,"title":1934,"titles":1935,"content":1936,"level":19},"/v1.0.7/reference/cheatsheet#connector-decision-10-seconds","Connector Decision (10 seconds)",[1921],"Need parallel? ──No──→ Need conditions? ──No──→ Sequence\n      │                       │\n      │                      Yes → Switch\n      │\n     Yes → Bounded? ──Yes──→ WorkerPool\n            │\n           No → Need all results? ──No──→ Fastest? → Race\n                      │                        │\n                      │                       Best? → Contest\n                      │\n                     Yes → Fire & forget? 
──Yes──→ Scaffold\n                                 │\n                                No → Concurrent",{"id":1938,"title":1939,"titles":1940,"content":29,"level":19},"/v1.0.7/reference/cheatsheet#common-patterns-copy-paste","Common Patterns (Copy & Paste)",[1921],{"id":1942,"title":1943,"titles":1944,"content":1945,"level":35},"/v1.0.7/reference/cheatsheet#basic-pipeline","Basic Pipeline",[1921,1939],"// Define identities\nvar (\n    PipelineID  = pipz.NewIdentity(\"name\", \"Pipeline description\")\n    ValidateID  = pipz.NewIdentity(\"validate\", \"Validate input data\")\n    NormalizeID = pipz.NewIdentity(\"normalize\", \"Normalize data format\")\n    LogID       = pipz.NewIdentity(\"log\", \"Log processed data\")\n)\n\npipeline := pipz.NewSequence[T](PipelineID,\n    pipz.Apply(ValidateID, validateFunc),\n    pipz.Transform(NormalizeID, normalizeFunc),\n    pipz.Effect(LogID, logFunc),\n)",{"id":1947,"title":1948,"titles":1949,"content":1950,"level":35},"/v1.0.7/reference/cheatsheet#retry-with-backoff","Retry with Backoff",[1921,1939],"var APIID = pipz.NewIdentity(\"api\", \"Retry API calls on failure\")\n\nreliable := pipz.NewRetry(APIID, apiCall, 3)",{"id":1952,"title":1953,"titles":1954,"content":1955,"level":35},"/v1.0.7/reference/cheatsheet#circuit-breaker","Circuit Breaker",[1921,1939],"var ServiceID = pipz.NewIdentity(\"service\", \"Protect downstream service\")\n\nprotected := pipz.NewCircuitBreaker(ServiceID, processor, 5, 30*time.Second).\n    SetSuccessThreshold(2) // Optional: require 2 successes to close from half-open",{"id":1957,"title":1958,"titles":1959,"content":1960,"level":35},"/v1.0.7/reference/cheatsheet#rate-limiting-singleton","Rate Limiting (Singleton!)",[1921,1939],"var (\n    RateLimitID = pipz.NewIdentity(\"api\", \"Limit API request rate\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"The rate-limited processor\")\n    limiter     = pipz.NewRateLimiter(RateLimitID, 100, 10, // 100/sec, burst 10\n                      
pipz.Apply(ProcessorID, processFunc)).\n                      SetMode(\"wait\") // Or \"drop\"\n)",{"id":1962,"title":1963,"titles":1964,"content":1965,"level":35},"/v1.0.7/reference/cheatsheet#timeout-protection","Timeout Protection",[1921,1939],"var SlowID = pipz.NewIdentity(\"slow\", \"Enforce timeout on slow operations\")\n\nbounded := pipz.NewTimeout(SlowID, processor, 5*time.Second)",{"id":1967,"title":1968,"titles":1969,"content":1970,"level":35},"/v1.0.7/reference/cheatsheet#fallback-on-error","Fallback on Error",[1921,1939],"var SafeID = pipz.NewIdentity(\"safe\", \"Fallback to safe default\")\n\nsafe := pipz.NewFallback(SafeID, riskyOp, safeDefault)",{"id":1972,"title":271,"titles":1973,"content":1974,"level":35},"/v1.0.7/reference/cheatsheet#parallel-processing",[1921,1939],"var (\n    NotifyID  = pipz.NewIdentity(\"notify\", \"Send parallel notifications\")\n    LimitedID = pipz.NewIdentity(\"limited\", \"Process with worker pool\")\n)\n\n// Unbounded - Type must implement Cloner[T]\nparallel := pipz.NewConcurrent[T](NotifyID,\n    sendEmail, sendSMS, logEvent,\n)\n\n// Bounded - limit to N concurrent operations\npool := pipz.NewWorkerPool[T](LimitedID, 5,\n    apiCall1, apiCall2, apiCall3, // ... 
many more\n)",{"id":1976,"title":1977,"titles":1978,"content":1979,"level":35},"/v1.0.7/reference/cheatsheet#conditional-routing","Conditional Routing",[1921,1939],"var RouteID = pipz.NewIdentity(\"route\", \"Route by customer tier\")\n\nrouter := pipz.NewSwitch[T](RouteID, routeFunc).\n    AddRoute(\"premium\", premiumPipeline).\n    AddRoute(\"standard\", standardPipeline)",{"id":1981,"title":1982,"titles":1983,"content":1984,"level":35},"/v1.0.7/reference/cheatsheet#resilient-api-call","Resilient API Call",[1921,1939],"var (\n    RateID    = pipz.NewIdentity(\"rate\", \"Rate limit API calls\")\n    BreakerID = pipz.NewIdentity(\"breaker\", \"Protect API with circuit breaker\")\n    TimeoutID = pipz.NewIdentity(\"timeout\", \"Enforce API timeout\")\n    RetryID   = pipz.NewIdentity(\"retry\", \"Retry failed API calls\")\n)\n\napi := pipz.NewRateLimiter(RateID, 100, 10, // 100/sec, burst 10\n    pipz.NewCircuitBreaker(BreakerID,\n        pipz.NewTimeout(TimeoutID,\n            pipz.NewRetry(RetryID, apiCall, 3),\n            5*time.Second,\n        ),\n        5, 30*time.Second, // 5 failures, 30s reset\n    ),\n)",{"id":1986,"title":1987,"titles":1988,"content":1989,"level":35},"/v1.0.7/reference/cheatsheet#error-pipeline","Error Pipeline",[1921,1939],"var (\n    ErrorsID   = pipz.NewIdentity(\"errors\", \"Handle pipeline errors\")\n    LogErrorID = pipz.NewIdentity(\"log\", \"Log error details\")\n    ClassifyID = pipz.NewIdentity(\"classify\", \"Classify error type\")\n    RecoverID  = pipz.NewIdentity(\"recover\", \"Select recovery strategy\")\n)\n\nerrorHandler := pipz.NewSequence[*pipz.Error[T]](ErrorsID,\n    pipz.Effect(LogErrorID, logError),\n    pipz.Apply(ClassifyID, classifyError),\n    pipz.Switch(RecoverID, selectRecovery),\n)",{"id":1991,"title":1992,"titles":1993,"content":1994,"level":35},"/v1.0.7/reference/cheatsheet#pipeline-with-tracing","Pipeline with Tracing",[1921,1939],"var (\n    OrderPipelineID = pipz.NewIdentity(\"order-processing\", 
\"Main order flow\")\n    InternalSeqID   = pipz.NewIdentity(\"order-steps\", \"Processing sequence\")\n)\n\n// Wrap any chainable with execution context\npipeline := pipz.NewPipeline(OrderPipelineID,\n    pipz.NewSequence(InternalSeqID, validate, enrich, save),\n)\n\n// Extract IDs in processors or signal handlers\nfunc myProcessor(ctx context.Context, data T) (T, error) {\n    execID, _ := pipz.ExecutionIDFromContext(ctx)  // Unique per call\n    pipeID, _ := pipz.PipelineIDFromContext(ctx)   // Stable per pipeline\n    // ...\n}",{"id":1996,"title":1997,"titles":1998,"content":1999,"level":19},"/v1.0.7/reference/cheatsheet#clone-implementation","Clone Implementation",[1921],"type Data struct {\n    Items []Item\n    Meta  map[string]string\n}\n\nfunc (d Data) Clone() Data {\n    // Deep copy slices\n    items := make([]Item, len(d.Items))\n    copy(items, d.Items)\n\n    // Deep copy maps\n    meta := make(map[string]string, len(d.Meta))\n    for k, v := range d.Meta {\n        meta[k] = v\n    }\n\n    return Data{Items: items, Meta: meta}\n}",{"id":2001,"title":106,"titles":2002,"content":29,"level":19},"/v1.0.7/reference/cheatsheet#error-handling",[1921],{"id":2004,"title":2005,"titles":2006,"content":2007,"level":35},"/v1.0.7/reference/cheatsheet#access-error-details","Access Error Details",[1921,106],"result, err := pipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[T]\n    if errors.As(err, &pipeErr) {\n        fmt.Printf(\"Failed at: %v\\n\", pipeErr.Path)\n        fmt.Printf(\"Cause: %v\\n\", pipeErr.Err)\n        fmt.Printf(\"Data state: %+v\\n\", pipeErr.InputData)\n        fmt.Printf(\"Duration: %v\\n\", pipeErr.Duration)\n        if pipeErr.Timeout {\n            fmt.Println(\"Operation timed out\")\n        }\n    }\n}",{"id":2009,"title":2010,"titles":2011,"content":2012,"level":35},"/v1.0.7/reference/cheatsheet#handle-specific-errors","Handle Specific Errors",[1921,106],"var (\n    RecoverErrorsID = 
pipz.NewIdentity(\"recover\", \"Recover from specific errors\")\n    HandleErrorID   = pipz.NewIdentity(\"handle\", \"Handle temporary errors\")\n)\n\npipz.NewHandle(RecoverErrorsID, pipeline,\n    pipz.Effect(HandleErrorID, func(ctx context.Context, pipeErr *pipz.Error[T]) error {\n        if errors.Is(pipeErr.Err, ErrTemporary) {\n            log.Printf(\"Temporary error at %v: %v\", pipeErr.Path, pipeErr.Err)\n            // Perform cleanup or notification\n        }\n        return nil // Handler errors don't affect flow\n    }),\n)",{"id":2014,"title":2015,"titles":2016,"content":29,"level":19},"/v1.0.7/reference/cheatsheet#testing-patterns","Testing Patterns",[1921],{"id":2018,"title":2019,"titles":2020,"content":2021,"level":35},"/v1.0.7/reference/cheatsheet#mock-processor","Mock Processor",[1921,2015],"type Mock[T any] struct {\n    identity pipz.Identity\n    Returns  T\n    Error    error\n}\n\nfunc NewMock[T any](id pipz.Identity, returns T, err error) *Mock[T] {\n    return &Mock[T]{identity: id, Returns: returns, Error: err}\n}\n\nfunc (m *Mock[T]) Process(ctx context.Context, data T) (T, error) {\n    return m.Returns, m.Error\n}\n\nfunc (m *Mock[T]) Identity() pipz.Identity { return m.identity }\nfunc (m *Mock[T]) Schema() pipz.Node       { return pipz.Node{Identity: m.identity, Type: \"mock\"} }\nfunc (m *Mock[T]) Close() error            { return nil }",{"id":2023,"title":2024,"titles":2025,"content":2026,"level":35},"/v1.0.7/reference/cheatsheet#test-error-location","Test Error Location",[1921,2015],"_, err := pipeline.Process(ctx, data)\nvar pipeErr *pipz.Error[T]\nif errors.As(err, &pipeErr) {\n    // Path contains the full chain of identities, last element is where failure occurred\n    lastID := pipeErr.Path[len(pipeErr.Path)-1]\n    assert.Equal(t, \"expected-stage\", lastID.Name)\n}",{"id":2028,"title":2029,"titles":2030,"content":29,"level":19},"/v1.0.7/reference/cheatsheet#gotchas-tips","Gotchas & 
Tips",[1921],{"id":2032,"title":2033,"titles":2034,"content":2035,"level":35},"/v1.0.7/reference/cheatsheet#common-mistakes","❌ Common Mistakes",[1921,2029],"Creating rate limiters per request // WRONG - New instance each time\nfunc handle(req Request) {\n    limiter := pipz.NewRateLimiter(...) // ❌\n} Shallow copying in Clone() // WRONG - Shares slice memory\nfunc (d Data) Clone() Data {\n    return Data{Items: d.Items} // ❌\n} Not checking context in long operations // WRONG - Ignores cancellation\nfunc process(ctx context.Context, data T) (T, error) {\n    time.Sleep(10 * time.Second) // ❌\n    return data, nil\n}",{"id":2037,"title":2038,"titles":2039,"content":2040,"level":35},"/v1.0.7/reference/cheatsheet#best-practices","✅ Best Practices",[1921,2029],"Singleton rate limiters // RIGHT - Shared instance\nvar limiter = pipz.NewRateLimiter(...) // ✅ Deep copy in Clone() // RIGHT - New memory\nfunc (d Data) Clone() Data {\n    items := make([]Item, len(d.Items))\n    copy(items, d.Items) // ✅\n    return Data{Items: items}\n} Respect context // RIGHT - Cancellable\nfunc process(ctx context.Context, data T) (T, error) {\n    select {\n    case \u003C-time.After(10 * time.Second):\n        return data, nil\n    case \u003C-ctx.Done():\n        return data, ctx.Err() // ✅\n    }\n}",{"id":2042,"title":2043,"titles":2044,"content":2045,"level":19},"/v1.0.7/reference/cheatsheet#type-constraints","Type Constraints",[1921],"ConnectorRequiresConcurrentT implements Cloner[T]RaceT implements Cloner[T]ContestT implements Cloner[T]All othersAny type T",{"id":2047,"title":2048,"titles":2049,"content":2050,"level":19},"/v1.0.7/reference/cheatsheet#performance-tips","Performance Tips",[1921],"Sequence: O(n) - minimize processor countConcurrent: Overhead from goroutines - use for expensive operationsTransform: Zero allocations - prefer over Apply when possibleEffect: Use for metrics/logging without data copySwitch: Single branch execution - efficient 
routing",{"id":2052,"title":2053,"titles":2054,"content":29,"level":19},"/v1.0.7/reference/cheatsheet#context-patterns","Context Patterns",[1921],{"id":2056,"title":2057,"titles":2058,"content":2059,"level":35},"/v1.0.7/reference/cheatsheet#add-request-id","Add request ID",[1921,2053],"ctx := context.WithValue(ctx, \"request-id\", uuid.New())",{"id":2061,"title":2062,"titles":2063,"content":2064,"level":35},"/v1.0.7/reference/cheatsheet#set-timeout","Set timeout",[1921,2053],"ctx, cancel := context.WithTimeout(ctx, 30*time.Second)\ndefer cancel()",{"id":2066,"title":2067,"titles":2068,"content":2069,"level":35},"/v1.0.7/reference/cheatsheet#check-cancellation","Check cancellation",[1921,2053],"select {\ncase \u003C-ctx.Done():\n    return ctx.Err()\ndefault:\n    // Continue processing\n} html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sW3Qg, html 
code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}",{"id":2071,"title":2072,"titles":2073,"content":2074,"level":9},"/v1.0.7/reference/types/error","Error[T]",[],"Rich error context type providing complete debugging information for pipeline failures",{"id":2076,"title":2077,"titles":2078,"content":2079,"level":9},"/v1.0.7/reference/types/error#errort","ErrorT",[],"Rich error context for pipeline failures with complete debugging information.",{"id":2081,"title":6,"titles":2082,"content":2083,"level":19},"/v1.0.7/reference/types/error#overview",[2077],"Error[T] provides comprehensive error information when pipeline processing fails. 
It captures not just what went wrong, but where, when, and with what data - giving you everything needed to debug production issues.",{"id":2085,"title":2086,"titles":2087,"content":2088,"level":19},"/v1.0.7/reference/types/error#type-definition","Type Definition",[2077],"type Error[T any] struct {\n    Timestamp time.Time    // When the error occurred\n    InputData T            // The data that caused the failure\n    Err       error        // The underlying error\n    Path      []Identity   // Complete processing path to failure point\n    Duration  time.Duration // How long before failure\n    Timeout   bool         // Whether it was a timeout\n    Canceled  bool         // Whether it was canceled\n}",{"id":2090,"title":2091,"titles":2092,"content":2093,"level":19},"/v1.0.7/reference/types/error#type-parameters","Type Parameters",[2077],"T - The type of data being processed when the error occurred",{"id":2095,"title":2096,"titles":2097,"content":29,"level":19},"/v1.0.7/reference/types/error#fields","Fields",[2077],{"id":2099,"title":2100,"titles":2101,"content":2102,"level":35},"/v1.0.7/reference/types/error#timestamp","Timestamp",[2077,2096],"Type: time.TimePurpose: Records the exact time when the error occurredUsage: Correlate with logs, metrics, and monitoring systems",{"id":2104,"title":2105,"titles":2106,"content":2107,"level":35},"/v1.0.7/reference/types/error#inputdata","InputData",[2077,2096],"Type: T (generic type parameter)Purpose: Preserves the exact input data that caused the failureUsage: Reproduce issues, debug data-specific problems, audit failures",{"id":2109,"title":2110,"titles":2111,"content":2112,"level":35},"/v1.0.7/reference/types/error#err","Err",[2077,2096],"Type: errorPurpose: The underlying error that caused the failureUsage: Access original error details, use with errors.Is and errors.AsPanic Recovery: When processors panic, this contains a panicError type with sanitized panic message and processor name for 
security",{"id":2114,"title":2115,"titles":2116,"content":2117,"level":35},"/v1.0.7/reference/types/error#path","Path",[2077,2096],"Type: []IdentityPurpose: Complete trace of processors/connectors leading to the failureUsage: Pinpoint exactly where in the pipeline the failure occurredNote: Each Identity contains both a name and description, providing rich context about each stage in the pipeline",{"id":2119,"title":2120,"titles":2121,"content":2122,"level":35},"/v1.0.7/reference/types/error#duration","Duration",[2077,2096],"Type: time.DurationPurpose: How long the operation ran before failingUsage: Identify performance issues, detect timeout patternsPanic Behavior: Always 0 for panic recovery - timing is not tracked when processors panic",{"id":2124,"title":544,"titles":2125,"content":2126,"level":35},"/v1.0.7/reference/types/error#timeout",[2077,2096],"Type: boolPurpose: Indicates if the error was caused by a timeoutUsage: Implement timeout-specific retry logic or alerting",{"id":2128,"title":2129,"titles":2130,"content":2131,"level":35},"/v1.0.7/reference/types/error#canceled","Canceled",[2077,2096],"Type: boolPurpose: Indicates if the error was caused by cancellationUsage: Distinguish intentional shutdowns from actual failures",{"id":2133,"title":2134,"titles":2135,"content":29,"level":19},"/v1.0.7/reference/types/error#methods","Methods",[2077],{"id":2137,"title":2138,"titles":2139,"content":2140,"level":35},"/v1.0.7/reference/types/error#error-string","Error() string",[2077,2134],"Returns a formatted error message with path and timing information. 
func (e *Error[T]) Error() string Format patterns: Timeout: \"path -> component timed out after 5s: context deadline exceeded\"Canceled: \"path -> component canceled after 2s: context canceled\"Normal: \"path -> component failed after 100ms: validation error\" Example: var (\n    OrderPipelineID  = pipz.NewIdentity(\"order-pipeline\", \"Process customer orders\")\n    ValidateID       = pipz.NewIdentity(\"validate\", \"Validate order fields\")\n    CheckInventoryID = pipz.NewIdentity(\"check-inventory\", \"Check product availability\")\n)\n\nerr := &Error[Order]{\n    Path: []Identity{\n        OrderPipelineID,\n        ValidateID,\n        CheckInventoryID,\n    },\n    Duration: 250 * time.Millisecond,\n    Err: errors.New(\"item out of stock\"),\n}\nfmt.Println(err.Error())\n// Output: \"order-pipeline -> validate -> check-inventory failed after 250ms: item out of stock\"",{"id":2142,"title":2143,"titles":2144,"content":2145,"level":35},"/v1.0.7/reference/types/error#unwrap-error","Unwrap() error",[2077,2134],"Returns the underlying error for compatibility with Go's error wrapping. func (e *Error[T]) Unwrap() error Usage: Enables errors.Is(err, targetErr) checkingEnables errors.As(err, &targetType) conversionMaintains compatibility with standard error handling Example: var pipeErr *pipz.Error[Order]\nif errors.As(err, &pipeErr) {\n    // Access Error[T] fields\n    fmt.Printf(\"Failed at: %v\\n\", pipeErr.Path)\n    fmt.Printf(\"Input data: %+v\\n\", pipeErr.InputData)\n\n    // Check underlying error\n    if errors.Is(pipeErr, sql.ErrNoRows) {\n        // Handle specific database error\n    }\n}",{"id":2147,"title":2148,"titles":2149,"content":2150,"level":35},"/v1.0.7/reference/types/error#istimeout-bool","IsTimeout() bool",[2077,2134],"Checks if the error was caused by a timeout. 
func (e *Error[T]) IsTimeout() bool Returns true when: The Timeout field is explicitly set to trueThe underlying error is context.DeadlineExceeded Example: result, err := pipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) && pipeErr.IsTimeout() {\n        // Implement timeout-specific handling\n        metrics.IncrementTimeouts()\n        return retryWithBackoff(data)\n    }\n}",{"id":2152,"title":2153,"titles":2154,"content":2155,"level":35},"/v1.0.7/reference/types/error#iscanceled-bool","IsCanceled() bool",[2077,2134],"Checks if the error was caused by cancellation. func (e *Error[T]) IsCanceled() bool Returns true when: The Canceled field is explicitly set to trueThe underlying error is context.Canceled Example: result, err := pipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) && pipeErr.IsCanceled() {\n        // Don't treat as failure - graceful shutdown\n        log.Info(\"Pipeline canceled during shutdown\")\n        return nil\n    }\n}",{"id":2157,"title":559,"titles":2158,"content":29,"level":19},"/v1.0.7/reference/types/error#usage-examples",[2077],{"id":2160,"title":2161,"titles":2162,"content":2163,"level":35},"/v1.0.7/reference/types/error#basic-error-handling","Basic Error Handling",[2077,559],"var OrderProcessingID = pipz.NewIdentity(\"order-processing\", \"Process customer orders\")\n\npipeline := pipz.NewSequence(OrderProcessingID,\n    validateOrder,\n    checkInventory,\n    processPayment,\n    shipOrder,\n)\n\nresult, err := pipeline.Process(ctx, order)\nif err != nil {\n    var pipeErr *pipz.Error[Order]\n    if errors.As(err, &pipeErr) {\n        log.WithFields(log.Fields{\n            \"path\":      strings.Join(pipeErr.Path, \" -> \"),\n            \"duration\":  pipeErr.Duration,\n            \"timestamp\": pipeErr.Timestamp,\n            \"order_id\":  pipeErr.InputData.ID,\n        }).Error(\"Order processing 
failed\", pipeErr.Err)\n    }\n}",{"id":2165,"title":2166,"titles":2167,"content":2168,"level":35},"/v1.0.7/reference/types/error#retry-logic-based-on-error-type","Retry Logic Based on Error Type",[2077,559],"func processWithRetry(ctx context.Context, pipeline Chainable[Data], data Data) (Data, error) {\n    for attempt := 0; attempt \u003C 3; attempt++ {\n        result, err := pipeline.Process(ctx, data)\n        if err == nil {\n            return result, nil\n        }\n\n        var pipeErr *pipz.Error[Data]\n        if !errors.As(err, &pipeErr) {\n            return data, err // Not a pipeline error\n        }\n\n        // Don't retry timeouts or cancellations\n        if pipeErr.IsTimeout() || pipeErr.IsCanceled() {\n            return data, err\n        }\n\n        // Retry with exponential backoff\n        time.Sleep(time.Duration(attempt+1) * time.Second)\n    }\n    return data, fmt.Errorf(\"max retries exceeded\")\n}",{"id":2170,"title":2171,"titles":2172,"content":2173,"level":35},"/v1.0.7/reference/types/error#error-monitoring-and-alerting","Error Monitoring and Alerting",[2077,559],"var MonitorID = pipz.NewIdentity(\"monitor\", \"Monitor pipeline errors\")\n\nfunc monitorPipeline(pipeline Chainable[Request]) Chainable[Request] {\n    return pipz.Handle(MonitorID,\n        pipeline,\n        func(ctx context.Context, req Request, err error) {\n            var pipeErr *pipz.Error[Request]\n            if !errors.As(err, &pipeErr) {\n                return // Not a pipeline error\n            }\n\n            // Send to monitoring system\n            metrics.RecordError(metrics.ErrorRecord{\n                Path:      pipeErr.Path,\n                Duration:  pipeErr.Duration,\n                Timeout:   pipeErr.IsTimeout(),\n                Canceled:  pipeErr.IsCanceled(),\n                RequestID: req.ID,\n            })\n\n            // Alert on critical paths\n            if containsPath(pipeErr.Path, \"payment\") && !pipeErr.IsCanceled() {\n   
             alerting.SendAlert(alerting.Critical,\n                    fmt.Sprintf(\"Payment processing failed: %v\", pipeErr))\n            }\n        },\n    )\n}",{"id":2175,"title":2176,"titles":2177,"content":2178,"level":35},"/v1.0.7/reference/types/error#debugging-with-full-context","Debugging with Full Context",[2077,559],"func debugFailure(err error) {\n    var pipeErr *pipz.Error[Order]\n    if !errors.As(err, &pipeErr) {\n        return\n    }\n\n    fmt.Println(\"=== Pipeline Failure Debug ===\")\n    fmt.Printf(\"Time: %v\\n\", pipeErr.Timestamp)\n    fmt.Printf(\"Duration: %v\\n\", pipeErr.Duration)\n    fmt.Printf(\"Path: %s\\n\", strings.Join(pipeErr.Path, \" -> \"))\n    fmt.Printf(\"Timeout: %v\\n\", pipeErr.IsTimeout())\n    fmt.Printf(\"Canceled: %v\\n\", pipeErr.IsCanceled())\n    fmt.Printf(\"Error: %v\\n\", pipeErr.Err)\n    fmt.Printf(\"Input Data:\\n%+v\\n\", pipeErr.InputData)\n\n    // Check for specific error types\n    var validationErr *ValidationError\n    if errors.As(pipeErr.Err, &validationErr) {\n        fmt.Printf(\"Validation failures: %v\\n\", validationErr.Fields)\n    }\n\n    // Check if it was a panic that was automatically recovered\n    if strings.Contains(pipeErr.Error(), \"panic in processor\") {\n        fmt.Println(\"This was a recovered panic (automatically handled by pipz)\")\n    }\n}",{"id":2180,"title":2181,"titles":2182,"content":29,"level":19},"/v1.0.7/reference/types/error#common-patterns","Common Patterns",[2077],{"id":2184,"title":2185,"titles":2186,"content":2187,"level":35},"/v1.0.7/reference/types/error#creating-custom-errors","Creating Custom Errors",[2077,2181],"var ExternalProcessorID = pipz.NewIdentity(\"external-processor\", \"Call external API\")\n\n// Wrap external errors with context\nfunc processExternal(ctx context.Context, data Data) (Data, error) {\n    result, err := externalAPI.Call(data)\n    if err != nil {\n        return data, &pipz.Error[Data]{\n            Timestamp: time.Now(),\n     
       InputData: data,\n            Err:       fmt.Errorf(\"external API failed: %w\", err),\n            Path:      []Identity{ExternalProcessorID},\n            Duration:  time.Since(start),\n            Timeout:   errors.Is(err, context.DeadlineExceeded),\n        }\n    }\n    return result, nil\n}",{"id":2189,"title":2190,"titles":2191,"content":2192,"level":35},"/v1.0.7/reference/types/error#error-recovery","Error Recovery",[2077,2181],"var (\n    WithRecoveryID = pipz.NewIdentity(\"with-recovery\", \"Recover from failures\")\n    RecoverDataID  = pipz.NewIdentity(\"recover\", \"Extract partial data\")\n)\n\n// Recover and continue with partial data\npipeline := pipz.NewFallback(WithRecoveryID,\n    primaryPipeline,\n    pipz.Apply(RecoverDataID, func(ctx context.Context, data Data) (Data, error) {\n        // Access the error that caused the fallback\n        if err := ctx.Value(\"error\"); err != nil {\n            var pipeErr *pipz.Error[Data]\n            if errors.As(err.(error), &pipeErr) {\n                // Log the failure path\n                log.Warnf(\"Primary failed at %v, using fallback\", pipeErr.Path)\n\n                // Return partial data from the error\n                return pipeErr.InputData, nil\n            }\n        }\n        return data, nil\n    }),\n)",{"id":2194,"title":135,"titles":2195,"content":2196,"level":19},"/v1.0.7/reference/types/error#best-practices",[2077],"Always check error type - Use errors.As to safely access ErrorT fieldsPreserve error chains - Use fmt.Errorf with %w verb when wrappingLog complete context - Include Path, Duration, and InputData in logsHandle timeouts differently - Don't retry timeouts with same timeout durationDistinguish cancellations - Treat as graceful shutdown, not failureUse Path for debugging - Shows exact failure point in complex pipelinesMonitor Duration - Detect performance degradation before 
timeouts",{"id":2198,"title":2199,"titles":2200,"content":29,"level":19},"/v1.0.7/reference/types/error#gotchas","Gotchas",[2077],{"id":2202,"title":2203,"titles":2204,"content":2205,"level":35},"/v1.0.7/reference/types/error#dont-ignore-the-inputdata-field","❌ Don't ignore the InputData field",[2077,2199],"// WRONG - Losing valuable debug information\nvar pipeErr *pipz.Error[Order]\nif errors.As(err, &pipeErr) {\n    log.Error(pipeErr.Err) // Only logging the error message\n}",{"id":2207,"title":2208,"titles":2209,"content":2210,"level":35},"/v1.0.7/reference/types/error#use-all-available-context","✅ Use all available context",[2077,2199],"// RIGHT - Complete error context\nvar pipeErr *pipz.Error[Order]\nif errors.As(err, &pipeErr) {\n    log.WithFields(log.Fields{\n        \"order_id\": pipeErr.InputData.ID,\n        \"customer\": pipeErr.InputData.CustomerID,\n        \"amount\":   pipeErr.InputData.Total,\n        \"path\":     pipeErr.Path,\n        \"duration\": pipeErr.Duration,\n    }).Error(\"Order failed\", pipeErr.Err)\n}",{"id":2212,"title":2213,"titles":2214,"content":2215,"level":35},"/v1.0.7/reference/types/error#dont-retry-canceled-operations","❌ Don't retry canceled operations",[2077,2199],"// WRONG - Retrying during shutdown\nif err != nil {\n    return retryOperation(ctx, data)\n}",{"id":2217,"title":2218,"titles":2219,"content":2220,"level":35},"/v1.0.7/reference/types/error#check-cancellation-first","✅ Check cancellation first",[2077,2199],"// RIGHT - Respect cancellation\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) && pipeErr.IsCanceled() {\n        return data, err // Don't retry - system is shutting down\n    }\n    return retryOperation(ctx, data)\n}",{"id":2222,"title":2223,"titles":2224,"content":2225,"level":19},"/v1.0.7/reference/types/error#panic-recovery-errors","Panic Recovery Errors",[2077],"pipz automatically recovers from all panics in processor and connector functions, converting them to 
Error[T] instances. When you see errors containing \"panic in processor\", these represent panics that were automatically caught and sanitized.",{"id":2227,"title":2228,"titles":2229,"content":2230,"level":35},"/v1.0.7/reference/types/error#identifying-panic-errors","Identifying Panic Errors",[2077,2223],"result, err := processor.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) {\n        // Check if this was a recovered panic\n        if strings.Contains(pipeErr.Error(), \"panic in processor\") {\n            log.Warn(\"Processor panicked but was safely recovered\")\n\n            // The panic message is sanitized for security:\n            // - Memory addresses redacted (0x*** instead of 0x1234...)\n            // - File paths removed to prevent info leakage\n            // - Stack traces stripped\n            // - Long messages truncated\n        }\n    }\n}",{"id":2232,"title":1393,"titles":2233,"content":2234,"level":35},"/v1.0.7/reference/types/error#security-sanitization",[2077,2223],"Panic messages undergo security sanitization to prevent information leakage: Memory addresses: 0x1234567890abcdef → 0x***File paths: panic in /sensitive/path/file.go:123 → \"panic occurred (file path sanitized)\"Stack traces: goroutine 1 [running]:... 
→ \"panic occurred (stack trace sanitized)\"Long messages: Truncated to prevent log spam",{"id":2236,"title":2237,"titles":2238,"content":2239,"level":35},"/v1.0.7/reference/types/error#what-this-means-for-you","What This Means for You",[2077,2223],"No crashes: Your application will never crash due to panics in pipelinesError handling: Panics become regular errors in the error handling flowSecurity: Sensitive information is automatically stripped from panic messagesMonitoring: You can detect and alert on panic occurrences in productionDebugging: Use development environments to get more detailed panic information",{"id":2241,"title":1764,"titles":2242,"content":2243,"level":19},"/v1.0.7/reference/types/error#see-also",[2077],"Handle - Observe and react to errorsFallback - Automatic error recoveryRetry - Retry failed operationsCircuitBreaker - Prevent cascading failuresSafety and Reliability - Complete panic recovery documentation html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sW3Qg, html code.shiki 
.sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}",{"id":2245,"title":2246,"titles":2247,"content":2248,"level":9},"/v1.0.7/reference/types","Types",[],"Core type definitions for pipz pipelines",{"id":2250,"title":2246,"titles":2251,"content":29,"level":9},"/v1.0.7/reference/types#types",[],{"id":2253,"title":2254,"titles":2255,"content":2256,"level":9},"/v1.0.7/reference/processors/apply","Apply",[],"Creates a processor from a function that can return an error for fallible operations",{"id":2258,"title":2254,"titles":2259,"content":2260,"level":9},"/v1.0.7/reference/processors/apply#apply",[],"Creates a processor from a function that can return an error. Note: Apply is a convenience wrapper. 
You can always implement Chainable[T] directly for more control or stateful processors.",{"id":2262,"title":2263,"titles":2264,"content":2265,"level":19},"/v1.0.7/reference/processors/apply#function-signature","Function Signature",[2254],"func Apply[T any](identity Identity, fn func(context.Context, T) (T, error)) Chainable[T]",{"id":2267,"title":2268,"titles":2269,"content":2270,"level":19},"/v1.0.7/reference/processors/apply#parameters","Parameters",[2254],"identity (Identity) - Identifier for the processor used in error messages and debuggingfn - Processing function that takes a context and input, returns output or error",{"id":2272,"title":2273,"titles":2274,"content":2275,"level":19},"/v1.0.7/reference/processors/apply#returns","Returns",[2254],"Returns a Chainable[T] that can be composed with other processors.",{"id":2277,"title":2278,"titles":2279,"content":2280,"level":19},"/v1.0.7/reference/processors/apply#behavior","Behavior",[2254],"Fallible operations - Can return errors that stop pipeline executionError wrapping - Errors are automatically wrapped with context (path, timing, input data)Context aware - Respects cancellation and timeoutsFail-fast - Pipeline stops on first error",{"id":2282,"title":2283,"titles":2284,"content":2285,"level":19},"/v1.0.7/reference/processors/apply#example","Example",[2254],"// Validation\nvalidate := pipz.Apply(\n    pipz.NewIdentity(\"validate-user\", \"Validates user email and age fields\"),\n    func(ctx context.Context, user User) (User, error) {\n        if user.Email == \"\" {\n            return user, errors.New(\"email required\")\n        }\n        if user.Age \u003C 0 || user.Age > 150 {\n            return user, fmt.Errorf(\"invalid age: %d\", user.Age)\n        }\n        return user, nil\n    },\n)\n\n// External API call\nfetchData := pipz.Apply(\n    pipz.NewIdentity(\"fetch-data\", \"Fetches data from external API\"),\n    func(ctx context.Context, id string) (Data, error) {\n        resp, err := 
http.Get(ctx, fmt.Sprintf(\"/api/data/%s\", id))\n        if err != nil {\n            return Data{}, fmt.Errorf(\"fetch failed: %w\", err)\n        }\n        return parseResponse(resp)\n    },\n)\n\n// Database operation\nsaveUser := pipz.Apply(\n    pipz.NewIdentity(\"save-user\", \"Saves user to database\"),\n    func(ctx context.Context, user User) (User, error) {\n        user.ID = uuid.New()\n        if err := db.Save(ctx, &user); err != nil {\n            return user, fmt.Errorf(\"database save failed: %w\", err)\n        }\n        return user, nil\n    },\n)",{"id":2287,"title":106,"titles":2288,"content":2289,"level":19},"/v1.0.7/reference/processors/apply#error-handling",[2254],"When Apply returns an error, it's wrapped in *Error[T] with rich context: result, err := pipeline.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[User]\n    if errors.As(err, &pipeErr) {\n        fmt.Printf(\"Failed at: %v\\n\", pipeErr.Path)\n        fmt.Printf(\"Input: %+v\\n\", pipeErr.InputData)\n        fmt.Printf(\"Duration: %v\\n\", pipeErr.Duration)\n        \n        // Check specific conditions\n        if pipeErr.Timeout {\n            // Handle timeout\n        }\n    }\n}",{"id":2291,"title":2292,"titles":2293,"content":2294,"level":19},"/v1.0.7/reference/processors/apply#when-to-use","When to Use",[2254],"Use Apply when: Your operation can fail (parsing, validation, I/O)You're calling external services (APIs, databases)You're doing validation that should stop the pipelineYou need explicit error handlingParsing data (JSON, XML, user input)Type conversions that might fail",{"id":2296,"title":2297,"titles":2298,"content":2299,"level":19},"/v1.0.7/reference/processors/apply#when-not-to-use","When NOT to Use",[2254],"Don't use Apply when: Your operation cannot fail (use Transform for better performance)You only need side effects (use Effect instead)You want to ignore errors (use Enrich instead)Simple math or string operations (use Transform)Logging or 
metrics (use Effect)",{"id":2301,"title":1471,"titles":2302,"content":2303,"level":19},"/v1.0.7/reference/processors/apply#performance",[2254],"Apply has minimal overhead for error handling: ~46ns per operation (success case)Zero allocations on successSmall allocation only on error",{"id":2305,"title":2181,"titles":2306,"content":2307,"level":19},"/v1.0.7/reference/processors/apply#common-patterns",[2254],"// Define identities upfront\nvar (\n    ValidationID     = pipz.NewIdentity(\"validation\", \"User validation pipeline\")\n    CheckRequiredID  = pipz.NewIdentity(\"check-required\", \"Checks required fields\")\n    CheckFormatID    = pipz.NewIdentity(\"check-format\", \"Validates field formats\")\n    CheckUniqueID    = pipz.NewIdentity(\"check-unique\", \"Ensures uniqueness constraints\")\n    APIRetryID       = pipz.NewIdentity(\"api-retry\", \"API with retry\")\n    APICallID        = pipz.NewIdentity(\"api-call\", \"Calls external API\")\n    SaveID           = pipz.NewIdentity(\"save\", \"Database save with fallback\")\n    PrimaryDBID      = pipz.NewIdentity(\"primary-db\", \"Saves to primary database\")\n    BackupDBID       = pipz.NewIdentity(\"backup-db\", \"Saves to backup database\")\n    ParseJSONID      = pipz.NewIdentity(\"parse-json\", \"Parses and validates JSON configuration\")\n)\n\n// Validation pipeline\nvalidation := pipz.NewSequence[User](ValidationID,\n    pipz.Apply(CheckRequiredID, checkRequired),\n    pipz.Apply(CheckFormatID, checkFormat),\n    pipz.Apply(CheckUniqueID, checkUnique),\n)\n\n// API with retry\nreliableAPI := pipz.NewRetry(APIRetryID,\n    pipz.Apply(APICallID, callExternalAPI),\n    3,\n)\n\n// Database with fallback\nsaveWithFallback := pipz.NewFallback(SaveID,\n    pipz.Apply(PrimaryDBID, saveToPrimary),\n    pipz.Apply(BackupDBID, saveToBackup),\n)\n\n// Parse with validation\nparseAndValidate := pipz.Apply(ParseJSONID,\n    func(ctx context.Context, raw string) (Config, error) {\n        var config Config\n        
if err := json.Unmarshal([]byte(raw), &config); err != nil {\n            return config, fmt.Errorf(\"invalid JSON: %w\", err)\n        }\n        if config.Version == \"\" {\n            return config, errors.New(\"version required\")\n        }\n        return config, nil\n    },\n)",{"id":2309,"title":2199,"titles":2310,"content":29,"level":19},"/v1.0.7/reference/processors/apply#gotchas",[2254],{"id":2312,"title":2313,"titles":2314,"content":2315,"level":35},"/v1.0.7/reference/processors/apply#dont-ignore-context","❌ Don't ignore context",[2254,2199],"// WRONG - Ignoring context cancellation\napply := pipz.Apply(\n    pipz.NewIdentity(\"slow-operation\", \"Slow operation that ignores context\"),\n    func(ctx context.Context, data Data) (Data, error) {\n        time.Sleep(10 * time.Second) // Blocks even if context cancelled\n        return data, nil\n    },\n)",{"id":2317,"title":2318,"titles":2319,"content":2320,"level":35},"/v1.0.7/reference/processors/apply#respect-context-cancellation","✅ Respect context cancellation",[2254,2199],"// RIGHT - Check context\napply := pipz.Apply(\n    pipz.NewIdentity(\"slow-operation\", \"Slow operation with context awareness\"),\n    func(ctx context.Context, data Data) (Data, error) {\n        select {\n        case \u003C-time.After(10 * time.Second):\n            return data, nil\n        case \u003C-ctx.Done():\n            return data, ctx.Err()\n        }\n    },\n)",{"id":2322,"title":2323,"titles":2324,"content":2325,"level":35},"/v1.0.7/reference/processors/apply#dont-use-for-pure-transforms","❌ Don't use for pure transforms",[2254,2199],"// WRONG - Unnecessary error handling\napply := pipz.Apply(\n    pipz.NewIdentity(\"double\", \"Doubles the input\"),\n    func(ctx context.Context, n int) (int, error) {\n        return n * 2, nil // Never fails!\n    },\n)",{"id":2327,"title":2328,"titles":2329,"content":2330,"level":35},"/v1.0.7/reference/processors/apply#use-transform-for-infallible-operations","✅ Use 
Transform for infallible operations",[2254,2199],"// RIGHT - No error overhead\ntransform := pipz.Transform(\n    pipz.NewIdentity(\"double\", \"Doubles the input\"),\n    func(ctx context.Context, n int) int {\n        return n * 2\n    },\n)",{"id":2332,"title":1764,"titles":2333,"content":2334,"level":19},"/v1.0.7/reference/processors/apply#see-also",[2254],"Transform - For pure transformationsEffect - For side effectsEnrich - For optional operations html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html pre.shiki code 
.soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":2336,"title":2337,"titles":2338,"content":2339,"level":9},"/v1.0.7/reference/processors/effect","Effect",[],"Creates a processor that performs side effects without modifying the input data",{"id":2341,"title":2337,"titles":2342,"content":2343,"level":9},"/v1.0.7/reference/processors/effect#effect",[],"Creates a processor that performs side effects without modifying the input data. Note: Effect is a convenience wrapper. You can always implement Chainable[T] directly for more control or stateful processors.",{"id":2345,"title":2263,"titles":2346,"content":2347,"level":19},"/v1.0.7/reference/processors/effect#function-signature",[2337],"func Effect[T any](identity Identity, fn func(context.Context, T) error) Chainable[T]",{"id":2349,"title":2268,"titles":2350,"content":2351,"level":19},"/v1.0.7/reference/processors/effect#parameters",[2337],"identity (Identity) - Identifier for the processor used in error messages and debuggingfn - Side effect function that takes context and input, returns error on failure",{"id":2353,"title":2273,"titles":2354,"content":2355,"level":19},"/v1.0.7/reference/processors/effect#returns",[2337],"Returns a Chainable[T] that passes through the original input unchanged (unless error occurs).",{"id":2357,"title":2278,"titles":2358,"content":2359,"level":19},"/v1.0.7/reference/processors/effect#behavior",[2337],"Pass-through - Original input is returned unchangedSide effects only - Used for logging, metrics, notifications, etc.Can fail - Errors stop pipeline executionContext aware - Respects cancellation and timeouts",{"id":2361,"title":2283,"titles":2362,"content":2363,"level":19},"/v1.0.7/reference/processors/effect#example",[2337],"// Logging\nlogger := pipz.Effect(\n    pipz.NewIdentity(\"log-order\", \"Logs order processing details\"),\n    func(ctx context.Context, order Order) error {\n        log.Printf(\"Processing order: %s, amount: %.2f\", order.ID, order.Total)\n        
return nil\n    },\n)\n\n// Metrics\nmetrics := pipz.Effect(\n    pipz.NewIdentity(\"record-metrics\", \"Records event metrics\"),\n    func(ctx context.Context, event Event) error {\n        if err := metricsClient.Increment(\"events.processed\",\n            \"type\", event.Type,\n            \"source\", event.Source,\n        ); err != nil {\n            return fmt.Errorf(\"metrics failed: %w\", err)\n        }\n        return nil\n    },\n)\n\n// Audit trail\naudit := pipz.Effect(\n    pipz.NewIdentity(\"audit-trail\", \"Writes audit log entry\"),\n    func(ctx context.Context, user User) error {\n        entry := AuditEntry{\n            UserID:    user.ID,\n            Action:    \"profile_update\",\n            Timestamp: time.Now(),\n            IP:        getIPFromContext(ctx),\n        }\n        return auditLog.Write(ctx, entry)\n    },\n)\n\n// Validation (no data modification)\ncheckPermissions := pipz.Effect(\n    pipz.NewIdentity(\"check-permissions\", \"Validates user permissions\"),\n    func(ctx context.Context, req Request) error {\n        if !req.User.HasPermission(req.Action) {\n            return errors.New(\"permission denied\")\n        }\n        return nil\n    },\n)",{"id":2365,"title":2292,"titles":2366,"content":2367,"level":19},"/v1.0.7/reference/processors/effect#when-to-use",[2337],"Use Effect when: You need side effects without data changes (logging, metrics, notifications)You're writing audit trails that must succeedYou need validation without modificationYou want to maintain data immutabilityRecording events or telemetryTriggering external systems",{"id":2369,"title":2297,"titles":2370,"content":2371,"level":19},"/v1.0.7/reference/processors/effect#when-not-to-use",[2337],"Don't use Effect when: You need to modify the data (use Transform or Apply)The side effect is optional (consider Enrich)You want to ignore errors (wrap with error handling)Computing values to add to data (use Transform)The operation returns useful data (use 
Apply)",{"id":2373,"title":1471,"titles":2374,"content":2375,"level":19},"/v1.0.7/reference/processors/effect#performance",[2337],"Effect has similar performance to Apply: ~46ns per operation (success case)Zero allocations on successOriginal data is passed through without copying",{"id":2377,"title":2181,"titles":2378,"content":2379,"level":19},"/v1.0.7/reference/processors/effect#common-patterns",[2337],"// Define identities upfront\nvar (\n    ObserveID       = pipz.NewIdentity(\"observe\", \"Order observability pipeline\")\n    LogOrderID      = pipz.NewIdentity(\"log-order\", \"Logs order details\")\n    RecordMetricsID = pipz.NewIdentity(\"record-metrics\", \"Records order metrics\")\n    AuditOrderID    = pipz.NewIdentity(\"audit-order\", \"Audits order processing\")\n    NotificationsID = pipz.NewIdentity(\"notifications\", \"Notification effects\")\n    SendEmailID     = pipz.NewIdentity(\"send-email\", \"Sends email notification\")\n    SendSMSID       = pipz.NewIdentity(\"send-sms\", \"Sends SMS notification\")\n    SendPushID      = pipz.NewIdentity(\"send-push\", \"Sends push notification\")\n)\n\n// Custom observability pipeline\nobserve := pipz.NewSequence[Order](ObserveID,\n    pipz.Effect(LogOrderID, logOrder),\n    pipz.Effect(RecordMetricsID, recordMetrics),\n    pipz.Effect(AuditOrderID, auditOrder),\n)\n\n// Notification effects running in parallel\nnotifications := pipz.NewConcurrent[User](NotificationsID,\n    pipz.Effect(SendEmailID, sendEmail),\n    pipz.Effect(SendSMSID, sendSMS),\n    pipz.Effect(SendPushID, sendPushNotification),\n)\n\n// Conditional logging\ndebugLog := pipz.Mutate(\n    pipz.NewIdentity(\"debug-log\", \"Logs debug information when enabled\"),\n    func(ctx context.Context, data Data) Data {\n        log.Printf(\"DEBUG: %+v\", data)\n        return data\n    },\n    func(ctx context.Context, data Data) bool {\n        return os.Getenv(\"DEBUG\") == \"true\"\n    },\n)\n\n// Critical validation\nauthorize := 
pipz.Effect(\n    pipz.NewIdentity(\"authorize-request\", \"Authorizes user access to resource\"),\n    func(ctx context.Context, req Request) error {\n        if !isAuthorized(ctx, req.UserID, req.Resource) {\n            return fmt.Errorf(\"unauthorized access to %s\", req.Resource)\n        }\n        return nil\n    },\n)",{"id":2381,"title":2199,"titles":2382,"content":29,"level":19},"/v1.0.7/reference/processors/effect#gotchas",[2337],{"id":2384,"title":2385,"titles":2386,"content":2387,"level":35},"/v1.0.7/reference/processors/effect#dont-modify-the-input","❌ Don't modify the input",[2337,2199],"// WRONG - Effect shouldn't modify data\neffect := pipz.Effect(\n    pipz.NewIdentity(\"bad-effect\", \"Incorrectly modifies data\"),\n    func(ctx context.Context, user *User) error {\n        user.LastSeen = time.Now() // Modifying!\n        return nil\n    },\n)",{"id":2389,"title":2390,"titles":2391,"content":2392,"level":35},"/v1.0.7/reference/processors/effect#use-transform-for-modifications","✅ Use Transform for modifications",[2337,2199],"// RIGHT - Use Transform to modify\ntransform := pipz.Transform(\n    pipz.NewIdentity(\"update-last-seen\", \"Updates last seen timestamp\"),\n    func(ctx context.Context, user User) User {\n        user.LastSeen = time.Now()\n        return user\n    },\n)",{"id":2394,"title":2395,"titles":2396,"content":2397,"level":35},"/v1.0.7/reference/processors/effect#dont-return-data-through-side-channels","❌ Don't return data through side channels",[2337,2199],"// WRONG - Using closure to smuggle data out\nvar result string\neffect := pipz.Effect(\n    pipz.NewIdentity(\"fetch-data\", \"Fetches data via side channel\"),\n    func(ctx context.Context, id string) error {\n        data, err := fetchData(id)\n        result = data // Side channel!\n        return err\n    },\n)",{"id":2399,"title":2400,"titles":2401,"content":2402,"level":35},"/v1.0.7/reference/processors/effect#use-apply-for-operations-that-return-data","✅ Use Apply 
for operations that return data",[2337,2199],"// RIGHT - Proper data flow\napply := pipz.Apply(\n    pipz.NewIdentity(\"fetch-data\", \"Fetches data from external source\"),\n    func(ctx context.Context, id string) (Data, error) {\n        return fetchData(id)\n    },\n)",{"id":2404,"title":106,"titles":2405,"content":2406,"level":19},"/v1.0.7/reference/processors/effect#error-handling",[2337],"Effect errors include the same rich context as other processors: audit := pipz.Effect(\n    pipz.NewIdentity(\"audit-transaction\", \"Logs transaction to audit database\"),\n    func(ctx context.Context, tx Transaction) error {\n        if err := auditDB.Log(ctx, tx); err != nil {\n            // This error will stop the pipeline\n            return fmt.Errorf(\"audit log failed: %w\", err)\n        }\n        return nil\n    },\n)\n\n// To make effects optional, wrap with error handling\noptionalAudit := pipz.NewEnrich(\n    pipz.NewIdentity(\"optional-audit\", \"Attempts to log transaction audit\"),\n    func(ctx context.Context, tx Transaction) (Transaction, error) {\n        if err := auditDB.Log(ctx, tx); err != nil {\n            // Log but don't fail\n            log.Printf(\"Audit failed (continuing): %v\", err)\n            return tx, err // Will be ignored by Enrich\n        }\n        return tx, nil\n    },\n)",{"id":2408,"title":1764,"titles":2409,"content":2410,"level":19},"/v1.0.7/reference/processors/effect#see-also",[2337],"Transform - For data transformationsApply - For operations that modify dataEnrich - For optional side effects html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki 
.sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":2412,"title":2413,"titles":2414,"content":2415,"level":9},"/v1.0.7/reference/processors/enrich","Enrich",[],"Creates a processor that attempts to enhance data but doesn't fail the pipeline on error",{"id":2417,"title":2413,"titles":2418,"content":2419,"level":9},"/v1.0.7/reference/processors/enrich#enrich",[],"Creates a processor that attempts to enhance data but doesn't fail the pipeline on error. Note: Enrich is a convenience wrapper. 
You can always implement Chainable[T] directly for more control or stateful processors.",{"id":2421,"title":2263,"titles":2422,"content":2423,"level":19},"/v1.0.7/reference/processors/enrich#function-signature",[2413],"func Enrich[T any](identity Identity, fn func(context.Context, T) (T, error)) Chainable[T]",{"id":2425,"title":2268,"titles":2426,"content":2427,"level":19},"/v1.0.7/reference/processors/enrich#parameters",[2413],"identity (Identity) - Identifier for the processor used in error messages and debuggingfn - Enrichment function that attempts to enhance the data",{"id":2429,"title":2273,"titles":2430,"content":2431,"level":19},"/v1.0.7/reference/processors/enrich#returns",[2413],"Returns a Chainable[T] that enhances data when possible, passes through original on failure.",{"id":2433,"title":2278,"titles":2434,"content":2435,"level":19},"/v1.0.7/reference/processors/enrich#behavior",[2413],"Best effort - Tries to enhance data but continues on failureNon-failing - Errors are logged but don't stop the pipelineGraceful degradation - Returns original input if enrichment failsError visibility - Failures are included in error tracking but not propagated",{"id":2437,"title":2283,"titles":2438,"content":2439,"level":19},"/v1.0.7/reference/processors/enrich#example",[2413],"// Optional geolocation\nenrichLocation := pipz.Enrich(\n    pipz.NewIdentity(\"geocode-address\", \"Geocodes user address to coordinates\"),\n    func(ctx context.Context, user User) (User, error) {\n        coords, err := geocodeAPI.Lookup(ctx, user.Address)\n        if err != nil {\n            // Error is logged but pipeline continues\n            return user, fmt.Errorf(\"geocoding failed: %w\", err)\n        }\n        user.Latitude = coords.Lat\n        user.Longitude = coords.Lng\n        return user, nil\n    },\n)\n\n// Optional external data\nenrichProfile := pipz.Enrich(\n    pipz.NewIdentity(\"social-profile\", \"Fetches social profile data\"),\n    func(ctx context.Context, user 
User) (User, error) {\n        profile, err := socialAPI.GetProfile(ctx, user.Email)\n        if err != nil {\n            // User proceeds without social data\n            return user, err\n        }\n        user.Avatar = profile.Avatar\n        user.Bio = profile.Bio\n        return user, nil\n    },\n)\n\n// Optional scoring\nenrichRisk := pipz.Enrich(\n    pipz.NewIdentity(\"risk-score\", \"Calculates transaction risk score\"),\n    func(ctx context.Context, transaction Transaction) (Transaction, error) {\n        score, err := riskEngine.Calculate(ctx, transaction)\n        if err != nil {\n            // Transaction proceeds with default risk\n            transaction.RiskScore = 0.5 // default medium risk\n            return transaction, err\n        }\n        transaction.RiskScore = score\n        return transaction, nil\n    },\n)\n\n// Optional caching\nenrichFromCache := pipz.Enrich(\n    pipz.NewIdentity(\"cache-lookup\", \"Enriches item with cached data\"),\n    func(ctx context.Context, item Item) (Item, error) {\n        cached, err := cache.Get(ctx, item.ID)\n        if err != nil {\n            // Proceed without cached data\n            return item, err\n        }\n        item.CachedPrice = cached.Price\n        item.CachedAt = cached.Timestamp\n        return item, nil\n    },\n)",{"id":2441,"title":2292,"titles":2442,"content":2443,"level":19},"/v1.0.7/reference/processors/enrich#when-to-use",[2413],"Use Enrich when: The enhancement is optional and can fail silentlyYou're calling unreliable external services (third-party APIs)You want graceful degradationThe data is useful even without enrichmentYou're adding nice-to-have features (recommendations, social data)External data sources may be temporarily unavailable",{"id":2445,"title":2297,"titles":2446,"content":2447,"level":19},"/v1.0.7/reference/processors/enrich#when-not-to-use",[2413],"Don't use Enrich when: The operation is required (use Apply - fail fast)You need to handle errors 
explicitly (use Apply with error handling)The operation cannot fail (use Transform for better performance)You need to know if enrichment failed (use Apply with fallback)Validation or critical business logic (use Apply)",{"id":2449,"title":2450,"titles":2451,"content":2452,"level":19},"/v1.0.7/reference/processors/enrich#error-tracking","Error Tracking",[2413],"While Enrich doesn't fail the pipeline, errors are still tracked: // Define identities upfront\nvar (\n    UserPipelineID   = pipz.NewIdentity(\"user-pipeline\", \"User processing pipeline\")\n    ValidateUserID   = pipz.NewIdentity(\"validate-user\", \"Validates user data\")\n    GeocodeAddressID = pipz.NewIdentity(\"geocode-address\", \"Geocodes user address\")\n    FetchSocialID    = pipz.NewIdentity(\"fetch-social\", \"Fetches social profile\")\n    SaveUserID       = pipz.NewIdentity(\"save-user\", \"Saves user to database\")\n)\n\n// Errors are logged internally but not returned\npipeline := pipz.NewSequence[User](UserPipelineID,\n    pipz.Apply(ValidateUserID, validateUser),           // Can fail\n    pipz.Enrich(GeocodeAddressID, geocodeAddress),      // Won't fail\n    pipz.Enrich(FetchSocialID, fetchSocialProfile),     // Won't fail\n    pipz.Apply(SaveUserID, saveUser),                   // Can fail\n)\n\n// The pipeline only fails if validate or save fail\n// Enrich failures are logged but don't stop processing",{"id":2454,"title":2181,"titles":2455,"content":2456,"level":19},"/v1.0.7/reference/processors/enrich#common-patterns",[2413],"// Define identities upfront\nvar (\n    EnrichmentID        = pipz.NewIdentity(\"enrichment\", \"Product enrichment pipeline\")\n    FetchReviewsID      = pipz.NewIdentity(\"fetch-reviews\", \"Fetches product reviews\")\n    CheckInventoryID    = pipz.NewIdentity(\"check-inventory\", \"Checks inventory levels\")\n    GetRecommendationsID = pipz.NewIdentity(\"get-recommendations\", \"Fetches product recommendations\")\n    DynamicPricingID    = 
pipz.NewIdentity(\"dynamic-pricing\", \"Calculates dynamic pricing\")\n    ParallelEnrichID    = pipz.NewIdentity(\"parallel-enrich\", \"Parallel user enrichment\")\n    GeocodeUserID       = pipz.NewIdentity(\"geocode-user\", \"Geocodes user location\")\n    FetchSocialID       = pipz.NewIdentity(\"fetch-social\", \"Fetches social data\")\n    LoadPreferencesID   = pipz.NewIdentity(\"load-preferences\", \"Loads user preferences\")\n    SmartEnrichID       = pipz.NewIdentity(\"smart-enrich\", \"Smart order enrichment\")\n    NeedsEnrichmentID   = pipz.NewIdentity(\"needs-enrichment\", \"Enriches high-value orders\")\n    PremiumDataID       = pipz.NewIdentity(\"premium-data\", \"Fetches premium data\")\n)\n\n// Multiple optional enrichments\nenrichmentPipeline := pipz.NewSequence[Product](EnrichmentID,\n    pipz.Enrich(FetchReviewsID, fetchReviews),\n    pipz.Enrich(CheckInventoryID, checkInventory),\n    pipz.Enrich(GetRecommendationsID, getRecommendations),\n    pipz.Enrich(DynamicPricingID, getDynamicPricing),\n)\n\n// Parallel optional enrichments\nparallelEnrich := pipz.NewConcurrent[User](ParallelEnrichID,\n    pipz.Enrich(GeocodeUserID, geocodeUser),\n    pipz.Enrich(FetchSocialID, fetchSocialData),\n    pipz.Enrich(LoadPreferencesID, loadPreferences),\n)\n\n// Conditional enrichment\nsmartEnrich := pipz.NewSequence[Order](SmartEnrichID,\n    pipz.Mutate(NeedsEnrichmentID,\n        func(ctx context.Context, order Order) Order {\n            enriched, _ := pipz.Enrich(PremiumDataID,\n                fetchPremiumData,\n            ).Process(ctx, order)\n            return enriched\n        },\n        func(ctx context.Context, order Order) bool {\n            return order.Total > 1000 // Only enrich high-value orders\n        },\n    
),\n)",{"id":2458,"title":2199,"titles":2459,"content":29,"level":19},"/v1.0.7/reference/processors/enrich#gotchas",[2413],{"id":2461,"title":2462,"titles":2463,"content":2464,"level":35},"/v1.0.7/reference/processors/enrich#dont-use-for-required-operations","❌ Don't use for required operations",[2413,2199],"// WRONG - Validation should fail the pipeline\nenrich := pipz.Enrich(\n    pipz.NewIdentity(\"validate-user\", \"Validates user\"),\n    func(ctx context.Context, user User) (User, error) {\n        if !isValid(user) {\n            return user, errors.New(\"invalid user\") // Error is swallowed!\n        }\n        return user, nil\n    },\n)",{"id":2466,"title":2467,"titles":2468,"content":2469,"level":35},"/v1.0.7/reference/processors/enrich#use-apply-for-required-operations","✅ Use Apply for required operations",[2413,2199],"// RIGHT - Validation fails the pipeline\napply := pipz.Apply(\n    pipz.NewIdentity(\"validate-user\", \"Validates user\"),\n    func(ctx context.Context, user User) (User, error) {\n        if !isValid(user) {\n            return user, errors.New(\"invalid user\")\n        }\n        return user, nil\n    },\n)",{"id":2471,"title":2472,"titles":2473,"content":2474,"level":35},"/v1.0.7/reference/processors/enrich#dont-use-when-you-need-error-details","❌ Don't use when you need error details",[2413,2199],"// Define identities upfront\nvar ProcessPaymentID = pipz.NewIdentity(\"process-payment\", \"Processes payment\")\n\n// WRONG - Can't handle specific errors\nenrich := pipz.Enrich(ProcessPaymentID, processPayment) // Errors hidden",{"id":2476,"title":2477,"titles":2478,"content":2479,"level":35},"/v1.0.7/reference/processors/enrich#use-apply-with-explicit-error-handling","✅ Use Apply with explicit error handling",[2413,2199],"// Define identities upfront\nvar (\n    PaymentID       = pipz.NewIdentity(\"payment\", \"Payment processing with fallback\")\n    PrimaryPayID    = pipz.NewIdentity(\"primary-payment\", \"Processes payment via 
primary provider\")\n    BackupPayID     = pipz.NewIdentity(\"backup-payment\", \"Processes payment via backup provider\")\n)\n\n// RIGHT - Handle errors explicitly\nwithFallback := pipz.NewFallback(PaymentID,\n    pipz.Apply(PrimaryPayID, processPayment),\n    pipz.Apply(BackupPayID, processBackupPayment),\n)",{"id":2481,"title":135,"titles":2482,"content":2483,"level":19},"/v1.0.7/reference/processors/enrich#best-practices",[2413],"// Provide defaults when enrichment fails\nenrichWithDefault := pipz.Enrich(\n    pipz.NewIdentity(\"weather-data\", \"Enriches event with weather information\"),\n    func(ctx context.Context, event Event) (Event, error) {\n        weather, err := weatherAPI.GetCurrent(ctx, event.Location)\n        if err != nil {\n            // Provide sensible default\n            event.Weather = \"unknown\"\n            event.Temperature = 20.0 // room temperature\n            return event, err\n        }\n        event.Weather = weather.Condition\n        event.Temperature = weather.Temp\n        return event, nil\n    },\n)\n\n// Log enrichment failures for monitoring\nmonitoredEnrich := pipz.Enrich(\n    pipz.NewIdentity(\"external-api\", \"Enhances data via external API\"),\n    func(ctx context.Context, data Data) (Data, error) {\n        result, err := externalAPI.Enhance(ctx, data.ID)\n        if err != nil {\n            // Log for monitoring but don't fail\n            log.Printf(\"Enrichment failed for %s: %v\", data.ID, err)\n            metrics.Increment(\"enrichment.failures\", \"api\", \"external\")\n            return data, err\n        }\n        data.Enhanced = result\n        return data, nil\n    },\n)",{"id":2485,"title":1764,"titles":2486,"content":2487,"level":19},"/v1.0.7/reference/processors/enrich#see-also",[2413],"Apply - For required operations that can failTransform - For operations that cannot failEffect - For optional side effects html pre.shiki code .sUt3r, html code.shiki 
.sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}",{"id":2489,"title":192,"titles":2490,"content":2491,"level":9},"/v1.0.7/reference/processors",[],"Function wrappers that transform, validate, and enrich data flowing through pipelines",{"id":2493,"title":192,"titles":2494,"content":29,"level":9},"/v1.0.7/reference/processors#processors",[],{"id":2496,"title":2497,"titles":2498,"content":2499,"level":9},"/v1.0.7/reference/processors/mutate","Mutate",[],"Creates a processor that conditionally modifies data based on a predicate 
function",{"id":2501,"title":2497,"titles":2502,"content":2503,"level":9},"/v1.0.7/reference/processors/mutate#mutate",[],"Creates a processor that conditionally modifies data based on a predicate. Note: Mutate is a convenience wrapper. You can always implement Chainable[T] directly for more control or stateful processors.",{"id":2505,"title":2263,"titles":2506,"content":2507,"level":19},"/v1.0.7/reference/processors/mutate#function-signature",[2497],"func Mutate[T any](\n    identity Identity,\n    transformer func(context.Context, T) T,\n    condition func(context.Context, T) bool,\n) Processor[T]",{"id":2509,"title":2268,"titles":2510,"content":2511,"level":19},"/v1.0.7/reference/processors/mutate#parameters",[2497],"identity (Identity) - Identifier for the processor used in error messages and debuggingtransformer - Function that performs the transformation when condition is truecondition - Predicate function that determines if transformation should occur",{"id":2513,"title":2273,"titles":2514,"content":2515,"level":19},"/v1.0.7/reference/processors/mutate#returns",[2497],"Returns a Processor[T] that applies the transformation only when the condition is met.",{"id":2517,"title":2278,"titles":2518,"content":2519,"level":19},"/v1.0.7/reference/processors/mutate#behavior",[2497],"Conditional execution - Mutation only runs if condition returns truePass-through on false - Original data returned when condition is falseCannot fail - Neither condition nor mutation can return errorsContext aware - Both functions receive context",{"id":2521,"title":2283,"titles":2522,"content":2523,"level":19},"/v1.0.7/reference/processors/mutate#example",[2497],"// Auto-verify trusted domains\nautoVerify := pipz.Mutate(\n    pipz.NewIdentity(\"auto-verify\", \"Auto-verifies company email addresses\"),\n    func(ctx context.Context, user User) User {\n        user.Verified = true\n        user.VerifiedAt = time.Now()\n        return user\n    },\n    func(ctx context.Context, user User) 
bool {\n        return strings.HasSuffix(user.Email, \"@company.com\")\n    },\n)\n\n// Apply discounts\napplyDiscount := pipz.Mutate(\n    pipz.NewIdentity(\"vip-discount\", \"Applies VIP discount to qualifying orders\"),\n    func(ctx context.Context, order Order) Order {\n        order.Discount = order.Total * 0.2\n        order.Total = order.Total - order.Discount\n        return order\n    },\n    func(ctx context.Context, order Order) bool {\n        return order.Customer.Tier == \"VIP\" && order.Total > 100\n    },\n)\n\n// Feature flags\nbetaFeature := pipz.Mutate(\n    pipz.NewIdentity(\"beta-enrichment\", \"Adds beta score when feature enabled\"),\n    func(ctx context.Context, data Data) Data {\n        data.BetaScore = calculateBetaScore(data)\n        return data\n    },\n    func(ctx context.Context, data Data) bool {\n        return featureFlags.IsEnabled(ctx, \"beta-enrichment\")\n    },\n)\n\n// Conditional formatting\nformatPhone := pipz.Mutate(\n    pipz.NewIdentity(\"format-phone\", \"Formats US phone numbers\"),\n    func(ctx context.Context, contact Contact) Contact {\n        // Format as (XXX) XXX-XXXX\n        contact.Phone = fmt.Sprintf(\"(%s) %s-%s\",\n            contact.Phone[0:3],\n            contact.Phone[3:6],\n            contact.Phone[6:10],\n        )\n        return contact\n    },\n    func(ctx context.Context, contact Contact) bool {\n        return contact.Country == \"US\" && len(contact.Phone) == 10\n    },\n)",{"id":2525,"title":2292,"titles":2526,"content":2527,"level":19},"/v1.0.7/reference/processors/mutate#when-to-use",[2497],"Use Mutate when: You need conditional transformations that can't failDifferent data needs different processing based on simple conditionsYou're implementing business rules with pure functionsYou want feature flags or A/B testing for transformationsYou need data normalization based on conditionsApplying defaults or enrichments 
conditionally",{"id":2529,"title":2297,"titles":2530,"content":2531,"level":19},"/v1.0.7/reference/processors/mutate#when-not-to-use",[2497],"Don't use Mutate when: The operation can fail (use Apply with conditions)You always transform (use Transform - no condition needed)You need complex routing (use Switch for multiple branches)The condition needs error handling (use Filter with Apply)You need side effects (use Filter with Effect)",{"id":2533,"title":1471,"titles":2534,"content":2535,"level":19},"/v1.0.7/reference/processors/mutate#performance",[2497],"Mutate has minimal overhead: Condition check is fastNo allocations if condition is falseSimilar to Transform when condition is true",{"id":2537,"title":2181,"titles":2538,"content":2539,"level":19},"/v1.0.7/reference/processors/mutate#common-patterns",[2497],"// Define identities upfront\nvar (\n    UserProcessingID = pipz.NewIdentity(\"user-processing\", \"User processing pipeline\")\n    VerifyTrustedID  = pipz.NewIdentity(\"verify-trusted\", \"Verifies trusted domains\")\n    ApplyRegionalID  = pipz.NewIdentity(\"apply-regional\", \"Applies regional settings\")\n    PremiumFeaturesID = pipz.NewIdentity(\"premium-features\", \"Adds premium features\")\n    OrderID          = pipz.NewIdentity(\"order\", \"Order processing pipeline\")\n    ValidateID       = pipz.NewIdentity(\"validate\", \"Validates order\")\n    LoyaltyDiscountID = pipz.NewIdentity(\"loyalty-discount\", \"Applies loyalty discount\")\n    BulkDiscountID   = pipz.NewIdentity(\"bulk-discount\", \"Applies bulk discount\")\n    CalculateTaxID   = pipz.NewIdentity(\"calculate-tax\", \"Calculates tax\")\n)\n\n// Chain multiple conditional mutations\npipeline := pipz.NewSequence[User](UserProcessingID,\n    pipz.Mutate(VerifyTrustedID, markVerified, isTrustedDomain),\n    pipz.Mutate(ApplyRegionalID, applyGDPR, isEuropean),\n    pipz.Mutate(PremiumFeaturesID, addPremiumFeatures, isPremium),\n)\n\n// Combine with validation\nprocessOrder := 
pipz.NewSequence[Order](OrderID,\n    pipz.Apply(ValidateID, validateOrder),\n    pipz.Mutate(LoyaltyDiscountID, applyLoyaltyDiscount, isLoyaltyMember),\n    pipz.Mutate(BulkDiscountID, applyBulkDiscount, isBulkOrder),\n    pipz.Apply(CalculateTaxID, calculateTax),\n)\n\n// Environment-based behavior\ndebugEnrichment := pipz.Mutate(\n    pipz.NewIdentity(\"debug-data\", \"Adds debug information in development\"),\n    func(ctx context.Context, data Data) Data {\n        data.DebugInfo = generateDebugInfo(data)\n        return data\n    },\n    func(ctx context.Context, data Data) bool {\n        return os.Getenv(\"ENV\") == \"development\"\n    },\n)\n\n// Default values\napplyDefaults := pipz.Mutate(\n    pipz.NewIdentity(\"defaults\", \"Applies default timeout\"),\n    func(ctx context.Context, cfg Config) Config {\n        cfg.Timeout = 30 * time.Second\n        return cfg\n    },\n    func(ctx context.Context, cfg Config) bool {\n        return cfg.Timeout == 0 // No timeout set\n    },\n)",{"id":2541,"title":2199,"titles":2542,"content":29,"level":19},"/v1.0.7/reference/processors/mutate#gotchas",[2497],{"id":2544,"title":2545,"titles":2546,"content":2547,"level":35},"/v1.0.7/reference/processors/mutate#dont-use-for-operations-that-can-fail","❌ Don't use for operations that can fail",[2497,2199],"// WRONG - Parse can fail but Mutate can't handle errors\nmutate := pipz.Mutate(\n    pipz.NewIdentity(\"parse\", \"Parses JSON\"),\n    func(ctx context.Context, s string) Data {\n        data, _ := json.Unmarshal([]byte(s), &Data{}) // Error ignored!\n        return data\n    },\n    func(ctx context.Context, s string) bool { return s != \"\" },\n)",{"id":2549,"title":2550,"titles":2551,"content":2552,"level":35},"/v1.0.7/reference/processors/mutate#use-apply-for-fallible-operations","✅ Use Apply for fallible operations",[2497,2199],"// RIGHT - Proper error handling\napply := pipz.Apply(\n    pipz.NewIdentity(\"parse\", \"Parses JSON with error handling\"),\n    
func(ctx context.Context, s string) (Data, error) {\n        if s == \"\" {\n            return Data{}, nil // Skip parsing\n        }\n        var data Data\n        err := json.Unmarshal([]byte(s), &data)\n        return data, err\n    },\n)",{"id":2554,"title":2555,"titles":2556,"content":2557,"level":35},"/v1.0.7/reference/processors/mutate#dont-use-when-you-always-transform","❌ Don't use when you always transform",[2497,2199],"// WRONG - Condition always true\nmutate := pipz.Mutate(\n    pipz.NewIdentity(\"always\", \"Always transforms\"),\n    transform,\n    func(ctx context.Context, data Data) bool { return true }, // Always!\n)",{"id":2559,"title":2560,"titles":2561,"content":2562,"level":35},"/v1.0.7/reference/processors/mutate#use-transform-directly","✅ Use Transform directly",[2497,2199],"// RIGHT - No condition needed\ntransform := pipz.Transform(\n    pipz.NewIdentity(\"always\", \"Always transforms\"),\n    transform,\n)",{"id":2564,"title":2565,"titles":2566,"content":2567,"level":19},"/v1.0.7/reference/processors/mutate#advanced-usage","Advanced Usage",[2497],"// Complex conditions\nsmartRouting := pipz.Mutate(\n    pipz.NewIdentity(\"smart-route\", \"Routes high priority requests to express during business hours\"),\n    func(ctx context.Context, req Request) Request {\n        req.Route = \"express\"\n        req.SLA = time.Hour\n        return req\n    },\n    func(ctx context.Context, req Request) bool {\n        // Multiple conditions\n        return req.Priority == High &&\n               time.Now().Hour() >= 9 &&\n               time.Now().Hour() \u003C= 17 &&\n               !isHoliday(time.Now())\n    },\n)\n\n// Stateful conditions (be careful with concurrency)\nrateLimiter := pipz.Mutate(\n    pipz.NewIdentity(\"rate-limit\", \"Applies rate limiting based on user quota\"),\n    func(ctx context.Context, req Request) Request {\n        req.RateLimited = false\n        return req\n    },\n    func(ctx context.Context, req Request) bool {\n 
       return limiter.Allow(req.UserID)\n    },\n)",{"id":2569,"title":1764,"titles":2570,"content":2571,"level":19},"/v1.0.7/reference/processors/mutate#see-also",[2497],"Transform - For unconditional transformationsSwitch - For routing to different processorsApply - For conditional operations that can fail html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":2573,"title":2574,"titles":2575,"content":2576,"level":9},"/v1.0.7/reference/processors/transform","Transform",[],"Creates a processor from a pure 
transformation function that cannot fail",{"id":2578,"title":2574,"titles":2579,"content":2580,"level":9},"/v1.0.7/reference/processors/transform#transform",[],"Creates a processor from a pure transformation function that cannot fail. Note: Transform is a convenience wrapper. You can always implement Chainable[T] directly for more control or stateful processors.",{"id":2582,"title":2263,"titles":2583,"content":2584,"level":19},"/v1.0.7/reference/processors/transform#function-signature",[2574],"func Transform[T any](identity Identity, fn func(context.Context, T) T) Chainable[T]",{"id":2586,"title":2268,"titles":2587,"content":2588,"level":19},"/v1.0.7/reference/processors/transform#parameters",[2574],"identity (Identity) - Identifier for the processor used in error messages and debuggingfn - Transformation function that takes a context and input, returns transformed output",{"id":2590,"title":2273,"titles":2591,"content":2275,"level":19},"/v1.0.7/reference/processors/transform#returns",[2574],{"id":2593,"title":2278,"titles":2594,"content":2595,"level":19},"/v1.0.7/reference/processors/transform#behavior",[2574],"Pure transformation - Cannot return errorsAlways succeeds - Unless context is cancelledZero allocations - Optimal performance for simple transformationsContext aware - Respects cancellation",{"id":2597,"title":2283,"titles":2598,"content":2599,"level":19},"/v1.0.7/reference/processors/transform#example",[2574],"// Simple transformation\ndouble := pipz.Transform(\n    pipz.NewIdentity(\"double\", \"Doubles the input value\"),\n    func(ctx context.Context, n int) int {\n        return n * 2\n    },\n)\n\n// String manipulation\nnormalize := pipz.Transform(\n    pipz.NewIdentity(\"normalize\", \"Normalizes string to lowercase and trims whitespace\"),\n    func(ctx context.Context, s string) string {\n        return strings.ToLower(strings.TrimSpace(s))\n    },\n)\n\n// Struct transformation\naddTimestamp := pipz.Transform(\n    pipz.NewIdentity(\"timestamp\", 
\"Adds current timestamp to event\"),\n    func(ctx context.Context, event Event) Event {\n        event.Timestamp = time.Now()\n        return event\n    },\n)",{"id":2601,"title":2292,"titles":2602,"content":2603,"level":19},"/v1.0.7/reference/processors/transform#when-to-use",[2574],"Use Transform when: Your operation cannot fail (mathematical operations, string formatting)You're doing simple data transformationsYou want optimal performance (no error handling overhead)Converting between representations (struct to JSON, formatting dates)Adding computed fields that always succeed",{"id":2605,"title":2297,"titles":2606,"content":2607,"level":19},"/v1.0.7/reference/processors/transform#when-not-to-use",[2574],"Don't use Transform when: Your operation might fail (use Apply instead)You need side effects without changing data (use Effect instead)You need conditional logic (consider Mutate instead)Parsing or validation that can fail (use Apply)Making network/database calls (use Apply)",{"id":2609,"title":1471,"titles":2610,"content":2611,"level":19},"/v1.0.7/reference/processors/transform#performance",[2574],"Transform has the best performance of all processors: ~2.7ns per operationZero allocationsMinimal overhead",{"id":2613,"title":2181,"titles":2614,"content":2615,"level":19},"/v1.0.7/reference/processors/transform#common-patterns",[2574],"// Define identities upfront\nvar (\n    TextProcessingID = pipz.NewIdentity(\"text-processing\", \"Text processing pipeline\")\n    TrimID           = pipz.NewIdentity(\"trim\", \"Trims whitespace\")\n    LowerID          = pipz.NewIdentity(\"lower\", \"Converts to lowercase\")\n    CapitalizeID     = pipz.NewIdentity(\"capitalize\", \"Capitalizes first letter\")\n)\n\n// Chain multiple transforms\npipeline := pipz.NewSequence[string](TextProcessingID,\n    pipz.Transform(TrimID, strings.TrimSpace),\n    pipz.Transform(LowerID, strings.ToLower),\n    pipz.Transform(CapitalizeID, capitalize),\n)\n\n// Data enrichment\nenrichUser := 
pipz.Transform(\n    pipz.NewIdentity(\"enrich\", \"Adds display name to user\"),\n    func(ctx context.Context, user User) User {\n        user.DisplayName = fmt.Sprintf(\"%s (%s)\", user.Name, user.Role)\n        return user\n    },\n)\n\n// Computed fields\naddMetadata := pipz.Transform(\n    pipz.NewIdentity(\"metadata\", \"Adds processing metadata to event\"),\n    func(ctx context.Context, event Event) Event {\n        event.ProcessedAt = time.Now()\n        event.Version = \"1.0\"\n        return event\n    },\n)\n\n// Data normalization\nnormalizePhone := pipz.Transform(\n    pipz.NewIdentity(\"normalize-phone\", \"Normalizes phone number format\"),\n    func(ctx context.Context, user User) User {\n        user.Phone = strings.ReplaceAll(user.Phone, \"-\", \"\")\n        user.Phone = strings.ReplaceAll(user.Phone, \" \", \"\")\n        return user\n    },\n)",{"id":2617,"title":2199,"titles":2618,"content":29,"level":19},"/v1.0.7/reference/processors/transform#gotchas",[2574],{"id":2620,"title":2621,"titles":2622,"content":2623,"level":35},"/v1.0.7/reference/processors/transform#dont-hide-errors","❌ Don't hide errors",[2574,2199],"// WRONG - Swallowing potential errors\ntransform := pipz.Transform(\n    pipz.NewIdentity(\"parse\", \"Parses JSON\"),\n    func(ctx context.Context, s string) Data {\n        data, _ := json.Unmarshal([]byte(s), &Data{}) // Error ignored!\n        return data\n    },\n)",{"id":2625,"title":2550,"titles":2626,"content":2627,"level":35},"/v1.0.7/reference/processors/transform#use-apply-for-fallible-operations",[2574,2199],"// RIGHT - Proper error handling\napply := pipz.Apply(\n    pipz.NewIdentity(\"parse\", \"Parses JSON with error handling\"),\n    func(ctx context.Context, s string) (Data, error) {\n        var data Data\n        err := json.Unmarshal([]byte(s), &data)\n        return data, err\n    
},\n)",{"id":2629,"title":1764,"titles":2630,"content":2631,"level":19},"/v1.0.7/reference/processors/transform#see-also",[2574],"Apply - For operations that can failEffect - For side effectsMutate - For conditional transformations html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}",{"id":2633,"title":549,"titles":2634,"content":2635,"level":9},"/v1.0.7/reference/connectors/backoff",[],"Retry with exponential backoff for handling transient failures and reducing system load",{"id":2637,"title":549,"titles":2638,"content":2639,"level":9},"/v1.0.7/reference/connectors/backoff#backoff",[],"Retry with 
exponential backoff for handling transient failures.",{"id":2641,"title":6,"titles":2642,"content":2643,"level":19},"/v1.0.7/reference/connectors/backoff#overview",[549],"Backoff provides intelligent retry logic with exponentially increasing delays between attempts. Unlike simple Retry which uses fixed delays, Backoff progressively increases wait times to reduce load on failing systems and improve recovery chances.",{"id":2645,"title":2263,"titles":2646,"content":2647,"level":19},"/v1.0.7/reference/connectors/backoff#function-signature",[549],"func NewBackoff[T any](\n    identity Identity,\n    processor Chainable[T],\n    maxAttempts int,\n    baseDelay time.Duration,\n) *Backoff[T]",{"id":2649,"title":2091,"titles":2650,"content":2651,"level":19},"/v1.0.7/reference/connectors/backoff#type-parameters",[549],"T - The data type being processed",{"id":2653,"title":2268,"titles":2654,"content":2655,"level":19},"/v1.0.7/reference/connectors/backoff#parameters",[549],"identity (Identity) - Identifier for debugging and error pathsprocessor (Chainable[T]) - The processor to retry on failuremaxAttempts (int) - Maximum number of retry attempts (minimum 1)baseDelay (time.Duration) - Initial delay between retries",{"id":2657,"title":2273,"titles":2658,"content":2659,"level":19},"/v1.0.7/reference/connectors/backoff#returns",[549],"Returns a *Backoff[T] that implements Chainable[T].",{"id":2661,"title":2278,"titles":2662,"content":2663,"level":19},"/v1.0.7/reference/connectors/backoff#behavior",[549],"Exponential growth: Each retry doubles the previous delayNo delay after final attempt: Fails immediately if last attempt failsContext aware: Respects cancellation and deadlinesError preservation: Maintains complete error context from failuresThread-safe: Safe for concurrent use",{"id":2665,"title":2666,"titles":2667,"content":2668,"level":35},"/v1.0.7/reference/connectors/backoff#retry-backoff-visualization","Retry Backoff 
Visualization",[549,2278],"┌──────────────────────────────────────────────────────────────────┐\n│                    Exponential Backoff Pattern                   │\n└──────────────────────────────────────────────────────────────────┘\n\nBase Delay: 100ms, Max Attempts: 5\n\nAttempt 1 ──[✗]──→ Wait 100ms ──┐\n                                 ▼\nAttempt 2 ──[✗]──→ Wait 200ms ──┐\n                                 ▼\nAttempt 3 ──[✗]──→ Wait 400ms ──┐\n                                 ▼\nAttempt 4 ──[✗]──→ Wait 800ms ──┐\n                                 ▼\nAttempt 5 ──[✗]──→ Final Failure (no delay)\n                         │\n                         ▼\n                   Return Error\n\nTotal Time: 100ms + 200ms + 400ms + 800ms = 1.5 seconds\n\nTimeline View:\n═════════════\nTime:    0ms   100ms  300ms      700ms           1500ms\n         │      │      │          │               │\nAttempt: [1]──►[2]───►[3]───────►[4]────────────►[5]\n         ↑      ↑      ↑          ↑               ↑\n         Try   Retry  Retry     Retry          Final\n\nSuccess Case (succeeds on attempt 3):\n══════════════════════════════════════\nAttempt 1 ──[✗]──→ Wait 100ms ──┐\n                                 ▼\nAttempt 2 ──[✗]──→ Wait 200ms ──┐\n                                 ▼\nAttempt 3 ──[✓]──→ Success! Return result\n\nTotal Time: 100ms + 200ms = 300ms (plus processing time)",{"id":2670,"title":2671,"titles":2672,"content":2673,"level":35},"/v1.0.7/reference/connectors/backoff#delay-progression","Delay Progression",[549,2278],"Given a base delay of 1 second: 1st retry: 1 second delay2nd retry: 2 seconds delay3rd retry: 4 seconds delay4th retry: 8 seconds delayAnd so on...",{"id":2675,"title":2134,"titles":2676,"content":29,"level":19},"/v1.0.7/reference/connectors/backoff#methods",[549],{"id":2678,"title":2679,"titles":2680,"content":2681,"level":35},"/v1.0.7/reference/connectors/backoff#setmaxattempts","SetMaxAttempts",[549,2134],"Updates the maximum number of retry attempts. 
func (b *Backoff[T]) SetMaxAttempts(n int) *Backoff[T]",{"id":2683,"title":2684,"titles":2685,"content":2686,"level":35},"/v1.0.7/reference/connectors/backoff#setbasedelay","SetBaseDelay",[549,2134],"Updates the base delay duration. func (b *Backoff[T]) SetBaseDelay(d time.Duration) *Backoff[T]",{"id":2688,"title":2689,"titles":2690,"content":2691,"level":35},"/v1.0.7/reference/connectors/backoff#getmaxattempts","GetMaxAttempts",[549,2134],"Returns the current maximum attempts setting. func (b *Backoff[T]) GetMaxAttempts() int",{"id":2693,"title":2694,"titles":2695,"content":2696,"level":35},"/v1.0.7/reference/connectors/backoff#getbasedelay","GetBaseDelay",[549,2134],"Returns the current base delay setting. func (b *Backoff[T]) GetBaseDelay() time.Duration",{"id":2698,"title":2699,"titles":2700,"content":2701,"level":35},"/v1.0.7/reference/connectors/backoff#identity","Identity",[549,2134],"Returns the identity of this connector. func (b *Backoff[T]) Identity() Identity",{"id":2703,"title":2704,"titles":2705,"content":2706,"level":35},"/v1.0.7/reference/connectors/backoff#schema","Schema",[549,2134],"Returns the schema representation of this connector. 
func (b *Backoff[T]) Schema() Node",{"id":2708,"title":2709,"titles":2710,"content":2711,"level":19},"/v1.0.7/reference/connectors/backoff#basic-usage","Basic Usage",[549],"// Define identities upfront\nvar (\n    APIRetryID = pipz.NewIdentity(\"api-retry\", \"Retry external API calls with exponential backoff\")\n    CallAPIID  = pipz.NewIdentity(\"call-api\", \"Call external API\")\n)\n\n// Retry API calls with exponential backoff\napiCall := pipz.NewBackoff(\n    APIRetryID,\n    pipz.Apply(CallAPIID, func(ctx context.Context, req Request) (Response, error) {\n        return externalAPI.Call(ctx, req)\n    }),\n    5,                    // Max 5 attempts\n    100*time.Millisecond, // Start with 100ms delay\n)\n\n// Delays will be: 100ms, 200ms, 400ms, 800ms",{"id":2713,"title":2181,"titles":2714,"content":29,"level":19},"/v1.0.7/reference/connectors/backoff#common-patterns",[549],{"id":2716,"title":2717,"titles":2718,"content":2719,"level":35},"/v1.0.7/reference/connectors/backoff#network-request-handling","Network Request Handling",[549,2181],"// Define identities upfront\nvar (\n    HTTPBackoffID = pipz.NewIdentity(\"http-backoff\", \"HTTP client with exponential backoff for server errors\")\n    HTTPRequestID = pipz.NewIdentity(\"http-request\", \"Execute HTTP request\")\n)\n\n// Robust HTTP client with backoff\nhttpClient := pipz.NewBackoff(\n    HTTPBackoffID,\n    pipz.Apply(HTTPRequestID, func(ctx context.Context, req HTTPRequest) (HTTPResponse, error) {\n        resp, err := client.Do(req.ToHTTP())\n        if err != nil {\n            return HTTPResponse{}, err\n        }\n\n        // Retry on 5xx errors\n        if resp.StatusCode >= 500 {\n            return HTTPResponse{}, fmt.Errorf(\"server error: %d\", resp.StatusCode)\n        }\n\n        return parseResponse(resp)\n    }),\n    4,                   // 4 attempts total\n    500*time.Millisecond, // Start with 500ms\n)\n// Total possible delay: 500ms + 1s + 2s = 
3.5s",{"id":2721,"title":2722,"titles":2723,"content":2724,"level":35},"/v1.0.7/reference/connectors/backoff#database-operations","Database Operations",[549,2181],"// Define identities upfront\nvar (\n    DBRetryID = pipz.NewIdentity(\"db-retry\", \"Retry database operations with backoff for deadlocks\")\n    UpdateID  = pipz.NewIdentity(\"update\", \"Update database record\")\n)\n\n// Database operations with backoff for lock contention\ndbOperation := pipz.NewBackoff(\n    DBRetryID,\n    pipz.Apply(UpdateID, func(ctx context.Context, data Record) (Record, error) {\n        tx, err := db.BeginTx(ctx, nil)\n        if err != nil {\n            return data, err\n        }\n        defer tx.Rollback()\n\n        // Perform operations\n        if err := updateRecord(tx, data); err != nil {\n            return data, err\n        }\n\n        return data, tx.Commit()\n    }),\n    3,                  // 3 attempts for deadlocks\n    50*time.Millisecond, // Short initial delay\n)",{"id":2726,"title":2727,"titles":2728,"content":2729,"level":35},"/v1.0.7/reference/connectors/backoff#message-queue-processing","Message Queue Processing",[549,2181],"// Define identities upfront\nvar (\n    MessageRetryID  = pipz.NewIdentity(\"message-retry\", \"Retry message processing with exponential backoff\")\n    ProcessMessageID = pipz.NewIdentity(\"process-message\", \"Parse, validate, and send message\")\n    SendID          = pipz.NewIdentity(\"send\", \"Send message to queue\")\n)\n\n// Retry message processing with increasing delays\nmessageProcessor := pipz.NewBackoff(\n    MessageRetryID,\n    pipz.NewSequence(\n        ProcessMessageID,\n        parseMessage,\n        validateMessage,\n        pipz.Apply(SendID, func(ctx context.Context, msg Message) (Message, error) {\n            return queue.Send(ctx, msg)\n        }),\n    ),\n    6,              // More attempts for async operations\n    1*time.Second,  // Start with 1 second\n)\n// Max total delay: 1s + 2s + 4s + 8s + 
16s = 31s",{"id":2731,"title":2732,"titles":2733,"content":2734,"level":35},"/v1.0.7/reference/connectors/backoff#combined-with-circuit-breaker","Combined with Circuit Breaker",[549,2181],"// Define identities upfront\nvar (\n    CircuitID = pipz.NewIdentity(\"circuit\", \"Circuit breaker for external service\")\n    BackoffID = pipz.NewIdentity(\"backoff\", \"Exponential backoff for service calls\")\n)\n\n// Backoff with circuit breaker for external services\nresilientService := pipz.NewCircuitBreaker(\n    CircuitID,\n    pipz.NewBackoff(\n        BackoffID,\n        externalServiceCall,\n        3,                    // Limited attempts before circuit opens\n        200*time.Millisecond,\n    ),\n    10,  // Failure threshold\n    5*time.Minute, // Recovery timeout\n)",{"id":2736,"title":2737,"titles":2738,"content":29,"level":19},"/v1.0.7/reference/connectors/backoff#configuration-patterns","Configuration Patterns",[549],{"id":2740,"title":2741,"titles":2742,"content":2743,"level":35},"/v1.0.7/reference/connectors/backoff#dynamic-configuration","Dynamic Configuration",[549,2737],"// Define identity upfront\nvar DynamicID = pipz.NewIdentity(\"dynamic\", \"Dynamically configured backoff\")\n\nbackoff := pipz.NewBackoff(\n    DynamicID,\n    processor,\n    3,\n    1*time.Second,\n)\n\n// Adjust based on load\nif highLoad() {\n    backoff.SetMaxAttempts(5).SetBaseDelay(2*time.Second)\n} else {\n    backoff.SetMaxAttempts(3).SetBaseDelay(500*time.Millisecond)\n}",{"id":2745,"title":2746,"titles":2747,"content":2748,"level":35},"/v1.0.7/reference/connectors/backoff#environment-based-configuration","Environment-Based Configuration",[549,2737],"// Define identities upfront\nvar (\n    ProdBackoffID  = pipz.NewIdentity(\"prod-backoff\", \"Production backoff configuration\")\n    StageBackoffID = pipz.NewIdentity(\"stage-backoff\", \"Staging backoff configuration\")\n    DevBackoffID   = pipz.NewIdentity(\"dev-backoff\", \"Development backoff configuration\")\n)\n\nfunc 
createBackoff(env string) *Backoff[Data] {\n    switch env {\n    case \"production\":\n        return pipz.NewBackoff(\n            ProdBackoffID,\n            processor, 5, 1*time.Second)\n    case \"staging\":\n        return pipz.NewBackoff(\n            StageBackoffID,\n            processor, 3, 500*time.Millisecond)\n    default:\n        return pipz.NewBackoff(\n            DevBackoffID,\n            processor, 2, 100*time.Millisecond)\n    }\n}",{"id":2750,"title":106,"titles":2751,"content":2752,"level":19},"/v1.0.7/reference/connectors/backoff#error-handling",[549],"Backoff preserves complete error context: result, err := backoff.Process(ctx, data)\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) {\n        // Error path includes backoff name\n        fmt.Printf(\"Failed after %d attempts at: %v\\n\", \n            backoff.GetMaxAttempts(), pipeErr.Path)\n        \n        // Check if it was a timeout during backoff\n        if pipeErr.IsTimeout() {\n            log.Warn(\"Backoff interrupted by timeout\")\n        }\n    }\n}",{"id":2754,"title":2755,"titles":2756,"content":29,"level":19},"/v1.0.7/reference/connectors/backoff#comparison-with-retry","Comparison with Retry",[549],{"id":2758,"title":2759,"titles":2760,"content":2761,"level":35},"/v1.0.7/reference/connectors/backoff#backoff-vs-retry","Backoff vs Retry",[549,2755],"FeatureBackoffRetryDelay patternExponential (1s, 2s, 4s...)Fixed (1s, 1s, 1s...)Use caseTransient failures, overloadQuick failures, network blipsTotal timeGrows exponentiallyGrows linearlySystem loadReduces pressure over timeConstant pressure // Define identities upfront\nvar (\n    ExponentialID = pipz.NewIdentity(\"exponential\", \"Exponential backoff for overloaded systems\")\n    FixedID       = pipz.NewIdentity(\"fixed\", \"Fixed delay retry for brief interruptions\")\n)\n\n// Backoff: Good for overloaded systems\nbackoff := pipz.NewBackoff(\n    ExponentialID,\n    processor, 4, 
1*time.Second)\n// Delays: 1s, 2s, 4s (total: 7s)\n\n// Retry: Good for brief interruptions\nretry := pipz.NewRetry(\n    FixedID,\n    processor, 4, 1*time.Second)\n// Delays: 1s, 1s, 1s (total: 3s)",{"id":2763,"title":2764,"titles":2765,"content":2766,"level":19},"/v1.0.7/reference/connectors/backoff#performance-characteristics","Performance Characteristics",[549],"Memory: O(1) - No additional allocations per retryGoroutines: No additional goroutines createdOverhead: ~10ns + sleep time per retryContext checks: Performed before each retry",{"id":2768,"title":2199,"titles":2769,"content":29,"level":19},"/v1.0.7/reference/connectors/backoff#gotchas",[549],{"id":2771,"title":2772,"titles":2773,"content":2774,"level":35},"/v1.0.7/reference/connectors/backoff#dont-use-tiny-base-delays","❌ Don't use tiny base delays",[549,2199],"// Define identity upfront\nvar TooFastID = pipz.NewIdentity(\"too-fast\", \"Backoff with microsecond delays\")\n\n// WRONG - Delays too small to be meaningful\nbackoff := pipz.NewBackoff(\n    TooFastID,\n    processor, 5, 1*time.Microsecond)\n// Results in: 1μs, 2μs, 4μs, 8μs - essentially no delay",{"id":2776,"title":2777,"titles":2778,"content":2779,"level":35},"/v1.0.7/reference/connectors/backoff#use-meaningful-base-delays","✅ Use meaningful base delays",[549,2199],"// Define identity upfront\nvar ReasonableID = pipz.NewIdentity(\"reasonable\", \"Backoff with reasonable delays for system recovery\")\n\n// RIGHT - Delays that allow recovery\nbackoff := pipz.NewBackoff(\n    ReasonableID,\n    processor, 5, 100*time.Millisecond)\n// Results in: 100ms, 200ms, 400ms, 800ms - gives system time to recover",{"id":2781,"title":2782,"titles":2783,"content":2784,"level":35},"/v1.0.7/reference/connectors/backoff#dont-use-too-many-attempts","❌ Don't use too many attempts",[549,2199],"// Define identity upfront\nvar ExcessiveID = pipz.NewIdentity(\"excessive\", \"Backoff with too many attempts\")\n\n// WRONG - Could wait extremely long\nbackoff := 
pipz.NewBackoff(\n    ExcessiveID,\n    processor, 10, 1*time.Second)\n// Final delay would be 256 seconds (total wait: 8.5 minutes)!",{"id":2786,"title":2787,"titles":2788,"content":2789,"level":35},"/v1.0.7/reference/connectors/backoff#balance-attempts-with-total-delay","✅ Balance attempts with total delay",[549,2199],"// Define identity upfront\nvar BalancedID = pipz.NewIdentity(\"balanced\", \"Backoff with balanced attempts and delays\")\n\n// RIGHT - Reasonable total delay\nbackoff := pipz.NewBackoff(\n    BalancedID,\n    processor, 5, 500*time.Millisecond)\n// Max total: 500ms + 1s + 2s + 4s = 7.5s",{"id":2791,"title":2792,"titles":2793,"content":2794,"level":35},"/v1.0.7/reference/connectors/backoff#dont-ignore-context-cancellation","❌ Don't ignore context cancellation",[549,2199],"// WRONG - Not checking context\nfor i := 0; i \u003C maxAttempts; i++ {\n    result, err := process(data)\n    if err == nil {\n        return result, nil\n    }\n    time.Sleep(delay) // Ignores context!\n    delay *= 2\n}",{"id":2796,"title":2318,"titles":2797,"content":2798,"level":35},"/v1.0.7/reference/connectors/backoff#respect-context-cancellation",[549,2199],"// Define identity upfront\nvar ContextAwareID = pipz.NewIdentity(\"context-aware\", \"Context-aware backoff\")\n\n// RIGHT - Backoff handles this automatically\nbackoff := pipz.NewBackoff(\n    ContextAwareID,\n    processor, 5, 1*time.Second)\n// Automatically stops on context cancellation",{"id":2800,"title":135,"titles":2801,"content":2802,"level":19},"/v1.0.7/reference/connectors/backoff#best-practices",[549],"Start with small delays - Begin with 50-500ms for most operationsLimit max attempts - Usually 3-5 attempts is sufficientCalculate total time - Ensure max total delay is acceptableMatch delay to failure type - Network: 100ms+, Database: 50ms+, API: 500ms+Monitor retry metrics - Track retry rates and success ratesConsider circuit breakers - Combine with circuit breakers for system protectionLog retry attempts - Include 
attempt number in logs for debugging",{"id":2804,"title":2805,"titles":2806,"content":2807,"level":19},"/v1.0.7/reference/connectors/backoff#testing","Testing",[549],"// Define identities upfront\nvar (\n    FlakyID       = pipz.NewIdentity(\"flaky\", \"Flaky processor for testing\")\n    TestID        = pipz.NewIdentity(\"test\", \"Test backoff processor\")\n    SlowID        = pipz.NewIdentity(\"slow\", \"Always-failing processor\")\n    TimeoutTestID = pipz.NewIdentity(\"timeout-test\", \"Backoff with timeout test\")\n)\n\nfunc TestBackoff(t *testing.T) {\n    attempts := 0\n    processor := pipz.Apply(\n        FlakyID,\n        func(ctx context.Context, n int) (int, error) {\n            attempts++\n            if attempts \u003C 3 {\n                return 0, errors.New(\"transient error\")\n            }\n            return n * 2, nil\n        })\n\n    backoff := pipz.NewBackoff(\n        TestID,\n        processor, 5, 10*time.Millisecond)\n\n    start := time.Now()\n    result, err := backoff.Process(context.Background(), 5)\n    duration := time.Since(start)\n\n    assert.NoError(t, err)\n    assert.Equal(t, 10, result)\n    assert.Equal(t, 3, attempts)\n\n    // Verify exponential delays (10ms + 20ms = 30ms minimum)\n    assert.GreaterOrEqual(t, duration, 30*time.Millisecond)\n}\n\nfunc TestBackoffTimeout(t *testing.T) {\n    processor := pipz.Apply(\n        SlowID,\n        func(ctx context.Context, n int) (int, error) {\n            return 0, errors.New(\"always fails\")\n        })\n\n    backoff := pipz.NewBackoff(\n        TimeoutTestID,\n        processor, 10, 100*time.Millisecond)\n\n    ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)\n    defer cancel()\n\n    _, err := backoff.Process(ctx, 5)\n\n    var pipeErr *pipz.Error[int]\n    require.Error(t, err)\n    require.True(t, errors.As(err, &pipeErr))\n    assert.True(t, 
pipeErr.IsTimeout())\n}",{"id":2809,"title":1764,"titles":2810,"content":2811,"level":19},"/v1.0.7/reference/connectors/backoff#see-also",[549],"Retry - Fixed-delay retry patternCircuitBreaker - Prevent cascading failuresTimeout - Bound operation timeFallback - Alternative processing on failure html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html pre.shiki code .soy-K, html code.shiki 
.soy-K{--shiki-default:#BBBBBB}",{"id":2813,"title":519,"titles":2814,"content":2815,"level":9},"/v1.0.7/reference/connectors/circuitbreaker",[],"Prevents cascading failures by stopping requests to failing services and allowing time for recovery",{"id":2817,"title":519,"titles":2818,"content":2819,"level":9},"/v1.0.7/reference/connectors/circuitbreaker#circuitbreaker",[],"Prevents cascading failures by stopping requests to failing services and allowing time for recovery.",{"id":2821,"title":2822,"titles":2823,"content":2824,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#function-signatures","Function Signatures",[519],"// Create circuit breaker with failure threshold and reset timeout\nfunc NewCircuitBreaker[T any](identity Identity, processor Chainable[T], failureThreshold int, resetTimeout time.Duration) *CircuitBreaker[T]",{"id":2826,"title":2268,"titles":2827,"content":2828,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#parameters",[519],"identity (Identity) - Identifier for the connector used in debuggingprocessor (Chainable[T]) - The processor to protect with circuit breakingfailureThreshold (int) - Number of consecutive failures before opening circuitresetTimeout (time.Duration) - Time to wait before attempting recovery",{"id":2830,"title":2273,"titles":2831,"content":2832,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#returns",[519],"Returns a *CircuitBreaker[T] that implements Chainable[T].",{"id":2834,"title":2835,"titles":2836,"content":29,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#testing-configuration","Testing Configuration",[519],{"id":2838,"title":2839,"titles":2840,"content":2841,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#withclock","WithClock",[519,2835],"func (cb *CircuitBreaker[T]) WithClock(clock clockz.Clock) *CircuitBreaker[T] Sets a custom clock implementation for testing purposes. This method enables controlled time manipulation in tests using clockz.FakeClock. 
Parameters: clock (clockz.Clock) - Clock implementation to use Returns:\nReturns the same connector instance for method chaining. Example: // Use fake clock in tests\nfakeClock := clockz.NewFakeClock()\ncb := pipz.NewCircuitBreaker(\n    pipz.NewIdentity(\"test\", \"Test circuit breaker\"),\n    processor, 3, 30*time.Second).\n    WithClock(fakeClock)\n\n// Advance time in test to trigger state transitions\nfakeClock.Advance(31 * time.Second)",{"id":2843,"title":2278,"titles":2844,"content":29,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#behavior",[519],{"id":2846,"title":2847,"titles":2848,"content":2849,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#circuit-states","Circuit States",[519,2278],"The circuit breaker implements the standard three-state pattern: Closed (Normal) - Requests pass through normally, failures are countedOpen (Blocking) - All requests fail immediately without calling the processorHalf-Open (Testing) - Limited requests are allowed to test service recovery",{"id":2851,"title":2852,"titles":2853,"content":2854,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#state-machine-diagram","State Machine Diagram",[519,2278],"┌──────────────────────────────────────────────────────────────────┐\n│                  Circuit Breaker State Machine                   │\n└──────────────────────────────────────────────────────────────────┘\n\n        ┌─────────────────────────────────────┐\n        │            CLOSED                    │\n        │         (Normal Operation)           │\n        │                                      │\n        │  • Requests pass through             │\n        │  • Count consecutive failures        │\n        │  • Reset count on success            │\n        └──────────────┬───────────────────────┘\n                       │\n            failures >= threshold\n                       │\n                       ▼\n        ┌─────────────────────────────────────┐\n        │              OPEN               
     │\n        │          (Failing Fast)              │\n        │                                      │\n        │  • All requests fail immediately     │\n        │  • No calls to protected service     │\n        │  • Wait for reset timeout            │\n        └──────────────┬───────────────────────┘\n                       │\n              after resetTimeout\n                       │\n                       ▼\n        ┌─────────────────────────────────────┐\n        │           HALF-OPEN                  │◄──┐\n        │      (Testing Recovery)              │   │\n        │                                      │   │\n        │  • Limited requests allowed          │   │ any failure\n        │  • Count successes                   │   │\n        │  • Testing service health            │───┘\n        └──────────────┬───────────────────────┘\n                       │\n         successes >= successThreshold\n                       │\n                       ▼\n                  [CLOSED]\n\nState Transition Rules:\n═══════════════════════\nCLOSED → OPEN:      After failureThreshold consecutive failures\nOPEN → HALF-OPEN:   After resetTimeout duration expires\nHALF-OPEN → CLOSED: After successThreshold consecutive successes\nHALF-OPEN → OPEN:   On any failure during half-open state",{"id":2856,"title":2857,"titles":2858,"content":2859,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#state-transitions","State Transitions",[519,2278],"Closed → Open - After failureThreshold consecutive failuresOpen → Half-Open - After resetTimeout durationHalf-Open → Closed - After successThreshold consecutive successesHalf-Open → Open - On any failure during half-open state",{"id":2861,"title":106,"titles":2862,"content":2863,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#error-handling",[519,2278],"Error propagation - Preserves original error information and pathsCircuit context - Adds circuit breaker information to error pathsState awareness - Different errors for 
open vs processor failures",{"id":2865,"title":2866,"titles":2867,"content":2868,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#signals","Signals",[519],"CircuitBreaker emits typed signals at state transitions for observability and monitoring via capitan: SignalWhen EmittedFieldscircuitbreaker.openedCircuit opens after failure threshold reachedname, state, failures, failure_thresholdcircuitbreaker.closedCircuit closes after successful recoveryname, state, successes, success_thresholdcircuitbreaker.half-openCircuit transitions to half-open for testingname, state, generationcircuitbreaker.rejectedRequest rejected while circuit is openname, state, generation Example: import \"github.com/zoobzio/capitan\"\n\n// Hook circuit breaker signals\ncapitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    failures, _ := pipz.FieldFailures.From(e)\n    // Alert or log circuit opening\n}) See Hooks Documentation for complete signal reference and usage examples.",{"id":2870,"title":2871,"titles":2872,"content":2873,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#configuration-methods","Configuration Methods",[519],"// Runtime configuration\nbreaker.SetFailureThreshold(10)               // Change failure threshold\nbreaker.SetSuccessThreshold(3)                // Successes needed to close from half-open\nbreaker.SetResetTimeout(time.Minute)          // Change recovery timeout\n\n// State management\nstate := breaker.GetState()                   // \"closed\", \"open\", or \"half-open\"\nbreaker.Reset()                               // Manually reset to closed state\n\n// Getters\nfailures := breaker.GetFailureThreshold()     // Current failure threshold\nsuccesses := breaker.GetSuccessThreshold()    // Current success threshold\ntimeout := breaker.GetResetTimeout()          // Current reset 
timeout",{"id":2875,"title":2283,"titles":2876,"content":2877,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#example",[519],"// Define identities upfront\nvar (\n    APIBreakerID    = pipz.NewIdentity(\"api-breaker\", \"Circuit breaker for external API\")\n    ExternalAPIID   = pipz.NewIdentity(\"external-api\", \"Call external API\")\n    ResilientAPIID  = pipz.NewIdentity(\"resilient-api\", \"Resilient API call pipeline\")\n    RetryID         = pipz.NewIdentity(\"retry\", \"Retry API calls\")\n)\n\n// Basic circuit breaker - open after 5 failures, try recovery after 30 seconds\nbreaker := pipz.NewCircuitBreaker(APIBreakerID,\n    pipz.Apply(ExternalAPIID, callExternalAPI),\n    5,                    // Open after 5 consecutive failures\n    30*time.Second,       // Try recovery after 30 seconds\n)\n\n// Use in a resilient pipeline\nresilientAPI := pipz.NewSequence(ResilientAPIID,\n    breaker,\n    pipz.NewRetry(RetryID, apiCall, 3),\n)\n\n// Runtime configuration\nbreaker.SetFailureThreshold(10)               // More tolerant during peak hours\nbreaker.SetSuccessThreshold(3)                // Need 3 successes to fully recover\n\n// Monitoring circuit state\nif breaker.GetState() == \"open\" {\n    log.Warn(\"Circuit breaker is open - service may be down\")\n}\n\n// Manual intervention\nif emergencyRecovery {\n    breaker.Reset()  // Force reset during maintenance\n}",{"id":2879,"title":2292,"titles":2880,"content":2881,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#when-to-use",[519],"Use CircuitBreaker when: Calling external services that may fail (APIs, databases, microservices)Preventing cascade failures in distributed systemsProtecting against downstream service degradationGiving failing services time to recoverImplementing fast failure for better user experienceReducing load on struggling services Use with low thresholds (3-5) when: Services fail completely rather than graduallyFast failure is more important than tryingYou have good 
fallback mechanisms Use with higher thresholds (10-20) when: Services have intermittent issuesTemporary failures are commonYou want to be tolerant of occasional errors",{"id":2883,"title":2297,"titles":2884,"content":2885,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#when-not-to-use",[519],"Don't use CircuitBreaker when: Calling internal services that should always workFailures are permanent (validation errors, business logic)You just need retries (use Retry - simpler)The service has no failure patternsEvery request is unique and independent",{"id":2887,"title":2888,"titles":2889,"content":2890,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#error-messages","Error Messages",[519],"CircuitBreaker provides detailed error information: var PaymentBreakerID = pipz.NewIdentity(\"payment-breaker\", \"Circuit breaker for payment processing\")\nbreaker := pipz.NewCircuitBreaker(PaymentBreakerID, paymentProcessor, 3, time.Minute)\n\n_, err := breaker.Process(ctx, payment)\nif err != nil {\n    var pipeErr *pipz.Error[Payment]\n    if errors.As(err, &pipeErr) {\n        // Error path shows circuit breaker involvement\n        // Example: \"payment-pipeline → payment-breaker → payment-processor failed after 1.2s: connection timeout\"\n        // Or: \"payment-breaker failed after 0s: circuit breaker is open\"\n        \n        if strings.Contains(err.Error(), \"circuit breaker is open\") {\n            // Handle open circuit differently\n            log.Info(\"Payment service circuit is open, using fallback\")\n        }\n    }\n}",{"id":2892,"title":2181,"titles":2893,"content":2894,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#common-patterns",[519],"// Define identities upfront\nvar (\n    DBBreakerID      = pipz.NewIdentity(\"db-breaker\", \"Circuit breaker for database operations\")\n    ExecuteQueryID   = pipz.NewIdentity(\"execute-query\", \"Execute database query\")\n)\n\n// Database operations with circuit breaker\ndbConnection := 
pipz.NewCircuitBreaker(DBBreakerID,\n    pipz.Apply(ExecuteQueryID, runDatabaseQuery),\n    5,                    // Open after 5 database failures\n    time.Minute,          // Try reconnection after 1 minute\n)\n\n// HTTP client with multiple protection layers\nvar (\n    ProtectedHTTPID  = pipz.NewIdentity(\"protected-http\", \"HTTP client with protection layers\")\n    RequestTimeoutID = pipz.NewIdentity(\"request-timeout\", \"Request timeout wrapper\")\n    HTTPBreakerID    = pipz.NewIdentity(\"http-breaker\", \"HTTP circuit breaker\")\n    HTTPRetryID      = pipz.NewIdentity(\"http-retry\", \"HTTP retry handler\")\n    HTTPCallID       = pipz.NewIdentity(\"http-call\", \"HTTP request call\")\n)\n\nresilientHTTP := pipz.NewSequence(ProtectedHTTPID,\n    pipz.NewTimeout(RequestTimeoutID,\n        pipz.NewCircuitBreaker(HTTPBreakerID,\n            pipz.NewRetry(HTTPRetryID,\n                pipz.Apply(HTTPCallID, makeHTTPRequest),\n                3,\n            ),\n            10,                // Open after 10 failures\n            2*time.Minute,     // Try recovery after 2 minutes\n        ),\n        30*time.Second,        // Overall timeout\n    ),\n)\n\n// Service mesh pattern\nvar (\n    ServiceMeshID     = pipz.NewIdentity(\"service-mesh\", \"Service mesh with fallback\")\n    PrimaryServiceID  = pipz.NewIdentity(\"primary-service\", \"Primary service circuit breaker\")\n    SecondaryServiceID = pipz.NewIdentity(\"secondary-service\", \"Secondary service circuit breaker\")\n)\n\nserviceCall := pipz.NewFallback(ServiceMeshID,\n    pipz.NewCircuitBreaker(PrimaryServiceID,\n        primaryServiceCall,\n        5, 30*time.Second,\n    ),\n    pipz.NewCircuitBreaker(SecondaryServiceID,\n        secondaryServiceCall,\n        3, time.Minute,\n    ),\n)\n\n// Microservice with graceful degradation\nvar (\n    UserServiceID  = pipz.NewIdentity(\"user-service\", \"User service router\")\n    FullServiceID  = pipz.NewIdentity(\"full-service\", \"Full user 
service circuit breaker\")\n    BasicServiceID = pipz.NewIdentity(\"basic-service\", \"Basic user service circuit breaker\")\n)\n\nuserService := pipz.NewSwitch(UserServiceID, checkServiceHealth).\n    AddRoute(\"healthy\",\n        pipz.NewCircuitBreaker(FullServiceID,\n            fullUserService,\n            5, time.Minute,\n        ),\n    ).\n    AddRoute(\"degraded\",\n        pipz.NewCircuitBreaker(BasicServiceID,\n            basicUserService,\n            10, 30*time.Second,  // More tolerant in degraded mode\n        ),\n    )",{"id":2896,"title":2199,"titles":2897,"content":29,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#gotchas",[519],{"id":2899,"title":2900,"titles":2901,"content":2902,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#dont-create-circuit-breakers-per-request","❌ Don't create circuit breakers per request",[519,2199],"// WRONG - New breaker each time, no shared state!\nfunc handleRequest(req Request) Response {\n    breakerID := pipz.NewIdentity(\"api\", \"API circuit breaker\")\n    breaker := pipz.NewCircuitBreaker(breakerID, apiCall, 5, time.Minute)\n    return breaker.Process(ctx, req) // Useless! 
New Identity each call\n}",{"id":2904,"title":2905,"titles":2906,"content":2907,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#create-once-reuse","✅ Create once, reuse",[519,2199],"// RIGHT - Shared state across requests with package-level Identity\nvar APIBreakerID = pipz.NewIdentity(\"api\", \"API circuit breaker\")\nvar apiBreaker = pipz.NewCircuitBreaker(APIBreakerID, apiCall, 5, time.Minute)\n\nfunc handleRequest(req Request) Response {\n    return apiBreaker.Process(ctx, req)\n}",{"id":2909,"title":2910,"titles":2911,"content":2912,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#dont-use-for-permanent-errors","❌ Don't use for permanent errors",[519,2199],"// WRONG - Validation errors aren't transient\nvar (\n    ValidationBreakerID = pipz.NewIdentity(\"validation\", \"Validation circuit breaker\")\n    ValidateID          = pipz.NewIdentity(\"validate\", \"Validate data\")\n)\n\nbreaker := pipz.NewCircuitBreaker(ValidationBreakerID,\n    pipz.Apply(ValidateID, validateData), // Always fails for bad data\n    3, time.Minute,\n)",{"id":2914,"title":2915,"titles":2916,"content":2917,"level":35},"/v1.0.7/reference/connectors/circuitbreaker#only-protect-transient-failures","✅ Only protect transient failures",[519,2199],"// RIGHT - Network calls can recover\nvar (\n    NetworkBreakerID = pipz.NewIdentity(\"network\", \"Network circuit breaker\")\n    APIID            = pipz.NewIdentity(\"api\", \"API call\")\n)\n\nbreaker := pipz.NewCircuitBreaker(NetworkBreakerID,\n    pipz.Apply(APIID, callExternalAPI),\n    5, time.Minute,\n)",{"id":2919,"title":111,"titles":2920,"content":2921,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#advanced-patterns",[519],"// Circuit breaker with custom recovery logic\nvar (\n    SmartBreakerID    = pipz.NewIdentity(\"smart-breaker\", \"Smart circuit breaker with recovery\")\n    CircuitID         = pipz.NewIdentity(\"circuit\", \"Inner circuit breaker\")\n    RecoveryHandlerID = 
pipz.NewIdentity(\"recovery-handler\", \"Error recovery router\")\n    NotifyOpsID       = pipz.NewIdentity(\"notify-ops\", \"Notify operations team\")\n    LogErrorID        = pipz.NewIdentity(\"log-error\", \"Log error\")\n)\n\nsmartBreaker := pipz.NewHandle(SmartBreakerID,\n    pipz.NewCircuitBreaker(CircuitID,\n        riskyOperation,\n        5, time.Minute,\n    ),\n    pipz.NewSwitch(RecoveryHandlerID,\n        func(ctx context.Context, err *pipz.Error[Data]) string {\n            if strings.Contains(err.Err.Error(), \"circuit breaker is open\") {\n                return \"circuit-open\"\n            }\n            return \"other-error\"\n        },\n    ).\n    AddRoute(\"circuit-open\",\n        pipz.Effect(NotifyOpsID, notifyOperations),\n    ).\n    AddRoute(\"other-error\",\n        pipz.Effect(LogErrorID, logError),\n    ),\n)\n\n// Multi-tier circuit breaking\nvar (\n    TieredProtectionID = pipz.NewIdentity(\"tiered-protection\", \"Multi-tier circuit breaking\")\n    ServiceBreakerID   = pipz.NewIdentity(\"service-breaker\", \"Service-level circuit breaker\")\n    EndpointBreakerID  = pipz.NewIdentity(\"endpoint-breaker\", \"Endpoint-level circuit breaker\")\n)\n\ntieredBreaker := pipz.NewSequence(TieredProtectionID,\n    pipz.NewCircuitBreaker(ServiceBreakerID,     // Service-level protection\n        pipz.NewCircuitBreaker(EndpointBreakerID, // Endpoint-level protection\n            endpointCall,\n            3, 30*time.Second,\n        ),\n        10, 2*time.Minute,\n    ),\n)\n\n// Circuit breaker with metrics\ntype MetricsCircuitBreaker[T any] struct {\n    breaker *pipz.CircuitBreaker[T]\n    metrics MetricsCollector\n}\n\nfunc (m *MetricsCircuitBreaker[T]) Process(ctx context.Context, data T) (T, error) {\n    state := m.breaker.GetState()\n    m.metrics.RecordGauge(\"circuit.state\", stateToFloat(state))\n    \n    result, err := m.breaker.Process(ctx, data)\n    \n    if err != nil {\n        if strings.Contains(err.Error(), \"circuit 
breaker is open\") {\n            m.metrics.Increment(\"circuit.blocked\")\n        } else {\n            m.metrics.Increment(\"circuit.failures\")\n        }\n    } else {\n        m.metrics.Increment(\"circuit.successes\")\n    }\n    \n    return result, err\n}\n\n// Adaptive circuit breaker\ntype AdaptiveCircuitBreaker[T any] struct {\n    breaker      *pipz.CircuitBreaker[T]\n    errorRate    float64\n    requestCount int\n    mu           sync.Mutex\n}\n\nfunc (a *AdaptiveCircuitBreaker[T]) Process(ctx context.Context, data T) (T, error) {\n    a.mu.Lock()\n    a.requestCount++\n    \n    // Adjust threshold based on error rate\n    if a.requestCount%100 == 0 {\n        if a.errorRate > 0.5 {\n            a.breaker.SetFailureThreshold(3)  // More sensitive\n        } else if a.errorRate \u003C 0.1 {\n            a.breaker.SetFailureThreshold(10) // Less sensitive\n        }\n    }\n    a.mu.Unlock()\n    \n    result, err := a.breaker.Process(ctx, data)\n    \n    a.mu.Lock()\n    if err != nil {\n        a.errorRate = a.errorRate*0.9 + 0.1\n    } else {\n        a.errorRate = a.errorRate * 0.99\n    }\n    a.mu.Unlock()\n    \n    return result, err\n}\n\n// Circuit breaker with health checks\nvar (\n    HealthAwareID       = pipz.NewIdentity(\"health-aware\", \"Health-aware circuit breaker\")\n    HealthCheckID       = pipz.NewIdentity(\"health-check\", \"Service health check\")\n    ProtectedServiceID  = pipz.NewIdentity(\"protected-service\", \"Protected service circuit breaker\")\n)\n\nhealthAwareBreaker := pipz.NewSequence(HealthAwareID,\n    pipz.Apply(HealthCheckID, func(ctx context.Context, req Request) (Request, error) {\n        if !healthChecker.IsHealthy() {\n            return req, errors.New(\"service unhealthy\")\n        }\n        return req, nil\n    }),\n    pipz.NewCircuitBreaker(ProtectedServiceID,\n        serviceCall,\n        5, time.Minute,\n    
),\n)",{"id":2923,"title":2924,"titles":2925,"content":2926,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#state-management","State Management",[519],"// Monitor circuit state\nfunc monitorCircuit(breaker *pipz.CircuitBreaker[Request]) {\n    ticker := time.NewTicker(10 * time.Second)\n    defer ticker.Stop()\n    \n    for range ticker.C {\n        state := breaker.GetState()\n        switch state {\n        case \"open\":\n            log.Warn(\"Circuit is open - service may be down\")\n            // Trigger alerts, health checks, etc.\n        case \"half-open\":\n            log.Info(\"Circuit is half-open - testing service recovery\")\n        case \"closed\":\n            log.Debug(\"Circuit is closed - service operating normally\")\n        }\n    }\n}\n\n// Coordinated circuit management\ntype CircuitManager struct {\n    circuits map[string]*pipz.CircuitBreaker[any]\n    mu       sync.RWMutex\n}\n\nfunc (cm *CircuitManager) GetCircuitStates() map[string]string {\n    cm.mu.RLock()\n    defer cm.mu.RUnlock()\n    \n    states := make(map[string]string)\n    for name, circuit := range cm.circuits {\n        states[name] = circuit.GetState()\n    }\n    return states\n}\n\nfunc (cm *CircuitManager) ResetAllCircuits() {\n    cm.mu.RLock()\n    defer cm.mu.RUnlock()\n    \n    for name, circuit := range cm.circuits {\n        circuit.Reset()\n        log.Infof(\"Reset circuit: %s\", name)\n    }\n}",{"id":2928,"title":2764,"titles":2929,"content":2930,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#performance-characteristics",[519],"Closed state - ~67ns per operation, minimal overheadOpen state - ~443ns per operation, fast failureHalf-open state - Similar to closed, with state trackingMemory usage - Minimal, constant per circuit breakerThread safety - Fully concurrent, uses efficient locking",{"id":2932,"title":1764,"titles":2933,"content":2934,"level":19},"/v1.0.7/reference/connectors/circuitbreaker#see-also",[519],"RateLimiter - For 
controlling request ratesRetry - Often combined with circuit breakersFallback - For alternative processorsTimeout - For time-based failure detectionHandle - For custom error handling patterns html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .scyPU, html code.shiki 
.scyPU{--shiki-default:var(--shiki-placeholder)}",{"id":2936,"title":2937,"titles":2938,"content":2939,"level":9},"/v1.0.7/reference/connectors/concurrent","Concurrent",[],"Runs multiple processors in parallel with isolated data copies and optional result aggregation",{"id":2941,"title":2937,"titles":2942,"content":2943,"level":9},"/v1.0.7/reference/connectors/concurrent#concurrent",[],"Runs multiple processors in parallel with isolated data copies.",{"id":2945,"title":2263,"titles":2946,"content":2947,"level":19},"/v1.0.7/reference/connectors/concurrent#function-signature",[2937],"func NewConcurrent[T Cloner[T]](\n    identity Identity,\n    reducer func(original T, results map[Identity]T, errors map[Identity]error) T,\n    processors ...Chainable[T],\n) *Concurrent[T]",{"id":2949,"title":2043,"titles":2950,"content":2951,"level":19},"/v1.0.7/reference/connectors/concurrent#type-constraints",[2937],"T must implement the Cloner[T] interface:\ntype Cloner[T any] interface {\n    Clone() T\n}",{"id":2953,"title":2268,"titles":2954,"content":2955,"level":19},"/v1.0.7/reference/connectors/concurrent#parameters",[2937],"identity (Identity) - Identifier with name and description for debuggingreducer - Optional function to aggregate results; if nil, returns original input unchangedprocessors - Variable number of processors to run concurrently",{"id":2957,"title":2273,"titles":2958,"content":2959,"level":19},"/v1.0.7/reference/connectors/concurrent#returns",[2937],"Returns a *Concurrent[T] that implements Chainable[T].",{"id":2961,"title":2278,"titles":2962,"content":2963,"level":19},"/v1.0.7/reference/connectors/concurrent#behavior",[2937],"Parallel execution - All processors run simultaneouslyData isolation - Each processor receives a clone of the inputNon-failing - Individual failures don't stop other processorsWait for all - Waits for all processors to completeTwo modes:\nWithout reducer (nil) - Returns the original input unchangedWith reducer - Collects all results 
and errors, then calls reducer to produce final outputContext preservation - Passes original context to all processors, preserving distributed tracing and context valuesCancellation support - Parent context cancellation affects all child processors",{"id":2965,"title":2966,"titles":2967,"content":29,"level":19},"/v1.0.7/reference/connectors/concurrent#examples","Examples",[2937],{"id":2969,"title":2970,"titles":2971,"content":2972,"level":35},"/v1.0.7/reference/connectors/concurrent#without-reducer-side-effects","Without Reducer (Side Effects)",[2937,2966],"// Define a type that implements Cloner\ntype User struct {\n    ID      string\n    Name    string\n    Email   string\n    Tags    []string\n}\n\nfunc (u User) Clone() User {\n    tags := make([]string, len(u.Tags))\n    copy(tags, u.Tags)\n    return User{\n        ID:    u.ID,\n        Name:  u.Name,\n        Email: u.Email,\n        Tags:  tags,\n    }\n}\n\n// Define identities upfront\nvar (\n    NotifyAllID = pipz.NewIdentity(\"notify-all\", \"Send notifications to all channels in parallel\")\n    EmailID     = pipz.NewIdentity(\"email\", \"Send email notification\")\n    SMSID       = pipz.NewIdentity(\"sms\", \"Send SMS notification\")\n    PushID      = pipz.NewIdentity(\"push\", \"Send push notification\")\n    AuditID     = pipz.NewIdentity(\"audit\", \"Log to audit trail\")\n)\n\n// Create concurrent processor without reducer\nnotifications := pipz.NewConcurrent(\n    NotifyAllID,\n    nil, // No reducer - just run side effects\n    pipz.Effect(EmailID, sendEmailNotification),\n    pipz.Effect(SMSID, sendSMSNotification),\n    pipz.Effect(PushID, sendPushNotification),\n    pipz.Effect(AuditID, logToAuditTrail),\n)\n\n// Define pipeline identities\nvar (\n    UserUpdateID = pipz.NewIdentity(\"user-update\", \"Process user update with notifications\")\n    ValidateID   = pipz.NewIdentity(\"validate\", \"Validate user data\")\n    UpdateID     = pipz.NewIdentity(\"update\", \"Update user in 
database\")\n)\n\n// Use in a pipeline\npipeline := pipz.NewSequence[User](\n    UserUpdateID,\n    pipz.Apply(ValidateID, validateUser),\n    pipz.Apply(UpdateID, updateDatabase),\n    notifications, // All notifications sent in parallel\n)",{"id":2974,"title":2975,"titles":2976,"content":2977,"level":35},"/v1.0.7/reference/connectors/concurrent#with-reducer-aggregate-results","With Reducer (Aggregate Results)",[2937,2966],"type PriceCheck struct {\n    ProductID string\n    BestPrice float64\n}\n\nfunc (p PriceCheck) Clone() PriceCheck {\n    return p\n}\n\n// Define identities upfront\nvar (\n    CheckPricesID = pipz.NewIdentity(\"check-prices\", \"Check prices across multiple vendors for best price\")\n    AmazonID      = pipz.NewIdentity(\"amazon\", \"Check Amazon price\")\n    WalmartID     = pipz.NewIdentity(\"walmart\", \"Check Walmart price\")\n    TargetID      = pipz.NewIdentity(\"target\", \"Check Target price\")\n)\n\n// Reducer function to find the best price\nreducer := func(original PriceCheck, results map[pipz.Identity]PriceCheck, errors map[pipz.Identity]error) PriceCheck {\n    bestPrice := original.BestPrice\n    for _, result := range results {\n        if result.BestPrice > 0 && result.BestPrice \u003C bestPrice {\n            bestPrice = result.BestPrice\n        }\n    }\n    return PriceCheck{\n        ProductID: original.ProductID,\n        BestPrice: bestPrice,\n    }\n}\n\n// Check prices from multiple vendors concurrently\npriceChecker := pipz.NewConcurrent(\n    CheckPricesID,\n    reducer,\n    pipz.Transform(AmazonID, checkAmazonPrice),\n    pipz.Transform(WalmartID, checkWalmartPrice),\n    pipz.Transform(TargetID, checkTargetPrice),\n)\n\n// Returns PriceCheck with the lowest price found\nresult, _ := priceChecker.Process(ctx, PriceCheck{ProductID: \"abc123\", BestPrice: 999.99})",{"id":2979,"title":2292,"titles":2980,"content":2981,"level":19},"/v1.0.7/reference/connectors/concurrent#when-to-use",[2937],"Use Concurrent when: 
Operations are independent and can run in parallelYou want to fire multiple actions simultaneouslySide effects can run in parallel (notifications, logging)You need to aggregate results from multiple parallel sourcesIndividual failures shouldn't affect othersYou need to notify multiple systemsPerformance benefit from parallelizationYou want to collect data from multiple APIs concurrently",{"id":2983,"title":2297,"titles":2984,"content":2985,"level":19},"/v1.0.7/reference/connectors/concurrent#when-not-to-use",[2937],"Don't use Concurrent when: Operations must run in order (use Sequence)Type doesn't implement Cloner[T] (compilation error)You need to stop on first error (all run to completion)Operations share state or resources (race conditions)You need fastest result only (use Race)",{"id":2987,"title":106,"titles":2988,"content":29,"level":19},"/v1.0.7/reference/connectors/concurrent#error-handling",[2937],{"id":2990,"title":2991,"titles":2992,"content":2993,"level":35},"/v1.0.7/reference/connectors/concurrent#without-reducer","Without Reducer",[2937,106],"Concurrent continues even if some processors fail: // Define identities upfront\nvar (\n    MultiSaveID = pipz.NewIdentity(\"multi-save\", \"Save data to multiple destinations\")\n    PrimaryID   = pipz.NewIdentity(\"primary\", \"Save to primary storage\")\n    BackupID    = pipz.NewIdentity(\"backup\", \"Save to backup storage\")\n    CacheID     = pipz.NewIdentity(\"cache\", \"Update cache\")\n)\n\nconcurrent := pipz.NewConcurrent(\n    MultiSaveID,\n    nil, // No reducer\n    pipz.Apply(PrimaryID, saveToPrimary),   // Might fail\n    pipz.Apply(BackupID, saveToBackup),     // Still runs\n    pipz.Effect(CacheID, updateCache),      // Still runs\n)\n\n// The original data is returned regardless of individual failures\nresult, err := concurrent.Process(ctx, data)\n// err is nil even if some processors failed\n// result is the original 
data",{"id":2995,"title":2996,"titles":2997,"content":2998,"level":35},"/v1.0.7/reference/connectors/concurrent#with-reducer","With Reducer",[2937,106],"Errors are collected in the errors map passed to the reducer: reducer := func(original Data, results map[pipz.Identity]Data, errors map[pipz.Identity]error) Data {\n    if len(errors) > 0 {\n        // Handle errors - maybe log them or set a flag\n        for id, err := range errors {\n            log.Printf(\"processor %s failed: %v\", id.Name(), err)\n        }\n    }\n    // Merge successful results\n    merged := original\n    for _, result := range results {\n        merged = mergeData(merged, result)\n    }\n    return merged\n}",{"id":3000,"title":445,"titles":3001,"content":3002,"level":19},"/v1.0.7/reference/connectors/concurrent#performance-considerations",[2937],"Creates one goroutine per processorRequires data cloning (allocation cost)All processors run even if some finish earlyContext cancellation stops waiting processors",{"id":3004,"title":2181,"titles":3005,"content":29,"level":19},"/v1.0.7/reference/connectors/concurrent#common-patterns",[2937],{"id":3007,"title":3008,"titles":3009,"content":3010,"level":35},"/v1.0.7/reference/connectors/concurrent#side-effects-pattern-no-reducer","Side Effects Pattern (No Reducer)",[2937,2181],"// Define identities for parallel notifications\nvar (\n    NotificationsID = pipz.NewIdentity(\"notifications\", \"Send welcome notifications across all channels\")\n    WelcomeEmailID  = pipz.NewIdentity(\"email\", \"Send welcome email\")\n    WelcomeSMSID    = pipz.NewIdentity(\"sms\", \"Send welcome SMS\")\n    CRMID           = pipz.NewIdentity(\"crm\", \"Update CRM system\")\n    SignupAnalyticsID = pipz.NewIdentity(\"analytics\", \"Track signup event\")\n)\n\n// Parallel notifications\nuserNotifications := pipz.NewConcurrent(\n    NotificationsID,\n    nil, // No reducer needed\n    pipz.Effect(WelcomeEmailID, sendWelcomeEmail),\n    pipz.Effect(WelcomeSMSID, 
sendWelcomeSMS),\n    pipz.Effect(CRMID, updateCRM),\n    pipz.Effect(SignupAnalyticsID, trackSignup),\n)\n\n// Define identities for parallel data distribution\nvar (\n    DistributeID      = pipz.NewIdentity(\"distribute\", \"Distribute data to multiple systems\")\n    ElasticsearchID   = pipz.NewIdentity(\"elasticsearch\", \"Index in Elasticsearch\")\n    RedisID           = pipz.NewIdentity(\"redis\", \"Cache in Redis\")\n    S3ID              = pipz.NewIdentity(\"s3\", \"Upload to S3\")\n    MetricsID         = pipz.NewIdentity(\"metrics\", \"Record metrics\")\n)\n\n// Parallel data distribution\ndistribute := pipz.NewConcurrent(\n    DistributeID,\n    nil,\n    pipz.Apply(ElasticsearchID, indexInElastic),\n    pipz.Apply(RedisID, cacheInRedis),\n    pipz.Apply(S3ID, uploadToS3),\n    pipz.Effect(MetricsID, recordMetrics),\n)\n\n// Define identities for order processing\nvar (\n    OrderFlowID       = pipz.NewIdentity(\"order-flow\", \"Process order from validation to post-payment\")\n    ValidateOrderID   = pipz.NewIdentity(\"validate\", \"Validate order details\")\n    PaymentID         = pipz.NewIdentity(\"payment\", \"Process payment\")\n    PostPaymentID     = pipz.NewIdentity(\"post-payment\", \"Execute post-payment operations in parallel\")\n    InventoryID       = pipz.NewIdentity(\"inventory\", \"Update inventory\")\n    ShippingID        = pipz.NewIdentity(\"shipping\", \"Create shipping label\")\n    ConfirmationID    = pipz.NewIdentity(\"email\", \"Send confirmation email\")\n    RevenueAnalyticsID = pipz.NewIdentity(\"analytics\", \"Track revenue\")\n)\n\n// Multi-channel processing\nprocessOrder := pipz.NewSequence[Order](\n    OrderFlowID,\n    pipz.Apply(ValidateOrderID, validateOrder),\n    pipz.Apply(PaymentID, processPayment),\n    pipz.NewConcurrent(\n        PostPaymentID,\n        nil,\n        pipz.Effect(InventoryID, updateInventory),\n        pipz.Effect(ShippingID, createShippingLabel),\n        pipz.Effect(ConfirmationID, 
sendConfirmation),\n        pipz.Effect(RevenueAnalyticsID, trackRevenue),\n    ),\n)",{"id":3012,"title":3013,"titles":3014,"content":3015,"level":35},"/v1.0.7/reference/connectors/concurrent#aggregation-pattern-with-reducer","Aggregation Pattern (With Reducer)",[2937,2181],"// Merge enrichment data from multiple sources\ntype Product struct {\n    ID          string\n    Name        string\n    Description string\n    Reviews     []Review\n    Inventory   int\n    Price       float64\n}\n\n// Define identities upfront so we can reference them in the reducer\nvar (\n    EnrichProductID = pipz.NewIdentity(\"enrich-product\", \"Enrich product with data from multiple services\")\n    ReviewsID       = pipz.NewIdentity(\"reviews\", \"Fetch product reviews\")\n    ProductInventoryID = pipz.NewIdentity(\"inventory\", \"Fetch inventory levels\")\n    PricingID       = pipz.NewIdentity(\"pricing\", \"Fetch current pricing\")\n)\n\nenrichReducer := func(original Product, results map[pipz.Identity]Product, errors map[pipz.Identity]error) Product {\n    enriched := original\n\n    // Merge reviews from review service\n    if r, ok := results[ReviewsID]; ok {\n        enriched.Reviews = r.Reviews\n    }\n\n    // Merge inventory from warehouse service\n    if inv, ok := results[ProductInventoryID]; ok {\n        enriched.Inventory = inv.Inventory\n    }\n\n    // Merge pricing from pricing service\n    if price, ok := results[PricingID]; ok {\n        enriched.Price = price.Price\n    }\n\n    return enriched\n}\n\nenrichProduct := pipz.NewConcurrent(\n    EnrichProductID,\n    enrichReducer,\n    pipz.Transform(ReviewsID, fetchReviews),\n    pipz.Transform(ProductInventoryID, fetchInventory),\n    pipz.Transform(PricingID, 
fetchPricing),\n)",{"id":3017,"title":2199,"titles":3018,"content":29,"level":19},"/v1.0.7/reference/connectors/concurrent#gotchas",[2937],{"id":3020,"title":3021,"titles":3022,"content":3023,"level":35},"/v1.0.7/reference/connectors/concurrent#dont-forget-to-use-reducer-if-you-need-results","❌ Don't forget to use reducer if you need results",[2937,2199],"// WRONG - Expecting modified data without reducer\nvar (\n    ModifyID = pipz.NewIdentity(\"modify\", \"Modify values\")\n    DoubleID = pipz.NewIdentity(\"double\", \"Double the value\")\n)\n\nconcurrent := pipz.NewConcurrent(\n    ModifyID,\n    nil, // No reducer!\n    pipz.Transform(DoubleID, func(ctx context.Context, n int) int {\n        return n * 2 // Result is discarded!\n    }),\n)\nresult, _ := concurrent.Process(ctx, 5)\n// result is still 5, not 10!",{"id":3025,"title":3026,"titles":3027,"content":3028,"level":35},"/v1.0.7/reference/connectors/concurrent#use-reducer-when-you-need-results","✅ Use reducer when you need results",[2937,2199],"// Define identities upfront\nvar (\n    SumID    = pipz.NewIdentity(\"sum\", \"Sum original value with results\")\n    DoubleID = pipz.NewIdentity(\"double\", \"Double the value\")\n)\n\n// RIGHT - Reducer aggregates results\nreducer := func(original int, results map[pipz.Identity]int, errors map[pipz.Identity]error) int {\n    sum := original\n    for _, v := range results {\n        sum += v\n    }\n    return sum\n}\n\nconcurrent := pipz.NewConcurrent(\n    SumID,\n    reducer,\n    pipz.Transform(DoubleID, func(ctx context.Context, n int) int {\n        return n * 2\n    }),\n)\nresult, _ := concurrent.Process(ctx, 5)\n// result is now 15 (5 + 10)",{"id":3030,"title":3031,"titles":3032,"content":3033,"level":35},"/v1.0.7/reference/connectors/concurrent#or-use-nil-reducer-for-side-effects-only","✅ Or use nil reducer for side effects only",[2937,2199],"// Define identities upfront\nvar (\n    EffectsID = pipz.NewIdentity(\"effects\", \"Execute side effects in 
parallel\")\n    LogID     = pipz.NewIdentity(\"log\", \"Log data\")\n    MetricsID = pipz.NewIdentity(\"metrics\", \"Update metrics\")\n)\n\n// RIGHT - Side effects, not transformations\nconcurrent := pipz.NewConcurrent(\n    EffectsID,\n    nil,\n    pipz.Effect(LogID, logData),\n    pipz.Effect(MetricsID, updateMetrics),\n)",{"id":3035,"title":3036,"titles":3037,"content":3038,"level":35},"/v1.0.7/reference/connectors/concurrent#dont-share-state-between-processors","❌ Don't share state between processors",[2937,2199],"// WRONG - Race condition!\nvar counter int\n\nvar (\n    RacyID = pipz.NewIdentity(\"racy\", \"Increment counter (has race condition)\")\n    Inc1ID = pipz.NewIdentity(\"inc1\", \"Increment counter\")\n    Inc2ID = pipz.NewIdentity(\"inc2\", \"Increment counter again\")\n)\n\nconcurrent := pipz.NewConcurrent(\n    RacyID,\n    pipz.Effect(Inc1ID, func(ctx context.Context, _ Data) error {\n        counter++ // Race!\n        return nil\n    }),\n    pipz.Effect(Inc2ID, func(ctx context.Context, _ Data) error {\n        counter++ // Race!\n        return nil\n    }),\n)",{"id":3040,"title":3041,"titles":3042,"content":3043,"level":35},"/v1.0.7/reference/connectors/concurrent#use-proper-synchronization-or-avoid-shared-state","✅ Use proper synchronization or avoid shared state",[2937,2199],"// Define identities upfront\nvar (\n    SafeID = pipz.NewIdentity(\"safe\", \"Save to multiple databases independently\")\n    DB1ID  = pipz.NewIdentity(\"db1\", \"Save to database 1\")\n    DB2ID  = pipz.NewIdentity(\"db2\", \"Save to database 2\")\n)\n\n// RIGHT - No shared state\nconcurrent := pipz.NewConcurrent(\n    SafeID,\n    pipz.Effect(DB1ID, saveToDatabase1),\n    pipz.Effect(DB2ID, saveToDatabase2),\n)",{"id":3045,"title":3046,"titles":3047,"content":3048,"level":19},"/v1.0.7/reference/connectors/concurrent#implementation-requirements","Implementation Requirements",[2937],"Your type must implement Clone() correctly: // Simple struct\ntype Event struct 
{\n    ID        string\n    Type      string\n    Timestamp time.Time\n}\n\nfunc (e Event) Clone() Event {\n    return e // Struct with only value types can be copied directly\n}\n\n// Struct with slices/maps\ntype Document struct {\n    ID       string\n    Sections []Section\n    Metadata map[string]string\n}\n\nfunc (d Document) Clone() Document {\n    sections := make([]Section, len(d.Sections))\n    copy(sections, d.Sections)\n    \n    metadata := make(map[string]string, len(d.Metadata))\n    for k, v := range d.Metadata {\n        metadata[k] = v\n    }\n    \n    return Document{\n        ID:       d.ID,\n        Sections: sections,\n        Metadata: metadata,\n    }\n}",{"id":3050,"title":1764,"titles":3051,"content":3052,"level":19},"/v1.0.7/reference/connectors/concurrent#see-also",[2937],"Race - For getting the first successful resultSequence - For sequential executionEffect - Common processor for concurrent operations html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki 
.sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":3054,"title":3055,"titles":3056,"content":3057,"level":9},"/v1.0.7/reference/connectors/contest","Contest",[],"Runs processors in parallel and returns the first result that meets a specified condition",{"id":3059,"title":3055,"titles":3060,"content":3061,"level":9},"/v1.0.7/reference/connectors/contest#contest",[],"Contest runs all processors in parallel and returns the first result that meets a specified condition. 
It combines the speed benefits of Race with conditional selection, allowing you to define what makes a \"winner\" beyond just being first to complete.",{"id":3063,"title":6,"titles":3064,"content":3065,"level":19},"/v1.0.7/reference/connectors/contest#overview",[3055],"Contest is ideal when you need the fastest result that also meets quality criteria: Finding the cheapest shipping rate under a time constraintGetting the first API response with required data completenessQuerying multiple sources for the best quality result quicklyRacing services where the \"best\" result matters more than just \"first\"",{"id":3067,"title":3068,"titles":3069,"content":3070,"level":19},"/v1.0.7/reference/connectors/contest#creating-a-contest","Creating a Contest",[3055],"// Define identities\nvar FindBestRateID = pipz.NewIdentity(\"find-best-rate\", \"Find first shipping rate under $50 with 3-day delivery\")\n\n// Define the winning condition\ncondition := func(ctx context.Context, rate ShippingRate) bool {\n    return rate.Cost \u003C 50.00 && rate.DeliveryDays \u003C= 3\n}\n\n// Create Contest with multiple processors\ncontest := pipz.NewContest(\n    FindBestRateID,\n    condition,\n    fedexRates,\n    upsRates,\n    uspsRates,\n)",{"id":3072,"title":3073,"titles":3074,"content":3075,"level":19},"/v1.0.7/reference/connectors/contest#key-behaviors","Key Behaviors",[3055],"Parallel Execution: All processors run concurrentlyConditional Winner: First result that meets the condition winsEarly Termination: Winner cancels remaining processorsNo Winner Handling: Returns error if no results meet conditionClone Safety: Each processor gets an isolated copy via Clone()Context Preservation: Uses context.WithCancel(ctx) to preserve trace context while enabling cancellation when winner is found",{"id":3077,"title":3078,"titles":3079,"content":3080,"level":19},"/v1.0.7/reference/connectors/contest#example-rate-shopping","Example: Rate Shopping",[3055],"package main\n\nimport (\n    \"context\"\n 
   \"github.com/zoobzio/pipz\"\n)\n\n// Define identities\nvar (\n    FedExID = pipz.NewIdentity(\"fedex\", \"Fetch FedEx shipping rate\")\n    UPSID = pipz.NewIdentity(\"ups\", \"Fetch UPS shipping rate\")\n    USPSID = pipz.NewIdentity(\"usps\", \"Fetch USPS shipping rate\")\n    RateShoppingID = pipz.NewIdentity(\"rate-shopping\", \"Find first acceptable shipping rate under $30\")\n)\n\n// Find the cheapest acceptable shipping rate\nfunc main() {\n    // Condition: Must be under $30 and deliver within 5 days\n    acceptableRate := func(_ context.Context, rate Rate) bool {\n        return rate.Cost \u003C 30.00 && rate.EstimatedDays \u003C= 5\n    }\n\n    // Create processors for each provider\n    fedex := pipz.Apply(FedExID, fetchFedExRate)\n    ups := pipz.Apply(UPSID, fetchUPSRate)\n    usps := pipz.Apply(USPSID, fetchUSPSRate)\n\n    // Contest to find first acceptable rate\n    rateContest := pipz.NewContest(\n        RateShoppingID,\n        acceptableRate,\n        fedex, ups, usps,\n    )\n\n    shipment := Shipment{Weight: 5.0, Destination: \"NYC\"}\n    result, err := rateContest.Process(context.Background(), shipment)\n}",{"id":3082,"title":3083,"titles":3084,"content":3085,"level":19},"/v1.0.7/reference/connectors/contest#dynamic-conditions","Dynamic Conditions",[3055],"You can update the winning condition at runtime: // Define identity\nvar DynamicContestID = pipz.NewIdentity(\"dynamic\", \"Contest with dynamic quality criteria\")\n\n// Start with strict criteria\ncontest := pipz.NewContest(\n    DynamicContestID,\n    strictCondition,\n    processors...,\n)\n\n// Relax criteria based on circumstances\nif timeIsRunningOut {\n    contest.SetCondition(relaxedCondition)\n}",{"id":3087,"title":3088,"titles":3089,"content":3090,"level":19},"/v1.0.7/reference/connectors/contest#complex-conditions","Complex Conditions",[3055],"Conditions can use context for sophisticated logic: // Condition that adapts based on deadline\nadaptiveCondition := func(ctx 
context.Context, result Result) bool {\n    deadline, ok := ctx.Deadline()\n    if !ok {\n        // No deadline - use strict criteria\n        return result.Quality > 90 && result.Cost \u003C 100\n    }\n    \n    // Relax criteria as deadline approaches\n    timeLeft := time.Until(deadline)\n    if timeLeft \u003C 5*time.Second {\n        return result.Quality > 70 // Accept lower quality if urgent\n    }\n    return result.Quality > 90 && result.Cost \u003C 100\n}",{"id":3092,"title":2871,"titles":3093,"content":3094,"level":19},"/v1.0.7/reference/connectors/contest#configuration-methods",[3055],"Contest supports the same configuration methods as other connectors: // Define identity\nvar ConfigurableContestID = pipz.NewIdentity(\"configurable\", \"Contest with configurable processors\")\n\ncontest := pipz.NewContest(\n    ConfigurableContestID,\n    condition,\n)\n\n// Add processors\ncontest.Add(newProcessor)\n\n// Remove by index\ncontest.Remove(0)\n\n// Replace all processors\ncontest.SetProcessors(p1, p2, p3)\n\n// Clear all\ncontest.Clear()\n\n// Get count\ncount := contest.Len()\n\n// Update condition\ncontest.SetCondition(newCondition)",{"id":3096,"title":106,"titles":3097,"content":3098,"level":19},"/v1.0.7/reference/connectors/contest#error-handling",[3055],"Contest provides specific error messages for different scenarios: result, err := contest.Process(ctx, input)\nif err != nil {\n    var pipeErr *pipz.Error[T]\n    if errors.As(err, &pipeErr) {\n        if strings.Contains(pipeErr.Error(), \"no processor results met\") {\n            // Some processors succeeded but none met condition\n        } else if strings.Contains(pipeErr.Error(), \"all processors failed\") {\n            // All processors returned errors\n        }\n    }\n}",{"id":3100,"title":2292,"titles":3101,"content":3102,"level":19},"/v1.0.7/reference/connectors/contest#when-to-use",[3055],"Use Contest when: You need the fastest result that meets quality criteriaMultiple sources can 
provide acceptable resultsQuality matters more than just speedYou're comparing prices, rates, or scoresYou want to optimize for both speed and qualityDifferent processors have different quality/speed tradeoffs",{"id":3104,"title":2297,"titles":3105,"content":3106,"level":19},"/v1.0.7/reference/connectors/contest#when-not-to-use",[3055],"Don't use Contest when: Any successful result is fine (use Race)You need all results (use Concurrent)Results aren't comparable (different data types)Order of execution matters (use Sequence)You always need the highest quality regardless of time (process all, then select)",{"id":3108,"title":3109,"titles":3110,"content":3111,"level":19},"/v1.0.7/reference/connectors/contest#contest-vs-race","Contest vs Race",[3055],"AspectContestRaceWinner SelectionFirst to meet conditionFirst to completeUse CaseQuality + SpeedPure speedCondition FunctionRequiredNot applicableResult EvaluationChecks each resultAccepts any success",{"id":3113,"title":2199,"titles":3114,"content":29,"level":19},"/v1.0.7/reference/connectors/contest#gotchas",[3055],{"id":3116,"title":3117,"titles":3118,"content":3119,"level":35},"/v1.0.7/reference/connectors/contest#dont-use-vague-conditions","❌ Don't use vague conditions",[3055,2199],"// WRONG - What does \"good\" mean?\nvar VagueContestID = pipz.NewIdentity(\"vague\", \"Contest with unclear criteria\")\n\ncontest := pipz.NewContest(\n    VagueContestID,\n    func(ctx context.Context, result Result) bool {\n        return result.IsGood // Unclear criteria\n    },\n    processors...,\n)",{"id":3121,"title":3122,"titles":3123,"content":3124,"level":35},"/v1.0.7/reference/connectors/contest#use-specific-measurable-conditions","✅ Use specific, measurable conditions",[3055,2199],"// RIGHT - Clear, measurable criteria\nvar SpecificContestID = pipz.NewIdentity(\"specific\", \"Find result with >95% accuracy, \u003C100ms latency, and \u003C$10 cost\")\n\ncontest := pipz.NewContest(\n    SpecificContestID,\n    func(ctx 
context.Context, result Result) bool {\n        return result.Accuracy > 0.95 &&\n               result.Latency \u003C 100*time.Millisecond &&\n               result.Cost \u003C 10.00\n    },\n    processors...,\n)",{"id":3126,"title":3127,"titles":3128,"content":3129,"level":35},"/v1.0.7/reference/connectors/contest#dont-ignore-no-winner-scenarios","❌ Don't ignore \"no winner\" scenarios",[3055,2199],"// WRONG - Assumes someone always wins\nresult, _ := contest.Process(ctx, input) // Ignoring error!\nprocessResult(result) // May be zero value!",{"id":3131,"title":3132,"titles":3133,"content":3134,"level":35},"/v1.0.7/reference/connectors/contest#handle-no-winner-gracefully","✅ Handle no winner gracefully",[3055,2199],"// RIGHT - Handle no winner case\nresult, err := contest.Process(ctx, input)\nif err != nil {\n    if strings.Contains(err.Error(), \"no processor results met\") {\n        // Use fallback or relax criteria\n        result = getDefaultResult()\n    } else {\n        return err // Real error\n    }\n}",{"id":3136,"title":3137,"titles":3138,"content":3139,"level":35},"/v1.0.7/reference/connectors/contest#dont-use-contest-for-side-effects","❌ Don't use Contest for side effects",[3055,2199],"// WRONG - All run until one meets condition!\nvar (\n    SideEffectsContestID = pipz.NewIdentity(\"side-effects\", \"Contest with side effects (dangerous)\")\n    Update1ID = pipz.NewIdentity(\"update1\", \"Update database 1\")\n    Update2ID = pipz.NewIdentity(\"update2\", \"Update database 2\")\n)\n\ncontest := pipz.NewContest(\n    SideEffectsContestID,\n    func(ctx context.Context, r Result) bool {\n        return r.Success\n    },\n    pipz.Apply(Update1ID, updateDatabase1), // Updates!\n    pipz.Apply(Update2ID, updateDatabase2), // Also updates!\n)",{"id":3141,"title":3142,"titles":3143,"content":3144,"level":35},"/v1.0.7/reference/connectors/contest#use-contest-for-queries-only","✅ Use Contest for queries only",[3055,2199],"// RIGHT - Safe read 
operations\nvar (\n    QueriesContestID = pipz.NewIdentity(\"queries\", \"Find first complete and fresh query result\")\n    Query1ID = pipz.NewIdentity(\"query1\", \"Query database 1\")\n    Query2ID = pipz.NewIdentity(\"query2\", \"Query database 2\")\n)\n\ncontest := pipz.NewContest(\n    QueriesContestID,\n    func(ctx context.Context, r Result) bool {\n        return r.Complete && r.Fresh\n    },\n    pipz.Apply(Query1ID, queryDatabase1),\n    pipz.Apply(Query2ID, queryDatabase2),\n)",{"id":3146,"title":135,"titles":3147,"content":3148,"level":19},"/v1.0.7/reference/connectors/contest#best-practices",[3055],"Meaningful Conditions: Write clear conditions that express business requirementsFail Fast: Order processors by likelihood of meeting conditionsTimeout Handling: Consider deadline-aware conditions for time-sensitive operationsError Context: Use the error path to understand which processors were triedTesting: Test both successful and no-winner scenarios",{"id":3150,"title":2181,"titles":3151,"content":29,"level":19},"/v1.0.7/reference/connectors/contest#common-patterns",[3055],{"id":3153,"title":3154,"titles":3155,"content":3156,"level":35},"/v1.0.7/reference/connectors/contest#fallback-with-quality","Fallback with Quality",[3055,2181],"// Define identity\nvar ServiceSelectionID = pipz.NewIdentity(\"service-selection\", \"Select first available premium service, fallback to economy\")\n\n// Try premium services first, fall back to economy if needed\npremiumCondition := func(_ context.Context, svc Service) bool {\n    return svc.Type == \"premium\" && svc.Available\n}\n\ncontest := pipz.NewContest(\n    ServiceSelectionID,\n    premiumCondition,\n    premiumService1,\n    premiumService2,\n    // These economy services won't win unless all premium fail\n    economyService1,\n    economyService2,\n)",{"id":3158,"title":3159,"titles":3160,"content":3161,"level":35},"/v1.0.7/reference/connectors/contest#cost-optimized-selection","Cost-Optimized 
Selection",[3055,2181],"// Define identity\nvar CostOptimizationID = pipz.NewIdentity(\"cost-optimization\", \"Find first vendor meeting SLA within budget\")\n\n// Find cheapest option that meets SLA\nbudgetCondition := func(_ context.Context, opt Option) bool {\n    return opt.MeetsSLA && opt.Cost \u003C budget\n}\n\ncontest := pipz.NewContest(\n    CostOptimizationID,\n    budgetCondition,\n    vendors...,\n)",{"id":3163,"title":3164,"titles":3165,"content":3166,"level":35},"/v1.0.7/reference/connectors/contest#progressive-relaxation","Progressive Relaxation",[3055,2181],"// Define identities\nvar (\n    StrictContestID = pipz.NewIdentity(\"strict\", \"Contest with strict quality criteria\")\n    RelaxedContestID = pipz.NewIdentity(\"relaxed\", \"Contest with relaxed criteria\")\n)\n\n// Try strict criteria first\nstrict := pipz.NewContest(\n    StrictContestID,\n    strictCondition,\n    processors...,\n)\nresult, err := strict.Process(ctx, input)\n\nif err != nil {\n    // Relax criteria and try again\n    relaxed := pipz.NewContest(\n        RelaxedContestID,\n        relaxedCondition,\n        processors...,\n    )\n    result, err = relaxed.Process(ctx, input)\n}",{"id":3168,"title":1764,"titles":3169,"content":3170,"level":19},"/v1.0.7/reference/connectors/contest#see-also",[3055],"Race - First successful result winsConcurrent - Run all in parallelFallback - Sequential fallback patternSwitch - Conditional routing html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code 
.sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":3172,"title":539,"titles":3173,"content":3174,"level":9},"/v1.0.7/reference/connectors/fallback",[],"Tries a primary processor and falls back to a secondary on error for automatic failover",{"id":3176,"title":539,"titles":3177,"content":3178,"level":9},"/v1.0.7/reference/connectors/fallback#fallback",[],"Tries a primary processor, falls back to secondary on error.",{"id":3180,"title":2263,"titles":3181,"content":3182,"level":19},"/v1.0.7/reference/connectors/fallback#function-signature",[539],"func NewFallback[T any](identity Identity, primary, fallback Chainable[T]) *Fallback[T]",{"id":3184,"title":2268,"titles":3185,"content":3186,"level":19},"/v1.0.7/reference/connectors/fallback#parameters",[539],"identity (Identity) - Identifier with name and description for debuggingprimary - The primary processor to try firstfallback - The backup processor to use if primary fails",{"id":3188,"title":2273,"titles":3189,"content":3190,"level":19},"/v1.0.7/reference/connectors/fallback#returns",[539],"Returns a *Fallback[T] that 
implements Chainable[T].",{"id":3192,"title":2278,"titles":3193,"content":3194,"level":19},"/v1.0.7/reference/connectors/fallback#behavior",[539],"Try primary first - Always attempts the primary processorFallback on error - Only tries fallback if primary failsSuccess stops - Returns immediately if primary succeedsError propagation - Returns fallback error if both failContext awareness - Respects cancellation throughout",{"id":3196,"title":2283,"titles":3197,"content":3198,"level":19},"/v1.0.7/reference/connectors/fallback#example",[539],"// Payment processing with backup\nvar (\n    PaymentID = pipz.NewIdentity(\"payment\", \"Process payment with Stripe, fallback to PayPal\")\n    StripeID  = pipz.NewIdentity(\"stripe\", \"Process with Stripe\")\n    PayPalID  = pipz.NewIdentity(\"paypal\", \"Process with PayPal\")\n)\n\npayment := pipz.NewFallback(\n    PaymentID,\n    pipz.Apply(StripeID, processWithStripe),\n    pipz.Apply(PayPalID, processWithPayPal),\n)\n\n// Database with replica fallback\nvar (\n    SaveID       = pipz.NewIdentity(\"save\", \"Save to primary database, fallback to replica\")\n    PrimaryDBID  = pipz.NewIdentity(\"primary-db\", \"Save to primary database\")\n    ReplicaDBID  = pipz.NewIdentity(\"replica-db\", \"Save to replica database\")\n)\n\nsaveData := pipz.NewFallback(\n    SaveID,\n    pipz.Apply(PrimaryDBID, saveToPrimary),\n    pipz.Apply(ReplicaDBID, saveToReplica),\n)\n\n// API with mock fallback\nvar (\n    WeatherID    = pipz.NewIdentity(\"weather\", \"Fetch from weather API, fallback to mock data\")\n    WeatherAPIID = pipz.NewIdentity(\"weather-api\", \"Fetch from weather API\")\n    MockDataID   = pipz.NewIdentity(\"mock-data\", \"Return mock weather data\")\n)\n\nfetchWeather := pipz.NewFallback(\n    WeatherID,\n    pipz.Apply(WeatherAPIID, fetchFromWeatherAPI),\n    pipz.Apply(MockDataID, returnMockWeather),\n)\n\n// Service degradation\nvar (\n    UserLookupID    = pipz.NewIdentity(\"user-lookup\", \"Fetch full profile, 
fallback to basic profile\")\n    FullProfileID   = pipz.NewIdentity(\"full-profile\", \"Fetch full user profile\")\n    BasicProfileID  = pipz.NewIdentity(\"basic-profile\", \"Fetch basic user profile\")\n)\n\nuserService := pipz.NewFallback(\n    UserLookupID,\n    pipz.Apply(FullProfileID, fetchFullProfile),\n    pipz.Apply(BasicProfileID, fetchBasicProfile),\n)",{"id":3200,"title":2292,"titles":3201,"content":3202,"level":19},"/v1.0.7/reference/connectors/fallback#when-to-use",[539],"Use Fallback when: You have a primary and backup service (database replicas, payment providers)You want automatic failoverThe fallback provides acceptable resultsYou need service resilienceOrder of preference is clearGraceful degradation is acceptable",{"id":3204,"title":2297,"titles":3205,"content":3206,"level":19},"/v1.0.7/reference/connectors/fallback#when-not-to-use",[539],"Don't use Fallback when: You need to try more than two options (chain Fallbacks or use Race)Both processors should always run (use Concurrent)You need the fastest result (use Race)Failure reasons matter for routing (use Switch with error handling)Primary failure should stop everything (no fallback needed)",{"id":3208,"title":106,"titles":3209,"content":3210,"level":19},"/v1.0.7/reference/connectors/fallback#error-handling",[539],"Fallback returns the fallback's error if both fail: var (\n    DataFetchID = pipz.NewIdentity(\"data-fetch\", \"Fetch data from primary, fallback to backup\")\n    PrimaryID   = pipz.NewIdentity(\"primary\", \"Fetch from primary source\")\n    BackupID    = pipz.NewIdentity(\"backup\", \"Fetch from backup source\")\n)\n\nfallback := pipz.NewFallback(\n    DataFetchID,\n    pipz.Apply(PrimaryID, func(ctx context.Context, id string) (Data, error) {\n        return Data{}, errors.New(\"primary failed\")\n    }),\n    pipz.Apply(BackupID, func(ctx context.Context, id string) (Data, error) {\n        return Data{}, errors.New(\"backup failed\")\n    }),\n)\n\nresult, err := 
fallback.Process(ctx, \"123\")\n// err.Error() == \"backup failed\" (the fallback's error)",{"id":3212,"title":2199,"titles":3213,"content":29,"level":19},"/v1.0.7/reference/connectors/fallback#gotchas",[539],{"id":3215,"title":3216,"titles":3217,"content":3218,"level":35},"/v1.0.7/reference/connectors/fallback#dont-use-for-unrelated-operations","❌ Don't use for unrelated operations",[539,2199],"// WRONG - These aren't alternatives\nvar (\n    UnrelatedID = pipz.NewIdentity(\"unrelated\", \"Unrelated operations\")\n    SaveID      = pipz.NewIdentity(\"save\", \"Save to database\")\n    EmailID     = pipz.NewIdentity(\"email\", \"Send email\")\n)\n\nfallback := pipz.NewFallback(\n    UnrelatedID,\n    pipz.Apply(SaveID, saveToDatabase),\n    pipz.Apply(EmailID, sendEmail), // Not a fallback!\n)",{"id":3220,"title":3221,"titles":3222,"content":3223,"level":35},"/v1.0.7/reference/connectors/fallback#use-for-true-alternatives","✅ Use for true alternatives",[539,2199],"// RIGHT - Both achieve the same goal\nvar (\n    AlternativesID = pipz.NewIdentity(\"alternatives\", \"Save to primary database with backup fallback\")\n    PrimaryDBID    = pipz.NewIdentity(\"primary-db\", \"Save to primary database\")\n    BackupDBID     = pipz.NewIdentity(\"backup-db\", \"Save to backup database\")\n)\n\nfallback := pipz.NewFallback(\n    AlternativesID,\n    pipz.Apply(PrimaryDBID, saveToPrimary),\n    pipz.Apply(BackupDBID, saveToBackup),\n)",{"id":3225,"title":3226,"titles":3227,"content":3228,"level":35},"/v1.0.7/reference/connectors/fallback#dont-ignore-primary-errors-completely","❌ Don't ignore primary errors completely",[539,2199],"// WRONG - No visibility into primary failures\nvar (\n    SilentID = pipz.NewIdentity(\"silent\", \"Silent fallback without monitoring\")\n)\n\nfallback := pipz.NewFallback(\n    SilentID,\n    primary,\n    backup,\n) // Primary failures are 
hidden",{"id":3230,"title":3231,"titles":3232,"content":3233,"level":35},"/v1.0.7/reference/connectors/fallback#log-primary-failures-for-monitoring","✅ Log primary failures for monitoring",[539,2199],"// RIGHT - Track primary failures\nvar (\n    MonitoredID          = pipz.NewIdentity(\"monitored\", \"Fallback with primary failure monitoring\")\n    PrimaryWithLoggingID = pipz.NewIdentity(\"primary-with-logging\", \"Primary processor with error logging\")\n    LogID                = pipz.NewIdentity(\"log\", \"Log primary failure\")\n)\n\nfallback := pipz.NewFallback(\n    MonitoredID,\n    pipz.NewHandle(\n        PrimaryWithLoggingID,\n        primary,\n        pipz.Effect(LogID, func(ctx context.Context, err *pipz.Error[T]) error {\n            log.Printf(\"Primary failed, using fallback: %v\", err)\n            metrics.Increment(\"fallback.triggered\")\n            return nil\n        }),\n    ),\n    backup,\n)",{"id":3235,"title":3236,"titles":3237,"content":3238,"level":35},"/v1.0.7/reference/connectors/fallback#dont-create-circular-fallback-chains","❌ Don't create circular fallback chains",[539,2199],"// WRONG - Creates infinite recursion risk\nvar (\n    PrimaryID   = pipz.NewIdentity(\"primary\", \"Primary with fallback\")\n    SecondaryID = pipz.NewIdentity(\"secondary\", \"Secondary with fallback\")\n    TertiaryID  = pipz.NewIdentity(\"tertiary\", \"Tertiary with fallback\")\n)\n\nprimary := pipz.NewFallback(PrimaryID, processor1, secondary)\nsecondary := pipz.NewFallback(SecondaryID, processor2, tertiary)\ntertiary := pipz.NewFallback(TertiaryID, processor3, primary) // ← Circular!\n\n// If processor1, processor2, and processor3 all fail:\n// primary → secondary → tertiary → primary → secondary → ...\n// Stack overflow!",{"id":3240,"title":3241,"titles":3242,"content":3243,"level":35},"/v1.0.7/reference/connectors/fallback#use-linear-fallback-chains-instead","✅ Use linear fallback chains instead",[539,2199],"// RIGHT - Clear fallback hierarchy\nvar 
(\n    PrimaryID   = pipz.NewIdentity(\"primary\", \"Primary processor with secondary and tertiary fallbacks\")\n    SecondaryID = pipz.NewIdentity(\"secondary\", \"Secondary processor with tertiary fallback\")\n)\n\nprimary := pipz.NewFallback(\n    PrimaryID,\n    processor1,\n    pipz.NewFallback(\n        SecondaryID,\n        processor2,\n        processor3, // Final fallback - no further chains\n    ),\n)",{"id":3245,"title":2181,"titles":3246,"content":3247,"level":19},"/v1.0.7/reference/connectors/fallback#common-patterns",[539],"// Chained fallbacks for multiple backups\nvar (\n    MultiBackupID = pipz.NewIdentity(\"multi-backup\", \"Multi-tier backup with primary, secondary, and tertiary\")\n    PrimaryID     = pipz.NewIdentity(\"primary\", \"Use primary service\")\n    BackupsID     = pipz.NewIdentity(\"backups\", \"Secondary and tertiary backup chain\")\n    SecondaryID   = pipz.NewIdentity(\"secondary\", \"Use secondary service\")\n    TertiaryID    = pipz.NewIdentity(\"tertiary\", \"Use tertiary service\")\n)\n\nmultiBackup := pipz.NewFallback(\n    MultiBackupID,\n    pipz.Apply(PrimaryID, usePrimary),\n    pipz.NewFallback(\n        BackupsID,\n        pipz.Apply(SecondaryID, useSecondary),\n        pipz.Apply(TertiaryID, useTertiary),\n    ),\n)\n\n// Fallback with retry\nvar (\n    ResilientSaveID  = pipz.NewIdentity(\"resilient-save\", \"Save with retries on primary and backup\")\n    PrimaryRetryID   = pipz.NewIdentity(\"primary-retry\", \"Retry primary save up to 3 times\")\n    BackupRetryID    = pipz.NewIdentity(\"backup-retry\", \"Retry backup save up to 2 times\")\n)\n\nresilientSave := pipz.NewFallback(\n    ResilientSaveID,\n    pipz.NewRetry(PrimaryRetryID, saveToPrimary, 3),\n    pipz.NewRetry(BackupRetryID, saveToBackup, 2),\n)\n\n// Degraded service\nvar (\n    ServiceID     = pipz.NewIdentity(\"service\", \"Full service with timeout, fallback to degraded\")\n    FullServiceID = pipz.NewIdentity(\"full-service\", \"Full service with 5 
second timeout\")\n    CompleteID    = pipz.NewIdentity(\"complete\", \"Provide full service\")\n    DegradedID    = pipz.NewIdentity(\"degraded\", \"Provide degraded service\")\n)\n\nfullService := pipz.NewFallback(\n    ServiceID,\n    pipz.NewTimeout(\n        FullServiceID,\n        pipz.Apply(CompleteID, provideFullService),\n        5*time.Second,\n    ),\n    pipz.Apply(DegradedID, provideDegradedService),\n)\n\n// Development fallback\nvar (\n    APIID  = pipz.NewIdentity(\"api\", \"Call real API, fallback to mock in development\")\n    RealID = pipz.NewIdentity(\"real\", \"Call real API\")\n    MockID = pipz.NewIdentity(\"mock\", \"Return mock response\")\n)\n\napiCall := pipz.NewFallback(\n    APIID,\n    pipz.Apply(RealID, callRealAPI),\n    pipz.Apply(MockID, func(ctx context.Context, req Request) (Response, error) {\n        if os.Getenv(\"ENV\") == \"development\" {\n            return mockResponse(req), nil\n        }\n        return Response{}, errors.New(\"production only\")\n    }),\n)",{"id":3249,"title":3250,"titles":3251,"content":3252,"level":19},"/v1.0.7/reference/connectors/fallback#monitoring-fallbacks","Monitoring Fallbacks",[539],"// Track fallback usage\nvar (\n    MonitoredID = pipz.NewIdentity(\"monitored\", \"Fallback with metrics tracking\")\n    PrimaryID   = pipz.NewIdentity(\"primary\", \"Primary service with metrics\")\n    FallbackID  = pipz.NewIdentity(\"fallback\", \"Fallback service with metrics\")\n)\n\nmonitoredFallback := pipz.NewFallback(\n    MonitoredID,\n    pipz.Apply(PrimaryID, func(ctx context.Context, data Data) (Data, error) {\n        result, err := primaryService(ctx, data)\n        if err != nil {\n            metrics.Increment(\"fallback.triggered\", \"service\", \"primary\")\n        }\n        return result, err\n    }),\n    pipz.Apply(FallbackID, func(ctx context.Context, data Data) (Data, error) {\n        metrics.Increment(\"fallback.used\", \"service\", \"backup\")\n        return backupService(ctx, 
data)\n    }),\n)\n\n// Log fallback activation\nvar (\n    LoggedFallbackID = pipz.NewIdentity(\"logged-fallback\", \"Fallback with error logging\")\n    ServiceID        = pipz.NewIdentity(\"service\", \"Primary and backup service\")\n    LogFailureID     = pipz.NewIdentity(\"log-failure\", \"Log primary failure\")\n)\n\nloggedFallback := pipz.NewHandle(\n    LoggedFallbackID,\n    pipz.NewFallback(\n        ServiceID,\n        primary,\n        backup,\n    ),\n    pipz.Effect(LogFailureID, func(ctx context.Context, err *pipz.Error[Data]) error {\n        if strings.Contains(err.Path[len(err.Path)-1], \"primary\") {\n            log.Printf(\"Primary failed, using fallback: %v\", err.Err)\n        }\n        return nil\n    }),\n)",{"id":3254,"title":135,"titles":3255,"content":3256,"level":19},"/v1.0.7/reference/connectors/fallback#best-practices",[539],"// Ensure fallback is truly independent\n// BAD: Fallback might fail for same reason\nvar (\n    BadID     = pipz.NewIdentity(\"bad\", \"Fallback to same database (bad practice)\")\n    DBWrite1ID = pipz.NewIdentity(\"db-write-1\", \"Write to database\")\n    DBWrite2ID = pipz.NewIdentity(\"db-write-2\", \"Write to same database\")\n)\n\nbadFallback := pipz.NewFallback(\n    BadID,\n    pipz.Apply(DBWrite1ID, writeToDatabase),\n    pipz.Apply(DBWrite2ID, writeToSameDatabase), // Same failure mode!\n)\n\n// GOOD: Independent failure modes\nvar (\n    GoodID     = pipz.NewIdentity(\"good\", \"Fallback from database to file system\")\n    DatabaseID = pipz.NewIdentity(\"database\", \"Write to database\")\n    FileID     = pipz.NewIdentity(\"file\", \"Write to file\")\n)\n\ngoodFallback := pipz.NewFallback(\n    GoodID,\n    pipz.Apply(DatabaseID, writeToDatabase),\n    pipz.Apply(FileID, writeToFile), // Different failure mode\n)\n\n// Consider data consistency\nvar (\n    TransactionID = pipz.NewIdentity(\"transaction\", \"ACID transaction with eventual consistency fallback\")\n    PrimaryID     = 
pipz.NewIdentity(\"primary\", \"Process with ACID transaction\")\n    FallbackID    = pipz.NewIdentity(\"fallback\", \"Process with eventual consistency\")\n)\n\ntransactional := pipz.NewFallback(\n    TransactionID,\n    pipz.Apply(PrimaryID, func(ctx context.Context, tx Transaction) (Transaction, error) {\n        // Full ACID transaction\n        return processPrimary(ctx, tx)\n    }),\n    pipz.Apply(FallbackID, func(ctx context.Context, tx Transaction) (Transaction, error) {\n        // Ensure fallback maintains consistency\n        log.Printf(\"WARNING: Using eventual consistency fallback for tx %s\", tx.ID)\n        return processEventually(ctx, tx)\n    }),\n)",{"id":3258,"title":1764,"titles":3259,"content":3260,"level":19},"/v1.0.7/reference/connectors/fallback#see-also",[539],"Race - For trying multiple options in parallelRetry - For retrying the same processorHandle - For custom error handlingSwitch - For conditional routing html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki 
.sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":3262,"title":3263,"titles":3264,"content":3265,"level":9},"/v1.0.7/reference/connectors/filter","Filter",[],"Provides conditional processing that either executes a processor or passes data through based on a predicate",{"id":3267,"title":3263,"titles":3268,"content":3269,"level":9},"/v1.0.7/reference/connectors/filter#filter",[],"Filter provides conditional processing that either executes a processor or passes data through unchanged based on a predicate function.",{"id":3271,"title":6,"titles":3272,"content":3273,"level":19},"/v1.0.7/reference/connectors/filter#overview",[3263],"Filter creates a branch in your pipeline where processing is optional based on runtime conditions. Unlike Switch which routes to different processors, Filter either processes or skips. Unlike Mutate which only supports safe transformations, Filter can execute any Chainable including ones that may error. 
filter := pipz.NewFilter(identity, condition, processor)",{"id":3275,"title":2292,"titles":3276,"content":3277,"level":19},"/v1.0.7/reference/connectors/filter#when-to-use",[3263],"Use Filter when: Conditional processing needed (feature flags, A/B testing)Skip expensive operations based on data stateApply business rules to subset of dataDifferent users need different processing pathsYou want clean separation of condition and logicPerformance optimization through selective processing",{"id":3279,"title":2297,"titles":3280,"content":3281,"level":19},"/v1.0.7/reference/connectors/filter#when-not-to-use",[3263],"Don't use Filter when: All data needs the same processing (just use the processor directly)You need multiple branches (use Switch instead)The condition is better expressed in the processor itselfYou're just transforming conditionally (use Mutate for simpler cases)",{"id":3283,"title":2709,"titles":3284,"content":3285,"level":19},"/v1.0.7/reference/connectors/filter#basic-usage",[3263],"// Define identities upfront\nvar (\n    BetaFeatureID      = pipz.NewIdentity(\"beta-feature\", \"Apply new algorithm for beta users with feature flag\")\n    PremiumValidID     = pipz.NewIdentity(\"premium-validation\", \"Perform enhanced validation for premium customers\")\n    PremiumChecksID    = pipz.NewIdentity(\"premium-checks\", \"Premium customer validation checks\")\n)\n\n// Feature flag example\nbetaFeature := pipz.NewFilter(\n    BetaFeatureID,\n    func(ctx context.Context, user User) bool {\n        return user.BetaEnabled && isFeatureEnabled(ctx, \"new-algorithm\")\n    },\n    newAlgorithmProcessor,\n)\n\n// Conditional validation\nvalidatePremium := pipz.NewFilter(\n    PremiumValidID,\n    func(ctx context.Context, order Order) bool {\n        return order.CustomerTier == \"premium\"\n    },\n    pipz.NewSequence(\n        PremiumChecksID,\n        validateCreditLimit,\n        checkFraudScore,\n        verifyIdentity,\n    
),\n)",{"id":3287,"title":3288,"titles":3289,"content":3290,"level":19},"/v1.0.7/reference/connectors/filter#condition-function","Condition Function",[3263],"The condition function determines whether processing should occur: func(context.Context, T) bool Returns true: Execute the processorReturns false: Pass data through unchangedContext aware: Can use context for timeouts, values, cancellationPure function: Should not have side effects",{"id":3292,"title":3293,"titles":3294,"content":3295,"level":35},"/v1.0.7/reference/connectors/filter#condition-examples","Condition Examples",[3263,3288],"// Simple data check\nfunc(ctx context.Context, order Order) bool {\n    return order.Amount > 1000\n}\n\n// Feature flag with context\nfunc(ctx context.Context, user User) bool {\n    return user.BetaEnabled && \n           featureFlags.IsEnabled(ctx, \"experimental-feature\")\n}\n\n// Time-based condition\nfunc(ctx context.Context, data Data) bool {\n    return time.Now().Hour() >= 9 && time.Now().Hour() \u003C 17 // Business hours\n}\n\n// Complex business logic\nfunc(ctx context.Context, payment Payment) bool {\n    return payment.Method == \"crypto\" && \n           payment.Amount > 10000 &&\n           payment.Customer.RiskScore \u003C 0.3\n}",{"id":3297,"title":3298,"titles":3299,"content":3300,"level":19},"/v1.0.7/reference/connectors/filter#processor","Processor",[3263],"Any Chainable can be used as the processor: // Define identities upfront\nvar (\n    DoubleID      = pipz.NewIdentity(\"double\", \"Double the value\")\n    ValidateID    = pipz.NewIdentity(\"validate\", \"Validate data\")\n    ComplexID     = pipz.NewIdentity(\"complex\", \"Validate, enrich, and transform data\")\n    ConditionalID = pipz.NewIdentity(\"conditional\", \"Conditionally apply complex flow\")\n)\n\n// Simple processor\nprocessor := pipz.Transform(DoubleID, func(ctx context.Context, n int) int {\n    return n * 2\n})\n\n// Error-prone processor\nvalidator := pipz.Apply(ValidateID, func(ctx 
context.Context, data Data) (Data, error) {\n    return validateData(data)\n})\n\n// Complex pipeline\ncomplexFlow := pipz.NewSequence(\n    ComplexID,\n    validate,\n    enrich,\n    transform,\n)\n\nfilter := pipz.NewFilter(ConditionalID, condition, complexFlow)",{"id":3302,"title":3303,"titles":3304,"content":3305,"level":19},"/v1.0.7/reference/connectors/filter#dynamic-behavior","Dynamic Behavior",[3263],"Filter supports runtime updates for dynamic behavior: // Define identity upfront\nvar DynamicID = pipz.NewIdentity(\"dynamic\", \"Filter with dynamic condition and processor\")\n\nfilter := pipz.NewFilter(\n    DynamicID,\n    initialCondition,\n    initialProcessor,\n)\n\n// Update condition at runtime\nfilter.SetCondition(func(ctx context.Context, data Data) bool {\n    // New condition logic\n    return data.Version >= 2\n})\n\n// Update processor at runtime\nfilter.SetProcessor(newProcessor)\n\n// Access current values\ncurrentCondition := filter.Condition()\ncurrentProcessor := filter.Processor()",{"id":3307,"title":106,"titles":3308,"content":3309,"level":19},"/v1.0.7/reference/connectors/filter#error-handling",[3263],"When the processor returns an error, Filter prepends its name to the error path: // Define identities upfront\nvar (\n    PaymentFilterID = pipz.NewIdentity(\"payment-filter\", \"Validate high-value payments over $100\")\n    ValidateID      = pipz.NewIdentity(\"validate\", \"Validate payment\")\n)\n\nfilter := pipz.NewFilter(\n    PaymentFilterID,\n    func(ctx context.Context, p Payment) bool { return p.Amount > 100 },\n    pipz.Apply(ValidateID, failingValidator),\n)\n\nresult, err := filter.Process(ctx, payment)\nif err != nil {\n    // err.Path will be [\"payment-filter\", \"validate\"]\n    fmt.Printf(\"Failed at: %v\\n\", err.Path)\n}",{"id":3311,"title":3312,"titles":3313,"content":3314,"level":19},"/v1.0.7/reference/connectors/filter#thread-safety","Thread Safety",[3263],"Filter is thread-safe and can be safely used in concurrent 
scenarios: // Define identity upfront\nvar ConcurrentSafeID = pipz.NewIdentity(\"concurrent-safe\", \"Thread-safe conditional processor\")\n\nfilter := pipz.NewFilter(\n    ConcurrentSafeID,\n    condition,\n    processor,\n)\n\n// Safe to call from multiple goroutines\ngo func() { filter.Process(ctx, data1) }()\ngo func() { filter.Process(ctx, data2) }()\n\n// Safe to update from other goroutines\ngo func() { filter.SetCondition(newCondition) }()",{"id":3316,"title":2764,"titles":3317,"content":3318,"level":19},"/v1.0.7/reference/connectors/filter#performance-characteristics",[3263],"Filter has minimal overhead: Condition false: ~5ns with zero allocationsCondition true: Processor overhead + ~10nsNo reflection: Direct function callsMemory efficient: No intermediate allocations",{"id":3320,"title":2181,"titles":3321,"content":29,"level":19},"/v1.0.7/reference/connectors/filter#common-patterns",[3263],{"id":3323,"title":3324,"titles":3325,"content":3326,"level":35},"/v1.0.7/reference/connectors/filter#feature-flag-processing","Feature Flag Processing",[3263,2181],"type FeatureFlags struct {\n    flags map[string]bool\n    mu    sync.RWMutex\n}\n\nfunc (f *FeatureFlags) IsEnabled(flag string) bool {\n    f.mu.RLock()\n    defer f.mu.RUnlock()\n    return f.flags[flag]\n}\n\n// Define identity upfront\nvar FeatureGateID = pipz.NewIdentity(\"feature-gate\", \"Gate new feature for beta users with feature flag\")\n\n// Create feature flag filter\nfeatureFilter := pipz.NewFilter(\n    FeatureGateID,\n    func(ctx context.Context, user User) bool {\n        return user.BetaEnabled && flags.IsEnabled(\"new-feature\")\n    },\n    newFeatureProcessor,\n)",{"id":3328,"title":3329,"titles":3330,"content":3331,"level":35},"/v1.0.7/reference/connectors/filter#conditional-enrichment","Conditional Enrichment",[3263,2181],"// Define identities upfront\nvar (\n    EnrichPremiumID     = pipz.NewIdentity(\"enrich-premium\", \"Enrich premium and enterprise customers with additional 
data\")\n    PremiumEnrichmentID = pipz.NewIdentity(\"premium-enrichment\", \"Add personalized offers, loyalty points, and priority support\")\n)\n\n// Only enrich premium customers\nenrichPremium := pipz.NewFilter(\n    EnrichPremiumID,\n    func(ctx context.Context, customer Customer) bool {\n        return customer.Tier == \"premium\" || customer.Tier == \"enterprise\"\n    },\n    pipz.NewSequence(\n        PremiumEnrichmentID,\n        addPersonalizedOffers,\n        calculateLoyaltyPoints,\n        addPrioritySupport,\n    ),\n)",{"id":3333,"title":3334,"titles":3335,"content":3336,"level":35},"/v1.0.7/reference/connectors/filter#performance-optimization","Performance Optimization",[3263,2181],"// Define identity upfront\nvar CacheCheckID = pipz.NewIdentity(\"cache-check\", \"Skip expensive processing if data is cached\")\n\n// Skip expensive processing for cached data\nskipIfCached := pipz.NewFilter(\n    CacheCheckID,\n    func(ctx context.Context, request Request) bool {\n        _, exists := cache.Get(request.CacheKey())\n        return !exists // Only process if not cached\n    },\n    expensiveProcessor,\n)",{"id":3338,"title":3339,"titles":3340,"content":3341,"level":35},"/v1.0.7/reference/connectors/filter#time-based-processing","Time-Based Processing",[3263,2181],"// Define identity upfront\nvar BusinessHoursID = pipz.NewIdentity(\"business-hours\", \"Process only during weekday business hours (9am-5pm)\")\n\n// Only process during business hours\nbusinessHours := pipz.NewFilter(\n    BusinessHoursID,\n    func(ctx context.Context, task Task) bool {\n        now := time.Now()\n        hour := now.Hour()\n        weekday := now.Weekday()\n\n        return weekday >= time.Monday &&\n               weekday \u003C= time.Friday &&\n               hour >= 9 &&\n               hour \u003C 17\n    },\n    
businessProcessor,\n)",{"id":3343,"title":3344,"titles":3345,"content":29,"level":19},"/v1.0.7/reference/connectors/filter#filter-vs-other-connectors","Filter vs Other Connectors",[3263],{"id":3347,"title":3348,"titles":3349,"content":3350,"level":35},"/v1.0.7/reference/connectors/filter#filter-vs-switch","Filter vs Switch",[3263,3344],"Filter: Execute or skip (binary choice)Switch: Route to different processors (multiple choices) // Define identities upfront\nvar (\n    OptionalID = pipz.NewIdentity(\"optional\", \"Conditionally apply processor\")\n    RouterID   = pipz.NewIdentity(\"router\", \"Route to different processors\")\n)\n\n// Filter: Optional processing\nfilter := pipz.NewFilter(OptionalID, condition, processor)\n\n// Switch: Alternative processing\nrouter := pipz.NewSwitch(RouterID, routingFunction)\nrouter.AddRoute(\"path-a\", processorA)\nrouter.AddRoute(\"path-b\", processorB)",{"id":3352,"title":3353,"titles":3354,"content":3355,"level":35},"/v1.0.7/reference/connectors/filter#filter-vs-mutate","Filter vs Mutate",[3263,3344],"Filter: Can use any Chainable, including error-prone onesMutate: Only safe transformations (no errors) // Define identities upfront\nvar (\n    ValidateIfNeededID = pipz.NewIdentity(\"validate-if-needed\", \"Conditionally validate data\")\n    ModifyIfNeededID   = pipz.NewIdentity(\"modify-if-needed\", \"Conditionally transform data\")\n)\n\n// Filter: Can fail\nfilter := pipz.NewFilter(ValidateIfNeededID, condition, validator)\n\n// Mutate: Cannot fail\nmutate := pipz.Mutate(ModifyIfNeededID, transformer, condition)",{"id":3357,"title":3358,"titles":3359,"content":3360,"level":35},"/v1.0.7/reference/connectors/filter#filter-vs-conditional-logic","Filter vs Conditional Logic",[3263,3344],"// Define identities upfront\nvar (\n    MixedLogicID      = pipz.NewIdentity(\"mixed-logic\", \"Conditionally apply expensive operation\")\n    CleanSeparationID = pipz.NewIdentity(\"clean-separation\", \"Separate condition from expensive 
operation\")\n    ExpensiveID       = pipz.NewIdentity(\"expensive\", \"Expensive operation\")\n)\n\n// Instead of embedding conditions\nprocessor := pipz.Apply(MixedLogicID, func(ctx context.Context, data Data) (Data, error) {\n    if shouldProcess(data) {\n        return expensiveOperation(ctx, data)\n    }\n    return data, nil\n})\n\n// Use Filter for cleaner separation\nfilter := pipz.NewFilter(\n    CleanSeparationID,\n    shouldProcess,\n    pipz.Apply(ExpensiveID, expensiveOperation),\n)",{"id":3362,"title":2805,"titles":3363,"content":3364,"level":19},"/v1.0.7/reference/connectors/filter#testing",[3263],"Test Filter by verifying both condition paths: func TestFilter(t *testing.T) {\n    // Define identities upfront\n    var (\n        DoubleID   = pipz.NewIdentity(\"double\", \"Double the value\")\n        EvenOnlyID = pipz.NewIdentity(\"even-only\", \"Double only even numbers\")\n    )\n\n    processor := pipz.Transform(DoubleID, func(ctx context.Context, n int) int {\n        return n * 2\n    })\n\n    filter := pipz.NewFilter(\n        EvenOnlyID,\n        func(ctx context.Context, n int) bool { return n%2 == 0 },\n        processor,\n    )\n\n    // Test condition true\n    result, err := filter.Process(context.Background(), 4)\n    assert.NoError(t, err)\n    assert.Equal(t, 8, result) // 4 * 2\n\n    // Test condition false\n    result, err = filter.Process(context.Background(), 3)\n    assert.NoError(t, err)\n    assert.Equal(t, 3, result) // unchanged\n}",{"id":3366,"title":2199,"titles":3367,"content":29,"level":19},"/v1.0.7/reference/connectors/filter#gotchas",[3263],{"id":3369,"title":3370,"titles":3371,"content":3372,"level":35},"/v1.0.7/reference/connectors/filter#dont-have-side-effects-in-conditions","❌ Don't have side effects in conditions",[3263,2199],"// Define identity upfront\nvar BadID = pipz.NewIdentity(\"bad\", \"Filter with side effects in condition\")\n\n// WRONG - Condition modifies state\nfilter := pipz.NewFilter(\n    BadID,\n   
 func(ctx context.Context, data Data) bool {\n        counter++ // Side effect!\n        log.Println(\"Checking...\") // Side effect!\n        return data.Important\n    },\n    processor,\n)",{"id":3374,"title":3375,"titles":3376,"content":3377,"level":35},"/v1.0.7/reference/connectors/filter#keep-conditions-pure","✅ Keep conditions pure",[3263,2199],"// Define identity upfront\nvar GoodID = pipz.NewIdentity(\"good\", \"Filter with pure condition\")\n\n// RIGHT - Pure condition function\nfilter := pipz.NewFilter(\n    GoodID,\n    func(ctx context.Context, data Data) bool {\n        return data.Important\n    },\n    processor,\n)",{"id":3379,"title":3380,"titles":3381,"content":3382,"level":35},"/v1.0.7/reference/connectors/filter#dont-use-for-simple-truefalse-transforms","❌ Don't use for simple true/false transforms",[3263,2199],"// Define identities upfront\nvar (\n    OverkillID = pipz.NewIdentity(\"overkill\", \"Absolute value for positive numbers only\")\n    AbsID      = pipz.NewIdentity(\"abs\", \"Calculate absolute value\")\n)\n\n// WRONG - Overkill for simple conditional\nfilter := pipz.NewFilter(\n    OverkillID,\n    func(ctx context.Context, n int) bool { return n > 0 },\n    pipz.Transform(AbsID, math.Abs),\n)",{"id":3384,"title":3385,"titles":3386,"content":3387,"level":35},"/v1.0.7/reference/connectors/filter#use-mutate-for-simple-conditional-transforms","✅ Use Mutate for simple conditional transforms",[3263,2199],"// Define identity upfront\nvar AbsIfNegativeID = pipz.NewIdentity(\"abs-if-negative\", \"Negate negative numbers\")\n\n// RIGHT - Simpler with Mutate\nmutate := pipz.Mutate(\n    AbsIfNegativeID,\n    func(ctx context.Context, n int) int { return -n },\n    func(ctx context.Context, n int) bool { return n \u003C 0 },\n)",{"id":3389,"title":135,"titles":3390,"content":3391,"level":19},"/v1.0.7/reference/connectors/filter#best-practices",[3263],"Keep conditions simple: Complex logic makes debugging difficultAvoid side effects in 
conditions: Conditions should be pure functionsUse descriptive names: Names appear in error pathsTest both paths: Verify condition true and false scenariosConsider caching: For expensive condition calculationsUse context: Leverage context for timeouts and valuesDocument behavior: Make condition logic clear to other developersMonitor pass rates: Use metrics to understand filter effectiveness html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html 
pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":3393,"title":3394,"titles":3395,"content":3396,"level":9},"/v1.0.7/reference/connectors/handle","Handle",[],"Provides error observation and handling capabilities for processors with cleanup and compensation patterns",{"id":3398,"title":3394,"titles":3399,"content":3400,"level":9},"/v1.0.7/reference/connectors/handle#handle",[],"Provides error observation and handling capabilities for processors.",{"id":3402,"title":2263,"titles":3403,"content":3404,"level":19},"/v1.0.7/reference/connectors/handle#function-signature",[3394],"func NewHandle[T any](\n    identity Identity,\n    processor Chainable[T],\n    errorHandler Chainable[*Error[T]],\n) *Handle[T]",{"id":3406,"title":2268,"titles":3407,"content":3408,"level":19},"/v1.0.7/reference/connectors/handle#parameters",[3394],"identity (Identity) - Identifier for the connector used in debuggingprocessor - Main processor that might failerrorHandler - Pipeline that processes errors (receives *Error[T])",{"id":3410,"title":2273,"titles":3411,"content":3412,"level":19},"/v1.0.7/reference/connectors/handle#returns",[3394],"Returns a *Handle[T] that implements Chainable[T].",{"id":3414,"title":2278,"titles":3415,"content":3416,"level":19},"/v1.0.7/reference/connectors/handle#behavior",[3394],"Error observation - Handler processes errors for side effects (logging, cleanup)Error pass-through - Original errors always propagate after handlingError as data - Errors flow through the error handler pipelineHandler errors ignored - Handler failures don't affect error propagationSuccess pass-through - Successful results bypass error handler",{"id":3418,"title":1849,"titles":3419,"content":3420,"level":19},"/v1.0.7/reference/connectors/handle#key-insight",[3394],"Handle provides error observation and cleanup. 
By wrapping a processor with Handle, you're saying \"when this fails, I need to do something about it\" - whether that's logging, cleanup, notifications, or compensation. The error always propagates after handling.",{"id":3422,"title":2283,"titles":3423,"content":3424,"level":19},"/v1.0.7/reference/connectors/handle#example",[3394],"// Log errors with context\nlogged := pipz.NewHandle(\n    pipz.NewIdentity(\"order-logging\", \"Logs order processing errors\"),\n    processOrder,\n    pipz.Effect(\n        pipz.NewIdentity(\"log-error\", \"Logs order failure details\"),\n        func(ctx context.Context, err *pipz.Error[Order]) error {\n            log.Printf(\"Order %s failed at %s: %v\",\n                err.InputData.ID, err.Path, err.Err)\n            metrics.Increment(\"order.failures\")\n            return nil\n        },\n    ),\n)\n\n// Clean up resources on failure\nwithCleanup := pipz.NewHandle(\n    pipz.NewIdentity(\"inventory-management\", \"Manages inventory with cleanup\"),\n    pipz.NewSequence[Order](\n        reserveInventory,\n        chargePayment,\n        confirmOrder,\n    ),\n    pipz.Effect(\n        pipz.NewIdentity(\"cleanup\", \"Releases inventory on failure\"),\n        func(ctx context.Context, err *pipz.Error[Order]) error {\n            if err.InputData.ReservationID != \"\" {\n                log.Printf(\"Releasing inventory for failed order %s\", err.InputData.ID)\n                inventory.Release(err.InputData.ReservationID)\n            }\n            return nil\n        },\n    ),\n)\n\n// Send notifications on failure\nnotifying := pipz.NewHandle(\n    pipz.NewIdentity(\"payment-alerts\", \"Alerts on payment failures\"),\n    processPayment,\n    pipz.Effect(\n        pipz.NewIdentity(\"notify\", \"Sends alert for high-value failures\"),\n        func(ctx context.Context, err *pipz.Error[Payment]) error {\n            if err.InputData.Amount > 10000 {\n                // Alert on large payment failures\n                
alerting.SendHighValuePaymentFailure(err.InputData, err.Err)\n            }\n            return nil\n        },\n    ),\n)",{"id":3426,"title":2292,"titles":3427,"content":3428,"level":19},"/v1.0.7/reference/connectors/handle#when-to-use",[3394],"Use Handle when: You need to observe errors without stopping them (logging, metrics)You need to perform cleanup on failure (release resources)Errors require logging with additional contextYou want to send notifications or alerts on failureYou need to implement compensation logicYou want to collect metrics about failuresDifferent errors require different side effects",{"id":3430,"title":2297,"titles":3431,"content":3432,"level":19},"/v1.0.7/reference/connectors/handle#when-not-to-use",[3394],"Don't use Handle when: You just need to suppress errors (use Fallback with default value)Simple retry is sufficient (use Retry)You want to transform errors into values (use Fallback)No cleanup or side effects are needed on failureYou want to recover from errors (use Fallback - Handle only observes)",{"id":3434,"title":3435,"titles":3436,"content":3437,"level":19},"/v1.0.7/reference/connectors/handle#error-handler-access","Error Handler Access",[3394],"The error handler receives *Error[T] with full context: type Error[T any] struct {\n    Path      []Identity    // Full path through processors\n    Err       error         // Original error\n    InputData T             // Input when error occurred\n    Timeout   bool          // Was this a timeout?\n    Canceled  bool          // Was this cancelled?\n    Timestamp time.Time     // When the error occurred\n    Duration  time.Duration // Processing time before error\n}",{"id":3439,"title":2181,"titles":3440,"content":3441,"level":19},"/v1.0.7/reference/connectors/handle#common-patterns",[3394],"// Resource cleanup pattern\ninventoryCleanup := pipz.NewHandle(\n    pipz.NewIdentity(\"order-with-cleanup\", \"Order processing with inventory cleanup\"),\n    pipz.NewSequence[Order](\n        
validateOrder,\n        reserveInventory,\n        chargePayment,\n        shipOrder,\n    ),\n    pipz.Effect(\n        pipz.NewIdentity(\"release-inventory\", \"Releases inventory on failure\"),\n        func(ctx context.Context, err *pipz.Error[Order]) error {\n            if reservation := err.InputData.ReservationID; reservation != \"\" {\n                log.Printf(\"Releasing inventory reservation %s after failure\", reservation)\n                if releaseErr := inventory.Release(reservation); releaseErr != nil {\n                    log.Printf(\"Failed to release inventory: %v\", releaseErr)\n                }\n            }\n            return nil\n        },\n    ),\n)\n\n// Monitoring and alerting\nmonitoredPayment := pipz.NewHandle(\n    pipz.NewIdentity(\"payment-monitoring\", \"Payment processing with monitoring\"),\n    processPayment,\n    pipz.Effect(\n        pipz.NewIdentity(\"monitor\", \"Records payment metrics and alerts\"),\n        func(ctx context.Context, err *pipz.Error[Payment]) error {\n            metrics.RecordPaymentFailure(err.InputData.Method, err.Err)\n\n            if err.InputData.Amount > alertThreshold {\n                alerting.NotifyHighValueFailure(err.InputData, err.Err)\n            }\n\n            if err.Timeout {\n                log.Printf(\"Payment timeout after %v\", err.Duration)\n                metrics.RecordTimeout(\"payment\", err.Duration)\n            }\n\n            return nil\n        },\n    ),\n)\n\n// Compensation pattern\ncompensatingTransaction := pipz.NewHandle(\n    pipz.NewIdentity(\"transfer-with-compensation\", \"Transfer with compensating transaction\"),\n    pipz.NewSequence[Transfer](\n        debitSource,\n        creditDestination,\n        recordTransaction,\n    ),\n    pipz.Effect(\n        pipz.NewIdentity(\"compensate\", \"Compensates for partial transfer failure\"),\n        func(ctx context.Context, err *pipz.Error[Transfer]) error {\n            // Determine how far we got\n        
    failedAt := err.Path[len(err.Path)-1]\n\n            switch failedAt {\n            case \"recordTransaction\":\n                // Both debit and credit succeeded, just logging failed\n                log.Printf(\"Transaction completed but not recorded: %v\", err.InputData)\n                // Try to record in backup system\n                backupLog.Record(err.InputData)\n\n            case \"creditDestination\":\n                // Debit succeeded but credit failed - must reverse\n                log.Printf(\"Reversing debit due to credit failure\")\n                if reverseErr := reverseDebit(err.InputData); reverseErr != nil {\n                    // Critical - manual intervention needed\n                    alerting.CriticalAlert(\"Failed to reverse debit\", err.InputData, reverseErr)\n                }\n            }\n\n            return nil\n        },\n    ),\n)",{"id":3443,"title":3444,"titles":3445,"content":3446,"level":19},"/v1.0.7/reference/connectors/handle#advanced-error-flows","Advanced Error Flows",[3394],"// Define identities upfront\nvar (\n    ParallelRecoveryID = pipz.NewIdentity(\"parallel-recovery\", \"Parallel error handling and recovery\")\n    LogID              = pipz.NewIdentity(\"log\", \"Logs error to central system\")\n    MetricsID          = pipz.NewIdentity(\"metrics\", \"Updates error dashboard\")\n    BackupID           = pipz.NewIdentity(\"backup\", \"Saves failed request\")\n    NotifyID           = pipz.NewIdentity(\"notify\", \"Notifies on-call team\")\n)\n\n// Parallel error handling\nparallelRecovery := pipz.NewHandle(ParallelRecoveryID,\n    mainProcess,\n    pipz.NewConcurrent[*pipz.Error[Data]](\n        pipz.Effect(LogID, logToCentralSystem),\n        pipz.Effect(MetricsID, updateDashboard),\n        pipz.Effect(BackupID, saveFailedRequest),\n        pipz.Apply(NotifyID, notifyOnCallTeam),\n    ),\n)\n\n// Define identities for nested handling\nvar (\n    OuterID    = pipz.NewIdentity(\"outer\", \"Outer error 
handler\")\n    InnerID    = pipz.NewIdentity(\"inner\", \"Inner error handler\")\n    InnerLogID = pipz.NewIdentity(\"inner-log\", \"Logs inner errors\")\n    OuterLogID = pipz.NewIdentity(\"outer-log\", \"Logs outer errors\")\n    BatchID    = pipz.NewIdentity(\"batch\", \"Batch processing with error aggregation\")\n    CollectID  = pipz.NewIdentity(\"collect\", \"Collects and aggregates errors\")\n)\n\n// Nested error handling\nnestedHandling := pipz.NewHandle(OuterID,\n    pipz.NewHandle(InnerID,\n        riskyOperation,\n        pipz.Effect(InnerLogID, logInnerError),\n    ),\n    pipz.Effect(OuterLogID,\n        func(ctx context.Context, err *pipz.Error[Data]) error {\n            // This catches errors from both riskyOperation and inner-log\n            log.Printf(\"Outer handler: %v\", err)\n            return nil\n        },\n    ),\n)\n\n// Error aggregation for batch processing\nbatchErrors := pipz.NewHandle(BatchID,\n    batchProcessor,\n    pipz.Apply(CollectID,\n        func(ctx context.Context, err *pipz.Error[Batch]) (*pipz.Error[Batch], error) {\n            errorCollector.Add(err)\n            if errorCollector.Count() > errorThreshold {\n                // Trigger batch error recovery\n                triggerBatchRecovery(errorCollector.GetAll())\n            }\n            return err, nil\n        },\n    ),\n)",{"id":3448,"title":2199,"titles":3449,"content":29,"level":19},"/v1.0.7/reference/connectors/handle#gotchas",[3394],{"id":3451,"title":3452,"titles":3453,"content":3454,"level":35},"/v1.0.7/reference/connectors/handle#dont-use-handle-to-suppress-errors","❌ Don't use Handle to suppress errors",[3394,2199],"// Define identities upfront\nvar (\n    SuppressID = pipz.NewIdentity(\"suppress\", \"Attempt to suppress errors\")\n    LogID      = pipz.NewIdentity(\"log\", \"Logs error\")\n)\n\n// WRONG - Handle doesn't suppress errors\nhandle := pipz.NewHandle(SuppressID,\n    failingProcessor,\n    pipz.Effect(LogID, logError), // Error still 
propagates!\n)",{"id":3456,"title":3457,"titles":3458,"content":3459,"level":35},"/v1.0.7/reference/connectors/handle#use-fallback-to-recover-from-errors","✅ Use Fallback to recover from errors",[3394,2199],"// Define identity upfront\nvar RecoverID = pipz.NewIdentity(\"recover\", \"Recovery with fallback\")\n\n// RIGHT - Fallback provides recovery\nfallback := pipz.NewFallback(RecoverID,\n    failingProcessor,\n    defaultProcessor, // This runs on error\n)",{"id":3461,"title":3462,"titles":3463,"content":3464,"level":35},"/v1.0.7/reference/connectors/handle#dont-ignore-handler-errors-in-critical-cleanup","❌ Don't ignore handler errors in critical cleanup",[3394,2199],"// WRONG - Critical cleanup might fail silently\nhandle := pipz.NewHandle(\n    pipz.NewIdentity(\"cleanup\", \"Cleanup with ignored errors\"),\n    processor,\n    pipz.Effect(\n        pipz.NewIdentity(\"release\", \"Releases resources\"),\n        func(ctx context.Context, err *pipz.Error[Data]) error {\n            return releaseResources() // Error is ignored!\n        },\n    ),\n)",{"id":3466,"title":3467,"titles":3468,"content":3469,"level":35},"/v1.0.7/reference/connectors/handle#log-critical-cleanup-failures","✅ Log critical cleanup failures",[3394,2199],"// RIGHT - Track cleanup failures\nhandle := pipz.NewHandle(\n    pipz.NewIdentity(\"cleanup\", \"Cleanup with error tracking\"),\n    processor,\n    pipz.Effect(\n        pipz.NewIdentity(\"release\", \"Releases resources with logging\"),\n        func(ctx context.Context, err *pipz.Error[Data]) error {\n            if releaseErr := releaseResources(); releaseErr != nil {\n                log.Printf(\"CRITICAL: Failed to release resources: %v\", releaseErr)\n                alerting.SendCritical(\"Resource leak\", releaseErr)\n            }\n            return nil // Handler errors don't affect flow anyway\n        },\n    
),\n)",{"id":3471,"title":135,"titles":3472,"content":3473,"level":19},"/v1.0.7/reference/connectors/handle#best-practices",[3394],"// Clear separation of concerns\n// GOOD: Handle for cleanup, Fallback for recovery\nvar WithRecoveryID = pipz.NewIdentity(\"with-recovery\", \"Pattern with cleanup and recovery\")\ngoodPattern := pipz.NewFallback(WithRecoveryID,\n    pipz.NewHandle(\n        pipz.NewIdentity(\"with-cleanup\", \"Operation with cleanup\"),\n        riskyOperation,\n        pipz.Effect(\n            pipz.NewIdentity(\"cleanup\", \"Cleans up resources on failure\"),\n            func(ctx context.Context, err *pipz.Error[Data]) error {\n                // Clean up resources\n                cleanup(err.InputData)\n                return nil\n            },\n        ),\n    ),\n    fallbackOperation,  // This provides the recovery\n)\n\n// Resource management\n// GOOD: Always clean up acquired resources\nfileProcessor := pipz.NewHandle(\n    pipz.NewIdentity(\"file-processing\", \"Processes file with cleanup\"),\n    pipz.Apply(\n        pipz.NewIdentity(\"process\", \"Processes file\"),\n        func(ctx context.Context, path string) (Result, error) {\n            file, err := os.Open(path)\n            if err != nil {\n                return Result{}, err\n            }\n            defer file.Close()\n            // ... 
processing ...\n        },\n    ),\n    pipz.Effect(\n        pipz.NewIdentity(\"cleanup-temp\", \"Cleans up temporary files\"),\n        func(ctx context.Context, err *pipz.Error[string]) error {\n            // Clean up any temporary files created\n            tempPath := filepath.Join(os.TempDir(), filepath.Base(err.InputData))\n            os.Remove(tempPath)\n            return nil\n        },\n    ),\n)\n\n// Comprehensive monitoring\n// GOOD: Collect all relevant metrics\nmonitoredService := pipz.NewHandle(\n    pipz.NewIdentity(\"monitored\", \"Service with comprehensive monitoring\"),\n    externalService,\n    pipz.Effect(\n        pipz.NewIdentity(\"metrics\", \"Records service metrics and errors\"),\n        func(ctx context.Context, err *pipz.Error[Request]) error {\n            labels := map[string]string{\n                \"service\": \"external\",\n                \"method\":  err.InputData.Method,\n                \"error\":   errorType(err.Err),\n            }\n\n            metrics.RecordError(labels)\n            metrics.RecordLatency(err.Duration, labels)\n\n            if err.Timeout {\n                metrics.RecordTimeout(labels)\n            }\n\n            return nil\n        },\n    ),\n)",{"id":3475,"title":1764,"titles":3476,"content":3477,"level":19},"/v1.0.7/reference/connectors/handle#see-also",[3394],"Fallback - For simple primary/backup patternsRetry - For retry logicSwitch - Often used within error handlersConcurrent - For parallel error handling html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki 
.sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":3479,"title":197,"titles":3480,"content":3481,"level":9},"/v1.0.7/reference/connectors",[],"Composition primitives that control how processors are executed and connected",{"id":3483,"title":197,"titles":3484,"content":29,"level":9},"/v1.0.7/reference/connectors#connectors",[],{"id":3486,"title":3487,"titles":3488,"content":3489,"level":9},"/v1.0.7/reference/connectors/pipeline","Pipeline",[],"Wraps a Chainable with execution context for distributed tracing and observability",{"id":3491,"title":3487,"titles":3492,"content":3493,"level":9},"/v1.0.7/reference/connectors/pipeline#pipeline",[],"Wraps a Chainable with a semantic execution context for distributed tracing and 
observability.",{"id":3495,"title":2263,"titles":3496,"content":3497,"level":19},"/v1.0.7/reference/connectors/pipeline#function-signature",[3487],"func NewPipeline[T any](\n    identity Identity,\n    root Chainable[T],\n) *Pipeline[T]",{"id":3499,"title":2268,"titles":3500,"content":3501,"level":19},"/v1.0.7/reference/connectors/pipeline#parameters",[3487],"identity (Identity) - Semantic identity for the pipeline, used for correlation across executionsroot (Chainable[T]) - The chainable to wrap with execution context",{"id":3503,"title":2273,"titles":3504,"content":3505,"level":19},"/v1.0.7/reference/connectors/pipeline#returns",[3487],"Returns a *Pipeline[T] that implements Chainable[T].",{"id":3507,"title":2278,"titles":3508,"content":3509,"level":19},"/v1.0.7/reference/connectors/pipeline#behavior",[3487],"Execution ID injection - Each Process() call generates a unique execution UUIDPipeline ID injection - The pipeline's identity ID is injected into contextContext propagation - Both IDs flow through to all nested chainablesTransparent delegation - Processing is delegated to the root chainable",{"id":3511,"title":3512,"titles":3513,"content":3514,"level":19},"/v1.0.7/reference/connectors/pipeline#context-extraction","Context Extraction",[3487],"Extract IDs from context in signal handlers or custom processors: // Extract execution ID (unique per Process() call)\nif execID, ok := pipz.ExecutionIDFromContext(ctx); ok {\n    // Use for tracing, logging, metrics...\n}\n\n// Extract pipeline ID (stable across executions)\nif pipeID, ok := pipz.PipelineIDFromContext(ctx); ok {\n    // Use for correlation, grouping...\n}",{"id":3516,"title":2283,"titles":3517,"content":3518,"level":19},"/v1.0.7/reference/connectors/pipeline#example",[3487],"// Define identities\nvar (\n    OrderPipelineID = pipz.NewIdentity(\"order-processing\", \"Main order processing flow\")\n    ValidateID      = pipz.NewIdentity(\"validate\", \"Validates order data\")\n    EnrichID        = 
pipz.NewIdentity(\"enrich\", \"Enriches order with customer data\")\n    SaveID          = pipz.NewIdentity(\"save\", \"Persists order to database\")\n    InternalSeqID   = pipz.NewIdentity(\"order-steps\", \"Internal processing sequence\")\n)\n\n// Build the processing logic\nsequence := pipz.NewSequence(InternalSeqID,\n    pipz.Apply(ValidateID, validateOrder),\n    pipz.Apply(EnrichID, enrichOrder),\n    pipz.Apply(SaveID, saveOrder),\n)\n\n// Wrap with Pipeline for execution context\npipeline := pipz.NewPipeline(OrderPipelineID, sequence)\n\n// Process - execution ID generated automatically\nresult, err := pipeline.Process(ctx, order)",{"id":3520,"title":2292,"titles":3521,"content":3522,"level":19},"/v1.0.7/reference/connectors/pipeline#when-to-use",[3487],"Use Pipeline when: Distributed tracing - Correlating signals across pipeline executionObservability - Tracking execution runs in monitoring systemsDebugging - Associating logs with specific pipeline invocationsMetrics - Grouping performance data by pipeline and execution",{"id":3524,"title":2297,"titles":3525,"content":3526,"level":19},"/v1.0.7/reference/connectors/pipeline#when-not-to-use",[3487],"Don't use Pipeline when: Simple pipelines without tracing needsPerformance-critical paths where context overhead mattersYou're not consuming execution/pipeline IDs anywhere",{"id":3528,"title":3529,"titles":3530,"content":3531,"level":19},"/v1.0.7/reference/connectors/pipeline#integration-with-signals","Integration with Signals",[3487],"Connectors emit signals with context. 
Use signal handlers to extract IDs: capitan.Hook(pipz.SignalCircuitBreakerOpened, func(ctx context.Context, e *capitan.Event) {\n    execID, _ := pipz.ExecutionIDFromContext(ctx)\n    pipeID, _ := pipz.PipelineIDFromContext(ctx)\n\n    // Log with correlation\n    log.Printf(\"Circuit opened in pipeline %s, execution %s\", pipeID, execID)\n\n    // Send to tracing system\n    span.SetAttribute(\"pipz.execution_id\", execID.String())\n    span.SetAttribute(\"pipz.pipeline_id\", pipeID.String())\n})",{"id":3533,"title":2704,"titles":3534,"content":3535,"level":19},"/v1.0.7/reference/connectors/pipeline#schema",[3487],"Pipeline appears in schema with type \"pipeline\" and a PipelineFlow: schema := pipeline.Schema()\n// schema.Type == \"pipeline\"\n// schema.Identity.Name() == \"order-processing\"\n\nif flow, ok := pipz.PipelineKey.From(schema); ok {\n    // flow.Root contains the wrapped chainable's schema\n}",{"id":3537,"title":2181,"titles":3538,"content":3539,"level":19},"/v1.0.7/reference/connectors/pipeline#common-patterns",[3487],"// Multiple pipelines sharing processors\nvar (\n    SyncPipelineID  = pipz.NewIdentity(\"sync-orders\", \"Synchronous order processing\")\n    AsyncPipelineID = pipz.NewIdentity(\"async-orders\", \"Async order processing\")\n)\n\n// Same processing, different execution contexts\nsyncPipeline := pipz.NewPipeline(SyncPipelineID, orderSequence)\nasyncPipeline := pipz.NewPipeline(AsyncPipelineID, orderSequence)\n\n// Nested pipelines (outer wins)\nvar (\n    OuterID = pipz.NewIdentity(\"outer\", \"Outer pipeline\")\n    InnerID = pipz.NewIdentity(\"inner\", \"Inner pipeline\")\n)\n\n// Inner pipeline's context injection is overwritten by outer\nouter := pipz.NewPipeline(OuterID,\n    pipz.NewPipeline(InnerID, processor), // Inner IDs ignored\n)\n// All signals will have OuterID as pipeline ID",{"id":3541,"title":1764,"titles":3542,"content":3543,"level":19},"/v1.0.7/reference/connectors/pipeline#see-also",[3487],"Sequence - Common root 
for pipelinesHooks - Signal-based observability html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}",{"id":3545,"title":3546,"titles":3547,"content":3548,"level":9},"/v1.0.7/reference/connectors/race","Race",[],"Runs processors in parallel and returns the first successful result for optimizing latency",{"id":3550,"title":3546,"titles":3551,"content":3552,"level":9},"/v1.0.7/reference/connectors/race#race",[],"Runs processors in parallel and returns the first successful result.",{"id":3554,"title":2263,"titles":3555,"content":3556,"level":19},"/v1.0.7/reference/connectors/race#function-signature",[3546],"func NewRace[T Cloner[T]](identity Identity, processors ...Chainable[T]) 
*Race[T]",{"id":3558,"title":2043,"titles":3559,"content":2951,"level":19},"/v1.0.7/reference/connectors/race#type-constraints",[3546],{"id":3561,"title":2268,"titles":3562,"content":3563,"level":19},"/v1.0.7/reference/connectors/race#parameters",[3546],"identity (Identity) - Identity containing name and description for debugging and documentationprocessors - Variable number of processors to race",{"id":3565,"title":2273,"titles":3566,"content":3567,"level":19},"/v1.0.7/reference/connectors/race#returns",[3546],"Returns a *Race[T] that implements Chainable[T].",{"id":3569,"title":2278,"titles":3570,"content":3571,"level":19},"/v1.0.7/reference/connectors/race#behavior",[3546],"Parallel execution - All processors start simultaneouslyFirst wins - Returns the first successful resultCancellation - Cancels remaining processors when one succeedsAll fail = error - Only fails if all processors failReturns clone - Winner's result is returnedContext preservation - Uses context.WithCancel(ctx) to preserve trace context while enabling cancellation of losing processors",{"id":3573,"title":2283,"titles":3574,"content":3575,"level":19},"/v1.0.7/reference/connectors/race#example",[3546],"// Race multiple data sources\nvar (\n    FetchFastestID = pipz.NewIdentity(\"fetch-fastest\", \"race to fetch data from first available source\")\n    CacheID        = pipz.NewIdentity(\"cache\", \"try cache first\")\n    PrimaryDBID    = pipz.NewIdentity(\"primary-db\", \"fetch from primary database\")\n    ReplicaDBID    = pipz.NewIdentity(\"replica-db\", \"fetch from replica database\")\n    APIFallbackID  = pipz.NewIdentity(\"api-fallback\", \"fallback to external API\")\n)\n\nfetchData := pipz.NewRace(\n    FetchFastestID,\n    pipz.Apply(CacheID, fetchFromCache),\n    pipz.Apply(PrimaryDBID, fetchFromPrimary),\n    pipz.Apply(ReplicaDBID, fetchFromReplica),\n    pipz.Apply(APIFallbackID, fetchFromAPI),\n)\n\n// Race different processing strategies\nvar (\n    ImageProcessorID = 
pipz.NewIdentity(\"image-processor\", \"race different image processing strategies\")\n    GPUID            = pipz.NewIdentity(\"gpu\", \"process image using GPU acceleration\")\n    CPUOptimizedID   = pipz.NewIdentity(\"cpu-optimized\", \"process using SIMD CPU optimization\")\n    CPUStandardID    = pipz.NewIdentity(\"cpu-standard\", \"process using standard CPU\")\n)\n\nprocessImage := pipz.NewRace(\n    ImageProcessorID,\n    pipz.Apply(GPUID, processWithGPU),\n    pipz.Apply(CPUOptimizedID, processWithSIMD),\n    pipz.Apply(CPUStandardID, processWithCPU),\n)\n\n// Race external services\nvar (\n    TranslateID = pipz.NewIdentity(\"translate\", \"race translation services for fastest response\")\n    GoogleID    = pipz.NewIdentity(\"google\", \"translate using Google Translate API\")\n    DeepLID     = pipz.NewIdentity(\"deepl\", \"translate using DeepL API\")\n    AzureID     = pipz.NewIdentity(\"azure\", \"translate using Azure Translator\")\n)\n\ntranslateText := pipz.NewRace(\n    TranslateID,\n    pipz.Apply(GoogleID, translateWithGoogle),\n    pipz.Apply(DeepLID, translateWithDeepL),\n    pipz.Apply(AzureID, translateWithAzure),\n)",{"id":3577,"title":2292,"titles":3578,"content":3579,"level":19},"/v1.0.7/reference/connectors/race#when-to-use",[3546],"Use Race when: You have multiple ways to get the same result (cache, database, API)You want the fastest response timeYou have geographically distributed servicesLatency is criticalAny successful result is acceptableTrading resource usage for speed is acceptable",{"id":3581,"title":2297,"titles":3582,"content":3583,"level":19},"/v1.0.7/reference/connectors/race#when-not-to-use",[3546],"Don't use Race when: You need all operations to complete (use Concurrent)Results might differ between processors (not equivalent)Order matters (use Sequence)You need to know which source succeededResource usage needs to be minimizedOperations have side effects (all will 
run)",{"id":3585,"title":106,"titles":3586,"content":3587,"level":19},"/v1.0.7/reference/connectors/race#error-handling",[3546],"Race only fails if all processors fail: var (\n    MultiFetchID = pipz.NewIdentity(\"multi-fetch\", \"race multiple fetch strategies for first success\")\n    FastID       = pipz.NewIdentity(\"fast\", \"fast but flaky service\")\n    SlowID       = pipz.NewIdentity(\"slow\", \"slow but reliable service\")\n    BackupID     = pipz.NewIdentity(\"backup\", \"backup service as last resort\")\n)\n\nrace := pipz.NewRace(\n    MultiFetchID,\n    pipz.Apply(FastID, fastButFlaky),     // Fails 50% of time\n    pipz.Apply(SlowID, slowButReliable),  // Takes 5 seconds\n    pipz.Apply(BackupID, backupService),  // Last resort\n)\n\n// Returns first success or error if all fail\nresult, err := race.Process(ctx, input)\nif err != nil {\n    // All three processors failed\n    var raceErr *pipz.Error[Data]\n    if errors.As(err, &raceErr) {\n        // Error from the last processor to fail\n        fmt.Printf(\"All processors failed: %v\", raceErr)\n    }\n}",{"id":3589,"title":445,"titles":3590,"content":3591,"level":19},"/v1.0.7/reference/connectors/race#performance-considerations",[3546],"Creates one goroutine per processorRequires data cloning (allocation cost)Cancels losers (saves resources)Winner's speed determines total time",{"id":3593,"title":2181,"titles":3594,"content":3595,"level":19},"/v1.0.7/reference/connectors/race#common-patterns",[3546],"// Multi-region API calls\nvar (\n    GeoFetchID = pipz.NewIdentity(\"geo-fetch\", \"race geo-distributed API calls\")\n    USEastID   = pipz.NewIdentity(\"us-east\", \"fetch from US East region\")\n    EUWestID   = pipz.NewIdentity(\"eu-west\", \"fetch from EU West region\")\n    APSouthID  = pipz.NewIdentity(\"ap-south\", \"fetch from Asia Pacific South region\")\n)\n\nmultiRegion := pipz.NewRace(\n    GeoFetchID,\n    pipz.Apply(USEastID, fetchFromUSEast),\n    pipz.Apply(EUWestID, 
fetchFromEUWest),\n    pipz.Apply(APSouthID, fetchFromAPSouth),\n)\n\n// Cache with fallbacks\nvar (\n    CachedDataID = pipz.NewIdentity(\"cached-data\", \"race cache layers for fastest data access\")\n    MemoryID     = pipz.NewIdentity(\"memory\", \"fetch from in-memory cache\")\n    RedisID      = pipz.NewIdentity(\"redis\", \"fetch from Redis cache\")\n    DatabaseID   = pipz.NewIdentity(\"database\", \"fetch from database\")\n    ComputeID    = pipz.NewIdentity(\"compute\", \"compute data from scratch\")\n)\n\ncachedFetch := pipz.NewRace(\n    CachedDataID,\n    pipz.Apply(MemoryID, fetchFromMemory),    // Fastest\n    pipz.Apply(RedisID, fetchFromRedis),      // Fast\n    pipz.Apply(DatabaseID, fetchFromDB),      // Slower\n    pipz.Apply(ComputeID, computeFromScratch), // Slowest\n)\n\n// Service degradation\nvar (\n    ResilientID     = pipz.NewIdentity(\"resilient\", \"resilient service with timeout-based degradation\")\n    ValidateID      = pipz.NewIdentity(\"validate\", \"validate incoming request\")\n    ProcessID       = pipz.NewIdentity(\"process\", \"race primary and secondary services\")\n    FastPrimaryID   = pipz.NewIdentity(\"fast-primary\", \"primary service with tight timeout\")\n    PrimaryID       = pipz.NewIdentity(\"primary\", \"call primary service\")\n    SlowSecondaryID = pipz.NewIdentity(\"slow-secondary\", \"secondary service with relaxed timeout\")\n    SecondaryID     = pipz.NewIdentity(\"secondary\", \"call secondary service\")\n)\n\nresilientService := pipz.NewSequence(\n    ResilientID,\n    pipz.Apply(ValidateID, validateRequest),\n    pipz.NewRace(\n        ProcessID,\n        pipz.NewTimeout(\n            FastPrimaryID,\n            pipz.Apply(PrimaryID, usePrimaryService),\n            1*time.Second,\n        ),\n        pipz.NewTimeout(\n            SlowSecondaryID,\n            pipz.Apply(SecondaryID, useSecondaryService),\n            5*time.Second,\n        ),\n    
),\n)",{"id":3597,"title":2199,"titles":3598,"content":29,"level":19},"/v1.0.7/reference/connectors/race#gotchas",[3546],{"id":3600,"title":3601,"titles":3602,"content":3603,"level":35},"/v1.0.7/reference/connectors/race#dont-use-race-for-different-results","❌ Don't use Race for different results",[3546,2199],"// WRONG - These return different data!\nvar (\n    DifferentID = pipz.NewIdentity(\"different\", \"race different data sources\")\n    SummaryID   = pipz.NewIdentity(\"summary\", \"get summary data\")\n    DetailedID  = pipz.NewIdentity(\"detailed\", \"get detailed data\")\n    MetadataID  = pipz.NewIdentity(\"metadata\", \"get metadata only\")\n)\n\nrace := pipz.NewRace(\n    DifferentID,\n    pipz.Apply(SummaryID, getSummaryData),     // Returns summary\n    pipz.Apply(DetailedID, getDetailedData),   // Returns full data\n    pipz.Apply(MetadataID, getMetadata),       // Returns only metadata\n)\n// You'll get random incomplete data!",{"id":3605,"title":3606,"titles":3607,"content":3608,"level":35},"/v1.0.7/reference/connectors/race#use-race-for-equivalent-results","✅ Use Race for equivalent results",[3546,2199],"// RIGHT - All return the same data\nvar (\n    EquivalentID   = pipz.NewIdentity(\"equivalent\", \"race equivalent data sources\")\n    CacheID        = pipz.NewIdentity(\"cache\", \"get from cache\")\n    PrimaryID      = pipz.NewIdentity(\"primary\", \"get from primary database\")\n    ReplicaID      = pipz.NewIdentity(\"replica\", \"get from replica database\")\n)\n\nrace := pipz.NewRace(\n    EquivalentID,\n    pipz.Apply(CacheID, getFromCache),\n    pipz.Apply(PrimaryID, getFromPrimary),\n    pipz.Apply(ReplicaID, getFromReplica),\n)",{"id":3610,"title":3611,"titles":3612,"content":3613,"level":35},"/v1.0.7/reference/connectors/race#dont-use-race-with-side-effects","❌ Don't use Race with side effects",[3546,2199],"// WRONG - All processors run until one succeeds!\nvar (\n    SideEffectsID = pipz.NewIdentity(\"side-effects\", \"race payment 
methods\")\n    Charge1ID     = pipz.NewIdentity(\"charge1\", \"charge first payment method\")\n    Charge2ID     = pipz.NewIdentity(\"charge2\", \"charge second payment method\")\n    Charge3ID     = pipz.NewIdentity(\"charge3\", \"charge third payment method\")\n)\n\nrace := pipz.NewRace(\n    SideEffectsID,\n    pipz.Apply(Charge1ID, chargePaymentMethod1), // Charges!\n    pipz.Apply(Charge2ID, chargePaymentMethod2), // Also charges!\n    pipz.Apply(Charge3ID, chargePaymentMethod3), // Triple charge!\n)",{"id":3615,"title":3616,"titles":3617,"content":3618,"level":35},"/v1.0.7/reference/connectors/race#use-race-for-read-operations","✅ Use Race for read operations",[3546,2199],"// RIGHT - Safe read operations\nvar (\n    ReadsID = pipz.NewIdentity(\"reads\", \"race read operations for fastest response\")\n    Get1ID  = pipz.NewIdentity(\"get1\", \"fetch data from first source\")\n    Get2ID  = pipz.NewIdentity(\"get2\", \"fetch data from second source\")\n    Get3ID  = pipz.NewIdentity(\"get3\", \"fetch data from third source\")\n)\n\nrace := pipz.NewRace(\n    ReadsID,\n    pipz.Apply(Get1ID, fetchData1),\n    pipz.Apply(Get2ID, fetchData2),\n    pipz.Apply(Get3ID, fetchData3),\n)",{"id":3620,"title":3621,"titles":3622,"content":3623,"level":35},"/v1.0.7/reference/connectors/race#dont-forget-about-resource-usage","❌ Don't forget about resource usage",[3546,2199],"// WRONG - Wastes resources\nvar (\n    WastefulID   = pipz.NewIdentity(\"wasteful\", \"race expensive operations\")\n    Expensive1ID = pipz.NewIdentity(\"expensive1\", \"very expensive GPU operation\")\n    Expensive2ID = pipz.NewIdentity(\"expensive2\", \"expensive RAM operation\")\n    Expensive3ID = pipz.NewIdentity(\"expensive3\", \"expensive CPU operation\")\n)\n\nrace := pipz.NewRace(\n    WastefulID,\n    pipz.Apply(Expensive1ID, veryExpensiveOperation), // Uses GPU\n    pipz.Apply(Expensive2ID, anotherExpensiveOp),     // Uses lots of RAM\n    pipz.Apply(Expensive3ID, yetAnotherExpensive),    
// Heavy CPU\n)\n// All three run even though only one result is needed!",{"id":3625,"title":3626,"titles":3627,"content":3628,"level":35},"/v1.0.7/reference/connectors/race#put-cheap-operations-first","✅ Put cheap operations first",[3546,2199],"// RIGHT - Try cheap operations first\nvar (\n    EfficientID = pipz.NewIdentity(\"efficient\", \"race operations from cheapest to most expensive\")\n    CacheID     = pipz.NewIdentity(\"cache\", \"check cache for data\")\n    DatabaseID  = pipz.NewIdentity(\"database\", \"query database for data\")\n    ComputeID   = pipz.NewIdentity(\"compute\", \"compute data from scratch\")\n)\n\nrace := pipz.NewRace(\n    EfficientID,\n    pipz.Apply(CacheID, checkCache),           // Microseconds\n    pipz.Apply(DatabaseID, queryDatabase),     // Milliseconds\n    pipz.Apply(ComputeID, computeFromScratch), // Seconds\n)",{"id":3630,"title":2565,"titles":3631,"content":3632,"level":19},"/v1.0.7/reference/connectors/race#advanced-usage",[3546],"// Combine with retry for resilience\nvar (\n    ResilientFetchID = pipz.NewIdentity(\"resilient-fetch\", \"race fetch operations with retry support\")\n    CacheRetryID     = pipz.NewIdentity(\"cache-retry\", \"retry cache fetch\")\n    DBRetryID        = pipz.NewIdentity(\"db-retry\", \"retry database fetch\")\n    APIID            = pipz.NewIdentity(\"api\", \"fetch from API without retry\")\n)\n\nresilientRace := pipz.NewRace(\n    ResilientFetchID,\n    pipz.NewRetry(CacheRetryID, fetchFromCache, 2),\n    pipz.NewRetry(DBRetryID, fetchFromDB, 3),\n    pipz.Apply(APIID, fetchFromAPI),\n)\n\n// Monitor race results\nvar (\n    MonitoredID = pipz.NewIdentity(\"monitored\", \"race processing options with metrics\")\n    OptionAID   = pipz.NewIdentity(\"option-a\", \"process with option A\")\n    OptionBID   = pipz.NewIdentity(\"option-b\", \"process with option B\")\n)\n\nmonitoredRace := pipz.NewRace(\n    MonitoredID,\n    pipz.Apply(OptionAID, func(ctx context.Context, data Data) (Data, 
error) {\n        defer metrics.Increment(\"race.winner\", \"option\", \"a\")\n        return processOptionA(ctx, data)\n    }),\n    pipz.Apply(OptionBID, func(ctx context.Context, data Data) (Data, error) {\n        defer metrics.Increment(\"race.winner\", \"option\", \"b\")\n        return processOptionB(ctx, data)\n    }),\n)\n\n// Conditional racing based on context\nvar (\n    SmartQueryID = pipz.NewIdentity(\"smart-query\", \"conditionally add cache to race based on query settings\")\n    AddCacheID   = pipz.NewIdentity(\"add-cache\", \"add cache processor if caching enabled\")\n    RaceID       = pipz.NewIdentity(\"race\", \"race dynamically configured processors\")\n    DynamicID    = pipz.NewIdentity(\"dynamic\", \"dynamically created race\")\n)\n\nsmartRace := pipz.NewSequence(\n    SmartQueryID,\n    pipz.Mutate(\n        AddCacheID,\n        func(ctx context.Context, q Query) Query {\n            // Add cache processor to race if caching is enabled\n            q.Processors = append([]Chainable[Query]{cacheProcessor}, q.Processors...)\n            return q\n        },\n        func(ctx context.Context, q Query) bool {\n            return !q.NoCache\n        },\n    ),\n    pipz.Apply(RaceID, func(ctx context.Context, q Query) (Query, error) {\n        return pipz.NewRace(DynamicID, q.Processors...).Process(ctx, q)\n    }),\n)",{"id":3634,"title":1764,"titles":3635,"content":3636,"level":19},"/v1.0.7/reference/connectors/race#see-also",[3546],"Concurrent - For running all processorsFallback - For simple primary/backup patternTimeout - Often used with Race html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki 
.sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}",{"id":3638,"title":524,"titles":3639,"content":3640,"level":9},"/v1.0.7/reference/connectors/ratelimiter",[],"Controls the rate of processing to protect downstream services and rate-limited resources",{"id":3642,"title":524,"titles":3643,"content":3644,"level":9},"/v1.0.7/reference/connectors/ratelimiter#ratelimiter",[],"Controls the rate of processing to protect downstream services and rate-limited resources.",{"id":3646,"title":2822,"titles":3647,"content":3648,"level":19},"/v1.0.7/reference/connectors/ratelimiter#function-signatures",[524],"// Create rate limiter wrapping a processor with specified rate and burst capacity\nfunc NewRateLimiter[T any](identity Identity, ratePerSecond float64, burst int, processor Chainable[T]) 
*RateLimiter[T]",{"id":3650,"title":2268,"titles":3651,"content":3652,"level":19},"/v1.0.7/reference/connectors/ratelimiter#parameters",[524],"identity (Identity) - Identity containing name and description for debugging and documentationratePerSecond (float64) - Sustained rate limit (requests per second)burst (int) - Maximum burst capacity (immediate requests allowed)processor (Chainable[T]) - The processor to rate limit",{"id":3654,"title":2273,"titles":3655,"content":3656,"level":19},"/v1.0.7/reference/connectors/ratelimiter#returns",[524],"Returns a *RateLimiter[T] that implements Chainable[T].",{"id":3658,"title":2835,"titles":3659,"content":29,"level":19},"/v1.0.7/reference/connectors/ratelimiter#testing-configuration",[524],{"id":3661,"title":2839,"titles":3662,"content":3663,"level":35},"/v1.0.7/reference/connectors/ratelimiter#withclock",[524,2835],"func (r *RateLimiter[T]) WithClock(clock clockz.Clock) *RateLimiter[T] Sets a custom clock implementation for testing purposes. This method enables controlled time manipulation in tests using clockz.FakeClock. Parameters: clock (clockz.Clock) - Clock implementation to use Returns:\nReturns the same connector instance for method chaining. 
Example: // Use fake clock in tests\nvar (\n    TestID      = pipz.NewIdentity(\"test\", \"test rate limiter with fake clock\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"the wrapped processor\")\n)\n\nprocessor := pipz.Transform(ProcessorID, func(_ context.Context, s string) string { return s })\nfakeClock := clockz.NewFakeClock()\nrateLimiter := pipz.NewRateLimiter(TestID, 10.0, 5, processor).WithClock(fakeClock)\n\n// Advance time in test to replenish tokens\nfakeClock.Advance(1 * time.Second)",{"id":3665,"title":2278,"titles":3666,"content":29,"level":19},"/v1.0.7/reference/connectors/ratelimiter#behavior",[524],{"id":3668,"title":3669,"titles":3670,"content":3671,"level":35},"/v1.0.7/reference/connectors/ratelimiter#rate-limiting-algorithm","Rate Limiting Algorithm",[524,2278],"Token bucket - Uses golang.org/x/time/rate package for proven implementationSustained rate - Long-term average rate is maintainedBurst handling - Allows controlled bursts up to the specified limitThread-safe - Safe for concurrent access from multiple goroutines",{"id":3673,"title":3674,"titles":3675,"content":3676,"level":35},"/v1.0.7/reference/connectors/ratelimiter#operating-modes","Operating Modes",[524,2278],"Wait mode (default) - Blocks until a token is availableDrop mode - Returns error immediately if no tokens available",{"id":3678,"title":3679,"titles":3680,"content":3681,"level":35},"/v1.0.7/reference/connectors/ratelimiter#context-handling","Context Handling",[524,2278],"Cancellation support - Respects context cancellation during waitsTimeout detection - Properly handles context deadline exceededError enrichment - Provides detailed error information",{"id":3683,"title":2866,"titles":3684,"content":3685,"level":19},"/v1.0.7/reference/connectors/ratelimiter#signals",[524],"RateLimiter emits typed signals for throttling and request handling via capitan: SignalWhen EmittedFieldsratelimiter.allowedRequest allowed, token consumedname, tokens, rate, 
burstratelimiter.throttledRequest waiting for tokens (wait mode)name, wait_time, tokens, rateratelimiter.droppedRequest dropped, no tokens available (drop mode)name, tokens, rate, burst, mode Example: import \"github.com/zoobzio/capitan\"\n\n// Hook rate limiter signals\ncapitan.Hook(pipz.SignalRateLimiterDropped, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    rate, _ := pipz.FieldRate.From(e)\n    // Alert on dropped requests\n}) See Hooks Documentation for complete signal reference and usage examples.",{"id":3687,"title":2871,"titles":3688,"content":3689,"level":19},"/v1.0.7/reference/connectors/ratelimiter#configuration-methods",[524],"// Runtime configuration\nrateLimiter.SetRate(200)           // Update to 200 requests/second\nrateLimiter.SetBurst(20)           // Update burst capacity to 20\nrateLimiter.SetMode(\"drop\")        // Switch to drop mode\n\n// Getters\nrate := rateLimiter.GetRate()      // Current rate limit\nburst := rateLimiter.GetBurst()    // Current burst capacity\nmode := rateLimiter.GetMode()      // Current mode (\"wait\" or \"drop\")",{"id":3691,"title":2283,"titles":3692,"content":3693,"level":19},"/v1.0.7/reference/connectors/ratelimiter#example",[524],"// Define identities upfront\nvar (\n    APILimiterID     = pipz.NewIdentity(\"api-limiter\", \"limit API requests to 100/sec with burst of 10\")\n    CallExternalID   = pipz.NewIdentity(\"call-external-api\", \"call external API endpoint\")\n    UserLimiterID    = pipz.NewIdentity(\"user-limiter\", \"route to tier-specific rate limiter\")\n    PremiumRateID    = pipz.NewIdentity(\"premium-rate\", \"premium tier rate limit: 1000/sec\")\n    StandardRateID   = pipz.NewIdentity(\"standard-rate\", \"standard tier rate limit: 100/sec\")\n    FreeRateID       = pipz.NewIdentity(\"free-rate\", \"free tier rate limit: 10/sec\")\n    APIGatewayID     = pipz.NewIdentity(\"api-gateway\", \"API gateway with global and per-user rate limiting\")\n    
AuthenticateID   = pipz.NewIdentity(\"authenticate\", \"authenticate incoming request\")\n    GlobalLimitID    = pipz.NewIdentity(\"global-limit\", \"global rate limit: 10000/sec\")\n    RouteRequestID   = pipz.NewIdentity(\"route-request\", \"route request to backend service\")\n    PremiumAPIID     = pipz.NewIdentity(\"premium-api\", \"premium tier API call\")\n    StandardAPIID    = pipz.NewIdentity(\"standard-api\", \"standard tier API call\")\n    FreeAPIID        = pipz.NewIdentity(\"free-api\", \"free tier API call\")\n)\n\n// Basic rate limiter wrapping an API call - 100 requests per second with burst of 10\napiCall := pipz.Apply(CallExternalID, callExternalAPI)\nrateLimiter := pipz.NewRateLimiter(APILimiterID, 100, 10, apiCall)\n\n// Runtime configuration\nrateLimiter.SetMode(\"drop\")        // Don't wait, fail fast\nrateLimiter.SetRate(200)           // Increase rate during off-peak hours\n\n// Per-user rate limiting - each tier wraps its own API processor\nuserLimiter := pipz.NewSwitch(UserLimiterID, getUserTier).\n    AddRoute(\"premium\", pipz.NewRateLimiter(PremiumRateID, 1000, 100,\n        pipz.Apply(PremiumAPIID, callPremiumAPI))).\n    AddRoute(\"standard\", pipz.NewRateLimiter(StandardRateID, 100, 10,\n        pipz.Apply(StandardAPIID, callStandardAPI))).\n    AddRoute(\"free\", pipz.NewRateLimiter(FreeRateID, 10, 1,\n        pipz.Apply(FreeAPIID, callFreeAPI)))\n\n// API Gateway with rate limiting wrapping the backend router\napiGateway := pipz.NewSequence(APIGatewayID,\n    pipz.Apply(AuthenticateID, authenticateRequest),\n    pipz.NewRateLimiter(GlobalLimitID, 10000, 1000,\n        pipz.Apply(RouteRequestID, routeToBackend)),\n)",{"id":3695,"title":2292,"titles":3696,"content":3697,"level":19},"/v1.0.7/reference/connectors/ratelimiter#when-to-use",[524],"Use RateLimiter when: Protecting external APIs with rate limits (Twitter, OpenAI, etc.)Preventing overwhelming of downstream servicesImplementing fair resource sharingMeeting SLA 
requirementsControlling database connection usageThrottling expensive operationsCost control (pay-per-request APIs) Use Wait mode when: You want to honor all requests eventuallyLatency spikes are acceptableThe calling system can handle delaysRate limits are soft boundaries Use Drop mode when: Fast failure is preferred over delaysSystem load shedding is requiredRate limits are hard boundariesYou want to fail fast under pressure",{"id":3699,"title":2297,"titles":3700,"content":3701,"level":19},"/v1.0.7/reference/connectors/ratelimiter#when-not-to-use",[524],"Don't use RateLimiter when: No external rate limits existAll operations are equally cheapBackpressure isn't neededDifferent error handling is needed (use CircuitBreaker)You need per-user limits (create multiple limiters)",{"id":3703,"title":2888,"titles":3704,"content":3705,"level":19},"/v1.0.7/reference/connectors/ratelimiter#error-messages",[524],"RateLimiter provides clear error information: var (\n    APIID       = pipz.NewIdentity(\"api\", \"API rate limiter at 10/sec\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"the rate-limited processor\")\n)\n\nlimiter := pipz.NewRateLimiter(APIID, 10, 1, pipz.Apply(ProcessorID, processRequest))\nlimiter.SetMode(\"drop\")\n\n_, err := limiter.Process(ctx, request)\nif err != nil {\n    var pipeErr *pipz.Error[Request]\n    if errors.As(err, &pipeErr) {\n        // Error path shows where rate limiting occurred\n        // Example: \"api failed after 0s: rate limit exceeded\"\n        fmt.Printf(\"Rate limited at: %v\\n\", pipeErr.Path)\n    }\n}",{"id":3707,"title":2181,"titles":3708,"content":3709,"level":19},"/v1.0.7/reference/connectors/ratelimiter#common-patterns",[524],"// External API protection - rate limiter wraps the timeout chain\nvar (\n    RateLimitedClientID = pipz.NewIdentity(\"rate-limited-client\", \"HTTP client with rate limiting and timeout\")\n    APIRateID           = pipz.NewIdentity(\"api-rate\", \"limit API requests to 100/sec\")\n    
RequestTimeoutID    = pipz.NewIdentity(\"request-timeout\", \"30 second timeout for HTTP requests\")\n    HTTPRequestID       = pipz.NewIdentity(\"http-request\", \"make HTTP request\")\n)\n\nhttpClient := pipz.NewRateLimiter(APIRateID, 100, 20,\n    pipz.NewTimeout(RequestTimeoutID,\n        pipz.Apply(HTTPRequestID, makeHTTPRequest),\n        30*time.Second,\n    ),\n)\n\n// Database connection throttling - rate limiter wraps retry chain\nvar (\n    DBRateID       = pipz.NewIdentity(\"db-rate\", \"protect database with 1000/sec rate limit\")\n    DBRetryID      = pipz.NewIdentity(\"db-retry\", \"retry failed database queries\")\n    ExecuteQueryID = pipz.NewIdentity(\"execute-query\", \"execute database query\")\n)\n\ndbOperations := pipz.NewRateLimiter(DBRateID, 1000, 50,\n    pipz.NewRetry(DBRetryID,\n        pipz.Apply(ExecuteQueryID, runQuery),\n        3,\n    ),\n)\n\n// Burst vs sustained rate\nvar (\n    EmailRateID  = pipz.NewIdentity(\"email-rate\", \"email sending with 10/sec sustained, 100 burst capacity\")\n    SendEmailID  = pipz.NewIdentity(\"send-email\", \"send email via SMTP\")\n)\n\nemailSender := pipz.NewRateLimiter(EmailRateID,\n    10,    // 10 emails per second sustained\n    100,   // But allow burst of 100 emails\n    pipz.Apply(SendEmailID, sendEmail),\n)\n\n// Dynamic rate limiting\nvar (\n    DynamicID     = pipz.NewIdentity(\"dynamic\", \"rate limiter with dynamic adjustment based on load\")\n    ProcessorID   = pipz.NewIdentity(\"processor\", \"the rate-limited processor\")\n)\n\ndynamicLimiter := pipz.NewRateLimiter(DynamicID, 100, 10,\n    pipz.Apply(ProcessorID, processRequest),\n)\n\n// Adjust rate based on time of day, load, etc.\ngo func() {\n    for {\n        if isOffPeakHours() {\n            dynamicLimiter.SetRate(1000)  // Higher rate during off-peak\n        } else {\n            dynamicLimiter.SetRate(100)   // Lower rate during peak\n        }\n        time.Sleep(5 * time.Minute)\n    }\n}()\n\n// Graceful degradation - 
each fallback option has its own rate limiter\nvar (\n    GracefulAPIID    = pipz.NewIdentity(\"graceful-api\", \"graceful degradation from primary to fallback API\")\n    PrimaryRateID    = pipz.NewIdentity(\"primary-rate\", \"primary API rate limit: 1000/sec\")\n    FallbackRateID   = pipz.NewIdentity(\"fallback-rate\", \"fallback API rate limit: 100/sec\")\n    PrimaryCallID    = pipz.NewIdentity(\"primary-call\", \"call primary API\")\n    FallbackCallID   = pipz.NewIdentity(\"fallback-call\", \"call fallback API\")\n)\n\ngracefulAPI := pipz.NewFallback(GracefulAPIID,\n    pipz.NewRateLimiter(PrimaryRateID, 1000, 100,\n        pipz.Apply(PrimaryCallID, callPrimaryAPI)),\n    pipz.NewRateLimiter(FallbackRateID, 100, 10,\n        pipz.Apply(FallbackCallID, callFallbackAPI)),\n)",{"id":3711,"title":2199,"titles":3712,"content":29,"level":19},"/v1.0.7/reference/connectors/ratelimiter#gotchas",[524],{"id":3714,"title":3715,"titles":3716,"content":3717,"level":35},"/v1.0.7/reference/connectors/ratelimiter#dont-create-rate-limiters-per-request","❌ Don't create rate limiters per request",[524,2199],"// WRONG - New limiter each time, no rate limiting!\nvar (\n    APIID       = pipz.NewIdentity(\"api\", \"API rate limiter\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\nfunc handleRequest(req Request) Response {\n    processor := pipz.Apply(ProcessorID, processRequest)\n    limiter := pipz.NewRateLimiter(APIID, 100, 10, processor) // New instance!\n    return limiter.Process(ctx, req) // Useless!\n}",{"id":3719,"title":2905,"titles":3720,"content":3721,"level":35},"/v1.0.7/reference/connectors/ratelimiter#create-once-reuse",[524,2199],"// RIGHT - Shared limiter for all requests\nvar (\n    APIID       = pipz.NewIdentity(\"api\", \"shared API rate limiter at 100/sec\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n    apiLimiter  = pipz.NewRateLimiter(APIID, 100, 10,\n        pipz.Apply(ProcessorID, 
processRequest))\n)\n\nfunc handleRequest(req Request) Response {\n    return apiLimiter.Process(ctx, req)\n}",{"id":3723,"title":3724,"titles":3725,"content":3726,"level":35},"/v1.0.7/reference/connectors/ratelimiter#dont-ignore-burst-capacity","❌ Don't ignore burst capacity",[524,2199],"// WRONG - Burst of 1 causes unnecessary blocking\nvar (\n    StrictID    = pipz.NewIdentity(\"strict\", \"strict rate limiter with minimal burst\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\nlimiter := pipz.NewRateLimiter(StrictID, 100, 1, // Burst of 1!\n    pipz.Apply(ProcessorID, processRequest))\n// Can't handle any burst traffic",{"id":3728,"title":3729,"titles":3730,"content":3731,"level":35},"/v1.0.7/reference/connectors/ratelimiter#set-reasonable-burst-capacity","✅ Set reasonable burst capacity",[524,2199],"// RIGHT - Allow some burst\nvar (\n    FlexibleID  = pipz.NewIdentity(\"flexible\", \"flexible rate limiter with 20% burst capacity\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\nlimiter := pipz.NewRateLimiter(FlexibleID, 100, 20, // 20% burst\n    pipz.Apply(ProcessorID, processRequest))\n// Can handle traffic spikes",{"id":3733,"title":3734,"titles":3735,"content":3736,"level":35},"/v1.0.7/reference/connectors/ratelimiter#dont-use-wait-mode-for-user-facing-apis","❌ Don't use wait mode for user-facing APIs",[524,2199],"// WRONG - Users wait indefinitely\nvar (\n    UserAPIID   = pipz.NewIdentity(\"user-api\", \"user-facing API rate limiter\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\napiHandler := pipz.NewRateLimiter(UserAPIID, 10, 1,\n    pipz.Apply(ProcessorID, processRequest))\n// Default is \"wait\" mode - users stuck waiting!",{"id":3738,"title":3739,"titles":3740,"content":3741,"level":35},"/v1.0.7/reference/connectors/ratelimiter#use-drop-mode-for-user-facing-services","✅ Use drop mode for user-facing services",[524,2199],"// RIGHT - Fail fast for 
users\nvar (\n    UserAPIID   = pipz.NewIdentity(\"user-api\", \"user-facing API with fail-fast rate limiting\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\napiHandler := pipz.NewRateLimiter(UserAPIID, 100, 10,\n    pipz.Apply(ProcessorID, processRequest))\napiHandler.SetMode(\"drop\") // Return 429 immediately",{"id":3743,"title":3744,"titles":3745,"content":3746,"level":35},"/v1.0.7/reference/connectors/ratelimiter#dont-forget-rate-limits-are-per-instance","❌ Don't forget rate limits are per instance",[524,2199],"// WRONG - Each server has its own limit\n// If you have 10 servers, actual rate is 10x!\nvar (\n    APIID       = pipz.NewIdentity(\"api\", \"API rate limiter\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\nlimiter := pipz.NewRateLimiter(APIID, 100, 10,\n    pipz.Apply(ProcessorID, processRequest))",{"id":3748,"title":3749,"titles":3750,"content":3751,"level":35},"/v1.0.7/reference/connectors/ratelimiter#adjust-for-number-of-instances","✅ Adjust for number of instances",[524,2199],"// RIGHT - Divide by instance count\nvar (\n    APIID       = pipz.NewIdentity(\"api\", \"distributed API rate limiter adjusted for instance count\")\n    ProcessorID = pipz.NewIdentity(\"processor\", \"process request\")\n)\n\ninstanceCount := getInstanceCount()\nlimiter := pipz.NewRateLimiter(APIID, 100/float64(instanceCount), 10, // Distributed rate\n    pipz.Apply(ProcessorID, processRequest))",{"id":3753,"title":111,"titles":3754,"content":3755,"level":19},"/v1.0.7/reference/connectors/ratelimiter#advanced-patterns",[524],"// Multi-tier rate limiting - nested wrappers provide layered protection\nvar (\n    GlobalID       = pipz.NewIdentity(\"global\", \"global rate limit: 10000/sec\")\n    PerServiceID   = pipz.NewIdentity(\"per-service\", \"per-service rate limit: 1000/sec\")\n    PerEndpointID  = pipz.NewIdentity(\"per-endpoint\", \"per-endpoint rate limit: 100/sec\")\n    ProcessorID    = 
pipz.NewIdentity(\"processor\", \"the rate-limited processor\")\n)\n\nmultiTierLimiter := pipz.NewRateLimiter(GlobalID, 10000, 1000,\n    pipz.NewRateLimiter(PerServiceID, 1000, 100,\n        pipz.NewRateLimiter(PerEndpointID, 100, 10,\n            pipz.Apply(ProcessorID, processRequest))))\n\n// Rate limiting with circuit breaker - rate limiter wraps the resilience chain\nvar (\n    RateLimitID   = pipz.NewIdentity(\"rate-limit\", \"rate limit before circuit breaker\")\n    CircuitID     = pipz.NewIdentity(\"circuit\", \"circuit breaker for API calls\")\n    RetryID       = pipz.NewIdentity(\"retry\", \"retry failed API calls\")\n    APICallID     = pipz.NewIdentity(\"api-call\", \"the actual API call\")\n)\n\nresilientAPI := pipz.NewRateLimiter(RateLimitID, 100, 10,\n    pipz.NewCircuitBreaker(CircuitID,\n        pipz.NewRetry(RetryID,\n            pipz.Apply(APICallID, callAPI),\n            3),\n        5, 30*time.Second))\n\n// Custom rate limit error handling\nvar (\n    SmartLimiterID      = pipz.NewIdentity(\"smart-rate-limiter\", \"rate limiter with custom error handling\")\n    LimiterID           = pipz.NewIdentity(\"limiter\", \"base rate limiter at 100/sec\")\n    ProcessID           = pipz.NewIdentity(\"process\", \"process the request\")\n    RateErrorHandlerID  = pipz.NewIdentity(\"rate-error-handler\", \"route based on error type\")\n    LogRateLimitID      = pipz.NewIdentity(\"log-rate-limit\", \"log rate limit hit\")\n    LogOtherID          = pipz.NewIdentity(\"log-other\", \"log other errors\")\n)\n\nsmartLimiter := pipz.NewHandle(SmartLimiterID,\n    pipz.NewRateLimiter(LimiterID, 100, 10,\n        pipz.Apply(ProcessID, processRequest)),\n    pipz.NewSwitch(RateErrorHandlerID,\n        func(ctx context.Context, err *pipz.Error[Request]) string {\n            if strings.Contains(err.Err.Error(), \"rate limit exceeded\") {\n                return \"rate-limited\"\n            }\n            return \"other\"\n        },\n    ).\n    
AddRoute(\"rate-limited\",\n        pipz.Effect(LogRateLimitID, logRateLimitHit),\n    ).\n    AddRoute(\"other\",\n        pipz.Effect(LogOtherID, logOtherError),\n    ),\n)\n\n// Adaptive rate limiting\ntype AdaptiveRateLimiter struct {\n    limiter     *pipz.RateLimiter[Request]\n    successRate float64\n    mu          sync.Mutex\n}\n\nfunc (a *AdaptiveRateLimiter) Process(ctx context.Context, req Request) (Request, error) {\n    result, err := a.limiter.Process(ctx, req)\n\n    a.mu.Lock()\n    if err == nil {\n        a.successRate = a.successRate*0.9 + 0.1  // Increase success rate\n        if a.successRate > 0.95 {\n            // Increase rate if success rate is high\n            currentRate := a.limiter.GetRate()\n            a.limiter.SetRate(currentRate * 1.1)\n        }\n    } else {\n        a.successRate = a.successRate * 0.9       // Decrease success rate\n        if a.successRate \u003C 0.8 {\n            // Decrease rate if success rate is low\n            currentRate := a.limiter.GetRate()\n            a.limiter.SetRate(currentRate * 0.9)\n        }\n    }\n    a.mu.Unlock()\n\n    return result, err\n}",{"id":3757,"title":2764,"titles":3758,"content":3759,"level":19},"/v1.0.7/reference/connectors/ratelimiter#performance-characteristics",[524],"Low overhead - ~1μs per operation when not rate limitedWait mode - Blocks until token available (can add latency)Drop mode - Fast failure, no waitingMemory usage - Minimal, constant regardless of rateThread safety - Fully concurrent, no contention",{"id":3761,"title":1764,"titles":3762,"content":3763,"level":19},"/v1.0.7/reference/connectors/ratelimiter#see-also",[524],"CircuitBreaker - For handling service failuresTimeout - Often combined with rate limitingRetry - For handling rate limit errorsSwitch - For conditional rate limitingSequence - For building rate-limited pipelines html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html 
code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki .suWN2{--shiki-default:var(--shiki-tag)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":3765,"title":534,"titles":3766,"content":3767,"level":9},"/v1.0.7/reference/connectors/retry",[],"Retries a processor up to a specified number of attempts for handling transient failures",{"id":3769,"title":534,"titles":3770,"content":3771,"level":9},"/v1.0.7/reference/connectors/retry#retry",[],"Retries a processor up to a specified number of attempts, with optional exponential 
backoff.",{"id":3773,"title":2822,"titles":3774,"content":3775,"level":19},"/v1.0.7/reference/connectors/retry#function-signatures",[534],"// Simple retry without delays\nfunc NewRetry[T any](identity Identity, processor Chainable[T], maxAttempts int) *Retry[T]\n\n// Retry with exponential backoff\nfunc NewBackoff[T any](identity Identity, processor Chainable[T], maxAttempts int, baseDelay time.Duration) *Retry[T]",{"id":3777,"title":2268,"titles":3778,"content":3779,"level":19},"/v1.0.7/reference/connectors/retry#parameters",[534],"identity (Identity) - Identity containing name and description for debugging and documentationprocessor - The processor to retry on failuremaxAttempts - Maximum number of attempts (minimum 1)baseDelay - (Backoff only) Initial delay between attempts",{"id":3781,"title":2273,"titles":3782,"content":3783,"level":19},"/v1.0.7/reference/connectors/retry#returns",[534],"Returns a *Retry[T] that implements Chainable[T].",{"id":3785,"title":2835,"titles":3786,"content":29,"level":19},"/v1.0.7/reference/connectors/retry#testing-configuration",[534],{"id":3788,"title":2839,"titles":3789,"content":3790,"level":35},"/v1.0.7/reference/connectors/retry#withclock",[534,2835],"func (b *Backoff[T]) WithClock(clock clockz.Clock) *Backoff[T] Sets a custom clock implementation for testing purposes. This method enables controlled time manipulation in tests using clockz.FakeClock. Available only on Backoff (created with NewBackoff), not on simple Retry. Parameters: clock (clockz.Clock) - Clock implementation to use Returns:\nReturns the same connector instance for method chaining. 
Example: // Define identity\nvar TestBackoffID = pipz.NewIdentity(\"test\", \"test backoff with fake clock\")\n\n// Use fake clock in tests\nfakeClock := clockz.NewFakeClock()\nbackoff := pipz.NewBackoff(\n    TestBackoffID,\n    processor, 3, 100*time.Millisecond,\n).WithClock(fakeClock)\n\n// Advance time in test to trigger delays\nfakeClock.Advance(200 * time.Millisecond)",{"id":3792,"title":2278,"titles":3793,"content":29,"level":19},"/v1.0.7/reference/connectors/retry#behavior",[534],{"id":3795,"title":3796,"titles":3797,"content":3798,"level":35},"/v1.0.7/reference/connectors/retry#newretry","NewRetry",[534,2278],"Immediate retry - No delay between attemptsStops on success - Returns immediately when processor succeedsContext check - Checks for cancellation between attemptsError includes attempts - Final error shows retry count",{"id":3800,"title":3801,"titles":3802,"content":3803,"level":35},"/v1.0.7/reference/connectors/retry#newbackoff","NewBackoff",[534,2278],"Exponential delays - Delay doubles after each failurePattern - baseDelay, 2×baseDelay, 4×baseDelay, etc.No final delay - No delay after the last attemptJittered delays - Small randomization to prevent thundering herd",{"id":3805,"title":2283,"titles":3806,"content":3807,"level":19},"/v1.0.7/reference/connectors/retry#example",[534],"// Define identities\nvar (\n    APIRetryID      = pipz.NewIdentity(\"api-retry\", \"retry flaky API calls up to 3 times\")\n    APICallID       = pipz.NewIdentity(\"api-call\", \"call external API\")\n    ServiceRetryID  = pipz.NewIdentity(\"service-retry\", \"retry external service with exponential backoff\")\n    ExternalSvcID   = pipz.NewIdentity(\"external-service\", \"call external service\")\n    SaveRetryID     = pipz.NewIdentity(\"save-retry\", \"retry order save operation\")\n    SaveFlowID      = pipz.NewIdentity(\"save-flow\", \"validate, calculate, and persist order\")\n    ValidateID      = pipz.NewIdentity(\"validate\", \"validate order data\")\n    
CalculateID     = pipz.NewIdentity(\"calculate\", \"calculate order totals\")\n    PersistID       = pipz.NewIdentity(\"persist\", \"save order to database\")\n    GraduatedID     = pipz.NewIdentity(\"graduated-retry\", \"graduated retry: quick attempts then slow backoff\")\n    QuickRetryID    = pipz.NewIdentity(\"quick-retry\", \"2 quick retry attempts without delay\")\n    SlowRetryID     = pipz.NewIdentity(\"slow-retry\", \"3 slower retry attempts with backoff\")\n)\n\n// Simple retry\nreliableAPI := pipz.NewRetry(\n    APIRetryID,\n    pipz.Apply(APICallID, callFlakyAPI),\n    3, // Try up to 3 times\n)\n\n// Retry with backoff\nresilientService := pipz.NewBackoff(\n    ServiceRetryID,\n    pipz.Apply(ExternalSvcID, callExternalService),\n    5,                        // Max 5 attempts\n    100*time.Millisecond,     // 100ms, 200ms, 400ms, 800ms delays\n)\n\n// Retry a complex operation\nsaveWithRetry := pipz.NewRetry(\n    SaveRetryID,\n    pipz.NewSequence(\n        SaveFlowID,\n        pipz.Apply(ValidateID, validateOrder),\n        pipz.Apply(CalculateID, calculateTotals),\n        pipz.Apply(PersistID, saveToDatabase),\n    ),\n    3,\n)\n\n// Graduated retry strategy\nsmartRetry := pipz.NewFallback(\n    GraduatedID,\n    pipz.NewRetry(QuickRetryID, processor, 2),\n    pipz.NewBackoff(SlowRetryID, processor, 3, time.Second),\n)",{"id":3809,"title":2292,"titles":3810,"content":3811,"level":19},"/v1.0.7/reference/connectors/retry#when-to-use",[534],"Use Retry when: Dealing with transient failures (network blips, temporary unavailability)Network operations that may timeoutExternal services with occasional failuresDatabase deadlocks or conflictsRate limit errors (with backoff)Operations are idempotent (safe to repeat) Use Backoff specifically when: You need to respect rate limitsAvoiding thundering herd problemsExternal service needs recovery timeExponential backoff is required by APILoad shedding is 
important",{"id":3813,"title":2297,"titles":3814,"content":3815,"level":19},"/v1.0.7/reference/connectors/retry#when-not-to-use",[534],"Don't use Retry when: Errors are permanent (validation failures, business logic errors)Operations are not idempotent (payments, incrementing counters)Fast failure is preferred (user-facing APIs)Different approach needed on failure (use Fallback)Error indicates a bug (null pointer, index out of bounds)",{"id":3817,"title":2199,"titles":3818,"content":29,"level":19},"/v1.0.7/reference/connectors/retry#gotchas",[534],{"id":3820,"title":3821,"titles":3822,"content":3823,"level":35},"/v1.0.7/reference/connectors/retry#dont-retry-non-idempotent-operations","❌ Don't retry non-idempotent operations",[534,2199],"// Define identities\nvar (\n    ChargeRetryID = pipz.NewIdentity(\"charge\", \"retry payment charge\")\n    PaymentID     = pipz.NewIdentity(\"payment\", \"charge card\")\n)\n\n// WRONG - Each retry charges the card again!\nretry := pipz.NewRetry(\n    ChargeRetryID,\n    pipz.Apply(PaymentID, chargeCard),\n    3,\n)",{"id":3825,"title":3826,"titles":3827,"content":3828,"level":35},"/v1.0.7/reference/connectors/retry#make-operations-idempotent-first","✅ Make operations idempotent first",[534,2199],"// Define identities\nvar (\n    IdempotentChargeID = pipz.NewIdentity(\"charge\", \"retry idempotent payment charge\")\n    IdempotentPayID    = pipz.NewIdentity(\"payment\", \"charge card with idempotency key\")\n)\n\n// RIGHT - Use idempotency key\nretry := pipz.NewRetry(\n    IdempotentChargeID,\n    pipz.Apply(IdempotentPayID, func(ctx context.Context, payment Payment) (Payment, error) {\n        payment.IdempotencyKey = generateIdempotencyKey(payment)\n        return chargeCardIdempotent(ctx, payment)\n    }),\n    3,\n)",{"id":3830,"title":3831,"titles":3832,"content":3833,"level":35},"/v1.0.7/reference/connectors/retry#dont-retry-validation-errors","❌ Don't retry validation errors",[534,2199],"// Define identities\nvar (\n    
ValidateRetryID = pipz.NewIdentity(\"validate\", \"retry email validation\")\n    CheckEmailID    = pipz.NewIdentity(\"check\", \"check email format\")\n)\n\n// WRONG - Will never succeed\nretry := pipz.NewRetry(\n    ValidateRetryID,\n    pipz.Apply(CheckEmailID, func(ctx context.Context, email string) (string, error) {\n        if !strings.Contains(email, \"@\") {\n            return \"\", errors.New(\"invalid email\") // Permanent error!\n        }\n        return email, nil\n    }),\n    5, // Wastes 5 attempts\n)",{"id":3835,"title":3836,"titles":3837,"content":3838,"level":35},"/v1.0.7/reference/connectors/retry#only-retry-transient-errors","✅ Only retry transient errors",[534,2199],"// Define identities\nvar (\n    SmartRetryID = pipz.NewIdentity(\"smart\", \"smart retry that distinguishes transient from permanent errors\")\n    APICheckID   = pipz.NewIdentity(\"api\", \"call API with error type checking\")\n)\n\n// RIGHT - Check error type\nretry := pipz.NewRetry(\n    SmartRetryID,\n    pipz.Apply(APICheckID, func(ctx context.Context, req Request) (Response, error) {\n        resp, err := callAPI(ctx, req)\n        if err != nil {\n            if isPermanentError(err) {\n                return resp, fmt.Errorf(\"permanent: %w\", err) // Mark as permanent\n            }\n            return resp, err // Transient, will retry\n        }\n        return resp, nil\n    }),\n    3,\n)",{"id":3840,"title":2888,"titles":3841,"content":3842,"level":19},"/v1.0.7/reference/connectors/retry#error-messages",[534],"Retry enriches errors with attempt information: // Define identity\nvar APIRetryID = pipz.NewIdentity(\"api\", \"retry flaky API processor\")\n\nretry := pipz.NewRetry(APIRetryID, flakyProcessor, 3)\n_, err := retry.Process(ctx, input)\nif err != nil {\n    // Error message includes retry information\n    // Example: \"api failed after 3 attempts: connection 
timeout\"\n}",{"id":3844,"title":2181,"titles":3845,"content":3846,"level":19},"/v1.0.7/reference/connectors/retry#common-patterns",[534],"// Define identities\nvar (\n    HTTPClientID    = pipz.NewIdentity(\"http-client\", \"HTTP client with exponential backoff retry\")\n    RequestID       = pipz.NewIdentity(\"request\", \"make HTTP request\")\n    DBOpID          = pipz.NewIdentity(\"db-op\", \"database operation with quick retry for deadlocks\")\n    QueryID         = pipz.NewIdentity(\"query\", \"run database query\")\n    CascadingID     = pipz.NewIdentity(\"cascading\", \"cascading retry strategy with validation and progressive delays\")\n    ValidateDataID  = pipz.NewIdentity(\"validate\", \"validate data\")\n    QuickOpID       = pipz.NewIdentity(\"quick\", \"quick operation with immediate retry\")\n    SlowOpID        = pipz.NewIdentity(\"slow\", \"slow operation with exponential backoff\")\n    CircuitID       = pipz.NewIdentity(\"circuit\", \"circuit breaker with retry protection\")\n    CheckCircuitID  = pipz.NewIdentity(\"check-circuit\", \"check if circuit is open\")\n    ProtectedCallID = pipz.NewIdentity(\"protected-call\", \"retry protected operation\")\n)\n\n// Network operations with backoff\nhttpClient := pipz.NewBackoff(\n    HTTPClientID,\n    pipz.Apply(RequestID, makeHTTPRequest),\n    5,\n    500*time.Millisecond, // 0.5s, 1s, 2s, 4s\n)\n\n// Database operations with quick retry\ndbOperation := pipz.NewRetry(\n    DBOpID,\n    pipz.Apply(QueryID, runDatabaseQuery),\n    3, // Handle transient deadlocks\n)\n\n// Cascading retry strategy\ncascadingRetry := pipz.NewSequence(\n    CascadingID,\n    pipz.Apply(ValidateDataID, validate),\n    pipz.NewRetry(QuickOpID, quickOperation, 2),\n    pipz.NewBackoff(SlowOpID, slowOperation, 5, time.Second),\n)\n\n// Retry with circuit breaker pattern\ntype CircuitBreaker struct {\n    failures int\n    mu       sync.Mutex\n}\n\ncircuitBreaker := pipz.NewSequence(\n    CircuitID,\n    
pipz.Apply(CheckCircuitID, func(ctx context.Context, req Request) (Request, error) {\n        cb.mu.Lock()\n        defer cb.mu.Unlock()\n        if cb.failures > 10 {\n            return req, errors.New(\"circuit open\")\n        }\n        return req, nil\n    }),\n    pipz.NewRetry(ProtectedCallID, protectedOperation, 3),\n)",{"id":3848,"title":111,"titles":3849,"content":3850,"level":19},"/v1.0.7/reference/connectors/retry#advanced-patterns",[534],"// Define identities\nvar (\n    CustomBackoffID   = pipz.NewIdentity(\"custom\", \"custom backoff with rate limit awareness\")\n    OperationID       = pipz.NewIdentity(\"operation\", \"operation with rate limit handling\")\n    IntelligentID     = pipz.NewIdentity(\"intelligent\", \"intelligent retry with error-specific strategies\")\n    ErrorRouterID     = pipz.NewIdentity(\"error-router\", \"route to retry strategy based on error type\")\n    TimeoutRetryID    = pipz.NewIdentity(\"timeout-retry\", \"retry timeout errors aggressively\")\n    RateLimitRetryID  = pipz.NewIdentity(\"rate-retry\", \"retry rate limit errors with long backoff\")\n    GeneralRetryID    = pipz.NewIdentity(\"general-retry\", \"retry other errors conservatively\")\n)\n\n// Custom backoff strategy\ncustomBackoff := pipz.NewBackoff(\n    CustomBackoffID,\n    pipz.Apply(OperationID, func(ctx context.Context, data Data) (Data, error) {\n        // Check for specific error types\n        result, err := operation(ctx, data)\n        if err != nil {\n            var rateLimitErr *RateLimitError\n            if errors.As(err, &rateLimitErr) {\n                // Wait for rate limit reset\n                select {\n                case \u003C-time.After(rateLimitErr.ResetAfter):\n                case \u003C-ctx.Done():\n                    return data, ctx.Err()\n                }\n            }\n        }\n        return result, err\n    }),\n    3,\n    time.Second,\n)\n\n// Retry with different strategies per error\nintelligentRetry := 
pipz.NewHandle(\n    IntelligentID,\n    processor,\n    pipz.NewSwitch(\n        ErrorRouterID,\n        func(ctx context.Context, err *pipz.Error[Data]) string {\n            if err.Timeout {\n                return \"timeout\"\n            }\n            if strings.Contains(err.Err.Error(), \"rate limit\") {\n                return \"rate-limit\"\n            }\n            return \"other\"\n        },\n    ).\n    AddRoute(\"timeout\", pipz.NewRetry(TimeoutRetryID, processor, 5)).\n    AddRoute(\"rate-limit\", pipz.NewBackoff(RateLimitRetryID, processor, 3, 30*time.Second)).\n    AddRoute(\"other\", pipz.NewRetry(GeneralRetryID, processor, 2)),\n)",{"id":3852,"title":1764,"titles":3853,"content":3854,"level":19},"/v1.0.7/reference/connectors/retry#see-also",[534],"Fallback - For trying different processorsTimeout - Often combined with retryHandle - For custom retry logicRace - For parallel attempts html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: 
var(--shiki-default-text-decoration);}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":3856,"title":3857,"titles":3858,"content":3859,"level":9},"/v1.0.7/reference/connectors/scaffold","Scaffold",[],"Fire-and-forget parallel execution with context isolation for background operations",{"id":3861,"title":3857,"titles":3862,"content":3863,"level":9},"/v1.0.7/reference/connectors/scaffold#scaffold",[],"Fire-and-forget parallel execution with context isolation.",{"id":3865,"title":2263,"titles":3866,"content":3867,"level":19},"/v1.0.7/reference/connectors/scaffold#function-signature",[3857],"func NewScaffold[T Cloner[T]](identity Identity, processors ...Chainable[T]) *Scaffold[T]",{"id":3869,"title":2043,"titles":3870,"content":2951,"level":19},"/v1.0.7/reference/connectors/scaffold#type-constraints",[3857],{"id":3872,"title":2268,"titles":3873,"content":3874,"level":19},"/v1.0.7/reference/connectors/scaffold#parameters",[3857],"identity (Identity) - Identifier for the connector used in debuggingprocessors - Variable number of processors to run asynchronously",{"id":3876,"title":2273,"titles":3877,"content":3878,"level":19},"/v1.0.7/reference/connectors/scaffold#returns",[3857],"Returns a *Scaffold[T] that implements Chainable[T].",{"id":3880,"title":2278,"titles":3881,"content":3882,"level":19},"/v1.0.7/reference/connectors/scaffold#behavior",[3857],"Fire-and-forget - Returns immediately without waitingContext isolation - Uses context.WithoutCancel() to prevent parent cancellationData isolation - Each processor receives a clone of the inputNo error reporting - Individual 
failures are not reported backReturns original - Always returns the original input data immediatelyTrace preservation - Preserves trace IDs and context values while removing cancellationBackground execution - Processors continue even after parent context cancellation",{"id":3884,"title":2283,"titles":3885,"content":3886,"level":19},"/v1.0.7/reference/connectors/scaffold#example",[3857],"// Define a type that implements Cloner\ntype AuditEvent struct {\n    UserID    string\n    Action    string\n    Timestamp time.Time\n    Metadata  map[string]string\n}\n\nfunc (a AuditEvent) Clone() AuditEvent {\n    metadata := make(map[string]string, len(a.Metadata))\n    for k, v := range a.Metadata {\n        metadata[k] = v\n    }\n    return AuditEvent{\n        UserID:    a.UserID,\n        Action:    a.Action,\n        Timestamp: a.Timestamp,\n        Metadata:  metadata,\n    }\n}\n\n// Define identities upfront\nvar (\n    AsyncOpsID   = pipz.NewIdentity(\"async-operations\", \"Background operations for event processing\")\n    AuditLogID   = pipz.NewIdentity(\"audit-log\", \"Writes to audit log\")\n    AnalyticsID  = pipz.NewIdentity(\"analytics\", \"Sends to analytics\")\n    CacheWarmID  = pipz.NewIdentity(\"cache-warm\", \"Warms secondary cache\")\n    MetricsID    = pipz.NewIdentity(\"metrics\", \"Updates metrics\")\n    UserActionID = pipz.NewIdentity(\"user-action\", \"User action pipeline\")\n    ValidateID   = pipz.NewIdentity(\"validate\", \"Validates action\")\n    AuthorizeID  = pipz.NewIdentity(\"authorize\", \"Checks permissions\")\n    ExecuteID    = pipz.NewIdentity(\"execute\", \"Performs action\")\n)\n\n// Create scaffold for background operations\nbackground := pipz.NewScaffold(AsyncOpsID,\n    pipz.Effect(AuditLogID, writeToAuditLog),      // 500ms operation\n    pipz.Effect(AnalyticsID, sendToAnalytics),      // 300ms operation\n    pipz.Effect(CacheWarmID, warmSecondaryCache),  // 200ms operation\n    pipz.Effect(MetricsID, updateMetrics),          
// 100ms operation\n)\n\n// Use in a pipeline - returns immediately\npipeline := pipz.NewSequence[AuditEvent](UserActionID,\n    pipz.Apply(ValidateID, validateAction),    // Must complete\n    pipz.Apply(AuthorizeID, checkPermissions), // Must complete\n    pipz.Apply(ExecuteID, performAction),      // Must complete\n    background,                                // Returns immediately\n)\n\n// Process returns after ~10ms (validation + auth + execute)\n// Background tasks continue running for up to 500ms\nresult, err := pipeline.Process(ctx, event)",{"id":3888,"title":2292,"titles":3889,"content":3890,"level":19},"/v1.0.7/reference/connectors/scaffold#when-to-use",[3857],"Use Scaffold when: Operations must complete regardless of request lifecycleYou need true fire-and-forget semanticsBackground tasks shouldn't block the main flowAudit logging or metrics collection that must always happenCache warming or cleanup tasksNon-critical notifications (email, SMS)Analytics and telemetry that shouldn't affect performance",{"id":3892,"title":2297,"titles":3893,"content":3894,"level":19},"/v1.0.7/reference/connectors/scaffold#when-not-to-use",[3857],"Don't use Scaffold when: You need results from the processors (use Concurrent)Errors must be handled or logged (no error reporting)Operations should be cancelled with the request (use Concurrent)You need to wait for completion (use Concurrent)Critical business logic is involved (use synchronous processors)You need confirmation of success (no feedback mechanism)",{"id":3896,"title":3897,"titles":3898,"content":3899,"level":19},"/v1.0.7/reference/connectors/scaffold#scaffold-vs-concurrent","Scaffold vs Concurrent",[3857],"FeatureScaffoldConcurrentReturnsImmediatelyAfter all completeContext CancellationIgnored (continues)Respected (stops)Error ReportingNoNo (but waits)Use CaseBackground tasksParallel operationsTrace 
ContextPreservedPreserved",{"id":3901,"title":3679,"titles":3902,"content":3903,"level":19},"/v1.0.7/reference/connectors/scaffold#context-handling",[3857],"Scaffold uses context.WithoutCancel() which: Preserves all context values (trace IDs, request IDs, etc.)Removes cancellation signalsAllows processors to outlive the parent request // Example with distributed tracing\nfunc handleRequest(ctx context.Context, req Request) {\n    // ctx contains trace ID: \"trace-123\"\n\n    scaffold := pipz.NewScaffold(\n        pipz.NewIdentity(\"background\", \"Background operation with trace context\"),\n        pipz.Effect(\n            pipz.NewIdentity(\"log\", \"Logs with trace ID\"),\n            func(ctx context.Context, _ Request) error {\n                // ctx still contains trace ID: \"trace-123\"\n                // But won't be cancelled when request ends\n                traceID := ctx.Value(\"trace-id\").(string)\n                log.Printf(\"[%s] Background operation\", traceID)\n                time.Sleep(5 * time.Second) // Continues even after request done\n                return nil\n            },\n        ),\n    )\n\n    // Returns immediately\n    scaffold.Process(ctx, req)\n}",{"id":3905,"title":445,"titles":3906,"content":3907,"level":19},"/v1.0.7/reference/connectors/scaffold#performance-considerations",[3857],"Creates one goroutine per processorRequires data cloning (allocation cost)No synchronization overhead (fire-and-forget)Goroutines are not tracked or managedMemory usage depends on processor lifetime",{"id":3909,"title":2181,"titles":3910,"content":3911,"level":19},"/v1.0.7/reference/connectors/scaffold#common-patterns",[3857],"// Define identities upfront\nvar (\n    AuditID          = pipz.NewIdentity(\"audit\", \"Audit logging to multiple destinations\")\n    PrimaryLogID     = pipz.NewIdentity(\"primary-log\", \"Writes to database\")\n    BackupLogID      = pipz.NewIdentity(\"backup-log\", \"Writes to S3\")\n    ComplianceID     = 
pipz.NewIdentity(\"compliance\", \"Sends to compliance system\")\n    MonitoringID     = pipz.NewIdentity(\"monitoring\", \"Updates monitoring systems\")\n    PrometheusID     = pipz.NewIdentity(\"prometheus\", \"Updates Prometheus metrics\")\n    DatadogID        = pipz.NewIdentity(\"datadog\", \"Sends to Datadog\")\n    CustomID         = pipz.NewIdentity(\"custom\", \"Updates custom dashboard\")\n    CacheOpsID       = pipz.NewIdentity(\"cache-ops\", \"Cache operations\")\n    RedisID          = pipz.NewIdentity(\"redis\", \"Warms Redis cache\")\n    CDNID            = pipz.NewIdentity(\"cdn\", \"Purges CDN cache\")\n    LocalID          = pipz.NewIdentity(\"local\", \"Updates local cache\")\n    OrderProcessingID = pipz.NewIdentity(\"order-processing\", \"Order processing pipeline\")\n    ValidateOrderID  = pipz.NewIdentity(\"validate\", \"Validates order\")\n    PaymentID        = pipz.NewIdentity(\"payment\", \"Processes payment\")\n    InventoryID      = pipz.NewIdentity(\"inventory\", \"Updates inventory\")\n    CompleteID       = pipz.NewIdentity(\"complete\", \"Marks order complete\")\n    PostOrderID      = pipz.NewIdentity(\"post-order\", \"Post-order notifications and analytics\")\n    EmailID          = pipz.NewIdentity(\"email\", \"Sends confirmation email\")\n    SMSID            = pipz.NewIdentity(\"sms\", \"Sends SMS notification\")\n    OrderAnalyticsID = pipz.NewIdentity(\"analytics\", \"Tracks order metrics\")\n    PartnerID        = pipz.NewIdentity(\"partner\", \"Notifies fulfillment partner\")\n)\n\n// Audit logging\nauditLog := pipz.NewScaffold(AuditID,\n    pipz.Effect(PrimaryLogID, writeToDatabase),\n    pipz.Effect(BackupLogID, writeToS3),\n    pipz.Effect(ComplianceID, sendToComplianceSystem),\n)\n\n// Metrics and monitoring\nmonitoring := pipz.NewScaffold(MonitoringID,\n    pipz.Effect(PrometheusID, updatePrometheusMetrics),\n    pipz.Effect(DatadogID, sendToDatadog),\n    pipz.Effect(CustomID, updateCustomDashboard),\n)\n\n// Cache 
warming\ncacheOps := pipz.NewScaffold(CacheOpsID,\n    pipz.Effect(RedisID, warmRedisCache),\n    pipz.Effect(CDNID, purgeCDNCache),\n    pipz.Effect(LocalID, updateLocalCache),\n)\n\n// Complete pipeline with synchronous and async parts\npipeline := pipz.NewSequence[Order](OrderProcessingID,\n    // Synchronous - must complete\n    pipz.Apply(ValidateOrderID, validateOrder),\n    pipz.Apply(PaymentID, processPayment),\n    pipz.Apply(InventoryID, updateInventory),\n\n    // Returns order immediately after this\n    pipz.Transform(CompleteID, markOrderComplete),\n\n    // Async - fire and forget\n    pipz.NewScaffold(PostOrderID,\n        pipz.Effect(EmailID, sendConfirmationEmail),\n        pipz.Effect(SMSID, sendSMSNotification),\n        pipz.Effect(OrderAnalyticsID, trackOrderMetrics),\n        pipz.Effect(PartnerID, notifyFulfillmentPartner),\n    ),\n)",{"id":3913,"title":2199,"titles":3914,"content":29,"level":19},"/v1.0.7/reference/connectors/scaffold#gotchas",[3857],{"id":3916,"title":3917,"titles":3918,"content":3919,"level":35},"/v1.0.7/reference/connectors/scaffold#dont-use-for-critical-operations","❌ Don't use for critical operations",[3857,2199],"// Define identities upfront\nvar (\n    CriticalID = pipz.NewIdentity(\"critical\", \"Critical payment processing\")\n    PaymentID  = pipz.NewIdentity(\"payment\", \"Processes payment\")\n)\n\n// WRONG - Payment must be confirmed!\nscaffold := pipz.NewScaffold(CriticalID,\n    pipz.Apply(PaymentID, processPayment), // No error feedback!\n)",{"id":3921,"title":3922,"titles":3923,"content":3924,"level":35},"/v1.0.7/reference/connectors/scaffold#use-synchronous-processing-for-critical-ops","✅ Use synchronous processing for critical ops",[3857,2199],"// Define identities upfront\nvar (\n    CriticalID = pipz.NewIdentity(\"critical\", \"Critical payment processing\")\n    PaymentID  = pipz.NewIdentity(\"payment\", \"Processes payment\")\n)\n\n// RIGHT - Wait for payment confirmation\nsequence := 
pipz.NewSequence(CriticalID,\n    pipz.Apply(PaymentID, processPayment),\n)",{"id":3926,"title":3927,"titles":3928,"content":3929,"level":35},"/v1.0.7/reference/connectors/scaffold#dont-forget-to-implement-clone-properly","❌ Don't forget to implement Clone properly",[3857,2199],"// WRONG - Shallow copy of slice\nfunc (d Data) Clone() Data {\n    return Data{Items: d.Items} // Shares slice memory!\n}",{"id":3931,"title":3932,"titles":3933,"content":3934,"level":35},"/v1.0.7/reference/connectors/scaffold#deep-copy-all-reference-types","✅ Deep copy all reference types",[3857,2199],"// RIGHT - Deep copy\nfunc (d Data) Clone() Data {\n    items := make([]Item, len(d.Items))\n    copy(items, d.Items)\n    return Data{Items: items}\n}",{"id":3936,"title":3937,"titles":3938,"content":3939,"level":35},"/v1.0.7/reference/connectors/scaffold#dont-use-when-you-need-error-handling","❌ Don't use when you need error handling",[3857,2199],"// Define identities upfront\nvar (\n    NoErrorsID = pipz.NewIdentity(\"no-errors\", \"No error handling\")\n    RiskyID    = pipz.NewIdentity(\"risky\", \"Risky operation\")\n)\n\n// WRONG - Can't handle or log errors\nscaffold := pipz.NewScaffold(NoErrorsID,\n    pipz.Apply(RiskyID, riskyOperation), // Errors vanish!\n)",{"id":3941,"title":3942,"titles":3943,"content":3944,"level":35},"/v1.0.7/reference/connectors/scaffold#use-concurrent-if-you-need-to-know-about-failures","✅ Use Concurrent if you need to know about failures",[3857,2199],"// Define identities upfront\nvar (\n    WithErrorsID = pipz.NewIdentity(\"with-errors\", \"With error handling\")\n    RiskyID      = pipz.NewIdentity(\"risky\", \"Risky operation\")\n)\n\n// RIGHT - Errors are reported (though not individually)\nconcurrent := pipz.NewConcurrent(WithErrorsID,\n    pipz.Apply(RiskyID, riskyOperation),\n)",{"id":3946,"title":3046,"titles":3947,"content":3948,"level":19},"/v1.0.7/reference/connectors/scaffold#implementation-requirements",[3857],"Your type must implement 
Clone() correctly for thread safety: // Struct with reference types needs deep copying\ntype Notification struct {\n    ID        string\n    Channels  []string\n    Data      map[string]interface{}\n    Template  *Template\n}\n\nfunc (n Notification) Clone() Notification {\n    // Deep copy slice\n    channels := make([]string, len(n.Channels))\n    copy(channels, n.Channels)\n    \n    // Deep copy map\n    data := make(map[string]interface{}, len(n.Data))\n    for k, v := range n.Data {\n        data[k] = v\n    }\n    \n    // Copy pointer if needed\n    var template *Template\n    if n.Template != nil {\n        t := *n.Template\n        template = &t\n    }\n    \n    return Notification{\n        ID:       n.ID,\n        Channels: channels,\n        Data:     data,\n        Template: template,\n    }\n}",{"id":3950,"title":1764,"titles":3951,"content":3952,"level":19},"/v1.0.7/reference/connectors/scaffold#see-also",[3857],"Concurrent - For parallel operations that wait for completionEffect - Common processor for fire-and-forget operationsHandle - For error monitoring without blocking html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: 
var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":3954,"title":3955,"titles":3956,"content":3957,"level":9},"/v1.0.7/reference/connectors/sequence","Sequences",[],"Mutable, managed chains of processors with introspection and dynamic modification capabilities",{"id":3959,"title":3955,"titles":3960,"content":3961,"level":9},"/v1.0.7/reference/connectors/sequence#sequences",[],"Sequences are mutable, managed chains of processors that provide introspection, modification, and advanced control over processor execution.",{"id":3963,"title":3964,"titles":3965,"content":3966,"level":19},"/v1.0.7/reference/connectors/sequence#understanding-sequence","Understanding Sequence",[3955],"Sequence is the primary way to build sequential pipelines in pipz. 
Unlike simple processor composition, Sequence offers dynamic management: // Define identities\nvar (\n    OrderProcessingID = pipz.NewIdentity(\"order-processing\", \"sequential order processing pipeline\")\n)\n\n// Create a sequence\nseq := pipz.NewSequence(OrderProcessingID)\n\n// Register processors\nseq.Register(\n    validateOrder,\n    calculateTax,\n    applyDiscount,\n    saveOrder,\n)\n\n// Process data\nresult, err := seq.Process(ctx, order)",{"id":3968,"title":3969,"titles":3970,"content":3971,"level":19},"/v1.0.7/reference/connectors/sequence#creating-sequences","Creating Sequences",[3955],"// Define identities upfront\nvar (\n    UserRegistrationID = pipz.NewIdentity(\"user-registration\", \"user registration workflow\")\n    ValidateUserID     = pipz.NewIdentity(\"validate\", \"validate user data\")\n    EnrichUserID       = pipz.NewIdentity(\"enrich\", \"enrich user profile\")\n    SaveUserID         = pipz.NewIdentity(\"save\", \"save user to database\")\n    UserPipelineID     = pipz.NewIdentity(\"user-pipeline\", \"complete user processing pipeline\")\n)\n\n// Create an empty sequence with a descriptive name\nsequence := pipz.NewSequence(UserRegistrationID)\n\n// Register processors (they must already be created)\nvalidateUser := pipz.Effect(ValidateUserID, validateFunc)\nenrichUser := pipz.Apply(EnrichUserID, enrichFunc)\nsaveUser := pipz.Apply(SaveUserID, saveFunc)\n\nsequence.Register(validateUser, enrichUser, saveUser)\n\n// Or chain the calls\nsequence = pipz.NewSequence(UserPipelineID).Register(validateUser, enrichUser, saveUser)",{"id":3973,"title":3974,"titles":3975,"content":3976,"level":19},"/v1.0.7/reference/connectors/sequence#introspection","Introspection",[3955],"Sequences provide visibility into their structure: // Get processor names in order\nnames := sequence.Names()\n// [\"validate\", \"enrich\", \"save\"]\n\n// Get sequence length\ncount := sequence.Len()\n\n// Check if empty\nif sequence.Len() == 0 {\n    fmt.Println(\"No 
processors registered\")\n}",{"id":3978,"title":3979,"titles":3980,"content":3981,"level":19},"/v1.0.7/reference/connectors/sequence#dynamic-modification","Dynamic Modification",[3955],"Sequences can be modified at runtime - this is their key advantage:",{"id":3983,"title":3984,"titles":3985,"content":3986,"level":35},"/v1.0.7/reference/connectors/sequence#adding-processors","Adding Processors",[3955,3979],"// Add to end (most common)\nsequence.Register(auditProcessor)\n\n// Add to beginning\nsequence.Unshift(authProcessor)\n\n// Add multiple to beginning\nsequence.Unshift(preprocess, authenticate)\n\n// Add to end explicitly\nsequence.Push(postprocess)\n\n// Insert after a specific processor (by Identity)\nerr := sequence.After(validateID, authzProcessor)\n\n// Insert before a specific processor (by Identity)\nerr := sequence.Before(saveID, cacheProcessor)",{"id":3988,"title":3989,"titles":3990,"content":3991,"level":35},"/v1.0.7/reference/connectors/sequence#removing-processors","Removing Processors",[3955,3979],"// Remove from end\nprocessor, err := sequence.Pop()\n\n// Remove from beginning\nprocessor, err := sequence.Shift()\n\n// Remove by Identity\nerr := sequence.Remove(validateID)\n\n// Clear all\nsequence.Clear()",{"id":3993,"title":3994,"titles":3995,"content":3996,"level":35},"/v1.0.7/reference/connectors/sequence#replacing","Replacing",[3955,3979],"// Define identities upfront\nvar (\n    TransformV2ID = pipz.NewIdentity(\"transform_v2\", \"Updated transform logic\")\n)\n\n// Replace processor by Identity\nnewTransform := pipz.Transform(TransformV2ID, transformFunc)\nerr := sequence.Replace(transformID, newTransform)",{"id":3998,"title":3999,"titles":4000,"content":4001,"level":19},"/v1.0.7/reference/connectors/sequence#using-sequences-with-other-connectors","Using Sequences with Other Connectors",[3955],"Sequences implement Chainable[T], so they can be used anywhere a processor is expected: // Define identities upfront\nvar (\n    ValidationID         
= pipz.NewIdentity(\"validation\", \"validate order items and payment\")\n    ProcessingID         = pipz.NewIdentity(\"processing\", \"calculate tax and apply discounts\")\n    MainID               = pipz.NewIdentity(\"main\", \"main order processing pipeline\")\n    ReliableProcessingID = pipz.NewIdentity(\"reliable-processing\", \"retry main pipeline on failure\")\n)\n\n// Create sub-sequences\nvalidation := pipz.NewSequence(ValidationID, checkItems, checkPayment)\nprocessing := pipz.NewSequence(ProcessingID, calculateTax, applyDiscount)\n\n// Combine in a parent sequence\nmain := pipz.NewSequence(MainID)\nmain.Register(\n    validation,   // Already implements Chainable[T]\n    processing,   // Already implements Chainable[T]\n    saveOrder,\n)\n\n// Or use in other connectors\nwithRetry := pipz.NewRetry(ReliableProcessingID, main, 3)",{"id":4003,"title":207,"titles":4004,"content":29,"level":19},"/v1.0.7/reference/connectors/sequence#use-cases",[3955],{"id":4006,"title":4007,"titles":4008,"content":4009,"level":35},"/v1.0.7/reference/connectors/sequence#feature-flags","Feature Flags",[3955,207],"// Define identities upfront\nvar (\n    APIHandlerID = pipz.NewIdentity(\"api-handler\", \"API request handler with feature flag support\")\n)\n\nsequence := pipz.NewSequence(APIHandlerID)\nsequence.Register(authenticate, validate)\n\nif featureFlags.IsEnabled(\"new-enrichment\") {\n    sequence.Register(enrichDataV2)\n} else {\n    sequence.Register(enrichDataV1)\n}\n\nsequence.Register(process)",{"id":4011,"title":4012,"titles":4013,"content":4014,"level":35},"/v1.0.7/reference/connectors/sequence#ab-testing","A/B Testing",[3955,207],"func createSequence(variant string) *Sequence[Order] {\n    // Define identity with variant\n    orderFlowID := pipz.NewIdentity(\"order-flow-\"+variant, \"order flow for A/B test variant \"+variant)\n\n    seq := pipz.NewSequence(orderFlowID)\n    seq.Register(validateOrder)\n\n    switch variant {\n    case \"A\":\n        
seq.Register(standardPricing)\n    case \"B\":\n        seq.Register(dynamicPricing)\n    }\n\n    seq.Register(fulfillOrder)\n    return seq\n}",{"id":4016,"title":4017,"titles":4018,"content":4019,"level":35},"/v1.0.7/reference/connectors/sequence#debug-mode","Debug Mode",[3955,207],"// Define identities upfront\nvar (\n    TransformID     = pipz.NewIdentity(\"transform\", \"transform data\")\n    DataProcessorID = pipz.NewIdentity(\"data-processor\", \"data processor with optional debug logging\")\n    DebugLogID      = pipz.NewIdentity(\"debug\", \"log debug information\")\n)\n\ntransform := pipz.Transform(TransformID, transformFunc)\n\nsequence := pipz.NewSequence(DataProcessorID)\nsequence.Register(transform)\n\nif debugMode {\n    // Insert logging after transform\n    debugLog := pipz.Effect(DebugLogID, logFunc)\n    sequence.After(TransformID, debugLog)\n    sequence.Push(debugLog) // Also log at end\n}",{"id":4021,"title":4022,"titles":4023,"content":4024,"level":35},"/v1.0.7/reference/connectors/sequence#plugin-systems","Plugin Systems",[3955,207],"// Define identities upfront\nvar (\n    EventHandlerID = pipz.NewIdentity(\"event-handler\", \"event handler with dynamic plugin support\")\n)\n\n// Core sequence\nsequence := pipz.NewSequence(EventHandlerID)\nsequence.Register(parseEvent, validateEvent)\n\n// Load plugins\nfor _, plugin := range loadPlugins() {\n    sequence.Register(plugin.Processor())\n}\n\nsequence.Register(dispatchEvent)",{"id":4026,"title":3312,"titles":4027,"content":4028,"level":19},"/v1.0.7/reference/connectors/sequence#thread-safety",[3955],"Sequences ARE thread-safe. 
All modification methods use internal locking: // Safe to use concurrently\ngo func() {\n    sequence.Register(newProcessor)\n}()\n\ngo func() {\n    result, err := sequence.Process(ctx, data)\n}() The internal sync.RWMutex ensures: Multiple concurrent Process calls can executeModifications lock out all access temporarilyNo race conditions or data corruption",{"id":4030,"title":4031,"titles":4032,"content":4033,"level":19},"/v1.0.7/reference/connectors/sequence#sequence-vs-direct-composition","Sequence vs Direct Composition",[3955],"Use Sequence when you need: Runtime modification of processing stepsIntrospection capabilities (Names, Len)Plugin architecturesFeature flag integrationDebug instrumentationA/B testing flows Use direct processor composition when: Pipeline is fixed at compile timeMaximum performance is criticalSimplicity is preferred",{"id":4035,"title":4036,"titles":4037,"content":4038,"level":19},"/v1.0.7/reference/connectors/sequence#example-dynamic-etl-pipeline","Example: Dynamic ETL Pipeline",[3955],"type ETLProcessor struct {\n    sequence *pipz.Sequence[Record]\n}\n\nfunc (etl *ETLProcessor) Configure(cfg Config) {\n    etl.sequence.Clear()\n    \n    // Always start with validation\n    etl.sequence.Register(validateRecord)\n    \n    // Add transformations based on config\n    for _, transform := range cfg.Transformations {\n        etl.sequence.Register(createTransform(transform))\n    }\n    \n    // Conditional enrichment\n    if cfg.EnableEnrichment {\n        etl.sequence.Register(enrichRecord)\n    }\n    \n    // Output varies by destination\n    switch cfg.Destination {\n    case \"database\":\n        etl.sequence.Register(writeToDatabase)\n    case \"file\":\n        etl.sequence.Register(writeToFile)\n    case \"api\":\n        etl.sequence.Register(postToAPI)\n    }\n}\n\nfunc (etl *ETLProcessor) Process(ctx context.Context, record Record) (Record, error) {\n    return etl.sequence.Process(ctx, 
record)\n}",{"id":4040,"title":135,"titles":4041,"content":4042,"level":19},"/v1.0.7/reference/connectors/sequence#best-practices",[3955],"Name your sequences - The name appears in error paths for debuggingStore Identity references - Keep references to processor Identities so you can use Remove, Replace, After, and BeforeCheck errors - Modification methods return errors when processors aren't foundDon't over-modify - If you're constantly changing the sequence, consider using Switch insteadTest modifications - Ensure your dynamic changes work as expected",{"id":4044,"title":140,"titles":4045,"content":4046,"level":19},"/v1.0.7/reference/connectors/sequence#next-steps",[3955],"Error Handling - Handle failures in sequencesConnector Selection - Choose the right connector for your use caseTesting - Test dynamic sequences html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki 
.sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",{"id":4048,"title":4049,"titles":4050,"content":4051,"level":9},"/v1.0.7/reference/connectors/switch","Switch",[],"Routes data to different processors based on a condition function for dynamic workflow routing",{"id":4053,"title":4049,"titles":4054,"content":4055,"level":9},"/v1.0.7/reference/connectors/switch#switch",[],"Routes data to different processors based on a condition function.",{"id":4057,"title":2263,"titles":4058,"content":4059,"level":19},"/v1.0.7/reference/connectors/switch#function-signature",[4049],"func NewSwitch[T any](\n    identity Identity,\n    condition func(context.Context, T) string,\n) *Switch[T]",{"id":4061,"title":2091,"titles":4062,"content":2651,"level":19},"/v1.0.7/reference/connectors/switch#type-parameters",[4049],{"id":4064,"title":2268,"titles":4065,"content":4066,"level":19},"/v1.0.7/reference/connectors/switch#parameters",[4049],"identity (Identity) - Identifier with name and description for the connector used in debugging and observabilitycondition - Function that examines data and returns a route key string",{"id":4068,"title":4069,"titles":4070,"content":4071,"level":19},"/v1.0.7/reference/connectors/switch#condition-type","Condition Type",[4049],"The Condition type determines routing in Switch connectors: type Condition[T any] func(context.Context, T) string",{"id":4073,"title":2091,"titles":4074,"content":4075,"level":35},"/v1.0.7/reference/connectors/switch#type-parameters-1",[4049,4069],"T - The input data type to be 
examined",{"id":4077,"title":2263,"titles":4078,"content":4079,"level":35},"/v1.0.7/reference/connectors/switch#function-signature-1",[4049,4069],"Input: Takes a context and data of type TOutput: Returns a route key stringPurpose: Examines the input data and determines which route to take",{"id":4081,"title":1388,"titles":4082,"content":4083,"level":35},"/v1.0.7/reference/connectors/switch#how-it-works",[4049,4069],"The condition function is called for each piece of data processed: Receives the current context and input dataExamines the data to determine the appropriate routeReturns a key that maps to a specific processorThe Switch connector uses this key to route the data",{"id":4085,"title":4086,"titles":4087,"content":4088,"level":35},"/v1.0.7/reference/connectors/switch#example-conditions","Example Conditions",[4049,4069],"// Simple string-based routing\nfunc userTypeCondition(ctx context.Context, user User) string {\n    if user.IsVIP {\n        return \"vip\"\n    }\n    return \"regular\"\n}\n\n// Priority routing with string keys\nfunc priorityCondition(ctx context.Context, task Task) string {\n    switch task.Priority {\n    case 3:\n        return \"critical\"\n    case 2:\n        return \"high\"\n    case 1:\n        return \"medium\"\n    default:\n        return \"low\"\n    }\n}\n\n// Computed routing key\nfunc loadBalanceCondition(ctx context.Context, req Request) string {\n    // Route based on request ID hash for load distribution\n    bucket := req.ID % 3\n    return fmt.Sprintf(\"bucket-%d\", bucket) // Routes to \"bucket-0\", \"bucket-1\", \"bucket-2\"\n}\n\n// Context-aware routing\nfunc featureCondition(ctx context.Context, data Data) string {\n    // Use context values for routing decisions\n    if feature, ok := ctx.Value(\"feature\").(string); ok && feature == \"beta\" {\n        return \"experimental\"\n    }\n    return 
\"stable\"\n}",{"id":4090,"title":4091,"titles":4092,"content":4093,"level":35},"/v1.0.7/reference/connectors/switch#best-practices-for-conditions","Best Practices for Conditions",[4049,4069],"Keep conditions pure - Avoid side effects in condition functionsMake them fast - Conditions are called for every data itemUse meaningful keys - Return descriptive strings or enumsHandle all cases - Ensure all possible return values have routesLeverage context - Use context for feature flags or configuration",{"id":4095,"title":2273,"titles":4096,"content":4097,"level":19},"/v1.0.7/reference/connectors/switch#returns",[4049],"Returns a *Switch[T] that implements Chainable[T].",{"id":4099,"title":2134,"titles":4100,"content":4101,"level":19},"/v1.0.7/reference/connectors/switch#methods",[4049],"// Add a route\nAddRoute(key string, processor Chainable[T]) *Switch[T]\n\n// Remove a route\nRemoveRoute(key string) *Switch[T]\n\n// Check if route exists\nHasRoute(key string) bool\n\n// Get all routes (copy)\nRoutes() map[string]Chainable[T]\n\n// Clear all routes\nClearRoutes() *Switch[T]\n\n// Replace all routes atomically\nSetRoutes(routes map[string]Chainable[T]) *Switch[T]\n\n// Update condition function\nSetCondition(condition Condition[T]) *Switch[T]",{"id":4103,"title":2278,"titles":4104,"content":4105,"level":19},"/v1.0.7/reference/connectors/switch#behavior",[4049],"Dynamic routing - Routes determined at runtime based on dataString keys - Route keys are strings for simplicity and serializationChainable API - Routes can be added fluentlyPass-through on no match - Returns input unchanged if no route matchesThread-safe - Routes can be modified during operation",{"id":4107,"title":2283,"titles":4108,"content":4109,"level":19},"/v1.0.7/reference/connectors/switch#example",[4049],"// Route by user type\nuserRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"user-router\", \"Routes users to appropriate handlers based on VIP/new/regular status\"),\n    func(ctx context.Context, user 
User) string {\n        if user.IsVIP {\n            return \"vip\"\n        }\n        if user.IsNew {\n            return \"new\"\n        }\n        return \"regular\"\n    },\n).\nAddRoute(\"vip\", processVIPUser).\nAddRoute(\"new\", processNewUser).\nAddRoute(\"regular\", processRegularUser)\n\n// Route by priority level\npriorityRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"priority-router\", \"Routes tasks by priority level for appropriate processing\"),\n    func(ctx context.Context, task Task) string {\n        switch task.Priority {\n        case 3:\n            return \"critical\"\n        case 2:\n            return \"high\"\n        case 1:\n            return \"medium\"\n        default:\n            return \"low\"\n        }\n    },\n).\nAddRoute(\"critical\", processCritical).\nAddRoute(\"high\", processHigh).\nAddRoute(\"medium\", processMedium).\nAddRoute(\"low\", processLow)\n\n// Route by payment method\npaymentRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"payment-router\", \"Routes payment processing based on payment method type\"),\n    func(ctx context.Context, payment Payment) string {\n        return payment.Method\n    },\n).\nAddRoute(\"credit_card\", processCreditCard).\nAddRoute(\"paypal\", processPayPal).\nAddRoute(\"crypto\", processCrypto)\n// Unmatched methods pass through unchanged",{"id":4111,"title":2292,"titles":4112,"content":4113,"level":19},"/v1.0.7/reference/connectors/switch#when-to-use",[4049],"Use Switch when: You need conditional routing with 3+ branchesDifferent types require different handlingImplementing strategy patternBuilding rule enginesCreating workflow routersA/B testing with multiple variantsProcessing varies by category/type/status",{"id":4115,"title":2297,"titles":4116,"content":4117,"level":19},"/v1.0.7/reference/connectors/switch#when-not-to-use",[4049],"Don't use Switch when: Only two options exist (use Fallback or Filter)All processors should run (use Concurrent)Conditions are complex/nested 
(consider multiple Switches)Simple boolean conditions (use Filter or Mutate)You just need if/else logic (use Filter)",{"id":4119,"title":4120,"titles":4121,"content":4122,"level":19},"/v1.0.7/reference/connectors/switch#pass-through-behavior","Pass-Through Behavior",[4049],"Switch passes through unchanged if no route matches: router := pipz.NewSwitch(\n    pipz.NewIdentity(\"router\", \"Routes data by type to appropriate processors\"),\n    func(ctx context.Context, data Data) string {\n        return data.Type\n    },\n).\nAddRoute(\"typeA\", processA).\nAddRoute(\"typeB\", processB)\n\ndata := Data{Type: \"typeC\"}\nresult, err := router.Process(ctx, data)\n// err: nil\n// result: data (unchanged, passed through) This design allows Switch to be safely added to pipelines without requiring exhaustive route coverage.",{"id":4124,"title":2181,"titles":4125,"content":4126,"level":19},"/v1.0.7/reference/connectors/switch#common-patterns",[4049],"// Multi-level routing\nmainRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"main-router\", \"Primary service router for auth, payment, and shipping requests\"),\n    func(ctx context.Context, req Request) string {\n        return req.Service\n    },\n).\nAddRoute(\"auth\", authPipeline).\nAddRoute(\"payment\",\n    pipz.NewSwitch(\n        pipz.NewIdentity(\"payment-sub-router\", \"Sub-router for different payment method types\"),\n        func(ctx context.Context, req Request) string {\n            return req.PaymentType\n        },\n    ).\n    AddRoute(\"card\", cardProcessor).\n    AddRoute(\"bank\", bankProcessor),\n).\nAddRoute(\"shipping\", shippingPipeline)\n\n// Error-based routing\nerrorRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"error-router\", \"Routes errors to appropriate handlers based on error type\"),\n    func(ctx context.Context, err *pipz.Error[Data]) string {\n        switch {\n        case err.Timeout:\n            return \"timeout\"\n        case err.Canceled:\n            return \"canceled\"\n    
    case strings.Contains(err.Err.Error(), \"validation\"):\n            return \"validation\"\n        default:\n            return \"other\"\n        }\n    },\n).\nAddRoute(\"timeout\", handleTimeout).\nAddRoute(\"canceled\", handleCancellation).\nAddRoute(\"validation\", handleValidation).\nAddRoute(\"other\", handleGenericError)\n\n// Feature flag routing\nfeatureRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"feature-router\", \"Routes between new and legacy algorithms based on feature flags\"),\n    func(ctx context.Context, data Data) string {\n        if featureFlags.IsEnabled(ctx, \"new_algorithm\") {\n            return \"new\"\n        }\n        return \"old\"\n    },\n).\nAddRoute(\"new\", newAlgorithm).\nAddRoute(\"old\", oldAlgorithm)",{"id":4128,"title":111,"titles":4129,"content":4130,"level":19},"/v1.0.7/reference/connectors/switch#advanced-patterns",[4049],"// Dynamic route registration\nrouter := pipz.NewSwitch[Order](\n    pipz.NewIdentity(\"dynamic\", \"Dynamically configured router for order processing\"),\n    getOrderType,\n)\n\n// Register routes from configuration\nfor _, route := range config.Routes {\n    processor := createProcessor(route.Handler)\n    router.AddRoute(route.Key, processor)\n}\n\n// Computed routing keys\ncomplexRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"complex\", \"Routes events by region and calculated score for distributed processing\"),\n    func(ctx context.Context, event Event) string {\n        // Complex routing logic\n        score := calculateScore(event)\n        region := detectRegion(event.IP)\n\n        return fmt.Sprintf(\"%s:%d\", region, score/10)\n    },\n).\nAddRoute(\"us:0\", lowPriorityUS).\nAddRoute(\"us:1\", mediumPriorityUS).\nAddRoute(\"eu:0\", lowPriorityEU).\nAddRoute(\"eu:1\", mediumPriorityEU).\nDefault(genericProcessor)\n\n// Percentage-based routing (A/B testing)\nabRouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"ab-test\", \"A/B test router directing 10% of users to 
experimental flow\"),\n    func(ctx context.Context, user User) string {\n        hash := hashUserID(user.ID)\n        if hash%100 \u003C 10 { // 10% of users\n            return \"experiment\"\n        }\n        return \"control\"\n    },\n).\nAddRoute(\"experiment\", experimentalFlow).\nAddRoute(\"control\", standardFlow)",{"id":4132,"title":2199,"titles":4133,"content":29,"level":19},"/v1.0.7/reference/connectors/switch#gotchas",[4049],{"id":4135,"title":4136,"titles":4137,"content":4138,"level":35},"/v1.0.7/reference/connectors/switch#dont-use-switch-for-simple-boolean-logic","❌ Don't use Switch for simple boolean logic",[4049,2199],"// WRONG - Overkill for boolean\nswitch := pipz.NewSwitch(\n    pipz.NewIdentity(\"overkill\", \"Over-engineered router for simple boolean condition\"),\n    func(ctx context.Context, user User) string {\n        if user.IsActive {\n            return \"active\"\n        }\n        return \"inactive\"\n    },\n).\nAddRoute(\"active\", processActive).\nAddRoute(\"inactive\", processInactive)",{"id":4140,"title":4141,"titles":4142,"content":4143,"level":35},"/v1.0.7/reference/connectors/switch#use-filter-for-simple-conditions","✅ Use Filter for simple conditions",[4049,2199],"// RIGHT - Simpler with Filter\nfilter := pipz.NewFilter(\n    pipz.NewIdentity(\"simple\", \"Processes active users\"),\n    func(ctx context.Context, user User) bool {\n        return user.IsActive\n    },\n    processActive,\n)",{"id":4145,"title":4146,"titles":4147,"content":4148,"level":35},"/v1.0.7/reference/connectors/switch#dont-use-opaque-route-keys","❌ Don't use opaque route keys",[4049,2199],"// WRONG - What do these mean?\nswitch := pipz.NewSwitch(\n    pipz.NewIdentity(\"opaque\", \"Routes orders by value threshold with unclear keys\"),\n    func(ctx context.Context, order Order) string {\n        if order.Total > 1000 {\n            return \"1\" // Magic value!\n        }\n        return \"2\" // Another magic value!\n    
},\n)",{"id":4150,"title":4151,"titles":4152,"content":4153,"level":35},"/v1.0.7/reference/connectors/switch#use-meaningful-self-documenting-keys","✅ Use meaningful, self-documenting keys",[4049,2199],"// RIGHT - Clear intent\nswitch := pipz.NewSwitch(\n    pipz.NewIdentity(\"clear\", \"Routes orders based on total value using descriptive keys\"),\n    func(ctx context.Context, order Order) string {\n        if order.Total > 1000 {\n            return \"high-value\"\n        }\n        return \"standard\"\n    },\n)",{"id":4155,"title":4156,"titles":4157,"content":4158,"level":35},"/v1.0.7/reference/connectors/switch#dont-assume-unmatched-routes-fail","❌ Don't assume unmatched routes fail",[4049,2199],"// WRONG - Expecting an error\nrouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"router\", \"Routes by type\"),\n    func(ctx context.Context, data Data) string {\n        return data.Type\n    },\n).AddRoute(\"known\", processKnown)\n\n_, err := router.Process(ctx, Data{Type: \"unknown\"})\n// err is nil! 
Data passes through unchanged",{"id":4160,"title":4161,"titles":4162,"content":4163,"level":35},"/v1.0.7/reference/connectors/switch#add-explicit-handling-if-needed","✅ Add explicit handling if needed",[4049,2199],"// RIGHT - Validate in condition or use a catch-all route\nrouter := pipz.NewSwitch(\n    pipz.NewIdentity(\"router\", \"Routes by type with unknown handling\"),\n    func(ctx context.Context, data Data) string {\n        switch data.Type {\n        case \"typeA\", \"typeB\":\n            return data.Type\n        default:\n            return \"unknown\"\n        }\n    },\n).\nAddRoute(\"typeA\", processA).\nAddRoute(\"typeB\", processB).\nAddRoute(\"unknown\", handleUnknown)",{"id":4165,"title":135,"titles":4166,"content":4167,"level":19},"/v1.0.7/reference/connectors/switch#best-practices",[4049],"Use constants for route keys to avoid typosAdd a catch-all route if you need to handle unknown cases explicitlyKeep routing logic simple - complex conditions make debugging hardDocument route keys if they're not self-evidentTest all routes including pass-through behavior for unmatched keys",{"id":4169,"title":1764,"titles":4170,"content":4171,"level":19},"/v1.0.7/reference/connectors/switch#see-also",[4049],"Mutate - For simple conditional processingFallback - For two-option routingHandle - Often uses Switch for error routingConcurrent - When all routes should execute html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: 
var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":4173,"title":544,"titles":4174,"content":4175,"level":9},"/v1.0.7/reference/connectors/timeout",[],"Enforces a time limit on processor execution to prevent indefinite operations",{"id":4177,"title":544,"titles":4178,"content":4179,"level":9},"/v1.0.7/reference/connectors/timeout#timeout",[],"Enforces a time limit on processor execution.",{"id":4181,"title":2263,"titles":4182,"content":4183,"level":19},"/v1.0.7/reference/connectors/timeout#function-signature",[544],"func NewTimeout[T any](\n    identity Identity,\n    processor Chainable[T],\n    duration time.Duration,\n) *Timeout[T]",{"id":4185,"title":2268,"titles":4186,"content":4187,"level":19},"/v1.0.7/reference/connectors/timeout#parameters",[544],"identity (Identity) - Identifier with name and description for the connector used in debugging and observabilityprocessor - The processor to time-boundduration - Maximum allowed execution time",{"id":4189,"title":2273,"titles":4190,"content":4191,"level":19},"/v1.0.7/reference/connectors/timeout#returns",[544],"Returns a *Timeout[T] that implements 
Chainable[T].",{"id":4193,"title":2835,"titles":4194,"content":29,"level":19},"/v1.0.7/reference/connectors/timeout#testing-configuration",[544],{"id":4196,"title":2839,"titles":4197,"content":4198,"level":35},"/v1.0.7/reference/connectors/timeout#withclock",[544,2835],"func (t *Timeout[T]) WithClock(clock clockz.Clock) *Timeout[T] Sets a custom clock implementation for testing purposes. This method enables controlled time manipulation in tests using clockz.FakeClock. Parameters: clock (clockz.Clock) - Clock implementation to use Returns:\nReturns the same connector instance for method chaining. Example: // Use fake clock in tests\nfakeClock := clockz.NewFakeClock()\ntimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"test\", \"Test timeout with fake clock for controlled time advancement\"),\n    processor,\n    5*time.Second,\n).WithClock(fakeClock)\n\n// Advance time in test\nfakeClock.Advance(6 * time.Second)",{"id":4200,"title":2278,"titles":4201,"content":4202,"level":19},"/v1.0.7/reference/connectors/timeout#behavior",[544],"Time enforcement - Cancels operation after durationContext timeout - Creates a timeout context for the processorTimeout errors - Returns timeout error with timing informationClean cancellation - Processor should respect context cancellationError enrichment - Timeout errors include duration info",{"id":4204,"title":2283,"titles":4205,"content":4206,"level":19},"/v1.0.7/reference/connectors/timeout#example",[544],"// Define identities upfront\nvar (\n    APITimeoutID     = pipz.NewIdentity(\"api-timeout\", \"Enforces 5 second timeout on external API calls\")\n    APICallID        = pipz.NewIdentity(\"api-call\", \"Calls external API\")\n    BoundedPipelineID = pipz.NewIdentity(\"bounded-pipeline\", \"Ensures entire data processing pipeline completes within 30 seconds\")\n    ProcessingID     = pipz.NewIdentity(\"processing\", \"Data processing sequence\")\n    FetchID          = pipz.NewIdentity(\"fetch\", \"Fetches data\")\n    TransformID 
     = pipz.NewIdentity(\"transform\", \"Transforms data\")\n    SaveID           = pipz.NewIdentity(\"save\", \"Saves data\")\n    GraduatedID      = pipz.NewIdentity(\"graduated\", \"Graduated timeout fallback\")\n    FastID           = pipz.NewIdentity(\"fast\", \"Primary service with strict 1 second timeout\")\n    SlowID           = pipz.NewIdentity(\"slow\", \"Backup service with generous 10 second timeout\")\n    ReliableID       = pipz.NewIdentity(\"reliable\", \"Reliable retry with timeout\")\n    BoundedOpID      = pipz.NewIdentity(\"bounded-op\", \"Each retry attempt limited to 5 seconds for slow operation\")\n)\n\n// Basic timeout\nfastAPI := pipz.NewTimeout(APITimeoutID,\n    pipz.Apply(APICallID, callExternalAPI),\n    5*time.Second,\n)\n\n// Timeout on complex pipeline\ntimeBounded := pipz.NewTimeout(BoundedPipelineID,\n    pipz.NewSequence[Data](ProcessingID,\n        pipz.Apply(FetchID, fetchData),\n        pipz.Apply(TransformID, transformData),\n        pipz.Apply(SaveID, saveData),\n    ),\n    30*time.Second, // Entire pipeline must complete in 30s\n)\n\n// Graduated timeouts\nresilient := pipz.NewFallback(GraduatedID,\n    pipz.NewTimeout(FastID,\n        primaryService,\n        1*time.Second,\n    ),\n    pipz.NewTimeout(SlowID,\n        backupService,\n        10*time.Second,\n    ),\n)\n\n// Timeout with retry\nreliableButSlow := pipz.NewRetry(ReliableID,\n    pipz.NewTimeout(BoundedOpID,\n        slowOperation,\n        5*time.Second,\n    ),\n    3, // Retry up to 3 times, each with 5s timeout\n)",{"id":4208,"title":2292,"titles":4209,"content":4210,"level":19},"/v1.0.7/reference/connectors/timeout#when-to-use",[544],"Use Timeout when: Calling external services (APIs, databases)Operations might hang indefinitelySLAs must be enforcedProtecting against slow operationsResource usage needs boundsUser-facing operations need responsivenessPreventing cascading 
delays",{"id":4212,"title":2297,"titles":4213,"content":4214,"level":19},"/v1.0.7/reference/connectors/timeout#when-not-to-use",[544],"Don't use Timeout when: Operations are always fast (unnecessary overhead)Cancellation might corrupt data (transactions)Exact completion is required (financial processing)Timeout would leave inconsistent stateOperations can't be cancelled (use monitoring instead)",{"id":4216,"title":4217,"titles":4218,"content":4219,"level":19},"/v1.0.7/reference/connectors/timeout#error-details","Error Details",[544],"Timeout errors are marked specially: timeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"slow-op\", \"Timeout for potentially slow processor with 2 second limit\"),\n    slowProcessor,\n    2*time.Second,\n)\n_, err := timeout.Process(ctx, data)\n\nif err != nil {\n    var pipeErr *pipz.Error[Data]\n    if errors.As(err, &pipeErr) {\n        if pipeErr.Timeout {\n            fmt.Printf(\"Operation timed out after %v\", pipeErr.Duration)\n            // Duration will be ~2 seconds\n        }\n    }\n}",{"id":4221,"title":4222,"titles":4223,"content":4224,"level":19},"/v1.0.7/reference/connectors/timeout#context-cancellation","Context Cancellation",[544],"Processors must respect context cancellation: // Define identities upfront\nvar (\n    GoodProcessorID = pipz.NewIdentity(\"good\", \"Context-aware processor\")\n    BadProcessorID  = pipz.NewIdentity(\"bad\", \"Context-ignoring processor\")\n)\n\n// GOOD: Checks context\ngoodProcessor := pipz.Apply(GoodProcessorID, func(ctx context.Context, data Data) (Data, error) {\n    for i := 0; i \u003C 100; i++ {\n        select {\n        case \u003C-ctx.Done():\n            return data, ctx.Err() // Respects timeout\n        default:\n            // Do work\n            if err := processChunk(data, i); err != nil {\n                return data, err\n            }\n        }\n    }\n    return data, nil\n})\n\n// BAD: Ignores context\nbadProcessor := pipz.Apply(BadProcessorID, func(ctx 
context.Context, data Data) (Data, error) {\n    // This will continue even after timeout!\n    time.Sleep(10 * time.Second)\n    return data, nil\n})",{"id":4226,"title":2181,"titles":4227,"content":4228,"level":19},"/v1.0.7/reference/connectors/timeout#common-patterns",[544],"// Tiered timeout strategy\ntieredService := pipz.NewSwitch[Request](\n    pipz.NewIdentity(\"tier-router\", \"Routes requests by tier with appropriate timeout limits\"),\n    func(ctx context.Context, req Request) string {\n        return req.Tier\n    },\n).\nAddRoute(\"premium\", pipz.NewTimeout(\n    pipz.NewIdentity(\"premium\", \"Premium tier processing with 10 second timeout\"),\n    processor,\n    10*time.Second,\n)).\nAddRoute(\"standard\", pipz.NewTimeout(\n    pipz.NewIdentity(\"standard\", \"Standard tier processing with 5 second timeout\"),\n    processor,\n    5*time.Second,\n)).\nAddRoute(\"free\", pipz.NewTimeout(\n    pipz.NewIdentity(\"free\", \"Free tier processing with 1 second timeout\"),\n    processor,\n    1*time.Second,\n))\n\n// Define identities for monitoring and dynamic patterns\nvar (\n    MonitoredID  = pipz.NewIdentity(\"monitored\", \"Monitored timeout handler\")\n    OperationID  = pipz.NewIdentity(\"operation\", \"Monitored operation with 3 second timeout\")\n    TrackID      = pipz.NewIdentity(\"track\", \"Tracks timeout metrics\")\n    DynamicID    = pipz.NewIdentity(\"dynamic\", \"Dynamic timeout based on priority\")\n    BoundedID    = pipz.NewIdentity(\"bounded\", \"Dynamically timed operation\")\n)\n\n// Timeout with monitoring\nmonitoredTimeout := pipz.NewHandle(MonitoredID,\n    pipz.NewTimeout(OperationID,\n        processor,\n        3*time.Second,\n    ),\n    pipz.Effect(TrackID, func(ctx context.Context, err *pipz.Error[Data]) error {\n        if err.Timeout {\n            metrics.Increment(\"timeouts\", \"operation\", \"myop\")\n            if err.Duration > 2*time.Second {\n                log.Printf(\"Near timeout: %v\", err.Duration)\n     
       }\n        }\n        return nil\n    }),\n)\n\n// Dynamic timeout based on context\ndynamicTimeout := pipz.Apply(DynamicID, func(ctx context.Context, req Request) (Request, error) {\n    timeout := 5 * time.Second // default\n    if req.Priority == \"high\" {\n        timeout = 30 * time.Second\n    }\n\n    return pipz.NewTimeout(BoundedID,\n        processor,\n        timeout,\n    ).Process(ctx, req)\n})",{"id":4230,"title":111,"titles":4231,"content":4232,"level":19},"/v1.0.7/reference/connectors/timeout#advanced-patterns",[544],"// Percentage-based timeout (P95)\nadaptiveTimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"adaptive\", \"Adaptive timeout based on P95 historical performance metrics\"),\n    processor,\n    calculateP95Timeout(), // Based on historical data\n)\n\n// Define identities for advanced patterns\nvar (\n    GracefulID     = pipz.NewIdentity(\"graceful\", \"Graceful shutdown with partial results\")\n    OuterID        = pipz.NewIdentity(\"outer\", \"Outer timeout for multi-step sequence\")\n    InnerSequenceID = pipz.NewIdentity(\"inner-sequence\", \"Inner sequence with step timeouts\")\n    Step1ID        = pipz.NewIdentity(\"step1\", \"First step with 5 second timeout\")\n    Step2ID        = pipz.NewIdentity(\"step2\", \"Second step with 5 second timeout\")\n    Step3ID        = pipz.NewIdentity(\"step3\", \"Third step with 5 second timeout\")\n)\n\n// Timeout with graceful shutdown\ngracefulTimeout := pipz.Apply(GracefulID, func(ctx context.Context, batch Batch) (Batch, error) {\n    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n    defer cancel()\n\n    results := make([]Result, 0, len(batch.Items))\n    for _, item := range batch.Items {\n        select {\n        case \u003C-ctx.Done():\n            // Save partial results\n            batch.Partial = true\n            batch.Results = results\n            return batch, fmt.Errorf(\"timeout after %d items\", len(results))\n        default:\n            result, err 
:= processItem(ctx, item)\n            if err != nil {\n                return batch, err\n            }\n            results = append(results, result)\n        }\n    }\n\n    batch.Results = results\n    return batch, nil\n})\n\n// Cascading timeouts\ncascading := pipz.NewTimeout(OuterID,\n    pipz.NewSequence[Data](InnerSequenceID,\n        pipz.NewTimeout(Step1ID,\n            step1,\n            5*time.Second,\n        ),\n        pipz.NewTimeout(Step2ID,\n            step2,\n            5*time.Second,\n        ),\n        pipz.NewTimeout(Step3ID,\n            step3,\n            5*time.Second,\n        ),\n    ),\n    12*time.Second, // Total timeout less than sum of parts\n)",{"id":4234,"title":2199,"titles":4235,"content":29,"level":19},"/v1.0.7/reference/connectors/timeout#gotchas",[544],{"id":4237,"title":4238,"titles":4239,"content":4240,"level":35},"/v1.0.7/reference/connectors/timeout#dont-use-timeouts-that-are-too-short","❌ Don't use timeouts that are too short",[544,2199],"// WRONG - Timeout shorter than average response time\ntimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"too-short\", \"Unrealistic timeout that will fail most normal requests\"),\n    apiCall, // Takes 2-3 seconds normally\n    1*time.Second, // Will almost always timeout!\n)",{"id":4242,"title":4243,"titles":4244,"content":4245,"level":35},"/v1.0.7/reference/connectors/timeout#base-timeouts-on-actual-performance","✅ Base timeouts on actual performance",[544,2199],"// RIGHT - Realistic timeout\ntimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"realistic\", \"Realistic timeout based on P99 response time measurements\"),\n    apiCall,\n    5*time.Second, // P99 response time\n)",{"id":4247,"title":4248,"titles":4249,"content":4250,"level":35},"/v1.0.7/reference/connectors/timeout#dont-ignore-context-in-processors","❌ Don't ignore context in processors",[544,2199],"// WRONG - Ignores timeout!\nprocessor := pipz.Apply(\"bad\", func(ctx context.Context, data Data) (Data, error) {\n 
   time.Sleep(10 * time.Second) // Ignores context!\n    return data, nil\n})",{"id":4252,"title":4253,"titles":4254,"content":4255,"level":35},"/v1.0.7/reference/connectors/timeout#check-context-during-long-operations","✅ Check context during long operations",[544,2199],"// RIGHT - Respects timeout\nprocessor := pipz.Apply(\"good\", func(ctx context.Context, data Data) (Data, error) {\n    for i := 0; i \u003C 100; i++ {\n        select {\n        case \u003C-ctx.Done():\n            return data, ctx.Err()\n        default:\n            processChunk(data, i)\n        }\n    }\n    return data, nil\n})",{"id":4257,"title":4258,"titles":4259,"content":4260,"level":35},"/v1.0.7/reference/connectors/timeout#dont-nest-timeouts-incorrectly","❌ Don't nest timeouts incorrectly",[544,2199],"// WRONG - Inner timeout longer than outer!\ntimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"outer\", \"Outer timeout shorter than inner timeout - misconfigured\"),\n    pipz.NewTimeout(\n        pipz.NewIdentity(\"inner\", \"Inner timeout that will never complete fully\"),\n        processor,\n        10*time.Second,\n    ), // Longer!\n    5*time.Second, // Shorter - inner never gets full time\n)",{"id":4262,"title":4263,"titles":4264,"content":4265,"level":35},"/v1.0.7/reference/connectors/timeout#make-outer-timeouts-longer-than-sum-of-inner","✅ Make outer timeouts longer than sum of inner",[544,2199],"// Define identities upfront\nvar (\n    OuterTimeoutID = pipz.NewIdentity(\"outer\", \"Outer timeout with buffer\")\n    StepsID        = pipz.NewIdentity(\"steps\", \"Sequential steps\")\n    StepOneID      = pipz.NewIdentity(\"step1\", \"First step with 5 second timeout\")\n    StepTwoID      = pipz.NewIdentity(\"step2\", \"Second step with 5 second timeout\")\n)\n\n// RIGHT - Outer accommodates inner timeouts\ntimeout := pipz.NewTimeout(OuterTimeoutID,\n    pipz.NewSequence(StepsID,\n        pipz.NewTimeout(StepOneID,\n            step1,\n            5*time.Second,\n        
),\n        pipz.NewTimeout(StepTwoID,\n            step2,\n            5*time.Second,\n        ),\n    ),\n    12*time.Second, // Allows both to complete with buffer\n)",{"id":4267,"title":135,"titles":4268,"content":4269,"level":19},"/v1.0.7/reference/connectors/timeout#best-practices",[544],"// Set realistic timeouts\n// GOOD: Based on actual performance\ngoodTimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"realistic\", \"Statistically-based timeout using mean plus standard deviation\"),\n    apiCall,\n    2*averageResponseTime + standardDeviation,\n)\n\n// BAD: Arbitrary timeout\nbadTimeout := pipz.NewTimeout(\n    pipz.NewIdentity(\"arbitrary\", \"Arbitrary timeout without performance justification\"),\n    apiCall,\n    1*time.Minute, // Why 1 minute?\n)\n\n// Define identities for best practices examples\nvar (\n    BatchID     = pipz.NewIdentity(\"batch\", \"Batch processor with timeout\")\n    ProcessID   = pipz.NewIdentity(\"process\", \"Processes batch items\")\n    SlowLogID   = pipz.NewIdentity(\"slow-log\", \"Logs slow operations\")\n    MaybeSlowID = pipz.NewIdentity(\"maybe-slow\", \"Potentially slow operation\")\n    LogSlowID   = pipz.NewIdentity(\"log-slow\", \"Logs slow operations\")\n)\n\n// Handle partial results\nbatchWithTimeout := pipz.NewTimeout(BatchID,\n    pipz.Apply(ProcessID, func(ctx context.Context, batch Batch) (Batch, error) {\n        var processed []Item\n        for _, item := range batch.Items {\n            if ctx.Err() != nil {\n                // Return partial results\n                return Batch{\n                    Items:     batch.Items,\n                    Processed: processed,\n                    Partial:   true,\n                }, ctx.Err()\n            }\n            processed = append(processed, process(item))\n        }\n        return Batch{Items: batch.Items, Processed: processed}, nil\n    }),\n    30*time.Second,\n)\n\n// Log slow operations\nslowLogger := pipz.NewHandle(SlowLogID,\n    
pipz.NewTimeout(MaybeSlowID,\n        processor,\n        5*time.Second,\n    ),\n    pipz.Effect(LogSlowID, func(ctx context.Context, err *pipz.Error[Data]) error {\n        // Log operations that take more than 3s\n        if err.Duration > 3*time.Second {\n            log.Printf(\"Slow operation: %v (timeout: %v)\", err.Duration, err.Timeout)\n        }\n        return nil\n    }),\n)",{"id":4271,"title":1764,"titles":4272,"content":4273,"level":19},"/v1.0.7/reference/connectors/timeout#see-also",[544],"Retry - Often combined with timeoutFallback - For timeout recoveryRace - Alternative approach to timeoutsHandle - For timeout monitoring html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html 
pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .skxcq, html code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}",{"id":4275,"title":529,"titles":4276,"content":4277,"level":9},"/v1.0.7/reference/connectors/workerpool",[],"Bounded parallel execution with a fixed number of workers for controlled resource usage",{"id":4279,"title":529,"titles":4280,"content":4281,"level":9},"/v1.0.7/reference/connectors/workerpool#workerpool",[],"Bounded parallel execution with a fixed number of workers.",{"id":4283,"title":2263,"titles":4284,"content":4285,"level":19},"/v1.0.7/reference/connectors/workerpool#function-signature",[529],"func NewWorkerPool[T Cloner[T]](identity Identity, workers int, processors ...Chainable[T]) *WorkerPool[T]",{"id":4287,"title":2043,"titles":4288,"content":2951,"level":19},"/v1.0.7/reference/connectors/workerpool#type-constraints",[529],{"id":4290,"title":2268,"titles":4291,"content":4292,"level":19},"/v1.0.7/reference/connectors/workerpool#parameters",[529],"identity (Identity) - Identifier with name and description for the connector used in debugging and observabilityworkers (int) - Maximum number of concurrent processors (semaphore size)processors - Variable number of processors to execute with limited parallelism",{"id":4294,"title":2273,"titles":4295,"content":4296,"level":19},"/v1.0.7/reference/connectors/workerpool#returns",[529],"Returns a *WorkerPool[T] that implements Chainable[T].",{"id":4298,"title":2835,"titles":4299,"content":29,"level":19},"/v1.0.7/reference/connectors/workerpool#testing-configuration",[529],{"id":4301,"title":2839,"titles":4302,"content":4303,"level":35},"/v1.0.7/reference/connectors/workerpool#withclock",[529,2835],"func (w *WorkerPool[T]) WithClock(clock clockz.Clock) *WorkerPool[T] Sets a custom clock implementation for testing purposes. 
This method enables controlled time manipulation in tests using clockz.FakeClock. Parameters: clock (clockz.Clock) - Clock implementation to use Returns:\nReturns the same connector instance for method chaining. Example: // Use fake clock in tests\nfakeClock := clockz.NewFakeClock()\npool := pipz.NewWorkerPool(\n    pipz.NewIdentity(\"test\", \"Test worker pool with fake clock for controlled time testing\"),\n    3,\n    processor1,\n    processor2,\n).WithClock(fakeClock)\n\n// Advance time in test for timeout testing\nfakeClock.Advance(5 * time.Second)",{"id":4305,"title":2278,"titles":4306,"content":4307,"level":19},"/v1.0.7/reference/connectors/workerpool#behavior",[529],"Bounded parallelism - Limits concurrent execution to worker countSemaphore pattern - Uses channel-based semaphore for concurrency controlData isolation - Each processor receives a clone of the inputWait for all - Waits for all processors to completeReturns original - Always returns the original input dataContext preservation - Passes original context to all processorsDynamic configuration - Worker count and processors can be modified at runtimeOptional timeout - Per-task timeout configuration available",{"id":4309,"title":2283,"titles":4310,"content":4311,"level":19},"/v1.0.7/reference/connectors/workerpool#example",[529],"// Define a type that implements Cloner\ntype APIRequest struct {\n    ID       string\n    Endpoint string\n    Payload  map[string]interface{}\n}\n\nfunc (r APIRequest) Clone() APIRequest {\n    payload := make(map[string]interface{}, len(r.Payload))\n    for k, v := range r.Payload {\n        payload[k] = v\n    }\n    return APIRequest{\n        ID:       r.ID,\n        Endpoint: r.Endpoint,\n        Payload:  payload,\n    }\n}\n\n// Define identities upfront\nvar (\n    APIBatchID    = pipz.NewIdentity(\"api-batch\", \"Bounded parallel execution limited to 3 concurrent requests\")\n    ServiceAID    = pipz.NewIdentity(\"service-a\", \"Call service A\")\n    ServiceBID  
  = pipz.NewIdentity(\"service-b\", \"Call service B\")\n    ServiceCID    = pipz.NewIdentity(\"service-c\", \"Call service C\")\n    ServiceDID    = pipz.NewIdentity(\"service-d\", \"Call service D\")\n    ServiceEID    = pipz.NewIdentity(\"service-e\", \"Call service E\")\n    APIFlowID     = pipz.NewIdentity(\"api-flow\", \"API request flow pipeline\")\n    ValidateID    = pipz.NewIdentity(\"validate\", \"Validate API request\")\n    AuthID        = pipz.NewIdentity(\"auth\", \"Add authentication\")\n)\n\n// Create worker pool with limited concurrency\napiCalls := pipz.NewWorkerPool(APIBatchID,\n    3,\n    pipz.Apply(ServiceAID, callServiceA),\n    pipz.Apply(ServiceBID, callServiceB),\n    pipz.Apply(ServiceCID, callServiceC),\n    pipz.Apply(ServiceDID, callServiceD),\n    pipz.Apply(ServiceEID, callServiceE),\n)\n\n// Use in a pipeline\npipeline := pipz.NewSequence[APIRequest](APIFlowID,\n    pipz.Apply(ValidateID, validateRequest),\n    pipz.Apply(AuthID, addAuthentication),\n    apiCalls, // Only 3 API calls run concurrently\n)",{"id":4313,"title":2292,"titles":4314,"content":4315,"level":19},"/v1.0.7/reference/connectors/workerpool#when-to-use",[529],"Use WorkerPool when: You have many processors but limited resourcesExternal services have rate limits or connection limitsYou want to prevent resource exhaustionMemory or CPU constraints require bounded parallelismDatabase connection pools have limited connectionsYou need predictable resource usageTesting with controlled concurrency levels",{"id":4317,"title":2297,"titles":4318,"content":4319,"level":19},"/v1.0.7/reference/connectors/workerpool#when-not-to-use",[529],"Don't use WorkerPool when: You need maximum parallelism (use Concurrent)Operations must run in order (use Sequence)Fire-and-forget semantics needed (use Scaffold)Only have a few processors (overhead not worth it)Type doesn't implement Cloner[T] (compilation 
error)",{"id":4321,"title":4322,"titles":4323,"content":4324,"level":19},"/v1.0.7/reference/connectors/workerpool#workerpool-vs-other-parallel-connectors","WorkerPool vs Other Parallel Connectors",[529],"FeatureWorkerPoolConcurrentScaffoldParallelismBounded (N workers)UnboundedUnboundedReturnsAfter all completeAfter all completeImmediatelyResource ControlYesNoNoUse CaseLimited resourcesMax parallelismFire-and-forgetMemory UsagePredictableCan spikeCan spike",{"id":4326,"title":2866,"titles":4327,"content":4328,"level":19},"/v1.0.7/reference/connectors/workerpool#signals",[529],"WorkerPool emits typed signals for worker acquisition and saturation via capitan: SignalWhen EmittedFieldsworkerpool.saturatedAll worker slots occupied, task will blockname, worker_count, active_workersworkerpool.acquiredWorker slot acquired, task startingname, worker_count, active_workersworkerpool.releasedWorker slot released, task completedname, worker_count, active_workers Example: import \"github.com/zoobzio/capitan\"\n\n// Hook worker pool signals\ncapitan.Hook(pipz.SignalWorkerPoolSaturated, func(ctx context.Context, e *capitan.Event) {\n    name, _ := pipz.FieldName.From(e)\n    workers, _ := pipz.FieldWorkerCount.From(e)\n    // Alert on saturation\n}) See Hooks Documentation for complete signal reference and usage examples.",{"id":4330,"title":2871,"titles":4331,"content":4332,"level":19},"/v1.0.7/reference/connectors/workerpool#configuration-methods",[529],"var PoolID = pipz.NewIdentity(\"pool\", \"Worker pool\")\npool := pipz.NewWorkerPool(PoolID, 5, processors...)\n\n// Adjust worker count dynamically\npool.SetWorkerCount(10)\n\n// Add timeout per task\npool.WithTimeout(30 * time.Second)\n\n// Add processors dynamically\npool.Add(newProcessor)\n\n// Replace all processors\npool.SetProcessors(proc1, proc2, proc3)\n\n// Query current state\nworkers := pool.GetWorkerCount()     // Maximum workers\nactive := pool.GetActiveWorkers()    // Currently 
active",{"id":4334,"title":2764,"titles":4335,"content":4336,"level":19},"/v1.0.7/reference/connectors/workerpool#performance-characteristics",[529],"Creates one goroutine per processor (up to worker limit)Blocked processors wait for available worker slotSemaphore acquisition overhead: ~50nsMemory usage: O(processors) + cloning costScales linearly up to worker count, then queues",{"id":4338,"title":2181,"titles":4339,"content":29,"level":19},"/v1.0.7/reference/connectors/workerpool#common-patterns",[529],{"id":4341,"title":4342,"titles":4343,"content":4344,"level":35},"/v1.0.7/reference/connectors/workerpool#rate-limited-api-calls","Rate-Limited API Calls",[529,2181],"// Define identities upfront\nvar (\n    APIRateLimitedID = pipz.NewIdentity(\"api-rate-limited\", \"Rate-limited API pool with 10 workers\")\n    FetchUserID      = pipz.NewIdentity(\"fetch-user\", \"Fetch user data\")\n    FetchOrdersID    = pipz.NewIdentity(\"fetch-orders\", \"Fetch order history\")\n    FetchPrefsID     = pipz.NewIdentity(\"fetch-prefs\", \"Fetch preferences\")\n    FetchActivityID  = pipz.NewIdentity(\"fetch-activity\", \"Fetch activity log\")\n)\n\n// Respect API rate limits\napiPool := pipz.NewWorkerPool(APIRateLimitedID,\n    10,\n    pipz.Apply(FetchUserID, fetchUserData),\n    pipz.Apply(FetchOrdersID, fetchOrderHistory),\n    pipz.Apply(FetchPrefsID, fetchPreferences),\n    pipz.Apply(FetchActivityID, fetchActivityLog),\n    // ... 
many more API calls\n).WithTimeout(5 * time.Second)",{"id":4346,"title":4347,"titles":4348,"content":4349,"level":35},"/v1.0.7/reference/connectors/workerpool#database-connection-pool","Database Connection Pool",[529,2181],"// Define identities upfront\nvar (\n    DBOpsID        = pipz.NewIdentity(\"db-ops\", \"Database operations pool with 5 workers\")\n    InsertUserID   = pipz.NewIdentity(\"insert-user\", \"Insert user to database\")\n    UpdateProfileID = pipz.NewIdentity(\"update-profile\", \"Update user profile\")\n    LogActivityID  = pipz.NewIdentity(\"log-activity\", \"Log user activity\")\n    UpdateStatsID  = pipz.NewIdentity(\"update-stats\", \"Update statistics\")\n)\n\n// Limit concurrent database operations\ndbPool := pipz.NewWorkerPool(DBOpsID,\n    5,\n    pipz.Apply(InsertUserID, insertUser),\n    pipz.Apply(UpdateProfileID, updateProfile),\n    pipz.Apply(LogActivityID, logActivity),\n    pipz.Apply(UpdateStatsID, updateStatistics),\n)",{"id":4351,"title":4352,"titles":4353,"content":4354,"level":35},"/v1.0.7/reference/connectors/workerpool#cpu-intensive-operations","CPU-Intensive Operations",[529,2181],"// Define identities upfront\nvar (\n    CPUBoundID       = pipz.NewIdentity(\"cpu-bound\", \"CPU-intensive processing pool\")\n    ResizeImageID    = pipz.NewIdentity(\"resize-image\", \"Resize image\")\n    GenThumbnailID   = pipz.NewIdentity(\"generate-thumbnail\", \"Generate thumbnail\")\n    ExtractMetaID    = pipz.NewIdentity(\"extract-metadata\", \"Extract metadata\")\n    ApplyWatermarkID = pipz.NewIdentity(\"apply-watermark\", \"Apply watermark\")\n)\n\n// Control CPU usage\nprocessing := pipz.NewWorkerPool(CPUBoundID,\n    runtime.NumCPU(),\n    pipz.Transform(ResizeImageID, resizeImage),\n    pipz.Transform(GenThumbnailID, generateThumbnail),\n    pipz.Transform(ExtractMetaID, extractMetadata),\n    pipz.Transform(ApplyWatermarkID, 
applyWatermark),\n)",{"id":4356,"title":4357,"titles":4358,"content":4359,"level":35},"/v1.0.7/reference/connectors/workerpool#batch-processing-with-controlled-concurrency","Batch Processing with Controlled Concurrency",[529,2181],"// Define identities upfront\nvar (\n    BatchFlowID     = pipz.NewIdentity(\"batch-flow\", \"Batch processing flow\")\n    ValidateBatchID = pipz.NewIdentity(\"validate\", \"Validate batch\")\n    ProcessItemsID  = pipz.NewIdentity(\"process-items\", \"Batch item enrichment pool with 20 workers\")\n    Enrich1ID       = pipz.NewIdentity(\"enrich-1\", \"Enrich from service 1\")\n    Enrich2ID       = pipz.NewIdentity(\"enrich-2\", \"Enrich from service 2\")\n    Enrich3ID       = pipz.NewIdentity(\"enrich-3\", \"Enrich from service 3\")\n    AggregateID     = pipz.NewIdentity(\"aggregate\", \"Aggregate results\")\n)\n\n// Process large batches with resource limits\nbatchProcessor := pipz.NewSequence[Batch](BatchFlowID,\n    pipz.Apply(ValidateBatchID, validateBatch),\n    pipz.NewWorkerPool(ProcessItemsID,\n        20,\n        pipz.Apply(Enrich1ID, enrichWithService1),\n        pipz.Apply(Enrich2ID, enrichWithService2),\n        pipz.Apply(Enrich3ID, enrichWithService3),\n        // ... 
potentially hundreds of items\n    ),\n    pipz.Apply(AggregateID, aggregateResults),\n)",{"id":4361,"title":2199,"titles":4362,"content":29,"level":19},"/v1.0.7/reference/connectors/workerpool#gotchas",[529],{"id":4364,"title":4365,"titles":4366,"content":4367,"level":35},"/v1.0.7/reference/connectors/workerpool#dont-create-per-request-instances","❌ Don't create per-request instances",[529,2199],"// WRONG - Creates new pool for each request!\nfunc handleRequest(req Request) Response {\n    poolID := pipz.NewIdentity(\"pool\", \"Worker pool\")\n    pool := pipz.NewWorkerPool(poolID, 5, processors...)\n    return pool.Process(ctx, req) // Defeats the purpose\n}",{"id":4369,"title":4370,"titles":4371,"content":4372,"level":35},"/v1.0.7/reference/connectors/workerpool#use-singleton-instances","✅ Use singleton instances",[529,2199],"// RIGHT - Package-level Identity and pool shared across requests\nvar APIID = pipz.NewIdentity(\"api\", \"Shared API pool for all requests with 5 worker limit\")\nvar apiPool = pipz.NewWorkerPool(APIID, 5, processors...)\n\nfunc handleRequest(req Request) Response {\n    return apiPool.Process(ctx, req) // Properly limited\n}",{"id":4374,"title":4375,"titles":4376,"content":4377,"level":35},"/v1.0.7/reference/connectors/workerpool#dont-forget-workerpool-doesnt-transform-data","❌ Don't forget WorkerPool doesn't transform data",[529,2199],"// WRONG - Expecting modified data\nvar (\n    TransformPoolID = pipz.NewIdentity(\"transform\", \"Incorrectly expecting transformation results\")\n    DoubleID        = pipz.NewIdentity(\"double\", \"Double the value\")\n)\n\npool := pipz.NewWorkerPool(TransformPoolID,\n    3,\n    pipz.Transform(DoubleID, func(ctx context.Context, n int) int {\n        return n * 2 // Result is discarded!\n    }),\n)\nresult, _ := pool.Process(ctx, 5)\n// result is still 5, not 10!",{"id":4379,"title":4380,"titles":4381,"content":4382,"level":35},"/v1.0.7/reference/connectors/workerpool#use-for-side-effects-only","✅ Use 
for side effects only",[529,2199],"// RIGHT - Side effects, not transformations\nvar (\n    EffectsPoolID = pipz.NewIdentity(\"effects\", \"Side effect operations pool\")\n    SaveDBID      = pipz.NewIdentity(\"save-db\", \"Save to database\")\n    SendEventID   = pipz.NewIdentity(\"send-event\", \"Publish event\")\n    UpdateCacheID = pipz.NewIdentity(\"update-cache\", \"Update cache\")\n)\n\npool := pipz.NewWorkerPool(EffectsPoolID,\n    3,\n    pipz.Effect(SaveDBID, saveToDatabase),\n    pipz.Effect(SendEventID, publishEvent),\n    pipz.Effect(UpdateCacheID, updateCache),\n)",{"id":4384,"title":4385,"titles":4386,"content":4387,"level":35},"/v1.0.7/reference/connectors/workerpool#dont-set-workers-to-0","❌ Don't set workers to 0",[529,2199],"// WRONG - No workers means nothing runs!\nvar BrokenID = pipz.NewIdentity(\"broken\", \"Misconfigured pool with zero workers\")\npool := pipz.NewWorkerPool(BrokenID, 0, processors...)",{"id":4389,"title":4390,"titles":4391,"content":4392,"level":35},"/v1.0.7/reference/connectors/workerpool#use-reasonable-worker-counts","✅ Use reasonable worker counts",[529,2199],"// RIGHT - Based on actual constraints\nvar BalancedID = pipz.NewIdentity(\"balanced\", \"Balanced worker pool based on resource constraints\")\npool := pipz.NewWorkerPool(BalancedID, 10, processors...)",{"id":4394,"title":3046,"titles":4395,"content":4396,"level":19},"/v1.0.7/reference/connectors/workerpool#implementation-requirements",[529],"Your type must implement Clone() for thread safety: // Complex type with proper cloning\ntype Task struct {\n    ID         string\n    Priority   int\n    Data       []byte\n    Metadata   map[string]string\n    Processors []string\n}\n\nfunc (t Task) Clone() Task {\n    // Deep copy slice\n    data := make([]byte, len(t.Data))\n    copy(data, t.Data)\n    \n    // Deep copy map\n    metadata := make(map[string]string, len(t.Metadata))\n    for k, v := range t.Metadata {\n        metadata[k] = v\n    }\n    \n    // Deep copy 
string slice\n    processors := make([]string, len(t.Processors))\n    copy(processors, t.Processors)\n    \n    return Task{\n        ID:         t.ID,\n        Priority:   t.Priority,\n        Data:       data,\n        Metadata:   metadata,\n        Processors: processors,\n    }\n}",{"id":4398,"title":2565,"titles":4399,"content":29,"level":19},"/v1.0.7/reference/connectors/workerpool#advanced-usage",[529],{"id":4401,"title":4402,"titles":4403,"content":4404,"level":35},"/v1.0.7/reference/connectors/workerpool#dynamic-worker-adjustment","Dynamic Worker Adjustment",[529,2565],"// Define identity upfront\nvar AdaptivePoolID = pipz.NewIdentity(\"adaptive\", \"Self-adjusting worker pool that scales based on utilization\")\n\n// Adjust workers based on load\npool := pipz.NewWorkerPool(AdaptivePoolID, 5, processors...)\n\n// Monitor and adjust\ngo func() {\n    for {\n        active := pool.GetActiveWorkers()\n        max := pool.GetWorkerCount()\n\n        utilization := float64(active) / float64(max)\n        if utilization > 0.8 {\n            pool.SetWorkerCount(max + 5) // Scale up\n        } else if utilization \u003C 0.2 && max > 5 {\n            pool.SetWorkerCount(max - 5) // Scale down\n        }\n\n        time.Sleep(30 * time.Second)\n    }\n}()",{"id":4406,"title":4407,"titles":4408,"content":4409,"level":35},"/v1.0.7/reference/connectors/workerpool#combining-with-circuit-breaker","Combining with Circuit Breaker",[529,2565],"// Define identities upfront\nvar (\n    ProtectedBreakerID = pipz.NewIdentity(\"protected\", \"Circuit breaker for worker pool\")\n    LimitedPoolID      = pipz.NewIdentity(\"limited\", \"Protected API pool with worker limit\")\n    API1ID             = pipz.NewIdentity(\"api-1\", \"Call API 1\")\n    API2ID             = pipz.NewIdentity(\"api-2\", \"Call API 2\")\n    API3ID             = pipz.NewIdentity(\"api-3\", \"Call API 3\")\n)\n\n// Protect external services with both patterns\nprotected := 
pipz.NewCircuitBreaker(ProtectedBreakerID,\n    pipz.NewWorkerPool(LimitedPoolID,\n        10,\n        pipz.Apply(API1ID, callAPI1),\n        pipz.Apply(API2ID, callAPI2),\n        pipz.Apply(API3ID, callAPI3),\n    ),\n    5, time.Minute,\n)",{"id":4411,"title":1764,"titles":4412,"content":4413,"level":19},"/v1.0.7/reference/connectors/workerpool#see-also",[529],"Concurrent - For unbounded parallel executionScaffold - For fire-and-forget operationsSequence - For sequential executionRateLimiter - For time-based rate limiting html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .skxcq, html 
code.shiki .skxcq{--shiki-default:var(--shiki-builtin)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}",[4415],{"title":4416,"path":4417,"stem":4418,"children":4419,"page":4437},"V107","/v1.0.7","v1.0.7",[4420,4422,4438,4457,4468],{"title":6,"path":5,"stem":4421,"description":8},"v1.0.7/1.overview",{"title":4423,"path":4424,"stem":4425,"children":4426,"page":4437},"Learn","/v1.0.7/learn","v1.0.7/2.learn",[4427,4429,4431,4433,4435],{"title":58,"path":57,"stem":4428,"description":60},"v1.0.7/2.learn/1.quickstart",{"title":155,"path":154,"stem":4430,"description":157},"v1.0.7/2.learn/2.introduction",{"title":226,"path":225,"stem":4432,"description":228},"v1.0.7/2.learn/3.core-concepts",{"title":322,"path":321,"stem":4434,"description":324},"v1.0.7/2.learn/4.architecture",{"title":497,"path":496,"stem":4436,"description":499},"v1.0.7/2.learn/5.hooks",false,{"title":4439,"path":4440,"stem":4441,"children":4442,"page":4437},"Guides","/v1.0.7/guides","v1.0.7/3.guides",[4443,4445,4447,4449,4451,4453,4455],{"title":666,"path":665,"stem":4444,"description":668},"v1.0.7/3.guides/1.connector-selection",{"title":773,"path":772,"stem":4446,"description":775},"v1.0.7/3.guides/2.cloning",{"title":135,"path":891,"stem":4448,"description":893},"v1.0.7/3.guides/3.best-practices",{"title":1054,"path":1053,"stem":4450,"description":1056},"v1.0.7/3.guides/4.testing",{"title":1274,"path":1273,"stem":4452,"description":1276},"v1.0.7/3.guides/5.performance",{"title":1369,"path":1368,"stem":4454,"description":1371},"v1.0.7/3.guides/6.safety-reliability",{"title":1494,"path":1493,"stem":4456,"description":1496},"v1.0.7/3.guides/7.troubleshooting",{"title":4458,"path":4459,"stem":4460,"children":4461,"page":4437},"Cookbook","/v1.0.7/cookbook","v1.0.7/4.cookbook",[4462,4464,4466],{"title":1690,"path":1689,"stem":4463,"description":1692},"v1.0.7/4.cookbook/1.building-pipelines",{"title":1769,"path":1768,"stem":4465,"description":1771},"v1.0.7/4.cookbook/2.library-resi
lience",{"title":1858,"path":1857,"stem":4467,"description":1860},"v1.0.7/4.cookbook/3.extensible-vocabulary",{"title":4469,"path":4470,"stem":4471,"children":4472,"page":4437},"Reference","/v1.0.7/reference","v1.0.7/5.reference",[4473,4475,4481,4495],{"title":1921,"path":1920,"stem":4474,"description":1923},"v1.0.7/5.reference/1.cheatsheet",{"title":2246,"path":2245,"stem":4476,"children":4477,"description":2248},"v1.0.7/5.reference/2.types/index",[4478,4479],{"title":2246,"path":2245,"stem":4476,"description":2248},{"title":2072,"path":2071,"stem":4480,"description":2074},"v1.0.7/5.reference/2.types/error",{"title":192,"path":2489,"stem":4482,"children":4483,"description":2491},"v1.0.7/5.reference/3.processors/index",[4484,4485,4487,4489,4491,4493],{"title":192,"path":2489,"stem":4482,"description":2491},{"title":2254,"path":2253,"stem":4486,"description":2256},"v1.0.7/5.reference/3.processors/apply",{"title":2337,"path":2336,"stem":4488,"description":2339},"v1.0.7/5.reference/3.processors/effect",{"title":2413,"path":2412,"stem":4490,"description":2415},"v1.0.7/5.reference/3.processors/enrich",{"title":2497,"path":2496,"stem":4492,"description":2499},"v1.0.7/5.reference/3.processors/mutate",{"title":2574,"path":2573,"stem":4494,"description":2576},"v1.0.7/5.reference/3.processors/transform",{"title":197,"path":3479,"stem":4496,"children":4497,"description":3481},"v1.0.7/5.reference/4.connectors/index",[4498,4499,4501,4503,4505,4507,4509,4511,4513,4515,4517,4519,4521,4523,4525,4527,4529],{"title":197,"path":3479,"stem":4496,"description":3481},{"title":549,"path":2633,"stem":4500,"description":2635},"v1.0.7/5.reference/4.connectors/backoff",{"title":519,"path":2813,"stem":4502,"description":2815},"v1.0.7/5.reference/4.connectors/circuitbreaker",{"title":2937,"path":2936,"stem":4504,"description":2939},"v1.0.7/5.reference/4.connectors/concurrent",{"title":3055,"path":3054,"stem":4506,"description":3057},"v1.0.7/5.reference/4.connectors/contest",{"title":539,"path":
3172,"stem":4508,"description":3174},"v1.0.7/5.reference/4.connectors/fallback",{"title":3263,"path":3262,"stem":4510,"description":3265},"v1.0.7/5.reference/4.connectors/filter",{"title":3394,"path":3393,"stem":4512,"description":3396},"v1.0.7/5.reference/4.connectors/handle",{"title":3487,"path":3486,"stem":4514,"description":3489},"v1.0.7/5.reference/4.connectors/pipeline",{"title":3546,"path":3545,"stem":4516,"description":3548},"v1.0.7/5.reference/4.connectors/race",{"title":524,"path":3638,"stem":4518,"description":3640},"v1.0.7/5.reference/4.connectors/ratelimiter",{"title":534,"path":3765,"stem":4520,"description":3767},"v1.0.7/5.reference/4.connectors/retry",{"title":3857,"path":3856,"stem":4522,"description":3859},"v1.0.7/5.reference/4.connectors/scaffold",{"title":3955,"path":3954,"stem":4524,"description":3957},"v1.0.7/5.reference/4.connectors/sequence",{"title":4049,"path":4048,"stem":4526,"description":4051},"v1.0.7/5.reference/4.connectors/switch",{"title":544,"path":4173,"stem":4528,"description":4175},"v1.0.7/5.reference/4.connectors/timeout",{"title":529,"path":4275,"stem":4530,"description":4277},"v1.0.7/5.reference/4.connectors/workerpool",[4532],{"title":4416,"path":4417,"stem":4418,"children":4533,"page":4437},[4534,4535,4542,4551,4556],{"title":6,"path":5,"stem":4421},{"title":4423,"path":4424,"stem":4425,"children":4536,"page":4437},[4537,4538,4539,4540,4541],{"title":58,"path":57,"stem":4428},{"title":155,"path":154,"stem":4430},{"title":226,"path":225,"stem":4432},{"title":322,"path":321,"stem":4434},{"title":497,"path":496,"stem":4436},{"title":4439,"path":4440,"stem":4441,"children":4543,"page":4437},[4544,4545,4546,4547,4548,4549,4550],{"title":666,"path":665,"stem":4444},{"title":773,"path":772,"stem":4446},{"title":135,"path":891,"stem":4448},{"title":1054,"path":1053,"stem":4450},{"title":1274,"path":1273,"stem":4452},{"title":1369,"path":1368,"stem":4454},{"title":1494,"path":1493,"stem":4456},{"title":4458,"path":4459,"stem":4460,"c
hildren":4552,"page":4437},[4553,4554,4555],{"title":1690,"path":1689,"stem":4463},{"title":1769,"path":1768,"stem":4465},{"title":1858,"path":1857,"stem":4467},{"title":4469,"path":4470,"stem":4471,"children":4557,"page":4437},[4558,4559,4563,4571],{"title":1921,"path":1920,"stem":4474},{"title":2246,"path":2245,"stem":4476,"children":4560},[4561,4562],{"title":2246,"path":2245,"stem":4476},{"title":2072,"path":2071,"stem":4480},{"title":192,"path":2489,"stem":4482,"children":4564},[4565,4566,4567,4568,4569,4570],{"title":192,"path":2489,"stem":4482},{"title":2254,"path":2253,"stem":4486},{"title":2337,"path":2336,"stem":4488},{"title":2413,"path":2412,"stem":4490},{"title":2497,"path":2496,"stem":4492},{"title":2574,"path":2573,"stem":4494},{"title":197,"path":3479,"stem":4496,"children":4572},[4573,4574,4575,4576,4577,4578,4579,4580,4581,4582,4583,4584,4585,4586,4587,4588,4589],{"title":197,"path":3479,"stem":4496},{"title":549,"path":2633,"stem":4500},{"title":519,"path":2813,"stem":4502},{"title":2937,"path":2936,"stem":4504},{"title":3055,"path":3054,"stem":4506},{"title":539,"path":3172,"stem":4508},{"title":3263,"path":3262,"stem":4510},{"title":3394,"path":3393,"stem":4512},{"title":3487,"path":3486,"stem":4514},{"title":3546,"path":3545,"stem":4516},{"title":524,"path":3638,"stem":4518},{"title":534,"path":3765,"stem":4520},{"title":3857,"path":3856,"stem":4522},{"title":3955,"path":3954,"stem":4524},{"title":4049,"path":4048,"stem":4526},{"title":544,"path":4173,"stem":4528},{"title":529,"path":4275,"stem":4530},[4591,7731,8153],{"id":4592,"title":4593,"body":4594,"description":29,"extension":7724,"icon":7725,"meta":7726,"navigation":4917,"path":7727,"seo":7728,"stem":7729,"__hash__":7730},"resources/readme.md","README",{"type":4595,"value":4596,"toc":7707},"minimark",[4597,4601,4669,4672,4675,4680,4688,4806,4812,5131,5136,5310,5314,5331,5334,5338,6227,6230,6345,6348,6396,6400,6403,6658,6665,6669,6675,7293,7296,7300,7308,7312,7343,7346,7392,7395,7418,7421
,7434,7438,7501,7504,7678,7682,7694,7697,7703],[4598,4599,4600],"h1",{"id":4600},"pipz",[4602,4603,4604,4615,4623,4631,4639,4647,4654,4661],"p",{},[4605,4606,4610],"a",{"href":4607,"rel":4608},"https://github.com/zoobz-io/pipz/actions/workflows/ci.yml",[4609],"nofollow",[4611,4612],"img",{"alt":4613,"src":4614},"CI Status","https://github.com/zoobz-io/pipz/workflows/CI/badge.svg",[4605,4616,4619],{"href":4617,"rel":4618},"https://codecov.io/gh/zoobz-io/pipz",[4609],[4611,4620],{"alt":4621,"src":4622},"codecov","https://codecov.io/gh/zoobz-io/pipz/graph/badge.svg?branch=main",[4605,4624,4627],{"href":4625,"rel":4626},"https://goreportcard.com/report/github.com/zoobz-io/pipz",[4609],[4611,4628],{"alt":4629,"src":4630},"Go Report Card","https://goreportcard.com/badge/github.com/zoobz-io/pipz",[4605,4632,4635],{"href":4633,"rel":4634},"https://github.com/zoobz-io/pipz/security/code-scanning",[4609],[4611,4636],{"alt":4637,"src":4638},"CodeQL","https://github.com/zoobz-io/pipz/workflows/CodeQL/badge.svg",[4605,4640,4643],{"href":4641,"rel":4642},"https://pkg.go.dev/github.com/zoobz-io/pipz",[4609],[4611,4644],{"alt":4645,"src":4646},"Go Reference","https://pkg.go.dev/badge/github.com/zoobz-io/pipz.svg",[4605,4648,4650],{"href":4649},"LICENSE",[4611,4651],{"alt":4652,"src":4653},"License","https://img.shields.io/github/license/zoobz-io/pipz",[4605,4655,4657],{"href":4656},"go.mod",[4611,4658],{"alt":4659,"src":4660},"Go Version","https://img.shields.io/github/go-mod/go-version/zoobz-io/pipz",[4605,4662,4665],{"href":4663,"rel":4664},"https://github.com/zoobz-io/pipz/releases",[4609],[4611,4666],{"alt":4667,"src":4668},"Release","https://img.shields.io/github/v/release/zoobz-io/pipz",[4602,4670,4671],{},"Type-safe, composable data pipelines for Go.",[4602,4673,4674],{},"Build processing pipelines from simple parts, compose them into complex flows, and get rich error context when things fail.",[4676,4677,4679],"h2",{"id":4678},"one-interface-to-rule-them-all","One 
Interface To Rule Them All",[4602,4681,4682,4683,4687],{},"Every primitive in pipz implements ",[4684,4685,4686],"code",{},"Chainable[T]",":",[4689,4690,4694],"pre",{"className":4691,"code":4692,"language":4693,"meta":29,"style":29},"language-go shiki shiki-themes","type Chainable[T any] interface {\n    Process(context.Context, T) (T, error)\n    Identity() Identity\n    Schema() Node\n    Close() error\n}\n","go",[4684,4695,4696,4728,4768,4779,4789,4800],{"__ignoreMap":29},[4697,4698,4700,4704,4708,4712,4716,4719,4722,4725],"span",{"class":4699,"line":9},"line",[4697,4701,4703],{"class":4702},"sUt3r","type",[4697,4705,4707],{"class":4706},"sYBwO"," Chainable",[4697,4709,4711],{"class":4710},"sq5bi","[",[4697,4713,4715],{"class":4714},"sSYET","T",[4697,4717,4718],{"class":4706}," any",[4697,4720,4721],{"class":4710},"]",[4697,4723,4724],{"class":4702}," interface",[4697,4726,4727],{"class":4710}," {\n",[4697,4729,4730,4734,4737,4740,4743,4746,4749,4752,4755,4758,4760,4762,4765],{"class":4699,"line":19},[4697,4731,4733],{"class":4732},"s5klm","    Process",[4697,4735,4736],{"class":4710},"(",[4697,4738,4739],{"class":4706},"context",[4697,4741,4742],{"class":4710},".",[4697,4744,4745],{"class":4706},"Context",[4697,4747,4748],{"class":4710},",",[4697,4750,4751],{"class":4706}," T",[4697,4753,4754],{"class":4710},")",[4697,4756,4757],{"class":4710}," (",[4697,4759,4715],{"class":4706},[4697,4761,4748],{"class":4710},[4697,4763,4764],{"class":4706}," error",[4697,4766,4767],{"class":4710},")\n",[4697,4769,4770,4773,4776],{"class":4699,"line":35},[4697,4771,4772],{"class":4732},"    Identity",[4697,4774,4775],{"class":4710},"()",[4697,4777,4778],{"class":4706}," Identity\n",[4697,4780,4781,4784,4786],{"class":4699,"line":917},[4697,4782,4783],{"class":4732},"    Schema",[4697,4785,4775],{"class":4710},[4697,4787,4788],{"class":4706}," Node\n",[4697,4790,4792,4795,4797],{"class":4699,"line":4791},5,[4697,4793,4794],{"class":4732},"    
Close",[4697,4796,4775],{"class":4710},[4697,4798,4799],{"class":4706}," error\n",[4697,4801,4803],{"class":4699,"line":4802},6,[4697,4804,4805],{"class":4710},"}\n",[4602,4807,4808,4811],{},[4809,4810,192],"strong",{}," wrap your functions — the callback signature is the only difference:",[4689,4813,4815],{"className":4691,"code":4814,"language":4693,"meta":29,"style":29},"// Transform: pure function, no errors\nenrich := pipz.Transform(EnrichID, func(ctx context.Context, o Order) Order {\n    o.ProcessedAt = time.Now()\n    return o\n})\n\n// Apply: fallible function\nvalidate := pipz.Apply(ValidateID, func(ctx context.Context, o Order) (Order, error) {\n    if o.Total \u003C= 0 {\n        return o, errors.New(\"invalid total\")\n    }\n    return o, nil\n})\n\n// Effect: side effect, data passes through unchanged\nnotify := pipz.Effect(NotifyID, func(ctx context.Context, o Order) error {\n    return sendNotification(o.ID)\n})\n",[4684,4816,4817,4823,4875,4899,4908,4913,4919,4925,4979,5001,5027,5033,5045,5050,5055,5061,5106,5126],{"__ignoreMap":29},[4697,4818,4819],{"class":4699,"line":9},[4697,4820,4822],{"class":4821},"sLkEo","// Transform: pure function, no errors\n",[4697,4824,4825,4829,4832,4835,4837,4839,4841,4844,4846,4849,4851,4854,4857,4859,4861,4863,4866,4869,4871,4873],{"class":4699,"line":19},[4697,4826,4828],{"class":4827},"sh8_p","enrich",[4697,4830,4831],{"class":4827}," :=",[4697,4833,4834],{"class":4827}," pipz",[4697,4836,4742],{"class":4710},[4697,4838,2574],{"class":4732},[4697,4840,4736],{"class":4710},[4697,4842,4843],{"class":4827},"EnrichID",[4697,4845,4748],{"class":4710},[4697,4847,4848],{"class":4702}," func",[4697,4850,4736],{"class":4710},[4697,4852,4853],{"class":4714},"ctx",[4697,4855,4856],{"class":4706}," context",[4697,4858,4742],{"class":4710},[4697,4860,4745],{"class":4706},[4697,4862,4748],{"class":4710},[4697,4864,4865],{"class":4714}," o",[4697,4867,4868],{"class":4706}," 
Order",[4697,4870,4754],{"class":4710},[4697,4872,4868],{"class":4706},[4697,4874,4727],{"class":4710},[4697,4876,4877,4880,4882,4885,4888,4891,4893,4896],{"class":4699,"line":35},[4697,4878,4879],{"class":4827},"    o",[4697,4881,4742],{"class":4710},[4697,4883,4884],{"class":4827},"ProcessedAt",[4697,4886,4887],{"class":4827}," =",[4697,4889,4890],{"class":4827}," time",[4697,4892,4742],{"class":4710},[4697,4894,4895],{"class":4732},"Now",[4697,4897,4898],{"class":4710},"()\n",[4697,4900,4901,4905],{"class":4699,"line":917},[4697,4902,4904],{"class":4903},"sW3Qg","    return",[4697,4906,4907],{"class":4827}," o\n",[4697,4909,4910],{"class":4699,"line":4791},[4697,4911,4912],{"class":4710},"})\n",[4697,4914,4915],{"class":4699,"line":4802},[4697,4916,4918],{"emptyLinePlaceholder":4917},true,"\n",[4697,4920,4922],{"class":4699,"line":4921},7,[4697,4923,4924],{"class":4821},"// Apply: fallible function\n",[4697,4926,4928,4931,4933,4935,4937,4939,4941,4944,4946,4948,4950,4952,4954,4956,4958,4960,4962,4964,4966,4968,4971,4973,4975,4977],{"class":4699,"line":4927},8,[4697,4929,4930],{"class":4827},"validate",[4697,4932,4831],{"class":4827},[4697,4934,4834],{"class":4827},[4697,4936,4742],{"class":4710},[4697,4938,2254],{"class":4732},[4697,4940,4736],{"class":4710},[4697,4942,4943],{"class":4827},"ValidateID",[4697,4945,4748],{"class":4710},[4697,4947,4848],{"class":4702},[4697,4949,4736],{"class":4710},[4697,4951,4853],{"class":4714},[4697,4953,4856],{"class":4706},[4697,4955,4742],{"class":4710},[4697,4957,4745],{"class":4706},[4697,4959,4748],{"class":4710},[4697,4961,4865],{"class":4714},[4697,4963,4868],{"class":4706},[4697,4965,4754],{"class":4710},[4697,4967,4757],{"class":4710},[4697,4969,4970],{"class":4706},"Order",[4697,4972,4748],{"class":4710},[4697,4974,4764],{"class":4706},[4697,4976,4754],{"class":4710},[4697,4978,4727],{"class":4710},[4697,4980,4982,4985,4987,4989,4992,4995,4999],{"class":4699,"line":4981},9,[4697,4983,4984],{"class":4903},"    
if",[4697,4986,4865],{"class":4827},[4697,4988,4742],{"class":4710},[4697,4990,4991],{"class":4827},"Total",[4697,4993,4994],{"class":4903}," \u003C=",[4697,4996,4998],{"class":4997},"sMAmT"," 0",[4697,5000,4727],{"class":4710},[4697,5002,5004,5007,5009,5011,5014,5016,5019,5021,5025],{"class":4699,"line":5003},10,[4697,5005,5006],{"class":4903},"        return",[4697,5008,4865],{"class":4827},[4697,5010,4748],{"class":4710},[4697,5012,5013],{"class":4827}," errors",[4697,5015,4742],{"class":4710},[4697,5017,5018],{"class":4732},"New",[4697,5020,4736],{"class":4710},[4697,5022,5024],{"class":5023},"sxAnc","\"invalid total\"",[4697,5026,4767],{"class":4710},[4697,5028,5030],{"class":4699,"line":5029},11,[4697,5031,5032],{"class":4710},"    }\n",[4697,5034,5036,5038,5040,5042],{"class":4699,"line":5035},12,[4697,5037,4904],{"class":4903},[4697,5039,4865],{"class":4827},[4697,5041,4748],{"class":4710},[4697,5043,5044],{"class":4702}," nil\n",[4697,5046,5048],{"class":4699,"line":5047},13,[4697,5049,4912],{"class":4710},[4697,5051,5053],{"class":4699,"line":5052},14,[4697,5054,4918],{"emptyLinePlaceholder":4917},[4697,5056,5058],{"class":4699,"line":5057},15,[4697,5059,5060],{"class":4821},"// Effect: side effect, data passes through 
unchanged\n",[4697,5062,5064,5067,5069,5071,5073,5075,5077,5080,5082,5084,5086,5088,5090,5092,5094,5096,5098,5100,5102,5104],{"class":4699,"line":5063},16,[4697,5065,5066],{"class":4827},"notify",[4697,5068,4831],{"class":4827},[4697,5070,4834],{"class":4827},[4697,5072,4742],{"class":4710},[4697,5074,2337],{"class":4732},[4697,5076,4736],{"class":4710},[4697,5078,5079],{"class":4827},"NotifyID",[4697,5081,4748],{"class":4710},[4697,5083,4848],{"class":4702},[4697,5085,4736],{"class":4710},[4697,5087,4853],{"class":4714},[4697,5089,4856],{"class":4706},[4697,5091,4742],{"class":4710},[4697,5093,4745],{"class":4706},[4697,5095,4748],{"class":4710},[4697,5097,4865],{"class":4714},[4697,5099,4868],{"class":4706},[4697,5101,4754],{"class":4710},[4697,5103,4764],{"class":4706},[4697,5105,4727],{"class":4710},[4697,5107,5109,5111,5114,5116,5119,5121,5124],{"class":4699,"line":5108},17,[4697,5110,4904],{"class":4903},[4697,5112,5113],{"class":4732}," sendNotification",[4697,5115,4736],{"class":4710},[4697,5117,5118],{"class":4827},"o",[4697,5120,4742],{"class":4710},[4697,5122,5123],{"class":4827},"ID",[4697,5125,4767],{"class":4710},[4697,5127,5129],{"class":4699,"line":5128},18,[4697,5130,4912],{"class":4710},[4602,5132,5133,5135],{},[4809,5134,197],{}," compose processors — and each other:",[4689,5137,5139],{"className":4691,"code":5138,"language":4693,"meta":29,"style":29},"// Compose processors into a sequence\nflow := pipz.NewSequence(FlowID, validate, enrich, notify)\n\n// Wrap with resilience patterns\nresilient := pipz.NewRetry(RetryID, flow, 3)\nprotected := pipz.NewTimeout(TimeoutID, resilient, 5*time.Second)\n\n// Connectors nest freely — it's Chainable[T] all the way down\npipeline := pipz.NewCircuitBreaker(BreakerID, protected, 5, 30*time.Second)\n",[4684,5140,5141,5146,5182,5186,5191,5221,5260,5264,5269],{"__ignoreMap":29},[4697,5142,5143],{"class":4699,"line":9},[4697,5144,5145],{"class":4821},"// Compose processors into a 
sequence\n",[4697,5147,5148,5151,5153,5155,5157,5160,5162,5165,5167,5170,5172,5175,5177,5180],{"class":4699,"line":19},[4697,5149,5150],{"class":4827},"flow",[4697,5152,4831],{"class":4827},[4697,5154,4834],{"class":4827},[4697,5156,4742],{"class":4710},[4697,5158,5159],{"class":4732},"NewSequence",[4697,5161,4736],{"class":4710},[4697,5163,5164],{"class":4827},"FlowID",[4697,5166,4748],{"class":4710},[4697,5168,5169],{"class":4827}," validate",[4697,5171,4748],{"class":4710},[4697,5173,5174],{"class":4827}," enrich",[4697,5176,4748],{"class":4710},[4697,5178,5179],{"class":4827}," notify",[4697,5181,4767],{"class":4710},[4697,5183,5184],{"class":4699,"line":35},[4697,5185,4918],{"emptyLinePlaceholder":4917},[4697,5187,5188],{"class":4699,"line":917},[4697,5189,5190],{"class":4821},"// Wrap with resilience patterns\n",[4697,5192,5193,5196,5198,5200,5202,5204,5206,5209,5211,5214,5216,5219],{"class":4699,"line":4791},[4697,5194,5195],{"class":4827},"resilient",[4697,5197,4831],{"class":4827},[4697,5199,4834],{"class":4827},[4697,5201,4742],{"class":4710},[4697,5203,3796],{"class":4732},[4697,5205,4736],{"class":4710},[4697,5207,5208],{"class":4827},"RetryID",[4697,5210,4748],{"class":4710},[4697,5212,5213],{"class":4827}," flow",[4697,5215,4748],{"class":4710},[4697,5217,5218],{"class":4997}," 3",[4697,5220,4767],{"class":4710},[4697,5222,5223,5226,5228,5230,5232,5235,5237,5240,5242,5245,5247,5250,5253,5255,5258],{"class":4699,"line":4802},[4697,5224,5225],{"class":4827},"protected",[4697,5227,4831],{"class":4827},[4697,5229,4834],{"class":4827},[4697,5231,4742],{"class":4710},[4697,5233,5234],{"class":4732},"NewTimeout",[4697,5236,4736],{"class":4710},[4697,5238,5239],{"class":4827},"TimeoutID",[4697,5241,4748],{"class":4710},[4697,5243,5244],{"class":4827}," resilient",[4697,5246,4748],{"class":4710},[4697,5248,5249],{"class":4997}," 
5",[4697,5251,5252],{"class":4827},"*time",[4697,5254,4742],{"class":4710},[4697,5256,5257],{"class":4827},"Second",[4697,5259,4767],{"class":4710},[4697,5261,5262],{"class":4699,"line":4921},[4697,5263,4918],{"emptyLinePlaceholder":4917},[4697,5265,5266],{"class":4699,"line":4927},[4697,5267,5268],{"class":4821},"// Connectors nest freely — it's Chainable[T] all the way down\n",[4697,5270,5271,5274,5276,5278,5280,5283,5285,5288,5290,5293,5295,5297,5299,5302,5304,5306,5308],{"class":4699,"line":4981},[4697,5272,5273],{"class":4827},"pipeline",[4697,5275,4831],{"class":4827},[4697,5277,4834],{"class":4827},[4697,5279,4742],{"class":4710},[4697,5281,5282],{"class":4732},"NewCircuitBreaker",[4697,5284,4736],{"class":4710},[4697,5286,5287],{"class":4827},"BreakerID",[4697,5289,4748],{"class":4710},[4697,5291,5292],{"class":4827}," protected",[4697,5294,4748],{"class":4710},[4697,5296,5249],{"class":4997},[4697,5298,4748],{"class":4710},[4697,5300,5301],{"class":4997}," 30",[4697,5303,5252],{"class":4827},[4697,5305,4742],{"class":4710},[4697,5307,5257],{"class":4827},[4697,5309,4767],{"class":4710},[4676,5311,5313],{"id":5312},"install","Install",[4689,5315,5319],{"className":5316,"code":5317,"language":5318,"meta":29,"style":29},"language-bash shiki shiki-themes","go get github.com/zoobz-io/pipz\n","bash",[4684,5320,5321],{"__ignoreMap":29},[4697,5322,5323,5325,5328],{"class":4699,"line":9},[4697,5324,4693],{"class":4732},[4697,5326,5327],{"class":5023}," get",[4697,5329,5330],{"class":5023}," github.com/zoobz-io/pipz\n",[4602,5332,5333],{},"Requires Go 1.24+.",[4676,5335,5337],{"id":5336},"quick-start","Quick Start",[4689,5339,5341],{"className":4691,"code":5340,"language":4693,"meta":29,"style":29},"package main\n\nimport (\n    \"context\"\n    \"errors\"\n    \"fmt\"\n    \"strings\"\n    \"time\"\n\n    \"github.com/zoobz-io/pipz\"\n)\n\n// Identities for debugging and observability\nvar (\n    ValidateID = pipz.NewIdentity(\"validate\", \"Validates order 
totals\")\n    EnrichID   = pipz.NewIdentity(\"enrich\", \"Adds processing timestamp\")\n    FormatID   = pipz.NewIdentity(\"format\", \"Formats order ID\")\n    PipelineID = pipz.NewIdentity(\"order-flow\", \"Main order pipeline\")\n)\n\ntype Order struct {\n    ID          string\n    Total       float64\n    ProcessedAt time.Time\n}\n\nfunc main() {\n    ctx := context.Background()\n\n    // Processors wrap functions\n    validate := pipz.Apply(ValidateID, func(_ context.Context, o Order) (Order, error) {\n        if o.Total \u003C= 0 {\n            return o, errors.New(\"invalid total\")\n        }\n        return o, nil\n    })\n\n    enrich := pipz.Transform(EnrichID, func(_ context.Context, o Order) Order {\n        o.ProcessedAt = time.Now()\n        return o\n    })\n\n    format := pipz.Transform(FormatID, func(_ context.Context, o Order) Order {\n        o.ID = strings.ToUpper(o.ID)\n        return o\n    })\n\n    // Connectors compose processors\n    pipeline := pipz.NewSequence(PipelineID, validate, enrich, format)\n\n    // Process\n    result, err := pipeline.Process(ctx, Order{ID: \"order-123\", Total: 99.99})\n    if err != nil {\n        var pipeErr *pipz.Error[Order]\n        if errors.As(err, &pipeErr) {\n            fmt.Printf(\"Failed at %s: %v\\n\", strings.Join(pipeErr.Path, \"->\"), pipeErr.Err)\n        }\n        return\n    }\n\n    fmt.Printf(\"Processed: %s at %v\\n\", result.ID, result.ProcessedAt)\n}\n",[4684,5342,5343,5351,5355,5364,5369,5374,5379,5384,5389,5393,5398,5402,5406,5411,5418,5444,5470,5495,5520,5525,5530,5542,5552,5561,5574,5579,5584,5597,5614,5619,5625,5678,5696,5718,5724,5735,5741,5746,5790,5810,5817,5822,5827,5872,5901,5908,5913,5918,5924,5958,5963,5969,6020,6035,6061,6090,6156,6161,6167,6172,6177,6222],{"__ignoreMap":29},[4697,5344,5345,5348],{"class":4699,"line":9},[4697,5346,5347],{"class":4702},"package",[4697,5349,5350],{"class":4706}," 
main\n",[4697,5352,5353],{"class":4699,"line":19},[4697,5354,4918],{"emptyLinePlaceholder":4917},[4697,5356,5357,5360],{"class":4699,"line":35},[4697,5358,5359],{"class":4702},"import",[4697,5361,5363],{"class":5362},"soy-K"," (\n",[4697,5365,5366],{"class":4699,"line":917},[4697,5367,5368],{"class":5023},"    \"context\"\n",[4697,5370,5371],{"class":4699,"line":4791},[4697,5372,5373],{"class":5023},"    \"errors\"\n",[4697,5375,5376],{"class":4699,"line":4802},[4697,5377,5378],{"class":5023},"    \"fmt\"\n",[4697,5380,5381],{"class":4699,"line":4921},[4697,5382,5383],{"class":5023},"    \"strings\"\n",[4697,5385,5386],{"class":4699,"line":4927},[4697,5387,5388],{"class":5023},"    \"time\"\n",[4697,5390,5391],{"class":4699,"line":4981},[4697,5392,4918],{"emptyLinePlaceholder":4917},[4697,5394,5395],{"class":4699,"line":5003},[4697,5396,5397],{"class":5023},"    \"github.com/zoobz-io/pipz\"\n",[4697,5399,5400],{"class":4699,"line":5029},[4697,5401,4767],{"class":5362},[4697,5403,5404],{"class":4699,"line":5035},[4697,5405,4918],{"emptyLinePlaceholder":4917},[4697,5407,5408],{"class":4699,"line":5047},[4697,5409,5410],{"class":4821},"// Identities for debugging and observability\n",[4697,5412,5413,5416],{"class":4699,"line":5052},[4697,5414,5415],{"class":4702},"var",[4697,5417,5363],{"class":4710},[4697,5419,5420,5423,5425,5427,5429,5432,5434,5437,5439,5442],{"class":4699,"line":5057},[4697,5421,5422],{"class":4827},"    ValidateID",[4697,5424,4887],{"class":4827},[4697,5426,4834],{"class":4827},[4697,5428,4742],{"class":4710},[4697,5430,5431],{"class":4732},"NewIdentity",[4697,5433,4736],{"class":4710},[4697,5435,5436],{"class":5023},"\"validate\"",[4697,5438,4748],{"class":4710},[4697,5440,5441],{"class":5023}," \"Validates order totals\"",[4697,5443,4767],{"class":4710},[4697,5445,5446,5449,5452,5454,5456,5458,5460,5463,5465,5468],{"class":4699,"line":5063},[4697,5447,5448],{"class":4827},"    EnrichID",[4697,5450,5451],{"class":4827},"   
=",[4697,5453,4834],{"class":4827},[4697,5455,4742],{"class":4710},[4697,5457,5431],{"class":4732},[4697,5459,4736],{"class":4710},[4697,5461,5462],{"class":5023},"\"enrich\"",[4697,5464,4748],{"class":4710},[4697,5466,5467],{"class":5023}," \"Adds processing timestamp\"",[4697,5469,4767],{"class":4710},[4697,5471,5472,5475,5477,5479,5481,5483,5485,5488,5490,5493],{"class":4699,"line":5108},[4697,5473,5474],{"class":4827},"    FormatID",[4697,5476,5451],{"class":4827},[4697,5478,4834],{"class":4827},[4697,5480,4742],{"class":4710},[4697,5482,5431],{"class":4732},[4697,5484,4736],{"class":4710},[4697,5486,5487],{"class":5023},"\"format\"",[4697,5489,4748],{"class":4710},[4697,5491,5492],{"class":5023}," \"Formats order ID\"",[4697,5494,4767],{"class":4710},[4697,5496,5497,5500,5502,5504,5506,5508,5510,5513,5515,5518],{"class":4699,"line":5128},[4697,5498,5499],{"class":4827},"    PipelineID",[4697,5501,4887],{"class":4827},[4697,5503,4834],{"class":4827},[4697,5505,4742],{"class":4710},[4697,5507,5431],{"class":4732},[4697,5509,4736],{"class":4710},[4697,5511,5512],{"class":5023},"\"order-flow\"",[4697,5514,4748],{"class":4710},[4697,5516,5517],{"class":5023}," \"Main order pipeline\"",[4697,5519,4767],{"class":4710},[4697,5521,5523],{"class":4699,"line":5522},19,[4697,5524,4767],{"class":4710},[4697,5526,5528],{"class":4699,"line":5527},20,[4697,5529,4918],{"emptyLinePlaceholder":4917},[4697,5531,5533,5535,5537,5540],{"class":4699,"line":5532},21,[4697,5534,4703],{"class":4702},[4697,5536,4868],{"class":4706},[4697,5538,5539],{"class":4702}," struct",[4697,5541,4727],{"class":4710},[4697,5543,5545,5549],{"class":4699,"line":5544},22,[4697,5546,5548],{"class":5547},"sBGCq","    ID",[4697,5550,5551],{"class":4706},"          string\n",[4697,5553,5555,5558],{"class":4699,"line":5554},23,[4697,5556,5557],{"class":5547},"    Total",[4697,5559,5560],{"class":4706},"       
float64\n",[4697,5562,5564,5567,5569,5571],{"class":4699,"line":5563},24,[4697,5565,5566],{"class":5547},"    ProcessedAt",[4697,5568,4890],{"class":4706},[4697,5570,4742],{"class":4710},[4697,5572,5573],{"class":4706},"Time\n",[4697,5575,5577],{"class":4699,"line":5576},25,[4697,5578,4805],{"class":4710},[4697,5580,5582],{"class":4699,"line":5581},26,[4697,5583,4918],{"emptyLinePlaceholder":4917},[4697,5585,5587,5590,5593,5595],{"class":4699,"line":5586},27,[4697,5588,5589],{"class":4702},"func",[4697,5591,5592],{"class":4732}," main",[4697,5594,4775],{"class":4710},[4697,5596,4727],{"class":4710},[4697,5598,5600,5603,5605,5607,5609,5612],{"class":4699,"line":5599},28,[4697,5601,5602],{"class":4827},"    ctx",[4697,5604,4831],{"class":4827},[4697,5606,4856],{"class":4827},[4697,5608,4742],{"class":4710},[4697,5610,5611],{"class":4732},"Background",[4697,5613,4898],{"class":4710},[4697,5615,5617],{"class":4699,"line":5616},29,[4697,5618,4918],{"emptyLinePlaceholder":4917},[4697,5620,5622],{"class":4699,"line":5621},30,[4697,5623,5624],{"class":4821},"    // Processors wrap functions\n",[4697,5626,5628,5631,5633,5635,5637,5639,5641,5643,5645,5647,5649,5652,5654,5656,5658,5660,5662,5664,5666,5668,5670,5672,5674,5676],{"class":4699,"line":5627},31,[4697,5629,5630],{"class":4827},"    
validate",[4697,5632,4831],{"class":4827},[4697,5634,4834],{"class":4827},[4697,5636,4742],{"class":4710},[4697,5638,2254],{"class":4732},[4697,5640,4736],{"class":4710},[4697,5642,4943],{"class":4827},[4697,5644,4748],{"class":4710},[4697,5646,4848],{"class":4702},[4697,5648,4736],{"class":4710},[4697,5650,5651],{"class":4714},"_",[4697,5653,4856],{"class":4706},[4697,5655,4742],{"class":4710},[4697,5657,4745],{"class":4706},[4697,5659,4748],{"class":4710},[4697,5661,4865],{"class":4714},[4697,5663,4868],{"class":4706},[4697,5665,4754],{"class":4710},[4697,5667,4757],{"class":4710},[4697,5669,4970],{"class":4706},[4697,5671,4748],{"class":4710},[4697,5673,4764],{"class":4706},[4697,5675,4754],{"class":4710},[4697,5677,4727],{"class":4710},[4697,5679,5681,5684,5686,5688,5690,5692,5694],{"class":4699,"line":5680},32,[4697,5682,5683],{"class":4903},"        if",[4697,5685,4865],{"class":4827},[4697,5687,4742],{"class":4710},[4697,5689,4991],{"class":4827},[4697,5691,4994],{"class":4903},[4697,5693,4998],{"class":4997},[4697,5695,4727],{"class":4710},[4697,5697,5699,5702,5704,5706,5708,5710,5712,5714,5716],{"class":4699,"line":5698},33,[4697,5700,5701],{"class":4903},"            return",[4697,5703,4865],{"class":4827},[4697,5705,4748],{"class":4710},[4697,5707,5013],{"class":4827},[4697,5709,4742],{"class":4710},[4697,5711,5018],{"class":4732},[4697,5713,4736],{"class":4710},[4697,5715,5024],{"class":5023},[4697,5717,4767],{"class":4710},[4697,5719,5721],{"class":4699,"line":5720},34,[4697,5722,5723],{"class":4710},"        }\n",[4697,5725,5727,5729,5731,5733],{"class":4699,"line":5726},35,[4697,5728,5006],{"class":4903},[4697,5730,4865],{"class":4827},[4697,5732,4748],{"class":4710},[4697,5734,5044],{"class":4702},[4697,5736,5738],{"class":4699,"line":5737},36,[4697,5739,5740],{"class":4710},"    
})\n",[4697,5742,5744],{"class":4699,"line":5743},37,[4697,5745,4918],{"emptyLinePlaceholder":4917},[4697,5747,5749,5752,5754,5756,5758,5760,5762,5764,5766,5768,5770,5772,5774,5776,5778,5780,5782,5784,5786,5788],{"class":4699,"line":5748},38,[4697,5750,5751],{"class":4827},"    enrich",[4697,5753,4831],{"class":4827},[4697,5755,4834],{"class":4827},[4697,5757,4742],{"class":4710},[4697,5759,2574],{"class":4732},[4697,5761,4736],{"class":4710},[4697,5763,4843],{"class":4827},[4697,5765,4748],{"class":4710},[4697,5767,4848],{"class":4702},[4697,5769,4736],{"class":4710},[4697,5771,5651],{"class":4714},[4697,5773,4856],{"class":4706},[4697,5775,4742],{"class":4710},[4697,5777,4745],{"class":4706},[4697,5779,4748],{"class":4710},[4697,5781,4865],{"class":4714},[4697,5783,4868],{"class":4706},[4697,5785,4754],{"class":4710},[4697,5787,4868],{"class":4706},[4697,5789,4727],{"class":4710},[4697,5791,5793,5796,5798,5800,5802,5804,5806,5808],{"class":4699,"line":5792},39,[4697,5794,5795],{"class":4827},"        o",[4697,5797,4742],{"class":4710},[4697,5799,4884],{"class":4827},[4697,5801,4887],{"class":4827},[4697,5803,4890],{"class":4827},[4697,5805,4742],{"class":4710},[4697,5807,4895],{"class":4732},[4697,5809,4898],{"class":4710},[4697,5811,5813,5815],{"class":4699,"line":5812},40,[4697,5814,5006],{"class":4903},[4697,5816,4907],{"class":4827},[4697,5818,5820],{"class":4699,"line":5819},41,[4697,5821,5740],{"class":4710},[4697,5823,5825],{"class":4699,"line":5824},42,[4697,5826,4918],{"emptyLinePlaceholder":4917},[4697,5828,5830,5833,5835,5837,5839,5841,5843,5846,5848,5850,5852,5854,5856,5858,5860,5862,5864,5866,5868,5870],{"class":4699,"line":5829},43,[4697,5831,5832],{"class":4827},"    
format",[4697,5834,4831],{"class":4827},[4697,5836,4834],{"class":4827},[4697,5838,4742],{"class":4710},[4697,5840,2574],{"class":4732},[4697,5842,4736],{"class":4710},[4697,5844,5845],{"class":4827},"FormatID",[4697,5847,4748],{"class":4710},[4697,5849,4848],{"class":4702},[4697,5851,4736],{"class":4710},[4697,5853,5651],{"class":4714},[4697,5855,4856],{"class":4706},[4697,5857,4742],{"class":4710},[4697,5859,4745],{"class":4706},[4697,5861,4748],{"class":4710},[4697,5863,4865],{"class":4714},[4697,5865,4868],{"class":4706},[4697,5867,4754],{"class":4710},[4697,5869,4868],{"class":4706},[4697,5871,4727],{"class":4710},[4697,5873,5875,5877,5879,5881,5883,5886,5888,5891,5893,5895,5897,5899],{"class":4699,"line":5874},44,[4697,5876,5795],{"class":4827},[4697,5878,4742],{"class":4710},[4697,5880,5123],{"class":4827},[4697,5882,4887],{"class":4827},[4697,5884,5885],{"class":4827}," strings",[4697,5887,4742],{"class":4710},[4697,5889,5890],{"class":4732},"ToUpper",[4697,5892,4736],{"class":4710},[4697,5894,5118],{"class":4827},[4697,5896,4742],{"class":4710},[4697,5898,5123],{"class":4827},[4697,5900,4767],{"class":4710},[4697,5902,5904,5906],{"class":4699,"line":5903},45,[4697,5905,5006],{"class":4903},[4697,5907,4907],{"class":4827},[4697,5909,5911],{"class":4699,"line":5910},46,[4697,5912,5740],{"class":4710},[4697,5914,5916],{"class":4699,"line":5915},47,[4697,5917,4918],{"emptyLinePlaceholder":4917},[4697,5919,5921],{"class":4699,"line":5920},48,[4697,5922,5923],{"class":4821},"    // Connectors compose processors\n",[4697,5925,5927,5930,5932,5934,5936,5938,5940,5943,5945,5947,5949,5951,5953,5956],{"class":4699,"line":5926},49,[4697,5928,5929],{"class":4827},"    
pipeline",[4697,5931,4831],{"class":4827},[4697,5933,4834],{"class":4827},[4697,5935,4742],{"class":4710},[4697,5937,5159],{"class":4732},[4697,5939,4736],{"class":4710},[4697,5941,5942],{"class":4827},"PipelineID",[4697,5944,4748],{"class":4710},[4697,5946,5169],{"class":4827},[4697,5948,4748],{"class":4710},[4697,5950,5174],{"class":4827},[4697,5952,4748],{"class":4710},[4697,5954,5955],{"class":4827}," format",[4697,5957,4767],{"class":4710},[4697,5959,5961],{"class":4699,"line":5960},50,[4697,5962,4918],{"emptyLinePlaceholder":4917},[4697,5964,5966],{"class":4699,"line":5965},51,[4697,5967,5968],{"class":4821},"    // Process\n",[4697,5970,5972,5975,5977,5980,5982,5985,5987,5990,5992,5994,5996,5998,6001,6003,6005,6008,6010,6013,6015,6018],{"class":4699,"line":5971},52,[4697,5973,5974],{"class":4827},"    result",[4697,5976,4748],{"class":4710},[4697,5978,5979],{"class":4827}," err",[4697,5981,4831],{"class":4827},[4697,5983,5984],{"class":4827}," pipeline",[4697,5986,4742],{"class":4710},[4697,5988,5989],{"class":4732},"Process",[4697,5991,4736],{"class":4710},[4697,5993,4853],{"class":4827},[4697,5995,4748],{"class":4710},[4697,5997,4868],{"class":4706},[4697,5999,6000],{"class":4710},"{",[4697,6002,5123],{"class":5547},[4697,6004,4687],{"class":4710},[4697,6006,6007],{"class":5023}," \"order-123\"",[4697,6009,4748],{"class":4710},[4697,6011,6012],{"class":5547}," Total",[4697,6014,4687],{"class":4710},[4697,6016,6017],{"class":4997}," 99.99",[4697,6019,4912],{"class":4710},[4697,6021,6023,6025,6027,6030,6033],{"class":4699,"line":6022},53,[4697,6024,4984],{"class":4903},[4697,6026,5979],{"class":4827},[4697,6028,6029],{"class":4903}," !=",[4697,6031,6032],{"class":4702}," nil",[4697,6034,4727],{"class":4710},[4697,6036,6038,6041,6044,6047,6049,6051,6054,6056,6058],{"class":4699,"line":6037},54,[4697,6039,6040],{"class":4702},"        var",[4697,6042,6043],{"class":4827}," pipeErr",[4697,6045,6046],{"class":4903}," 
*",[4697,6048,4600],{"class":4706},[4697,6050,4742],{"class":4710},[4697,6052,6053],{"class":4706},"Error",[4697,6055,4711],{"class":4710},[4697,6057,4970],{"class":4706},[4697,6059,6060],{"class":4710},"]\n",[4697,6062,6064,6066,6068,6070,6073,6075,6078,6080,6083,6086,6088],{"class":4699,"line":6063},55,[4697,6065,5683],{"class":4903},[4697,6067,5013],{"class":4827},[4697,6069,4742],{"class":4710},[4697,6071,6072],{"class":4732},"As",[4697,6074,4736],{"class":4710},[4697,6076,6077],{"class":4827},"err",[4697,6079,4748],{"class":4710},[4697,6081,6082],{"class":4903}," &",[4697,6084,6085],{"class":4827},"pipeErr",[4697,6087,4754],{"class":4710},[4697,6089,4727],{"class":4710},[4697,6091,6093,6096,6098,6101,6103,6106,6110,6113,6116,6120,6123,6125,6127,6129,6132,6134,6136,6138,6140,6142,6145,6148,6150,6152,6154],{"class":4699,"line":6092},56,[4697,6094,6095],{"class":4827},"            fmt",[4697,6097,4742],{"class":4710},[4697,6099,6100],{"class":4732},"Printf",[4697,6102,4736],{"class":4710},[4697,6104,6105],{"class":5023},"\"Failed at ",[4697,6107,6109],{"class":6108},"scyPU","%s",[4697,6111,6112],{"class":5023},": ",[4697,6114,6115],{"class":6108},"%v",[4697,6117,6119],{"class":6118},"suWN2","\\n",[4697,6121,6122],{"class":5023},"\"",[4697,6124,4748],{"class":4710},[4697,6126,5885],{"class":4827},[4697,6128,4742],{"class":4710},[4697,6130,6131],{"class":4732},"Join",[4697,6133,4736],{"class":4710},[4697,6135,6085],{"class":4827},[4697,6137,4742],{"class":4710},[4697,6139,2115],{"class":4827},[4697,6141,4748],{"class":4710},[4697,6143,6144],{"class":5023}," \"->\"",[4697,6146,6147],{"class":4710},"),",[4697,6149,6043],{"class":4827},[4697,6151,4742],{"class":4710},[4697,6153,2110],{"class":4827},[4697,6155,4767],{"class":4710},[4697,6157,6159],{"class":4699,"line":6158},57,[4697,6160,5723],{"class":4710},[4697,6162,6164],{"class":4699,"line":6163},58,[4697,6165,6166],{"class":4903},"        
return\n",[4697,6168,6170],{"class":4699,"line":6169},59,[4697,6171,5032],{"class":4710},[4697,6173,6175],{"class":4699,"line":6174},60,[4697,6176,4918],{"emptyLinePlaceholder":4917},[4697,6178,6180,6183,6185,6187,6189,6192,6194,6197,6199,6201,6203,6205,6208,6210,6212,6214,6216,6218,6220],{"class":4699,"line":6179},61,[4697,6181,6182],{"class":4827},"    fmt",[4697,6184,4742],{"class":4710},[4697,6186,6100],{"class":4732},[4697,6188,4736],{"class":4710},[4697,6190,6191],{"class":5023},"\"Processed: ",[4697,6193,6109],{"class":6108},[4697,6195,6196],{"class":5023}," at ",[4697,6198,6115],{"class":6108},[4697,6200,6119],{"class":6118},[4697,6202,6122],{"class":5023},[4697,6204,4748],{"class":4710},[4697,6206,6207],{"class":4827}," result",[4697,6209,4742],{"class":4710},[4697,6211,5123],{"class":4827},[4697,6213,4748],{"class":4710},[4697,6215,6207],{"class":4827},[4697,6217,4742],{"class":4710},[4697,6219,4884],{"class":4827},[4697,6221,4767],{"class":4710},[4697,6223,6225],{"class":4699,"line":6224},62,[4697,6226,4805],{"class":4710},[4676,6228,22],{"id":6229},"capabilities",[6231,6232,6233,6249],"table",{},[6234,6235,6236],"thead",{},[6237,6238,6239,6243,6246],"tr",{},[6240,6241,6242],"th",{},"Feature",[6240,6244,6245],{},"Description",[6240,6247,6248],{},"Docs",[6250,6251,6252,6269,6282,6296,6308,6328],"tbody",{},[6237,6253,6254,6258,6264],{},[6255,6256,6257],"td",{},"Uniform Interface",[6255,6259,6260,6261,6263],{},"Everything implements ",[4684,6262,4686],{}," for seamless composition",[6255,6265,6266],{},[4605,6267,226],{"href":6268},"docs/learn/core-concepts",[6237,6270,6271,6274,6277],{},[6255,6272,6273],{},"Type-Safe Generics",[6255,6275,6276],{},"Full compile-time checking with zero reflection",[6255,6278,6279],{},[4605,6280,322],{"href":6281},"docs/learn/architecture",[6237,6283,6284,6287,6290],{},[6255,6285,6286],{},"Rich Error Context",[6255,6288,6289],{},"Path tracking, timestamps, and input capture on 
failure",[6255,6291,6292],{},[4605,6293,6295],{"href":6294},"docs/guides/safety-reliability","Safety & Reliability",[6237,6297,6298,6301,6304],{},[6255,6299,6300],{},"Panic Recovery",[6255,6302,6303],{},"Automatic recovery with security-conscious sanitization",[6255,6305,6306],{},[4605,6307,6295],{"href":6294},[6237,6309,6310,6313,6322],{},[6255,6311,6312],{},"Signal Observability",[6255,6314,6315,6316,6321],{},"State change events via ",[4605,6317,6320],{"href":6318,"rel":6319},"https://github.com/zoobz-io/capitan",[4609],"capitan"," integration",[6255,6323,6324],{},[4605,6325,6327],{"href":6326},"docs/learn/hooks","Hooks",[6237,6329,6330,6333,6339],{},[6255,6331,6332],{},"Pipeline Schemas",[6255,6334,6335,6338],{},[4684,6336,6337],{},"Schema()"," exports structure for visualization and debugging",[6255,6340,6341],{},[4605,6342,6344],{"href":6343},"docs/reference/cheatsheet","Cheatsheet",[4676,6346,164],{"id":6347},"why-pipz",[6349,6350,6351,6358,6364,6375,6384,6390],"ul",{},[6352,6353,6354,6357],"li",{},[4809,6355,6356],{},"Type-safe"," — Full compile-time checking with generics",[6352,6359,6360,6363],{},[4809,6361,6362],{},"Composable"," — Complex pipelines from simple parts",[6352,6365,6366,6369,6370],{},[4809,6367,6368],{},"Minimal dependencies"," — Standard library plus ",[4605,6371,6374],{"href":6372,"rel":6373},"https://github.com/zoobz-io/clockz",[4609],"clockz",[6352,6376,6377,6380,6381],{},[4809,6378,6379],{},"Observable"," — Typed signals for state changes via ",[4605,6382,6320],{"href":6318,"rel":6383},[4609],[6352,6385,6386,6389],{},[4809,6387,6388],{},"Rich errors"," — Full path tracking shows exactly where failures occur",[6352,6391,6392,6395],{},[4809,6393,6394],{},"Panic-safe"," — Automatic recovery with security sanitization",[4676,6397,6399],{"id":6398},"composable-reliability","Composable Reliability",[4602,6401,6402],{},"Use pipz directly to build secure, observable reliability patterns over your 
types:",[4689,6404,6406],{"className":4691,"code":6405,"language":4693,"meta":29,"style":29},"// Your domain type\ntype Order struct { ... }\n\n// Wrap any operation with resilience\nfetch := pipz.Apply(FetchID, fetchOrder)\n\nreliable := pipz.NewSequence(ReliableID,\n    pipz.NewRateLimiter[Order](LimiterID, 100, 10),      // throttle\n    pipz.NewRetry(RetryID, fetch, 3),                    // retry on failure\n    pipz.NewTimeout(TimeoutID, fetch, 5*time.Second),    // enforce deadline\n    pipz.NewCircuitBreaker(BreakerID, fetch, 5, 30*time.Second), // prevent cascade\n)\n\n// Full error context when things fail\nresult, err := reliable.Process(ctx, order)\n",[4684,6407,6408,6413,6430,6434,6439,6464,6468,6489,6524,6550,6581,6616,6620,6624,6629],{"__ignoreMap":29},[4697,6409,6410],{"class":4699,"line":9},[4697,6411,6412],{"class":4821},"// Your domain type\n",[4697,6414,6415,6417,6419,6421,6424,6427],{"class":4699,"line":19},[4697,6416,4703],{"class":4702},[4697,6418,4868],{"class":4706},[4697,6420,5539],{"class":4702},[4697,6422,6423],{"class":4710}," {",[4697,6425,6426],{"class":4903}," ...",[4697,6428,6429],{"class":4710}," }\n",[4697,6431,6432],{"class":4699,"line":35},[4697,6433,4918],{"emptyLinePlaceholder":4917},[4697,6435,6436],{"class":4699,"line":917},[4697,6437,6438],{"class":4821},"// Wrap any operation with resilience\n",[4697,6440,6441,6444,6446,6448,6450,6452,6454,6457,6459,6462],{"class":4699,"line":4791},[4697,6442,6443],{"class":4827},"fetch",[4697,6445,4831],{"class":4827},[4697,6447,4834],{"class":4827},[4697,6449,4742],{"class":4710},[4697,6451,2254],{"class":4732},[4697,6453,4736],{"class":4710},[4697,6455,6456],{"class":4827},"FetchID",[4697,6458,4748],{"class":4710},[4697,6460,6461],{"class":4827}," 
fetchOrder",[4697,6463,4767],{"class":4710},[4697,6465,6466],{"class":4699,"line":4802},[4697,6467,4918],{"emptyLinePlaceholder":4917},[4697,6469,6470,6473,6475,6477,6479,6481,6483,6486],{"class":4699,"line":4921},[4697,6471,6472],{"class":4827},"reliable",[4697,6474,4831],{"class":4827},[4697,6476,4834],{"class":4827},[4697,6478,4742],{"class":4710},[4697,6480,5159],{"class":4732},[4697,6482,4736],{"class":4710},[4697,6484,6485],{"class":4827},"ReliableID",[4697,6487,6488],{"class":4710},",\n",[4697,6490,6491,6494,6496,6499,6501,6503,6506,6509,6511,6514,6516,6519,6521],{"class":4699,"line":4927},[4697,6492,6493],{"class":4827},"    pipz",[4697,6495,4742],{"class":4710},[4697,6497,6498],{"class":4732},"NewRateLimiter",[4697,6500,4711],{"class":4710},[4697,6502,4970],{"class":4706},[4697,6504,6505],{"class":4710},"](",[4697,6507,6508],{"class":4827},"LimiterID",[4697,6510,4748],{"class":4710},[4697,6512,6513],{"class":4997}," 100",[4697,6515,4748],{"class":4710},[4697,6517,6518],{"class":4997}," 10",[4697,6520,6147],{"class":4710},[4697,6522,6523],{"class":4821},"      // throttle\n",[4697,6525,6526,6528,6530,6532,6534,6536,6538,6541,6543,6545,6547],{"class":4699,"line":4981},[4697,6527,6493],{"class":4827},[4697,6529,4742],{"class":4710},[4697,6531,3796],{"class":4732},[4697,6533,4736],{"class":4710},[4697,6535,5208],{"class":4827},[4697,6537,4748],{"class":4710},[4697,6539,6540],{"class":4827}," fetch",[4697,6542,4748],{"class":4710},[4697,6544,5218],{"class":4997},[4697,6546,6147],{"class":4710},[4697,6548,6549],{"class":4821},"                    // retry on 
failure\n",[4697,6551,6552,6554,6556,6558,6560,6562,6564,6566,6568,6570,6572,6574,6576,6578],{"class":4699,"line":5003},[4697,6553,6493],{"class":4827},[4697,6555,4742],{"class":4710},[4697,6557,5234],{"class":4732},[4697,6559,4736],{"class":4710},[4697,6561,5239],{"class":4827},[4697,6563,4748],{"class":4710},[4697,6565,6540],{"class":4827},[4697,6567,4748],{"class":4710},[4697,6569,5249],{"class":4997},[4697,6571,5252],{"class":4827},[4697,6573,4742],{"class":4710},[4697,6575,5257],{"class":4827},[4697,6577,6147],{"class":4710},[4697,6579,6580],{"class":4821},"    // enforce deadline\n",[4697,6582,6583,6585,6587,6589,6591,6593,6595,6597,6599,6601,6603,6605,6607,6609,6611,6613],{"class":4699,"line":5029},[4697,6584,6493],{"class":4827},[4697,6586,4742],{"class":4710},[4697,6588,5282],{"class":4732},[4697,6590,4736],{"class":4710},[4697,6592,5287],{"class":4827},[4697,6594,4748],{"class":4710},[4697,6596,6540],{"class":4827},[4697,6598,4748],{"class":4710},[4697,6600,5249],{"class":4997},[4697,6602,4748],{"class":4710},[4697,6604,5301],{"class":4997},[4697,6606,5252],{"class":4827},[4697,6608,4742],{"class":4710},[4697,6610,5257],{"class":4827},[4697,6612,6147],{"class":4710},[4697,6614,6615],{"class":4821}," // prevent cascade\n",[4697,6617,6618],{"class":4699,"line":5035},[4697,6619,4767],{"class":4710},[4697,6621,6622],{"class":4699,"line":5047},[4697,6623,4918],{"emptyLinePlaceholder":4917},[4697,6625,6626],{"class":4699,"line":5052},[4697,6627,6628],{"class":4821},"// Full error context when things fail\n",[4697,6630,6631,6634,6636,6638,6640,6643,6645,6647,6649,6651,6653,6656],{"class":4699,"line":5057},[4697,6632,6633],{"class":4827},"result",[4697,6635,4748],{"class":4710},[4697,6637,5979],{"class":4827},[4697,6639,4831],{"class":4827},[4697,6641,6642],{"class":4827}," 
reliable",[4697,6644,4742],{"class":4710},[4697,6646,5989],{"class":4732},[4697,6648,4736],{"class":4710},[4697,6650,4853],{"class":4827},[4697,6652,4748],{"class":4710},[4697,6654,6655],{"class":4827}," order",[4697,6657,4767],{"class":4710},[4602,6659,6660,6661,6664],{},"Every connector emits ",[4605,6662,6320],{"href":6318,"rel":6663},[4609]," signals — circuit breaker state changes, retry attempts, rate limit hits — observable without instrumentation code.",[4676,6666,6668],{"id":6667},"extensible-application-vocabulary","Extensible Application Vocabulary",[4602,6670,6671,6672,6674],{},"Fix T to a domain type and ",[4684,6673,4686],{}," becomes your API surface:",[4689,6676,6678],{"className":4691,"code":6677,"language":4693,"meta":29,"style":29},"// Library fixes T to a domain type\ntype File struct {\n    Name     string\n    Size     int64\n    Data     []byte\n    Metadata map[string]string\n}\n\n// Library provides domain-specific primitives\nfunc Scan(scanner VirusScanner) pipz.Chainable[*File] { ... }\nfunc Thumbnail(width, height int) pipz.Chainable[*File] { ... }\nfunc Compress(quality int) pipz.Chainable[*File] { ... }\nfunc Upload(storage Storage) pipz.Chainable[*File] { ... 
}\n\n// Users extend with their own — same interface, first-class citizen\ntype Watermark struct {\n    identity pipz.Identity\n    logo     []byte\n}\nfunc (w *Watermark) Process(ctx context.Context, f *File) (*File, error) {\n    f.Data = applyWatermark(f.Data, w.logo)\n    return f, nil\n}\nfunc (w *Watermark) Identity() pipz.Identity { return w.identity }\nfunc (w *Watermark) Schema() pipz.Node       { return pipz.Node{Identity: w.identity, Type: \"processor\"} }\nfunc (w *Watermark) Close() error            { return nil }\n\n// Everything composes — library primitives and user code, indistinguishable\npipeline := pipz.NewSequence(PipelineID,\n    Scan(clamav),\n    Thumbnail(800, 600),\n    &Watermark{logo},  // user's primitive slots right in\n    Compress(85),\n    Upload(s3),\n)\n",[4684,6679,6680,6685,6696,6704,6712,6723,6741,6745,6749,6754,6794,6836,6872,6909,6913,6918,6929,6941,6950,6954,7008,7044,7054,7058,7097,7161,7191,7195,7200,7218,7231,7248,7265,7277,7289],{"__ignoreMap":29},[4697,6681,6682],{"class":4699,"line":9},[4697,6683,6684],{"class":4821},"// Library fixes T to a domain type\n",[4697,6686,6687,6689,6692,6694],{"class":4699,"line":19},[4697,6688,4703],{"class":4702},[4697,6690,6691],{"class":4706}," File",[4697,6693,5539],{"class":4702},[4697,6695,4727],{"class":4710},[4697,6697,6698,6701],{"class":4699,"line":35},[4697,6699,6700],{"class":5547},"    Name",[4697,6702,6703],{"class":4706},"     string\n",[4697,6705,6706,6709],{"class":4699,"line":917},[4697,6707,6708],{"class":5547},"    Size",[4697,6710,6711],{"class":4706},"     int64\n",[4697,6713,6714,6717,6720],{"class":4699,"line":4791},[4697,6715,6716],{"class":5547},"    Data",[4697,6718,6719],{"class":4710},"     []",[4697,6721,6722],{"class":4706},"byte\n",[4697,6724,6725,6728,6731,6733,6736,6738],{"class":4699,"line":4802},[4697,6726,6727],{"class":5547},"    Metadata",[4697,6729,6730],{"class":4702}," 
map",[4697,6732,4711],{"class":4710},[4697,6734,6735],{"class":4706},"string",[4697,6737,4721],{"class":4710},[4697,6739,6740],{"class":4706},"string\n",[4697,6742,6743],{"class":4699,"line":4921},[4697,6744,4805],{"class":4710},[4697,6746,6747],{"class":4699,"line":4927},[4697,6748,4918],{"emptyLinePlaceholder":4917},[4697,6750,6751],{"class":4699,"line":4981},[4697,6752,6753],{"class":4821},"// Library provides domain-specific primitives\n",[4697,6755,6756,6758,6761,6763,6766,6769,6771,6773,6775,6778,6780,6783,6786,6788,6790,6792],{"class":4699,"line":5003},[4697,6757,5589],{"class":4702},[4697,6759,6760],{"class":4732}," Scan",[4697,6762,4736],{"class":4710},[4697,6764,6765],{"class":4714},"scanner",[4697,6767,6768],{"class":4706}," VirusScanner",[4697,6770,4754],{"class":4710},[4697,6772,4834],{"class":4706},[4697,6774,4742],{"class":4710},[4697,6776,6777],{"class":4706},"Chainable",[4697,6779,4711],{"class":4710},[4697,6781,6782],{"class":4903},"*",[4697,6784,6785],{"class":4706},"File",[4697,6787,4721],{"class":4710},[4697,6789,6423],{"class":4710},[4697,6791,6426],{"class":4903},[4697,6793,6429],{"class":4710},[4697,6795,6796,6798,6801,6803,6806,6808,6811,6814,6816,6818,6820,6822,6824,6826,6828,6830,6832,6834],{"class":4699,"line":5029},[4697,6797,5589],{"class":4702},[4697,6799,6800],{"class":4732}," Thumbnail",[4697,6802,4736],{"class":4710},[4697,6804,6805],{"class":4714},"width",[4697,6807,4748],{"class":4710},[4697,6809,6810],{"class":4714}," height",[4697,6812,6813],{"class":4706}," 
int",[4697,6815,4754],{"class":4710},[4697,6817,4834],{"class":4706},[4697,6819,4742],{"class":4710},[4697,6821,6777],{"class":4706},[4697,6823,4711],{"class":4710},[4697,6825,6782],{"class":4903},[4697,6827,6785],{"class":4706},[4697,6829,4721],{"class":4710},[4697,6831,6423],{"class":4710},[4697,6833,6426],{"class":4903},[4697,6835,6429],{"class":4710},[4697,6837,6838,6840,6843,6845,6848,6850,6852,6854,6856,6858,6860,6862,6864,6866,6868,6870],{"class":4699,"line":5035},[4697,6839,5589],{"class":4702},[4697,6841,6842],{"class":4732}," Compress",[4697,6844,4736],{"class":4710},[4697,6846,6847],{"class":4714},"quality",[4697,6849,6813],{"class":4706},[4697,6851,4754],{"class":4710},[4697,6853,4834],{"class":4706},[4697,6855,4742],{"class":4710},[4697,6857,6777],{"class":4706},[4697,6859,4711],{"class":4710},[4697,6861,6782],{"class":4903},[4697,6863,6785],{"class":4706},[4697,6865,4721],{"class":4710},[4697,6867,6423],{"class":4710},[4697,6869,6426],{"class":4903},[4697,6871,6429],{"class":4710},[4697,6873,6874,6876,6879,6881,6884,6887,6889,6891,6893,6895,6897,6899,6901,6903,6905,6907],{"class":4699,"line":5047},[4697,6875,5589],{"class":4702},[4697,6877,6878],{"class":4732}," Upload",[4697,6880,4736],{"class":4710},[4697,6882,6883],{"class":4714},"storage",[4697,6885,6886],{"class":4706}," Storage",[4697,6888,4754],{"class":4710},[4697,6890,4834],{"class":4706},[4697,6892,4742],{"class":4710},[4697,6894,6777],{"class":4706},[4697,6896,4711],{"class":4710},[4697,6898,6782],{"class":4903},[4697,6900,6785],{"class":4706},[4697,6902,4721],{"class":4710},[4697,6904,6423],{"class":4710},[4697,6906,6426],{"class":4903},[4697,6908,6429],{"class":4710},[4697,6910,6911],{"class":4699,"line":5052},[4697,6912,4918],{"emptyLinePlaceholder":4917},[4697,6914,6915],{"class":4699,"line":5057},[4697,6916,6917],{"class":4821},"// Users extend with their own — same interface, first-class 
citizen\n",[4697,6919,6920,6922,6925,6927],{"class":4699,"line":5063},[4697,6921,4703],{"class":4702},[4697,6923,6924],{"class":4706}," Watermark",[4697,6926,5539],{"class":4702},[4697,6928,4727],{"class":4710},[4697,6930,6931,6934,6936,6938],{"class":4699,"line":5108},[4697,6932,6933],{"class":5547},"    identity",[4697,6935,4834],{"class":4706},[4697,6937,4742],{"class":4710},[4697,6939,6940],{"class":4706},"Identity\n",[4697,6942,6943,6946,6948],{"class":4699,"line":5128},[4697,6944,6945],{"class":5547},"    logo",[4697,6947,6719],{"class":4710},[4697,6949,6722],{"class":4706},[4697,6951,6952],{"class":4699,"line":5522},[4697,6953,4805],{"class":4710},[4697,6955,6956,6958,6960,6963,6965,6968,6970,6973,6975,6977,6979,6981,6983,6985,6988,6990,6992,6994,6996,6998,7000,7002,7004,7006],{"class":4699,"line":5527},[4697,6957,5589],{"class":4702},[4697,6959,4757],{"class":4710},[4697,6961,6962],{"class":4714},"w ",[4697,6964,6782],{"class":4903},[4697,6966,6967],{"class":4706},"Watermark",[4697,6969,4754],{"class":4710},[4697,6971,6972],{"class":4732}," Process",[4697,6974,4736],{"class":4710},[4697,6976,4853],{"class":4714},[4697,6978,4856],{"class":4706},[4697,6980,4742],{"class":4710},[4697,6982,4745],{"class":4706},[4697,6984,4748],{"class":4710},[4697,6986,6987],{"class":4714}," f",[4697,6989,6046],{"class":4903},[4697,6991,6785],{"class":4706},[4697,6993,4754],{"class":4710},[4697,6995,4757],{"class":4710},[4697,6997,6782],{"class":4903},[4697,6999,6785],{"class":4706},[4697,7001,4748],{"class":4710},[4697,7003,4764],{"class":4706},[4697,7005,4754],{"class":4710},[4697,7007,4727],{"class":4710},[4697,7009,7010,7013,7015,7018,7020,7023,7025,7028,7030,7032,7034,7037,7039,7042],{"class":4699,"line":5532},[4697,7011,7012],{"class":4827},"    f",[4697,7014,4742],{"class":4710},[4697,7016,7017],{"class":4827},"Data",[4697,7019,4887],{"class":4827},[4697,7021,7022],{"class":4732}," 
applyWatermark",[4697,7024,4736],{"class":4710},[4697,7026,7027],{"class":4827},"f",[4697,7029,4742],{"class":4710},[4697,7031,7017],{"class":4827},[4697,7033,4748],{"class":4710},[4697,7035,7036],{"class":4827}," w",[4697,7038,4742],{"class":4710},[4697,7040,7041],{"class":4827},"logo",[4697,7043,4767],{"class":4710},[4697,7045,7046,7048,7050,7052],{"class":4699,"line":5544},[4697,7047,4904],{"class":4903},[4697,7049,6987],{"class":4827},[4697,7051,4748],{"class":4710},[4697,7053,5044],{"class":4702},[4697,7055,7056],{"class":4699,"line":5554},[4697,7057,4805],{"class":4710},[4697,7059,7060,7062,7064,7066,7068,7070,7072,7075,7077,7079,7081,7083,7085,7088,7090,7092,7095],{"class":4699,"line":5563},[4697,7061,5589],{"class":4702},[4697,7063,4757],{"class":4710},[4697,7065,6962],{"class":4714},[4697,7067,6782],{"class":4903},[4697,7069,6967],{"class":4706},[4697,7071,4754],{"class":4710},[4697,7073,7074],{"class":4732}," Identity",[4697,7076,4775],{"class":4710},[4697,7078,4834],{"class":4706},[4697,7080,4742],{"class":4710},[4697,7082,2699],{"class":4706},[4697,7084,6423],{"class":4710},[4697,7086,7087],{"class":4903}," return",[4697,7089,7036],{"class":4827},[4697,7091,4742],{"class":4710},[4697,7093,7094],{"class":4827},"identity",[4697,7096,6429],{"class":4710},[4697,7098,7099,7101,7103,7105,7107,7109,7111,7114,7116,7118,7120,7123,7126,7128,7130,7132,7134,7136,7138,7140,7142,7144,7146,7148,7151,7153,7156,7159],{"class":4699,"line":5576},[4697,7100,5589],{"class":4702},[4697,7102,4757],{"class":4710},[4697,7104,6962],{"class":4714},[4697,7106,6782],{"class":4903},[4697,7108,6967],{"class":4706},[4697,7110,4754],{"class":4710},[4697,7112,7113],{"class":4732}," Schema",[4697,7115,4775],{"class":4710},[4697,7117,4834],{"class":4706},[4697,7119,4742],{"class":4710},[4697,7121,7122],{"class":4706},"Node",[4697,7124,7125],{"class":4710},"       
{",[4697,7127,7087],{"class":4903},[4697,7129,4834],{"class":4706},[4697,7131,4742],{"class":4710},[4697,7133,7122],{"class":4706},[4697,7135,6000],{"class":4710},[4697,7137,2699],{"class":5547},[4697,7139,4687],{"class":4710},[4697,7141,7036],{"class":4827},[4697,7143,4742],{"class":4710},[4697,7145,7094],{"class":4827},[4697,7147,4748],{"class":4710},[4697,7149,7150],{"class":5547}," Type",[4697,7152,4687],{"class":4710},[4697,7154,7155],{"class":5023}," \"processor\"",[4697,7157,7158],{"class":4710},"}",[4697,7160,6429],{"class":4710},[4697,7162,7163,7165,7167,7169,7171,7173,7175,7178,7180,7182,7185,7187,7189],{"class":4699,"line":5581},[4697,7164,5589],{"class":4702},[4697,7166,4757],{"class":4710},[4697,7168,6962],{"class":4714},[4697,7170,6782],{"class":4903},[4697,7172,6967],{"class":4706},[4697,7174,4754],{"class":4710},[4697,7176,7177],{"class":4732}," Close",[4697,7179,4775],{"class":4710},[4697,7181,4764],{"class":4706},[4697,7183,7184],{"class":4710},"            {",[4697,7186,7087],{"class":4903},[4697,7188,6032],{"class":4702},[4697,7190,6429],{"class":4710},[4697,7192,7193],{"class":4699,"line":5586},[4697,7194,4918],{"emptyLinePlaceholder":4917},[4697,7196,7197],{"class":4699,"line":5599},[4697,7198,7199],{"class":4821},"// Everything composes — library primitives and user code, indistinguishable\n",[4697,7201,7202,7204,7206,7208,7210,7212,7214,7216],{"class":4699,"line":5616},[4697,7203,5273],{"class":4827},[4697,7205,4831],{"class":4827},[4697,7207,4834],{"class":4827},[4697,7209,4742],{"class":4710},[4697,7211,5159],{"class":4732},[4697,7213,4736],{"class":4710},[4697,7215,5942],{"class":4827},[4697,7217,6488],{"class":4710},[4697,7219,7220,7223,7225,7228],{"class":4699,"line":5621},[4697,7221,7222],{"class":4732},"    Scan",[4697,7224,4736],{"class":4710},[4697,7226,7227],{"class":4827},"clamav",[4697,7229,7230],{"class":4710},"),\n",[4697,7232,7233,7236,7238,7241,7243,7246],{"class":4699,"line":5627},[4697,7234,7235],{"class":4732},"    
Thumbnail",[4697,7237,4736],{"class":4710},[4697,7239,7240],{"class":4997},"800",[4697,7242,4748],{"class":4710},[4697,7244,7245],{"class":4997}," 600",[4697,7247,7230],{"class":4710},[4697,7249,7250,7253,7255,7257,7259,7262],{"class":4699,"line":5680},[4697,7251,7252],{"class":4903},"    &",[4697,7254,6967],{"class":4706},[4697,7256,6000],{"class":4710},[4697,7258,7041],{"class":4827},[4697,7260,7261],{"class":4710},"},",[4697,7263,7264],{"class":4821},"  // user's primitive slots right in\n",[4697,7266,7267,7270,7272,7275],{"class":4699,"line":5698},[4697,7268,7269],{"class":4732},"    Compress",[4697,7271,4736],{"class":4710},[4697,7273,7274],{"class":4997},"85",[4697,7276,7230],{"class":4710},[4697,7278,7279,7282,7284,7287],{"class":4699,"line":5720},[4697,7280,7281],{"class":4732},"    Upload",[4697,7283,4736],{"class":4710},[4697,7285,7286],{"class":4827},"s3",[4697,7288,7230],{"class":4710},[4697,7290,7291],{"class":4699,"line":5726},[4697,7292,4767],{"class":4710},[4602,7294,7295],{},"The built-in primitives are the base vocabulary. Users add their own words following the same grammar. 
The interface IS the API — implement it and express whatever you want.",[4676,7297,7299],{"id":7298},"documentation","Documentation",[6349,7301,7302],{},[6352,7303,7304,7307],{},[4605,7305,6],{"href":7306},"docs/overview"," — Design philosophy and architecture",[7309,7310,4423],"h3",{"id":7311},"learn",[6349,7313,7314,7321,7328,7333,7338],{},[6352,7315,7316,7320],{},[4605,7317,7319],{"href":7318},"docs/learn/quickstart","Quickstart"," — Build your first pipeline",[6352,7322,7323,7327],{},[4605,7324,7326],{"href":7325},"docs/learn/introduction","Introduction"," — What pipz is and why it exists",[6352,7329,7330,7332],{},[4605,7331,226],{"href":6268}," — Processors, connectors, identity",[6352,7334,7335,7337],{},[4605,7336,322],{"href":6281}," — Internal design and components",[6352,7339,7340,7342],{},[4605,7341,6327],{"href":6326}," — Signal-based observability",[7309,7344,4439],{"id":7345},"guides",[6349,7347,7348,7355,7362,7368,7374,7380,7385],{},[6352,7349,7350,7354],{},[4605,7351,7353],{"href":7352},"docs/guides/connector-selection","Connector Selection"," — Choosing the right connector",[6352,7356,7357,7361],{},[4605,7358,7360],{"href":7359},"docs/guides/cloning","Cloning"," — Data isolation for parallel processing",[6352,7363,7364,7367],{},[4605,7365,135],{"href":7366},"docs/guides/best-practices"," — Patterns and recommendations",[6352,7369,7370,7373],{},[4605,7371,2805],{"href":7372},"docs/guides/testing"," — Testing pipelines",[6352,7375,7376,7379],{},[4605,7377,1471],{"href":7378},"docs/guides/performance"," — Optimization and benchmarking",[6352,7381,7382,7384],{},[4605,7383,6295],{"href":6294}," — Error handling, panics, timeouts",[6352,7386,7387,7391],{},[4605,7388,7390],{"href":7389},"docs/guides/troubleshooting","Troubleshooting"," — Common issues and solutions",[7309,7393,4458],{"id":7394},"cookbook",[6349,7396,7397,7404,7411],{},[6352,7398,7399,7403],{},[4605,7400,7402],{"href":7401},"docs/cookbook/building-pipelines","Building Pipelines"," — 
Complete pipeline with validation, resilience, observability",[6352,7405,7406,7410],{},[4605,7407,7409],{"href":7408},"docs/cookbook/library-resilience","Library Resilience"," — Expose resilience patterns via functional options",[6352,7412,7413,7417],{},[4605,7414,7416],{"href":7415},"docs/cookbook/extensible-vocabulary","Extensible Vocabulary"," — Domain-specific APIs with composable primitives",[7309,7419,4469],{"id":7420},"reference",[6349,7422,7423,7428],{},[6352,7424,7425,7427],{},[4605,7426,6344],{"href":6343}," — Quick reference for all primitives",[6352,7429,7430,7433],{},[4605,7431,2246],{"href":7432},"docs/5.reference/2.types/"," — Error, Identity, Node, Schema",[7435,7436,192],"h4",{"id":7437},"processors",[6231,7439,7440,7449],{},[6234,7441,7442],{},[6237,7443,7444,7446],{},[6240,7445,3298],{},[6240,7447,7448],{},"Purpose",[6250,7450,7451,7461,7471,7481,7491],{},[6237,7452,7453,7458],{},[6255,7454,7455],{},[4605,7456,2574],{"href":7457},"docs/reference/processors/transform",[6255,7459,7460],{},"Pure transformation (no errors)",[6237,7462,7463,7468],{},[6255,7464,7465],{},[4605,7466,2254],{"href":7467},"docs/reference/processors/apply",[6255,7469,7470],{},"Transformation that may fail",[6237,7472,7473,7478],{},[6255,7474,7475],{},[4605,7476,2337],{"href":7477},"docs/reference/processors/effect",[6255,7479,7480],{},"Side effect, passes data through",[6237,7482,7483,7488],{},[6255,7484,7485],{},[4605,7486,2497],{"href":7487},"docs/reference/processors/mutate",[6255,7489,7490],{},"Conditional modification",[6237,7492,7493,7498],{},[6255,7494,7495],{},[4605,7496,2413],{"href":7497},"docs/reference/processors/enrich",[6255,7499,7500],{},"Best-effort enhancement (errors 
ignored)",[7435,7502,197],{"id":7503},"connectors",[6231,7505,7506,7515],{},[6234,7507,7508],{},[6237,7509,7510,7513],{},[6240,7511,7512],{},"Connector",[6240,7514,7448],{},[6250,7516,7517,7528,7538,7548,7558,7568,7578,7588,7598,7608,7618,7628,7638,7648,7658,7668],{},[6237,7518,7519,7525],{},[6255,7520,7521],{},[4605,7522,7524],{"href":7523},"docs/reference/connectors/sequence","Sequence",[6255,7526,7527],{},"Run in order",[6237,7529,7530,7535],{},[6255,7531,7532],{},[4605,7533,2937],{"href":7534},"docs/reference/connectors/concurrent",[6255,7536,7537],{},"Run in parallel, collect all results",[6237,7539,7540,7545],{},[6255,7541,7542],{},[4605,7543,529],{"href":7544},"docs/reference/connectors/workerpool",[6255,7546,7547],{},"Bounded parallelism with fixed worker count",[6237,7549,7550,7555],{},[6255,7551,7552],{},[4605,7553,3857],{"href":7554},"docs/reference/connectors/scaffold",[6255,7556,7557],{},"Fire-and-forget parallel execution",[6237,7559,7560,7565],{},[6255,7561,7562],{},[4605,7563,539],{"href":7564},"docs/reference/connectors/fallback",[6255,7566,7567],{},"Try primary, fall back on error",[6237,7569,7570,7575],{},[6255,7571,7572],{},[4605,7573,3546],{"href":7574},"docs/reference/connectors/race",[6255,7576,7577],{},"First success wins",[6237,7579,7580,7585],{},[6255,7581,7582],{},[4605,7583,3055],{"href":7584},"docs/reference/connectors/contest",[6255,7586,7587],{},"First result meeting condition wins",[6237,7589,7590,7595],{},[6255,7591,7592],{},[4605,7593,4049],{"href":7594},"docs/reference/connectors/switch",[6255,7596,7597],{},"Route based on conditions",[6237,7599,7600,7605],{},[6255,7601,7602],{},[4605,7603,3263],{"href":7604},"docs/reference/connectors/filter",[6255,7606,7607],{},"Conditional execution",[6237,7609,7610,7615],{},[6255,7611,7612],{},[4605,7613,534],{"href":7614},"docs/reference/connectors/retry",[6255,7616,7617],{},"Retry on 
failure",[6237,7619,7620,7625],{},[6255,7621,7622],{},[4605,7623,549],{"href":7624},"docs/reference/connectors/backoff",[6255,7626,7627],{},"Retry with exponential delays",[6237,7629,7630,7635],{},[6255,7631,7632],{},[4605,7633,544],{"href":7634},"docs/reference/connectors/timeout",[6255,7636,7637],{},"Enforce time limits",[6237,7639,7640,7645],{},[6255,7641,7642],{},[4605,7643,3394],{"href":7644},"docs/reference/connectors/handle",[6255,7646,7647],{},"Error recovery pipeline",[6237,7649,7650,7655],{},[6255,7651,7652],{},[4605,7653,524],{"href":7654},"docs/reference/connectors/ratelimiter",[6255,7656,7657],{},"Token bucket rate limiting",[6237,7659,7660,7665],{},[6255,7661,7662],{},[4605,7663,519],{"href":7664},"docs/reference/connectors/circuitbreaker",[6255,7666,7667],{},"Prevent cascading failures",[6237,7669,7670,7675],{},[6255,7671,7672],{},[4605,7673,3487],{"href":7674},"docs/reference/connectors/pipeline",[6255,7676,7677],{},"Execution context for tracing",[4676,7679,7681],{"id":7680},"contributing","Contributing",[4602,7683,7684,7685,7689,7690,7693],{},"See ",[4605,7686,7688],{"href":7687},"CONTRIBUTING","CONTRIBUTING.md"," for guidelines. 
Run ",[4684,7691,7692],{},"make help"," for available commands.",[4676,7695,4652],{"id":7696},"license",[4602,7698,7699,7700,7702],{},"MIT License — see ",[4605,7701,4649],{"href":4649}," for details.",[7704,7705,7706],"style",{},"html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sYBwO, html code.shiki .sYBwO{--shiki-default:var(--shiki-type)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .soy-K, html code.shiki .soy-K{--shiki-default:#BBBBBB}html pre.shiki code .sBGCq, html code.shiki .sBGCq{--shiki-default:var(--shiki-property)}html pre.shiki code .scyPU, html code.shiki .scyPU{--shiki-default:var(--shiki-placeholder)}html pre.shiki code .suWN2, html code.shiki 
.suWN2{--shiki-default:var(--shiki-tag)}",{"title":29,"searchDepth":19,"depth":19,"links":7708},[7709,7710,7711,7712,7713,7714,7715,7716,7722,7723],{"id":4678,"depth":19,"text":4679},{"id":5312,"depth":19,"text":5313},{"id":5336,"depth":19,"text":5337},{"id":6229,"depth":19,"text":22},{"id":6347,"depth":19,"text":164},{"id":6398,"depth":19,"text":6399},{"id":6667,"depth":19,"text":6668},{"id":7298,"depth":19,"text":7299,"children":7717},[7718,7719,7720,7721],{"id":7311,"depth":35,"text":4423},{"id":7345,"depth":35,"text":4439},{"id":7394,"depth":35,"text":4458},{"id":7420,"depth":35,"text":4469},{"id":7680,"depth":19,"text":7681},{"id":7696,"depth":19,"text":4652},"md","book-open",{},"/readme",{"title":4593,"description":29},"readme","4AiRfdwTZFRZULAVru3UUrxOw9o1xlIRl9DEcUYwIZU",{"id":7732,"title":1461,"body":7733,"description":29,"extension":7724,"icon":8147,"meta":8148,"navigation":4917,"path":8149,"seo":8150,"stem":8151,"__hash__":8152},"resources/security.md",{"type":4595,"value":7734,"toc":8133},[7735,7739,7743,7746,7785,7789,7792,7796,7801,7804,7843,7847,7850,7899,7903,7929,7933,7936,7940,7943,8020,8024,8027,8058,8062,8065,8090,8094,8108,8112,8115,8121,8124,8130],[4598,7736,7738],{"id":7737},"security-policy","Security Policy",[4676,7740,7742],{"id":7741},"supported-versions","Supported Versions",[4602,7744,7745],{},"We release patches for security vulnerabilities. 
Which versions are eligible for receiving such patches depends on the CVSS v3.0 Rating:",[6231,7747,7748,7761],{},[6234,7749,7750],{},[6237,7751,7752,7755,7758],{},[6240,7753,7754],{},"Version",[6240,7756,7757],{},"Supported",[6240,7759,7760],{},"Status",[6250,7762,7763,7774],{},[6237,7764,7765,7768,7771],{},[6255,7766,7767],{},"latest",[6255,7769,7770],{},"✅",[6255,7772,7773],{},"Active development",[6237,7775,7776,7779,7782],{},[6255,7777,7778],{},"\u003C latest",[6255,7780,7781],{},"❌",[6255,7783,7784],{},"Security fixes only for critical issues",[4676,7786,7788],{"id":7787},"reporting-a-vulnerability","Reporting a Vulnerability",[4602,7790,7791],{},"We take the security of pipz seriously. If you have discovered a security vulnerability in this project, please report it responsibly.",[7309,7793,7795],{"id":7794},"how-to-report","How to Report",[4602,7797,7798],{},[4809,7799,7800],{},"Please DO NOT report security vulnerabilities through public GitHub issues.",[4602,7802,7803],{},"Instead, please report them via one of the following methods:",[7805,7806,7807,7830],"ol",{},[6352,7808,7809,7812,7813],{},[4809,7810,7811],{},"GitHub Security Advisories"," (Preferred)",[6349,7814,7815,7824,7827],{},[6352,7816,7817,7818,7823],{},"Go to the ",[4605,7819,7822],{"href":7820,"rel":7821},"https://github.com/zoobzio/pipz/security",[4609],"Security tab"," of this repository",[6352,7825,7826],{},"Click \"Report a vulnerability\"",[6352,7828,7829],{},"Fill out the form with details about the vulnerability",[6352,7831,7832,7835],{},[4809,7833,7834],{},"Email",[6349,7836,7837,7840],{},[6352,7838,7839],{},"Send details to the repository maintainer through GitHub profile contact information",[6352,7841,7842],{},"Use PGP encryption if possible for sensitive details",[7309,7844,7846],{"id":7845},"what-to-include","What to Include",[4602,7848,7849],{},"Please include the following information (as much as you can provide) to help us better understand the nature and scope of the 
possible issue:",[6349,7851,7852,7858,7864,7870,7876,7881,7887,7893],{},[6352,7853,7854,7857],{},[4809,7855,7856],{},"Type of issue"," (e.g., buffer overflow, SQL injection, cross-site scripting, etc.)",[6352,7859,7860,7863],{},[4809,7861,7862],{},"Full paths of source file(s)"," related to the manifestation of the issue",[6352,7865,7866,7869],{},[4809,7867,7868],{},"The location of the affected source code"," (tag/branch/commit or direct URL)",[6352,7871,7872,7875],{},[4809,7873,7874],{},"Any special configuration required"," to reproduce the issue",[6352,7877,7878,7875],{},[4809,7879,7880],{},"Step-by-step instructions",[6352,7882,7883,7886],{},[4809,7884,7885],{},"Proof-of-concept or exploit code"," (if possible)",[6352,7888,7889,7892],{},[4809,7890,7891],{},"Impact of the issue",", including how an attacker might exploit the issue",[6352,7894,7895,7898],{},[4809,7896,7897],{},"Your name and affiliation"," (optional)",[7309,7900,7902],{"id":7901},"what-to-expect","What to Expect",[6349,7904,7905,7911,7917,7923],{},[6352,7906,7907,7910],{},[4809,7908,7909],{},"Acknowledgment",": We will acknowledge receipt of your vulnerability report within 48 hours",[6352,7912,7913,7916],{},[4809,7914,7915],{},"Initial Assessment",": Within 7 days, we will provide an initial assessment of the report",[6352,7918,7919,7922],{},[4809,7920,7921],{},"Resolution Timeline",": We aim to resolve critical issues within 30 days",[6352,7924,7925,7928],{},[4809,7926,7927],{},"Disclosure",": We will coordinate with you on the disclosure timeline",[7309,7930,7932],{"id":7931},"preferred-languages","Preferred Languages",[4602,7934,7935],{},"We prefer all communications to be in English.",[4676,7937,7939],{"id":7938},"security-best-practices","Security Best Practices",[4602,7941,7942],{},"When using pipz in your applications, we recommend:",[7805,7944,7945,7967,7980,7992,8004],{},[6352,7946,7947,7950],{},[4809,7948,7949],{},"Keep Dependencies 
Updated",[4689,7951,7953],{"className":5316,"code":7952,"language":5318,"meta":29,"style":29},"go get -u github.com/zoobzio/pipz\n",[4684,7954,7955],{"__ignoreMap":29},[4697,7956,7957,7959,7961,7964],{"class":4699,"line":9},[4697,7958,4693],{"class":4732},[4697,7960,5327],{"class":5023},[4697,7962,7963],{"class":4702}," -u",[4697,7965,7966],{"class":5023}," github.com/zoobzio/pipz\n",[6352,7968,7969,7972],{},[4809,7970,7971],{},"Use Context Properly",[6349,7973,7974,7977],{},[6352,7975,7976],{},"Always pass contexts with appropriate timeouts",[6352,7978,7979],{},"Handle context cancellation in your processors",[6352,7981,7982,7984],{},[4809,7983,106],{},[6349,7985,7986,7989],{},[6352,7987,7988],{},"Never ignore errors returned by pipelines",[6352,7990,7991],{},"Implement proper fallback mechanisms",[6352,7993,7994,7996],{},[4809,7995,463],{},[6349,7997,7998,8001],{},[6352,7999,8000],{},"Validate all inputs before processing",[6352,8002,8003],{},"Use the Filter processor to sanitize data",[6352,8005,8006,8009],{},[4809,8007,8008],{},"Resource Management",[6349,8010,8011,8014,8017],{},[6352,8012,8013],{},"Use rate limiters for external API calls",[6352,8015,8016],{},"Implement circuit breakers for failing services",[6352,8018,8019],{},"Set appropriate timeouts for all operations",[4676,8021,8023],{"id":8022},"security-features","Security Features",[4602,8025,8026],{},"pipz includes several built-in security features:",[6349,8028,8029,8034,8040,8046,8052],{},[6352,8030,8031,8033],{},[4809,8032,32],{},": Generic types prevent type confusion attacks",[6352,8035,8036,8039],{},[4809,8037,8038],{},"Context Support",": Built-in cancellation and timeout support",[6352,8041,8042,8045],{},[4809,8043,8044],{},"Error Isolation",": Errors are properly wrapped and traced",[6352,8047,8048,8051],{},[4809,8049,8050],{},"Resource Controls",": Rate limiting and circuit breaker patterns",[6352,8053,8054,8057],{},[4809,8055,8056],{},"No Dependencies",": Zero external dependencies reduce 
attack surface",[4676,8059,8061],{"id":8060},"automated-security-scanning","Automated Security Scanning",[4602,8063,8064],{},"This project uses:",[6349,8066,8067,8072,8078,8084],{},[6352,8068,8069,8071],{},[4809,8070,4637],{},": GitHub's semantic code analysis for security vulnerabilities",[6352,8073,8074,8077],{},[4809,8075,8076],{},"Dependabot",": Automated dependency updates (for dev dependencies)",[6352,8079,8080,8083],{},[4809,8081,8082],{},"golangci-lint",": Static analysis including security linters",[6352,8085,8086,8089],{},[4809,8087,8088],{},"Codecov",": Coverage tracking to ensure security-critical code is tested",[4676,8091,8093],{"id":8092},"vulnerability-disclosure-policy","Vulnerability Disclosure Policy",[6349,8095,8096,8099,8102,8105],{},[6352,8097,8098],{},"Security vulnerabilities will be disclosed via GitHub Security Advisories",[6352,8100,8101],{},"We follow a 90-day disclosure timeline for non-critical issues",[6352,8103,8104],{},"Critical vulnerabilities may be disclosed sooner after patches are available",[6352,8106,8107],{},"We will credit reporters who follow responsible disclosure practices",[4676,8109,8111],{"id":8110},"credits","Credits",[4602,8113,8114],{},"We thank the following individuals for responsibly disclosing security issues:",[4602,8116,8117],{},[8118,8119,8120],"em",{},"This list is currently empty. 
Be the first to help improve our security!",[8122,8123],"hr",{},[4602,8125,8126,8129],{},[4809,8127,8128],{},"Last Updated",": 2025-08-19",[7704,8131,8132],{},"html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}",{"title":29,"searchDepth":19,"depth":19,"links":8134},[8135,8136,8142,8143,8144,8145,8146],{"id":7741,"depth":19,"text":7742},{"id":7787,"depth":19,"text":7788,"children":8137},[8138,8139,8140,8141],{"id":7794,"depth":35,"text":7795},{"id":7845,"depth":35,"text":7846},{"id":7901,"depth":35,"text":7902},{"id":7931,"depth":35,"text":7932},{"id":7938,"depth":19,"text":7939},{"id":8022,"depth":19,"text":8023},{"id":8060,"depth":19,"text":8061},{"id":8092,"depth":19,"text":8093},{"id":8110,"depth":19,"text":8111},"shield",{},"/security",{"title":1461,"description":29},"security","ZBgC58b_Pq5B71s9UnY7a7DF8ibnK_Rz4m6BeJy8-KE",{"id":8154,"title":7681,"body":8155,"description":8163,"extension":7724,"icon":4684,"meta":8712,"navigation":4917,"path":8713,"seo":8714,"stem":7680,"__hash__":8715},"resources/contributing.md",{"type":4595,"value":8156,"toc":8686},[8157,8161,8164,8168,8171,8175,8213,8217,8221,8239,8242,8258,8260,8271,8275,8279,8293,8297,8308,8312,8315,8318,8329,8333,8336,8352,8356,8384,8387,8390,8405,8408,8424,8427,8443,8447,8455,8459,8462,8506,8510,8514,8517,8528,8531,8545,8549,8552,8580,8584,8
610,8614,8641,8647,8651,8654,8665,8669,8680,8683],[4598,8158,8160],{"id":8159},"contributing-to-pipz","Contributing to pipz",[4602,8162,8163],{},"Thank you for your interest in contributing to pipz! This guide will help you get started.",[4676,8165,8167],{"id":8166},"code-of-conduct","Code of Conduct",[4602,8169,8170],{},"By participating in this project, you agree to maintain a respectful and inclusive environment for all contributors.",[4676,8172,8174],{"id":8173},"getting-started","Getting Started",[7805,8176,8177,8180,8186,8192,8195,8201,8204,8210],{},[6352,8178,8179],{},"Fork the repository",[6352,8181,8182,8183],{},"Clone your fork: ",[4684,8184,8185],{},"git clone https://github.com/yourusername/pipz.git",[6352,8187,8188,8189],{},"Create a feature branch: ",[4684,8190,8191],{},"git checkout -b feature/your-feature-name",[6352,8193,8194],{},"Make your changes",[6352,8196,8197,8198],{},"Run tests: ",[4684,8199,8200],{},"go test ./...",[6352,8202,8203],{},"Commit your changes with a descriptive message",[6352,8205,8206,8207],{},"Push to your fork: ",[4684,8208,8209],{},"git push origin feature/your-feature-name",[6352,8211,8212],{},"Create a Pull Request",[4676,8214,8216],{"id":8215},"development-guidelines","Development Guidelines",[7309,8218,8220],{"id":8219},"code-style","Code Style",[6349,8222,8223,8226,8233,8236],{},[6352,8224,8225],{},"Follow standard Go conventions",[6352,8227,8228,8229,8232],{},"Run ",[4684,8230,8231],{},"go fmt"," before committing",[6352,8234,8235],{},"Add comments for exported functions and types",[6352,8237,8238],{},"Keep functions small and focused",[7309,8240,2805],{"id":8241},"testing",[6349,8243,8244,8247,8252,8255],{},[6352,8245,8246],{},"Write tests for new functionality",[6352,8248,8249,8250],{},"Ensure all tests pass: ",[4684,8251,8200],{},[6352,8253,8254],{},"Include benchmarks for performance-critical code",[6352,8256,8257],{},"Aim for good test 
coverage",[7309,8259,7299],{"id":7298},[6349,8261,8262,8265,8268],{},[6352,8263,8264],{},"Update documentation for API changes",[6352,8266,8267],{},"Add examples for new features",[6352,8269,8270],{},"Keep doc comments clear and concise",[4676,8272,8274],{"id":8273},"types-of-contributions","Types of Contributions",[7309,8276,8278],{"id":8277},"bug-reports","Bug Reports",[6349,8280,8281,8284,8287,8290],{},[6352,8282,8283],{},"Use GitHub Issues",[6352,8285,8286],{},"Include minimal reproduction code",[6352,8288,8289],{},"Describe expected vs actual behavior",[6352,8291,8292],{},"Include Go version and OS",[7309,8294,8296],{"id":8295},"feature-requests","Feature Requests",[6349,8298,8299,8302,8305],{},[6352,8300,8301],{},"Open an issue for discussion first",[6352,8303,8304],{},"Explain the use case",[6352,8306,8307],{},"Consider backwards compatibility",[7309,8309,8311],{"id":8310},"code-contributions","Code Contributions",[7435,8313,3984],{"id":8314},"adding-processors",[4602,8316,8317],{},"New processor adapters should:",[6349,8319,8320,8323,8326],{},[6352,8321,8322],{},"Follow the existing pattern (Apply, Validate, Effect)",[6352,8324,8325],{},"Include comprehensive tests",[6352,8327,8328],{},"Add documentation with examples",[7435,8330,8332],{"id":8331},"adding-connectors","Adding Connectors",[4602,8334,8335],{},"New connectors should:",[6349,8337,8338,8343,8346,8349],{},[6352,8339,8340,8341,4724],{},"Implement the ",[4684,8342,4686],{},[6352,8344,8345],{},"Handle context cancellation properly",[6352,8347,8348],{},"Include tests for error cases",[6352,8350,8351],{},"Document behavior clearly",[4676,8353,8355],{"id":8354},"pull-request-process","Pull Request Process",[7805,8357,8358,8364,8369,8374,8379],{},[6352,8359,8360,8363],{},[4809,8361,8362],{},"Keep PRs focused"," - One feature/fix per PR",[6352,8365,8366],{},[4809,8367,8368],{},"Write descriptive commit messages",[6352,8370,8371],{},[4809,8372,8373],{},"Update tests and 
documentation",[6352,8375,8376],{},[4809,8377,8378],{},"Ensure CI passes",[6352,8380,8381],{},[4809,8382,8383],{},"Respond to review feedback",[4676,8385,2805],{"id":8386},"testing-1",[4602,8388,8389],{},"Run the full test suite:",[4689,8391,8393],{"className":5316,"code":8392,"language":5318,"meta":29,"style":29},"go test ./...\n",[4684,8394,8395],{"__ignoreMap":29},[4697,8396,8397,8399,8402],{"class":4699,"line":9},[4697,8398,4693],{"class":4732},[4697,8400,8401],{"class":5023}," test",[4697,8403,8404],{"class":5023}," ./...\n",[4602,8406,8407],{},"Run with race detection:",[4689,8409,8411],{"className":5316,"code":8410,"language":5318,"meta":29,"style":29},"go test -race ./...\n",[4684,8412,8413],{"__ignoreMap":29},[4697,8414,8415,8417,8419,8422],{"class":4699,"line":9},[4697,8416,4693],{"class":4732},[4697,8418,8401],{"class":5023},[4697,8420,8421],{"class":4702}," -race",[4697,8423,8404],{"class":5023},[4602,8425,8426],{},"Run benchmarks:",[4689,8428,8430],{"className":5316,"code":8429,"language":5318,"meta":29,"style":29},"go test -bench=. 
./...\n",[4684,8431,8432],{"__ignoreMap":29},[4697,8433,8434,8436,8438,8441],{"class":4699,"line":9},[4697,8435,4693],{"class":4732},[4697,8437,8401],{"class":5023},[4697,8439,8440],{"class":4702}," -bench=.",[4697,8442,8404],{"class":5023},[4676,8444,8446],{"id":8445},"project-structure","Project Structure",[4689,8448,8453],{"className":8449,"code":8451,"language":8452},[8450],"language-text","pipz/\n├── *.go              # Core library files\n├── *_test.go         # Tests\n├── *_bench_test.go   # Benchmarks\n├── docs/            # Documentation\n└── cmd/             # Command-line tools\n","text",[4684,8454,8451],{"__ignoreMap":29},[4676,8456,8458],{"id":8457},"commit-messages","Commit Messages",[4602,8460,8461],{},"Follow conventional commits:",[6349,8463,8464,8470,8476,8482,8488,8494,8500],{},[6352,8465,8466,8469],{},[4684,8467,8468],{},"feat:"," New feature",[6352,8471,8472,8475],{},[4684,8473,8474],{},"fix:"," Bug fix",[6352,8477,8478,8481],{},[4684,8479,8480],{},"docs:"," Documentation changes",[6352,8483,8484,8487],{},[4684,8485,8486],{},"test:"," Test additions/changes",[6352,8489,8490,8493],{},[4684,8491,8492],{},"refactor:"," Code refactoring",[6352,8495,8496,8499],{},[4684,8497,8498],{},"perf:"," Performance improvements",[6352,8501,8502,8505],{},[4684,8503,8504],{},"chore:"," Maintenance tasks",[4676,8507,8509],{"id":8508},"release-process","Release Process",[7309,8511,8513],{"id":8512},"automated-releases","Automated Releases",[4602,8515,8516],{},"This project uses automated release versioning. 
To create a release:",[7805,8518,8519,8522,8525],{},[6352,8520,8521],{},"Go to Actions → Release → Run workflow",[6352,8523,8524],{},"Leave \"Version override\" empty for automatic version inference",[6352,8526,8527],{},"Click \"Run workflow\"",[4602,8529,8530],{},"The system will:",[6349,8532,8533,8536,8539,8542],{},[6352,8534,8535],{},"Automatically determine the next version from conventional commits",[6352,8537,8538],{},"Create a git tag",[6352,8540,8541],{},"Generate release notes via GoReleaser",[6352,8543,8544],{},"Publish the release to GitHub",[7309,8546,8548],{"id":8547},"manual-release-legacy","Manual Release (Legacy)",[4602,8550,8551],{},"You can still create releases manually:",[4689,8553,8555],{"className":5316,"code":8554,"language":5318,"meta":29,"style":29},"git tag v1.2.3\ngit push origin v1.2.3\n",[4684,8556,8557,8568],{"__ignoreMap":29},[4697,8558,8559,8562,8565],{"class":4699,"line":9},[4697,8560,8561],{"class":4732},"git",[4697,8563,8564],{"class":5023}," tag",[4697,8566,8567],{"class":5023}," v1.2.3\n",[4697,8569,8570,8572,8575,8578],{"class":4699,"line":19},[4697,8571,8561],{"class":4732},[4697,8573,8574],{"class":5023}," push",[4697,8576,8577],{"class":5023}," origin",[4697,8579,8567],{"class":5023},[7309,8581,8583],{"id":8582},"known-limitations","Known Limitations",[6349,8585,8586,8592,8598],{},[6352,8587,8588,8591],{},[4809,8589,8590],{},"Protected branches",": The automated release cannot bypass branch protection rules. This is by design for security.",[6352,8593,8594,8597],{},[4809,8595,8596],{},"Concurrent releases",": Rapid successive releases may fail. 
Simply retry after a moment.",[6352,8599,8600,8603,8604,8606,8607,8609],{},[4809,8601,8602],{},"Conventional commits required",": Version inference requires conventional commit format (",[4684,8605,8468],{},", ",[4684,8608,8474],{},", etc.)",[7309,8611,8613],{"id":8612},"commit-conventions-for-versioning","Commit Conventions for Versioning",[6349,8615,8616,8621,8626,8632],{},[6352,8617,8618,8620],{},[4684,8619,8468],{}," new features (minor version: 1.2.0 → 1.3.0)",[6352,8622,8623,8625],{},[4684,8624,8474],{}," bug fixes (patch version: 1.2.0 → 1.2.1)",[6352,8627,8628,8631],{},[4684,8629,8630],{},"feat!:"," breaking changes (major version: 1.2.0 → 2.0.0)",[6352,8633,8634,8606,8636,8606,8638,8640],{},[4684,8635,8480],{},[4684,8637,8486],{},[4684,8639,8504],{}," no version change",[4602,8642,8643,8644],{},"Example: ",[4684,8645,8646],{},"feat(pipeline): add timeout support for processors",[7309,8648,8650],{"id":8649},"version-preview-on-pull-requests","Version Preview on Pull Requests",[4602,8652,8653],{},"Every PR automatically shows the next version that will be created:",[6349,8655,8656,8659,8662],{},[6352,8657,8658],{},"Check PR comments for \"Version Preview\"",[6352,8660,8661],{},"Updates automatically as you add commits",[6352,8663,8664],{},"Helps verify your commits have the intended effect",[4676,8666,8668],{"id":8667},"questions","Questions?",[6349,8670,8671,8674,8677],{},[6352,8672,8673],{},"Open an issue for questions",[6352,8675,8676],{},"Check existing issues first",[6352,8678,8679],{},"Be patient and respectful",[4602,8681,8682],{},"Thank you for contributing to pipz!",[7704,8684,8685],{},"html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: 
var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}",{"title":29,"searchDepth":19,"depth":19,"links":8687},[8688,8689,8690,8695,8700,8701,8702,8703,8704,8711],{"id":8166,"depth":19,"text":8167},{"id":8173,"depth":19,"text":8174},{"id":8215,"depth":19,"text":8216,"children":8691},[8692,8693,8694],{"id":8219,"depth":35,"text":8220},{"id":8241,"depth":35,"text":2805},{"id":7298,"depth":35,"text":7299},{"id":8273,"depth":19,"text":8274,"children":8696},[8697,8698,8699],{"id":8277,"depth":35,"text":8278},{"id":8295,"depth":35,"text":8296},{"id":8310,"depth":35,"text":8311},{"id":8354,"depth":19,"text":8355},{"id":8386,"depth":19,"text":2805},{"id":8445,"depth":19,"text":8446},{"id":8457,"depth":19,"text":8458},{"id":8508,"depth":19,"text":8509,"children":8705},[8706,8707,8708,8709,8710],{"id":8512,"depth":35,"text":8513},{"id":8547,"depth":35,"text":8548},{"id":8582,"depth":35,"text":8583},{"id":8612,"depth":35,"text":8613},{"id":8649,"depth":35,"text":8650},{"id":8667,"depth":19,"text":8668},{},"/contributing",{"title":7681,"description":8163},"w5S8DxCjnKaElwvBmJpfyKDJvGFP8aunvXvGqO_DUiQ",{"id":8717,"title":666,"author":8718,"body":8719,"description":668,"extension":7724,"meta":11409,"navigation":4917,"path":665,"published":11410,"readtime":11411,"seo":11412,"stem":4444,"tags":11413,"updated":11410,"__hash__":11415},"pipz/v1.0.7/3.guides/1.connector-selection.md","zoobzio",{"type":4595,"value":8720,"toc":11385},[8721,8724,8726,8729,8735,8738,8744,8747,8750,8758,8836,8841,8852,8857,8862,8864,8867,8873,8953,8957,8968,8973,8981,8985,8992,8994,8997,9003,9094,9098,9115,9119,9125,9129,9146,9148,9151,9157,9237,9241,9252,9256,9262,9
266,9274,9276,9279,9285,9421,9425,9436,9440,9446,9448,9451,9457,9623,9627,9638,9642,9647,9649,9652,9658,9733,9737,9748,9752,9760,9762,9765,9771,9843,9847,9858,9862,9870,9872,9875,9881,9990,9994,10005,10009,10017,10019,10022,10028,10135,10139,10150,10155,10160,10162,10165,10171,10249,10253,10264,10268,10276,10279,10450,10453,10456,10459,10684,10687,10817,10820,11019,11022,11382],[4598,8722,666],{"id":8723},"connector-selection-guide",[4602,8725,672],{},[4676,8727,675],{"id":8728},"decision-tree",[4689,8730,8733],{"className":8731,"code":8732,"language":8452},[8450],"What do you need?\n│\n├─ Sequential processing? → Sequence\n│\n├─ Parallel processing?\n│   ├─ Need all results? → Concurrent\n│   ├─ Bounded parallelism? → WorkerPool\n│   ├─ Fire and forget? → Scaffold\n│   ├─ Need fastest? → Race\n│   └─ Need best match? → Contest\n│\n├─ Conditional routing? → Switch\n│\n├─ Error handling?\n│   ├─ Have fallback? → Fallback\n│   └─ Transient errors? → Retry\n│\n└─ Resilience?\n    ├─ Prevent cascading failures? → CircuitBreaker\n    ├─ Control throughput? → RateLimiter\n    └─ Bound execution time? → Timeout\n",[4684,8734,8732],{"__ignoreMap":29},[4676,8736,680],{"id":8737},"connector-comparison-matrix",[4689,8739,8742],{"className":8740,"code":8741,"language":8452},[8450],"┌────────────────┬──────────┬────────────┬──────────┬─────────────────────┐\n│   Connector    │ Parallel │ All Run?   
│ Returns  │ Primary Use Case    │\n├────────────────┼──────────┼────────────┼──────────┼─────────────────────┤\n│ Sequence       │    No    │ Until fail │ Last     │ Step-by-step flow   │\n│ Concurrent     │   Yes    │    Yes     │ Original │ Side effects        │\n│ WorkerPool     │   Yes*   │    Yes     │ Original │ Bounded parallelism │\n│ Scaffold       │   Yes    │    Yes     │ Original │ Fire-and-forget     │\n│ Race           │   Yes    │ First wins │ First    │ Fastest response    │\n│ Contest        │   Yes    │ Until pass │ Matching │ Quality threshold   │\n│ Switch         │    No    │ One branch │ Selected │ Conditional routing │\n│ Fallback       │    No    │ On failure │ Primary  │ Error recovery      │\n│ Retry          │    No    │ Until pass │ Success  │ Transient failures  │\n│ CircuitBreaker │    No    │ If closed  │ Result   │ Cascade prevention  │\n│ RateLimiter    │    No    │ If allowed │ Result   │ Throughput control  │\n│ Timeout        │    No    │ Time bound │ Result   │ Execution limits    │\n└────────────────┴──────────┴────────────┴──────────┴─────────────────────┘\n\nLegend:\n• Parallel: Whether processors run concurrently (*WorkerPool limits concurrency)\n• All Run?: Whether all processors execute or stop early\n• Returns: What data is returned to caller\n• Primary Use Case: Main scenario for using this connector\n",[4684,8743,8741],{"__ignoreMap":29},[4676,8745,685],{"id":8746},"problem-solution-guide",[7309,8748,689],{"id":8749},"you-need-to-process-data-through-multiple-steps-in-order",[4602,8751,8752,8755,8756],{},[4809,8753,8754],{},"Solution:"," ",[4684,8757,7524],{},[4689,8759,8761],{"className":4691,"code":8760,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar PipelineID = pipz.NewIdentity(\"pipeline\", \"Sequential processing pipeline\")\n\npipeline := pipz.NewSequence[T](PipelineID, step1, step2, 
step3)\n",[4684,8762,8763,8768,8795,8799],{"__ignoreMap":29},[4697,8764,8765],{"class":4699,"line":9},[4697,8766,8767],{"class":4821},"// Define identity upfront\n",[4697,8769,8770,8772,8775,8777,8779,8781,8783,8785,8788,8790,8793],{"class":4699,"line":19},[4697,8771,5415],{"class":4702},[4697,8773,8774],{"class":4827}," PipelineID",[4697,8776,4887],{"class":4827},[4697,8778,4834],{"class":4827},[4697,8780,4742],{"class":4710},[4697,8782,5431],{"class":4732},[4697,8784,4736],{"class":4710},[4697,8786,8787],{"class":5023},"\"pipeline\"",[4697,8789,4748],{"class":4710},[4697,8791,8792],{"class":5023}," \"Sequential processing pipeline\"",[4697,8794,4767],{"class":4710},[4697,8796,8797],{"class":4699,"line":35},[4697,8798,4918],{"emptyLinePlaceholder":4917},[4697,8800,8801,8803,8805,8807,8809,8811,8813,8815,8817,8819,8821,8824,8826,8829,8831,8834],{"class":4699,"line":917},[4697,8802,5273],{"class":4827},[4697,8804,4831],{"class":4827},[4697,8806,4834],{"class":4827},[4697,8808,4742],{"class":4710},[4697,8810,5159],{"class":4732},[4697,8812,4711],{"class":4710},[4697,8814,4715],{"class":4706},[4697,8816,6505],{"class":4710},[4697,8818,5942],{"class":4827},[4697,8820,4748],{"class":4710},[4697,8822,8823],{"class":4827}," step1",[4697,8825,4748],{"class":4710},[4697,8827,8828],{"class":4827}," step2",[4697,8830,4748],{"class":4710},[4697,8832,8833],{"class":4827}," step3",[4697,8835,4767],{"class":4710},[4602,8837,8838],{},[4809,8839,8840],{},"When to use:",[6349,8842,8843,8846,8849],{},[6352,8844,8845],{},"Order matters",[6352,8847,8848],{},"Each step depends on previous",[6352,8850,8851],{},"Building up state through transformations",[4602,8853,8854],{},[4809,8855,8856],{},"Don't use when:",[6349,8858,8859],{},[6352,8860,8861],{},"Steps are independent (use Concurrent 
instead)",[8122,8863],{},[7309,8865,694],{"id":8866},"you-need-to-run-multiple-operations-in-parallel",[4602,8868,8869,8755,8871],{},[4809,8870,8754],{},[4684,8872,2937],{},[4689,8874,8876],{"className":4691,"code":8875,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar ParallelID = pipz.NewIdentity(\"parallel\", \"Executes operations in parallel\")\n\nconcurrent := pipz.NewConcurrent[T](ParallelID, proc1, proc2, proc3)\n",[4684,8877,8878,8882,8909,8913],{"__ignoreMap":29},[4697,8879,8880],{"class":4699,"line":9},[4697,8881,8767],{"class":4821},[4697,8883,8884,8886,8889,8891,8893,8895,8897,8899,8902,8904,8907],{"class":4699,"line":19},[4697,8885,5415],{"class":4702},[4697,8887,8888],{"class":4827}," ParallelID",[4697,8890,4887],{"class":4827},[4697,8892,4834],{"class":4827},[4697,8894,4742],{"class":4710},[4697,8896,5431],{"class":4732},[4697,8898,4736],{"class":4710},[4697,8900,8901],{"class":5023},"\"parallel\"",[4697,8903,4748],{"class":4710},[4697,8905,8906],{"class":5023}," \"Executes operations in parallel\"",[4697,8908,4767],{"class":4710},[4697,8910,8911],{"class":4699,"line":35},[4697,8912,4918],{"emptyLinePlaceholder":4917},[4697,8914,8915,8918,8920,8922,8924,8927,8929,8931,8933,8936,8938,8941,8943,8946,8948,8951],{"class":4699,"line":917},[4697,8916,8917],{"class":4827},"concurrent",[4697,8919,4831],{"class":4827},[4697,8921,4834],{"class":4827},[4697,8923,4742],{"class":4710},[4697,8925,8926],{"class":4732},"NewConcurrent",[4697,8928,4711],{"class":4710},[4697,8930,4715],{"class":4706},[4697,8932,6505],{"class":4710},[4697,8934,8935],{"class":4827},"ParallelID",[4697,8937,4748],{"class":4710},[4697,8939,8940],{"class":4827}," proc1",[4697,8942,4748],{"class":4710},[4697,8944,8945],{"class":4827}," proc2",[4697,8947,4748],{"class":4710},[4697,8949,8950],{"class":4827}," proc3",[4697,8952,4767],{"class":4710},[4602,8954,8955],{},[4809,8956,8840],{},[6349,8958,8959,8962,8965],{},[6352,8960,8961],{},"Operations are 
independent",[6352,8963,8964],{},"Running side effects (notifications, logging)",[6352,8966,8967],{},"Want to parallelize for performance",[4602,8969,8970],{},[4809,8971,8972],{},"Requirements:",[6349,8974,8975],{},[6352,8976,8977,8978],{},"Type T must implement ",[4684,8979,8980],{},"Cloner[T]",[4602,8982,8983],{},[4809,8984,8856],{},[6349,8986,8987,8989],{},[6352,8988,8845],{},[6352,8990,8991],{},"Operations depend on each other",[8122,8993],{},[7309,8995,699],{"id":8996},"you-need-to-run-parallel-operations-with-limited-resources",[4602,8998,8999,8755,9001],{},[4809,9000,8754],{},[4684,9002,529],{},[4689,9004,9006],{"className":4691,"code":9005,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar LimitedID = pipz.NewIdentity(\"limited\", \"Worker pool with max 3 concurrent operations\")\n\npool := pipz.NewWorkerPool[T](LimitedID, 3, proc1, proc2, proc3, proc4, proc5)\n",[4684,9007,9008,9012,9039,9043],{"__ignoreMap":29},[4697,9009,9010],{"class":4699,"line":9},[4697,9011,8767],{"class":4821},[4697,9013,9014,9016,9019,9021,9023,9025,9027,9029,9032,9034,9037],{"class":4699,"line":19},[4697,9015,5415],{"class":4702},[4697,9017,9018],{"class":4827}," LimitedID",[4697,9020,4887],{"class":4827},[4697,9022,4834],{"class":4827},[4697,9024,4742],{"class":4710},[4697,9026,5431],{"class":4732},[4697,9028,4736],{"class":4710},[4697,9030,9031],{"class":5023},"\"limited\"",[4697,9033,4748],{"class":4710},[4697,9035,9036],{"class":5023}," \"Worker pool with max 3 concurrent 
operations\"",[4697,9038,4767],{"class":4710},[4697,9040,9041],{"class":4699,"line":35},[4697,9042,4918],{"emptyLinePlaceholder":4917},[4697,9044,9045,9048,9050,9052,9054,9057,9059,9061,9063,9066,9068,9070,9072,9074,9076,9078,9080,9082,9084,9087,9089,9092],{"class":4699,"line":917},[4697,9046,9047],{"class":4827},"pool",[4697,9049,4831],{"class":4827},[4697,9051,4834],{"class":4827},[4697,9053,4742],{"class":4710},[4697,9055,9056],{"class":4732},"NewWorkerPool",[4697,9058,4711],{"class":4710},[4697,9060,4715],{"class":4706},[4697,9062,6505],{"class":4710},[4697,9064,9065],{"class":4827},"LimitedID",[4697,9067,4748],{"class":4710},[4697,9069,5218],{"class":4997},[4697,9071,4748],{"class":4710},[4697,9073,8940],{"class":4827},[4697,9075,4748],{"class":4710},[4697,9077,8945],{"class":4827},[4697,9079,4748],{"class":4710},[4697,9081,8950],{"class":4827},[4697,9083,4748],{"class":4710},[4697,9085,9086],{"class":4827}," proc4",[4697,9088,4748],{"class":4710},[4697,9090,9091],{"class":4827}," proc5",[4697,9093,4767],{"class":4710},[4602,9095,9096],{},[4809,9097,8840],{},[6349,9099,9100,9103,9106,9109,9112],{},[6352,9101,9102],{},"Resource-constrained environments",[6352,9104,9105],{},"Rate-limited external services",[6352,9107,9108],{},"Controlled database connections",[6352,9110,9111],{},"Preventing memory exhaustion",[6352,9113,9114],{},"Managing CPU-intensive operations",[4602,9116,9117],{},[4809,9118,8972],{},[6349,9120,9121],{},[6352,9122,8977,9123],{},[4684,9124,8980],{},[4602,9126,9127],{},[4809,9128,8856],{},[6349,9130,9131,9136,9141],{},[6352,9132,9133,9134,4754],{},"Need unbounded parallelism (use ",[4684,9135,2937],{},[6352,9137,9138,9139,4754],{},"Operations must complete in order (use ",[4684,9140,7524],{},[6352,9142,9143,9144,4754],{},"Fire-and-forget semantics needed (use 
",[4684,9145,3857],{},[8122,9147],{},[7309,9149,704],{"id":9150},"you-need-to-get-the-fastest-result-from-multiple-sources",[4602,9152,9153,8755,9155],{},[4809,9154,8754],{},[4684,9156,3546],{},[4689,9158,9160],{"className":4691,"code":9159,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar FastestID = pipz.NewIdentity(\"fastest\", \"Returns first successful result\")\n\nrace := pipz.NewRace[T](FastestID, primary, backup1, backup2)\n",[4684,9161,9162,9166,9193,9197],{"__ignoreMap":29},[4697,9163,9164],{"class":4699,"line":9},[4697,9165,8767],{"class":4821},[4697,9167,9168,9170,9173,9175,9177,9179,9181,9183,9186,9188,9191],{"class":4699,"line":19},[4697,9169,5415],{"class":4702},[4697,9171,9172],{"class":4827}," FastestID",[4697,9174,4887],{"class":4827},[4697,9176,4834],{"class":4827},[4697,9178,4742],{"class":4710},[4697,9180,5431],{"class":4732},[4697,9182,4736],{"class":4710},[4697,9184,9185],{"class":5023},"\"fastest\"",[4697,9187,4748],{"class":4710},[4697,9189,9190],{"class":5023}," \"Returns first successful result\"",[4697,9192,4767],{"class":4710},[4697,9194,9195],{"class":4699,"line":35},[4697,9196,4918],{"emptyLinePlaceholder":4917},[4697,9198,9199,9202,9204,9206,9208,9211,9213,9215,9217,9220,9222,9225,9227,9230,9232,9235],{"class":4699,"line":917},[4697,9200,9201],{"class":4827},"race",[4697,9203,4831],{"class":4827},[4697,9205,4834],{"class":4827},[4697,9207,4742],{"class":4710},[4697,9209,9210],{"class":4732},"NewRace",[4697,9212,4711],{"class":4710},[4697,9214,4715],{"class":4706},[4697,9216,6505],{"class":4710},[4697,9218,9219],{"class":4827},"FastestID",[4697,9221,4748],{"class":4710},[4697,9223,9224],{"class":4827}," primary",[4697,9226,4748],{"class":4710},[4697,9228,9229],{"class":4827}," backup1",[4697,9231,4748],{"class":4710},[4697,9233,9234],{"class":4827}," backup2",[4697,9236,4767],{"class":4710},[4602,9238,9239],{},[4809,9240,8840],{},[6349,9242,9243,9246,9249],{},[6352,9244,9245],{},"Multiple sources for same 
data",[6352,9247,9248],{},"Want lowest latency",[6352,9250,9251],{},"Have fallback options",[4602,9253,9254],{},[4809,9255,8972],{},[6349,9257,9258],{},[6352,9259,8977,9260],{},[4684,9261,8980],{},[4602,9263,9264],{},[4809,9265,8856],{},[6349,9267,9268,9271],{},[6352,9269,9270],{},"Need all results",[6352,9272,9273],{},"Sources have different costs",[8122,9275],{},[7309,9277,709],{"id":9278},"you-need-to-find-first-result-meeting-quality-criteria",[4602,9280,9281,8755,9283],{},[4809,9282,8754],{},[4684,9284,3055],{},[4689,9286,9288],{"className":4691,"code":9287,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar BestID = pipz.NewIdentity(\"best\", \"Finds first result with score > 0.9\")\n\ncontest := pipz.NewContest[T](BestID,\n    func(ctx context.Context, result T) bool {\n        return result.Score > 0.9\n    },\n    model1, model2, model3,\n)\n",[4684,9289,9290,9294,9321,9325,9350,9378,9395,9400,9417],{"__ignoreMap":29},[4697,9291,9292],{"class":4699,"line":9},[4697,9293,8767],{"class":4821},[4697,9295,9296,9298,9301,9303,9305,9307,9309,9311,9314,9316,9319],{"class":4699,"line":19},[4697,9297,5415],{"class":4702},[4697,9299,9300],{"class":4827}," BestID",[4697,9302,4887],{"class":4827},[4697,9304,4834],{"class":4827},[4697,9306,4742],{"class":4710},[4697,9308,5431],{"class":4732},[4697,9310,4736],{"class":4710},[4697,9312,9313],{"class":5023},"\"best\"",[4697,9315,4748],{"class":4710},[4697,9317,9318],{"class":5023}," \"Finds first result with score > 
0.9\"",[4697,9320,4767],{"class":4710},[4697,9322,9323],{"class":4699,"line":35},[4697,9324,4918],{"emptyLinePlaceholder":4917},[4697,9326,9327,9330,9332,9334,9336,9339,9341,9343,9345,9348],{"class":4699,"line":917},[4697,9328,9329],{"class":4827},"contest",[4697,9331,4831],{"class":4827},[4697,9333,4834],{"class":4827},[4697,9335,4742],{"class":4710},[4697,9337,9338],{"class":4732},"NewContest",[4697,9340,4711],{"class":4710},[4697,9342,4715],{"class":4706},[4697,9344,6505],{"class":4710},[4697,9346,9347],{"class":4827},"BestID",[4697,9349,6488],{"class":4710},[4697,9351,9352,9355,9357,9359,9361,9363,9365,9367,9369,9371,9373,9376],{"class":4699,"line":4791},[4697,9353,9354],{"class":4702},"    func",[4697,9356,4736],{"class":4710},[4697,9358,4853],{"class":4714},[4697,9360,4856],{"class":4706},[4697,9362,4742],{"class":4710},[4697,9364,4745],{"class":4706},[4697,9366,4748],{"class":4710},[4697,9368,6207],{"class":4714},[4697,9370,4751],{"class":4706},[4697,9372,4754],{"class":4710},[4697,9374,9375],{"class":4706}," bool",[4697,9377,4727],{"class":4710},[4697,9379,9380,9382,9384,9386,9389,9392],{"class":4699,"line":4802},[4697,9381,5006],{"class":4903},[4697,9383,6207],{"class":4827},[4697,9385,4742],{"class":4710},[4697,9387,9388],{"class":4827},"Score",[4697,9390,9391],{"class":4903}," >",[4697,9393,9394],{"class":4997}," 0.9\n",[4697,9396,9397],{"class":4699,"line":4921},[4697,9398,9399],{"class":4710},"    },\n",[4697,9401,9402,9405,9407,9410,9412,9415],{"class":4699,"line":4927},[4697,9403,9404],{"class":4827},"    model1",[4697,9406,4748],{"class":4710},[4697,9408,9409],{"class":4827}," model2",[4697,9411,4748],{"class":4710},[4697,9413,9414],{"class":4827}," model3",[4697,9416,6488],{"class":4710},[4697,9418,9419],{"class":4699,"line":4981},[4697,9420,4767],{"class":4710},[4602,9422,9423],{},[4809,9424,8840],{},[6349,9426,9427,9430,9433],{},[6352,9428,9429],{},"Quality matters more than speed",[6352,9431,9432],{},"Have multiple 
approaches",[6352,9434,9435],{},"Want first acceptable result",[4602,9437,9438],{},[4809,9439,8972],{},[6349,9441,9442],{},[6352,9443,8977,9444],{},[4684,9445,8980],{},[8122,9447],{},[7309,9449,714],{"id":9450},"you-need-to-route-data-based-on-conditions",[4602,9452,9453,8755,9455],{},[4809,9454,8754],{},[4684,9456,4049],{},[4689,9458,9460],{"className":4691,"code":9459,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar RouterID = pipz.NewIdentity(\"router\", \"Routes data based on premium status\")\n\nswitch := pipz.NewSwitch[T](RouterID,\n    func(ctx context.Context, data T) string {\n        if data.Premium {\n            return \"premium\"\n        }\n        return \"standard\"\n    },\n).\nAddRoute(\"premium\", premiumPipeline).\nAddRoute(\"standard\", standardPipeline)\n",[4684,9461,9462,9466,9493,9497,9522,9550,9563,9570,9574,9581,9585,9590,9607],{"__ignoreMap":29},[4697,9463,9464],{"class":4699,"line":9},[4697,9465,8767],{"class":4821},[4697,9467,9468,9470,9473,9475,9477,9479,9481,9483,9486,9488,9491],{"class":4699,"line":19},[4697,9469,5415],{"class":4702},[4697,9471,9472],{"class":4827}," RouterID",[4697,9474,4887],{"class":4827},[4697,9476,4834],{"class":4827},[4697,9478,4742],{"class":4710},[4697,9480,5431],{"class":4732},[4697,9482,4736],{"class":4710},[4697,9484,9485],{"class":5023},"\"router\"",[4697,9487,4748],{"class":4710},[4697,9489,9490],{"class":5023}," \"Routes data based on premium 
status\"",[4697,9492,4767],{"class":4710},[4697,9494,9495],{"class":4699,"line":35},[4697,9496,4918],{"emptyLinePlaceholder":4917},[4697,9498,9499,9502,9504,9506,9508,9511,9513,9515,9517,9520],{"class":4699,"line":917},[4697,9500,9501],{"class":4903},"switch",[4697,9503,4831],{"class":4827},[4697,9505,4834],{"class":4827},[4697,9507,4742],{"class":4710},[4697,9509,9510],{"class":4732},"NewSwitch",[4697,9512,4711],{"class":4710},[4697,9514,4715],{"class":4706},[4697,9516,6505],{"class":4710},[4697,9518,9519],{"class":4827},"RouterID",[4697,9521,6488],{"class":4710},[4697,9523,9524,9526,9528,9530,9532,9534,9536,9538,9541,9543,9545,9548],{"class":4699,"line":4791},[4697,9525,9354],{"class":4702},[4697,9527,4736],{"class":4710},[4697,9529,4853],{"class":4714},[4697,9531,4856],{"class":4706},[4697,9533,4742],{"class":4710},[4697,9535,4745],{"class":4706},[4697,9537,4748],{"class":4710},[4697,9539,9540],{"class":4714}," data",[4697,9542,4751],{"class":4706},[4697,9544,4754],{"class":4710},[4697,9546,9547],{"class":4706}," string",[4697,9549,4727],{"class":4710},[4697,9551,9552,9554,9556,9558,9561],{"class":4699,"line":4802},[4697,9553,5683],{"class":4903},[4697,9555,9540],{"class":4827},[4697,9557,4742],{"class":4710},[4697,9559,9560],{"class":4827},"Premium",[4697,9562,4727],{"class":4710},[4697,9564,9565,9567],{"class":4699,"line":4921},[4697,9566,5701],{"class":4903},[4697,9568,9569],{"class":5023}," \"premium\"\n",[4697,9571,9572],{"class":4699,"line":4927},[4697,9573,5723],{"class":4710},[4697,9575,9576,9578],{"class":4699,"line":4981},[4697,9577,5006],{"class":4903},[4697,9579,9580],{"class":5023}," 
\"standard\"\n",[4697,9582,9583],{"class":4699,"line":5003},[4697,9584,9399],{"class":4710},[4697,9586,9587],{"class":4699,"line":5029},[4697,9588,9589],{"class":4710},").\n",[4697,9591,9592,9595,9597,9600,9602,9605],{"class":4699,"line":5035},[4697,9593,9594],{"class":4732},"AddRoute",[4697,9596,4736],{"class":4710},[4697,9598,9599],{"class":5023},"\"premium\"",[4697,9601,4748],{"class":4710},[4697,9603,9604],{"class":4827}," premiumPipeline",[4697,9606,9589],{"class":4710},[4697,9608,9609,9611,9613,9616,9618,9621],{"class":4699,"line":5047},[4697,9610,9594],{"class":4732},[4697,9612,4736],{"class":4710},[4697,9614,9615],{"class":5023},"\"standard\"",[4697,9617,4748],{"class":4710},[4697,9619,9620],{"class":4827}," standardPipeline",[4697,9622,4767],{"class":4710},[4602,9624,9625],{},[4809,9626,8840],{},[6349,9628,9629,9632,9635],{},[6352,9630,9631],{},"Different processing for different data types",[6352,9633,9634],{},"Conditional logic",[6352,9636,9637],{},"A/B testing",[4602,9639,9640],{},[4809,9641,8856],{},[6349,9643,9644],{},[6352,9645,9646],{},"All data follows same path",[8122,9648],{},[7309,9650,719],{"id":9651},"you-need-to-recover-from-errors-gracefully",[4602,9653,9654,8755,9656],{},[4809,9655,8754],{},[4684,9657,539],{},[4689,9659,9661],{"className":4691,"code":9660,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar SafeID = pipz.NewIdentity(\"safe\", \"Uses fallback on error\")\n\nfallback := pipz.NewFallback[T](SafeID, riskyOperation, safeDefault)\n",[4684,9662,9663,9667,9694,9698],{"__ignoreMap":29},[4697,9664,9665],{"class":4699,"line":9},[4697,9666,8767],{"class":4821},[4697,9668,9669,9671,9674,9676,9678,9680,9682,9684,9687,9689,9692],{"class":4699,"line":19},[4697,9670,5415],{"class":4702},[4697,9672,9673],{"class":4827}," 
SafeID",[4697,9675,4887],{"class":4827},[4697,9677,4834],{"class":4827},[4697,9679,4742],{"class":4710},[4697,9681,5431],{"class":4732},[4697,9683,4736],{"class":4710},[4697,9685,9686],{"class":5023},"\"safe\"",[4697,9688,4748],{"class":4710},[4697,9690,9691],{"class":5023}," \"Uses fallback on error\"",[4697,9693,4767],{"class":4710},[4697,9695,9696],{"class":4699,"line":35},[4697,9697,4918],{"emptyLinePlaceholder":4917},[4697,9699,9700,9703,9705,9707,9709,9712,9714,9716,9718,9721,9723,9726,9728,9731],{"class":4699,"line":917},[4697,9701,9702],{"class":4827},"fallback",[4697,9704,4831],{"class":4827},[4697,9706,4834],{"class":4827},[4697,9708,4742],{"class":4710},[4697,9710,9711],{"class":4732},"NewFallback",[4697,9713,4711],{"class":4710},[4697,9715,4715],{"class":4706},[4697,9717,6505],{"class":4710},[4697,9719,9720],{"class":4827},"SafeID",[4697,9722,4748],{"class":4710},[4697,9724,9725],{"class":4827}," riskyOperation",[4697,9727,4748],{"class":4710},[4697,9729,9730],{"class":4827}," safeDefault",[4697,9732,4767],{"class":4710},[4602,9734,9735],{},[4809,9736,8840],{},[6349,9738,9739,9742,9745],{},[6352,9740,9741],{},"Have a safe default",[6352,9743,9744],{},"Want graceful degradation",[6352,9746,9747],{},"Errors are expected",[4602,9749,9750],{},[4809,9751,8856],{},[6349,9753,9754,9757],{},[6352,9755,9756],{},"Errors should stop processing",[6352,9758,9759],{},"No reasonable fallback exists",[8122,9761],{},[7309,9763,724],{"id":9764},"you-need-to-retry-failed-operations",[4602,9766,9767,8755,9769],{},[4809,9768,8754],{},[4684,9770,534],{},[4689,9772,9774],{"className":4691,"code":9773,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar ReliableID = pipz.NewIdentity(\"reliable\", \"Retries up to 3 times on failure\")\n\nretry := pipz.NewRetry[T](ReliableID, processor, 
3)\n",[4684,9775,9776,9780,9807,9811],{"__ignoreMap":29},[4697,9777,9778],{"class":4699,"line":9},[4697,9779,8767],{"class":4821},[4697,9781,9782,9784,9787,9789,9791,9793,9795,9797,9800,9802,9805],{"class":4699,"line":19},[4697,9783,5415],{"class":4702},[4697,9785,9786],{"class":4827}," ReliableID",[4697,9788,4887],{"class":4827},[4697,9790,4834],{"class":4827},[4697,9792,4742],{"class":4710},[4697,9794,5431],{"class":4732},[4697,9796,4736],{"class":4710},[4697,9798,9799],{"class":5023},"\"reliable\"",[4697,9801,4748],{"class":4710},[4697,9803,9804],{"class":5023}," \"Retries up to 3 times on failure\"",[4697,9806,4767],{"class":4710},[4697,9808,9809],{"class":4699,"line":35},[4697,9810,4918],{"emptyLinePlaceholder":4917},[4697,9812,9813,9816,9818,9820,9822,9824,9826,9828,9830,9832,9834,9837,9839,9841],{"class":4699,"line":917},[4697,9814,9815],{"class":4827},"retry",[4697,9817,4831],{"class":4827},[4697,9819,4834],{"class":4827},[4697,9821,4742],{"class":4710},[4697,9823,3796],{"class":4732},[4697,9825,4711],{"class":4710},[4697,9827,4715],{"class":4706},[4697,9829,6505],{"class":4710},[4697,9831,6485],{"class":4827},[4697,9833,4748],{"class":4710},[4697,9835,9836],{"class":4827}," processor",[4697,9838,4748],{"class":4710},[4697,9840,5218],{"class":4997},[4697,9842,4767],{"class":4710},[4602,9844,9845],{},[4809,9846,8840],{},[6349,9848,9849,9852,9855],{},[6352,9850,9851],{},"Transient errors (network, temporary unavailability)",[6352,9853,9854],{},"External service calls",[6352,9856,9857],{},"Database operations",[4602,9859,9860],{},[4809,9861,8856],{},[6349,9863,9864,9867],{},[6352,9865,9866],{},"Errors are permanent (validation failures)",[6352,9868,9869],{},"No backoff needed (can overwhelm service)",[8122,9871],{},[7309,9873,729],{"id":9874},"you-need-to-prevent-cascading-failures",[4602,9876,9877,8755,9879],{},[4809,9878,8754],{},[4684,9880,519],{},[4689,9882,9884],{"className":4691,"code":9883,"language":4693,"meta":29,"style":29},"// Define identity 
upfront\nvar ProtectedID = pipz.NewIdentity(\"protected\", \"Circuit breaker with 5 failure threshold\")\n\nbreaker := pipz.NewCircuitBreaker[T](ProtectedID, processor,\n    pipz.WithCircuitBreakerThreshold(5),\n    pipz.WithCircuitBreakerWindow(time.Minute),\n)\n",[4684,9885,9886,9890,9917,9921,9949,9965,9986],{"__ignoreMap":29},[4697,9887,9888],{"class":4699,"line":9},[4697,9889,8767],{"class":4821},[4697,9891,9892,9894,9897,9899,9901,9903,9905,9907,9910,9912,9915],{"class":4699,"line":19},[4697,9893,5415],{"class":4702},[4697,9895,9896],{"class":4827}," ProtectedID",[4697,9898,4887],{"class":4827},[4697,9900,4834],{"class":4827},[4697,9902,4742],{"class":4710},[4697,9904,5431],{"class":4732},[4697,9906,4736],{"class":4710},[4697,9908,9909],{"class":5023},"\"protected\"",[4697,9911,4748],{"class":4710},[4697,9913,9914],{"class":5023}," \"Circuit breaker with 5 failure threshold\"",[4697,9916,4767],{"class":4710},[4697,9918,9919],{"class":4699,"line":35},[4697,9920,4918],{"emptyLinePlaceholder":4917},[4697,9922,9923,9926,9928,9930,9932,9934,9936,9938,9940,9943,9945,9947],{"class":4699,"line":917},[4697,9924,9925],{"class":4827},"breaker",[4697,9927,4831],{"class":4827},[4697,9929,4834],{"class":4827},[4697,9931,4742],{"class":4710},[4697,9933,5282],{"class":4732},[4697,9935,4711],{"class":4710},[4697,9937,4715],{"class":4706},[4697,9939,6505],{"class":4710},[4697,9941,9942],{"class":4827},"ProtectedID",[4697,9944,4748],{"class":4710},[4697,9946,9836],{"class":4827},[4697,9948,6488],{"class":4710},[4697,9950,9951,9953,9955,9958,9960,9963],{"class":4699,"line":4791},[4697,9952,6493],{"class":4827},[4697,9954,4742],{"class":4710},[4697,9956,9957],{"class":4732},"WithCircuitBreakerThreshold",[4697,9959,4736],{"class":4710},[4697,9961,9962],{"class":4997},"5",[4697,9964,7230],{"class":4710},[4697,9966,9967,9969,9971,9974,9976,9979,9981,9984],{"class":4699,"line":4802},[4697,9968,6493],{"class":4827},[4697,9970,4742],{"class":4710},[4697,9972,9973],{"class":4732},"WithCi
rcuitBreakerWindow",[4697,9975,4736],{"class":4710},[4697,9977,9978],{"class":4827},"time",[4697,9980,4742],{"class":4710},[4697,9982,9983],{"class":4827},"Minute",[4697,9985,7230],{"class":4710},[4697,9987,9988],{"class":4699,"line":4921},[4697,9989,4767],{"class":4710},[4602,9991,9992],{},[4809,9993,8840],{},[6349,9995,9996,9999,10002],{},[6352,9997,9998],{},"Calling external services",[6352,10000,10001],{},"Protecting downstream systems",[6352,10003,10004],{},"Failing fast is acceptable",[4602,10006,10007],{},[4809,10008,8856],{},[6349,10010,10011,10014],{},[6352,10012,10013],{},"Every request must be attempted",[6352,10015,10016],{},"Failures are independent",[8122,10018],{},[7309,10020,734],{"id":10021},"you-need-to-control-processing-rate",[4602,10023,10024,8755,10026],{},[4809,10025,8754],{},[4684,10027,524],{},[4689,10029,10031],{"className":4691,"code":10030,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar ThrottledID = pipz.NewIdentity(\"throttled\", \"Rate limited to 100 per second\")\n\nlimiter := pipz.NewRateLimiter[T](ThrottledID, processor,\n    pipz.WithRateLimiterRate(100),\n    pipz.WithRateLimiterPeriod(time.Second),\n)\n",[4684,10032,10033,10037,10064,10068,10096,10112,10131],{"__ignoreMap":29},[4697,10034,10035],{"class":4699,"line":9},[4697,10036,8767],{"class":4821},[4697,10038,10039,10041,10044,10046,10048,10050,10052,10054,10057,10059,10062],{"class":4699,"line":19},[4697,10040,5415],{"class":4702},[4697,10042,10043],{"class":4827}," ThrottledID",[4697,10045,4887],{"class":4827},[4697,10047,4834],{"class":4827},[4697,10049,4742],{"class":4710},[4697,10051,5431],{"class":4732},[4697,10053,4736],{"class":4710},[4697,10055,10056],{"class":5023},"\"throttled\"",[4697,10058,4748],{"class":4710},[4697,10060,10061],{"class":5023}," \"Rate limited to 100 per 
second\"",[4697,10063,4767],{"class":4710},[4697,10065,10066],{"class":4699,"line":35},[4697,10067,4918],{"emptyLinePlaceholder":4917},[4697,10069,10070,10073,10075,10077,10079,10081,10083,10085,10087,10090,10092,10094],{"class":4699,"line":917},[4697,10071,10072],{"class":4827},"limiter",[4697,10074,4831],{"class":4827},[4697,10076,4834],{"class":4827},[4697,10078,4742],{"class":4710},[4697,10080,6498],{"class":4732},[4697,10082,4711],{"class":4710},[4697,10084,4715],{"class":4706},[4697,10086,6505],{"class":4710},[4697,10088,10089],{"class":4827},"ThrottledID",[4697,10091,4748],{"class":4710},[4697,10093,9836],{"class":4827},[4697,10095,6488],{"class":4710},[4697,10097,10098,10100,10102,10105,10107,10110],{"class":4699,"line":4791},[4697,10099,6493],{"class":4827},[4697,10101,4742],{"class":4710},[4697,10103,10104],{"class":4732},"WithRateLimiterRate",[4697,10106,4736],{"class":4710},[4697,10108,10109],{"class":4997},"100",[4697,10111,7230],{"class":4710},[4697,10113,10114,10116,10118,10121,10123,10125,10127,10129],{"class":4699,"line":4802},[4697,10115,6493],{"class":4827},[4697,10117,4742],{"class":4710},[4697,10119,10120],{"class":4732},"WithRateLimiterPeriod",[4697,10122,4736],{"class":4710},[4697,10124,9978],{"class":4827},[4697,10126,4742],{"class":4710},[4697,10128,5257],{"class":4827},[4697,10130,7230],{"class":4710},[4697,10132,10133],{"class":4699,"line":4921},[4697,10134,4767],{"class":4710},[4602,10136,10137],{},[4809,10138,8840],{},[6349,10140,10141,10144,10147],{},[6352,10142,10143],{},"API rate limits",[6352,10145,10146],{},"Resource protection",[6352,10148,10149],{},"Cost control",[4602,10151,10152],{},[4809,10153,10154],{},"Important:",[6349,10156,10157],{},[6352,10158,10159],{},"Must use singleton instance (don't create per 
request)",[8122,10161],{},[7309,10163,739],{"id":10164},"you-need-to-bound-execution-time",[4602,10166,10167,8755,10169],{},[4809,10168,8754],{},[4684,10170,544],{},[4689,10172,10174],{"className":4691,"code":10173,"language":4693,"meta":29,"style":29},"// Define identity upfront\nvar BoundedID = pipz.NewIdentity(\"bounded\", \"Times out after 5 seconds\")\n\ntimeout := pipz.NewTimeout[T](BoundedID, processor, 5*time.Second)\n",[4684,10175,10176,10180,10207,10211],{"__ignoreMap":29},[4697,10177,10178],{"class":4699,"line":9},[4697,10179,8767],{"class":4821},[4697,10181,10182,10184,10187,10189,10191,10193,10195,10197,10200,10202,10205],{"class":4699,"line":19},[4697,10183,5415],{"class":4702},[4697,10185,10186],{"class":4827}," BoundedID",[4697,10188,4887],{"class":4827},[4697,10190,4834],{"class":4827},[4697,10192,4742],{"class":4710},[4697,10194,5431],{"class":4732},[4697,10196,4736],{"class":4710},[4697,10198,10199],{"class":5023},"\"bounded\"",[4697,10201,4748],{"class":4710},[4697,10203,10204],{"class":5023}," \"Times out after 5 
seconds\"",[4697,10206,4767],{"class":4710},[4697,10208,10209],{"class":4699,"line":35},[4697,10210,4918],{"emptyLinePlaceholder":4917},[4697,10212,10213,10216,10218,10220,10222,10224,10226,10228,10230,10233,10235,10237,10239,10241,10243,10245,10247],{"class":4699,"line":917},[4697,10214,10215],{"class":4827},"timeout",[4697,10217,4831],{"class":4827},[4697,10219,4834],{"class":4827},[4697,10221,4742],{"class":4710},[4697,10223,5234],{"class":4732},[4697,10225,4711],{"class":4710},[4697,10227,4715],{"class":4706},[4697,10229,6505],{"class":4710},[4697,10231,10232],{"class":4827},"BoundedID",[4697,10234,4748],{"class":4710},[4697,10236,9836],{"class":4827},[4697,10238,4748],{"class":4710},[4697,10240,5249],{"class":4997},[4697,10242,5252],{"class":4827},[4697,10244,4742],{"class":4710},[4697,10246,5257],{"class":4827},[4697,10248,4767],{"class":4710},[4602,10250,10251],{},[4809,10252,8840],{},[6349,10254,10255,10258,10261],{},[6352,10256,10257],{},"Network operations",[6352,10259,10260],{},"User-facing APIs",[6352,10262,10263],{},"SLA requirements",[4602,10265,10266],{},[4809,10267,8856],{},[6349,10269,10270,10273],{},[6352,10271,10272],{},"Operations must complete",[6352,10274,10275],{},"Time is unpredictable",[4676,10277,744],{"id":10278},"quick-comparison",[6231,10280,10281,10299],{},[6234,10282,10283],{},[6237,10284,10285,10287,10290,10293,10296],{},[6240,10286,7512],{},[6240,10288,10289],{},"Parallel?",[6240,10291,10292],{},"Can Fail?",[6240,10294,10295],{},"Needs 
Clone?",[6240,10297,10298],{},"Stateful?",[6250,10300,10301,10315,10327,10340,10353,10365,10377,10389,10402,10414,10426,10438],{},[6237,10302,10303,10305,10308,10311,10313],{},[6255,10304,7524],{},[6255,10306,10307],{},"No",[6255,10309,10310],{},"Yes",[6255,10312,10307],{},[6255,10314,10307],{},[6237,10316,10317,10319,10321,10323,10325],{},[6255,10318,2937],{},[6255,10320,10310],{},[6255,10322,10310],{},[6255,10324,10310],{},[6255,10326,10307],{},[6237,10328,10329,10331,10334,10336,10338],{},[6255,10330,529],{},[6255,10332,10333],{},"Limited",[6255,10335,10310],{},[6255,10337,10310],{},[6255,10339,10307],{},[6237,10341,10342,10344,10346,10349,10351],{},[6255,10343,3857],{},[6255,10345,10310],{},[6255,10347,10348],{},"No**",[6255,10350,10310],{},[6255,10352,10307],{},[6237,10354,10355,10357,10359,10361,10363],{},[6255,10356,3546],{},[6255,10358,10310],{},[6255,10360,10310],{},[6255,10362,10310],{},[6255,10364,10307],{},[6237,10366,10367,10369,10371,10373,10375],{},[6255,10368,3055],{},[6255,10370,10310],{},[6255,10372,10310],{},[6255,10374,10310],{},[6255,10376,10307],{},[6237,10378,10379,10381,10383,10385,10387],{},[6255,10380,4049],{},[6255,10382,10307],{},[6255,10384,10310],{},[6255,10386,10307],{},[6255,10388,10307],{},[6237,10390,10391,10393,10395,10398,10400],{},[6255,10392,539],{},[6255,10394,10307],{},[6255,10396,10397],{},"No*",[6255,10399,10307],{},[6255,10401,10307],{},[6237,10403,10404,10406,10408,10410,10412],{},[6255,10405,534],{},[6255,10407,10307],{},[6255,10409,10310],{},[6255,10411,10307],{},[6255,10413,10310],{},[6237,10415,10416,10418,10420,10422,10424],{},[6255,10417,519],{},[6255,10419,10307],{},[6255,10421,10310],{},[6255,10423,10307],{},[6255,10425,10310],{},[6237,10427,10428,10430,10432,10434,10436],{},[6255,10429,524],{},[6255,10431,10307],{},[6255,10433,10310],{},[6255,10435,10307],{},[6255,10437,10310],{},[6237,10439,10440,10442,10444,10446,10448],{},[6255,10441,544],{},[6255,10443,10307],{},[6255,10445,10310],{},[6255,10447,10307],{},[625
5,10449,10307],{},[4602,10451,10452],{},"*Fallback always returns a value (uses fallback on error)\n**Scaffold errors are not reported back",[4676,10454,749],{"id":10455},"common-combinations",[7309,10457,753],{"id":10458},"resilient-external-api-call",[4689,10460,10462],{"className":4691,"code":10461,"language":4693,"meta":29,"style":29},"// Define identities upfront\nvar (\n    RateID    = pipz.NewIdentity(\"rate\", \"Rate limit external API calls\")\n    BreakerID = pipz.NewIdentity(\"breaker\", \"Circuit breaker for API protection\")\n    TimeoutID = pipz.NewIdentity(\"timeout\", \"5 second timeout for API calls\")\n    RetryID   = pipz.NewIdentity(\"retry\", \"Retry API calls up to 3 times\")\n)\n\napi := pipz.NewRateLimiter(RateID,\n    pipz.NewCircuitBreaker(BreakerID,\n        pipz.NewTimeout(TimeoutID,\n            pipz.NewRetry(RetryID, apiCall, 3),\n            5*time.Second,\n        ),\n    ),\n)\n",[4684,10463,10464,10469,10475,10501,10526,10551,10576,10580,10584,10604,10618,10633,10657,10670,10675,10680],{"__ignoreMap":29},[4697,10465,10466],{"class":4699,"line":9},[4697,10467,10468],{"class":4821},"// Define identities upfront\n",[4697,10470,10471,10473],{"class":4699,"line":19},[4697,10472,5415],{"class":4702},[4697,10474,5363],{"class":4710},[4697,10476,10477,10480,10483,10485,10487,10489,10491,10494,10496,10499],{"class":4699,"line":35},[4697,10478,10479],{"class":4827},"    RateID",[4697,10481,10482],{"class":4827},"    =",[4697,10484,4834],{"class":4827},[4697,10486,4742],{"class":4710},[4697,10488,5431],{"class":4732},[4697,10490,4736],{"class":4710},[4697,10492,10493],{"class":5023},"\"rate\"",[4697,10495,4748],{"class":4710},[4697,10497,10498],{"class":5023}," \"Rate limit external API calls\"",[4697,10500,4767],{"class":4710},[4697,10502,10503,10506,10508,10510,10512,10514,10516,10519,10521,10524],{"class":4699,"line":917},[4697,10504,10505],{"class":4827},"    
BreakerID",[4697,10507,4887],{"class":4827},[4697,10509,4834],{"class":4827},[4697,10511,4742],{"class":4710},[4697,10513,5431],{"class":4732},[4697,10515,4736],{"class":4710},[4697,10517,10518],{"class":5023},"\"breaker\"",[4697,10520,4748],{"class":4710},[4697,10522,10523],{"class":5023}," \"Circuit breaker for API protection\"",[4697,10525,4767],{"class":4710},[4697,10527,10528,10531,10533,10535,10537,10539,10541,10544,10546,10549],{"class":4699,"line":4791},[4697,10529,10530],{"class":4827},"    TimeoutID",[4697,10532,4887],{"class":4827},[4697,10534,4834],{"class":4827},[4697,10536,4742],{"class":4710},[4697,10538,5431],{"class":4732},[4697,10540,4736],{"class":4710},[4697,10542,10543],{"class":5023},"\"timeout\"",[4697,10545,4748],{"class":4710},[4697,10547,10548],{"class":5023}," \"5 second timeout for API calls\"",[4697,10550,4767],{"class":4710},[4697,10552,10553,10556,10558,10560,10562,10564,10566,10569,10571,10574],{"class":4699,"line":4802},[4697,10554,10555],{"class":4827},"    RetryID",[4697,10557,5451],{"class":4827},[4697,10559,4834],{"class":4827},[4697,10561,4742],{"class":4710},[4697,10563,5431],{"class":4732},[4697,10565,4736],{"class":4710},[4697,10567,10568],{"class":5023},"\"retry\"",[4697,10570,4748],{"class":4710},[4697,10572,10573],{"class":5023}," \"Retry API calls up to 3 
times\"",[4697,10575,4767],{"class":4710},[4697,10577,10578],{"class":4699,"line":4921},[4697,10579,4767],{"class":4710},[4697,10581,10582],{"class":4699,"line":4927},[4697,10583,4918],{"emptyLinePlaceholder":4917},[4697,10585,10586,10589,10591,10593,10595,10597,10599,10602],{"class":4699,"line":4981},[4697,10587,10588],{"class":4827},"api",[4697,10590,4831],{"class":4827},[4697,10592,4834],{"class":4827},[4697,10594,4742],{"class":4710},[4697,10596,6498],{"class":4732},[4697,10598,4736],{"class":4710},[4697,10600,10601],{"class":4827},"RateID",[4697,10603,6488],{"class":4710},[4697,10605,10606,10608,10610,10612,10614,10616],{"class":4699,"line":5003},[4697,10607,6493],{"class":4827},[4697,10609,4742],{"class":4710},[4697,10611,5282],{"class":4732},[4697,10613,4736],{"class":4710},[4697,10615,5287],{"class":4827},[4697,10617,6488],{"class":4710},[4697,10619,10620,10623,10625,10627,10629,10631],{"class":4699,"line":5029},[4697,10621,10622],{"class":4827},"        pipz",[4697,10624,4742],{"class":4710},[4697,10626,5234],{"class":4732},[4697,10628,4736],{"class":4710},[4697,10630,5239],{"class":4827},[4697,10632,6488],{"class":4710},[4697,10634,10635,10638,10640,10642,10644,10646,10648,10651,10653,10655],{"class":4699,"line":5035},[4697,10636,10637],{"class":4827},"            pipz",[4697,10639,4742],{"class":4710},[4697,10641,3796],{"class":4732},[4697,10643,4736],{"class":4710},[4697,10645,5208],{"class":4827},[4697,10647,4748],{"class":4710},[4697,10649,10650],{"class":4827}," apiCall",[4697,10652,4748],{"class":4710},[4697,10654,5218],{"class":4997},[4697,10656,7230],{"class":4710},[4697,10658,10659,10662,10664,10666,10668],{"class":4699,"line":5047},[4697,10660,10661],{"class":4997},"            5",[4697,10663,5252],{"class":4827},[4697,10665,4742],{"class":4710},[4697,10667,5257],{"class":4827},[4697,10669,6488],{"class":4710},[4697,10671,10672],{"class":4699,"line":5052},[4697,10673,10674],{"class":4710},"        
),\n",[4697,10676,10677],{"class":4699,"line":5057},[4697,10678,10679],{"class":4710},"    ),\n",[4697,10681,10682],{"class":4699,"line":5063},[4697,10683,4767],{"class":4710},[7309,10685,758],{"id":10686},"multi-source-with-fallback",[4689,10688,10690],{"className":4691,"code":10689,"language":4693,"meta":29,"style":29},"// Define identities upfront\nvar (\n    FetchID   = pipz.NewIdentity(\"fetch\", \"Fetch with fallback to static default\")\n    SourcesID = pipz.NewIdentity(\"sources\", \"Race between primary and secondary sources\")\n)\n\nfetch := pipz.NewFallback(FetchID,\n    pipz.NewRace[T](SourcesID, primary, secondary),\n    staticDefault,\n)\n",[4684,10691,10692,10696,10702,10727,10752,10756,10760,10778,10806,10813],{"__ignoreMap":29},[4697,10693,10694],{"class":4699,"line":9},[4697,10695,10468],{"class":4821},[4697,10697,10698,10700],{"class":4699,"line":19},[4697,10699,5415],{"class":4702},[4697,10701,5363],{"class":4710},[4697,10703,10704,10707,10709,10711,10713,10715,10717,10720,10722,10725],{"class":4699,"line":35},[4697,10705,10706],{"class":4827},"    FetchID",[4697,10708,5451],{"class":4827},[4697,10710,4834],{"class":4827},[4697,10712,4742],{"class":4710},[4697,10714,5431],{"class":4732},[4697,10716,4736],{"class":4710},[4697,10718,10719],{"class":5023},"\"fetch\"",[4697,10721,4748],{"class":4710},[4697,10723,10724],{"class":5023}," \"Fetch with fallback to static default\"",[4697,10726,4767],{"class":4710},[4697,10728,10729,10732,10734,10736,10738,10740,10742,10745,10747,10750],{"class":4699,"line":917},[4697,10730,10731],{"class":4827},"    SourcesID",[4697,10733,4887],{"class":4827},[4697,10735,4834],{"class":4827},[4697,10737,4742],{"class":4710},[4697,10739,5431],{"class":4732},[4697,10741,4736],{"class":4710},[4697,10743,10744],{"class":5023},"\"sources\"",[4697,10746,4748],{"class":4710},[4697,10748,10749],{"class":5023}," \"Race between primary and secondary 
sources\"",[4697,10751,4767],{"class":4710},[4697,10753,10754],{"class":4699,"line":4791},[4697,10755,4767],{"class":4710},[4697,10757,10758],{"class":4699,"line":4802},[4697,10759,4918],{"emptyLinePlaceholder":4917},[4697,10761,10762,10764,10766,10768,10770,10772,10774,10776],{"class":4699,"line":4921},[4697,10763,6443],{"class":4827},[4697,10765,4831],{"class":4827},[4697,10767,4834],{"class":4827},[4697,10769,4742],{"class":4710},[4697,10771,9711],{"class":4732},[4697,10773,4736],{"class":4710},[4697,10775,6456],{"class":4827},[4697,10777,6488],{"class":4710},[4697,10779,10780,10782,10784,10786,10788,10790,10792,10795,10797,10799,10801,10804],{"class":4699,"line":4927},[4697,10781,6493],{"class":4827},[4697,10783,4742],{"class":4710},[4697,10785,9210],{"class":4732},[4697,10787,4711],{"class":4710},[4697,10789,4715],{"class":4706},[4697,10791,6505],{"class":4710},[4697,10793,10794],{"class":4827},"SourcesID",[4697,10796,4748],{"class":4710},[4697,10798,9224],{"class":4827},[4697,10800,4748],{"class":4710},[4697,10802,10803],{"class":4827}," secondary",[4697,10805,7230],{"class":4710},[4697,10807,10808,10811],{"class":4699,"line":4981},[4697,10809,10810],{"class":4827},"    staticDefault",[4697,10812,6488],{"class":4710},[4697,10814,10815],{"class":4699,"line":5003},[4697,10816,4767],{"class":4710},[7309,10818,763],{"id":10819},"conditional-parallel-processing",[4689,10821,10823],{"className":4691,"code":10822,"language":4693,"meta":29,"style":29},"// Define identities upfront\nvar (\n    RouterID     = pipz.NewIdentity(\"router\", \"Routes to batch or sequential processing\")\n    BatchID      = pipz.NewIdentity(\"batch\", \"Batch parallel processing\")\n    SequentialID = pipz.NewIdentity(\"seq\", \"Sequential processing\")\n)\n\nrouter := pipz.NewSwitch[T](RouterID, routeFunc).\n    AddRoute(\"batch\", pipz.NewConcurrent[T](BatchID, processors...)).\n    AddRoute(\"sequential\", pipz.NewSequence[T](SequentialID, 
processors...))\n",[4684,10824,10825,10829,10835,10860,10886,10911,10915,10919,10947,10984],{"__ignoreMap":29},[4697,10826,10827],{"class":4699,"line":9},[4697,10828,10468],{"class":4821},[4697,10830,10831,10833],{"class":4699,"line":19},[4697,10832,5415],{"class":4702},[4697,10834,5363],{"class":4710},[4697,10836,10837,10840,10843,10845,10847,10849,10851,10853,10855,10858],{"class":4699,"line":35},[4697,10838,10839],{"class":4827},"    RouterID",[4697,10841,10842],{"class":4827},"     =",[4697,10844,4834],{"class":4827},[4697,10846,4742],{"class":4710},[4697,10848,5431],{"class":4732},[4697,10850,4736],{"class":4710},[4697,10852,9485],{"class":5023},[4697,10854,4748],{"class":4710},[4697,10856,10857],{"class":5023}," \"Routes to batch or sequential processing\"",[4697,10859,4767],{"class":4710},[4697,10861,10862,10865,10868,10870,10872,10874,10876,10879,10881,10884],{"class":4699,"line":917},[4697,10863,10864],{"class":4827},"    BatchID",[4697,10866,10867],{"class":4827},"      =",[4697,10869,4834],{"class":4827},[4697,10871,4742],{"class":4710},[4697,10873,5431],{"class":4732},[4697,10875,4736],{"class":4710},[4697,10877,10878],{"class":5023},"\"batch\"",[4697,10880,4748],{"class":4710},[4697,10882,10883],{"class":5023}," \"Batch parallel processing\"",[4697,10885,4767],{"class":4710},[4697,10887,10888,10891,10893,10895,10897,10899,10901,10904,10906,10909],{"class":4699,"line":4791},[4697,10889,10890],{"class":4827},"    SequentialID",[4697,10892,4887],{"class":4827},[4697,10894,4834],{"class":4827},[4697,10896,4742],{"class":4710},[4697,10898,5431],{"class":4732},[4697,10900,4736],{"class":4710},[4697,10902,10903],{"class":5023},"\"seq\"",[4697,10905,4748],{"class":4710},[4697,10907,10908],{"class":5023}," \"Sequential 
processing\"",[4697,10910,4767],{"class":4710},[4697,10912,10913],{"class":4699,"line":4802},[4697,10914,4767],{"class":4710},[4697,10916,10917],{"class":4699,"line":4921},[4697,10918,4918],{"emptyLinePlaceholder":4917},[4697,10920,10921,10924,10926,10928,10930,10932,10934,10936,10938,10940,10942,10945],{"class":4699,"line":4927},[4697,10922,10923],{"class":4827},"router",[4697,10925,4831],{"class":4827},[4697,10927,4834],{"class":4827},[4697,10929,4742],{"class":4710},[4697,10931,9510],{"class":4732},[4697,10933,4711],{"class":4710},[4697,10935,4715],{"class":4706},[4697,10937,6505],{"class":4710},[4697,10939,9519],{"class":4827},[4697,10941,4748],{"class":4710},[4697,10943,10944],{"class":4827}," routeFunc",[4697,10946,9589],{"class":4710},[4697,10948,10949,10952,10954,10956,10958,10960,10962,10964,10966,10968,10970,10973,10975,10978,10981],{"class":4699,"line":4981},[4697,10950,10951],{"class":4732},"    AddRoute",[4697,10953,4736],{"class":4710},[4697,10955,10878],{"class":5023},[4697,10957,4748],{"class":4710},[4697,10959,4834],{"class":4827},[4697,10961,4742],{"class":4710},[4697,10963,8926],{"class":4732},[4697,10965,4711],{"class":4710},[4697,10967,4715],{"class":4706},[4697,10969,6505],{"class":4710},[4697,10971,10972],{"class":4827},"BatchID",[4697,10974,4748],{"class":4710},[4697,10976,10977],{"class":4827}," 
processors",[4697,10979,10980],{"class":4903},"...",[4697,10982,10983],{"class":4710},")).\n",[4697,10985,10986,10988,10990,10993,10995,10997,10999,11001,11003,11005,11007,11010,11012,11014,11016],{"class":4699,"line":5003},[4697,10987,10951],{"class":4732},[4697,10989,4736],{"class":4710},[4697,10991,10992],{"class":5023},"\"sequential\"",[4697,10994,4748],{"class":4710},[4697,10996,4834],{"class":4827},[4697,10998,4742],{"class":4710},[4697,11000,5159],{"class":4732},[4697,11002,4711],{"class":4710},[4697,11004,4715],{"class":4706},[4697,11006,6505],{"class":4710},[4697,11008,11009],{"class":4827},"SequentialID",[4697,11011,4748],{"class":4710},[4697,11013,10977],{"class":4827},[4697,11015,10980],{"class":4903},[4697,11017,11018],{"class":4710},"))\n",[7309,11020,768],{"id":11021},"resource-constrained-processing",[4689,11023,11025],{"className":4691,"code":11024,"language":4693,"meta":29,"style":29},"// Define identities upfront\nvar (\n    APILimitedID = pipz.NewIdentity(\"api-limited\", \"Worker pool limited to 5 concurrent API calls\")\n    ServiceAID   = pipz.NewIdentity(\"service-a\", \"Calls service A\")\n    ServiceBID   = pipz.NewIdentity(\"service-b\", \"Calls service B\")\n    ServiceCID   = pipz.NewIdentity(\"service-c\", \"Calls service C\")\n    ServiceDID   = pipz.NewIdentity(\"service-d\", \"Calls service D\")\n    ServiceEID   = pipz.NewIdentity(\"service-e\", \"Calls service E\")\n    ServiceFID   = pipz.NewIdentity(\"service-f\", \"Calls service F\")\n)\n\n// Limit concurrent API calls to avoid rate limits\napiCalls := pipz.NewWorkerPool[T](APILimitedID, 5,\n    pipz.Apply(ServiceAID, callServiceA),\n    pipz.Apply(ServiceBID, callServiceB),\n    pipz.Apply(ServiceCID, callServiceC),\n    pipz.Apply(ServiceDID, callServiceD),\n    pipz.Apply(ServiceEID, callServiceE),\n    pipz.Apply(ServiceFID, callServiceF),\n    // Only 5 will run 
concurrently\n)\n",[4684,11026,11027,11031,11037,11062,11087,11112,11137,11162,11187,11212,11216,11220,11225,11253,11273,11293,11313,11333,11353,11373,11378],{"__ignoreMap":29},[4697,11028,11029],{"class":4699,"line":9},[4697,11030,10468],{"class":4821},[4697,11032,11033,11035],{"class":4699,"line":19},[4697,11034,5415],{"class":4702},[4697,11036,5363],{"class":4710},[4697,11038,11039,11042,11044,11046,11048,11050,11052,11055,11057,11060],{"class":4699,"line":35},[4697,11040,11041],{"class":4827},"    APILimitedID",[4697,11043,4887],{"class":4827},[4697,11045,4834],{"class":4827},[4697,11047,4742],{"class":4710},[4697,11049,5431],{"class":4732},[4697,11051,4736],{"class":4710},[4697,11053,11054],{"class":5023},"\"api-limited\"",[4697,11056,4748],{"class":4710},[4697,11058,11059],{"class":5023}," \"Worker pool limited to 5 concurrent API calls\"",[4697,11061,4767],{"class":4710},[4697,11063,11064,11067,11069,11071,11073,11075,11077,11080,11082,11085],{"class":4699,"line":917},[4697,11065,11066],{"class":4827},"    ServiceAID",[4697,11068,5451],{"class":4827},[4697,11070,4834],{"class":4827},[4697,11072,4742],{"class":4710},[4697,11074,5431],{"class":4732},[4697,11076,4736],{"class":4710},[4697,11078,11079],{"class":5023},"\"service-a\"",[4697,11081,4748],{"class":4710},[4697,11083,11084],{"class":5023}," \"Calls service A\"",[4697,11086,4767],{"class":4710},[4697,11088,11089,11092,11094,11096,11098,11100,11102,11105,11107,11110],{"class":4699,"line":4791},[4697,11090,11091],{"class":4827},"    ServiceBID",[4697,11093,5451],{"class":4827},[4697,11095,4834],{"class":4827},[4697,11097,4742],{"class":4710},[4697,11099,5431],{"class":4732},[4697,11101,4736],{"class":4710},[4697,11103,11104],{"class":5023},"\"service-b\"",[4697,11106,4748],{"class":4710},[4697,11108,11109],{"class":5023}," \"Calls service 
B\"",[4697,11111,4767],{"class":4710},[4697,11113,11114,11117,11119,11121,11123,11125,11127,11130,11132,11135],{"class":4699,"line":4802},[4697,11115,11116],{"class":4827},"    ServiceCID",[4697,11118,5451],{"class":4827},[4697,11120,4834],{"class":4827},[4697,11122,4742],{"class":4710},[4697,11124,5431],{"class":4732},[4697,11126,4736],{"class":4710},[4697,11128,11129],{"class":5023},"\"service-c\"",[4697,11131,4748],{"class":4710},[4697,11133,11134],{"class":5023}," \"Calls service C\"",[4697,11136,4767],{"class":4710},[4697,11138,11139,11142,11144,11146,11148,11150,11152,11155,11157,11160],{"class":4699,"line":4921},[4697,11140,11141],{"class":4827},"    ServiceDID",[4697,11143,5451],{"class":4827},[4697,11145,4834],{"class":4827},[4697,11147,4742],{"class":4710},[4697,11149,5431],{"class":4732},[4697,11151,4736],{"class":4710},[4697,11153,11154],{"class":5023},"\"service-d\"",[4697,11156,4748],{"class":4710},[4697,11158,11159],{"class":5023}," \"Calls service D\"",[4697,11161,4767],{"class":4710},[4697,11163,11164,11167,11169,11171,11173,11175,11177,11180,11182,11185],{"class":4699,"line":4927},[4697,11165,11166],{"class":4827},"    ServiceEID",[4697,11168,5451],{"class":4827},[4697,11170,4834],{"class":4827},[4697,11172,4742],{"class":4710},[4697,11174,5431],{"class":4732},[4697,11176,4736],{"class":4710},[4697,11178,11179],{"class":5023},"\"service-e\"",[4697,11181,4748],{"class":4710},[4697,11183,11184],{"class":5023}," \"Calls service E\"",[4697,11186,4767],{"class":4710},[4697,11188,11189,11192,11194,11196,11198,11200,11202,11205,11207,11210],{"class":4699,"line":4981},[4697,11190,11191],{"class":4827},"    ServiceFID",[4697,11193,5451],{"class":4827},[4697,11195,4834],{"class":4827},[4697,11197,4742],{"class":4710},[4697,11199,5431],{"class":4732},[4697,11201,4736],{"class":4710},[4697,11203,11204],{"class":5023},"\"service-f\"",[4697,11206,4748],{"class":4710},[4697,11208,11209],{"class":5023}," \"Calls service 
F\"",[4697,11211,4767],{"class":4710},[4697,11213,11214],{"class":4699,"line":5003},[4697,11215,4767],{"class":4710},[4697,11217,11218],{"class":4699,"line":5029},[4697,11219,4918],{"emptyLinePlaceholder":4917},[4697,11221,11222],{"class":4699,"line":5035},[4697,11223,11224],{"class":4821},"// Limit concurrent API calls to avoid rate limits\n",[4697,11226,11227,11230,11232,11234,11236,11238,11240,11242,11244,11247,11249,11251],{"class":4699,"line":5047},[4697,11228,11229],{"class":4827},"apiCalls",[4697,11231,4831],{"class":4827},[4697,11233,4834],{"class":4827},[4697,11235,4742],{"class":4710},[4697,11237,9056],{"class":4732},[4697,11239,4711],{"class":4710},[4697,11241,4715],{"class":4706},[4697,11243,6505],{"class":4710},[4697,11245,11246],{"class":4827},"APILimitedID",[4697,11248,4748],{"class":4710},[4697,11250,5249],{"class":4997},[4697,11252,6488],{"class":4710},[4697,11254,11255,11257,11259,11261,11263,11266,11268,11271],{"class":4699,"line":5052},[4697,11256,6493],{"class":4827},[4697,11258,4742],{"class":4710},[4697,11260,2254],{"class":4732},[4697,11262,4736],{"class":4710},[4697,11264,11265],{"class":4827},"ServiceAID",[4697,11267,4748],{"class":4710},[4697,11269,11270],{"class":4827}," callServiceA",[4697,11272,7230],{"class":4710},[4697,11274,11275,11277,11279,11281,11283,11286,11288,11291],{"class":4699,"line":5057},[4697,11276,6493],{"class":4827},[4697,11278,4742],{"class":4710},[4697,11280,2254],{"class":4732},[4697,11282,4736],{"class":4710},[4697,11284,11285],{"class":4827},"ServiceBID",[4697,11287,4748],{"class":4710},[4697,11289,11290],{"class":4827}," callServiceB",[4697,11292,7230],{"class":4710},[4697,11294,11295,11297,11299,11301,11303,11306,11308,11311],{"class":4699,"line":5063},[4697,11296,6493],{"class":4827},[4697,11298,4742],{"class":4710},[4697,11300,2254],{"class":4732},[4697,11302,4736],{"class":4710},[4697,11304,11305],{"class":4827},"ServiceCID",[4697,11307,4748],{"class":4710},[4697,11309,11310],{"class":4827}," 
callServiceC",[4697,11312,7230],{"class":4710},[4697,11314,11315,11317,11319,11321,11323,11326,11328,11331],{"class":4699,"line":5108},[4697,11316,6493],{"class":4827},[4697,11318,4742],{"class":4710},[4697,11320,2254],{"class":4732},[4697,11322,4736],{"class":4710},[4697,11324,11325],{"class":4827},"ServiceDID",[4697,11327,4748],{"class":4710},[4697,11329,11330],{"class":4827}," callServiceD",[4697,11332,7230],{"class":4710},[4697,11334,11335,11337,11339,11341,11343,11346,11348,11351],{"class":4699,"line":5128},[4697,11336,6493],{"class":4827},[4697,11338,4742],{"class":4710},[4697,11340,2254],{"class":4732},[4697,11342,4736],{"class":4710},[4697,11344,11345],{"class":4827},"ServiceEID",[4697,11347,4748],{"class":4710},[4697,11349,11350],{"class":4827}," callServiceE",[4697,11352,7230],{"class":4710},[4697,11354,11355,11357,11359,11361,11363,11366,11368,11371],{"class":4699,"line":5522},[4697,11356,6493],{"class":4827},[4697,11358,4742],{"class":4710},[4697,11360,2254],{"class":4732},[4697,11362,4736],{"class":4710},[4697,11364,11365],{"class":4827},"ServiceFID",[4697,11367,4748],{"class":4710},[4697,11369,11370],{"class":4827}," callServiceF",[4697,11372,7230],{"class":4710},[4697,11374,11375],{"class":4699,"line":5527},[4697,11376,11377],{"class":4821},"    // Only 5 will run concurrently\n",[4697,11379,11380],{"class":4699,"line":5532},[4697,11381,4767],{"class":4710},[7704,11383,11384],{},"html pre.shiki code .sLkEo, html code.shiki .sLkEo{--shiki-default:var(--shiki-comment)}html pre.shiki code .sUt3r, html code.shiki .sUt3r{--shiki-default:var(--shiki-keyword)}html pre.shiki code .sh8_p, html code.shiki .sh8_p{--shiki-default:var(--shiki-text)}html pre.shiki code .sq5bi, html code.shiki .sq5bi{--shiki-default:var(--shiki-punctuation)}html pre.shiki code .s5klm, html code.shiki .s5klm{--shiki-default:var(--shiki-function)}html pre.shiki code .sxAnc, html code.shiki .sxAnc{--shiki-default:var(--shiki-string)}html pre.shiki code .sYBwO, html code.shiki 
.sYBwO{--shiki-default:var(--shiki-type)}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sMAmT, html code.shiki .sMAmT{--shiki-default:var(--shiki-number)}html pre.shiki code .sSYET, html code.shiki .sSYET{--shiki-default:var(--shiki-parameter)}html pre.shiki code .sW3Qg, html code.shiki .sW3Qg{--shiki-default:var(--shiki-operator)}",{"title":29,"searchDepth":19,"depth":19,"links":11386},[11387,11388,11389,11402,11403],{"id":8728,"depth":19,"text":675},{"id":8737,"depth":19,"text":680},{"id":8746,"depth":19,"text":685,"children":11390},[11391,11392,11393,11394,11395,11396,11397,11398,11399,11400,11401],{"id":8749,"depth":35,"text":689},{"id":8866,"depth":35,"text":694},{"id":8996,"depth":35,"text":699},{"id":9150,"depth":35,"text":704},{"id":9278,"depth":35,"text":709},{"id":9450,"depth":35,"text":714},{"id":9651,"depth":35,"text":719},{"id":9764,"depth":35,"text":724},{"id":9874,"depth":35,"text":729},{"id":10021,"depth":35,"text":734},{"id":10164,"depth":35,"text":739},{"id":10278,"depth":19,"text":744},{"id":10455,"depth":19,"text":749,"children":11404},[11405,11406,11407,11408],{"id":10458,"depth":35,"text":753},{"id":10686,"depth":35,"text":758},{"id":10819,"depth":35,"text":763},{"id":11021,"depth":35,"text":768},{},"2025-12-13T00:00:00.000Z",null,{"title":666,"description":668},[7345,7503,7420,11414],"decision-making","SeZIQYdKrY6BYgATQJxXXWoqlXBXtOnxvFngSeps-tc",[11417,11418],{"title":497,"path":496,"stem":4436,"description":499,"children":-1},{"title":773,"path":772,"stem":4446,"description":775,"children":-1},1776118603464]