Skip to content

Core API Reference

Ecopath (Mass-Balance)

pypath.core.ecopath

Ecopath mass-balance model implementation.

This module contains the core Rpath class and the rpath() function that performs mass-balance calculations for food web models.

Rpath dataclass

Balanced Ecopath model.

This class represents a mass-balanced food web model created by the rpath() function.

Attributes:

Name Type Description
NUM_GROUPS int

Total number of groups (living + dead + gears)

NUM_LIVING int

Number of living groups (consumers + producers)

NUM_DEAD int

Number of detritus groups

NUM_GEARS int

Number of fishing fleets

Group ndarray

Names of all groups

type ndarray

Type codes (0=consumer, 1=producer, 2=detritus, 3=fleet)

TL ndarray

Trophic levels

Biomass ndarray

Biomass values (t/km²)

PB ndarray

Production/Biomass ratios (1/year)

QB ndarray

Consumption/Biomass ratios (1/year)

EE ndarray

Ecotrophic efficiencies

GE ndarray

Gross efficiencies (P/Q)

M0 ndarray

Other mortality rates (M0 = PB * (1 - EE))

BA ndarray

Biomass accumulation rates

Unassim ndarray

Unassimilated consumption fractions

DC ndarray

Diet composition matrix

DetFate ndarray

Detritus fate matrix

Landings ndarray

Landings by group and fleet

Discards ndarray

Discards by group and fleet

eco_name str

Ecosystem name

eco_area float

Ecosystem area (km²)

Source code in pypath/core/ecopath.py
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
@dataclass
class Rpath:
    """Balanced Ecopath model.

    This class represents a mass-balanced food web model created by the
    rpath() function.

    Attributes
    ----------
    NUM_GROUPS : int
        Total number of groups (living + dead + gears)
    NUM_LIVING : int
        Number of living groups (consumers + producers)
    NUM_DEAD : int
        Number of detritus groups
    NUM_GEARS : int
        Number of fishing fleets
    Group : np.ndarray
        Names of all groups
    type : np.ndarray
        Type codes (0=consumer, 1=producer, 2=detritus, 3=fleet)
    TL : np.ndarray
        Trophic levels
    Biomass : np.ndarray
        Biomass values (t/km²)
    PB : np.ndarray
        Production/Biomass ratios (1/year)
    QB : np.ndarray
        Consumption/Biomass ratios (1/year)
    EE : np.ndarray
        Ecotrophic efficiencies
    GE : np.ndarray
        Gross efficiencies (P/Q)
    M0 : np.ndarray
        Other mortality rates (M0 = PB * (1 - EE))
    BA : np.ndarray
        Biomass accumulation rates
    Unassim : np.ndarray
        Unassimilated consumption fractions
    DC : np.ndarray
        Diet composition matrix
    DetFate : np.ndarray
        Detritus fate matrix
    Landings : np.ndarray
        Landings by group and fleet
    Discards : np.ndarray
        Discards by group and fleet
    eco_name : str
        Ecosystem name
    eco_area : float
        Ecosystem area (km²)
    """

    NUM_GROUPS: int
    NUM_LIVING: int
    NUM_DEAD: int
    NUM_GEARS: int
    Group: np.ndarray
    type: np.ndarray
    TL: np.ndarray
    Biomass: np.ndarray
    PB: np.ndarray
    QB: np.ndarray
    EE: np.ndarray
    GE: np.ndarray
    M0: np.ndarray
    BA: np.ndarray
    Unassim: np.ndarray
    DC: np.ndarray
    DetFate: np.ndarray
    Landings: np.ndarray
    Discards: np.ndarray
    eco_name: str = ""
    eco_area: float = 1.0

    def __repr__(self) -> str:
        """Return a human-readable balance summary of the model.

        Balance status is judged only over the biological groups
        (living + detritus); fleet groups carry no meaningful EE.
        """
        # Restrict both the status check AND the offending-group listing to
        # the biological slice. Previously the listing scanned the full EE
        # array, which could name fleet groups inconsistently with the
        # status computed from the sliced maximum.
        n_bio = self.NUM_LIVING + self.NUM_DEAD
        bio_ee = self.EE[:n_bio]
        max_ee = np.nanmax(bio_ee)
        if max_ee > 1:
            status = "Unbalanced!"
            # NaN comparisons are False, so NaN EE values are never listed.
            unbalanced = self.Group[:n_bio][bio_ee > 1]
            status_detail = f"\nGroups with EE > 1: {list(unbalanced)}"
        else:
            status = "Balanced"
            status_detail = ""

        return (
            f"Rpath model: {self.eco_name}\n"
            f"Model Area: {self.eco_area}\n"
            f"     Status: {status}{status_detail}\n"
            f"     Groups: {self.NUM_GROUPS} "
            f"(living={self.NUM_LIVING}, dead={self.NUM_DEAD}, gears={self.NUM_GEARS})"
        )

    def summary(self) -> pd.DataFrame:
        """Get summary table of model results.

        Returns
        -------
        pd.DataFrame
            Summary with Group, Type, TL, Biomass, PB, QB, EE, GE, and Removals.
            Removals is total catch per group: landings plus discards summed
            across all fleets (NaN entries treated as zero).
        """
        removals = np.nansum(self.Landings, axis=1) + np.nansum(self.Discards, axis=1)

        return pd.DataFrame(
            {
                "Group": self.Group,
                "Type": self.type,
                "TL": self.TL,
                "Biomass": self.Biomass,
                "PB": self.PB,
                "QB": self.QB,
                "EE": self.EE,
                "GE": self.GE,
                "Removals": removals,
            }
        )
summary
summary() -> pd.DataFrame

Get summary table of model results.

Returns:

Type Description
DataFrame

Summary with Group, Type, TL, Biomass, PB, QB, EE, GE, and Removals.

Source code in pypath/core/ecopath.py
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
def summary(self) -> pd.DataFrame:
    """Get summary table of model results.

    Returns
    -------
    pd.DataFrame
        Summary with Group, Type, TL, Biomass, PB, QB, EE, GE, and Removals.
    """
    # Total removals per group = landings + discards summed across fleets,
    # with NaN entries treated as zero.
    total_removals = np.nansum(self.Landings, axis=1)
    total_removals = total_removals + np.nansum(self.Discards, axis=1)

    # Map output column names to the model attributes they come from;
    # dict insertion order fixes the column order of the result.
    column_sources = (
        ("Group", "Group"),
        ("Type", "type"),
        ("TL", "TL"),
        ("Biomass", "Biomass"),
        ("PB", "PB"),
        ("QB", "QB"),
        ("EE", "EE"),
        ("GE", "GE"),
    )
    table = {label: getattr(self, attr) for label, attr in column_sources}
    table["Removals"] = total_removals
    return pd.DataFrame(table)

rpath

rpath(rpath_params: RpathParams, eco_name: str = '', eco_area: float = 1.0, debug: bool = False) -> Union[Rpath, Tuple[Rpath, Dict[str, object]]]

Balance an Ecopath model.

Performs initial mass balance using an RpathParams object. Preserves the original group order from the input parameters.

The mass balance equation solved is:

Production = Predation Mortality + Fishing Mortality + Other Mortality + Biomass Accumulation + Net Migration

Or equivalently: B_i * PB_i * EE_i = Σ(B_j * QB_j * DC_ji) + Y_i + BA_i

Parameters:

Name Type Description Default
rpath_params RpathParams

RpathParams object containing the parameters needed to create an Rpath model.

required
eco_name str

Name of the ecosystem (stored as attribute).

''
eco_area float

Area of the ecosystem (stored as attribute).

1.0
debug bool

If False (default), return only the balanced Rpath object. If True, return a tuple (rpath_obj, diagnostics) where diagnostics is a dict containing intermediate matrices (A, b_vec, x, diet_values, nodetrdiet, living_idx, no_b, no_ee).

False

Returns:

Type Description
Rpath or tuple[Rpath, dict]

Balanced model that can be supplied to rsim_scenario(). When debug=True, returns (Rpath, diagnostics).

Raises:

Type Description
ValueError

If the model cannot be balanced due to missing parameters.

Notes

When debug=True the function returns a tuple (rpath_obj, diagnostics) where diagnostics contains intermediate matrices useful for debugging (A, b_vec, x, diet_values, nodetrdiet, living_idx, no_b, no_ee).

Examples:

>>> params = create_rpath_params(...)
>>> # Fill in parameter values
>>> model = rpath(params, eco_name='Georges Bank')
>>> print(model)
Source code in pypath/core/ecopath.py
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
def rpath(
    rpath_params: RpathParams,
    eco_name: str = "",
    eco_area: float = 1.0,
    debug: bool = False,
) -> Union[Rpath, Tuple[Rpath, Dict[str, object]]]:
    """Balance an Ecopath model.

    Performs initial mass balance using an RpathParams object.
    Preserves the original group order from the input parameters.

    The mass balance equation solved is:

    Production = Predation Mortality + Fishing Mortality +
                 Other Mortality + Biomass Accumulation + Net Migration

    Or equivalently:
    B_i * PB_i * EE_i = Σ(B_j * QB_j * DC_ji) + Y_i + BA_i

    Parameters
    ----------
    rpath_params : RpathParams
        R object containing the parameters needed to create an Rpath model.
    eco_name : str, optional
        Name of the ecosystem (stored as attribute).
    eco_area : float, optional
        Area of the ecosystem (stored as attribute).
    debug : bool, optional
        If False (default), return only the balanced Rpath object.
        If True, return a tuple ``(rpath_obj, diagnostics)`` where
        *diagnostics* is a dict containing intermediate matrices
        (A, b_vec, x, diet_values, nodetrdiet, living_idx, no_b, no_ee).

    Returns
    -------
    Rpath or tuple[Rpath, dict]
        Balanced model that can be supplied to rsim_scenario().
        When *debug=True*, returns ``(Rpath, diagnostics)``.

    Raises
    ------
    ValueError
        If the model cannot be balanced due to missing parameters.

    Notes
    -----
    When ``debug=True`` the function returns a tuple
    ``(rpath_obj, diagnostics)`` where ``diagnostics`` contains
    intermediate matrices useful for debugging (A, b_vec, x,
    diet_values, nodetrdiet, living_idx, no_b, no_ee).

    Examples
    --------
    >>> params = create_rpath_params(...)
    >>> # Fill in parameter values
    >>> model = rpath(params, eco_name='Georges Bank')
    >>> print(model)
    """
    # Make a deep copy to avoid modifying original
    model_df = rpath_params.model.copy()
    diet_df = rpath_params.diet.copy()

    # Get dimensions - PRESERVE ORIGINAL ORDER
    ngroups = len(model_df)

    # Create index arrays for each group type (preserving original order)
    types_arr = model_df["Type"].values.astype(float)
    living_idx = np.where(types_arr < 2)[0]  # Indices of living groups
    dead_idx = np.where(types_arr == 2)[0]  # Indices of detritus groups
    fleet_idx = np.where(types_arr == 3)[0]  # Indices of fleet groups

    nliving = len(living_idx)
    ndead = len(dead_idx)
    ngear = len(fleet_idx)

    # Extract arrays from model DataFrame (original order)
    groups = model_df["Group"].values
    types = types_arr
    biomass = model_df["Biomass"].values.astype(float)
    pb = model_df["PB"].values.astype(float)
    qb = model_df["QB"].values.astype(float)
    ee = model_df["EE"].values.astype(float)
    prodcons = model_df["ProdCons"].values.astype(float)
    bioacc = model_df["BioAcc"].values.astype(float)
    unassim = model_df["Unassim"].values.astype(float)

    # Replace NaN with 0 for BioAcc and Unassim
    bioacc = np.where(np.isnan(bioacc), 0.0, bioacc)
    unassim = np.where(np.isnan(unassim), 0.0, unassim)

    # Get diet matrix - columns are predators (living groups only)
    living_group_names = groups[living_idx].tolist()
    diet_cols = [g for g in living_group_names if g in diet_df.columns]

    # Build diet matrix with rows matching original group order
    diet_prey_names = diet_df["Group"].tolist()
    all_group_names = groups.tolist()

    # Create mapping from diet prey names to row indices in diet_df
    prey_name_to_diet_row = {name: i for i, name in enumerate(diet_prey_names)}

    # Build diet matrix (rows = ALL groups + Import, cols = predators in living_idx order)
    # Need ngroups rows (one per group) + 1 row for Import
    n_prey = len(diet_prey_names)  # Number of rows in diet_df (includes Import)
    n_pred = len(diet_cols)
    diet_values = np.zeros(
        (ngroups + 1, n_pred)
    )  # ngroups rows for groups + 1 for Import

    # Map each group to its diet row
    for new_row_idx, group_name in enumerate(all_group_names):
        if group_name in prey_name_to_diet_row:
            old_row_idx = prey_name_to_diet_row[group_name]
            diet_values[new_row_idx, :] = diet_df.loc[
                old_row_idx, diet_cols
            ].values.astype(float)

    # Add Import row at the end if present
    if "Import" in prey_name_to_diet_row:
        import_row_idx = prey_name_to_diet_row["Import"]
        # Import goes at index ngroups (after all groups)
        if n_prey > ngroups:
            diet_values[ngroups, :] = diet_df.loc[
                import_row_idx, diet_cols
            ].values.astype(float)

    diet_values = np.nan_to_num(diet_values, nan=0.0)

    # Adjust diet for mixotrophs (Type between 0 and 1)
    for col_idx, grp_idx in enumerate(living_idx):
        if 0 < types[grp_idx] < 1:
            mix_q = 1 - types[grp_idx]
            diet_values[:, col_idx] *= mix_q

    # Extract diet for living groups only (prey rows are living groups)
    # nodetrdiet[i, j] = fraction of predator j's diet from prey i (both living)
    # Normalize predator diet columns to exclude Import fractions when present
    nodetrdiet = np.zeros((nliving, nliving))
    import_row = (
        diet_values[ngroups, :]
        if diet_values.shape[0] > ngroups
        else np.zeros(diet_values.shape[1])
    )
    # For each predator column, normalize by (1 - import_frac) if possible
    for j, pred_idx in enumerate(living_idx):
        import_frac = import_row[j] if j < len(import_row) else 0.0
        denom = 1.0 - import_frac if (1.0 - import_frac) > 0 else 1.0
        for i, prey_idx in enumerate(living_idx):
            nodetrdiet[i, j] = diet_values[prey_idx, j] / denom

    # Fill in GE (P/Q), QB, or PB from other inputs
    # Compute GE = PB/QB when QB is present and non-zero, otherwise use prodcons
    ge = np.where((~np.isnan(qb)) & (qb != 0) & (~np.isnan(pb)), pb / qb, prodcons)
    # Replace NaN GE with 0 (safe default) and avoid dividing by zero below
    ge = np.nan_to_num(ge, nan=0.0)
    # Only fill QB where it's missing and we have a non-zero GE
    # Use np.divide with where to avoid divide-by-zero warnings when GE is zero
    safe_pb_over_ge = np.empty_like(pb)
    safe_pb_over_ge[:] = np.nan
    np.divide(pb, ge, out=safe_pb_over_ge, where=(ge != 0))
    qb = np.where(np.isnan(qb) & (ge != 0), safe_pb_over_ge, qb)
    # Fill PB where missing from prodcons * QB
    pb = np.where(np.isnan(pb), prodcons * qb, pb)

    # As a last resort, if both PB and QB are missing for a *living* group, set reasonable defaults
    both_missing = np.isnan(pb) & np.isnan(qb) & (types < 2)
    if np.any(both_missing):
        # Use a small default turnover/consumption rate to allow balancing for living groups
        pb = np.where(both_missing, 1.0, pb)
        qb = np.where(both_missing, 1.0, qb)

    # Remember which biomass, PB and EE values were originally missing (before filling defaults)
    original_no_b = np.isnan(biomass)
    original_pb_missing = np.isnan(model_df["PB"].values.astype(float))
    original_no_ee = np.isnan(model_df["EE"].values.astype(float))
    # Groups where B and EE are known but PB is missing → solve for PB
    original_no_pb = original_pb_missing & ~original_no_b & ~original_no_ee

    # Keep biomass as NaN for living groups when originally missing so the solver treats them
    # as unknowns and solves for biomass when EE is provided (this matches R's behavior).
    # Previously we set a default value (1.0) here which prevented solving for biomass for
    # groups with EE specified but missing biomass (e.g., Megabenthos). Do not fill here.
    # biomass = np.where(np.isnan(biomass) & (types < 2), 1.0, biomass)

    # For fleet groups (type == 3), ensure biomass/PB/QB/EE are zero to match R conventions
    fleet_mask = types == 3
    if np.any(fleet_mask):
        biomass[fleet_mask] = np.where(
            np.isnan(biomass[fleet_mask]), 0.0, biomass[fleet_mask]
        )
        pb[fleet_mask] = np.where(np.isnan(pb[fleet_mask]), 0.0, pb[fleet_mask])
        qb[fleet_mask] = np.where(np.isnan(qb[fleet_mask]), 0.0, qb[fleet_mask])
        ee[fleet_mask] = np.where(np.isnan(ee[fleet_mask]), 0.0, ee[fleet_mask])

    # Get landings and discards matrices
    det_groups = groups[dead_idx].tolist()
    fleet_groups = groups[fleet_idx].tolist()

    # Find landings columns (fleet names)
    landing_cols = fleet_groups
    discard_cols = [f"{f}.disc" for f in fleet_groups]

    landmat = np.zeros((ngroups, ngear))
    discardmat = np.zeros((ngroups, ngear))

    for g_idx, col in enumerate(landing_cols):
        if col in model_df.columns:
            landmat[:, g_idx] = model_df[col].values.astype(float)
    for g_idx, col in enumerate(discard_cols):
        if col in model_df.columns:
            discardmat[:, g_idx] = model_df[col].values.astype(float)

    landmat = np.nan_to_num(landmat, nan=0.0)
    discardmat = np.nan_to_num(discardmat, nan=0.0)

    totcatchmat = landmat + discardmat
    totcatch = np.sum(totcatchmat, axis=1)
    _landings = np.sum(landmat, axis=1)
    _discards = np.sum(discardmat, axis=1)

    # Flag missing parameters (use the ORIGINAL missing-biomass mask)
    no_b = original_no_b
    no_ee = np.isnan(ee)
    logger.debug("original_no_b: %s", original_no_b)
    logger.debug("initial no_ee: %s", no_ee)

    # Iterative solve to handle EE>1 cases by capping EE at 1 and re-solving
    # Start with masks from current state
    it_max = 5
    it = 0
    iterations = []
    while True:
        it += 1
        # Extract living group values for this iteration
        living_biomass = biomass[living_idx]
        living_qb = qb[living_idx]
        living_pb = pb[living_idx]
        living_ee = ee[living_idx]
        living_bioacc = bioacc[living_idx]
        living_catch = totcatch[living_idx]
        # Determine which variables are unknown in this iteration
        living_no_b = np.isnan(living_biomass)
        living_no_ee = np.isnan(living_ee)

        # Consumption matrix: each column j shows consumption by predator j
        bio_qb = np.where(
            np.isnan(living_biomass * living_qb), 0.0, living_biomass * living_qb
        )
        # Zero consumption contributions from predators whose biomass is unknown
        # (their predation terms are moved into A instead)
        pred_unknown_mask = np.array(
            [
                original_no_b[pred_global] or np.isnan(biomass[pred_global])
                for pred_global in living_idx
            ],
            dtype=bool,
        )
        bio_qb = np.where(pred_unknown_mask, 0.0, bio_qb)
        cons = nodetrdiet * bio_qb[np.newaxis, :]

        # RHS: exports + predation
        b_vec = living_catch + living_bioacc + np.sum(cons, axis=1)

        # Build A matrix for this iteration
        A = np.zeros((nliving, nliving))
        for i in range(nliving):
            g_idx = living_idx[i]
            if original_no_pb[g_idx]:  # Solve for PB: A[i,i] = B*EE, x[i] = PB
                A[i, i] = living_biomass[i] * living_ee[i]
            elif living_no_ee[i]:  # Solve for EE
                A[i, i] = (
                    living_biomass[i] * living_pb[i]
                    if not np.isnan(living_biomass[i])
                    else living_pb[i] * living_ee[i]
                )
            else:  # Solve for B
                A[i, i] = living_pb[i] * living_ee[i]

        qb_dc = nodetrdiet * living_qb[np.newaxis, :]
        qb_dc = np.nan_to_num(qb_dc, nan=0.0)
        for j in range(nliving):
            # Treat a predator as having unknown biomass if it was originally missing
            # or if we've flipped it to unknown in an earlier iteration (biomass NaN).
            pred_global = living_idx[j]
            pred_unknown = original_no_b[pred_global] or np.isnan(biomass[pred_global])
            if pred_unknown:
                logger.debug(
                    "predator %s treated as unknown (original_no_b=%s, biomass_nan=%s)",
                    pred_global,
                    original_no_b[pred_global],
                    np.isnan(biomass[pred_global]),
                )
            if pred_unknown:
                A[:, j] -= qb_dc[:, j]

        # Validate
        if not np.all(np.isfinite(A)) or not np.all(np.isfinite(b_vec)):
            logger.debug("A finite mask: %s", np.isfinite(A))
            logger.debug("A: %s", A)
            logger.debug("b_vec finite mask: %s", np.isfinite(b_vec))
            logger.debug("b_vec: %s", b_vec)
            raise ValueError(
                "Model is missing or invalid parameters - can't be balanced. Use check_rpath_params() to diagnose."
            )

        # Solve linear system
        n = A.shape[0]
        try:
            if n <= 50:
                x = _gauss_solve(A, b_vec)
            else:
                x = np.linalg.solve(A, b_vec)
        except (ValueError, np.linalg.LinAlgError):
            logger.warning("Primary solver failed, falling back to least-squares")
            try:
                x = np.linalg.lstsq(A, b_vec, rcond=1e-6)[0]
            except (ValueError, np.linalg.LinAlgError) as e:
                raise ValueError(
                    "Unable to solve linear system during balancing"
                ) from e

        # Assign solved values back to living groups for this iteration
        for i, idx in enumerate(living_idx):
            logger.debug(
                "idx=%s iter=%s living_no_b=%s living_no_ee=%s x=%s biomass_before=%s",
                idx,
                it,
                living_no_b[i],
                living_no_ee[i],
                x[i],
                biomass[idx],
            )
            if original_no_pb[idx]:
                pb[idx] = x[i]
                logger.debug("Assigned pb[%s] = %s", idx, x[i])
                # Recalculate QB from estimated PB if QB was originally missing
                orig_qb = model_df["QB"].values.astype(float)[idx]
                if np.isnan(orig_qb) and ge[idx] > 0:
                    qb[idx] = pb[idx] / ge[idx]
                    logger.debug("Recalculated qb[%s] = %s from pb/ge", idx, qb[idx])
            elif living_no_ee[i]:
                ee[idx] = x[i]
                logger.debug("Assigned ee[%s] = %s", idx, x[i])
            if living_no_b[i]:
                biomass[idx] = x[i]
                logger.debug(
                    "Assigned biomass[%s] = %s biomass_after=%s",
                    idx,
                    x[i],
                    biomass[idx],
                )

        # Record iteration snapshot for diagnostics
        iterations.append(
            {
                "iter": it,
                "A": A.copy(),
                "b_vec": b_vec.copy(),
                "x": x.copy(),
                "ee": ee.copy(),
                "biomass": biomass.copy(),
            }
        )
        # Check for EE values > 1 for groups that were originally missing EE
        flipped = False
        # Find groups with EE > 1 eligible for flipping: those whose EE and biomass were both originally missing
        over = [
            (i, idx, ee[idx])
            for i, idx in enumerate(living_idx)
            if original_no_ee[idx]
            and original_no_b[idx]
            and not np.isnan(ee[idx])
            and ee[idx] > 1.0
        ]
        if over:
            # Flip only the largest eligible violation to avoid cascade effects
            over.sort(key=lambda t: t[2], reverse=True)
            i, idx, val = over[0]
            logger.debug(
                "ee[%s] = %s > 1.0 (largest eligible), capping to 1 and solving for biomass next iteration",
                idx,
                val,
            )
            ee[idx] = 1.0
            biomass[idx] = np.nan
            flipped = True

        # If no flips or reached iteration limit, break
        if not flipped or it >= it_max:
            break

    # After iterations, compute final A/b_vec once more for diagnostics
    living_biomass = biomass[living_idx]
    living_qb = qb[living_idx]
    living_pb = pb[living_idx]
    living_ee = ee[living_idx]
    bio_qb = np.where(
        np.isnan(living_biomass * living_qb), 0.0, living_biomass * living_qb
    )
    pred_unknown_mask = np.array(
        [
            original_no_b[pred_global] or np.isnan(biomass[pred_global])
            for pred_global in living_idx
        ],
        dtype=bool,
    )
    bio_qb = np.where(pred_unknown_mask, 0.0, bio_qb)
    cons = nodetrdiet * bio_qb[np.newaxis, :]
    b_vec = living_catch + living_bioacc + np.sum(cons, axis=1)
    A = np.zeros((nliving, nliving))
    for i in range(nliving):
        g_idx = living_idx[i]
        if original_no_pb[g_idx]:
            A[i, i] = living_biomass[i] * living_ee[i]
        elif np.isnan(living_ee[i]):
            A[i, i] = (
                living_biomass[i] * living_pb[i]
                if not np.isnan(living_biomass[i])
                else living_pb[i] * living_ee[i]
            )
        else:
            A[i, i] = living_pb[i] * living_ee[i]
    qb_dc = nodetrdiet * living_qb[np.newaxis, :]
    qb_dc = np.nan_to_num(qb_dc, nan=0.0)
    for j in range(nliving):
        if np.isnan(living_biomass[j]):
            A[:, j] -= qb_dc[:, j]

    # Save final solve results in context for debug output
    # (x and b_vec/A are available from the last iteration)
    try:
        if n <= 50:
            x = _gauss_solve(A, b_vec)
        else:
            x = np.linalg.solve(A, b_vec)
    except (ValueError, np.linalg.LinAlgError):
        logger.warning("Final solver failed, falling back to least-squares")
        try:
            x = np.linalg.lstsq(A, b_vec, rcond=1e-6)[0]
        except (ValueError, np.linalg.LinAlgError) as e:
            raise ValueError("Unable to solve linear system during balancing") from e

    # Calculate M0 (other mortality) for living groups (detritus handled after
    # detritus PB/biomass is computed below)
    m0 = np.zeros(ngroups)
    for i, idx in enumerate(living_idx):
        m0[idx] = pb[idx] * (1 - ee[idx])

    # Flows to detritus from living groups
    # M0 can be negative if EE > 1, but loss flows should be non-negative
    qb_loss = np.where(np.isnan(qb), 0.0, qb)
    loss = np.zeros(ngroups)
    for idx in living_idx:
        # Only positive M0 contributes to detrital flow
        m0_pos = max(0.0, m0[idx])
        loss[idx] = (m0_pos * biomass[idx]) + (
            biomass[idx] * qb_loss[idx] * unassim[idx]
        )
    # Add discards from fleets
    # For each fleet, sum discards across all living groups
    for f_idx, fleet_global_idx in enumerate(fleet_idx):
        loss[fleet_global_idx] = np.sum(discardmat[living_idx, f_idx])

    # Get detritus fate matrix
    detfate = np.zeros((ngroups, ndead))
    for d_idx, det_name in enumerate(det_groups):
        if det_name in model_df.columns:
            detfate[:, d_idx] = model_df[det_name].values.astype(float)
    detfate = np.nan_to_num(detfate, nan=0.0)

    # Detrital inputs
    det_input = np.zeros(ndead)
    for d_idx, det_idx in enumerate(dead_idx):
        det_input[d_idx] = (
            model_df["DetInput"].values[det_idx]
            if "DetInput" in model_df.columns
            else 0.0
        )
    det_input = np.nan_to_num(det_input, nan=0.0)

    # Stage 1: Inputs from living + gear sources only (not other detritus)
    living_fleet_idx = np.concatenate([living_idx, fleet_idx])
    living_fleet_loss = loss[living_fleet_idx]
    living_fleet_detfate = detfate[living_fleet_idx, :]
    detinputs1 = (
        np.sum(living_fleet_loss[:, np.newaxis] * living_fleet_detfate, axis=0)
        + det_input
    )

    # Detritus consumption by living groups
    # diet_values rows are in original order, columns are in living_idx order
    detcons = np.zeros(ndead)
    for d_local_idx, det_global_idx in enumerate(dead_idx):
        # Get diet fraction from this detritus for each living predator
        for pred_local_idx, pred_global_idx in enumerate(living_idx):
            dc_frac = diet_values[det_global_idx, pred_local_idx]
            pred_bio_qb = biomass[pred_global_idx] * qb[pred_global_idx]
            if not np.isnan(pred_bio_qb):
                detcons[d_local_idx] += dc_frac * pred_bio_qb

    # Stage 2: Route unconsumed detritus through detritus-to-detritus fate matrix
    det_unused = np.maximum(0.0, detinputs1 - detcons)
    detdetfate = detfate[dead_idx, :]  # rows for detritus groups only
    detinputs = detinputs1 + np.sum(det_unused[:, np.newaxis] * detdetfate, axis=0)

    # Detritus EE
    with np.errstate(divide="ignore", invalid="ignore"):
        det_ee = np.where(detinputs > 0, detcons / detinputs, 0.0)
    for d_idx, det_idx in enumerate(dead_idx):
        ee[det_idx] = det_ee[d_idx]

    # Set detritus biomass and PB
    default_det_pb = 0.5
    det_pb = np.zeros(ndead)
    det_b = np.zeros(ndead)
    for d_idx, det_idx in enumerate(dead_idx):
        det_pb_input = pb[det_idx]
        det_b_input = biomass[det_idx]

        # Ensure detinputs is non-negative
        det_in = max(0.0, detinputs[d_idx])

        # Treat PB as missing if original input was missing (avoid placeholder 1.0 from both_missing)
        if np.isnan(det_pb_input) or det_pb_input <= 0 or original_pb_missing[det_idx]:
            det_pb[d_idx] = default_det_pb
        else:
            det_pb[d_idx] = det_pb_input

        # If biomass was originally missing (we filled prior defaults), treat as missing
        if np.isnan(det_b_input) or det_b_input <= 0 or original_no_b[det_idx]:
            det_b[d_idx] = det_in / det_pb[d_idx] if det_pb[d_idx] > 0 else 0
        else:
            det_b[d_idx] = det_b_input

        # Recalculate PB based on actual inputs and biomass
        # PB for detritus = total inputs / biomass (turnover rate)
        if det_b[d_idx] > 0 and det_in > 0:
            det_pb[d_idx] = det_in / det_b[d_idx]
        elif det_b[d_idx] > 0:
            # No inputs calculated, use default or input PB
            det_pb[d_idx] = (
                default_det_pb if np.isnan(det_pb_input) else max(0.01, det_pb_input)
            )

        biomass[det_idx] = det_b[d_idx]
        pb[det_idx] = det_pb[d_idx]

    # Compute M0 for detritus groups now that PB and EE are finalized for detritus
    for d_idx, det_idx in enumerate(dead_idx):
        m0[det_idx] = pb[det_idx] * (1 - ee[det_idx])

    # Trophic level calculations
    # TL = 1 + sum_i(DC_ij * TL_i) for each predator j
    # Build full diet matrix for all groups (living + dead)
    n_bio = nliving + ndead
    bio_idx = np.concatenate(
        [living_idx, dead_idx]
    )  # Indices of living+dead in original order

    full_diet = np.zeros((n_bio, n_bio))

    # Fill in diet values - rows are prey (in bio_idx order), cols are predators (living only)
    for i, prey_global_idx in enumerate(bio_idx):
        for j in range(len(living_idx)):
            full_diet[i, j] = diet_values[prey_global_idx, j]

    # Normalize to exclude import
    import_row = (
        diet_values[ngroups, :] if diet_values.shape[0] > ngroups else np.zeros(nliving)
    )
    for j in range(nliving):
        total_diet = np.sum(full_diet[:, j])
        import_frac = import_row[j] if j < len(import_row) else 0
        if total_diet > 0 and (1 - import_frac) > 0:
            full_diet[:, j] = (
                full_diet[:, j] / (1 - import_frac) if import_frac < 1 else 0
            )

    # Set up linear system: (I - DC^T) * TL = 1
    tl_matrix = np.eye(n_bio) - full_diet.T
    b_tl = np.ones(n_bio)

    # Solve TL system robustly
    try:
        n_tl = tl_matrix.shape[0]
        if n_tl <= 50:
            tl_bio = _gauss_solve(tl_matrix, b_tl)
        else:
            tl_bio = np.linalg.solve(tl_matrix, b_tl)
    except (ValueError, np.linalg.LinAlgError):
        logger.warning("TL solve failed, falling back to least-squares")
        tl_bio = np.linalg.lstsq(tl_matrix, b_tl, rcond=1e-6)[0]

    # Map TL back to original order
    tl = np.ones(ngroups)
    for i, idx in enumerate(bio_idx):
        tl[idx] = tl_bio[i]

    # TL for fleets = weighted average of caught groups
    for g_idx, fleet_global_idx in enumerate(fleet_idx):
        geartot = np.sum(landmat[:, g_idx] + discardmat[:, g_idx])
        if geartot > 0:
            caught = (landmat[:, g_idx] + discardmat[:, g_idx]) / geartot
            tl[fleet_global_idx] = 1 + np.sum(caught * tl)

    # Prepare output arrays (in original order)
    biomass_out = biomass.copy()
    pb_out = pb.copy()
    qb_out = qb.copy()
    qb_out[np.isnan(qb_out)] = 0.0
    ee_out = ee.copy()
    ee_out[fleet_idx] = 0.0  # Fleet EE is always 0

    # Calculate GE (gross efficiency), handling zero QB values
    with np.errstate(divide="ignore", invalid="ignore"):
        ge_out = np.where(qb_out > 0, pb_out / qb_out, 0.0)
    ge_out = np.nan_to_num(ge_out, nan=0.0)

    # M0 (other mortality) for living groups, 0 for others
    m0_out = m0.copy()

    # Prepare diet matrix output (rows = groups + import, cols = living predators)
    diet_out = np.zeros((ngroups + 1, nliving))
    diet_out[:ngroups, :] = diet_values[:ngroups, :]
    if diet_values.shape[0] > ngroups:
        diet_out[ngroups, :] = diet_values[ngroups, :]  # Import row

    rpath_obj = Rpath(
        NUM_GROUPS=ngroups,
        NUM_LIVING=nliving,
        NUM_DEAD=ndead,
        NUM_GEARS=ngear,
        Group=groups.astype(str),
        type=types,
        TL=tl,
        Biomass=biomass_out,
        PB=pb_out,
        QB=qb_out,
        EE=ee_out,
        GE=ge_out,
        M0=m0_out,
        BA=bioacc,
        Unassim=unassim,
        DC=diet_out,
        DetFate=detfate,
        Landings=landmat,
        Discards=discardmat,
        eco_name=eco_name,
        eco_area=eco_area,
    )

    if debug:
        diagnostics = {
            "A": A,
            "b_vec": b_vec,
            "x": x,
            "diet_values": diet_values,
            "nodetrdiet": nodetrdiet,
            "living_idx": living_idx,
            "no_b": no_b,
            "no_ee": no_ee,
            "pb": pb,
            "qb": qb,
            "biomass_before": model_df["Biomass"].values.astype(float),
            "biomass_after": biomass.copy(),
            "detinputs": detinputs,
            "detcons": detcons,
            "det_pb": det_pb,
            "det_b": det_b,
            "iterations": iterations,
        }
        return rpath_obj, diagnostics

    return rpath_obj

Parameters

pypath.core.params

Parameter data structures for PyPath.

This module contains the RpathParams class and functions for creating, reading, writing, and validating Ecopath parameter files.

RpathStanzaParams dataclass

Parameters for multi-stanza (age-structured) groups.

Attributes:

Name Type Description
n_stanza_groups int

Number of stanza group sets (e.g., juvenile + adult = 1 set)

stgroups DataFrame

Stanza group parameters (VBGF_Ksp, VBGF_d, Wmat, etc.)

stindiv DataFrame

Individual stanza parameters (First, Last, Z, Leading)

Source code in pypath/core/params.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
@dataclass
class RpathStanzaParams:
    """Parameters describing multi-stanza (age-structured) groups.

    A stanza "set" links several model groups that represent life stages
    of one population (e.g. juvenile + adult).

    Attributes
    ----------
    n_stanza_groups : int
        Number of stanza sets in the model (0 when no stanza groups exist).
    stgroups : pd.DataFrame, optional
        Per-set parameters (VBGF_Ksp, VBGF_d, Wmat, ...); None if unused.
    stindiv : pd.DataFrame, optional
        Per-stanza parameters (First, Last, Z, Leading); None if unused.
    """

    n_stanza_groups: int = 0
    stgroups: Optional[pd.DataFrame] = None
    stindiv: Optional[pd.DataFrame] = None

RpathParams dataclass

Container for Rpath model parameters.

This class holds all parameters needed to create a balanced Ecopath model.

Attributes:

Name Type Description
model DataFrame

Basic parameters for each group including: - Group: Group name - Type: 0=consumer, 1=producer, 2=detritus, 3=fleet - Biomass: Biomass (t/km²) - PB: Production/Biomass ratio (1/year) - QB: Consumption/Biomass ratio (1/year) - EE: Ecotrophic efficiency - ProdCons: Production/Consumption ratio (GE) - BioAcc: Biomass accumulation rate - Unassim: Unassimilated consumption fraction - DetInput: Detrital input (for detritus groups) Plus columns for detritus fate and landings/discards by fleet.

diet DataFrame

Diet composition matrix where rows are prey (including Import) and columns are predators. Values are fractions (0-1).

stanzas RpathStanzaParams

Multi-stanza (age-structured) group parameters.

pedigree DataFrame

Data quality/pedigree information for parameters.

remarks DataFrame

Comments/remarks for parameter values. Has same structure as model with string values containing remarks for each cell.

Examples:

>>> params = create_rpath_params(
...     groups=['Phyto', 'Zoo', 'Fish', 'Detritus', 'Fleet'],
...     types=[1, 0, 0, 2, 3]
... )
>>> params.model['Biomass'] = [10.0, 5.0, 2.0, 100.0, np.nan]
Source code in pypath/core/params.py
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
@dataclass
class RpathParams:
    """Container for Rpath model parameters.

    Holds everything needed to build a balanced Ecopath model: the basic
    per-group parameter table, the diet-composition matrix, optional
    stanza definitions, and optional pedigree/remark tables.

    Attributes
    ----------
    model : pd.DataFrame
        Basic parameters for each group:
        - Group: Group name
        - Type: 0=consumer, 1=producer, 2=detritus, 3=fleet
        - Biomass: Biomass (t/km²)
        - PB: Production/Biomass ratio (1/year)
        - QB: Consumption/Biomass ratio (1/year)
        - EE: Ecotrophic efficiency
        - ProdCons: Production/Consumption ratio (GE)
        - BioAcc: Biomass accumulation rate
        - Unassim: Unassimilated consumption fraction
        - DetInput: Detrital input (for detritus groups)
        Plus columns for detritus fate and landings/discards by fleet.
    diet : pd.DataFrame
        Diet composition matrix; rows are prey (including Import),
        columns are predators, values are fractions in [0, 1].
    stanzas : RpathStanzaParams
        Multi-stanza (age-structured) group parameters.
    pedigree : pd.DataFrame, optional
        Data quality/pedigree information for parameters.
    remarks : pd.DataFrame, optional
        Cell-by-cell comments mirroring the structure of ``model``.
    ecosim : dict, optional
        Extra Ecosim-related settings, if any.

    Examples
    --------
    >>> params = create_rpath_params(
    ...     groups=['Phyto', 'Zoo', 'Fish', 'Detritus', 'Fleet'],
    ...     types=[1, 0, 0, 2, 3]
    ... )
    >>> params.model['Biomass'] = [10.0, 5.0, 2.0, 100.0, np.nan]
    """

    model: pd.DataFrame
    diet: pd.DataFrame
    stanzas: RpathStanzaParams = field(default_factory=RpathStanzaParams)
    pedigree: Optional[pd.DataFrame] = None
    remarks: Optional[pd.DataFrame] = None
    ecosim: Optional[Dict[str, Any]] = None

    def __repr__(self) -> str:
        # Summarize group composition from the Type column.
        type_col = self.model["Type"]
        n_groups = len(self.model)
        n_living = int((type_col <= 1).sum())
        n_dead = int((type_col == 2).sum())
        n_fleet = int((type_col == 3).sum())
        return (
            f"RpathParams(\n"
            f"  groups={n_groups} (living={n_living}, detritus={n_dead}, fleets={n_fleet})\n"
            f"  stanzas={self.stanzas.n_stanza_groups}\n"
            f")"
        )

get_groups_by_type

get_groups_by_type(groups: List[str], types: List[int]) -> Dict[str, List[str]]

Return dict mapping type names to group lists.

Parameters:

Name Type Description Default
groups list of str

Names of all groups in the model.

required
types list of int

Type code for each group (0=consumer, 1=producer, 2=detritus, 3=fleet).

required

Returns:

Type Description
dict

Dictionary with keys: 'consumers', 'producers', 'detritus', 'fleets', 'living' (types 0 and 1), and 'prey' (types 0, 1, and 2).

Source code in pypath/core/params.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
def get_groups_by_type(groups: List[str], types: List[int]) -> Dict[str, List[str]]:
    """Partition group names by their Ecopath type code.

    Parameters
    ----------
    groups : list of str
        Names of all groups in the model.
    types : list of int
        Type code for each group (0=consumer, 1=producer, 2=detritus, 3=fleet).

    Returns
    -------
    dict
        Dictionary with keys: 'consumers', 'producers', 'detritus', 'fleets',
        'living' (types 0 and 1), and 'prey' (types 0, 1, and 2).
    """
    pairs = list(zip(list(groups), list(types)))

    def _select(pred):
        # Names of the groups whose type code satisfies the predicate,
        # preserving input order.
        return [name for name, code in pairs if pred(code)]

    return {
        "consumers": _select(lambda c: c == 0),
        "producers": _select(lambda c: c == 1),
        "detritus": _select(lambda c: c == 2),
        "fleets": _select(lambda c: c == 3),
        "living": _select(lambda c: c < 2),
        "prey": _select(lambda c: c < 3),
    }

create_rpath_params

create_rpath_params(groups: List[str], types: List[int], stgroups: Optional[List[str]] = None) -> RpathParams

Create a shell RpathParams object with empty parameter values.

Creates the basic structure for an Ecopath model that can be filled in with actual parameter values.

Parameters:

Name Type Description Default
groups list of str

Names of all groups in the model (living, detritus, and fleets).

required
types list of int

Type code for each group: - 0: Consumer - 1: Primary producer (or value 0-1 for mixotrophs) - 2: Detritus - 3: Fleet/fishery

required
stgroups list of str

Stanza group assignment for each group. Use None for non-stanza groups. Groups with the same stanza group name will be linked (e.g., juvenile/adult).

None

Returns:

Type Description
RpathParams

Parameter object with NA values ready to be filled in.

Examples:

>>> params = create_rpath_params(
...     groups=['Phyto', 'Zoo', 'SmallFish', 'LargeFish', 'Detritus', 'Fleet'],
...     types=[1, 0, 0, 0, 2, 3],
...     stgroups=[None, None, 'Fish', 'Fish', None, None]
... )
Source code in pypath/core/params.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
def create_rpath_params(
    groups: List[str], types: List[int], stgroups: Optional[List[str]] = None
) -> RpathParams:
    """Create a shell RpathParams object with empty parameter values.

    Creates the basic structure for an Ecopath model that can be filled
    in with actual parameter values.

    Parameters
    ----------
    groups : list of str
        Names of all groups in the model (living, detritus, and fleets).
    types : list of int
        Type code for each group:
        - 0: Consumer
        - 1: Primary producer (or value 0-1 for mixotrophs)
        - 2: Detritus
        - 3: Fleet/fishery
    stgroups : list of str, optional
        Stanza group assignment for each group. Use None for non-stanza groups.
        Groups with the same stanza group name will be linked (e.g., juvenile/adult).

    Returns
    -------
    RpathParams
        Parameter object with NA values ready to be filled in.

    Raises
    ------
    ValueError
        If ``groups`` and ``types`` differ in length.

    Examples
    --------
    >>> params = create_rpath_params(
    ...     groups=['Phyto', 'Zoo', 'SmallFish', 'LargeFish', 'Detritus', 'Fleet'],
    ...     types=[1, 0, 0, 0, 2, 3],
    ...     stgroups=[None, None, 'Fish', 'Fish', None, None]
    ... )
    """
    if len(groups) != len(types):
        raise ValueError("groups and types must have the same length")

    n_groups = len(groups)

    # Identify group types
    gbt = get_groups_by_type(groups, types)
    pred_groups = gbt["living"]  # Consumers/producers
    prey_groups = gbt["prey"]  # All except fleets
    det_groups = gbt["detritus"]
    fleet_groups = gbt["fleets"]

    # Create model DataFrame; parameter columns start as NaN (to be filled in)
    model_data = {
        "Group": groups,
        "Type": types,
        "Biomass": [np.nan] * n_groups,
        "PB": [np.nan] * n_groups,
        "QB": [np.nan] * n_groups,
        "EE": [np.nan] * n_groups,
        "ProdCons": [np.nan] * n_groups,
        "BioAcc": [np.nan] * n_groups,
        "Unassim": [np.nan] * n_groups,
        "DetInput": [np.nan] * n_groups,
    }

    # DetInput defaults to 0.0 for detritus groups.
    # (Previously this loop was nested inside the detrital-fate loop below and
    # redundantly re-ran once per detritus column; hoisting it gives the same
    # result with a single pass.)
    for i, t in enumerate(types):
        if t == 2:
            model_data["DetInput"][i] = 0.0

    # Add one detrital-fate column per detritus group
    for det in det_groups:
        model_data[det] = [np.nan] * n_groups

    # Add landing and discard columns for each fleet:
    # 0.0 for the biological (non-fleet) rows, NaN for the fleet rows
    n_bio = len([t for t in types if t < 3])  # Non-fleet groups
    for fleet in fleet_groups:
        # Landings
        model_data[fleet] = [0.0] * n_bio + [np.nan] * len(fleet_groups)
        # Discards
        model_data[f"{fleet}.disc"] = [0.0] * n_bio + [np.nan] * len(fleet_groups)

    model = pd.DataFrame(model_data)

    # Create diet DataFrame: rows are prey plus an Import row, columns are
    # the living (consumer/producer) predators
    diet_data = {"Group": prey_groups + ["Import"]}
    for pred in pred_groups:
        diet_data[pred] = [np.nan] * (len(prey_groups) + 1)
    diet = pd.DataFrame(diet_data)

    # Create stanza parameters if provided
    stanza_params = RpathStanzaParams()
    if stgroups is not None and any(s is not None for s in stgroups):
        # Unique stanza sets, sorted for a stable StGroupNum assignment
        unique_stgroups = sorted(set(s for s in stgroups if s is not None))
        n_stanza_groups = len(unique_stgroups)

        # Count stanzas per set
        nstanzas = [sum(1 for s in stgroups if s == sg) for sg in unique_stgroups]

        stgroups_df = pd.DataFrame(
            {
                "StGroupNum": range(1, n_stanza_groups + 1),
                "StanzaGroup": unique_stgroups,
                "nstanzas": nstanzas,
                "VBGF_Ksp": [np.nan] * n_stanza_groups,
                "VBGF_d": [0.66667] * n_stanza_groups,
                "Wmat": [np.nan] * n_stanza_groups,
                "BAB": [0.0] * n_stanza_groups,
                "RecPower": [1.0] * n_stanza_groups,
            }
        )

        # Individual stanza records (group type is not needed here)
        stindiv_records = []
        for i, (g, sg) in enumerate(zip(groups, stgroups)):
            if sg is not None:
                stindiv_records.append(
                    {
                        "StGroupNum": unique_stgroups.index(sg) + 1,
                        "StanzaNum": 0,  # Will be assigned later
                        "GroupNum": i + 1,
                        "Group": g,
                        "First": np.nan,
                        "Last": np.nan,
                        "Z": np.nan,
                        "Leading": np.nan,
                    }
                )

        stindiv_df = pd.DataFrame(stindiv_records)

        stanza_params = RpathStanzaParams(
            n_stanza_groups=n_stanza_groups, stgroups=stgroups_df, stindiv=stindiv_df
        )

    # Create pedigree DataFrame (data-quality weights default to 1.0)
    pedigree_data = {
        "Group": groups,
        "Biomass": [1.0] * n_groups,
        "PB": [1.0] * n_groups,
        "QB": [1.0] * n_groups,
        "Diet": [1.0] * n_groups,
    }
    # Add fleet pedigree columns
    for fleet in fleet_groups:
        pedigree_data[fleet] = [1.0] * n_groups
    pedigree = pd.DataFrame(pedigree_data)

    return RpathParams(model=model, diet=diet, stanzas=stanza_params, pedigree=pedigree)

read_rpath_params

read_rpath_params(model_file: Union[str, Path], diet_file: Union[str, Path], pedigree_file: Optional[Union[str, Path]] = None, stanza_group_file: Optional[Union[str, Path]] = None, stanza_file: Optional[Union[str, Path]] = None) -> RpathParams

Read Rpath parameters from CSV files.

Parameters:

Name Type Description Default
model_file str or Path

Path to CSV file with model parameters.

required
diet_file str or Path

Path to CSV file with diet composition matrix.

required
pedigree_file str or Path

Path to CSV file with pedigree information.

None
stanza_group_file str or Path

Path to CSV file with stanza group parameters.

None
stanza_file str or Path

Path to CSV file with individual stanza parameters.

None

Returns:

Type Description
RpathParams

Parameter object populated from files.

Source code in pypath/core/params.py
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
def read_rpath_params(
    model_file: Union[str, Path],
    diet_file: Union[str, Path],
    pedigree_file: Optional[Union[str, Path]] = None,
    stanza_group_file: Optional[Union[str, Path]] = None,
    stanza_file: Optional[Union[str, Path]] = None,
) -> RpathParams:
    """Read Rpath parameters from CSV files.

    Parameters
    ----------
    model_file : str or Path
        Path to CSV file with model parameters.
    diet_file : str or Path
        Path to CSV file with diet composition matrix.
    pedigree_file : str or Path, optional
        Path to CSV file with pedigree information. When omitted, a default
        pedigree with all weights set to 1.0 is generated.
    stanza_group_file : str or Path, optional
        Path to CSV file with stanza group parameters. Only used when
        ``stanza_file`` is also given.
    stanza_file : str or Path, optional
        Path to CSV file with individual stanza parameters. Only used when
        ``stanza_group_file`` is also given.

    Returns
    -------
    RpathParams
        Parameter object populated from files.
    """
    model = pd.read_csv(model_file)
    diet = pd.read_csv(diet_file)

    # Read stanza files if provided (both files are required together)
    stanza_params = RpathStanzaParams()
    if stanza_group_file is not None and stanza_file is not None:
        stgroups = pd.read_csv(stanza_group_file)
        stindiv = pd.read_csv(stanza_file)
        stanza_params = RpathStanzaParams(
            n_stanza_groups=len(stgroups), stgroups=stgroups, stindiv=stindiv
        )

    # Read pedigree if provided
    if pedigree_file is not None:
        pedigree = pd.read_csv(pedigree_file)
    else:
        # Create default pedigree. Column named "Biomass" (not "B") so the
        # default matches the pedigree produced by create_rpath_params.
        fleet_groups = model[model["Type"] == 3]["Group"].tolist()
        pedigree_data = {
            "Group": model["Group"].tolist(),
            "Biomass": [1.0] * len(model),
            "PB": [1.0] * len(model),
            "QB": [1.0] * len(model),
            "Diet": [1.0] * len(model),
        }
        for fleet in fleet_groups:
            pedigree_data[fleet] = [1.0] * len(model)
        pedigree = pd.DataFrame(pedigree_data)

    return RpathParams(model=model, diet=diet, stanzas=stanza_params, pedigree=pedigree)

write_rpath_params

write_rpath_params(params: RpathParams, eco_name: str, path: Union[str, Path] = '') -> None

Write Rpath parameters to CSV files.

Parameters:

Name Type Description Default
params RpathParams

Parameter object to write.

required
eco_name str

Ecosystem name used in file names.

required
path str or Path

Directory path for output files.

''
Source code in pypath/core/params.py
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
def write_rpath_params(
    params: RpathParams, eco_name: str, path: Union[str, Path] = ""
) -> None:
    """Write Rpath parameters to CSV files.

    Emits ``{eco_name}_model.csv`` and ``{eco_name}_diet.csv``, plus
    ``_pedigree.csv`` when a pedigree table exists and
    ``_stanza_groups.csv`` / ``_stanzas.csv`` when stanza groups exist.

    Parameters
    ----------
    params : RpathParams
        Parameter object to write.
    eco_name : str
        Ecosystem name used in file names.
    path : str or Path
        Directory path for output files.
    """
    out_dir = Path(path)

    # Table -> filename pairs; optional tables are appended conditionally.
    outputs = [
        (params.model, f"{eco_name}_model.csv"),
        (params.diet, f"{eco_name}_diet.csv"),
    ]
    if params.pedigree is not None:
        outputs.append((params.pedigree, f"{eco_name}_pedigree.csv"))
    if params.stanzas.n_stanza_groups > 0:
        outputs.append((params.stanzas.stgroups, f"{eco_name}_stanza_groups.csv"))
        outputs.append((params.stanzas.stindiv, f"{eco_name}_stanzas.csv"))

    for frame, filename in outputs:
        frame.to_csv(out_dir / filename, index=False)

check_rpath_params

check_rpath_params(params: RpathParams) -> bool

Check Rpath parameter files for consistency.

Validates that parameter files are filled out correctly and data is in the expected locations.

Parameters:

Name Type Description Default
params RpathParams

Parameter object to validate.

required

Returns:

Type Description
bool

True if parameters are valid, False otherwise.

Warns:

Type Description
UserWarning

Issued via warnings.warn for each validation issue found.

Source code in pypath/core/params.py
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
def check_rpath_params(params: RpathParams) -> bool:
    """Check Rpath parameter files for consistency.

    Validates that parameter files are filled out correctly and data
    is in the expected locations.

    Parameters
    ----------
    params : RpathParams
        Parameter object to validate.

    Returns
    -------
    bool
        True if parameters are valid, False otherwise.

    Warns
    -----
    UserWarning
        One warning (via ``warnings.warn``) per validation issue found.
    """
    model = params.model
    diet = params.diet

    n_warnings = 0

    # Check that all types are represented
    type_requirements = [
        (0, "consumer"),
        (1, "producer"),
        (2, "detrital group"),
        (3, "fleet"),
    ]
    for type_val, type_name in type_requirements:
        if len(model[model["Type"] == type_val]) == 0:
            warnings.warn(f"Model must contain at least 1 {type_name}")
            n_warnings += 1

    # Check that either Biomass or EE is provided for living groups
    living = model[model["Type"] < 2]
    missing_both = living[living["Biomass"].isna() & living["EE"].isna()]
    if len(missing_both) > 0:
        groups = missing_both["Group"].tolist()
        warnings.warn(f"Groups missing both Biomass and EE: {groups}")
        n_warnings += 1

    # Inform (not warn) about PB values that can be estimated from B + EE
    has_b_ee_no_pb = living[
        ~living["Biomass"].isna() & ~living["EE"].isna() & living["PB"].isna()
    ]
    if len(has_b_ee_no_pb) > 0:
        groups = has_b_ee_no_pb["Group"].tolist()
        logger.info("Groups with B+EE but missing PB (will be estimated): %s", groups)

    # Check that consumers have QB or ProdCons
    consumers = model[model["Type"] < 1]
    missing_qb = consumers[consumers["QB"].isna() & consumers["ProdCons"].isna()]
    if len(missing_qb) > 0:
        groups = missing_qb["Group"].tolist()
        warnings.warn(f"Consumers missing both QB and ProdCons: {groups}")
        n_warnings += 1

    # Check diet columns sum to ~1 for consumers.
    # For fractional (mixotroph) types the expected sum is 1 - Type,
    # so pure producers (Type 1) are expected to sum to 0.
    pred_groups = model[model["Type"] < 2]["Group"].tolist()

    for pred in pred_groups:
        if pred in diet.columns:
            col_sum = diet[pred].sum()
            pred_type = model[model["Group"] == pred]["Type"].values[0]
            expected = 1.0 - pred_type  # Producers have diet = 0
            if not np.isclose(col_sum, expected, atol=0.01) and not np.isnan(col_sum):
                warnings.warn(
                    f"Diet column '{pred}' sums to {col_sum:.3f}, expected ~{expected}"
                )
                n_warnings += 1

    # Check Import row exists
    if "Import" not in diet["Group"].values and "import" not in diet["Group"].values:
        warnings.warn("Diet matrix is missing the Import row")
        n_warnings += 1

    if n_warnings == 0:
        logger.info("Rpath parameter file is functional.")
        return True
    logger.warning("Rpath parameter file needs attention! (%d warnings)", n_warnings)
    return False

Ecosim (Dynamic Simulation)

pypath.core.ecosim

Ecosim dynamic simulation implementation.

This module will contain the core Ecosim simulation engine, including the derivative calculations and integration methods.

RsimParams dataclass

Dynamic simulation parameters.

Contains all parameters needed to run an Ecosim simulation, derived from a balanced Rpath model.

Attributes:

Name Type Description
NUM_GROUPS int

Total number of groups

NUM_LIVING int

Number of living groups

NUM_DEAD int

Number of detritus groups

NUM_GEARS int

Number of fishing fleets

NUM_BIO int

Number of biomass groups (living + dead)

spname list

Species/group names with "Outside" as first element

spnum ndarray

Species numbers (0 to NUM_GROUPS)

B_BaseRef ndarray

Reference biomass values

MzeroMort ndarray

Other mortality rate (M0 = PB * (1-EE))

UnassimRespFrac ndarray

Unassimilated fraction of consumption

ActiveRespFrac ndarray

Active respiration fraction

FtimeAdj ndarray

Foraging time adjustment rate

FtimeQBOpt ndarray

Optimal Q/B for foraging time

PBopt ndarray

Base production/biomass

NoIntegrate ndarray

Fast equilibrium flag (0 = fast eq, else normal)

Source code in pypath/core/ecosim.py
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
@dataclass
class RsimParams:
    """Dynamic simulation parameters.

    Contains all parameters needed to run an Ecosim simulation,
    derived from a balanced Rpath model.

    Attributes
    ----------
    NUM_GROUPS : int
        Total number of groups
    NUM_LIVING : int
        Number of living groups
    NUM_DEAD : int
        Number of detritus groups
    NUM_GEARS : int
        Number of fishing fleets
    NUM_BIO : int
        Number of biomass groups (living + dead)
    spname : list
        Species/group names with "Outside" as first element
    spnum : np.ndarray
        Species numbers (0 to NUM_GROUPS)
    B_BaseRef : np.ndarray
        Reference biomass values
    MzeroMort : np.ndarray
        Other mortality rate (M0 = PB * (1-EE))
    UnassimRespFrac : np.ndarray
        Unassimilated fraction of consumption
    ActiveRespFrac : np.ndarray
        Active respiration fraction
    FtimeAdj : np.ndarray
        Foraging time adjustment rate
    FtimeQBOpt : np.ndarray
        Optimal Q/B for foraging time
    PBopt : np.ndarray
        Base production/biomass
    NoIntegrate : np.ndarray
        Fast equilibrium flag (0 = fast eq, else normal)
    HandleSelf : np.ndarray
        Per-group handling-time self-limitation parameter
    ScrambleSelf : np.ndarray
        Per-group scramble-competition self-limitation parameter

    Predator-Prey Link Arrays
    -------------------------
    PreyFrom : np.ndarray
        Prey index for each link
    PreyTo : np.ndarray
        Predator index for each link
    QQ : np.ndarray
        Base consumption rate for each link
    DD : np.ndarray
        Handling time parameter
    VV : np.ndarray
        Vulnerability parameter
    HandleSwitch : np.ndarray
        Prey switching exponent
    PredPredWeight : np.ndarray
        Predator density weight
    PreyPreyWeight : np.ndarray
        Prey density weight
    NumPredPreyLinks : int
        Number of predator-prey links

    Fishing Link Arrays
    -------------------
    FishFrom : np.ndarray
        Fished group index
    FishThrough : np.ndarray
        Fleet index
    FishQ : np.ndarray
        Fishing rate (catch/biomass)
    FishTo : np.ndarray
        Destination (0=outside, or detritus)
    NumFishingLinks : int
        Number of fishing links

    Detritus Link Arrays
    --------------------
    DetFrac : np.ndarray
        Fraction flowing to detritus
    DetFrom : np.ndarray
        Source group index
    DetTo : np.ndarray
        Detritus destination index
    NumDetLinks : int
        Number of detritus links

    Other Attributes
    ----------------
    PP_type : np.ndarray, optional
        Group type codes (0=consumer, 1=producer, 2=detritus); None by default
    BURN_YEARS : int
        Burn-in years before outputs count (-1 disables burn-in)
    COUPLED : int
        Coupling flag for the simulation
    RK4_STEPS : int
        Number of Runge-Kutta 4 sub-steps per time step
    SENSE_LIMIT : tuple of float
        (lower, upper) clamp applied during sensitivity scaling
    MONTHLY_M0_ADJUST : bool
        Whether monthly M0 algebraic adjustments run during the simulation
    """

    NUM_GROUPS: int
    NUM_LIVING: int
    NUM_DEAD: int
    NUM_GEARS: int
    NUM_BIO: int
    spname: List[str]
    spnum: np.ndarray
    B_BaseRef: np.ndarray
    MzeroMort: np.ndarray
    UnassimRespFrac: np.ndarray
    ActiveRespFrac: np.ndarray
    FtimeAdj: np.ndarray
    FtimeQBOpt: np.ndarray
    PBopt: np.ndarray
    NoIntegrate: np.ndarray
    HandleSelf: np.ndarray
    ScrambleSelf: np.ndarray

    # Predator-prey links
    PreyFrom: np.ndarray
    PreyTo: np.ndarray
    QQ: np.ndarray
    DD: np.ndarray
    VV: np.ndarray
    HandleSwitch: np.ndarray
    PredPredWeight: np.ndarray
    PreyPreyWeight: np.ndarray
    NumPredPreyLinks: int

    # Fishing links
    FishFrom: np.ndarray
    FishThrough: np.ndarray
    FishQ: np.ndarray
    FishTo: np.ndarray
    NumFishingLinks: int

    # Detritus links
    DetFrac: np.ndarray
    DetFrom: np.ndarray
    DetTo: np.ndarray
    NumDetLinks: int

    # Group type information
    # PP_type: 0=consumer, 1=producer, 2=detritus
    # (Optional: the default is None, so the annotation must allow it.)
    PP_type: Optional[np.ndarray] = None

    # Integration parameters
    BURN_YEARS: int = -1
    COUPLED: int = 1
    RK4_STEPS: int = 4
    SENSE_LIMIT: Tuple[float, float] = (1e-4, 1e4)
    # Control whether monthly M0 algebraic adjustments are performed during the run
    MONTHLY_M0_ADJUST: bool = True

RsimState dataclass

State variables for Ecosim simulation.

Attributes:

Name Type Description
Biomass ndarray

Current biomass values

N ndarray

Numbers (for stanza groups)

Ftime ndarray

Foraging time multiplier

Source code in pypath/core/ecosim.py
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
@dataclass
class RsimState:
    """State variables for Ecosim simulation.

    Arrays are sized NUM_GROUPS + 1; index 0 is the "Outside" slot
    (see rsim_state, which seeds Biomass from RsimParams.B_BaseRef).

    Attributes
    ----------
    Biomass : np.ndarray
        Current biomass values
    N : np.ndarray
        Numbers (for stanza groups)
    Ftime : np.ndarray
        Foraging time multiplier
    SpawnBio : np.ndarray, optional
        Stanza spawning biomass; None unless multi-stanza groups are in use
        (NOTE(review): semantics inferred from the name -- confirm)
    StanzaPred : np.ndarray, optional
        Stanza predation term; None unless stanzas are in use
        (NOTE(review): semantics inferred from the name -- confirm)
    EggsStanza : np.ndarray, optional
        Stanza egg production; None unless stanzas are in use
    NageS : np.ndarray, optional
        Numbers-at-age by stanza; None unless stanzas are in use
    WageS : np.ndarray, optional
        Weight-at-age by stanza; None unless stanzas are in use
    QageS : np.ndarray, optional
        Consumption-at-age by stanza; None unless stanzas are in use
    """

    Biomass: np.ndarray
    N: np.ndarray
    Ftime: np.ndarray
    SpawnBio: Optional[np.ndarray] = None
    StanzaPred: Optional[np.ndarray] = None
    EggsStanza: Optional[np.ndarray] = None
    NageS: Optional[np.ndarray] = None
    WageS: Optional[np.ndarray] = None
    QageS: Optional[np.ndarray] = None

RsimForcing dataclass

Forcing matrices for environmental and biological effects.

All matrices are (n_months x n_groups+1) where first column is "Outside".

Attributes:

Name Type Description
ForcedPrey ndarray

Prey availability multiplier

ForcedMort ndarray

Mortality multiplier

ForcedRecs ndarray

Recruitment multiplier

ForcedSearch ndarray

Search rate multiplier

ForcedActresp ndarray

Active respiration multiplier

ForcedMigrate ndarray

Migration rate

ForcedBio ndarray

Forced biomass values (-1 = not forced)

Source code in pypath/core/ecosim.py
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
@dataclass
class RsimForcing:
    """Forcing matrices for environmental and biological effects.

    All matrices are (n_months x n_groups+1) where first column is "Outside".
    rsim_forcing initializes the multipliers to 1.0 (no effect), migration
    to 0.0, and ForcedBio to -1.0 (not forced).

    Attributes
    ----------
    ForcedPrey : np.ndarray
        Prey availability multiplier
    ForcedMort : np.ndarray
        Mortality multiplier
    ForcedRecs : np.ndarray
        Recruitment multiplier
    ForcedSearch : np.ndarray
        Search rate multiplier
    ForcedActresp : np.ndarray
        Active respiration multiplier
    ForcedMigrate : np.ndarray
        Migration rate
    ForcedBio : np.ndarray
        Forced biomass values (-1 = not forced)
    ForcedEffort : np.ndarray, optional
        Effort multiplier (NOTE(review): overlaps RsimFishing.ForcedEffort,
        which is shaped per-gear -- confirm which one the run loop reads)
    """

    ForcedPrey: np.ndarray
    ForcedMort: np.ndarray
    ForcedRecs: np.ndarray
    ForcedSearch: np.ndarray
    ForcedActresp: np.ndarray
    ForcedMigrate: np.ndarray
    ForcedBio: np.ndarray
    ForcedEffort: Optional[np.ndarray] = None

RsimFishing dataclass

Fishing forcing matrices.

Attributes:

Name Type Description
ForcedEffort ndarray

Monthly effort multiplier (n_months x n_gears+1)

ForcedFRate ndarray

Annual F rate by species (n_years x n_bio+1)

ForcedCatch ndarray

Annual forced catch by species (n_years x n_bio+1)

Source code in pypath/core/ecosim.py
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
@dataclass
class RsimFishing:
    """Fishing forcing matrices.

    rsim_fishing initializes ForcedEffort to 1.0 (baseline effort) and the
    annual ForcedFRate / ForcedCatch matrices to 0.0 (no forcing).

    Attributes
    ----------
    ForcedEffort : np.ndarray
        Monthly effort multiplier (n_months x n_gears+1)
    ForcedFRate : np.ndarray
        Annual F rate by species (n_years x n_bio+1)
    ForcedCatch : np.ndarray
        Annual forced catch by species (n_years x n_bio+1)
    """

    ForcedEffort: np.ndarray
    ForcedFRate: np.ndarray
    ForcedCatch: np.ndarray

RsimScenario dataclass

Complete Ecosim simulation scenario.

Attributes:

Name Type Description
params RsimParams

Dynamic simulation parameters

start_state RsimState

Initial state variables

forcing RsimForcing

Environmental forcing matrices

fishing RsimFishing

Fishing forcing matrices

stanzas (RsimStanzas, optional)

Multi-stanza parameters (None when the model has no stanza groups)

eco_name str

Ecosystem name

start_year int

First year of simulation

ecospace (EcospaceParams, optional)

Spatial ECOSPACE parameters (if None, runs non-spatial Ecosim)

environmental_drivers (EnvironmentalDrivers, optional)

Time-varying environmental layers for habitat capacity

Source code in pypath/core/ecosim.py
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
@dataclass
class RsimScenario:
    """Complete Ecosim simulation scenario.

    Attributes
    ----------
    params : RsimParams
        Dynamic simulation parameters
    start_state : RsimState
        Initial state variables
    forcing : RsimForcing
        Environmental forcing matrices
    fishing : RsimFishing
        Fishing forcing matrices
    stanzas : RsimStanzas, optional
        Multi-stanza parameters (None when the model has no stanza groups)
    stanza_biomass : np.ndarray, optional
        Stanza biomass time series (filled during the run if stanzas present)
    eco_name : str
        Ecosystem name
    start_year : int
        First year of simulation
    ecospace : EcospaceParams, optional
        Spatial ECOSPACE parameters (if None, runs non-spatial Ecosim)
    environmental_drivers : EnvironmentalDrivers, optional
        Time-varying environmental layers for habitat capacity
    """

    params: RsimParams
    start_state: RsimState
    forcing: RsimForcing
    fishing: RsimFishing
    stanzas: Optional[RsimStanzas] = None
    # Optional stanza biomass time series (filled during run if stanzas present)
    stanza_biomass: Optional[np.ndarray] = None
    eco_name: str = ""
    start_year: int = 1
    # String annotations below are forward references to avoid a circular import
    ecospace: Optional["EcospaceParams"] = (
        None  # Forward reference to avoid circular import
    )
    environmental_drivers: Optional["EnvironmentalDrivers"] = None

RsimOutput dataclass

Output from Ecosim simulation run.

Attributes:

Name Type Description
out_Biomass ndarray

Monthly biomass values (n_months x n_groups+1)

out_Catch ndarray

Monthly catch values (n_months x n_groups+1)

out_Gear_Catch ndarray

Monthly catch by gear link

annual_Biomass ndarray

Annual biomass (n_years x n_groups+1)

annual_Catch ndarray

Annual catch (n_years x n_groups+1)

annual_QB ndarray

Annual Q/B values

annual_Qlink ndarray

Annual consumption by pred-prey pair

stanza_biomass ndarray or None

Optional monthly stanza-resolved biomass (n_months x n_groups+1)

end_state RsimState

Final state at end of simulation

crash_year int

Year of crash (-1 if no crash)

crashed_groups set

Set of group indices that crashed (biomass < threshold)

pred ndarray

Predator names for Qlink columns

prey ndarray

Prey names for Qlink columns

start_state RsimState

Initial state (copy)

params dict

Summary parameters

Source code in pypath/core/ecosim.py
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
@dataclass
class RsimOutput:
    """Output from Ecosim simulation run.

    Attributes
    ----------
    out_Biomass : np.ndarray
        Monthly biomass values (n_months x n_groups+1)
    out_Catch : np.ndarray
        Monthly catch values (n_months x n_groups+1)
    out_Gear_Catch : np.ndarray
        Monthly catch by gear link
    annual_Biomass : np.ndarray
        Annual biomass (n_years x n_groups+1)
    annual_Catch : np.ndarray
        Annual catch (n_years x n_groups+1)
    annual_QB : np.ndarray
        Annual Q/B values
    annual_Qlink : np.ndarray
        Annual consumption by pred-prey pair
    stanza_biomass : np.ndarray or None
        Optional monthly stanza-resolved biomass (n_months x n_groups+1)
    end_state : RsimState
        Final state at end of simulation
    crash_year : int
        Year of crash (-1 if no crash)
    crashed_groups : set
        Set of group indices that crashed (biomass < threshold)
    pred : np.ndarray
        Predator names for Qlink columns
    prey : np.ndarray
        Prey names for Qlink columns
    Gear_Catch_sp : np.ndarray
        Species labels for out_Gear_Catch columns
        (NOTE(review): inferred from the name -- confirm against run code)
    Gear_Catch_gear : np.ndarray
        Gear labels for out_Gear_Catch columns
        (NOTE(review): inferred from the name -- confirm against run code)
    Gear_Catch_disp : np.ndarray
        Disposition labels for out_Gear_Catch columns
        (NOTE(review): inferred from the name -- confirm against run code)
    start_state : RsimState
        Initial state (copy)
    params : dict
        Summary parameters
    """

    out_Biomass: np.ndarray
    out_Catch: np.ndarray
    out_Gear_Catch: np.ndarray
    annual_Biomass: np.ndarray
    annual_Catch: np.ndarray
    annual_QB: np.ndarray
    annual_Qlink: np.ndarray
    stanza_biomass: Optional[np.ndarray]
    end_state: RsimState
    crash_year: int
    crashed_groups: set
    pred: np.ndarray
    prey: np.ndarray
    Gear_Catch_sp: np.ndarray
    Gear_Catch_gear: np.ndarray
    Gear_Catch_disp: np.ndarray
    start_state: RsimState
    params: dict

rsim_params

rsim_params(rpath: Rpath, mscramble: float = 2.0, mhandle: float = 1000.0, preyswitch: float = 1.0, scrambleselfwt: float = 0.0, handleselfwt: float = 0.0, steps_yr: int = 12, steps_m: int = 1) -> RsimParams

Convert Rpath model to Ecosim simulation parameters.

Parameters:

Name Type Description Default
rpath Rpath

Balanced Ecopath model

required
mscramble float

Base vulnerability parameter (default 2.0 = mixed response)

2.0
mhandle float

Base handling time parameter (default 1000 = off)

1000.0
preyswitch float

Prey switching exponent (default 1.0 = off)

1.0
scrambleselfwt float

Predator overlap weight (0 = individual, 1 = all overlap)

0.0
handleselfwt float

Prey overlap weight (0 = individual, 1 = all overlap)

0.0
steps_yr int

Timesteps per year (default 12 = monthly)

12
steps_m int

Sub-timesteps per month (default 1)

1

Returns:

Type Description
RsimParams

Parameters object for simulation

Source code in pypath/core/ecosim.py
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
def rsim_params(
    rpath: Rpath,
    mscramble: float = 2.0,
    mhandle: float = 1000.0,
    preyswitch: float = 1.0,
    scrambleselfwt: float = 0.0,
    handleselfwt: float = 0.0,
    steps_yr: int = 12,
    steps_m: int = 1,
) -> RsimParams:
    """Convert Rpath model to Ecosim simulation parameters.

    Parameters
    ----------
    rpath : Rpath
        Balanced Ecopath model
    mscramble : float
        Base vulnerability parameter (default 2.0 = mixed response)
    mhandle : float
        Base handling time parameter (default 1000 = off)
    preyswitch : float
        Prey switching exponent (default 1.0 = off)
    scrambleselfwt : float
        Predator overlap weight (0 = individual, 1 = all overlap)
    handleselfwt : float
        Prey overlap weight (0 = individual, 1 = all overlap)
    steps_yr : int
        Timesteps per year (default 12 = monthly)
    steps_m : int
        Sub-timesteps per month (default 1)

    Returns
    -------
    RsimParams
        Parameters object for simulation

    Notes
    -----
    Every per-group output array gains a leading "Outside" slot, so Rpath
    group ``i`` maps to index ``i + 1`` in the returned arrays.  The input
    ``rpath`` object is treated as read-only: diet normalization below works
    on local copies of the diet matrix and import row.
    """
    nliving = rpath.NUM_LIVING
    ndead = rpath.NUM_DEAD
    ngear = rpath.NUM_GEARS
    ngroups = rpath.NUM_GROUPS
    nbio = nliving + ndead

    # Species names with "Outside" prepended
    spname = ["Outside"] + list(rpath.Group)
    spnum = np.arange(ngroups + 1)

    # Reference biomass (with leading 1.0 for Outside)
    b_baseref = np.concatenate([[1.0], rpath.Biomass])

    # Other mortality M0 = PB * (1 - EE)
    mzero = np.concatenate([[0.0], rpath.PB * (1.0 - rpath.EE)])

    # Unassimilated fraction
    unassim = np.concatenate([[0.0], rpath.Unassim])

    # Build PP_type array: 0=consumer, 1=producer, 2=detritus
    # This is based on the actual group types from the Rpath model
    pp_type = np.zeros(ngroups + 1, dtype=int)
    for i in range(ngroups):
        grp_type = int(rpath.type[i])
        if grp_type == 0:
            pp_type[i + 1] = 0  # Consumer
        elif grp_type == 1:
            pp_type[i + 1] = 1  # Producer (primary producer)
        else:  # type == 2 (detritus) or type == 3 (fleet)
            pp_type[i + 1] = 2  # Detritus / non-living

    # Active respiration = 1 - P/Q - Unassim (for consumers)
    qb = rpath.QB.copy()
    # Replace invalid QB values (-9999 sentinel, negative, or NaN) with 0
    qb = np.where((qb < 0) | (qb == -9999) | np.isnan(qb), 0.0, qb)

    pb = rpath.PB
    active_resp = np.zeros(ngroups + 1)
    for i in range(nliving):
        if qb[i] > 0:
            active_resp[i + 1] = max(0, 1.0 - (pb[i] / qb[i]) - rpath.Unassim[i])

    # Foraging time parameters
    ftime_adj = np.zeros(ngroups + 1)
    # For producers (type=1), use PB as the "consumption" rate
    # For consumers (type=0), use QB
    # For detritus (type=2) and fleets (type=3), use 1.0 as default
    ftime_qbopt_values = np.where(
        rpath.type == 1,
        rpath.PB,
        np.where(
            (rpath.type == 0) & (qb > 0),
            qb,
            0.0,  # Default for detritus, fleets, or invalid QB
        ),
    )
    ftime_qbopt = np.concatenate([[1.0], ftime_qbopt_values])
    pbopt = np.concatenate([[1.0], rpath.PB])

    # NoIntegrate flag: 1 indicates groups that should not be integrated (fast-turnover/equilibrium)
    # Previously this used 0/spnum which was inconsistent with downstream checks. Use 1 for NoIntegrate.
    no_integrate = np.where(mzero * b_baseref > 2 * steps_yr * steps_m, 1, 0)

    # Ensure detritus (PP_type == 2) groups are treated as algebraic (NoIntegrate)
    # Rpath commonly treats dead/detritus pools as fast equilibrium; marking them
    # NoIntegrate here makes PyPath behavior match Rpath finite-difference outputs
    try:
        det_idx = np.where(pp_type == 2)[0]
        if det_idx.size > 0:
            no_integrate[det_idx] = 1
    except Exception as e:
        logger.debug("detritus NoIntegrate marking failed: %s", e)

    # Predator-prey handling parameters
    handle_self = np.full(ngroups + 1, handleselfwt)
    scramble_self = np.full(ngroups + 1, scrambleselfwt)

    # Build predator-prey links
    # Primary production links (producers eating "Outside")
    prim_to = []
    prim_from = []
    prim_q = []

    for i in range(nliving):
        if rpath.type[i] > 0 and rpath.type[i] <= 1:  # Producer or mixotroph
            prim_to.append(i + 1)  # +1 for 0-indexing offset
            prim_from.append(0)  # From Outside
            q = rpath.PB[i] * rpath.Biomass[i]
            # Adjust for mixotrophs (fractional type in (0, 1))
            if rpath.type[i] < 1:
                q = q / rpath.GE[i] * rpath.type[i] if rpath.GE[i] > 0 else q
            prim_q.append(q)

    # Predator-prey links from diet matrix
    # NOTE: Only consumers (type=0) can be predators in the diet matrix
    pred_to = []
    pred_from = []
    pred_q = []

    dc = rpath.DC[: nliving + ndead, :nliving].copy()

    # Normalize incomplete diets to sum to 1.0 (excluding import)
    # This ensures proper mass balance at equilibrium
    # BUGFIX: copy the import row -- rpath.DC[-1, :nliving] is a NumPy view,
    # and the in-place scaling below would otherwise mutate the caller's
    # balanced rpath.DC matrix.
    import_row = (
        rpath.DC[-1, :nliving].copy()
        if len(rpath.DC) > nliving + ndead
        else np.zeros(nliving)
    )
    for pred_idx in range(nliving):
        if rpath.type[pred_idx] != 0:  # Skip non-consumers
            continue
        if qb[pred_idx] <= 0 or qb[pred_idx] == -9999 or np.isnan(qb[pred_idx]):
            continue

        # Calculate diet sum (excluding import)
        diet_sum = np.sum(dc[:, pred_idx])
        import_frac = import_row[pred_idx] if pred_idx < len(import_row) else 0
        total_diet = diet_sum + import_frac

        # Normalize if diet is incomplete (sums to less than 1.0)
        # This can happen with incomplete data or data entry errors
        if total_diet > 0 and abs(total_diet - 1.0) > 1e-6:
            # Normalize DC column to make total sum to 1.0
            scale_factor = 1.0 / total_diet
            dc[:, pred_idx] *= scale_factor
            import_row[pred_idx] *= scale_factor
    # Loop over predators first to keep order consistent with reference (predator-major order)
    for pred_idx in range(nliving):
        # Skip non-consumers
        if rpath.type[pred_idx] != 0:
            continue
        # Skip if predator has invalid QB value
        if qb[pred_idx] <= 0 or qb[pred_idx] == -9999 or np.isnan(qb[pred_idx]):
            continue
        for prey_idx in range(nliving + ndead):
            if dc[prey_idx, pred_idx] > 0:
                pred_from.append(prey_idx + 1)
                pred_to.append(pred_idx + 1)
                q = dc[prey_idx, pred_idx] * qb[pred_idx] * rpath.Biomass[pred_idx]
                # Ensure Q is non-negative
                if q > 0:
                    pred_q.append(q)
                else:
                    # Remove the last appended pred_from and pred_to
                    pred_from.pop()
                    pred_to.pop()

    # Handle import (last row of DC = nrow)
    # Import links: prey from Outside (index 0)
    # Note: import_row was already normalized above
    for pred_idx in range(nliving):
        # Skip if this "predator" is not a consumer (type=0)
        if rpath.type[pred_idx] != 0:
            continue
        # Skip if predator has invalid QB value
        if qb[pred_idx] <= 0 or qb[pred_idx] == -9999 or np.isnan(qb[pred_idx]):
            continue
        if import_row[pred_idx] > 0:
            pred_from.append(0)  # From Outside
            pred_to.append(pred_idx + 1)
            q = import_row[pred_idx] * qb[pred_idx] * rpath.Biomass[pred_idx]
            if q > 0:
                pred_q.append(q)
            else:
                pred_from.pop()
                pred_to.pop()

    # Combine links (slot 0 of every link array is a dummy "Outside" entry)
    prey_from = np.array([0] + prim_from + pred_from)
    prey_to = np.array([0] + prim_to + pred_to)
    qq = np.array([0.0] + prim_q + pred_q)

    numpredprey = len(qq) - 1

    # Vulnerability and handling parameters
    dd = np.full(len(qq), mhandle)
    vv = np.full(len(qq), mscramble)
    handle_switch = np.full(len(qq), preyswitch)
    handle_switch[0] = 0

    # Calculate predator and prey weights for scramble
    btmp = b_baseref

    # Safe division for VV calculation
    aa = np.zeros(len(qq))

    for i in range(1, len(qq)):
        prey_b = btmp[prey_from[i]]
        pred_b = btmp[prey_to[i]]
        if prey_b > 0 and pred_b > 0:
            numerator = 2.0 * qq[i] * vv[i]
            denominator = vv[i] * pred_b * prey_b - qq[i] * pred_b
            if abs(denominator) > EPSILON:
                aa[i] = numerator / denominator

    pred_pred_weight = aa * btmp[prey_to]
    prey_prey_weight = aa * btmp[prey_from]

    # Normalize weights so they sum to 1 per prey (pred weights) / per predator (prey weights)
    pred_tot_weight = np.zeros(ngroups + 1)
    prey_tot_weight = np.zeros(ngroups + 1)

    for i in range(1, len(qq)):
        pred_tot_weight[prey_from[i]] += pred_pred_weight[i]
        prey_tot_weight[prey_to[i]] += prey_prey_weight[i]

    for i in range(1, len(qq)):
        if pred_tot_weight[prey_from[i]] > 0:
            pred_pred_weight[i] /= pred_tot_weight[prey_from[i]]
        if prey_tot_weight[prey_to[i]] > 0:
            prey_prey_weight[i] /= prey_tot_weight[prey_to[i]]

    # Build fishing links
    fish_from = [0]
    fish_through = [0]
    fish_q = [0.0]
    fish_to = [0]

    for gear_idx in range(ngear):
        for grp_idx in range(ngroups):
            landing = rpath.Landings[grp_idx, gear_idx]
            if landing > 0 and b_baseref[grp_idx + 1] > 0:
                fish_from.append(grp_idx + 1)
                fish_through.append(nliving + ndead + gear_idx + 1)
                fish_q.append(landing / b_baseref[grp_idx + 1])
                fish_to.append(0)  # Landings go Outside

            discard = rpath.Discards[grp_idx, gear_idx]
            if discard > 0 and b_baseref[grp_idx + 1] > 0:
                # Discards go to detritus based on fate
                for det_idx in range(ndead):
                    det_frac = (
                        rpath.DetFate[nliving + ndead + gear_idx, det_idx]
                        if nliving + ndead + gear_idx < len(rpath.DetFate)
                        else 1.0 / ndead
                    )
                    if det_frac > 0:
                        fish_from.append(grp_idx + 1)
                        fish_through.append(nliving + ndead + gear_idx + 1)
                        fish_q.append(discard * det_frac / b_baseref[grp_idx + 1])
                        # Use dead global indices to avoid arithmetic/indexing ambiguity
                        fish_to.append(nliving + det_idx + 1)

    fish_from = np.array(fish_from)
    fish_through = np.array(fish_through)
    fish_q = np.array(fish_q)
    fish_to = np.array(fish_to)

    # Build detritus links
    det_from = [0]
    det_to = [0]
    det_frac_list = [0.0]

    # DEBUG: log per-detritus source groups from rpath.DetFate to help trace missing links
    for det_idx in range(ndead):
        sources = [
            (i, rpath.Group[i]) for i in range(ngroups) if rpath.DetFate[i, det_idx] > 0
        ]
        # Use nliving + det_idx as the detritus global group index (no +1)
        logger.debug(
            "DetFate det_idx=%s dest_global=%s sources=%s",
            det_idx,
            nliving + det_idx,
            sources,
        )

    # Iterate over ngroups (not nliving+ndead) so gear rows that contribute
    # discards to detritus are also linked
    for grp_idx in range(ngroups):
        for det_idx in range(ndead):
            frac = rpath.DetFate[grp_idx, det_idx]
            if frac > 0:
                det_from.append(grp_idx + 1)
                det_to.append(nliving + det_idx + 1)
                det_frac_list.append(frac)
                # DEBUG: show each det link created
                logger.debug(
                    "DETLINK: grp_idx=%s det_idx=%s frac=%s det_to=%s",
                    grp_idx,
                    det_idx,
                    frac,
                    nliving + det_idx + 1,
                )
        # Flow to outside (1 - sum of det fate)
        det_out = 1.0 - np.sum(rpath.DetFate[grp_idx, :])
        if det_out > 0:
            det_from.append(grp_idx + 1)
            det_to.append(0)
            det_frac_list.append(det_out)

    det_from = np.array(det_from)
    det_to = np.array(det_to)
    det_frac = np.array(det_frac_list)

    # DEBUG: detect detritus columns with no DetFate sources and report mapping
    try:
        # Sum DetFate over ALL source groups including gears
        col_sums = np.sum(rpath.DetFate[:, :], axis=0)
        zero_cols = np.where(col_sums == 0)[0]
        if len(zero_cols) > 0:
            det_names = [rpath.Group[nliving + zc] for zc in zero_cols]
            logger.debug(
                "DetFate columns with zero source fractions: cols=%s, detritus names=%s",
                zero_cols,
                det_names,
            )
        # Also report which detritus columns appear in det_to mapping
        det_to_cols = np.unique(
            (det_to[(det_to > nliving) & (det_to <= nliving + ndead)] - nliving - 1)
        )
        logger.debug("DetTo mapped detritus columns (indices): %s", det_to_cols)
        logger.debug("Unique DetTo values: %s", np.unique(det_to))
    except Exception as e:
        logger.debug("DetFate diagnostic logging failed: %s", e)

    return RsimParams(
        NUM_GROUPS=ngroups,
        NUM_LIVING=nliving,
        NUM_DEAD=ndead,
        NUM_GEARS=ngear,
        NUM_BIO=nbio,
        spname=spname,
        spnum=spnum,
        B_BaseRef=b_baseref,
        MzeroMort=mzero,
        UnassimRespFrac=unassim,
        ActiveRespFrac=active_resp,
        FtimeAdj=ftime_adj,
        FtimeQBOpt=ftime_qbopt,
        PBopt=pbopt,
        NoIntegrate=no_integrate,
        HandleSelf=handle_self,
        ScrambleSelf=scramble_self,
        PreyFrom=prey_from,
        PreyTo=prey_to,
        QQ=qq,
        DD=dd,
        VV=vv,
        HandleSwitch=handle_switch,
        PredPredWeight=pred_pred_weight,
        PreyPreyWeight=prey_prey_weight,
        NumPredPreyLinks=numpredprey,
        FishFrom=fish_from,
        FishThrough=fish_through,
        FishQ=fish_q,
        FishTo=fish_to,
        NumFishingLinks=len(fish_from) - 1,
        DetFrac=det_frac,
        DetFrom=det_from,
        DetTo=det_to,
        NumDetLinks=len(det_from) - 1,
        PP_type=pp_type,
    )

rsim_state

rsim_state(params: RsimParams) -> RsimState

Create initial state vectors for simulation.

Parameters:

Name Type Description Default
params RsimParams

Simulation parameters

required

Returns:

Type Description
RsimState

Initial state with biomass, N, and Ftime

Source code in pypath/core/ecosim.py
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
def rsim_state(params: RsimParams) -> RsimState:
    """Build the initial simulation state from reference parameters.

    Parameters
    ----------
    params : RsimParams
        Simulation parameters

    Returns
    -------
    RsimState
        Initial state with biomass, N, and Ftime
    """
    n_slots = params.NUM_GROUPS + 1  # index 0 is the "Outside" slot
    initial_biomass = params.B_BaseRef.copy()
    return RsimState(
        Biomass=initial_biomass,
        N=np.zeros(n_slots),
        Ftime=np.ones(n_slots),
    )

rsim_forcing

rsim_forcing(params: RsimParams, years: range) -> RsimForcing

Create forcing matrices with default values.

Parameters:

Name Type Description Default
params RsimParams

Simulation parameters

required
years range

Years of simulation

required

Returns:

Type Description
RsimForcing

Forcing matrices initialized to default values

Source code in pypath/core/ecosim.py
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
def rsim_forcing(params: RsimParams, years: range) -> RsimForcing:
    """Create forcing matrices with default values.

    Parameters
    ----------
    params : RsimParams
        Simulation parameters
    years : range
        Years of simulation

    Returns
    -------
    RsimForcing
        Forcing matrices initialized to default values
    """
    n_months = len(years) * 12
    shape = (n_months, params.NUM_GROUPS + 1)

    def _const(value: float) -> np.ndarray:
        # One freshly allocated monthly-by-group matrix per field.
        return np.full(shape, value)

    return RsimForcing(
        ForcedPrey=_const(1.0),  # 1.0 = no forcing
        ForcedMort=_const(1.0),
        ForcedRecs=_const(1.0),
        ForcedSearch=_const(1.0),
        ForcedActresp=_const(1.0),
        ForcedMigrate=_const(0.0),  # no migration by default
        ForcedBio=_const(-1.0),  # -1 = biomass not forced
        ForcedEffort=_const(1.0),
    )

rsim_fishing

rsim_fishing(params: RsimParams, years: range) -> RsimFishing

Create fishing matrices with default values.

Parameters:

Name Type Description Default
params RsimParams

Simulation parameters

required
years range

Years of simulation

required

Returns:

Type Description
RsimFishing

Fishing matrices initialized to default values

Source code in pypath/core/ecosim.py
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
def rsim_fishing(params: RsimParams, years: range) -> RsimFishing:
    """Create fishing matrices with default values.

    Parameters
    ----------
    params : RsimParams
        Simulation parameters
    years : range
        Years of simulation

    Returns
    -------
    RsimFishing
        Fishing matrices initialized to default values
    """
    n_years = len(years)

    # Effort is tracked monthly per gear (baseline multiplier of 1.0);
    # F-rate and forced catch are tracked annually per biomass group (0 = off).
    monthly_effort = np.ones((n_years * 12, params.NUM_GEARS + 1))
    annual_shape = (n_years, params.NUM_BIO + 1)

    return RsimFishing(
        ForcedEffort=monthly_effort,
        ForcedFRate=np.zeros(annual_shape),
        ForcedCatch=np.zeros(annual_shape),
    )

rsim_scenario

rsim_scenario(rpath: Rpath, rpath_params: RpathParams, years: range = range(1, 101), vulnerability: float = 2.0) -> RsimScenario

Create a complete Ecosim scenario.

Parameters:

Name Type Description Default
rpath Rpath

Balanced Ecopath model

required
rpath_params RpathParams

Original model parameters

required
years range

Years to simulate

range(1, 101)
vulnerability float

Base vulnerability parameter (default 2.0 = mixed response) Controls predator-prey functional response: - 1.0 = donor control (top-down) - 2.0 = mixed control - Higher values = more bottom-up control

2.0

Returns:

Type Description
RsimScenario

Complete scenario ready for simulation

Source code in pypath/core/ecosim.py
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
def rsim_scenario(
    rpath: Rpath,
    rpath_params: RpathParams,
    years: range = range(1, 101),
    vulnerability: float = 2.0,
) -> RsimScenario:
    """Create a complete Ecosim scenario.

    Parameters
    ----------
    rpath : Rpath
        Balanced Ecopath model
    rpath_params : RpathParams
        Original model parameters
    years : range
        Years to simulate (must contain at least 2 years)
    vulnerability : float, optional
        Base vulnerability parameter (default 2.0 = mixed response)
        Controls the predator-prey functional response:
        - 1.0 = donor (prey) control, i.e. bottom-up
        - 2.0 = mixed control
        - Higher values = stronger top-down (predator) control

    Returns
    -------
    RsimScenario
        Complete scenario ready for simulation

    Raises
    ------
    ValueError
        If ``years`` spans fewer than 2 years.
    """
    if len(years) < 2:
        raise ValueError("Years must be a range of at least 2 years")

    params = rsim_params(rpath, mscramble=vulnerability)
    # Preserve optional instrumentation and debug controls set on the
    # original RpathParams object by copying them onto the generated
    # rsim params object. This allows callers/tests to attach
    # 'INSTRUMENT_GROUPS' or 'instrument_callback' to the rpath_params
    # and have them available during simulation without changing
    # existing callsites.
    try:
        for _attr in (
            "INSTRUMENT_GROUPS",
            "VERBOSE_DEBUG",
            "instrument_callback",
            "spname",
            "INSTRUMENT_ASSUME_1BASED",
            "model",
        ):
            if hasattr(rpath_params, _attr):
                setattr(params, _attr, getattr(rpath_params, _attr))
    except Exception as e:
        # Best-effort copy: instrumentation is optional, so never fail here.
        logger.debug("attribute transfer from rpath_params failed: %s", e)

    state = rsim_state(params)
    forcing = rsim_forcing(params, years)
    fishing = rsim_fishing(params, years)

    # Stanza handling: initialize if rpath_params contains stanza definitions
    stanzas = None
    try:
        if (
            getattr(rpath_params, "stanzas", None) is not None
            and rpath_params.stanzas.n_stanza_groups > 0
        ):
            # Compute rpath stanza diagnostics (biomass/Q distribution)
            rpath_stanzas(rpath_params)
            # Initialize Rsim-compatible stanza parameters
            stanzas = rsim_stanzas(rpath_params, state, params)
    except Exception as e:
        # If stanza initialization fails, continue without stanzas but log via debug
        logger.debug("stanza initialization failed: %s", e, exc_info=True)
        stanzas = None

    return RsimScenario(
        params=params,
        start_state=state,
        forcing=forcing,
        fishing=fishing,
        stanzas=stanzas,
        eco_name=rpath.eco_name,
        start_year=years[0],
    )

rsim_run

rsim_run(scenario: RsimScenario, method: str = 'RK4', years: Optional[range] = None) -> RsimOutput

Run Ecosim simulation.

Parameters:

Name Type Description Default
scenario RsimScenario

Simulation scenario

required
method str

Integration method: 'RK4' (Runge-Kutta 4) or 'AB' (Adams-Bashforth)

'RK4'
years range

Years to run (default: all years in scenario)

None

Returns:

Type Description
RsimOutput

Simulation results

Source code in pypath/core/ecosim.py
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
def rsim_run(
    scenario: RsimScenario,
    method: str = "RK4",
    years: Optional[range] = None,
) -> RsimOutput:
    """Run Ecosim simulation.

    Parameters
    ----------
    scenario : RsimScenario
        Simulation scenario
    method : str
        Integration method: 'RK4' (Runge-Kutta 4) or 'AB' (Adams-Bashforth)
    years : range, optional
        Years to run (default: all years in scenario)

    Returns
    -------
    RsimOutput
        Simulation results
    """
    from pypath.core.ecosim_deriv import integrate_ab, integrate_rk4

    params = scenario.params
    forcing = scenario.forcing
    fishing_obj = scenario.fishing
    # Build a normalized dict view of fishing to pass to derivative/integrator
    fishing_dict = _normalize_fishing_input(fishing_obj, params.NUM_GROUPS + 1)

    # Fishing link arrays (FishFrom/FishThrough/FishQ) live on params, not
    # on the fishing object.  Ensure they are present in fishing_dict so
    # deriv_vector can compute per-link fishing mortality correctly.
    if not fishing_dict.get("FishFrom", []):
        fishing_dict["FishFrom"] = getattr(params, "FishFrom", np.array([0]))
        fishing_dict["FishThrough"] = getattr(params, "FishThrough", np.array([0]))
        fishing_dict["FishQ"] = getattr(params, "FishQ", np.array([0.0]))

    # Determine years to run
    if years is None:
        n_months = forcing.ForcedBio.shape[0]
        n_years = n_months // 12
    else:
        n_years = len(years)
        n_months = n_years * 12

    n_groups = params.NUM_GROUPS + 1

    # Initialize output arrays
    out_biomass = np.zeros((n_months + 1, n_groups))
    out_catch = np.zeros((n_months + 1, n_groups))
    out_gear_catch = np.zeros((n_months + 1, params.NumFishingLinks + 1))

    # Optional stanza biomass time series
    stanza_biomass = (
        np.zeros((n_months + 1, n_groups))
        if scenario.stanzas is not None and scenario.stanzas.n_split > 0
        else None
    )

    # Initialize state
    state = scenario.start_state.Biomass.copy()
    out_biomass[0] = state

    # If stanzas present, compute initial stanza biomass snapshot
    if stanza_biomass is not None:
        for isp in range(1, scenario.stanzas.n_split + 1):
            nst = scenario.stanzas.n_stanzas[isp]
            for ist in range(1, nst + 1):
                ieco = int(scenario.stanzas.ecopath_code[isp, ist])
                first = int(scenario.stanzas.age1[isp, ist])
                last = int(scenario.stanzas.age2[isp, ist])
                # Sum biomass across ages for this stanza
                bio = np.nansum(
                    scenario.stanzas.base_nage_s[first : last + 1, isp]
                    * scenario.stanzas.base_wage_s[first : last + 1, isp]
                )
                if ieco >= 0 and ieco < n_groups:
                    stanza_biomass[0, ieco] += bio

    # Build params dict for derivative and matrix computations
    params_dict = {
        "NUM_GROUPS": params.NUM_GROUPS,
        "NUM_LIVING": params.NUM_LIVING,
        "NUM_DEAD": params.NUM_DEAD,
        "NUM_GEARS": params.NUM_GEARS,
        "PB": params.PBopt,
        "QB": params.FtimeQBOpt,
        "M0": params.MzeroMort,
        "Unassim": params.UnassimRespFrac,
        "ActiveLink": _build_active_link_matrix(params),
        "VV": _build_link_matrix(params, params.VV),
        "DD": _build_link_matrix(params, params.DD),
        "QQbase": _build_link_matrix(params, params.QQ),
        "Bbase": params.B_BaseRef,
        "PP_type": params.PP_type,
        "NoIntegrate": params.NoIntegrate,
        # Include fish discard mappings so deriv_vector sees them when params is a dict
        "FishFrom": getattr(params, "FishFrom", np.array([])),
        "FishTo": getattr(params, "FishTo", np.array([])),
        "FishQ": getattr(params, "FishQ", np.array([])),
    }

    # Pre-compute sparse link arrays for the consumption kernel so that
    # deriv_vector can skip inactive prey-predator pairs entirely.
    from pypath.core.link_array import ActiveLinkArray

    _links = ActiveLinkArray.from_bool_matrix(params_dict["ActiveLink"])
    params_dict["_link_prey"] = _links.prey
    params_dict["_link_pred"] = _links.pred

    # Propagate IBM groups if any functional groups are IBM-managed.
    if hasattr(params, "ibm_groups"):
        params_dict["ibm_groups"] = params.ibm_groups

    # Propagate optional debugging/instrumentation control flags from the
    # RpathParams object to the params dict so deriv_vector can use them
    # when running in dict-mode. This is useful for targeted runtime
    # instrumentation without changing function signatures.
    try:
        if hasattr(params, "INSTRUMENT_GROUPS"):
            params_dict["INSTRUMENT_GROUPS"] = getattr(params, "INSTRUMENT_GROUPS")
            try:
                logger.debug(
                    "DEBUG-INSTR COPY: params.INSTRUMENT_GROUPS attr=%r type=%s",
                    getattr(params, "INSTRUMENT_GROUPS", None),
                    type(getattr(params, "INSTRUMENT_GROUPS", None)),
                )
            except Exception as e:
                logger.debug("instrumentation debug logging failed: %s", e)

        # Normalize instrument group names (strings) to 0-based indices using
        # the scenario's spname mapping so instrumentation payloads use the same
        # numeric indexing expected by unit tests.
        try:
            ig = params_dict.get("INSTRUMENT_GROUPS", None)
            spname = params_dict.get("spname", None)
            if ig is not None and spname is not None and isinstance(ig, (list, tuple)):
                normalized = []
                for g in ig:
                    if isinstance(g, str):
                        # Prefer mapping against original model 'Group' order when available
                        try:
                            model_df = getattr(params, "model", None)
                            if (
                                model_df is not None
                                and "Group" in model_df.columns
                                and g in model_df["Group"].values
                            ):
                                normalized.append(int(list(model_df["Group"]).index(g)))
                                continue
                        except Exception as e:
                            logger.debug("instrument group normalization failed: %s", e)
                        # Fallback: use spname mapping (spname includes leading 'Outside')
                        if g in spname:
                            sidx = spname.index(g)
                            if sidx > 0:  # convert to 0-based group index
                                normalized.append(sidx - 1)
                    else:
                        try:
                            normalized.append(int(g))
                        except Exception as e:
                            logger.debug("instrument group normalization failed: %s", e)
                if normalized:
                    normalized_sorted = sorted(set(normalized))
                    params_dict["INSTRUMENT_GROUPS"] = normalized_sorted
                    # Also write back normalization to the params attribute
                    try:
                        setattr(params, "INSTRUMENT_GROUPS", normalized_sorted)
                    except Exception as e:
                        logger.debug("instrument group normalization failed: %s", e)
                        try:
                            params["INSTRUMENT_GROUPS"] = normalized_sorted
                        except Exception as e:
                            logger.debug("instrument group normalization failed: %s", e)
                    try:
                        logger.debug(
                            "DEBUG-INSTR-MAP: ig_raw=%s normalized=%s model_present=%s",
                            ig,
                            normalized_sorted,
                            model_df is not None,
                        )
                    except Exception as e:
                        logger.debug("instrumentation debug logging failed: %s", e)
        except Exception as e:
            logger.debug("instrument group normalization failed: %s", e)
        if hasattr(params, "VERBOSE_DEBUG"):
            params_dict["VERBOSE_DEBUG"] = getattr(params, "VERBOSE_DEBUG")
        # Also include the species name mapping so deriv_vector can resolve
        # names (e.g., 'Seabirds') into indices for instrumentation.
        if hasattr(params, "spname"):
            params_dict["spname"] = getattr(params, "spname")
        # Also propagate the original model DataFrame when available so
        # deriv_vector can prefer the model-defined group ordering for
        # name->index mapping during instrumentation resolution.
        if hasattr(params, "model"):
            try:
                params_dict["model"] = getattr(params, "model")
            except Exception as e:
                logger.debug("model propagation to params_dict failed: %s", e)
        # Propagate an optional instrumentation callback function (callable)
        # so integration routines can report compact instrumentation data to
        # the outside world without changing function signatures.
        if hasattr(params, "MONTHLY_M0_ADJUST"):
            params_dict["MONTHLY_M0_ADJUST"] = getattr(params, "MONTHLY_M0_ADJUST")

        # callers/tests without relying on global I/O.
        if hasattr(params, "instrument_callback"):
            orig_cb = getattr(params, "instrument_callback")

            # Wrap the user-provided callback to ensure the first instrumentation
            # callback uses the caller-specified attribute-based INSTRUMENT_GROUPS
            # mapping when available. This avoids race conditions where RK4
            # warmup payloads may resolve names differently (spname vs model).
            def _wrapped_cb(payload):
                try:
                    if not getattr(_wrapped_cb, "first_called", False):
                        _wrapped_cb.first_called = True
                        attr_ig = getattr(params, "INSTRUMENT_GROUPS", None)
                        if isinstance(attr_ig, (list, tuple)) and payload.get(
                            "method"
                        ) in ("AB", "RK4"):
                            resolved = []
                            model_df = getattr(params, "model", None)
                            spname = getattr(params, "spname", None)
                            for g in attr_ig:
                                if isinstance(g, str):
                                    try:
                                        if (
                                            model_df is not None
                                            and "Group" in model_df.columns
                                            and g in model_df["Group"].values
                                        ):
                                            resolved.append(
                                                int(list(model_df["Group"]).index(g))
                                            )
                                            continue
                                    except Exception as e:
                                        logger.debug(
                                            "callback group resolution failed: %s", e
                                        )
                                    if spname is not None and g in spname:
                                        sidx = spname.index(g)
                                        if sidx > 0:
                                            resolved.append(sidx - 1)
                                else:
                                    try:
                                        resolved.append(int(g))
                                    except Exception as e:
                                        logger.debug(
                                            "callback group resolution failed: %s", e
                                        )
                            if resolved:
                                try:
                                    payload["groups"] = resolved
                                except Exception as e:
                                    logger.debug(
                                        "callback group resolution failed: %s", e
                                    )
                except Exception as e:
                    logger.debug("callback group resolution failed: %s", e)
                return orig_cb(payload)

            params_dict["instrument_callback"] = _wrapped_cb
        # Additional debug visibility for tests: print presence of callback
        try:
            logger.debug(
                "DEBUG-RUN: params hasattr instrument_callback=%s params_dict_has_cb=%s",
                hasattr(params, "instrument_callback"),
                "instrument_callback" in params_dict,
            )
            try:
                logger.debug(
                    "DEBUG-RUN: params.INSTRUMENT_GROUPS (attr)=%s params_dict['INSTRUMENT_GROUPS']=%s",
                    getattr(params, "INSTRUMENT_GROUPS", None),
                    params_dict.get("INSTRUMENT_GROUPS", None),
                )
            except Exception as e:
                logger.debug("callback group resolution failed: %s", e)
        except Exception as e:
            logger.debug("callback group resolution failed: %s", e)
    except Exception as e:
        logger.debug("callback group resolution failed: %s", e)

    # Debug: always log what's in params_dict for INSTRUMENT_GROUPS to catch
    # cases where it might be present due to other code paths or defaults.
    try:
        logger.debug(
            "DEBUG-INSTR-PARAMSDICT initial INSTRUMENT_GROUPS = %r",
            params_dict.get("INSTRUMENT_GROUPS", None),
        )
    except Exception as e:
        logger.debug("callback group resolution failed: %s", e)

    # Migration helper: accept legacy numeric 1-based INSTRUMENT_GROUPS on the
    # params object or dict. Convert to 0-based indices and emit a
    # DeprecationWarning so callers can update. This ensures integrator code
    # always sees a normalized numeric list.
    try:
        ig = params_dict.get("INSTRUMENT_GROUPS", None)
        # Explicit opt-in: only auto-convert numeric legacy 1-based groups when the
        # caller explicitly requests it via INSTRUMENT_ASSUME_1BASED.
        assume_flag = params_dict.get("INSTRUMENT_ASSUME_1BASED", False) or getattr(
            params, "INSTRUMENT_ASSUME_1BASED", False
        )
        if ig is not None:
            # check for numeric-only lists/tuples
            if isinstance(ig, (list, tuple)) and all(
                isinstance(x, (int, float, np.integer)) for x in ig
            ):
                nums = [int(x) for x in ig]
                if (
                    assume_flag
                    and nums
                    and all(1 <= v <= params.NUM_GROUPS for v in nums)
                    and min(nums) >= 1
                ):
                    import warnings as _warnings

                    _warnings.warn(
                        "Numeric INSTRUMENT_GROUPS indices are expected to be 0-based. "
                        "Detected probable 1-based indices — converting to 0-based for now. "
                        "Please update your code to use 0-based indices.",
                        DeprecationWarning,
                        stacklevel=3,
                    )
                    normalized = [v - 1 for v in nums]
                    params_dict["INSTRUMENT_GROUPS"] = normalized
                    # Also write back to params object if it has the attribute
                    try:
                        if hasattr(params, "INSTRUMENT_GROUPS"):
                            setattr(params, "INSTRUMENT_GROUPS", normalized)
                    except Exception as e:
                        logger.debug("verbose instrumentation config failed: %s", e)
                    try:
                        if params_dict.get("VERBOSE_INSTRUMENTATION"):
                            logger.debug(
                                "DEBUG-INSTR-MIGRATE: converted numeric 1-based %s -> %s",
                                nums,
                                normalized,
                            )
                    except Exception as e:
                        logger.debug("verbose instrumentation config failed: %s", e)
    except Exception as e:
        logger.debug("verbose instrumentation config failed: %s", e)

    # Provide a module-level fallback for instrumentation callback resolution.
    # Some callsites attach the callback as an attribute on the params object;
    # ensure integrator code can still find it by exporting it to the
    # ecosim_deriv module as `_last_instrument_callback`.
    try:
        import pypath.core.ecosim_deriv as _ed

        if hasattr(params, "instrument_callback"):
            _ed._last_instrument_callback = getattr(params, "instrument_callback")
            try:
                logger.debug("exported instrument_callback to ecosim_deriv")
            except Exception as e:
                logger.debug("verbose instrumentation config failed: %s", e)
            try:
                logger.debug(
                    "DEBUG-RUN: exported _last_instrument_callback=%s",
                    _ed._last_instrument_callback,
                )
            except Exception as e:
                logger.debug("verbose instrumentation config failed: %s", e)
        # Export original INSTRUMENT_GROUPS attribute (if present) so integrator
        # can consult the canonical caller-specified groups in legacy callsites
        # where the list was attached as an attribute on the params object.
        try:
            # Ensure attribute-based INSTRUMENT_GROUPS is normalized to numeric 0-based
            attr_ig = getattr(params, "INSTRUMENT_GROUPS", None)
            if isinstance(attr_ig, (list, tuple)) and attr_ig:
                # Map string names to model-based indices if available
                try:
                    model_df = getattr(params, "model", None)
                    normalized_attr = []
                    for g in attr_ig:
                        if (
                            isinstance(g, str)
                            and model_df is not None
                            and "Group" in model_df.columns
                            and g in model_df["Group"].values
                        ):
                            normalized_attr.append(
                                int(list(model_df["Group"]).index(g))
                            )
                        elif (
                            isinstance(g, str)
                            and getattr(params, "spname", None) is not None
                            and g in params.spname
                        ):
                            sidx = params.spname.index(g)
                            if sidx > 0:
                                normalized_attr.append(sidx - 1)
                        else:
                            try:
                                normalized_attr.append(int(g))
                            except Exception as e:
                                logger.debug(
                                    "verbose instrumentation config failed: %s", e
                                )
                    if normalized_attr:
                        try:
                            setattr(
                                params,
                                "INSTRUMENT_GROUPS",
                                sorted(set(normalized_attr)),
                            )
                            attr_ig = getattr(params, "INSTRUMENT_GROUPS")
                        except Exception as e:
                            logger.debug("verbose instrumentation config failed: %s", e)
                except Exception as e:
                    logger.debug("verbose instrumentation config failed: %s", e)
            _ed._last_instrument_groups = getattr(params, "INSTRUMENT_GROUPS", None)
            # Also export a module-level fallback variable so deriv_vector can
            # pick up attribute-based instrumentation when only the params
            # object had the attribute set (legacy code paths).
            try:
                globals()["_last_instrument_groups"] = _ed._last_instrument_groups
            except Exception as e:
                logger.debug("verbose instrumentation config failed: %s", e)
            if _ed._last_instrument_groups is not None:
                if params_dict.get("VERBOSE_INSTRUMENTATION"):
                    logger.debug(
                        "exported _last_instrument_groups=%s",
                        _ed._last_instrument_groups,
                    )
        except Exception as e:
            logger.debug("verbose instrumentation config failed: %s", e)
    except Exception as e:
        logger.debug("verbose instrumentation config failed: %s", e)

    # Debug: log summary of NoIntegrate array for verification
    try:
        noint = np.asarray(params_dict.get("NoIntegrate"))
        logger.debug("NoIntegrate array length=%d sample=%s", len(noint), noint[:10])
        logger.debug("NoIntegrate true count=%d", int(np.sum(noint != 0)))
    except Exception as e:
        logger.debug("verbose instrumentation config failed: %s", e)

    # Enforce exact equilibrium at initialization for tiny residual derivatives
    # This prevents tiny numerical residuals at t=0 from accumulating over long
    # simulations (observed in Seabirds tests).
    INIT_DERIV_EPS = 1e-8
    # Build forcing for month 0 (first month) similar to loop below
    forcing0 = {
        "Ftime": scenario.start_state.Ftime.copy(),
        "ForcedBio": np.where(forcing.ForcedBio[0] > 0, forcing.ForcedBio[0], 0),
        "ForcedMigrate": forcing.ForcedMigrate[0],
        "ForcedEffort": (
            fishing_obj.ForcedEffort[0]
            if 0 < len(fishing_obj.ForcedEffort)
            else np.ones(params.NUM_GEARS + 1)
        ),
    }
    # Debugging: log forcing0 summary to compare with test precomputed forcing
    try:
        logger.debug(
            "forcing0 sample ForcedEffort[:4]=%s ForcedBio[:4]=%s",
            forcing0["ForcedEffort"][:4],
            forcing0["ForcedBio"][:4],
        )
    except Exception as e:
        logger.debug("forcing0 debug logging failed: %s", e)

    try:
        # Compute initial derivative without applying NoIntegrate masking so we
        # capture the true algebraic residuals used to compute small M0 nudges.
        params_no_noint = params_dict.copy()
        params_no_noint["NoIntegrate"] = np.zeros(n_groups, dtype=int)
        # Use the actual fishing_dict (with FishFrom/FishQ from params) so
        # the init-deriv computation includes fishing mortality.  Previously
        # this used a zero-fishing dict which caused M0 to be nudged to
        # compensate for "missing" fishing, double-counting it once the fix
        # that populates fishing_dict from params was applied.
        # If Seabirds exists, request trace debug in deriv_vector to expose components
        # Set TRACE unconditionally (no silent failure) so logs are consistent
        if hasattr(params, "spname") and "Seabirds" in params.spname:
            sidx = params.spname.index("Seabirds")
            params_no_noint["TRACE_DEBUG_GROUPS"] = [sidx]
            # Provide species names list to deriv_vector for trace printing
            params_no_noint["spname"] = params.spname
            logger.debug("requesting TRACE_DEBUG_GROUPS for seabirds idx=%d", sidx)
        # Threshold for considering small initial derivatives
        ADJUST_DERIV_MAX = 1e-3
        logger.debug(
            "computing init_deriv with fishing_dict FishFrom length=%d",
            len(fishing_dict.get("FishFrom", [])),
        )
        init_deriv = deriv_vector(state.copy(), params_no_noint, forcing0, fishing_dict)
        # Also compute a test-style derivative with TRACE to compare
        params_test = params_no_noint.copy()
        try:
            params_test["TRACE_DEBUG_GROUPS"] = params_no_noint.get(
                "TRACE_DEBUG_GROUPS", None
            )
            deriv_test = deriv_vector(state.copy(), params_test, forcing0, fishing_dict)
            try:
                if "Seabirds" in params.spname:
                    sidx = params.spname.index("Seabirds")
                    logger.debug(
                        "post-deriv debug: init_deriv[seab]=%.6e deriv_test[seab]=%.6e",
                        init_deriv[sidx],
                        deriv_test[sidx],
                    )
            except Exception as e:
                logger.debug("derivative comparison debug failed: %s", e)
        except Exception as e:
            logger.debug("derivative comparison debug failed: %s", e)
            deriv_test = None
        init_mask = np.abs(init_deriv) < INIT_DERIV_EPS
        # Don't include the outside cell (index 0) in masking
        init_mask[0] = False
        # Debug: report summary of initial derivatives
        logger.debug(
            "init_deriv min=%.6e max=%.6e",
            float(np.nanmin(init_deriv)),
            float(np.nanmax(init_deriv)),
        )
        try:
            logger.debug("init_deriv sample[:10]=%s", init_deriv[:10])
        except Exception as e:
            logger.debug("init_deriv debug logging failed: %s", e)
        # If Seabirds exists, report its index/value
        try:
            if "Seabirds" in params.spname:
                sidx = params.spname.index("Seabirds")
                logger.debug(
                    "Seabirds index=%d init_deriv=%.6e (no NoIntegrate applied)",
                    sidx,
                    init_deriv[sidx],
                )
        except Exception as e:
            logger.debug("init_deriv debug logging failed: %s", e)

        # Build a test-style params dict (like tests do) and compute its derivative
        try:
            params_test = {
                "NUM_GROUPS": params.NUM_GROUPS,
                "NUM_LIVING": params.NUM_LIVING,
                "NUM_DEAD": params.NUM_DEAD,
                "NUM_GEARS": params.NUM_GEARS,
                "PB": params.PBopt,
                "QB": params.FtimeQBOpt,
                "M0": params.MzeroMort.copy(),
                "Unassim": params.UnassimRespFrac,
                "ActiveLink": _build_active_link_matrix(params),
                "VV": _build_link_matrix(params, params.VV),
                "DD": _build_link_matrix(params, params.DD),
                "QQbase": _build_link_matrix(params, params.QQ),
                "Bbase": params.B_BaseRef,
                "PP_type": params.PP_type,
                "FishFrom": getattr(params, "FishFrom", np.array([])),
                "FishTo": getattr(params, "FishTo", np.array([])),
                "FishQ": getattr(params, "FishQ", np.array([])),
            }
            # If Seabirds exists, add TRACE keys to params_test so deriv_vector prints breakdown
            try:
                if "Seabirds" in params.spname:
                    sidx = params.spname.index("Seabirds")
                    params_test["TRACE_DEBUG_GROUPS"] = [sidx]
                    params_test["spname"] = params.spname
            except Exception as e:
                logger.debug("TRACE key addition failed: %s", e)
            deriv_test = deriv_vector(state.copy(), params_test, forcing0, fishing_dict)
            diffs = np.abs(init_deriv - deriv_test)
            TH = 1e-12
            if np.any(diffs > TH):
                logger.debug(
                    "derivative mismatch between raw (NoIntegrate disabled) and test-style params; count>%s: %d",
                    TH,
                    int(np.sum(diffs > TH)),
                )
                # Show first few mismatches
                mism = np.where(diffs > TH)[0][:20]
                for idx in mism:
                    name = (
                        params.spname[idx]
                        if (hasattr(params, "spname") and idx < len(params.spname))
                        else ""
                    )
                    logger.debug(
                        "deriv diff idx=%d name=%s raw=%.6e test=%.6e diff=%.6e",
                        idx,
                        name,
                        init_deriv[idx],
                        deriv_test[idx],
                        diffs[idx],
                    )
            # Also compare key parameter arrays for quick diffs
            for key in ["M0", "PB", "QB", "Bbase", "NoIntegrate"]:
                a = params_dict.get(key)
                b = params_test.get(key)
                try:
                    aa = np.asarray(a)
                    bb = np.asarray(b)
                    if aa.shape == bb.shape:
                        md = np.nanmax(np.abs(aa - bb))
                        if md > 0:
                            logger.debug("param '%s' max abs diff = %.6e", key, md)
                    else:
                        logger.debug(
                            "param '%s' shape differs: %s vs %s",
                            key,
                            aa.shape,
                            bb.shape,
                        )
                except Exception as e:
                    logger.debug("parameter comparison failed: %s", e)
            # Quick QQ check for Seabirds if present
            try:
                if "Seabirds" in params.spname:
                    sidx = params.spname.index("Seabirds")
                    QQ_a = params_dict.get("QQbase")
                    QQ_b = params_test.get("QQbase")
                    if (
                        QQ_a is not None
                        and QQ_b is not None
                        and QQ_a.shape == QQ_b.shape
                    ):
                        col_diff = np.nanmax(np.abs(QQ_a[:, sidx] - QQ_b[:, sidx]))
                        row_diff = np.nanmax(np.abs(QQ_a[sidx, :] - QQ_b[sidx, :]))
                        if col_diff > 0 or row_diff > 0:
                            logger.debug(
                                "QQ differences for Seabirds col_diff=%.6e row_diff=%.6e",
                                col_diff,
                                row_diff,
                            )
            except Exception as e:
                logger.debug("QQ Seabirds check failed: %s", e)
        except Exception as e:
            logger.debug("derivative comparison failed: %s", e)

    except Exception as e:
        logger.debug("init_deriv computation failed: %s", e)
        init_mask = np.zeros(n_groups, dtype=bool)

    if np.any(init_mask):
        # Informative message for debugging; this can be toggled or removed if desired
        logger.debug(
            "zeroing tiny initial derivatives for groups: %s",
            np.where(init_mask)[0].tolist(),
        )

    # Build a looser mask for small initial derivatives that we should prevent from changing
    # during the warmup period to avoid slow drift accumulation
    try:
        init_mask_loose = np.abs(init_deriv) < ADJUST_DERIV_MAX
        init_mask_loose[0] = False
        if np.any(init_mask_loose) and not np.array_equal(init_mask_loose, init_mask):
            logger.debug(
                "small initial derivatives (loose mask) groups: %s",
                np.where(init_mask_loose)[0].tolist(),
            )
    except Exception as e:
        logger.debug("loose mask computation failed: %s", e)
        init_mask_loose = np.zeros(n_groups, dtype=bool)

    # If there are tiny residuals, nudge M0 in params_dict so initial derivative is exactly zero
    # This uses a tiny adjustment only when the required change is small (avoid large parameter changes)
    # Only accept very small M0 changes at init/final check to avoid drifting from Rpath
    # (use a strict absolute threshold to prevent ~1e-3 sized nudges that affect parity)
    M0_ADJUST_THRESHOLD = (
        1e-4  # absolute threshold allowed for M0 adjustments (was 1e-3)
    )
    ADJUST_DERIV_MAX = (
        1e-3  # consider adjusting M0 for initial derivatives smaller than this (abs)
    )
    try:
        # Build immediate fishing mortality (base) to compute fish_loss
        logger.debug("entering M0 adjustment block")
        # Use the normalized fishing dict so we don't depend on dataclass vs dict
        fishing_mort = fishing_dict.get("FishingMort", np.zeros(n_groups))
        fish_from = fishing_dict.get("FishFrom", [])
        fish_q = fishing_dict.get("FishQ", np.array([0.0]))
        for i in range(1, len(fish_from)):
            grp = int(fish_from[i])
            fishing_mort[grp] += fish_q[i]

        for grp in range(1, params.NUM_GROUPS + 1):
            # Consider small initial residuals within ADJUST_DERIV_MAX
            if not (abs(init_deriv[grp]) < ADJUST_DERIV_MAX):
                continue
            B = state[grp]
            if B <= 0:
                continue
            # Use baseline QQ (QQbase) to approximate consumption and predation loss at equilibrium
            QQbase = params_dict.get("QQbase")
            # Consumption by this group (sum over prey)
            consumption = (
                float(np.nansum(QQbase[:, grp])) if QQbase is not None else 0.0
            )
            # Predation loss on this group (sum over predators)
            predation_loss = (
                float(np.nansum(QQbase[grp, :])) if QQbase is not None else 0.0
            )

            # Production (handle producers/consumers)
            PB = params_dict.get("PB")[grp]
            QB = params_dict.get("QB")[grp]
            PP_type = params_dict.get("PP_type", np.zeros(params.NUM_GROUPS + 1))
            Bbase = params_dict.get("Bbase")
            if PP_type[grp] > 0:
                # Primary producer: use density-dependent formula at baseline forcing
                if Bbase is not None and Bbase[grp] > 0:
                    rel_bio = B / Bbase[grp]
                    dd_factor = max(0.0, 2.0 - rel_bio)
                    production = PB * B * dd_factor
                else:
                    production = PB * B
            elif QB > 0:
                GE = PB / QB
                production = GE * consumption
            else:
                production = PB * B

            fish_loss = fishing_mort[grp] * B
            current_m0 = float(params_dict.get("M0")[grp])
            # Compute desired M0 using raw initial derivative computed without NoIntegrate masking
            desired_m0 = current_m0 + float(init_deriv[grp]) / B
            diff = desired_m0 - current_m0
            # Debug log the computed quantities
            logger.debug(
                "grp=%d B=%.6e consumption=%.6e production=%.6e predation_loss=%.6e fish_loss=%.6e current_m0=%.6e desired_m0=%.6e diff=%.6e",
                grp,
                B,
                consumption,
                production,
                predation_loss,
                fish_loss,
                current_m0,
                desired_m0,
                diff,
            )
            # Extra debugging for Seabirds specifically
            try:
                if "Seabirds" in params.spname:
                    sidx = params.spname.index("Seabirds")
                    if grp == sidx:
                        logger.debug(
                            "Seabirds calculation: B=%.6e consumption=%.6e pred_loss=%.6e production=%.6e fish_loss=%.6e current_m0=%.6e desired_m0=%.6e diff=%.6e",
                            B,
                            consumption,
                            predation_loss,
                            production,
                            fish_loss,
                            current_m0,
                            desired_m0,
                            diff,
                        )
            except Exception as e:
                logger.debug("Seabirds debug logging failed: %s", e)
            # Accept small changes only (absolute threshold)
            if np.isfinite(desired_m0) and abs(diff) <= M0_ADJUST_THRESHOLD:
                seab_lbl = ""
                try:
                    if (
                        "Seabirds" in params.spname
                        and params.spname.index("Seabirds") == grp
                    ):
                        seab_lbl = "Seabirds"
                except Exception as e:
                    logger.debug("Seabirds label lookup failed: %s", e)
                logger.debug(
                    "assigning M0 for grp=%d (%s) diff=%.6e", grp, seab_lbl, diff
                )
                # Iteratively refine M0 to drive the raw (no-NoIntegrate) initial residual toward zero
                MAX_M0_ITER = 5
                TOL_INIT_DERIV_ITER = 1e-10
                try:
                    # Start from a params dict that has NoIntegrate disabled so we measure the raw algebraic residual
                    params_iter = params_no_noint.copy()
                    # Ensure M0 array is present and set initial candidate
                    params_iter["M0"] = params_dict["M0"].copy()
                    params_iter["M0"][grp] = desired_m0
                    params_dict["M0"][grp] = desired_m0
                    logger.debug(
                        "initial M0 assigned grp=%d value=%.6e",
                        grp,
                        params_dict["M0"][grp],
                    )

                    for it in range(1, MAX_M0_ITER + 1):
                        try:
                            init_deriv_iter = deriv_vector(
                                state.copy(), params_iter, forcing0, fishing_dict
                            )
                            residual = float(init_deriv_iter[grp])
                            logger.debug(
                                "M0 iter grp=%d it=%d residual=%.6e",
                                grp,
                                it,
                                residual,
                            )
                            # If residual sufficiently small, stop
                            if abs(residual) < TOL_INIT_DERIV_ITER:
                                break
                            # Compute correction step: delta_m0 = residual / B
                            step = residual / B
                            # Clamp step so we don't jump by more than allowed threshold
                            if abs(step) > M0_ADJUST_THRESHOLD:
                                step = np.sign(step) * M0_ADJUST_THRESHOLD
                            params_iter["M0"][grp] += step
                            params_dict["M0"][grp] = params_iter["M0"][grp]
                        except Exception as e:
                            logger.debug(
                                "M0 iteration failed for grp=%d it=%d: %s",
                                grp,
                                it,
                                e,
                            )
                            break
                    logger.debug(
                        "final M0 for grp=%d value=%.6e",
                        grp,
                        params_dict["M0"][grp],
                    )

                    # Final check using the params dict that will be persisted
                    try:
                        params_check = params_dict.copy()
                        init_deriv_check = deriv_vector(
                            state.copy(), params_check, forcing0, fishing_dict
                        )
                        final_residual = float(init_deriv_check[grp])
                        logger.debug(
                            "final check residual grp=%d residual=%.6e",
                            grp,
                            final_residual,
                        )
                        if (
                            B != 0
                            and np.isfinite(final_residual)
                            and abs(final_residual) > 0.0
                        ):
                            step = final_residual / B
                            if abs(step) > M0_ADJUST_THRESHOLD:
                                step = np.sign(step) * M0_ADJUST_THRESHOLD
                            params_dict["M0"][grp] += step
                            logger.debug(
                                "final adj applied grp=%d new_m0=%.6e step=%.6e",
                                grp,
                                params_dict["M0"][grp],
                                step,
                            )
                    except Exception as e:
                        logger.debug("final M0 check failed for grp=%d: %s", grp, e)
                        pass
                except Exception as e:
                    logger.debug("M0 assignment failed for grp=%d: %s", grp, e)
                    pass

            # Only persist small/safe M0 adjustments; avoid overwriting for larger computed desired_m0
            if np.isfinite(desired_m0) and abs(diff) <= M0_ADJUST_THRESHOLD:
                params_dict["M0"][grp] = desired_m0
                logger.debug(
                    "enforcing exact initial equilibrium for group %d: M0 %.6e -> %.6e (init_deriv=%.6e)",
                    grp,
                    current_m0,
                    desired_m0,
                    init_deriv[grp],
                )
            else:
                # Do not apply large adjustments; leave M0 as originally specified
                if np.isfinite(desired_m0):
                    logger.debug(
                        "skipping initial M0 assign for grp=%d (diff=%.6e > threshold)",
                        grp,
                        diff,
                    )
    except Exception as e:
        logger.debug("M0 adjustment failed: %s", e, exc_info=True)

    # Debug: report M0 small sample
    try:
        if "Seabirds" in params.spname:
            sidx = params.spname.index("Seabirds")
            logger.debug(
                "M0 after adjust for Seabirds idx=%d value=%.6e",
                sidx,
                params_dict["M0"][sidx],
            )
    except Exception as e:
        logger.debug("M0 post-adjust debug failed: %s", e)

    # Final check: compute derivative using the params dict that will be persisted
    # and make a small algebraic correction if a tiny residual remains.
    try:
        logger.debug("performing final M0 algebraic check")
        check_deriv = deriv_vector(state.copy(), params_dict, forcing0, fishing_dict)
        for grp in range(1, params.NUM_GROUPS + 1):
            if not np.isfinite(check_deriv[grp]) or state[grp] <= 0:
                continue
            # only consider small residuals within adjustment window
            if abs(check_deriv[grp]) < ADJUST_DERIV_MAX and abs(check_deriv[grp]) > 0:
                step = check_deriv[grp] / state[grp]
                if abs(step) > M0_ADJUST_THRESHOLD:
                    step = np.sign(step) * M0_ADJUST_THRESHOLD
                params_dict["M0"][grp] += step
                logger.debug(
                    "final algebraic adjust grp=%d step=%.6e new_m0=%.6e residual_after=%.6e",
                    grp,
                    step,
                    params_dict["M0"][grp],
                    check_deriv[grp],
                )
    except Exception as e:
        logger.debug("final M0 algebraic check failed: %s", e)
        pass

    # Persist any M0 adjustments back to the params dataclass so diagnostics
    # and downstream code that read scenario.params.MzeroMort see the same values
    try:
        if "M0" in params_dict:
            # params is the RsimParams object (scenario.params)
            params.MzeroMort = params_dict["M0"].copy()
            # Debug sample to confirm persistence
            try:
                logger.debug("persisted adjusted M0 sample=%s", params.MzeroMort[:6])
            except Exception as e:
                logger.debug("M0 persistence debug failed: %s", e)
    except Exception as e:
        logger.debug("M0 persistence debug failed: %s", e)

    # Build fishing dict
    fishing_dict = {
        "FishFrom": params.FishFrom,
        "FishThrough": params.FishThrough,
        "FishQ": params.FishQ,
        "FishingMort": np.zeros(n_groups),  # Base fishing mortality (no effort scaling)
    }

    # Calculate base fishing mortality (without effort scaling)
    for i in range(1, len(params.FishFrom)):
        grp = params.FishFrom[i]
        fishing_dict["FishingMort"][grp] += params.FishQ[i]

    # History for Adams-Bashforth
    derivs_history = []

    dt = 1.0 / 12.0  # Monthly timestep
    crash_year = -1
    crashed_groups = set()  # Track which groups have crashed
    crash_threshold = 1e-4  # More reasonable threshold (0.0001 vs 0.000001)

    # Initialize annual Qlink accumulator if links exist
    annual_qlink = (
        np.zeros((n_years, len(params.PreyFrom))) if len(params.PreyFrom) > 0 else None
    )

    # Pre-compute the NoIntegrate mask once (NoIntegrate never changes during sim).
    # This avoids recomputing np.asarray(...) != 0 four times per month.
    no_integrate_mask = (
        np.asarray(params_dict.get("NoIntegrate", np.zeros(n_groups))) != 0
    )

    # Pre-copy Ftime once — it is static and doesn't change across months.
    _ftime_snapshot = scenario.start_state.Ftime.copy()

    # Main simulation loop
    # Debug: log fishing link summary before starting loop
    try:
        if len(params.FishFrom) > 0:
            logger.debug(
                "starting simulation months=%s FishFrom=%s FishQ=%s FishThrough=%s ForcedEffort_sample=%s",
                n_months,
                params.FishFrom,
                params.FishQ,
                params.FishThrough,
                (
                    fishing_obj.ForcedEffort[0]
                    if len(fishing_obj.ForcedEffort) > 0
                    else None
                ),
            )
    except Exception as e:
        logger.debug("fishing link debug failed: %s", e)

    for month in range(1, n_months + 1):
        # Debug: indicate loop iteration for first few months
        if month <= 6:
            logger.debug("entering month loop month=%s", month)
        t = month * dt
        year_idx = (month - 1) // 12
        month_in_year = (month - 1) % 12

        # Build forcing dict for this timestep
        forcing_dict = {
            "Ftime": _ftime_snapshot,
            "ForcedBio": np.where(
                forcing.ForcedBio[month - 1] > 0, forcing.ForcedBio[month - 1], 0
            ),
            "ForcedMigrate": forcing.ForcedMigrate[month - 1],
            "ForcedEffort": (
                fishing_obj.ForcedEffort[month - 1]
                if month - 1 < len(fishing_obj.ForcedEffort)
                else np.ones(params.NUM_GEARS + 1)
            ),
        }

        # Integration step
        if method.upper() == "RK4":
            old_state = state.copy()
            state = integrate_rk4(state, params_dict, forcing_dict, fishing_dict, dt)
            # If small initial derivative mask exists, prevent first-step change for those groups
            if month == 1 and np.any(init_mask):
                state[init_mask] = old_state[init_mask]
            # Prevent groups with small initial residuals from changing during warmup
            WARMUP_MONTHS = 12
            if month <= WARMUP_MONTHS and np.any(init_mask_loose):
                state[init_mask_loose] = old_state[init_mask_loose]
        elif method.upper() == "AB":
            # Warmup: use RK4 for the first few months to populate history and
            # improve stability before switching to multi-step Adams-Bashforth
            if month <= 12:
                # Use one year of RK4 warmup to get stable derivative history
                old_state = state.copy()
                # Mark parent integration method so RK4 instrumentation reports 'AB' for warmup
                params_dict["_integration_parent_method"] = "AB"
                try:
                    state = integrate_rk4(
                        state, params_dict, forcing_dict, fishing_dict, dt
                    )
                finally:
                    # Clean up the temporary context flag
                    params_dict.pop("_integration_parent_method", None)
                if month == 1 and np.any(init_mask):
                    state[init_mask] = old_state[init_mask]
                # Prevent groups with small initial residuals from changing during warmup
                WARMUP_MONTHS = 12
                if month <= WARMUP_MONTHS and np.any(init_mask_loose):
                    state[init_mask_loose] = old_state[init_mask_loose]
                try:
                    new_deriv = deriv_vector(
                        state, params_dict, forcing_dict, fishing_dict
                    )
                    # Sanitize derivative before storing to history to avoid
                    # carrying non-finite or extreme values into Adams-Bashforth
                    new_deriv = np.nan_to_num(
                        new_deriv, nan=0.0, posinf=1e6, neginf=-1e6
                    )
                    new_deriv = np.clip(new_deriv, -1e6, 1e6)
                    # Zero derivatives for NoIntegrate groups to align with Rpath
                    if np.any(no_integrate_mask):
                        new_deriv[no_integrate_mask] = 0.0
                    derivs_history.insert(0, new_deriv)
                    if len(derivs_history) > 3:
                        derivs_history.pop()
                except Exception as e:
                    logger.debug("derivative history update failed: %s", e)
            else:
                if params_dict.get("VERBOSE_INSTRUMENTATION"):
                    logger.debug(
                        "DEBUG-INTEGRATOR: about to call integrate_ab with params_dict['INSTRUMENT_GROUPS']=%r",
                        params_dict.get("INSTRUMENT_GROUPS", None),
                    )
                state, new_deriv = integrate_ab(
                    state, derivs_history, params_dict, forcing_dict, fishing_dict, dt
                )
                # Ensure NoIntegrate groups remain fixed and have zero derivative
                if np.any(no_integrate_mask):
                    Bbase = params_dict.get("Bbase")
                    if Bbase is not None:
                        state[no_integrate_mask] = Bbase[no_integrate_mask]
                    new_deriv[no_integrate_mask] = 0.0
                derivs_history.insert(0, new_deriv)
                if len(derivs_history) > 3:
                    derivs_history.pop()
        else:
            # Unknown method: fallback to RK4 to be safe
            old_state = state.copy()
            state = integrate_rk4(state, params_dict, forcing_dict, fishing_dict, dt)
            if month == 1 and np.any(init_mask):
                state[init_mask] = old_state[init_mask]

        # Monthly M0 adjustment to enforce algebraic equilibrium for small residuals
        if not params_dict.get("MONTHLY_M0_ADJUST", True):
            if params_dict.get("VERBOSE_DEBUG"):
                logger.debug(
                    "skipping monthly M0 adjustment (disabled) for month=%s", month
                )
        else:
            try:
                if params_dict.get("VERBOSE_DEBUG"):
                    logger.debug(
                        "entering monthly M0 adjustment block for month=%s", month
                    )
                # Compute raw derivative with actual fishing to measure algebraic residual
                raw_init_deriv = deriv_vector(
                    state.copy(), params_dict, forcing_dict, fishing_dict
                )
                if params_dict.get("VERBOSE_DEBUG"):
                    logger.debug(
                        "computed raw_init_deriv sample %s", raw_init_deriv[:10]
                    )
                # Use the normalized fishing dict so we don't depend on dataclass vs dict
                fishing_mort = fishing_dict.get("FishingMort", np.zeros(n_groups))
                fish_from = fishing_dict.get("FishFrom", [])
                fish_q = fishing_dict.get("FishQ", np.array([0.0]))
                for i in range(1, len(fish_from)):
                    grp = int(fish_from[i])
                    fishing_mort[grp] += fish_q[i]

                for grp in range(1, params.NUM_GROUPS + 1):
                    if not (abs(raw_init_deriv[grp]) < ADJUST_DERIV_MAX):
                        continue
                    B = state[grp]
                    if B <= 0:
                        continue
                    QQbase = params_dict.get("QQbase")
                    consumption = (
                        float(np.nansum(QQbase[:, grp])) if QQbase is not None else 0.0
                    )
                    predation_loss = (
                        float(np.nansum(QQbase[grp, :])) if QQbase is not None else 0.0
                    )
                    PB = params_dict.get("PB")[grp]
                    QB = params_dict.get("QB")[grp]
                    PP_type = params_dict.get(
                        "PP_type", np.zeros(params.NUM_GROUPS + 1)
                    )
                    Bbase = params_dict.get("Bbase")
                    if PP_type[grp] > 0:
                        if Bbase is not None and Bbase[grp] > 0:
                            rel_bio = B / Bbase[grp]
                            dd_factor = max(0.0, 2.0 - rel_bio)
                            production = PB * B * dd_factor
                        else:
                            production = PB * B
                    elif QB > 0:
                        GE = PB / QB
                        production = GE * consumption
                    else:
                        production = PB * B
                    fish_loss = fishing_mort[grp] * B
                    current_m0 = float(params_dict.get("M0")[grp])
                    desired_m0 = current_m0 + float(raw_init_deriv[grp]) / B
                    diff = desired_m0 - current_m0
                    # Debug log
                    logger.debug(
                        "monthly grp=%s B=%.6e consumption=%.6e production=%.6e predation_loss=%.6e fish_loss=%.6e current_m0=%.6e desired_m0=%.6e diff=%.6e",
                        grp,
                        B,
                        consumption,
                        production,
                        predation_loss,
                        fish_loss,
                        current_m0,
                        desired_m0,
                        diff,
                    )
                    if np.isfinite(desired_m0) and abs(diff) <= M0_ADJUST_THRESHOLD:
                        # Iteratively refine monthly M0 similar to initialization
                        try:
                            params_iter = params_dict.copy()
                            params_iter["M0"] = params_dict["M0"].copy()
                            params_iter["M0"][grp] = desired_m0
                            params_dict["M0"][grp] = desired_m0
                            MAX_M0_ITER = 3
                            TOL_INIT_DERIV_ITER = 1e-10
                            for it in range(1, MAX_M0_ITER + 1):
                                try:
                                    init_deriv_iter = deriv_vector(
                                        state.copy(),
                                        params_iter,
                                        forcing_dict,
                                        fishing_dict,
                                    )
                                    residual = float(init_deriv_iter[grp])
                                    if abs(residual) < TOL_INIT_DERIV_ITER:
                                        break
                                    step = residual / B
                                    if abs(step) > M0_ADJUST_THRESHOLD:
                                        step = np.sign(step) * M0_ADJUST_THRESHOLD
                                    params_iter["M0"][grp] += step
                                    params_dict["M0"][grp] = params_iter["M0"][grp]
                                except Exception as e:
                                    logger.debug("monthly M0 iteration failed: %s", e)
                                    break
                            if params_dict.get("VERBOSE_DEBUG"):
                                logger.debug(
                                    "monthly assigned M0 grp=%s new_m0=%.6e",
                                    grp,
                                    params_dict["M0"][grp],
                                )
                        except Exception as e:
                            logger.debug("monthly M0 assignment failed: %s", e)
                            params_dict["M0"][grp] = desired_m0
                            if params_dict.get("VERBOSE_DEBUG"):
                                logger.debug(
                                    "monthly assigned M0 grp=%s new_m0=%.6e",
                                    grp,
                                    params_dict["M0"][grp],
                                )
            except Exception as e:
                logger.debug("monthly M0 adjustment failed: %s", e)

        # Apply NoIntegrate behavior: hold fast-turnover groups at baseline
        try:
            # NoIntegrate uses 1 to indicate fast-turnover groups in params (1 = NoIntegrate)
            Bbase = params_dict.get("Bbase")
            if Bbase is not None:
                state[no_integrate_mask] = Bbase[no_integrate_mask]
        except Exception as e:
            logger.debug("NoIntegrate application failed: %s", e)

        # Replace invalid numeric values to prevent NaN/inf runaway and
        # ensure non-negative biomass
        state = np.where(np.isfinite(state), state, EPSILON)
        state = np.maximum(state, EPSILON)

        # Update stanza groups (age structure dynamics)
        if scenario.stanzas is not None and scenario.stanzas.n_split > 0:
            # Update state in a temporary state object
            temp_state = RsimState(
                Biomass=state.copy(),
                N=np.zeros_like(state),
                Ftime=forcing_dict["Ftime"],
            )
            # Call stanza update for this month
            split_update(scenario.stanzas, temp_state, params, month)
            # Update predation rates based on new stanza structure
            split_set_pred(scenario.stanzas, temp_state, params)
            # Note: Biomass redistribution among stanza groups handled in split_update

            # Record stanza-resolved biomass for this month
            for isp in range(1, scenario.stanzas.n_split + 1):
                nst = scenario.stanzas.n_stanzas[isp]
                for ist in range(1, nst + 1):
                    ieco = int(scenario.stanzas.ecopath_code[isp, ist])
                    first = int(scenario.stanzas.age1[isp, ist])
                    last = int(scenario.stanzas.age2[isp, ist])
                    bio = np.nansum(
                        scenario.stanzas.base_nage_s[first : last + 1, isp]
                        * scenario.stanzas.base_wage_s[first : last + 1, isp]
                    )
                    if ieco >= 0 and ieco < n_groups and stanza_biomass is not None:
                        stanza_biomass[month, ieco] += bio

        # Check for crash (biomass < threshold)
        # Use more reasonable threshold to avoid false alarms from numerical noise
        if crash_year < 0:
            low_biomass_groups = np.where(
                state[1 : params.NUM_LIVING + 1] < crash_threshold
            )[0]
            if len(low_biomass_groups) > 0:
                # Record first crash year
                crash_year = year_idx + scenario.start_year
                # Track which groups crashed
                for grp_idx in low_biomass_groups:
                    crashed_groups.add(grp_idx + 1)  # +1 because we sliced from index 1

        # Enforce NoIntegrate at the final step for this month (after stanza updates)
        if np.any(no_integrate_mask):
            Bbase = params_dict.get("Bbase")
            if Bbase is not None:
                state[no_integrate_mask] = Bbase[no_integrate_mask]

        # Store results
        out_biomass[month] = state
        # Re-assert NoIntegrate groups in stored results to mitigate any numerical drift
        if np.any(no_integrate_mask):
            Bbase = params_dict.get("Bbase")
            if Bbase is not None:
                out_biomass[month, no_integrate_mask] = Bbase[no_integrate_mask]

        # Compute consumption QQ matrix for this month to track Qlinks
        QQ_month = _compute_Q_matrix(params_dict, state, forcing_dict)
        # Accumulate monthly Q (converted to monthly by dividing by 12)
        if annual_qlink is not None:
            for li in range(len(params.PreyFrom)):
                prey = params.PreyFrom[li]
                pred = params.PreyTo[li]
                if prey < QQ_month.shape[0] and pred < QQ_month.shape[1]:
                    annual_qlink[year_idx, li] += QQ_month[prey, pred] / 12.0

        # Calculate catch for this month
        for i in range(1, len(params.FishFrom)):
            grp = params.FishFrom[i]
            gear_group_idx = params.FishThrough[i]
            # Convert group-based gear index to gear array index
            gear_idx = int(gear_group_idx - params.NUM_LIVING - params.NUM_DEAD)
            effort_mult = (
                forcing_dict["ForcedEffort"][gear_idx]
                if 0 < gear_idx < len(forcing_dict["ForcedEffort"])
                else 1.0
            )
            catch = params.FishQ[i] * state[grp] * effort_mult / 12.0
            # Debug log to trace catch computation for early months
            try:
                if month <= 2:
                    logger.debug(
                        "month=%s link=%s grp=%s FishQ=%.6e effort_mult=%.6e state=%.6e catch=%.6e",
                        month,
                        i,
                        grp,
                        params.FishQ[i],
                        effort_mult,
                        state[grp],
                        catch,
                    )
            except Exception as e:
                logger.debug("catch computation debug failed: %s", e)
            out_catch[month, grp] += catch
            out_gear_catch[month, i] = catch

    # Calculate annual values
    annual_biomass = np.zeros((n_years, n_groups))
    annual_catch = np.zeros((n_years, n_groups))
    annual_qb = np.zeros((n_years, n_groups))

    for yr in range(n_years):
        start_m = yr * 12 + 1
        end_m = (yr + 1) * 12 + 1
        annual_biomass[yr] = np.mean(out_biomass[start_m:end_m], axis=0)
        annual_catch[yr] = np.sum(out_catch[start_m:end_m], axis=0)

    # If Qlink accumulation was tracked, ensure shape is set
    if "annual_qlink" not in locals():
        annual_qlink = np.zeros((n_years, len(params.PreyFrom)))

    # If stanza_biomass was not computed (no stanzas), set to None
    if stanza_biomass is None:
        stanza_biomass_out = None
    else:
        stanza_biomass_out = stanza_biomass

    # Create end state
    end_state = RsimState(
        Biomass=state.copy(),
        N=np.zeros(n_groups),
        Ftime=scenario.start_state.Ftime.copy(),
    )

    # Build predator-prey identifiers for output
    pred_names = np.array(
        [params.spname[params.PreyTo[i]] for i in range(len(params.PreyTo))]
    )
    prey_names = np.array(
        [params.spname[params.PreyFrom[i]] for i in range(len(params.PreyFrom))]
    )

    # Gear catch identifiers
    gear_catch_sp = np.array(
        [params.spname[params.FishFrom[i]] for i in range(len(params.FishFrom))]
    )
    gear_catch_gear = np.array(
        [
            (
                params.spname[params.FishThrough[i]]
                if params.FishThrough[i] < len(params.spname)
                else f"Gear{params.FishThrough[i]}"
            )
            for i in range(len(params.FishThrough))
        ]
    )
    gear_catch_disp = np.where(params.FishTo == 0, "Landings", "Discards")

    # Return full monthly time series including the initial snapshot (index 0).
    # Tests and downstream code expect the initial state to be included as row 0.

    return RsimOutput(
        out_Biomass=out_biomass,
        out_Catch=out_catch,
        out_Gear_Catch=out_gear_catch,
        annual_Biomass=annual_biomass,
        annual_Catch=annual_catch,
        annual_QB=annual_qb,
        annual_Qlink=annual_qlink,
        stanza_biomass=stanza_biomass_out,
        end_state=end_state,
        crash_year=crash_year,
        crashed_groups=crashed_groups,
        pred=pred_names,
        prey=prey_names,
        Gear_Catch_sp=gear_catch_sp,
        Gear_Catch_gear=gear_catch_gear,
        Gear_Catch_disp=gear_catch_disp,
        start_state=copy.deepcopy(scenario.start_state),
        params={
            "NUM_GROUPS": params.NUM_GROUPS,
            "NUM_LIVING": params.NUM_LIVING,
            "years": n_years,
        },
    )

ODE Derivatives

pypath.core.ecosim_deriv

Ecosim derivative calculation and integration routines.

This module contains the core numerical routines for Ecosim simulation: - deriv_vector: Calculate derivatives for all state variables - RK4 and Adams-Bashforth integration methods - Prey switching and mediation functions - Primary production forcing

These are ported from the C++ ecosim.cpp file in Rpath.

SimState dataclass

Current state of the simulation.

Source code in pypath/core/ecosim_deriv.py
561
562
563
564
565
566
567
568
569
570
571
572
573
574
@dataclass
class SimState:
    """Current state of the simulation.

    Holds the per-group state and forcing arrays that the Ecosim derivative
    routines read and update each time step. Per-group arrays appear to be
    1-indexed by group (index 0 unused), consistent with the loops elsewhere
    in this module — confirm against the array construction in the caller.
    """

    # Biomass and related state variables (indexed 0 to NUM_GROUPS)
    Biomass: np.ndarray  # Current biomass by group
    Ftime: np.ndarray  # Fishing time forcing by group

    # Consumption tracking
    QQ: np.ndarray  # Consumption matrix Q[prey, pred]

    # Forcing arrays
    force_bybio: np.ndarray  # Biomass forcing multipliers by group
    force_byprey: np.ndarray  # Prey-specific forcing by group

prey_switching

prey_switching(BB: ndarray, Bbase: ndarray, pred: int, ActiveLink: ndarray, switch_power: float = 2.0) -> np.ndarray

Calculate prey switching factors.

Prey switching occurs when predators preferentially consume more abundant prey, stabilizing the system. Uses a power function of relative abundance.

Parameters:

Name Type Description Default
BB ndarray

Current biomass array

required
Bbase ndarray

Baseline biomass array

required
pred int

Predator index

required
ActiveLink ndarray

Active link matrix [prey, pred]

required
switch_power float

Prey switching power (default 2.0, range 0-2) - 0: No switching - 1: Linear switching - 2: Strong switching (Murdoch switching)

2.0

Returns:

Type Description
ndarray

Switching factors for each prey (indexed by prey)

Source code in pypath/core/ecosim_deriv.py
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
def prey_switching(
    BB: np.ndarray,
    Bbase: np.ndarray,
    pred: int,
    ActiveLink: np.ndarray,
    switch_power: float = 2.0,
) -> np.ndarray:
    """
    Compute prey-switching multipliers for a single predator.

    Predators shift effort toward relatively abundant prey, which stabilizes
    the food web. Each eligible prey's factor is its relative abundance
    (B/Bbase) raised to ``switch_power``, normalized so the factors average
    1.0 across the predator's active links.

    Parameters
    ----------
    BB : np.ndarray
        Current biomass array
    Bbase : np.ndarray
        Baseline biomass array
    pred : int
        Predator index
    ActiveLink : np.ndarray
        Active link matrix [prey, pred]
    switch_power : float
        Prey switching power (default 2.0, range 0-2)
        - 0: No switching
        - 1: Linear switching
        - 2: Strong switching (Murdoch switching)

    Returns
    -------
    np.ndarray
        Switching factors for each prey (indexed by prey); 1.0 for groups
        that are not eligible prey of this predator.
    """
    n_groups = len(BB)
    factors = np.ones(n_groups)

    # switch_power <= 0 means switching is disabled: all factors stay 1.
    if switch_power <= 0:
        return factors

    # Eligible prey: an active link to this predator and a positive baseline.
    # Group index 0 is never considered (groups are 1-indexed here).
    link_col = np.asarray(ActiveLink[:n_groups, pred], dtype=bool)
    eligible = link_col & (np.asarray(Bbase[:n_groups]) > 0)
    eligible[0] = False

    rel = (np.asarray(BB)[eligible] / np.asarray(Bbase)[eligible]) ** switch_power
    total_rel = rel.sum()
    if total_rel <= 0:
        return factors

    # Normalize so the eligible factors have mean 1 at baseline biomass.
    factors[eligible] = rel / total_rel * np.count_nonzero(eligible)
    return factors

mediation_function

mediation_function(mediation_type: int, med_bio: float, med_base: float, med_params: Dict[str, float]) -> float

Calculate mediation effect on predation.

Mediation allows a third party (mediator) to affect the predator-prey interaction, representing effects like habitat provision or fear.

Parameters:

Name Type Description Default
mediation_type int

Type of mediation function: - 0: No mediation (returns 1.0) - 1: Positive mediation (more mediator = more predation) - 2: Negative mediation (more mediator = less predation) - 3: U-shaped (optimal at intermediate mediator biomass)

required
med_bio float

Current mediator biomass

required
med_base float

Baseline mediator biomass

required
med_params dict

Parameters including 'low', 'high', 'shape'

required

Returns:

Type Description
float

Mediation multiplier (>0)

Source code in pypath/core/ecosim_deriv.py
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
def mediation_function(
    mediation_type: int, med_bio: float, med_base: float, med_params: Dict[str, float]
) -> float:
    """
    Calculate the mediation multiplier on a predation link.

    A third-party "mediator" group can strengthen or weaken a predator-prey
    interaction (e.g. habitat provision, refuge, fear effects). The multiplier
    is a saturating function of the mediator's biomass relative to baseline.

    Parameters
    ----------
    mediation_type : int
        Type of mediation function:
        - 0: No mediation (returns 1.0)
        - 1: Positive mediation (more mediator = more predation)
        - 2: Negative mediation (more mediator = less predation)
        - 3: U-shaped (optimal at intermediate mediator biomass)
    med_bio : float
        Current mediator biomass
    med_base : float
        Baseline mediator biomass
    med_params : dict
        Parameters including 'low', 'high', 'shape'

    Returns
    -------
    float
        Mediation multiplier, clamped to be at least 0.001
    """
    # No mediation configured, or baseline is degenerate: no effect.
    if mediation_type == 0 or med_base <= 0:
        return 1.0

    lo = med_params.get("low", 0.5)
    hi = med_params.get("high", 2.0)
    shp = med_params.get("shape", 1.0)

    ratio = med_bio / med_base  # mediator biomass relative to baseline

    if mediation_type == 1:
        # Saturating increase from lo toward hi as the mediator grows.
        sat = ratio**shp / (1.0 + ratio**shp)
        result = lo + (hi - lo) * sat
    elif mediation_type == 2:
        # Saturating decrease from hi toward lo as the mediator grows.
        sat = ratio**shp / (1.0 + ratio**shp)
        result = hi - (hi - lo) * sat
    elif mediation_type == 3:
        # Peak of hi at ratio == 1, declining toward lo at both extremes.
        dev = abs(ratio - 1.0)
        result = hi - (hi - lo) * (dev**shp) / (1.0 + dev**shp)
    else:
        # Unrecognized type: treat as no mediation.
        result = 1.0

    # Clamp to keep the multiplier strictly positive.
    return result if result > 0.001 else 0.001

primary_production_forcing

primary_production_forcing(BB: ndarray, Bbase: ndarray, PB: ndarray, PP_forcing: ndarray, PP_type: ndarray, NUM_LIVING: int) -> np.ndarray

Calculate primary production with environmental forcing.

In Ecosim/Rpath, primary producers use density-dependent production to ensure stability. The production rate decreases as biomass increases above baseline, mimicking nutrient limitation.

Parameters:

Name Type Description Default
BB ndarray

Current biomass

required
Bbase ndarray

Baseline biomass

required
PB ndarray

Production/biomass ratios

required
PP_forcing ndarray

Primary production forcing multipliers by group

required
PP_type ndarray

Producer type by group: - 0: Not a producer (consumer) - 1: Primary producer (density-dependent, default) - 2: Detritus (no production)

required
NUM_LIVING int

Number of living groups

required

Returns:

Type Description
ndarray

Primary production rates

Source code in pypath/core/ecosim_deriv.py
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
def primary_production_forcing(
    BB: np.ndarray,
    Bbase: np.ndarray,
    PB: np.ndarray,
    PP_forcing: np.ndarray,
    PP_type: np.ndarray,
    NUM_LIVING: int,
) -> np.ndarray:
    """
    Calculate primary production with environmental forcing.

    Producers get density-dependent production for stability: the effective
    rate falls as biomass rises above baseline, mimicking nutrient
    limitation. Only groups with PP_type == 1 produce here; consumers
    (type 0) derive production from consumption elsewhere and detritus
    (type 2) produces nothing.

    Parameters
    ----------
    BB : np.ndarray
        Current biomass
    Bbase : np.ndarray
        Baseline biomass
    PB : np.ndarray
        Production/biomass ratios
    PP_forcing : np.ndarray
        Primary production forcing multipliers by group
    PP_type : np.ndarray
        Producer type by group:
        - 0: Not a producer (consumer)
        - 1: Primary producer (density-dependent, default)
        - 2: Detritus (no production)
    NUM_LIVING : int
        Number of living groups

    Returns
    -------
    np.ndarray
        Primary production rates (zero for non-producers)
    """
    production = np.zeros(len(BB))
    upper = min(NUM_LIVING + 1, len(BB))

    for grp in range(1, upper):
        # Only true primary producers contribute; consumers and detritus
        # (and any other type code) are left at zero.
        if PP_type[grp] != 1:
            continue

        base_rate = PB[grp] * BB[grp] * PP_forcing[grp]
        if Bbase[grp] > 0:
            # Density-dependent factor (2 - B/Bbase), floored at zero:
            # equals 1 at baseline (production = PB * B), 0 at twice
            # baseline, and approaches 2 near zero biomass so depleted
            # producers recover quickly.
            dd = 2.0 - BB[grp] / Bbase[grp]
            production[grp] = base_rate * (dd if dd > 0 else 0.0)
        else:
            # No usable baseline: fall back to plain forced production.
            production[grp] = base_rate

    return production

deriv_vector

deriv_vector(state: ndarray, params: dict, forcing: dict, fishing: dict, t: float = 0.0) -> np.ndarray

Calculate derivatives for all state variables in Ecosim.

This is the core function that implements the Ecosim differential equations based on foraging arena theory with prey switching and mediation support.

The functional response is: C_ij = (a_ij * v_ij * B_i * B_j * T_j * S_ij * D_j * M_ij) / (v_ij + v_ij * T_j * D_j + a_ij * B_j * D_j + a_ij * d_ij * B_j * D_j^2)

Where: a_ij = base search rate (from QQ/BB setup); v_ij = vulnerability exchange rate; B_i = prey biomass; B_j = predator biomass; T_j = time forcing on predator; S_ij = prey switching factor; D_j = handling time factor; d_ij = handling time for this link; M_ij = mediation multiplier

Parameters:

Name Type Description Default
state ndarray

Current state vector (biomass values) indexed 0 to NUM_GROUPS

required
params dict

Model parameters containing: - NUM_GROUPS: Total number of groups - NUM_LIVING: Number of living groups - NUM_DEAD: Number of detritus groups - NUM_GEARS: Number of fishing gears - PB: Production/Biomass ratios - QB: Consumption/Biomass ratios - ActiveLink: Boolean array [prey, pred] of active links - DC: Diet composition matrix [prey, pred] - VV: Vulnerability parameters [prey, pred] - DD: Handling time parameters [prey, pred] - Bbase: Baseline biomass [group] - DetFrac: Fraction to detritus [group] - Unassim: Unassimilated fraction [group] - SwitchPower: Prey switching power (0-2, default 0) - PP_type: Producer type array [group] - Mediation: Mediation configuration dict

required
forcing dict

Forcing arrays: - ForcedBio: Forced biomass values [group] - ForcedMigrate: Migration forcing [group] - ForcedCatch: Forced catch [group] - ForcedEffort: Forced effort [gear] - PP_forcing: Primary production forcing [group] - Ftime: Time forcing [group]

required
fishing dict

Fishing parameters: - FishingMort: Base fishing mortality [group] - EffortCap: Effort cap [gear]

required
t float

Current time (for time-varying forcing)

0.0

Returns:

Type Description
ndarray

Derivative vector (dB/dt for each group)

Source code in pypath/core/ecosim_deriv.py
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
def deriv_vector(
    state: np.ndarray, params: dict, forcing: dict, fishing: dict, t: float = 0.0
) -> np.ndarray:
    """
    Calculate derivatives for all state variables in Ecosim.

    This is the core function that implements the Ecosim differential equations
    based on foraging arena theory with prey switching and mediation support.

    The functional response is:
        C_ij = (a_ij * v_ij * B_i * B_j * T_j * S_ij * D_j * M_ij) /
               (v_ij + v_ij*T_j*D_j + a_ij*B_j*D_j + a_ij*d_ij*B_j*D_j^2)

    Where:
        a_ij = base search rate (from QQ/BB setup)
        v_ij = vulnerability exchange rate
        B_i = prey biomass
        B_j = predator biomass
        T_j = time forcing on predator
        S_ij = prey switching factor
        D_j = handling time factor
        d_ij = handling time for this link
        M_ij = mediation multiplier

    Parameters
    ----------
    state : np.ndarray
        Current state vector (biomass values) indexed 0 to NUM_GROUPS
    params : dict
        Model parameters containing:
        - NUM_GROUPS: Total number of groups
        - NUM_LIVING: Number of living groups
        - NUM_DEAD: Number of detritus groups
        - NUM_GEARS: Number of fishing gears
        - PB: Production/Biomass ratios
        - QB: Consumption/Biomass ratios
        - ActiveLink: Boolean array [prey, pred] of active links
        - DC: Diet composition matrix [prey, pred]
        - VV: Vulnerability parameters [prey, pred]
        - DD: Handling time parameters [prey, pred]
        - Bbase: Baseline biomass [group]
        - DetFrac: Fraction to detritus [group]
        - Unassim: Unassimilated fraction [group]
        - SwitchPower: Prey switching power (0-2, default 0)
        - PP_type: Producer type array [group]
        - Mediation: Mediation configuration dict
    forcing : dict
        Forcing arrays:
        - ForcedBio: Forced biomass values [group]
        - ForcedMigrate: Migration forcing [group]
        - ForcedCatch: Forced catch [group]
        - ForcedEffort: Forced effort [gear]
        - PP_forcing: Primary production forcing [group]
        - Ftime: Time forcing [group]
    fishing : dict
        Fishing parameters:
        - FishingMort: Base fishing mortality [group]
        - EffortCap: Effort cap [gear]
    t : float
        Current time (for time-varying forcing)

    Returns
    -------
    np.ndarray
        Derivative vector (dB/dt for each group)
    """
    NUM_GROUPS = params["NUM_GROUPS"]
    NUM_LIVING = params["NUM_LIVING"]
    NUM_DEAD = params["NUM_DEAD"]
    NUM_GEARS = params.get("NUM_GEARS", 0)

    # Initialize output arrays
    deriv = np.zeros(NUM_GROUPS + 1)  # +1 for 0-indexing with outside

    # Extract parameters
    PB = params["PB"]
    QB = params.get("QB", np.zeros(NUM_GROUPS + 1))
    ActiveLink = params["ActiveLink"]
    VV = params["VV"]
    DD = params["DD"]
    Unassim = params.get("Unassim", np.zeros(NUM_GROUPS + 1))
    Bbase = params.get("Bbase", state.copy())  # Baseline biomass
    _SwitchPower = params.get("SwitchPower", 0.0)  # Prey switching power
    PP_type = params.get("PP_type", np.zeros(NUM_GROUPS + 1, dtype=int))
    _Mediation = params.get("Mediation", {})  # Mediation configuration
    # Pre-fetch spname and M0 once to avoid repeated dict lookups and
    # default-argument allocations ([None]*N, np.zeros) inside inner loops.
    spname_list = params.get("spname", None)
    M0_arr = params.get("M0", None)
    _NoIntegrate_raw = params.get("NoIntegrate", None)
    _TRACE_DEBUG_GROUPS = params.get("TRACE_DEBUG_GROUPS", None)

    # Diagnostic: if trace requested, print spname type and membership check
    try:
        if _TRACE_DEBUG_GROUPS is not None or spname_list is not None:
            spname = spname_list
            logger.debug(
                "TRACE DEBUG: params.keys() sample=%s",
                list(params.keys())[:20],
            )
            logger.debug(
                "TRACE DEBUG: spname type=%s len=%s contains_Seabirds=%s",
                type(spname),
                len(spname) if spname is not None else 0,
                "Seabirds" in spname if spname is not None else False,
            )
    except Exception as e:
        logger.debug("TRACE DEBUG: params introspection failed: %s", e)

    # Current biomass (state variable)
    BB = state.copy()

    # Enforce NoIntegrate algebraic groups in stage evaluations
    # Some groups are marked 'NoIntegrate' to represent algebraic equilibria
    # (fast turnover). Ensure derivative evaluations always see these at
    # their baseline Bbase value so intermediate RK4 stages don't pollute
    # predation/functional response calculations.
    try:
        no_integrate_mask = (
            np.asarray(
                _NoIntegrate_raw
                if _NoIntegrate_raw is not None
                else np.zeros(NUM_GROUPS + 1)
            )
            != 0
        )
        if np.any(no_integrate_mask):
            Bbase_arr = params.get("Bbase", None)
            if Bbase_arr is not None:
                # apply baseline values for NoIntegrate groups to the local BB
                # (BB is already a copy from state.copy() above)
                BB[no_integrate_mask] = Bbase_arr[no_integrate_mask]
    except (TypeError, ValueError, IndexError):
        pass

    # Instrumentation: resolve requested groups to 0-based indices (names or indices)
    # NOTE: group names map via params['spname'] (which includes a leading 'Outside').
    # We normalize to 0-based indices corresponding to `groups` list (0 => first real group).
    INSTRUMENT_GROUPS = params.get("INSTRUMENT_GROUPS", None)
    try:
        logger.debug(
            "INSTRUMENT-RAW: INSTRUMENT_GROUPS raw=%r type=%s params_is_dict=%s",
            INSTRUMENT_GROUPS,
            type(INSTRUMENT_GROUPS),
            isinstance(params, dict),
        )
    except (TypeError, ValueError):
        pass
    instrument_set = set()
    if INSTRUMENT_GROUPS is not None:
        try:
            spname = spname_list
            numeric_inputs = []
            for g in INSTRUMENT_GROUPS:
                if isinstance(g, str):
                    # Prefer mapping via params['model'] if available (stable group ordering)
                    model_df = (
                        params.get("model", None)
                        if isinstance(params, dict)
                        else getattr(params, "model", None)
                    )
                    if (
                        model_df is not None
                        and hasattr(model_df, "columns")
                        and "Group" in model_df.columns
                    ):
                        groups_list = list(model_df["Group"])
                        if g in groups_list:
                            instrument_set.add(groups_list.index(g))
                            continue
                    # Fallback to spname mapping (may include leading 'Outside')
                    if spname is not None and g in spname:
                        sp_idx = spname.index(g)
                        # Convert spname index (with leading 'Outside') to 0-based group index
                        if sp_idx > 0:
                            instrument_set.add(sp_idx - 1)
                else:
                    # Collect numeric inputs for later disambiguation
                    try:
                        numeric_inputs.append(int(g))
                    except (TypeError, ValueError):
                        pass
            # Heuristic: if numeric inputs look like 1-based indices (all in 1..NUM_GROUPS),
            # emit a DeprecationWarning and convert to 0-based by subtracting 1.
            max_idx = NUM_GROUPS - 1
            if numeric_inputs:
                if (
                    all(1 <= v <= NUM_GROUPS for v in numeric_inputs)
                    and min(numeric_inputs) >= 1
                ):
                    # Likely 1-based indices; log, warn, and convert
                    logger.debug(
                        "INSTRUMENT: detected probable 1-based numeric indices %s; converting to 0-based",
                        numeric_inputs,
                    )
                    warnings.warn(
                        "Numeric INSTRUMENT_GROUPS indices are expected to be 0-based. "
                        "Detected probable 1-based indices — converting to 0-based for now. "
                        "Please update your code to use 0-based indices.",
                        DeprecationWarning,
                        stacklevel=3,
                    )
                    numeric_inputs = [v - 1 for v in numeric_inputs]
                # Add numeric inputs (after any conversion) into instrument_set
                for v in numeric_inputs:
                    instrument_set.add(v)
            # Filter to valid range [0, NUM_GROUPS-1]
            instrument_set = set(i for i in instrument_set if 0 <= i <= max_idx)
            # Ensure downstream uses the normalized (0-based) representation so
            # instrumentation callback and other code sees converted indices.
            try:
                normalized = sorted(instrument_set)
                try:
                    params["INSTRUMENT_GROUPS"] = normalized
                except (TypeError, KeyError):
                    try:
                        setattr(params, "INSTRUMENT_GROUPS", normalized)
                    except (TypeError, AttributeError):
                        pass
                # Print normalization outcome for visibility
                try:
                    logger.debug(
                        "INSTRUMENT-NORM: numeric_inputs=%s normalized=%s instrument_set=%s",
                        numeric_inputs,
                        normalized,
                        instrument_set,
                    )
                except (TypeError, ValueError):
                    pass
            except (TypeError, ValueError):
                pass
        except Exception as e:
            logger.debug("Instrumentation group resolution error: %s", e)
            instrument_set = set()

    # Initialize consumption matrix
    QQ = np.zeros((NUM_GROUPS + 1, NUM_GROUPS + 1))

    # =========================================================================
    # STEP 1: Calculate predation pressure from each predator on each prey
    # Using foraging arena functional response with prey switching
    #
    # From Rpath ecosim.cpp (vectorized version):
    # Q = QQ * PDY * pow(PYY, HandleSwitch * COUPLED) *
    #     ( DD / ( DD-1.0 + pow((1-Hself)*PYY + Hself*PySuite, HandleSwitch*COUPLED)) ) *
    #     ( VV / ( VV-1.0 + (1-Sself)*PDY + Sself*PdSuite) );
    #
    # Where:
    #   QQ = base consumption rate (DC * QB * Bpred_baseline)
    #   PDY = predYY = Ftime * Bpred / Bpred_baseline (relative predator biomass)
    #   PYY = preyYY = Bprey / Bprey_baseline * force_byprey (relative prey biomass)
    #   DD = handling time (large = no handling time effect, approaching 1.0)
    #   VV = vulnerability (large = no density dependence)
    # =========================================================================

    # Get time-varying forcing (default to 1.0)
    Ftime = forcing.get("Ftime", np.ones(NUM_GROUPS + 1))
    ForcedBio = forcing.get("ForcedBio", np.zeros(NUM_GROUPS + 1))
    PP_forcing = forcing.get("PP_forcing", np.ones(NUM_GROUPS + 1))
    ForcedPrey = forcing.get("ForcedPrey", np.ones(NUM_GROUPS + 1))
    ForcedMigrate = forcing.get("ForcedMigrate", np.zeros(NUM_GROUPS + 1))

    # Calculate relative biomass arrays (vectorized)
    # preyYY = B / Bbase * prey_forcing (where Bbase > 0)
    safe_bbase = np.where(Bbase > 0, Bbase, 1.0)
    preyYY = np.zeros(NUM_GROUPS + 1)
    preyYY[1:] = np.where(
        Bbase[1:] > 0,
        BB[1:] / safe_bbase[1:] * ForcedPrey[1:],
        0.0,
    )

    # predYY = Ftime * B / Bbase (where Bbase > 0, living groups only)
    predYY = np.zeros(NUM_GROUPS + 1)
    sl = slice(1, NUM_LIVING + 1)
    predYY[sl] = np.where(
        Bbase[sl] > 0,
        Ftime[sl] * BB[sl] / safe_bbase[sl],
        0.0,
    )

    # Get base consumption matrix
    QQbase = params.get("QQbase", np.zeros((NUM_GROUPS + 1, NUM_GROUPS + 1)))

    # Compute consumption matrix via numba-accelerated (or pure-Python) kernel.
    # Use pre-computed sparse link arrays when available (avoids iterating
    # over inactive links); otherwise fall back to the dense kernel.
    _link_prey = params.get("_link_prey", None)
    _link_pred = params.get("_link_pred", None)
    if _link_prey is not None and _link_pred is not None:
        _compute_consumption_sparse(
            QQ,
            BB,
            VV,
            DD,
            QQbase,
            preyYY,
            predYY,
            _link_prey,
            _link_pred,
            len(_link_prey),
        )
    else:
        # ActiveLink may be a boolean array; ensure it is integer for numba compat.
        _active_int = (
            ActiveLink.astype(np.int64) if ActiveLink.dtype != np.int64 else ActiveLink
        )
        _compute_consumption(
            QQ,
            BB,
            _active_int,
            VV,
            DD,
            QQbase,
            preyYY,
            predYY,
            NUM_LIVING,
            NUM_GROUPS,
        )

    # Post-loop instrumentation: log per-link breakdown for interesting groups
    if instrument_set:
        try:
            for pred in range(1, NUM_LIVING + 1):
                for prey in range(1, NUM_GROUPS + 1):
                    if QQ[prey, pred] <= 0.0:
                        continue
                    prey0 = prey - 1
                    pred0 = pred - 1
                    if prey0 in instrument_set or pred0 in instrument_set:
                        pname = spname_list[prey] if spname_list is not None else None
                        prname = spname_list[pred] if spname_list is not None else None
                        qbase = QQbase[prey, pred]
                        PYY = preyYY[prey]
                        PDY = predYY[pred]
                        dd = DD[prey, pred]
                        vv = VV[prey, pred]
                        dd_term = dd / (dd - 1.0 + max(PYY, 1e-10)) if dd > 1.0 else 1.0
                        vv_term = vv / (vv - 1.0 + max(PDY, 1e-10)) if vv > 1.0 else 1.0
                        logger.debug(
                            "INSTR Q prey=%s name=%s pred=%s name=%s qbase=%.6e PDY=%.6e PYY=%.6e dd_term=%.6e vv_term=%.6e Q_calc=%.6e",
                            prey,
                            pname,
                            pred,
                            prname,
                            qbase,
                            PDY,
                            PYY,
                            dd_term,
                            vv_term,
                            QQ[prey, pred],
                        )
        except Exception as e:
            logger.debug("Instrumentation error in Q calculation: %s", e)

    # =========================================================================
    # STEP 2: Apply forced biomass adjustments
    # =========================================================================
    for i in range(1, NUM_GROUPS + 1):
        if ForcedBio[i] > 0:
            BB[i] = ForcedBio[i]

    # =========================================================================
    # STEP 3: Calculate fishing mortality with forced effort
    # =========================================================================
    FishMort = np.zeros(NUM_GROUPS + 1)
    Catch = np.zeros(NUM_GROUPS + 1)

    ForcedEffort = forcing.get("ForcedEffort", np.ones(max(NUM_GEARS + 1, 1)))
    # Support both dict-like and dataclass fishing inputs
    if isinstance(fishing, dict):
        FishFrom = fishing.get("FishFrom", np.array([0]))
        FishThrough = fishing.get("FishThrough", np.array([0]))
        FishQ = fishing.get("FishQ", np.array([0.0]))
    else:
        FishFrom = getattr(fishing, "FishFrom", np.array([0]))
        FishThrough = getattr(fishing, "FishThrough", np.array([0]))
        FishQ = getattr(fishing, "FishQ", np.array([0.0]))

    # Calculate fishing mortality with effort scaling per gear
    # Note: FishThrough contains GROUP indices of gears, not gear indices
    # To get gear index: gear_idx = FishThrough[i] - NUM_LIVING - NUM_DEAD
    for i in range(1, len(FishFrom)):
        grp = int(FishFrom[i])
        gear_group_idx = int(FishThrough[i])
        gear_idx = (
            gear_group_idx - NUM_LIVING - NUM_DEAD
        )  # Convert to gear index (1-based)
        effort_mult = (
            ForcedEffort[gear_idx] if 0 < gear_idx < len(ForcedEffort) else 1.0
        )
        FishMort[grp] += FishQ[i] * effort_mult

    for i in range(1, NUM_LIVING + 1):
        Catch[i] = FishMort[i] * BB[i]
        try:
            # i is spname index (1..); instrument_set uses 0-based group indices
            if instrument_set and (i - 1) in instrument_set:
                name = spname_list[i] if spname_list is not None else None
                logger.debug(
                    "INSTR FISH grp=%s name=%s FishMort=%.6e BB=%.6e Catch=%.6e",
                    i,
                    name,
                    FishMort[i],
                    BB[i],
                    Catch[i],
                )
        except Exception as e:
            logger.debug("Instrumentation error in fishing: %s", e)

    # Debugging: print fishing details for small models to trace if fishing is applied

    # =========================================================================
    # STEP 4: Calculate derivatives for living groups
    # =========================================================================

    # Calculate primary production for producers
    pp_rates = primary_production_forcing(
        BB, Bbase, PB, PP_forcing, PP_type, NUM_LIVING
    )

    # IBM integration: check if any groups are replaced by IBMs
    ibm_groups = params.get("ibm_groups", {})

    # Handle IBM groups first (non-numba-compatible path)
    for i in ibm_groups:
        if 1 <= i <= NUM_LIVING:
            from pypath.ibm.integration import apply_ibm_to_derivative

            spatial_ctx = params.get("_ibm_spatial_context_%d" % i, None)
            apply_ibm_to_derivative(
                deriv=deriv,
                QQ=QQ,
                BB=BB,
                ibm_group=ibm_groups[i],
                forcing=forcing,
                dt=params.get("_dt", 1 / 12),
                spatial_context=spatial_ctx,
            )

    # Prepare arrays for the numba-accelerated living-group derivative kernel
    _M0_safe = (
        M0_arr
        if (M0_arr is not None and isinstance(M0_arr, np.ndarray))
        else np.zeros(NUM_GROUPS + 1)
    )
    _GE_arr = np.zeros(NUM_GROUPS + 1)
    for _gi in range(1, NUM_LIVING + 1):
        if QB[_gi] > 0.0:
            _GE_arr[_gi] = PB[_gi] / QB[_gi]
    _ibm_mask = np.zeros(NUM_GROUPS + 1, dtype=np.int64)
    for _ibm_i in ibm_groups:
        if 0 <= _ibm_i <= NUM_GROUPS:
            _ibm_mask[_ibm_i] = 1
    _PP_type_int = np.asarray(PP_type, dtype=np.int64)

    _compute_living_derivs(
        deriv,
        QQ,
        BB,
        _M0_safe,
        ForcedMigrate,
        FishMort,
        pp_rates,
        _GE_arr,
        _PP_type_int,
        PB,
        QB,
        _ibm_mask,
        NUM_LIVING,
        NUM_GROUPS,
    )

    # Post-kernel instrumentation / debug logging for living groups
    # (kept outside numba kernel because it uses Python objects: strings, logging, etc.)
    _need_instr = bool(instrument_set) or _TRACE_DEBUG_GROUPS is not None
    _need_seabird_trace = False
    _seabird_idx = -1
    try:
        if spname_list is not None and "Seabirds" in spname_list:
            _need_seabird_trace = True
            _seabird_idx = spname_list.index("Seabirds")
    except Exception:
        pass

    if _need_instr or _need_seabird_trace:
        for i in range(1, NUM_LIVING + 1):
            if i in ibm_groups:
                continue

            # Recompute per-group terms for logging (cheap scalar ops)
            consumption = float(np.sum(QQ[1:, i]))
            predation_loss = float(np.sum(QQ[i, 1 : NUM_LIVING + 1]))
            m0 = float(_M0_safe[i])
            if PP_type[i] > 0:
                production = float(pp_rates[i])
            elif QB[i] > 0:
                production = float(_GE_arr[i] * consumption)
            else:
                production = float(PB[i] * BB[i])

            # Seabirds trace
            try:
                if _need_seabird_trace and i == _seabird_idx:
                    logger.debug(
                        "TRACE SEABIRDS i=%s name=Seabirds production=%.12e predation_loss=%.12e fish_loss=%.12e m0_loss=%.12e deriv=%.12e",
                        i,
                        production,
                        predation_loss,
                        FishMort[i] * BB[i],
                        m0 * BB[i],
                        deriv[i],
                    )
            except Exception as e:
                logger.debug("Seabirds debug instrumentation error: %s", e)

            # Debug trace for specific groups if requested
            try:
                trace_groups = _TRACE_DEBUG_GROUPS
                if trace_groups is not None and i in trace_groups:
                    name = spname_list[i] if spname_list is not None else None
                    logger.debug(
                        "TRACE DERIV i=%s name=%s production=%.6e predation_loss=%.6e fish_loss=%.6e m0_loss=%.6e deriv=%.6e",
                        i,
                        name,
                        production,
                        predation_loss,
                        FishMort[i] * BB[i],
                        m0 * BB[i],
                        deriv[i],
                    )
            except Exception as e:
                logger.debug("TRACE_DEBUG_GROUPS instrumentation error: %s", e)

            # Instrumentation: detailed per-term breakdown for selected groups
            try:
                if instrument_set and (i - 1) in instrument_set:
                    name = spname_list[i] if spname_list is not None else None
                    unassim_loss = consumption * Unassim[i]
                    fish_loss = FishMort[i] * BB[i]
                    m0_loss = m0 * BB[i]
                    logger.debug(
                        "INSTR DERIV i=%s name=%s production=%.12e consumption=%.12e unassim_loss=%.12e predation_loss=%.12e fish_loss=%.12e m0_loss=%.12e deriv=%.12e",
                        i,
                        name,
                        production,
                        consumption,
                        unassim_loss,
                        predation_loss,
                        fish_loss,
                        m0_loss,
                        deriv[i],
                    )

                    if predation_loss > 0:
                        contribs = []
                        for pred2 in range(1, NUM_LIVING + 1):
                            qval = QQ[i, pred2]
                            if qval > 0:
                                pname = (
                                    spname_list[pred2]
                                    if spname_list is not None
                                    else None
                                )
                                contribs.append((pred2, pname, qval))
                        if contribs:
                            logger.debug("INSTR PREDATORS for prey i={}:".format(i))
                            for pid, pname, qv in contribs:
                                logger.debug(
                                    "  pred=%s name=%s Q=%.12e", pid, pname, qv
                                )
            except Exception as e:
                logger.debug("Instrumentation error in deriv breakdown: %s", e)

    # =========================================================================
    # STEP 5: Calculate derivatives for detritus groups
    # =========================================================================
    DetFrac_raw = params.get("DetFrac", np.zeros((NUM_GROUPS + 1, NUM_DEAD + 1)))
    # RsimParams may store detritus fractions in two formats:
    # 1) a full 2D array shaped (NUM_GROUPS+1, NUM_DEAD+1), or
    # 2) a flat link-list array with accompanying DetFrom/DetTo arrays.
    # Handle both formats robustly and normalize to a 2D matrix DetFrac.
    DetFrac = np.asarray(DetFrac_raw)
    if DetFrac.ndim == 2:
        # Already a matrix - ensure full width if it's a single-column or truncated
        if DetFrac.shape != (NUM_GROUPS + 1, NUM_DEAD + 1):
            mat = np.zeros((NUM_GROUPS + 1, NUM_DEAD + 1))
            # copy what we have into the left/top corner
            r = min(mat.shape[0], DetFrac.shape[0])
            c = min(mat.shape[1], DetFrac.shape[1])
            mat[:r, :c] = DetFrac[:r, :c]
            DetFrac = mat

    elif DetFrac.ndim == 1:
        # Link-list format: try to reconstruct a full matrix using DetFrom/DetTo
        det_from = getattr(params, "DetFrom", None)
        det_to = getattr(params, "DetTo", None)
        if det_from is not None and det_to is not None:
            mat = np.zeros((NUM_GROUPS + 1, NUM_DEAD + 1))
            # det_from/det_to are arrays of same length as DetFrac
            for k in range(len(DetFrac)):
                f = int(det_from[k])
                t = int(det_to[k])
                # DetTo is an absolute group index (0 = Outside, otherwise nliving+det_idx)
                if (
                    t >= (NUM_LIVING + 1)
                    and t <= (NUM_LIVING + NUM_DEAD)
                    and f >= 0
                    and f <= NUM_GROUPS
                ):
                    det_col = t - NUM_LIVING  # 1-based detritus column index
                    mat[f, det_col] += DetFrac[k]

            DetFrac = mat
        else:
            # Fallback: treat as single-column per-group values
            DetFrac = DetFrac.reshape((DetFrac.size, 1))
    else:
        # scalar/None or unexpected -> coerce to minimal matrix
        DetFrac = DetFrac.reshape((1, 1))

    # Universal application of fish-derived discard contributions (work for both
    # 2D DetFrac and link-list reconstructions). This centralizes the logic to
    # avoid duplication and eliminate discrepancies between formats.
    try:
        if isinstance(params, dict):
            fish_from = params.get("FishFrom", None)
            fish_to = params.get("FishTo", None)
            fish_q = params.get("FishQ", None)
        else:
            fish_from = getattr(params, "FishFrom", None)
            fish_to = getattr(params, "FishTo", None)
            fish_q = getattr(params, "FishQ", None)
        if fish_from is not None and fish_to is not None and fish_q is not None:
            fish_from = np.asarray(fish_from)
            fish_to = np.asarray(fish_to)
            fish_q = np.asarray(fish_q, dtype=float)

            # Ensure DetFrac has full row coverage for groups
            if DetFrac.shape[0] < NUM_GROUPS + 1:
                new_rows = NUM_GROUPS + 1
                new_cols = max(DetFrac.shape[1], NUM_DEAD + 1)
                new = np.zeros((new_rows, new_cols))
                new[: DetFrac.shape[0], : DetFrac.shape[1]] = DetFrac
                DetFrac = new

            for k in range(len(fish_from)):
                try:
                    f = int(fish_from[k])
                    t = int(fish_to[k])
                    if not (
                        t >= (NUM_LIVING + 1)
                        and t <= (NUM_LIVING + NUM_DEAD)
                        and f >= 0
                        and f <= NUM_GROUPS
                    ):
                        continue
                    det_col = t - NUM_LIVING
                    src_idx = f
                    fish_input = float(fish_q[k]) * float(BB[src_idx])
                    m0_arr = M0_arr if M0_arr is not None else np.zeros(NUM_GROUPS + 1)
                    qb_arr = QB
                    unassim_arr = Unassim

                    m0_pos = max(
                        0.0, float(m0_arr[src_idx]) if src_idx < len(m0_arr) else 0.0
                    )
                    qb_loss = (
                        float(qb_arr[src_idx])
                        if (src_idx < len(qb_arr) and not np.isnan(qb_arr[src_idx]))
                        else 0.0
                    )
                    unassim_val = (
                        float(unassim_arr[src_idx])
                        if src_idx < len(unassim_arr)
                        else 0.0
                    )
                    source_loss = (
                        m0_pos * float(BB[src_idx])
                        + float(BB[src_idx]) * qb_loss * unassim_val
                    )
                    frac = fish_input / (source_loss + 1e-30)
                    if frac > 0:
                        # Make sure DetFrac has enough columns
                        if DetFrac.shape[1] <= det_col:
                            # expand to required width
                            new = np.zeros((DetFrac.shape[0], det_col + 1))
                            new[:, : DetFrac.shape[1]] = DetFrac
                            DetFrac = new
                        DetFrac[src_idx, det_col] += frac
                        if params.get("VERBOSE_DEBUG", False):
                            logger.debug(
                                "DEBUG: added fish-derived DetFrac mat[%s,%s] += %.3e",
                                src_idx,
                                det_col,
                                frac,
                            )
                except Exception as e:
                    if params.get("VERBOSE_DEBUG", False):
                        logger.debug(
                            "DEBUG: failed to add fish-derived DetFrac (unified) for entry %s: %s",
                            k,
                            e,
                        )
                    continue
    except Exception as e:
        logger.debug("Fish-derived DetFrac computation error: %s", e)

    # Pre-compute total consumption by each predator once, avoiding redundant
    # np.sum(QQ[1:, pred]) calls inside the per-detritus-group loop.
    # Shape: (NUM_LIVING,) where index j corresponds to pred = j + 1.
    total_consump_by_pred = np.sum(QQ[1:, 1 : NUM_LIVING + 1], axis=0)

    # Pre-fetch detritus decay rates outside the loop
    decay_rate = params.get("DetDecay", np.zeros(NUM_DEAD + 1))
    _decay_rate = np.asarray(decay_rate, dtype=np.float64)

    # Ensure DetFrac is a contiguous 2D float64 array for the numba kernel
    _DetFrac = np.ascontiguousarray(DetFrac, dtype=np.float64)

    # Compute detritus derivatives via numba-accelerated (or pure-Python) kernel
    try:
        _compute_detritus_derivs(
            deriv,
            QQ,
            BB,
            total_consump_by_pred,
            Unassim,
            _DetFrac,
            _M0_safe,
            _decay_rate,
            NUM_LIVING,
            NUM_DEAD,
        )
    except (IndexError, ValueError):
        # Fallback: rich debug information and re-raise for inspection
        logger.error(
            "ERROR in detritus kernel: QQ.shape=%s DetFrac.shape=%s Unassim.shape=%s "
            "NUM_LIVING=%s NUM_DEAD=%s BB.shape=%s params_keys_sample=%s",
            getattr(QQ, "shape", type(QQ)),
            getattr(_DetFrac, "shape", type(_DetFrac)),
            getattr(Unassim, "shape", type(Unassim)),
            NUM_LIVING,
            NUM_DEAD,
            getattr(BB, "shape", type(BB)),
            list(params.keys())[:10],
        )
        raise

    # Post-kernel detritus instrumentation / debug logging
    for d in range(NUM_LIVING + 1, NUM_LIVING + NUM_DEAD + 1):
        det_idx = d - NUM_LIVING
        try:
            logger.debug(
                "DEBUG DetFrac ndim=%s shape=%s NUM_LIVING=%s NUM_DEAD=%s d=%s det_idx=%s",
                _DetFrac.ndim,
                _DetFrac.shape,
                NUM_LIVING,
                NUM_DEAD,
                d,
                det_idx,
            )
        except (TypeError, ValueError, AttributeError):
            logger.debug("DEBUG DetFrac: unable to inspect shape/ndim")

        try:
            logger.debug(
                "TRACE DETRITUS d=%s det_idx=%s deriv=%.12e",
                d,
                det_idx,
                deriv[d],
            )
        except Exception as e:
            logger.debug("Detritus debug error: %s", e)

        # Instrumentation: print per-pred and per-grp contributions when requested
        try:
            if instrument_set and (d - 1) in instrument_set:
                logger.debug(
                    "INSTR DETRITUS d=%s det_idx=%s -- per-pred unas contributions:",
                    d,
                    det_idx,
                )
                for pred in range(1, NUM_LIVING + 1):
                    total_consump = total_consump_by_pred[pred - 1]
                    contrib = (
                        total_consump
                        * Unassim[pred]
                        * (
                            _DetFrac[pred, det_idx]
                            if _DetFrac.shape[1] > det_idx
                            else 0
                        )
                    )
                    if contrib != 0:
                        pname = spname_list[pred] if spname_list is not None else None
                        logger.debug(
                            "  pred=%s name=%s total_consump=%.12e unassim=%.12e DetFrac=%.12e contrib=%.12e",
                            pred,
                            pname,
                            total_consump,
                            Unassim[pred],
                            _DetFrac[pred, det_idx],
                            contrib,
                        )

                logger.debug(
                    "INSTR DETRITUS d=%s det_idx=%s -- per-grp mort contributions:",
                    d,
                    det_idx,
                )
                _m0_vals = M0_arr if M0_arr is not None else np.zeros(NUM_GROUPS + 1)
                for grp in range(1, NUM_LIVING + 1):
                    contrib = (
                        _m0_vals[grp]
                        * BB[grp]
                        * (_DetFrac[grp, det_idx] if _DetFrac.shape[1] > det_idx else 0)
                    )
                    if contrib != 0:
                        gname = spname_list[grp] if spname_list is not None else None
                        logger.debug(
                            "  grp=%s name=%s M0=%.12e BB=%.12e DetFrac=%.12e contrib=%.12e",
                            grp,
                            gname,
                            _m0_vals[grp],
                            BB[grp],
                            _DetFrac[grp, det_idx],
                            contrib,
                        )
        except Exception as e:
            logger.debug("Detritus instrumentation error: %s", e)

    # Zero derivatives for NoIntegrate (fast-turnover) groups to enforce algebraic equilibrium
    try:
        # NoIntegrate: Rpath encodes fast-turnover groups as 0. Treat 0 as True for NoIntegrate
        # NoIntegrate uses 1 to indicate fast-turnover groups in params (1 = NoIntegrate)
        no_integrate = (
            np.asarray(
                _NoIntegrate_raw
                if _NoIntegrate_raw is not None
                else np.zeros(NUM_GROUPS + 1)
            )
            != 0
        )
        if np.any(no_integrate):
            deriv[no_integrate] = 0.0
    except (TypeError, ValueError, IndexError):
        pass

    return deriv

integrate_rk4

integrate_rk4(state: ndarray, params: dict, forcing: dict, fishing: dict, dt: float) -> np.ndarray

Runge-Kutta 4th order integration step.

Parameters:

Name Type Description Default
state ndarray

Current state vector

required
params dict

Model parameters

required
forcing dict

Forcing arrays

required
fishing dict

Fishing parameters

required
dt float

Time step

required

Returns:

Type Description
ndarray

Updated state vector

Source code in pypath/core/ecosim_deriv.py
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
def _resolve_rk4_instrument_indices(instr_groups, params: dict, state: np.ndarray) -> list:
    """Resolve INSTRUMENT_GROUPS entries to sorted, valid 0-based state indices.

    Accepts either a purely numeric list/tuple (assumed 0-based, but converted
    from probable 1-based indices when every value lies in 1..len(state)-1+1
    and at least one exceeds the 0-based maximum) or a mixed list of group
    names / numbers. Names are resolved preferentially against the model
    DataFrame's "Group" column (stable ordering), falling back to the
    params["spname"] list (whose index 0 is a placeholder, hence the -1).

    Returns
    -------
    list
        Sorted 0-based indices filtered to the valid range [0, len(state)-1].
    """
    idxs = set()
    spname = params.get("spname", None)
    max_idx = len(state) - 1
    if isinstance(instr_groups, (list, tuple)) and all(
        isinstance(x, (int, np.integer)) for x in instr_groups
    ):
        nums = [int(x) for x in instr_groups]
        try:
            # Heuristic 1-based -> 0-based conversion: only when values overflow
            # the 0-based range but all fit the plausible 1-based range.
            if (
                nums
                and any(v > max_idx for v in nums)
                and all(1 <= v <= max_idx + 1 for v in nums)
            ):
                nums = [v - 1 for v in nums]
        except (TypeError, ValueError):
            pass
        idxs.update(int(x) for x in nums)
    else:
        # Prefer mapping via model DataFrame when available for stable
        # group ordering; otherwise fallback to spname mapping.
        model_df = params.get("model", None)
        for g in instr_groups:
            if isinstance(g, str):
                if (
                    model_df is not None
                    and hasattr(model_df, "columns")
                    and "Group" in model_df.columns
                ):
                    groups_list = list(model_df["Group"])
                    if g in groups_list:
                        idxs.add(groups_list.index(g))
                        continue
                if spname is not None and g in spname:
                    sp_idx = spname.index(g)
                    if sp_idx > 0:
                        idxs.add(sp_idx - 1)
            else:
                try:
                    idxs.add(int(g))
                except (TypeError, ValueError):
                    pass
    return sorted(i for i in idxs if 0 <= i <= max_idx)


def _emit_rk4_instrumentation(
    state: np.ndarray,
    k1: np.ndarray,
    k2: np.ndarray,
    k3: np.ndarray,
    params: dict,
    dt: float,
) -> None:
    """Best-effort per-stage RK4 instrumentation callback (never raises).

    When the caller requested instrumentation (params["INSTRUMENT_GROUPS"]
    plus a callback in params["instrument_callback"], or the module-level
    _last_instrument_callback fallback), compute per-stage consumption (QQ)
    totals for the requested groups and invoke the callback with a compact
    payload. Skipped entirely when this RK4 call is a warmup for another
    integrator (params["_integration_parent_method"] set) to avoid sending
    RK4-shaped payloads to AB-expecting callers.
    """
    try:
        instr_groups = params.get("INSTRUMENT_GROUPS", None)
        cb = params.get("instrument_callback", None)
        if cb is None:
            cb = globals().get("_last_instrument_callback", None)
        if instr_groups is None or cb is None:
            return
        valid_idxs = _resolve_rk4_instrument_indices(instr_groups, params, state)
        if not valid_idxs:
            return
        # Compute QQ totals for each RK4 stage for the requested groups
        try:
            from pypath.core.ecosim import _compute_Q_matrix

            stages = [
                state,
                state + 0.5 * dt * k1,
                state + 0.5 * dt * k2,
                state + dt * k3,
            ]
            stage_totals = []
            for st in stages:
                QQs = _compute_Q_matrix(params, st, {"Ftime": np.ones_like(st)})
                # QQ column i+1 corresponds to 0-based group i (column 0 is a placeholder)
                totals = [float(np.nansum(QQs[:, i + 1])) for i in valid_idxs]
                stage_totals.append(totals)

            parent = (
                params.get("_integration_parent_method")
                if isinstance(params, dict)
                else None
            )
            payload = {
                "method": parent if parent is not None else "RK4",
                "groups": valid_idxs,
                "stage_consumption_totals": stage_totals,
                "dt": float(dt),
            }
            # If this RK4 call is being used solely as a warmup for another
            # integrator (e.g., AB), skip invoking the instrumentation callback
            # here: AB callers expect 'deriv_current' in the first payload,
            # which RK4-stage payloads do not provide.
            if parent is None:
                cb(payload)
            else:
                logger.debug(
                    "INSTRUMENT-TRACE: skipping RK4-stage callback when used as warmup for parent=%s",
                    parent,
                )
        except Exception as e:
            logger.debug("RK4 instrumentation error: %s", e)
    except Exception as e:
        logger.debug("RK4 outer instrumentation error: %s", e)


def integrate_rk4(
    state: np.ndarray, params: dict, forcing: dict, fishing: dict, dt: float
) -> np.ndarray:
    """
    Runge-Kutta 4th order integration step.

    Parameters
    ----------
    state : np.ndarray
        Current state vector
    params : dict
        Model parameters
    forcing : dict
        Forcing arrays
    fishing : dict
        Fishing parameters
    dt : float
        Time step

    Returns
    -------
    np.ndarray
        Updated state vector (non-negative; NoIntegrate groups pinned to Bbase)
    """
    # Classical RK4 stages
    k1 = deriv_vector(state, params, forcing, fishing)
    k2 = deriv_vector(state + 0.5 * dt * k1, params, forcing, fishing)
    k3 = deriv_vector(state + 0.5 * dt * k2, params, forcing, fishing)
    k4 = deriv_vector(state + dt * k3, params, forcing, fishing)

    new_state = state + (dt / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)

    # Ensure non-negative biomass
    new_state = np.maximum(new_state, 0.0)

    # Enforce NoIntegrate groups stay at baseline (if provided in params)
    try:
        # NoIntegrate uses 1 to indicate fast-turnover groups in params (1 = NoIntegrate)
        no_integrate = (
            np.asarray(params.get("NoIntegrate", np.zeros(len(new_state)))) != 0
        )
        if np.any(no_integrate):
            Bbase = params.get("Bbase")
            if Bbase is not None:
                new_state[no_integrate] = Bbase[no_integrate]
    except (TypeError, ValueError, IndexError):
        pass

    # Optional caller-requested diagnostics; never affects the returned state.
    _emit_rk4_instrumentation(state, k1, k2, k3, params, dt)

    return new_state

integrate_ab

integrate_ab(state: ndarray, derivs_history: list, params: dict, forcing: dict, fishing: dict, dt: float) -> Tuple[np.ndarray, np.ndarray]

Adams-Bashforth integration step.

Uses the 4-step Adams-Bashforth method when sufficient history is available, and falls back to lower-order methods when fewer history entries exist.

Parameters:

Name Type Description Default
state ndarray

Current state vector

required
derivs_history list

List of previous derivative vectors (most recent first)

required
params dict

Model parameters

required
forcing dict

Forcing arrays

required
fishing dict

Fishing parameters

required
dt float

Time step

required

Returns:

Type Description
Tuple[ndarray, ndarray]

Updated state vector and new derivative

Source code in pypath/core/ecosim_deriv.py
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
def integrate_ab(
    state: np.ndarray,
    derivs_history: list,
    params: dict,
    forcing: dict,
    fishing: dict,
    dt: float,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Adams-Bashforth integration step.

    Uses 4-step Adams-Bashforth method when history is available,
    falls back to simpler methods with less history.

    Parameters
    ----------
    state : np.ndarray
        Current state vector
    derivs_history : list
        List of previous derivative vectors (most recent first)
    params : dict
        Model parameters
    forcing : dict
        Forcing arrays
    fishing : dict
        Fishing parameters
    dt : float
        Time step

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        Updated state vector and new derivative
    """
    # Calculate current derivative
    deriv_current = deriv_vector(state, params, forcing, fishing)
    deriv_current = _sanitize_deriv(deriv_current)

    n_history = len(derivs_history)

    if n_history >= 3:
        # 4-step Adams-Bashforth
        # y_{n+1} = y_n + dt/24 * (55*f_n - 59*f_{n-1} + 37*f_{n-2} - 9*f_{n-3})
        coef = np.array([55, -59, 37, -9]) / 24.0
        delta = coef[0] * deriv_current
        for i, c in enumerate(coef[1:]):
            if i < len(derivs_history):
                delta += c * _sanitize_deriv(np.asarray(derivs_history[i]))
        new_state = state + dt * delta
    elif n_history >= 2:
        # 3-step Adams-Bashforth
        coef = np.array([23, -16, 5]) / 12.0
        delta = (
            coef[0] * deriv_current
            + coef[1] * _sanitize_deriv(np.asarray(derivs_history[0]))
            + coef[2] * _sanitize_deriv(np.asarray(derivs_history[1]))
        )
        new_state = state + dt * delta
    elif n_history >= 1:
        # 2-step Adams-Bashforth
        coef = np.array([3, -1]) / 2.0
        delta = coef[0] * deriv_current + coef[1] * _sanitize_deriv(
            np.asarray(derivs_history[0])
        )
        new_state = state + dt * delta
    else:
        # Euler method
        new_state = state + dt * deriv_current

    # Prevent extreme relative jumps that indicate instability
    # Cap relative change per step to avoid runaway in Adams-Bashforth
    eps = 1e-12
    min_ratio = 1e-6
    max_ratio = 10.0
    ratios = new_state / np.where(state == 0, eps, state)
    ratios = np.nan_to_num(ratios, nan=1.0, posinf=max_ratio, neginf=0.0)
    ratios = np.clip(ratios, min_ratio, max_ratio)
    new_state = state * ratios

    # Ensure non-negative biomass
    new_state = np.maximum(new_state, 0.0)

    # Enforce NoIntegrate groups stay at baseline (if provided in params)
    try:
        # NoIntegrate uses 1 to indicate fast-turnover groups in params (1 = NoIntegrate)
        no_integrate = (
            np.asarray(params.get("NoIntegrate", np.zeros(len(new_state)))) != 0
        )
        if np.any(no_integrate):
            Bbase = params.get("Bbase")
            if Bbase is not None:
                new_state[no_integrate] = Bbase[no_integrate]
                deriv_current[no_integrate] = 0.0
    except (TypeError, ValueError, IndexError):
        pass

    # Instrumentation callback: if caller requested group-level instrumentation
    # (e.g., params.INSTRUMENT_GROUPS = ['Macrobenthos'] and provided
    # params.instrument_callback callable), call the callback with compact
    # numeric arrays to allow unit tests / debugging harnesses to inspect
    # intermediate AB behavior without parsing verbose logs.
    try:
        instr_groups = params.get("INSTRUMENT_GROUPS", None)
        # Prefer the original attribute-based INSTRUMENT_GROUPS (exported by rsim_run)
        # if present; this helps in cases where the params dict has been mutated
        # during warmup or other computations.
        # Ensure NUM_GROUPS is available for legacy numeric instrument group checks
        NUM_GROUPS = params.get("NUM_GROUPS", None)
        try:
            attr_ig = globals().get("_last_instrument_groups", None)
            if attr_ig is not None:
                # If attr_ig differs from the dict value, prefer the attribute
                # (it represents the caller's original intention).
                if instr_groups is None or instr_groups != attr_ig:
                    # If the attribute appears to be a numeric legacy 1-based
                    # list, convert it aggressively here so caller intent is
                    # preserved and a DeprecationWarning is emitted.
                    try:
                        if isinstance(attr_ig, (list, tuple)) and all(
                            isinstance(x, (int, float, np.integer)) for x in attr_ig
                        ):
                            nums = [int(x) for x in attr_ig]
                            # Convert numeric 1-based indices only when caller explicitly
                            # opts in via INSTRUMENT_ASSUME_1BASED or when numbers exceed
                            # the valid 0-based range but are within plausible 1-based
                            # bounds (1..NUM_GROUPS).
                            assume_flag = params.get("INSTRUMENT_ASSUME_1BASED", False)
                            if nums and (
                                assume_flag
                                or (
                                    any(v > NUM_GROUPS - 1 for v in nums)
                                    and all(1 <= v <= NUM_GROUPS for v in nums)
                                )
                            ):
                                import warnings as _warnings

                                _warnings.warn(
                                    "Numeric INSTRUMENT_GROUPS indices are expected to be 0-based. "
                                    "Detected probable 1-based indices — converting to 0-based for now. "
                                    "Please update your code to use 0-based indices.",
                                    DeprecationWarning,
                                    stacklevel=3,
                                )
                                nums = [v - 1 for v in nums]
                                instr_groups = nums
                                # write back normalization to params dict/attr if possible
                                try:
                                    params["INSTRUMENT_GROUPS"] = instr_groups
                                except (TypeError, KeyError):
                                    try:
                                        setattr(
                                            params, "INSTRUMENT_GROUPS", instr_groups
                                        )
                                    except (TypeError, AttributeError):
                                        pass
                            else:
                                instr_groups = attr_ig
                        else:
                            instr_groups = attr_ig
                    except (TypeError, ValueError):
                        instr_groups = attr_ig
        except (TypeError, ValueError):
            pass

        # Resolve instrumentation callback: prefer per-call params dict value, fallback
        # to module-level last-known callback (set by rsim_run) to handle callsites
        # that attach the callback as an attribute on the params object instead
        # of the params dict (legacy code paths).
        cb = params.get("instrument_callback", None)
        if cb is None:
            # Module-level fallback (set by rsim_run if available)
            try:
                cb = globals().get("_last_instrument_callback", None)
                if cb is not None:
                    logger.debug("INSTRUMENT: using module-level fallback callback")
            except (TypeError, AttributeError):
                cb = None
        # Print debug info without referencing undefined symbols
        try:
            logger.debug(
                "INSTRUMENT-DEBUG: instr_groups=%s cb_present=%s cb=%s",
                instr_groups,
                cb is not None,
                cb,
            )
        except (TypeError, ValueError):
            pass
        # Only proceed if caller requested instrumentation via instr_groups
        # and a callback is available.
        if instr_groups is not None and cb is not None:
            # Prefer a pre-normalized numeric list (0-based indices) when provided
            idxs = set()
            spname = params.get("spname", None)
            # If instr_groups is a list of numeric indices (possibly normalized),
            # use them directly; otherwise try to resolve names to indices.
            try:
                # treat as numeric list when all elements are ints
                if isinstance(instr_groups, (list, tuple)) and all(
                    isinstance(x, (int, np.integer)) for x in instr_groups
                ):
                    # Detailed tracing for numeric-based instrument group resolution
                    nums = [int(x) for x in instr_groups]
                    max_idx = len(state) - 1
                    try:
                        logger.debug(
                            "INSTRUMENT-TRACE: before conversion nums=%s max_idx=%s instr_groups_id=%s params_has=%s _last_instrument_groups=%s",
                            nums,
                            max_idx,
                            id(instr_groups),
                            (
                                "INSTRUMENT_GROUPS" in params
                                if isinstance(params, dict)
                                else hasattr(params, "INSTRUMENT_GROUPS")
                            ),
                            globals().get("_last_instrument_groups", None),
                        )
                    except (TypeError, ValueError):
                        pass

                    # Avoid double-conversion: assume numeric lists are already 0-based
                    # unless they contain values outside the valid 0-based range.
                    # Only convert if some values exceed the max 0-based index but are
                    # within the plausible 1-based range (1..max_idx+1).
                    try:
                        if (
                            nums
                            and any(v > max_idx for v in nums)
                            and all(1 <= v <= max_idx + 1 for v in nums)
                        ):
                            import warnings as _warnings

                            logger.debug(
                                "INSTRUMENT-TRACE: detected probable 1-based numeric indices %s; converting to 0-based",
                                nums,
                            )
                            _warnings.warn(
                                "Numeric INSTRUMENT_GROUPS indices are expected to be 0-based. "
                                "Detected probable 1-based indices — converting to 0-based for now. "
                                "Please update your code to use 0-based indices.",
                                DeprecationWarning,
                                stacklevel=3,
                            )
                            nums = [v - 1 for v in nums]
                    except (TypeError, ValueError):
                        pass

                    try:
                        logger.debug(
                            "INSTRUMENT-TRACE: after conversion (or no conversion) nums=%s",
                            nums,
                        )
                    except (TypeError, ValueError):
                        pass

                    # Update idxs with the resolved numeric values (assume normalized unless converted above)
                    idxs.update(int(x) for x in nums)
                    try:
                        logger.debug(
                            "INSTRUMENT-TRACE: idxs updated -> %s (raw), params['INSTRUMENT_GROUPS']=%s",
                            sorted(idxs),
                            (
                                params.get("INSTRUMENT_GROUPS", None)
                                if isinstance(params, dict)
                                else getattr(params, "INSTRUMENT_GROUPS", None)
                            ),
                        )
                    except (TypeError, ValueError):
                        pass
                else:
                    model_df = params.get("model", None)
                    for g in instr_groups:
                        if isinstance(g, str):
                            # Prefer model-defined ordering when available
                            if (
                                model_df is not None
                                and hasattr(model_df, "columns")
                                and "Group" in model_df.columns
                            ):
                                groups_list = list(model_df["Group"])
                                if g in groups_list:
                                    idxs.add(groups_list.index(g))
                                    continue
                            if spname is not None and g in spname:
                                sp_idx = spname.index(g)
                                if sp_idx > 0:
                                    idxs.add(sp_idx - 1)
                        else:
                            try:
                                val = int(g)
                                idxs.add(val)
                            except (TypeError, ValueError):
                                pass
            except (TypeError, ValueError):
                # Best-effort: if resolution fails, leave idxs empty
                idxs = set()
            # Filter indices to valid range and sort
            if idxs:
                max_idx = len(state) - 1
                valid_idxs = sorted(i for i in idxs if 0 <= i <= max_idx)

                # If we exported caller attribute INSTRUMENT_GROUPS earlier, use it
                # only as a fallback when dict-derived resolution failed. This avoids
                # preferring older caller attribute values that may be legacy 1-based
                # and lead to conflicting normalization choices.
                try:
                    attr_ig = globals().get("_last_instrument_groups", None)
                    if attr_ig is not None:
                        alt_idxs = set()
                        # Resolve attribute-provided groups similarly to dict ones
                        if isinstance(attr_ig, (list, tuple)):
                            if all(isinstance(x, (int, np.integer)) for x in attr_ig):
                                nums = [int(x) for x in attr_ig]
                                # Only convert attribute-provided numeric 1-based indices
                                # when caller explicitly opts in via INSTRUMENT_ASSUME_1BASED
                                if params.get("INSTRUMENT_ASSUME_1BASED", False):
                                    if (
                                        nums
                                        and any(v > max_idx for v in nums)
                                        and all(1 <= v <= max_idx + 1 for v in nums)
                                    ):
                                        import warnings as _warnings

                                        _warnings.warn(
                                            "Numeric INSTRUMENT_GROUPS indices are expected to be 0-based. "
                                            "Detected probable 1-based indices — converting to 0-based for now. "
                                            "Please update your code to use 0-based indices.",
                                            DeprecationWarning,
                                            stacklevel=3,
                                        )
                                        nums = [v - 1 for v in nums]
                                alt_idxs.update(int(x) for x in nums)
                            else:
                                for g in attr_ig:
                                    if (
                                        isinstance(g, str)
                                        and spname is not None
                                        and g in spname
                                    ):
                                        sp_idx = spname.index(g)
                                        if sp_idx > 0:
                                            alt_idxs.add(sp_idx - 1)
                                    else:
                                        try:
                                            val = int(g)
                                            alt_idxs.add(val)
                                        except (TypeError, ValueError):
                                            pass
                        elif isinstance(attr_ig, str) and spname is not None:
                            if attr_ig in spname:
                                sp_idx = spname.index(attr_ig)
                                if sp_idx > 0:
                                    alt_idxs.add(sp_idx - 1)

                        # Prefer attribute-derived indices when available (it represents
                        # the caller's original intent), falling back to dict-derived
                        # resolution only when attribute resolution fails.
                        alt_valid = sorted(i for i in alt_idxs if 0 <= i <= max_idx)
                        if alt_valid:
                            logger.debug(
                                "INSTRUMENT-TRACE: preferring attr_ig alt_valid=%s over dict-derived valid_idxs=%s",
                                alt_valid,
                                valid_idxs,
                            )
                            valid_idxs = alt_valid
                            # Also write back the normalized groups into params when possible
                            try:
                                normalized = list(valid_idxs)
                                try:
                                    params["INSTRUMENT_GROUPS"] = normalized
                                except (TypeError, KeyError):
                                    try:
                                        setattr(params, "INSTRUMENT_GROUPS", normalized)
                                    except (TypeError, AttributeError):
                                        pass
                                logger.debug(
                                    "INSTRUMENT-TRACE: wrote normalized attr_ig back to params: %s",
                                    normalized,
                                )
                            except (TypeError, ValueError):
                                pass
                except (TypeError, ValueError):
                    pass
                if valid_idxs:
                    idx_list = valid_idxs
                    # Collect history for these groups (may be empty)
                    hist = [np.asarray(h)[idx_list].tolist() for h in derivs_history]
                    payload = {
                        "method": "AB",
                        "groups": idx_list,
                        "deriv_current": np.asarray(deriv_current)[idx_list].tolist(),
                        "derivs_history": hist,
                        "new_state": np.asarray(new_state)[idx_list].tolist(),
                        "dt": float(dt),
                    }
                try:
                    try:
                        if (
                            isinstance(params, dict)
                            and params.get("VERBOSE_INSTRUMENTATION")
                        ) or getattr(params, "VERBOSE_INSTRUMENTATION", False):
                            logger.debug(
                                "INSTRUMENT-TRACE-PAYLOAD: idx_list=%s state_len=%s deriv_slice=%s new_state_slice=%s cb=%s params_INSTRUMENT_GROUPS=%s _last_instrument_groups=%s",
                                idx_list,
                                len(state),
                                np.asarray(deriv_current)[idx_list].tolist(),
                                np.asarray(new_state)[idx_list].tolist(),
                                cb,
                                (
                                    params.get("INSTRUMENT_GROUPS", None)
                                    if isinstance(params, dict)
                                    else getattr(params, "INSTRUMENT_GROUPS", None)
                                ),
                                globals().get("_last_instrument_groups", None),
                            )
                    except (TypeError, ValueError):
                        pass
                    logger.debug("INSTRUMENT: calling callback groups=%s", idx_list)
                    cb(payload)
                except Exception as e:
                    # Don't allow instrumentation failures to break integration
                    logger.debug("Instrumentation callback failed: %s", e)
    except Exception as e:
        logger.debug("AB outer instrumentation error: %s", e)

    return new_state, deriv_current

run_ecosim

run_ecosim(initial_state: ndarray, params: dict, forcing: dict, fishing: dict, years: float, dt: float = 1 / 12, method: str = 'ab', save_interval: int = 1) -> dict

Run Ecosim simulation.

Parameters:

Name Type Description Default
initial_state ndarray

Initial biomass vector

required
params dict

Model parameters

required
forcing dict

Forcing arrays

required
fishing dict

Fishing parameters

required
years float

Number of years to simulate

required
dt float

Time step (fraction of year)

1 / 12
method str

Integration method ('rk4' or 'ab')

'ab'
save_interval int

Save state every N steps

1

Returns:

Type Description
dict

Results containing: - time: Time points - biomass: Biomass time series [time, group] - years, dt, method: Echoed run settings

Source code in pypath/core/ecosim_deriv.py
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
def run_ecosim(
    initial_state: np.ndarray,
    params: dict,
    forcing: dict,
    fishing: dict,
    years: float,
    dt: float = 1 / 12,  # Monthly time step
    method: str = "ab",  # 'rk4' or 'ab'
    save_interval: int = 1,
) -> dict:
    """
    Run Ecosim simulation.

    Parameters
    ----------
    initial_state : np.ndarray
        Initial biomass vector
    params : dict
        Model parameters
    forcing : dict
        Forcing arrays
    fishing : dict
        Fishing parameters
    years : float
        Number of years to simulate
    dt : float
        Time step (fraction of year)
    method : str
        Integration method ('rk4' or 'ab')
    save_interval : int
        Save state every N steps

    Returns
    -------
    dict
        Results containing:
        - time: Time points
        - biomass: Biomass time series [time, group]
        - years, dt, method: Echoed run settings
    """
    n_steps = int(years / dt)
    n_groups = len(initial_state)

    # Steps at which state is recorded.  Kept as a range object:
    # int membership on a range is O(1), whereas the previous list
    # made every per-step "step in save_times" test O(n_saves).
    save_times = range(0, n_steps + 1, save_interval)
    n_saves = len(save_times)

    time_out = np.zeros(n_saves)
    biomass_out = np.zeros((n_saves, n_groups))

    # Initialize state
    state = initial_state.copy()
    derivs_history = []  # Derivative history for Adams-Bashforth

    # Save initial state
    save_idx = 0
    time_out[save_idx] = 0.0
    biomass_out[save_idx] = state
    save_idx += 1

    # Main integration loop
    for step in range(1, n_steps + 1):
        t = step * dt

        # Update forcing for current time if time-varying
        # (This would interpolate forcing arrays to current time)

        if method == "rk4":
            state = integrate_rk4(state, params, forcing, fishing, dt)
        else:  # Adams-Bashforth
            state, new_deriv = integrate_ab(
                state, derivs_history, params, forcing, fishing, dt
            )
            # Keep only the last 3 derivatives (3rd-order AB history)
            derivs_history.insert(0, new_deriv)
            if len(derivs_history) > 3:
                derivs_history.pop()

        # Save if at save interval
        if step in save_times:
            time_out[save_idx] = t
            biomass_out[save_idx] = state
            save_idx += 1

    return {
        "time": time_out,
        "biomass": biomass_out,
        "years": years,
        "dt": dt,
        "method": method,
    }

Stanzas (Multi-Stanza Groups)

pypath.core.stanzas

Multi-stanza (age-structured) groups for PyPath.

This module implements age-structured population dynamics using Von Bertalanffy growth and stage-based mortality rates.

Based on Rpath's rpath.stanzas() and rsim.stanzas() functions.

EcosimStanzaParams dataclass

Container for all multi-stanza parameters.

Attributes: n_stanza_groups: Number of stanza groups stanza_groups: List of StanzaGroup objects stanza_individuals: List of StanzaIndividual objects st_groups: Dict mapping stanza group number to a DataFrame with stanza calculations per age

Source code in pypath/core/stanzas.py
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
@dataclass
class EcosimStanzaParams:
    """Container for all multi-stanza parameters.

    Attributes:
        n_stanza_groups: Number of stanza groups
        stanza_groups: List of StanzaGroup objects
        stanza_individuals: List of StanzaIndividual objects
        st_groups: Mapping from stanza group number (1-based) to a
            DataFrame of per-age stanza calculations
    """

    n_stanza_groups: int = 0
    stanza_groups: List[StanzaGroup] = field(default_factory=list)
    stanza_individuals: List[StanzaIndividual] = field(default_factory=list)
    st_groups: Dict[int, pd.DataFrame] = field(default_factory=dict)

RsimStanzas dataclass

Stanza parameters for Ecosim simulation.

Contains age-structured dynamics parameters needed by the simulation engine.

Source code in pypath/core/stanzas.py
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
@dataclass
class RsimStanzas:
    """Stanza parameters for Ecosim simulation.

    Contains age-structured dynamics parameters needed by
    the simulation engine.

    The small (2x2 / length-2) array defaults are placeholders;
    rsim_stanzas() populates (or zero-fills, for models without
    stanza groups) every field.
    """

    n_split: int = 0  # Number of multi-stanza (split) groups
    n_stanzas: np.ndarray = field(default_factory=lambda: np.array([0]))  # Stanza count per split group
    ecopath_code: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))  # presumably Ecopath group index per stanza — TODO confirm
    age1: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))  # presumably first month of each stanza — TODO confirm
    age2: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))  # presumably last month of each stanza — TODO confirm

    # Age-at-size arrays (rows=months, cols=species)
    base_wage_s: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))  # baseline weight at age (cf. WageS)
    base_nage_s: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))  # baseline numbers at age (cf. NageS)
    base_qage_s: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))  # baseline consumption at age (cf. QageS)

    # Maturity and recruitment
    wmat: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))  # weight at 50% maturity relative to Winf
    rec_power: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))  # recruitment power parameter
    recruits: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))  # base number of recruits
    vbgf_d: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))  # Von Bertalanffy d parameter
    r_zero_s: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))
    vbm: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))

    # Growth coefficients
    split_alpha: np.ndarray = field(default_factory=lambda: np.zeros((2, 2)))

    # Spawning
    spawn_x: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))
    spawn_energy: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))
    base_eggs_stanza: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))
    base_spawn_bio: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))
    r_scale_split: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))
    base_stanza_pred: np.ndarray = field(default_factory=lambda: np.array([0.0, 0.0]))

StanzaGroup dataclass

Parameters for a single multi-stanza species group.

Attributes: stanza_group_num: Index of this stanza group (1-based) n_stanzas: Number of age stanzas in this group vbgf_ksp: Von Bertalanffy K parameter (annual) vbgf_d: Von Bertalanffy d parameter (default 2/3) wmat: Weight at 50% maturity relative to Winf bab: Biomass accumulation / background mortality rec_power: Recruitment power parameter recruits: Base number of recruits (R) last_month: Final month of the oldest age class

Source code in pypath/core/stanzas.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
@dataclass
class StanzaGroup:
    """Parameters for a single multi-stanza species group.

    Attributes:
        stanza_group_num: Index of this stanza group (1-based)
        n_stanzas: Number of age stanzas in this group
        vbgf_ksp: Von Bertalanffy K parameter (annual)
        vbgf_d: Von Bertalanffy d parameter (default 2/3)
        wmat: Weight at 50% maturity relative to Winf
        bab: Biomass accumulation / background mortality
        rec_power: Recruitment power parameter
        recruits: Base number of recruits (R); computed by
            rpath_stanzas()
        last_month: Final month of the oldest age class; computed by
            rpath_stanzas() (0 until then)
    """

    stanza_group_num: int
    stanza_num: int
    vbgf_ksp: float
    vbgf_d: float = 0.66667  # approximately 2/3
    wmat: float = 0.0
    bab: float = 0.0
    rec_power: float = 1.0
    recruits: float = 0.0
    last_month: int = 0

StanzaIndividual dataclass

Parameters for an individual stanza (age class) within a group.

Attributes: stanza_group_num: Index of parent stanza group stanza_num: Index of this stanza within group (1-based) group_num: Ecopath group number for this stanza group_name: Name of this stanza group in model first: First month of this age class last: Last month of this age class z: Total mortality rate (annual) leading: True if this is the leading (reference) stanza biomass: Calculated biomass qb: Calculated Q/B

Source code in pypath/core/stanzas.py
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
@dataclass
class StanzaIndividual:
    """Parameters for an individual stanza (age class) within a group.

    Attributes:
        stanza_group_num: Index of parent stanza group
        stanza_num: Index of this stanza within group (1-based)
        group_num: Ecopath group number for this stanza
        group_name: Name of this stanza group in model
        first: First month of this age class
        last: Last month of this age class
        z: Total mortality rate (annual)
        leading: True if this is the leading (reference) stanza
        biomass: Calculated biomass
        qb: Calculated Q/B

    Note:
        rpath_stanzas() attaches additional working attributes at
        runtime — bs_num, qs_num, bs, qs — holding absolute and
        relative biomass/consumption contributions for the stanza.
    """

    stanza_group_num: int
    stanza_num: int
    group_num: int
    group_name: str
    first: int
    last: int
    z: float
    leading: bool = False
    biomass: float = 0.0
    qb: float = 0.0

calculate_survival

calculate_survival(z_by_month: ndarray, bab: float = 0.0) -> np.ndarray

Calculate cumulative survival to each age.

Parameters:

Name Type Description Default
z_by_month ndarray

Monthly mortality rate for each month

required
bab float

Background/accumulation mortality rate (annual)

0.0

Returns:

Type Description
ndarray

Cumulative survival probability to each age

Source code in pypath/core/stanzas.py
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
def calculate_survival(z_by_month: np.ndarray, bab: float = 0.0) -> np.ndarray:
    """Calculate cumulative survival to each age.

    Parameters
    ----------
    z_by_month : np.ndarray
        Monthly mortality rate for each month
    bab : float
        Background/accumulation mortality rate (annual)

    Returns
    -------
    np.ndarray
        Cumulative survival probability to each age
    """
    # Per-month instantaneous mortality (annual rates / 12).
    per_month_z = (z_by_month + bab) / 12.0
    step_survival = np.exp(-per_month_z)
    # Survival is measured *to* each age: the first month starts at
    # 1.0 and every later age uses the preceding month's step.
    shifted_steps = np.r_[1.0, step_survival[:-1]]
    return np.cumprod(shifted_steps)

create_stanza_params

create_stanza_params(groups: List[Dict[str, Any]], individuals: List[Dict[str, Any]]) -> EcosimStanzaParams

Create EcosimStanzaParams from dictionaries.

Convenience function to create stanza parameters from dictionary inputs.

Parameters:

Name Type Description Default
groups List[Dict[str, Any]]

List of dictionaries with stanza group parameters. Required keys: stanza_group_num, n_stanzas, vbgf_ksp. Optional keys: vbgf_d, wmat, bab, rec_power.

required
individuals List[Dict[str, Any]]

List of dictionaries with individual stanza parameters. Required keys: stanza_group_num, stanza_num, group_num, group_name, first, last, z. Optional keys: leading.

required

Returns:

Type Description
EcosimStanzaParams

EcosimStanzaParams object

Examples:

>>> groups = [{'stanza_group_num': 1, 'n_stanzas': 2, 'vbgf_ksp': 0.3}]
>>> individuals = [
...     {'stanza_group_num': 1, 'stanza_num': 1, 'group_num': 1,
...      'group_name': 'Fish_juv', 'first': 0, 'last': 11,
...      'z': 1.5, 'leading': False},
...     {'stanza_group_num': 1, 'stanza_num': 2, 'group_num': 2,
...      'group_name': 'Fish_adult', 'first': 12, 'last': 60,
...      'z': 0.5, 'leading': True}
... ]
>>> params = create_stanza_params(groups, individuals)
Source code in pypath/core/stanzas.py
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
def create_stanza_params(
    groups: List[Dict[str, Any]], individuals: List[Dict[str, Any]]
) -> EcosimStanzaParams:
    """Build an EcosimStanzaParams object from plain dictionaries.

    Convenience constructor: converts dictionary specifications into
    StanzaGroup / StanzaIndividual dataclasses.

    Parameters
    ----------
    groups : List[Dict[str, Any]]
        Stanza-group parameter dicts.
        Required keys: stanza_group_num, n_stanzas, vbgf_ksp.
        Optional keys: vbgf_d, wmat, bab, rec_power.
    individuals : List[Dict[str, Any]]
        Individual stanza parameter dicts.
        Required keys: stanza_group_num, stanza_num, group_num,
        group_name, first, last, z.
        Optional keys: leading.

    Returns
    -------
    EcosimStanzaParams
        EcosimStanzaParams object

    Examples
    --------
        >>> groups = [{'stanza_group_num': 1, 'n_stanzas': 2, 'vbgf_ksp': 0.3}]
        >>> individuals = [
        ...     {'stanza_group_num': 1, 'stanza_num': 1, 'group_num': 1,
        ...      'group_name': 'Fish_juv', 'first': 0, 'last': 11,
        ...      'z': 1.5, 'leading': False},
        ...     {'stanza_group_num': 1, 'stanza_num': 2, 'group_num': 2,
        ...      'group_name': 'Fish_adult', 'first': 12, 'last': 60,
        ...      'z': 0.5, 'leading': True}
        ... ]
        >>> params = create_stanza_params(groups, individuals)
    """
    built_groups = [
        StanzaGroup(
            stanza_group_num=spec["stanza_group_num"],
            n_stanzas=spec["n_stanzas"],
            vbgf_ksp=spec["vbgf_ksp"],
            vbgf_d=spec.get("vbgf_d", 0.66667),
            wmat=spec.get("wmat", 0.0),
            bab=spec.get("bab", 0.0),
            rec_power=spec.get("rec_power", 1.0),
        )
        for spec in groups
    ]

    built_individuals = [
        StanzaIndividual(
            stanza_group_num=spec["stanza_group_num"],
            stanza_num=spec["stanza_num"],
            group_num=spec["group_num"],
            group_name=spec["group_name"],
            first=spec["first"],
            last=spec["last"],
            z=spec["z"],
            leading=spec.get("leading", False),
        )
        for spec in individuals
    ]

    return EcosimStanzaParams(
        n_stanza_groups=len(groups),
        stanza_groups=built_groups,
        stanza_individuals=built_individuals,
    )

rpath_stanzas

rpath_stanzas(rpath_params: RpathParams) -> RpathParams

Calculate biomass and consumption for multi-stanza groups.

Uses the leading stanza to calculate biomass and consumption of trailing stanzas necessary to support the leading stanza.

This implements Von Bertalanffy growth to distribute biomass across age classes based on the leading stanza's biomass.

Parameters:

Name Type Description Default
rpath_params RpathParams

RpathParams object with stanza information

required

Returns:

Type Description
RpathParams

Updated RpathParams with calculated stanza biomass and Q/B

Source code in pypath/core/stanzas.py
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
def rpath_stanzas(rpath_params: RpathParams) -> RpathParams:
    """Calculate biomass and consumption for multi-stanza groups.

    Uses the leading stanza to calculate biomass and consumption
    of trailing stanzas necessary to support the leading stanza.

    This implements Von Bertalanffy growth to distribute biomass
    across age classes based on the leading stanza's biomass.

    Parameters
    ----------
    rpath_params : RpathParams
        RpathParams object with stanza information

    Returns
    -------
    RpathParams
        Updated RpathParams with calculated stanza biomass and Q/B.
        The input object is mutated in place (model DataFrame and
        stanza parameters) and also returned.

    Raises
    ------
    ValueError
        If a stanza group contains no stanza marked as leading.
    """
    # Check if stanzas exist
    if rpath_params.stanzas is None:
        return rpath_params

    stanza_params = rpath_params.stanzas
    if stanza_params.n_stanza_groups == 0:
        return rpath_params

    n_split = stanza_params.n_stanza_groups

    # Process each stanza group
    for isp in range(n_split):
        stanza_group = stanza_params.stanza_groups[isp]

        # Get stanzas for this group (stanza_group_num is 1-based)
        group_stanzas = [
            s for s in stanza_params.stanza_individuals if s.stanza_group_num == isp + 1
        ]
        group_stanzas.sort(key=lambda x: x.stanza_num)

        _n_stanzas = len(group_stanzas)

        # Find the leading stanza
        leading_stanza = None
        for st in group_stanzas:
            if st.leading:
                leading_stanza = st
                break

        if leading_stanza is None:
            raise ValueError(f"No leading stanza found for stanza group {isp + 1}")

        # Calculate last month using biomass accumulation method
        # This finds the age at which 99.999% of cumulative biomass is reached
        st_max = group_stanzas[-1]

        # Get growth parameters
        # NOTE(review): the factor of 3 presumably converts the annual VBGF
        # K into the generalized-VBGF metabolic rate before the monthly
        # division — confirm against Rpath's rpath.stanzas().
        k_monthly = (stanza_group.vbgf_ksp * 3) / 12.0
        d = stanza_group.vbgf_d
        bab = stanza_group.bab

        # Calculate out to a very long time (5999 months = ~500 years)
        ages = np.arange(st_max.first, 6000)
        monthly_z = (st_max.z + bab) / 12.0

        # Survival and biomass
        nn = np.cumprod(
            np.concatenate([[1.0], np.exp(-monthly_z * np.ones(len(ages) - 1))])
        )
        bb = nn * von_bertalanffy_weight(ages, k_monthly, d)

        # Cumulative biomass fraction
        bb_cum = np.cumsum(bb) / np.sum(bb)

        # Find age at 99.999% cumulative biomass
        # (argmax returns 0 when no element exceeds the threshold,
        # so fall back to the final age in that case)
        idx = np.argmax(bb_cum > 0.99999)
        if idx == 0 and bb_cum[0] <= 0.99999:
            idx = len(ages) - 1
        # Round the terminal age up to the end of a whole year (11, 23, ...)
        last_month = int(np.ceil((ages[idx] + 1) / 12.0) * 12 - 1)

        stanza_group.last_month = last_month

        # Update oldest stanza's last month
        group_stanzas[-1].last = last_month

        # Build age-structured table for this group
        all_ages = np.arange(group_stanzas[0].first, last_month + 1)

        st_group = pd.DataFrame(
            {
                "age": all_ages,
                "WageS": von_bertalanffy_weight(all_ages, k_monthly, d),
            }
        )
        st_group["QageS"] = von_bertalanffy_consumption(st_group["WageS"].values, d)

        # Calculate survival for each age
        # Need to assign Z by stanza (each stanza has its own mortality)
        z_by_age = np.zeros(len(all_ages))
        for st in group_stanzas:
            mask = (all_ages >= st.first) & (all_ages <= st.last)
            z_by_age[mask] = st.z

        st_group["Survive"] = calculate_survival(z_by_age, bab)

        # Biomass and consumption relative values
        st_group["B"] = st_group["Survive"] * st_group["WageS"]
        st_group["Q"] = st_group["Survive"] * st_group["QageS"]

        # Calculate relative biomass/consumption for each stanza
        # (bs_num/qs_num are attached dynamically to each StanzaIndividual)
        for st in group_stanzas:
            mask = (st_group["age"] >= st.first) & (st_group["age"] <= st.last)
            st.bs_num = st_group.loc[mask, "B"].sum()
            st.qs_num = st_group.loc[mask, "Q"].sum()

        # Total biomass and consumption denominators
        bs_denom = sum(st.bs_num for st in group_stanzas)
        qs_denom = sum(st.qs_num for st in group_stanzas)

        # Relative fractions
        for st in group_stanzas:
            st.bs = st.bs_num / bs_denom if bs_denom > 0 else 0
            st.qs = st.qs_num / qs_denom if qs_denom > 0 else 0

        # Get leading stanza biomass from model
        leading_idx = rpath_params.model[
            rpath_params.model["Group"] == leading_stanza.group_name
        ].index[0]
        leading_biomass = rpath_params.model.loc[leading_idx, "Biomass"]
        leading_qb = rpath_params.model.loc[leading_idx, "QB"]

        # Calculate total biomass and consumption from leading
        if leading_stanza.bs > 0:
            total_biomass = leading_biomass / leading_stanza.bs
        else:
            total_biomass = leading_biomass

        if leading_stanza.qs > 0:
            total_cons = leading_qb * leading_biomass / leading_stanza.qs
        else:
            total_cons = leading_qb * leading_biomass

        # Distribute to other stanzas
        for st in group_stanzas:
            st.biomass = st.bs * total_biomass
            st.qb = (st.qs * total_cons) / st.biomass if st.biomass > 0 else 0

        # Calculate recruits (numbers at age 0)
        bio_per_egg = st_group.loc[
            (st_group["age"] >= leading_stanza.first)
            & (st_group["age"] <= leading_stanza.last),
            "B",
        ].sum()

        if bio_per_egg > 0:
            recruits = leading_biomass / bio_per_egg
        else:
            recruits = 0

        stanza_group.recruits = recruits

        # Numbers at age
        st_group["NageS"] = st_group["Survive"] * recruits

        # Store in params (keyed by 1-based stanza group number)
        stanza_params.st_groups[isp + 1] = st_group

        # Update model DataFrame with calculated values
        for st in group_stanzas:
            model_idx = rpath_params.model[
                rpath_params.model["Group"] == st.group_name
            ].index
            if len(model_idx) > 0:
                rpath_params.model.loc[model_idx[0], "Biomass"] = st.biomass
                rpath_params.model.loc[model_idx[0], "QB"] = st.qb

    return rpath_params

rsim_stanzas

rsim_stanzas(rpath_params: RpathParams, state: ndarray, params: dict) -> RsimStanzas

Initialize stanza parameters for Ecosim simulation.

Creates the stanza parameter structure needed by rsim_run().

Parameters:

Name Type Description Default
rpath_params RpathParams

RpathParams object with stanza information

required
state ndarray

RsimState object with initial state

required
params dict

RsimParams object with simulation parameters

required

Returns:

Type Description
RsimStanzas

RsimStanzas object with simulation parameters

Source code in pypath/core/stanzas.py
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
def rsim_stanzas(
    rpath_params: RpathParams, state: np.ndarray, params: dict
) -> RsimStanzas:
    """Initialize stanza parameters for Ecosim simulation.

    Creates the stanza parameter structure needed by rsim_run().

    Parameters
    ----------
    rpath_params : RpathParams
        RpathParams object with stanza information
    state : np.ndarray
        RsimState object with initial state
    params : dict
        RsimParams object with simulation parameters
        (accessed by attribute, e.g. ``params.NUM_GROUPS`` — presumably an
        RsimParams dataclass despite the ``dict`` annotation; verify against caller)

    Returns
    -------
    RsimStanzas
        RsimStanzas object with simulation parameters
    """
    rstan = RsimStanzas()

    # Check if stanzas exist
    if rpath_params.stanzas is None or rpath_params.stanzas.n_stanza_groups == 0:
        # Return empty stanza structure (arrays shaped with dummy leading
        # slots so 1-based indexing elsewhere stays in bounds)
        rstan.n_split = 0
        rstan.n_stanzas = np.array([0, 0])
        rstan.ecopath_code = np.zeros((2, 2))
        rstan.age1 = np.zeros((2, 2))
        rstan.age2 = np.zeros((2, 2))
        rstan.base_wage_s = np.zeros((2, 2))
        rstan.base_nage_s = np.zeros((2, 2))
        rstan.base_qage_s = np.zeros((2, 2))
        rstan.wmat = np.array([0.0, 0.0])
        rstan.rec_power = np.array([0.0, 0.0])
        rstan.recruits = np.array([0.0, 0.0])
        rstan.vbgf_d = np.array([0.0, 0.0])
        rstan.r_zero_s = np.array([0.0, 0.0])
        rstan.vbm = np.array([0.0, 0.0])
        rstan.split_alpha = np.zeros((2, 2))
        rstan.spawn_x = np.array([0.0, 0.0])
        rstan.spawn_energy = np.array([0.0, 0.0])
        rstan.base_eggs_stanza = np.array([0.0, 0.0])
        rstan.base_spawn_bio = np.array([0.0, 0.0])
        rstan.r_scale_split = np.array([0.0, 0.0])
        # Sized NUM_GROUPS + 2 for consistency with the populated branch
        # below and with split_set_pred(), both of which index s_pred[ieco + 1]
        # with an extra leading slot for 1-based indexing.
        rstan.base_stanza_pred = np.zeros(params.NUM_GROUPS + 2)
        return rstan

    stanza_params = rpath_params.stanzas
    n_split = stanza_params.n_stanza_groups

    rstan.n_split = n_split

    # Get max stanzas and max months
    max_stanzas = max(sg.n_stanzas for sg in stanza_params.stanza_groups)
    max_months = max(sg.last_month for sg in stanza_params.stanza_groups) + 1

    # Initialize arrays with leading zeros for C-style indexing
    rstan.n_stanzas = np.zeros(n_split + 1, dtype=int)
    rstan.ecopath_code = np.full((n_split + 1, max_stanzas + 1), np.nan)
    rstan.age1 = np.full((n_split + 1, max_stanzas + 1), np.nan)
    rstan.age2 = np.full((n_split + 1, max_stanzas + 1), np.nan)
    rstan.base_wage_s = np.full((max_months, n_split + 1), np.nan)
    rstan.base_nage_s = np.full((max_months, n_split + 1), np.nan)
    rstan.base_qage_s = np.full((max_months, n_split + 1), np.nan)
    rstan.split_alpha = np.full((max_months, n_split + 1), np.nan)

    # Stanza pred accumulator (extra leading slot for 1-based indexing)
    s_pred = np.zeros(params.NUM_GROUPS + 2)

    # Process each stanza group
    for isp in range(n_split):
        stanza_group = stanza_params.stanza_groups[isp]
        rstan.n_stanzas[isp + 1] = stanza_group.n_stanzas

        # Get stanzas for this group, in stanza order
        group_stanzas = [
            s for s in stanza_params.stanza_individuals if s.stanza_group_num == isp + 1
        ]
        group_stanzas.sort(key=lambda x: x.stanza_num)

        # Fill in age codes
        for ist, st in enumerate(group_stanzas):
            rstan.ecopath_code[isp + 1, ist + 1] = st.group_num
            rstan.age1[isp + 1, ist + 1] = st.first
            rstan.age2[isp + 1, ist + 1] = st.last

        # Get age-structured data (baseline W/N/Q at age for this group)
        if isp + 1 in stanza_params.st_groups:
            st_group = stanza_params.st_groups[isp + 1]
            n_rows = len(st_group)
            rstan.base_wage_s[:n_rows, isp + 1] = st_group["WageS"].values
            rstan.base_nage_s[:n_rows, isp + 1] = st_group["NageS"].values
            rstan.base_qage_s[:n_rows, isp + 1] = st_group["QageS"].values

    # Maturity and recruitment parameters
    rstan.wmat = np.zeros(n_split + 1)
    rstan.rec_power = np.zeros(n_split + 1)
    rstan.recruits = np.zeros(n_split + 1)
    rstan.vbgf_d = np.zeros(n_split + 1)
    rstan.r_zero_s = np.zeros(n_split + 1)
    rstan.vbm = np.zeros(n_split + 1)

    for isp, sg in enumerate(stanza_params.stanza_groups):
        rstan.wmat[isp + 1] = sg.wmat
        rstan.rec_power[isp + 1] = sg.rec_power
        rstan.recruits[isp + 1] = sg.recruits
        # R0 starts at the balanced-model recruitment level
        rstan.r_zero_s[isp + 1] = sg.recruits
        rstan.vbgf_d[isp + 1] = sg.vbgf_d
        # Energy required to grow a unit in weight (scaled to Winf=1)
        rstan.vbm[isp + 1] = 1.0 - 3.0 * sg.vbgf_ksp / 12.0

    # Calculate spawning biomass and eggs: sum of N * (W - Wmat) over
    # age classes whose weight exceeds weight-at-maturity
    eggs = np.zeros(n_split + 1)
    for isp in range(n_split):
        stanza_group = stanza_params.stanza_groups[isp]
        if isp + 1 in stanza_params.st_groups:
            st_group = stanza_params.st_groups[isp + 1]
            mature_mask = st_group["WageS"] > rstan.wmat[isp + 1]
            if mature_mask.any():
                eggs[isp + 1] = (
                    st_group.loc[mature_mask, "NageS"]
                    * (st_group.loc[mature_mask, "WageS"] - rstan.wmat[isp + 1])
                ).sum()

    # Initialize split alpha growth coefficients
    for isp in range(n_split):
        stanza_group = stanza_params.stanza_groups[isp]
        group_stanzas = [
            s for s in stanza_params.stanza_individuals if s.stanza_group_num == isp + 1
        ]
        group_stanzas.sort(key=lambda x: x.stanza_num)

        if isp + 1 not in stanza_params.st_groups:
            continue

        st_group = stanza_params.st_groups[isp + 1]

        for ist, st in enumerate(group_stanzas):
            ieco = st.group_num
            first = st.first
            last = st.last

            # Total predation pressure for this stanza: sum of N * Q over ages
            mask = (st_group["age"] >= first) & (st_group["age"] <= last)
            pred = (st_group.loc[mask, "NageS"] * st_group.loc[mask, "QageS"]).sum()

            # Baseline consumption from the balanced Ecopath model
            start_eaten_by = st.qb * st.biomass

            if start_eaten_by > 0:
                # Growth coefficient: weight gained next month beyond
                # metabolic maintenance (vbm * W), scaled by pred/consumption
                wage_s = st_group["WageS"].values
                wage_s_next = np.roll(wage_s, -1)
                wage_s_next[-1] = wage_s[-1]  # plus group keeps its own weight

                split_alpha = (
                    (wage_s_next - rstan.vbm[isp + 1] * wage_s) * pred / start_eaten_by
                )
                rstan.split_alpha[first : last + 1, isp + 1] = split_alpha[
                    first : last + 1
                ]

            s_pred[ieco + 1] = pred

        # Carry over final split alpha to plus group
        last_stanza = group_stanzas[-1]
        final_age = last_stanza.last
        if final_age > 0 and final_age < max_months:
            rstan.split_alpha[final_age, isp + 1] = rstan.split_alpha[
                final_age - 1, isp + 1
            ]

    # Misc parameters
    # Spawn X is Beverton-Holt. 10000 = off, 2 = half saturation
    rstan.spawn_x = np.concatenate([[0.0], np.full(n_split, 10000.0)])
    rstan.spawn_energy = np.concatenate([[0.0], np.ones(n_split)])
    rstan.base_eggs_stanza = eggs
    rstan.base_spawn_bio = eggs.copy()
    rstan.r_scale_split = np.concatenate([[0.0], np.ones(n_split)])
    rstan.base_stanza_pred = s_pred

    return rstan

split_set_pred

split_set_pred(stanzas: RsimStanzas, state: ndarray, params: dict) -> None

Set predation rates for stanza groups.

Updates the consumption calculations for multi-stanza groups based on current biomass.

Parameters:

Name Type Description Default
stanzas RsimStanzas

RsimStanzas object

required
state ndarray

RsimState with current biomass

required
params dict

RsimParams with model parameters

required
Source code in pypath/core/stanzas.py
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
def split_set_pred(stanzas: RsimStanzas, state: np.ndarray, params: dict) -> None:
    """Set predation rates for stanza groups.

    Recomputes total consumption (predation pressure) for each
    multi-stanza group from the current numbers- and consumption-at-age
    arrays, writing the result into ``stanzas.base_stanza_pred``.

    Parameters
    ----------
    stanzas : RsimStanzas
        RsimStanzas object
    state : np.ndarray
        RsimState with current biomass
    params : dict
        RsimParams with model parameters
    """
    # Nothing to do when the model has no multi-stanza groups
    if stanzas.n_split == 0:
        return

    # Extra leading slot keeps 1-based indexing valid
    accumulated = np.zeros(params.NUM_GROUPS + 2)

    for grp in range(1, stanzas.n_split + 1):
        stanza_count = stanzas.n_stanzas[grp]
        if stanza_count == 0:
            continue

        for stz in range(1, stanza_count + 1):
            eco_idx = int(stanzas.ecopath_code[grp, stz])
            age_lo = int(stanzas.age1[grp, stz])
            age_hi = int(stanzas.age2[grp, stz])

            # Total consumption for this stanza: sum of N * Q over its ages,
            # skipping unfilled (NaN) age slots
            total = 0.0
            for age in range(age_lo, age_hi + 1):
                numbers = stanzas.base_nage_s[age, grp]
                consumption = stanzas.base_qage_s[age, grp]
                if np.isnan(numbers) or np.isnan(consumption):
                    continue
                total += numbers * consumption

            accumulated[eco_idx + 1] = total

    stanzas.base_stanza_pred = accumulated

split_update

split_update(stanzas: RsimStanzas, state: ndarray, params: dict, sim_month: int) -> None

Update stanza age structure for a simulation month.

This updates the numbers-at-age, weight-at-age, and recruitment for multi-stanza groups.

Called monthly during Ecosim simulation.

Parameters:

Name Type Description Default
stanzas RsimStanzas

RsimStanzas object

required
state ndarray

RsimState with current biomass

required
params dict

RsimParams with model parameters

required
sim_month int

Current simulation month

required
Source code in pypath/core/stanzas.py
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
def split_update(
    stanzas: RsimStanzas, state: np.ndarray, params: dict, sim_month: int
) -> None:
    """Update stanza age structure for a simulation month.

    This updates the numbers-at-age, weight-at-age, and
    recruitment for multi-stanza groups.

    Called monthly during Ecosim simulation.

    Mutates ``stanzas`` in place (``base_spawn_bio`` and ``base_nage_s``);
    weight-at-age (``base_wage_s``) is not modified here.

    Parameters
    ----------
    stanzas : RsimStanzas
        RsimStanzas object
    state : np.ndarray
        RsimState with current biomass
    params : dict
        RsimParams with model parameters
    sim_month : int
        Current simulation month
    """
    # No multi-stanza groups: nothing to update
    if stanzas.n_split == 0:
        return

    for isp in range(1, stanzas.n_split + 1):
        n_stanzas = stanzas.n_stanzas[isp]

        if n_stanzas == 0:
            continue

        # Get Von Bertalanffy parameters
        # NOTE(review): _vbm/_vbgf_d are fetched but unused in this function;
        # growth is presumably handled elsewhere — confirm.
        _vbm = stanzas.vbm[isp]
        _vbgf_d = stanzas.vbgf_d[isp]

        # Get current spawning biomass
        spawn_bio = 0.0
        wmat = stanzas.wmat[isp]

        # Sum spawning biomass from mature age classes:
        # N * (W - Wmat) over all ages whose weight exceeds maturity weight
        for ist in range(1, n_stanzas + 1):
            first = int(stanzas.age1[isp, ist])
            last = int(stanzas.age2[isp, ist])

            for age in range(first, last + 1):
                wage_s = stanzas.base_wage_s[age, isp]
                nage_s = stanzas.base_nage_s[age, isp]

                if wage_s > wmat and not np.isnan(wage_s) and not np.isnan(nage_s):
                    spawn_bio += nage_s * (wage_s - wmat)

        stanzas.base_spawn_bio[isp] = spawn_bio

        # Calculate recruitment using Beverton-Holt if spawn_x < 10000
        # (spawn_x = 10000 means density dependence is off; see rsim_stanzas)
        spawn_x = stanzas.spawn_x[isp]
        r_zero = stanzas.r_zero_s[isp]
        base_spawn = stanzas.base_eggs_stanza[isp]

        if spawn_x < 9999 and base_spawn > 0:
            # Beverton-Holt recruitment
            rel_spawn = spawn_bio / base_spawn
            recruits = (
                r_zero * rel_spawn / (1.0 + (spawn_x - 1.0) * rel_spawn / spawn_x)
            )
        else:
            # Density dependence off (or no baseline eggs): constant recruitment
            recruits = stanzas.recruits[isp]

        # Update numbers at age (aging process)
        # Shift numbers forward by one month
        # NOTE(review): np.roll wraps the oldest age class into slot 0, which
        # is then overwritten by recruits — the final age class's numbers are
        # dropped rather than accumulated into a plus group; confirm intended.
        new_nage = np.roll(stanzas.base_nage_s[:, isp], 1)
        new_nage[0] = recruits  # New recruits enter at age 0

        # Apply mortality
        for ist in range(1, n_stanzas + 1):
            ieco = int(stanzas.ecopath_code[isp, ist])
            first = int(stanzas.age1[isp, ist])
            last = int(stanzas.age2[isp, ist])

            # Get current mortality from state (guard against indexing issues)
            if hasattr(params, "MzeroMort") and (ieco + 1) < len(params.MzeroMort):
                m0 = params.MzeroMort[ieco + 1]
            else:
                m0 = 0.0

            # Apply monthly mortality (annual M0 converted to a monthly
            # survival fraction)
            monthly_z = m0 / 12.0
            survival = np.exp(-monthly_z)

            for age in range(first, last + 1):
                if age < len(new_nage):
                    new_nage[age] *= survival

        stanzas.base_nage_s[:, isp] = new_nage

von_bertalanffy_consumption

von_bertalanffy_consumption(wage_s: ndarray, d: float = 0.66667) -> np.ndarray

Calculate consumption at age from weight.

Q(a) = W(a)^d

Parameters:

Name Type Description Default
wage_s ndarray

Weight at age relative to Winf

required
d float

Allometric exponent (default 2/3)

0.66667

Returns:

Type Description
ndarray

Consumption at each age

Source code in pypath/core/stanzas.py
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
def von_bertalanffy_consumption(wage_s: np.ndarray, d: float = 0.66667) -> np.ndarray:
    """Compute consumption at age from relative weight.

    Applies the allometric relation Q(a) = W(a)^d, with weight expressed
    relative to the asymptotic weight Winf.

    Parameters
    ----------
    wage_s : np.ndarray
        Weight at age relative to Winf
    d : float
        Allometric exponent (default 2/3)

    Returns
    -------
    np.ndarray
        Consumption at each age
    """
    return np.power(wage_s, d)

von_bertalanffy_weight

von_bertalanffy_weight(age: ndarray, k: float, d: float = 0.66667) -> np.ndarray

Calculate weight at age using Von Bertalanffy growth model.

W(a) = (1 - exp(-K * (1-d) * a))^(1/(1-d))

Weight is relative to Winf (asymptotic weight = 1).

Parameters:

Name Type Description Default
age ndarray

Age in months

required
k float

Monthly K parameter (Ksp * 3 / 12)

required
d float

Allometric exponent (default 2/3)

0.66667

Returns:

Type Description
ndarray

Weight relative to Winf at each age

Source code in pypath/core/stanzas.py
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
def von_bertalanffy_weight(age: np.ndarray, k: float, d: float = 0.66667) -> np.ndarray:
    """Compute weight at age from the Von Bertalanffy growth model.

    W(a) = (1 - exp(-K * (1-d) * a))^(1/(1-d))

    Weight is expressed relative to the asymptotic weight (Winf = 1).

    Parameters
    ----------
    age : np.ndarray
        Age in months
    k : float
        Monthly K parameter (Ksp * 3 / 12)
    d : float
        Allometric exponent (default 2/3)

    Returns
    -------
    np.ndarray
        Weight relative to Winf at each age
    """
    one_minus_d = 1.0 - d
    growth_fraction = 1.0 - np.exp(-k * one_minus_d * age)
    return growth_fraction ** (1.0 / one_minus_d)

Adjustments

pypath.core.adjustments

Adjustment functions for Ecosim scenarios.

This module provides functions to modify fishing rates, forcing functions, and other scenario parameters over time.

Based on Rpath's adjust.fishing(), adjust.forcing(), and adjust.scenario() functions.

adjust_fishing

adjust_fishing(scenario: RsimScenario, parameter: str, group: Union[str, int, List[Union[str, int]]], sim_year: Union[int, range, List[int]], value: Union[float, ndarray], sim_month: Optional[Union[int, range, List[int]]] = None) -> RsimScenario

Adjust fishing parameters in an Ecosim scenario.

Modifies fishing-related forcing matrices (ForcedEffort, ForcedFRate, or ForcedCatch) for specified groups and time periods.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
parameter str

One of 'ForcedEffort', 'ForcedFRate', or 'ForcedCatch'

required
group Union[str, int, List[Union[str, int]]]

Group name(s) or index(es) to modify. Can be: - Single group name (str) or index (int) - List of group names or indices

required
sim_year Union[int, range, List[int]]

Year(s) to modify. Can be: - Single year (int) - Range of years (range object) - List of years

required
value Union[float, ndarray]

New value(s) to set. Can be: - Single value applied to all specified cells - Array matching the shape of selected cells

required
sim_month Optional[Union[int, range, List[int]]]

Optional month(s) to modify (1-12). Only used for ForcedEffort which is monthly. If None, modifies all months.

None

Returns:

Type Description
RsimScenario

Modified scenario object

Examples:

>>> # Double fishing mortality for 'Fish' group in years 10-20
>>> scenario = adjust_fishing(
...     scenario,
...     parameter='ForcedFRate',
...     group='Fish',
...     sim_year=range(10, 21),
...     value=0.5
... )

>>> # Set catch quota
>>> scenario = adjust_fishing(
...     scenario,
...     parameter='ForcedCatch',
...     group=['Cod', 'Haddock'],
...     sim_year=2025,
...     value=100.0
... )
Source code in pypath/core/adjustments.py
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
def adjust_fishing(
    scenario: RsimScenario,
    parameter: str,
    group: Union[str, int, List[Union[str, int]]],
    sim_year: Union[int, range, List[int]],
    value: Union[float, np.ndarray],
    sim_month: Optional[Union[int, range, List[int]]] = None,
) -> RsimScenario:
    """Adjust fishing parameters in an Ecosim scenario.

    Writes new values into one of the fishing forcing matrices
    (ForcedEffort, ForcedFRate, or ForcedCatch) for the requested
    groups and time period. The scenario is modified in place and
    also returned.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    parameter : str
        One of 'ForcedEffort', 'ForcedFRate', or 'ForcedCatch'
    group : Union[str, int, List[Union[str, int]]]
        Group name(s) or index(es) to modify; a single name/index
        or a list of them
    sim_year : Union[int, range, List[int]]
        Year(s) to modify: single year, range object, or list
    value : Union[float, np.ndarray]
        Single value applied to every selected cell, or an array
        matching the (rows, groups) or (rows,) selection shape
    sim_month : Optional[Union[int, range, List[int]]]
        Month(s) 1-12 to modify; only used for ForcedEffort (which is
        stored monthly). None means all months.

    Returns
    -------
    RsimScenario
        Modified scenario object

    Examples
    --------
        >>> scenario = adjust_fishing(
        ...     scenario, parameter='ForcedFRate', group='Fish',
        ...     sim_year=range(10, 21), value=0.5)
        >>> scenario = adjust_fishing(
        ...     scenario, parameter='ForcedCatch', group=['Cod', 'Haddock'],
        ...     sim_year=2025, value=100.0)
    """
    valid_params = ["ForcedEffort", "ForcedFRate", "ForcedCatch"]
    if parameter not in valid_params:
        raise ValueError(f"parameter must be one of {valid_params}")

    # Target forcing matrix on the scenario's fishing component
    target = getattr(scenario.fishing, parameter)

    # Resolve group names/indices to column positions and years to rows
    cols = _resolve_group_indices(scenario, group, parameter)
    years = _resolve_year_indices(scenario, sim_year, parameter)

    # ForcedEffort is monthly; expand year rows to month rows when asked
    if parameter == "ForcedEffort" and sim_month is not None:
        rows = _resolve_month_indices(years, sim_month, target.shape[0])
    else:
        rows = years

    # Scalar value: fill every selected cell and return early
    if np.isscalar(value):
        for r in rows:
            for c in cols:
                target[r, c] = value
        return scenario

    # Array value: shape must match the selection exactly, or be a
    # per-row vector broadcast across the selected groups
    value = np.asarray(value)
    if value.shape == (len(rows), len(cols)):
        for i, r in enumerate(rows):
            for j, c in enumerate(cols):
                target[r, c] = value[i, j]
    elif value.shape == (len(rows),):
        for i, r in enumerate(rows):
            for c in cols:
                target[r, c] = value[i]
    else:
        raise ValueError(f"value shape {value.shape} doesn't match selection")

    return scenario

adjust_forcing

adjust_forcing(scenario: RsimScenario, parameter: str, group: Union[str, int, List[Union[str, int]]], sim_year: Union[int, range, List[int]], sim_month: Union[int, range, List[int]], value: Union[float, ndarray]) -> RsimScenario

Adjust forcing parameters in an Ecosim scenario.

Modifies environmental forcing matrices (ForcedPrey, ForcedMort, ForcedRecs, ForcedSearch, ForcedActresp, ForcedMigrate, ForcedBio) for specified groups and time periods.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
parameter str

One of: - 'ForcedPrey': Prey availability multiplier - 'ForcedMort': Additional mortality multiplier - 'ForcedRecs': Recruitment multiplier - 'ForcedSearch': Search rate multiplier - 'ForcedActresp': Active respiration multiplier - 'ForcedMigrate': Migration rate (additive) - 'ForcedBio': Biomass forcing (-1 = off)

required
group Union[str, int, List[Union[str, int]]]

Group name(s) or index(es) to modify

required
sim_year Union[int, range, List[int]]

Year(s) to modify

required
sim_month Union[int, range, List[int]]

Month(s) to modify (1-12)

required
value Union[float, ndarray]

New value(s) to set

required

Returns:

Type Description
RsimScenario

Modified scenario object

Examples:

>>> # Reduce prey availability in summer
>>> scenario = adjust_forcing(
...     scenario,
...     parameter='ForcedPrey',
...     group='Zooplankton',
...     sim_year=range(1, 51),
...     sim_month=[6, 7, 8],
...     value=0.8
... )

>>> # Add pulse recruitment
>>> scenario = adjust_forcing(
...     scenario,
...     parameter='ForcedRecs',
...     group='Fish',
...     sim_year=15,
...     sim_month=3,
...     value=2.0
... )
Source code in pypath/core/adjustments.py
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
def adjust_forcing(
    scenario: RsimScenario,
    parameter: str,
    group: Union[str, int, List[Union[str, int]]],
    sim_year: Union[int, range, List[int]],
    sim_month: Union[int, range, List[int]],
    value: Union[float, np.ndarray],
) -> RsimScenario:
    """Adjust forcing parameters in an Ecosim scenario.

    Writes new values into one of the environmental forcing matrices
    for the requested groups, years, and months. The scenario is
    modified in place and also returned.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    parameter : str
        One of:
        - 'ForcedPrey': Prey availability multiplier
        - 'ForcedMort': Additional mortality multiplier
        - 'ForcedRecs': Recruitment multiplier
        - 'ForcedSearch': Search rate multiplier
        - 'ForcedActresp': Active respiration multiplier
        - 'ForcedMigrate': Migration rate (additive)
        - 'ForcedBio': Biomass forcing (-1 = off)
    group : Union[str, int, List[Union[str, int]]]
        Group name(s) or index(es) to modify
    sim_year : Union[int, range, List[int]]
        Year(s) to modify
    sim_month : Union[int, range, List[int]]
        Month(s) to modify (1-12)
    value : Union[float, np.ndarray]
        Single value for every selected cell, or an array matching the
        (rows, groups) or (rows,) selection shape

    Returns
    -------
    RsimScenario
        Modified scenario object

    Examples
    --------
        >>> scenario = adjust_forcing(
        ...     scenario, parameter='ForcedPrey', group='Zooplankton',
        ...     sim_year=range(1, 51), sim_month=[6, 7, 8], value=0.8)
        >>> scenario = adjust_forcing(
        ...     scenario, parameter='ForcedRecs', group='Fish',
        ...     sim_year=15, sim_month=3, value=2.0)
    """
    valid_params = [
        "ForcedPrey",
        "ForcedMort",
        "ForcedRecs",
        "ForcedSearch",
        "ForcedActresp",
        "ForcedMigrate",
        "ForcedBio",
    ]
    if parameter not in valid_params:
        raise ValueError(f"parameter must be one of {valid_params}")

    # Target forcing matrix on the scenario's forcing component
    target = getattr(scenario.forcing, parameter)

    # Resolve groups to columns, then years + months to monthly rows
    cols = _resolve_group_indices(scenario, group, parameter, is_forcing=True)
    years = _resolve_year_indices(scenario, sim_year, parameter)
    rows = _resolve_month_indices(years, sim_month, target.shape[0])

    # Scalar value: fill every selected cell and return early
    if np.isscalar(value):
        for r in rows:
            for c in cols:
                target[r, c] = value
        return scenario

    # Array value: exact selection shape, or per-row vector broadcast
    # across the selected groups
    value = np.asarray(value)
    if value.shape == (len(rows), len(cols)):
        for i, r in enumerate(rows):
            for j, c in enumerate(cols):
                target[r, c] = value[i, j]
    elif value.shape == (len(rows),):
        for i, r in enumerate(rows):
            for c in cols:
                target[r, c] = value[i]
    else:
        raise ValueError(f"value shape {value.shape} doesn't match selection")

    return scenario

adjust_group_parameter

adjust_group_parameter(scenario: RsimScenario, group: Union[str, int], parameter: str, value: float) -> RsimScenario

Adjust a parameter for a specific group.

Modifies group-level parameters in the scenario's params object.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
group Union[str, int]

Group name or index

required
parameter str

Parameter name. Options include: - 'MzeroMort': Background mortality - 'UnassimRespFrac': Unassimilated fraction - 'ActiveRespFrac': Active respiration fraction - 'FtimeAdj': Feeding time adjustment - 'PBopt': Optimal P/B

required
value float

New value to set

required

Returns:

Type Description
RsimScenario

Modified scenario object

Source code in pypath/core/adjustments.py
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
def adjust_group_parameter(
    scenario: RsimScenario, group: Union[str, int], parameter: str, value: float
) -> RsimScenario:
    """Adjust a parameter for a specific group.

    Sets one element of a group-indexed parameter array on the
    scenario's params object. The scenario is modified in place and
    also returned.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    group : Union[str, int]
        Group name or index
    parameter : str
        Parameter name. Options include:
        - 'MzeroMort': Background mortality
        - 'UnassimRespFrac': Unassimilated fraction
        - 'ActiveRespFrac': Active respiration fraction
        - 'FtimeAdj': Feeding time adjustment
        - 'PBopt': Optimal P/B
    value : float
        New value to set

    Returns
    -------
    RsimScenario
        Modified scenario object
    """
    # +1 offset accounts for the leading "Outside" slot in param arrays
    group_idx = _get_group_index(scenario, group) + 1  # +1 for "Outside"

    if not hasattr(scenario.params, parameter):
        raise AttributeError(f"Parameter '{parameter}' not found in scenario.params")

    param_array = getattr(scenario.params, parameter)
    if not (isinstance(param_array, np.ndarray) and len(param_array) > group_idx):
        raise ValueError(
            f"Parameter '{parameter}' not accessible at index {group_idx}"
        )

    param_array[group_idx] = value
    return scenario

adjust_scenario

adjust_scenario(scenario: RsimScenario, parameter: str, value: Union[float, int, ndarray]) -> RsimScenario

Adjust global scenario parameters.

Modifies simulation-wide parameters in the scenario's params object.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
parameter str

Parameter name to modify. Common options: - 'BURN_YEARS': Number of burn-in years (-1 = off) - 'COUPLED': Coupling flag (0 = uncoupled, 1 = coupled) - 'RK4_STEPS': Integration steps per month - 'SENSE_LIMIT': Sensitivity limits [min, max]

required
value Union[float, int, ndarray]

New value to set

required

Returns:

Type Description
RsimScenario

Modified scenario object

Examples:

>>> # Enable burn-in period
>>> scenario = adjust_scenario(scenario, 'BURN_YEARS', 10)

>>> # Change integration precision
>>> scenario = adjust_scenario(scenario, 'RK4_STEPS', 8)
Source code in pypath/core/adjustments.py
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
def adjust_scenario(
    scenario: RsimScenario, parameter: str, value: Union[float, int, np.ndarray]
) -> RsimScenario:
    """Adjust global scenario parameters.

    Sets a simulation-wide attribute on the scenario's params object.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    parameter : str
        Parameter name to modify. Common options:
        - 'BURN_YEARS': Number of burn-in years (-1 = off)
        - 'COUPLED': Coupling flag (0 = uncoupled, 1 = coupled)
        - 'RK4_STEPS': Integration steps per month
        - 'SENSE_LIMIT': Sensitivity limits [min, max]
    value : Union[float, int, np.ndarray]
        New value to set

    Returns
    -------
    RsimScenario
        Modified scenario object (same object, mutated in place)

    Raises
    ------
    AttributeError
        If `parameter` is not an attribute of scenario.params.

    Examples
    --------
        >>> # Enable burn-in period
        >>> scenario = adjust_scenario(scenario, 'BURN_YEARS', 10)

        >>> # Change integration precision
        >>> scenario = adjust_scenario(scenario, 'RK4_STEPS', 8)
    """
    # Guard clause: refuse unknown parameter names up front.
    if not hasattr(scenario.params, parameter):
        raise AttributeError(f"Parameter '{parameter}' not found in scenario.params")

    setattr(scenario.params, parameter, value)
    return scenario

create_fishing_ramp

create_fishing_ramp(scenario: RsimScenario, group: Union[str, int], start_year: int, end_year: int, start_value: float, end_value: float, parameter: str = 'ForcedFRate') -> RsimScenario

Create a linear ramp in fishing pressure.

Convenience function to linearly interpolate fishing between two values over a range of years.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
group Union[str, int]

Group to modify

required
start_year int

First year of ramp

required
end_year int

Last year of ramp

required
start_value float

Value at start_year

required
end_value float

Value at end_year

required
parameter str

Fishing parameter to modify

'ForcedFRate'

Returns:

Type Description
RsimScenario

Modified scenario object

Source code in pypath/core/adjustments.py
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
def create_fishing_ramp(
    scenario: RsimScenario,
    group: Union[str, int],
    start_year: int,
    end_year: int,
    start_value: float,
    end_value: float,
    parameter: str = "ForcedFRate",
) -> RsimScenario:
    """Apply a linear ramp in fishing pressure.

    Convenience wrapper around adjust_fishing() that interpolates
    linearly from start_value to end_value over the inclusive year
    range [start_year, end_year].

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    group : Union[str, int]
        Group to modify
    start_year : int
        First year of the ramp
    end_year : int
        Last year of the ramp (inclusive)
    start_value : float
        Value applied at start_year
    end_value : float
        Value applied at end_year
    parameter : str
        Fishing parameter to modify (default 'ForcedFRate')

    Returns
    -------
    RsimScenario
        Modified scenario object
    """
    n_steps = end_year - start_year + 1
    ramp_years = [start_year + offset for offset in range(n_steps)]
    # One evenly spaced value per year, endpoints included.
    ramp_values = np.linspace(start_value, end_value, n_steps)

    return adjust_fishing(
        scenario,
        parameter=parameter,
        group=group,
        sim_year=ramp_years,
        value=ramp_values,
    )

create_pulse_forcing

create_pulse_forcing(scenario: RsimScenario, group: Union[str, int], pulse_years: List[int], pulse_months: Union[int, List[int]], magnitude: float, parameter: str = 'ForcedRecs') -> RsimScenario

Create pulse forcing events.

Convenience function to add periodic pulse events (e.g., recruitment pulses, mortality events).

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
group Union[str, int]

Group to modify

required
pulse_years List[int]

List of years with pulse events

required
pulse_months Union[int, List[int]]

Month(s) when pulse occurs

required
magnitude float

Multiplier for pulse (>1 = increase, <1 = decrease)

required
parameter str

Forcing parameter to modify

'ForcedRecs'

Returns:

Type Description
RsimScenario

Modified scenario object

Source code in pypath/core/adjustments.py
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
def create_pulse_forcing(
    scenario: RsimScenario,
    group: Union[str, int],
    pulse_years: List[int],
    pulse_months: Union[int, List[int]],
    magnitude: float,
    parameter: str = "ForcedRecs",
) -> RsimScenario:
    """Apply pulse forcing events in selected years.

    Convenience wrapper around adjust_forcing() that applies the same
    multiplier in the given month(s) of every listed year, e.g. for
    recruitment pulses or episodic mortality events.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    group : Union[str, int]
        Group to modify
    pulse_years : List[int]
        Years in which a pulse occurs
    pulse_months : Union[int, List[int]]
        Month(s) within each pulse year
    magnitude : float
        Pulse multiplier (>1 = increase, <1 = decrease)
    parameter : str
        Forcing parameter to modify (default 'ForcedRecs')

    Returns
    -------
    RsimScenario
        Modified scenario object
    """
    # Each pulse year is forced independently with the same magnitude.
    for pulse_year in pulse_years:
        scenario = adjust_forcing(
            scenario,
            parameter=parameter,
            group=group,
            sim_year=pulse_year,
            sim_month=pulse_months,
            value=magnitude,
        )

    return scenario

create_seasonal_forcing

create_seasonal_forcing(scenario: RsimScenario, group: Union[str, int], years: Union[range, List[int]], monthly_values: List[float], parameter: str = 'ForcedPrey') -> RsimScenario

Create seasonal forcing pattern.

Applies a repeating 12-month pattern of forcing values across multiple years.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
group Union[str, int]

Group to modify

required
years Union[range, List[int]]

Years to apply pattern

required
monthly_values List[float]

List of 12 values, one per month

required
parameter str

Forcing parameter to modify

'ForcedPrey'

Returns:

Type Description
RsimScenario

Modified scenario object

Examples:

>>> # Higher prey availability in summer
>>> seasonal = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3,
...             1.3, 1.2, 1.1, 1.0, 0.9, 0.8]
>>> scenario = create_seasonal_forcing(
...     scenario, 'Zooplankton', range(1, 51), seasonal
... )
Source code in pypath/core/adjustments.py
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
def create_seasonal_forcing(
    scenario: RsimScenario,
    group: Union[str, int],
    years: Union[range, List[int]],
    monthly_values: List[float],
    parameter: str = "ForcedPrey",
) -> RsimScenario:
    """Apply a repeating 12-month forcing pattern.

    The same monthly pattern is applied to every year in `years`.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    group : Union[str, int]
        Group to modify
    years : Union[range, List[int]]
        Years over which to repeat the pattern
    monthly_values : List[float]
        Exactly 12 values, one per calendar month (Jan..Dec)
    parameter : str
        Forcing parameter to modify (default 'ForcedPrey')

    Returns
    -------
    RsimScenario
        Modified scenario object

    Raises
    ------
    ValueError
        If monthly_values does not contain exactly 12 entries.

    Examples
    --------
        >>> # Higher prey availability in summer
        >>> seasonal = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3,
        ...             1.3, 1.2, 1.1, 1.0, 0.9, 0.8]
        >>> scenario = create_seasonal_forcing(
        ...     scenario, 'Zooplankton', range(1, 51), seasonal
        ... )
    """
    if len(monthly_values) != 12:
        raise ValueError("monthly_values must have exactly 12 elements")

    # adjust_forcing expects a concrete list of years.
    year_list = list(years)

    # Force each calendar month across all years with its pattern value.
    for month_number, month_value in enumerate(monthly_values, start=1):
        scenario = adjust_forcing(
            scenario,
            parameter=parameter,
            group=group,
            sim_year=year_list,
            sim_month=month_number,
            value=month_value,
        )

    return scenario

set_handling_time

set_handling_time(scenario: RsimScenario, predator: Union[str, int], prey: Union[str, int], value: float) -> RsimScenario

Set handling time (d) for a predator-prey link.

Handling time controls predator satiation: d = 1000 disables the effect (the default), while d = 0 gives the maximum satiation effect.

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
predator Union[str, int]

Predator group name or index

required
prey Union[str, int]

Prey group name or index

required
value float

New handling time value

required

Returns:

Type Description
RsimScenario

Modified scenario object

Source code in pypath/core/adjustments.py
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
def set_handling_time(
    scenario: RsimScenario,
    predator: Union[str, int],
    prey: Union[str, int],
    value: float,
) -> RsimScenario:
    """Set handling time (d) for a predator-prey link.

    Handling time controls predator satiation: d = 1000 turns the
    effect off (the default), while d = 0 gives the maximum
    satiation effect.

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    predator : Union[str, int]
        Predator group name or index
    prey : Union[str, int]
        Prey group name or index
    value : float
        New handling time value

    Returns
    -------
    RsimScenario
        Modified scenario object

    Raises
    ------
    ValueError
        If no trophic link exists between the given predator and prey.
    """
    pred_idx = _get_group_index(scenario, predator)
    prey_idx = _get_group_index(scenario, prey)

    params = scenario.params
    # Locate the matching link; link arrays are 1-indexed.
    link = next(
        (
            i
            for i in range(1, params.NumPredPreyLinks + 1)
            if params.PreyTo[i] == pred_idx and params.PreyFrom[i] == prey_idx
        ),
        None,
    )
    if link is None:
        raise ValueError(f"No predator-prey link found between {predator} and {prey}")

    params.DD[link] = value
    return scenario

set_vulnerability

set_vulnerability(scenario: RsimScenario, predator: Union[str, int], prey: Union[str, int], value: float) -> RsimScenario

Set vulnerability (v) for a predator-prey link.

Vulnerability controls the functional response shape: - v = 1: Linear (Type I) - v = 2: Holling Type II (default) - v > 2: Approaches Type III

Parameters:

Name Type Description Default
scenario RsimScenario

RsimScenario object to modify

required
predator Union[str, int]

Predator group name or index

required
prey Union[str, int]

Prey group name or index

required
value float

New vulnerability value

required

Returns:

Type Description
RsimScenario

Modified scenario object

Source code in pypath/core/adjustments.py
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
def set_vulnerability(
    scenario: RsimScenario,
    predator: Union[str, int],
    prey: Union[str, int],
    value: float,
) -> RsimScenario:
    """Set vulnerability (v) for a predator-prey link.

    Vulnerability controls the functional response shape:
    - v = 1: Linear (Type I)
    - v = 2: Holling Type II (default)
    - v > 2: Approaches Type III

    Parameters
    ----------
    scenario : RsimScenario
        RsimScenario object to modify
    predator : Union[str, int]
        Predator group name or index
    prey : Union[str, int]
        Prey group name or index
    value : float
        New vulnerability value

    Returns
    -------
    RsimScenario
        Modified scenario object

    Raises
    ------
    ValueError
        If no trophic link exists between the given predator and prey.
    """
    pred_idx = _get_group_index(scenario, predator)
    prey_idx = _get_group_index(scenario, prey)

    params = scenario.params
    # Locate the matching link; link arrays are 1-indexed.
    link = next(
        (
            i
            for i in range(1, params.NumPredPreyLinks + 1)
            if params.PreyTo[i] == pred_idx and params.PreyFrom[i] == prey_idx
        ),
        None,
    )
    if link is None:
        raise ValueError(f"No predator-prey link found between {predator} and {prey}")

    params.VV[link] = value
    return scenario

Forcing

pypath.core.forcing

Advanced forcing mechanisms for Ecosim simulations.

This module provides: (1) state-variable forcing (biomass, catch, etc.); (2) dynamic diet rewiring based on prey availability; (3) flexible forcing modes (replace, add, multiply); and (4) temporal interpolation for sub-annual time steps.

DietRewiring dataclass

Dynamic diet matrix rewiring based on prey availability.

Allows predator diet preferences to change over time in response to changing prey abundance (prey switching, adaptive foraging).

Attributes:

Name Type Description
enabled bool

Whether diet rewiring is active

switching_power float

Prey switching exponent (higher = more switching)

min_proportion float

Minimum diet proportion to maintain (prevents division by zero)

update_interval int

How often to update diet (in months)

base_diet ndarray

Original diet matrix (n_prey x n_pred)

current_diet ndarray

Current diet matrix (updated each interval)

Source code in pypath/core/forcing.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
@dataclass
class DietRewiring:
    """Time-varying diet matrix driven by prey availability.

    Implements prey switching / adaptive foraging: predator diet
    proportions drift toward more abundant prey as the simulation runs.

    Attributes
    ----------
    enabled : bool
        Whether diet rewiring is active
    switching_power : float
        Prey switching exponent (higher = more switching)
    min_proportion : float
        Floor for diet proportions (prevents division by zero)
    update_interval : int
        Months between diet updates
    base_diet : np.ndarray
        Original diet matrix (n_prey x n_pred)
    current_diet : np.ndarray
        Working diet matrix, refreshed at each update
    """

    enabled: bool = False
    switching_power: float = 2.0
    min_proportion: float = 0.001
    update_interval: int = 12  # Monthly
    base_diet: Optional[np.ndarray] = None
    current_diet: Optional[np.ndarray] = None

    def initialize(self, diet_matrix: np.ndarray):
        """Record the supplied matrix as both baseline and working diet.

        Parameters
        ----------
        diet_matrix : np.ndarray
            Base diet proportions (n_prey x n_pred)
        """
        # Independent copies so mutating one never affects the other.
        self.current_diet = diet_matrix.copy()
        self.base_diet = diet_matrix.copy()

    def update_diet(
        self, prey_biomass: np.ndarray, predator_idx: Optional[int] = None
    ) -> np.ndarray:
        """Shift diet preferences toward more abundant prey.

        Parameters
        ----------
        prey_biomass : np.ndarray
            Current biomass of all prey groups
        predator_idx : int, optional
            Update only this predator (None = update all)

        Returns
        -------
        np.ndarray
            Updated diet matrix (None when rewiring is disabled)

        Notes
        -----
        The prey switching model:

        new_diet[prey, pred] = base_diet[prey, pred] * (biomass[prey] / B_ref[prey])^power

        Then normalize so sum of diet = 1 for each predator.

        Higher switching_power = stronger response to biomass changes.
        """
        if not self.enabled:
            return None
        if self.base_diet is None:
            # Never initialized: nothing to rewire.
            return self.current_diet

        n_prey, n_pred = self.base_diet.shape
        updated = self.current_diet.copy()

        # Clamp availability so near-zero biomasses cannot blow up ratios;
        # only the first n_prey entries match the diet matrix rows.
        availability = np.maximum(prey_biomass[:n_prey], self.min_proportion)

        targets = [predator_idx] if predator_idx is not None else range(n_pred)

        for col in targets:
            prefs = self.base_diet[:, col]

            # Only rewire links that carry a meaningful base preference.
            mask = prefs > self.min_proportion
            if not np.any(mask):
                continue

            # Switching model: scale each base preference by relative
            # availability raised to the switching power.
            shifted = prefs.copy()
            shifted[mask] = prefs[mask] * np.power(
                availability[mask] / np.mean(availability[mask]),
                self.switching_power,
            )

            # Keep every proportion above the floor, then renormalize.
            shifted = np.maximum(shifted, self.min_proportion)
            column_total = np.sum(shifted)
            if column_total > 0:
                updated[:, col] = shifted / column_total

        self.current_diet = updated
        return updated

    def reset(self):
        """Restore the working diet to the initialized baseline."""
        if self.base_diet is not None:
            self.current_diet = self.base_diet.copy()
initialize
initialize(diet_matrix: ndarray)

Initialize with base diet matrix.

Parameters:

Name Type Description Default
diet_matrix ndarray

Base diet proportions (n_prey x n_pred)

required
Source code in pypath/core/forcing.py
285
286
287
288
289
290
291
292
293
294
def initialize(self, diet_matrix: np.ndarray):
    """Record the supplied matrix as both baseline and working diet.

    Parameters
    ----------
    diet_matrix : np.ndarray
        Base diet proportions (n_prey x n_pred)
    """
    # Independent copies so mutating one never affects the other.
    self.current_diet = diet_matrix.copy()
    self.base_diet = diet_matrix.copy()
reset
reset()

Reset diet to base values.

Source code in pypath/core/forcing.py
373
374
375
376
def reset(self):
    """Reset diet to base values."""
    # No baseline recorded yet -> nothing to restore.
    if self.base_diet is None:
        return
    self.current_diet = self.base_diet.copy()
update_diet
update_diet(prey_biomass: ndarray, predator_idx: Optional[int] = None) -> np.ndarray

Update diet preferences based on prey availability.

Uses a prey switching model where diet preferences shift toward more abundant prey species.

Parameters:

Name Type Description Default
prey_biomass ndarray

Current biomass of all prey groups

required
predator_idx int

Update only this predator (None = update all)

None

Returns:

Type Description
ndarray

Updated diet matrix

Notes

The prey switching model:

new_diet[prey, pred] = base_diet[prey, pred] * (biomass[prey] / B_ref[prey])^power

Then normalize so sum of diet = 1 for each predator.

Higher switching_power = stronger response to biomass changes.

Source code in pypath/core/forcing.py
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
def update_diet(
    self, prey_biomass: np.ndarray, predator_idx: Optional[int] = None
) -> np.ndarray:
    """Shift diet preferences toward more abundant prey.

    Parameters
    ----------
    prey_biomass : np.ndarray
        Current biomass of all prey groups
    predator_idx : int, optional
        Update only this predator (None = update all)

    Returns
    -------
    np.ndarray
        Updated diet matrix (None when rewiring is disabled)

    Notes
    -----
    The prey switching model:

    new_diet[prey, pred] = base_diet[prey, pred] * (biomass[prey] / B_ref[prey])^power

    Then normalize so sum of diet = 1 for each predator.

    Higher switching_power = stronger response to biomass changes.
    """
    if not self.enabled:
        return None
    if self.base_diet is None:
        # Never initialized: nothing to rewire.
        return self.current_diet

    n_prey, n_pred = self.base_diet.shape
    updated = self.current_diet.copy()

    # Clamp availability so near-zero biomasses cannot blow up ratios;
    # only the first n_prey entries match the diet matrix rows.
    availability = np.maximum(prey_biomass[:n_prey], self.min_proportion)

    targets = [predator_idx] if predator_idx is not None else range(n_pred)

    for col in targets:
        prefs = self.base_diet[:, col]

        # Only rewire links that carry a meaningful base preference.
        mask = prefs > self.min_proportion
        if not np.any(mask):
            continue

        # Switching model: scale each base preference by relative
        # availability raised to the switching power.
        shifted = prefs.copy()
        shifted[mask] = prefs[mask] * np.power(
            availability[mask] / np.mean(availability[mask]),
            self.switching_power,
        )

        # Keep every proportion above the floor, then renormalize.
        shifted = np.maximum(shifted, self.min_proportion)
        column_total = np.sum(shifted)
        if column_total > 0:
            updated[:, col] = shifted / column_total

    self.current_diet = updated
    return updated

ForcingFunction dataclass

Single forcing function for a state variable.

Attributes:

Name Type Description
group_idx int

Index of group to force (-1 for all groups)

variable StateVariable

Which state variable to force

mode ForcingMode

How to apply the forcing

time_series ndarray

Time series of forced values (years)

years ndarray

Year indices corresponding to time_series

interpolate bool

Whether to interpolate between annual values

active bool

Whether this forcing is currently active

Source code in pypath/core/forcing.py
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
@dataclass
class ForcingFunction:
    """One forcing function targeting a single state variable.

    Attributes
    ----------
    group_idx : int
        Index of group to force (-1 for all groups)
    variable : StateVariable
        Which state variable to force
    mode : ForcingMode
        How the forced value is combined with the computed value
    time_series : np.ndarray
        Forced values, one per entry of `years`
    years : np.ndarray
        Year indices corresponding to time_series
    interpolate : bool
        Interpolate linearly between annual values (else nearest year)
    active : bool
        Whether this forcing currently applies
    """

    group_idx: int
    variable: StateVariable
    mode: ForcingMode
    time_series: np.ndarray
    years: np.ndarray
    interpolate: bool = True
    active: bool = True

    def get_value(self, year: float) -> float:
        """Return the forced value at *year*, or NaN when none applies.

        Parameters
        ----------
        year : float
            Simulation year (may be fractional for monthly time steps)

        Returns
        -------
        float
            Forced value, or NaN if inactive / outside the forced window
        """
        if not self.active:
            return np.nan

        # No extrapolation: outside the supplied range means no forcing.
        if year < self.years[0] or year > self.years[-1]:
            return np.nan

        if not self.interpolate:
            # Nearest-year lookup when interpolation is disabled.
            nearest = int(np.argmin(np.abs(self.years - year)))
            return self.time_series[nearest]

        return np.interp(year, self.years, self.time_series)
get_value
get_value(year: float) -> float

Get forced value at given year (with interpolation).

Parameters:

Name Type Description Default
year float

Simulation year (can be fractional for monthly time steps)

required

Returns:

Type Description
float

Forced value at this time

Source code in pypath/core/forcing.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def get_value(self, year: float) -> float:
    """Return the forced value at *year*, or NaN when none applies.

    Parameters
    ----------
    year : float
        Simulation year (may be fractional for monthly time steps)

    Returns
    -------
    float
        Forced value, or NaN if inactive / outside the forced window
    """
    if not self.active:
        return np.nan

    # No extrapolation: outside the supplied range means no forcing.
    if year < self.years[0] or year > self.years[-1]:
        return np.nan

    if not self.interpolate:
        # Nearest-year lookup when interpolation is disabled.
        nearest = int(np.argmin(np.abs(self.years - year)))
        return self.time_series[nearest]

    return np.interp(year, self.years, self.time_series)

ForcingMode

Bases: Enum

Mode for applying forced values.

Source code in pypath/core/forcing.py
24
25
26
27
28
29
30
class ForcingMode(Enum):
    """How a forced value is combined with the computed state value."""

    # Replace state variable with forced value
    REPLACE = "replace"
    # Add forced value to computed value
    ADD = "add"
    # Multiply computed value by forced value
    MULTIPLY = "multiply"
    # Rescale to match forced value
    RESCALE = "rescale"

StateForcing dataclass

Collection of forcing functions for state variables.

Attributes:

Name Type Description
functions list[ForcingFunction]

List of individual forcing functions

Source code in pypath/core/forcing.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
@dataclass
class StateForcing:
    """Collection of forcing functions for state variables.

    Attributes
    ----------
    functions : list[ForcingFunction]
        List of individual forcing functions
    """

    functions: List[ForcingFunction] = field(default_factory=list)

    def add_forcing(
        self,
        group_idx: int,
        variable: Union[str, StateVariable],
        time_series: Union[np.ndarray, pd.Series, Dict[int, float]],
        years: Optional[np.ndarray] = None,
        mode: Union[str, ForcingMode] = ForcingMode.REPLACE,
        interpolate: bool = True,
    ):
        """Add a forcing function.

        Parameters
        ----------
        group_idx : int
            Index of group to force
        variable : str or StateVariable
            Which state variable to force
        time_series : array-like or dict
            Time series of forced values
            If dict, keys are years and values are forced values
        years : array-like, optional
            Year indices (required if time_series is array)
        mode : str or ForcingMode
            How to apply forcing ("replace", "add", "multiply", "rescale")
        interpolate : bool
            Whether to interpolate between annual values

        Raises
        ------
        ValueError
            If years is missing for array input, or if years and
            time_series have different lengths.

        Examples
        --------
        >>> forcing = StateForcing()
        >>> # Force phytoplankton biomass to observed series
        >>> forcing.add_forcing(
        ...     group_idx=0,
        ...     variable='biomass',
        ...     time_series={2000: 15.0, 2001: 18.0, 2002: 16.0},
        ...     mode='replace'
        ... )
        >>>
        >>> # Add recruitment pulse for herring in 2005
        >>> forcing.add_forcing(
        ...     group_idx=3,
        ...     variable='recruitment',
        ...     time_series={2005: 2.5},  # 2.5x normal recruitment
        ...     mode='multiply'
        ... )
        """
        # Convert variable to enum
        if isinstance(variable, str):
            variable = StateVariable(variable.lower())

        # Convert mode to enum
        if isinstance(mode, str):
            mode = ForcingMode(mode.lower())

        # Handle dict / Series / array input
        if isinstance(time_series, dict):
            years = np.array(list(time_series.keys()))
            time_series = np.array(list(time_series.values()))
        elif isinstance(time_series, pd.Series):
            years = time_series.index.values
            time_series = time_series.values
        else:
            time_series = np.asarray(time_series)

        if years is None:
            raise ValueError("years must be provided if time_series is not a dict")

        years = np.asarray(years)

        if len(years) != len(time_series):
            raise ValueError("years and time_series must have the same length")

        # BUG FIX: ForcingFunction.get_value uses np.interp, which requires
        # the sample points to be increasing and silently returns nonsense
        # otherwise.  Dict keys or user-supplied arrays are not guaranteed
        # sorted, so sort years and values together here.
        order = np.argsort(years)
        years = years[order]
        time_series = np.asarray(time_series)[order]

        # Create forcing function
        func = ForcingFunction(
            group_idx=group_idx,
            variable=variable,
            mode=mode,
            time_series=time_series,
            years=years,
            interpolate=interpolate,
            active=True,
        )

        self.functions.append(func)

    def get_forcing(
        self, year: float, variable: StateVariable, group_idx: Optional[int] = None
    ) -> List[Tuple[ForcingFunction, float]]:
        """Get all active forcing values for a variable at given time.

        Parameters
        ----------
        year : float
            Current simulation year
        variable : StateVariable
            Which state variable to query
        group_idx : int, optional
            Specific group index (None = all groups)

        Returns
        -------
        list of (ForcingFunction, float)
            List of (forcing function, forced value) tuples
        """
        results = []

        for func in self.functions:
            if not func.active or func.variable != variable:
                continue

            # Check if this forcing applies to the group
            # (group_idx == -1 means the forcing targets all groups)
            if group_idx is not None:
                if func.group_idx != -1 and func.group_idx != group_idx:
                    continue

            value = func.get_value(year)
            # NaN signals "no forcing at this time" - skip it
            if not np.isnan(value):
                results.append((func, value))

        return results

    def remove_forcing(self, group_idx: int, variable: Union[str, StateVariable]):
        """Remove forcing for a specific group and variable.

        Parameters
        ----------
        group_idx : int
            Index of group
        variable : str or StateVariable
            Which state variable
        """
        if isinstance(variable, str):
            variable = StateVariable(variable.lower())

        # Keep every function that does NOT match (group, variable)
        self.functions = [
            f
            for f in self.functions
            if not (f.group_idx == group_idx and f.variable == variable)
        ]
add_forcing
add_forcing(group_idx: int, variable: Union[str, StateVariable], time_series: Union[ndarray, Series, Dict[int, float]], years: Optional[ndarray] = None, mode: Union[str, ForcingMode] = ForcingMode.REPLACE, interpolate: bool = True)

Add a forcing function.

Parameters:

Name Type Description Default
group_idx int

Index of group to force

required
variable str or StateVariable

Which state variable to force

required
time_series array - like or dict

Time series of forced values If dict, keys are years and values are forced values

required
years array - like

Year indices (required if time_series is array)

None
mode str or ForcingMode

How to apply forcing ("replace", "add", "multiply", "rescale")

REPLACE
interpolate bool

Whether to interpolate between annual values

True

Examples:

>>> forcing = StateForcing()
>>> # Force phytoplankton biomass to observed series
>>> forcing.add_forcing(
...     group_idx=0,
...     variable='biomass',
...     time_series={2000: 15.0, 2001: 18.0, 2002: 16.0},
...     mode='replace'
... )
>>>
>>> # Add recruitment pulse for herring in 2005
>>> forcing.add_forcing(
...     group_idx=3,
...     variable='recruitment',
...     time_series={2005: 2.5},  # 2.5x normal recruitment
...     mode='multiply'
... )
Source code in pypath/core/forcing.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
def add_forcing(
    self,
    group_idx: int,
    variable: Union[str, StateVariable],
    time_series: Union[np.ndarray, pd.Series, Dict[int, float]],
    years: Optional[np.ndarray] = None,
    mode: Union[str, ForcingMode] = ForcingMode.REPLACE,
    interpolate: bool = True,
):
    """Add a forcing function.

    Parameters
    ----------
    group_idx : int
        Index of group to force
    variable : str or StateVariable
        Which state variable to force
    time_series : array-like or dict
        Time series of forced values
        If dict, keys are years and values are forced values
    years : array-like, optional
        Year indices (required if time_series is a plain array; for
        dict and pd.Series input the years come from the keys/index)
    mode : str or ForcingMode
        How to apply forcing ("replace", "add", "multiply", "rescale")
    interpolate : bool
        Whether to interpolate between annual values

    Raises
    ------
    ValueError
        If years is missing for plain-array input, or if years and
        time_series have different lengths.

    Examples
    --------
    >>> forcing = StateForcing()
    >>> # Force phytoplankton biomass to observed series
    >>> forcing.add_forcing(
    ...     group_idx=0,
    ...     variable='biomass',
    ...     time_series={2000: 15.0, 2001: 18.0, 2002: 16.0},
    ...     mode='replace'
    ... )
    >>>
    >>> # Add recruitment pulse for herring in 2005
    >>> forcing.add_forcing(
    ...     group_idx=3,
    ...     variable='recruitment',
    ...     time_series={2005: 2.5},  # 2.5x normal recruitment
    ...     mode='multiply'
    ... )
    """
    # Convert variable to enum
    if isinstance(variable, str):
        variable = StateVariable(variable.lower())

    # Convert mode to enum
    if isinstance(mode, str):
        mode = ForcingMode(mode.lower())

    # Handle dict/Series input: both carry their own year axis
    if isinstance(time_series, dict):
        years = np.array(list(time_series.keys()))
        time_series = np.array(list(time_series.values()))
    elif isinstance(time_series, pd.Series):
        years = time_series.index.values
        time_series = time_series.values
    else:
        time_series = np.asarray(time_series)

    if years is None:
        # Fix: dict AND pd.Series inputs supply their own years; only a
        # plain array requires the caller to pass them explicitly.
        raise ValueError(
            "years must be provided if time_series is not a dict or pd.Series"
        )

    years = np.asarray(years)

    # Fix: reject silently misaligned inputs instead of deferring the
    # failure (or a wrong interpolation) to simulation time.
    if len(years) != len(time_series):
        raise ValueError(
            f"years has {len(years)} entries but time_series has "
            f"{len(time_series)}; they must be the same length"
        )

    # Create forcing function
    func = ForcingFunction(
        group_idx=group_idx,
        variable=variable,
        mode=mode,
        time_series=time_series,
        years=years,
        interpolate=interpolate,
        active=True,
    )

    self.functions.append(func)
get_forcing
get_forcing(year: float, variable: StateVariable, group_idx: Optional[int] = None) -> List[Tuple[ForcingFunction, float]]

Get all active forcing values for a variable at given time.

Parameters:

Name Type Description Default
year float

Current simulation year

required
variable StateVariable

Which state variable to query

required
group_idx int

Specific group index (None = all groups)

None

Returns:

Type Description
list of (ForcingFunction, float)

List of (forcing function, forced value) tuples

Source code in pypath/core/forcing.py
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
def get_forcing(
    self, year: float, variable: StateVariable, group_idx: Optional[int] = None
) -> List[Tuple[ForcingFunction, float]]:
    """Collect all active forcing values for one variable at a point in time.

    Parameters
    ----------
    year : float
        Current simulation year
    variable : StateVariable
        Which state variable to query
    group_idx : int, optional
        Specific group index (None = all groups)

    Returns
    -------
    list of (ForcingFunction, float)
        Matching (forcing function, forced value) pairs
    """
    matches = []

    for forcing in self.functions:
        # Skip disabled functions and those targeting other variables
        if forcing.variable != variable or not forcing.active:
            continue

        # group_idx == -1 on a function acts as a wildcard; otherwise
        # a requested group must match the function's group exactly
        if group_idx is not None:
            applies = forcing.group_idx == -1 or forcing.group_idx == group_idx
            if not applies:
                continue

        forced_value = forcing.get_value(year)
        if not np.isnan(forced_value):
            matches.append((forcing, forced_value))

    return matches
remove_forcing
remove_forcing(group_idx: int, variable: Union[str, StateVariable])

Remove forcing for a specific group and variable.

Parameters:

Name Type Description Default
group_idx int

Index of group

required
variable str or StateVariable

Which state variable

required
Source code in pypath/core/forcing.py
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
def remove_forcing(self, group_idx: int, variable: Union[str, StateVariable]):
    """Drop any forcing registered for the given group and variable.

    Parameters
    ----------
    group_idx : int
        Index of group
    variable : str or StateVariable
        Which state variable
    """
    # Accept the same string spelling that add_forcing does
    if isinstance(variable, str):
        variable = StateVariable(variable.lower())

    kept = []
    for func in self.functions:
        targets_pair = func.group_idx == group_idx and func.variable == variable
        if not targets_pair:
            kept.append(func)
    self.functions = kept

StateVariable

Bases: Enum

State variables that can be forced.

Source code in pypath/core/forcing.py
33
34
35
36
37
38
39
40
41
42
class StateVariable(Enum):
    """State variables that can be forced.

    Each member's value is the lowercase string form accepted by the
    string-based forcing APIs (e.g. ``add_forcing(variable='biomass')``,
    which resolves it via ``StateVariable(variable.lower())``).
    """

    BIOMASS = "biomass"
    CATCH = "catch"
    FISHING_MORTALITY = "fishing_mortality"
    RECRUITMENT = "recruitment"
    MORTALITY = "mortality"
    MIGRATION = "migration"
    PRIMARY_PRODUCTION = "primary_production"

create_biomass_forcing

create_biomass_forcing(group_idx: int, observed_biomass: Union[ndarray, Series, Dict[int, float]], years: Optional[ndarray] = None, mode: str = 'replace', interpolate: bool = True) -> StateForcing

Convenience function to create biomass forcing.

Parameters:

Name Type Description Default
group_idx int

Index of group to force

required
observed_biomass : array-like or dict

Observed biomass time series

required
years array - like

Year indices

None
mode str

Forcing mode ("replace", "add", "multiply")

'replace'
interpolate bool

Whether to interpolate monthly values

True

Returns:

Type Description
StateForcing

Forcing object ready to use

Examples:

>>> # Force phytoplankton to observed biomass
>>> forcing = create_biomass_forcing(
...     group_idx=0,
...     observed_biomass={2000: 15.0, 2005: 18.0, 2010: 16.0},
...     mode='replace'
... )
Source code in pypath/core/forcing.py
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
def create_biomass_forcing(
    group_idx: int,
    observed_biomass: Union[np.ndarray, pd.Series, Dict[int, float]],
    years: Optional[np.ndarray] = None,
    mode: str = "replace",
    interpolate: bool = True,
) -> StateForcing:
    """Build a StateForcing that drives one group's biomass.

    Thin convenience wrapper around ``StateForcing.add_forcing`` with
    ``variable=StateVariable.BIOMASS``.

    Parameters
    ----------
    group_idx : int
        Index of group to force
    observed_biomass : array-like or dict
        Observed biomass time series
    years : array-like, optional
        Year indices
    mode : str
        Forcing mode ("replace", "add", "multiply")
    interpolate : bool
        Whether to interpolate between time points

    Returns
    -------
    StateForcing
        Forcing object ready to use

    Examples
    --------
    >>> # Force phytoplankton to observed biomass
    >>> forcing = create_biomass_forcing(
    ...     group_idx=0,
    ...     observed_biomass={2000: 15.0, 2005: 18.0, 2010: 16.0},
    ...     mode='replace'
    ... )
    """
    result = StateForcing()
    result.add_forcing(
        group_idx=group_idx,
        variable=StateVariable.BIOMASS,
        time_series=observed_biomass,
        years=years,
        mode=mode,
        interpolate=interpolate,
    )
    return result

create_diet_rewiring

create_diet_rewiring(switching_power: float = 2.0, min_proportion: float = 0.001, update_interval: int = 12) -> DietRewiring

Convenience function to create diet rewiring configuration.

Parameters:

Name Type Description Default
switching_power float

Prey switching exponent (1.0 = proportional, >1 = switching)

2.0
min_proportion float

Minimum diet proportion to maintain

0.001
update_interval int

How often to update diet (months)

12

Returns:

Type Description
DietRewiring

Diet rewiring object ready to use

Examples:

>>> # Enable strong prey switching
>>> rewiring = create_diet_rewiring(switching_power=3.0)
Source code in pypath/core/forcing.py
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
def create_diet_rewiring(
    switching_power: float = 2.0,
    min_proportion: float = 0.001,
    update_interval: int = 12,
) -> DietRewiring:
    """Build an enabled DietRewiring configuration.

    Parameters
    ----------
    switching_power : float
        Prey switching exponent (1.0 = proportional, >1 = switching)
    min_proportion : float
        Minimum diet proportion to maintain
    update_interval : int
        How often to update diet (months)

    Returns
    -------
    DietRewiring
        Diet rewiring object ready to use

    Examples
    --------
    >>> # Enable strong prey switching
    >>> rewiring = create_diet_rewiring(switching_power=3.0)
    """
    config = DietRewiring(
        enabled=True,
        switching_power=switching_power,
        min_proportion=min_proportion,
        update_interval=update_interval,
    )
    return config

create_recruitment_forcing

create_recruitment_forcing(group_idx: int, recruitment_multiplier: Union[ndarray, Dict[int, float]], years: Optional[ndarray] = None, interpolate: bool = False) -> StateForcing

Convenience function to create recruitment forcing.

Parameters:

Name Type Description Default
group_idx int

Index of group to force

required
recruitment_multiplier array - like or dict

Recruitment multiplier (1.0 = normal, 2.0 = double, etc.)

required
years array - like

Year indices

None
interpolate bool

Whether to interpolate (usually False for recruitment pulses)

False

Returns:

Type Description
StateForcing

Forcing object ready to use

Examples:

>>> # Strong recruitment in 2005, weak in 2010
>>> forcing = create_recruitment_forcing(
...     group_idx=3,
...     recruitment_multiplier={2005: 3.0, 2010: 0.5}
... )
Source code in pypath/core/forcing.py
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
def create_recruitment_forcing(
    group_idx: int,
    recruitment_multiplier: Union[np.ndarray, Dict[int, float]],
    years: Optional[np.ndarray] = None,
    interpolate: bool = False,
) -> StateForcing:
    """Build a StateForcing that scales one group's recruitment.

    Thin convenience wrapper around ``StateForcing.add_forcing`` with
    ``variable=StateVariable.RECRUITMENT`` and multiplicative mode.

    Parameters
    ----------
    group_idx : int
        Index of group to force
    recruitment_multiplier : array-like or dict
        Recruitment multiplier (1.0 = normal, 2.0 = double, etc.)
    years : array-like, optional
        Year indices
    interpolate : bool
        Whether to interpolate (usually False for recruitment pulses)

    Returns
    -------
    StateForcing
        Forcing object ready to use

    Examples
    --------
    >>> # Strong recruitment in 2005, weak in 2010
    >>> forcing = create_recruitment_forcing(
    ...     group_idx=3,
    ...     recruitment_multiplier={2005: 3.0, 2010: 0.5}
    ... )
    """
    result = StateForcing()
    result.add_forcing(
        group_idx=group_idx,
        variable=StateVariable.RECRUITMENT,
        time_series=recruitment_multiplier,
        years=years,
        mode=ForcingMode.MULTIPLY,
        interpolate=interpolate,
    )
    return result

Optimization

pypath.core.optimization

Bayesian optimization for Ecosim parameter calibration.

This module provides tools to optimize Ecosim parameters to match observed time series data using Bayesian optimization with Gaussian Processes.

EcosimOptimizer

Bayesian optimizer for Ecosim parameters.

Calibrates Ecosim parameters to match observed biomass time series using Bayesian optimization with Gaussian Processes.

Parameters:

Name Type Description Default
model Rpath

Balanced Ecopath model

required
params RpathParams

Model parameters

required
observed_data dict

Dictionary mapping group indices to observed biomass time series Example: {1: np.array([1.0, 1.2, 1.1, ...]), 2: np.array([0.5, 0.6, ...])}

required
years range

Years to simulate

required
objective str or callable

Objective function to minimize. Options: - 'mse': Mean squared error - 'mape': Mean absolute percentage error - 'nrmse': Normalized root mean squared error - 'loglik': Negative log-likelihood - callable: Custom function(y_true, y_pred) -> float

'mse'
verbose bool

Print optimization progress

True

Attributes:

Name Type Description
model Rpath

Ecopath model

params RpathParams

Model parameters

observed_data dict

Observed time series

years range

Simulation years

objective_func callable

Objective function

verbose bool

Verbosity flag

n_calls int

Number of function evaluations performed

Source code in pypath/core/optimization.py
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
class EcosimOptimizer:
    """Bayesian optimizer for Ecosim parameters.

    Calibrates Ecosim parameters to match observed biomass time series using
    Bayesian optimization with Gaussian Processes.

    Parameters
    ----------
    model : Rpath
        Balanced Ecopath model
    params : RpathParams
        Model parameters
    observed_data : dict
        Dictionary mapping group indices to observed biomass time series
        Example: {1: np.array([1.0, 1.2, 1.1, ...]), 2: np.array([0.5, 0.6, ...])}
    years : range
        Years to simulate
    objective : str or callable
        Objective function to minimize. Options:
        - 'mse': Mean squared error
        - 'mape': Mean absolute percentage error
        - 'nrmse': Normalized root mean squared error
        - 'loglik': Negative log-likelihood
        - callable: Custom function(y_true, y_pred) -> float
    verbose : bool
        Print optimization progress

    Attributes
    ----------
    model : Rpath
        Ecopath model
    params : RpathParams
        Model parameters
    observed_data : dict
        Observed time series
    years : range
        Simulation years
    objective_func : callable
        Objective function
    verbose : bool
        Verbosity flag
    n_calls : int
        Number of function evaluations performed
    """

    def __init__(
        self,
        model: Rpath,
        params: RpathParams,
        observed_data: Dict[int, np.ndarray],
        years: range,
        objective: str = "mse",  # also accepts a callable(y_true, y_pred) -> float
        verbose: bool = True,
    ):
        # skopt is an optional dependency; fail loudly with install hint
        if not HAS_SKOPT:
            raise ImportError(
                "scikit-optimize is required for Bayesian optimization. "
                "Install with: pip install scikit-optimize"
            )

        self.model = model
        self.params = params
        self.observed_data = observed_data
        self.years = years
        self.verbose = verbose
        self.n_calls = 0

        # Set objective function
        if isinstance(objective, str):
            objective_funcs = {
                "mse": mean_squared_error,
                "mape": mean_absolute_percentage_error,
                "nrmse": normalized_root_mean_squared_error,
                "loglik": log_likelihood,
            }
            if objective not in objective_funcs:
                raise ValueError(
                    f"Unknown objective: {objective}. Choose from {list(objective_funcs.keys())}"
                )
            self.objective_func = objective_funcs[objective]
        else:
            # Non-string objectives are used directly as callables
            self.objective_func = objective

        # Validate observed data
        self._validate_observed_data()

    def _validate_observed_data(self) -> None:
        """Validate observed data format and dimensions.

        Raises
        ------
        ValueError
            If any observed series length differs from the number of
            simulation years, or a group index is outside [1, NUM_LIVING].
        """
        n_years = len(self.years)
        for group_idx, data in self.observed_data.items():
            if len(data) != n_years:
                raise ValueError(
                    f"Observed data for group {group_idx} has {len(data)} points "
                    f"but simulation has {n_years} years"
                )
            # Index 0 is reserved ("Outside"); living groups are 1..NUM_LIVING
            if group_idx < 1 or group_idx > self.model.NUM_LIVING:
                raise ValueError(
                    f"Group index {group_idx} out of range [1, {self.model.NUM_LIVING}]"
                )

    def _run_simulation(self, param_dict: Dict[str, float]) -> Optional[Dict[int, np.ndarray]]:
        """Run Ecosim simulation with given parameters.

        Parameters
        ----------
        param_dict : dict
            Dictionary of parameter values

        Returns
        -------
        dict of {int: np.ndarray} or None
            Simulated biomass series for each observed group, or None if
            the simulation raised (callers treat None as a penalty case)
        """
        # Create scenario with parameters
        scenario = rsim_scenario(self.model, self.params, years=self.years)

        # Update parameters
        for param_name, value in param_dict.items():
            self._update_scenario_parameter(scenario, param_name, value)

        # Run simulation
        try:
            result = rsim_run(scenario, method="RK4")

            # Extract biomass for observed groups
            simulated = {}
            for group_idx in self.observed_data.keys():
                # Annual biomass: shape (n_years, n_groups+1)
                # Group indices start at 1 (0 is "Outside")
                simulated[group_idx] = result.annual_Biomass[:, group_idx]

            return simulated
        except Exception as e:
            if self.verbose:
                logger.error(f"Simulation failed with parameters {param_dict}: {e}")
            # Return high penalty for failed simulations
            return None

    def _update_scenario_parameter(
        self, scenario: RsimScenario, param_name: str, value: float
    ) -> None:
        """Update a parameter in the scenario.

        Parameter names encode their target: a bare "vulnerability" sets
        all groups' VV; "<ARRAY>_<idx>" sets one entry of that array.

        Parameters
        ----------
        scenario : RsimScenario
            Simulation scenario
        param_name : str
            Parameter name (e.g. 'VV_1', 'QQ_5', 'vulnerability')
        value : float
            Parameter value

        Raises
        ------
        ValueError
            If param_name does not match any known pattern.
        """
        if param_name == "vulnerability":
            # Update base vulnerability for all groups
            scenario.params.VV[:] = value
        elif param_name.startswith("VV_"):
            # Update specific group vulnerability
            group_idx = int(param_name.split("_")[1])
            scenario.params.VV[group_idx] = value
        elif param_name.startswith("QQ_"):
            # Update specific link QQ
            link_idx = int(param_name.split("_")[1])
            scenario.params.QQ[link_idx] = value
        elif param_name.startswith("DD_"):
            # Update specific link DD
            link_idx = int(param_name.split("_")[1])
            scenario.params.DD[link_idx] = value
        elif param_name.startswith("PB_"):
            # Update specific group PB
            group_idx = int(param_name.split("_")[1])
            scenario.params.PBopt[group_idx] = value
        elif param_name.startswith("QB_"):
            # Update specific group QB
            group_idx = int(param_name.split("_")[1])
            scenario.params.QBopt[group_idx] = value
        else:
            raise ValueError(f"Unknown parameter: {param_name}")

    def _calculate_objective(self, simulated: Optional[Dict[int, np.ndarray]]) -> float:
        """Calculate objective function value.

        Parameters
        ----------
        simulated : dict or None
            Simulated biomass for each group (None = failed simulation)

        Returns
        -------
        float
            Objective function value (lower is better)
        """
        if simulated is None:
            return 1e10  # High penalty for failed simulations

        total_error = 0.0
        n_groups = len(self.observed_data)

        for group_idx, observed in self.observed_data.items():
            predicted = simulated[group_idx]
            error = self.objective_func(observed, predicted)
            total_error += error

        # Average across groups
        # NOTE(review): assumes at least one observed group; an empty
        # observed_data dict would divide by zero here.
        return total_error / n_groups

    def optimize(
        self,
        param_bounds: Dict[str, Tuple[float, float]],
        n_calls: int = 50,
        n_initial_points: int = 10,
        random_state: int = 42,
    ) -> OptimizationResult:
        """Run Bayesian optimization.

        Parameters
        ----------
        param_bounds : dict
            Dictionary mapping parameter names to (min, max) bounds
            Example: {'vulnerability': (1.0, 5.0), 'VV_3': (1.0, 10.0)}
        n_calls : int
            Number of function evaluations
        n_initial_points : int
            Number of random initial points before Bayesian optimization
        random_state : int
            Random seed for reproducibility

        Returns
        -------
        OptimizationResult
            Optimization results including best parameters and convergence
        """
        import time

        start_time = time.time()

        # Define search space: one continuous dimension per parameter
        dimensions = []
        param_names = []
        for name, (low, high) in param_bounds.items():
            dimensions.append(Real(low, high, name=name))
            param_names.append(name)

        # Store all evaluations
        all_params_list = []
        all_scores = []

        # Define objective function for skopt
        # (use_named_args maps skopt's positional point to keyword args)
        @use_named_args(dimensions)
        def objective(**params):
            self.n_calls += 1

            if self.verbose:
                logger.info(f"=== Iteration {self.n_calls}/{n_calls} ===")
                logger.info(
                    "Parameters: %s", {k: f"{v:.4f}" for k, v in params.items()}
                )

            # Run simulation
            simulated = self._run_simulation(params)

            # Calculate objective
            score = self._calculate_objective(simulated)

            if self.verbose:
                logger.info(f"Objective: {score:.6f}")

            # Store results
            all_params_list.append(params.copy())
            all_scores.append(score)

            return score

        # Run optimization
        if self.verbose:
            logger.info(f"Starting Bayesian optimization with {n_calls} evaluations...")
            logger.info(f"Optimizing parameters: {list(param_bounds.keys())}")
            logger.info(f"Observed groups: {list(self.observed_data.keys())}")
            logger.info(f"Simulation years: {len(self.years)}")

        # Reset the call counter so iteration logging starts from 1
        self.n_calls = 0
        result = gp_minimize(
            objective,
            dimensions,
            n_calls=n_calls,
            n_initial_points=n_initial_points,
            random_state=random_state,
            verbose=False,
        )

        optimization_time = time.time() - start_time

        # Extract results
        best_params = {name: value for name, value in zip(param_names, result.x)}
        best_score = result.fun
        # Running minimum: best score seen up to each iteration
        convergence = [np.min(all_scores[: i + 1]) for i in range(len(all_scores))]

        if self.verbose:
            logger.info(f"{'=' * 60}")
            logger.info("OPTIMIZATION COMPLETE")
            logger.info(f"{'=' * 60}")
            logger.info(f"Best score: {best_score:.6f}")
            logger.info("Best parameters:")
            for name, value in best_params.items():
                logger.info(f"  {name}: {value:.4f}")
            logger.info(f"Total evaluations: {self.n_calls}")
            logger.info(f"Optimization time: {optimization_time:.2f} seconds")
            logger.info(f"{'=' * 60}")

        return OptimizationResult(
            best_params=best_params,
            best_score=best_score,
            n_iterations=self.n_calls,
            convergence=convergence,
            all_params=all_params_list,
            all_scores=all_scores,
            optimization_time=optimization_time,
        )

    def validate(
        self,
        params: Dict[str, float],
        test_data: Optional[Dict[int, np.ndarray]] = None,
    ) -> Dict[str, Any]:
        """Validate optimized parameters on test data.

        Parameters
        ----------
        params : dict
            Parameter values to validate
        test_data : dict, optional
            Test data. If None, uses training data (self.observed_data)

        Returns
        -------
        dict
            Validation metrics for each group and overall
        """
        if test_data is None:
            test_data = self.observed_data

        # Run simulation with optimized parameters
        simulated = self._run_simulation(params)

        if simulated is None:
            return {"error": "Simulation failed"}

        # Calculate metrics for each group
        results: Dict[str, Any] = {}
        for group_idx, observed in test_data.items():
            predicted = simulated[group_idx]

            results[f"group_{group_idx}"] = {
                "mse": mean_squared_error(observed, predicted),
                "mape": mean_absolute_percentage_error(observed, predicted),
                "nrmse": normalized_root_mean_squared_error(observed, predicted),
                "correlation": np.corrcoef(observed, predicted)[0, 1],
            }

        # Calculate overall metrics over all groups' series pooled together
        all_observed = np.concatenate([test_data[idx] for idx in test_data.keys()])
        all_predicted = np.concatenate([simulated[idx] for idx in test_data.keys()])

        results["overall"] = {
            "mse": mean_squared_error(all_observed, all_predicted),
            "mape": mean_absolute_percentage_error(all_observed, all_predicted),
            "nrmse": normalized_root_mean_squared_error(all_observed, all_predicted),
            "correlation": np.corrcoef(all_observed, all_predicted)[0, 1],
        }

        return results
optimize
optimize(param_bounds: Dict[str, Tuple[float, float]], n_calls: int = 50, n_initial_points: int = 10, random_state: int = 42) -> OptimizationResult

Run Bayesian optimization.

Parameters:

Name Type Description Default
param_bounds dict

Dictionary mapping parameter names to (min, max) bounds Example: {'vulnerability': (1.0, 5.0), 'VV_3': (1.0, 10.0)}

required
n_calls int

Number of function evaluations

50
n_initial_points int

Number of random initial points before Bayesian optimization

10
random_state int

Random seed for reproducibility

42

Returns:

Type Description
OptimizationResult

Optimization results including best parameters and convergence

Source code in pypath/core/optimization.py
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
def optimize(
    self,
    param_bounds: Dict[str, Tuple[float, float]],
    n_calls: int = 50,
    n_initial_points: int = 10,
    random_state: int = 42,
) -> OptimizationResult:
    """Run Bayesian optimization.

    Parameters
    ----------
    param_bounds : dict
        Dictionary mapping parameter names to (min, max) bounds
        Example: {'vulnerability': (1.0, 5.0), 'VV_3': (1.0, 10.0)}
    n_calls : int
        Number of function evaluations
    n_initial_points : int
        Number of random initial points before Bayesian optimization
    random_state : int
        Random seed for reproducibility

    Returns
    -------
    OptimizationResult
        Optimization results including best parameters and convergence
    """
    import time

    start_time = time.time()

    # Define search space: one continuous dimension per parameter
    dimensions = []
    param_names = []
    for name, (low, high) in param_bounds.items():
        dimensions.append(Real(low, high, name=name))
        param_names.append(name)

    # Store all evaluations
    all_params_list = []
    all_scores = []

    # Define objective function for skopt
    # (use_named_args maps skopt's positional point to keyword args)
    @use_named_args(dimensions)
    def objective(**params):
        self.n_calls += 1

        if self.verbose:
            logger.info(f"=== Iteration {self.n_calls}/{n_calls} ===")
            logger.info(
                "Parameters: %s", {k: f"{v:.4f}" for k, v in params.items()}
            )

        # Run simulation
        simulated = self._run_simulation(params)

        # Calculate objective
        score = self._calculate_objective(simulated)

        if self.verbose:
            logger.info(f"Objective: {score:.6f}")

        # Store results
        all_params_list.append(params.copy())
        all_scores.append(score)

        return score

    # Run optimization
    if self.verbose:
        logger.info(f"Starting Bayesian optimization with {n_calls} evaluations...")
        logger.info(f"Optimizing parameters: {list(param_bounds.keys())}")
        logger.info(f"Observed groups: {list(self.observed_data.keys())}")
        logger.info(f"Simulation years: {len(self.years)}")

    # Reset the call counter so iteration logging starts from 1
    self.n_calls = 0
    result = gp_minimize(
        objective,
        dimensions,
        n_calls=n_calls,
        n_initial_points=n_initial_points,
        random_state=random_state,
        verbose=False,
    )

    optimization_time = time.time() - start_time

    # Extract results
    best_params = {name: value for name, value in zip(param_names, result.x)}
    best_score = result.fun
    # Running minimum: best score seen up to each iteration
    convergence = [np.min(all_scores[: i + 1]) for i in range(len(all_scores))]

    if self.verbose:
        logger.info(f"{'=' * 60}")
        logger.info("OPTIMIZATION COMPLETE")
        logger.info(f"{'=' * 60}")
        logger.info(f"Best score: {best_score:.6f}")
        logger.info("Best parameters:")
        for name, value in best_params.items():
            logger.info(f"  {name}: {value:.4f}")
        logger.info(f"Total evaluations: {self.n_calls}")
        logger.info(f"Optimization time: {optimization_time:.2f} seconds")
        logger.info(f"{'=' * 60}")

    return OptimizationResult(
        best_params=best_params,
        best_score=best_score,
        n_iterations=self.n_calls,
        convergence=convergence,
        all_params=all_params_list,
        all_scores=all_scores,
        optimization_time=optimization_time,
    )
validate
validate(params: Dict[str, float], test_data: Optional[Dict[int, ndarray]] = None) -> Dict[str, Any]

Validate optimized parameters on test data.

Parameters:

Name Type Description Default
params dict

Parameter values to validate

required
test_data dict

Test data. If None, uses training data (self.observed_data)

None

Returns:

Type Description
dict

Validation metrics for each group and overall

Source code in pypath/core/optimization.py
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
def validate(
    self,
    params: Dict[str, float],
    test_data: Optional[Dict[int, np.ndarray]] = None,
) -> Dict[str, Any]:
    """Validate optimized parameters on test data.

    Runs one simulation with *params* and scores it against each
    observed group, plus an "overall" entry pooled across groups.

    Parameters
    ----------
    params : dict
        Parameter values to validate
    test_data : dict, optional
        Test data. If None, uses training data (self.observed_data)

    Returns
    -------
    dict
        Validation metrics for each group and overall; a dict with an
        "error" key if the simulation failed.
    """
    data = self.observed_data if test_data is None else test_data

    # Single simulation with the candidate parameter set.
    predicted_series = self._run_simulation(params)
    if predicted_series is None:
        return {"error": "Simulation failed"}

    def _metrics(obs: np.ndarray, pred: np.ndarray) -> Dict[str, float]:
        # One bundle of goodness-of-fit statistics for a pair of series.
        return {
            "mse": mean_squared_error(obs, pred),
            "mape": mean_absolute_percentage_error(obs, pred),
            "nrmse": normalized_root_mean_squared_error(obs, pred),
            "correlation": np.corrcoef(obs, pred)[0, 1],
        }

    # Per-group metrics, keyed as "group_<idx>".
    results: Dict[str, Any] = {
        f"group_{group_idx}": _metrics(observed, predicted_series[group_idx])
        for group_idx, observed in data.items()
    }

    # Pool every group into one pair of vectors for the overall metrics.
    pooled_obs = np.concatenate(list(data.values()))
    pooled_pred = np.concatenate([predicted_series[idx] for idx in data])
    results["overall"] = _metrics(pooled_obs, pooled_pred)

    return results

OptimizationResult dataclass

Results from Bayesian optimization.

Attributes:

Name Type Description
best_params dict

Best parameter values found

best_score float

Best objective function value (lower is better)

n_iterations int

Number of optimization iterations

convergence list

Objective function values over iterations

all_params list

All parameter combinations tried

all_scores list

All objective function values

optimization_time float

Total optimization time in seconds

Source code in pypath/core/optimization.py
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
@dataclass
class OptimizationResult:
    """Results from Bayesian optimization.

    Attributes
    ----------
    best_params : dict
        Best parameter values found
    best_score : float
        Best objective function value (lower is better)
    n_iterations : int
        Number of optimization iterations
    convergence : list
        Objective function values over iterations
    all_params : list
        All parameter combinations tried
    all_scores : list
        All objective function values
    optimization_time : float
        Total optimization time in seconds
    """

    best_params: Dict[str, float]  # parameter name -> best value found
    best_score: float  # objective at best_params (lower is better)
    n_iterations: int  # total objective evaluations performed
    convergence: List[float]  # running best score after each evaluation
    all_params: List[Dict[str, float]]  # every parameter set evaluated, in order
    all_scores: List[float]  # objective value for each entry in all_params
    optimization_time: float  # wall-clock seconds spent optimizing

log_likelihood

log_likelihood(y_true: ndarray, y_pred: ndarray, sigma: float = 0.1) -> float

Calculate negative log-likelihood (Gaussian).

Parameters:

Name Type Description Default
y_true ndarray

Observed values

required
y_pred ndarray

Predicted values

required
sigma float

Standard deviation of measurement error

0.1

Returns:

Type Description
float

Negative log-likelihood

Source code in pypath/core/optimization.py
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
def log_likelihood(y_true: np.ndarray, y_pred: np.ndarray, sigma: float = 0.1) -> float:
    """Calculate negative log-likelihood (Gaussian).

    Note: despite the name, this returns the *negative* log-likelihood so
    that it can be minimized directly as an objective function.

    Parameters
    ----------
    y_true : np.ndarray
        Observed values
    y_pred : np.ndarray
        Predicted values
    sigma : float
        Standard deviation of measurement error (must be > 0)

    Returns
    -------
    float
        Negative log-likelihood

    Raises
    ------
    ValueError
        If sigma is not strictly positive.
    """
    if sigma <= 0:
        # A non-positive sigma would silently produce inf/nan below.
        raise ValueError(f"sigma must be > 0, got {sigma}")
    n = len(y_true)
    # Gaussian NLL: 0.5 * n * log(2*pi*sigma^2) + SSE / (2*sigma^2)
    return 0.5 * n * np.log(2 * np.pi * sigma**2) + np.sum((y_true - y_pred) ** 2) / (
        2 * sigma**2
    )

mean_absolute_percentage_error

mean_absolute_percentage_error(y_true: ndarray, y_pred: ndarray) -> float

Calculate mean absolute percentage error.

Parameters:

Name Type Description Default
y_true ndarray

Observed values

required
y_pred ndarray

Predicted values

required

Returns:

Type Description
float

Mean absolute percentage error

Source code in pypath/core/optimization.py
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
def mean_absolute_percentage_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Calculate mean absolute percentage error.

    Entries where y_true == 0 are excluded from the average to avoid
    division by zero.

    Parameters
    ----------
    y_true : np.ndarray
        Observed values
    y_pred : np.ndarray
        Predicted values

    Returns
    -------
    float
        Mean absolute percentage error (in percent). NaN when every
        observed value is zero (nothing valid to average).
    """
    # Avoid division by zero: only score entries with a nonzero observation.
    mask = y_true != 0
    if not np.any(mask):
        # np.mean over an empty slice would emit a RuntimeWarning and
        # return nan anyway; return nan explicitly and quietly instead.
        return float("nan")
    return np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100

mean_squared_error

mean_squared_error(y_true: ndarray, y_pred: ndarray) -> float

Calculate mean squared error between observed and predicted values.

Parameters:

Name Type Description Default
y_true ndarray

Observed values

required
y_pred ndarray

Predicted values

required

Returns:

Type Description
float

Mean squared error

Source code in pypath/core/optimization.py
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Calculate mean squared error between observed and predicted values.

    Parameters
    ----------
    y_true : np.ndarray
        Observed values
    y_pred : np.ndarray
        Predicted values

    Returns
    -------
    float
        Mean squared error
    """
    # Average of the squared residuals.
    residuals = y_true - y_pred
    return np.mean(np.square(residuals))

normalized_root_mean_squared_error

normalized_root_mean_squared_error(y_true: ndarray, y_pred: ndarray) -> float

Calculate normalized root mean squared error.

Parameters:

Name Type Description Default
y_true ndarray

Observed values

required
y_pred ndarray

Predicted values

required

Returns:

Type Description
float

Normalized RMSE

Source code in pypath/core/optimization.py
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
def normalized_root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Calculate normalized root mean squared error.

    RMSE divided by the range (max - min) of the observed values.

    Parameters
    ----------
    y_true : np.ndarray
        Observed values
    y_pred : np.ndarray
        Predicted values

    Returns
    -------
    float
        Normalized RMSE

    Raises
    ------
    ValueError
        If the observed values are all identical (zero range).
    """
    observed_range = np.max(y_true) - np.min(y_true)
    if observed_range <= 0:
        # Normalization would divide by zero (or flip sign) otherwise.
        raise ValueError(
            "Cannot compute NRMSE: y_true has zero range (all values identical)"
        )
    residuals = y_true - y_pred
    root_mse = np.sqrt(np.mean(residuals**2))
    return root_mse / observed_range

plot_fit

plot_fit(optimizer: EcosimOptimizer, params: Dict[str, float], save_path: Optional[str] = None)

Plot observed vs simulated biomass time series.

Parameters:

Name Type Description Default
optimizer EcosimOptimizer

Optimizer instance with observed data

required
params dict

Parameter values to simulate

required
save_path str

Path to save figure

None
Source code in pypath/core/optimization.py
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
def plot_fit(
    optimizer: EcosimOptimizer,
    params: Dict[str, float],
    save_path: Optional[str] = None,
):
    """Plot observed vs simulated biomass time series.

    One subplot per observed group, showing the observed series, the
    simulated series for *params*, and a per-group MSE/correlation box.

    Parameters
    ----------
    optimizer : EcosimOptimizer
        Optimizer instance with observed data
    params : dict
        Parameter values to simulate
    save_path : str, optional
        Path to save figure

    Returns
    -------
    matplotlib.figure.Figure or None
        The figure, or None if the simulation failed.
    """
    import matplotlib.pyplot as plt

    # Run simulation with the supplied parameter values.
    simulated = optimizer._run_simulation(params)

    if simulated is None:
        logger.error("Simulation failed!")
        return None

    # Create figure: up to 3 columns, as many rows as needed.
    n_groups = len(optimizer.observed_data)
    n_cols = min(3, n_groups)
    n_rows = (n_groups + n_cols - 1) // n_cols

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(5 * n_cols, 4 * n_rows))
    if n_groups == 1:
        # plt.subplots returns a bare Axes for a 1x1 grid; wrap it so the
        # flatten/indexing below works uniformly.
        axes = np.array([axes])
    axes = axes.flatten()

    # Plot each group: observed vs simulated over the simulation years.
    years = list(optimizer.years)
    for i, (group_idx, observed) in enumerate(optimizer.observed_data.items()):
        predicted = simulated[group_idx]
        group_name = optimizer.model.Group[group_idx]

        axes[i].plot(years, observed, "o-", label="Observed", linewidth=2, markersize=6)
        axes[i].plot(
            years, predicted, "s--", label="Simulated", linewidth=2, markersize=5
        )
        axes[i].set_xlabel("Year")
        axes[i].set_ylabel("Biomass")
        axes[i].set_title(f"{group_name} (Group {group_idx})")
        axes[i].legend()
        axes[i].grid(True, alpha=0.3)

        # Add metrics as an annotation box in the top-left corner.
        mse = mean_squared_error(observed, predicted)
        corr = np.corrcoef(observed, predicted)[0, 1]
        axes[i].text(
            0.05,
            0.95,
            f"MSE: {mse:.4f}\nCorr: {corr:.3f}",
            transform=axes[i].transAxes,
            verticalalignment="top",
            bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.5),
        )

    # Hide unused subplots (the grid may be larger than n_groups).
    for i in range(n_groups, len(axes)):
        axes[i].axis("off")

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches="tight")

    return fig

plot_optimization_results

plot_optimization_results(result: OptimizationResult, save_path: Optional[str] = None)

Plot optimization convergence and parameter distributions.

Parameters:

Name Type Description Default
result OptimizationResult

Optimization results

required
save_path str

Path to save figure

None
Source code in pypath/core/optimization.py
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
def plot_optimization_results(
    result: OptimizationResult, save_path: Optional[str] = None
):
    """Plot optimization convergence and parameter distributions.

    Left panel: running-best objective value with every raw score
    overlaid. Right panel: per-iteration parameter traces when there are
    at most four parameters, otherwise a bar chart of the optimized
    values.

    Parameters
    ----------
    result : OptimizationResult
        Optimization results
    save_path : str, optional
        Path to save figure
    """
    import matplotlib.pyplot as plt

    fig, (conv_ax, param_ax) = plt.subplots(1, 2, figsize=(12, 4))

    # Convergence: best-so-far line plus all evaluated scores as scatter.
    conv_ax.plot(result.convergence, "b-", linewidth=2)
    conv_ax.scatter(
        range(len(result.all_scores)),
        result.all_scores,
        c=result.all_scores,
        cmap="viridis",
        alpha=0.5,
        s=30,
    )
    conv_ax.set_xlabel("Iteration")
    conv_ax.set_ylabel("Best Objective Value")
    conv_ax.set_title("Optimization Convergence")
    conv_ax.grid(True, alpha=0.3)

    names = list(result.best_params.keys())

    if len(names) <= 4:
        # Few parameters: show how each one moved over the iterations.
        for name in names:
            trace = [p[name] for p in result.all_params]
            param_ax.scatter(range(len(trace)), trace, label=name, alpha=0.6, s=30)
        param_ax.set_xlabel("Iteration")
        param_ax.set_ylabel("Parameter Value")
        param_ax.set_title("Parameter Evolution")
        param_ax.legend()
        param_ax.grid(True, alpha=0.3)
    else:
        # Many parameters: just show the optimized values as a bar chart.
        param_ax.barh(names, list(result.best_params.values()), color="steelblue")
        param_ax.set_xlabel("Best Parameter Value")
        param_ax.set_title("Optimized Parameters")
        param_ax.grid(True, alpha=0.3, axis="x")

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches="tight")

    return fig

Plotting

pypath.core.plotting

Plotting module for PyPath.

This module provides visualization functions for Ecopath models and Ecosim simulation results using matplotlib and optionally plotly.

Functions include: - Food web network diagrams - Biomass time series - Catch time series - Trophic level distributions - Mixed Trophic Impacts heatmaps

Based on Rpath's plotting functions.

plot_biomass

plot_biomass(output: RsimOutput, groups: Optional[List[int]] = None, relative: bool = False, title: str = 'Biomass Time Series', figsize: Tuple[int, int] = (12, 6), legend_loc: str = 'best', ax: Optional[Axes] = None) -> plt.Figure

Plot biomass time series from Ecosim simulation.

Parameters:

Name Type Description Default
output RsimOutput

Simulation results

required
groups list of int

Group indices to plot (default: all living)

None
relative bool

If True, plot relative to initial biomass

False
title str

Plot title

'Biomass Time Series'
figsize tuple

Figure size

(12, 6)
legend_loc str

Legend location

'best'
ax Axes

Matplotlib axes

None

Returns:

Type Description
Figure
Source code in pypath/core/plotting.py
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
def plot_biomass(
    output: RsimOutput,
    groups: Optional[List[int]] = None,
    relative: bool = False,
    title: str = "Biomass Time Series",
    figsize: Tuple[int, int] = (12, 6),
    legend_loc: str = "best",
    ax: Optional[plt.Axes] = None,
) -> plt.Figure:
    """Plot biomass time series from Ecosim simulation.

    Parameters
    ----------
    output : RsimOutput
        Simulation results
    groups : list of int, optional
        Group indices to plot (default: all living)
    relative : bool
        If True, plot relative to initial biomass
    title : str
        Plot title
    figsize : tuple
        Figure size
    legend_loc : str
        Legend location
    ax : Axes, optional
        Matplotlib axes

    Returns
    -------
    matplotlib.Figure
    """
    bio = output.out_Biomass_annual
    n_years, n_groups = bio.shape

    # Default selection: every group (index 0 excluded) that starts with
    # positive biomass.
    if groups is None:
        groups = [g for g in range(1, n_groups) if bio[0, g] > 0]

    # Draw on the caller-supplied axes, or make a fresh figure.
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    else:
        fig = ax.figure

    years = np.arange(1, n_years + 1)

    for grp in groups:
        series = bio[:, grp]
        if relative and series[0] > 0:
            # Normalize by the first-year biomass.
            series = series / series[0]
        ax.plot(years, series, label=f"Group {grp}", linewidth=1.5)

    ax.set_xlabel("Year", fontsize=11)
    ax.set_ylabel("Relative Biomass (B/Bâ‚€)" if relative else "Biomass", fontsize=11)
    ax.set_title(title, fontsize=12)

    if relative:
        # Reference line at the unchanged-biomass level.
        ax.axhline(y=1, color="k", linestyle="--", alpha=0.5)

    ax.legend(loc=legend_loc, fontsize=9)
    ax.grid(True, alpha=0.3)

    plt.tight_layout()
    return fig

plot_biomass_grid

plot_biomass_grid(output: RsimOutput, groups: Optional[List[int]] = None, n_cols: int = 4, relative: bool = True, figsize: Optional[Tuple[int, int]] = None) -> plt.Figure

Plot biomass as a grid of subplots.

Parameters:

Name Type Description Default
output RsimOutput

Simulation results

required
groups list of int

Group indices to plot

None
n_cols int

Number of columns in grid

4
relative bool

Plot relative to initial biomass

True
figsize tuple

Figure size

None

Returns:

Type Description
Figure
Source code in pypath/core/plotting.py
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
def plot_biomass_grid(
    output: RsimOutput,
    groups: Optional[List[int]] = None,
    n_cols: int = 4,
    relative: bool = True,
    figsize: Optional[Tuple[int, int]] = None,
) -> plt.Figure:
    """Plot biomass as a grid of subplots.

    One small subplot per group, arranged in a grid of *n_cols* columns.

    Parameters
    ----------
    output : RsimOutput
        Simulation results
    groups : list of int, optional
        Group indices to plot
    n_cols : int
        Number of columns in grid
    relative : bool
        Plot relative to initial biomass
    figsize : tuple, optional
        Figure size

    Returns
    -------
    matplotlib.Figure
    """
    biomass = output.out_Biomass_annual
    n_years, n_groups = biomass.shape

    # Default selection: every group (index 0 excluded) with positive
    # first-year biomass.
    if groups is None:
        groups = [i for i in range(1, n_groups) if biomass[0, i] > 0]

    n_plots = len(groups)
    # Ceiling division: enough rows to fit all plots.
    n_rows = (n_plots + n_cols - 1) // n_cols

    if figsize is None:
        # Scale the figure with the grid dimensions.
        figsize = (3 * n_cols, 2.5 * n_rows)

    # squeeze=False keeps axes 2D even for a single row/column.
    fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize, squeeze=False)
    axes = axes.flatten()

    years = np.arange(1, n_years + 1)

    for idx, grp in enumerate(groups):
        ax = axes[idx]
        y = biomass[:, grp]

        if relative and y[0] > 0:
            # Normalize by first-year biomass and mark the B/B0 = 1 level.
            y = y / y[0]
            ax.axhline(y=1, color="k", linestyle="--", alpha=0.3)

        ax.plot(years, y, color="steelblue", linewidth=1.5)
        ax.set_title(f"Group {grp}", fontsize=10)
        ax.tick_params(labelsize=8)
        ax.grid(True, alpha=0.3)

    # Hide empty subplots (the grid may be larger than len(groups)).
    for idx in range(len(groups), len(axes)):
        axes[idx].set_visible(False)

    plt.suptitle(
        "Biomass Time Series" + (" (Relative)" if relative else ""), fontsize=12
    )
    plt.tight_layout()
    return fig

plot_biomass_interactive

plot_biomass_interactive(output: RsimOutput, groups: Optional[List[int]] = None, relative: bool = False, title: str = 'Biomass Time Series') -> Any

Create interactive biomass plot with Plotly.

Parameters:

Name Type Description Default
output RsimOutput

Simulation results

required
groups list of int

Groups to plot

None
relative bool

Plot relative to initial

False
title str

Plot title

'Biomass Time Series'

Returns:

Type Description
Figure
Source code in pypath/core/plotting.py
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
def plot_biomass_interactive(
    output: RsimOutput,
    groups: Optional[List[int]] = None,
    relative: bool = False,
    title: str = "Biomass Time Series",
) -> Any:
    """Create interactive biomass plot with Plotly.

    Parameters
    ----------
    output : RsimOutput
        Simulation results
    groups : list of int, optional
        Groups to plot
    relative : bool
        Plot relative to initial
    title : str
        Plot title

    Returns
    -------
    plotly.graph_objects.Figure

    Raises
    ------
    ImportError
        If plotly is not installed.
    """
    if not HAS_PLOTLY:
        raise ImportError(
            "plotly is required for interactive plots. Install with: pip install plotly"
        )

    biomass = output.out_Biomass_annual
    n_years, n_groups = biomass.shape

    # Default selection: every group (index 0 excluded) with positive
    # first-year biomass.
    if groups is None:
        groups = [i for i in range(1, n_groups) if biomass[0, i] > 0]

    fig = go.Figure()

    years = np.arange(1, n_years + 1)

    for grp in groups:
        y = biomass[:, grp]
        if relative and y[0] > 0:
            # Normalize by first-year biomass.
            y = y / y[0]

        fig.add_trace(
            go.Scatter(
                x=years,
                y=y,
                mode="lines",
                name=f"Group {grp}",
                hovertemplate="Year: %{x}<br>Biomass: %{y:.4f}<extra></extra>",
            )
        )

    ylabel = "Relative Biomass (B/Bâ‚€)" if relative else "Biomass"

    fig.update_layout(
        title=title,
        xaxis_title="Year",
        yaxis_title=ylabel,
        hovermode="x unified",
        template="plotly_white",
    )

    if relative:
        # Reference line at the unchanged-biomass level.
        fig.add_hline(y=1, line_dash="dash", line_color="gray", opacity=0.5)

    return fig

plot_catch

plot_catch(output: RsimOutput, groups: Optional[List[int]] = None, title: str = 'Catch Time Series', figsize: Tuple[int, int] = (12, 6), stacked: bool = False, ax: Optional[Axes] = None) -> plt.Figure

Plot catch time series from Ecosim simulation.

Parameters:

Name Type Description Default
output RsimOutput

Simulation results

required
groups list of int

Group indices to plot

None
title str

Plot title

'Catch Time Series'
figsize tuple

Figure size

(12, 6)
stacked bool

If True, create stacked area plot

False
ax Axes

Matplotlib axes

None

Returns:

Type Description
Figure
Source code in pypath/core/plotting.py
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
def plot_catch(
    output: RsimOutput,
    groups: Optional[List[int]] = None,
    title: str = "Catch Time Series",
    figsize: Tuple[int, int] = (12, 6),
    stacked: bool = False,
    ax: Optional[plt.Axes] = None,
) -> plt.Figure:
    """Plot catch time series from Ecosim simulation.

    Parameters
    ----------
    output : RsimOutput
        Simulation results
    groups : list of int, optional
        Group indices to plot
    title : str
        Plot title
    figsize : tuple
        Figure size
    stacked : bool
        If True, create stacked area plot
    ax : Axes, optional
        Matplotlib axes

    Returns
    -------
    matplotlib.Figure
    """
    catch = output.out_Catch_annual
    n_years, n_groups = catch.shape

    if groups is None:
        # Plot groups (index 0 excluded) with any catch over the run.
        groups = [i for i in range(1, n_groups) if np.sum(catch[:, i]) > 0]

    if not groups:
        # No catch anywhere - return a placeholder plot rather than an
        # empty axes with a dangling legend.
        fig, ax = plt.subplots(figsize=figsize)
        ax.text(
            0.5, 0.5, "No catch data", ha="center", va="center", transform=ax.transAxes
        )
        ax.set_title(title)
        return fig

    # Draw on the caller-supplied axes, or make a fresh figure.
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    else:
        fig = ax.figure

    years = np.arange(1, n_years + 1)

    if stacked:
        # Stacked area plot: one band per group.
        catch_data = [catch[:, grp] for grp in groups]
        labels = [f"Group {grp}" for grp in groups]
        ax.stackplot(years, catch_data, labels=labels, alpha=0.7)
    else:
        # One line per group.
        for grp in groups:
            ax.plot(years, catch[:, grp], label=f"Group {grp}", linewidth=1.5)

    ax.set_xlabel("Year", fontsize=11)
    ax.set_ylabel("Catch", fontsize=11)
    ax.set_title(title, fontsize=12)
    ax.legend(loc="best", fontsize=9)
    ax.grid(True, alpha=0.3)

    plt.tight_layout()
    return fig

plot_ecosim_summary

plot_ecosim_summary(output: RsimOutput, groups: Optional[List[int]] = None, figsize: Tuple[int, int] = (14, 10)) -> plt.Figure

Create summary plot with biomass, relative biomass, and catch.

Parameters:

Name Type Description Default
output RsimOutput

Simulation results

required
groups list of int

Groups to plot

None
figsize tuple

Figure size

(14, 10)

Returns:

Type Description
Figure
Source code in pypath/core/plotting.py
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
def plot_ecosim_summary(
    output: RsimOutput,
    groups: Optional[List[int]] = None,
    figsize: Tuple[int, int] = (14, 10),
) -> plt.Figure:
    """Create summary plot with biomass, relative biomass, and catch.

    Builds a 2x2 grid by delegating each panel to the standalone
    plotting helpers.

    Parameters
    ----------
    output : RsimOutput
        Simulation results
    groups : list of int, optional
        Groups to plot
    figsize : tuple
        Figure size

    Returns
    -------
    matplotlib.Figure
    """
    fig, panels = plt.subplots(2, 2, figsize=figsize)

    # (target axes, plotting helper, extra kwargs, panel title)
    panel_specs = [
        (panels[0, 0], plot_biomass, {"relative": False}, "Absolute Biomass"),
        (panels[0, 1], plot_biomass, {"relative": True}, "Relative Biomass (B/Bâ‚€)"),
        (panels[1, 0], plot_catch, {"stacked": False}, "Catch by Group"),
        (panels[1, 1], plot_catch, {"stacked": True}, "Total Catch (Stacked)"),
    ]
    for panel, plot_fn, extra_kwargs, panel_title in panel_specs:
        plot_fn(output, groups=groups, ax=panel, **extra_kwargs)
        panel.set_title(panel_title)

    plt.tight_layout()
    return fig

plot_foodweb

plot_foodweb(rpath: Rpath, title: str = 'Food Web', layout: str = 'trophic', node_size_by: str = 'biomass', edge_width_by: str = 'flow', show_labels: bool = True, min_flow: float = 0.01, figsize: Tuple[int, int] = (12, 10), cmap: str = 'viridis', ax: Optional[Axes] = None) -> plt.Figure

Plot food web network diagram.

Parameters:

Name Type Description Default
rpath Rpath

Balanced Ecopath model

required
title str

Plot title

'Food Web'
layout str

Node layout: 'trophic' (y=TL), 'spring', 'circular'

'trophic'
node_size_by str

What to scale node size by: 'biomass', 'production', 'equal'

'biomass'
edge_width_by str

What to scale edge width by: 'flow', 'diet', 'equal'

'flow'
show_labels bool

Show group labels

True
min_flow float

Minimum flow to show (relative to max)

0.01
figsize tuple

Figure size

(12, 10)
cmap str

Colormap for trophic levels

'viridis'
ax Axes

Matplotlib axes to plot on

None

Returns:

Type Description
Figure

The figure object

Source code in pypath/core/plotting.py
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
def plot_foodweb(
    rpath: Rpath,
    title: str = "Food Web",
    layout: str = "trophic",
    node_size_by: str = "biomass",
    edge_width_by: str = "flow",
    show_labels: bool = True,
    min_flow: float = 0.01,
    figsize: Tuple[int, int] = (12, 10),
    cmap: str = "viridis",
    ax: Optional[plt.Axes] = None,
) -> plt.Figure:
    """Plot food web network diagram.

    Nodes are living + detritus groups; directed edges run from prey to
    predator and are weighted by consumption flow (DC * QB * B of the
    predator).

    Parameters
    ----------
    rpath : Rpath
        Balanced Ecopath model
    title : str
        Plot title
    layout : str
        Node layout: 'trophic' (y=TL), 'spring', 'circular'
    node_size_by : str
        What to scale node size by: 'biomass', 'production', 'equal'
    edge_width_by : str
        What to scale edge width by: 'flow', 'diet', 'equal'
    show_labels : bool
        Show group labels
    min_flow : float
        Minimum flow to show (relative to max)
    figsize : tuple
        Figure size
    cmap : str
        Colormap for trophic levels
    ax : Axes, optional
        Matplotlib axes to plot on

    Returns
    -------
    matplotlib.Figure
        The figure object

    Raises
    ------
    ImportError
        If networkx is not installed.
    """
    if not HAS_NETWORKX:
        raise ImportError(
            "networkx is required for food web plots. Install with: pip install networkx"
        )

    n_living = rpath.NUM_LIVING
    n_dead = rpath.NUM_DEAD
    n_total = n_living + n_dead

    # Create directed graph
    G = nx.DiGraph()

    # Add nodes: living groups first, then detritus (indices > n_living).
    for i in range(1, n_total + 1):
        G.add_node(
            i, tl=rpath.TL[i], biomass=rpath.Biomass[i], is_detritus=i > n_living
        )

    # Add edges (from prey to predator); edge weight is the consumption
    # flow DC * QB * B of the predator. Track the maximum for scaling.
    max_flow = 0
    for pred in range(1, n_living + 1):
        for prey in range(1, n_total + 1):
            if rpath.DC[prey, pred] > 0:
                flow = rpath.DC[prey, pred] * rpath.QB[pred] * rpath.Biomass[pred]
                max_flow = max(max_flow, flow)
                G.add_edge(prey, pred, flow=flow, diet=rpath.DC[prey, pred])

    # Filter small flows (below min_flow fraction of the largest flow).
    edges_to_remove = []
    for u, v, data in G.edges(data=True):
        if data["flow"] < min_flow * max_flow:
            edges_to_remove.append((u, v))
    G.remove_edges_from(edges_to_remove)

    # Calculate layout
    if layout == "trophic":
        # Position by trophic level (y) and spread horizontally: nodes
        # sharing the same rounded TL are fanned out, centered on x=0.
        pos = {}
        tl_groups = {}
        for node in G.nodes():
            tl = round(G.nodes[node]["tl"], 1)
            if tl not in tl_groups:
                tl_groups[tl] = []
            tl_groups[tl].append(node)

        for tl, nodes in tl_groups.items():
            n = len(nodes)
            for i, node in enumerate(nodes):
                x = (i - (n - 1) / 2) * 0.8
                pos[node] = (x, tl)
    elif layout == "spring":
        pos = nx.spring_layout(G, seed=42)
    elif layout == "circular":
        pos = nx.circular_layout(G)
    else:
        # Unknown layout name: fall back to spring layout.
        pos = nx.spring_layout(G, seed=42)

    # Calculate node sizes (base size 500/800 plus a scaled component).
    if node_size_by == "biomass":
        max_bio = max(rpath.Biomass[1 : n_total + 1])
        node_sizes = [500 + 2000 * (rpath.Biomass[i] / max_bio) for i in G.nodes()]
    elif node_size_by == "production":
        prods = [rpath.PB[i] * rpath.Biomass[i] for i in G.nodes()]
        max_prod = max(prods) if max(prods) > 0 else 1
        node_sizes = [500 + 2000 * (p / max_prod) for p in prods]
    else:
        node_sizes = [800] * len(G.nodes())

    # Calculate edge widths (0.5 base plus a scaled component).
    if edge_width_by == "flow":
        edge_widths = []
        for u, v in G.edges():
            w = G.edges[u, v]["flow"] / max_flow if max_flow > 0 else 0
            edge_widths.append(0.5 + 4 * w)
    elif edge_width_by == "diet":
        edge_widths = [0.5 + 4 * G.edges[u, v]["diet"] for u, v in G.edges()]
    else:
        edge_widths = [1.5] * len(G.edges())

    # Node colors by trophic level
    trophic_levels = [G.nodes[i]["tl"] for i in G.nodes()]

    # Create figure (or draw on the caller-supplied axes).
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    else:
        fig = ax.figure

    # Draw network
    nx.draw_networkx_nodes(
        G,
        pos,
        node_size=node_sizes,
        node_color=trophic_levels,
        cmap=plt.colormaps[cmap],
        ax=ax,
        alpha=0.8,
    )

    nx.draw_networkx_edges(
        G,
        pos,
        width=edge_widths,
        edge_color="gray",
        alpha=0.5,
        arrows=True,
        arrowsize=15,
        connectionstyle="arc3,rad=0.1",
        ax=ax,
    )

    if show_labels:
        labels = {i: f"G{i}" for i in G.nodes()}
        nx.draw_networkx_labels(G, pos, labels, font_size=8, ax=ax)

    # Add colorbar for trophic levels (mapped over the observed TL range).
    sm = plt.cm.ScalarMappable(
        cmap=plt.colormaps[cmap],
        norm=plt.Normalize(vmin=min(trophic_levels), vmax=max(trophic_levels)),
    )
    sm.set_array([])
    cbar = plt.colorbar(sm, ax=ax, shrink=0.6)
    cbar.set_label("Trophic Level")

    ax.set_title(title, fontsize=14)
    ax.axis("off")

    plt.tight_layout()
    return fig

plot_foodweb_interactive

plot_foodweb_interactive(rpath: Rpath, title: str = 'Food Web', min_flow: float = 0.01) -> Any

Create interactive food web plot with Plotly.

Parameters:

Name Type Description Default
rpath Rpath

Balanced model

required
title str

Plot title

'Food Web'
min_flow float

Minimum flow to show

0.01

Returns:

Type Description
Figure
Source code in pypath/core/plotting.py
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
def plot_foodweb_interactive(
    rpath: Rpath,
    title: str = "Food Web",
    min_flow: float = 0.01,
) -> Any:
    """Build an interactive Plotly food web figure.

    Nodes are the biological groups (living + detritus), positioned by
    trophic level; edges are predation flows computed as
    ``DC * QB * B`` of the predator.

    Parameters
    ----------
    rpath : Rpath
        Balanced model
    title : str
        Plot title
    min_flow : float
        Edges carrying less than this fraction of the maximum flow are hidden

    Returns
    -------
    plotly.graph_objects.Figure
    """
    if not HAS_PLOTLY:
        raise ImportError(
            "plotly is required for interactive plots. Install with: pip install plotly"
        )
    if not HAS_NETWORKX:
        raise ImportError(
            "networkx is required for food web plots. Install with: pip install networkx"
        )

    n_living = rpath.NUM_LIVING
    n_total = n_living + rpath.NUM_DEAD

    # One node per group; arrays use 1-based group indices.
    G = nx.DiGraph()
    for idx in range(1, n_total + 1):
        G.add_node(idx, tl=rpath.TL[idx], biomass=rpath.Biomass[idx])

    # Collect predation flows and track the largest one for scaling.
    edges = []
    max_flow = 0
    for consumer in range(1, n_living + 1):
        for food in range(1, n_total + 1):
            if rpath.DC[food, consumer] > 0:
                f = rpath.DC[food, consumer] * rpath.QB[consumer] * rpath.Biomass[consumer]
                if f > max_flow:
                    max_flow = f
                G.add_edge(food, consumer)
                edges.append((food, consumer, f))

    # Arrange nodes in horizontal rows, one row per rounded trophic level.
    buckets = {}
    for node in G.nodes():
        buckets.setdefault(round(rpath.TL[node], 1), []).append(node)

    pos = {}
    for level, members in buckets.items():
        count = len(members)
        for k, node in enumerate(members):
            pos[node] = ((k - (count - 1) / 2) * 0.8, level)

    # Marker sizes scale with biomass relative to the largest group.
    xs = [pos[node][0] for node in G.nodes()]
    ys = [pos[node][1] for node in G.nodes()]
    biomass_peak = max(rpath.Biomass[1 : n_total + 1])
    hover = [
        f"Group {n}<br>TL: {rpath.TL[n]:.2f}<br>B: {rpath.Biomass[n]:.4f}"
        for n in G.nodes()
    ]
    sizes = [10 + 30 * rpath.Biomass[n] / biomass_peak for n in G.nodes()]

    node_trace = go.Scatter(
        x=xs,
        y=ys,
        mode="markers+text",
        hoverinfo="text",
        text=[f"G{n}" for n in G.nodes()],
        hovertext=hover,
        textposition="top center",
        marker=dict(
            size=sizes,
            color=[rpath.TL[n] for n in G.nodes()],
            colorscale="Viridis",
            colorbar=dict(title="Trophic Level"),
            line_width=2,
        ),
    )

    # One line trace per visible edge; flows below the threshold are dropped.
    threshold = min_flow * max_flow
    edge_traces = []
    for food, consumer, f in edges:
        if f < threshold:
            continue
        (x0, y0), (x1, y1) = pos[food], pos[consumer]
        edge_traces.append(
            go.Scatter(
                x=[x0, x1, None],
                y=[y0, y1, None],
                mode="lines",
                line=dict(width=1 + 4 * f / max_flow, color="gray"),
                hoverinfo="none",
                showlegend=False,
            )
        )

    fig = go.Figure(data=edge_traces + [node_trace])

    fig.update_layout(
        title=title,
        showlegend=False,
        hovermode="closest",
        xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        yaxis=dict(
            showgrid=False, zeroline=False, showticklabels=False, title="Trophic Level"
        ),
        template="plotly_white",
    )

    return fig

plot_mti_heatmap

plot_mti_heatmap(mti: ndarray, group_names: Optional[List[str]] = None, title: str = 'Mixed Trophic Impacts', figsize: Tuple[int, int] = (10, 8), cmap: str = 'RdBu_r', ax: Optional[Axes] = None) -> plt.Figure

Plot Mixed Trophic Impacts as a heatmap.

Parameters:

Name Type Description Default
mti ndarray

MTI matrix from mixed_trophic_impacts()

required
group_names list of str

Names for groups

None
title str

Plot title

'Mixed Trophic Impacts'
figsize tuple

Figure size

(10, 8)
cmap str

Colormap

'RdBu_r'
ax Axes

Matplotlib axes

None

Returns:

Type Description
matplotlib.figure.Figure — the heatmap figure.
Source code in pypath/core/plotting.py
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
def plot_mti_heatmap(
    mti: np.ndarray,
    group_names: Optional[List[str]] = None,
    title: str = "Mixed Trophic Impacts",
    figsize: Tuple[int, int] = (10, 8),
    cmap: str = "RdBu_r",
    ax: Optional[plt.Axes] = None,
) -> plt.Figure:
    """Render a Mixed Trophic Impacts matrix as a heatmap.

    Parameters
    ----------
    mti : np.ndarray
        MTI matrix from mixed_trophic_impacts()
    group_names : list of str, optional
        Names for groups; defaults to "G1".."Gn"
    title : str
        Plot title
    figsize : tuple
        Figure size
    cmap : str
        Colormap
    ax : Axes, optional
        Matplotlib axes to draw into; a new figure is created when omitted

    Returns
    -------
    matplotlib.Figure
    """
    size = mti.shape[0]

    labels = group_names
    if labels is None:
        labels = [f"G{i}" for i in range(1, size + 1)]

    if ax is not None:
        fig = ax.figure
    else:
        fig, ax = plt.subplots(figsize=figsize)

    # Center the diverging colormap on zero so positive and negative
    # impacts are visually symmetric.
    limit = np.max(np.abs(mti))
    image = ax.imshow(mti, cmap=cmap, vmin=-limit, vmax=limit, aspect="auto")

    colorbar = plt.colorbar(image, ax=ax, shrink=0.8)
    colorbar.set_label("Impact")

    # Tick every row/column with its group name.
    positions = range(size)
    ax.set_xticks(positions)
    ax.set_yticks(positions)
    ax.set_xticklabels(labels, rotation=45, ha="right", fontsize=8)
    ax.set_yticklabels(labels, fontsize=8)

    ax.set_xlabel("Impacted", fontsize=11)
    ax.set_ylabel("Impacting", fontsize=11)
    ax.set_title(title, fontsize=12)

    plt.tight_layout()
    return fig

plot_trophic_spectrum

plot_trophic_spectrum(rpath: Rpath, by: str = 'biomass', n_bins: int = 10, title: str = 'Trophic Spectrum', figsize: Tuple[int, int] = (10, 6), ax: Optional[Axes] = None) -> plt.Figure

Plot trophic spectrum (biomass or production by trophic level).

Parameters:

Name Type Description Default
rpath Rpath

Balanced model

required
by str

What to aggregate: 'biomass', 'production', 'consumption'

'biomass'
n_bins int

Number of trophic level bins

10
title str

Plot title

'Trophic Spectrum'
figsize tuple

Figure size

(10, 6)
ax Axes

Matplotlib axes

None

Returns:

Type Description
matplotlib.figure.Figure — the trophic spectrum bar chart.
Source code in pypath/core/plotting.py
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
def plot_trophic_spectrum(
    rpath: Rpath,
    by: str = "biomass",
    n_bins: int = 10,
    title: str = "Trophic Spectrum",
    figsize: Tuple[int, int] = (10, 6),
    ax: Optional[plt.Axes] = None,
) -> plt.Figure:
    """Plot trophic spectrum (biomass or production by trophic level).

    Living groups are binned by trophic level and the chosen quantity is
    summed within each bin, then drawn as a bar chart.

    Parameters
    ----------
    rpath : Rpath
        Balanced model
    by : str
        What to aggregate: 'biomass', 'production', 'consumption'
    n_bins : int
        Number of trophic level bins
    title : str
        Plot title
    figsize : tuple
        Figure size
    ax : Axes, optional
        Matplotlib axes to draw into; a new figure is created when omitted

    Returns
    -------
    matplotlib.Figure

    Raises
    ------
    ValueError
        If `by` is not one of the recognized aggregation modes.
    """
    n_living = rpath.NUM_LIVING

    # Living groups occupy 1-based slots 1..NUM_LIVING.
    tl = rpath.TL[1 : n_living + 1]
    biomass = rpath.Biomass[1 : n_living + 1]

    if by == "biomass":
        values = biomass
        ylabel = "Biomass"
    elif by == "production":
        values = rpath.PB[1 : n_living + 1] * biomass
        ylabel = "Production"
    elif by == "consumption":
        values = rpath.QB[1 : n_living + 1] * biomass
        ylabel = "Consumption"
    else:
        raise ValueError(f"Unknown 'by' value: {by}")

    # Bin edges span whole trophic levels covering all living groups.
    lo = np.floor(np.min(tl))
    hi = np.ceil(np.max(tl))
    edges = np.linspace(lo, hi, n_bins + 1)
    centers = (edges[:-1] + edges[1:]) / 2

    # Sum values into bins; the top edge is folded into the last bin.
    totals = np.zeros(n_bins)
    for level, amount in zip(tl, values):
        slot = min(np.digitize(level, edges) - 1, n_bins - 1)
        totals[slot] += amount

    if ax is not None:
        fig = ax.figure
    else:
        fig, ax = plt.subplots(figsize=figsize)

    ax.bar(
        centers,
        totals,
        width=edges[1] - edges[0],
        color="steelblue",
        edgecolor="black",
        alpha=0.7,
    )

    ax.set_xlabel("Trophic Level", fontsize=11)
    ax.set_ylabel(ylabel, fontsize=11)
    ax.set_title(title, fontsize=12)
    ax.grid(True, alpha=0.3, axis="y")

    plt.tight_layout()
    return fig

save_plots

save_plots(figures: Union[Figure, List[Figure]], filename: str, dpi: int = 150, format: str = 'png') -> None

Save matplotlib figure(s) to file.

Parameters:

Name Type Description Default
figures Figure or list of Figure

Figure(s) to save

required
filename str

Output filename (without extension for multiple figures)

required
dpi int

Resolution

150
format str

Output format ('png', 'pdf', 'svg')

'png'
Source code in pypath/core/plotting.py
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
def save_plots(
    figures: Union[plt.Figure, List[plt.Figure]],
    filename: str,
    dpi: int = 150,
    format: str = "png",
) -> None:
    """Save matplotlib figure(s) to file.

    A single figure is written to ``<filename>.<format>``; a list of
    figures is written to ``<filename>_1.<format>``,
    ``<filename>_2.<format>``, and so on.

    Parameters
    ----------
    figures : Figure or list of Figure
        Figure(s) to save
    filename : str
        Output filename stem (the extension is appended from ``format``)
    dpi : int
        Resolution
    format : str
        Output format ('png', 'pdf', 'svg')
    """
    if isinstance(figures, plt.Figure):
        figures = [figures]

    if len(figures) == 1:
        # Single figure: no numeric suffix on the output path.
        # BUG FIX: the path previously used a hard-coded literal instead of
        # the `filename` argument, which was silently ignored.
        figures[0].savefig(f"{filename}.{format}", dpi=dpi, bbox_inches="tight")
    else:
        for i, fig in enumerate(figures, start=1):
            fig.savefig(f"{filename}_{i}.{format}", dpi=dpi, bbox_inches="tight")