All single-series metrics (sharpe, sortino, calmar, omega, max_drawdown, ulcer_index, value_at_risk, cvar) take a returns Series and a small number of keyword arguments (periods_per_year, risk_free, alpha, target) with sensible defaults for daily data. The batch_* variants accept a dict of named series or a wide DataFrame and dispatch to the Rust-accelerated kernel when available — see Rust kernels. returns_stats / batch_summary produce the same Series / DataFrame shape used by the tear sheet, so custom reporting code can share the formatting layer.
Free functions on returns. Every function accepts a pd.Series (single
strategy) or a pd.DataFrame (panel of strategies as columns) and
returns the same shape: a scalar stays a scalar for Series input, a
Series indexed by column name for DataFrame input.
Organised into six concerns:
:mod:`fundcloud.metrics.core` — scalar metrics independent of a
benchmark (return, risk, risk-adjusted, higher moments).
Uses the sample standard deviation (ddof=1). Returns are assumed
to be simple per-period returns; for log returns the same formula
applies to both the numerator and the denominator.
Examples:
>>> import pandas as pd, numpy as np
>>> rng = np.random.default_rng(0)
>>> r = pd.Series(rng.normal(0.0005, 0.01, 252))
>>> round(sharpe(r, periods_per_year=252), 2)
0.7
>>> # Works on a DataFrame too — returns a Series indexed by column:
>>> panel = pd.DataFrame({"a": r, "b": -r})
>>> isinstance(sharpe(panel), pd.Series)
True
def sharpe(
    returns: pd.Series | pd.DataFrame,
    *,
    risk_free: float | None = None,
    periods_per_year: int | None = None,
) -> float | pd.Series:
    """Annualised Sharpe ratio.

    Uses the **sample** standard deviation (``ddof=1``). Returns are assumed
    to be simple per-period returns; for log returns the same formula applies
    to both the numerator and the denominator.

    Examples
    --------
    >>> import pandas as pd, numpy as np
    >>> rng = np.random.default_rng(0)
    >>> r = pd.Series(rng.normal(0.0005, 0.01, 252))
    >>> round(sharpe(r, periods_per_year=252), 2)  # doctest: +SKIP
    0.7
    >>> # Works on a DataFrame too — returns a Series indexed by column:
    >>> panel = pd.DataFrame({"a": r, "b": -r})
    >>> isinstance(sharpe(panel), pd.Series)
    True
    """
    frame = _to_df(returns)
    ppy = _periods(periods_per_year)
    rf_per_period = _rf_per_period(risk_free, ppy)
    excess = frame - rf_per_period
    ratio = (excess.mean() / excess.std(ddof=1)) * np.sqrt(ppy)
    # Zero volatility produces ±inf — report it as missing instead.
    ratio = ratio.replace([np.inf, -np.inf], np.nan)
    return _collapse(ratio, returns)
def sortino(
    returns: pd.Series | pd.DataFrame,
    *,
    target: float = 0.0,
    periods_per_year: int | None = None,
) -> float | pd.Series:
    """Annualised Sortino ratio.

    Downside deviation uses only periods with returns strictly below
    ``target`` and divides by the sample count (``ddof=0``).
    """
    frame = _to_df(returns)
    ppy = _periods(periods_per_year)
    active = frame - target
    shortfall = active.clip(upper=0.0)
    # Population std around a zero mean reduces to sqrt(mean(x^2)).
    downside_dev = np.sqrt((shortfall ** 2).mean())
    ratio = (active.mean() / downside_dev) * np.sqrt(ppy)
    ratio = ratio.replace([np.inf, -np.inf], np.nan)
    return _collapse(ratio, returns)
def calmar(
    returns: pd.Series | pd.DataFrame,
    *,
    periods_per_year: int | None = None,
) -> float | pd.Series:
    """Annualised return divided by absolute max drawdown."""
    frame = _to_df(returns)
    ppy = _periods(periods_per_year)
    growth = (1.0 + frame).prod()
    # max(len, 1) guards the exponent against an empty frame.
    annual_return = growth ** (ppy / max(len(frame), 1)) - 1.0
    ratio = annual_return / max_drawdown(frame).abs()
    ratio = ratio.replace([np.inf, -np.inf], np.nan)
    return _collapse(ratio, returns)
def omega(returns: pd.Series | pd.DataFrame, *, target: float = 0.0) -> float | pd.Series:
    """Omega ratio at ``target`` threshold.

    Ratio of the expected gain above target to expected loss below.
    """
    deviations = _to_df(returns) - target
    gain = deviations.clip(lower=0.0).sum()
    pain = -deviations.clip(upper=0.0).sum()
    # No losses at all gives +inf — report it as missing instead.
    ratio = (gain / pain).replace([np.inf, -np.inf], np.nan)
    return _collapse(ratio, returns)
def max_drawdown(returns: pd.Series | pd.DataFrame) -> float | pd.Series:
    """Largest peak-to-trough loss (negative number)."""
    worst = drawdown_series(_to_df(returns)).min()
    return _collapse(worst, returns)
def ulcer_index(returns: pd.Series | pd.DataFrame) -> float | pd.Series:
    """Ulcer Index: RMS of drawdowns, in percent."""
    dd_in_percent = drawdown_series(_to_df(returns)) * 100.0
    rms = np.sqrt((dd_in_percent ** 2).mean())
    return _collapse(rms, returns)
def cvar(returns: pd.Series | pd.DataFrame, *, alpha: float = 0.95) -> float | pd.Series:
    """Conditional Value-at-Risk (Expected Shortfall) at confidence ``alpha``.

    Returns a **loss** as a negative number — the mean of returns below
    the ``(1 - alpha)`` quantile.

    Raises
    ------
    ValueError
        If ``alpha`` is not strictly inside (0, 1).
    """
    if not 0.0 < alpha < 1.0:
        raise ValueError("alpha must be in (0, 1)")
    df = _to_df(returns)
    q = df.quantile(1.0 - alpha)
    # Vectorised tail mean instead of a per-column Python loop: mask out
    # returns above each column's quantile (align ``q`` with columns), then
    # average what survives. A column with no tail observations is all-NaN
    # after the mask, so ``mean()`` yields NaN — matching the old loop's
    # explicit ``mask.any()`` fallback. NaN inputs compare False and are
    # excluded, also matching the old behaviour.
    out = df.where(df.le(q, axis=1)).mean()
    return _collapse(out, returns)
def value_at_risk(returns: pd.Series | pd.DataFrame, *, alpha: float = 0.95) -> float | pd.Series:
    """Historical Value-at-Risk at confidence ``alpha``.

    Returns a **loss** as a negative number (the (1-alpha) quantile of
    returns).
    """
    if not 0.0 < alpha < 1.0:
        raise ValueError("alpha must be in (0, 1)")
    quantiles = _to_df(returns).quantile(1.0 - alpha)
    return _collapse(quantiles, returns)
def returns_stats(
    returns: pd.Series | pd.DataFrame,
    *,
    risk_free: float | None = None,
    periods_per_year: int | None = None,
    cvar_alpha: float = 0.95,
) -> pd.DataFrame:
    """Bundle of the common metrics into a single, scannable summary table.

    Rows are metrics, columns are strategies.
    """
    df = _to_df(returns)
    ppy = _periods(periods_per_year)
    n = len(df)
    # Compound growth factor shared by total return and CAGR.
    growth = (1.0 + df).prod()
    rows = {
        "periods": pd.Series(n, index=df.columns),
        "total_return": growth - 1.0,
        "cagr": growth ** (ppy / max(n, 1)) - 1.0,
        "ann_volatility": df.std(ddof=1) * np.sqrt(ppy),
        "sharpe": sharpe(df, risk_free=risk_free, periods_per_year=ppy),
        "sortino": sortino(df, periods_per_year=ppy),
        "calmar": calmar(df, periods_per_year=ppy),
        "max_drawdown": max_drawdown(df),
        "ulcer_index": ulcer_index(df),
        "cvar": cvar(df, alpha=cvar_alpha),
        "omega": omega(df),
    }
    return pd.DataFrame(rows).T
def batch_summary(
    strategies: Mapping[str, pd.Series | pd.DataFrame],
    *,
    risk_free: float | None = None,
    periods_per_year: int | None = None,
    cvar_alpha: float = 0.95,
) -> pd.DataFrame:
    """One row per strategy, standard metrics as columns."""
    if not strategies:
        return pd.DataFrame()
    rows = {
        name: _core.returns_stats(
            _reduce_returns(r),
            risk_free=risk_free,
            periods_per_year=periods_per_year,
            cvar_alpha=cvar_alpha,
        ).iloc[:, 0]
        for name, r in strategies.items()
    }
    summary = pd.DataFrame(rows).T
    # Enforce float dtype; sklearn/skfolio sometimes hands us object columns.
    numeric = summary.apply(pd.to_numeric, errors="coerce")
    return numeric.replace([np.inf, -np.inf], np.nan)