Add files via upload

This commit is contained in:
PSBigBig × MiniPS 2026-02-12 20:59:59 +08:00 committed by GitHub
parent c0ba59cd53
commit c5098a488c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 433 additions and 0 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 204 KiB

View file

@@ -0,0 +1,433 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "m3XUqnsNzbHF"
},
"outputs": [],
"source": [
"# TU Q101_A - Toy equity premium tension\n",
"# --------------------------------------\n",
"# This is a single-cell Colab-style notebook.\n",
"# It simulates three tiny \"worlds\" for the equity premium puzzle and\n",
"# computes a scalar tension observable T_premium for each world.\n",
"#\n",
"# Goals:\n",
"# - Show how a very simple consumption-based asset pricing model behaves.\n",
"# - Make the \"equity premium puzzle\" visible as a tension between\n",
"# target premia and what the model can generate with reasonable parameters.\n",
"#\n",
"# No API key is needed. Everything is fully offline and reproducible.\n",
"\n",
"import math\n",
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"\n",
"# -----------------------------\n",
"# 1. Global configuration\n",
"# -----------------------------\n",
"\n",
"# Randomness\n",
"GLOBAL_SEED = 101\n",
"N_PATHS = 12000 # number of Monte Carlo samples per scenario\n",
"\n",
"# Target information for the toy \"data\"\n",
"TARGET_PREMIUM = 0.06 # 6% long-run equity premium band (stylised fact)\n",
"GAMMA_GRID_MIN = 0.5\n",
"GAMMA_GRID_MAX = 20.0\n",
"GAMMA_GRID_STEPS = 80\n",
"\n",
"# Reasonable risk-aversion band for a representative agent.\n",
"# If the model needs gamma far outside this band, we treat it as a puzzle.\n",
"GAMMA_PLAUSIBLE_LOW = 1.0\n",
"GAMMA_PLAUSIBLE_HIGH = 5.0\n",
"\n",
"# Tension weighting for T_premium\n",
"PREMIUM_SCALE = 0.02 # 2% difference in premium = 1 unit of deviation\n",
"WEIGHT_PREMIUM = 0.4\n",
"WEIGHT_GAMMA = 0.4\n",
"WEIGHT_VOL = 0.2\n",
"\n",
"\n",
"# -----------------------------\n",
"# 2. Scenario definitions\n",
"# -----------------------------\n",
"# Each scenario is a tiny world with:\n",
"# - consumption growth process (mu_c, sigma_c)\n",
"# - risky payoff process (mu_p, sigma_p)\n",
"# - correlation between consumption and risky payoff (rho)\n",
"# - discount factor beta\n",
"#\n",
"# All parameters are intentionally simple and hand-tuned. We only want to\n",
"# show relative patterns, not to calibrate real markets.\n",
"\n",
"SCENARIOS = {\n",
"    # Each entry below is unpacked by simulate_world_once(); premium_target\n",
"    # and vol_target_range define the toy \"data\" the model is asked to match.\n",
"    \"no_puzzle\": {\n",
"        \"name\": \"High-volatility world (no puzzle)\",\n",
"        \"description\": (\n",
"            \"Consumption growth is volatile and strongly correlated \"\n",
"            \"with the risky asset. A moderate gamma can support a 6% \"\n",
"            \"equity premium without stress.\"\n",
"        ),\n",
"        \"mu_c\": 0.02,\n",
"        \"sigma_c\": 0.10,\n",
"        \"mu_p\": 0.05,\n",
"        \"sigma_p\": 0.20,\n",
"        \"rho\": 0.8,\n",
"        \"beta\": 0.99,\n",
"        \"premium_target\": TARGET_PREMIUM,\n",
"        \"vol_target_range\": (0.15, 0.30),\n",
"    },\n",
"    \"realistic_puzzle\": {\n",
"        \"name\": \"Low-vol consumption (classic puzzle)\",\n",
"        \"description\": (\n",
"            \"Consumption growth volatility is low, closer to real data. \"\n",
"            \"We still demand a 6% equity premium. The model tends to \"\n",
"            \"require very large gamma to get close.\"\n",
"        ),\n",
"        \"mu_c\": 0.02,\n",
"        \"sigma_c\": 0.02,\n",
"        \"mu_p\": 0.06,\n",
"        \"sigma_p\": 0.20,\n",
"        \"rho\": 0.4,\n",
"        \"beta\": 0.99,\n",
"        \"premium_target\": TARGET_PREMIUM,\n",
"        \"vol_target_range\": (0.15, 0.25),\n",
"    },\n",
"    \"anemic_asset\": {\n",
"        \"name\": \"Low-vol risky asset (anemic risk)\",\n",
"        \"description\": (\n",
"            \"Consumption volatility is low and the risky asset itself \"\n",
"            \"is not very volatile. Asking for a 6% premium here is almost \"\n",
"            \"impossible without extreme parameters.\"\n",
"        ),\n",
"        \"mu_c\": 0.02,\n",
"        \"sigma_c\": 0.02,\n",
"        \"mu_p\": 0.03,\n",
"        \"sigma_p\": 0.08,\n",
"        \"rho\": 0.3,\n",
"        \"beta\": 0.99,\n",
"        \"premium_target\": TARGET_PREMIUM,\n",
"        \"vol_target_range\": (0.10, 0.18),\n",
"    },\n",
"}\n",
"\n",
"\n",
"# -----------------------------\n",
"# 3. Helper functions\n",
"# -----------------------------\n",
"\n",
"def generate_joint_shocks(n_paths, rho, rng):\n",
"    \"\"\"\n",
"    Generate two correlated standard normal shock series.\n",
"\n",
"    Parameters\n",
"    ----------\n",
"    n_paths : int\n",
"        Number of Monte Carlo draws.\n",
"    rho : float\n",
"        Correlation between the two series (consumption and risky payoff).\n",
"    rng : np.random.Generator\n",
"        Random number generator.\n",
"\n",
"    Returns\n",
"    -------\n",
"    z_c : np.ndarray\n",
"        Shocks for consumption.\n",
"    z_p : np.ndarray\n",
"        Shocks for risky payoff, correlated with z_c.\n",
"    \"\"\"\n",
"    z_c = rng.normal(size=n_paths)\n",
"    z_2 = rng.normal(size=n_paths)\n",
"    # Mix in an independent draw so that corr(z_c, z_p) = rho while z_p keeps\n",
"    # unit variance; max(0.0, ...) guards sqrt against tiny negative values\n",
"    # from floating-point round-off when |rho| is close to 1.\n",
"    z_p = rho * z_c + math.sqrt(max(0.0, 1.0 - rho**2)) * z_2\n",
"    return z_c, z_p\n",
"\n",
"\n",
"def simulate_world_once(scenario_key, params, n_paths, seed_offset=0):\n",
"    \"\"\"\n",
"    Simulate one world and sweep over gamma values.\n",
"\n",
"    For each gamma in the grid we compute:\n",
"    - stochastic discount factor m_t\n",
"    - risk-free rate R_f(gamma)\n",
"    - risky asset price P_e(gamma)\n",
"    - risky return distribution R_e_t(gamma)\n",
"    - implied equity premium premium_model(gamma)\n",
"\n",
"    We then find gamma_star that best matches the target premium\n",
"    and compute a scalar tension T_premium.\n",
"\n",
"    Parameters\n",
"    ----------\n",
"    scenario_key : str\n",
"        Key of the scenario (copied verbatim into the summary dict).\n",
"    params : dict\n",
"        Scenario parameters: mu_c, sigma_c, mu_p, sigma_p, rho, beta,\n",
"        premium_target, vol_target_range.\n",
"    n_paths : int\n",
"        Number of Monte Carlo draws.\n",
"    seed_offset : int, optional\n",
"        Offset added to GLOBAL_SEED so each scenario uses its own\n",
"        deterministic random stream.\n",
"\n",
"    Returns\n",
"    -------\n",
"    summary : dict\n",
"        Best-fit information for the scenario (gamma_star, premium, vol, R_f, T_premium).\n",
"    curve : dict\n",
"        Gamma grid and premium grid for plotting.\n",
"    \"\"\"\n",
"    rng = np.random.default_rng(GLOBAL_SEED + seed_offset)\n",
"\n",
"    # Unpack parameters\n",
"    mu_c = params[\"mu_c\"]\n",
"    sigma_c = params[\"sigma_c\"]\n",
"    mu_p = params[\"mu_p\"]\n",
"    sigma_p = params[\"sigma_p\"]\n",
"    rho = params[\"rho\"]\n",
"    beta = params[\"beta\"]\n",
"    premium_target = params[\"premium_target\"]\n",
"    vol_target_low, vol_target_high = params[\"vol_target_range\"]\n",
"\n",
"    # Generate correlated shocks\n",
"    z_c, z_p = generate_joint_shocks(n_paths, rho, rng)\n",
"\n",
"    # Build gross consumption growth and risky payoff (lognormal draws)\n",
"    g = np.exp(mu_c + sigma_c * z_c) # consumption growth C_{t+1} / C_t\n",
"    pay_e = np.exp(mu_p + sigma_p * z_p) # risky asset payoff next period\n",
"\n",
"    # Prepare gamma grid\n",
"    gamma_grid = np.linspace(GAMMA_GRID_MIN, GAMMA_GRID_MAX, GAMMA_GRID_STEPS)\n",
"\n",
"    # Storage for the full premium curve\n",
"    premium_grid = []\n",
"    vol_grid = []\n",
"    rf_grid = []\n",
"\n",
"    # Track the best gamma (smallest premium error)\n",
"    best = None\n",
"\n",
"    for gamma in gamma_grid:\n",
"        # Stochastic discount factor m_{t+1} = beta * (g_{t+1})^{-gamma}\n",
"        m = beta * g**(-gamma)\n",
"        em = np.mean(m)\n",
"\n",
"        # Risk-free rate: price of risk-free payoff 1 is E[m], so R_f = 1 / E[m]\n",
"        r_f = 1.0 / em\n",
"\n",
"        # Price of risky payoff (Euler equation E[m * payoff]) and the\n",
"        # implied gross return distribution\n",
"        price_e = np.mean(m * pay_e)\n",
"        re_path = pay_e / price_e\n",
"\n",
"        premium_model = float(np.mean(re_path) - r_f)\n",
"        vol_model = float(np.std(re_path))\n",
"\n",
"        premium_grid.append(premium_model)\n",
"        vol_grid.append(vol_model)\n",
"        rf_grid.append(r_f)\n",
"\n",
"        diff = premium_model - premium_target\n",
"\n",
"        if best is None or abs(diff) < abs(best[\"diff\"]):\n",
"            best = {\n",
"                \"scenario_key\": scenario_key,\n",
"                \"gamma_star\": float(gamma),\n",
"                \"premium_model\": premium_model,\n",
"                \"premium_target\": float(premium_target),\n",
"                \"diff\": diff,\n",
"                \"vol\": vol_model,\n",
"                \"R_f\": float(r_f),\n",
"            }\n",
"\n",
"    # Compute scalar tension T_premium\n",
"    # (best is always set here: the gamma grid has GAMMA_GRID_STEPS >= 1 entries)\n",
"    # 1) Premium mismatch component (scaled by PREMIUM_SCALE)\n",
"    diff_premium = abs(best[\"diff\"]) / PREMIUM_SCALE\n",
"\n",
"    # 2) Gamma penalty: outside [GAMMA_PLAUSIBLE_LOW, GAMMA_PLAUSIBLE_HIGH]\n",
"    gamma_star = best[\"gamma_star\"]\n",
"    if gamma_star < GAMMA_PLAUSIBLE_LOW:\n",
"        gamma_penalty = (GAMMA_PLAUSIBLE_LOW - gamma_star) / GAMMA_PLAUSIBLE_LOW\n",
"    elif gamma_star > GAMMA_PLAUSIBLE_HIGH:\n",
"        gamma_penalty = (gamma_star - GAMMA_PLAUSIBLE_HIGH) / GAMMA_PLAUSIBLE_HIGH\n",
"    else:\n",
"        gamma_penalty = 0.0\n",
"\n",
"    # 3) Volatility penalty: outside the target vol band\n",
"    vol_star = best[\"vol\"]\n",
"    if vol_star < vol_target_low:\n",
"        vol_penalty = (vol_target_low - vol_star) / vol_target_low\n",
"    elif vol_star > vol_target_high:\n",
"        vol_penalty = (vol_star - vol_target_high) / vol_target_high\n",
"    else:\n",
"        vol_penalty = 0.0\n",
"\n",
"    # Final scalar tension (simple weighted sum)\n",
"    t_premium = (\n",
"        WEIGHT_PREMIUM * diff_premium\n",
"        + WEIGHT_GAMMA * gamma_penalty\n",
"        + WEIGHT_VOL * vol_penalty\n",
"    )\n",
"    best[\"T_premium\"] = float(t_premium)\n",
"\n",
"    curve = {\n",
"        \"gamma_grid\": gamma_grid,\n",
"        \"premium_grid\": np.array(premium_grid),\n",
"        \"vol_grid\": np.array(vol_grid),\n",
"        \"rf_grid\": np.array(rf_grid),\n",
"    }\n",
"\n",
"    return best, curve\n",
"\n",
"\n",
"# -----------------------------\n",
"# 4. Run all scenarios\n",
"# -----------------------------\n",
"\n",
"summaries = []\n",
"curves = {}\n",
"\n",
"# seed_offset=idx gives each scenario a distinct, reproducible RNG stream.\n",
"for idx, (key, params) in enumerate(SCENARIOS.items()):\n",
"    best, curve = simulate_world_once(key, params, n_paths=N_PATHS, seed_offset=idx)\n",
"    summaries.append(best)\n",
"    curves[key] = curve\n",
"\n",
"# Build a DataFrame for nice printing\n",
"summary_df = pd.DataFrame(summaries)\n",
"\n",
"# Add human-readable scenario names\n",
"summary_df[\"scenario_name\"] = summary_df[\"scenario_key\"].map(\n",
"    {k: v[\"name\"] for k, v in SCENARIOS.items()}\n",
")\n",
"\n",
"# Reorder columns and sort by T_premium (lower = closer to plausible world)\n",
"summary_df = summary_df[\n",
"    [\n",
"        \"scenario_key\",\n",
"        \"scenario_name\",\n",
"        \"gamma_star\",\n",
"        \"premium_target\",\n",
"        \"premium_model\",\n",
"        \"diff\",\n",
"        \"vol\",\n",
"        \"R_f\",\n",
"        \"T_premium\",\n",
"    ]\n",
"].sort_values(\"T_premium\")\n",
"\n",
"\n",
"# -----------------------------\n",
"# 5. Print textual summary\n",
"# -----------------------------\n",
"\n",
"print(\"TU Q101_A - Toy equity premium tension\")\n",
"print(\"--------------------------------------\")\n",
"print(\n",
"    \"This notebook simulates three tiny consumption-based worlds and\\n\"\n",
"    \"computes a scalar tension observable T_premium for each world.\\n\"\n",
")\n",
"print(\"All runs are fully offline. No API key is needed.\\n\")\n",
"\n",
"print(\"Target band and plausible parameter region:\")\n",
"print(f\"- Target long-run equity premium (stylised): {TARGET_PREMIUM:.2%}\")\n",
"print(\n",
"    f\"- Plausible risk-aversion band for a representative agent: \"\n",
"    f\"gamma in [{GAMMA_PLAUSIBLE_LOW:.1f}, {GAMMA_PLAUSIBLE_HIGH:.1f}]\"\n",
")\n",
"print(f\"- Premium mismatch is scaled by {PREMIUM_SCALE:.2%} per unit.\\n\")\n",
"\n",
"print(\"Configured scenarios:\")\n",
"for key, params in SCENARIOS.items():\n",
"    print(f\"- {key}: {params['name']}\")\n",
"    print(f\" {params['description']}\")\n",
"print()\n",
"\n",
"print(\"All simulations completed.\\n\")\n",
"\n",
"print(\"Summary table (sorted by T_premium, lower means closer to plausible world):\")\n",
"print(summary_df.to_string(index=False, float_format=lambda x: f\"{x:0.4f}\"))\n",
"print()\n",
"\n",
"# Quick interpretation lines (rows arrive already sorted by ascending T_premium)\n",
"for _, row in summary_df.iterrows():\n",
"    label = row[\"scenario_key\"]\n",
"    t_val = row[\"T_premium\"]\n",
"    gamma_star = row[\"gamma_star\"]\n",
"    prem = row[\"premium_model\"]\n",
"    diff = row[\"diff\"]\n",
"    rf = row[\"R_f\"]\n",
"    vol = row[\"vol\"]\n",
"\n",
"    print(\n",
"        f\"- {label}: T_premium ≈ {t_val:0.3f}, \"\n",
"        f\"gamma* ≈ {gamma_star:0.1f}, \"\n",
"        f\"premium_model ≈ {prem:0.2%} \"\n",
"        f\"(target {row['premium_target']:0.2%}, diff {diff:0.2%}), \"\n",
"        f\"R_f ≈ {rf:0.3f}, vol ≈ {vol:0.2f}\"\n",
"    )\n",
"print()\n",
"\n",
"\n",
"# -----------------------------\n",
"# 6. Plot: premium vs gamma\n",
"# -----------------------------\n",
"\n",
"plt.figure(figsize=(9, 5))\n",
"\n",
"for key, curve in curves.items():\n",
"    gamma_grid = curve[\"gamma_grid\"]\n",
"    premium_grid = curve[\"premium_grid\"]\n",
"    plt.plot(\n",
"        gamma_grid,\n",
"        premium_grid,\n",
"        label=SCENARIOS[key][\"name\"],\n",
"    )\n",
"\n",
"# Target premium band: dashed line at TARGET_PREMIUM, dotted lines at\n",
"# +/- one PREMIUM_SCALE unit around it.\n",
"plt.axhline(TARGET_PREMIUM, linestyle=\"--\")\n",
"plt.axhline(TARGET_PREMIUM - PREMIUM_SCALE, linestyle=\":\")\n",
"plt.axhline(TARGET_PREMIUM + PREMIUM_SCALE, linestyle=\":\")\n",
"\n",
"# Plausible gamma band (vertical band)\n",
"plt.axvspan(\n",
"    GAMMA_PLAUSIBLE_LOW,\n",
"    GAMMA_PLAUSIBLE_HIGH,\n",
"    alpha=0.1,\n",
")\n",
"\n",
"plt.title(\"TU Q101_A · Model equity premium vs risk aversion gamma\")\n",
"plt.xlabel(\"Risk aversion gamma\")\n",
"plt.ylabel(\"Model equity premium (E[Re] - Rf)\")\n",
"plt.legend()\n",
"plt.tight_layout()\n",
"plt.savefig(\"Q101A_premium_vs_gamma.png\", dpi=150)\n",
"plt.show()\n",
"\n",
"print(\"Saved premium-vs-gamma plot as: Q101A_premium_vs_gamma.png\")\n",
"\n",
"# -----------------------------\n",
"# 7. Plot: T_premium per scenario\n",
"# -----------------------------\n",
"\n",
"plt.figure(figsize=(6, 4))\n",
"\n",
"# One bar per scenario, kept in the DataFrame's row order.\n",
"scenario_keys = summary_df[\"scenario_key\"].tolist()\n",
"bar_positions = list(range(len(scenario_keys)))\n",
"tension_scores = summary_df[\"T_premium\"].values\n",
"\n",
"plt.bar(bar_positions, tension_scores)\n",
"plt.xticks(bar_positions, scenario_keys)\n",
"plt.ylabel(\"T_premium (higher = more tension)\")\n",
"plt.title(\"TU Q101_A · T_premium per scenario\")\n",
"plt.tight_layout()\n",
"plt.savefig(\"Q101A_T_premium.png\", dpi=150)\n",
"plt.show()\n",
"\n",
"print(\"Saved tension bar plot as: Q101A_T_premium.png\")\n",
]
}
]
}