Skip to article frontmatterSkip to article content
Site not loading correctly?

This may be due to an incorrect BASE_URL configuration. See the MyST Documentation for reference.

This notebook accompanies the Forward Euler lecture notes. We implement forward and backward Euler methods, compare them against exact solutions, and explore convergence, stability, and stiffness interactively.

import numpy as np
import scipy.linalg as LA
import matplotlib.pyplot as plt

from integrate import (be1_adaptive, be2_adaptive, rk2_adaptive,
                       pc_adaptive, fwd_euler, rk2, adams_bashforth,
                       pece, adams_pece)

Forward Euler

We solve the scalar test problem $u' = au$, $u(0) = 1$, whose exact solution is $u(t) = e^{at}$. Forward Euler with step size $k$ gives $u_{n+1} = u_n + k\,f(t_n, u_n)$. The plot compares the numerical and exact solutions.

a = 2.0

def f(t, u):
    """Right-hand side of the test ODE u' = a*u."""
    return a * u

# Initial condition
u0 = 1.0

# Time setup
Tf = 1.25
k  = 1./16  # Requested time step
# Round the step count up, then shrink k so numsteps * k == Tf exactly.
numsteps = np.ceil(Tf / k).astype(int)
k = Tf / numsteps

# Output vector: vs[i] holds the forward Euler value at t_i = i*k.
vs = np.empty(numsteps+1, dtype=float)
vs[0] = u0

v = u0  # This always holds the current ODE value
for i in range(numsteps):
    # BUG FIX: the step from t_i to t_{i+1} evaluates f at t_i = k*i,
    # not k*(i-1) as before.  Harmless here because f is autonomous,
    # but wrong for any time-dependent right-hand side.
    t = k * i
    v = v + k * f(t, v)
    vs[i+1] = v

# Dense time grid for drawing the smooth exact solution.
tf = np.linspace(0, Tf, 1000)
fig, ax = plt.subplots(1, 1)
ax.plot(tf, np.exp(a * tf), lw=3, label='Exact solution')

# (Optional overlay: exact-solution family through each Euler point.)
# for i in range(1, numsteps+1):
#     C = vs[i] * np.exp(-a * ts[i])
#     ax.plot(tf, C * np.exp(a * tf), alpha=0.75)

# Discrete grid points of the numerical solution.
ts = np.linspace(0, Tf, numsteps+1)
ax.plot(ts, vs, color='k', lw=2, zorder=5, marker='o',
        label="Euler's methods")

ax.set_title('Euler\'s method solution k = {0:.4f}'.format(k))
ax.set_ylabel('$y(t)$')
ax.set_xlabel('Time')
ax.legend(loc='best')
# ax.set_ylim([0, 100])
ax.set_xlim([0, Tf])
(0.0, 1.25)
<Figure size 640x480 with 1 Axes>

Backward Euler (Secant Method)

Backward Euler is implicit: $u_{n+1} = u_n + k\,f(t_{n+1}, u_{n+1})$. At each step we must solve a nonlinear equation for $u_{n+1}$. Here we use the secant method as the nonlinear solver. We apply it to the same test problem $u' = au$ and compare with the exact solution.

a = 2.0

def f(t, u):
    """Right-hand side of the test ODE u' = a*u."""
    return a * u

# Initial condition
u0 = 1.0

# Time setup
Tf = 1.25
k  = 1./16  # Requested time step
numsteps = np.ceil(Tf / k).astype(int)
k = Tf / numsteps  # Adjusted so numsteps * k == Tf exactly

# Output vector: vs[i] holds the approximation at t_i = i*k.
vs = np.empty(numsteps+1, dtype=float)
vs[0] = u0

rtol = 1e-4   # Relative tolerance for the secant iteration
maxiter = 50  # Safety cap so a stagnating iteration cannot hang

v = u0 # This always holds the current ODE value
for i in range(numsteps):
    t = k * i  # Time at the *start* of the step

    # Two starting guesses for the secant method: the current value
    # and a forward Euler predictor.
    v0 = v
    v1 = v + k * f(t, v)

    # Backward Euler residual whose root is the new value:
    # BUG FIX: backward Euler evaluates f at the *new* time t + k
    # (v_new = v_old + k*f(t+k, v_new)); the original used f(t, .),
    # which is only correct for autonomous right-hand sides.  This also
    # matches the simplified-Newton cell below, which uses f(t + k, .).
    def h(v_old, v_new):
        return v_old + k * f(t + k, v_new) - v_new

    # Secant iteration for the root of h(v, .).
    for _ in range(maxiter):
        h1, h0 = h(v, v1), h(v, v0)
        if h1 == h0:
            # Residuals coincide (converged/stagnated); a secant update
            # would divide by zero.
            break
        v2 = v1 - h1 * (v1 - v0) / (h1 - h0)
        v0, v1 = v1, v2
        if abs(v0 - v1) / abs(v0) < rtol:
            break

    # Accept the converged value and store it.
    v = v1
    vs[i+1] = v

# Dense time grid for drawing the smooth exact solution.
tf = np.linspace(0, Tf, 1000)
fig, ax = plt.subplots(1, 1)
ax.plot(tf, np.exp(a * tf), lw=3, label='Exact solution')

# (Optional overlay: exact-solution family through each Euler point.)
# for i in range(1, numsteps+1):
#     C = vs[i] * np.exp(-a * ts[i])
#     ax.plot(tf, C * np.exp(a * tf), alpha=0.75)

# Discrete grid points of the numerical solution.
ts = np.linspace(0, Tf, numsteps+1)
ax.plot(ts, vs, color='k', lw=2, zorder=5, marker='o',
        label="Euler's methods")

ax.set_title('Euler\'s method solution k = {0:.4f}'.format(k))
ax.set_ylabel('$y(t)$')
ax.set_xlabel('Time')
ax.legend(loc='best')
# ax.set_ylim([0, 100])
ax.set_xlim([0, Tf])
(0.0, 1.25)
<Figure size 640x480 with 1 Axes>

Backward Euler (Simplified Newton)

Same problem, but now using a simplified Newton iteration as the nonlinear solver. The Jacobian is approximated by finite differences and held fixed across Newton iterates, which is cheaper per iteration than the secant method above.

from math import sqrt

# Backward Euler with simplified Newton iteration
a = 2.0

def f(t, u):
    """Right-hand side of the test ODE u' = a*u."""
    return a * u

# Initial condition
u0 = 1.0

# Time setup
Tf = 1.25
k  = 1./16  # Requested time step
numsteps = np.ceil(Tf / k).astype(int)
k = Tf / numsteps  # Adjusted so numsteps * k == Tf exactly

# Output vector: vs[i] holds the approximation at t_i = i*k.
vs = np.empty(numsteps+1, dtype=float)
vs[0] = u0

chi  = 1e-1   # Safety factor in the convergence test
rtol = 1e-4   # Kept for parity with the secant cell (unused below)
atol = 1e-4   # Absolute target for the Newton error estimate
delta = sqrt(np.finfo(float).eps)  # Finite-difference increment
maxiter = 50  # Safety cap: the theta test cannot trigger if the
              # iteration is not contracting (theta >= 1)

v = u0 # This always holds the current ODE value
for i in range(numsteps):
    t = k * i  # Time at the start of the step

    # Finite-difference approximation of df/du.
    # BUG FIX: the implicit equation below evaluates f at t + k, so the
    # Jacobian must be taken at t + k as well (the original used t;
    # harmless for autonomous f, wrong otherwise).
    df = (f(t + k, v + delta) - f(t + k, v)) / delta

    # Simplified Newton: solve v_new = v + k*f(t+k, v_new) with the
    # Jacobian frozen for the whole step.
    v_new = v
    dv = 1.0
    for _ in range(maxiter):
        dv_new = (v + k * f(t + k, v_new) - v_new) / (1.0 - k * df)
        theta = abs(dv_new) / abs(dv)  # Contraction-rate estimate
        # Converged when the update vanishes or the geometric-series
        # bound on the remaining error drops below chi * atol.
        done = dv_new == 0.0 or abs(dv_new) <= chi * atol * (1 - theta) / theta

        dv = dv_new
        v_new += dv_new
        if done:
            break

    # Accept the Newton result as the backward Euler value.
    v = v_new
    vs[i+1] = v

# Plot the Newton-based backward Euler result against the exact solution.
tf = np.linspace(0, Tf, 1000)
fig, ax = plt.subplots(1, 1)
ax.plot(tf, np.exp(a * tf), lw=3, label='Exact solution')

ts = np.linspace(0, Tf, numsteps+1)
ax.plot(ts, vs, color='k', lw=2, zorder=5, marker='o',
        label="Backward Euler (Newton)")

ax.set_title('Backward Euler (simplified Newton) k = {0:.4f}'.format(k))
ax.set_ylabel('$y(t)$')
ax.set_xlabel('Time')
ax.legend(loc='best')
ax.set_xlim([0, Tf])
(0.0, 1.25)
<Figure size 640x480 with 1 Axes>

Backward Euler with Richardson Adaptive Step Size

We add adaptive step-size control to backward Euler using Richardson extrapolation.

Setup. Starting from $(t, v)$, backward Euler (order $p = 1$) is applied in two ways:

  1. One full step of size $k$, giving $v_{\text{full}}$

  2. Two half-steps of size $k/2$, giving $v_{\text{half}}$

Both approximate $u(t+k)$, but with different leading error terms. For a method of order $p$, the local error after one step of size $h$ is $Ch^{p+1}$:

$$v_{\text{full}} = u(t+k) + Ck^{p+1} + O(k^{p+2})$$
$$v_{\text{half}} = u(t+k) + 2C\left(\tfrac{k}{2}\right)^{p+1} + O(k^{p+2}) = u(t+k) + \frac{Ck^{p+1}}{2^{p}} + O(k^{p+2})$$

The factor of 2 in the half-step version comes from accumulating error over two steps.

Error estimate. Subtracting:

$$v_{\text{half}} - v_{\text{full}} = Ck^{p+1}\left(\frac{1}{2^{p}} - 1\right) + O(k^{p+2})$$

So $|v_{\text{half}} - v_{\text{full}}|$ estimates the local error, which drives the accept/reject decision and step-size adjustment.

Extrapolation. Since we know the error structure, we can cancel the leading error term:

$$v_{\text{Rich}} = v_{\text{half}} + \frac{v_{\text{half}} - v_{\text{full}}}{2^{p} - 1}$$

For $p = 1$: $v_{\text{Rich}} = 2v_{\text{half}} - v_{\text{full}}$, which gives an order $p+1 = 2$ accurate solution for free.

Richardson extrapolation does double duty: it provides an error estimate for adaptivity and boosts the solution from order 1 to order 2.

# be1_adaptive (from the integrate package) runs scalar backward Euler
# with Richardson extrapolation, using a simplified Newton iteration
# (scalar division, no LU factorization).
a = -2.0

def f(t, u):
    """Right-hand side of the decaying test ODE u' = a*u (a < 0)."""
    return u * a

# Initial condition
u0 = 1.0

data = be1_adaptive(u0, f, te=1.2, k0=0.2, atol=1e-3, rtol=1e-3)
Scalar backward Euler: Successful
Accepted steps =  20  Rejected steps =  2
# Compare the adaptive backward Euler solution with the exact decay.
ts = data['t']
un = data['u'].flatten()
ue = u0 * np.exp(a * ts)

diff = ue - un
aerr = np.abs(diff).max()       # worst absolute error
rerr = np.abs(diff / ue).max()  # worst relative error

print('aerr = {0:.4e}; rerr = {1:.4e}'.format(aerr, rerr))

fig, axs = plt.subplots(1, 2, figsize=(10, 5))
ax_sol, ax_step = axs

# Left panel: numerical vs exact solution.
ax_sol.set_title('Number of approximation points = {0:d}'.format(ts.size))
ax_sol.plot(ts, un, label='Numerical', marker='o', color='k', ls='-')
ax_sol.plot(ts, ue, label='Exact', color='r', ls='--')
ax_sol.set_xlabel('$t$')
ax_sol.set_ylabel('$u(t)$')

# Right panel: accepted step sizes over time.
ax_step.set_title('Step sizes')
ax_step.plot(ts[:-1], np.diff(ts), marker='o', label='Numerical', color='k')
ax_step.set_xlabel('$t$')
ax_step.set_ylabel('$k$')
fig.tight_layout()
aerr = 5.6624e-04; rerr = 5.4652e-03
<Figure size 1000x500 with 2 Axes>

Backward Euler for Systems

We extend the adaptive backward Euler solver to systems of ODEs ($u \in \mathbb{R}^n$). The simplified Newton iteration now requires assembling and LU-factoring the Jacobian matrix $I - kJ_f$. We test on the van der Pol oscillator, a classic stiff system where adaptive step sizes are essential.

# be2_adaptive (from the integrate package) is backward Euler for
# systems with Richardson extrapolation; its simplified Newton step
# uses an LU factorization.
eps = 1e-2

def f(t, y):
    """Van der Pol oscillator written as a first-order system."""
    du1 = y[1]
    du2 = ((1. - y[0]**2) * y[1] - y[0]) / eps
    return np.array([du1, du2])

ic = np.array([2.0, 0.])

t0 = 0.0
te = 5.0

data1 = be2_adaptive(ic, f, te=te, stepmax=400000, k0=0.1, atol=1e-2, rtol=1e-2)
Second order backward Euler: Successful
Accepted steps =  389  Rejected steps =  136  Rejected Newton steps =  8
# Three stacked panels: both solution components plus the step sizes.
fig, axs = plt.subplots(3, 1, figsize=(6.5, 5), sharex=True)

ts = data1['t']
un = data1['u']

# Pad the step-size array with its last entry so it lines up with ts.
ks = np.diff(ts)
ks = np.hstack((ks, ks[-1]))

for j in (0, 1):
    axs[j].plot(ts, un[:, j])
    axs[j].set_ylabel('$u_{0}(t)$'.format(j + 1))

axs[2].semilogy(ts, ks)
axs[2].set_ylabel('$k$')
axs[2].set_xlabel('Time $t$')
for ax in axs:
    ax.set_xlim([t0, te])
<Figure size 650x500 with 3 Axes>

Comparing Methods: Euler, RK2, Adams-Bashforth, PECE

We implement several one-step and multistep methods on the same test problem $u' = au$ and compare accuracy at fixed step size. Methods shown: forward Euler (order 1), RK2 midpoint (order 2), Adams-Bashforth 2-step (order 2), and predictor-corrector (PECE) variants. The plot overlays all methods against the exact solution.

a = 2.0

def f(t, u):
    """Right-hand side of the test ODE u' = a*u."""
    return a * u

# Initial condition
u0 = 1.0

# Time setup
Tf = 1.25
k  = 1./8  # Requested time step
numsteps = np.ceil(Tf / k).astype(int)
k = Tf / numsteps
ts = np.arange(numsteps + 1) * k
ue = u0 * np.exp(a * ts)

def max_err(vn):
    """Worst-case error of a numerical solution against the exact one."""
    return np.max(np.abs(vn - ue))

# Run each method (all imported from the integrate package).
v1s = fwd_euler(u0, f, k, numsteps)
e1s = max_err(v1s)

v5s = rk2(u0, f, k, numsteps)
e5s = max_err(v5s)

v2s = pece(u0, f, k, numsteps)
e2s = max_err(v2s)

v3s = adams_bashforth(u0, f, k, numsteps)
e3s = max_err(v3s)

v4s = adams_pece(u0, f, k, numsteps)
e4s = max_err(v4s)

# Overlay the exact solution with the selected numerical methods.
tf = np.linspace(0, Tf, 1000)
fig, ax = plt.subplots(1, 1)
ax.plot(tf, np.exp(a * tf), lw=4, label='Exact solution')

ts = np.linspace(0, Tf, numsteps+1)
ax.plot(ts, v1s, color='k', lw=2, ls=':', zorder=4, marker='o', label='Forward Euler')
ax.plot(ts, v5s, color='k', lw=2, ls='-.', zorder=4, marker='o', label='RK2')
# Remaining methods, disabled to keep the plot readable:
# ax.plot(ts, v2s, color='r', lw=2, zorder=5, marker='o', label='PECE Euler')
# ax.plot(ts, v3s, color='b', lw=2, ls=':', zorder=4, marker='o', label='Adams')
# ax.plot(ts, v4s, color='b', lw=2, zorder=5, marker='o', label='PECE Adams')

ax.set_title('Euler\'s method solution k = {0:.4f}'.format(k))
ax.set_ylabel('$y(t)$')
ax.set_xlabel('Time')
ax.legend(loc='best')
# ax.set_ylim([0, 100])
ax.set_xlim([0, Tf])
(0.0, 1.25)
<Figure size 640x480 with 1 Axes>
# Report the worst-case error of each method (output identical to the
# original four print statements).
for label, err in [('Euler E1', e1s), ('PECE Euler E2', e2s),
                   ('Adams E3', e3s), ('PECE Adams E4', e4s)]:
    print('%s = %.4g' % (label, err))
Euler E1 = 2.869
PECE Euler E2 = 0.2608
Adams E3 = 0.9047
PECE Adams E4 = 0.03204

Adaptive Step-Size Control with RK2

We implement an adaptive RK2 (Heun) integrator using the embedded pair idea: the difference between a forward Euler step and an RK2 step estimates the local truncation error. The step size is adjusted so the estimated error stays below a user-specified tolerance. The plots show the solution and the adapted step sizes.

# rk2_adaptive (from the integrate package) is an adaptive RK2
# (midpoint) integrator: the embedded Euler/RK2 pair supplies the local
# error estimate that drives step-size control.
a = 2.0

def f(t, u):
    """Right-hand side of the growing test ODE u' = a*u."""
    return u * a

# Initial condition
u0 = 1.0

te = 1.15
data = rk2_adaptive(u0, f, te=te, k0=0.1, atol=1e-2, rtol=1e-2)
Explicit RK2: Success!
Accepted steps =  15  Rejected steps =  0
# Error report and diagnostic plots for the adaptive RK2 run.
ts = data['t']
un = data['u'].flatten()
ue = u0 * np.exp(a * ts)

diff = ue - un
aerr = np.abs(diff).max()       # worst absolute error
rerr = np.abs(diff / ue).max()  # worst relative error

print('aerr = {0:.4e}; rerr = {1:.4e}'.format(aerr, rerr))

fig, axs = plt.subplots(1, 2, figsize=(10, 5))
ax_sol, ax_step = axs

# Left panel: numerical vs exact, with the requested end time marked.
ax_sol.set_title('Number of approximation points = {0:d}'.format(ts.size))
ax_sol.plot(ts, un, marker='o', label='Numerical', color='k')
ax_sol.plot(ts, ue, label='Exact', color='r', ls='--')
ax_sol.set_xlabel('$t$')
ax_sol.set_ylabel('$u(t)$')
ax_sol.axvline(te, color='r', ls='--')

# Right panel: accepted step sizes.
ax_step.set_title('Step sizes')
ax_step.plot(ts[:-1], np.diff(ts), marker='o', label='Numerical', color='k')
ax_step.set_xlabel('$t$')
ax_step.set_ylabel('$k$')
fig.tight_layout()
aerr = 8.9004e-02; rerr = 8.9235e-03
<Figure size 1000x500 with 2 Axes>

Adaptive Predictor-Corrector

An adaptive PECE (Predict-Evaluate-Correct-Evaluate) integrator: the predictor is a forward Euler step, the corrector applies the trapezoidal rule. Their difference estimates the local error and drives step-size control. Tested on $u' = au$ with decaying solution ($a < 0$).

# pc_adaptive (from the integrate package) is an adaptive PECE
# integrator: forward Euler predictor, trapezoidal corrector; their
# difference drives the step-size control.
a = -2.0

def f(t, u):
    """Right-hand side of the decaying test ODE u' = a*u (a < 0)."""
    return u * a

# Initial condition
u0 = 1.0

data = pc_adaptive(u0, f, te=1.2, k0=0.1, atol=1e-2, rtol=1e-2)
Adaptive PECE: Successful
Accepted steps =  11  Rejected steps =  0
# Error report and diagnostic plots for the adaptive PECE run.
ts = data['t']
un = data['u'].flatten()
ue = u0 * np.exp(a * ts)

diff = ue - un
aerr = np.abs(diff).max()       # worst absolute error
rerr = np.abs(diff / ue).max()  # worst relative error

print('aerr = {0:.4e}; rerr = {1:.4e}'.format(aerr, rerr))

fig, axs = plt.subplots(1, 2, figsize=(10, 5))
ax_err, ax_step = axs

# Left panel: pointwise absolute error against the tolerance line.
ax_err.set_title('Number of approximation points = {0:d}'.format(ts.size))
ax_err.plot(ts, np.abs(un - ue), marker='o', label='Numerical', color='k')
ax_err.axhline(1e-2, color='r', ls='--')
ax_err.set_xlabel('$t$')
ax_err.set_ylabel('$u(t)$')

# Right panel: accepted step sizes.
ax_step.set_title('Step sizes')
ax_step.plot(ts[:-1], np.diff(ts), marker='o', label='Numerical', color='k')
ax_step.set_xlabel('$t$')
ax_step.set_ylabel('$k$')
fig.tight_layout()
aerr = 2.9518e-03; rerr = 2.4614e-02
<Figure size 1000x500 with 2 Axes>

Van der Pol Oscillator (Stiff System)

The van der Pol oscillator with small $\varepsilon$ is a classic stiff problem: the solution has sharp transients separated by slow phases. The adaptive RK2 integrator must take very small steps during transients and can relax during slow phases. The bottom panel shows how the step size adapts automatically.

eps = 1e-2

def f(t, y):
    """Van der Pol oscillator written as a first-order system."""
    du1 = y[1]
    du2 = ((1. - y[0]**2) * y[1] - y[0]) / eps
    return np.array([du1, du2])

ic = np.array([2.0, 0.])

t0 = 0.0
te = 5.0

data1 = rk2_adaptive(ic, f, te=te, stepmax=400000, k0=0.1, atol=1e-2, rtol=1e-2)
# data2 = pc_adaptive(ic, f, te=te, stepmax=40000, k0=0.1, atol=1e-4, rtol=1e-4)
Explicit RK2: Success!
Accepted steps =  872  Rejected steps =  103
# Three stacked panels: solution components and the adapted step sizes.
fig, axs = plt.subplots(3, 1, figsize=(6.5, 5), sharex=True)

ts = data1['t']
un = data1['u']

# Pad the step-size array with its last entry so it lines up with ts.
ks = np.diff(ts)
ks = np.hstack((ks, ks[-1]))

for j in (0, 1):
    axs[j].plot(ts, un[:, j])
    axs[j].set_ylabel('$u_{0}(t)$'.format(j + 1))

axs[2].semilogy(ts, ks)
axs[2].set_ylabel('$k$')
axs[2].set_xlabel('Time $t$')
for ax in axs:
    ax.set_xlim([t0, te])
<Figure size 650x500 with 3 Axes>

Stability of Forward Euler

We solve $u' = \lambda(u - \cos t) - \sin t$, which has exact solution $u(t) = \cos t$, for three values of $\lambda$. With fixed step size $h = 10^{-3}$: $\lambda = 0$ and $\lambda = -10$ are stable, but $\lambda = -2100$ violates the stability condition $h < 2/|\lambda| \approx 9.5 \times 10^{-4}$ and the solution blows up.

def f1(t, u):
    """RHS for the lambda = 0 case: u' = -sin(t); with u(0) = 1 the exact solution is cos(t)."""
    return -np.sin(t)

# setup problem
u0 = 1.0
t0 = 0.0
te = 2.0

# fwd_euler is imported from the integrate package

# Case 1: lambda = 0 with k = 1e-3 (k is then snapped so numsteps*k == te).
k  = 1e-3
numsteps = np.ceil(te / k).astype(int)
k = te / numsteps 
ts = np.arange(numsteps + 1) * k
ue = np.cos(ts)  # exact solution at the grid points

# Execute Euler's method
vs1 = fwd_euler(u0, f1, k, numsteps)
err = np.abs(vs1 - ue)[-1]  # Grab last error

print(f'lambda = {0.0:10.1f}, h = {k:.6e}, error = {err:.6e}')

fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs[0].plot(ts, vs1)
axs[0].plot(ts, ue, ls='--')
axs[0].set_title(r'$\lambda = 0.0$')

# Case 2: lambda = -10 — stable, since h*|lambda| = 0.01 < 2.
lam = -10.0

def f2(t, u):
    """RHS u' = lam*(u - cos t) - sin t; exact solution is still cos(t)."""
    return lam * (u - np.cos(t)) - np.sin(t)

# Solve on the same grid
k  = 1e-3
numsteps = np.ceil(te / k).astype(int)
k = te / numsteps 
ts = np.arange(numsteps + 1) * k
ue = np.cos(ts)

# Execute Euler's method
vs2 = fwd_euler(u0, f2, k, numsteps)
err = np.abs(vs2 - ue)[-1]  # Grab last error

print(f'lambda = {lam:10.1f}, h = {k:.6e}, error = {err:.6e}')

axs[1].plot(ts, vs2)
axs[1].plot(ts, ue, ls='--')
axs[1].set_title(r'$\lambda = -10.0$')

# Case 3: lambda = -2100 — unstable: h*|lambda| = 2.1 > 2, so forward
# Euler amplifies errors every step and the solution blows up.
lam = -2100.0

def f2(t, u):
    """Same RHS with the stiff coefficient lam = -2100 (closure over lam)."""
    return lam * (u - np.cos(t)) - np.sin(t)

# Solve on the same grid
k  = 1e-3
numsteps = np.ceil(te / k).astype(int)
k = te / numsteps 
ts = np.arange(numsteps + 1) * k
ue = np.cos(ts)

# Execute Euler's method
vs3 = fwd_euler(u0, f2, k, numsteps)
err = np.abs(vs3 - ue)[-1]  # Grab last error

print(f'lambda = {lam:10.1f}, h = {k:.6e}, error = {err:.6e}')

axs[2].plot(ts, vs3)
axs[2].plot(ts, ue, ls='--')
axs[2].set_title(r'$\lambda = -2100.0$')
lambda =        0.0, h = 1.000000e-03, error = 4.547667e-04
lambda =      -10.0, h = 1.000000e-03, error = 1.611611e-05
lambda =    -2100.0, h = 1.000000e-03, error = 1.452516e+76
<Figure size 1500x500 with 3 Axes>

Rescuing the unstable case

For $\lambda = -2100$, we reduce $h$ until we satisfy the stability condition $h|\lambda| \leq 2$. The output shows the error dropping dramatically once $h$ is small enough.

# Step sizes straddling the stability boundary 2/|lambda| ~ 9.52e-4.
lam = -2100.0

def f2(t, u):
    """Stiff test problem u' = lam*(u - cos t) - sin t; exact u(t) = cos t."""
    return lam * (u - np.cos(t)) - np.sin(t)

for k in [0.000976, 0.000950, 0.0008, 0.0004]:
    # Snap k so an integer number of steps reaches te exactly.
    numsteps = np.ceil(te / k).astype(int)
    k = te / numsteps
    ts = np.arange(numsteps + 1) * k
    ue = np.cos(ts)

    # Forward Euler run; record only the error at the final time.
    vs3 = fwd_euler(u0, f2, k, numsteps)
    err = np.abs(vs3 - ue)[-1]

    print(f'lambda = {lam:10.1f}, h = {k:.2e}, error = {err:.6e}')
lambda =    -2100.0, h = 9.76e-04, error = 5.881046e+35
lambda =    -2100.0, h = 9.50e-04, error = 9.406405e-08
lambda =    -2100.0, h = 8.00e-04, error = 7.922978e-08
lambda =    -2100.0, h = 4.00e-04, error = 3.960334e-08

Stiffness: Forward vs Backward Euler

We add a stiff term $-100(u - \cos t)$ to the ODE and compare forward and backward Euler across a range of step sizes. Forward Euler requires $h < 2/100 = 0.02$ for stability; for larger $h$ the error explodes. Backward Euler is unconditionally stable and converges at the expected $O(h)$ rate regardless of step size.

# Forward Euler on the stiff problem u' = -sin(t) - 100*(u - cos(t)),
# whose exact solution (for u(0) = 1) is u(t) = cos(t).  We record the
# error at t = tmax for a sequence of halving step sizes.
logkset = np.arange(1, 10)

tmax = 2.
exact = np.cos(tmax)
ex_errs = []  # final-time errors, one per step size k = 0.5**logk

for logk in logkset:
    k = 0.5**logk
    nsteps = int(tmax/k)
    v = np.cos(0)  # initial condition u(0) = 1
    for i in range(nsteps):
        t = i * k
        # FIX: the original pre-loop assignment f = -np.sin(t) was dead
        # (always overwritten before use) and rebound the module-level
        # function f to a float; use a local name for the slope instead.
        slope = -np.sin(t) - 100 * (v - np.cos(t))
        v = v + k * slope

    ex_errs.append(np.abs(v - exact))

# Log-log convergence plot for the forward Euler runs.
plt.loglog(0.5**logkset, ex_errs, marker='o')
plt.ylabel('Error')
plt.xlabel('Time step $k$')
plt.grid()
<Figure size 640x480 with 1 Axes>
# Implicit-style update on the same problem.  The active "First" form
# applies the step to u' = -sin(t) alone (f evaluated at t + k); the
# commented "Second" form is the full backward Euler update including
# the stiff term -100*(u - cos t).
# FIX: removed the dead pre-loop assignment f = -np.sin(t), which was
# never read and rebound the module-level function f to a float.
logkset = np.arange(1, 10)

tmax = 2.
exact = np.cos(tmax)
errs = []  # final-time errors, one per step size k = 0.5**logk

for logk in logkset:
    k = 0.5**logk
    nsteps = int(tmax/k)
    v = np.cos(0)  # initial condition u(0) = 1
    for i in range(nsteps):
        t = i * k
        # First
        v = v + k * (-np.sin(t + k))
        # Second
        # v = (v + k * (-np.sin(k + t) + 100*np.cos(t + k))) / (1 + k * 100)

    errs.append(np.abs(v - exact))

# Compare convergence of backward vs forward Euler on the stiff problem.
hs = 0.5**logkset
plt.loglog(hs, errs, marker='o', label='Bwd Euler')
plt.loglog(hs, ex_errs, marker='o', label='Fwd Euler')
plt.ylabel('Error')
plt.xlabel('Time step $k$')
plt.legend(loc='best')
plt.grid()
<Figure size 640x480 with 1 Axes>