# Robert Coding Interview
Simulate a coin flip using nothing but the standard Python library.
Things to look out for:
1. Docstrings
2. Functional programming
3. [DONE] Sanity-checking implementation (p vs. 1-p)
4. Defensive programming checks for input
```python
#input n = number of coin flips
#output = number of heads
import random
# Functional style: multiply all numbers from 1 to 10 in one expression.
import math
math.prod(range(1, 11))  # -> 3628800
# vs. imperative style: explicit accumulator and loop.
res = 1
for i in range(1, 11):
    res *= i
# Scratch notes: run-length encoding of a flip sequence.
# ST -> H -> T => 1.1 (final result)
# ST -> T -> H => 1.1 (final result)
# 1.0 -> 1.1 -> 1.1 ->
# H H T T H -> [('H', 2), ('T', 2), ('H', 1)]
def coin_flip(n: int, p: float):
    '''
    Simulate n biased coin flips.

    :param n: Number of coin flips to simulate.
    :param p: Probability of heads for each flip.
    :returns: Tuple ``(sequence, heads)`` where ``sequence`` is a
        run-length encoding of the flips as ``[outcome, count]`` pairs,
        e.g. ``[['H', 2], ['T', 1]]``, and ``heads`` is the total number
        of heads observed.
    :raises ValueError: If p is outside [0, 1] or n is negative.
    '''
    # Defensive programming: ValueError is the builtin for bad argument values.
    if (p > 1) or (p < 0):
        raise ValueError("p should be between 0 and 1 inclusive.")
    if n < 0:
        raise ValueError("n should be non-negative.")
    sequence = []
    heads = 0
    for _ in range(n):
        # random.random() is uniform on [0, 1), so P(draw < p) == p exactly,
        # including the p == 0.0 and p == 1.0 edge cases.
        outcome = 'H' if random.random() < p else 'T'
        if outcome == 'H':
            heads += 1
        # Run-length encode: extend the current run if the outcome matches,
        # otherwise start a new run (also covers the empty-sequence case).
        if sequence and sequence[-1][0] == outcome:
            sequence[-1][1] += 1
        else:
            sequence.append([outcome, 1])
    # Return an ordered tuple; the original set literal raised TypeError
    # (lists are unhashable) and would have been unordered anyway.
    return sequence, heads
# ans = sum([random() > 0.5 for _ in range(n)])
#return sum(map(lambda : random() > 1-p, range(n)))
def coin_flips(n: int):
    '''
    Return the number of heads from n fair coin flips.

    :param n: Number of coin flips to simulate.
    :returns: Number of heads, an int in [0, n].
    '''
    # Each flip is heads when a uniform draw on [0, 1) exceeds 0.5.
    # Note: the bare `random()` in the original called the *module*
    # (TypeError); `random.random()` is the function.
    return sum(1 for _ in range(n) if random.random() > 0.5)
```
You're given N coin flips, of which H came up heads.
What is the estimated value of `p`,
and its uncertainty bounds?
```python
#input H heads, n flips
#output p and sigma(p)
# prior Beta(1, 1) beta-distributed with equal weight on "H" and "T".
# likelihood Binom(n trials, H success)
# # because Beta distribution is a conjucate prior for Binomial likelihoods,
# # the posterior can be analytically calculated as a Beta distribution.
# posterior Beta(1+H, 1+n-H)
# to calculate uncertainty, take percentile bounds via the inverse CDF (ppf) of the posterior distribution
from scipy.stats import beta
def estimate_prob(H: int, n: int):
    '''
    Estimate the probability of heads and its uncertainty from flip data.

    Assumes a Beta(1, 1) (uniform) prior; because the Beta distribution is
    conjugate to the binomial likelihood, the posterior is Beta(1+H, 1+n-H).

    :param H: Number of heads observed.
    :param n: Total number of coin flips.
    :returns: Tuple ``(estimate, (lower, upper))`` — the posterior mean and
        the 5th/95th posterior percentiles of p.
    :raises ValueError: If H > n or either count is negative.
    '''
    if H > n:
        raise ValueError('Number of heads must not exceed number of flips')
    if (H < 0) or (n < 0):
        raise ValueError('Number of heads and number of flips must be non-negative')
    posterior = beta(1 + H, 1 + n - H)
    # Point estimate is the posterior mean (1+H)/(2+n).  The original used
    # cdf(0.5), which is P(p < 0.5) — a probability, not an estimate of p.
    expectation = posterior.mean()
    # Percentile bounds come from the inverse CDF (ppf), not the CDF:
    # ppf(0.05)/ppf(0.95) bracket a 90% central credible interval.
    lowerbound = posterior.ppf(0.05)
    upperbound = posterior.ppf(0.95)
    return expectation, (lowerbound, upperbound)
```