__author__ = 'bill'

from sqlalchemy import create_engine
import pandas as pd
import numpy as np
import math
# Connection string for my local dev MySQL server.
con_string = 'mysql://root:19940401@localhost:3306/ccar'
# Run ID, used to support multiple runs and to distinguish reports. Hardcoded to 1 for now.
run_id = 1
# Connection to the database.
con = create_engine(con_string).connect()
# LGD and UGD types. These may change for different scenarios; I have set this up to
# support other scenarios as far as possible, but the scope of the POC was the
# Adverse scenario only.
lgd_type = "downturnlgd"
ugd_type = "downturnugd"
# Region names, used to resolve which matrix is applied. Again, the naming convention
# in the source data is inconsistent.
regions = ['World', 'US', 'UK', 'EU', 'Japan', 'Developing Asia']
# Total quarters to run. Not really used any more; it was kept for debugging purposes.
quarters = 9


def setup(con):
    cpd = pd.DataFrame(con.execute("SELECT y1,y2,y3,y4,y5 from stg_cpd WHERE version_id=%i" % run_id).fetchall())
    lgd = pd.DataFrame(con.execute("SELECT lgd0,lgd1,lgd2,lgd3,lgd4,lgd5,lgd6,"
                                   "lgd7,lgd8,lgd9,lgd10,lgd11,lgd12 FROM stg_lgd "
                                   "WHERE version_id =%i AND lgdtype like '%s'"
                                   % (run_id, lgd_type))
                       .fetchall())
    ugd = pd.DataFrame(con.execute("SELECT pd1,pd2,pd3,pd4,pd5,pd6,pd7,pd8"
                                   ",pd9,pd10,pd11,pd12,pd13,pd14,pd15,pd16,"
                                   "pd17,pd18 FROM stg_ugd WHERE version_id =%i "
                                   "AND ugdtype like '%s'" % (run_id, ugd_type))
                       .fetchall())
    qual_pc = pd.DataFrame(con.execute("SELECT q1,q2,q3,q4,q5,q6,q7,q8,q9,q10 "
                                       "FROM stg_qualitative_reserve_pct"
                                       " WHERE version_id =%i "
                                       % (run_id))
                           .fetchall())

    return cpd.values, lgd[:1].values[0], ugd[:1].values[0], qual_pc[:1].values[0]


def getmatrix(con, matrix, portfolio, scenario):
    # The stg_matrix table stores each quarter's 18x18 transition matrix as a flat
    # column of 324 probabilities, ordered by fiscal year and quarter.
    # The data provided is inconsistent, so map to their naming.
    if portfolio == "FIN":
        portfolio = "FI"
    m = pd.DataFrame(con.execute("SELECT probability FROM stg_matrix "
                                 "WHERE version_id=%i AND scenario like'%s' "
                                 "AND portfolio like '%s' AND region like '%s' "
                                 "ORDER BY fiscalyear, quarter ASC"
                                 % (run_id, scenario, portfolio, matrix)).
                     fetchall())
    mat = []
    for i in range(0, quarters):
        mat.append(m[0][324 * i:324 + 324 * i].values.reshape(18, 18))
    return mat
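
# Hedged illustration (my addition, not part of the original flow): a toy version of
# the per-quarter slice-and-reshape used above, with a made-up flat probability column,
# so the 324-values-per-quarter layout is easier to see when porting this elsewhere.
def _demo_reshape_matrices(quarters_to_show=2):
    flat = np.arange(324 * quarters_to_show, dtype='float64')  # stand-in for the probability column
    per_quarter = [flat[324 * i:324 + 324 * i].reshape(18, 18) for i in range(quarters_to_show)]
    return per_quarter  # each element is one quarter's 18x18 matrix
# _demo_reshape_matrices()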


def getrecords(recordType, region, portfolio):
    # The commented-out filters were used to limit the results pulled, for testing and debugging purposes.
    if recordType == 0:
        funded = 1
        records = pd.DataFrame(con.execute("SELECT * FROM record WHERE version_id=%i "
                                           "AND category like '%s' AND region like '%s' "
                                           # "AND exposure_id='Exposure 27472'"
                                           # "AND exposure_id='Exposure 14119'"
                                           # "AND( exposure_id ='Exposure 14119' OR exposure_id ='Exposure 27467' )"
                                           "AND funded=%i AND exposure_id REGEXP 'Exposure*'"
                                           % (run_id, portfolio, region, funded))
                               .fetchall())
    elif recordType == 1:
        funded = 0
        records = pd.DataFrame(con.execute("SELECT * FROM record WHERE version_id=%i "
                                           "AND category like '%s' AND region like '%s' "
                                           # "AND exposure_id='Exposure 27472'"
                                           # "AND exposure_id='Exposure 14119'"
                                           # "AND( exposure_id ='Exposure 14119' OR exposure_id ='Exposure 27467' )"
                                           "AND funded=%i AND ugd=1.0 "
                                           "AND exposure_id REGEXP 'Exposure*'"
                                           % (run_id, portfolio, region, funded))
                               .fetchall())
    else:
        funded = 0
        records = pd.DataFrame(con.execute("SELECT * FROM record WHERE version_id=%i "
                                           "AND category like '%s' AND region like '%s' "
                                           # "AND exposure_id='Exposure 27472'"
                                           # "AND exposure_id='Exposure 14119'"
                                           # "AND( exposure_id ='Exposure 14119' OR exposure_id ='Exposure 27467' )"
                                           "AND funded=%i AND ugd<1.0 "
                                           "AND exposure_id REGEXP 'Exposure*'"
                                           % (run_id, portfolio, region, funded))
                               .fetchall())
    return records


def getgrowthrecords(recordType, region, portfolio):
    if recordType == 0:
        # Funded
        funded = 1
        records = pd.DataFrame(con.execute("SELECT * FROM record "
                                           "JOIN stg_growth_input "
                                           "ON stg_growth_input.exposure_id = record.exposure_id "
                                           "WHERE record.version_id=%i "
                                           "AND category like '%s' AND matrix_name like '%s' "
                                           "AND funded=%i AND record.exposure_id REGEXP 'Growth*' "
                                           % (run_id, portfolio, region, funded))
                               .fetchall())
    elif recordType == 1:
        # Non-Funded and Non-Traded
        funded = 0
        records = pd.DataFrame(con.execute("SELECT * FROM record "
                                           "JOIN stg_growth_input "
                                           "ON stg_growth_input.exposure_id = record.exposure_id "
                                           "WHERE record.version_id=%i "
                                           "AND category like '%s' AND matrix_name like '%s' "
                                           "AND funded=%i AND record.exposure_id REGEXP 'Growth*' "
                                           "AND UGD=1.0"
                                           % (run_id, portfolio, region, funded))
                               .fetchall())
    else:
        # Non-Funded and Traded
        funded = 0
        records = pd.DataFrame(con.execute("SELECT * FROM record "
                                           "JOIN stg_growth_input "
                                           "ON stg_growth_input.exposure_id = record.exposure_id "
                                           "WHERE record.version_id=%i "
                                           "AND category like '%s' AND matrix_name like '%s' "
                                           "AND funded=%i AND record.exposure_id REGEXP 'Growth *' "
                                           "AND UGD<1.0"
                                           % (run_id, portfolio, region, funded))
                               .fetchall())
    return records

def setupmatrix(pds, exposures, lgd, facility, n):
    # Build the 18-by-n exposure matrix: each trade's exposure is placed in the row
    # for its PD rating, and its LGD is looked up from the LGD table by facility.
    records = []
    LGD = []
    for t in range(0, n):
        temp = np.zeros(18)
        temp[pds[t] - 1] = exposures[t]
        LGD.append(lgd[facility[t]])
        records.append(temp)
    LGD = np.asarray(LGD)
    return np.asarray(records).astype('float64').T, LGD.T
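
# Hedged illustration (my addition): a tiny example of setupmatrix with made-up numbers,
# showing where the exposures land. Three trades, ratings 2, 5 and 18, and a stand-in
# 13-bucket LGD table; none of these values come from the real data.
def _demo_setupmatrix():
    toy_lgd_table = np.linspace(0.1, 0.7, 13)  # stand-in for lgd_table
    records, LGD = setupmatrix(pds=[2, 5, 18],
                               exposures=[100.0, 250.0, 40.0],
                               lgd=toy_lgd_table,
                               facility=[0, 3, 12],
                               n=3)
    # records has shape (18, 3): records[1, 0] == 100.0, records[4, 1] == 250.0, records[17, 2] == 40.0
    return records, LGD
# _demo_setupmatrix()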

def calc_CUM_XPDFFQ(cpd_t, cpd_t_1, t, T):
    # For the detailed steps see the Reserve Calculator Excel provided by BNYM.
    # Test cases (call -> expected value):
    # calc_CUM_XPDFFQ(0, 0, 5.4794521, 5)             -> 1
    # calc_CUM_XPDFFQ(0.98771, 0.97942, 2.0027397, 2) -> 0.0123128076995
    # calc_CUM_XPDFFQ(0.9953, 0.99882, 0.6767123, 1)  -> 0.00318296917833
    # calc_CUM_XPDFFQ(0.445, 0.322, 5.6788999, 5)     -> 0.601329280161
    # print cpd_t, cpd_t_1, t, T
    if t < 1:
        return 1 - math.pow(cpd_t, t)
    elif cpd_t == 0:
        return 1
    elif t > 5:
        return 1 - math.pow(cpd_t, t / 5)
    else:
        return 1 - cpd_t * math.pow(cpd_t_1 / cpd_t, t - T)
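
# Hedged sanity check (my addition), using the expected values quoted in the comments
# above; call it manually if you want to verify each branch of calc_CUM_XPDFFQ.
def _check_calc_CUM_XPDFFQ():
    assert calc_CUM_XPDFFQ(0, 0, 5.4794521, 5) == 1
    assert abs(calc_CUM_XPDFFQ(0.98771, 0.97942, 2.0027397, 2) - 0.0123128076995) < 1e-9
    assert abs(calc_CUM_XPDFFQ(0.9953, 0.99882, 0.6767123, 1) - 0.00318296917833) < 1e-9
    assert abs(calc_CUM_XPDFFQ(0.445, 0.322, 5.6788999, 5) - 0.601329280161) < 1e-9
# _check_calc_CUM_XPDFFQ()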

def calc_reserve(cpd_table, t, T_0, T_1):
    t_reserve = []
    for pds in range(0, 18):
        # resolving CPD_T
        if t <= 0:
            cpd_t = 0
        else:
            cpd_t = (1 - cpd_table[pds][T_0 - 1])
        # resolving CPD_T+1
        if t <= 0:
            cpd_t_1 = 0
        else:
            cpd_t_1 = (1 - cpd_table[pds][T_1 - 1])
        # calculating CUM_XPDFFQ_PC
        cum_xvd = calc_CUM_XPDFFQ(cpd_t, cpd_t_1, t, T_0)
        # final RSVPC
        t_reserve.append(cum_xvd)
    t_reserve = np.asarray(t_reserve)
    return t_reserve

def getReserve(cpd_table, t, T_0, T_1, lgd_table, lgd_rating, n):
    RSV_PC = []
    cum = []
    for i in range(0, n):
        cum.append(calc_reserve(cpd_table, t[i], int(T_0[i]), int(T_1[i])))
    for i in range(0, n):
        RSV_PC.append(cum[i] * lgd_table[lgd_rating[i]])
    return np.asarray(RSV_PC), np.asarray(cum)

def migrate(mat, records, pds, n):
    records = np.dot(mat, records)
    for i in range(0, n):
        # i is the column index into the records matrix, i.e. the trade index
        applicable_sum = 0
        for m in range(0, pds[i] - 1):
            # iterate over all ratings better than the country cap,
            # sum them up and set those cells to 0
            applicable_sum += records[m, i]
            records[m, i] = 0
        # then add the applicable sum to the country rating cap's cell
        records[pds[i] - 1, i] += applicable_sum
    return records
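
# Hedged illustration (my addition): migrate first applies the transition matrix, then
# enforces the country rating cap -- any exposure sitting at a rating better than the cap
# is swept down into the cap's row. Toy example with 3 ratings (instead of 18) and one
# trade capped at rating 2; the identity matrix isolates the cap logic from migration.
def _demo_migrate():
    identity = np.eye(3)                     # no actual migration
    trade = np.array([[1.0], [0.0], [0.0]])  # all exposure currently at rating 1
    capped = migrate(identity, trade, pds=[2], n=1)
    # capped[0, 0] == 0.0 and capped[1, 0] == 1.0: the exposure was pushed down to the cap
    return capped
# _demo_migrate()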


def starting_commitment(mat, records, rating, n):
    records = migrate(mat, records, rating, n)
    # rows 16 and 17 are the NPA buckets; keep a copy before they are adjusted
    npa = np.asarray(records[16:18]).copy()
    return records, npa

def ending_commitment(mat, records, npa, lgd):
    # The formula is chargeoff = npa * lgd, computed element-wise with NumPy broadcasting.
    chargeoff = np.multiply(npa, lgd)
    return records, chargeoff

def starting_reserve(i, records, input, mat, qualitative_percentage_table, exposure):
    if i == 0:
        startingPoint = input / (1 - qualitative_percentage_table[i])
        starting_reserve = records * (startingPoint / exposure).values
    else:
        starting_reserve = np.dot(mat[i].T, input)
    return starting_reserve

def quantitative_reserve(RSV_PC, endingCommitment, recordType, UGD):
    if recordType == 2:
        UGD = UGD.reshape(18, 1)
        return (endingCommitment * RSV_PC) * UGD
    else:
        return endingCommitment * RSV_PC

def export(ids, y9c, region, funded, quarter, stage, records, npas):
    name = ""
    if funded:
        name += "Funded "
    else:
        name += "Non-Funded "
    name += stage
    name += ".csv"
    output = pd.DataFrame((np.vstack((records, npas))).T, index=ids)
    output['quarters'] = pd.Series(quarter, index=output.index)
    output['y9c'] = y9c.values
    output.to_csv(name, ",", header=False, mode='a')

def region_matrix_mapping(region):
    if region == "World":
        m_name = "US"
    elif region == 'Japan':
        m_name = "JP"
    elif region == "Developing Asia":
        m_name = 'DA'
    elif region in regions:
        m_name = region
    return region, m_name
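
# Examples (my addition): the mapping only resolves which transition matrix name a region uses.
# region_matrix_mapping('Japan')           -> ('Japan', 'JP')
# region_matrix_mapping('World')           -> ('World', 'US')
# region_matrix_mapping('Developing Asia') -> ('Developing Asia', 'DA')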

def prepare(records, n):
    ids = records[5]
    y9c = records[4]
    rating = records[14]
    pds = records[6]
    exposure = records[12]
    startingQuantitativeReserve = records[13]
    facility = records[7]
    RSV_PC, cum = getReserve(cpd_table, records[9].values, records[10].values, records[11].values, lgd_table,
                             records[7].values, n)
    RSV_PC = RSV_PC.T
    return ids, y9c, rating, pds, exposure, startingQuantitativeReserve, RSV_PC, cum, facility

def runreport(region, recordType, portfolio, scenario, qualitative_percentage_table):

    region, m_name = region_matrix_mapping(region)
    mat = getmatrix(con, m_name, portfolio, scenario)
    if recordType == 0:
        # Funded record migration
        funded = True
        records = getrecords(0, region, portfolio)
    elif recordType == 1:
        funded = False
        # Non-Funded and Non-Traded record migration:
        # filter Non-Funded records whose input UGD at the record level is 1;
        # during the calculation, use UGD = 1 for the reserve
        records = getrecords(1, region, portfolio)
    else:
        funded = False
        # Non-Funded and Traded records:
        # use the PD-level UGD in the calculation
        records = getrecords(2, region, portfolio)
    n = int(records.shape[0])
    if n == 0:
        return
    ids, y9c, rating, pds, exposure, startingQuantitativeReserve, RSV_PC, cum, facility = prepare(records, n)
    records, LGD = setupmatrix(pds, exposure, lgd_table, facility, n)
    export(ids, y9c, region, funded, 0, "reserve", RSV_PC, np.zeros((2, n)))
    export(ids, y9c, region, funded, 0, "cumulative percentage", cum.T, np.zeros((2, n)))
    for i in range(0, 9):
        # starting commitment is the 18-by-n matrix migrated with the transition matrix
        startingCommitment, npa = starting_commitment(mat[i].T, records, rating, n)
        export(ids, y9c, region, funded, i, "starting commitment", startingCommitment, npa)
        # ending commitment is basically the same as starting commitment,
        # but NPAs and charge-offs are adjusted
        endingCommitment, chargeOff = ending_commitment(mat[i].T, startingCommitment, npa, LGD)
        # Adjustment needed because the Non-Funded charge-off has to be calculated on the Funded side,
        # and the Non-Traded and Traded sub-categories of Non-Funded use different UGD methods.
        # NPAs are removed before entering the reserve.
        endingCommitment[16] = np.zeros(n)
        endingCommitment[17] = np.zeros(n)
        if recordType == 2:
            chargeOff = chargeOff * ugd_table.reshape(18, 1)[16:18]
            npa = npa * ugd_table.reshape(18, 1)[16:18]
        if recordType == 0:
            export(ids, y9c, region, funded, i, "ending commitment", endingCommitment, npa - chargeOff)
        else:
            export(ids, y9c, region, funded, i, "ending commitment", endingCommitment, np.zeros((2, n)))
            export(ids, y9c, region, funded, i, "starting commitment adjustment", np.zeros((18, n)), npa)
            # Exporting the actual NPA table
            export(ids, y9c, region, 1, i, "NPA", np.zeros((18, n)), npa - chargeOff)
            if i < 8:
                export(ids, y9c, region, 1, i + 1, "NPA", np.zeros((18, n)), npa - chargeOff)
            if i < 7:
                export(ids, y9c, region, 1, i + 2, "NPA", np.zeros((18, n)), npa - chargeOff)
            if i < 6:
                export(ids, y9c, region, 1, i + 3, "NPA", np.zeros((18, n)), npa - chargeOff)
            # Charge-off is effectively a negative number
            export(ids, y9c, region, funded, i, 'chargeoff', np.zeros((18, n)), -chargeOff)
            # During the SQL load, all adjustments are labelled Funded, because they are
            # adjustments made from the Non-Funded side to the Funded side.
        # starting reserve uses the starting commitment (without OECD enforcement), so the
        # migration is repeated here; this will be refactored later to improve speed
        if i == 0:
            startingReserve = starting_reserve(i, np.dot(mat[i].T, records),
                                               startingQuantitativeReserve, mat, qual_pc, exposure)
        else:
            startingReserve = starting_reserve(i, np.dot(mat[i].T, records), endingReserve, mat, qual_pc, exposure)
        export(ids, y9c, region, funded, i, "starting reserve", startingReserve, np.zeros((2, n)))
        # quantitative reserve is based on ending commitment and the reserve percentage
        # quantitativeReserve = RSV_PC.T
        quantitativeReserve = quantitative_reserve(RSV_PC, endingCommitment, recordType, ugd_table)
        export(ids, y9c, region, funded, i, "quantitative reserve", quantitativeReserve, np.zeros((2, n)))
        endingReserve = quantitativeReserve / (1 - qual_pc[i + 1])
        export(ids, y9c, region, funded, i, "ending reserve", endingReserve, np.zeros((2, n)))
        qualitativeReserve = endingReserve - quantitativeReserve
        export(ids, y9c, region, funded, i, "qualitative reserve", qualitativeReserve, np.zeros((2, n)))
        # ending commitment becomes the starting point for the next quarter
        records = endingCommitment

def adjust(records, growth, pd, n, quarter):
    for i in range(0, n):
        j = pd[i]
        records[j - 1][i] += growth[i][quarter]
    return records

def rungrowthreport(region, recordType, portfolio, scenario, qualitative_percentage_table):
    region, m_name = region_matrix_mapping(region)
    mat = getmatrix(con, m_name, portfolio, scenario)

    if recordType == 0:
        # Funded record migration
        funded = True
        records = getgrowthrecords(0, region, portfolio)
    elif recordType == 1:
        funded = False
        # Non-Funded and Non-Traded record migration:
        # filter Non-Funded records whose input UGD at the record level is 1;
        # during the calculation, use UGD = 1 for the reserve
        records = getgrowthrecords(1, region, portfolio)
    else:
        funded = False
        # Non-Funded and Traded records:
        # use the PD-level UGD in the calculation
        records = getgrowthrecords(2, region, portfolio)
    n = int(records.shape[0])
    if n == 0:
        return
    growth = records[(range(21, 30))].values

    ids, y9c, rating, pds, exposure, startingQuantitativeReserve, RSV_PC, cum, facility = prepare(records, n)
    records, LGD = setupmatrix(pds, exposure, lgd_table, facility, n)
    for i in range(0, 9):
        if i == 0:
            startingCommitment = records
            npa = np.asarray(startingCommitment[16:18]).copy()
        else:
            # starting commitment is the 18-by-n matrix migrated with the transition matrix
            startingCommitment, npa = starting_commitment(mat[i].T, records, rating, n)

        export(ids, y9c, region, funded, i, "starting commitment", startingCommitment, npa)
        # ending commitment is basically the same as starting commitment, but NPAs and charge-offs are adjusted;
        # for growth exposures, endingCommitment is just the startingCommitment with certain fields added from the input
        endingCommitment, chargeOff = ending_commitment(mat[i - 1].T, startingCommitment, npa, LGD)
        endingCommitment = adjust(startingCommitment, growth, pds, n, i)
        # Adjustment needed because the Non-Funded charge-off has to be calculated on the Funded side,
        # and the Non-Traded and Traded sub-categories of Non-Funded use different UGD methods.
        # NPAs are removed before entering the reserve.
        endingCommitment[16] = np.zeros(n)
        endingCommitment[17] = np.zeros(n)
        if recordType == 2:
            chargeOff = chargeOff * ugd_table.reshape(18, 1)[16:18]
            npa = npa * ugd_table.reshape(18, 1)[16:18]
        if recordType == 0:
            export(ids, y9c, region, funded, i, "ending commitment", endingCommitment, npa - chargeOff)
        else:
            export(ids, y9c, region, funded, i, "ending commitment", endingCommitment, np.zeros((2, n)))
            export(ids, y9c, region, funded, i, "starting commitment adjustment", np.zeros((18, n)), npa)
            # Exporting the actual NPA table
            export(ids, y9c, region, 1, i, "NPA", np.zeros((18, n)), npa - chargeOff)
            if i < 8:
                export(ids, y9c, region, 1, i + 1, "NPA", np.zeros((18, n)), npa - chargeOff)
            if i < 7:
                export(ids, y9c, region, 1, i + 2, "NPA", np.zeros((18, n)), npa - chargeOff)
            if i < 6:
                export(ids, y9c, region, 1, i + 3, "NPA", np.zeros((18, n)), npa - chargeOff)
            # Charge-off is effectively a negative number
            export(ids, y9c, region, funded, i, 'chargeoff', np.zeros((18, n)), -chargeOff)
            # During the SQL load, all adjustments are labelled Funded, because they are
            # adjustments made from the Non-Funded side to the Funded side.

        # starting reserve uses the starting commitment (without OECD enforcement), so the
        # migration is repeated here; this will be refactored later to improve speed
        if i == 0:
            startingReserve = starting_reserve(i, np.dot(mat[i].T, records),
                                               startingQuantitativeReserve, mat, qual_pc, exposure)
        else:
            startingReserve = starting_reserve(i, np.dot(mat[i].T, records), endingReserve, mat, qual_pc, exposure)
        export(ids, y9c, region, funded, i, "starting reserve", startingReserve, np.zeros((2, n)))

        # quantitative reserve is based on ending commitment and the reserve percentage
        # quantitativeReserve = RSV_PC.T
        quantitativeReserve = quantitative_reserve(RSV_PC, endingCommitment, recordType, ugd_table)
        export(ids, y9c, region, funded, i, "quantitative reserve", quantitativeReserve, np.zeros((2, n)))
        endingReserve = quantitativeReserve / (1 - qual_pc[i + 1])
        export(ids, y9c, region, funded, i, "ending reserve", endingReserve, np.zeros((2, n)))
        qualitativeReserve = endingReserve - quantitativeReserve
        export(ids, y9c, region, funded, i, "qualitative reserve", qualitativeReserve, np.zeros((2, n)))
        # ending commitment becomes the starting point for the next quarter
        records = endingCommitment


if __name__ == "__main__":
    regions = ['World', 'US', 'UK', 'EU', 'Japan', 'Developing Asia']  # note: we were not given Developing Asia matrices
    matrices = ['US', 'UK', 'EU', 'DA', 'JP']
    portfolios = ['C&I', 'FIN']
    for r in regions:
        cpd_table, lgd_table, ugd_table, qual_pc = setup(con)

        runreport(r, 0, "C&I", "Adverse", qual_pc)
        runreport(r, 1, "C&I", "Adverse", qual_pc)
        runreport(r, 2, "C&I", "Adverse", qual_pc)

        runreport(r, 0, "FIN", "Adverse", qual_pc)
        runreport(r, 1, "FIN", "Adverse", qual_pc)
        runreport(r, 2, "FIN", "Adverse", qual_pc)

        rungrowthreport(r, 0, "C&I", "Adverse", qual_pc)
        rungrowthreport(r, 0, "FIN", "Adverse", qual_pc)
        rungrowthreport(r, 1, "C&I", "Adverse", qual_pc)
        rungrowthreport(r, 1, "FIN", "Adverse", qual_pc)
        rungrowthreport(r, 2, "C&I", "Adverse", qual_pc)
        rungrowthreport(r, 2, "FIN", "Adverse", qual_pc)

    # a = np.genfromtxt('test.txt', delimiter=',')
    # b = np.zeros((18, 1))
    # b[7] = 2045000000
    # print np.dot(a, b)

Comments
Andy Lanng 29-Jul-15 4:21am    
You will have to pick it apart and rewrite it in Java. The two don't run on even remotely similar frameworks, so there won't be any great converters.
Member 11870841 29-Jul-15 4:52am    
Where do I need to start with the flow of the code?
Richard MacCutchan 29-Jul-15 6:58am    
Start at the beginning. Python and Java are not similar enough that you can do a direct conversion.

1) Learn Python.
2) Learn Java.
3) Understand exactly what the code does in Python and look for equivalent functionality in the Java framework. Recode using that functionality.

Alternatively, look for Java code that performs the same job, and hand that in to your tutor instead... :laugh:
Comments
Member 11870841 29-Jul-15 4:50am    
Thanks, I know Java and I understood some parts of the Python code, but I am not familiar with matrices — what are they?
You can install Jython[^]. Then you can call the Python code from Java without converting it to Java.