Create an Azure Function

connection.py

import os

import pyodbc
import sqlalchemy


def get_engine():
    connection_string = (
        'DRIVER={ODBC Driver 17 for SQL Server};'
        f'SERVER={os.environ["DB_SERVER"]};'
        f'DATABASE={os.environ["DB_SCHEMA"]};'
        f'UID={os.environ["DB_USER"]};'
        f'PWD={os.environ["DB_PWD"]};'
        'Trusted_Connection=no;'
    )
    connection_url = sqlalchemy.engine.URL.create(
        "mssql+pyodbc",
        query=dict(odbc_connect=connection_string)
    )
    return sqlalchemy.create_engine(connection_url, fast_executemany=True)

def get_connection_details():
    conn = pyodbc.connect(
        'DRIVER={ODBC Driver 17 for SQL Server};'
        f'SERVER={os.environ["DB_SERVER"]};'
        f'DATABASE={os.environ["DB_SCHEMA"]};'
        f'UID={os.environ["DB_USER"]};'
        f'PWD={os.environ["DB_PWD"]}'
    )
    return conn

# Settings for the document-intelligence (ADI) pipeline, read once at import time
var_blob_connection_string = os.environ["adi_blob_connection_string"]
var_container_name = os.environ["adi_container_name"]
var_subfolder_path = os.environ["adi_subfolder_path"]
var_endpoint = os.environ["adi_endpoint"]
var_key = os.environ["adi_key"]
var_formUrl = os.environ["adi_formUrl"]
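For reference, here is how these two helpers are consumed by the calculation modules further down (an illustrative sketch, not a file in the app). Note that a single pyodbc query parameter belongs in a one-element list; (value) without a comma is just a scalar in Python:

import pandas as pd
from connection import get_engine, get_connection_details

conn = get_connection_details()
df = pd.read_sql("EXEC dbo.sp_Get_Capitaliq ?", conn, params=[42])  # 42 is a made-up id
conn.close()

engine = get_engine()
df.to_sql("#scratch_table", engine, if_exists="replace", index=False)  # same pattern as the modules below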

constants.py

STORED_PROCEDURES = {
    'LOAD_EBITDA_DCF_RESULT': 'dbo.sp_Load_DCF_Exitmultiples_Ebitda_Result_Field',
    'LOAD_EBITDA_DCF_CASHFLOW_RESULT': 'dbo.sp_Load_DCF_Exitmultiples_Ebitda_Result_Cashflow_Field',
    'LOAD_SALES_DCF_RESULT': 'dbo.sp_Load_DCF_Exitmultiples_Sales_Result_Field',
    'LOAD_SALES_DCF_CASHFLOW_RESULT': 'dbo.sp_Load_DCF_Exitmultiples_Sales_Result_Cashflow_Field',
    'LOAD_EBITDA_CALIB_RESULT': 'dbo.sp_Load_EBITDA_Calibration_Result_Field',
    'LOAD_EBITDA_LIQUIDITY_RESULT': 'dbo.sp_Load_Ebitda_Liquidity_Result_Field',
    'LOAD_DCF_GG_RESULT': 'dbo.sp_Load_Gordengrowth_Result_Field',
    'LOAD_DCF_GG_CASHFLOW_RESULT': 'dbo.sp_Load_Gordengrowth_Result_Cashflow_Field',
    'LOAD_SALES_CALIB_RESULT': 'dbo.sp_Load_Sales_Calibration_Result_Field',
    'LOAD_SALES_LIQUIDITY_RESULT': 'dbo.sp_Load_Sales_Liquidity_Result_Field',
    'LOAD_AFS_DOCUMENT': 'dbo.sp_Load_AFS_Consolidated',
    'LOAD_MA_DOCUMENT': 'dbo.sp_Load_MA_Consolidated',
    'LOAD_AFS_BACKTESTING': 'dbo.sp_Load_Afs_Ma_Backtesting_Afs',
    'LOAD_MA_BACKTESTING': 'dbo.sp_Load_Afs_Ma_Backtesting_Ma',
    'LOAD_ASSET_VAL_MAP': 'dbo.sp_Merge_Asset_Valuation_Map',
    'LOAD_BUDGET_BACK_AFS': 'dbo.sp_Load_Budget_Backtesting_Afs',
    'LOAD_BUDGET_BACK_MA': 'dbo.sp_Load_Budget_Backtesting_Ma',
    'LOAD_SENSITIVITY_CHECK': 'dbo.sp_Load_Sensitivity_Check'
}

percentile_25 = 0.25
percentile_75 = 0.75
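The procedure names are looked up by key and executed through SQLAlchemy everywhere below; the pattern, in brief:

from sqlalchemy import text
from connection import get_engine
from constants import STORED_PROCEDURES

engine = get_engine()
proc_name = STORED_PROCEDURES['LOAD_SENSITIVITY_CHECK']
with engine.begin() as connection:  # begin() commits on success and rolls back on error
    connection.execute(text(f"EXEC {proc_name}"))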


function_app.py

import azure.functions as func
import logging
import json
import multiples_sales_caliberation
import multiples_ebitda_caliberation
import multiples_sales_liquidity
import multiples_ebitda_liquidity
import gorden_growth
import dcf_exit_multiples_sales
import dcf_exit_multiples_ebitda
import extract_document_data
import asset_sensitivity_check
from response_handling import handle_error, handle_success

from fn_pwc_cit_rates_scaper_bronze import bp as cit_bronze_scraper
from fn_cit_rates_bronze_silver_transformation import bp as cit_bronze_silver_transformation
from fn_risk_premium_scraper_bronze import bp as risk_premium_scraper_bronze
from fn_risk_premium_bronze_gold_transformation import bp as risk_premium_bronze_gold_transformation


app = func.FunctionApp(http_auth_level=func.AuthLevel.FUNCTION)

app.register_functions(cit_bronze_scraper)
app.register_functions(cit_bronze_silver_transformation)
app.register_functions(risk_premium_scraper_bronze)
app.register_functions(risk_premium_bronze_gold_transformation)
# TODO: the remaining handlers should be moved to separate files in another PR
# (see the shared-helper sketch after this listing)

@app.route(route="fn_multiples_sales_caliberation")
def fn_multiples_sales_caliberation(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = multiples_sales_caliberation.multiples_sales_caliberation(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:  # Exception already covers RuntimeError and ValueError
                result_json = handle_error(e)
        except ValueError as e:
            # Body was not valid JSON; handle it so result_json is bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )
   
@app.route(route="fn_multiples_ebitda_caliberation")
def fn_multiples_ebitda_caliberation(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = multiples_ebitda_caliberation.multiples_ebitda_caliberation(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )
   
@app.route(route="fn_multiples_sales_liquidity")
def fn_multiples_sales_liquidity(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = multiples_sales_liquidity.multiples_sales_liquidity(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )    
   
@app.route(route="fn_multiples_ebitda_liquidity")
def fn_multiples_ebitda_liquidity(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = multiples_ebitda_liquidity.multiples_ebitda_liquidity(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )      
   
@app.route(route="fn_gorden_growth")
def fn_gorden_growth(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = gorden_growth.gorden_growth(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )      

@app.route(route="fn_dcf_exit_multiples_sales")
def fn_dcf_exit_multiples_sales(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = dcf_exit_multiples_sales.dcf_exit_multiples_sales(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )      

@app.route(route="fn_dcf_exit_multiples_ebitda")
def fn_dcf_exit_multiples_ebitda(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            Methodology_Id = req_body.get('Methodology_Id','Unknown')
            try:
                result = dcf_exit_multiples_ebitda.dcf_exit_multiples_ebitda(Asset_Valuation_Id, Methodology_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )
   
@app.route(route="fn_extract_document_data")
def fn_extract_document_data(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Fund_Val_Doc_Map_Id = req_body.get('Asset_Fund_Val_Doc_Map_Id', 'Unknown')
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            try:
                result = extract_document_data.fn_extract_document_data(Asset_Fund_Val_Doc_Map_Id, Asset_Valuation_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )    
   
@app.route(route="fn_sensitivity_check")
def fn_sensitivity_check(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')
 
    name = req.params.get('name')
    if not name:
        try:
            req_body = req.get_json()
            Asset_Valuation_Id = req_body.get('Asset_Valuation_Id', 'Unknown')
            try:
                result = asset_sensitivity_check.asset_sensitivity_check(Asset_Valuation_Id)
                result_json = handle_success(result)
            except Exception as e:
                result_json = handle_error(e)
        except ValueError as e:
            # Invalid JSON body; keeps result_json bound below
            result_json = handle_error(e)
        else:
            name = req_body.get('name')
 
    if name:
        return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
    else:
        return func.HttpResponse(
             json.dumps(result_json),
             mimetype="application/json",
             status_code=200
        )        
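function_app.py imports handle_success and handle_error from response_handling, which is not reproduced in this post. A minimal sketch of what that module could look like, assuming both helpers return a dict that json.dumps can serialize:

response_handling.py (sketch)

import logging

def handle_success(result):
    # result is the JSON string produced by the calculation module
    return {"status": "success", "data": result}

def handle_error(error):
    logging.error("Function failed: %s", error)
    return {"status": "error", "message": str(error)}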
   
             
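As the TODO above notes, the nine HTTP handlers are near-identical. A shared helper along these lines (a sketch, assuming each calculation module exposes a callable taking the ids in order) would remove most of the duplication:

import json
import azure.functions as func
from response_handling import handle_error, handle_success

def run_calculation(req: func.HttpRequest, calc, param_names) -> func.HttpResponse:
    # Read the ids from the JSON body, run the calculation, wrap the outcome
    try:
        req_body = req.get_json()
    except ValueError as e:
        result_json = handle_error(e)
    else:
        args = [req_body.get(p, 'Unknown') for p in param_names]
        try:
            result_json = handle_success(calc(*args))
        except Exception as e:
            result_json = handle_error(e)
    return func.HttpResponse(json.dumps(result_json),
                             mimetype="application/json",
                             status_code=200)

Each route then reduces to a one-liner, e.g. return run_calculation(req, gorden_growth.gorden_growth, ['Asset_Valuation_Id', 'Methodology_Id']).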


host.json

{
  "version": "2.0",
  "logging": {
    "applicationInsights": {
      "samplingSettings": {
        "isEnabled": true,
        "excludedTypes": "Request"
      }
    }
  },
  "extensionBundle": {
    "id": "Microsoft.Azure.Functions.ExtensionBundle",
    "version": "[4.*, 5.0.0)"
  }
}

local.settings.json

{
  "IsEncrypted": false,
  "Values": {
    "AzureWebJobsStorage": "",
    "FUNCTIONS_WORKER_RUNTIME": "python",
    "DB_SERVER": "eunpafc038sql01.database.windows.net",
    "DB_SCHEMA": "VIA_DEV"  ,
    "DB_USER": "dev_VIA_app_sql_login",
    "DB_PWD": "fgHcFJD1tyiblaO",
    "adi_blob_connection_string": "DefaultEndpointsProtocol=https;AccountName=eunpafc038sta03;AccountKey=enfYyJwB1sZwWFtAJo0LY3Ojg8th5i7kfIq14RcryAD2UT1fmZpwH4AevayZ+wEubco3uLJNB0YS+AStM4TCEA==;EndpointSuffix=core.windows.net",
    "adi_container_name": "datasourcefiles",
    "adi_subfolder_path": "via-dev/",
    "adi_endpoint": "https://eunpfsowamsafr01.cognitiveservices.azure.com/",
    "adi_key": "5990419e88544988bbb1e6d769fff8c4",
    "adi_formUrl": "https://eunpafc038sta02.blob.core.windows.net/via-dev/"
   
  }
}
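Every module reads these values straight from os.environ, so a missing key only surfaces as a KeyError at request time. A small fail-fast check at startup (a sketch; the key list mirrors the settings above) makes misconfiguration obvious:

import os

REQUIRED_SETTINGS = [
    "DB_SERVER", "DB_SCHEMA", "DB_USER", "DB_PWD",
    "adi_blob_connection_string", "adi_container_name",
    "adi_subfolder_path", "adi_endpoint", "adi_key", "adi_formUrl",
]

missing = [key for key in REQUIRED_SETTINGS if not os.environ.get(key)]
if missing:
    raise RuntimeError(f"Missing required settings: {', '.join(missing)}")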

requirements.txt

# DO NOT include azure-functions-worker in this file
# The Python Worker is managed by Azure Functions platform
# Manually managing azure-functions-worker may cause unexpected issues

azure-functions
pyodbc
pandas
IPython
sqlalchemy
scipy
numpy
lxml
requests
beautifulsoup4
azure-core
azure-ai-documentintelligence
azure-storage-blob

gorden_growth.py

import pandas as pd
import pyodbc
from warnings import filterwarnings
from sqlalchemy import text

from connection import get_engine, get_connection_details
from constants import STORED_PROCEDURES, percentile_75, percentile_25

def gorden_growth( Asset_Valuation_Id,Methodology_Id):
    filterwarnings("ignore", category=UserWarning, message='.*pandas only supports SQLAlchemy connectable.*')
   
    engine = get_engine()
    conn=get_connection_details()
   
   
    try:

        dci = pd.read_sql("EXEC dbo.sp_Get_Capitaliq ?", conn, params=[Asset_Valuation_Id])
        dcv = pd.read_sql("EXEC dbo.sp_Get_Client_Valuation ?", conn, params=[Asset_Valuation_Id])
        dac = pd.read_sql("EXEC dbo.sp_Get_Asset_Valuation ?, ?", conn, params=[Asset_Valuation_Id, Methodology_Id])
        if dac.empty:
            raise ValueError("No rows returned from Asset Configuration.")
        conn.close()
    except pyodbc.Error as e:
        print(f"Error: Unable to connect to the database. {e}")
        error_message = f"An unexpected error occurred: {e}"
        raise RuntimeError(error_message)
    except ValueError as e:
        # Handle specific exception for each query if no rows returned
        print(f"Program execution stopped due to an error: {e}")
        raise  # Re-raise the exception if needed
    except Exception as e:
        # Handle any other exceptions
        print(f"An unexpected error occurred: {e}")
        error_message = f"An unexpected error occurred: {e}"
        raise RuntimeError(error_message)


    dci_agg = dci.groupby(['Asset_Valuation_Id']).agg(
        Beta_Unlevered_mean=('Beta_Unleveraged', 'mean'),
        Beta_Unlevered_median=('Beta_Unleveraged', 'median'),
        Peers_Leverage_mean=('Peers_Leverage', 'mean'),
        Peers_Leverage_median=('Peers_Leverage', 'median'),
        Peers_Debt_Ratio_mean=('Peers_Debt_Ratio', 'mean'),
        Peers_Debt_Ratio_median=('Peers_Debt_Ratio', 'median'),
        Peers_Equity_Ratio_mean=('Peers_Equity_Ratio', 'mean'),
        Peers_Equity_Ratio_median=('Peers_Equity_Ratio', 'median'),
        Peers_Leverage_25percentile=('Peers_Leverage', lambda x: x.quantile(percentile_25)),
        Peers_Leverage_75percentile=('Peers_Leverage', lambda x: x.quantile(percentile_75)),
        Beta_Unlevered_25percentile=('Beta_Unleveraged', lambda x: x.quantile(percentile_25)),
        Beta_Unlevered_75percentile=('Beta_Unleveraged', lambda x: x.quantile(percentile_75)),
        Peers_Debt_Ratio_25percentile=('Peers_Debt_Ratio', lambda x: x.quantile(percentile_25)),
        Peers_Debt_Ratio_75percentile=('Peers_Debt_Ratio', lambda x: x.quantile(percentile_75)),
        Peers_Equity_Ratio_25percentile=('Peers_Equity_Ratio', lambda x: x.quantile(percentile_25)),
        Peers_Equity_Ratio_75percentile=('Peers_Equity_Ratio', lambda x: x.quantile(percentile_75))
    )


    merged_df = dac.merge(dci_agg, on=['Asset_Valuation_Id'], how='left', suffixes=('', '_daf'))

    gorden_growth_basic_df = merged_df

    # The risk-free rate is a single input, so every statistic collapses to the same value
    gorden_growth_basic_df['Risk_Free_Rate_mean']=gorden_growth_basic_df['Risk_Free_Rate']
    gorden_growth_basic_df['Risk_Free_Rate_median']=gorden_growth_basic_df['Risk_Free_Rate']
    gorden_growth_basic_df['Risk_Free_Rate_25percentile']=gorden_growth_basic_df['Risk_Free_Rate']
    gorden_growth_basic_df['Risk_Free_Rate_75percentile']=gorden_growth_basic_df['Risk_Free_Rate']

    # Re-lever peers' beta: beta_L = beta_U * (1 + (1 - tax) * D/E)
    gorden_growth_basic_df['Beta_Levered_mean']=gorden_growth_basic_df['Beta_Unlevered_mean']*(1+(1-gorden_growth_basic_df['Corporate_Tax_Rate'])*gorden_growth_basic_df['Peers_Leverage_mean'])
    gorden_growth_basic_df['Beta_Levered_median']=gorden_growth_basic_df['Beta_Unlevered_median']*(1+(1-gorden_growth_basic_df['Corporate_Tax_Rate'])*gorden_growth_basic_df['Peers_Leverage_median'])
    #
    gorden_growth_basic_df['Beta_Levered_25percentile']=gorden_growth_basic_df['Beta_Unlevered_25percentile']*(1+(1-gorden_growth_basic_df['Corporate_Tax_Rate'])*gorden_growth_basic_df['Peers_Leverage_25percentile'])
    gorden_growth_basic_df['Beta_Levered_75percentile']=gorden_growth_basic_df['Beta_Unlevered_75percentile']*(1+(1-gorden_growth_basic_df['Corporate_Tax_Rate'])*gorden_growth_basic_df['Peers_Leverage_75percentile'])
    #
    # CAPM cost of equity: ke = rf + MRP * beta_L + asset-specific premium
    gorden_growth_basic_df['ke_mean']=gorden_growth_basic_df['Risk_Free_Rate_mean']+(gorden_growth_basic_df['EMPR_Market_Risk_Premium']*gorden_growth_basic_df['Beta_Levered_mean'])+gorden_growth_basic_df['Asset_Specific_Risk_Premium']
    gorden_growth_basic_df['ke_median']=gorden_growth_basic_df['Risk_Free_Rate_median']+(gorden_growth_basic_df['EMPR_Market_Risk_Premium']*gorden_growth_basic_df['Beta_Levered_median'])+gorden_growth_basic_df['Asset_Specific_Risk_Premium']
    #
    gorden_growth_basic_df['ke_25percentile']=gorden_growth_basic_df['Risk_Free_Rate_25percentile']+(gorden_growth_basic_df['EMPR_Market_Risk_Premium']*gorden_growth_basic_df['Beta_Levered_25percentile'])+gorden_growth_basic_df['Asset_Specific_Risk_Premium']
    gorden_growth_basic_df['ke_75percentile']=gorden_growth_basic_df['Risk_Free_Rate_75percentile']+(gorden_growth_basic_df['EMPR_Market_Risk_Premium']*gorden_growth_basic_df['Beta_Levered_75percentile'])+gorden_growth_basic_df['Asset_Specific_Risk_Premium']
    #
    # After-tax cost of debt
    gorden_growth_basic_df['kd']=gorden_growth_basic_df['Corporate_Credit_Spread']*(1-gorden_growth_basic_df['Corporate_Tax_Rate'])
   
    # Blended discount rate, weighted by peers' debt/equity ratios
    gorden_growth_basic_df['r_mean']=(gorden_growth_basic_df['kd']*gorden_growth_basic_df['Peers_Debt_Ratio_mean'])+(gorden_growth_basic_df['ke_mean']*gorden_growth_basic_df['Peers_Equity_Ratio_mean'])
    gorden_growth_basic_df['r_median']=(gorden_growth_basic_df['kd']*gorden_growth_basic_df['Peers_Debt_Ratio_median'])+(gorden_growth_basic_df['ke_median']*gorden_growth_basic_df['Peers_Equity_Ratio_median'])
    #
    gorden_growth_basic_df['r_25percentile']=(gorden_growth_basic_df['kd']*gorden_growth_basic_df['Peers_Debt_Ratio_25percentile'])+(gorden_growth_basic_df['ke_25percentile']*gorden_growth_basic_df['Peers_Equity_Ratio_25percentile'])
    gorden_growth_basic_df['r_75percentile']=(gorden_growth_basic_df['kd']*gorden_growth_basic_df['Peers_Debt_Ratio_75percentile'])+(gorden_growth_basic_df['ke_75percentile']*gorden_growth_basic_df['Peers_Equity_Ratio_75percentile'])
    #
    # Build the cashflow-level (per-year) detail table
    cashflow_df=gorden_growth_basic_df
    cashflow_detail_df=cashflow_df.merge(dcv,on=['Asset_Valuation_Id'],how='inner',suffixes=('', '_dcv'))
    # Year fraction between each cashflow date and the valuation date
    cashflow_detail_df['Cashflow_date_rank_n']=(pd.to_datetime(cashflow_detail_df['Cashflow_Date'])-pd.to_datetime(cashflow_detail_df['Valuation_Date'])).dt.days/365.25
    cashflow_detail_df['Discount_Factor_mean']=(1+cashflow_detail_df['r_mean'])**cashflow_detail_df['Cashflow_date_rank_n']
    cashflow_detail_df['Discount_Factor_median']=(1+cashflow_detail_df['r_median'])**cashflow_detail_df['Cashflow_date_rank_n']
    #
    cashflow_detail_df['Discount_Factor_25percentile']=(1+cashflow_detail_df['r_25percentile'])**cashflow_detail_df['Cashflow_date_rank_n']
    cashflow_detail_df['Discount_Factor_75percentile']=(1+cashflow_detail_df['r_75percentile'])**cashflow_detail_df['Cashflow_date_rank_n']
    #
    cashflow_detail_df['CF_By_Df_mean']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_mean']
    cashflow_detail_df['CF_By_Df_median']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_median']
    #
    cashflow_detail_df['CF_By_Df_25percentile']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_25percentile']
    cashflow_detail_df['CF_By_Df_75percentile']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_75percentile']
    #
    # Gordon growth terminal value: TV = CF * (1 + g) / (r - g)
    cashflow_detail_df['TV_mean']=cashflow_detail_df['Cashflow']*(1+cashflow_detail_df['Growth_Rate'])/(cashflow_detail_df['r_mean']-cashflow_detail_df['Growth_Rate'])
    cashflow_detail_df['TV_median']=cashflow_detail_df['Cashflow']*(1+cashflow_detail_df['Growth_Rate'])/(cashflow_detail_df['r_median']-cashflow_detail_df['Growth_Rate'])
    #
    cashflow_detail_df['TV_25percentile']=cashflow_detail_df['Cashflow']*(1+cashflow_detail_df['Growth_Rate'])/(cashflow_detail_df['r_25percentile']-cashflow_detail_df['Growth_Rate'])
    cashflow_detail_df['TV_75percentile']=cashflow_detail_df['Cashflow']*(1+cashflow_detail_df['Growth_Rate'])/(cashflow_detail_df['r_75percentile']-cashflow_detail_df['Growth_Rate'])
    #
    cashflow_detail_df['PV_mean']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_mean']
    cashflow_detail_df['PV_median']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_median']
    #
    cashflow_detail_df['PV_25percentile']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_25percentile']
    cashflow_detail_df['PV_75percentile']=cashflow_detail_df['Cashflow']/cashflow_detail_df['Discount_Factor_75percentile']
    #

    cashflow_detail_df=cashflow_detail_df[['Asset_Valuation_Id','Methodology_Id','Cashflow','Beta_Unlevered_mean','Beta_Unlevered_median','Peers_Leverage_mean','Peers_Leverage_median','Risk_Free_Rate_mean','Corporate_Credit_Spread','Peers_Debt_Ratio_mean','Peers_Debt_Ratio_median','Peers_Equity_Ratio_mean','Peers_Equity_Ratio_median','Beta_Levered_mean','Beta_Levered_median','ke_mean','ke_median','kd','r_mean','r_median','Discount_Factor_mean','Discount_Factor_median','No_of_years','PV_mean','PV_median','Cashflow_date_rank_n',"Cashflow_Date",'Beta_Unlevered_25percentile','Beta_Unlevered_75percentile','Peers_Leverage_25percentile','Peers_Leverage_75percentile','Peers_Debt_Ratio_25percentile','Peers_Debt_Ratio_75percentile','Peers_Equity_Ratio_25percentile','Peers_Equity_Ratio_75percentile','Beta_Levered_25percentile','Beta_Levered_75percentile','ke_25percentile','ke_75percentile','r_25percentile','r_75percentile','Discount_Factor_25percentile','Discount_Factor_75percentile','PV_25percentile','PV_75percentile']]

   
    # Cashflow of the latest-dated year feeds the terminal value
    dcv['Cashflow_Max'] = dcv.loc[dcv['Cashflow_Date'].idxmax(), 'Cashflow']
    max_cashflow = dcv.loc[dcv['Cashflow_Date'].idxmax(), 'Cashflow']

    # Terminal value from the final year's cashflow: TV = CF_final * (1 + g) / (r - g)
    gorden_growth_basic_df['TV_mean']=max_cashflow*((1+gorden_growth_basic_df['Growth_Rate'])/(gorden_growth_basic_df['r_mean']-gorden_growth_basic_df['Growth_Rate']))
    gorden_growth_basic_df['TV_median']=max_cashflow*((1+gorden_growth_basic_df['Growth_Rate'])/(gorden_growth_basic_df['r_median']-gorden_growth_basic_df['Growth_Rate']))
    #
    gorden_growth_basic_df['TV_25percentile']=max_cashflow*((1+gorden_growth_basic_df['Growth_Rate'])/(gorden_growth_basic_df['r_25percentile']-gorden_growth_basic_df['Growth_Rate']))
    gorden_growth_basic_df['TV_75percentile']=max_cashflow*((1+gorden_growth_basic_df['Growth_Rate'])/(gorden_growth_basic_df['r_75percentile']-gorden_growth_basic_df['Growth_Rate']))
    #


    gorden_growth_agg_df=gorden_growth_basic_df.merge(dcv,on=['Asset_Valuation_Id'],how='inner',suffixes=('', '_dcv'))

    gorden_growth_agg_df['Cashflow_date_rank_n']=(pd.to_datetime(gorden_growth_agg_df['Cashflow_Date'])-pd.to_datetime(gorden_growth_agg_df['Valuation_Date'])).dt.days/365.25
    gorden_growth_agg_df['Discount_Factor_mean']=(1+gorden_growth_agg_df['r_mean'])**gorden_growth_agg_df['Cashflow_date_rank_n']
    gorden_growth_agg_df['Discount_Factor_median']=(1+gorden_growth_agg_df['r_median'])**gorden_growth_agg_df['Cashflow_date_rank_n']
    #
    gorden_growth_agg_df['Discount_Factor_25percentile']=(1+gorden_growth_agg_df['r_25percentile'])**gorden_growth_agg_df['Cashflow_date_rank_n']
    gorden_growth_agg_df['Discount_Factor_75percentile']=(1+gorden_growth_agg_df['r_75percentile'])**gorden_growth_agg_df['Cashflow_date_rank_n']
    #
    gorden_growth_agg_df['CF_By_Df_mean']=gorden_growth_agg_df['Cashflow']/gorden_growth_agg_df['Discount_Factor_mean']
    gorden_growth_agg_df['CF_By_Df_median']=gorden_growth_agg_df['Cashflow']/gorden_growth_agg_df['Discount_Factor_median']
    #
    gorden_growth_agg_df['CF_By_Df_25percentile']=gorden_growth_agg_df['Cashflow']/gorden_growth_agg_df['Discount_Factor_25percentile']
    gorden_growth_agg_df['CF_By_Df_75percentile']=gorden_growth_agg_df['Cashflow']/gorden_growth_agg_df['Discount_Factor_75percentile']
    #

    gorden_growth_agg_df['Discount_Factor_mean_max']=gorden_growth_agg_df.loc[gorden_growth_agg_df['Cashflow_Date'].idxmax(), 'Discount_Factor_mean']
    gorden_growth_agg_df['Discount_Factor_median_max']=gorden_growth_agg_df.loc[gorden_growth_agg_df['Cashflow_Date'].idxmax(), 'Discount_Factor_median']
    #
    gorden_growth_agg_df['Discount_Factor_25percentile_max']=gorden_growth_agg_df.loc[gorden_growth_agg_df['Cashflow_Date'].idxmax(), 'Discount_Factor_25percentile']
    gorden_growth_agg_df['Discount_Factor_75percentile_max']=gorden_growth_agg_df.loc[gorden_growth_agg_df['Cashflow_Date'].idxmax(), 'Discount_Factor_75percentile']
    #
    # Collapse to one row per valuation: sum the discounted cashflows, take max for per-valuation constants
    gorden_growth_final_df=gorden_growth_agg_df.groupby(['Asset_Valuation_Id','Methodology_Id']).agg(
    CF_By_Df_mean_sum=('CF_By_Df_mean','sum'),
    CF_By_Df_median_sum=('CF_By_Df_median','sum'),

    CF_By_Df_25percentile_sum=('CF_By_Df_25percentile','sum'),
    CF_By_Df_75percentile_sum=('CF_By_Df_75percentile','sum'),

    Ownership=('Client_Ownership', 'max'),
    TV_mean=('TV_mean','max'),
    TV_median=('TV_median','max'),

    TV_25percentile=('TV_25percentile','max'),
    TV_75percentile=('TV_75percentile','max'),

    net_debt_at_val=('Net_Debt_At_Val','max'),
    Discount_Factor_mean_max=('Discount_Factor_mean_max','max'),
    Discount_Factor_median_max=('Discount_Factor_median_max','max'),

    Discount_Factor_25percentile_max=('Discount_Factor_25percentile_max','max'),
    Discount_Factor_75percentile_max=('Discount_Factor_75percentile_max','max'),

    after_tax_cost_of_debt=('kd','max'),
    cost_of_equity_mean=('ke_mean','max'),
    cost_of_equity_median=('ke_median','max'),

    cost_of_equity_25percentile=('ke_25percentile','max'),
    cost_of_equity_75percentile=('ke_75percentile','max'),

    Beta_Unlevered_mean_max=('Beta_Unlevered_mean','max'),
    Beta_Unlevered_median_max=('Beta_Unlevered_median','max'),

    Beta_Unlevered_25percentile_max=('Beta_Unlevered_25percentile','max'),
    Beta_Unlevered_75percentile_max=('Beta_Unlevered_75percentile','max'),

    Beta_Levered_mean_max=('Beta_Levered_mean','max'),
    Beta_Levered_median_max=('Beta_Levered_median','max'),

    Beta_Levered_25percentile_max=('Beta_Levered_25percentile','max'),
    Beta_Levered_75percentile_max=('Beta_Levered_75percentile','max'),

    Peers_Debt_Ratio_mean_max=('Peers_Debt_Ratio_mean','max'),
    Peers_Debt_Ratio_median_max=('Peers_Debt_Ratio_median','max'),
    Peers_Equity_Ratio_mean_max=('Peers_Equity_Ratio_mean','max'),
    Peers_Equity_Ratio_median_max=('Peers_Equity_Ratio_median','max'),
    Peers_Leverage_mean_max=('Peers_Leverage_mean','max'),
    Peers_Leverage_median_max=('Peers_Leverage_median','max'),
    r_mean_max=('r_mean','max'),
    r_median_max=('r_median','max'),

    Peers_Debt_Ratio_25percentile_max=('Peers_Debt_Ratio_25percentile','max'),
    Peers_Debt_Ratio_75percentile_max=('Peers_Debt_Ratio_75percentile','max'),
    Peers_Equity_Ratio_25percentile_max=('Peers_Equity_Ratio_25percentile','max'),
    Peers_Equity_Ratio_75percentile_max=('Peers_Equity_Ratio_75percentile','max'),
    Peers_Leverage_25percentile_max=('Peers_Leverage_25percentile','max'),
    Peers_Leverage_75percentile_max=('Peers_Leverage_75percentile','max'),
    r_25percentile_max=('r_25percentile','max'),
    r_75percentile_max=('r_75percentile','max')

    )


    # Total PV = sum of discounted cashflows + discounted terminal value
    gorden_growth_final_df['Total_PV_mean']=gorden_growth_final_df['CF_By_Df_mean_sum']+gorden_growth_final_df['TV_mean']/gorden_growth_final_df['Discount_Factor_mean_max']
    gorden_growth_final_df['Total_PV_median']=gorden_growth_final_df['CF_By_Df_median_sum']+gorden_growth_final_df['TV_median']/gorden_growth_final_df['Discount_Factor_median_max']
    #
    gorden_growth_final_df['Total_PV_25percentile']=gorden_growth_final_df['CF_By_Df_25percentile_sum']+gorden_growth_final_df['TV_25percentile']/gorden_growth_final_df['Discount_Factor_25percentile_max']
    gorden_growth_final_df['Total_PV_75percentile']=gorden_growth_final_df['CF_By_Df_75percentile_sum']+gorden_growth_final_df['TV_75percentile']/gorden_growth_final_df['Discount_Factor_75percentile_max']
    #
    #display(gorden_growth_final_df)

    gorden_growth_final_df['Equity_mean' ]=gorden_growth_final_df['Ownership']*(gorden_growth_final_df['Total_PV_mean']-gorden_growth_final_df['net_debt_at_val'])
    gorden_growth_final_df['Equity_median' ]=gorden_growth_final_df['Ownership']*(gorden_growth_final_df['Total_PV_median']-gorden_growth_final_df['net_debt_at_val'] )                                                                          
    #
    gorden_growth_final_df['Equity_25percentile' ]=gorden_growth_final_df['Ownership']*(gorden_growth_final_df['Total_PV_25percentile']-gorden_growth_final_df['net_debt_at_val'])
    gorden_growth_final_df['Equity_75percentile' ]=gorden_growth_final_df['Ownership']*(gorden_growth_final_df['Total_PV_75percentile']-gorden_growth_final_df['net_debt_at_val'] )
    #
    # Resetting the index to include it as columns
    gorden_growth_df = gorden_growth_final_df.reset_index()

    # Select and order the output columns
    gorden_growth_df = gorden_growth_df[['Asset_Valuation_Id','Methodology_Id',  'Equity_mean', 'Equity_median','Total_PV_mean','Total_PV_median','Discount_Factor_mean_max','Discount_Factor_median_max','CF_By_Df_mean_sum','CF_By_Df_median_sum','TV_mean','TV_median','cost_of_equity_mean','cost_of_equity_median','after_tax_cost_of_debt','Beta_Unlevered_mean_max','Beta_Unlevered_median_max','Beta_Levered_mean_max','Beta_Levered_median_max','Peers_Debt_Ratio_mean_max','Peers_Debt_Ratio_median_max','Peers_Equity_Ratio_mean_max','Peers_Equity_Ratio_median_max','Peers_Leverage_mean_max','Peers_Leverage_median_max','r_mean_max','r_median_max','Equity_25percentile', 'Equity_75percentile','Total_PV_25percentile','Total_PV_75percentile','Discount_Factor_25percentile_max','Discount_Factor_75percentile_max','CF_By_Df_25percentile_sum','CF_By_Df_75percentile_sum','TV_25percentile','TV_75percentile','cost_of_equity_25percentile','cost_of_equity_75percentile','Beta_Unlevered_25percentile_max','Beta_Unlevered_75percentile_max','Beta_Levered_25percentile_max','Beta_Levered_75percentile_max','Peers_Debt_Ratio_25percentile_max','Peers_Debt_Ratio_75percentile_max','Peers_Equity_Ratio_25percentile_max','Peers_Equity_Ratio_75percentile_max','Peers_Leverage_25percentile_max','Peers_Leverage_75percentile_max','r_25percentile_max','r_75percentile_max']]

    # Write the data frame to SQL Server
    gorden_growth_df.to_sql('#temp_table_gg', engine, if_exists='replace', index=False)
    cashflow_detail_df.to_sql('#temp_table_cash_gg', engine, if_exists='replace', index=False)

   

    # Retrieve the stored procedure name
    proc_name1 = STORED_PROCEDURES['LOAD_DCF_GG_RESULT']
    proc_name2 = STORED_PROCEDURES['LOAD_DCF_GG_CASHFLOW_RESULT']

    # Run the two load stored procedures
    with engine.begin() as connection:
        connection.execute(text(f"EXEC {proc_name1}"))
        connection.execute(text(f"EXEC {proc_name2}"))
    result_json = gorden_growth_df.to_json(orient='records')
    return result_json        
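A quick numeric sanity check of the formula chain above, with illustrative numbers only: lever the peers' beta, build the cost of equity via CAPM, blend it with the after-tax cost of debt, and compute the Gordon growth terminal value TV = CF * (1 + g) / (r - g):

# Illustrative inputs, not real data
beta_unlevered = 0.9
tax_rate = 0.25
peers_leverage = 0.5            # debt / equity
risk_free_rate = 0.03
market_risk_premium = 0.05
asset_specific_premium = 0.02
credit_spread = 0.04
debt_ratio, equity_ratio = 1 / 3, 2 / 3
growth_rate, final_cashflow = 0.02, 100.0

beta_levered = beta_unlevered * (1 + (1 - tax_rate) * peers_leverage)              # 1.2375
ke = risk_free_rate + market_risk_premium * beta_levered + asset_specific_premium  # ~0.1119
kd = credit_spread * (1 - tax_rate)                                                # 0.03
r = kd * debt_ratio + ke * equity_ratio                                            # ~0.0846
tv = final_cashflow * (1 + growth_rate) / (r - growth_rate)                        # ~1579
print(beta_levered, ke, kd, r, tv)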

multiples_sales_caliberation.py

import pyodbc
import pandas as pd
import numpy as np
from warnings import filterwarnings
from sqlalchemy import text

from connection import get_engine, get_connection_details
from constants import STORED_PROCEDURES, percentile_25, percentile_75

def multiples_sales_caliberation( Asset_Valuation_Id,Methodology_Id):
    filterwarnings("ignore", category=UserWarning, message='.*pandas only supports SQLAlchemy connectable.*')
   
    engine = get_engine()
    conn=get_connection_details()

   
    try:
        dci = pd.read_sql("EXEC dbo.sp_Get_Capitaliq ?", conn, params=[Asset_Valuation_Id])
        dac = pd.read_sql("EXEC dbo.sp_Get_Asset_Valuation ?, ?", conn, params=[Asset_Valuation_Id, Methodology_Id])
        if dac.empty:
            raise ValueError("No rows returned from Asset Configuration.")
        conn.close()
    except pyodbc.Error as e:
        print(f"Error: Unable to connect to the database. {e}")
        error_message = f"An unexpected error occurred: {e}"
        raise RuntimeError(error_message)
    except ValueError as e:
        # Handle specific exception for each query if no rows returned
        print(f"Program execution stopped due to an error: {e}")
        raise  # Re-raise the exception if needed
    except Exception as e:
        # Handle any other exceptions
        print(f"An unexpected error occurred: {e}")
        error_message = f"An unexpected error occurred: {e}"
        raise RuntimeError(error_message)
    # If the valuation uses NTM figures, aggregate the NTM multiples; otherwise use LTM
    if dac['Is_Ntm'].all():
        print('NTM')
        dci_agg = dci.groupby(['Asset_Valuation_Id']).agg(
            peers_multiples_trans_mean=('Ltm_Ev_Sales_Trans', 'mean'),
            peers_multiples_val_mean=('Ntm_Ev_Sales_Val', 'mean'),
            peers_multiples_trans_median=('Ltm_Ev_Sales_Trans', 'median'),
            peers_multiples_val_median=('Ntm_Ev_Sales_Val', 'median'),
            peers_multiples_trans_25percentile=('Ltm_Ev_Sales_Trans', lambda x: x.quantile(percentile_25)),
            peers_multiples_val_25percentile=('Ntm_Ev_Sales_Val', lambda x: x.quantile(percentile_25)),
            peers_multiples_trans_75percentile=('Ltm_Ev_Sales_Trans', lambda x: x.quantile(percentile_75)),
            peers_multiples_val_75percentile=('Ntm_Ev_Sales_Val', lambda x: x.quantile(percentile_75))
        )
    else:
        print('LTM')
        dci_agg = dci.groupby(['Asset_Valuation_Id']).agg(
            peers_multiples_trans_mean=('Ltm_Ev_Sales_Trans', 'mean'),
            peers_multiples_val_mean=('Ltm_Ev_Sales_Val', 'mean'),
            peers_multiples_trans_median=('Ltm_Ev_Sales_Trans', 'median'),
            peers_multiples_val_median=('Ltm_Ev_Sales_Val', 'median'),
            peers_multiples_trans_25percentile=('Ltm_Ev_Sales_Trans', lambda x: x.quantile(percentile_25)),
            peers_multiples_val_25percentile=('Ltm_Ev_Sales_Val', lambda x: x.quantile(percentile_25)),
            peers_multiples_trans_75percentile=('Ltm_Ev_Sales_Trans', lambda x: x.quantile(percentile_75)),
            peers_multiples_val_75percentile=('Ltm_Ev_Sales_Val', lambda x: x.quantile(percentile_75))
        )
   
    merged_df = dac.merge(dci_agg, on=['Asset_Valuation_Id'], how='left', suffixes=('', '_daf'))
   

    if dac['Is_Ntm'].all():
        print('NTM')
        merged_df['Revenue_At_val'] = merged_df['Revenue_Ntm_Val']
    else:
        merged_df['Revenue_At_val'] = merged_df['Revenue_At_Val']
    # Revenue_At_Trans already holds the transaction-date figure in both branches

    sales_calibration_basic_df = merged_df
   
    # Calibration: infer the premium/discount paid vs. peers at entry, then re-apply it at valuation
    sales_calibration_basic_df['EV_At_Trans'] = sales_calibration_basic_df['Purchase_Price']+sales_calibration_basic_df['Net_Debt_At_Trans']
    sales_calibration_basic_df['Multiple_At_Trans'] = sales_calibration_basic_df['EV_At_Trans']/sales_calibration_basic_df['Revenue_At_Trans']
    sales_calibration_basic_df['Calibrated_Premium_Discnt_Mean'] = (sales_calibration_basic_df['Multiple_At_Trans']/sales_calibration_basic_df['peers_multiples_trans_mean'])-1
    sales_calibration_basic_df['Calibrated_Premium_Discnt_Median'] = (sales_calibration_basic_df['Multiple_At_Trans']/sales_calibration_basic_df['peers_multiples_trans_median'])-1
    sales_calibration_basic_df['Multiples_At_Val_Mean'] = sales_calibration_basic_df['peers_multiples_val_mean']*(1+sales_calibration_basic_df['Calibrated_Premium_Discnt_Mean'])
    sales_calibration_basic_df['Multiples_At_Val_Median'] = sales_calibration_basic_df['peers_multiples_val_median']*(1+sales_calibration_basic_df['Calibrated_Premium_Discnt_Median'])
    #
    lowest_calibrated_premium_discnt = np.minimum(sales_calibration_basic_df['Calibrated_Premium_Discnt_Mean'], sales_calibration_basic_df['Calibrated_Premium_Discnt_Median'])
    highest_calibrated_premium_discnt = np.maximum(sales_calibration_basic_df['Calibrated_Premium_Discnt_Mean'], sales_calibration_basic_df['Calibrated_Premium_Discnt_Median'])
    sales_calibration_basic_df['Multiples_At_Val_25percentile'] = sales_calibration_basic_df['peers_multiples_val_25percentile']*(1+lowest_calibrated_premium_discnt)
    sales_calibration_basic_df['Multiples_At_Val_75percentile'] = sales_calibration_basic_df['peers_multiples_val_75percentile']*(1+highest_calibrated_premium_discnt)
    #
    sales_calibration_basic_df['EV_Mean'] = sales_calibration_basic_df['Multiples_At_Val_Mean']*sales_calibration_basic_df['Revenue_At_val']
    sales_calibration_basic_df['EV_Median'] = sales_calibration_basic_df['Multiples_At_Val_Median']*sales_calibration_basic_df['Revenue_At_val']
    #
    sales_calibration_basic_df['EV_25percentile'] = sales_calibration_basic_df['Multiples_At_Val_25percentile']*sales_calibration_basic_df['Revenue_At_val']
    sales_calibration_basic_df['EV_75percentile'] = sales_calibration_basic_df['Multiples_At_Val_75percentile']*sales_calibration_basic_df['Revenue_At_val']
    #
    sales_calibration_basic_df['Equity_Mean'] = sales_calibration_basic_df['EV_Mean']-sales_calibration_basic_df['Net_Debt_At_Val']
    sales_calibration_basic_df['Equity_Median'] = sales_calibration_basic_df['EV_Median']-sales_calibration_basic_df['Net_Debt_At_Val']
    #
    sales_calibration_basic_df['Equity_25percentile'] = sales_calibration_basic_df['EV_25percentile']-sales_calibration_basic_df['Net_Debt_At_Val']
    sales_calibration_basic_df['Equity_75percentile'] = sales_calibration_basic_df['EV_75percentile']-sales_calibration_basic_df['Net_Debt_At_Val']
    #
    sales_calibration_df = sales_calibration_basic_df[['Asset_Valuation_Id', 'Methodology_Id','Equity_Mean', 'Equity_Median','EV_Mean','EV_Median','Multiples_At_Val_Mean','Multiples_At_Val_Median','Calibrated_Premium_Discnt_Mean','Calibrated_Premium_Discnt_Median','Multiple_At_Trans','EV_At_Trans','peers_multiples_trans_mean','peers_multiples_val_mean','peers_multiples_trans_median','peers_multiples_val_median','peers_multiples_trans_25percentile','peers_multiples_val_25percentile','peers_multiples_trans_75percentile','peers_multiples_val_75percentile','Client_Ownership','Multiples_At_Val_25percentile','Multiples_At_Val_75percentile','EV_25percentile','EV_75percentile','Equity_25percentile','Equity_75percentile']]
    # Write the data frame to SQL Server
    sales_calibration_df.to_sql('#temp_table_sales_calib', engine, if_exists='replace', index=False)

   

    # Retrieve the stored procedure name
    proc_name = STORED_PROCEDURES['LOAD_SALES_CALIB_RESULT']

    # Run the load stored procedure
    with engine.begin() as connection:
        connection.execute(text(f"EXEC {proc_name}"))
    result_json = sales_calibration_df.to_json(orient='records')
    return result_json
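The calibration reduces to a few arithmetic steps; a worked example with illustrative numbers:

# Illustrative inputs, not real data
purchase_price, net_debt_at_trans = 80.0, 20.0
revenue_at_trans, revenue_at_val = 50.0, 60.0
peer_multiple_trans, peer_multiple_val = 1.8, 2.1
net_debt_at_val = 25.0

ev_at_trans = purchase_price + net_debt_at_trans        # 100.0
multiple_at_trans = ev_at_trans / revenue_at_trans      # 2.0
premium = multiple_at_trans / peer_multiple_trans - 1   # ~0.111: paid ~11% over peers at entry
multiple_at_val = peer_multiple_val * (1 + premium)     # ~2.333: re-apply that premium today
ev = multiple_at_val * revenue_at_val                   # 140.0
equity = ev - net_debt_at_val                           # 115.0
print(premium, multiple_at_val, equity)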


extract_document_data.py

from azure.core.credentials import AzureKeyCredential
from azure.ai.documentintelligence import DocumentIntelligenceClient
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
from azure.storage.blob import BlobServiceClient
from warnings import filterwarnings
import pyodbc
import json
import os
import pandas as pd
from sqlalchemy import text
from IPython.display import display
from constants import STORED_PROCEDURES
from connection import (get_engine, get_connection_details, var_blob_connection_string,
                        var_container_name, var_subfolder_path, var_endpoint, var_key, var_formUrl)

def fn_extract_document_data(Asset_Fund_Val_Doc_Map_Id,Asset_Valuation_Id):
    filterwarnings("ignore", category=UserWarning, message='.*pandas only supports SQLAlchemy connectable.*')

    engine = get_engine()
    conn=get_connection_details()

    try:
        # Data_Extraction_Status: 1 = in progress, 2 = completed, 3 = failed
        doc_stat = pd.read_sql("EXEC dbo.sp_Get_Data_Extraction_Status ?", conn, params=[Asset_Fund_Val_Doc_Map_Id])
        extraction_stat = doc_stat['Data_Extraction_Status'].values[0]
        print(f"extraction_status {extraction_stat}")
        if extraction_stat == 1:
            raise RuntimeError("Document extraction is in progress now, please try again after some time")
        doc_details = pd.read_sql("EXEC dbo.sp_Get_Document_Details ?, ?", conn, params=[Asset_Fund_Val_Doc_Map_Id, Asset_Valuation_Id])

        # Mark extraction as in progress
        with conn.cursor() as cursor:
            cursor.execute("EXEC dbo.sp_Update_Data_Extraction_Status ?, ?", Asset_Fund_Val_Doc_Map_Id, 1)
            conn.commit()

       
        blob_connection_string=var_blob_connection_string

       
        container_name=var_container_name
       
        subfolder_path=var_subfolder_path
        file_name=doc_details['Document_Path'].values[0]
        blob_name = f"{file_name}.json"

       
        endpoint=var_endpoint
       
        key=var_key

       
        model_id=doc_details['Model_Name'].values[0]
       
        formUrl=f"{var_formUrl}{file_name}"

        ###################################
        document_intelligence_client = DocumentIntelligenceClient(
            endpoint=endpoint, credential=AzureKeyCredential(key)
        )

        # Make sure your document's type is included in the list of document types the custom model can analyze
        poller = document_intelligence_client.begin_analyze_document(
            model_id, AnalyzeDocumentRequest(url_source=formUrl)
        )
        result = poller.result()

        # Extract JSON content from the result variable
        result_json = result.as_dict()

        # Convert the JSON content to a string
        result_json_str = json.dumps(result_json)

        # Create a BlobServiceClient
        blob_service_client = BlobServiceClient.from_connection_string(blob_connection_string)

        # Create a BlobClient
        blob_client = blob_service_client.get_blob_client(container=container_name, blob=os.path.join(subfolder_path, blob_name))

        # Upload the JSON content to the blob
        blob_client.upload_blob(result_json_str, overwrite=True)

        print(f"JSON content successfully uploaded to {os.path.join(subfolder_path, blob_name)} in container {container_name}.")

    ######################## Save to Table #######################

        blob_name = doc_details['Document_Name'].values[0]
        blob_service_client = BlobServiceClient.from_connection_string(blob_connection_string)
        container_client = blob_service_client.get_container_client(container_name)

        # Construct the full blob path
        full_blob_path = f"{subfolder_path}{file_name}.json"
        print(full_blob_path)
        # Initialize blob client
        blob_client = container_client.get_blob_client(full_blob_path)

        # Download the JSON content directly from the blob
        download_stream = blob_client.download_blob()
        json_content = download_stream.readall()

        # Parse JSON data from the downloaded content
        data = json.loads(json_content)

        try:

            Document_Type=doc_details['Document_Type'].values[0]
            # Extract values from valueObject
            extracted_data = {}
            documents = data['documents']
            for document in documents:
                if Document_Type == 'Audited Financial Statement':
                    afs_data = document['fields']['AFSData']['valueArray']
                elif Document_Type == 'Management Account':
                    afs_data = document['fields']['MA_Dataset']['valueArray']
                else:
                    afs_data = []
                for item in afs_data:
                    value_object = item['valueObject']
                    for key, value in value_object.items():
                        if 'content' in value:
                            extracted_data[key] = value['content']
                        elif key not in extracted_data:
                            extracted_data[key] = None
        except Exception as e:
            # Any parsing failure usually means the uploaded file doesn't match the model
            error_message = f"An unexpected error occurred, please check the file you uploaded: {e}"
            raise RuntimeError(error_message)
   
        # Create DataFrame from extracted data
        df_afs = pd.DataFrame([extracted_data])
       
        # Display DataFrame
        display(df_afs)
       
        # Insert DataFrame into SQL table
        df_afs.to_sql('#audited_financials', engine, if_exists='replace', index=False)

        # Retrieve the stored procedure names for this document type
        if Document_Type=='Audited Financial Statement':
            proc_name = STORED_PROCEDURES['LOAD_AFS_DOCUMENT']
            proc_backtesting =STORED_PROCEDURES['LOAD_AFS_BACKTESTING']
            proc_budget_back=STORED_PROCEDURES['LOAD_BUDGET_BACK_AFS']
        elif Document_Type == 'Management Account':
            proc_name = STORED_PROCEDURES['LOAD_MA_DOCUMENT']
            proc_backtesting =STORED_PROCEDURES['LOAD_MA_BACKTESTING']
            proc_budget_back=STORED_PROCEDURES['LOAD_BUDGET_BACK_MA']
        else:
            # Fail fast: an unknown type would leave the other proc names unbound below
            raise ValueError(f"Unknown Document_Type: {Document_Type}")

        proc_asset_val=STORED_PROCEDURES['LOAD_ASSET_VAL_MAP']
        # Run the load and merge stored procedures
        with engine.begin() as connection:
            connection.execute(text(f"EXEC {proc_name} :param1,:param2"),{'param1':Asset_Fund_Val_Doc_Map_Id,'param2':Asset_Valuation_Id})
            connection.execute(text(f"EXEC {proc_backtesting} :param1,:param2"),{'param1':Asset_Valuation_Id,'param2':Asset_Fund_Val_Doc_Map_Id})
            connection.execute(text(f"EXEC {proc_budget_back} :param1,:param2"),{'param1':Asset_Valuation_Id,'param2':Asset_Fund_Val_Doc_Map_Id})
            connection.execute(text(f"EXEC {proc_asset_val} :param1,:param2"),{'param1':Asset_Valuation_Id,'param2':Asset_Fund_Val_Doc_Map_Id})
           
        # Mark extraction as completed (status 2)
        with conn.cursor() as cursor:
            cursor.execute("EXEC dbo.sp_Update_Data_Extraction_Status ?, ?", Asset_Fund_Val_Doc_Map_Id,2)
            conn.commit()

    except pyodbc.Error as e:
        print(f"Error: Unable to connect to the database. {e}")
        error_message = f"An unexpected error occurred: {e}"
        ## Update status to Failure#####
        with conn.cursor() as cursor:
            cursor.execute("EXEC dbo.sp_Update_Data_Extraction_Status ?, ?", Asset_Fund_Val_Doc_Map_Id,3)
            conn.commit()
        raise RuntimeError(error_message)

    except ValueError as e:
        # Handle specific exception for each query if no rows returned
        print(f"Program execution stopped due to an error: {e}")
        ## Update status to Failure#####
        with conn.cursor() as cursor:
            cursor.execute("EXEC dbo.sp_Update_Data_Extraction_Status ?, ?", Asset_Fund_Val_Doc_Map_Id,3)
            conn.commit()
        raise  # Re-raise the exception if needed
    except Exception as e:
        # Handle any other exceptions
        print(f"An unexpected error occurred: {e}")
        error_message = f"An unexpected error occurred: {e}"
        ## Update status to Failure#####
        with conn.cursor() as cursor:
            cursor.execute("EXEC dbo.sp_Update_Data_Extraction_Status ?, ?", Asset_Fund_Val_Doc_Map_Id,3)
            conn.commit()
        raise RuntimeError(error_message)
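The parsing loop above relies on a particular shape of the Document Intelligence result. A sketch of that assumed structure (the AFSData / valueArray / valueObject / content keys come from the code above; the values are illustrative):

# Assumed (illustrative) shape of result.as_dict() for the custom model
sample = {
    "documents": [{
        "fields": {
            "AFSData": {
                "valueArray": [{
                    "valueObject": {
                        "Revenue": {"content": "1,250"},
                        "EBITDA": {"content": "310"},
                    }
                }]
            }
        }
    }]
}

extracted = {}
for document in sample["documents"]:
    for item in document["fields"]["AFSData"]["valueArray"]:
        for key, value in item["valueObject"].items():
            extracted[key] = value.get("content")
print(extracted)  # {'Revenue': '1,250', 'EBITDA': '310'}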
   

multiples_ebitda_liquidity.py

import pandas as pd
import numpy as np
import pyodbc
from scipy.stats import norm
from warnings import filterwarnings
from sqlalchemy import text

from connection import get_engine, get_connection_details
from constants import STORED_PROCEDURES, percentile_75, percentile_25

def multiples_ebitda_liquidity( Asset_Valuation_Id,Methodology_Id):
    filterwarnings("ignore", category=UserWarning, message='.*pandas only supports SQLAlchemy connectable.*')
   
    engine = get_engine()
    conn=get_connection_details()
   
   
    try:
        dci = pd.read_sql("EXEC dbo.sp_Get_Capitaliq ?", conn, params=[Asset_Valuation_Id])
        dac = pd.read_sql("EXEC dbo.sp_Get_Asset_Valuation ?, ?", conn, params=[Asset_Valuation_Id, Methodology_Id])
        if dac.empty:
            raise ValueError("No rows returned from Asset Configuration.")
        conn.close()
    except pyodbc.Error as e:
        print(f"Error: Unable to connect to the database. {e}")
        error_message = f"An unexpected error occurred: {e}"
        raise RuntimeError(error_message)
    except ValueError as e:
        # Handle specific exception for each query if no rows returned
        print(f"Program execution stopped due to an error: {e}")
        raise  # Re-raise the exception if needed
    except Exception as e:
        # Handle any other exceptions
        print(f"An unexpected error occurred: {e}")
        error_message = f"An unexpected error occurred: {e}"
        raise RuntimeError(error_message)

    # If the valuation uses NTM figures, aggregate the NTM multiples; otherwise use LTM
    if dac['Is_Ntm'].all():
        print('NTM')
        dci_agg = dci.groupby(['Asset_Valuation_Id']).agg(
            peers_multiples_trans_mean=('Ltm_Ev_Ebitda_Trans', 'mean'),
            peers_multiples_val_mean=('Ntm_Ev_Ebitda_Val', 'mean'),
            peers_multiples_trans_median=('Ltm_Ev_Ebitda_Trans', 'median'),
            peers_multiples_val_median=('Ntm_Ev_Ebitda_Val', 'median'),
            Volatility_mean=('Volatility', 'mean'),
            Volatility_median=('Volatility', 'median'),
            peers_multiples_trans_25percentile=('Ltm_Ev_Ebitda_Trans', lambda x: x.quantile(percentile_25)),
            peers_multiples_val_25percentile=('Ntm_Ev_Ebitda_Val', lambda x: x.quantile(percentile_25)),
            peers_multiples_trans_75percentile=('Ltm_Ev_Ebitda_Trans', lambda x: x.quantile(percentile_75)),
            peers_multiples_val_75percentile=('Ntm_Ev_Ebitda_Val', lambda x: x.quantile(percentile_75))
        )
    else:
        print('LTM')
        dci_agg = dci.groupby(['Asset_Valuation_Id']).agg(
            peers_multiples_trans_mean=('Ltm_Ev_Ebitda_Trans', 'mean'),
            peers_multiples_val_mean=('Ltm_Ev_Ebitda_Val', 'mean'),
            peers_multiples_trans_median=('Ltm_Ev_Ebitda_Trans', 'median'),
            peers_multiples_val_median=('Ltm_Ev_Ebitda_Val', 'median'),
            Volatility_mean=('Volatility', 'mean'),
            Volatility_median=('Volatility', 'median'),
            peers_multiples_trans_25percentile=('Ltm_Ev_Ebitda_Trans', lambda x: x.quantile(percentile_25)),
            peers_multiples_val_25percentile=('Ltm_Ev_Ebitda_Val', lambda x: x.quantile(percentile_25)),
            peers_multiples_trans_75percentile=('Ltm_Ev_Ebitda_Trans', lambda x: x.quantile(percentile_75)),
            peers_multiples_val_75percentile=('Ltm_Ev_Ebitda_Val', lambda x: x.quantile(percentile_75))
        )
   
    merged_df = dac.merge(dci_agg, on=['Asset_Valuation_Id'], how='left', suffixes=('', '_daf'))

    if dac['Is_Ntm'].all():
        print('NTM')
        merged_df['EBITDA_At_val'] = merged_df['Ebitda_Ntm_Val']
    else:
        merged_df['EBITDA_At_val'] = merged_df['Ebitda_At_Val']
    # Ebitda_At_Trans already holds the transaction-date figure in both branches
    merged_df['EBITDA_At_Trans'] = merged_df['Ebitda_At_Trans']
    merged_df['Ownership'] = merged_df['Client_Ownership']
       
    # Note: variable names say "sales" but this module computes EBITDA liquidity
    sales_liquidity_basic_df = merged_df

    # Ensure scalar floats for the exp/log math below
    sales_liquidity_basic_df['Terms_Of_Holding_Period'] = float(sales_liquidity_basic_df['Terms_Of_Holding_Period'].iloc[0])
    sales_liquidity_basic_df['Volatility_mean'] = float(sales_liquidity_basic_df['Volatility_mean'].iloc[0])
    sales_liquidity_basic_df['Volatility_median'] = float(sales_liquidity_basic_df['Volatility_median'].iloc[0])

    # a, b, c build the variance term of the average-strike put approximation; a = sigma^2 * T
    sales_liquidity_basic_df['a_Mean']=(sales_liquidity_basic_df['Volatility_mean']**2)*sales_liquidity_basic_df['Terms_Of_Holding_Period']
    sales_liquidity_basic_df['a_Median']=(sales_liquidity_basic_df['Volatility_median']**2)*sales_liquidity_basic_df['Terms_Of_Holding_Period']
    sales_liquidity_basic_df['b_Mean'] = np.log(2 * (np.exp(sales_liquidity_basic_df['Volatility_mean']**2 * sales_liquidity_basic_df['Terms_Of_Holding_Period']) - sales_liquidity_basic_df['Volatility_mean']**2 * sales_liquidity_basic_df['Terms_Of_Holding_Period'] -  1 ))
    sales_liquidity_basic_df['b_Median'] = np.log(2 * (np.exp(sales_liquidity_basic_df['Volatility_median']**2 * sales_liquidity_basic_df['Terms_Of_Holding_Period']) - sales_liquidity_basic_df['Volatility_median']**2 * sales_liquidity_basic_df['Terms_Of_Holding_Period'] -  1 ))
    sales_liquidity_basic_df['c_Mean'] = -2 * np.log(np.exp(sales_liquidity_basic_df['Volatility_mean']**2 * sales_liquidity_basic_df['Terms_Of_Holding_Period']) - 1)
    sales_liquidity_basic_df['c_Median'] = -2 * np.log(np.exp(sales_liquidity_basic_df['Volatility_median']**2 * sales_liquidity_basic_df['Terms_Of_Holding_Period']) - 1)    
    sales_liquidity_basic_df['aplusbplusc_Mean']=sales_liquidity_basic_df['a_Mean']+sales_liquidity_basic_df['b_Mean']+sales_liquidity_basic_df['c_Mean']+(sales_liquidity_basic_df['Dividend_Yield'])
    sales_liquidity_basic_df['aplusbplusc_Median']=sales_liquidity_basic_df['a_Median']+sales_liquidity_basic_df['b_Median']+sales_liquidity_basic_df['c_Median']+(sales_liquidity_basic_df['Dividend_Yield'])
    sales_liquidity_basic_df['x_mean']=sales_liquidity_basic_df['aplusbplusc_Mean']**0.5
    sales_liquidity_basic_df['x_median']=sales_liquidity_basic_df['aplusbplusc_Median']**0.5


    sales_liquidity_basic_df['liquidity_discount_mean']=-1 * (norm.cdf(sales_liquidity_basic_df['x_mean'] / 2) - norm.cdf(-sales_liquidity_basic_df['x_mean'] / 2))
    sales_liquidity_basic_df['liquidity_discount_median']=-1 * (norm.cdf(sales_liquidity_basic_df['x_median'] / 2) - norm.cdf(-sales_liquidity_basic_df['x_median'] / 2))


    sales_liquidity_basic_df['multiples_mean']=sales_liquidity_basic_df['peers_multiples_val_mean']*(1+sales_liquidity_basic_df['liquidity_discount_mean'])
    sales_liquidity_basic_df['multiples_median']=sales_liquidity_basic_df['peers_multiples_val_median']*(1+sales_liquidity_basic_df['liquidity_discount_median'])
    #
     #
    lowest_liquidity_discnt = np.minimum(sales_liquidity_basic_df['liquidity_discount_mean'], sales_liquidity_basic_df['liquidity_discount_median'])
    highest_liquidity_discnt = np.maximum(sales_liquidity_basic_df['liquidity_discount_mean'], sales_liquidity_basic_df['liquidity_discount_median'])
    sales_liquidity_basic_df['multiples_25percentile']=sales_liquidity_basic_df['peers_multiples_val_25percentile']*(1+lowest_liquidity_discnt)
    sales_liquidity_basic_df['multiples_75percentile']=sales_liquidity_basic_df['peers_multiples_val_75percentile']*(1+highest_liquidity_discnt)
    #
    sales_liquidity_basic_df['EV_Mean']=sales_liquidity_basic_df['multiples_mean']*sales_liquidity_basic_df['EBITDA_At_val']
    sales_liquidity_basic_df['EV_Median']=sales_liquidity_basic_df['multiples_median']*sales_liquidity_basic_df['EBITDA_At_val']
    #
    sales_liquidity_basic_df['EV_25percentile']=sales_liquidity_basic_df['multiples_25percentile']*sales_liquidity_basic_df['EBITDA_At_val']
    sales_liquidity_basic_df['EV_75percentile']=sales_liquidity_basic_df['multiples_75percentile']*sales_liquidity_basic_df['EBITDA_At_val']
    #
    sales_liquidity_basic_df['Equity_Mean']=sales_liquidity_basic_df['Ownership']*(sales_liquidity_basic_df['EV_Mean']-sales_liquidity_basic_df['Net_Debt_At_Val'])
    sales_liquidity_basic_df['Equity_Median']=sales_liquidity_basic_df['Ownership']*(sales_liquidity_basic_df['EV_Median']-sales_liquidity_basic_df['Net_Debt_At_Val'])
    #
    # Note: unlike Equity_Mean/Equity_Median above, these are not scaled by Ownership
    sales_liquidity_basic_df['Equity_25percentile']=sales_liquidity_basic_df['EV_25percentile']-sales_liquidity_basic_df['Net_Debt_At_Val']
    sales_liquidity_basic_df['Equity_75percentile']=sales_liquidity_basic_df['EV_75percentile']-sales_liquidity_basic_df['Net_Debt_At_Val']

    #select relevant fields from the dataframe
    ebitda_liquidity_final_df=sales_liquidity_basic_df[['Asset_Valuation_Id','Methodology_Id','Equity_Mean','Equity_Median','EV_Mean','EV_Median','multiples_mean','multiples_median','liquidity_discount_mean','liquidity_discount_median','Volatility_mean','Volatility_median','peers_multiples_trans_mean','peers_multiples_val_mean','peers_multiples_trans_median','peers_multiples_val_median','peers_multiples_trans_25percentile','peers_multiples_val_25percentile','peers_multiples_trans_75percentile','peers_multiples_val_75percentile','Ownership','a_Mean','a_Median','b_Mean','b_Median','c_Mean','c_Median','aplusbplusc_Mean','aplusbplusc_Median','x_mean','x_median','multiples_25percentile','multiples_75percentile','EV_25percentile','EV_75percentile','Equity_25percentile','Equity_75percentile']]

   
    # Write the data frame to SQL Server
    ebitda_liquidity_final_df.to_sql('#temp_table_ebitliquid', engine, if_exists='replace', index=False)

   

    # Retrieve the stored procedure name
    proc_name = STORED_PROCEDURES['LOAD_EBITDA_LIQUIDITY_RESULT']

    # Run the load stored procedure
    with engine.begin() as connection:
        connection.execute(text(f"EXEC {proc_name}"))

    result_json = ebitda_liquidity_final_df.to_json(orient='records')
    return result_json    
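The a/b/c/x terms above implement what looks like a Finnerty-style average-strike put approximation of the marketability (liquidity) discount; a quick numeric check with illustrative inputs:

import numpy as np
from scipy.stats import norm

# Illustrative inputs, not real data
vol, holding_period, dividend_yield = 0.35, 2.0, 0.0

v2t = vol ** 2 * holding_period              # sigma^2 * T = 0.245
a = v2t
b = np.log(2 * (np.exp(v2t) - v2t - 1))
c = -2 * np.log(np.exp(v2t) - 1)
x = np.sqrt(a + b + c + dividend_yield)
discount = -(norm.cdf(x / 2) - norm.cdf(-x / 2))
print(f"liquidity discount ~ {discount:.1%}")  # about -11% for these inputs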

