Import libraries¶

In [1]:
#!pip install matplotlib
#!pip install numpy==1.26.1 # Needed to downgrade numpy version
#!pip install seaborn
import os
import time

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
from tqdm import tqdm

import bw2calc as bc
import bw2io as bi
import bw2data as bd
from premise import *
from premise_gwp import add_premise_gwp

Set project containing IAM-pLCIs¶

In [2]:
bd.projects.set_current("WP2_paper_v3")
list(bd.databases)
Out[2]:
['biosphere3',
 'ecoinvent 3.9.1',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP60_2025_2024-07-23 14-41',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP60_2030_2024-07-23 14-51',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP60_2035_2024-07-23 15-02',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP60_2040_2024-07-23 15-15',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP60_2045_2024-07-23 15-33',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP60_2050_2024-07-23 15-59',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP45_2025_2024-07-23 16-34',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP45_2030_2024-07-23 17-05',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP45_2035_2024-07-23 17-30',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP45_2040_2024-07-23 17-54',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP45_2045_2024-07-23 18-14',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP45_2050_2024-07-23 18-44',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP26_2025_2024-07-23 19-09',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP26_2030_2024-07-23 19-44',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP26_2035_2024-07-23 20-12',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP26_2040_2024-07-23 20-53',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP26_2045_2024-07-23 21-25',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP26_2050_2024-07-23 21-58',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP19_2025_2024-07-23 22-35',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP19_2030_2024-07-23 23-12',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP19_2035_2024-07-24 00-07',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP19_2040_2024-07-24 00-49',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP19_2045_2024-07-24 01-33',
 'ecoinvent_cutoff_3.9_tiam-ucl_SSP2-RCP19_2050_2024-07-24 02-18',
 'ecoinvent_cutoff_3.9_image_SSP2-Base_2025_2024-07-24 03-06',
 'ecoinvent_cutoff_3.9_image_SSP2-Base_2030_2024-07-24 03-57',
 'ecoinvent_cutoff_3.9_image_SSP2-Base_2035_2024-07-24 04-49',
 'ecoinvent_cutoff_3.9_image_SSP2-Base_2040_2024-07-24 05-25',
 'ecoinvent_cutoff_3.9_image_SSP2-Base_2045_2024-07-24 06-21',
 'ecoinvent_cutoff_3.9_image_SSP2-Base_2050_2024-07-24 07-19',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2025_2024-07-24 08-19',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2030_2024-07-24 09-37',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2030_2024-07-24 10-05',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2035_2024-07-24 10-51',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2040_2024-07-24 11-46',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2045_2024-07-24 12-39',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP26_2050_2024-07-24 13-28',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP19_2025_2024-07-24 14-30',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP19_2030_2024-07-24 15-35',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP19_2035_2024-07-24 16-51',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP19_2040_2024-07-24 17-47',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP19_2045_2024-07-24 18-40',
 'ecoinvent_cutoff_3.9_image_SSP2-RCP19_2050_2024-07-24 19-40',
 'ecoinvent_cutoff_3.9_remind_SSP2-Base_2025_2024-07-24 20-38',
 'ecoinvent_cutoff_3.9_remind_SSP2-Base_2030_2024-07-24 21-38',
 'ecoinvent_cutoff_3.9_remind_SSP2-Base_2035_2024-07-24 22-42',
 'ecoinvent_cutoff_3.9_remind_SSP2-Base_2040_2024-07-24 23-49',
 'ecoinvent_cutoff_3.9_remind_SSP2-Base_2045_2024-07-25 00-50',
 'ecoinvent_cutoff_3.9_remind_SSP2-Base_2050_2024-07-25 02-01',
 'ecoinvent_cutoff_3.9_remind_SSP2-NDC_2025_2024-07-25 03-03',
 'ecoinvent_cutoff_3.9_remind_SSP2-NDC_2030_2024-07-25 04-19',
 'ecoinvent_cutoff_3.9_remind_SSP2-NDC_2035_2024-07-25 05-25',
 'ecoinvent_cutoff_3.9_remind_SSP2-NDC_2040_2024-07-25 06-35',
 'ecoinvent_cutoff_3.9_remind_SSP2-NDC_2045_2024-07-25 07-45',
 'ecoinvent_cutoff_3.9_remind_SSP2-NDC_2050_2024-07-25 09-25',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg1150_2025_2024-07-25 10-56',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg1150_2030_2024-07-25 12-35',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg1150_2035_2024-07-25 14-04',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg1150_2040_2024-07-25 15-37',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg1150_2045_2024-07-25 17-20',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg1150_2050_2024-07-25 18-57',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg500_2025_2024-07-25 20-26',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg500_2030_2024-07-25 21-32',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg500_2035_2024-07-25 23-02',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg500_2040_2024-07-26 00-35',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg500_2045_2024-07-26 02-07',
 'ecoinvent_cutoff_3.9_remind_SSP2-PkBudg500_2050_2024-07-26 03-45']

LCIA Calculations¶

In [ ]:
# Other potential products of interest

#names = [
#    'market for steel, low-alloyed',
#    'market group for electricity, low voltage',
#    'market for clinker',
#    'market for diesel, low-sulfur',
#    'market for hydrogen, gaseous',
#    'electricity production, at wood burning power plant',
#    'market for natural gas, high pressure',
#]
In [ ]:
# Define your inputs
names = ['market group for electricity, low voltage',]

location = 'World'  # Location is held constant for all products
scenarios = ['RCP19','PkBudg500','RCP26','PkBudg1150','Base','RCP60','RCP45','NDC']
iams = ['image','tiam-ucl','remind']

premise_gwp = [m for m in bd.methods if 'IPCC 2021' in str(m) and 'climate change' in str(m) and 'GWP 100a, incl. H and bio CO2' in str(m)
          ][0]

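# Keep only the aggregate EF v3.1 categories: the toxicity and ecotoxicity sub-indicators
# (inorganics/organics) are excluded in favor of their aggregated counterparts, and the EF
# climate change categories are dropped because the premise GWP method selected above is
# appended to the list instead.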
methods = [m for m in bd.methods if 'EF v3.1' in str(m) and 'no LT' not in str(m)
           and 'EN15804' not in str(m)
           and 'climate change' not in str(m)
           and 'human toxicity: carcinogenic, inorganics' not in str(m)
           and 'human toxicity: carcinogenic, organics' not in str(m)
           and 'human toxicity: non-carcinogenic, inorganics' not in str(m)
           and 'human toxicity: non-carcinogenic, organics' not in str(m)
           and 'ecotoxicity: freshwater, inorganics' not in str(m)
           and 'ecotoxicity: freshwater, organics' not in str(m)
          ]
methods.append(premise_gwp)

years = [str(year) for year in range(2025, 2051, 5)]

results = []

start_time = time.time()

for name in names:
    for method in methods:
        method_data = bd.Method(method)
        method_data.load()
        method_unit = method_data.metadata.get('unit', 'Unit not found')

        for iam in iams:
            for scenario in scenarios:
                for year in years:
                    try:
                        # Construct the base part of the database name
                        base_db_name = f'ecoinvent_cutoff_3.9_{iam}_SSP2-{scenario}_{year}'
                        # Find databases matching the base name (full names carry a creation-timestamp suffix)
                        db_names = [db for db in bd.databases if base_db_name in db]

                        if not db_names:
                            print(f"No database found for base name: {base_db_name}")
                            continue

                        # Use the first match that starts with the base name
                        db_name = db_names[0]
                        db = bd.Database(db_name)
                        
                        act = next(x for x in db if name in x['name'] and location in x['location']
                                   and 'period' not in x['name']
                                   and 'post' not in x['name']
                                  )
                        FU = {act: 1}

                        lca = bc.LCA(FU, method)
                        lca.lci()
                        lca.lcia()

                        results.append({
                            "Product": name,
                            "Method": method[1],
                            "Unit": method_unit,
                            "IAM": iam,
                            "Scenario": scenario,
                            "Year": year,
                            "Impact Score": lca.score
                        })
                    except Exception as e:
                        print(f"Failed for {name}, {method[1]}, {iam}, {scenario}, {year} with error: {e}")

# Convert the list of results to a DataFrame
results_df = pd.DataFrame(results)
results_df.to_excel('WP2_pLCA_electricity.xlsx', index=False)
total_time = (time.time() - start_time) / 60
print(f"LCA calculations completed and results are saved. Total time taken: {total_time:.2f} minutes.")
In [ ]:
results_df
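
For a quick overview of the raw scores, the results can be pivoted by impact category and year. A minimal sketch for a single IAM and scenario (chosen here purely for illustration):

In [ ]:
# Illustrative overview: one IAM and scenario, impact categories as rows, years as columns
overview = results_df[(results_df['IAM'] == 'tiam-ucl') & (results_df['Scenario'] == 'RCP19')]
overview.pivot_table(index='Method', columns='Year', values='Impact Score')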

Plot: TIAM-UCL LCIA¶

In [64]:
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D

# Load the DataFrame
df = pd.read_excel('WP2_pLCA_electricity.xlsx')
product_name = "market group for electricity, low voltage"
results_df = df[(df['Product'] == product_name) & (df['IAM'] == 'tiam-ucl')]
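# NB: the 'Scenario' column in the Excel file is assumed to have been relabeled during manual
# post-processing (e.g. 'RCP19' -> 'Very ambitious (<1.5C)'), since the color map below keys
# off these descriptive labels rather than the raw scenario codes used in the calculation loop.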

# Define color mappings and legend labels
color_map = {
    'No climate action (<3C)': '#843033', #RED
    'Baseline (NDCs)': '#F3A43B', #ORANGE
    'Ambitious (<2C)': '#37A2B7', # BLUE
    'Very ambitious (<1.5C)': '#117733'
}
legend_labels_map = {
    'No climate action (<3C)': 'No climate action (<3C)',
    'Baseline (NDCs)': 'Baseline (NDCs)',
    'Ambitious (<2C)': 'Ambitious (<2C)',
    'Very ambitious (<1.5C)': 'Very ambitious (<1.5C)'
}

# Calculate percentage difference relative to the baseline year (2025)
processed_data = []
baseline_year = 2025

for method in results_df['Method'].unique():
    method_data = results_df[results_df['Method'] == method]
    baseline_values = method_data[method_data['Year'] == baseline_year]['Impact Score'].values[0]
    for scenario in results_df['Scenario'].unique():
        scenario_data = method_data[method_data['Scenario'] == scenario]
        for _, row in scenario_data.iterrows():
            processed_data.append({
                'Method': method,
                'Scenario': scenario,
                'Year': row['Year'],
                'Unit': row['Unit'],
                'Percentage Difference': ((row['Impact Score'] - baseline_values) / baseline_values) * 100
            })

processed_df = pd.DataFrame(processed_data)

# Export processed results to an Excel file
processed_df.to_excel('processed_results.xlsx', index=False)

# Create subplots
method_list = results_df['Method'].unique()
nrows = (len(method_list) + 3) // 4
ncols = 4
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15.5, nrows * 3.5))
fig.subplots_adjust(hspace=0.38, wspace=0.25, top=0.90)  # Adjust figure layout

# Iterate over methods to plot
for i, ax in enumerate(axes.flatten()):
    if i < len(method_list):
        method = method_list[i]
        method_data = processed_df[processed_df['Method'] == method]

        for scenario in processed_df['Scenario'].unique():
            scenario_data = method_data[method_data['Scenario'] == scenario]
            ax.plot(scenario_data['Year'], scenario_data['Percentage Difference'], marker='o',
                    label=scenario, color=color_map.get(scenario, 'black'))
            ax.set_xticks(scenario_data['Year'].unique())  # Set x-axis ticks

        # Draw a solid black line through y=0
        ax.axhline(0, color='black', linestyle='-', linewidth=1)
        ax.axhline(100, color='grey', linestyle='--', linewidth=1)
        ax.axhline(-100, color='grey', linestyle='--', linewidth=1)

        # Set titles, labels, and limits
        ax.set_title(method, fontsize=14)  # Increase title font size
        if i % ncols == 0:
            ax.set_ylabel('%', fontsize=12)  # Increase y-axis label font size for leftmost charts
        else:
            ax.set_ylabel('')  # Remove y-axis label for other charts
        ax.tick_params(axis='x', labelsize=12)  # Increase x-axis tick label size
        ax.tick_params(axis='y', labelsize=12)  # Increase y-axis tick label size
        ax.set_ylim(-120, 100)  # Set y-axis limits to -120% and +100%

    else:
        ax.axis('off')

# Remove x-axis label but keep tick labels
for ax in axes.flatten():
    ax.set_xlabel('')

# Define custom legend handles and labels based on the color map
legend_handles = [Line2D([0], [0], color=color, lw=2) for scenario, color in color_map.items()]
legend_labels = [legend_labels_map[scenario] for scenario in color_map.keys()]

# Add the custom legend at the top with increased font size
fig.legend(legend_handles, legend_labels, loc='upper center', ncol=len(legend_labels), title='Scenario', fontsize=16, title_fontsize=16, frameon=False)

# Save and show the plot
plt.savefig('product_impact_plot.svg')
plt.show()
[Output figure: one panel per impact category, percentage change relative to 2025 for the TIAM-UCL scenarios.]

Plot inter-IAM comparisons¶

In [71]:
# Load data
df = pd.read_excel('WP2_pLCA_electricity.xlsx')

# Define parameters
product_name = "market group for electricity, low voltage"
selected_scenarios = ['Very ambitious (<1.5C)']  # Change the scenario(s) of interest here

# Filter the DataFrame for the selected product and scenarios
results_df = df[(df['Product'] == product_name) & (df['Scenario'].isin(selected_scenarios))].copy()  # copy so new columns can be added without a SettingWithCopyWarning

# Define color mappings for IAM-Scenario combinations
color_map = {
    'image': '#843033',
    'tiam-ucl': '#F3A43B',
    'remind': '#37A2B7',
}

# Calculate the baseline impact score for the year 2025
baseline_df = results_df[results_df['Year'] == 2025].copy()
baseline_df.set_index(['IAM', 'Scenario', 'Method'], inplace=True)

# Transform the impact scores to a percentage change relative to the baseline year (2025)
def calculate_percentage_change(row):
    try:
        baseline_score = baseline_df.loc[(row['IAM'], row['Scenario'], row['Method']), 'Impact Score']
        return ((row['Impact Score'] - baseline_score) / baseline_score) * 100
    except KeyError:
        return None

results_df['Impact Score (%)'] = results_df.apply(calculate_percentage_change, axis=1)

# Save the results with percentage changes to an Excel file
results_df.to_excel('results_with_percentage_changes.xlsx', index=False)

# Get unique methods and set up the subplots grid
method_list = results_df['Method'].unique()
nrows = (len(method_list) + 3) // 4
ncols = 4

fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15.5, nrows * 3.5))
fig.subplots_adjust(hspace=0.38, wspace=0.25, top=0.91)  # Adjust figure layout

# Iterate over methods to plot
for i, ax in enumerate(axes.flatten()):
    if i < len(method_list):
        method = method_list[i]
        method_data = results_df[results_df['Method'] == method]
        unit = method_data['Unit'].iloc[0]

        # Iterate over IAM-Scenario combinations
        for (iam, scenario), group in method_data.groupby(['IAM', 'Scenario']):
            color_key = iam  # Use IAM as color key
            ax.plot(group['Year'], group['Impact Score (%)'], marker='o',
                    label=color_key, color=color_map.get(color_key, 'black'))
            ax.set_xticks(group['Year'].unique())  # Set x-axis ticks

        # Draw horizontal lines
        ax.axhline(0, color='black', linestyle='-', linewidth=1)
        ax.axhline(100, color='grey', linestyle='--', linewidth=1)
        ax.axhline(-100, color='grey', linestyle='--', linewidth=1)

        # Set titles, labels, and limits
        ax.set_title(method, fontsize=14)  # Increase title font size
        if i % ncols == 0:
            ax.set_ylabel('%', fontsize=12)  # Increase y-axis label font size only for the leftmost charts
        ax.tick_params(axis='x', labelsize=12)  # Increase x-axis tick label size
        ax.tick_params(axis='y', labelsize=12)  # Increase y-axis tick label size

        # Set y-axis limits based on the method
        if method == "land use":
            ax.set_ylim(-120, 200)  # Set y-axis limits to -120% and +200%
        elif method == "ozone depletion":
            ax.set_ylim(-120, 150)  # Set y-axis limits to -120% and +150%
        else:
            ax.set_ylim(-120, 100)  # Set y-axis limits to -120% and +100%

    else:
        ax.axis('off')

# Remove x-axis label
for ax in axes.flatten():
    ax.set_xlabel('')

# Define custom legend handles and labels based on the color map
legend_handles = [Line2D([0], [0], color=color, lw=2) for iam, color in color_map.items()]
legend_labels = list(color_map.keys())

# Add the custom legend at the top with increased font size
fig.legend(legend_handles, legend_labels, loc='upper center', ncol=len(legend_labels), fontsize=16, title_fontsize=16, frameon=False, bbox_to_anchor=(0.5, 0.98))

# Save and show the plot
plt.savefig('product_impact_plot.svg')
plt.show()
[Output figure: one panel per impact category, percentage change relative to 2025 for the selected scenario, one line per IAM.]

Life-cycle GWP of individual technologies¶

Calculated individually for each technology, with the results collated and further processed in Excel.

In [199]:
iam = 'tiam-ucl'
scenario = 'RCP19'
region = 'CH'
year = '2050'
activity = 'nuclear, boiling water reactor'
premise_gwp = [m for m in bd.methods if 'IPCC 2021' in str(m) and 'climate change' in str(m) and 'GWP 100a, incl. H and bio CO2' in str(m)][0]

for database in list(bd.databases):
    if iam in database and scenario in database and year in database:
        selected_database = bd.Database(database)

activity = [x for x in selected_database if activity in x['name'] and region in x['location']]
activity
Out[199]:
['electricity production, nuclear, boiling water reactor' (kilowatt hour, CH, None)]
In [200]:
act = activity[0]
act
Out[200]:
'electricity production, nuclear, boiling water reactor' (kilowatt hour, CH, None)
In [201]:
def calculate_direct_impacts(activity, method):
    direct_score = 0
    biosphere_exchanges = []
    cf_data = bd.Method(method).load()
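    # Method.load() returns (biosphere flow key, characterization factor) pairs; build a
    # lookup so the activity's own (direct) biosphere exchanges can be characterized below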
    cf_dict = {cf[0]: cf[1] for cf in cf_data}

    for exchange in activity.biosphere():
        flow_uuid = exchange.input.key
        amount = exchange['amount']
        cf = cf_dict.get(flow_uuid, 0)
        score = amount * cf
        direct_score += score
        biosphere_exchanges.append({'name': exchange.input['name'], 'amount': amount, 'score': score})

    return direct_score, biosphere_exchanges

act = activity[0]
FU = {act: 1}
lca = bc.LCA(FU, premise_gwp)
lca.lci()
lca.lcia()
total_score = lca.score

# Calculate direct impacts
direct_score, biosphere_exchanges = calculate_direct_impacts(act, premise_gwp)

# Calculate indirect impacts
indirect_score = total_score - direct_score

# Create the results DataFrame
results = {
    'Activity Name': [act['name']],
    'Total Score': [total_score],
    'Direct Score': [direct_score],
    'Indirect Score': [indirect_score]
}
results_df = pd.DataFrame(results)
results_df
Out[201]:
Activity Name Total Score Direct Score Indirect Score
0 electricity production, nuclear, boiling water... 0.003141 0.0 0.003141
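
The direct/indirect/total scores plotted below were obtained by re-running the cell above for each technology, year and database, and collating the numbers in Excel. A minimal sketch of how that collation could be done in the notebook instead is given here; the technology-to-activity mapping and the region are illustrative assumptions and would need to match the datasets actually present in each database.

In [ ]:
# Sketch only: collate direct/indirect/total GWP for a few technologies in one loop.
# The activity-name strings below are illustrative, not an exhaustive or verified mapping.
tech_activities = {
    'Nuclear, boiling water reactor': 'electricity production, nuclear, boiling water reactor',
    'Wind, offshore': 'electricity production, wind, 1-3MW turbine, offshore',
}

rows = []
for label, act_name in tech_activities.items():
    candidates = [x for x in selected_database
                  if act_name in x['name'] and region in x['location']]
    if not candidates:
        continue  # technology not available in this database/region
    tech_act = candidates[0]
    lca = bc.LCA({tech_act: 1}, premise_gwp)
    lca.lci()
    lca.lcia()
    direct, _ = calculate_direct_impacts(tech_act, premise_gwp)
    rows.append({'Name': label, 'direct': direct,
                 'indirect': lca.score - direct, 'total': lca.score})

pd.DataFrame(rows)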
In [235]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Data based on the provided text
data = {
    'Name': [
        'Coal, conventional', 'Coal, conventional', 'Coal, supercritical', 'Coal, supercritical',
        'Coal, supercritical w CCS', 'Coal, supercritical w CCS', 'Natural gas, gas turbine',
        'Natural gas, gas turbine', 'Natural gas, combined cycle w CCS', 'Natural gas, combined cycle w CCS',
        'Biomass, combustion', 'Biomass, combustion', 'Biomass, combustion w CCS',
        'Biomass, combustion w CCS', 'Hydro, impoundment', 'Hydro, impoundment', 'Solar, photovoltaic',
        'Solar, photovoltaic', 'Wind, offshore', 'Wind, offshore', 'Nuclear, boiling water reactor',
        'Nuclear, boiling water reactor'
    ],
    'year': [
        2025, 2050, 2025, 2050, 2025, 2050, 2025, 2050, 2025, 2050, 2025, 2050, 2025, 2050, 2025, 2050, 2025, 2050,
        2025, 2050, 2025, 2050
    ],
    'direct': [
        0.94, 0.94, 0.52, 0.52, 0.05, 0.05, 0.46, 0.43, 0.04, 0.04, 1.11, 0.93, 0.20, 0.12, 0.00, 0.00, 0.00, 0.00, 0.00,
        0.00, 0.00, 0.00
    ],
    'indirect': [
        0.13, 0.10, 0.11, 0.09, 0.12, 0.08, 0.04, 0.03, 0.03, 0.02, -1.02, -0.90, -1.75, -1.07, 0.01, 0.00, 0.035, 0.016,
        0.015, 0.010, 0.006, 0.003
    ],
    'total': [
        1.07, 1.04, 0.64, 0.61, 0.18, 0.13, 0.50, 0.46, 0.08, 0.06, 0.09, 0.03, -1.55, -0.95, 0.01, 0.00, 0.035, 0.016,
        0.015, 0.010, 0.006, 0.003
    ]
}

df = pd.DataFrame(data)

# Define order for the plot
order = [
    'Coal, conventional', 'Coal, supercritical', 'Coal, supercritical w CCS', 'Natural gas, gas turbine', 
    'Natural gas, combined cycle w CCS', 'Biomass, combustion', 'Biomass, combustion w CCS', 
    'Hydro, impoundment', 'Solar, photovoltaic', 'Wind, offshore', 'Nuclear, boiling water reactor'
]

df['Name'] = pd.Categorical(df['Name'], categories=order, ordered=True)
df = df.sort_values('Name')

# Create pivot table for plotting
pivot_df = df.pivot(index='Name', columns='year', values=['direct', 'indirect', 'total'])
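# pivot() with several value columns yields MultiIndex columns (value, year); flatten them
# to plain names such as 'direct_2025' for easier indexing below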
pivot_df.columns = [f'{col[0]}_{col[1]}' for col in pivot_df.columns]

# Plotting
fig, ax = plt.subplots(figsize=(9, 5))

# Define bar width and positions
bar_width = 0.35
index = np.arange(len(pivot_df))

# Define colors
colors = {
    'indirect_2025': '#843033',
    'direct_2025': '#A1565C',
    'indirect_2050': '#366F78',
    'direct_2050': '#51A5B2'
}

# Function to plot bars, accounting for specific biomass adjustments
def plot_bars(year, color_direct, color_indirect):
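    # Biomass rows carry negative indirect scores (biogenic CO2 uptake, plus geological storage
    # in the CCS case), so their direct bars start at zero instead of being stacked on top of
    # the negative indirect bar.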
    for i, name in enumerate(pivot_df.index):
        if name in ['Biomass, combustion', 'Biomass, combustion w CCS']:
            ax.barh(i - bar_width/2 if year == 2025 else i + bar_width/2, pivot_df[f'indirect_{year}'][name], bar_width, label=f'Indirect {year}' if i == 0 else "", color=color_indirect)
            ax.barh(i - bar_width/2 if year == 2025 else i + bar_width/2, pivot_df[f'direct_{year}'][name], bar_width, label=f'Direct {year}' if i == 0 else "", color=color_direct)
        else:
            ax.barh(i - bar_width/2 if year == 2025 else i + bar_width/2, pivot_df[f'indirect_{year}'][name], bar_width, label=f'Indirect {year}' if i == 0 else "", color=color_indirect)
            ax.barh(i - bar_width/2 if year == 2025 else i + bar_width/2, pivot_df[f'direct_{year}'][name], bar_width, left=pivot_df[f'indirect_{year}'][name], label=f'Direct {year}' if i == 0 else "", color=color_direct)

# Plot bars
plot_bars(2025, colors['direct_2025'], colors['indirect_2025'])
plot_bars(2050, colors['direct_2050'], colors['indirect_2050'])

# Plot total score as points
ax.scatter(pivot_df['total_2025'], index - bar_width/2, marker='o', color='black', s=25, label='Total')
ax.scatter(pivot_df['total_2050'], index + bar_width/2, marker='o', color='black', s=25)

# Add labels, title, and legend
ax.set_xlabel('kg CO2e per kWh', fontsize=11)
ax.set_yticks(index)
ax.set_yticklabels(pivot_df.index, fontsize=12)
ax.tick_params(axis='x', labelsize=11)  # Increase x-axis tick label size

# Simplify the legend to avoid duplication
handles, labels = ax.get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax.legend(by_label.values(), by_label.keys(), fontsize=12, frameon=False)

ax.set_xlim(-1.8, 1.2)

# Add line through x=0
ax.axvline(x=0, color='black', linestyle='-', linewidth=1)
plt.tight_layout()
plt.savefig("plot.svg")
plt.show()
[Output figure: horizontal bars of direct and indirect life-cycle GWP (kg CO2e per kWh) per technology for 2025 and 2050, with total scores shown as points.]

Electricity production contribution analyses¶

As above, these contributions were calculated individually as shown below and further processed manually in Excel.

In [6]:
rename_dict = {
    'Biomass':[
        'electricity production, at biomass-fired IGCC power plant',
        'electricity production, at wood burning power plant',
        'heat and power co-generation, biogas, gas engine',
        'treatment of municipal solid waste, incineration',
        'heat and power co-generation, biogas, gas engine, renewable energy products',
        'heat and power co-generation, existing, wood chips',
        'heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014',
        'heat and power co-generation, wood chips, 6667 kW'
        
    ],

    'Biomass w CCS':[
        'electricity production, at biomass-fired IGCC power plant, pre, pipeline 200km, storage 1000m',
        'electricity production, at wood burning power plant, post, pipeline 200km, storage 1000m',
    ],

    'Coal':[
        'electricity production, at co-firing wood and coal power plant, 50-50',
        'electricity production, at co-firing wood and coal power plant, 80-20',
        'electricity production, at hard coal-fired IGCC power plant',
        'electricity production, at lignite-fired IGCC power plant',
        'electricity production, hard coal, supercritical',
        'electricity production, hard coal, ultra-supercritical',
        'electricity production, lignite',
        'electricity production, hard coal',
        'heat and power co-generation, hard coal',
        'heat and power co-generation, lignite',
        
    ],

    'Coal w CCS':[
        'electricity production, at hard coal-fired IGCC power plant, pre, pipeline 200km, storage 1000m',
        'electricity production, at hard coal-fired power plant, ultra-super critical, oxy, pipeline 200km, storage 1000m',
        'electricity production, at lignite-fired IGCC power plant, pre, pipeline 200km, storage 1000m',    
    ],

    'Gas w CCS':[
        'electricity production, at natural gas-fired combined cycle power plant, post, pipeline 200km, storage 1000m',
    ],

    'Gas':[
        'electricity production, natural gas, combined cycle power plant',
        'electricity production, natural gas, gas turbine, conventional power plant',
        'electricity production, natural gas, conventional power plant',
        'heat and power co-generation, natural gas, combined cycle power plant, 400MW electrical',
        'heat and power co-generation, natural gas, conventional power plant, 100MW electrical'
        
        
    ],

    'Geo':[
        'electricity production, deep geothermal',
    ],

    'Hydro':[
        'electricity production, hydro, run-of-river',
        'electricity production, wave energy converter',
        'electricity production, hydro, reservoir, alpine region',
        'electricity production, hydro, reservoir, non-alpine region',
    ],

    'Nuclear':[
        'electricity production, nuclear, boiling water reactor',
        'electricity production, nuclear, pressure water reactor',
        'electricity production, nuclear, pressure water reactor, heavy water moderated'
    ],

    'Oil':[
        'electricity production, oil',
        'heat and power co-generation, oil',
    ],

    'Solar':[
        'electricity production, photovoltaic, commercial',
        'electricity production, solar thermal parabolic trough, 50 MW',
        'electricity production, solar tower power plant, 20 MW',
    ],

    'Wind':[
        'electricity production, wind, 1-3MW turbine, offshore',
        'electricity production, wind, 1-3MW turbine, onshore',
        'electricity production, wind, <1MW turbine, onshore',
        'electricity production, wind, >3MW turbine, onshore',
    ],

    'Storage':[
        'electricity supply, high voltage, from vanadium-redox flow battery system',
    ],

    'Efficiency Loss':[
        'market group for electricity, high voltage',
    ]
}

scenario = 'SSP2-RCP19'
iam = 'tiam-ucl'
year = '2050'
region = 'WEU'
activity = 'market group for electricity, high voltage'

method = [m for m in bd.methods if 'EF v3.1' in str(m) and 'no LT' not in str(m)
           and 'EN15804' not in str(m)
           and 'climate change: biogenic' not in str(m)
           and 'climate change: fossil' not in str(m)
           and 'climate change: land use and land use change' not in str(m)
           and 'human toxicity: carcinogenic, inorganics' not in str(m)
           and 'human toxicity: carcinogenic, organics' not in str(m)
           and 'human toxicity: non-carcinogenic, inorganics' not in str(m)
           and 'human toxicity: non-carcinogenic, organics' not in str(m)
          ][0]  # only the first matching EF v3.1 category is used here; uncomment the line below to use GWP instead
#method = [m for m in bd.methods if 'IPCC 2021' in str(m) and 'climate change' in str(m) and 'GWP 100a, incl. H and bio CO2' in str(m)][0]

for database in list(bd.databases):
    if scenario in database and year in database and iam in database:
        selected_database = database

act = [x for x in bd.Database(selected_database) if activity in x['name'] and region in x['location'] and 'period' not in x['name']][0]

dfs = []

for x in act.technosphere():
    data = {'Amount': [x['amount']], 'Name': [x['name']], 'Location': [x['location']]}
    df = pd.DataFrame(data)
    dfs.append(df)

# Concatenate all DataFrames in the list
result_df = pd.concat(dfs, ignore_index=True)
grouped_df = result_df.groupby(['Name']).agg({'Amount': 'sum', 'Location': 'first'}).reset_index()

# Collect results in a list and build the DataFrame after the loop
# (avoids repeatedly concatenating onto an empty DataFrame)
result_rows = []

# Start timing the process
start_time = time.time()

# Loop through the DataFrame with a progress bar
for index, row in tqdm(grouped_df.iterrows(), total=len(grouped_df), desc="Processing", unit=" row"):
    name = row['Name']
    location = row['Location']
    amount = row['Amount']

    # Fetch the activity from the Brightway database
    activity = [x for x in bd.Database(selected_database) 
                if name in x['name'] and location in x['location'] and 'period' not in x['name']][0]
    
    # Define the functional unit
    FU = {activity: amount}
    
    # Perform the LCA calculation
    lca = bc.LCA(FU, method)
    lca.lci()
    lca.lcia()
    
    # Store the result for this input
    result_rows.append({'Name': name, 'Location': location, 'Total Score': lca.score})

# End timing the process
end_time = time.time()
print("Time taken:", end_time - start_time)

# Build the results DataFrame from the collected rows
results_df = pd.DataFrame(result_rows)

x = results_df.copy()

# Replace names using the rename_dict
for key, values in rename_dict.items():
    for value in values:
        x['Name'] = x['Name'].replace({value: key})

# Calculate total score excluding 'Efficiency Loss'
total_score_without_efficiency_loss = x.loc[x['Name'] != 'Efficiency Loss', 'Total Score'].sum()

# Calculate the total score for 'Efficiency Loss'
efficiency_loss_score = x.loc[x['Name'] == 'Efficiency Loss', 'Total Score'].sum()

# Drop the rows where 'Name' is 'Efficiency Loss'
x = x[x['Name'] != 'Efficiency Loss']

# Distribute the 'Efficiency Loss' score proportionally
x['Total Score'] += (x['Total Score'] / total_score_without_efficiency_loss) * efficiency_loss_score

# Group by 'Name' and sum the 'Total Score' values
grouped_scores = x.groupby('Name')['Total Score'].sum().reset_index()
grouped_scores
Processing: 100%|████████████████████████████████████████████████████████████████████| 33/33 [05:57<00:00, 10.84s/ row]
Time taken: 357.6148691177368

Out[6]:
Name Total Score
0 Biomass -1.305442e-14
1 Biomass w CCS 6.057236e-13
2 Coal 3.432963e-07
3 Coal w CCS 4.158983e-07
4 Gas 1.279965e-15
5 Gas w CCS 7.918286e-15
6 Geo 1.724812e-06
7 Hydro 1.318494e-06
8 Nuclear 6.388180e-06
9 Oil 2.617781e-14
10 Solar 3.005792e-05
11 Storage 2.593631e-05
12 Wind 4.561243e-05
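
The grouped scores above were processed further by hand in Excel; a minimal in-notebook alternative is to express each technology group's score as a share of the total (a sketch, assuming the grouped_scores DataFrame from the cell above):

In [ ]:
# Express each technology group's score as a share of the total impact (%)
grouped_scores['Share (%)'] = (
    grouped_scores['Total Score'] / grouped_scores['Total Score'].sum() * 100
)
grouped_scores.sort_values('Share (%)', ascending=False)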

Plot: Normalization¶

In [103]:
scenario = 'No climate scenario'
iam = 'tiam-ucl'

# Load the Excel file
excel_file = "WP2_pLCA_electricity.xlsx"
results_df = pd.read_excel(excel_file, sheet_name="Results")
nf_df = pd.read_excel(excel_file, sheet_name="NF")
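# The workbook is assumed to have been reorganized manually after the calculation step:
# 'Results' holds the exported LCIA results (with relabeled scenarios) and 'NF' holds one
# normalization factor per entry in the 'Method' column.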

# Merge the Results and NF dataframes on the "Method" column
merged_df = pd.merge(results_df, nf_df, on="Method", how="left")

# Check if the merge was successful
if "Normalization Factor" not in merged_df.columns:
    raise ValueError("Normalization Factor column not found in NF sheet or Method column doesn't match between Results and NF sheets.")

# Filter data for the specified scenario and IAM
scenario_df = merged_df[(merged_df['Scenario'] == scenario) & (merged_df['IAM'] == iam)].copy()  # copy so the normalized column can be added without a SettingWithCopyWarning

# Calculate the Impact Score divided by the Normalization Factor
scenario_df["Normalized Impact Score"] = scenario_df["Impact Score"] / scenario_df["Normalization Factor"]

# Save the processed results to an Excel file
scenario_df.to_excel('processed_results.xlsx', index=False)

# Pivot the data to create a heatmap
heatmap_df = scenario_df.pivot_table(index='Method', columns='Year', values='Normalized Impact Score', aggfunc='mean')

# Sort rows (methods) based on their values for the year 2025 in descending order
heatmap_df = heatmap_df.sort_values(by=2025, ascending=False)

# Plot the heatmap without annotations
plt.figure(figsize=(3, 4))  # Adjusted figure size
ax = sns.heatmap(heatmap_df, cmap='rocket_r', annot=False, linewidths=0.5)

# Set scientific notation for the color bar (fix the tick positions first so set_ticklabels is valid)
colorbar = ax.collections[0].colorbar
ticks = colorbar.get_ticks()
colorbar.set_ticks(ticks)
colorbar.set_ticklabels([f'{x:.1e}' for x in ticks])

plt.title(scenario)
plt.xlabel('')
plt.ylabel('')
plt.savefig('heatmap.svg', bbox_inches='tight')  # Save the figure with adjusted bounding box
plt.show()
[Output figure: heatmap of normalized impact scores by impact category (rows) and year (columns).]