vdwow committed on
Commit 8a56d57 · Parent: 291e05e

fix: linting + text mistakes

app.py CHANGED
@@ -8,21 +8,17 @@ from src.content import (
8
  LICENCE_TEXT,
9
  INTRO_TEXT,
10
  METHODOLOGY_TEXT,
11
- SUPPORT_TEXT
12
  )
13
 
14
  from src.expert import expert_mode
15
  from src.calculator import calculator_mode
16
  from src.token_estimator import token_estimator
17
 
18
- st.set_page_config(
19
- layout="wide",
20
- page_title="ECOLOGITS",
21
- page_icon='💬'
22
- )
23
 
24
- with open( "src/style.css" ) as css:
25
- st.markdown( f'<style>{css.read()}</style>' , unsafe_allow_html= True)
26
 
27
  st.html(HERO_TEXT)
28
 
@@ -30,41 +26,36 @@ st.markdown(INTRO_TEXT, unsafe_allow_html=True)
30
 
31
  tab_calculator, tab_expert, tab_token, tab_method, tab_about, tab_support = st.tabs(
32
  [
33
- '🧮 Calculator',
34
- '🤓 Expert Mode',
35
- '🪙 Tokens estimator',
36
- '📖 Methodology',
37
- 'ℹ️ About',
38
- '🩷 Support us'
39
  ]
40
  )
41
 
42
  with tab_calculator:
43
-
44
  calculator_mode()
45
 
46
  with tab_expert:
47
-
48
  expert_mode()
49
-
50
  with tab_token:
51
-
52
  token_estimator()
53
 
54
  with tab_method:
55
-
56
  st.write(METHODOLOGY_TEXT)
57
 
58
  with tab_about:
59
-
60
  st.markdown(ABOUT_TEXT, unsafe_allow_html=True)
61
 
62
  with tab_support:
63
  st.markdown(SUPPORT_TEXT, unsafe_allow_html=True)
64
 
65
 
66
- with st.expander('📚 Citation'):
67
  st.html(CITATION_LABEL)
68
  st.html(CITATION_TEXT)
69
 
70
- st.html(LICENCE_TEXT)
 
8
  LICENCE_TEXT,
9
  INTRO_TEXT,
10
  METHODOLOGY_TEXT,
11
+ SUPPORT_TEXT,
12
  )
13
 
14
  from src.expert import expert_mode
15
  from src.calculator import calculator_mode
16
  from src.token_estimator import token_estimator
17
 
18
+ st.set_page_config(layout="wide", page_title="ECOLOGITS", page_icon="💬")
 
 
 
 
19
 
20
+ with open("src/style.css") as css:
21
+ st.markdown(f"<style>{css.read()}</style>", unsafe_allow_html=True)
22
 
23
  st.html(HERO_TEXT)
24
 
 
26
 
27
  tab_calculator, tab_expert, tab_token, tab_method, tab_about, tab_support = st.tabs(
28
  [
29
+ "🧮 Calculator",
30
+ "🤓 Expert Mode",
31
+ "🪙 Tokens estimator",
32
+ "📖 Methodology",
33
+ "ℹ️ About",
34
+ "🩷 Support us",
35
  ]
36
  )
37
 
38
  with tab_calculator:
 
39
  calculator_mode()
40
 
41
  with tab_expert:
 
42
  expert_mode()
43
+
44
  with tab_token:
 
45
  token_estimator()
46
 
47
  with tab_method:
 
48
  st.write(METHODOLOGY_TEXT)
49
 
50
  with tab_about:
 
51
  st.markdown(ABOUT_TEXT, unsafe_allow_html=True)
52
 
53
  with tab_support:
54
  st.markdown(SUPPORT_TEXT, unsafe_allow_html=True)
55
 
56
 
57
+ with st.expander("📚 Citation"):
58
  st.html(CITATION_LABEL)
59
  st.html(CITATION_TEXT)
60
 
61
+ st.html(LICENCE_TEXT)
src/__init__.py CHANGED
@@ -6,4 +6,4 @@ from .utils import *
6
  from .calculator import calculator_mode
7
  from .impacts import get_impacts, display_impacts
8
  from .models import load_models
9
- from .electricity_mix import *
 
6
  from .calculator import calculator_mode
7
  from .impacts import get_impacts, display_impacts
8
  from .models import load_models
9
+ from .electricity_mix import *
src/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (522 Bytes).
 
src/__pycache__/calculator.cpython-313.pyc ADDED
Binary file (4.73 kB).
 
src/__pycache__/constants.cpython-313.pyc ADDED
Binary file (2.51 kB).
 
src/__pycache__/content.cpython-313.pyc ADDED
Binary file (20 kB).
 
src/__pycache__/electricity_mix.cpython-313.pyc ADDED
Binary file (5.74 kB).
 
src/__pycache__/expert.cpython-313.pyc ADDED
Binary file (10.1 kB).
 
src/__pycache__/impacts.cpython-313.pyc ADDED
Binary file (7.93 kB).
 
src/__pycache__/models.cpython-313.pyc ADDED
Binary file (4.47 kB).
 
src/__pycache__/token_estimator.cpython-313.pyc ADDED
Binary file (1.91 kB).
 
src/__pycache__/utils.cpython-313.pyc ADDED
Binary file (10.9 kB).
 
src/calculator.py CHANGED
@@ -1,72 +1,101 @@
1
  import streamlit as st
2
 
3
  from ecologits.tracers.utils import llm_impacts
4
- from src.impacts import get_impacts, display_impacts, display_equivalent
5
  from src.utils import format_impacts
6
  from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH
7
  from src.models import load_models
8
 
9
  from src.constants import PROMPTS
10
 
11
- def calculator_mode():
12
 
 
13
  with st.container(border=True):
14
-
15
  df = load_models(filter_main=True)
16
-
17
  col1, col2, col3 = st.columns(3)
18
 
19
  with col1:
20
  provider = st.selectbox(
21
- label = 'Provider',
22
- options = [x for x in df['provider_clean'].unique()],
23
- index = 7
24
  )
25
 
26
  with col2:
27
  model = st.selectbox(
28
- label = 'Model',
29
- options = [x for x in df['name_clean'].unique() if x in df[df['provider_clean'] == provider]['name_clean'].unique()]
 
 
 
 
30
  )
31
 
32
  with col3:
33
- output_tokens = st.selectbox('Example prompt', [x[0] for x in PROMPTS])
34
-
35
  # WARNING DISPLAY
36
- provider_raw = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]['provider'].values[0]
37
- model_raw = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]['name'].values[0]
 
 
 
 
38
 
39
- df_filtered = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]
 
 
40
 
41
- if df_filtered['warning_arch'].values[0] and not df_filtered['warning_multi_modal'].values[0]:
 
 
 
42
  st.warning(WARNING_CLOSED_SOURCE)
43
- if df_filtered['warning_multi_modal'].values[0] and not df_filtered['warning_arch'].values[0]:
 
 
 
44
  st.warning(WARNING_MULTI_MODAL)
45
- if df_filtered['warning_arch'].values[0] and df_filtered['warning_multi_modal'].values[0]:
 
 
 
46
  st.warning(WARNING_BOTH)
47
-
48
- try:
49
 
 
50
  impacts = llm_impacts(
51
- provider=provider_raw,
52
- model_name=model_raw,
53
- output_token_count=[x[1] for x in PROMPTS if x[0] == output_tokens][0],
54
- request_latency=100000
55
- )
56
 
57
  impacts, _, _ = format_impacts(impacts)
58
-
59
- with st.container(border=True):
60
 
61
- st.markdown('<h3 align = "center">Environmental impacts</h3>', unsafe_allow_html=True)
62
- st.markdown('<p align = "center">To understand how the environmental impacts are computed go to the 📖 Methodology tab.</p>', unsafe_allow_html=True)
 
 
 
 
 
 
 
63
  display_impacts(impacts)
64
-
65
  with st.container(border=True):
66
-
67
- st.markdown('<h3 align = "center">That\'s equivalent to ...</h3>', unsafe_allow_html=True)
68
- st.markdown('<p align = "center">Making this request to the LLM is equivalent to the following actions :</p>', unsafe_allow_html=True)
 
 
 
 
 
69
  display_equivalent(impacts)
70
-
71
- except Exception as e:
72
- st.error('Could not find the model in the repository. Please try another model.')
 
 
 
1
  import streamlit as st
2
 
3
  from ecologits.tracers.utils import llm_impacts
4
+ from src.impacts import display_impacts, display_equivalent
5
  from src.utils import format_impacts
6
  from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH
7
  from src.models import load_models
8
 
9
  from src.constants import PROMPTS
10
 
 
11
 
12
+ def calculator_mode():
13
  with st.container(border=True):
 
14
  df = load_models(filter_main=True)
15
+
16
  col1, col2, col3 = st.columns(3)
17
 
18
  with col1:
19
  provider = st.selectbox(
20
+ label="Provider",
21
+ options=[x for x in df["provider_clean"].unique()],
22
+ index=7,
23
  )
24
 
25
  with col2:
26
  model = st.selectbox(
27
+ label="Model",
28
+ options=[
29
+ x
30
+ for x in df["name_clean"].unique()
31
+ if x in df[df["provider_clean"] == provider]["name_clean"].unique()
32
+ ],
33
  )
34
 
35
  with col3:
36
+ output_tokens = st.selectbox("Example prompt", [x[0] for x in PROMPTS])
37
+
38
  # WARNING DISPLAY
39
+ provider_raw = df[
40
+ (df["provider_clean"] == provider) & (df["name_clean"] == model)
41
+ ]["provider"].values[0]
42
+ model_raw = df[
43
+ (df["provider_clean"] == provider) & (df["name_clean"] == model)
44
+ ]["name"].values[0]
45
 
46
+ df_filtered = df[
47
+ (df["provider_clean"] == provider) & (df["name_clean"] == model)
48
+ ]
49
 
50
+ if (
51
+ df_filtered["warning_arch"].values[0]
52
+ and not df_filtered["warning_multi_modal"].values[0]
53
+ ):
54
  st.warning(WARNING_CLOSED_SOURCE)
55
+ if (
56
+ df_filtered["warning_multi_modal"].values[0]
57
+ and not df_filtered["warning_arch"].values[0]
58
+ ):
59
  st.warning(WARNING_MULTI_MODAL)
60
+ if (
61
+ df_filtered["warning_arch"].values[0]
62
+ and df_filtered["warning_multi_modal"].values[0]
63
+ ):
64
  st.warning(WARNING_BOTH)
 
 
65
 
66
+ try:
67
  impacts = llm_impacts(
68
+ provider=provider_raw,
69
+ model_name=model_raw,
70
+ output_token_count=[x[1] for x in PROMPTS if x[0] == output_tokens][0],
71
+ request_latency=100000,
72
+ )
73
 
74
  impacts, _, _ = format_impacts(impacts)
 
 
75
 
76
+ with st.container(border=True):
77
+ st.markdown(
78
+ '<h3 align = "center">Environmental impacts</h3>',
79
+ unsafe_allow_html=True,
80
+ )
81
+ st.markdown(
82
+ '<p align = "center">To understand how the environmental impacts are computed go to the 📖 Methodology tab.</p>',
83
+ unsafe_allow_html=True,
84
+ )
85
  display_impacts(impacts)
86
+
87
  with st.container(border=True):
88
+ st.markdown(
89
+ '<h3 align = "center">That\'s equivalent to ...</h3>',
90
+ unsafe_allow_html=True,
91
+ )
92
+ st.markdown(
93
+ '<p align = "center">Making this request to the LLM is equivalent to the following actions :</p>',
94
+ unsafe_allow_html=True,
95
+ )
96
  display_equivalent(impacts)
97
+
98
+ except Exception:
99
+ st.error(
100
+ "Could not find the model in the repository. Please try another model."
101
+ )
src/constants.py CHANGED
@@ -4,99 +4,108 @@ PROMPTS = [
4
  ("Write an article summary", 250),
5
  ("Small conversation with a chatbot", 400),
6
  ("Write a report of 5 pages", 5000),
7
- ("Write the code for this app", 15000)
8
  ]
9
  PROMPTS = [(s + f" ({v} output tokens)", v) for (s, v) in PROMPTS]
10
 
11
  MODEL_REPOSITORY_URL = "https://raw.githubusercontent.com/genai-impact/ecologits/refs/heads/main/ecologits/data/models.json"
12
 
13
  main_models_openai = [
14
- 'chatgpt-4o-latest',
15
- 'gpt-3.5-turbo',
16
- 'gpt-4',
17
- 'gpt-4-turbo',
18
- 'gpt-4o',
19
- 'gpt-4o-mini',
20
- 'o1',
21
- 'o1-mini'
22
  ]
23
 
24
  main_models_meta = [
25
- 'meta-llama/Meta-Llama-3.1-8B',
26
- 'meta-llama/Meta-Llama-3.1-70B',
27
- 'meta-llama/Meta-Llama-3.1-405B',
28
- 'meta-llama/Meta-Llama-3-8B',
29
- 'meta-llama/Meta-Llama-3-70B',
30
- 'meta-llama/Meta-Llama-3-70B',
31
- 'meta-llama/Llama-2-7b',
32
- 'meta-llama/Llama-2-13b',
33
- 'meta-llama/Llama-2-70b',
34
- 'meta-llama/CodeLlama-7b-hf',
35
- 'meta-llama/CodeLlama-13b-hf',
36
- 'meta-llama/CodeLlama-34b-hf',
37
- 'meta-llama/CodeLlama-70b-hf'
38
  ]
39
 
40
  main_models_msft = [
41
- 'microsoft/phi-1',
42
- 'microsoft/phi-1_5',
43
- 'microsoft/Phi-3-mini-128k-instruct',
44
- 'microsoft/Phi-3-small-128k-instruct',
45
- 'microsoft/Phi-3-medium-128k-instruct',
46
  ]
47
 
48
  main_models_anthropic = [
49
- 'claude-2.0',
50
- 'claude-2.1',
51
- 'claude-3-5-haiku-latest',
52
- 'claude-3-5-sonnet-latest',
53
- 'claude-3-7-sonnet-latest',
54
- 'claude-3-haiku-20240307',
55
- 'claude-3-opus-latest',
56
- 'claude-3-sonnet-20240229'
57
  ]
58
 
59
  main_models_cohere = [
60
- 'c4ai-aya-expanse-8b',
61
- 'c4ai-aya-expanse-32b',
62
- 'command',
63
- 'command-light',
64
- 'command-r',
65
- 'command-r-plus',
66
  ]
67
 
68
  main_models_google = [
69
- 'google/gemma-2-2b',
70
- 'google/gemma-2-9b',
71
- 'google/gemma-2-27b',
72
- 'google/codegemma-2b',
73
- 'google/codegemma-7b',
74
- 'gemini-1.0-pro',
75
- 'gemini-1.5-pro',
76
- 'gemini-1.5-flash',
77
- 'gemini-2.0-flash'
78
  ]
79
 
80
  main_models_databricks = [
81
- 'databricks/dolly-v1-6b',
82
- 'databricks/dolly-v2-12b',
83
- 'databricks/dolly-v2-7b',
84
- 'databricks/dolly-v2-3b',
85
- 'databricks/dbrx-base'
86
  ]
87
 
88
  main_models_mistral = [
89
- 'mistralai/Mistral-7B-v0.3',
90
- 'mistralai/Mixtral-8x7B-v0.1',
91
- 'mistralai/Mixtral-8x22B-v0.1',
92
- 'mistralai/Codestral-22B-v0.1',
93
- 'mistralai/Mathstral-7B-v0.1',
94
- 'ministral-3b-latest',
95
- 'ministral-8b-latest',
96
- 'mistral-tiny',
97
- 'mistral-small',
98
- 'mistral-medium',
99
- 'mistral-large-latest'
100
  ]
101
 
102
- MAIN_MODELS = main_models_meta + main_models_openai + main_models_anthropic + main_models_cohere + main_models_msft + main_models_mistral + main_models_databricks + main_models_google
 
 
 
 
 
 
 
 
 
 
4
  ("Write an article summary", 250),
5
  ("Small conversation with a chatbot", 400),
6
  ("Write a report of 5 pages", 5000),
7
+ ("Write the code for this app", 15000),
8
  ]
9
  PROMPTS = [(s + f" ({v} output tokens)", v) for (s, v) in PROMPTS]
10
 
11
  MODEL_REPOSITORY_URL = "https://raw.githubusercontent.com/genai-impact/ecologits/refs/heads/main/ecologits/data/models.json"
12
 
13
  main_models_openai = [
14
+ "chatgpt-4o-latest",
15
+ "gpt-3.5-turbo",
16
+ "gpt-4",
17
+ "gpt-4-turbo",
18
+ "gpt-4o",
19
+ "gpt-4o-mini",
20
+ "o1",
21
+ "o1-mini",
22
  ]
23
 
24
  main_models_meta = [
25
+ "meta-llama/Meta-Llama-3.1-8B",
26
+ "meta-llama/Meta-Llama-3.1-70B",
27
+ "meta-llama/Meta-Llama-3.1-405B",
28
+ "meta-llama/Meta-Llama-3-8B",
29
+ "meta-llama/Meta-Llama-3-70B",
30
+ "meta-llama/Meta-Llama-3-70B",
31
+ "meta-llama/Llama-2-7b",
32
+ "meta-llama/Llama-2-13b",
33
+ "meta-llama/Llama-2-70b",
34
+ "meta-llama/CodeLlama-7b-hf",
35
+ "meta-llama/CodeLlama-13b-hf",
36
+ "meta-llama/CodeLlama-34b-hf",
37
+ "meta-llama/CodeLlama-70b-hf",
38
  ]
39
 
40
  main_models_msft = [
41
+ "microsoft/phi-1",
42
+ "microsoft/phi-1_5",
43
+ "microsoft/Phi-3-mini-128k-instruct",
44
+ "microsoft/Phi-3-small-128k-instruct",
45
+ "microsoft/Phi-3-medium-128k-instruct",
46
  ]
47
 
48
  main_models_anthropic = [
49
+ "claude-2.0",
50
+ "claude-2.1",
51
+ "claude-3-5-haiku-latest",
52
+ "claude-3-5-sonnet-latest",
53
+ "claude-3-7-sonnet-latest",
54
+ "claude-3-haiku-20240307",
55
+ "claude-3-opus-latest",
56
+ "claude-3-sonnet-20240229",
57
  ]
58
 
59
  main_models_cohere = [
60
+ "c4ai-aya-expanse-8b",
61
+ "c4ai-aya-expanse-32b",
62
+ "command",
63
+ "command-light",
64
+ "command-r",
65
+ "command-r-plus",
66
  ]
67
 
68
  main_models_google = [
69
+ "google/gemma-2-2b",
70
+ "google/gemma-2-9b",
71
+ "google/gemma-2-27b",
72
+ "google/codegemma-2b",
73
+ "google/codegemma-7b",
74
+ "gemini-1.0-pro",
75
+ "gemini-1.5-pro",
76
+ "gemini-1.5-flash",
77
+ "gemini-2.0-flash",
78
  ]
79
 
80
  main_models_databricks = [
81
+ "databricks/dolly-v1-6b",
82
+ "databricks/dolly-v2-12b",
83
+ "databricks/dolly-v2-7b",
84
+ "databricks/dolly-v2-3b",
85
+ "databricks/dbrx-base",
86
  ]
87
 
88
  main_models_mistral = [
89
+ "mistralai/Mistral-7B-v0.3",
90
+ "mistralai/Mixtral-8x7B-v0.1",
91
+ "mistralai/Mixtral-8x22B-v0.1",
92
+ "mistralai/Codestral-22B-v0.1",
93
+ "mistralai/Mathstral-7B-v0.1",
94
+ "ministral-3b-latest",
95
+ "ministral-8b-latest",
96
+ "mistral-tiny",
97
+ "mistral-small",
98
+ "mistral-medium",
99
+ "mistral-large-latest",
100
  ]
101
 
102
+ MAIN_MODELS = (
103
+ main_models_meta
104
+ + main_models_openai
105
+ + main_models_anthropic
106
+ + main_models_cohere
107
+ + main_models_msft
108
+ + main_models_mistral
109
+ + main_models_databricks
110
+ + main_models_google
111
+ )
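
For reference, a short sketch of what the `PROMPTS` post-processing above yields; the tuples and the list comprehension are copied from the diff, and only the `print` loop is added for illustration:

```python
# Sketch of the PROMPTS label formatting defined in src/constants.py (copied from the diff).
PROMPTS = [
    ("Write an article summary", 250),
    ("Small conversation with a chatbot", 400),
    ("Write a report of 5 pages", 5000),
    ("Write the code for this app", 15000),
]
PROMPTS = [(s + f" ({v} output tokens)", v) for (s, v) in PROMPTS]

for label, tokens in PROMPTS:
    print(label, "->", tokens)
# First entry becomes: "Write an article summary (250 output tokens)" -> 250
```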
src/content.py CHANGED
@@ -18,7 +18,7 @@ HERO_TEXT = """
18
  """
19
 
20
  INTRO_TEXT = """
21
- <div style="background-color:#d4edda; padding:5px; border-radius:10px; color:#155724;">
22
  <p>
23
  EcoLogits Calculator is developed and maintained by
24
  <a href = "https://genai-impact.org/" > GenAI Impact </a> non-profit.
@@ -76,7 +76,7 @@ ABOUT_TEXT = r"""
76
  The rapid evolution of generative AI is reshaping numerous industries and aspects of our daily lives. While these
77
  advancements offer some benefits, they also **pose substantial environmental challenges that cannot be overlooked**.
78
  Plus the issue of AI's environmental footprint has been mainly discussed at training stage but rarely at the inference
79
- stage. That is an issue because **inference impacts for LLMs can largely overcome the training impacts when deployed
80
  at large scales**.
81
  At **[GenAI Impact](https://genai-impact.org/) we are dedicated to understanding and mitigating the environmental
82
  impacts of generative AI** through rigorous research, innovative tools, and community engagement. Especially, in early
@@ -134,15 +134,17 @@ We also welcome any open-source contributions on 🌱 **[EcoLogits](https://gith
134
  <a href="https://creativecommons.org/licenses/by-sa/4.0/?ref=chooser-v1" target="_blank" rel="license noopener noreferrer" style="display:inline-block;">
135
  CC BY-SA 4.0
136
  </a>
137
- <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1" alt="">
138
- <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1" alt="">
139
- <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1" alt="">
 
 
140
  </p>
141
 
142
  ## 🙌 Acknowledgement
143
- We thank [Data For Good](https://dataforgood.fr/) and [Boavizta](https://boavizta.org/en) for supporting this project.
144
- Their contributions of tools, best practices, and expertise in environmental impact
145
- assessment have been extremely valuable.
146
  We also extend our gratitude to the open-source contributions of 🤗 [Hugging Face](huggingface.com) on the LLM-Perf
147
  Leaderboard.
148
 
@@ -150,6 +152,7 @@ Leaderboard.
150
  For general question on the project, please use the [GitHub thread](https://github.com/genai-impact/ecologits/discussions/45).
151
  Otherwise use our contact form on [genai-impact.org/contact](https://genai-impact.org/contact/).
152
  """
 
153
  SUPPORT_TEXT = r"""
154
  ## How to support
155
  At GenAI Impact, our projects are powered by the passion and dedication of our team.
@@ -262,7 +265,7 @@ We multiply that value by the GHG emissions of the request to get an equivalent
262
  These equivalents are computed based on the request impacts scaled to a worldwide adoption use case. We imply that the
263
  same request is done 1% of the planet everyday for 1 year, and then compute impact equivalents.
264
  $$
265
- I_{scaled} = I_{request} * [1 \\% \ \text{of}\ 8B\ \text{people on earth}] * 365\ \text{days}
266
  $$
267
  #### Number of 💨 wind turbines or ☢️ nuclear plants
268
  We compare the ⚡️ direct energy consumption (scaled) by the energy production of wind turbines and nuclear power
@@ -307,4 +310,4 @@ LICENCE_TEXT = """<p xmlns:cc="http://creativecommons.org/ns#" >
307
  <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1" alt="">
308
  <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1" alt="">
309
  <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1" alt="">
310
- </p>"""
 
18
  """
19
 
20
  INTRO_TEXT = """
21
+ <div align = "center"; style="background-color:#d4edda; padding:10px; border-radius:10px; color:#155724;">
22
  <p>
23
  EcoLogits Calculator is developed and maintained by
24
  <a href = "https://genai-impact.org/" > GenAI Impact </a> non-profit.
 
76
  The rapid evolution of generative AI is reshaping numerous industries and aspects of our daily lives. While these
77
  advancements offer some benefits, they also **pose substantial environmental challenges that cannot be overlooked**.
78
  Plus the issue of AI's environmental footprint has been mainly discussed at training stage but rarely at the inference
79
+ stage. That is an issue because **inference impacts for large language models (LLMs) can largely overcome the training impacts when deployed
80
  at large scales**.
81
  At **[GenAI Impact](https://genai-impact.org/) we are dedicated to understanding and mitigating the environmental
82
  impacts of generative AI** through rigorous research, innovative tools, and community engagement. Especially, in early
 
134
  <a href="https://creativecommons.org/licenses/by-sa/4.0/?ref=chooser-v1" target="_blank" rel="license noopener noreferrer" style="display:inline-block;">
135
  CC BY-SA 4.0
136
  </a>
137
+ <br>
138
+ <br>
139
+ <img style="display:inline-block;height:5px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1" alt="">
140
+ <img style="display:inline-block;height:5px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1" alt="">
141
+ <img style="display:inline-block;height:5px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1" alt="">
142
  </p>
143
 
144
  ## 🙌 Acknowledgement
145
+ We thank [Data For Good](https://dataforgood.fr/) and [Boavizta](https://boavizta.org/en) for supporting the
146
+ development of this project. Their contributions of tools, best practices, and expertise in environmental impact
147
+ assessment have been invaluable.
148
  We also extend our gratitude to the open-source contributions of 🤗 [Hugging Face](huggingface.com) on the LLM-Perf
149
  Leaderboard.
150
 
 
152
  For general question on the project, please use the [GitHub thread](https://github.com/genai-impact/ecologits/discussions/45).
153
  Otherwise use our contact form on [genai-impact.org/contact](https://genai-impact.org/contact/).
154
  """
155
+
156
  SUPPORT_TEXT = r"""
157
  ## How to support
158
  At GenAI Impact, our projects are powered by the passion and dedication of our team.
 
265
  These equivalents are computed based on the request impacts scaled to a worldwide adoption use case. We imply that the
266
  same request is done 1% of the planet everyday for 1 year, and then compute impact equivalents.
267
  $$
268
+ I_{scaled} = I_{request} * [1 \% \ \text{of}\ 8B\ \text{people on earth}] * 365\ \text{days}
269
  $$
270
  #### Number of 💨 wind turbines or ☢️ nuclear plants
271
  We compare the ⚡️ direct energy consumption (scaled) by the energy production of wind turbines and nuclear power
 
310
  <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1" alt="">
311
  <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1" alt="">
312
  <img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1" alt="">
313
+ </p>"""
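
A quick numeric check of the scaling factor behind the worldwide-adoption formula above (1% of 8 billion people making the request every day for a year); this is plain arithmetic, not a value taken from the EcoLogits data:

```python
# 1% of 8 billion people, every day, for 365 days.
requests_per_year = 0.01 * 8_000_000_000 * 365
print(f"{requests_per_year:.2e}")  # 2.92e+10
# So I_scaled ~= I_request * 2.92e10
```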
src/electricity_mix.py CHANGED
@@ -142,7 +142,7 @@ COUNTRY_CODES = [
142
  ("🇦 Netherlands Antilles", "ANT"),
143
  ("🇦🇲 Armenia", "ARM"),
144
  ("🇦🇱 Albania", "ALB"),
145
- ("🇦🇪 United Arab Emirates", "ARE")
146
  ]
147
 
148
 
@@ -156,20 +156,19 @@ def find_electricity_mix(code: str):
156
  res += [float(row[code])]
157
  return res
158
 
159
- def dataframe_electricity_mix(countries: list):
160
 
161
- df = pd.read_csv('src/data/electricity_mix.csv')
162
- df['name_unit'] = df['name'] + ' (' + df['unit'] + ')'
163
- df = df[['name_unit'] + [x[1] for x in COUNTRY_CODES if x[0] in countries]]
 
164
 
165
  df_melted = df.melt(
166
- id_vars=['name_unit'],
167
  value_vars=[x[1] for x in COUNTRY_CODES if x[0] in countries],
168
- var_name='country',
169
- value_name='value')
 
170
 
171
- df = df_melted.pivot(columns='name_unit',
172
- index='country',
173
- values='value')
174
 
175
- return df
 
142
  ("🇦 Netherlands Antilles", "ANT"),
143
  ("🇦🇲 Armenia", "ARM"),
144
  ("🇦🇱 Albania", "ALB"),
145
+ ("🇦🇪 United Arab Emirates", "ARE"),
146
  ]
147
 
148
 
 
156
  res += [float(row[code])]
157
  return res
158
 
 
159
 
160
+ def dataframe_electricity_mix(countries: list):
161
+ df = pd.read_csv("src/data/electricity_mix.csv")
162
+ df["name_unit"] = df["name"] + " (" + df["unit"] + ")"
163
+ df = df[["name_unit"] + [x[1] for x in COUNTRY_CODES if x[0] in countries]]
164
 
165
  df_melted = df.melt(
166
+ id_vars=["name_unit"],
167
  value_vars=[x[1] for x in COUNTRY_CODES if x[0] in countries],
168
+ var_name="country",
169
+ value_name="value",
170
+ )
171
 
172
+ df = df_melted.pivot(columns="name_unit", index="country", values="value")
 
 
173
 
174
+ return df
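
A self-contained sketch of the melt-then-pivot reshape performed by `dataframe_electricity_mix`; the column names follow the diff, while the toy frame and its values are made up for illustration:

```python
import pandas as pd

# Toy stand-in for the data read from src/data/electricity_mix.csv (values are illustrative).
df = pd.DataFrame({
    "name_unit": ["GWP (kgCO2eq / kWh)", "PE (MJ / kWh)"],
    "FRA": [0.1, 1.2],
    "USA": [0.4, 2.0],
})

df_melted = df.melt(
    id_vars=["name_unit"],
    value_vars=["FRA", "USA"],
    var_name="country",
    value_name="value",
)

# One row per country, one column per impact type, ready for the comparison chart.
print(df_melted.pivot(columns="name_unit", index="country", values="value"))
```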
src/expert.py CHANGED
@@ -3,125 +3,176 @@ from ecologits.impacts.llm import compute_llm_impacts
3
 
4
  from src.utils import format_impacts, average_range_impacts
5
  from src.impacts import display_impacts
6
- from src.electricity_mix import COUNTRY_CODES, find_electricity_mix, dataframe_electricity_mix
 
 
 
 
7
  from src.models import load_models
8
  from src.constants import PROMPTS
9
 
10
  import plotly.express as px
11
 
 
12
  def reset_model():
13
- model = 'CUSTOM'
14
 
15
- def expert_mode():
16
 
 
17
  st.markdown("### 🤓 Expert mode")
18
 
19
- with st.container(border = True):
20
-
21
  ########## Model info ##########
22
 
23
  col1, col2, col3 = st.columns(3)
24
-
25
  df = load_models(filter_main=True)
26
 
27
  with col1:
28
  provider_exp = st.selectbox(
29
- label = 'Provider',
30
- options = [x for x in df['provider_clean'].unique()],
31
- index = 7,
32
- key = 1
33
  )
34
 
35
  with col2:
36
  model_exp = st.selectbox(
37
- label = 'Model',
38
- options = [x for x in df['name_clean'].unique() if x in df[df['provider_clean'] == provider_exp]['name_clean'].unique()],
39
- key = 2
 
 
 
 
 
40
  )
41
 
42
  with col3:
43
  output_tokens_exp = st.selectbox(
44
- label = 'Example prompt',
45
- options = [x[0] for x in PROMPTS],
46
- key = 3
47
  )
48
-
49
- df_filtered = df[(df['provider_clean'] == provider_exp) & (df['name_clean'] == model_exp)]
 
 
50
 
51
  try:
52
- total_params = int(df_filtered['total_parameters'].iloc[0])
53
  except:
54
- total_params = int((df_filtered['total_parameters'].values[0]['min'] + df_filtered['total_parameters'].values[0]['max'])/2)
55
-
 
 
 
 
 
 
56
  try:
57
- active_params = int(df_filtered['active_parameters'].iloc[0])
58
  except:
59
- active_params = int((df_filtered['active_parameters'].values[0]['min'] + df_filtered['active_parameters'].values[0]['max'])/2)
 
 
 
 
 
 
60
 
61
- ########## Model parameters ##########
62
 
63
  col11, col22, col33 = st.columns(3)
64
 
65
  with col11:
66
- active_params = st.number_input('Active parameters (B)', 0, None, active_params)
 
 
67
 
68
  with col22:
69
- total_params = st.number_input('Total parameters (B)', 0, None, total_params)
 
 
70
 
71
  with col33:
72
  output_tokens = st.number_input(
73
- label = 'Output completion tokens',
74
- min_value = 0,
75
- value = [x[1] for x in PROMPTS if x[0] == output_tokens_exp][0]
76
  )
77
 
78
  ########## Electricity mix ##########
79
 
80
- location = st.selectbox('Location', [x[0] for x in COUNTRY_CODES])
81
 
82
  col4, col5, col6 = st.columns(3)
83
 
84
  with col4:
85
- mix_gwp = st.number_input('Electricity mix - GHG emissions [kgCO2eq / kWh]', find_electricity_mix([x[1] for x in COUNTRY_CODES if x[0] ==location][0])[2], format="%0.6f")
86
- #disp_ranges = st.toggle('Display impact ranges', False)
 
 
 
 
 
 
87
  with col5:
88
- mix_adpe = st.number_input('Electricity mix - Abiotic resources [kgSbeq / kWh]', find_electricity_mix([x[1] for x in COUNTRY_CODES if x[0] ==location][0])[0], format="%0.13f")
 
 
 
 
 
 
89
  with col6:
90
- mix_pe = st.number_input('Electricity mix - Primary energy [MJ / kWh]', find_electricity_mix([x[1] for x in COUNTRY_CODES if x[0] ==location][0])[1], format="%0.3f")
91
-
92
- impacts = compute_llm_impacts(model_active_parameter_count=active_params,
93
- model_total_parameter_count=total_params,
94
- output_token_count=output_tokens,
95
- request_latency=100000,
96
- if_electricity_mix_gwp=mix_gwp,
97
- if_electricity_mix_adpe=mix_adpe,
98
- if_electricity_mix_pe=mix_pe
99
  )
100
-
 
 
 
 
 
 
 
 
 
 
101
  impacts, usage, embodied = format_impacts(impacts)
102
-
103
- with st.container(border = True):
104
 
105
- st.markdown('<h3 align="center">Environmental Impacts</h2>', unsafe_allow_html = True)
 
 
 
106
 
107
  display_impacts(impacts)
108
 
109
- with st.expander('⚖️ Usage vs Embodied'):
 
 
 
 
110
 
111
- st.markdown('<h3 align="center">Embodied vs Usage comparison</h2>', unsafe_allow_html = True)
 
 
112
 
113
- st.markdown('The usage impacts account for the electricity consumption of the model while the embodied impacts account for resource extraction (e.g., minerals and metals), manufacturing, and transportation of the hardware.')
114
-
115
  col_ghg_comparison, col_adpe_comparison, col_pe_comparison = st.columns(3)
116
-
117
- with col_ghg_comparison:
118
 
 
119
  fig_gwp = px.pie(
120
- values = [average_range_impacts(usage.gwp.value), average_range_impacts(embodied.gwp.value)],
121
- names = ['usage', 'embodied'],
122
- title = 'GHG emissions',
123
- color_discrete_sequence=["#00BF63", "#0B3B36"],
124
- width = 100
 
 
 
125
  )
126
  fig_gwp.update_layout(showlegend=False, title_x=0.5)
127
 
@@ -129,59 +180,66 @@ def expert_mode():
129
 
130
  with col_adpe_comparison:
131
  fig_adpe = px.pie(
132
- values = [average_range_impacts(usage.adpe.value), average_range_impacts(embodied.adpe.value)],
133
- names = ['usage', 'embodied'],
134
- title = 'Abiotic depletion',
135
- color_discrete_sequence=["#0B3B36","#00BF63"],
136
- width = 100)
137
- fig_adpe.update_layout(
138
- showlegend=False,
139
- title_x=0.5)
140
-
 
 
141
  st.plotly_chart(fig_adpe)
142
 
143
  with col_pe_comparison:
144
  fig_pe = px.pie(
145
- values = [average_range_impacts(usage.pe.value), average_range_impacts(embodied.pe.value)],
146
- names = ['usage', 'embodied'],
147
- title = 'Primary energy',
 
 
 
148
  color_discrete_sequence=["#00BF63", "#0B3B36"],
149
- width = 100)
 
150
  fig_pe.update_layout(showlegend=False, title_x=0.5)
151
 
152
  st.plotly_chart(fig_pe)
153
 
154
- with st.expander('🌍️ Location impact'):
155
-
156
- st.markdown('<h4 align="center">How can location impact the footprint ?</h4>', unsafe_allow_html = True)
 
 
157
 
158
  countries_to_compare = st.multiselect(
159
- label = 'Countries to compare',
160
- options = [x[0] for x in COUNTRY_CODES],
161
- default = ["🇫🇷 France", "🇺🇸 United States", "🇨🇳 China"]
162
- )
163
 
164
  try:
165
-
166
  df_comp = dataframe_electricity_mix(countries_to_compare)
167
 
168
  impact_type = st.selectbox(
169
- label='Select an impact type to compare',
170
- options=[x for x in df_comp.columns if x!='country'],
171
- index=1)
 
 
 
172
 
173
- df_comp.sort_values(by = impact_type, inplace = True)
174
-
175
  fig_2 = px.bar(
176
  df_comp,
177
- x = df_comp.index,
178
- y = impact_type,
179
- text = impact_type,
180
- color = impact_type
181
  )
182
-
183
  st.plotly_chart(fig_2)
184
 
185
  except:
186
-
187
- st.warning("Can't display chart with no values.")
 
3
 
4
  from src.utils import format_impacts, average_range_impacts
5
  from src.impacts import display_impacts
6
+ from src.electricity_mix import (
7
+ COUNTRY_CODES,
8
+ find_electricity_mix,
9
+ dataframe_electricity_mix,
10
+ )
11
  from src.models import load_models
12
  from src.constants import PROMPTS
13
 
14
  import plotly.express as px
15
 
16
+
17
  def reset_model():
18
+ model = "CUSTOM"
19
 
 
20
 
21
+ def expert_mode():
22
  st.markdown("### 🤓 Expert mode")
23
 
24
+ with st.container(border=True):
 
25
  ########## Model info ##########
26
 
27
  col1, col2, col3 = st.columns(3)
28
+
29
  df = load_models(filter_main=True)
30
 
31
  with col1:
32
  provider_exp = st.selectbox(
33
+ label="Provider",
34
+ options=[x for x in df["provider_clean"].unique()],
35
+ index=7,
36
+ key=1,
37
  )
38
 
39
  with col2:
40
  model_exp = st.selectbox(
41
+ label="Model",
42
+ options=[
43
+ x
44
+ for x in df["name_clean"].unique()
45
+ if x
46
+ in df[df["provider_clean"] == provider_exp]["name_clean"].unique()
47
+ ],
48
+ key=2,
49
  )
50
 
51
  with col3:
52
  output_tokens_exp = st.selectbox(
53
+ label="Example prompt", options=[x[0] for x in PROMPTS], key=3
 
 
54
  )
55
+
56
+ df_filtered = df[
57
+ (df["provider_clean"] == provider_exp) & (df["name_clean"] == model_exp)
58
+ ]
59
 
60
  try:
61
+ total_params = int(df_filtered["total_parameters"].iloc[0])
62
  except:
63
+ total_params = int(
64
+ (
65
+ df_filtered["total_parameters"].values[0]["min"]
66
+ + df_filtered["total_parameters"].values[0]["max"]
67
+ )
68
+ / 2
69
+ )
70
+
71
  try:
72
+ active_params = int(df_filtered["active_parameters"].iloc[0])
73
  except:
74
+ active_params = int(
75
+ (
76
+ df_filtered["active_parameters"].values[0]["min"]
77
+ + df_filtered["active_parameters"].values[0]["max"]
78
+ )
79
+ / 2
80
+ )
81
 
82
+ ########## Model parameters ##########
83
 
84
  col11, col22, col33 = st.columns(3)
85
 
86
  with col11:
87
+ active_params = st.number_input(
88
+ "Active parameters (B)", 0, None, active_params
89
+ )
90
 
91
  with col22:
92
+ total_params = st.number_input(
93
+ "Total parameters (B)", 0, None, total_params
94
+ )
95
 
96
  with col33:
97
  output_tokens = st.number_input(
98
+ label="Output completion tokens",
99
+ min_value=0,
100
+ value=[x[1] for x in PROMPTS if x[0] == output_tokens_exp][0],
101
  )
102
 
103
  ########## Electricity mix ##########
104
 
105
+ location = st.selectbox("Location", [x[0] for x in COUNTRY_CODES])
106
 
107
  col4, col5, col6 = st.columns(3)
108
 
109
  with col4:
110
+ mix_gwp = st.number_input(
111
+ "Electricity mix - GHG emissions [kgCO2eq / kWh]",
112
+ find_electricity_mix(
113
+ [x[1] for x in COUNTRY_CODES if x[0] == location][0]
114
+ )[2],
115
+ format="%0.6f",
116
+ )
117
+ # disp_ranges = st.toggle('Display impact ranges', False)
118
  with col5:
119
+ mix_adpe = st.number_input(
120
+ "Electricity mix - Abiotic resources [kgSbeq / kWh]",
121
+ find_electricity_mix(
122
+ [x[1] for x in COUNTRY_CODES if x[0] == location][0]
123
+ )[0],
124
+ format="%0.13f",
125
+ )
126
  with col6:
127
+ mix_pe = st.number_input(
128
+ "Electricity mix - Primary energy [MJ / kWh]",
129
+ find_electricity_mix(
130
+ [x[1] for x in COUNTRY_CODES if x[0] == location][0]
131
+ )[1],
132
+ format="%0.3f",
 
 
 
133
  )
134
+
135
+ impacts = compute_llm_impacts(
136
+ model_active_parameter_count=active_params,
137
+ model_total_parameter_count=total_params,
138
+ output_token_count=output_tokens,
139
+ request_latency=100000,
140
+ if_electricity_mix_gwp=mix_gwp,
141
+ if_electricity_mix_adpe=mix_adpe,
142
+ if_electricity_mix_pe=mix_pe,
143
+ )
144
+
145
  impacts, usage, embodied = format_impacts(impacts)
 
 
146
 
147
+ with st.container(border=True):
148
+ st.markdown(
149
+ '<h3 align="center">Environmental Impacts</h2>', unsafe_allow_html=True
150
+ )
151
 
152
  display_impacts(impacts)
153
 
154
+ with st.expander("⚖️ Usage vs Embodied"):
155
+ st.markdown(
156
+ '<h3 align="center">Embodied vs Usage comparison</h2>',
157
+ unsafe_allow_html=True,
158
+ )
159
 
160
+ st.markdown(
161
+ "The usage impacts account for the electricity consumption of the model while the embodied impacts account for resource extraction (e.g., minerals and metals), manufacturing, and transportation of the hardware."
162
+ )
163
 
 
 
164
  col_ghg_comparison, col_adpe_comparison, col_pe_comparison = st.columns(3)
 
 
165
 
166
+ with col_ghg_comparison:
167
  fig_gwp = px.pie(
168
+ values=[
169
+ average_range_impacts(usage.gwp.value),
170
+ average_range_impacts(embodied.gwp.value),
171
+ ],
172
+ names=["usage", "embodied"],
173
+ title="GHG emissions",
174
+ color_discrete_sequence=["#00BF63", "#0B3B36"],
175
+ width=100,
176
  )
177
  fig_gwp.update_layout(showlegend=False, title_x=0.5)
178
 
 
180
 
181
  with col_adpe_comparison:
182
  fig_adpe = px.pie(
183
+ values=[
184
+ average_range_impacts(usage.adpe.value),
185
+ average_range_impacts(embodied.adpe.value),
186
+ ],
187
+ names=["usage", "embodied"],
188
+ title="Abiotic depletion",
189
+ color_discrete_sequence=["#0B3B36", "#00BF63"],
190
+ width=100,
191
+ )
192
+ fig_adpe.update_layout(showlegend=False, title_x=0.5)
193
+
194
  st.plotly_chart(fig_adpe)
195
 
196
  with col_pe_comparison:
197
  fig_pe = px.pie(
198
+ values=[
199
+ average_range_impacts(usage.pe.value),
200
+ average_range_impacts(embodied.pe.value),
201
+ ],
202
+ names=["usage", "embodied"],
203
+ title="Primary energy",
204
  color_discrete_sequence=["#00BF63", "#0B3B36"],
205
+ width=100,
206
+ )
207
  fig_pe.update_layout(showlegend=False, title_x=0.5)
208
 
209
  st.plotly_chart(fig_pe)
210
 
211
+ with st.expander("🌍️ Location impact"):
212
+ st.markdown(
213
+ '<h4 align="center">How can location impact the footprint ?</h4>',
214
+ unsafe_allow_html=True,
215
+ )
216
 
217
  countries_to_compare = st.multiselect(
218
+ label="Countries to compare",
219
+ options=[x[0] for x in COUNTRY_CODES],
220
+ default=["🇫🇷 France", "🇺🇸 United States", "🇨🇳 China"],
221
+ )
222
 
223
  try:
 
224
  df_comp = dataframe_electricity_mix(countries_to_compare)
225
 
226
  impact_type = st.selectbox(
227
+ label="Select an impact type to compare",
228
+ options=[x for x in df_comp.columns if x != "country"],
229
+ index=1,
230
+ )
231
+
232
+ df_comp.sort_values(by=impact_type, inplace=True)
233
 
 
 
234
  fig_2 = px.bar(
235
  df_comp,
236
+ x=df_comp.index,
237
+ y=impact_type,
238
+ text=impact_type,
239
+ color=impact_type,
240
  )
241
+
242
  st.plotly_chart(fig_2)
243
 
244
  except:
245
+ st.warning("Can't display chart with no values.")
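
For context, a minimal sketch of the `compute_llm_impacts` call assembled above, assuming the `ecologits` package is installed; the keyword arguments match the diff, while the numeric values are illustrative stand-ins for the Streamlit inputs:

```python
from ecologits.impacts.llm import compute_llm_impacts

# Illustrative values in place of the Streamlit widgets (not real model or grid data).
impacts = compute_llm_impacts(
    model_active_parameter_count=45,    # billions of active parameters
    model_total_parameter_count=70,     # billions of total parameters
    output_token_count=400,
    request_latency=100000,             # same placeholder latency as in the app
    if_electricity_mix_gwp=0.1,         # kgCO2eq / kWh
    if_electricity_mix_adpe=1e-7,       # kgSbeq / kWh
    if_electricity_mix_pe=11.3,         # MJ / kWh
)
print(impacts)
```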
 
src/impacts.py CHANGED
@@ -1,5 +1,4 @@
1
  import streamlit as st
2
- import ecologits
3
  from src.utils import (
4
  format_energy_eq_electric_vehicle,
5
  format_energy_eq_electricity_consumption_ireland,
@@ -8,57 +7,77 @@ from src.utils import (
8
  format_gwp_eq_airplane_paris_nyc,
9
  format_gwp_eq_streaming,
10
  PhysicalActivity,
11
- EnergyProduction
12
  )
13
 
14
  ############################################################################################################
15
 
16
- def get_impacts(model, active_params, total_params, mix_ghg, mix_adpe, mix_pe):
17
 
 
18
  return 1
19
 
 
20
  ############################################################################################################
21
 
22
 
23
  def display_impacts(impacts):
24
-
25
  st.divider()
26
 
27
  col_energy, col_ghg, col_adpe, col_pe, col_water = st.columns(5)
28
 
29
  with col_energy:
30
- st.markdown('<h4 align="center">⚡️ Energy</h4>', unsafe_allow_html = True)
31
- st.latex(f'\Large {impacts.energy.magnitude:.3g} \ \large {impacts.energy.units}')
32
- st.markdown(f'<p align="center"><i>Evaluates the electricity consumption<i></p>', unsafe_allow_html = True)
 
 
 
 
 
33
 
34
  with col_ghg:
35
- st.markdown('<h4 align="center">🌍️ GHG Emissions</h4>', unsafe_allow_html = True)
36
- st.latex(f'\Large {impacts.gwp.magnitude:.3g} \ \large {impacts.gwp.units}')
37
- st.markdown(f'<p align="center"><i>Evaluates the effect on global warming<i></p>', unsafe_allow_html = True)
 
 
 
38
 
39
  with col_adpe:
40
- st.markdown('<h4 align="center">🪨 Abiotic Resources</h4>', unsafe_allow_html = True)
41
- st.latex(f'\Large {impacts.adpe.magnitude:.3g} \ \large {impacts.adpe.units}')
42
- st.markdown(f'<p align="center"><i>Evaluates the use of metals and minerals<i></p>', unsafe_allow_html = True)
 
 
 
 
 
43
 
44
  with col_pe:
45
- st.markdown('<h4 align="center">⛽️ Primary Energy</h4>', unsafe_allow_html = True)
46
- st.latex(f'\Large {impacts.pe.magnitude:.3g} \ \large {impacts.pe.units}')
47
- st.markdown(f'<p align="center"><i>Evaluates the use of energy resources<i></p>', unsafe_allow_html = True)
 
 
 
48
 
49
  with col_water:
50
- st.markdown('<h4 align="center">🚰 Water</h4>', unsafe_allow_html = True)
51
- st.latex('\Large Upcoming...')
52
- st.markdown(f'<p align="center"><i>Evaluates the use of water<i></p>', unsafe_allow_html = True)
 
 
 
 
53
 
54
  ############################################################################################################
55
 
56
- def display_equivalent(impacts):
57
 
 
58
  st.divider()
59
 
60
  ev_eq = format_energy_eq_electric_vehicle(impacts.energy)
61
-
62
  streaming_eq = format_gwp_eq_streaming(impacts.gwp)
63
 
64
  col1, col2, col3 = st.columns(3)
@@ -70,47 +89,85 @@ def display_equivalent(impacts):
70
  if physical_activity == PhysicalActivity.RUNNING:
71
  physical_activity = "🏃 " + physical_activity.capitalize()
72
 
73
- st.markdown(f'<h4 align="center">{physical_activity}</h4>', unsafe_allow_html = True)
74
- st.latex(f'\Large {distance.magnitude:.3g} \ \large {distance.units}')
75
- st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
 
 
 
 
 
76
 
77
  with col2:
78
  ev_eq = format_energy_eq_electric_vehicle(impacts.energy)
79
- st.markdown(f'<h4 align="center">🔋 Electric Vehicle</h4>', unsafe_allow_html = True)
80
- st.latex(f'\Large {ev_eq.magnitude:.3g} \ \large {ev_eq.units}')
81
- st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
 
 
 
 
 
82
 
83
  with col3:
84
  streaming_eq = format_gwp_eq_streaming(impacts.gwp)
85
- st.markdown(f'<h4 align="center">⏯️ Streaming</h4>', unsafe_allow_html = True)
86
- st.latex(f'\Large {streaming_eq.magnitude:.3g} \ \large {streaming_eq.units}')
87
- st.markdown(f'<p align="center"><i>Based on GHG emissions<i></p>', unsafe_allow_html = True)
88
-
 
 
 
89
  st.divider()
90
-
91
- st.markdown('<h3 align="center">What if 1% of the planet does this request everyday for 1 year ?</h3>', unsafe_allow_html = True)
92
- st.markdown('<p align="center">If this use case is largely deployed around the world, the equivalent impacts would be the impacts of this request x 1% of 8 billion people x 365 days in a year.</p>', unsafe_allow_html = True)
 
 
 
 
 
 
93
 
94
  col4, col5, col6 = st.columns(3)
95
 
96
  with col4:
97
-
98
- electricity_production, count = format_energy_eq_electricity_production(impacts.energy)
 
99
  if electricity_production == EnergyProduction.NUCLEAR:
100
  emoji = "☢️"
101
  name = "Nuclear power plants"
102
  if electricity_production == EnergyProduction.WIND:
103
  emoji = "💨️ "
104
  name = "Wind turbines"
105
- st.markdown(f'<h4 align="center">{emoji} {count.magnitude:.0f} {name} (yearly)</h4>', unsafe_allow_html = True)
106
- st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
107
-
 
 
 
 
 
 
108
  with col5:
109
  ireland_count = format_energy_eq_electricity_consumption_ireland(impacts.energy)
110
- st.markdown(f'<h4 align="center">🇮🇪 {ireland_count.magnitude:.3f} x Ireland <span style="font-size: 12px">(yearly ⚡️ cons.)</span></h2></h4>', unsafe_allow_html = True)
111
- st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
 
 
 
 
 
 
112
 
113
  with col6:
114
  paris_nyc_airplane = format_gwp_eq_airplane_paris_nyc(impacts.gwp)
115
- st.markdown(f'<h4 align="center">✈️ {round(paris_nyc_airplane.magnitude):,} Paris ↔ NYC</h4>', unsafe_allow_html = True)
116
- st.markdown(f'<p align="center"><i>Based on GHG emissions<i></p>', unsafe_allow_html = True)
 
 
 
 
 
 
 
1
  import streamlit as st
 
2
  from src.utils import (
3
  format_energy_eq_electric_vehicle,
4
  format_energy_eq_electricity_consumption_ireland,
 
7
  format_gwp_eq_airplane_paris_nyc,
8
  format_gwp_eq_streaming,
9
  PhysicalActivity,
10
+ EnergyProduction,
11
  )
12
 
13
  ############################################################################################################
14
 
 
15
 
16
+ def get_impacts(model, active_params, total_params, mix_ghg, mix_adpe, mix_pe):
17
  return 1
18
 
19
+
20
  ############################################################################################################
21
 
22
 
23
  def display_impacts(impacts):
 
24
  st.divider()
25
 
26
  col_energy, col_ghg, col_adpe, col_pe, col_water = st.columns(5)
27
 
28
  with col_energy:
29
+ st.markdown('<h4 align="center">⚡️ Energy</h4>', unsafe_allow_html=True)
30
+ st.latex(
31
+ f"\Large {impacts.energy.magnitude:.3g} \ \large {impacts.energy.units}"
32
+ )
33
+ st.markdown(
34
+ '<p align="center"><i>Evaluates the electricity consumption<i></p>',
35
+ unsafe_allow_html=True,
36
+ )
37
 
38
  with col_ghg:
39
+ st.markdown('<h4 align="center">🌍️ GHG Emissions</h4>', unsafe_allow_html=True)
40
+ st.latex(f"\Large {impacts.gwp.magnitude:.3g} \ \large {impacts.gwp.units}")
41
+ st.markdown(
42
+ '<p align="center"><i>Evaluates the effect on global warming<i></p>',
43
+ unsafe_allow_html=True,
44
+ )
45
 
46
  with col_adpe:
47
+ st.markdown(
48
+ '<h4 align="center">🪨 Abiotic Resources</h4>', unsafe_allow_html=True
49
+ )
50
+ st.latex(f"\Large {impacts.adpe.magnitude:.3g} \ \large {impacts.adpe.units}")
51
+ st.markdown(
52
+ '<p align="center"><i>Evaluates the use of metals and minerals<i></p>',
53
+ unsafe_allow_html=True,
54
+ )
55
 
56
  with col_pe:
57
+ st.markdown('<h4 align="center">⛽️ Primary Energy</h4>', unsafe_allow_html=True)
58
+ st.latex(f"\Large {impacts.pe.magnitude:.3g} \ \large {impacts.pe.units}")
59
+ st.markdown(
60
+ '<p align="center"><i>Evaluates the use of energy resources<i></p>',
61
+ unsafe_allow_html=True,
62
+ )
63
 
64
  with col_water:
65
+ st.markdown('<h4 align="center">🚰 Water</h4>', unsafe_allow_html=True)
66
+ st.latex("\Large Upcoming...")
67
+ st.markdown(
68
+ '<p align="center"><i>Evaluates the use of water<i></p>',
69
+ unsafe_allow_html=True,
70
+ )
71
+
72
 
73
  ############################################################################################################
74
 
 
75
 
76
+ def display_equivalent(impacts):
77
  st.divider()
78
 
79
  ev_eq = format_energy_eq_electric_vehicle(impacts.energy)
80
+
81
  streaming_eq = format_gwp_eq_streaming(impacts.gwp)
82
 
83
  col1, col2, col3 = st.columns(3)
 
89
  if physical_activity == PhysicalActivity.RUNNING:
90
  physical_activity = "🏃 " + physical_activity.capitalize()
91
 
92
+ st.markdown(
93
+ f'<h4 align="center">{physical_activity}</h4>', unsafe_allow_html=True
94
+ )
95
+ st.latex(f"\Large {distance.magnitude:.3g} \ \large {distance.units}")
96
+ st.markdown(
97
+ '<p align="center"><i>Based on energy consumption<i></p>',
98
+ unsafe_allow_html=True,
99
+ )
100
 
101
  with col2:
102
  ev_eq = format_energy_eq_electric_vehicle(impacts.energy)
103
+ st.markdown(
104
+ '<h4 align="center">🔋 Electric Vehicle</h4>', unsafe_allow_html=True
105
+ )
106
+ st.latex(f"\Large {ev_eq.magnitude:.3g} \ \large {ev_eq.units}")
107
+ st.markdown(
108
+ '<p align="center"><i>Based on energy consumption<i></p>',
109
+ unsafe_allow_html=True,
110
+ )
111
 
112
  with col3:
113
  streaming_eq = format_gwp_eq_streaming(impacts.gwp)
114
+ st.markdown('<h4 align="center">⏯️ Streaming</h4>', unsafe_allow_html=True)
115
+ st.latex(f"\Large {streaming_eq.magnitude:.3g} \ \large {streaming_eq.units}")
116
+ st.markdown(
117
+ '<p align="center"><i>Based on GHG emissions<i></p>',
118
+ unsafe_allow_html=True,
119
+ )
120
+
121
  st.divider()
122
+
123
+ st.markdown(
124
+ '<h3 align="center">What if 1% of the planet does this request everyday for 1 year ?</h3>',
125
+ unsafe_allow_html=True,
126
+ )
127
+ st.markdown(
128
+ '<p align="center">If this use case is largely deployed around the world, the equivalent impacts would be the impacts of this request x 1% of 8 billion people x 365 days in a year.</p>',
129
+ unsafe_allow_html=True,
130
+ )
131
 
132
  col4, col5, col6 = st.columns(3)
133
 
134
  with col4:
135
+ electricity_production, count = format_energy_eq_electricity_production(
136
+ impacts.energy
137
+ )
138
  if electricity_production == EnergyProduction.NUCLEAR:
139
  emoji = "☢️"
140
  name = "Nuclear power plants"
141
  if electricity_production == EnergyProduction.WIND:
142
  emoji = "💨️ "
143
  name = "Wind turbines"
144
+ st.markdown(
145
+ f'<h4 align="center">{emoji} {count.magnitude:.0f} {name} (yearly)</h4>',
146
+ unsafe_allow_html=True,
147
+ )
148
+ st.markdown(
149
+ '<p align="center"><i>Based on energy consumption<i></p>',
150
+ unsafe_allow_html=True,
151
+ )
152
+
153
  with col5:
154
  ireland_count = format_energy_eq_electricity_consumption_ireland(impacts.energy)
155
+ st.markdown(
156
+ f'<h4 align="center">🇮🇪 {ireland_count.magnitude:.3f} x Ireland <span style="font-size: 12px">(yearly ⚡️ cons.)</span></h2></h4>',
157
+ unsafe_allow_html=True,
158
+ )
159
+ st.markdown(
160
+ '<p align="center"><i>Based on energy consumption<i></p>',
161
+ unsafe_allow_html=True,
162
+ )
163
 
164
  with col6:
165
  paris_nyc_airplane = format_gwp_eq_airplane_paris_nyc(impacts.gwp)
166
+ st.markdown(
167
+ f'<h4 align="center">✈️ {round(paris_nyc_airplane.magnitude):,} Paris ↔ NYC</h4>',
168
+ unsafe_allow_html=True,
169
+ )
170
+ st.markdown(
171
+ '<p align="center"><i>Based on GHG emissions<i></p>',
172
+ unsafe_allow_html=True,
173
+ )
src/models.py CHANGED
@@ -4,52 +4,76 @@ import pandas as pd
4
  from src.constants import MODEL_REPOSITORY_URL, MAIN_MODELS
5
  import streamlit as st
6
 
7
- def clean_models_data(df, with_filter = True):
8
-
9
  dict_providers = {
10
- 'google': 'Google',
11
- 'mistralai': 'MistralAI',
12
- 'meta-llama': 'Meta',
13
- 'openai': 'OpenAI',
14
- 'anthropic': 'Anthropic',
15
- 'cohere': 'Cohere',
16
- 'microsoft': 'Microsoft',
17
- 'mistral-community': 'Mistral Community',
18
- 'databricks': 'Databricks'
19
  }
20
 
21
  models_to_keep = MAIN_MODELS
22
-
23
- df.drop('type', axis=1, inplace=True)
24
-
25
- df.loc[df['name'].str.contains('/'), 'name_clean'] = df.loc[df['name'].str.contains('/'), 'name'].str.split('/').str[1]
26
- df['name_clean'] = df['name_clean'].fillna(df['name'])
27
- df['name_clean'] = df['name_clean'].replace({'-': ' ', 'latest': ''}, regex = True)
28
-
29
- df.loc[df['provider'] == 'huggingface_hub', 'provider_clean'] = df.loc[df['provider'] == 'huggingface_hub', 'name'].str.split('/').str[0]
30
- df['provider_clean'] = df['provider_clean'].fillna(df['provider'])
31
- df['provider_clean'] = df['provider_clean'].replace(dict_providers, regex = True)
32
-
33
- df['architecture_type'] = df['architecture'].apply(lambda x: x['type'])
34
- df['architecture_parameters'] = df['architecture'].apply(lambda x: x['parameters'])
35
- df['total_parameters'] = df['architecture_parameters'].apply(lambda x: x['total'] if isinstance(x, dict) and 'total' in x.keys() else x)
36
- df['active_parameters'] = df['architecture_parameters'].apply(lambda x: x['active'] if isinstance(x, dict) and 'active' in x.keys() else x)
37
-
38
- df['warnings'] = df['warnings'].apply(lambda x: ', '.join(x) if x else None).fillna('none')
39
- df['warning_arch'] = df['warnings'].apply(lambda x: 'model-arch-not-released' in x)
40
- df['warning_multi_modal'] = df['warnings'].apply(lambda x: 'model-arch-multimodal' in x)
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  if with_filter == True:
43
- df = df[df['name'].isin(models_to_keep)]
44
-
45
- return df[['provider', 'provider_clean', 'name', 'name_clean', 'architecture_type', 'architecture_parameters', 'total_parameters', 'active_parameters', 'warning_arch', 'warning_multi_modal']]
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
- @st.cache_data
48
- def load_models(filter_main = True):
49
 
 
 
50
  resp = requests.get(MODEL_REPOSITORY_URL)
51
  data = json.loads(resp.text)
52
- df = pd.DataFrame(data['models'])
53
 
54
  return clean_models_data(df, filter_main)
55
-
 
4
  from src.constants import MODEL_REPOSITORY_URL, MAIN_MODELS
5
  import streamlit as st
6
 
7
+
8
+ def clean_models_data(df, with_filter=True):
9
  dict_providers = {
10
+ "google": "Google",
11
+ "mistralai": "MistralAI",
12
+ "meta-llama": "Meta",
13
+ "openai": "OpenAI",
14
+ "anthropic": "Anthropic",
15
+ "cohere": "Cohere",
16
+ "microsoft": "Microsoft",
17
+ "mistral-community": "Mistral Community",
18
+ "databricks": "Databricks",
19
  }
20
 
21
  models_to_keep = MAIN_MODELS
22
+
23
+ df.drop("type", axis=1, inplace=True)
24
+
25
+ df.loc[df["name"].str.contains("/"), "name_clean"] = (
26
+ df.loc[df["name"].str.contains("/"), "name"].str.split("/").str[1]
27
+ )
28
+ df["name_clean"] = df["name_clean"].fillna(df["name"])
29
+ df["name_clean"] = df["name_clean"].replace({"-": " ", "latest": ""}, regex=True)
30
+
31
+ df.loc[df["provider"] == "huggingface_hub", "provider_clean"] = (
32
+ df.loc[df["provider"] == "huggingface_hub", "name"].str.split("/").str[0]
33
+ )
34
+ df["provider_clean"] = df["provider_clean"].fillna(df["provider"])
35
+ df["provider_clean"] = df["provider_clean"].replace(dict_providers, regex=True)
36
+
37
+ df["architecture_type"] = df["architecture"].apply(lambda x: x["type"])
38
+ df["architecture_parameters"] = df["architecture"].apply(lambda x: x["parameters"])
39
+ df["total_parameters"] = df["architecture_parameters"].apply(
40
+ lambda x: x["total"] if isinstance(x, dict) and "total" in x.keys() else x
41
+ )
42
+ df["active_parameters"] = df["architecture_parameters"].apply(
43
+ lambda x: x["active"] if isinstance(x, dict) and "active" in x.keys() else x
44
+ )
45
+
46
+ df["warnings"] = (
47
+ df["warnings"].apply(lambda x: ", ".join(x) if x else None).fillna("none")
48
+ )
49
+ df["warning_arch"] = df["warnings"].apply(lambda x: "model-arch-not-released" in x)
50
+ df["warning_multi_modal"] = df["warnings"].apply(
51
+ lambda x: "model-arch-multimodal" in x
52
+ )
53
 
54
  if with_filter == True:
55
+ df = df[df["name"].isin(models_to_keep)]
56
+
57
+ return df[
58
+ [
59
+ "provider",
60
+ "provider_clean",
61
+ "name",
62
+ "name_clean",
63
+ "architecture_type",
64
+ "architecture_parameters",
65
+ "total_parameters",
66
+ "active_parameters",
67
+ "warning_arch",
68
+ "warning_multi_modal",
69
+ ]
70
+ ]
71
 
 
 
72
 
73
+ @st.cache_data
74
+ def load_models(filter_main=True):
75
  resp = requests.get(MODEL_REPOSITORY_URL)
76
  data = json.loads(resp.text)
77
+ df = pd.DataFrame(data["models"])
78
 
79
  return clean_models_data(df, filter_main)
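
A small standalone illustration of the `warnings` handling in `clean_models_data`; the three assignment lines are copied from the diff, and the toy frame is invented for the example:

```python
import pandas as pd

# Toy 'warnings' column mirroring the structure of models.json entries (values illustrative).
df = pd.DataFrame(
    {"warnings": [["model-arch-not-released"], [], ["model-arch-multimodal", "model-arch-not-released"]]}
)

df["warnings"] = df["warnings"].apply(lambda x: ", ".join(x) if x else None).fillna("none")
df["warning_arch"] = df["warnings"].apply(lambda x: "model-arch-not-released" in x)
df["warning_multi_modal"] = df["warnings"].apply(lambda x: "model-arch-multimodal" in x)

print(df)
```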
 
src/token_estimator.py CHANGED
@@ -2,29 +2,36 @@ import streamlit as st
2
  import tiktoken
3
  from .content import TOKEN_ESTIMATOR_TEXT
4
 
 
5
  def num_tokens_from_string(string: str, encoding_name: str) -> int:
6
  """Returns the number of tokens in a text string."""
7
  encoding = tiktoken.get_encoding(encoding_name)
8
  num_tokens = len(encoding.encode(string))
9
  return num_tokens
10
 
 
11
  def token_estimator():
12
-
13
  st.markdown("### 🪙 Tokens estimator")
14
-
15
- st.markdown("As our methodology deeply relies on the number of tokens processed by the model *(and as no-one is token-fluent)*, we provide you with a tool to estimate the number of tokens in a given text.")
16
-
17
- st.expander("ℹ️ What is a token anyway ?", expanded = False).markdown(TOKEN_ESTIMATOR_TEXT)
18
-
19
- user_text_input = st.text_area("Type or paste some text to estimate the amount of tokens.", "EcoLogits is a great project!")
20
-
 
 
 
 
 
 
 
21
  _, col2, _ = st.columns([2, 1, 2])
22
-
23
  with col2:
24
-
25
  st.metric(
26
- label = 'tokens estimated amount',
27
- #label_visibility = 'hidden',
28
- value = num_tokens_from_string(user_text_input, "cl100k_base"),
29
- border = True
30
- )
 
2
  import tiktoken
3
  from .content import TOKEN_ESTIMATOR_TEXT
4
 
5
+
6
  def num_tokens_from_string(string: str, encoding_name: str) -> int:
7
  """Returns the number of tokens in a text string."""
8
  encoding = tiktoken.get_encoding(encoding_name)
9
  num_tokens = len(encoding.encode(string))
10
  return num_tokens
11
 
12
+
13
  def token_estimator():
 
14
  st.markdown("### 🪙 Tokens estimator")
15
+
16
+ st.markdown(
17
+ "As our methodology deeply relies on the number of tokens processed by the model *(and as no-one is token-fluent)*, we provide you with a tool to estimate the number of tokens in a given text."
18
+ )
19
+
20
+ st.expander("ℹ️ What is a token anyway ?", expanded=False).markdown(
21
+ TOKEN_ESTIMATOR_TEXT
22
+ )
23
+
24
+ user_text_input = st.text_area(
25
+ "Type or paste some text to estimate the amount of tokens.",
26
+ "EcoLogits is a great project!",
27
+ )
28
+
29
  _, col2, _ = st.columns([2, 1, 2])
30
+
31
  with col2:
 
32
  st.metric(
33
+ label="tokens estimated amount",
34
+ # label_visibility = 'hidden',
35
+ value=num_tokens_from_string(user_text_input, "cl100k_base"),
36
+ border=True,
37
+ )
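For reference, the tiktoken call behind `num_tokens_from_string` can be exercised on its own; a minimal standalone check of the `cl100k_base` encoding (the sample sentence is just the text area's default).

```python
# Standalone check of the tiktoken encoding used by num_tokens_from_string.
# Requires `pip install tiktoken`; the first call fetches the BPE files.
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
text = "EcoLogits is a great project!"
tokens = encoding.encode(text)

print(len(tokens))              # the count shown by the st.metric widget
print(encoding.decode(tokens))  # decoding round-trips back to the input text
```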
src/utils.py CHANGED
@@ -1,9 +1,9 @@
1
  from dataclasses import dataclass
2
  from enum import Enum
3
 
4
- from ecologits.model_repository import models
5
  from ecologits.impacts.modeling import Impacts, Energy, GWP, ADPe, PE
6
- #from ecologits.tracers.utils import llm_impacts
 
7
  from pint import UnitRegistry, Quantity
8
 
9
  #####################################################################################
@@ -11,24 +11,25 @@ from pint import UnitRegistry, Quantity
11
  #####################################################################################
12
 
13
  u = UnitRegistry()
14
- u.define('Wh = watt_hour')
15
- u.define('kWh = kilowatt_hour')
16
- u.define('MWh = megawatt_hour')
17
- u.define('GWh = gigawatt_hour')
18
- u.define('TWh = terawatt_hour')
19
- u.define('gCO2eq = gram')
20
- u.define('kgCO2eq = kilogram')
21
- u.define('tCO2eq = metricton')
22
- u.define('kgSbeq = kilogram')
23
- u.define('kJ = kilojoule')
24
- u.define('MJ = megajoule')
25
- u.define('m = meter')
26
- u.define('km = kilometer')
27
- u.define('s = second')
28
- u.define('min = minute')
29
- u.define('h = hour')
30
  q = u.Quantity
31
 
 
32
  @dataclass
33
  class QImpacts:
34
  energy: Quantity
@@ -59,8 +60,8 @@ COUNTRIES = [
59
  #####################################################################################
60
 
61
  # From https://www.runningtools.com/energyusage.htm
62
- RUNNING_ENERGY_EQ = q("294 kJ / km") # running 1 km at 10 km/h with a weight of 70 kg
63
- WALKING_ENERGY_EQ = q("196 kJ / km") # walking 1 km at 3 km/h with a weight of 70 kg
64
 
65
  # From https://selectra.info/energie/actualites/insolite/consommation-vehicules-electriques-france-2040
66
  # and https://www.tesla.com/fr_fr/support/power-consumption
@@ -94,85 +95,113 @@ AIRPLANE_PARIS_NYC_GWP_EQ = q("177000 kgCO2eq")
94
  ### IMPACTS FORMATTING
95
  #####################################################################################
96
 
 
97
  def format_energy(energy: Energy) -> Quantity:
98
  val = q(energy.value, energy.unit)
99
  if val < q("1 kWh"):
100
  val = val.to("Wh")
101
  return val
102
 
 
103
  def format_gwp(gwp: GWP) -> Quantity:
104
  val = q(gwp.value, gwp.unit)
105
  if val < q("1 kgCO2eq"):
106
  val = val.to("gCO2eq")
107
  return val
108
 
 
109
  def format_adpe(adpe: ADPe) -> Quantity:
110
  return q(adpe.value, adpe.unit)
111
 
 
112
  def format_pe(pe: PE) -> Quantity:
113
  val = q(pe.value, pe.unit)
114
  if val < q("1 MJ"):
115
  val = val.to("kJ")
116
  return val
117
 
118
- def format_impacts(impacts: Impacts) -> QImpacts:
119
 
 
120
  try:
121
- impacts.energy.value = (impacts.energy.value.max + impacts.energy.value.min)/2
122
- impacts.gwp.value = (impacts.gwp.value.max + impacts.gwp.value.min)/2
123
- impacts.adpe.value = (impacts.adpe.value.max + impacts.adpe.value.min)/2
124
- impacts.pe.value = (impacts.pe.value.max + impacts.pe.value.min)/2
125
- return QImpacts(
126
- energy=format_energy(impacts.energy),
127
- gwp=format_gwp(impacts.gwp),
128
- adpe=format_adpe(impacts.adpe),
129
- pe=format_pe(impacts.pe)
130
- ), impacts.usage, impacts.embodied
131
- except: #when no range
132
- return QImpacts(
133
- energy=format_energy(impacts.energy),
134
- gwp=format_gwp(impacts.gwp),
135
- adpe=format_adpe(impacts.adpe),
136
- pe=format_pe(impacts.pe)
137
- ), impacts.usage, impacts.embodied
 
 
 
 
 
 
 
 
 
138
 
139
  def split_impacts_u_e(impacts: Impacts) -> QImpacts:
140
  return impacts.usage, impacts.embodied
141
 
 
142
  def average_range_impacts(x):
143
-
144
  if isinstance(x, float):
145
- return x
146
  else:
147
- return (x.max + x.min)/2
 
148
 
149
  def format_impacts_expert(impacts: Impacts, display_range: bool) -> QImpacts:
150
-
151
  if display_range:
152
- return QImpacts(
153
- energy=format_energy(impacts.energy),
154
- gwp=format_gwp(impacts.gwp),
155
- adpe=format_adpe(impacts.adpe),
156
- pe=format_pe(impacts.pe)
157
- ), impacts.usage, impacts.embodied
158
-
 
 
 
 
159
  else:
160
- energy = {"value":(impacts.energy.value.max + impacts.energy.value.min)/2, "unit":impacts.energy.unit}
161
- gwp = (impacts.gwp.value.max + impacts.gwp.value.min)/2
162
- adpe = (impacts.adpe.value.max + impacts.adpe.value.min)/2
163
- pe = (impacts.pe.value.max + impacts.pe.value.min)/2
164
- return QImpacts(
165
- energy=format_energy(energy),
166
- gwp=format_gwp(gwp),
167
- adpe=format_adpe(adpe),
168
- pe=format_pe(pe)
169
- ), impacts.usage, impacts.embodied
 
 
 
 
 
 
 
 
170
 
171
  #####################################################################################
172
  ### EQUIVALENT FORMATING
173
  #####################################################################################
174
 
175
- def format_energy_eq_physical_activity(energy: Quantity) -> tuple[PhysicalActivity, Quantity]:
 
 
 
176
  energy = energy.to("kJ")
177
  running_eq = energy / RUNNING_ENERGY_EQ
178
  if running_eq > q("1 km"):
@@ -183,6 +212,7 @@ def format_energy_eq_physical_activity(energy: Quantity) -> tuple[PhysicalActivi
183
  walking_eq = walking_eq.to("meter")
184
  return PhysicalActivity.WALKING, walking_eq
185
 
 
186
  def format_energy_eq_electric_vehicle(energy: Quantity) -> Quantity:
187
  energy = energy.to("kWh")
188
  ev_eq = energy / EV_ENERGY_EQ
@@ -190,6 +220,7 @@ def format_energy_eq_electric_vehicle(energy: Quantity) -> Quantity:
190
  ev_eq = ev_eq.to("meter")
191
  return ev_eq
192
 
 
193
  def format_gwp_eq_streaming(gwp: Quantity) -> Quantity:
194
  gwp = gwp.to("kgCO2eq")
195
  streaming_eq = gwp * STREAMING_GWP_EQ
@@ -199,7 +230,10 @@ def format_gwp_eq_streaming(gwp: Quantity) -> Quantity:
199
  streaming_eq = streaming_eq.to("s")
200
  return streaming_eq
201
 
202
- def format_energy_eq_electricity_production(energy: Quantity) -> tuple[EnergyProduction, Quantity]:
 
 
 
203
  electricity_eq = energy * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR
204
  electricity_eq = electricity_eq.to("TWh")
205
  if electricity_eq > YEARLY_NUCLEAR_ENERGY_EQ:
@@ -213,7 +247,10 @@ def format_energy_eq_electricity_consumption_ireland(energy: Quantity) -> Quanti
213
  electricity_eq = electricity_eq.to("TWh")
214
  return electricity_eq / YEARLY_IRELAND_ELECTRICITY_CONSUMPTION
215
 
 
216
  def format_gwp_eq_airplane_paris_nyc(gwp: Quantity) -> Quantity:
217
  gwp_eq = gwp * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR
218
  gwp_eq = gwp_eq.to("kgCO2eq")
219
- return gwp_eq / AIRPLANE_PARIS_NYC_GWP_EQ

  #####################################################################################
  ### MODELS PARAMETER
  #####################################################################################
 
 
 
1
  from dataclasses import dataclass
2
  from enum import Enum
3
 
 
4
  from ecologits.impacts.modeling import Impacts, Energy, GWP, ADPe, PE
5
+
6
+ # from ecologits.tracers.utils import llm_impacts
7
  from pint import UnitRegistry, Quantity
8
 
9
  #####################################################################################
 
11
  #####################################################################################
12
 
13
  u = UnitRegistry()
14
+ u.define("Wh = watt_hour")
15
+ u.define("kWh = kilowatt_hour")
16
+ u.define("MWh = megawatt_hour")
17
+ u.define("GWh = gigawatt_hour")
18
+ u.define("TWh = terawatt_hour")
19
+ u.define("gCO2eq = gram")
20
+ u.define("kgCO2eq = kilogram")
21
+ u.define("tCO2eq = metricton")
22
+ u.define("kgSbeq = kilogram")
23
+ u.define("kJ = kilojoule")
24
+ u.define("MJ = megajoule")
25
+ u.define("m = meter")
26
+ u.define("km = kilometer")
27
+ u.define("s = second")
28
+ u.define("min = minute")
29
+ u.define("h = hour")
30
  q = u.Quantity
31
 
32
+
33
  @dataclass
34
  class QImpacts:
35
  energy: Quantity
 
60
  #####################################################################################
61
 
62
  # From https://www.runningtools.com/energyusage.htm
63
+ RUNNING_ENERGY_EQ = q("294 kJ / km") # running 1 km at 10 km/h with a weight of 70 kg
64
+ WALKING_ENERGY_EQ = q("196 kJ / km") # walking 1 km at 3 km/h with a weight of 70 kg
65
 
66
  # From https://selectra.info/energie/actualites/insolite/consommation-vehicules-electriques-france-2040
67
  # and https://www.tesla.com/fr_fr/support/power-consumption
 
95
  ### IMPACTS FORMATING
96
  #####################################################################################
97
 
98
+
99
  def format_energy(energy: Energy) -> Quantity:
100
  val = q(energy.value, energy.unit)
101
  if val < q("1 kWh"):
102
  val = val.to("Wh")
103
  return val
104
 
105
+
106
  def format_gwp(gwp: GWP) -> Quantity:
107
  val = q(gwp.value, gwp.unit)
108
  if val < q("1 kgCO2eq"):
109
  val = val.to("gCO2eq")
110
  return val
111
 
112
+
113
  def format_adpe(adpe: ADPe) -> Quantity:
114
  return q(adpe.value, adpe.unit)
115
 
116
+
117
  def format_pe(pe: PE) -> Quantity:
118
  val = q(pe.value, pe.unit)
119
  if val < q("1 MJ"):
120
  val = val.to("kJ")
121
  return val
122
 
 
123
 
124
+ def format_impacts(impacts: Impacts) -> QImpacts:
125
  try:
126
+ impacts.energy.value = (impacts.energy.value.max + impacts.energy.value.min) / 2
127
+ impacts.gwp.value = (impacts.gwp.value.max + impacts.gwp.value.min) / 2
128
+ impacts.adpe.value = (impacts.adpe.value.max + impacts.adpe.value.min) / 2
129
+ impacts.pe.value = (impacts.pe.value.max + impacts.pe.value.min) / 2
130
+ return (
131
+ QImpacts(
132
+ energy=format_energy(impacts.energy),
133
+ gwp=format_gwp(impacts.gwp),
134
+ adpe=format_adpe(impacts.adpe),
135
+ pe=format_pe(impacts.pe),
136
+ ),
137
+ impacts.usage,
138
+ impacts.embodied,
139
+ )
140
+ except: # when no range
141
+ return (
142
+ QImpacts(
143
+ energy=format_energy(impacts.energy),
144
+ gwp=format_gwp(impacts.gwp),
145
+ adpe=format_adpe(impacts.adpe),
146
+ pe=format_pe(impacts.pe),
147
+ ),
148
+ impacts.usage,
149
+ impacts.embodied,
150
+ )
151
+
152
 
153
  def split_impacts_u_e(impacts: Impacts) -> QImpacts:
154
  return impacts.usage, impacts.embodied
155
 
156
+
157
  def average_range_impacts(x):
 
158
  if isinstance(x, float):
159
+ return x
160
  else:
161
+ return (x.max + x.min) / 2
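`average_range_impacts` assumes an impact value is either a plain float or a range object exposing `.min`/`.max`; the stub below is a hypothetical stand-in for that range type, used only to show the collapse to a midpoint.

```python
# Hypothetical stand-in for the range-valued impacts collapsed above;
# RangeStub is illustrative only, not the actual EcoLogits range class.
from dataclasses import dataclass


@dataclass
class RangeStub:
    min: float
    max: float


def average_range_impacts(x):
    # Same midpoint logic as above: floats pass through, ranges get averaged.
    if isinstance(x, float):
        return x
    return (x.max + x.min) / 2


print(average_range_impacts(0.5))                  # 0.5
print(average_range_impacts(RangeStub(0.2, 0.6)))  # ~0.4
```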
162
+
163
 
164
  def format_impacts_expert(impacts: Impacts, display_range: bool) -> QImpacts:
 
165
  if display_range:
166
+ return (
167
+ QImpacts(
168
+ energy=format_energy(impacts.energy),
169
+ gwp=format_gwp(impacts.gwp),
170
+ adpe=format_adpe(impacts.adpe),
171
+ pe=format_pe(impacts.pe),
172
+ ),
173
+ impacts.usage,
174
+ impacts.embodied,
175
+ )
176
+
177
  else:
178
+ energy = {
179
+ "value": (impacts.energy.value.max + impacts.energy.value.min) / 2,
180
+ "unit": impacts.energy.unit,
181
+ }
182
+ gwp = (impacts.gwp.value.max + impacts.gwp.value.min) / 2
183
+ adpe = (impacts.adpe.value.max + impacts.adpe.value.min) / 2
184
+ pe = (impacts.pe.value.max + impacts.pe.value.min) / 2
185
+ return (
186
+ QImpacts(
187
+ energy=format_energy(energy),
188
+ gwp=format_gwp(gwp),
189
+ adpe=format_adpe(adpe),
190
+ pe=format_pe(pe),
191
+ ),
192
+ impacts.usage,
193
+ impacts.embodied,
194
+ )
195
+
196
 
197
  #####################################################################################
198
  ### EQUIVALENT FORMATTING
199
  #####################################################################################
200
 
201
+
202
+ def format_energy_eq_physical_activity(
203
+ energy: Quantity,
204
+ ) -> tuple[PhysicalActivity, Quantity]:
205
  energy = energy.to("kJ")
206
  running_eq = energy / RUNNING_ENERGY_EQ
207
  if running_eq > q("1 km"):
 
212
  walking_eq = walking_eq.to("meter")
213
  return PhysicalActivity.WALKING, walking_eq
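The equivalence helpers all follow the same pattern: convert the impact to a reference unit, divide by a per-unit constant, then downscale for readability. A self-contained sketch of the physical-activity case, reusing the running and walking constants shown above (the enum is simplified for the example).

```python
# Self-contained sketch of the physical-activity equivalence, with the same
# constants as utils.py; the PhysicalActivity enum is simplified here.
from enum import Enum

from pint import UnitRegistry

u = UnitRegistry()
q = u.Quantity

RUNNING_ENERGY_EQ = q("294 kJ / km")  # running, 70 kg at 10 km/h
WALKING_ENERGY_EQ = q("196 kJ / km")  # walking, 70 kg at 3 km/h


class PhysicalActivity(str, Enum):
    RUNNING = "running"
    WALKING = "walking"


def format_energy_eq_physical_activity(energy):
    energy = energy.to("kJ")
    running_eq = energy / RUNNING_ENERGY_EQ
    if running_eq > q("1 km"):
        return PhysicalActivity.RUNNING, running_eq
    walking_eq = (energy / WALKING_ENERGY_EQ).to("meter")
    return PhysicalActivity.WALKING, walking_eq


activity, distance = format_energy_eq_physical_activity(q("50 kJ"))
print(activity.value, distance)  # walking, ~255 meter
```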
214
 
215
+
216
  def format_energy_eq_electric_vehicle(energy: Quantity) -> Quantity:
217
  energy = energy.to("kWh")
218
  ev_eq = energy / EV_ENERGY_EQ
 
220
  ev_eq = ev_eq.to("meter")
221
  return ev_eq
222
 
223
+
224
  def format_gwp_eq_streaming(gwp: Quantity) -> Quantity:
225
  gwp = gwp.to("kgCO2eq")
226
  streaming_eq = gwp * STREAMING_GWP_EQ
 
230
  streaming_eq = streaming_eq.to("s")
231
  return streaming_eq
232
 
233
+
234
+ def format_energy_eq_electricity_production(
235
+ energy: Quantity,
236
+ ) -> tuple[EnergyProduction, Quantity]:
237
  electricity_eq = energy * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR
238
  electricity_eq = electricity_eq.to("TWh")
239
  if electricity_eq > YEARLY_NUCLEAR_ENERGY_EQ:
 
247
  electricity_eq = electricity_eq.to("TWh")
248
  return electricity_eq / YEARLY_IRELAND_ELECTRICITY_CONSUMPTION
249
 
250
+
251
  def format_gwp_eq_airplane_paris_nyc(gwp: Quantity) -> Quantity:
252
  gwp_eq = gwp * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR
253
  gwp_eq = gwp_eq.to("kgCO2eq")
254
+ return (
255
+ gwp_eq / AIRPLANE_PARIS_NYC_GWP_EQ
256
+ )

  #####################################################################################
  ### MODELS PARAMETER
  #####################################################################################