From 24ed3fcc5106bd0e86c037842c69a9017c3f386b Mon Sep 17 00:00:00 2001
From: atla8167 <athanasio.lakes@dsv.su.se>
Date: Sun, 8 Dec 2024 17:47:01 +0200
Subject: [PATCH] Glacier

---
 base/glacier/README.md                        | 107 +++++
 .../glacier/src/LIMESegment/Utils/__init__.py |   0
 .../src/LIMESegment/Utils/explanations.py     | 214 +++++++++
 base/glacier/src/LIMESegment/Utils/metrics.py |  53 +++
 .../src/LIMESegment/Utils/perturbations.py    |  61 +++
 base/glacier/src/_guided.py                   | 358 ++++++++++++++
 .../src/gc_latentcf_search_1dcnn_function.py  | 263 ++++++++++
 .../src/glacier_compute_counterfactuals.py    | 134 ++++++
 base/glacier/src/help_functions.py            | 449 ++++++++++++++++++
 base/glacier/src/keras_models.py              | 260 ++++++++++
 base/glacier/src/requirements.txt             |  11 +
 11 files changed, 1910 insertions(+)
 create mode 100755 base/glacier/README.md
 create mode 100755 base/glacier/src/LIMESegment/Utils/__init__.py
 create mode 100755 base/glacier/src/LIMESegment/Utils/explanations.py
 create mode 100755 base/glacier/src/LIMESegment/Utils/metrics.py
 create mode 100755 base/glacier/src/LIMESegment/Utils/perturbations.py
 create mode 100755 base/glacier/src/_guided.py
 create mode 100644 base/glacier/src/gc_latentcf_search_1dcnn_function.py
 create mode 100644 base/glacier/src/glacier_compute_counterfactuals.py
 create mode 100755 base/glacier/src/help_functions.py
 create mode 100755 base/glacier/src/keras_models.py
 create mode 100755 base/glacier/src/requirements.txt

diff --git a/base/glacier/README.md b/base/glacier/README.md
new file mode 100755
index 000000000..1a86ba0d6
--- /dev/null
+++ b/base/glacier/README.md
@@ -0,0 +1,107 @@
+# Glacier: Guided Locally Constrained Counterfactual Explanations for Time Series Classification
+Glacier: Guided Locally Constrained Counterfactual Explanations for Time Series Classification<br/>
+Published in the Machine Learning (MACH) journal, March 2024.<br/>
+(Originally published at the DS'2021 conference)<br/>
+[[paper_link]](https://doi.org/10.1007/s10994-023-06502-x)
+
+We propose `Glacier`, a model-agnostic method for generating *locally constrained counterfactual explanations* for time series classification using gradient search, either in the original space or in a latent space learned by an auto-encoder. An additional flexibility of our method is the inclusion of constraints on the counterfactual generation process that favour applying changes to particular time series points or segments while discouraging changes to others. The main purpose of these constraints is to ensure the generation of more realistic and feasible counterfactuals. We conduct extensive experiments on a total of 40 datasets from the UCR archive, comparing different instantiations of Glacier against three competitors.
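+
+As a rough illustration of the idea (not the repository's exact API; `predict_proba`, `grad_loss`, and `step_weights` are stand-ins for the trained classifier, the gradient of the counterfactual loss, and the constraint weights), a gradient search in the original space (the `NoAE` variant) could look like:
+```
+def guided_search(x, predict_proba, grad_loss, step_weights,
+                  lr=1e-3, tau=0.5, max_iter=1000):
+    """Take constrained gradient steps until the predicted probability
+    of the target class crosses the decision boundary threshold tau."""
+    x_cf = x.copy()
+    for _ in range(max_iter):
+        if predict_proba(x_cf) >= tau:  # counterfactual found
+            break
+        # step_weights in [0, 1]: 1 = free to change, 0 = frozen timestep
+        x_cf -= lr * step_weights * grad_loss(x_cf)
+    return x_cf
+```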
+
+If you find this GitHub repo useful in your research work, please consider citing our paper:
+```
+@article{wang_glacier_2024,
+	title = {Glacier: guided locally constrained counterfactual explanations for time series classification},
+	volume = {113},
+	issn = {1573-0565},
+	doi = {10.1007/s10994-023-06502-x},
+	number = {3},
+	journal = {Machine Learning},
+	author = {Wang, Zhendong and Samsten, Isak and Miliou, Ioanna and Mochaourab, Rami and Papapetrou, Panagiotis},
+	month = mar,
+	year = {2024},
+}
+```
+Please also consider citing the original publication:
+```
+@inproceedings{wang_learning_2021,
+	title = {Learning Time Series Counterfactuals via Latent Space Representations},
+	booktitle = {International Conference on Discovery Science},
+	author = {Wang, Zhendong and Samsten, Isak and Mochaourab, Rami and Papapetrou, Panagiotis},
+	year = {2021},
+}
+``` 
+
+## Dependencies
+For reproducibility, all experiments and computational runtimes were evaluated on an NVIDIA GeForce RTX 2080 (GPU) and an AMD Ryzen Threadripper 2950X 16-core processor (CPU), with the following dependencies:
+  - python=3.8
+  - cudatoolkit=11.2
+  - cudnn=8.1
+
+### Installation
+We recommend creating a conda virtual environment with the following command:
+```
+conda env create -f environment.yml
+```
+
+## Data preparation
+
+We mainly focus on binary classification tasks with univariate time series data. After filtering, a subset of 40 datasets from the [UCR archive](https://www.cs.ucr.edu/~eamonn/time_series_data_2018/) is selected, covering a variety of data sources. For example, *TwoLeadECG* represents ECG measurements in the medical domain and *Wafer* exemplifies sensor data in semiconductor manufacturing. Time series length varies from `24` timesteps (*ItalyPowerDemand*) to `2709` (*HandOutlines*) in our chosen subset.
+
+For the evaluation in our experiment, we apply 5-fold cross-validation with stratified splits, giving a standard 80/20 training/testing partition for all datasets. Also, to compensate for imbalanced target classes in specific datasets, we apply a simple upsampling technique; we then generate counterfactual explanations for a subset of 50 test samples, selected with a fixed random seed.
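+
+As a rough sketch of this protocol (function and variable names, and the seed, are ours rather than the repository's):
+```
+import numpy as np
+from sklearn.model_selection import StratifiedKFold
+from sklearn.utils import resample
+
+def evaluation_splits(X, y, n_folds=5, n_cf_samples=50, seed=42):
+    rng = np.random.RandomState(seed)
+    skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)
+    for train_idx, test_idx in skf.split(X, y):
+        X_train, y_train = X[train_idx], y[train_idx]
+        # upsample the minority class to match the majority class size
+        labels, counts = np.unique(y_train, return_counts=True)
+        minority = labels[np.argmin(counts)]
+        X_up = resample(X_train[y_train == minority],
+                        n_samples=counts.max(), random_state=seed)
+        X_train = np.concatenate([X_train[y_train != minority], X_up])
+        y_train = np.concatenate([y_train[y_train != minority],
+                                  np.full(counts.max(), minority)])
+        # explain a fixed random subset of at most 50 test samples
+        cf_idx = rng.choice(test_idx, size=min(n_cf_samples, len(test_idx)),
+                            replace=False)
+        yield X_train, y_train, X[test_idx], y[test_idx], cf_idx
+```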
+
+## Experiment Setup
+
+In the experiment, we train `LSTM-FCN`, a recent state-of-the-art deep model, as the black-box classifier to which the Glacier counterfactual framework is applied. We then employ two autoencoders to transform the original samples into counterfactuals, a 1-dimensional convolutional neural network (`1dCNN`) and an `LSTM` model, together with a version that applies gradient search directly in the original space (`NoAE`).
+
+Thus, we explore four variants of Glacier across the 40 UCR datasets: *unconstrained*, *example-specific*, *global*, and *uniform*. In our main experiment, we set the decision boundary threshold $\tau=0.5$ and the prediction margin weight $w=0.5$ in the optimization function. The variants are described below (a sketch of the corresponding constraint weights follows the list):
+- *unconstrained*: encouraging changes in the whole time series;
+- *example-specific*: imposing constraints that favour changes to particular time series points of a given test example, using LIMESegment (Sivill et al., 2022);
+- *global*: imposing constraints that favour changes to particular time series points across all test examples, using interval importance;
+- *uniform*: discouraging changes in the whole time series.
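+
+As a rough sketch (our naming, not the repository's), the four variants can be viewed as different per-timestep weight vectors entering the proximity term of the optimization, where a higher weight penalises changes at that timestep more heavily:
+```
+import numpy as np
+
+def constraint_weights(variant, n_timesteps, importance=None):
+    if variant == "unconstrained":
+        return np.zeros(n_timesteps)  # changes encouraged everywhere
+    if variant == "uniform":
+        return np.ones(n_timesteps)   # changes discouraged everywhere
+    # 'example-specific' (LIMESegment) and 'global' (interval importance)
+    # variants down-weight the timesteps that the importance scores mark
+    # as favourable to change
+    assert importance is not None and len(importance) == n_timesteps
+    return np.clip(1.0 - importance / importance.max(), 0.0, 1.0)
+```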
+
+
+### Baseline Models
+We additionally compare against three baseline models for counterfactual generation (a minimal sketch of their settings follows the list):
+- (Karlsson et al., 2018) *Random shapelet forest (RSF)*: we set the number of estimators to `50` and the max depth to `5`, while the other parameters are kept at their default values (see the original implementation: `https://github.com/isaksamsten/wildboar`);
+- (Karlsson et al., 2018) *k-NN counterfactual (k-NN)*: we train a k-NN classifier with k set to `5` and the Euclidean distance metric;
+- (Delaney et al., 2021) *Native Guide (NG)*: we adopt the implementation and default parameters from `https://github.com/e-delaney/Instance-Based_CFE_TSC`.
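+
+For reference, a minimal sketch of the two classifier-based baselines' hyperparameter settings (the wildboar import path is an assumption and may differ between versions):
+```
+from sklearn.neighbors import KNeighborsClassifier
+from wildboar.ensemble import ShapeletForestClassifier  # assumed import path
+
+rsf = ShapeletForestClassifier(n_estimators=50, max_depth=5)
+knn = KNeighborsClassifier(n_neighbors=5, metric="euclidean")
+```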
+
+
+## Running the comparison
+
+The implementation of our experiment is available [here](./src/gc_latentcf_search.py), together with a detailed description of the Glacier model parameters. To run the experiment for a single dataset (e.g. *TwoLeadECG*), simply execute:
+```
+python src/gc_latentcf_search.py --dataset TwoLeadECG --pos 1 --neg 2 --output twoleadecg-outfile.csv --w-type local --lr-list 0.0001 0.0001 0.0001;
+```
+
+Then, to run the experiment for all 40 UCR datasets:
+```
+bash run_all_datasets.sh
+```
+
+and, to run the counterfactual baselines (RSF, k-NN, and NG):
+```
+bash run_cf_baselines.sh
+```
+
+Finally, we conduct an ablation study on the *TwoLeadECG* dataset to examine the effects of two Glacier hyperparameters, the prediction margin weight $w$ and the decision boundary threshold $\tau$, using the following two scripts:
+```
+bash run_ablation_pred_margin.sh
+```
+
+```
+bash run_ablation_tau.sh
+```
+
+
+## Results
+
+The results of the main analysis are available in this [csv file](./results/results_final_all.csv), together with the two ablation study [csv files](./results/).
+
+In addition, we show two example cases generated by Glacier. The first one shows counterfactuals for ECG measurements using the *unconstrained* and *example-specific* constraints in Glacier:
+
+![examples](./results/results_example_local.png) 
+
+The second case shows counterfactuals for engine signals with the *unconstrained* and *global* constraints in Glacier:
+
+![examples](./results/results_example_global.png) 
+
+In both cases, the original time series are shown in blue and the generated counterfactuals of the opposite class in red. The red bars indicate the time series points for which changes are favourable.
\ No newline at end of file
diff --git a/base/glacier/src/LIMESegment/Utils/__init__.py b/base/glacier/src/LIMESegment/Utils/__init__.py
new file mode 100755
index 000000000..e69de29bb
diff --git a/base/glacier/src/LIMESegment/Utils/explanations.py b/base/glacier/src/LIMESegment/Utils/explanations.py
new file mode 100755
index 000000000..b3e6de421
--- /dev/null
+++ b/base/glacier/src/LIMESegment/Utils/explanations.py
@@ -0,0 +1,214 @@
+
+from scipy import signal
+import numpy as np
+import stumpy
+from sklearn.linear_model import Ridge
+from sklearn.utils import check_random_state
+from fastdtw import fastdtw
+import random
+
+
+def NNSegment(t, window_size, change_points):
+    """Return the change points of given time series
+    Input: 
+        t - numpy array of size T
+        window_size - int where window_size < T 
+        change_points - user specified number of change points
+    Output: 
+        np.array of change point indexes 
+    """
+    """Return the change points of given time series
+    Input: 
+        t - numpy array of size T
+        window_size - int where window_size < T 
+        change_points - user specified number of change points
+    Output: 
+        np.array of change point indexes 
+    """
+    # Matrix profile over sliding windows of length window_size; a change
+    # point is proposed wherever consecutive windows' nearest neighbours
+    # stop being consecutive themselves.
+    mp = stumpy.stump(t, m=window_size)
+    proposed_cp = [i for i in range(0, mp.shape[0] - 1) if mp[i + 1, 1] != mp[i, 1] + 1]
+    tolerance = int(window_size / 2)
+    # Rank the proposed change points by the local change in mean and spread.
+    variances = []
+    for idx in proposed_cp:
+        mean_change = np.abs(np.mean(t[idx - tolerance:idx]) - np.mean(t[idx:idx + tolerance]))
+        std_change = np.abs(np.std(t[idx - tolerance:idx]) - np.std(t[idx:idx + tolerance]))
+        std_mean = np.mean([np.std(t[idx - tolerance:idx]), np.std(t[idx:idx + tolerance])])
+        variances.append(mean_change * std_change / std_mean)
+    sorted_idx = np.flip(np.array(variances).argsort())
+    sorted_cp = [proposed_cp[idx] for idx in sorted_idx]
+    selected_cp = []
+    covered = list(np.arange(0,tolerance))
+    ic, i = 0, 0 
+    while ic < change_points:
+        if i == len(sorted_cp):
+            break
+        if sorted_cp[i] not in covered:
+            ic += 1
+            selected_cp.append(sorted_cp[i])
+            covered = covered + list(np.arange(sorted_cp[i],sorted_cp[i]+tolerance))
+            covered = covered + list(np.arange(sorted_cp[i]-tolerance,sorted_cp[i]))
+        i +=1 
+    selected_cp = np.sort(np.asarray(selected_cp))
+    return(list(selected_cp))
+
+def backgroundIdentification(original_signal, nperseg=40):
+    # Identify a stable "background" frequency band via the STFT: the band
+    # whose magnitude has the highest mean-to-std ratio over time.
+    f, t, Zxx = signal.stft(original_signal.reshape(original_signal.shape[0]), 1, nperseg=nperseg)
+    frequency_composition_abs = np.abs(Zxx)
+    measures = []
+    for freq, freq_composition in zip(f, frequency_composition_abs):
+        measures.append(np.mean(freq_composition) / np.std(freq_composition))
+    max_value = max(measures)
+    selected_frequency = measures.index(max_value)
+    weights = 1 - (np.asarray(measures) / sum(measures))
+    dummymatrix = np.zeros((len(f),len(t)))
+    dummymatrix[selected_frequency,:] = 1  
+    #Option to admit information from other frequency bands
+    """dummymatrix = np.ones((len(f),len(t)))
+    for i in range(0,len(weights)):
+        dummymatrix[i,:] = dummymatrix[i,:] * weights[i]"""
+    
+    background_frequency = Zxx * dummymatrix
+    _, xrec = signal.istft(background_frequency, 1)
+    xrec = xrec[:original_signal.shape[0]]
+    xrec = xrec.reshape(original_signal.shape)
+    return xrec
+
+def RBP(generated_samples_interpretable, original_signal, segment_indexes, f):
+    generated_samples_raw = []
+    # reconstruct the background once, using f as the STFT segment length
+    xrec = backgroundIdentification(original_signal, f)
+    for sample_interpretable in generated_samples_interpretable:
+        raw_signal = original_signal.copy()
+        for index in range(0,len(sample_interpretable)-1):
+            if sample_interpretable[index] == 0:
+                index0 = segment_indexes[index]
+                index1 = segment_indexes[index+1]
+                raw_signal[index0:index1] = xrec[index0:index1]
+        generated_samples_raw.append(np.asarray(raw_signal))
+    return np.asarray(generated_samples_raw)
+
+def RBPIndividual(original_signal, index0, index1):
+    xrec = backgroundIdentification(original_signal)
+    raw_signal = original_signal.copy()
+    raw_signal[index0:index1] = xrec[index0:index1]
+    return raw_signal
+
+def LIMESegment(example, model, model_type='class', distance='dtw', n=100, window_size=None, cp=None, f=None, random_state=None):
+    random_state = check_random_state(random_state)
+    if window_size is None:
+        window_size = int(example.shape[0] / 5)
+    if cp is None:
+        cp = 3
+    if f is None:
+        f = int(example.shape[0] / 10)
+
+    cp_indexes = NNSegment(example.reshape(example.shape[0]), window_size, cp)
+    segment_indexes = [0] + cp_indexes + [-1]
+    
+    generated_samples_interpretable = [random_state.binomial(1, 0.5, len(cp_indexes) + 1) for _ in range(0, n)]  # random on/off mask over segments
+    generated_samples_raw = RBP(generated_samples_interpretable, example, segment_indexes, f)
+    sample_predictions = model.predict(generated_samples_raw)
+    
+    if model_type == 'proba':
+        y_labels = np.argmax(sample_predictions, axis=1)
+    elif isinstance(model_type, int):  # an int selects the probability of that target class
+        y_labels = sample_predictions[:, model_type]
+    else:
+        y_labels = sample_predictions
+    
+    if distance == 'dtw':
+        distances = np.asarray([fastdtw(example, sample)[0] for sample in generated_samples_raw])
+        weights = np.exp(-(np.abs((distances - np.mean(distances))/np.std(distances)).reshape(n,)))
+    elif distance == 'euclidean':
+        distances = np.asarray([np.linalg.norm(np.ones(len(cp_indexes) + 1) - x) for x in generated_samples_interpretable])
+        # exponential kernel over distances in the interpretable space
+        weights = np.exp(-(np.abs(distances ** 2 / (0.75 * len(segment_indexes) ** 2)).reshape(n,)))
+        
+    clf = Ridge(random_state=random_state)
+    clf.fit(generated_samples_interpretable, y_labels, weights)
+    
+    return clf.coef_, segment_indexes
+
+def background_perturb(original_signal, index0, index1, X_background):
+    perturbed_signal = original_signal.copy()
+    # replace the segment with the same segment from a random background series
+    selected_background_ts = X_background[random.randint(0, len(X_background) - 1)]
+    perturbed_signal[index0:index1] = selected_background_ts.reshape(perturbed_signal.shape)[index0:index1]
+    return np.asarray(perturbed_signal)
+
+def mean_perturb(original_signal, index0, index1, mean_value, ws):
+    perturbed_signal = original_signal.copy()
+    mean_signal = np.ones(original_signal.shape)*mean_value
+    perturbed_signal[index0:index1] = mean_signal[index0:index1]
+    return perturbed_signal
+
+def calculate_mean(cp_indexes, X_background, ws):
+    sample_averages = []
+    for ts in X_background: 
+        window_averages = np.mean([np.mean(ts[i:i+ws]) for i in cp_indexes])
+        sample_averages.append(window_averages)
+    return np.mean(sample_averages)
+
+
+def LEFTIST(example, model, X_background, model_type="class", n=100):
+    ts_length = example.shape[0]
+    cp_indexes = [i for i in range(0,example.shape[0],int(example.shape[0]/10))]
+    example_interpretable = np.ones(len(cp_indexes))
+    generated_samples_interpretable = [ np.random.binomial(1, 0.5, len(cp_indexes)) for _ in range(0,n)]
+    generated_samples_original = []
+    segment_indexes = cp_indexes + [-1]
+    for sample_interpretable in generated_samples_interpretable:
+        raw_sample = example.copy()
+        for index in range(0,len(sample_interpretable)):
+            if sample_interpretable[index] == 0:
+                index0 = segment_indexes[index]
+                index1 = segment_indexes[index+1]
+                raw_sample = background_perturb(raw_sample,index0,index1,X_background)
+        generated_samples_original.append((raw_sample))
+    
+    sample_predictions = model.predict(np.asarray(generated_samples_original))
+    if model_type == 'proba':
+        y_labels = np.argmax(sample_predictions, axis=1)
+    else:
+        y_labels = sample_predictions
+
+    distances = np.asarray([np.linalg.norm(np.ones(len(cp_indexes))-x) for x in generated_samples_interpretable])
+    weights = np.exp(-(np.abs(distances**2/(len(segment_indexes)**2))))
+    clf = Ridge()
+    clf.fit(generated_samples_interpretable,y_labels,weights)
+    return clf.coef_, cp_indexes
+
+
+def NEVES(example, model, X_background, model_type="class", n=100):
+    ts_length = example.shape[0]
+    cp_indexes = [i for i in range(0,example.shape[0],int(example.shape[0]/10))]
+    example_interpretable = np.ones(len(cp_indexes))
+    generated_samples_interpretable = [ np.random.binomial(1, 0.5, len(cp_indexes)) for _ in range(0,n)]
+    generated_samples_original = []
+    mean_perturb_value = calculate_mean(cp_indexes, X_background, int(cp_indexes[1]-cp_indexes[0]))
+    segment_indexes = cp_indexes + [ts_length]
+    for sample_interpretable in generated_samples_interpretable:
+        raw_sample = example.copy()
+        for index in range(0,len(sample_interpretable)):
+            if sample_interpretable[index] == 0:
+                index0 = segment_indexes[index]
+                index1 = segment_indexes[index+1]
+                raw_sample = mean_perturb(raw_sample,index0,index1,mean_perturb_value,int(cp_indexes[1]-cp_indexes[0]))
+        generated_samples_original.append(raw_sample)
+
+    sample_predictions = model.predict(np.asarray(generated_samples_original))
+    if model_type == 'proba':
+        y_labels = np.argmax(sample_predictions, axis=1)
+    else:
+        y_labels = sample_predictions
+
+    distances = np.asarray([np.linalg.norm(np.ones(len(cp_indexes))-x) for x in generated_samples_interpretable])
+    weights = np.exp(-(np.abs(distances**2/(len(segment_indexes)**2))).reshape(n,))
+    clf = Ridge()
+    clf.fit(generated_samples_interpretable,y_labels,weights)
+    return clf.coef_, cp_indexes
+
+
+
diff --git a/base/glacier/src/LIMESegment/Utils/metrics.py b/base/glacier/src/LIMESegment/Utils/metrics.py
new file mode 100755
index 000000000..b1704bf42
--- /dev/null
+++ b/base/glacier/src/LIMESegment/Utils/metrics.py
@@ -0,0 +1,53 @@
+import numpy as np
+
+
+def add_noise(ts):
+    mu, sigma = 0, 0.1 # mean and standard deviation
+    noise = np.random.normal(mu, sigma, ts.shape[0])
+    noisy_ts = np.add(ts.reshape(ts.shape[0]),noise.reshape(ts.shape[0]))
+    return noisy_ts
+
+def robustness(explanations, noisy_explanations):
+    robust = 0
+    for i in range(0, len(explanations)):
+        original_order = np.argsort(explanations[i][0])
+        noisy_order = np.argsort(noisy_explanations[i][0])
+        if np.array_equal(original_order, noisy_order[:len(original_order)]):
+            robust += 1
+    return robust / len(explanations)
+
+def reverse_segment(ts, index0, index1):
+    perturbed_ts = ts.copy()
+    perturbed_ts[index0:index1] = np.flip(ts[index0:index1])
+    return perturbed_ts
+
+def faithfulness(explanations, x_test, y_test, original_predictions, model, model_type):
+    perturbed_samples = []
+    for i in range(0,len(explanations)):
+        top_index = np.argmax(np.abs(explanations[i][0]))
+        segment_indices = explanations[i][1]+[-1]
+        example_ts = x_test[i].copy()
+        reversed_sample = reverse_segment(example_ts,segment_indices[top_index],segment_indices[top_index+1])
+        perturbed_samples.append(reversed_sample)
+
+    if model_type == 'proba':
+        reversed_predictions = model.predict(np.asarray(perturbed_samples))
+        correct_indexes = []
+        differences = []
+        for i in range(0,len(y_test)):
+            if y_test[i] == np.argmax(reversed_predictions[i]):
+                correct_indexes.append(i)
+        for index in correct_indexes:
+            prediction_index = int(np.argmax(original_predictions[index]))
+            differences.append(np.abs(original_predictions[index][prediction_index] - reversed_predictions[index][prediction_index]))
+        return np.mean(differences)
+    else:
+        
+        reversed_samples = np.asarray(perturbed_samples)
+        reversed_predictions = model.predict(reversed_samples.reshape(reversed_samples.shape[:2]))
+        correct_indexes = []
+        for i in range(0,len(original_predictions)):
+            if original_predictions[i] == reversed_predictions[i]:
+                correct_indexes.append(i)
+        return len(correct_indexes)/len(original_predictions)
\ No newline at end of file
diff --git a/base/glacier/src/LIMESegment/Utils/perturbations.py b/base/glacier/src/LIMESegment/Utils/perturbations.py
new file mode 100755
index 000000000..b27fc85ad
--- /dev/null
+++ b/base/glacier/src/LIMESegment/Utils/perturbations.py
@@ -0,0 +1,61 @@
+
+from scipy import signal
+import numpy as np
+from scipy.ndimage import gaussian_filter
+
+
+def backgroundIdentification(original_signal):
+    f, t, Zxx = signal.stft(original_signal,1,nperseg=40)
+    frequency_composition_abs = np.abs(Zxx)
+    measures = []
+    for freq,freq_composition in zip(f,frequency_composition_abs):
+        measures.append(np.mean(freq_composition)/np.std(freq_composition))
+    max_value = max(measures)
+    selected_frequency = measures.index(max_value)
+    weights = 1 - (np.asarray(measures) / sum(measures))
+    dummymatrix = np.zeros((len(f),len(t)))
+    dummymatrix[selected_frequency,:] = 1  
+    #Option to admit information from other frequency bands
+    """dummymatrix = np.ones((len(f),len(t)))
+    for i in range(0,len(weights)):
+        dummymatrix[i,:] = dummymatrix[i,:] * weights[i]"""
+    
+    background_frequency = Zxx * dummymatrix
+    _, xrec = signal.istft(background_frequency, 1)
+    return xrec
+
+def RBP(generated_samples_interpretable, original_signal, segment_indexes):
+    generated_samples_raw = []
+    xrec = backgroundIdentification(original_signal)
+    for sample_interpretable in generated_samples_interpretable:
+        raw_signal = original_signal.copy()
+        for index in range(0,len(sample_interpretable)-1):
+            if sample_interpretable[index] == 0:
+                index0 = segment_indexes[index]
+                index1 = segment_indexes[index+1]
+                raw_signal[index0:index1] = xrec[index0:index1]
+        generated_samples_raw.append(np.asarray(raw_signal))
+    return np.asarray(generated_samples_raw)
+
+def RBPIndividual(original_signal, index0, index1):
+    xrec = backgroundIdentification(original_signal)
+    raw_signal = original_signal.copy()
+    raw_signal[index0:index1] = xrec[index0:index1]
+    return raw_signal
+
+def zeroPerturb(original_signal, index0, index1):
+    new_signal = original_signal.copy()
+    new_signal[index0:index1] = np.zeros(index1 - index0)
+    return new_signal
+
+def noisePerturb(original_signal, index0, index1):
+    new_signal = original_signal.copy()
+    new_signal[index0:index1] = np.random.randint(-100, 100, index1 - index0)
+    return new_signal
+
+
+def blurPerturb(original_signal, index0, index1):
+    new_signal = original_signal.copy()
+    new_signal[index0:index1] = gaussian_filter(new_signal[index0:index1],np.std(original_signal))
+    return new_signal
+
z>aN5sEHl3g@pXEczD3(e!tT*+2vP|@q&dHc<o~*?_!`Z4ixOp_lwG2g{*;>jUn{*s
zGe|ifS?SNFE46g-3n;f7$%b532@!;TdoW6}R1hzOx?_NMNt=8CJkkn;2zI2UOpqXL
z)T)W=$fUj_-ljP|CA6&P5^sv-JeLSGDO=DpgVaNYM$c85kE<z20~IgxoSkV2{j?7C
zt8+l{saL3%5q0F^6MX$`G^d|OQfGC24OR(%o#C+$nue(_;m;2217;s?F%O!y!$b!Y
zpHN{lMNdpGl+qTII4LT9RwrA&0yQvcGk^t(UXvic8V5jFjOtGN><4pj-UJhT&wgz<
zX!p`q3938Jq&bWSnJo1nq64ub?Ym0grM)xB<FFeh9DYv^IGKqp>=4F16VU9B<y9zJ
zRcI-{D}rG!{d0^{cDn(3klk+TRb33}^kh}0KPr<nT*ae^N|1L?&-5)gdzUp*Ec+Iz
zXq?ZUl}c!JidN;Sunn*N)wgfG{*{f_zjX8Uo3FigtyPu=HYwQv{zsVHiNqi&B+f2W
z203!TBemfUT$zMW5A=v+Sw}kL?NMh-_gu<-CA&`?7MNx-F#QYKJ^5jEM)thH#11Nl
zJkLY+oUkh$l*5v9<G!H)904^tCK!)a(#^2cuurO$Q2qqM-IC>S)Q0yI1_{(65LhyG
zuVpELD97-s$VC%T8w?|RD#l0vZOK#$5?w)?ZCT#J+++awj#r;=ND{9G6a?QCR-9mu
zS0iT)XfNsqkQUEL5(30i{W#Mpav5;ZpLhYV?8Tt;ukli4+Svr7+yCQQRiJi97r%_Y
zHtlNqn;J$X8D#j0U^yJyu>;Tpw+E#%h*h+~j45XbuF#%2u4HhJRNc-?d)64HDK9Gr
z$?aIs_T<0;vovRP^f|;oNMKy*$}fTXp7^ATbN=U~sRpgy9>G&3pQod|msX8bjq*;R
zcElT4UshCA-J(>M0U##D#X~eC{UeVwVF}p)X=j91gATXQYtG^|;vBuJG})!{rvOBU
z#a55XtTT1Y{t=T>iZ-||-A!fyW*so{pAPD#_;qyLIwZZMi|J9!cpileQ3j|e5c!0X
zVo0lzGqDKe1+@795vANH05<s=;!Pwib8-RW2vDypO~TaD?2{OqO3fE^D+SQTK{u3*
zVGxnOKjlq&<T&D2u&MzKqCzH1&@igykai{AGTS&E82V8IIA)M0b<ysVUt|;j7(mb7
z0q!4#loR^|U0Yn7J!Ik@5?SYAs&-Xy^XQh|#~5e2C5KcVwLGz<nZhTRm}p6qBy9j)
zfUSYD6MUwew(hr<e)Q$P-Mx3EMc^hI#JJXqtfV~)+8PCIO8`EIxrE|Ada!RYo&{C)
z_R+0=8-0j#7SwBy6GAPAyehgUv#tIvDr8+iJyY2z6Hpt<i35aA>TFsED764rt$p@D
z6E7n81V<#d6!0zqewN_8VAhs7b`r*MqjOu0yrR5U1VPluGVW}7HquoiUEE}M1)5;X
zqW7NE023=9l38$2K|tlxA)s*$8<y5~FBG9Pf@DXQh^0{*Hji8q@iXXeRTX&(27d<)
z;@2qoEt*;)Xb|zc(5k=}D0prm6@I&O(SzOvXc!1pdE9YnzT;96-%xsSrX0nuV)5sQ
z@TX94^oG&U?T;O+t~U_8Z~!VOZ!)+HH1d$QK#R;6avE!ZAq)&renOYfxlOE4r4mXk
zw+=Ch>hnx;#^v1EH^8J@Ie7+J?ySNqB<oXPaL6KJOXM3UUx7RjjDTvTaV73RUaJ^W
zf-m59z)Cd^dq7Jmug+TFaj3U^keli)<+-=ipswjHwaHs*@Ika3Bk&d)SG)zWF!h$&
zY|ZIg>a!NSbKiLY@u3u=v5+r7G|F?LaSJyHz8V4@$xN*jApp`)*MV8Ti@a4<vLo6k
z3xa#%caTW;_RP?VpQl#RKVp*-b=?{SF{5NS0>UK=PzcorRQiw-5-N|h_*9|!<QI7L
zS+G;p#qZJB@6*`Y+}QM@)P11dX$1A+;oeJgU6+c0+7YQ+!-|(l>L_aA7%qY-6Uj48
z7-09HZXgszAl7W^4y5mBTf;P&TDkk*5C8SAZft(_O82F!r~c{hUS7LW#LSd$ODbdf
zBl&jmD68?VlGb-1FBIA245fu}W4xH#*R*f`{<xVz?(*jM5wG|IV-GkeH^tf95@%uT
zD-_|RXa;1bi5P}n#5f^qrSY+3L5)z+jwAi0`}X))eryUGvEsW}@m;jo1P&h19+t+-
z<Ks|NrF;puX9Z!SqI4p2w#xYl`mzAYUrm+|&00~jXzU7R9T}UsFfFTqCu><HtKv=W
z9z(uDIa*KRq^>$?C11mNtNBTF>XJJ3THd&Ai67?2@)ami&?;ZfoB5hyFrwEYV!tMO
zX~fdF{$4d-ETX<06Ovv-_;O=|Vwj+{R>(D#c0d1xPx-B1y}q8}8%?{n4*Wy0t8?qy
zQPx|J#TTT09=D|yqgw$$0l)*N0kCGU8Ei%|Fp^gtB=L?viS|!wJXEoNSso056G_pk
z5U{My1?tnnH?+kAY)@9^J{gD~(AG(E#2-;Y&R${c=!WhZ*x2s!dIy*^?S=gO`qvRm
zr0)o?tnb#&t<SE<E9<Qqg_;qi5+s6R8;NvhTS}$*B_8w(F#O{*fXv$jpy2yLKe($b
zcX7vWps#fbQJ~rP4zK*yGzd{#^FOBb;b|0SFHMNxm12AZ!7V>}A_^jT6_C6OAA%_A
z)znuKz(fV8y@hFY5_a;M(}u9W8LGfq>Q#y_EBMF~rjr>8zWN<}np3fO$?)C9nXOtw
z>G6DN)At#K=>L$;PAbc@cNOmO>KHhOUp@Enx*kcu-RP4MW)+;_ez7ePL-<mPHn<DG
z_^hEjP%*{7I?0{?Vijtc*6oknvcsAx&fCzNP*Zi?#Vkb2b@Ss>wo`-JqO#83uQKr?
zEIt84B=(O%(FS$sqR4KwT0)>ftW%OvatDcAgcn0DYaDIjGo12+?IIUF>bDTa9PUhn
zVh9g0NX0Kvhl_wkIuM<Buw4k&a(Y|clk=b%sME&BD)NsR4X#c5Mp+b3Vjl!a<-)Y1
z@YPabQ|sci^|7&0ZX5YVUdiu{;P~-Ers<1<3Wcliv5JnWD5DB8C|_Iop~|;XpuUji
z_i5U<DIt7O7z-lg96+3b73o|qzIeP!iX^>-M18tIY?fF;2)9u@tZuREuI1vxm}5DM
huFK9iHqr`e3+^gX+d1x>U}ssG7;aAlfXem%_+Lk_qF(?2

literal 0
HcmV?d00001

diff --git a/base/glacier/src/__pycache__/help_functions.cpython-38.pyc b/base/glacier/src/__pycache__/help_functions.cpython-38.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..4ba52c15db812b9627894ac010f6337c28c04034
GIT binary patch
literal 8263
zcmb_hTZ|l6TCRInS6`;5J>!|Nz463xF1_((ZL%SRB+g>TYuI>_jZNY$5^BqRs(Z?=
zu5O>Iwmq}e%VoSItHpwSV7ZGKgv9y<33y+LHzeM8JiKrbLWUPW2#E;ReE&J!Jw0QG
zhZWtbbFXvx|Ns2|_y6^?wVJ2l_mAKC%g!f1u4(^3mDyhzl{fKZCv;6?8q*WaSGn%%
zvTpdM%&ng7JG$yC`L1j$`<~1zeigZq)OvNlF2|V3T(99bdh`CgtXoO5cfda&%XYHR
zTl5!u2mOPx?j%dSWq(=MzO1nlbMI-)-7@?Yc8Ynda!>aUvD2)|YA79MbvAcT^N+9w
zn@9dR_62r;EuiNqw#W{m<tRJDme?{{R@n+WgqEk-VRi)hF?N<c#g3xq8MexvM$2(_
zj6H+=S$3Q~i~KqE9D5%5^Guij7g(!nu+Ph~#$N1{*$H+M@4vwGD_ZNv9ughbTDquS
zj?>HG-7D=diFnHpE|0RXH%KB;%GQUSP7*!PMLle{hdgX=1X;V!BMfW4-Nzg64D%$8
z_{FfD_qk}SMIqkK*P^()e!b5#Oy9WNXK^Qv*xO+qrTO`bZCZHtXQJ{Zp6od!kw%R3
z4Q$)MVN7N+3%SKA%w=V4-)0`RUlrB&qHLJtAMiMjc$?;!{HSc;Igcl6ASv`h3$(&O
zin*_75A-!0pxlX*C`iLzB&saT!z{{Wtz`-;i;@nn;#FQl@<8Kr$Uc4N^m@M+ole4=
zQFc1IoAanQ?43?<<}^;bC-b-$oy;O0N7>1Cf0$xl9nfVMCfVtXw@<G}$sp(q({>*B
z)9ln>L%2Z@r*R$x3z$B$kZ8K2Z!XRr?bKvU8zMga(HZCQ$TPl;Lar6s4KRMo__dcu
zdTwl+g}$SYjKbKqm_bxzHgiZoS(yZrM3Tg@Ti?-j?E~$L+E=yrv}?E^pl)ML=yybE
zOchb;^b;1uOgOzLU+**jKsyPujJV2!iD3ARaG3X_wB2WDP5N0TO4lI^LBjp=fcNjl
zy*S5_Z-+_Ds8mk+9VJbo$)hC9<J(cNJFwF3_XZ$e8eyj9fJZFog}fW5!5xyWTr{#g
z8jLGJzA=bIEnkQ9t<z@naM-ei)gMHu<fAa#*=^z6k<ubumG|$oOkM}^gx=w+$R(Rq
zLUJQKIWAa_Lbi;$6j05m>z-~IhW?rVe>Wa&F74sr?mYj5dGPz!b3z*z|Mpy4Dmm35
zxjN<ShiELE5ABgRs*I|m+NeIm=T}B^lFxJCbE9Z%TaW-7vThV+VHI}a6_uh|)QWmh
zD%_&{L{1yvbl7tEQ&>m|{81{cQt~t<B>$~>NhMBFDk@3_)WtEueZLe&lHdFw^;aWs
zc!;|mfEWBQ&2fa1&rzo>`73NG1e|EfS136|$uUY+DWTgo!-lFNALhe_KaIZZB_vwY
z_;iK^recfrf3s!f32agGu0a-#)4I=6LaM0c&;=z05d^{uf?l5up)mqc4T4*^)vAXR
zv-mTVoTOxT@3KU4#EHKL@MNS?T-~+YqneKYf6AVRbg|p=_M<>Q2ak<M-tG%16xue7
z8#8u{kyXH;-E{d2xlXtGzCJPwbK58^y2mJi_mBs>u;~qwh8^^G4BTpa?cpQ*_!&&t
zGB~ZO%DSj^qA(wF@GMNbQOgn~5M@0aM4S|<FgtNB$~ii-PM`Nc7>jg+sH6cYJ-UdQ
z@X}yBOD3F5S`$&DVI&AN7sC*|jr+eDu^BqatF}lJ1xkAc(_uf4)g{q1qiVRgcb?&y
z{B^W`Hob_6d+D{UKTZ?S!(8KMF?K=^k|{ACr?(@XMZwHr{kiFWSU?Bjuh45hLCLF>
z{3H?|W)pAbkDshLdondB!3!BNnT~}6okMAldMD;j1;9rIR~zhH(MDh?GY)FZylL|H
za#=g9Z9(xb0l_S2r0@%USsR(Tal<S$sC~(PW_3(v-zV{b0FgMLPl<)ICMocc1W*da
zL^eo3V8F|RkOg;QmajuDY{^Ms#%V5$bRbN0Lec8igqaR|zMH}<1}KRtIvA{Ckb&F2
zdo7^)<HVV3_fuE{7{oB4A>Y878R;1ftgadhx~t<xYt=`N<(mAb(cUJBocyR<!XqyV
z@dDJ*L75Y|2ByQ9G3{FhLu*CmRO^^48ZL;IdTI=n3o?4f+T`?f;{bkc4Wnq2ppMsu
zp^}g&<CoC4Uv}pJ1o~vW)RD7a#hA=RqUo-2-0*Zx7W(1yNZQ2J$&Yj^Nr8zcYl7Tz
zM=;x;5U1^FO+(EoaEed#o5l)ui;_kqLmQPwE>yj9)8H4WU2>M{7XPE%C|sxjy8r;V
zZ;i^idBcJ!C<7Ie;xI;DZf@Hj>m8k-!96#33VTbx0BQPILtANY+Y(W&l3vBECCuuK
zXRXRvtA+c7S>5pr&djXky=`jNyd;g4#k~9GhuGJsLUpOMDr;8($e@k}N!X4=Ih}Aq
zRH2SozZYaVjE!U+aH<vFjk7&hiA015EcGv`nl1?VkvjqjziZsGuFP=rfgy}jUl4kX
ziOPe179`>ID48)t-fKym<pfOL_+#Q3T^R>l*nD<4Py!P4Vkis1n^UhPgpac~P?)^)
zqwkAf#Y#fIK^se>?*5JOYv}&|?2dlarT7)h!r!N4*L>0JfBOG2``2jVpL=}vKbxJs
zrSo@DZ@I!4Fi|Bq5d{)ZXPh|6<^2RcOxxsA;1PDndAK2LX}&nIrB&k>kqLc+e?adL
zl9JQ<9&uJp=X*rVaoL8B>SsPOH2R**0$giO+Dk3wyBm`d`e`2Omz09iluO9Pq(z&5
zOeVgEJ!EepAq-hJ4jC?NnoCF$GKBvl$818~E$9o6P<Ec&ZaGbZw=u_Kig0Q$h<!@A
z-GDJ5b*E43gb9|QP{wU0Foo*1$X}4-fFaeW?xfGz0a%fqfw?9uFeLDuw+8+8de$nz
z1SGkz21!2`rFDpUpKl1~uDm+J*%;?Z)QwUG$7&s*+u&X75FjDvun_yTX@#Z=x;z}L
zXMcfl%KqyO&;|BiZ|b~9rzh()`B9l9<2)XT4GAT0>A8Lbe&lN!si<wcP-k|gr4m|Q
z)hf?stoikKzjpP~J8PG|bmh{Ox8AzYDhm^v6h<G;OqAY^c|TRs=P2bvo+5BfZLk5q
zC?)g+jU<e`BV2MPsk5c~9_4|Q<HrsQY&0I2{T1zAnhm2)xWM4I(Xz+2-Gd8WMlT(d
z0TD3szA1qq0X||Q7>`!c^04@@g|d|#gE17mD$3EY4Oc7*Qz%#<xpd;R`=u0OoxnvS
ze~-s)FzhbY;u}bSlF8umFH%CAh4r7Cc^RHSN7QE<(iDyv58>zn4G?_tYvlO>HO9RV
z(gM(s61JXb+Ns_Vi-4WM*qs1EP=m6+!J=6ci8k@U{>QOBvkKYZBeb?938^2yiHVhx
z@iS;3f)((RM=rn<JSCLMAeL%_L6rUzyr(T|RLS8{$+}aNw(Jp%Tv3)jmDjOhH!7G8
zr*BT_7%+%`n8HlerGo=AFm`a~XPnt_Qw`RkJ%rm!&QnLaL9H689qH;q1@W(7eo>KC
zd5bbp1`rt+=l9T%bdlKCgegP=q@5C44%*v7%e;tX_z7C8G~T6j%m8QyYODKY)}45G
z|BRQiV@NcQZl-7MYAkEF483Mq{M#6Hb&o8QmZnFt<~bB{1TdhmK=NfN(~w=WF!(X3
zGGN#pM6C)_!FKXs_*ap%tnqbBVgSglun4IOt4Bg@Ndus$SPG%1!)_!RgD|F0!h}=V
zzQgfvVpbFSWE_oI&}GXWT}zr~x^cp)(3Tn?w*lFb*X}-fTktMun}Cnb2E2)3L^&~&
z(<R0=`ekH%3yG++D3iNVyxqU0U%(i5cT29$spYXPP1Qg7@<dCbB<Tid25b$K9pgT1
z&R*+&W8qJ}{CAu8&H>?DxP}QXxg;y;Qbk)+(Y6FYg>X*93D#oYWL6bb_4fX){v!Gi
zK^;@iXqcQ@+C;6%uZqHStG|T`*&0yKk`~JXe21!H0Jr11F{uOW+JM3Kwy~q}tB7mC
zy(w%7=1YLJCAdU*>xMOQQvhDTVn>erru2ysMUf-RxWVP=NKcORaId{3=!F|Lt$SDl
z{H%aTmg1m<meNy1oa6#FEbKe$5s!o!rW>L}EREZ+faK`#pF)4DD#@c5d=Cx$Ta<j4
zUM&%5h}m6H!quaK?<uJW+MP2#v@qaApUcW_$JyDAvnp(|8)hJ|!G8(KaU%Q?6ySE4
z4ZZP0$9DC)-Y_h{43)!p(#Snt2)#dL2+aY87+{F>Ke~ka9b$znl~6JYdk>SSKFcI`
zR4&|Y6HNMkK^})zxGV5A$rdSW4_QQTihK^~gOF!~5m2-=4gpHYYZYTk@G-oO0V_u1
zum`l1it4llo(!$>>pE`~mRzSivrY}_o7Sm~*Qvp~(F%;fIy6qLgK(N$r#78+vQB;4
zf>pQO9S9Jm5RSQG4#H8M5ss_4L+}9+KuM>HMTHhfM_mM-y^p+AmeRw&jxzrZO1_80
z_ij!NFDC;f^$icGBrjZp;Afl;hIuw4R=-01A5%hN<&##QsI-9m4Zl8(q{=$~Z5sPK
zG`2P~HajEq0El=Jo;|a-_v}p9Srw_<CwD8D@ime=x)_@2!rAqd>{;Zw!#R57Au?y6
zL}kZn>P?LKj2^3D$W^-k{ovpJ`ttR6&vj3pKk_es|MH=8Dl|t{qD=~9@*`=d;F`1{
zUzf7`A*6=_rOZHSZqyjf7tRGp$w#k@nmOdKX#OsuBEN5L0VNd{zgF1%8lXUhLZ%cm
zfh08%DbZEX6tY(u9Y~*&BV<fvUw`SoGdfTln1D*m_yf%N1GG2<9d>9J3!}x+K`5?L
zv4GpOgy57aE$8lya<NPyc}Vz5`uRO?Ey=fN>=NGEH#YZRYE}SV4&{}+ij}-A1Njo=
zXg!RRdh(=|;t<YTEe^|5m*lAr6^)xVUoH+5OHityR<T$#i$f;G6Xr(q2qs(Tr4b9G
z`mN<+UWJi6aF$y&#7WoIC|C+=Yf8$gwE5EOpYdBge{nU#H<I@HD$o%{$WE-@iSzZ<
zgugEIQ@A^|$tM&5=sbc;0npRG9$t?VASl1uPm_&c{9$2SW0CyKAj<s#P$Vf{8EF>P
znQsD_@**An2iTsd%zT~T+qCt=l<ZJK-lj5sbYC}3Y;1FJwFAVOtw-$C>iY;;(kF{o
zS2t@XR;QQc)zwywBHD<BarzL+b0or>ZYdMimsr?SP?^9w|4mAy332c#qZi(lmR#NV
zx6#)+f>_h^(~2lxn*=+QE%`&5AAX2Bduc+1QiAU-xbyo@bcu#nK=Lj;45Fy0$qzCB
zkrHl;xk+^#-13`~hNyQvlHt3|uc(hW_?{DGzIsD~+n|Hbfhy#0;1iEKnbohMwEMnr
z=u;5B1pFSIT|!!CQ{o^|$G{nM>wCNN`Xm8&<0s?KDmcRewJn}N__Fsgw>AgW1J%+n
z-RUQQsw(}L>!}X~<NBvg*@H=Js__OXq&gl0!AAFomUE;=k>&<!j=}#NGmoJWJ{n3X
zA>ltmr;#_{G7ZqV%C5FryiZ+6DY->ShD6N6(;>GuiLc|kpY)X-mGg-DZG=n*8)MNJ
zz>5qs{t<O}2ykS5-bwm*q<Hy@*-d#{PJwcuQ5#>!$cHvC*so3chIyQfV<i^NGB@c^
z{#?c#8a_X1-8DecwIlyYl!ERM&LLBhO*&SF-DRj+#$jblQwAcWKQ4V$>3>P6uVk8V
zsziVibtnTu1f7A6yWXDrn)<-<29Y%*;RQPo9;s4_e-$jjV^r<>SG>A6=gzyH_adH#
YJLj%=w(GbD-DUSB#cF3N1jc{-F9sX6`v3p{

literal 0
HcmV?d00001

diff --git a/base/glacier/src/__pycache__/keras_models.cpython-310.pyc b/base/glacier/src/__pycache__/keras_models.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bf46c95626d6ef877be005978743f9b0f69313b
GIT binary patch
literal 5537
zcmb7IOLH7o74CccIo+etXe`Ti5@Q1fG6h(XL_q}-$FYP%z*f0pIlxp>llI(}JmY!g
zcFVC{JylfMRq@8`VI?p1AMz7euwd0Is#whiHmq30eCOVt>6T&_xU1$)_vv$A=kcBI
z_Gn?jH*nqh=lQ+hdxr53dMIBe9xme-{|=2X1nV0E)?&=47=jD)vC-lcOPmyra38al
z38^P4kXm9z_#%MJhU}E6iUr&|;<TuXCEU59Ax=Qg6U#S^=9wu*H<~Q7??y6=JDeW#
z%fY{m-zI+X&(R1b_+w*&*@cCFGckHBHBz?Ah5eLGEIjknO3f7QBld_1=P94qsf~Hu
z?GD2Tvx}yo;cwy>e}aaF>T#NY2FGw#YCK{SrsO6zX$ki!PcdjN<JfMYTgZ>;rVNu;
zwx^dd$CON(Su*PHXI2~zqC+clcf0*0l5rMb|HG)?j=K+|%nv(B_g<KEN5jk=heC9R
zdsABTA<rt~(MYSwfCfE}rfErfn>qdP0S)GCj)wQvwzA6g@cs>`uWg~D-5rkilZ@Rz
zG~^1j|MT|hozWm#?T2@xcs078$Y`)XXnzsiUcDX0(du45>~tf!8q3bAa!z|N5>Y?C
zIDU}1zZ`b(ASs06*U&i3W7c!dt*@8O^1a0#zVZHkGK#eR#drFnPS}qxUCak~xmU&*
z^?!QBeaik%pB&e$z<Yk~Y2`iTsbE_!VqR%cL$N0*N~B9o70n@o=inhpuAEXt!RFmz
z6w2HMvQDZ?^rCued@~%3@p^A<OP(M(nO!%hNW<NVloe0nA-;g7uw0{L#fDYLE%PPz
z%!k4P?f6bOj^J=dIXuqX(S8EY$A_kl68l3H2=;YyXVb}oVLRy#qBx1hapn)(yHS|z
z%P5woX}RB`mHH#u-D~Rr3c|vl$9EKzeC^S$C_p^!1@-z0v$*^rjrcm6oeo)dc99kt
zT4nm*a6^vIGaD(Xve=Z%kn#gln9o>hBN3e;Pwk15SSf$Rwv10-pSYyav&pT5OHa+z
zAwAX4d1jQJ+}YFTuc<yB(%BIeq<GPzGSf*d;oY_6$4b-06Ywq9^oG8c63!J;$oyWA
z+NmdMSbZg}00tJfk?#Fbm~5;ca(N!x%xsZ77)R4T;9iB44m>gioJ9td6>2EaRs6{X
z++}V&+D!)G{X?b_P#F}$?%rV374q9qX;zi1C4k*fA(`1*(J+oOTZY5ENC#-+N|<!+
zY>(t1>~|k(w6Q*luHDM2Ic})X&D^W~FiE0e=4b$r-=`UT`2!mKO*E~AVY>swX>0gt
zEdsIHg67kb%p10K49f3NABy*kb>(;I-|zIpIPUHOYK}*fzd3(JffG|GHY|%b5K2|P
z!fLF}mU)eNxL@XVjsUv^l@}HfO4#@^npqL~()f}J2bB^}my##!iSg>Cga)QqR;A=E
z+`#vxQi2#P3JK~IU<btnQUV>Ga#fp@?=ve2hj(@m8k7!MT}H^lVLOgK-v@dk9SNDP
zHgytfr&`x;?p%jt8E>>zT@AG2xG94iZ>hr#XvY*|D}03mpf8k?mfn8@q)B)LsA<nM
z0D;=Pd>-&844GN%)|{2oe7Cb=N;ZJm%hCg?-K=LnF;w1ij8oXh%R%`)%(f#hqt&^x
zL3itDnvRZk!rrX<Q8WgCeH?X?k(5_Rw73I!+v>)N?B3o_A|Zc5olLojzuas>A+dn*
zDWG*|E3{TX9pvarhWIVs#|<=fM)q?Mg<KUrK=aEjSsFl|qZuemC1E}mKTjmNMbZz^
zw1NV0*IU&R<gRPi{)#l!;uep&?>gSZH8h@CbL&;43Ui)Ymi#qD9V%?Ii|~aam&_1<
zg@$}^1a*O|@)V@J2h`Q53a^1%P3&Qi%U#HUoijN}4=f-jA+(MD@8)`yxt@A+OwSZP
z5cZnVwNq0Bq6&o7D6bLL2ENt`X<{5W;Cjw8K5<i5p{x86_8@;u4F#wy#fwUvDaD6|
z(KWiG&{5baDUzD*G00T0BY#Rt6fYX}yuw+nI&r43-H(%j#yR;5(wjq^!namX;9Ksx
zBCcu$z)@)R(LJW_4buX&armunaqy~|X_*9f8tAG7bl0HPp>57C3Q5XI<;{PCn!uL8
zp_xCyj}>2rCv(~gpMfjGOFH*JaDh%9tWN&bGE-FMez!{Uqq;U`vf&Lgwc!mXb$U$r
zXN;cvi8*m6-lUSR#7irHSB904M;TU99^IeM>R}}Th+TOd<uw=b7im_p;svVz*d1w7
zoZ3An_0s_Hv-s4URKKyFg?v5r(ry>#>p8kxWtjuw2E{sjv4rSu%xsr(;Ub#<qDJ-c
zbYlLlruYKu`QQu7g*0E|LR!O#iq^Q8!k$=xomN#A1nHvsual;-b&%HgIPATYE<v}N
zF78^$f|YFzFDd~j*<ZwB6^l^iWcnPJJVScK%H$hpvg&Bqe}MX}xG9ci)(~eE`5U~N
zs+3WU${@;{)}m4`qP!UtXsihc(P2sPDd`la5($$iQrcEzRh}jJ+tes%Zj`d@#Ed(%
ziz!MuM&aJ_bk&m^8Edzyc?oQb?qKc6(|W6x?@CkISbm^-=faKFQZYrY(Q1@LIgc)<
z^hT@TFQRp_Ga8IXaW{$DCB!~ni<dWNS|#NEBUTb8Xc|tz_K%*WYP`XZoC{uZMrc@N
zt!1;umT@|Bn59l=&ezM9gR<p3_iPXH2B?(0iHQ|5PN^sysS-p$=eY7_jw>@%QQVv|
z0u=(NZRS}^h`@UmRSZZ9yhm1|vQyqDPTm7Yr>_QzPXo(m@`E`$r*RIv84p*ZI*Mt(
zY9AN5|4MySh8HXH!Q~y~8!}R@B0yFzih5q7=-g55EubE`VhMcTOFiUbW4owG^o3)8
z*iGWSKmUoY>3ft|GEAZ-*Ck2i-zxcGhZ-_@=HH3h=(P)q4mCOjWR3hgK^KE{iMK~`
zYQQT~s^t3kP!u;J8O_>@>Ke|Tax_LjXYP$i4#0}knLwAjpV34sAM{7J!~XmC@Lgsv
zqOS=0G!TI4cd+gue!4Kule$?c@>bQtR(05>!q)CY{eE0zZN?s`;wayxMISpAFHw)H
zo7<an^8Z(8sq-=7ALPHzPooyr*lAGzSz81B&yN6~LHo!NfFgL#bN-G8c#9pNGQyjj
z?oo;I4?kcrI7{kEsaOnQu&$f7swqsN@~o0l^lBzMucNBhyfPf1S03@Ripo^g^Gke*
z6?{ppO!t{(4LhKdt71ZGt!~P|j__qm<qJig*Q?-*mrL9zkGzF#c{GybTiCWP1q!`?
zPme8Xs74^RiesjV9u+wL3vrwXQRcrt|3D=_rC}69Dg{CosE-92-tkf4wnpy=AG-7l
zbyUo3IySE9zjgh$(W>l)V@;@9vnr8mtMljacqx&nHp1p#SWZlJtO|Z!V-3|3SEtir
zZ5AY5C~V^hpuR<@w#j$&Kg%C=T2eALE18dgk4F;6_b;?n=s>DYn~GN}I;K(dGkQV8
rm)2P@KJOjuVZR?;LdqI3ePF2?oO?FE;6LYQJ=;IW&-%81rF!nalhWr*

literal 0
HcmV?d00001

diff --git a/base/glacier/src/__pycache__/keras_models.cpython-38.pyc b/base/glacier/src/__pycache__/keras_models.cpython-38.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..9a9fb68ccf9d0edace734abc63bb736874d6cf44
GIT binary patch
literal 5621
zcmb7I-)|dP6`p(Nr^k-txM{W(3%isBYC)S#vRVWz-L?r`7TT!VrXZ|cd8fXU)UG}D
z-kFrPnn*~kw0#NU5%yt~K1SkC;1A#hi3gtM35h2^@x}uWgzwxt_Bc)obfO&3oH_UB
z`ObIFox5{$zJcq{-9K%YziAkMr-$jw#KTqm$G0JbAz07ovnFGP;KF=nG`Tn{9N|7=
zO;dQH1dSz@gfGg_*y1%&5p%e6#5qwD3%GMdU7UfHCl>D*jq@Y)YBX48KL}+Iw>drL
zmxEs$|LgdVe*htv;LnUbj4Lesnu*b6sgbg6F6@1_XW^NrR%)h@PuWu@oPEA$r#8lM
zH`@$7j4v97y1#?}_+tp_s>^8r>Kxr!sqvKUF{L-LKufs$JVmFuj$^TfZK0pBO&JC+
zZH}&Dj1g^RX34O(n^|$t4-c)(-R|^~P{vsq>yN@-EABiAGe2l2orgiv8AO>o420-J
zJ0qI&A<s&~!9bhIGIjbUM8lHwHgkHxW9rOVA4CsVHnP&K;L&ZEuWX>9-HC>~NyZ)>
z8gdEN|GKq&f6xz?d%=S+UJf56GVJg6mwRC#BaC+`>GZ=(aVR@se5pOyjWEq^%xgF3
z#mlj5FDr+&`U4U6;>*Lwnfv3YjR#3^ioXS6Fo$tN{T*4H|JyPbPtiPi{n~Ca2zA@b
z@AU?43~=Rg-r38&HpBq`(`)WN`#*g)YgvMK{TvC(yULU0O*xNorA30UDG5c=B~gJm
zWbiCJD9P0$LL7Y3iLj-)i)4*V7wARh?(j~~AL8}S%7#2cdUA^vuaSkj5vm<Mi--7K
zh?6^7pWRXGgvNZy4(FX?%65D|7>0m|qaYGz?qD|oR^mfb<I4U}LC3yD&TcqaIcgC)
z;v^i#nIE;b!yws}VJy#Kj`C{|nLm)7otB1NIVe0lyT<aA_dPU>6&rD{?A2;)iOcU$
zk6(e<YSVs?FS4SoSKRPd+z=FUE<g~gxHsiRX!(ID%oi-R5xP!frS_haSSf$XHjGc+
z-E+yJYm=ji#rDnAAv@L1d0|XFx#OqL-cfBl5P%~}AcdkuX>5~P!h2xLKPXERPvFDc
z(u)?gmYA{7Lg#nOshxVFirJUa5{P4d69my41j*X!A(wArnVBt;$HQ>+Yuqc4X(-ZG
zz*)3`vP6QYK_ODk;VyII!FJLQ9vw0jo@zs(?CkUh9U;FClSW0kS|SA)3Z$965k_&C
z*)oWBLJisa^&n~A-yFz((Ca+W{A7I;-n^Sta;8yW&fFWlAW6a~b2L3jqJxGnzfGNA
zglNu1tu{ESr3tG!4~}aI8c%C7FKTHF$_vznA|PWO`3?H@+r1!;JKLR5&Z5a*pS&XA
z!~{jds&k9spI6uttFjtjWL53~T#LNM<rP?*nsz9J<Es$kwDY;~Ii(s(KAuiK&)IY1
z)sqi(OjuUQ=WX1;_fyFS7%Wl`G8>2p=?7Y(A)a!T&6MvmD+!|eTYv_ILspX^qA+U3
z;b*(xQ-mXN)s2P@V(nDx=AEru&@AGO-c?squbAD+a?ZfifKq=<7&G`1Ut&+*nhIKa
ze}cA2d5pSg&C~(W+$7;1@aGJXS*+F^C*XXw<Mc~=fNh`F9#FPtUHiGA;+CVIVt>4x
zrr*bCTk<NTj+HgKTZL#i8tuf!S>>Z}2m<?g*iHsgUMJQ37U*rG6DP8BZ#M~re1{sD
zaveYUwh6_}0-H~O*3ee`t*E=4vny@HFYrFDLm0pw?VrP{M_eVu|FV-)9mqFm1d3Am
z76c|g2_(5e+7BU`<$`fno0TcbUDdAr30bPi&Cj^+7T(0P6^7}#)k+QF!VUk(vE)yo
zYg4itU&JtkUD}BFj}YXAW4sG)m6xIAUGT2vRrn3{*q$9N<$4!-u;*A$+5-#RNgQpX
z{gd3TG_g}}X6#JigJXZEZ0*z(Wl;ggYM$2|YlC5{g*Gt`9Mp%-3%=*3uHskuU93S~
zB0&(9Q;1Q)Goo$LM7lzE1RlknlCacpXGl|_M^dU*h|#>~74B-*sLl#ny*TM>rjtJ;
z;|a+rhHI7!hRdB-AgfwX9D%Ec<}o!l%sR7JjdA#mq-9o3iwG~Lx(b&!Vb!K(jxPcw
zMWyoQKT)n=FTq4(f1;*VH8wn%m%8v7>T7sOM<0q}@RNsKC;w`lDM}N+TT}Yuwl+qx
z;SDpj;SDEsx=i@zjjsE#x##YAd!>9PURnadGR(wRn&Z9ulTkg)q>N&3dLGkrF7&5a
zRxsl^ssOM$vMkf=x=!k+W#DIi-`uO5n9p23pW4)F=O*(xzFM_22UHyt^YFz2&|M$z
zUCM>~!2T_hU#Nb4JfE4ot*V-Vb$!$fi-k6y<y>0D8H{E*pJGQb2Rp5(I4Gy{>bFLg
zYPZX2ZHHsW7t#gTR?_)x3vsZtsVPQ<z{_e|RY|CnGCIN~&jUC*6kf+&RvARS$H?KT
zV#R^Zig3b_Kf|k$3K}I;3{m7X=aqSZ^G3N~WL<<%kt_+JlFoiAlrV@YOk1j8m0u<O
z_em&vu203;nQ{HlDu$SfG6H<l)0t1MWvtb#<Y}-aI{lSnPpi#pzA9bL#_|KzI#;eW
z7m6Wri)MXFmDkeKLA};2Y8cTx+aC0XgSeA~ttr-?&Bu%DW2-6l{tadle+gkYC+q%G
zPFuXn>)?smbJ+>a$y0=NT00a{Y?1l8KXt})tVN#pe&pC5!ym76c?UC8EIOhDax9}z
zxjNyXi#Z348Aw&~6g|i$V0bf+aAFVMwJ5tlQ{)CjFzNt`F;&5LQPI;k2~~ZA_s9B!
zNi9$P9C+iZUG?gyy8Kn!xZsIbYNObon30dl-$7&|cGWD(i2r#}%kvqXQYzg!<Sth%
z;B?@n9-_9sS!6Hzg0dTRl6dEf-_tdEpF&UuN!Z{zebE_8E|b_IL0g{r_rn%iZKI%v
z8J!ujdj1`vgU&is<%e=)z$+ul`uea@Btj}l8r9QsAkMCGFa-27_jV}zsG-yeLZ`;>
z(LhTd^al5W-nEDL?z0opR|<W0DC1nv#=Il^>m)h}|3;~ZaFsWkm8eBYu+<KGy|{?)
zj6GI~RQ`x2t#W6PKo_XRjrGm-$x-1iu!^a&GJw@OjtVt)4#=+ZX<5Z#K^+)Ah260O
z10i@)6a5Vj@D@8j7KJxC-6MbIAC^$@;N+>Zs;YR1t#yvIRsLa$zsFgZ(5vg-NzPTR
zCRyYFt#ZVVvoBNm*Pp7bSW#Q4ndvs;s9^_m&Q(>DnyZ^KR8shIrs9Q==XFc?qUKT^
zl_PI%a|TKBZ7f@V5K%n;OL}aQplkwc6^BxV9t9l#RK};`Df6#QK7h$js2d{qA`di5
zSy_Er(1`$_Ans|N5Ah*QzfcFu%%(%;ivC&EKWoj>PB7HPS96?Ca%**3oyE&k!PNq6
z{(+gql<^E!K`mbeT=c0@%c=%R2k$p=U{GH!lx*-V{mb%49hMY~SdI_C#{-GO{;%||
z(D7BBO;ru9N;l1-pVABJerlctxV`tVhTUFx1tDw1ix3spQ_wb!{0rV2{ssTK|Hgj+
DA-eIR

literal 0
HcmV?d00001

diff --git a/base/glacier/src/_guided.py b/base/glacier/src/_guided.py
new file mode 100755
index 000000000..f2805e0eb
--- /dev/null
+++ b/base/glacier/src/_guided.py
@@ -0,0 +1,358 @@
+import warnings
+
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+
+from wildboar.explain import IntervalImportance
+from .LIMESegment.Utils.explanations import LIMESegment
+
+
+class ModifiedLatentCF:
+    """Explanations by generating a counterfacutal sample in the latent space of
+    any autoencoder.
+
+    References
+    ----------
+    Learning Time Series Counterfactuals via Latent Space Representations,
+    Wang, Z., Samsten, I., Mochaourab, R., Papapetrou, P., 2021.
+    in: International Conference on Discovery Science, pp. 369–384. https://doi.org/10.1007/978-3-030-88942-5_29
+    """
+
+    def __init__(
+        self,
+        probability=0.5,
+        *,
+        tolerance=1e-6,
+        max_iter=100,
+        optimizer=None,
+        autoencoder=None,
+        pred_margin_weight=1.0,  # weighted_steps_weight = 1 - pred_margin_weight
+        step_weights="local",
+        random_state=None,
+    ):
+        """
+        Parameters
+        ----------
+        probability : float, optional
+            The desired probability assigned by the model
+
+        tolerance : float, optional
+            The maximum difference between the desired and assigned probability
+
+        optimizer : keras.optimizers.Optimizer, optional
+            Optimizer with a defined learning rate
+
+        max_iter : int, optional
+            The maximum number of iterations
+
+        autoencoder : keras.Model, optional
+            The autoencoder for the latent representation
+
+            - if None the sample is generated in the original space
+            - if given, the autoencoder is expected to have `k` encoder layers
+              and `k` decoder layers.
+        """
+        self.optimizer_ = (
+            tf.optimizers.Adam(learning_rate=1e-4) if optimizer is None else optimizer
+        )
+        self.mse_loss_ = keras.losses.MeanSquaredError()
+        self.probability_ = tf.constant([probability])
+        self.tolerance_ = tf.constant(tolerance)
+        self.max_iter = max_iter
+        self.autoencoder = autoencoder
+
+        # Weights of the different loss components
+        self.pred_margin_weight = pred_margin_weight
+        self.weighted_steps_weight = 1 - self.pred_margin_weight
+
+        self.step_weights = step_weights
+        self.random_state = random_state
+
+    def fit(self, model):
+        """Fit a new counterfactual explainer to the model
+
+        Parameters
+        ----------
+
+        model : keras.Model
+            The model
+        """
+        if self.autoencoder:
+            (
+                encode_input,
+                encode_output,
+                decode_input,
+                decode_output,
+            ) = extract_encoder_decoder(self.autoencoder)
+            self.decoder_ = keras.Model(inputs=decode_input, outputs=decode_output)
+            self.encoder_ = keras.Model(inputs=encode_input, outputs=encode_output)
+        else:
+            self.decoder_ = None
+            self.encoder_ = None
+        self.model_ = model
+        return self
+
+    def predict(self, x):
+        """Compute the difference between the desired and actual probability
+
+        Parameters
+        ----------
+        x : Variable
+            Variable of the sample
+        """
+        if self.autoencoder is None:
+            z = x
+        else:
+            z = self.decoder_(x)
+
+        return self.model_(z)
+
+    # The "pred_margin_loss" is designed to measure the prediction probability to the desired decision boundary
+    def pred_margin_mse(self, prediction):
+        return self.mse_loss_(self.probability_, prediction)
+
+    # An auxiliary MAE loss function to measure the proximity with step_weights
+    def weighted_mae(self, original_sample, cf_sample, step_weights):
+        return tf.math.reduce_mean(
+            tf.math.multiply(tf.math.abs(original_sample - cf_sample), step_weights)
+        )
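+
+    # Note: timesteps assigned weight 0 (the segments masked by
+    # `get_local_weights` / `get_global_weights` below) drop out of this
+    # penalty, so the search can change them freely, while weight-1 steps
+    # are penalized for deviating from the original sample.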
+
+    # An auxiliary normalized L2 loss function to measure the proximity with step_weights
+    def weighted_normalized_l2(self, original_sample, cf_sample, step_weights):
+        var_diff = tf.math.reduce_variance(original_sample - cf_sample)
+        var_orig = tf.math.reduce_variance(original_sample)
+        var_cf = tf.math.reduce_variance(cf_sample)
+
+        normalized_l2 = 0.5 * var_diff / (var_orig + var_cf)
+        return tf.math.reduce_mean(
+            tf.math.multiply(
+                normalized_l2,
+                step_weights,
+            )
+        )
+
+    # additional input of step_weights
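+    # Combined objective (as implemented below):
+    #     L = w * MSE(p_desired, f(cf)[target]) + (1 - w) * weighted_MAE(x, cf; step_weights)
+    # with w = self.pred_margin_weight and 1 - w = self.weighted_steps_weight.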
+    def compute_loss(self, original_sample, z_search, step_weights, target_label):
+        loss = tf.zeros(shape=())
+        decoded = self.decoder_(z_search) if self.autoencoder is not None else z_search
+        pred = self.model_(decoded)[:, target_label]
+
+        pred_margin_loss = self.pred_margin_mse(pred)
+        loss += self.pred_margin_weight * pred_margin_loss
+
+        weighted_steps_loss = self.weighted_mae(
+            original_sample=tf.cast(original_sample, dtype=tf.float32),
+            cf_sample=tf.cast(decoded, dtype=tf.float32),
+            step_weights=tf.cast(step_weights, tf.float32),
+        )
+        # weighted_steps_loss = self.weighted_normalized_l2(
+        #     original_sample=tf.cast(original_sample, dtype=tf.float32),
+        #     decoded=tf.cast(decoded, dtype=tf.float32),
+        #     step_weights=tf.cast(step_weights, tf.float32)
+        # )
+        loss += self.weighted_steps_weight * weighted_steps_loss
+
+        return loss, pred_margin_loss, weighted_steps_loss
+
+    # TODO: compatible with the counterfactuals of wildboar
+    #       i.e., define the desired output target per label
+    def transform(self, x, pred_labels):
+        """Generate counterfactual explanations
+
+        x : array-like of shape [n_samples, n_timestep, n_dims]
+            The samples
+        """
+
+        result_samples = np.empty(x.shape)
+        losses = np.empty(x.shape[0])
+        # `weights_all` needed for debugging
+        weights_all = np.empty((x.shape[0], 1, x.shape[1], x.shape[2]))
+
+        for i in range(x.shape[0]):
+            if i % 25 == 0:
+                print(f"{i+1} samples been transformed.")
+
+            if isinstance(self.step_weights, np.ndarray):  # "global" or "uniform"
+                step_weights = self.step_weights
+            elif self.step_weights == "local":
+                # ignore warning of matrix multiplication, from LIMESegment: `https://stackoverflow.com/questions/29688168/mean-nanmean-and-warning-mean-of-empty-slice`
+                # ignore warning of scipy package warning, from LIMESegment: `https://github.com/paulvangentcom/heartrate_analysis_python/issues/31`
+                with warnings.catch_warnings():
+                    warnings.simplefilter("ignore", category=RuntimeWarning)
+                    warnings.simplefilter("ignore", category=UserWarning)
+                    step_weights = get_local_weights(
+                        x[i],
+                        self.model_,
+                        random_state=self.random_state,
+                        pred_label=pred_labels[i],
+                    )
+            else:
+                raise NotImplementedError(
+                    "step_weights not implemented, please choose 'local', 'global' or 'uniform'."
+                )
+
+            # print(step_weights.reshape(-1))
+
+            x_sample, loss = self._transform_sample(
+                x[np.newaxis, i], step_weights, pred_labels[i]
+            )
+
+            result_samples[i] = x_sample
+            losses[i] = loss
+            weights_all[i] = step_weights
+
+        print(f"{i+1} samples been transformed, in total.")
+
+        return result_samples, losses, weights_all
+
+    def _transform_sample(self, x, step_weights, pred_label):
+        """Generate counterfactual explanations
+
+        x : array-like of shape [n_samples, n_timestep, n_dims]
+            The samples
+        """
+        # TODO: check_is_fitted(self)
+        if self.autoencoder is not None:
+            z = tf.Variable(self.encoder_(x))
+        else:
+            z = tf.Variable(x, dtype=tf.float32)
+
+        it = 0
+        target_label = 1 - pred_label  # for binary classification
+
+        with tf.GradientTape() as tape:
+            loss, pred_margin_loss, weighted_steps_loss = self.compute_loss(
+                x, z, step_weights, target_label
+            )
+
+        if self.autoencoder is not None:
+            pred = self.model_(self.decoder_(z))
+        else:
+            pred = self.model_(z)
+            
+        # # uncomment for debug
+        # print(
+        #     f"current loss: {loss}, pred_margin_loss: {pred_margin_loss}, weighted_steps_loss: {weighted_steps_loss}, pred prob:{pred}, iter: {it}."
+        # )
+
+        # TODO: modify the loss to check both validity and proximity; how to design the condition here?
+        # while (pred_margin_loss > self.tolerance_ or pred[:, 1] < self.probability_ or weighted_steps_loss > self.step_tolerance_)?
+        # loss > tf.multiply(self.tolerance_rate_, loss_original)
+        #
+        while (
+            pred_margin_loss > self.tolerance_
+            or pred[:, target_label] < self.probability_
+        ) and (it < self.max_iter if self.max_iter else True):
+            # Get gradients of loss wrt the sample
+            grads = tape.gradient(loss, z)
+            # Update the weights of the sample
+            self.optimizer_.apply_gradients([(grads, z)])
+
+            with tf.GradientTape() as tape:
+                loss, pred_margin_loss, weighted_steps_loss = self.compute_loss(
+                    x, z, step_weights, target_label
+                )
+            it += 1
+
+            if self.autoencoder is not None:
+                pred = self.model_(self.decoder_(z))
+            else:
+                pred = self.model_(z)
+
+        # # uncomment for debug
+        # print(
+        #     f"current loss: {loss}, pred_margin_loss: {pred_margin_loss}, weighted_steps_loss: {weighted_steps_loss}, pred prob:{pred}, iter: {it}. \n"
+        # )
+
+        res = z.numpy() if self.autoencoder is None else self.decoder_(z).numpy()
+        return res, float(loss)
+
+
+def extract_encoder_decoder(autoencoder):
+    """Extract the encoder and decoder from an autoencoder
+
+    autoencoder : keras.Model
+        The autoencoder of `k` encoders and `k` decoders
+    """
+    depth = len(autoencoder.layers) // 2
+    encoder = autoencoder.layers[1](autoencoder.input)
+    for i in range(2, depth):
+        encoder = autoencoder.layers[i](encoder)
+
+    encode_input = keras.Input(shape=encoder.shape[1:])
+    decoder = autoencoder.layers[depth](encode_input)
+    for i in range(depth + 1, len(autoencoder.layers)):
+        decoder = autoencoder.layers[i](decoder)
+
+    return autoencoder.input, encoder, encode_input, decoder
+
+
+def get_local_weights(
+    input_sample, classifier_model, random_state=None, pred_label=None
+):
+    n_timesteps, n_dims = input_sample.shape  # n_dims=1
+    # for binary classification, default to 1
+    desired_label = int(1 - pred_label) if pred_label is not None else 1
+    seg_imp, seg_idx = LIMESegment(
+        input_sample,
+        classifier_model,
+        model_type=desired_label,
+        cp=10,
+        window_size=10,
+        random_state=random_state,
+    )
+
+    if desired_label == 1:
+        # calculate the threshold of masking, lower 25 percentile (neg contribution for pos class)
+        masking_threshold = np.percentile(seg_imp, 25)
+        masking_idx = np.where(seg_imp <= masking_threshold)
+    else:  # desired_label == 0
+        # calculate the threshold of masking, upper 25 percentile (pos contribution for neg class)
+        masking_threshold = np.percentile(seg_imp, 75)
+        masking_idx = np.where(seg_imp >= masking_threshold)
+
+    weighted_steps = np.ones(n_timesteps)
+    for start_idx in masking_idx[0]:
+        weighted_steps[seg_idx[start_idx] : seg_idx[start_idx + 1]] = 0
+
+    # need to reshape for multiplication in `tf.math.multiply()`
+    weighted_steps = weighted_steps.reshape(1, n_timesteps, n_dims)
+    return weighted_steps
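+
+# Worked example: with seg_imp = [0.3, -0.1, 0.2, -0.4] and desired_label == 1,
+# np.percentile(seg_imp, 25) = -0.175, so only segment 3 (importance -0.4) is
+# masked and its timesteps receive weight 0 (free to change during CF search).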
+
+
+def get_global_weights(
+    input_samples, input_labels, classifier_model, random_state=None
+):
+    n_samples, n_timesteps, n_dims = input_samples.shape  # n_dims=1
+
+    class ModelWrapper:
+        def __init__(self, model):
+            self.model = model
+
+        def predict(self, X):
+            p = self.model.predict(X.reshape(n_samples, n_timesteps, 1))
+            return np.argmax(p, axis=1)
+
+        def fit(self, X, y):
+            return self.model.fit(X, y)
+
+    clf = ModelWrapper(classifier_model)
+    clf.fit(input_samples.reshape(input_samples.shape[0], -1), input_labels)
+    
+    i = IntervalImportance(scoring="accuracy", n_intervals=10, random_state=random_state)
+    i.fit(clf, input_samples.reshape(input_samples.shape[0], -1), input_labels)
+
+    # calculate the threshold of masking, 75 percentile
+    masking_threshold = np.percentile(i.importances_.mean, 75)
+    masking_idx = np.where(i.importances_.mean >= masking_threshold)
+
+    weighted_steps = np.ones(n_timesteps)
+    seg_idx = i.intervals_
+    for start_idx in masking_idx[0]:
+        weighted_steps[seg_idx[start_idx][0] : seg_idx[start_idx][1]] = 0
+
+    # need to reshape for multiplication in `tf.math.multiply()`
+    weighted_steps = weighted_steps.reshape(1, n_timesteps, 1)
+    return weighted_steps
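+
+
+# Usage sketch (assumption, mirroring how `step_weights` is consumed): compute
+# the weights once over a batch, then pass them to ModifiedLatentCF:
+#
+#     w = get_global_weights(X, y, clf, random_state=39)  # shape (1, n_timesteps, 1)
+#     cf = ModifiedLatentCF(step_weights=w).fit(clf)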
diff --git a/base/glacier/src/gc_latentcf_search_1dcnn_function.py b/base/glacier/src/gc_latentcf_search_1dcnn_function.py
new file mode 100644
index 000000000..73b85c50c
--- /dev/null
+++ b/base/glacier/src/gc_latentcf_search_1dcnn_function.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+# coding: utf-8
+import logging
+import os
+from argparse import ArgumentParser
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from sklearn.metrics import balanced_accuracy_score, confusion_matrix
+from sklearn.model_selection import train_test_split, StratifiedKFold
+from tensorflow import keras
+from keras.utils import to_categorical
+from wildboar.datasets import load_dataset
+import pickle
+from wildboar.explain import *
+from .help_functions import (
+    ResultWriter,
+    conditional_pad,
+    # remove_paddings,
+    # evaluate,
+    # find_best_lr,
+    reset_seeds,
+    time_series_normalize,
+    upsample_minority,
+    # fit_evaluation_models,
+    # time_series_revert,
+)
+from .keras_models import *
+# from ._guided import get_global_weights
+
+class ModelWrapper:
+    def __init__(self, model):
+        self.model = model
+
+    def predict(self, X):
+        p = self.model.predict(X.reshape(X.shape[0], -1, 1))
+        return np.argmax(p, axis=1)  # return the predicted class index per sample
+
+    def fit(self, X, y):
+        return self.model.fit(X, y)
+
+def gc_latentcf_search_1dcnn(dataset, pos, neg, output, path, autoencoder="No"):
+    os.environ["TF_DETERMINISTIC_OPS"] = "1"
+    config = tf.compat.v1.ConfigProto()
+    config.gpu_options.allow_growth = True
+    session = tf.compat.v1.Session(config=config)
+    
+    logger = logging.getLogger(__name__)
+    print(f"Num GPUs Available: {len(tf.config.list_physical_devices('GPU'))}.")
+    numba_logger = logging.getLogger("numba")
+    numba_logger.setLevel(logging.WARNING)
+    # logger.info(f"LR list: {lr_list}.")  # for debugging
+    # logger.info(f"W type: {w_type}.")  # for debugging
+
+    RANDOM_STATE = 39
+    # PRED_MARGIN_W_LIST = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0] # TODO: write another script for ablation study
+    # pred_margin_weight = w_value
+    # logger.info(f"W value: {pred_margin_weight}.")  # for debugging
+    # logger.info(f"Tau value: {tau_value}.")  # for debugging
+
+    result_writer = ResultWriter(file_name=output, dataset_name=dataset)
+    print(f"Result writer is ready, writing to {output}...")
+    
+    # If the output file already exists, skip the header and append directly
+    if not os.path.isfile(output):
+        result_writer.write_head()
+    
+    X = dataset.iloc[:, :-1].values
+    y = dataset.iloc[:, -1].values
+    
+    pos_label, neg_label = 1, 0
+    y_copy = y.copy()
+    
+    if pos != pos_label:
+        y_copy[y == pos] = pos_label  # convert/normalize positive label to 1
+    if neg != neg_label:
+        y_copy[y == neg] = neg_label  # convert negative label to 0
+
+    # skf = StratifiedKFold(n_splits=0, shuffle=True, random_state=RANDOM_STATE)
+    fold_idx = 0
+    # for train_index, test_index in skf.split(X, y_copy):
+    
+    train_index, test_index = train_test_split(np.arange(X.shape[0]), test_size=0.8, random_state=42)
+    # lr_list_temp = lr_list
+
+    X_train, X_test = X[train_index], X[test_index]
+    y_train, y_test = y_copy[train_index], y_copy[test_index]
+
+    # Get 50 samples for CF evaluation if the test size is at least 50
+    test_size = len(y_test)
+    if test_size >= 50:
+        try:
+            test_indices = np.arange(test_size)
+            _, _, _, rand_test_idx = train_test_split(
+                y_test,
+                test_indices,
+                test_size=50,
+                random_state=RANDOM_STATE,
+                stratify=y_test,
+            )
+        except ValueError:
+            # e.g. "The train_size = 1 should be greater or equal to the number of classes = 2"
+            rand_test_idx = np.arange(test_size)
+
+    else:
+        rand_test_idx = np.arange(test_size)
+
+    pd.DataFrame(X_test[rand_test_idx]).to_csv(path + "/X_test.csv", index=None)
+    np.save(path + "/y_test", (y_test[rand_test_idx]))
+    np.save(path + "/y_train", (y_train))
+
+    X_train, X_val, y_train, y_val = train_test_split(
+        X_train,
+        y_train,
+        test_size=0.125,
+        random_state=RANDOM_STATE,
+        stratify=y_train,
+    )
+
+    # Upsample the minority class
+    y_train_copy = y_train.copy()
+    X_train, y_train = upsample_minority(
+        X_train, y_train, pos_label=pos_label, neg_label=neg_label
+    )
+    if y_train.shape != y_train_copy.shape:
+        print(
+            f"Data upsampling performed, current distribution of y_train: \n{pd.value_counts(y_train)}."
+        )
+    else:
+        print(
+            f"Current distribution of y_train: \n{pd.value_counts(y_train)}."
+        )
+
+    nb_classes = len(np.unique(y_train))
+    y_train_classes, y_val_classes, y_test_classes = (
+        y_train.copy(),
+        y_val.copy(),
+        y_test.copy(),
+    )
+    y_train, y_val, y_test = (
+        to_categorical(y_train, nb_classes),
+        to_categorical(y_val, nb_classes),
+        to_categorical(y_test, nb_classes),
+    )
+
+    # ### 1.1 Normalization - fit scaler using training data
+    n_training, n_timesteps = X_train.shape
+    n_features = 1
+
+    X_train_processed, trained_scaler = time_series_normalize(
+        data=X_train, n_timesteps=n_timesteps
+    )
+    X_val_processed, _ = time_series_normalize(
+        data=X_val, n_timesteps=n_timesteps, scaler=trained_scaler
+    )
+    X_test_processed, _ = time_series_normalize(
+        data=X_test, n_timesteps=n_timesteps, scaler=trained_scaler
+    )
+
+    # add extra padding zeros if n_timesteps cannot be divided by 4, required for 1dCNN autoencoder structure
+    X_train_processed_padded, padding_size = conditional_pad(X_train_processed)
+    X_val_processed_padded, _ = conditional_pad(X_val_processed)
+    X_test_processed_padded, _ = conditional_pad(X_test_processed)
+    n_timesteps_padded = X_train_processed_padded.shape[1]
+    print(
+        f"Data pre-processed, original #timesteps={n_timesteps}, padded #timesteps={n_timesteps_padded}."
+    )
+
+    # ## 2. LatentCF models
+    # reset seeds for numpy, tensorflow, python random package and python environment seed
+    reset_seeds()
+
+    ###############################################
+    # ## 2.0 1DCNN classifier
+    ###############################################
+    # ### 1DCNN classifier
+    classifier = Classifier(n_timesteps_padded, n_features, n_output=2)
+
+    optimizer = keras.optimizers.legacy.Adam(learning_rate=0.0001)
+    classifier.compile(
+        optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
+    )
+
+    # Define the early stopping criteria
+    early_stopping_accuracy = keras.callbacks.EarlyStopping(
+        monitor="val_accuracy", patience=30, restore_best_weights=True
+    )
+
+    # Train the model
+    reset_seeds()
+    logger.info("Training log for 1DCNN classifier:")
+    classifier_history = classifier.fit(
+        X_train_processed_padded,
+        y_train,
+        epochs=150,
+        batch_size=32,
+        shuffle=True,
+        verbose=True,
+        validation_data=(X_val_processed_padded, y_val),
+        callbacks=[early_stopping_accuracy],
+    )
+    print(classifier_history)
+    y_pred = classifier.predict(X_test_processed_padded)
+    y_pred_classes = np.argmax(y_pred, axis=1)
+    
+    # clf = ModelWrapper(classifier)
+    # i = IntervalImportance(scoring="accuracy", n_intervals=10, random_state=RANDOM_STATE)
+    # i.fit(clf, X_train_processed_padded.reshape(X_train_processed_padded.shape[0], -1), y_train_classes)
+    # pickle.dump(i, open(path+f"/interval_importance.sav", "wb"))
+
+    acc = balanced_accuracy_score(y_true=y_test_classes, y_pred=y_pred_classes)
+    print(f"1dCNN classifier trained, with test accuracy {acc}.")
+
+    confusion_matrix_df = pd.DataFrame(
+        confusion_matrix(
+            y_true=y_test_classes, y_pred=y_pred_classes, labels=[1, 0]
+        ),
+        index=["True:pos", "True:neg"],
+        columns=["Pred:pos", "Pred:neg"],
+    )
+    print(f"Confusion matrix: \n{confusion_matrix_df}.")
+
+    # ###############################################
+    # # ## 2.1 CF search with 1dCNN autoencoder
+    # ###############################################
+    # # ### 1dCNN autoencoder
+    if autoencoder=="Yes":
+        autoencoder = Autoencoder(n_timesteps_padded, n_features)
+        optimizer = keras.optimizers.legacy.Adam(learning_rate=0.0005)
+        autoencoder.compile(optimizer=optimizer, loss="mse")
+
+        # Define the early stopping criteria
+        early_stopping = keras.callbacks.EarlyStopping(
+            monitor="val_loss", min_delta=0.0001, patience=5, restore_best_weights=True
+        )
+        # Train the model
+        reset_seeds()
+        logger.info("Training log for 1dCNN autoencoder:")
+        autoencoder_history = autoencoder.fit(
+            X_train_processed_padded,
+            X_train_processed_padded,
+            epochs=50,
+            batch_size=32,
+            shuffle=True,
+            verbose=True,
+            validation_data=(X_val_processed_padded, X_val_processed_padded),
+            callbacks=[early_stopping],
+        )
+
+        ae_val_loss = np.min(autoencoder_history.history["val_loss"])
+        with open(path + "/autoencoder.sav", "wb") as f:
+            pickle.dump(autoencoder, f)
+        logger.info(f"1dCNN autoencoder trained, with validation loss: {ae_val_loss}.")
+
+    with open(path + "/classifier.sav", "wb") as f:
+        pickle.dump(classifier, f)
+    
+    # logger.info(
+    #     f"Done for CF search [No autoencoder], pred_margin_weight={pred_margin_weight}."
+    # )
+    # logger.info("Done.")
+
+# python src/gc_latentcf_search.py --dataset TwoLeadECG --pos 1 --neg 2 --output twoleadecg-outfile.csv --n-lstmcells 8 --w-type unconstrained --w-value 1 --tau-value 0.5 --lr-list 0.0001 0.0001 0.0001;
+# gc_latentcf_search_1dcnn("TwoLeadECG", 1, 2, "twoleadecg-outfile.csv", 8, "unconstrained", 1, 0.5, [0.0001, 0.0001, 0.0001], "/home/lakes/extremum/base/glacier/src")
diff --git a/base/glacier/src/glacier_compute_counterfactuals.py b/base/glacier/src/glacier_compute_counterfactuals.py
new file mode 100644
index 000000000..4caf3d39d
--- /dev/null
+++ b/base/glacier/src/glacier_compute_counterfactuals.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import numpy as np
+import pandas as pd
+from tensorflow import keras
+from keras.utils import to_categorical
+from sklearn.metrics import balanced_accuracy_score, confusion_matrix
+
+from .help_functions import (
+    conditional_pad,
+    remove_paddings,
+    find_best_lr,
+    reset_seeds,
+    time_series_normalize,
+    time_series_revert,
+)
+from .keras_models import *
+from ._guided import get_global_weights
+
+
+def gc_compute_counterfactuals(
+    data_path, output_path, type, l, w_value, tau_value, classifier, autoencoder=None
+):
+    testdata = pd.read_csv(data_path + "/X_test.csv", index_col=None)
+    X = pd.DataFrame(testdata).values
+    y = np.load(data_path + "/y_test.npy")
+    y_train = np.load(data_path + "/y_train.npy")
+
+    # X, y = X_test.iloc[:, :-1].values, X_test.iloc[:, -1].values
+
+    n_training, n_timesteps = X.shape
+    n_features = 1
+    x_processed, trained_scaler = time_series_normalize(data=X, n_timesteps=n_timesteps)
+
+    # add extra padding zeros if n_timesteps cannot be divided by 4, required for 1dCNN autoencoder structure
+    x_processed_padded, padding_size = conditional_pad(x_processed)
+    n_timesteps_padded = x_processed_padded.shape[1]
+
+    # ## 2. LatentCF models
+    # reset seeds for numpy, tensorflow, python random package and python environment seed
+    reset_seeds()
+
+    ###############################################
+    # ## 2.0 1DCNN classifier
+    ###############################################
+    # ### 1DCNN classifier
+    print("\t\t Y test: ", y)
+
+    y_pred = classifier.predict(x_processed_padded)
+    y_pred_classes = np.argmax(y_pred, axis=1)
+    acc = balanced_accuracy_score(y_true=y, y_pred=y_pred_classes)
+    np.save(output_path + "/y_pred.npy", y_pred_classes)
+
+    print("\t\t Y pred: ", y_pred_classes)
+    nb_classes = len(np.unique(y))
+    y_classes = y.copy()
+    y_classes = to_categorical(y_classes, nb_classes)
+
+    # ### 2.0.1 Get `step_weights` based on the input argument
+    if type == "global":
+        step_weights = get_global_weights(
+            x_processed_padded,
+            y_classes,
+            classifier,
+            random_state=39,
+        )
+    elif type == "uniform":
+        step_weights = np.ones((1, n_timesteps_padded, n_features))
+    elif type.lower() == "local":
+        step_weights = "local"
+    elif type == "unconstrained":
+        step_weights = np.zeros((1, n_timesteps_padded, n_features))
+    else:
+        raise NotImplementedError(
+            "A.w_type not implemented, please choose 'local', 'global', 'uniform', or 'unconstrained'."
+        )
+
+    pred_margin_weight = w_value
+
+    rand_X_test = x_processed_padded
+    rand_y_pred = y_pred_classes
+
+    # l = [0.0001, 0.0001, 0.0001]
+
+    lr_list3 = [l[0]] if l[0] is not None else [0.00001]
+
+    if autoencoder:
+        best_lr3, best_cf_model3, best_cf_samples3, _ = find_best_lr(
+            classifier,
+            X_samples=rand_X_test,
+            pred_labels=rand_y_pred,
+            autoencoder=autoencoder,
+            lr_list=lr_list3,
+            pred_margin_weight=pred_margin_weight,
+            step_weights=step_weights,
+            random_state=39,
+            padding_size=padding_size,
+            target_prob=tau_value,
+        )
+    else:
+        best_lr3, best_cf_model3, best_cf_samples3, _ = find_best_lr(
+            classifier,
+            X_samples=rand_X_test,
+            pred_labels=rand_y_pred,
+            lr_list=lr_list3,
+            pred_margin_weight=pred_margin_weight,
+            step_weights=step_weights,
+            random_state=39,
+            padding_size=padding_size,
+            target_prob=tau_value,
+        )
+
+    # ### Evaluation metrics
+    # predicted probabilities of CFs
+    z_pred3 = classifier.predict(best_cf_samples3)
+    cf_pred_labels3 = np.argmax(z_pred3, axis=1)
+
+    np.save(output_path + "/cf_pred.npy", cf_pred_labels3)
+    print("\t\t CF pred: ", cf_pred_labels3)
+
+    # remove extra paddings after counterfactual generation in 1dCNN autoencoder
+    best_cf_samples3 = remove_paddings(best_cf_samples3, padding_size)
+    best_cf_samples3_reverted = time_series_revert(
+        best_cf_samples3, n_timesteps, n_features, trained_scaler
+    )
+
+    np.save(output_path + "/X_cf.npy", best_cf_samples3_reverted)
+
+    cf_dataframe = pd.DataFrame(best_cf_samples3_reverted.reshape(-1, n_timesteps))
+    cf_dataframe[len(cf_dataframe.columns)] = cf_pred_labels3
+    cf_dataframe.to_csv(output_path + "/cf_test.csv", header=None, index=None)
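+
+# Usage sketch (assuming `path` holds the artifacts written by
+# gc_latentcf_search_1dcnn: X_test.csv, y_test.npy, y_train.npy, classifier.sav):
+#
+#     import pickle
+#     with open(path + "/classifier.sav", "rb") as f:
+#         clf = pickle.load(f)
+#     gc_compute_counterfactuals(path, path, "local", [0.0001], 1.0, 0.5, clf)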
diff --git a/base/glacier/src/help_functions.py b/base/glacier/src/help_functions.py
new file mode 100755
index 000000000..a500ef78f
--- /dev/null
+++ b/base/glacier/src/help_functions.py
@@ -0,0 +1,449 @@
+import os
+import csv
+import random as python_random
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from sklearn.preprocessing import MinMaxScaler
+from sklearn.utils import resample, shuffle
+from sklearn.metrics import accuracy_score
+from sklearn.neighbors import LocalOutlierFactor, NearestNeighbors
+
+from ._guided import ModifiedLatentCF
+
+# from keras import backend as K
+
+
+class ResultWriter:
+    def __init__(self, file_name, dataset_name):
+        self.file_name = file_name
+        self.dataset_name = dataset_name
+
+    def write_head(self):
+        # write the head in csv file
+        with open(self.file_name, "w") as f:
+            writer = csv.writer(f)
+            writer.writerow(
+                [
+                    "dataset",
+                    "fold_id",
+                    "method",
+                    "classifier_accuracy",
+                    "autoencoder_loss",
+                    "best_lr",
+                    "proximity",
+                    "validity",
+                    "lof_score",
+                    "relative_proximity",
+                    "compactness",
+                    "pred_margin_weight",
+                    "step_weight_type",
+                    "threshold_tau",
+                ]
+            )
+
+    def write_result(
+        self,
+        fold_id,
+        method_name,
+        acc,
+        ae_loss,
+        best_lr,
+        evaluate_res,
+        pred_margin_weight=1.0,
+        step_weight_type="",
+        threshold_tau=0.5,
+    ):
+        (
+            proxi,
+            valid,
+            lof_score,
+            relative_proximity,
+            compactness,
+        ) = evaluate_res
+
+        with open(self.file_name, "a") as f:
+            writer = csv.writer(f)
+            writer.writerow(
+                [
+                    self.dataset_name,
+                    fold_id,
+                    method_name,
+                    acc,
+                    ae_loss,
+                    best_lr,
+                    proxi,
+                    valid,
+                    lof_score,
+                    relative_proximity,
+                    compactness,
+                    pred_margin_weight,
+                    step_weight_type,
+                    threshold_tau,
+                ]
+            )
+
+
+"""
+time series scaling
+"""
+
+
+def time_series_normalize(data, n_timesteps, n_features=1, scaler=None):
+    # reshape data to 1 column
+    data_reshaped = data.reshape(-1, 1)
+
+    if scaler is None:
+        scaler = MinMaxScaler(feature_range=(0, 1))
+        scaler.fit(data_reshaped)
+
+    normalized = scaler.transform(data_reshaped)
+
+    # return reshaped data into [samples, timesteps, features]
+    return normalized.reshape(-1, n_timesteps, n_features), scaler
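+
+# e.g., data of shape (n_samples, n_timesteps) is flattened to a single column,
+# min-max scaled to [0, 1] (fitting a new scaler unless one is supplied), and
+# returned with shape (n_samples, n_timesteps, 1) together with the scaler.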
+
+
+def time_series_revert(normalized_data, n_timesteps, n_features=1, scaler=None):
+    # reshape data to 1 column
+    data_reshaped = normalized_data.reshape(-1, 1)
+
+    reverted = scaler.inverse_transform(data_reshaped)
+
+    # return reverted data into [samples, timesteps, features]
+    return reverted.reshape(-1, n_timesteps, n_features)
+
+
+"""
+data pre-processing
+"""
+
+
+def conditional_pad(X):
+    num = X.shape[1]
+
+    if num % 4 != 0:
+        # find the next integer that can be divided by 4
+        next_num = (int(num / 4) + 1) * 4
+        padding_size = next_num - num
+        X_padded = np.pad(
+            X, pad_width=((0, 0), (0, padding_size), (0, 0))
+        )  # pad for 3d array
+
+        return X_padded, padding_size
+
+    # else return the original X
+    return X, 0  # padding size = 0
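+
+# e.g., 82 timesteps: 82 % 4 == 2, so two zero-valued steps are appended per
+# sample (padded shape (n, 84, 1), padding_size=2); a length divisible by 4,
+# such as 80, passes through unchanged.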
+
+
+def remove_paddings(cf_samples, padding_size):
+    if padding_size != 0:
+        # use np.squeeze() to cut the last time-series dimension, for evaluation
+        cf_samples = np.squeeze(cf_samples[:, :-padding_size, :])
+    else:
+        cf_samples = np.squeeze(cf_samples)
+    return cf_samples
+
+
+# Upsampling the minority class
+def upsample_minority(X, y, pos_label=1, neg_label=0, random_state=39):
+    # Get counts
+    pos_counts = pd.value_counts(y)[pos_label]
+    neg_counts = pd.value_counts(y)[neg_label]
+    # Divide by class
+    X_pos, X_neg = X[y == pos_label], X[y == neg_label]
+
+    if pos_counts == neg_counts:
+        # Balanced dataset
+        return X, y
+    elif pos_counts > neg_counts:
+        # Imbalanced dataset
+        X_neg_over = resample(
+            X_neg, replace=True, n_samples=pos_counts, random_state=random_state
+        )
+        X_concat = np.concatenate([X_pos, X_neg_over], axis=0)
+        y_concat = np.array(
+            [pos_label for i in range(pos_counts)]
+            + [neg_label for j in range(pos_counts)]
+        )
+    else:
+        # Imbalanced dataset
+        X_pos_over = resample(
+            X_pos, replace=True, n_samples=neg_counts, random_state=random_state
+        )
+        X_concat = np.concatenate([X_pos_over, X_neg], axis=0)
+        y_concat = np.array(
+            [pos_label for i in range(neg_counts)]
+            + [neg_label for j in range(neg_counts)]
+        )
+
+    # Shuffle the index after up-sampling
+    X_concat, y_concat = shuffle(X_concat, y_concat, random_state=random_state)
+
+    return X_concat, y_concat
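+
+# e.g., with 30 positive and 10 negative samples, the negatives are resampled
+# with replacement up to 30, concatenated with the positives, and the combined
+# 60 samples are shuffled before being returned.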
+
+
+"""
+deep models needed
+"""
+
+
+# Method: For plotting the accuracy/loss of keras models
+def plot_graphs(history, string):
+    plt.plot(history.history[string])
+    plt.plot(history.history["val_" + string])
+    plt.xlabel("Epochs")
+    plt.ylabel(string)
+    plt.legend([string, "val_" + string])
+    plt.show()
+
+
+# Method: Fix the random seeds to get consistent models
+def reset_seeds(seed_value=39):
+    # ref: https://keras.io/getting_started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
+    os.environ["PYTHONHASHSEED"] = str(seed_value)
+    # necessary for starting Numpy generated random numbers in a well-defined initial state.
+    np.random.seed(seed_value)
+    # necessary for starting core Python generated random numbers in a well-defined state.
+    python_random.seed(seed_value)
+    # necessary to make TensorFlow-generated random numbers deterministic
+    tf.random.set_seed(seed_value)
+
+
+"""
+evaluation metrics
+"""
+
+
+def fit_evaluation_models(n_neighbors_lof, n_neighbors_nn, training_data):
+    # Fit the LOF model for novelty detection (novelty=True)
+    lof_estimator = LocalOutlierFactor(
+        n_neighbors=n_neighbors_lof,
+        novelty=True,
+        metric="euclidean",
+    )
+    lof_estimator.fit(training_data)
+
+    # Fit an unsupervised 1NN with all the training samples from the desired class
+    nn_model = NearestNeighbors(n_neighbors=n_neighbors_nn, metric="euclidean")
+    nn_model.fit(training_data)
+    return lof_estimator, nn_model
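+
+# Presumed usage, matching `evaluate` below: fit one (LOF, 1NN) pair per class
+# (`n_lof` is a placeholder for the neighbour count):
+#
+#     lof_pos, nn_pos = fit_evaluation_models(n_lof, 1, X_train[y_train == 1])
+#     lof_neg, nn_neg = fit_evaluation_models(n_lof, 1, X_train[y_train == 0])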
+
+
+def evaluate(
+    X_pred_neg,
+    cf_samples,
+    pred_labels,
+    cf_labels,
+    lof_estimator_pos,
+    lof_estimator_neg,
+    nn_estimator_pos,
+    nn_estimator_neg,
+):
+    proxi = euclidean_distance(X_pred_neg, cf_samples)
+    valid = validity_score(pred_labels, cf_labels)
+    compact = compactness_score(X_pred_neg, cf_samples)
+
+    # TODO: add LOF and RP score for debugging training?
+    lof_score = calculate_lof(
+        cf_samples, pred_labels, lof_estimator_pos, lof_estimator_neg
+    )
+    rp_score = relative_proximity(
+        X_pred_neg, cf_samples, pred_labels, nn_estimator_pos, nn_estimator_neg
+    )
+
+    return proxi, valid, lof_score, rp_score, compact
+
+
+def euclidean_distance(X, cf_samples, average=True):
+    paired_distances = np.linalg.norm(X - cf_samples, axis=1)
+    return np.mean(paired_distances) if average else paired_distances
+
+
+def validity_score(pred_labels, cf_labels):
+    desired_labels = 1 - pred_labels  # for binary classification
+    return accuracy_score(y_true=desired_labels, y_pred=cf_labels)
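+
+# For intuition: a counterfactual is "valid" when its predicted label equals the
+# flipped original label; e.g. pred_labels=[0, 0, 1] with cf_labels=[1, 0, 0]
+# matches the desired labels [1, 1, 0] at two of three positions, so the
+# validity score is 2/3.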
+
+
+# originally from: https://github.com/isaksamsten/wildboar/blob/859758884677ba32a601c53a5e2b9203a644aa9c/src/wildboar/metrics/_counterfactual.py#L279
+def compactness_score(X, cf_samples):
+    # absolute tolerance atol=0.01, 0.001, OR 0.0001?
+    c = np.isclose(X, cf_samples, atol=0.01)
+
+    # return a positive compactness, instead of 1 - np.mean(..)
+    return np.mean(c, axis=(1, 0))
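+
+# For intuition: the score is the fraction of values left (numerically)
+# unchanged by the counterfactual, e.g. with the default atol=0.01:
+#
+#     c = compactness_score(np.zeros((1, 4)), np.array([[0.0, 0.005, 0.5, 1.0]]))
+#     # -> 0.5, since 2 of the 4 values are within tolerance of the original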
+
+
+# def sax_compactness(X, cf_samples, n_timesteps):
+#     from wildboar.transform import symbolic_aggregate_approximation
+
+#     X = symbolic_aggregate_approximation(X, window=window, n_bins=n_bins)
+#     cf_samples = symbolic_aggregate_approximation(
+#         cf_samples, window=window, n_bins=n_bins
+#     )
+
+#     # absolute tolerance atol=0.01, 0.001, OR 0.0001?
+#     c = np.isclose(X, cf_samples, atol=0.01)
+
+#     return np.mean(1 - np.sum(c, axis=1) / n_timesteps)
+
+
+def calculate_lof(cf_samples, pred_labels, lof_estimator_pos, lof_estimator_neg):
+    desired_labels = 1 - pred_labels  # for binary classification
+
+    pos_idx, neg_idx = (
+        np.where(desired_labels == 1)[0],  # pos_label = 1
+        np.where(desired_labels == 0)[0],  # neg_label = 0
+    )
+    # check that the index arrays are non-empty; `.any()` is the wrong test here,
+    # since it treats an array containing only index 0 as empty
+    if pos_idx.size > 0:
+        y_pred_cf1 = lof_estimator_pos.predict(cf_samples[pos_idx])
+        n_error_cf1 = y_pred_cf1[y_pred_cf1 == -1].size
+    else:
+        n_error_cf1 = 0
+
+    if neg_idx.size > 0:
+        y_pred_cf2 = lof_estimator_neg.predict(cf_samples[neg_idx])
+        n_error_cf2 = y_pred_cf2[y_pred_cf2 == -1].size
+    else:
+        n_error_cf2 = 0
+
+    lof_score = (n_error_cf1 + n_error_cf2) / cf_samples.shape[0]
+    return lof_score
+
+
+def relative_proximity(
+    X_inputs, cf_samples, pred_labels, nn_estimator_pos, nn_estimator_neg
+):
+    desired_labels = 1 - pred_labels  # for binary classification
+
+    nn_distance_list = np.array([])
+    proximity_list = np.array([])
+
+    pos_idx, neg_idx = (
+        np.where(desired_labels == 1)[0],  # pos_label = 1
+        np.where(desired_labels == 0)[0],  # neg_label = 0
+    )
+    # non-empty checks (`.any()` would wrongly treat an array holding only index 0 as empty)
+    if pos_idx.size > 0:
+        nn_distances1, _ = nn_estimator_pos.kneighbors(
+            X_inputs[pos_idx], return_distance=True
+        )
+        nn_distances1 = np.squeeze(nn_distances1, axis=-1)
+        proximity1 = euclidean_distance(
+            X_inputs[pos_idx], cf_samples[pos_idx], average=False
+        )
+
+        nn_distance_list = np.concatenate((nn_distance_list, nn_distances1), axis=0)
+        proximity_list = np.concatenate((proximity_list, proximity1), axis=0)
+
+    if neg_idx.size > 0:
+        nn_distances2, _ = nn_estimator_neg.kneighbors(
+            X_inputs[neg_idx], return_distance=True
+        )
+        nn_distances2 = np.squeeze(nn_distances2, axis=-1)
+        proximity2 = euclidean_distance(
+            X_inputs[neg_idx], cf_samples[neg_idx], average=False
+        )
+
+        nn_distance_list = np.concatenate((nn_distance_list, nn_distances2), axis=0)
+        proximity_list = np.concatenate((proximity_list, proximity2), axis=0)
+
+    # TODO: paired proximity score for (X_pred_neg, cf_samples), if not average (?)
+    # relative_proximity = proximity / nn_distances.mean()
+    relative_proximity = proximity_list.mean() / nn_distance_list.mean()
+
+    return relative_proximity
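+
+# For intuition: relative proximity is the mean input-to-counterfactual distance
+# divided by the mean distance from each input to its nearest training neighbor
+# of the desired class; values near (or below) 1 mean the counterfactuals are
+# about as close as real samples of the target class.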
+
+
+"""
+counterfactual model needed
+"""
+
+
+def find_best_lr(
+    classifier,
+    X_samples,
+    pred_labels,
+    autoencoder=None,
+    encoder=None,
+    decoder=None,
+    lr_list=[0.001, 0.0001],
+    pred_margin_weight=1.0,
+    step_weights=None,
+    random_state=None,
+    padding_size=0,
+    target_prob=0.5,
+):
+    # Search over the candidate learning rates for vanilla LatentCF
+    best_cf_model, best_cf_samples, best_cf_embeddings = None, None, None
+    best_losses, best_valid_frac, best_lr = 0, -1, 0
+
+    for lr in lr_list:
+        print(f"======================== CF search started, with lr={lr}.")
+        # Fit the LatentCF model
+        # TODO: fix the class name here: ModifiedLatentCF or GuidedLatentCF? from _guided or _composite?
+        if encoder and decoder:
+            cf_model = ModifiedLatentCF(
+                probability=target_prob,
+                only_encoder=encoder,
+                only_decoder=decoder,
+                optimizer=tf.optimizers.Adam(learning_rate=lr),
+                pred_margin_weight=pred_margin_weight,
+                step_weights=step_weights,
+                random_state=random_state,
+            )
+        else:
+            cf_model = ModifiedLatentCF(
+                probability=target_prob,
+                autoencoder=autoencoder,
+                optimizer=tf.optimizers.Adam(learning_rate=lr),
+                pred_margin_weight=pred_margin_weight,
+                step_weights=step_weights,
+                random_state=random_state,
+            )
+        cf_model.fit(classifier)
+
+        if encoder and decoder:
+            cf_embeddings, losses, _ = cf_model.transform(X_samples, pred_labels)
+            cf_samples = decoder.predict(cf_embeddings)
+            # predicted probabilities of CFs
+            z_pred = classifier.predict(cf_embeddings)
+            cf_pred_labels = np.argmax(z_pred, axis=1)
+        else:
+            cf_samples, losses, _ = cf_model.transform(X_samples, pred_labels)
+            # predicted probabilities of CFs
+            z_pred = classifier.predict(cf_samples)
+            cf_pred_labels = np.argmax(z_pred, axis=1)
+
+        valid_frac = validity_score(pred_labels, cf_pred_labels)
+        proxi_score = euclidean_distance(
+            remove_paddings(X_samples, padding_size),
+            remove_paddings(cf_samples, padding_size),
+        )
+
+        # progress logging, useful when debugging
+        print(f"lr={lr} finished. Validity: {valid_frac}, proximity: {proxi_score}.")
+
+        # TODO: fix (padding) dimensions of `lof_estimator` and `nn_estimator` during training, for debugging
+        # proxi_score, valid_frac, lof_score, rp_score, cost_mean, cost_std = evaluate(
+        #     X_pred_neg=X_samples,
+        #     cf_samples=cf_samples,
+        #     z_pred=z_pred,
+        #     n_timesteps=_,
+        #     lof_estimator=lof_estimator,
+        #     nn_estimator=nn_estimator,
+        # )
+
+        # if valid_frac >= best_valid_frac and proxi_score <= best_proxi_score:
+        if valid_frac >= best_valid_frac:
+            best_cf_model, best_cf_samples = cf_model, cf_samples
+            best_losses, best_lr, best_valid_frac = losses, lr, valid_frac
+            if encoder and decoder:
+                best_cf_embeddings = cf_embeddings
+
+    return best_lr, best_cf_model, best_cf_samples, best_cf_embeddings
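+
+
+# Example usage (an illustrative sketch; `clf`, `ae`, `X`, and `y_pred` are
+# assumed to come from the surrounding training pipeline):
+#
+#     best_lr, cf_model, cf_samples, _ = find_best_lr(
+#         classifier=clf,
+#         X_samples=X,
+#         pred_labels=y_pred,
+#         autoencoder=ae,
+#         lr_list=[0.001, 0.0001],
+#     )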
diff --git a/base/glacier/src/keras_models.py b/base/glacier/src/keras_models.py
new file mode 100755
index 000000000..5b62eef27
--- /dev/null
+++ b/base/glacier/src/keras_models.py
@@ -0,0 +1,260 @@
+from tensorflow import keras
+
+"""
+1dCNN models
+"""
+
+
+def Autoencoder(n_timesteps, n_features):
+    # Define encoder and decoder structure
+    def Encoder(encoder_input):
+        x = keras.layers.Conv1D(
+            filters=64, kernel_size=3, activation="relu", padding="same"
+        )(encoder_input)
+        x = keras.layers.MaxPool1D(pool_size=2, padding="same")(x)
+        x = keras.layers.Conv1D(
+            filters=32, kernel_size=3, activation="relu", padding="same"
+        )(x)
+        x = keras.layers.MaxPool1D(pool_size=2, padding="same")(x)
+        return x
+
+    def Decoder(decoder_input):
+        x = keras.layers.Conv1D(
+            filters=32, kernel_size=3, activation="relu", padding="same"
+        )(decoder_input)
+        x = keras.layers.UpSampling1D(size=2)(x)
+        x = keras.layers.Conv1D(
+            filters=64, kernel_size=3, activation="relu", padding="same"
+        )(x)
+        # x = keras.layers.Conv1D(filters=64, kernel_size=2, activation="relu")(x)
+        x = keras.layers.UpSampling1D(size=2)(x)
+        x = keras.layers.Conv1D(
+            filters=1, kernel_size=3, activation="linear", padding="same"
+        )(x)
+        return x
+
+    # Define the AE model
+    orig_input = keras.Input(shape=(n_timesteps, n_features))
+    autoencoder = keras.Model(inputs=orig_input, outputs=Decoder(Encoder(orig_input)))
+
+    return autoencoder
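+
+
+# Example usage (a minimal sketch; `X_train` with shape
+# (n_samples, n_timesteps, 1) is an assumption). Note that n_timesteps should
+# be divisible by 4 so the two pool/upsample pairs restore the input length:
+#
+#     ae = Autoencoder(n_timesteps=128, n_features=1)
+#     ae.compile(optimizer="adam", loss="mse")
+#     # ae.fit(X_train, X_train, epochs=50, validation_split=0.1)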
+
+
+def Classifier(
+    n_timesteps, n_features, n_conv_layers=1, add_dense_layer=True, n_output=1
+):
+    # https://keras.io/examples/timeseries/timeseries_classification_from_scratch/
+    inputs = keras.Input(shape=(n_timesteps, n_features), dtype="float32")
+
+    if add_dense_layer:
+        x = keras.layers.Dense(128)(inputs)
+    else:
+        x = inputs
+
+    for i in range(n_conv_layers):
+        x = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(x)
+        x = keras.layers.BatchNormalization()(x)
+        x = keras.layers.ReLU()(x)
+
+    x = keras.layers.MaxPooling1D(pool_size=2, padding="same")(x)
+    x = keras.layers.Flatten()(x)
+
+    if n_output >= 2:
+        outputs = keras.layers.Dense(n_output, activation="softmax")(x)
+    else:
+        outputs = keras.layers.Dense(1, activation="sigmoid")(x)
+
+    classifier = keras.models.Model(inputs=inputs, outputs=outputs)
+
+    return classifier
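+
+
+# Example usage (sketch; the default single sigmoid output expects binary
+# labels in {0, 1}):
+#
+#     clf = Classifier(n_timesteps=128, n_features=1)
+#     clf.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
+#     # clf.fit(X_train, y_train, epochs=20, validation_split=0.1)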
+
+
+"""
+LSTM models
+"""
+
+
+def AutoencoderLSTM(n_timesteps, n_features):
+    # Define encoder and decoder structure
+    # structure from medium post: https://towardsdatascience.com/step-by-step-understanding-lstm-autoencoder-layers-ffab055b6352
+    def EncoderLSTM(encoder_input):
+        # x = keras.layers.LSTM(64, activation='relu', return_sequences=True)(encoder_input)
+        x = keras.layers.LSTM(64, activation="tanh", return_sequences=True)(encoder_input)
+        # encoded = keras.layers.LSTM(32, activation='relu', return_sequences=False)(x)
+        encoded = keras.layers.LSTM(32, activation="tanh", return_sequences=False)(x)
+        return encoded
+
+    def DecoderLSTM(encoded):
+        x = keras.layers.RepeatVector(n_timesteps)(encoded)
+        # x = keras.layers.LSTM(32, activation='relu', return_sequences=True)(x)
+        x = keras.layers.LSTM(32, activation="tanh", return_sequences=True)(x)
+        # x = keras.layers.LSTM(64, activation='relu', return_sequences=True)(x)
+        x = keras.layers.LSTM(64, activation="tanh", return_sequences=True)(x)
+        decoded = keras.layers.TimeDistributed(
+            keras.layers.Dense(n_features, activation="sigmoid")
+        )(x)
+        return decoded
+
+    # Define the AE model
+    orig_input2 = keras.Input(shape=(n_timesteps, n_features))
+
+    autoencoder2 = keras.Model(
+        inputs=orig_input2, outputs=DecoderLSTM(EncoderLSTM(orig_input2))
+    )
+
+    return autoencoder2
+
+
+def ClassifierLSTM(n_timesteps, n_features, extra_lstm_layer=True, n_output=1):
+    # Define the model structure - only LSTM layers
+    # https://www.kaggle.com/szaitseff/classification-of-time-series-with-lstm-rnn
+    inputs = keras.Input(shape=(n_timesteps, n_features), dtype="float32")
+    if extra_lstm_layer:
+        x = keras.layers.LSTM(64, activation="tanh", return_sequences=True)(
+            inputs
+        )  # set return_sequences true to feed next LSTM layer
+    else:
+        x = keras.layers.LSTM(32, activation="tanh", return_sequences=False)(
+            inputs
+        )  # set return_sequences false to feed dense layer directly
+    x = keras.layers.BatchNormalization()(x)
+    # x = keras.layers.LSTM(32, activation='tanh', return_sequences=True)(x)
+    # x = keras.layers.BatchNormalization()(x)
+    if extra_lstm_layer:
+        x = keras.layers.LSTM(16, activation="tanh", return_sequences=False)(x)
+        x = keras.layers.BatchNormalization()(x)
+
+    if n_output >= 2:
+        outputs = keras.layers.Dense(n_output, activation="softmax")(x)
+    else:
+        outputs = keras.layers.Dense(1, activation="sigmoid")(x)
+
+    classifier2 = keras.Model(inputs, outputs)
+
+    return classifier2
+
+
+"""
+composite autoencoder
+"""
+
+
+def CompositeAutoencoder(n_timesteps, n_features, n_output_classifier=1):
+    # https://machinelearningmastery.com/lstm-autoencoders/
+    # https://stackoverflow.com/questions/48603328/how-do-i-split-an-convolutional-autoencoder
+
+    # Composite 1dCNN Autoencoder
+    # encoder:
+    inputs = keras.layers.Input(shape=(n_timesteps, n_features))
+    x = keras.layers.Conv1D(
+        filters=64, kernel_size=3, activation="relu", padding="same"
+    )(inputs)
+    x = keras.layers.MaxPool1D(pool_size=2, padding="same")(x)
+    x = keras.layers.Conv1D(
+        filters=32, kernel_size=3, activation="relu", padding="same"
+    )(x)
+    encoded = keras.layers.MaxPool1D(pool_size=2, padding="same")(x)
+
+    encoder3 = keras.models.Model(inputs, encoded)
+
+    # infer the latent shape from the encoder's output tensor
+    # (`layer.output_shape` is not available in Keras 3)
+    _, encoded_dim1, encoded_dim2 = encoder3.output.shape
+
+    # decoder
+    decoder_inputs = keras.layers.Input(shape=(encoded_dim1, encoded_dim2))
+    x = keras.layers.Conv1D(
+        filters=32, kernel_size=3, activation="relu", padding="same"
+    )(decoder_inputs)
+    x = keras.layers.UpSampling1D(size=2)(x)
+    x = keras.layers.Conv1D(
+        filters=64, kernel_size=3, activation="relu", padding="same"
+    )(x)
+    # x = keras.layers.Conv1D(filters=64, kernel_size=2, activation="relu")(x)
+    x = keras.layers.UpSampling1D(size=2)(x)
+    decoded = keras.layers.Conv1D(
+        filters=1, kernel_size=3, activation="linear", padding="same"
+    )(x)
+
+    decoder3 = keras.models.Model(decoder_inputs, decoded, name="only_decoder")
+
+    # classifier based on previous encoder
+    classifier_inputs = keras.layers.Input(shape=(encoded_dim1, encoded_dim2))
+    x = keras.layers.Conv1D(
+        filters=16, kernel_size=3, activation="relu", padding="same"
+    )(classifier_inputs)
+    x = keras.layers.MaxPool1D(pool_size=2, padding="same")(x)
+    x = keras.layers.Flatten()(x)
+    x = keras.layers.Dense(50, activation="relu")(x)
+    if n_output_classifier >= 2:
+        classifier_outputs = keras.layers.Dense(
+            n_output_classifier, activation="softmax"
+        )(x)
+    else:
+        classifier_outputs = keras.layers.Dense(1, activation="sigmoid")(x)
+
+    classifier3 = keras.models.Model(
+        classifier_inputs, classifier_outputs, name="only_classifier"
+    )
+
+    # composite autoencoder
+    encoded = encoder3(inputs)
+    decoded = decoder3(encoded)
+    classified = classifier3(encoded)
+
+    composite_autoencoder = keras.models.Model(inputs, [decoded, classified])
+
+    return composite_autoencoder, encoder3, decoder3, classifier3
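+
+
+# Example usage (sketch): the composite model has two outputs, so it is trained
+# with a reconstruction loss and a classification loss jointly; the equal 0.5
+# weighting below is an illustrative assumption:
+#
+#     composite, enc, dec, clf = CompositeAutoencoder(n_timesteps=128, n_features=1)
+#     composite.compile(
+#         optimizer="adam",
+#         loss=["mse", "binary_crossentropy"],
+#         loss_weights=[0.5, 0.5],
+#     )
+#     # composite.fit(X_train, [X_train, y_train], epochs=50)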
+
+
+def LSTMFCNClassifier(n_timesteps, n_features, n_output=2, n_LSTM_cells=8):
+    # https://github.com/titu1994/LSTM-FCN/blob/master/hyperparameter_search.py
+    inputs = keras.Input(shape=(n_timesteps, n_features), dtype="float32")
+
+    x = keras.layers.LSTM(units=n_LSTM_cells)(inputs)
+    x = keras.layers.Dropout(rate=0.8)(x)
+
+    y = keras.layers.Permute((2, 1))(inputs)
+    y = keras.layers.Conv1D(128, 8, padding="same", kernel_initializer="he_uniform")(y)
+    y = keras.layers.BatchNormalization()(y)
+    y = keras.layers.ReLU()(y)
+
+    y = keras.layers.Conv1D(256, 5, padding="same", kernel_initializer="he_uniform")(y)
+    y = keras.layers.BatchNormalization()(y)
+    y = keras.layers.ReLU()(y)
+
+    y = keras.layers.Conv1D(128, 3, padding="same", kernel_initializer="he_uniform")(y)
+    y = keras.layers.BatchNormalization()(y)
+    y = keras.layers.ReLU()(y)
+
+    y = keras.layers.GlobalAveragePooling1D()(y)
+
+    x = keras.layers.concatenate([x, y])
+
+    outputs = keras.layers.Dense(n_output, activation="softmax")(x)
+
+    classifier = keras.models.Model(inputs=inputs, outputs=outputs)
+
+    return classifier
+
+
+def Classifier_FCN(input_shape, nb_classes):
+    input_layer = keras.layers.Input(input_shape)
+
+    conv1 = keras.layers.Conv1D(filters=128, kernel_size=8, padding="same")(input_layer)
+    conv1 = keras.layers.BatchNormalization()(conv1)
+    conv1 = keras.layers.Activation(activation="relu")(conv1)
+
+    conv2 = keras.layers.Conv1D(filters=256, kernel_size=5, padding="same")(conv1)
+    conv2 = keras.layers.BatchNormalization()(conv2)
+    conv2 = keras.layers.Activation("relu")(conv2)
+
+    conv3 = keras.layers.Conv1D(128, kernel_size=3, padding="same")(conv2)
+    conv3 = keras.layers.BatchNormalization()(conv3)
+    conv3 = keras.layers.Activation("relu")(conv3)
+
+    gap_layer = keras.layers.GlobalAveragePooling1D()(conv3)
+
+    output_layer = keras.layers.Dense(nb_classes, activation="softmax")(gap_layer)
+
+    model = keras.models.Model(inputs=input_layer, outputs=output_layer)
+
+    return model
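+
+
+# Example usage (sketch; one-hot labels are assumed, matching the softmax
+# output and categorical cross-entropy):
+#
+#     model = Classifier_FCN(input_shape=(128, 1), nb_classes=2)
+#     model.compile(optimizer="adam", loss="categorical_crossentropy",
+#                   metrics=["accuracy"])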
diff --git a/base/glacier/src/requirements.txt b/base/glacier/src/requirements.txt
new file mode 100755
index 000000000..1ba9baf2a
--- /dev/null
+++ b/base/glacier/src/requirements.txt
@@ -0,0 +1,11 @@
+fastdtw==0.3.4
+joblib==1.4.2
+keras==3.5.0
+matplotlib==3.9.2
+numpy==2.1.0
+pandas==2.2.2
+scikit_learn==1.5.1
+scipy==1.14.1
+stumpy==1.13.0
+tensorflow==2.17.0
+wildboar==1.2.0