Changes from all commits (31 commits)
d3a5927
Fixed PR changes for params_regression
tanzim10 Oct 1, 2024
fabd34b
Fixed Unittest for param_regression according to PR change
tanzim10 Oct 1, 2024
71f6c96
Fixed the mobility notebook and radp_library file(PR changes done)
tanzim10 Oct 1, 2024
deb3bc3
Resolved test_param_regression unittest cases
tanzim10 Oct 2, 2024
064e285
Updated Param Regression and radp library as instructed in the PR review
tanzim10 Oct 4, 2024
f774355
Changed the scipy dependency to solve dependency error
tanzim10 Oct 4, 2024
962e4db
Changed how seed value works especially made it user friendly
tanzim10 Oct 4, 2024
625be10
Refactored functions and changed the imports
tanzim10 Oct 11, 2024
9d507a1
Resolved test_param_regression imports
tanzim10 Oct 11, 2024
7ff9c34
All Black Changes
tanzim10 Oct 11, 2024
831f215
Merge branch 'main' into PR-Changes
tanzim10 Oct 11, 2024
d6bb401
Merge pull request #1 from tanzim10/PR-Changes
tanzim10 Oct 11, 2024
77bce45
Merge pull request #3 from lf-connectivity/main
tanzim10 Oct 23, 2024
c4bb836
Merge branch 'lf-connectivity:main' into main
tanzim10 May 6, 2025
363f7cf
MRO (#17) (#7)
tanzim10 May 16, 2025
49bfd35
Merge branch 'main' of https://github.com/tanzim10/maveric
WaterMenon09 May 16, 2025
a0b22a1
[v0.2]: SinrDB Bug Fix (#25) (#17)
tanzim10 Jul 15, 2025
6310d7d
Release v1.0 (#18)
tanzim10 Jul 15, 2025
28b3f37
added mro-ml, enchanced performattachment, enhanced params for mro, a…
WaterMenon09 Sep 12, 2025
0f43252
Merge branch 'main' into v1.0.2/mro/enhancement
WaterMenon09 Sep 12, 2025
47cd020
fixed test erro
WaterMenon09 Sep 12, 2025
a172c92
added minor fix
WaterMenon09 Sep 12, 2025
5d9641e
fixed testing error
WaterMenon09 Sep 12, 2025
823b245
reverted pre-commit.yml
WaterMenon09 Sep 18, 2025
03e13a2
CCO Modularized and Enhanced
tanzim10 Oct 7, 2025
85b0d72
Revert "CCO Modularized and Enhanced"
tanzim10 Oct 7, 2025
74da160
made minor bugfix for rl mro
WaterMenon09 Oct 13, 2025
331d565
Revert "feat: comprehensive validation system for RADP APIs"
paulvarkey Nov 6, 2025
09bd357
added ml_mro example to notebook & added all docstring for mro
WaterMenon09 Nov 20, 2025
b9baee8
added proper docstring for new cell attachment
WaterMenon09 Nov 20, 2025
c2f1992
fixed mro minor testing bug
WaterMenon09 Nov 20, 2025
@@ -1,3 +1,4 @@
import logging
import os
import pickle
import warnings
@@ -7,9 +8,7 @@

import numpy as np
import pandas as pd
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.settings import cholesky_jitter
import torch
from gpytorch.utils.warnings import NumericalWarning

from notebooks.radp_library import (
@@ -28,6 +27,9 @@
# Suppress the specific NumericalWarning from gpytorch
warnings.filterwarnings("ignore", category=NumericalWarning)

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)


class MobilityRobustnessOptimization(ABC):
"""
@@ -38,12 +40,15 @@ def __init__(
self,
mobility_model_params: Dict[str, Dict],
topology: pd.DataFrame,
new_data: Optional[pd.DataFrame] = None,
bdt: Optional[Dict[str, BayesianDigitalTwin]] = None,
):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.topology = topology
self.bayesian_digital_twins = bdt if bdt is not None else {}
self.mobility_model_params = mobility_model_params
self.simulation_data = None
self.new_data = new_data
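# Illustration of the device pattern introduced above: pick the torch device
# once, then keep model and inputs on it (the same .to(self.device) call is
# used on the GP model in _training below). torch.nn.Linear is a stand-in
# for the GP model; this sketch is not part of the module.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(4, 1).to(device)   # stand-in model moved to the device
x = torch.randn(8, 4, device=device)       # inputs must live on the same device
y = model(x)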

def train_or_update_rf_twins(self, new_data: pd.DataFrame) -> None:
"""
@@ -67,45 +72,47 @@ def train_or_update_rf_twins(self, new_data: pd.DataFrame) -> None:

"""
try:
self.new_data = new_data

if not isinstance(new_data, pd.DataFrame):
raise TypeError("The input 'new_data' must be a pandas DataFrame.")
logger.error("The input 'new_data' must be a pandas DataFrame.")

expected_columns = {"longitude", "latitude", "cell_id", "cell_rxpwr_dbm"}
if not expected_columns.issubset(new_data.columns):
raise ValueError(f"The input DataFrame must contain the following columns: {expected_columns}")
if not expected_columns.issubset(self.new_data.columns):
logger.error(f"The input DataFrame must contain the following columns: {expected_columns}")

# normalize cell_id format - regardless of dtype
self.topology = normalize_cell_ids(self.topology)
new_data = normalize_cell_ids(new_data)
self.new_data = normalize_cell_ids(self.new_data)

# Check if the new data is in the expected cartesian format
check_cartesian_format(new_data, self.topology)
check_cartesian_format(self.new_data, self.topology)

# Prepare the new data for training or updating
prepared_data = self._prepare_train_or_update_data(new_data)
prepared_data = self._prepare_train_or_update_data(self.new_data)

# update if bayesian digital twins exist already
if self.bayesian_digital_twins:
print("Updating existing Bayesian Digital Twins with new data.")
logger.info("Updating existing Bayesian Digital Twins with new data.")

for cell_id, df in prepared_data.items():
self._update(cell_id, df)
print("Bayesian Digital Twins updated successfully.")
logger.info("Bayesian Digital Twins updated successfully.")

# If no Bayesian Digital Twins exist, train from scratch
else:
print("No Bayesian Digital Twins available for update. Training from scratch.")
logger.info("No Bayesian Digital Twins available for update. Training from scratch.")
self._training(maxiter=100, train_data=prepared_data)
print("\nBayesian Digital Twins trained successfully.")
logger.info("\nBayesian Digital Twins trained successfully.")

except TypeError as te:
print(f"TypeError: {te}")
logger.error(f"TypeError: {te}")
except ValueError as ve:
print(f"ValueError: {ve}")
logger.error(f"ValueError: {ve}")
except KeyError as ke:
print(f"KeyError: {ke}")
logger.error(f"KeyError: {ke}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
logger.exception(f"An unexpected error occurred: {e}")

def save_bdt(self, file_relative_path="data/mro_data") -> bool:
"""
@@ -116,28 +123,28 @@ def save_bdt(self, file_relative_path="data/mro_data") -> bool:

try:
if not isinstance(self.bayesian_digital_twins, dict):
raise TypeError("The attribute 'bayesian_digital_twins' must be a dictionary.")
logger.error("The attribute 'bayesian_digital_twins' must be a dictionary.")

# Ensure the directory exists
os.makedirs(file_relative_path, exist_ok=True)

with open(filename, "wb") as fp:
pickle.dump(self.bayesian_digital_twins, fp)

print(f"Twins Saved Successfully as Pickle at: {filename}")
logger.info(f"Twins Saved Successfully as Pickle at: {filename}")

return True # Indicate successful save

except TypeError as te:
print(f"TypeError: {te}")
logger.error(f"TypeError: {te}")
return False

except OSError as oe:
print(f"OSError: {oe}")
logger.error(f"OSError: {oe}")
return False

except Exception as e:
print(f"An unexpected error occurred: {e}")
logger.exception(f"An unexpected error occurred: {e}")
return False

def load_bdt(self, file_relative_path="data/mro_data/digital_twins.pkl") -> bool:
@@ -150,16 +157,16 @@ def load_bdt(self, file_relative_path="data/mro_data/digital_twins.pkl") -> bool
try:
with open(filename, "rb") as fp:
self.bayesian_digital_twins = pickle.load(fp)
print(f"Twins Loaded Successfully from Pickle at: {filename}")
logger.info(f"Twins Loaded Successfully from Pickle at: {filename}")

return True # Indicate successful load

except FileNotFoundError as fnf:
print(f"FileNotFoundError: {fnf}")
logger.error(f"FileNotFoundError: {fnf}")
return False

except Exception as e:
print(f"An unexpected error occurred: {e}")
logger.exception(f"An unexpected error occurred: {e}")
return False
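# Roundtrip sketch for the two persistence helpers above: the directory
# default comes from save_bdt's signature, and the pickle name inside it is
# assumed from load_bdt's default path.
if mro.save_bdt("data/mro_data"):
    mro.load_bdt("data/mro_data/digital_twins.pkl")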

@abstractmethod
@@ -175,7 +182,7 @@ def _training(self, maxiter: int, train_data: Dict[str, pd.DataFrame]) -> List[f
"""
Trains the Bayesian Digital Twins for each cell in the topology using the UE locations and features
like log distance, relative bearing, and cell received power (Rx power).

+---------+----------+-----------+----------------+--------------+-------------------+
| cell_id | latitude | longitude | cell_rxpwr_dbm | log_distance | relative_bearing  |
+=========+==========+===========+================+==============+===================+
@@ -199,6 +206,7 @@
norm_method=NormMethod.MINMAX,
)

bayesian_digital_twins[train_cell_id].model = bayesian_digital_twins[train_cell_id].model.to(self.device)
self.bayesian_digital_twins[train_cell_id] = bayesian_digital_twins[train_cell_id]

loss_vs_iters.append(
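# Hedged sketch of the two engineered features named in the docstring above.
# These formulas are common conventions, assumed here rather than taken from
# this file (the actual feature computation lives elsewhere in the library).
import numpy as np

def log_distance(ue_lat, ue_lon, cell_lat, cell_lon):
    # Log of (1 + planar distance in degree space); the real code may use a
    # geodesic distance instead.
    return np.log(1.0 + np.hypot(ue_lat - cell_lat, ue_lon - cell_lon))

def relative_bearing(ue_lat, ue_lon, cell_lat, cell_lon, cell_az_deg):
    # Bearing from the cell to the UE, clockwise from north, minus the cell's
    # boresight azimuth, wrapped to [0, 360).
    bearing = np.degrees(np.arctan2(ue_lon - cell_lon, ue_lat - cell_lat)) % 360
    return (bearing - cell_az_deg) % 360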
@@ -213,11 +221,10 @@ def _update(self, cell_id: str, df: pd.DataFrame) -> None:
"""
Updates the Bayesian Digital Twin (BDT) model for a specific cell.

Updates by deduplicating samples using 'log_distance' and 'relative_bearing', subsampling up to 500
strongest signals, reconfiguring the Gaussian Process with a Scale and RBF kernel, increasing observation
noise via GaussianLikelihood, and using higher jitter to stabilize Cholesky decomposition before training
on the processed data.

Deduplicates samples using 'log_distance' and 'relative_bearing'. If more than 500
samples remain, subsamples the 500 strongest signals before updating the trained
GP model with the processed data.

+---------+----------+-----------+----------------+--------------+-------------------+
| cell_id | latitude | longitude | cell_rxpwr_dbm | log_distance | relative_bearing  |
+=========+==========+===========+================+==============+===================+
@@ -240,23 +247,12 @@ def _update(self, cell_id: str, df: pd.DataFrame) -> None:
][0]

twin = self.bayesian_digital_twins[cell_id]

# Reconfigure the kernel to include scale + RBF
twin.model.covar_module = ScaleKernel(RBFKernel())

# Increase observation noise via GaussianLikelihood
if not hasattr(twin, "likelihood"):
twin.likelihood = GaussianLikelihood() # type: ignore
twin.likelihood.noise = 1e-2 # type: ignore

# Use an increased jitter context
with cholesky_jitter(1e-1):
twin.update_trained_gpmodel([df])
twin.update_trained_gpmodel([df])
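# Sketch of the preprocessing described in the docstring above (an assumed
# helper, not this module's exact code): drop duplicate feature pairs, then
# keep at most the 500 strongest samples before updating the GP.
import pandas as pd

def dedup_and_subsample(df: pd.DataFrame, max_samples: int = 500) -> pd.DataFrame:
    df = df.drop_duplicates(subset=["log_distance", "relative_bearing"])
    if len(df) > max_samples:
        df = df.nlargest(max_samples, "cell_rxpwr_dbm")  # strongest signals first
    return df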

def _prepare_train_or_update_data(self, df: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""
Returns key-value pairs of cell_id and the processed DataFrame for each cell_id.

+--------+----------+-----------+------+---------+----------+----------+--------------+------------------------+
| ue_id  | latitude | longitude | tick | cell_id | cell_lon | cell_lat | cell_az_deg  | cell_carrier_freq_mhz |
+========+==========+===========+======+=========+==========+==========+==============+========================+
@@ -266,7 +262,6 @@ def _prepare_train_or_update_data(self, df: pd.DataFrame) -> Dict[str, pd.DataFr
| 1      | 90.416   | 23.813    | 1    | 2       | 90.414   | 23.810   | 240          | 2100                   |
+--------+----------+-----------+------+---------+----------+----------+--------------+------------------------+


"""
required_columns = {"cell_lat", "cell_lon", "cell_az_deg"}
if not required_columns.issubset(df.columns):
@@ -316,7 +311,7 @@ def _predictions(self, pred_data: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFr
"""
Predicts the received power for each User Equipment (UE) at different locations
and ticks using Bayesian Digital Twins.

+---------+-----------+------------+----------+
| ue_id   | latitude  | longitude  | tick     |
+=========+===========+============+==========+
@@ -325,7 +320,7 @@ def _predictions(self, pred_data: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFr
| 1       | 90.415    | 23.812     | 1        |
| 2       | 90.416    | 23.813     | 1        |
+---------+-----------+------------+----------+

It then determines the best cell for each UE to attach to, based on the predicted power values.
"""
# self.prediction_data = pred_data
@@ -357,7 +352,7 @@ def _predictions(self, pred_data: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFr

else:
# Handle missing models, e.g., log a warning or initialize a default model
print(f"No model available for cell_id {cell_id}, skipping prediction.")
logger.error(f"No model available for cell_id {cell_id}, skipping prediction.")

full_prediction_df = full_prediction_df.rename(columns={"latitude": "loc_y", "longitude": "loc_x"})
if full_prediction_df["cell_id"].dtype == object:
@@ -368,7 +363,9 @@ def _predictions(self, pred_data: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFr
return predicted, full_prediction_df
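# Sketch of the attachment rule stated in the docstring above: per
# (ue_id, tick), attach to the cell with the highest predicted power (the
# "pred_means" column name is assumed from the simulation-data table below).
def best_cell_per_ue(pred_df: pd.DataFrame) -> pd.DataFrame:
    idx = pred_df.groupby(["ue_id", "tick"])["pred_means"].idxmax()
    return pred_df.loc[idx, ["ue_id", "tick", "cell_id", "pred_means"]]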

def _preprocess_simulation_data(self, df: pd.DataFrame) -> pd.DataFrame:
'''
"""Preprocess simulation data for MRO analysis.

Expected input format:
+------------+----------+-------------+--------------------+--------------+------------+------+
| mock_ue_id | cell_id  | rxpower_dbm | rxpower_stddev_dbm | log_distance | pred_means | tick |
+============+==========+=============+====================+==============+============+======+
@@ -377,8 +374,7 @@ def _preprocess_simulation_data(self, df: pd.DataFrame) -> pd.DataFrame:
| 0 | "cell_2" | -82.1 | 1.1 | 0.207 | -84.2 | 1 |
| 1 | "cell_3" | -90.4 | 1.3 | 0.499 | -89.0 | 1 |
+------------+-------------+-------------+----------------------+--------------+-------------+-------------+

'''
"""
df.drop(
columns=["rxpower_stddev_dbm", "rxpower_dbm", "cell_rxpwr_dbm"],
inplace=True,
@@ -397,6 +393,7 @@ def _preprocess_simulation_data(self, df: pd.DataFrame) -> pd.DataFrame:
df["cell_id"] = df["cell_id"].str.extract(r"(\d+)").astype(int)
df = self._add_sinr_column(df)
return df

# TODO: Use Utils version of this function
def _add_sinr_column(self, df: pd.DataFrame) -> pd.DataFrame:
"""
@@ -408,7 +405,7 @@ def _add_sinr_column(self, df: pd.DataFrame) -> pd.DataFrame:

Returns:
pd.DataFrame: Updated DataFrame with an additional 'sinr_db' column.

+--------+---------+------------------+------------------------+
| ue_id  | cell_id | cell_rxpower_dbm | cell_carrier_freq_mhz |
+========+=========+==================+========================+
@@ -421,13 +418,14 @@ def _add_sinr_column(self, df: pd.DataFrame) -> pd.DataFrame:
| 3      | 1       | -100.987321      | 2100.0                 |
| 3      | 2       | -100.864529      | 2100.0                 |
+--------+---------+------------------+------------------------+

"""
df = df.copy()
sinr_column = []

# Group by location
for (_, group) in df.groupby(["ue_id", "tick"]):
for _, group in df.groupby(["ue_id", "tick"]):

# Group further by frequency layer within the same location
freq_groups = group.groupby("cell_carrier_freq_mhz")

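# Hedged sketch of a per-location SINR computation consistent with the
# grouping above: within one frequency layer, the serving cell's linear power
# over the remaining cells' powers plus thermal noise. The -114 dBm noise
# floor is an assumed placeholder, not a value from this module.
import numpy as np

def sinr_db(rx_dbm: np.ndarray, serving_idx: int, noise_dbm: float = -114.0) -> float:
    lin = 10 ** (rx_dbm / 10.0)          # dBm -> mW
    signal = lin[serving_idx]
    interference = lin.sum() - signal    # co-channel cells at this location
    noise_mw = 10 ** (noise_dbm / 10.0)
    return float(10.0 * np.log10(signal / (interference + noise_mw)))

# Example: a -80 dBm serving cell with one -95 dBm interferer -> about 14.9 dB.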