Gil Stetler committed
Commit c9aa5e1 · 1 Parent(s): 97443ec
fix
app.py CHANGED
@@ -16,8 +16,14 @@ from chronos import ChronosPipeline
 AGTS_AVAILABLE = False
 try:
     from autogluon.timeseries import TimeSeriesPredictor, TimeSeriesDataFrame
+    try:
+        # optional: AutoGluon's global seeding helper (not available in all versions)
+        from autogluon.common.utils.seed import set_seed as ag_set_seed
+    except Exception:
+        ag_set_seed = None
     AGTS_AVAILABLE = True
 except Exception:
+    ag_set_seed = None
     pass
 
 # our existing data pipeline
@@ -51,6 +57,24 @@ AUTO_SEED = int(os.getenv("AUTO_SEED", "0"))
 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.bfloat16 if device == "cuda" else torch.float32
 
+# ---- global seeding (compatible across versions) ----
+def set_global_seed(seed: int):
+    random.seed(seed)
+    np.random.seed(seed)
+    try:
+        torch.manual_seed(seed)
+        if torch.cuda.is_available():
+            torch.cuda.manual_seed_all(seed)
+    except Exception:
+        pass
+    if ag_set_seed is not None:
+        try:
+            ag_set_seed(seed)
+        except Exception:
+            pass
+
+set_global_seed(AUTO_SEED)
+
 def _extract_close(df: pd.DataFrame) -> pd.Series:
     if isinstance(df.columns, pd.MultiIndex):
         for name in ["Adj Close", "Adj_Close", "adj close", "adj_close"]:
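The two hunks above make seeding tolerant of AutoGluon versions that lack autogluon.common.utils.seed.set_seed: the helper is imported behind a try/except and only called when it exists. A minimal, self-contained sketch of the same pattern, with a quick determinism check appended; the check and the example seed are illustrative additions, not part of app.py:

# Sketch (not part of app.py): verify that re-seeding reproduces the same draws.
import random

import numpy as np
import torch

try:
    # optional helper; missing in some AutoGluon versions
    from autogluon.common.utils.seed import set_seed as ag_set_seed
except Exception:
    ag_set_seed = None

def set_global_seed(seed: int) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    if ag_set_seed is not None:
        ag_set_seed(seed)

set_global_seed(0)
first = (torch.randn(3), np.random.rand(3), random.random())
set_global_seed(0)
second = (torch.randn(3), np.random.rand(3), random.random())
assert torch.equal(first[0], second[0])
assert np.allclose(first[1], second[1]) and first[2] == second[2]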
@@ -169,7 +193,6 @@ def ensure_finetuned_predictor(log_cb=print):
     predictor = TimeSeriesPredictor(
         prediction_length=PREDICTION_LENGTH,
         target="target",
-        random_seed=AUTO_SEED,
         eval_metric="WQL",
     )
 
@@ -179,11 +202,10 @@ def ensure_finetuned_predictor(log_cb=print):
             "fine_tune": True,
             "fine_tune_steps": AUTO_STEPS,
             "fine_tune_lr": AUTO_LR,
-            # "device": "gpu"  # AutoGluon
+            # "device": "gpu"  # AutoGluon uses CUDA automatically when available
         }
     }
 
-    # Training
     predictor.fit(train_data=tsdf, hyperparameters=hyperparams, time_limit=None, presets=None)
     predictor.save(FINETUNED_DIR)
     log_cb(f"Saved finetuned predictor to: {FINETUNED_DIR}")
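For context, the "fine_tune", "fine_tune_steps", and "fine_tune_lr" keys in the last hunk sit inside AutoGluon's per-model hyperparameter dictionary. A minimal sketch of the assumed overall shape of that fit call; the "Chronos" model key, the "model_path" value, the constants, and the toy data frame are illustrative assumptions, not taken from app.py:

# Hedged sketch of the assumed configuration around the hunk above; values are illustrative.
import numpy as np
import pandas as pd
from autogluon.timeseries import TimeSeriesDataFrame, TimeSeriesPredictor

PREDICTION_LENGTH = 12            # assumed; app.py defines its own value
AUTO_STEPS, AUTO_LR = 1000, 1e-5  # assumed; app.py reads these from environment variables

# toy single-series frame in long format: item_id / timestamp / target
df = pd.DataFrame({
    "item_id": "demo",
    "timestamp": pd.date_range("2024-01-01", periods=200, freq="D"),
    "target": np.linspace(0.0, 1.0, 200),
})
tsdf = TimeSeriesDataFrame.from_data_frame(df, id_column="item_id", timestamp_column="timestamp")

predictor = TimeSeriesPredictor(
    prediction_length=PREDICTION_LENGTH,
    target="target",
    eval_metric="WQL",
)

hyperparams = {
    "Chronos": {                     # assumed outer key: AutoGluon's model name
        "model_path": "bolt_small",  # assumed checkpoint; app.py may point elsewhere
        "fine_tune": True,
        "fine_tune_steps": AUTO_STEPS,
        "fine_tune_lr": AUTO_LR,
    }
}

predictor.fit(train_data=tsdf, hyperparameters=hyperparams, time_limit=None, presets=None)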