diff --git a/growthbook/common_types.py b/growthbook/common_types.py index 916f4d3..92f9d99 100644 --- a/growthbook/common_types.py +++ b/growthbook/common_types.py @@ -14,9 +14,9 @@ from abc import ABC, abstractmethod class VariationMeta(TypedDict): - key: str - name: str - passthrough: bool + key: Optional[str] + name: Optional[str] + passthrough: Optional[bool] class Filter(TypedDict): @@ -143,12 +143,12 @@ def update(self, data: Dict[str, Any]) -> None: class Result(object): def __init__( self, - variationId: int, - inExperiment: bool, - value: Any, - hashUsed: bool, - hashAttribute: str, - hashValue: str, + variationId: Optional[int], + inExperiment: Optional[bool], + value: Any, + hashUsed: Optional[bool], + hashAttribute: Optional[str], + hashValue: Optional[str], featureId: Optional[str], meta: Optional[VariationMeta] = None, bucket: Optional[float] = None, @@ -164,17 +164,17 @@ def __init__( self.bucket = bucket self.stickyBucketUsed = stickyBucketUsed - self.key = str(variationId) + self.key = str(variationId) if variationId is not None else "" self.name = "" self.passthrough = False if meta: - if "name" in meta: - self.name = meta["name"] - if "key" in meta: - self.key = meta["key"] - if "passthrough" in meta: - self.passthrough = meta["passthrough"] + if "name" in meta and meta["name"] is not None: + self.name = str(meta["name"]) + if "key" in meta and meta["key"] is not None: + self.key = str(meta["key"]) + if "passthrough" in meta and meta["passthrough"] is not None: + self.passthrough = bool(meta["passthrough"]) def to_dict(self) -> Dict[str, Any]: obj: Dict[str, Any] = { @@ -198,11 +198,30 @@ def to_dict(self) -> Dict[str, Any]: return obj + @staticmethod + def from_dict(data: dict) -> "Result": + return Result( + variationId=data.get("variationId"), + inExperiment=data.get("inExperiment"), + value=data.get("value"), + hashUsed=data.get("hashUsed"), + hashAttribute=data.get("hashAttribute"), + hashValue=data.get("hashValue"), + featureId=data.get("featureId"), + bucket=data.get("bucket"), + stickyBucketUsed=data.get("stickyBucketUsed", False), + meta={ + "name": data.get("name"), + "key": data.get("key"), + "passthrough": data.get("passthrough"), + } + ) + class FeatureResult(object): def __init__( self, value: Any, - source: str, + source: Optional[str] = None, experiment: Optional[Experiment] = None, experimentResult: Optional[Result] = None, ruleId: Optional[str] = None, @@ -218,7 +237,7 @@ def __init__( def to_dict(self) -> Dict[str, Any]: data: Dict[str, Any] = { "value": self.value, - "source": self.source, + "source": self.source or "", "on": self.on, "off": self.off, "ruleId": self.ruleId or "", @@ -230,6 +249,17 @@ def to_dict(self) -> Dict[str, Any]: return data + @staticmethod + def from_dict(data: dict) -> "FeatureResult": + return FeatureResult( + value=data.get("value"), + source=data.get("source"), + experiment=Experiment(**data["experiment"]) if isinstance(data.get("experiment"), dict) else data.get("experiment"), + experimentResult=Result.from_dict(data["experimentResult"]) if isinstance(data.get("experimentResult"), dict) else data.get("experimentResult"), + ruleId=data.get("ruleId"), + ) + class Feature(object): def __init__(self, defaultValue: Any = None, rules: Optional[List[Any]] = None) -> None: if rules is None: @@ -263,6 +293,7 @@ def __init__(self,
defaultValue: Any = None, rules: Optional[List[Any]] = None) bucketVersion=rule.get("bucketVersion", None), minBucketVersion=rule.get("minBucketVersion", None), parentConditions=rule.get("parentConditions", None), + tracks=rule.get("tracks", None), )) def to_dict(self) -> Dict[str, Any]: @@ -271,6 +302,17 @@ def to_dict(self) -> Dict[str, Any]: "rules": [rule.to_dict() for rule in self.rules], } +@dataclass +class TrackData: + experiment: Experiment + result: Result + + def to_dict(self) -> Dict[str, Any]: + return { + "experiment": self.experiment.to_dict() if hasattr(self.experiment, 'to_dict') else self.experiment, + "result": self.result.to_dict() if hasattr(self.result, 'to_dict') else self.result + } + class FeatureRule(object): def __init__( self, @@ -296,6 +338,7 @@ def __init__( bucketVersion: Optional[int] = None, minBucketVersion: Optional[int] = None, parentConditions: Optional[List[Dict[str, Any]]] = None, + tracks: Optional[List[TrackData]] = None ) -> None: if disableStickyBucketing: @@ -323,6 +366,16 @@ def __init__( self.bucketVersion = bucketVersion or 0 self.minBucketVersion = minBucketVersion or 0 self.parentConditions = parentConditions + self.tracks = [] + if tracks: + for t in tracks: + if isinstance(t, TrackData): + self.tracks.append(t) + elif isinstance(t, dict) and "experiment" in t and "result" in t: + self.tracks.append(TrackData( + experiment=Experiment(**t["experiment"]), + result=Result.from_dict(t["result"]), + )) def to_dict(self) -> Dict[str, Any]: data: Dict[str, Any] = {} @@ -370,6 +423,8 @@ def to_dict(self) -> Dict[str, Any]: data["minBucketVersion"] = self.minBucketVersion if self.parentConditions: data["parentConditions"] = self.parentConditions + if self.tracks: + data["tracks"] = [track.to_dict() for track in self.tracks] return data @@ -424,14 +479,16 @@ class Options: enabled: bool = True qa_mode: bool = False enable_dev_mode: bool = False - # forced_variations: Dict[str, Any] = field(default_factory=dict) + forced_variations: Dict[str, Any] = field(default_factory=dict) refresh_strategy: Optional[FeatureRefreshStrategy] = FeatureRefreshStrategy.STALE_WHILE_REVALIDATE sticky_bucket_service: Optional[AbstractStickyBucketService] = None sticky_bucket_identifier_attributes: Optional[List[str]] = None on_experiment_viewed: Optional[Callable[[Experiment, Result, Optional[UserContext]], None]] = None on_feature_usage: Optional[Callable[[str, 'FeatureResult', UserContext], None]] = None tracking_plugins: Optional[List[Any]] = None - + remote_eval: bool = False + global_attributes: Dict[str, Any] = field(default_factory=dict) + forced_features: Dict[str, Any] = field(default_factory=dict) @dataclass class GlobalContext: diff --git a/growthbook/core.py b/growthbook/core.py index 61e94c1..e8ab99b 100644 --- a/growthbook/core.py +++ b/growthbook/core.py @@ -6,7 +6,6 @@ from typing import Callable, Optional, Any, Set, Tuple, List, Dict from .common_types import EvaluationContext, FeatureResult, Experiment, Filter, Result, UserContext, VariationMeta - logger = logging.getLogger("growthbook.core") def evalCondition(attributes: Dict[str, Any], condition: Dict[str, Any], savedGroups: Optional[Dict[str, Any]] = None) -> bool: @@ -299,7 +298,7 @@ def _isIncludedInRollout( def _isFilteredOut(filters: List[Filter], eval_context: EvaluationContext) -> bool: for filter in filters: - (_, hash_value) = _getHashValue(attr=filter.get("attribute", "id"), eval_context=eval_context) + (_, hash_value) = 
_getHashValue(attr=filter.get("attribute", "id"), eval_context=eval_context) if hash_value == "": return False @@ -420,7 +419,7 @@ def eval_feature( if evalContext is None: raise ValueError("evalContext is required - eval_feature") - + if key not in evalContext.global_ctx.features: logger.warning("Unknown feature %s", key) return FeatureResult(None, "unknownFeature") @@ -428,7 +427,7 @@ def eval_feature( if key in evalContext.stack.evaluated_features: logger.warning("Cyclic prerequisite detected, stack: %s", evalContext.stack.evaluated_features) return FeatureResult(None, "cyclicPrerequisite") - + evalContext.stack.evaluated_features.add(key) feature = evalContext.global_ctx.features[key] @@ -479,6 +478,14 @@ def eval_feature( ) continue + tracks = rule.tracks + + if tracks and tracking_cb: + for track in tracks: + tracked_experiment = track.experiment + tracked_experiment_result = track.result + tracking_cb(tracked_experiment, tracked_experiment_result, evalContext.user) + logger.debug("Force value from rule, feature %s", key) return FeatureResult(rule.force, "force", ruleId=rule.id) @@ -540,7 +547,7 @@ def eval_prereqs(parentConditions: List[dict], evalContext: EvaluationContext) - parent_id = parentCondition.get("id") if parent_id is None: continue # Skip if no valid ID - + parentRes = eval_feature(key=parent_id, evalContext=evalContext) if parentRes.source == "cyclicPrerequisite": @@ -549,7 +556,7 @@ def eval_prereqs(parentConditions: List[dict], evalContext: EvaluationContext) - parent_condition = parentCondition.get("condition") if parent_condition is None: continue # Skip if no valid condition - + if not evalCondition({'value': parentRes.value}, parent_condition, evalContext.global_ctx.saved_groups): if parentCondition.get("gate", False): return "gate" @@ -558,7 +565,7 @@ def eval_prereqs(parentConditions: List[dict], evalContext: EvaluationContext) - def _get_sticky_bucket_experiment_key(experiment_key: str, bucket_version: int = 0) -> str: return experiment_key + "__" + str(bucket_version) - + def _get_sticky_bucket_assignments(evalContext: EvaluationContext, attr: Optional[str] = None, fallback: Optional[str] = None) -> Dict[str, str]: @@ -631,9 +638,9 @@ def _get_sticky_bucket_variation( return {'variation': variation} -def run_experiment(experiment: Experiment, - featureId: Optional[str] = None, - evalContext: Optional[EvaluationContext] = None, +def run_experiment(experiment: Experiment, + featureId: Optional[str] = None, + evalContext: Optional[EvaluationContext] = None, tracking_cb: Optional[Callable[[Experiment, Result, UserContext], None]] = None ) -> Result: if evalContext is None: @@ -890,7 +897,7 @@ def _generate_sticky_bucket_assignment_doc(attribute_name: str, attribute_value: }, 'changed': changed } - + def _getExperimentResult( experiment: Experiment, evalContext: EvaluationContext, @@ -924,4 +931,4 @@ def _getExperimentResult( meta=meta, bucket=bucket, stickyBucketUsed=stickyBucketUsed - ) \ No newline at end of file + ) diff --git a/growthbook/growthbook.py b/growthbook/growthbook.py index 3706dfa..991b2b6 100644 --- a/growthbook/growthbook.py +++ b/growthbook/growthbook.py @@ -11,17 +11,18 @@ import logging import warnings +from collections import OrderedDict from abc import ABC, abstractmethod from typing import Optional, Any, Set, Tuple, List, Dict, Callable -from .common_types import ( EvaluationContext, - Experiment, - FeatureResult, +from .common_types import ( EvaluationContext, + Experiment, + FeatureResult, Feature, - GlobalContext, - Options, - Result, 
StackContext, - UserContext, + GlobalContext, + Options, + Result, StackContext, + UserContext, AbstractStickyBucketService, FeatureRule ) @@ -158,7 +159,7 @@ def disconnect(self, timeout=10): """Gracefully disconnect with timeout""" logger.debug("Initiating SSE client disconnect") self.is_running = False - + if self._loop and self._loop.is_running(): future = asyncio.run_coroutine_threadsafe(self._stop_session(timeout), self._loop) try: @@ -189,11 +190,11 @@ def _get_sse_url(self, api_host: str, client_key: str) -> str: async def _init_session(self): url = self._get_sse_url(self.api_host, self.client_key) - + try: while self.is_running: try: - async with aiohttp.ClientSession(headers=self.headers, + async with aiohttp.ClientSession(headers=self.headers, timeout=aiohttp.ClientTimeout(connect=self.timeout)) as session: self._sse_session = session @@ -234,7 +235,7 @@ async def _process_response(self, response): if not self.is_running: logger.debug("SSE processing stopped - is_running is False") break - + decoded_line = line.decode('utf-8').strip() if decoded_line.startswith("event:"): event_data['type'] = decoded_line[len("event:"):].strip() @@ -247,7 +248,7 @@ async def _process_response(self, response): except Exception as e: logger.warning(f"Error in event handler: {e}") event_data = {} - + # Process any remaining event data if 'type' in event_data and 'data' in event_data: try: @@ -276,7 +277,7 @@ async def _close_session(self): def _run_sse_channel(self): self._loop = asyncio.new_event_loop() - + try: self._loop.run_until_complete(self._init_session()) except asyncio.CancelledError: @@ -288,7 +289,7 @@ def _run_sse_channel(self): async def _stop_session(self, timeout=10): """Stop the SSE session and cancel all tasks with timeout""" logger.debug("Stopping SSE session") - + # Close the session first if self._sse_session and not self._sse_session.closed: try: @@ -301,15 +302,15 @@ async def _stop_session(self, timeout=10): if self._loop and self._loop.is_running(): try: # Get all tasks for this specific loop - tasks = [task for task in asyncio.all_tasks(self._loop) + tasks = [task for task in asyncio.all_tasks(self._loop) if not task.done() and task is not asyncio.current_task(self._loop)] - + if tasks: logger.debug(f"Cancelling {len(tasks)} SSE tasks") # Cancel all tasks for task in tasks: task.cancel() - + # Wait for tasks to complete with timeout try: await asyncio.wait_for( @@ -324,22 +325,18 @@ async def _stop_session(self, timeout=10): except Exception as e: logger.warning(f"Error during SSE task cleanup: {e}") -from collections import OrderedDict - -# ... 
(imports) - class FeatureRepository(object): def __init__(self) -> None: self.cache: AbstractFeatureCache = InMemoryFeatureCache() self.http: Optional[PoolManager] = None self.sse_client: Optional[SSEClient] = None self._feature_update_callbacks: List[Callable[[Dict], None]] = [] - + # Background refresh support self._refresh_thread: Optional[threading.Thread] = None self._refresh_stop_event = threading.Event() self._refresh_lock = threading.Lock() - + # ETag cache for bandwidth optimization # Using OrderedDict for LRU cache (max 100 entries) self._etag_cache: OrderedDict[str, Tuple[str, Dict[str, Any]]] = OrderedDict() @@ -375,16 +372,16 @@ def _notify_feature_update_callbacks(self, features_data: Dict) -> None: # Loads features with an in-memory cache in front using stale-while-revalidate approach def load_features( - self, api_host: str, client_key: str, decryption_key: str = "", ttl: int = 600 + self, api_host: str, client_key: str, decryption_key: str = "", ttl: int = 600, remote_eval: bool = False, payload: Optional[Dict[str, Any]] = None ) -> Optional[Dict]: if not client_key: raise ValueError("Must specify `client_key` to refresh features") - + key = api_host + "::" + client_key cached = self.cache.get(key) if not cached: - res = self._fetch_features(api_host, client_key, decryption_key) + res = self._fetch_features(api_host, client_key, decryption_key, remote_eval, payload) if res is not None: self.cache.set(key, res, ttl) logger.debug("Fetched features from API, stored in cache") @@ -392,16 +389,16 @@ def load_features( self._notify_feature_update_callbacks(res) return res return cached - - + async def load_features_async( - self, api_host: str, client_key: str, decryption_key: str = "", ttl: int = 600 + self, api_host: str, client_key: str, decryption_key: str = "", ttl: int = 600, remote_eval: bool = False, + payload: Optional[Dict[str, Any]] = None ) -> Optional[Dict]: key = api_host + "::" + client_key cached = self.cache.get(key) if not cached: - res = await self._fetch_features_async(api_host, client_key, decryption_key) + res = await self._fetch_features_async(api_host, client_key, decryption_key, remote_eval, payload) if res is not None: self.cache.set(key, res, ttl) logger.debug("Fetched features from API, stored in cache") @@ -409,11 +406,11 @@ async def load_features_async( self._notify_feature_update_callbacks(res) return res return cached - + @property def user_agent_suffix(self) -> Optional[str]: return getattr(self, "_user_agent_suffix", None) - + @user_agent_suffix.setter def user_agent_suffix(self, value: Optional[str]) -> None: self._user_agent_suffix = value @@ -422,23 +419,53 @@ def user_agent_suffix(self, value: Optional[str]) -> None: def _get(self, url: str, headers: Optional[Dict[str, str]] = None): self.http = self.http or PoolManager() return self.http.request("GET", url, headers=headers or {}) - + def _get_headers(self, client_key: str, existing_headers: Dict[str, str] = None) -> Dict[str, str]: headers = existing_headers or {} headers['Accept-Encoding'] = "gzip, deflate" - + # Add User-Agent with optional suffix ua = "Gb-Python" ua += f"-{self.user_agent_suffix}" if self.user_agent_suffix else f"-{client_key[-4:]}" headers['User-Agent'] = ua - + return headers + def _post(self, url: str, payload: Dict, headers: Optional[Dict] = None): + self.http = self.http or PoolManager() + encoded_body = json.dumps(payload).encode("utf-8") + return self.http.request( + "POST", + url, + body=encoded_body, + headers=headers or {"Content-Type": "application/json"}, + 
) + + def _fetch_and_decode_post(self, api_host: str, client_key: str, payload: Dict) -> Optional[Dict]: + try: + r = self._post( + self._get_features_url(api_host, client_key, remote_eval=True), + payload=payload + ) + + if r.status >= 400: + logger.warning( + "Failed to fetch features, received status code %d", r.status + ) + return None + + decoded = json.loads(r.data.decode("utf-8")) + return decoded # type: ignore[no-any-return] + + except Exception as e: + logger.warning("Failed to decode feature JSON from API: %s", e) + return None + def _fetch_and_decode(self, api_host: str, client_key: str) -> Optional[Dict]: url = self._get_features_url(api_host, client_key) headers = self._get_headers(client_key) logger.debug(f"Fetching features from {url} with headers {headers}") - + # Check if we have a cached ETag for this URL cached_etag = None cached_data = None @@ -451,10 +478,10 @@ def _fetch_and_decode(self, api_host: str, client_key: str) -> Optional[Dict]: logger.debug(f"Using cached ETag for request: {cached_etag[:20]}...") else: logger.debug(f"No ETag cache found for URL: {url}") - + try: r = self._get(url, headers) - + # Handle 304 Not Modified - content hasn't changed if r.status == 304: logger.debug(f"ETag match! Server returned 304 Not Modified - using cached data (saved bandwidth)") @@ -464,15 +491,15 @@ def _fetch_and_decode(self, api_host: str, client_key: str) -> Optional[Dict]: else: logger.warning("Received 304 but no cached data available") return None - + if r.status >= 400: logger.warning( "Failed to fetch features, received status code %d", r.status ) return None - + decoded = json.loads(r.data.decode("utf-8")) - + # Store the new ETag if present response_etag = r.headers.get('ETag') if response_etag: @@ -481,7 +508,7 @@ def _fetch_and_decode(self, api_host: str, client_key: str) -> Optional[Dict]: # Enforce max size if len(self._etag_cache) > self._max_etag_entries: self._etag_cache.popitem(last=False) - + if cached_etag: logger.debug(f"ETag updated: {cached_etag[:20]}... -> {response_etag[:20]}...") else: @@ -489,17 +516,17 @@ def _fetch_and_decode(self, api_host: str, client_key: str) -> Optional[Dict]: logger.debug(f"ETag cache now contains {len(self._etag_cache)} entries") else: logger.debug("No ETag header in response") - + return decoded # type: ignore[no-any-return] except Exception as e: logger.error(f"Failed to decode feature JSON from GrowthBook API: {e}") return None - + async def _fetch_and_decode_async(self, api_host: str, client_key: str) -> Optional[Dict]: url = self._get_features_url(api_host, client_key) headers = self._get_headers(client_key=client_key) logger.debug(f"[Async] Fetching features from {url} with headers {headers}") - + # Check if we have a cached ETag for this URL cached_etag = None cached_data = None @@ -512,26 +539,27 @@ async def _fetch_and_decode_async(self, api_host: str, client_key: str) -> Optio logger.debug(f"[Async] Using cached ETag for request: {cached_etag[:20]}...") else: logger.debug(f"[Async] No ETag cache found for URL: {url}") - + try: async with aiohttp.ClientSession() as session: async with session.get(url, headers=headers) as response: # Handle 304 Not Modified - content hasn't changed if response.status == 304: - logger.debug(f"[Async] ETag match! Server returned 304 Not Modified - using cached data (saved bandwidth)") + logger.debug( + f"[Async] ETag match! 
Server returned 304 Not Modified - using cached data (saved bandwidth)") if cached_data is not None: logger.debug(f"[Async] Returning cached response ({len(str(cached_data))} bytes)") return cached_data else: logger.warning("[Async] Received 304 but no cached data available") return None - + if response.status >= 400: logger.warning("Failed to fetch features, received status code %d", response.status) return None - + decoded = await response.json() - + # Store the new ETag if present response_etag = response.headers.get('ETag') if response_etag: @@ -540,15 +568,16 @@ async def _fetch_and_decode_async(self, api_host: str, client_key: str) -> Optio # Enforce max size if len(self._etag_cache) > self._max_etag_entries: self._etag_cache.popitem(last=False) - + if cached_etag: logger.debug(f"[Async] ETag updated: {cached_etag[:20]}... -> {response_etag[:20]}...") else: - logger.debug(f"[Async] New ETag cached: {response_etag[:20]}... ({len(str(decoded))} bytes)") + logger.debug( + f"[Async] New ETag cached: {response_etag[:20]}... ({len(str(decoded))} bytes)") logger.debug(f"[Async] ETag cache now contains {len(self._etag_cache)} entries") else: logger.debug("[Async] No ETag header in response") - + return decoded # type: ignore[no-any-return] except aiohttp.ClientError as e: logger.warning(f"HTTP request failed: {e}") @@ -556,7 +585,28 @@ async def _fetch_and_decode_async(self, api_host: str, client_key: str) -> Optio except Exception as e: logger.error(f"Failed to decode feature JSON from GrowthBook API: {e}") return None - + + async def _fetch_and_decode_post_async(self, api_host: str, client_key: str, payload: Dict) -> Optional[Dict]: + try: + url = self._get_features_url(api_host, client_key, remote_eval=True) + + async with aiohttp.ClientSession() as session: + async with session.post(url, json=payload) as response: + if response.status >= 400: + logger.warning("Failed to fetch features for remote evaluation, received status code %d", response.status) + return None + + # aiohttp's .json() method decodes the JSON response + decoded = await response.json() + return decoded # type: ignore[no-any-return] + + except aiohttp.ClientError as e: + logger.warning(f"HTTP request failed: {e}") + return None + except Exception as e: + logger.warning(f"Failed to decode feature JSON from API in _fetch_and_decode_post_async function: {e}") + return None + def decrypt_response(self, data, decryption_key: str): if "encryptedFeatures" in data: if not decryption_key: @@ -572,7 +622,7 @@ def decrypt_response(self, data, decryption_key: str): return None elif "features" not in data: logger.warning("GrowthBook API response missing features") - + if "encryptedSavedGroups" in data: if not decryption_key: raise ValueError("Must specify decryption_key") @@ -585,25 +635,41 @@ def decrypt_response(self, data, decryption_key: str): logger.warning( "Failed to decrypt saved groups from GrowthBook API response" ) - + return data # Fetch features from the GrowthBook API def _fetch_features( - self, api_host: str, client_key: str, decryption_key: str = "" + self, api_host: str, client_key: str, decryption_key: str = "", remote_eval: bool = False, payload: Optional[Dict[str, Any]] = None, ) -> Optional[Dict]: - decoded = self._fetch_and_decode(api_host, client_key) + + if remote_eval: + if not payload: + logger.error("Payload is required for remote_eval POST request.") + return None + decoded = self._fetch_and_decode_post(api_host, client_key, payload) + else: + decoded = self._fetch_and_decode(api_host, client_key) + if not 
decoded: return None data = self.decrypt_response(decoded, decryption_key) return data # type: ignore[no-any-return] - + async def _fetch_features_async( - self, api_host: str, client_key: str, decryption_key: str = "" + self, api_host: str, client_key: str, decryption_key: str = "", remote_eval: bool = False, + payload: Optional[Dict[str, Any]] = None ) -> Optional[Dict]: - decoded = await self._fetch_and_decode_async(api_host, client_key) + if remote_eval: + if not payload: + logger.error("Payload is required for remote_eval POST request.") + return None + decoded = await self._fetch_and_decode_post_async(api_host, client_key, payload) + else: + decoded = await self._fetch_and_decode_async(api_host, client_key) + if not decoded: return None @@ -623,7 +689,7 @@ def stopAutoRefresh(self, timeout=10): if self.sse_client: self.sse_client.disconnect(timeout=timeout) self.sse_client = None - + def start_background_refresh(self, api_host: str, client_key: str, decryption_key: str, ttl: int = 600, refresh_interval: int = 300) -> None: """Start periodic background refresh task""" @@ -633,7 +699,7 @@ def start_background_refresh(self, api_host: str, client_key: str, decryption_ke with self._refresh_lock: if self._refresh_thread is not None: return # Already running - + self._refresh_stop_event.clear() self._refresh_thread = threading.Thread( target=self._background_refresh_worker, @@ -642,7 +708,7 @@ def start_background_refresh(self, api_host: str, client_key: str, decryption_ke ) self._refresh_thread.start() logger.debug("Started background refresh task") - + def _background_refresh_worker(self, api_host: str, client_key: str, decryption_key: str, ttl: int, refresh_interval: int) -> None: """Worker method for periodic background refresh""" while not self._refresh_stop_event.is_set(): @@ -650,7 +716,7 @@ def _background_refresh_worker(self, api_host: str, client_key: str, decryption_ # Wait for the refresh interval or stop event if self._refresh_stop_event.wait(refresh_interval): break # Stop event was set - + logger.debug("Background refresh for Features - started") res = self._fetch_features(api_host, client_key, decryption_key) if res is not None: @@ -663,11 +729,11 @@ def _background_refresh_worker(self, api_host: str, client_key: str, decryption_ logger.warning("Background refresh failed") except Exception as e: logger.warning(f"Background refresh error: {e}") - + def stop_background_refresh(self) -> None: """Stop background refresh task""" self._refresh_stop_event.set() - + with self._refresh_lock: if self._refresh_thread is not None: self._refresh_thread.join(timeout=1.0) # Wait up to 1 second @@ -675,9 +741,10 @@ def stop_background_refresh(self) -> None: logger.debug("Stopped background refresh task") @staticmethod - def _get_features_url(api_host: str, client_key: str) -> str: + def _get_features_url(api_host: str, client_key: str, remote_eval: bool = False) -> str: api_host = (api_host or "https://cdn.growthbook.io").rstrip("/") - return api_host + "/api/features/" + client_key + remote_eval_path = api_host + "/api/eval/" + client_key + return remote_eval_path if remote_eval else api_host + "/api/features/" + client_key # Singleton instance @@ -714,6 +781,8 @@ def __init__( groups: dict = {}, overrides: dict = {}, forcedVariations: dict = {}, + remoteEval: bool = False, + payload: Optional[Dict[str, Any]] = None ): self._enabled = enabled self._attributes = attributes @@ -755,6 +824,9 @@ def __init__( self._plugins: List[Any] = plugins if plugins is not None else [] 
self._initialized_plugins: List[Any] = [] + self._remoteEval = remoteEval + self._payload = payload + self._global_ctx = GlobalContext( options=Options( url=self._url, @@ -769,7 +841,7 @@ def __init__( ), features={}, saved_groups=self._saved_groups - ) + ) # Create a user context for the current user self._user_ctx: UserContext = UserContext( url=self._url, @@ -797,7 +869,7 @@ def __init__( # Start background refresh task for stale-while-revalidate self.load_features() # Initial load feature_repo.start_background_refresh( - self._api_host, self._client_key, self._decryption_key, + self._api_host, self._client_key, self._decryption_key, self._cache_ttl, self._stale_ttl ) @@ -810,7 +882,7 @@ def _on_feature_update(self, features_data: Dict) -> None: def load_features(self) -> None: response = feature_repo.load_features( - self._api_host, self._client_key, self._decryption_key, self._cache_ttl + self._api_host, self._client_key, self._decryption_key, self._cache_ttl, self._remoteEval, self._payload ) if response is not None and "features" in response.keys(): self.setFeatures(response["features"]) @@ -837,7 +909,7 @@ def _features_event_handler(self, features): decoded = json.loads(features) if not decoded: return None - + data = feature_repo.decrypt_response(decoded, self._decryption_key) if data is not None: @@ -859,9 +931,9 @@ def _dispatch_sse_event(self, event_data): def startAutoRefresh(self): if not self._client_key: raise ValueError("Must specify `client_key` to start features streaming") - + feature_repo.startAutoRefresh( - api_host=self._api_host, + api_host=self._api_host, client_key=self._client_key, cb=self._dispatch_sse_event, streaming_timeout=self._streaming_timeout @@ -916,6 +988,10 @@ def set_attributes(self, attributes: dict) -> None: self._attributes = attributes self.refresh_sticky_buckets() + def set_payload(self, payload: Optional[Dict[str, Any]] = None) -> None: + self._payload = payload + + # @deprecated, use get_attributes def getAttributes(self) -> dict: warnings.warn("getAttributes is deprecated, use get_attributes instead", DeprecationWarning) return self.get_attributes() @@ -926,34 +1002,34 @@ def get_attributes(self) -> dict: def destroy(self, timeout=10) -> None: """Gracefully destroy the GrowthBook instance""" logger.debug("Starting GrowthBook destroy process") - + try: # Clean up plugins logger.debug("Cleaning up plugins") self._cleanup_plugins() except Exception as e: logger.warning(f"Error cleaning up plugins: {e}") - + try: logger.debug("Stopping auto refresh during destroy") self.stopAutoRefresh(timeout=timeout) except Exception as e: logger.warning(f"Error stopping auto refresh during destroy: {e}") - + try: # Stop background refresh operations if self._stale_while_revalidate and self._client_key: feature_repo.stop_background_refresh() except Exception as e: logger.warning(f"Error stopping background refresh during destroy: {e}") - + try: # Clean up feature update callback if self._client_key: feature_repo.remove_feature_update_callback(self._on_feature_update) except Exception as e: logger.warning(f"Error removing feature update callback: {e}") - + # Clear all internal state try: self._subscriptions.clear() @@ -995,14 +1071,14 @@ def get_feature_value(self, key: str, fallback): def evalFeature(self, key: str) -> FeatureResult: warnings.warn("evalFeature is deprecated, use eval_feature instead", DeprecationWarning) return self.eval_feature(key) - + def _ensure_fresh_features(self) -> None: """Lazy refresh: Check cache expiry and refresh if needed, but 
only if client_key is provided""" - + # Prevent infinite recursion when updating features (e.g., during sticky bucket refresh) if self._is_updating_features: return - + if self._streaming or self._stale_while_revalidate or not self._client_key: return # Skip cache checks - SSE or background refresh handles freshness @@ -1014,7 +1090,7 @@ def _ensure_fresh_features(self) -> None: def _get_eval_context(self) -> EvaluationContext: # Lazy refresh: ensure features are fresh before evaluation self._ensure_fresh_features() - + # use the latest attributes for every evaluation. self._user_ctx.attributes = self._attributes self._user_ctx.url = self._url @@ -1028,8 +1104,8 @@ def _get_eval_context(self) -> EvaluationContext: ) def eval_feature(self, key: str) -> FeatureResult: - result = core_eval_feature(key=key, - evalContext=self._get_eval_context(), + result = core_eval_feature(key=key, + evalContext=self._get_eval_context(), callback_subscription=self._fireSubscriptions, tracking_cb=self._track ) @@ -1068,7 +1144,7 @@ def _fireSubscriptions(self, experiment: Experiment, result: Result): def run(self, experiment: Experiment) -> Result: # result = self._run(experiment) - result = run_experiment(experiment=experiment, + result = run_experiment(experiment=experiment, evalContext=self._get_eval_context(), tracking_cb=self._track ) @@ -1084,10 +1160,10 @@ def _track(self, experiment: Experiment, result: Result, user_context: UserConte if not self._trackingCallback: return None key = ( - result.hashAttribute - + str(result.hashValue) - + experiment.key - + str(result.variationId) + (result.hashAttribute or "") + + str(result.hashValue or "") + + (experiment.key or "") + + str(result.variationId or "") ) if not self._tracked.get(key): try: @@ -1157,7 +1233,7 @@ def _initialize_plugins(self) -> None: def user_agent_suffix(self) -> Optional[str]: """Get the suffix appended to the User-Agent header""" return feature_repo.user_agent_suffix - + @user_agent_suffix.setter def user_agent_suffix(self, value: Optional[str]) -> None: """Set a suffix to be appended to the User-Agent header""" diff --git a/growthbook/growthbook_client.py b/growthbook/growthbook_client.py index 70cf6c3..721318c 100644 --- a/growthbook/growthbook_client.py +++ b/growthbook/growthbook_client.py @@ -43,9 +43,9 @@ def __call__(cls, *args, **kwargs): class BackoffStrategy: """Exponential backoff with jitter for failed requests""" def __init__( - self, - initial_delay: float = 1.0, - max_delay: float = 60.0, + self, + initial_delay: float = 1.0, + max_delay: float = 60.0, multiplier: float = 2.0, jitter: float = 0.1 ): @@ -59,7 +59,7 @@ def __init__( def next_delay(self) -> float: """Calculate next delay with jitter""" delay = min( - self.current_delay * (self.multiplier ** self.attempt), + self.current_delay * (self.multiplier ** self.attempt), self.max_delay ) # Add random jitter @@ -252,7 +252,7 @@ async def refresh_loop() -> None: async def start_feature_refresh(self, strategy: FeatureRefreshStrategy, callback=None): """Initialize feature refresh based on strategy""" self._refresh_callback = callback - + if strategy == FeatureRefreshStrategy.SERVER_SENT_EVENTS: await self._start_sse_refresh() else: @@ -281,31 +281,37 @@ async def __aenter__(self): async def __aexit__(self, exc_type, exc_val, exc_tb): await self.stop_refresh() - + async def load_features_async( - self, api_host: str, client_key: str, decryption_key: str = "", ttl: int = 60 + self, + api_host: str, + client_key: str, + decryption_key: str = "", + ttl: int = 60, + 
remote_eval: bool = False, + payload: Optional[Dict[str, Any]] = None ) -> Optional[Dict]: # Use stored values when called internally if api_host == self._api_host and client_key == self._client_key: decryption_key = self._decryption_key ttl = self._cache_ttl - return await super().load_features_async(api_host, client_key, decryption_key, ttl) + return await super().load_features_async(api_host, client_key, decryption_key, ttl, remote_eval, payload) class GrowthBookClient: def __init__( self, options: Optional[Union[Dict[str, Any], Options]] = None - ): + ): self.options = ( options if isinstance(options, Options) else Options(**options) if options else Options() ) - + # Thread-safe tracking state self._tracked: Dict[str, bool] = {} # Access only within async context self._tracked_lock = threading.Lock() - + # Thread-safe subscription management self._subscriptions: Set[Callable[[Experiment, Result], None]] = set() self._subscriptions_lock = threading.Lock() @@ -316,25 +322,25 @@ def __init__( 'assignments': {} } self._sticky_bucket_cache_lock = False - + # Plugin support self._tracking_plugins: List[Any] = self.options.tracking_plugins or [] self._initialized_plugins: List[Any] = [] - + self._features_repository = ( EnhancedFeatureRepository( - self.options.api_host or "https://cdn.growthbook.io", - self.options.client_key or "", - self.options.decryption_key or "", + self.options.api_host or "https://cdn.growthbook.io", + self.options.client_key or "", + self.options.decryption_key or "", self.options.cache_ttl ) if self.options.client_key else None ) - + self._global_context: Optional[GlobalContext] = None self._context_lock = asyncio.Lock() - + # Initialize plugins self._initialize_plugins() @@ -346,10 +352,10 @@ def _track(self, experiment: Experiment, result: Result, user_context: UserConte # Create unique key for this tracking event key = ( - result.hashAttribute - + str(result.hashValue) - + experiment.key - + str(result.variationId) + (result.hashAttribute or "") + + str(result.hashValue or "") + + (experiment.key or "") + + str(result.variationId or "") ) with self._tracked_lock: @@ -383,8 +389,7 @@ def _fire_subscriptions(self, experiment: Experiment, result: Result) -> None: async def set_features(self, features: dict) -> None: await self._feature_update_callback({"features": features}) - - + async def _refresh_sticky_buckets(self, attributes: Dict[str, Any]) -> Dict[str, Any]: """Refresh sticky bucket assignments only if attributes have changed""" if not self.options.sticky_bucket_service: @@ -394,7 +399,7 @@ async def _refresh_sticky_buckets(self, attributes: Dict[str, Any]) -> Dict[str, while not self._sticky_bucket_cache_lock: if attributes == self._sticky_bucket_cache['attributes']: return self._sticky_bucket_cache['assignments'] - + self._sticky_bucket_cache_lock = True try: assignments = self.options.sticky_bucket_service.get_all_assignments(attributes) @@ -403,7 +408,7 @@ async def _refresh_sticky_buckets(self, attributes: Dict[str, Any]) -> Dict[str, return assignments finally: self._sticky_bucket_cache_lock = False - + # Fallback return for edge case where loop condition is never satisfied return {} @@ -414,12 +419,31 @@ async def initialize(self) -> bool: return False try: + payload = None + + if self.options.remote_eval: + forced_features_for_payload = [] + if self.options.forced_features: + forced_features_for_payload = [ + [key, value] + for key, value in self.options.forced_features.items() + ] + + payload = { + "attributes": self.options.global_attributes, + 
"forcedFeatures": forced_features_for_payload, + "forcedVariations": self.options.forced_variations, + "url": self.options.api_host + } + # Initial feature load initial_features = await self._features_repository.load_features_async( - self.options.api_host or "https://cdn.growthbook.io", - self.options.client_key or "", - self.options.decryption_key or "", - self.options.cache_ttl + self.options.api_host or "https://cdn.growthbook.io", + self.options.client_key or "", + self.options.decryption_key or "", + self.options.cache_ttl, + self.options.remote_eval, + payload ) if not initial_features: logger.error("Failed to load initial features") @@ -427,15 +451,15 @@ async def initialize(self) -> bool: # Create global context with initial features await self._feature_update_callback(initial_features) - + # Set up callback for future updates self._features_repository.add_callback(self._feature_update_callback) - + # Start feature refresh refresh_strategy = self.options.refresh_strategy or FeatureRefreshStrategy.STALE_WHILE_REVALIDATE await self._features_repository.start_feature_refresh(refresh_strategy) return True - + except Exception as e: logger.error(f"Initialization failed: {str(e)}", exc_info=True) traceback.print_exc() @@ -482,10 +506,10 @@ async def create_evaluation_context(self, user_context: UserContext) -> Evaluati """Create evaluation context for feature evaluation""" if self._global_context is None: raise RuntimeError("GrowthBook client not properly initialized") - + # Get sticky bucket assignments if needed sticky_assignments = await self._refresh_sticky_buckets(user_context.attributes) - + # update user context with sticky bucket assignments user_context.sticky_bucket_assignment_docs = sticky_assignments @@ -512,6 +536,7 @@ async def is_on(self, key: str, user_context: UserContext) -> bool: """Check if a feature is enabled with proper async context management""" async with self._context_lock: context = await self.create_evaluation_context(user_context) + result = core_eval_feature(key=key, evalContext=context, tracking_cb=self._track) # Call feature usage callback if provided if self.options.on_feature_usage: @@ -520,7 +545,7 @@ async def is_on(self, key: str, user_context: UserContext) -> bool: except Exception: logger.exception("Error in feature usage callback") return result.on - + async def is_off(self, key: str, user_context: UserContext) -> bool: """Check if a feature is set to off with proper async context management""" async with self._context_lock: @@ -533,7 +558,7 @@ async def is_off(self, key: str, user_context: UserContext) -> bool: except Exception: logger.exception("Error in feature usage callback") return result.off - + async def get_feature_value(self, key: str, fallback: Any, user_context: UserContext) -> Any: async with self._context_lock: context = await self.create_evaluation_context(user_context) @@ -551,14 +576,14 @@ async def run(self, experiment: Experiment, user_context: UserContext) -> Result async with self._context_lock: context = await self.create_evaluation_context(user_context) result = run_experiment( - experiment=experiment, + experiment=experiment, evalContext=context, tracking_cb=self._track ) # Fire subscriptions synchronously self._fire_subscriptions(experiment, result) return result - + async def close(self) -> None: """Clean shutdown with proper cleanup""" if self._features_repository: @@ -572,7 +597,7 @@ async def close(self) -> None: # Clear context async with self._context_lock: self._global_context = None - + # Cleanup plugins 
self._cleanup_plugins() @@ -614,4 +639,4 @@ def _cleanup_plugins(self) -> None: logger.debug(f"Cleaned up plugin: {plugin.__class__.__name__}") except Exception as e: logger.error(f"Error cleaning up plugin {plugin}: {e}") - self._initialized_plugins.clear() \ No newline at end of file + self._initialized_plugins.clear() diff --git a/tests/test_growthbook.py b/tests/test_growthbook.py index cbfd0a6..69f61ad 100644 --- a/tests/test_growthbook.py +++ b/tests/test_growthbook.py @@ -2,6 +2,8 @@ import json import os +from typing import Optional, Any, Dict + from growthbook import ( FeatureRule, GrowthBook, @@ -10,7 +12,7 @@ InMemoryStickyBucketService, decrypt, feature_repo, - logger, + logger, FeatureRepository, ) from growthbook.core import ( @@ -160,7 +162,7 @@ def test_stickyBucket(stickyBucket_data): gb = GrowthBook(**ctx) res = gb.eval_feature(key) - + if not res.experimentResult: assert None == expected_result else: @@ -218,10 +220,10 @@ def test_tracking(): def test_feature_usage_callback(): """Test that feature usage callback is called correctly""" calls = [] - + def feature_usage_cb(key, result, user_context): calls.append([key, result, user_context]) - + gb = GrowthBook( attributes={"id": "1"}, on_feature_usage=feature_usage_cb, @@ -236,7 +238,7 @@ def feature_usage_cb(key, result, user_context): ), } ) - + # Test eval_feature result1 = gb.eval_feature("feature-1") assert len(calls) == 1 @@ -244,14 +246,14 @@ def feature_usage_cb(key, result, user_context): assert calls[0][1].value is True assert calls[0][1].source == "defaultValue" assert calls[0][2].attributes == {"id": "1"} - + # Test is_on gb.is_on("feature-2") assert len(calls) == 2 assert calls[1][0] == "feature-2" assert calls[1][1].value is False assert calls[1][2].attributes == {"id": "1"} - + # Test get_feature_value value = gb.get_feature_value("feature-3", "blue") assert len(calls) == 3 @@ -259,27 +261,27 @@ def feature_usage_cb(key, result, user_context): assert calls[2][1].value == "red" assert value == "red" assert calls[2][2].attributes == {"id": "1"} - + # Test is_off gb.is_off("feature-1") assert len(calls) == 4 assert calls[3][0] == "feature-1" assert calls[3][2].attributes == {"id": "1"} - + # Calling same feature multiple times should trigger callback each time gb.eval_feature("feature-1") gb.eval_feature("feature-1") assert len(calls) == 6 - + gb.destroy() def test_feature_usage_callback_error_handling(): """Test that feature usage callback errors are handled gracefully""" - + def failing_callback(key, result, user_context): raise Exception("Callback error") - + gb = GrowthBook( attributes={"id": "1"}, on_feature_usage=failing_callback, @@ -287,14 +289,14 @@ def failing_callback(key, result, user_context): "feature-1": Feature(defaultValue=True), } ) - + # Should not raise an error even if callback fails result = gb.eval_feature("feature-1") assert result.value is True - + # Should work with is_on as well assert gb.is_on("feature-1") is True - + gb.destroy() @@ -324,7 +326,7 @@ def test_handles_weird_experiment_values(): def test_skip_all_experiments_flag(): """Test that skip_all_experiments flag prevents users from being put into experiments""" - + # Test with skip_all_experiments=True gb_skip = GrowthBook( attributes={"id": "1"}, @@ -342,22 +344,22 @@ def test_skip_all_experiments_flag(): ) } ) - + # User should NOT be in experiment due to skip_all_experiments flag result = gb_skip.eval_feature("feature-with-experiment") assert result.value == "control" # Should get default value assert result.source == 
"defaultValue" assert result.experiment is None # No experiment should be assigned assert result.experimentResult is None - + # Test running experiment directly exp = Experiment(key="direct-exp", variations=["a", "b"]) exp_result = gb_skip.run(exp) assert exp_result.inExperiment is False assert exp_result.value == "a" # Should get first variation (control) - + gb_skip.destroy() - + # Test with skip_all_experiments=False (default behavior) gb_normal = GrowthBook( attributes={"id": "1"}, @@ -375,13 +377,13 @@ def test_skip_all_experiments_flag(): ) } ) - + # User SHOULD be in experiment normally result_normal = gb_normal.eval_feature("feature-with-experiment") # With id="1", this user should be assigned a variation assert result_normal.value in ["control", "variation"] assert result_normal.source == "experiment" - + gb_normal.destroy() def test_force_variation(): @@ -1087,39 +1089,39 @@ def test_ttl_automatic_feature_refresh(mocker): {"features": {"test_feature": {"defaultValue": False}}, "savedGroups": {}}, {"features": {"test_feature": {"defaultValue": True}}, "savedGroups": {}} ] - + call_count = 0 - def mock_fetch_features(api_host, client_key, decryption_key=""): + def mock_fetch_features(api_host, client_key, decryption_key="", remote_eval: bool = False, payload: Optional[Dict[str, Any]] = None): nonlocal call_count response = mock_responses[min(call_count, len(mock_responses) - 1)] call_count += 1 return response - + # Clear cache and mock the fetch method feature_repo.clear_cache() m = mocker.patch.object(feature_repo, '_fetch_features', side_effect=mock_fetch_features) - + # Create GrowthBook instance with short TTL gb = GrowthBook( api_host="https://cdn.growthbook.io", client_key="test-key", cache_ttl=1 # 1 second TTL for testing ) - + try: # Initial evaluation - should trigger first load assert gb.is_on('test_feature') == False assert call_count == 1 - + # Manually expire the cache by setting expiry time to past cache_key = "https://cdn.growthbook.io::test-key" if hasattr(feature_repo.cache, 'cache') and cache_key in feature_repo.cache.cache: feature_repo.cache.cache[cache_key].expires = time() - 10 - + # Next evaluation should automatically refresh cache and update features assert gb.is_on('test_feature') == True assert call_count == 2 - + finally: gb.destroy() feature_repo.clear_cache() @@ -1131,47 +1133,87 @@ def test_multiple_instances_get_updated_on_cache_expiry(mocker): {"features": {"test_feature": {"defaultValue": "v1"}}, "savedGroups": {}}, {"features": {"test_feature": {"defaultValue": "v2"}}, "savedGroups": {}} ] - + call_count = 0 - def mock_fetch_features(api_host, client_key, decryption_key=""): + def mock_fetch_features(api_host, client_key, decryption_key="", remote_eval: bool = False, payload: Optional[Dict[str, Any]] = None): nonlocal call_count response = mock_responses[min(call_count, len(mock_responses) - 1)] call_count += 1 return response - + feature_repo.clear_cache() m = mocker.patch.object(feature_repo, '_fetch_features', side_effect=mock_fetch_features) - + # Create multiple GrowthBook instances gb1 = GrowthBook(api_host="https://cdn.growthbook.io", client_key="test-key") gb2 = GrowthBook(api_host="https://cdn.growthbook.io", client_key="test-key") - + try: # Initial evaluation from first instance - should trigger first load assert gb1.get_feature_value('test_feature', 'default') == "v1" assert call_count == 1 - + # Second instance should use cached value (no additional API call) assert gb2.get_feature_value('test_feature', 'default') == "v1" assert 
call_count == 1 # Still 1, used cache - + # Manually expire the cache cache_key = "https://cdn.growthbook.io::test-key" if hasattr(feature_repo.cache, 'cache') and cache_key in feature_repo.cache.cache: feature_repo.cache.cache[cache_key].expires = time() - 10 - + # Next evaluation should automatically refresh and notify both instances via callbacks assert gb1.get_feature_value('test_feature', 'default') == "v2" assert call_count == 2 - + # Second instance should also have the updated value due to callbacks assert gb2.get_feature_value('test_feature', 'default') == "v2" - + finally: gb1.destroy() gb2.destroy() feature_repo.clear_cache() +def test_post_request(mocker): + fetcher = feature_repo + mock_http = mocker.Mock() + mock_response = mocker.Mock() + mock_response.status = 200 + mock_response.data = b'{"ok": true}' + + mock_http.request.return_value = mock_response + fetcher.http = mock_http + + result = fetcher._post("https://cdn.growthbook.io/api/eval/abc123", {"foo": "bar"}) + + mock_http.request.assert_called_once_with( + "POST", + "https://cdn.growthbook.io/api/eval/abc123", + body=json.dumps({"foo": "bar"}).encode("utf-8"), + headers={"Content-Type": "application/json"}, + ) + assert result.data == b'{"ok": true}' + +def test_fetch_and_decode_remote_eval(mocker): + fetcher = feature_repo + mock_response = mocker.Mock() + mock_response.status = 200 + mock_response.data = b'{"feature": "value"}' + + mocker.patch.object(fetcher, "_post", return_value=mock_response) + + result = fetcher._fetch_and_decode_post( + "https://cdn.growthbook.io", + "abc123", + payload={"id": "user_1"} + ) + + assert result == {"feature": "value"} + +def test_get_features_url_remote_eval(): + url = feature_repo._get_features_url("https://cdn.growthbook.io", "abc123", remote_eval=True) + assert url == "https://cdn.growthbook.io/api/eval/abc123" + def test_stale_while_revalidate_basic_functionality(mocker): """Test basic stale-while-revalidate functionality""" @@ -1180,18 +1222,18 @@ def test_stale_while_revalidate_basic_functionality(mocker): {"features": {"test_feature": {"defaultValue": "v1"}}, "savedGroups": {}}, {"features": {"test_feature": {"defaultValue": "v2"}}, "savedGroups": {}} ] - + call_count = 0 - def mock_fetch_features(api_host, client_key, decryption_key=""): + def mock_fetch_features(api_host, client_key, decryption_key="", remoteEval = False, payload = None): nonlocal call_count response = mock_responses[min(call_count, len(mock_responses) - 1)] call_count += 1 return response - + # Clear cache and mock the fetch method feature_repo.clear_cache() m = mocker.patch.object(feature_repo, '_fetch_features', side_effect=mock_fetch_features) - + # Create GrowthBook instance with stale-while-revalidate enabled and short refresh interval gb = GrowthBook( api_host="https://cdn.growthbook.io", @@ -1200,22 +1242,22 @@ def mock_fetch_features(api_host, client_key, decryption_key=""): stale_while_revalidate=True, stale_ttl=1 # 1 second refresh interval for testing ) - + try: # Initial evaluation - should use initial loaded data assert gb.get_feature_value('test_feature', 'default') == "v1" assert call_count == 1 # Initial load - + # Wait for background refresh to happen import time as time_module time_module.sleep(1.5) # Wait longer than refresh interval - + # Should have triggered background refresh assert call_count >= 2 - + # Next evaluation should get updated data from background refresh assert gb.get_feature_value('test_feature', 'default') == "v2" - + finally: gb.destroy() 
feature_repo.clear_cache() @@ -1224,17 +1266,17 @@ def mock_fetch_features(api_host, client_key, decryption_key=""): def test_stale_while_revalidate_starts_background_task(mocker): """Test that stale-while-revalidate starts background refresh task""" mock_response = {"features": {"test_feature": {"defaultValue": "fresh"}}, "savedGroups": {}} - + call_count = 0 - def mock_fetch_features(api_host, client_key, decryption_key=""): + def mock_fetch_features(api_host, client_key, decryption_key="", remoteEval = False, payload = None): nonlocal call_count call_count += 1 return mock_response - + # Clear cache and mock the fetch method feature_repo.clear_cache() m = mocker.patch.object(feature_repo, '_fetch_features', side_effect=mock_fetch_features) - + # Create GrowthBook instance with stale-while-revalidate enabled gb = GrowthBook( api_host="https://cdn.growthbook.io", @@ -1242,16 +1284,16 @@ def mock_fetch_features(api_host, client_key, decryption_key=""): stale_while_revalidate=True, stale_ttl=5 ) - + try: # Should have started background refresh task assert feature_repo._refresh_thread is not None assert feature_repo._refresh_thread.is_alive() - + # Initial evaluation should work assert gb.get_feature_value('test_feature', 'default') == "fresh" assert call_count == 1 # Initial load - + finally: gb.destroy() feature_repo.clear_cache() @@ -1259,17 +1301,17 @@ def mock_fetch_features(api_host, client_key, decryption_key=""): def test_stale_while_revalidate_disabled_fallback(mocker): """Test that when stale_while_revalidate is disabled, it falls back to normal behavior""" mock_response = {"features": {"test_feature": {"defaultValue": "normal"}}, "savedGroups": {}} - + call_count = 0 - def mock_fetch_features(api_host, client_key, decryption_key=""): + def mock_fetch_features(api_host, client_key, decryption_key="", remoteEval = False, payload = None): nonlocal call_count call_count += 1 return mock_response - + # Clear cache and mock the fetch method feature_repo.clear_cache() m = mocker.patch.object(feature_repo, '_fetch_features', side_effect=mock_fetch_features) - + # Create GrowthBook instance with stale-while-revalidate disabled (default) gb = GrowthBook( api_host="https://cdn.growthbook.io", @@ -1277,24 +1319,24 @@ def mock_fetch_features(api_host, client_key, decryption_key=""): cache_ttl=1, # Short TTL stale_while_revalidate=False # Explicitly disabled ) - + try: # Should NOT have started background refresh task assert feature_repo._refresh_thread is None - + # Initial evaluation assert gb.get_feature_value('test_feature', 'default') == "normal" assert call_count == 1 - + # Manually expire the cache cache_key = "https://cdn.growthbook.io::test-key" if hasattr(feature_repo.cache, 'cache') and cache_key in feature_repo.cache.cache: feature_repo.cache.cache[cache_key].expires = time() - 10 - + # Next evaluation should fetch synchronously (normal behavior) assert gb.get_feature_value('test_feature', 'default') == "normal" assert call_count == 2 # Should have fetched again - + finally: gb.destroy() feature_repo.clear_cache() @@ -1303,31 +1345,31 @@ def mock_fetch_features(api_host, client_key, decryption_key=""): def test_stale_while_revalidate_cleanup(mocker): """Test that background refresh is properly cleaned up""" mock_response = {"features": {"test_feature": {"defaultValue": "test"}}, "savedGroups": {}} - + # Mock the fetch method feature_repo.clear_cache() m = mocker.patch.object(feature_repo, '_fetch_features', return_value=mock_response) - + # Create GrowthBook instance with 
stale-while-revalidate enabled gb = GrowthBook( api_host="https://cdn.growthbook.io", client_key="test-key", stale_while_revalidate=True ) - + try: # Should have started background refresh task assert feature_repo._refresh_thread is not None assert feature_repo._refresh_thread.is_alive() - + # Destroy should clean up the background task gb.destroy() - + # Background task should be stopped assert feature_repo._refresh_thread is None or not feature_repo._refresh_thread.is_alive() - + finally: # Ensure cleanup even if test fails if feature_repo._refresh_thread: feature_repo.stop_background_refresh() - feature_repo.clear_cache() \ No newline at end of file + feature_repo.clear_cache()
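
Usage sketches for the behavior this diff adds follow; they are illustrative, written against the signatures visible in the diff rather than the full source. First, the new `Result.from_dict` round-trips a plain dict (for example a prior `to_dict()` payload or a remote-eval response): keys missing from the payload come back as `None`, while the meta-derived fields keep their `""`/`False` defaults.

```python
from growthbook.common_types import Result

# A payload shaped like Result.to_dict() output (or a remote-eval response).
data = {
    "variationId": 1,
    "inExperiment": True,
    "value": "treatment",
    "hashUsed": True,
    "hashAttribute": "id",
    "hashValue": "user-123",
    "featureId": "checkout-banner",  # hypothetical feature id
    "key": "1",
    "name": "Treatment",
}

result = Result.from_dict(data)
assert result.key == "1" and result.name == "Treatment"
assert result.passthrough is False  # absent from payload -> default False
assert result.bucket is None        # absent from payload -> None
```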
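The `tracks` field on `FeatureRule` lets a force rule report exposure for an experiment that was evaluated elsewhere: when the rule matches, `eval_feature` fires the tracking callback once per entry. A sketch, assuming `FeatureRule` is constructed with keyword arguments and that `trackingCallback` receives `(experiment, result)` as elsewhere in this SDK:

```python
from growthbook import Feature, FeatureRule, GrowthBook
from growthbook.common_types import Experiment, Result, TrackData

calls = []

# user_context is optional here in case the callback is invoked with the
# user context as a third argument (as the client-side Options hook is).
def on_experiment_viewed(experiment, result, user_context=None):
    calls.append((experiment.key, result.key))

# TrackData pairs an already-evaluated experiment with its Result so a
# forced rule can still report the exposure.
tracked = TrackData(
    experiment=Experiment(key="banner-exp", variations=["a", "b"]),
    result=Result(
        variationId=1, inExperiment=True, value="b", hashUsed=True,
        hashAttribute="id", hashValue="user-1", featureId="banner",
    ),
)

gb = GrowthBook(
    attributes={"id": "user-1"},
    trackingCallback=on_experiment_viewed,
    features={
        "banner": Feature(defaultValue="a", rules=[
            FeatureRule(force="b", tracks=[tracked]),
        ]),
    },
)

assert gb.get_feature_value("banner", "a") == "b"  # force rule applied
assert calls == [("banner-exp", "1")]              # tracks entry fired once
gb.destroy()
```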
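Remote evaluation swaps the GET against `/api/features/<client_key>` for a POST to `/api/eval/<client_key>`. The snippet below shows the URL selection and the payload shape that `_fetch_and_decode_post` expects; `sdk-abc123` and the attribute values are placeholders, and the network call itself is left commented out:

```python
from growthbook import FeatureRepository

repo = FeatureRepository()

# Endpoint selection: remote_eval=True switches /api/features/ -> /api/eval/
url = repo._get_features_url("https://cdn.growthbook.io", "sdk-abc123", remote_eval=True)
assert url == "https://cdn.growthbook.io/api/eval/sdk-abc123"

# Payload shape mirroring what GrowthBookClient.initialize() builds.
payload = {
    "attributes": {"id": "user-1", "country": "US"},
    "forcedFeatures": [["beta-banner", True]],  # list of [key, value] pairs
    "forcedVariations": {"exp-1": 1},
    "url": "https://example.com/pricing",
}
# Would POST the payload and return the decoded JSON (or None on error):
# repo._fetch_and_decode_post("https://cdn.growthbook.io", "sdk-abc123", payload)
```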
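On the synchronous `GrowthBook` class the same flow is driven by the new `remoteEval` and `payload` constructor arguments, with `set_payload()` storing the body used by the next refresh. A sketch with a placeholder client key (against a live endpoint the initial load happens during construction; on failure it only logs a warning):

```python
from growthbook import GrowthBook

gb = GrowthBook(
    api_host="https://cdn.growthbook.io",
    client_key="sdk-abc123",  # placeholder key
    remoteEval=True,
    payload={
        "attributes": {"id": "user-1"},
        "forcedFeatures": [],
        "forcedVariations": {},
        "url": "https://example.com/",
    },
)

# When the user's attributes change, store a new payload; it is sent the
# next time features are (re)loaded from the /api/eval/ endpoint.
gb.set_payload({
    "attributes": {"id": "user-2"},
    "forcedFeatures": [],
    "forcedVariations": {},
    "url": "https://example.com/",
})
gb.load_features()
gb.destroy()
```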
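For the async `GrowthBookClient`, remote evaluation is configured entirely through `Options`: `initialize()` converts `forced_features` into `[key, value]` pairs and builds the POST payload from `global_attributes`, `forced_variations`, and the context URL. A sketch with placeholder values:

```python
import asyncio

from growthbook.common_types import Options, UserContext
from growthbook.growthbook_client import GrowthBookClient

async def main() -> None:
    client = GrowthBookClient(Options(
        api_host="https://cdn.growthbook.io",
        client_key="sdk-abc123",            # placeholder key
        remote_eval=True,
        global_attributes={"id": "user-1", "plan": "pro"},
        forced_features={"beta-banner": True},  # sent as [["beta-banner", True]]
        forced_variations={"exp-1": 1},
    ))
    if await client.initialize():  # returns False (and logs) on failure
        user = UserContext(attributes={"id": "user-1"})
        print(await client.is_on("beta-banner", user))
    await client.close()

asyncio.run(main())
```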
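Finally, the ETag cache added to `FeatureRepository` is a small LRU keyed by URL: entries are kept in insertion order and `popitem(last=False)` evicts the oldest once `_max_etag_entries` (100) is exceeded. The same mechanics in isolation, with the limit shrunk for the demo:

```python
from collections import OrderedDict

etag_cache = OrderedDict()
MAX_ENTRIES = 3  # the repository uses 100

for i in range(5):
    url = f"https://cdn.growthbook.io/api/features/key-{i}"
    # Store (etag, decoded_response) pairs, as _fetch_and_decode does.
    etag_cache[url] = (f'W/"etag-{i}"', {"features": {}})
    if len(etag_cache) > MAX_ENTRIES:
        etag_cache.popitem(last=False)  # evict the least-recently-inserted URL

print(list(etag_cache))  # only the three most recent URLs remain
```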