env('VISION_ENABLED', true),

    // Queue connection/name used for vision analysis jobs.
    'queue' => env('VISION_QUEUE', 'default'),

    /*
    |--------------------------------------------------------------------------
    | CLIP Service
    |--------------------------------------------------------------------------
    | HTTP client settings (endpoint, timeouts, retry policy) for the CLIP
    | image-analysis service.
    */

    'clip' => [
        'base_url'                => env('CLIP_BASE_URL', ''),
        'endpoint'                => env('CLIP_ANALYZE_ENDPOINT', '/analyze'),
        'timeout_seconds'         => (int) env('CLIP_TIMEOUT_SECONDS', 8),
        'connect_timeout_seconds' => (int) env('CLIP_CONNECT_TIMEOUT_SECONDS', 2),
        'retries'                 => (int) env('CLIP_HTTP_RETRIES', 1),
        'retry_delay_ms'          => (int) env('CLIP_HTTP_RETRY_DELAY_MS', 200),
    ],

    /*
    |--------------------------------------------------------------------------
    | YOLO Service
    |--------------------------------------------------------------------------
    | HTTP client settings for the YOLO object-detection service.
    */

    'yolo' => [
        'enabled'                 => env('YOLO_ENABLED', true),
        'base_url'                => env('YOLO_BASE_URL', ''),
        'endpoint'                => env('YOLO_ANALYZE_ENDPOINT', '/analyze'),
        'timeout_seconds'         => (int) env('YOLO_TIMEOUT_SECONDS', 8),
        'connect_timeout_seconds' => (int) env('YOLO_CONNECT_TIMEOUT_SECONDS', 2),
        'retries'                 => (int) env('YOLO_HTTP_RETRIES', 1),
        'retry_delay_ms'          => (int) env('YOLO_HTTP_RETRY_DELAY_MS', 200),

        // Restrict YOLO analysis to the photography content type only.
        'photography_only'        => env('YOLO_PHOTOGRAPHY_ONLY', true),
    ],

    // Derivative image variant to submit to the vision services.
    'image_variant' => env('VISION_IMAGE_VARIANT', 'md'),

    /*
    |--------------------------------------------------------------------------
    | Vision Gateway (aggregates CLIP + BLIP + YOLO via /analyze/all)
    |--------------------------------------------------------------------------
    | Falls back to CLIP base_url when VISION_GATEWAY_URL is not set.
    */

    'gateway' => [
        'base_url'                => env('VISION_GATEWAY_URL', env('CLIP_BASE_URL', '')),
        'timeout_seconds'         => (int) env('VISION_GATEWAY_TIMEOUT', 10),
        'connect_timeout_seconds' => (int) env('VISION_GATEWAY_CONNECT_TIMEOUT', 3),
    ],

    /*
    |--------------------------------------------------------------------------
    | Vector Gateway
    |--------------------------------------------------------------------------
    | Connection, retry, and endpoint settings for the vector upsert/search/
    | delete API backing image similarity lookups.
    */

    'vector_gateway' => [
        'enabled'                 => env('VISION_VECTOR_GATEWAY_ENABLED', true),
        'base_url'                => env('VISION_VECTOR_GATEWAY_URL', ''),
        'api_key'                 => env('VISION_VECTOR_GATEWAY_API_KEY', ''),
        'collection'              => env('VISION_VECTOR_GATEWAY_COLLECTION', 'images'),
        'timeout_seconds'         => (int) env('VISION_VECTOR_GATEWAY_TIMEOUT', 20),
        'connect_timeout_seconds' => (int) env('VISION_VECTOR_GATEWAY_CONNECT_TIMEOUT', 5),
        'retries'                 => (int) env('VISION_VECTOR_GATEWAY_RETRIES', 1),
        'retry_delay_ms'          => (int) env('VISION_VECTOR_GATEWAY_RETRY_DELAY_MS', 250),
        'upsert_endpoint'         => env('VISION_VECTOR_GATEWAY_UPSERT_ENDPOINT', '/vectors/upsert'),
        'search_endpoint'         => env('VISION_VECTOR_GATEWAY_SEARCH_ENDPOINT', '/vectors/search'),
        'delete_endpoint'         => env('VISION_VECTOR_GATEWAY_DELETE_ENDPOINT', '/vectors/delete'),
        'collections_endpoint'    => env('VISION_VECTOR_GATEWAY_COLLECTIONS_ENDPOINT', '/vectors/collections'),
    ],

    /*
    |--------------------------------------------------------------------------
    | LM Studio – local multimodal inference (tag generation)
    |--------------------------------------------------------------------------
    */

    'lm_studio' => [
        'base_url'        => env('LM_STUDIO_URL', 'http://192.168.0.100:8200'),
        'model'           => env('LM_STUDIO_MODEL', 'google/gemma-3-4b'),
        'timeout'         => (int) env('LM_STUDIO_TIMEOUT', 60),
        'connect_timeout' => (int) env('LM_STUDIO_CONNECT_TIMEOUT', 5),
        'temperature'     => (float) env('LM_STUDIO_TEMPERATURE', 0.3),
        'max_tokens'      => (int) env('LM_STUDIO_MAX_TOKENS', 300),

        // Maximum number of AI-suggested tags to keep per artwork.
        'max_tags'        => (int) env('LM_STUDIO_MAX_TAGS', 12),
    ],
];