diff --git a/python/agents/trends-and-insights-agent/.gitignore b/python/agents/trends-and-insights-agent/.gitignore new file mode 100644 index 00000000..1fafcca5 --- /dev/null +++ b/python/agents/trends-and-insights-agent/.gitignore @@ -0,0 +1,12 @@ +.venv +/google-cloud-cli-linux-x86_64.tar.gz +/google-cloud-sdk +*.pyc +*.env +**/__pycache__/ +__pycache__/ +*.whl +trends_and_insights_agent/.adk/* +*.evalset.json +stash/* +files/* \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/.vscode/launch.json b/python/agents/trends-and-insights-agent/.vscode/launch.json new file mode 100644 index 00000000..9dc760cb --- /dev/null +++ b/python/agents/trends-and-insights-agent/.vscode/launch.json @@ -0,0 +1,11 @@ +{ + "configurations": [ + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + } + ] +} \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/.vscode/settings.json b/python/agents/trends-and-insights-agent/.vscode/settings.json new file mode 100644 index 00000000..c27ec18f --- /dev/null +++ b/python/agents/trends-and-insights-agent/.vscode/settings.json @@ -0,0 +1,36 @@ +{ + "cSpell.words": [ + "aiohttp", + "aiplatform", + "dotenv", + "evalset", + "finalizer", + "genai", + "googleapiclient", + "googlenews", + "googlesearch", + "gtrends", + "humbucker", + "Humbuckers", + "imagen", + "ipykernel", + "ipython", + "jambands", + "KHTML", + "MNGR", + "pipx", + "psychographic", + "psychographics", + "pycache", + "pytest", + "reimagine", + "secretmanager", + "serviceusage", + "Totten", + "trafilatura", + "Uvicorn", + "vertexai", + "virtualenv", + "wortz" + ] +} \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/README.md b/python/agents/trends-and-insights-agent/README.md new file mode 100644 index 00000000..caf1f436 --- /dev/null +++ b/python/agents/trends-and-insights-agent/README.md @@ -0,0 
+1,399 @@ +# Trends and Insights Agent + +> a multi-agent system finding the intersection between product, trend, and audience + +## About + +*Trends and Insights Agent* is a marketing tool for developing data-driven and culturally relevant marketing content. Built with Google’s [Agent Development Kit (ADK)](https://google.github.io/adk-docs/), this multi-agent system helps users generate ad creatives from trending themes in Google Search and YouTube. + +- Build LLM-based agents with [models supported in Vertex AI's Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/available-models) +- Explore [trending Search terms](https://cloud.google.com/blog/products/data-analytics/top-25-google-search-terms-now-in-bigquery?e=48754805) and [trending YouTube videos](https://developers.google.com/youtube/v3/docs/videos/list) +- Conduct web research to better understand the campaign, Search trend, and trending YouTube video +- Draft ad creatives (e.g., image and video) based on trends, campaign themes, or specific prompts + +

+ +

+ +## How to use this repo + +1. **Clone the repository** + +```bash +git clone https://github.com/tottenjordan/zghost.git +``` + +2. **Create a virtual environment and install dependencies** + +```bash +python3 -m venv .venv && source .venv/bin/activate + +pip install pipx +pip install -U poetry packaging ipykernel + +poetry install +``` + +3. **Authenticate and Enable Google Cloud APIs** + +```bash +gcloud auth application-default login + +gcloud services enable artifactregistry.googleapis.com \ + bigquery.googleapis.com \ + logging.googleapis.com \ + run.googleapis.com \ + storage-component.googleapis.com \ + eventarc.googleapis.com \ + serviceusage.googleapis.com \ + secretmanager.googleapis.com \ + aiplatform.googleapis.com \ + youtube.googleapis.com +``` + +4. **Create and store YouTube API key** + + - See [these instructions](https://developers.google.com/youtube/v3/getting-started) for getting a `YOUTUBE_DATA_API_KEY` + - Store this API key in [Secret Manager](https://cloud.google.com/secret-manager/docs/creating-and-accessing-secrets) as `yt-data-api` (see `YT_SECRET_MNGR_NAME` in `.env` file) + - For step-by-step guidance, see [create a secret and access a secret version](https://cloud.google.com/secret-manager/docs/create-secret-quickstart#create_a_secret_and_access_a_secret_version) + + +5. **Create and populate `.env` file(s)** + +```bash +GOOGLE_GENAI_USE_VERTEXAI=1 +GOOGLE_CLOUD_PROJECT= +GOOGLE_CLOUD_PROJECT_NUMBER= # e.g., 1234756 +GOOGLE_CLOUD_LOCATION= # e.g., us-central1 +BUCKET=gs:// # create a GCS bucket +YT_SECRET_MNGR_NAME= # e.g., yt-data-api +# SESSION_STATE_JSON_PATH=example_state_pixel.json # uncomment to use default config values +``` + +*copy `.env` file to `root_agent` dir:* + +```bash +cp .env trends_and_insights_agent/.env +cat trends_and_insights_agent/.env + +source .env +``` + + 6. **Create Cloud Storage bucket** + +```bash +gcloud storage buckets create $BUCKET --location=$GOOGLE_CLOUD_LOCATION +``` + +7. 
**Launch the adk developer UI** + +```bash +poetry run adk web +``` + +Open your browser and navigate to [http://localhost:8000](http://localhost:8000) and select an agent from the drop-down (top left) + +```bash +INFO: Started server process [750453] +INFO: Waiting for application startup. + ++-----------------------------------------------------------------------------+ +| ADK Web Server started | +| | +| For local testing, access at http://localhost:8000. | ++-----------------------------------------------------------------------------+ + +INFO: Application startup complete. +INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) +``` + +
+ If port :8000 in use + +*find any processes listening to port `:8000`, kill them, then return to step (7):* + +```bash +lsof -i :8000 +kill -9 $PID +lsof -i :8000 +``` + +
+ +## How it works + +
+ Example usage + +#### [1] Capture campaign metadata & user-selected trends + +Agent will ask user for **campaign metadata** in the UI + +``` +> [agent]: Hello! I'm your AI Marketing Research & Strategy Assistant... To start, what please provide the following campaign metadata: + + * Brand + * Target Product + * Key Selling Points + * Target Audience +``` + +
+ [Optional] preload campaign metadata + +preload these values using one of the example json configs e.g., [shared_libraries/profiles/example_state_pixel.json](trends_and_insights_agent/shared_libraries/profiles/example_state_pixel.json) or upload your own. The json config you wish to reference should be set in your `.env` file like below. *Note: remove or comment out this variable to use default option (1)* + +``` +SESSION_STATE_JSON_PATH=example_state_prs.json +``` +
+ + +#### [2] Autonomous research workflow + +#### [3] Interactive ad content generator + +> Note: this section is configured for **human-in-the-loop** i.e., agent will iterate with user when generating image and video creatives + + - Choose a subset of ad copies to proceed with + - Choose a subset of visual concepts to proceed with + - Generate image and video creatives with visual concepts + +#### [4] Compile final research and creative report + +
+ +## Example ad creatives + +
+ Hulkamania & Pixel 9's Call Assist + +

+ +

+ +
+ + +
+ Titanic & PRS Guitars + +

+ +

+ +
+ + +
+ Adam Sandler (Waterboy) & PRS Guitars + +

+ +

+ +
+ + +
+ Mad Again & Pixel 9' Call Assist + +

+ +

+ +
+ + + +## Video walkthrough + +> Updated version coming soon! + + +## Sub-agents & Tools + +``` +root_agent (orchestrator) +├── trends_and_insights_agent # Display/capture trend selections +├── research_orchestrator # Coordinate research pipeline +│ ├── combined_research_pipeline # Sub-agent for SequentialAgent workflow +│ │ ├── merge_parallel_insights # Parallel research coordination +│ │ │ ├── parallel_planner_agent # Runs 3 research types simultaneously +│ │ │ │ ├── yt_sequential_planner # YouTube trend analysis +│ │ │ │ ├── gs_sequential_planner # Google Search trend analysis +│ │ │ │ └── ca_sequential_planner # Campaign research +│ │ │ └── merge_planners # Combines research plans +│ │ ├── combined_web_evaluator # Quality check +│ │ ├── enhanced_combined_searcher # Expand web search +│ │ └── combined_report_composer # Generate unified research report +├── ad_content_generator_agent # Create comprehensive ad campaigns +│ ├── ad_creative_pipeline # Ad copy actor-critic framework +│ │ ├── ad_copy_drafter +│ │ ├── ad_copy_critic +│ ├── visual_generation_pipeline # Visual concept actor-critic framework +│ │ ├── visual_concept_drafter +│ │ ├── visual_concept_critic +│ │ └── visual_concept_finalizer +│ └── visual_generator # Image/video generation +└── save_creatives_and_research_report # Compile PDF reports + +``` + +Expand sections below to visualize complex agent workflows + +
+ Trend and Insight Agent + +> This agent is responsible for gathering input from the user. + +

+ +

+ +
+ + +
+ Research Orchestrator Pipeline + +**The research workflow has two phases:** +1. Parallel web research for individual topics: search trend, YouTube video, and campaign metadata e.g., target audience, product, brand, etc. +2. Combined web research for the intersection of individual topics + +> This structure helps us achieve a deeper understanding of each subject first. And this helps us ask better questions for a second round of research where we are solely focused on finding any culturally relevant overlaps to exploit for ad creatives. + +

+ +

+ +
+ + +
+ Ad Content Generator Pipeline + +> This agent uses the research report to generate relevant ad copy, visual concepts, and creatives (image and video). + +

+ +

+ +
+ + +# CI And Testing + +Using `pytest`, users can test for tool coverage as well as Agent evaluations. + +More detail on agent evaluations [can be found here](https://google.github.io/adk-docs/evaluate/#2-pytest-run-tests-programmatically), along with how to run a `pytest` eval. + +#### Running `pytest` + +From the project root, run: + +```bash +pytest tests/*.py +``` + +## Deployment + +The agent can be deployed in a couple of different ways + +1. Agent Engine + * Here's an end-to-end guide on deploying + * Be sure to first run the `setup_ae_sm_access.sh` script to give Agent Engine access to Secret Manager + * Run the [deployment guide](.notebooks/deployment_guide.ipynb) to deploy the agent +2. Cloud Run + * Run `deploy_to_cloud_run.sh` + * Note this runs unit tests prior to deploying + +Script for Cloud Run: + +```bash +#!/bin/bash +source trends_and_insights_agent/.env + +# run unit tests before deploying +pytest tests/*.py + +# write requirements.txt to the agent folder +poetry export --without-hashes --format=requirements.txt > trends_and_insights_agent/requirements.txt + +#deploy to cloud run +adk deploy cloud_run \ + --project=$GOOGLE_CLOUD_PROJECT \ + --region=$GOOGLE_CLOUD_LOCATION \ + --service_name='trends-and-insights-agent' \ + --with_ui \ + trends_and_insights_agent/ +``` + +## Deployment to Agentspace + + +Create an Agent Engine in the `notebooks/deployment_guide.ipynb` notebook + +Then note the Agent Engine ID (last numeric portion of the Resource Name). 
e.g.: + +```bash +agent_engine = vertexai.agent_engines.get('projects/679926387543/locations/us-central1/reasoningEngines/1093257605637210112') +``` + +Update the `agent_config_example.json`, then run: + +```bash +./publish_to_agentspace_v2.sh --action create --config agent_config.json +``` + +Usage: `./publish_to_agentspace_v2.sh [OPTIONS]` + +```bash +Options: + -a, --action Action to perform (required) + -c, --config JSON configuration file + -p, --project-id Google Cloud project ID + -n, --project-number Google Cloud project number + -e, --app-id Agent Space application ID + -r, --reasoning-engine Reasoning Engine ID (required for create/update) + -d, --display-name Agent display name (required for create/update) + -s, --description Agent description (required for create) + -i, --agent-id Agent ID (required for update/delete) + -t, --instructions Agent instructions/tool description (required for create) + -u, --icon-uri Icon URI (optional) + -l, --location Location (default: us) + -h, --help Display this help message +``` + +### Example with config file: +```bash +./publish_to_agentspace_v2.sh --action create --config agent_config.json +./publish_to_agentspace_v2.sh --action update --config agent_config.json +./publish_to_agentspace_v2.sh --action list --config agent_config.json +./publish_to_agentspace_v2.sh --action delete --config agent_config.json +``` +### Example with command line args: + +Create agent: +```bash +./publish_to_agentspace_v2.sh --action create --project-id my-project --project-number 12345 \ +--app-id my-app --reasoning-engine 67890 --display-name 'My Agent' \ +--description 'Agent description' --instructions 'Agent instructions here' +``` + Update agent: +```bash +./publish_to_agentspace_v2.sh --action update --project-id my-project --project-number 12345 \ +--app-id my-app --reasoning-engine 67890 --display-name 'My Agent' \ +--agent-id 123456789 --description 'Updated description' +``` + List agents: +```bash 
+./publish_to_agentspace_v2.sh --action list --project-id my-project --project-number 12345 \ +--app-id my-app +``` + + Delete agent: +```bash +./publish_to_agentspace_v2.sh --action delete --project-id my-project --project-number 12345 \ +--app-id my-app --agent-id 123456789 +``` \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/agent_config_example.json b/python/agents/trends-and-insights-agent/agent_config_example.json new file mode 100644 index 00000000..41e2e2b0 --- /dev/null +++ b/python/agents/trends-and-insights-agent/agent_config_example.json @@ -0,0 +1,13 @@ +{ + "project_id": "wortz-project-352116", + "project_number": "679926387543", + "app_id": "grocery-demo_1738268844814", + "reasoning_engine_id": "3252927141007851520", + "display_name": "Trends and Insights Agent V3", + "description": "You are a complex agent that analyzes marketing briefs and transforms insights into creatives.", + "agent_id": "13864486446570265948", + "instructions": "You are an Expert AI Marketing Research & Strategy Assistant.\n\nYour primary function is to orchestrate a suite of specialized sub-agents (Agents) to provide users with comprehensive insights, creative ideas, and trend analysis for their marketing campaigns. Strictly follow all the steps one-by-one. Do not skip any steps or execute them out of order\n\n**Instructions:** Follow these steps to complete your objective:\n1. Complete all steps in the block to gather user inputs and establish a research baseline. Strictly follow all the steps one-by-one. Don't proceed until they are complete.\n2. Then make sure the user interacts with the `ad_content_generator_agent` agent and complete the steps in the block.\n3. Confirm with the user if they are satisfied with the research and creatives.\n\n\n\n1. Greet the user and give them a high-level overview of what you do. 
Inform them we will populate the 'campaign_guide' and other state keys using the default session state defined by the `SESSION_STATE_JSON_PATH` var in your .env file.\n2. Then, transfer to the `trends_and_insights_agent` subagent to help the user find interesting trends.\n3. Once the trends are selected, call the `stage_1_research_merger` subagent to coordinate multiple rounds of research.\n\n\n\n\n1. Call `ad_content_generator_agent` to generate ad creatives based on campaign themes, trend analysis, web research, and specific prompts.\n2. Work with the user to generate ad creatives (e.g., ad copy, image, video, etc.).\n3. Iterate with the user until they are satisfied with the generated creatives.\n4. Once they are satisfied, call `report_generator_agent` to generate a comprehensive report, in Markdown format, outlining the trends, research, and creatives explored during this session.\n\n\n\n**Sub-agents:**\n- Use `trends_and_insights_agent` to help the user find interesting trends.\n- Use `ad_content_generator_agent` to help the user create visual concepts for ads.\n- Use `report_generator_agent` to generate a research report.\n- Use `campaign_guide_data_generation_agent` to extract details from an uploaded PDF and store them in the 'campaign_guide' state key.\n- Use `stage_1_research_merger` to coordinate and execute all research tasks.", + "icon_uri": "https://fonts.gstatic.com/s/i/short-term/release/googlesymbols/corporate_fare/default/24px.svg", + "agentspace_location": "us", + "agent_engine_location": "us-central1" +} diff --git a/python/agents/trends-and-insights-agent/deploy_to_cloud_run.sh b/python/agents/trends-and-insights-agent/deploy_to_cloud_run.sh new file mode 100644 index 00000000..73447f04 --- /dev/null +++ b/python/agents/trends-and-insights-agent/deploy_to_cloud_run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +source trends_and_insights_agent/.env + +# run unit tests before deploying +pytest tests/*.py + +# write requirements.txt to the agent folder 
+poetry export --without-hashes --format=requirements.txt > trends_and_insights_agent/requirements.txt + + +#deploy to cloud run +adk deploy cloud_run \ + --project=$GOOGLE_CLOUD_PROJECT \ + --region=$GOOGLE_CLOUD_LOCATION \ + --service_name='trends-and-insights-agent' \ + --with_ui \ + trends_and_insights_agent/ diff --git a/python/agents/trends-and-insights-agent/learning/configuring_bq_connector.md b/python/agents/trends-and-insights-agent/learning/configuring_bq_connector.md new file mode 100644 index 00000000..5a19235f --- /dev/null +++ b/python/agents/trends-and-insights-agent/learning/configuring_bq_connector.md @@ -0,0 +1,19 @@ +# Set up Integration Connectors + +See [setup integration connectors](https://cloud.google.com/integration-connectors/docs/setup-integration-connectors) from the docs + +**TODO:** configure this for an agent or tool + +```python +bigquery_toolset = ApplicationIntegrationToolset( + project="your-gcp-project-id", + location="your-gcp-project-location", + connection="your-connection-name", + entity_operations={"table_name": ["LIST"]}, +) + +agent = LlmAgent( + ... + tools = bigquery_toolset.get_tools() +) +``` \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/learning/guided_search_examples.md b/python/agents/trends-and-insights-agent/learning/guided_search_examples.md new file mode 100644 index 00000000..b39f8d23 --- /dev/null +++ b/python/agents/trends-and-insights-agent/learning/guided_search_examples.md @@ -0,0 +1,444 @@ +# Guided Search with Google and YouTube + +This document shows different ways to use the underlying Google Search libraries and YouTube APIs. To simplify initial onboarding, we've made some of these parameters configurable only to the user :angel: / developer :neckbeard: , as opposed to giving the LLM-based agents...
*total control* :smiling_imp: + +For example, consider the below function we could use as a tool for getting trending videos from YouTube: + +```python +def get_youtube_trends(region_code: str, max_results: int = 5,) -> dict: + """ + Makes request to YouTube Data API for most popular videos in a given region. + Returns a dictionary of videos that match the API request parameters e.g., trending videos + + Args: + region_code (str): selects a video chart available in the specified region. Values are ISO 3166-1 alpha-2 country codes. + For example, the region_code for the United Kingdom would be 'GB', whereas 'US' would represent The United States. + max_results (int): The number of video results to return. + + Returns: + dict: The response from the YouTube Data API. + """ + + request = youtube_client.videos().list( + part="snippet,contentDetails,statistics", + chart="mostPopular", + regionCode=region_code, + maxResults=max_results, + ) + trend_response = request.execute() + return trend_response +``` + +**We're giving the LLM-based agent the ability to change the `region_code` and `max_results`. Why?** +* This allows the agent to easily make separate API calls, each for a different region. +* It could also change the number of results based on some user interaction e.g., :information_desk_person: "actually can I see the top 50 trending videos?" + + > Having clear and informative doc strings goes a long way here! + +**We've hard-coded the `chart` and `part` parameters. Why?** +* We don't need the agent to do all the things the API can do. It just needs to focus on the *trending videos* --> `"mostPopular"` +* We want it to always return the `"snippet,contentDetails,statistics"` + + > We're mitigating some risk of it deviating too far from our expectations...
mainly because we're more interested in how it deviates from other expectations :wink: + + + +## Google Search + +`googlesearch-python` is a Python library for searching Google + +**References** + +* [pypi project](https://pypi.org/project/googlesearch-python/) +* see [GitHub repo](https://github.com/Nv7-GitHub/googlesearch) for more examples +* see [supported country codes](https://developers.google.com/custom-search/docs/json_api_reference#countryCodes) for input arg: `region` + +**Example usage** + +1. **Simple Search:** search Google for URLs *related to given `query` string* + +```python +from googlesearch import search + +target_topic = "widespread panic" +query = target_topic + +results_generator = search( + term=query, + lang="en", + region="us", + num_results=10, + sleep_interval=2.0, + unique=True, + advanced=False, +) + +# convert result object to list +search_results_urls = list(results_generator) +search_results_urls +``` + +*returns list of related URLs:* + +```python +['https://widespreadpanic.com/', + 'https://en.wikipedia.org/wiki/Widespread_Panic', + 'http://www.widespreadpanic.com/', + 'https://www.youtube.com/channel/UCKmXntvZFs9VBYknXMMzIbw', + 'https://open.spotify.com/artist/54SHZF2YS3W87xuJKSvOVf',] + ``` + + +2. 
**Search Operators:** Combine `query` string with [search operators](https://developers.google.com/search/docs/monitor-debug/search-operators/all-search-site) (e.g., `site:`) to *request results from a particular domain, URL, or URL prefix:* + +```python +from googlesearch import search + +# Search Reddit for content related to "widespread panic" +target_topic = "widespread panic" +query = "site:reddit.com" + " " + target_topic + +results_generator = search( + term=query, + lang="en", + region="us", + num_results=10, + sleep_interval=2.0, + unique=True, + advanced=False, +) +search_results_urls = list(results_generator) +search_results_urls +``` + +*returns list of related URLs from `reddit.com` only:* + +```python +['https://www.reddit.com/r/WidespreadPanic/', + 'https://www.reddit.com/r/jambands/comments/12oyub2/why_the_hate_on_widespread_panic/', + 'https://www.reddit.com/r/jambands/comments/1c55dhz/widespread_panic_complete_concert_videos/', + 'https://www.reddit.com/r/WidespreadPanic/comments/165nt0l/dark_and_menacing_widespread_panic/', + 'https://www.reddit.com/r/gratefuldead/comments/1htfa31/widespread_panic/',] +``` + + +3. **Advanced Search:** set `advanced=True` to *return list of `SearchResult` objects (title, url, description):* + +```python +results_generator = search( + term=query, + lang="en", + region="us", + num_results=10, + sleep_interval=2.0, + unique=True, + advanced=True, +) +search_results = list(results_generator) +search_results[0] +``` +*returns `SearchResult` object:* + +```python +SearchResult( + url="https://www.reddit.com/r/jambands/comments/1e6hjl9/widespread_panic_appreciation_thread/", + title="Widespread Panic Appreciation Thread : r/jambands - Reddit", + description="Jul 18,2024·In a jam band world of the goofy, wookie, entitled and sometimes creepy-ass fans, panic's fans remain undefeated..." 
+) +``` + + +## Google News + +`GoogleNews` is a Python library for searching [Google News](https://news.google.com/) + +**References** +* [pypi project](https://pypi.org/project/GoogleNews/) + +**Example usage** + +1. *Can only search `query` terms; **cannot combine** with search operators (e.g., `site: `)* + +```python +from GoogleNews import GoogleNews + +# initialize +googlenews = GoogleNews( + lang='en', + region='US', + # period='7d', + # start='02/01/2020', + # end='02/28/2020', + # encode='utf-8', +) + +# check version +print(googlenews.getVersion()) + +# enable throw exception +googlenews.enableException(True) + +# topic/query +query = "widespread panic" + +# sets topic/query to search `news.google.com` +news_wsp = googlenews.get_news(query) +``` + +**get news article results dict** + +```python +news_wsp_results = googlenews.results() +news_wsp_results = [ + { + 'title': 'Widespread Panic Delivers a One, Two, Three Knock-Out Punch to Smashville', + 'desc': None, + 'date': '3 days ago', + 'datetime': datetime.datetime(2025, 5, 11, 11, 51, 18, 655146), + 'media': None, + 'site': None, + 'reporter': None, + 'link': 'https://news.google.com/read/CBMirwFBVV95cUxQbmo2NlpoMzVQZEtvUkUyQVk2Vi13X2FIMGlmc3l4bHJRN1VXQjQ3NzZnUWUxZ2xJMnBRMVlBeFBOWUx5dTBkWFBRZmV1ZkRLNmQ0RThCYmQyNURaWXc1ZWYxRTBnT2FBejZ3bFdxbDBHeTAwZ3YzcUF1QnA3Y0tubnBMaVhRVnhUZGt5d3F1ME4ydkJaOHByM2ZJUXRTTklSRktiNkJkWC1kT0N6bXZB?hl=en-US&gl=US&ceid=US%3Aen', + 'img': 'https://news.google.com/api/attachments/CC8iK0NnNXJZVk5RV0U1VlNuQnpOWE5RVFJERUF4aW1CU2dLTWdZQkFJYUVqZ2s=-w200-h112-p-df', + }, + ... 
+] +``` + +**get news article URLs only** + +```python +news_wsp_links = googlenews.get_links() +news_wsp_links[0] = 'https://news.google.com/read/CBMirwFBVV95cUxQbmo2NlpoMzVQZEtvUkUyQVk2Vi13X2FIMGlmc3l4bHJRN1VXQjQ3NzZnUWUxZ2xJMnBRMVlBeFBOWUx5dTBkWFBRZmV1ZkRLNmQ0RThCYmQyNURaWXc1ZWYxRTBnT2FBejZ3bFdxbDBHeTAwZ3YzcUF1QnA3Y0tubnBMaVhRVnhUZGt5d3F1ME4ydkJaOHByM2ZJUXRTTklSRktiNkJkWC1kT0N6bXZB?hl=en-US&gl=US&ceid=US%3Aen' +``` + +**get news article titles only** + +```python +news_wsp_titles = googlenews.get_texts() +news_wsp_titles = [ + 'Widespread Panic Delivers a One, Two, Three Knock-Out Punch to Smashville', + 'Widespread Panic Announce September Shows at New Richmond, Va. Venue', + 'Widespread Panic books two Richmond shows at Allianz', + 'Widespread Panic brings 40 years of jams', +] +``` + +**clear result list before doing another search with same `googlenews` object** + +```python +googlenews.clear() +``` + +2. *Get news by topics:* + +```python +SPORTS_TOPIC_ID = "CAAqJggKIiBDQkFTRWdvSUwyMHZNRFp1ZEdvU0FtVnVHZ0pWVXlnQVAB" +googlenews.set_topic(SPORTS_TOPIC_ID) +googlenews.get_news() +googlenews.results() +``` + +**topic IDs** + +* **HEALTH** = "CAAqIQgKIhtDQkFTRGdvSUwyMHZNR3QwTlRFU0FtVnVLQUFQAQ" +* **SPORTS** = "CAAqJggKIiBDQkFTRWdvSUwyMHZNRFp1ZEdvU0FtVnVHZ0pWVXlnQVAB" +* **SCIENCE** = "CAAqJggKIiBDQkFTRWdvSUwyMHZNRFp0Y1RjU0FtVnVHZ0pWVXlnQVAB" +* **US NEWS** = "CAAqIggKIhxDQkFTRHdvSkwyMHZNRGxqTjNjd0VnSmxiaWdBUAE" +* **BUSINESS** = "CAAqJggKIiBDQkFTRWdvSUwyMHZNRGx6TVdZU0FtVnVHZ0pWVXlnQVAB" +* **WORLD NEWS** = "CAAqJggKIiBDQkFTRWdvSUwyMHZNRGx1YlY4U0FtVnVHZ0pWVXlnQVAB" +* **TECHNOLOGY** = "CAAqJggKIiBDQkFTRWdvSUwyMHZNRGRqTVhZU0FtVnVHZ0pWVXlnQVAB" +* **ENTERTAINMENT** = "CAAqJggKIiBDQkFTRWdvSUwyMHZNREpxYW5RU0FtVnVHZ0pWVXlnQVAB" + +> topic URLs: `https://news.google.com/topics/{TOPIC_ID}?hl=en-US&gl=US&ceid=US%3Aen` + + +## YouTube Data API v3 + +
+ Create and store API key + +1. See [these instructions](https://developers.google.com/youtube/v3/getting-started) for getting a `YOUTUBE_DATA_API_KEY` + +2. Store this API key in [Secret Manager](https://cloud.google.com/secret-manager/docs/creating-and-accessing-secrets) as `yt-data-api`. See [create a secret and access a secret version](https://cloud.google.com/secret-manager/docs/create-secret-quickstart#create_a_secret_and_access_a_secret_version) or step-by-step guidance + +
+ +--- + +**Example usage** + +1. *REST API:* + * `GET https://www.googleapis.com/youtube/v3/videos?part=id&chart=mostPopular®ionCode=FR&key={YOUTUBE_DATA_API_KEY}` + +2. *Python client:* + +```python +import googleapiclient.discovery + +# config discovery client +youtube_client = googleapiclient.discovery.build( + serviceName="youtube", + version="v3", + developerKey=YOUTUBE_DATA_API_KEY +) +``` + +**Videos: list** - Returns a list of videos that match the API request parameters + +```python +# return most popular (i.e., trending) in US +request = youtube_client.videos().list( + part="snippet,contentDetails,statistics", + chart="mostPopular", + regionCode="US" +) +response = request.execute() +``` + +The response has 3 `parts`: "snippet", "contentDetails", & "statistics" + +1. the `snippet` object contains basic details about the video, such as its title, description, and category: + +```python +response['items'][0]['snippet'] + +{ + 'publishedAt': '2025-05-13T21:15:00Z', + 'channelId': 'UCdtXPiqI2cLorKaPrfpKc4g', + 'title': "Diddy Trial: Cassie's Testimony Breakdown With CBS News' Jericka Duncan", + 'description': "CBS News' Jericka Duncan breaks down Cassie Ventura's testimony against her ex, Sean 'Diddy' Combs, in his sex trafficking and racketeering trial on Tuesday, May 13. Diddy maintains his innocence against all charges, currently being explored in a New York City court. Jericka recounts Cassie's explanation of 'freak offs,' the supposed sex-fueled parties thrown by Diddy, and abuse she allegedly experienced while dating Diddy. 
Jericka also speculates on other famous faces who may take the stand in the coming days.", + 'thumbnails': { + 'default': { + 'url': 'https://i.ytimg.com/vi/ArIqryxHquo/default.jpg', + 'width': 120, + 'height': 90 + }, + 'medium': { + 'url': 'https://i.ytimg.com/vi/ArIqryxHquo/mqdefault.jpg', + 'width': 320, + 'height': 180 + }, + 'high': { + 'url': 'https://i.ytimg.com/vi/ArIqryxHquo/hqdefault.jpg', + 'width': 480, + 'height': 360 + }, + 'standard': { + 'url': 'https://i.ytimg.com/vi/ArIqryxHquo/sddefault.jpg', + 'width': 640, + 'height': 480 + }, + 'maxres': { + 'url': 'https://i.ytimg.com/vi/ArIqryxHquo/maxresdefault.jpg', + 'width': 1280, + 'height': 720 + } + }, + 'channelTitle': 'Entertainment Tonight', + 'tags': ['Cassie Ventura', 'Diddy', 'Sean Combs'], + 'categoryId': '24', + 'liveBroadcastContent': 'none', + 'defaultLanguage': 'en', + 'localized': { + 'title': "Diddy Trial: Cassie's Testimony Breakdown With CBS News' Jericka Duncan", + 'description': "CBS News' Jericka Duncan breaks down Cassie Ventura's testimony against her ex,Sean 'Diddy' Combs, in his sex trafficking and racketeering trial on Tuesday, May 13. Diddy maintains his innocence against all charges, currently being explored in a New York City court. Jericka recounts Cassie's explanation of 'freak offs,' the supposed sex-fueled parties thrown by Diddy, and abuse she allegedly experienced while dating Diddy. Jericka also speculates on other famous faces who may take the stand in the coming days." + }, + 'defaultAudioLanguage': 'en' +} +``` + +2. The `contentDetails` object contains information about the video content, including the length of the video and an indication of whether captions are available for the video: + +```python +response['items'][0]['contentDetails'] + +{'duration': 'PT8M50S', + 'dimension': '2d', + 'definition': 'hd', + 'caption': 'false', + 'licensedContent': True, + 'contentRating': {}, + 'projection': 'rectangular'} +``` + +3. 
The `statistics` object contains statistics about the video: + +```python +response['items'][0]['statistics'] + +{'viewCount': '533962', + 'likeCount': '7469', + 'favoriteCount': '0', + 'commentCount': '1811'} +``` + +**Search: list** - Returns a collection of search results that match the query parameters specified in the API request + +```python +import pandas as pd + +TARGET_QUERY = "time travel" +MAX_DAYS_AGO = 60 + +# get correct format +PUBLISHED_AFTER_TIMESTAMP = ( + (pd.Timestamp.now() - pd.DateOffset(days=MAX_DAYS_AGO)) + .tz_localize("UTC") + .isoformat() +) + +# search YouTube for reated videos +yt_data_api_request = youtube_client.search().list( + part="id,snippet", + type="video", + regionCode="US", + q=TARGET_QUERY, + videoDuration="medium", # "any" | "short" | "medium" | "long" + maxResults=3, + publishedAfter=PUBLISHED_AFTER_TIMESTAMP, + channelId="any", + order="relevance", +) +yt_data_api_response = yt_data_api_request.execute() +yt_data_api_response = { + 'kind': 'youtube#searchListResponse', + 'etag': 'dZfJzsSeeHDXXQRL2boFP0h06BA', + 'nextPageToken': 'CAEQAA', + 'regionCode': 'US', + 'pageInfo': {'totalResults': 3, 'resultsPerPage': 1}, + 'items': [ + { + 'kind': 'youtube#searchResult', + 'etag': 'JE7wL5DcJeZHj10m3RMyLB956n4', + 'id': {'kind': 'youtube#video', 'videoId': 'uHTrBuekQzg'}, + 'snippet': { + 'publishedAt': '2025-05-13T20:00:38Z', + 'channelId': 'UC9MAhZQQd9egwWCxrwSIsJQ', + 'title': 'History's Greatest Mysteries: Did the U.S. Government Steal the Tesla Files? 
(Season 6)', + 'description': "Dive into one of the most intriguing mysteries of modern history with this captivating clip from History's Greatest Mysteries!", + 'thumbnails': { + 'default': { + 'url': 'https://i.ytimg.com/vi/uHTrBuekQzg/default.jpg', + 'width': 120, + 'height': 90 + }, + 'medium': { + 'url': 'https://i.ytimg.com/vi/uHTrBuekQzg/mqdefault.jpg', + 'width': 320, + 'height': 180 + }, + 'high': { + 'url': 'https://i.ytimg.com/vi/uHTrBuekQzg/hqdefault.jpg', + 'width': 480, + 'height': 360 + } + }, + 'channelTitle': 'HISTORY', + 'liveBroadcastContent': 'none', + 'publishTime': '2025-05-13T20:00:38Z' + } + } + ] +} +``` \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/learning/key_adk_concepts.md b/python/agents/trends-and-insights-agent/learning/key_adk_concepts.md new file mode 100644 index 00000000..5f96ce7d --- /dev/null +++ b/python/agents/trends-and-insights-agent/learning/key_adk_concepts.md @@ -0,0 +1,133 @@ +# Key concepts for understanding the ADK + +*in no particular order.... yet* + +## Tools vs Agents + + +### AgentTool +* see [docs](https://google.github.io/adk-docs/tools/function-tools/#3-agent-as-a-tool) + +To understand how this works, consider this code snippet: + +```python +summary_agent = Agent( + model="gemini-2.0-flash", + name="summary_agent", + instruction="""You are an expert summarizer. Please read the following text and provide a concise summary.""", + description="Agent to summarize text", +) + +root_agent = Agent( + model='gemini-2.0-flash', + name='root_agent', + instruction="""You are a helpful assistant. When the user provides a text, use the 'summarize' tool to generate a summary. + Always forward the user's message exactly as received to the 'summarize' tool, without modifying or summarizing it yourself. + Present the response from the tool to the user. + """, + tools=[AgentTool(agent=summary_agent)] +) +``` + +**How it works** +1. 
When the `root_agent` receives some text input, its instruction tells it to use the `summarize` tool (e.g., `tools=[AgentTool(agent=XYZ)]`) +2. The framework recognizes `summarize` as an `AgentTool` that wraps the `summary_agent`. +3. Behind the scenes, the `root_agent` will call the `summary_agent` with the text as input. +4. The `summary_agent` will process the text according to its instruction and generate a summary. +5. The response from the `summary_agent` is then passed back to the `root_agent`. +6. The `root_agent` can then take the summary and formulate its final response to the user (e.g., "Here's a summary of the text: ...") + + +## Context + +*In the Agent Development Kit (ADK), "context" ([docs](https://google.github.io/adk-docs/context/#what-are-context)) refers to the crucial bundle of information available to your agent and its tools during specific operations. Think of it as the necessary background knowledge and resources needed to handle a current task or conversation turn effectively.* + +Not just the latest user message, **context is essential because it enables:** +- Maintaining State +- Passing Data +- Accessing Services +- Identity and Tracking +- Tool-Specific Actions + +The central piece holding all this information together for a single, complete user-request-to-final-response cycle (an *invocation*) is the *`InvocationContext`*. See below. + + +### InvocationContext + +An *invocation* in ADK represents the **entire process triggered by a single user query and continues until the agent has finished processing** and has no more events to generate, returning control back to the user. +- It's the complete cycle of agent execution in response to a user input. +- It's a crucial concept for managing the agent's execution, maintaining context, and orchestrating interactions within a session. 
+- *see [docs](https://google.github.io/adk-docs/agents/multi-agents/#c-explicit-invocation-agenttool)* + +The *`InvocationContext`* acts as the comprehensive internal container: +* **Use Case:** Primarily used when the agent's core logic needs direct access to the overall session or services +* **Purpose:** Provides access to the entire state of the current invocation. This is the most comprehensive context object +* **Key Contents:** Direct access to `session` (including `state` and `events`), the current `agent` instance, `invocation_id`, initial `user_content`, references to configured services (e.g., `artifact_service`), and fields related to live/streaming modes + + +## Multi-agent systems + + +### Global Instructions + +Why global instructions? +* they provide instructions for all the agents in the entire agent tree. +* BUT they ONLY take effect in `root_agent`. +* For example: use `global_instruction` to make all agents have a stable identity or personality. + + +## Google's `genai` sdk + + +### genai.types.Part() + +* A datatype containing media content. +* Exactly one field within a Part should be set, representing the specific type of content being conveyed. Using multiple fields within the same `Part` instance is considered invalid. +* [src](https://github.com/googleapis/python-genai/blob/main/google/genai/types.py#L904) + +```python +class Part(_common.BaseModel): + """A datatype containing media content. + + Exactly one field within a Part should be set, representing the specific type + of content being conveyed. Using multiple fields within the same `Part` + instance is considered invalid. + """ + + video_metadata: Optional[VideoMetadata] = Field( + default=None, description="""Metadata for a given video.""" + ) + thought: Optional[bool] = Field( + default=None, + description="""Indicates if the part is thought from the model.""", + ) + inline_data: Optional[Blob] = Field( + default=None, description="""Optional. 
Inlined bytes data.""" + ) + file_data: Optional[FileData] = Field( + default=None, description="""Optional. URI based data.""" + ) + thought_signature: Optional[bytes] = Field( + default=None, + description="""An opaque signature for the thought so it can be reused in subsequent requests.""", + ) + code_execution_result: Optional[CodeExecutionResult] = Field( + default=None, + description="""Optional. Result of executing the [ExecutableCode].""", + ) + executable_code: Optional[ExecutableCode] = Field( + default=None, + description="""Optional. Code generated by the model that is meant to be executed.""", + ) + function_call: Optional[FunctionCall] = Field( + default=None, + description="""Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values.""", + ) + function_response: Optional[FunctionResponse] = Field( + default=None, + description="""Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model.""", + ) + text: Optional[str] = Field( + default=None, description="""Optional. Text part (can be code).""" + ) +``` \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/learning/trends-2-creatives.md b/python/agents/trends-and-insights-agent/learning/trends-2-creatives.md new file mode 100644 index 00000000..2ee4cd8c --- /dev/null +++ b/python/agents/trends-and-insights-agent/learning/trends-2-creatives.md @@ -0,0 +1,118 @@ + +example output for research report, generated image and video creatives that intersect: +1) marketing campaign, +2) trending topic in Search, and +3) trending video from YouTube + +--- + +# Marketing Campaign Brief: Google Pixel 9 - "Surreal Moments, Seamlessly Shared" + + +

+ +

+ + +## 1. Campaign Guide Summary + +* **Campaign Name:** Marketing Campaign Guide for Google's Pixel 9 +* **Brand:** Google +* **Target Product:** Pixel 9 smartphone +* **Target Audience:** Gen Z individuals (18-24 years old), people who are frustrated with technology, developers or young adult technophiles, those who enjoy surreal memes. +* **Target Regions:** Atlanta, US +* **Campaign Objectives:** Increase brand awareness of Google Pixel 9 among Gen Z and drive consideration and desire for Pixel 9 features (camera, AI, etc.). +* **Media Strategy:** Social Media (Instagram, TikTok, YouTube), Influencer Marketing (Hyper-Localized), and Digital Ads (Highly Targeted). +* **Key Selling Points:** Best camera (AI-powered) and makes your life easier (Pixel Call Assist). + +## 2. Key Insights from Research + +### 2.1. Pixel 9: The AI-Powered Creative Companion for Gen Z + +* **Summary:** The Pixel 9's AI capabilities empower Gen Z to effortlessly create and share unique content, aligning with their desire for self-expression and creativity. +* **Key Entities:** Google Pixel 9, Gen Z, AI, Creative Tools +* **Key Relationships:** Pixel 9 provides AI-powered tools that enable Gen Z to express their creativity. +* **Key Audiences:** Gen Z individuals, content creators, social media enthusiasts +* **Key Product Insights:** AI-powered camera, Magic Editor, Pixel Studio app + +### 2.2. Ditch Tech Frustration: Pixel 9 Simplifies Creativity + +* **Summary:** The Pixel 9 addresses Gen Z's frustration with complicated technology by offering a user-friendly experience that simplifies content creation and sharing. +* **Key Entities:** Google Pixel 9, Gen Z, User Experience, Simplicity +* **Key Relationships:** Pixel 9 simplifies the creative process, removing technical barriers for Gen Z users. 
+* **Key Audiences:** Gen Z individuals, people frustrated with technology, social media users +* **Key Product Insights:** User-friendly interface, seamless sharing, intuitive design + +### 2.3. Seamless Sharing: Pixel 9 Connects Gen Z's World + +* **Summary:** The Pixel 9 facilitates instant and effortless sharing of content, catering to Gen Z's need for constant connectivity and social interaction. +* **Key Entities:** Google Pixel 9, Gen Z, Social Sharing, Connectivity +* **Key Relationships:** Pixel 9 enables Gen Z to seamlessly share their creations and experiences with their social networks. +* **Key Audiences:** Gen Z individuals, social media users, influencers +* **Key Product Insights:** Instant sharing capabilities, social media integration, cloud connectivity + +## 3. Trend Analysis + +### 3.1. Google Search Trend: Phil Mickelson - Golfing Legend in the Spotlight + +* **Context:** Phil Mickelson is trending due to his continued presence in major golf tournaments and public interest in his career. +* **Key Entities:** Phil Mickelson, Golf, PGA Tour, LIV Golf +* **Key Relationships:** Mickelson's participation in golf tournaments and his involvement with LIV Golf drive public interest. +* **Key Audiences:** Golf fans, sports enthusiasts, Gen Z individuals interested in sports and celebrity culture +* **Marketing Opportunity:** Leverage Mickelson's name recognition to create a playful connection with the Pixel 9, highlighting its ability to capture "winning moments" with crystal-clear quality. +* **Product Integration:** Use Mickelson's image (or a look-alike) in ad creatives, showcasing the Pixel 9's camera capabilities. +* **URLs:** (URLs related to Phil Mickelson's recent activities and trending status would be included here) + +### 3.2. YouTube Trend: I Built a Giant LEGO Railway for my Pets! + +* **Context:** This video showcases creativity, DIY projects, and a love for animals, resonating with Gen Z's interests in unique content and self-expression. 
+* **Key Themes:** Creativity, DIY, LEGO, Pets, Community +* **Key Entities:** Aquarium Info (YouTube channel), LEGO, Pets +* **Key Relationships:** The video creator builds a LEGO railway for their pets, showcasing creativity and craftsmanship. +* **Key Audiences:** LEGO enthusiasts, pet lovers, DIY enthusiasts, Gen Z individuals interested in unique and creative content +* **Marketing Opportunity:** Tap into the trend of DIY creativity and showcase the Pixel 9's ability to capture and share these creations with ease. +* **Product Integration:** Feature Gen Z creators using the Pixel 9 to capture their LEGO creations, highlighting the phone's camera quality and seamless sharing capabilities. +* **URLs:** [https://www.youtube.com/watch?v=p7q8NnVlbVY](https://www.youtube.com/watch?v=p7q8NnVlbVY) + + +[![LEGO Railway](../media/lego_railway_thumb_v2.png)](https://www.youtube.com/watch?v=p7q8NnVlbVY "I Built a Giant LEGO Railway for my Pets") + + + +## 4. Creative Campaign Ideas + +### 4.1. Image Creative + +* **Description:** A surreal close-up photograph of a golfer who resembles Phil Mickelson. He is on a vibrant green golf course, but instead of focusing on the ball or the fairway, he is intensely staring at a small pile of LEGO blocks in front of him, with a look of extreme concentration. The LEGO blocks are brightly colored and slightly out of focus. The background should be dreamlike with an oversized golf ball floating in the sky. +* **Reasoning:** This image combines the "Phil Mickelson" and "LEGO Railway" trends in a visually striking and attention-grabbing way, aligning with Gen Z's love for surreal memes. +* **Filename:** 31d72ab9-0813-4f99-99b0-144c4bf20c86.png +* **Ad Copy** "Is Phil Mickelson building a LEGO golf course? 🤔 Probably not, but with the Google Pixel 9, you can capture ANY amazing moment (even surreal ones) with crystal-clear quality and share it instantly with your friends! Seamless sharing for a connected generation! 
#Pixel9 #Golf #Lego #SeamlessSharing" + +

+ +

+ +### 4.2. Video Creative + +* **Description:** A short, captivating video ad for Instagram/TikTok, building upon the still image. It opens with a close-up of the face of a golfer resembling Phil Mickelson intensely staring at a small pile of LEGO blocks on a vibrant green golf course. The camera slowly zooms out to reveal a Gen Z individual holding a Google Pixel 9. The individual uses the Pixel 9 to quickly capture a series of high-quality photos, demonstrating the camera’s excellent image stabilization. No golf ball in the sky. The video transitions into a montage of other Gen Z creators showcasing their unique LEGO creations captured with their Google Pixel 9s. The montage includes shots of different lighting environments. All phones shown should clearly be Google Pixel 9 models. Upbeat, trendy music plays throughout. The final shot displays the Google Pixel 9 with the tagline: 'Capture Your Amazing Moments. Seamless Sharing. Google Pixel 9.' +* **Reasoning:** This video builds upon the surreal image, showcasing the Pixel 9's camera capabilities and seamless sharing features in a dynamic and engaging way. +* **Filename:** 621845fe-b139-4650-9a47-c8320eece961.mp4 + + +[![trends-2-creatives](../media/phil_mickelegos.png)](https://youtu.be/uHudXE5YSfk "Surreal Moments, Seamlessly Shared") + + +### 4.3. Ad Copy + +* **Ad Copy:** "Phil's secret hobby revealed?! 🤫🤯 Even golf legends need a LEGO break! 😂 But fr tho, the #Pixel9 camera snaps EVERYTHING in crystal clear quality. Build it, capture it, share it! #GolfMeetsLego #SeamlessSharing" +* **Reasoning:** This caption is playful, attention-grabbing, and incorporates both the "Phil Mickelson" and "LEGO Railway" trends, while highlighting the Pixel 9's key features and benefits. + +## 5. 
Opportunities to Enhance the Campaign Guide + +The `search_trends`, `yt_trends`, and `insights` present several opportunities to enhance the original campaign guide: + +* **Expand on AI-Powered Creative Tools:** The campaign guide mentions the AI-powered camera, but it could be enhanced by showcasing specific AI features like Magic Editor and Pixel Studio, highlighting their ease of use and creative potential for Gen Z. +* **Emphasize Seamless Sharing:** The campaign guide mentions social media as a core focus, but it could be strengthened by emphasizing the Pixel 9's seamless sharing capabilities and integration with popular social media platforms. +* **Leverage User-Generated Content:** Encourage Gen Z users to share their Pixel 9-captured creations using a specific hashtag, fostering a sense of community and user engagement. +* **Partner with Gen Z Influencers:** Collaborate with Gen Z influencers who are passionate about LEGO, DIY projects, and photography to create authentic and engaging content showcasing the Pixel 9's features and benefits. +* **Create Interactive Experiences:** Develop interactive social media experiences, such as polls, quizzes, and AR filters, that allow Gen Z users to engage with the Pixel 9 brand in a fun and creative way. 
\ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/media/lego_railway_thumb.png b/python/agents/trends-and-insights-agent/media/lego_railway_thumb.png new file mode 100644 index 00000000..a3b58a26 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/lego_railway_thumb.png differ diff --git a/python/agents/trends-and-insights-agent/media/lego_railway_thumb_v2.png b/python/agents/trends-and-insights-agent/media/lego_railway_thumb_v2.png new file mode 100644 index 00000000..d1c8386a Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/lego_railway_thumb_v2.png differ diff --git a/python/agents/trends-and-insights-agent/media/mad_again_pixel.png b/python/agents/trends-and-insights-agent/media/mad_again_pixel.png new file mode 100644 index 00000000..e99898eb Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/mad_again_pixel.png differ diff --git a/python/agents/trends-and-insights-agent/media/marketing_guide_Pixel_9.pdf b/python/agents/trends-and-insights-agent/media/marketing_guide_Pixel_9.pdf new file mode 100644 index 00000000..f8331604 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/marketing_guide_Pixel_9.pdf differ diff --git a/python/agents/trends-and-insights-agent/media/phil_mickelegos.png b/python/agents/trends-and-insights-agent/media/phil_mickelegos.png new file mode 100644 index 00000000..c28ae65b Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/phil_mickelegos.png differ diff --git a/python/agents/trends-and-insights-agent/media/phil_still_legos.png b/python/agents/trends-and-insights-agent/media/phil_still_legos.png new file mode 100644 index 00000000..66ff0d72 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/phil_still_legos.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_ad_overview_0725.png 
b/python/agents/trends-and-insights-agent/media/t2a_ad_overview_0725.png new file mode 100644 index 00000000..43c17776 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_ad_overview_0725.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_hulk_call_Screen_v2.png b/python/agents/trends-and-insights-agent/media/t2a_hulk_call_Screen_v2.png new file mode 100644 index 00000000..d2f39c8c Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_hulk_call_Screen_v2.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_new_research_orchestration.png b/python/agents/trends-and-insights-agent/media/t2a_new_research_orchestration.png new file mode 100644 index 00000000..bb0872fd Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_new_research_orchestration.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_overview_0725_v2.png b/python/agents/trends-and-insights-agent/media/t2a_overview_0725_v2.png new file mode 100644 index 00000000..db52b029 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_overview_0725_v2.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_research_overview_0725.png b/python/agents/trends-and-insights-agent/media/t2a_research_overview_0725.png new file mode 100644 index 00000000..5910c241 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_research_overview_0725.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_subagent_overview_0725.png b/python/agents/trends-and-insights-agent/media/t2a_subagent_overview_0725.png new file mode 100644 index 00000000..5844afeb Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_subagent_overview_0725.png differ diff --git a/python/agents/trends-and-insights-agent/media/t2a_trend_ast_overview_0725.png 
b/python/agents/trends-and-insights-agent/media/t2a_trend_ast_overview_0725.png new file mode 100644 index 00000000..92783b25 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/t2a_trend_ast_overview_0725.png differ diff --git a/python/agents/trends-and-insights-agent/media/titanic_prs.png b/python/agents/trends-and-insights-agent/media/titanic_prs.png new file mode 100644 index 00000000..0b37a93d Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/titanic_prs.png differ diff --git a/python/agents/trends-and-insights-agent/media/vid_demo_teaser.png b/python/agents/trends-and-insights-agent/media/vid_demo_teaser.png new file mode 100644 index 00000000..bb46cd07 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/vid_demo_teaser.png differ diff --git a/python/agents/trends-and-insights-agent/media/waterboy_prs.png b/python/agents/trends-and-insights-agent/media/waterboy_prs.png new file mode 100644 index 00000000..e37c9552 Binary files /dev/null and b/python/agents/trends-and-insights-agent/media/waterboy_prs.png differ diff --git a/python/agents/trends-and-insights-agent/notebooks/deployment_guide.ipynb b/python/agents/trends-and-insights-agent/notebooks/deployment_guide.ipynb new file mode 100644 index 00000000..dcc788b3 --- /dev/null +++ b/python/agents/trends-and-insights-agent/notebooks/deployment_guide.ipynb @@ -0,0 +1,1101 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "893019e7", + "metadata": {}, + "source": [ + "# How to deploy ADK Apps to Agent Engine\n", + "\n", + "[Documentation link 🔗](https://google.github.io/adk-docs/deploy/agent-engine/)\n", + "\n", + "## 1. 
Set the local environment variables\n", + "These will be used in the Agent Engine deployment" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "eac665f1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from dotenv import load_dotenv\n", + "import os\n", + "\n", + "load_dotenv(\"../trends_and_insights_agent/.env\") # take environment variables" + ] + }, + { + "cell_type": "markdown", + "id": "54c0699d", + "metadata": {}, + "source": [ + "## 2. Set up the `AdkApp` Convenience wrapper for ADK Agents -> Agent Engine\n", + "\n", + "Note the injection of the environment variables." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f0e0e53c", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:\n", + "\n", + "`SESSION_STATE_JSON_PATH`: None\n", + "\n", + "\n", + "INFO:googleapiclient.discovery_cache:file_cache is only supported with oauth2client<4.0.0\n", + "/Users/jwortz/repos/zghost/zghost/.venv/lib/python3.12/site-packages/google/cloud/bigquery/table.py:1965: UserWarning: BigQuery Storage module not found, fetch data with the REST endpoint instead.\n", + " warnings.warn(\n", + "/Users/jwortz/repos/zghost/zghost/trends_and_insights_agent/common_agents/trend_assistant/tools.py:153: FutureWarning: Series.__getitem__ treating keys as positions is deprecated. In a future version, integer keys will always be treated as labels (consistent with DataFrame behavior). 
To access a value by position, use `ser.iloc[pos]`\n", + " return max_date.iloc[0][0].strftime(\"%m/%d/%Y\")\n", + "INFO:googleapiclient.discovery_cache:file_cache is only supported with oauth2client<4.0.0\n" + ] + } + ], + "source": [ + "from vertexai.preview.reasoning_engines import AdkApp\n", + "from google.adk.artifacts import GcsArtifactService\n", + "from google.adk.sessions import VertexAiSessionService\n", + "\n", + "from trends_and_insights_agent import agent\n", + "\n", + "env_vars = {}\n", + "\n", + "env_vars[\"GOOGLE_GENAI_USE_VERTEXAI\"] = os.getenv(\"GOOGLE_GENAI_USE_VERTEXAI\")\n", + "env_vars[\"BUCKET\"] = os.getenv(\"BUCKET\", \"gs://default-bucket\")\n", + "env_vars[\"GOOGLE_CLOUD_PROJECT_NUMBER\"] = os.getenv(\"GOOGLE_CLOUD_PROJECT_NUMBER\")\n", + "env_vars[\"YT_SECRET_MNGR_NAME\"] = os.getenv(\"YT_SECRET_MNGR_NAME\")\n", + "# env_vars[\"SESSION_STATE_JSON_PATH\"] = os.getenv(\"SESSION_STATE_JSON_PATH\")\n", + "\n", + "\n", + "my_agent = AdkApp(\n", + " agent=agent.root_agent,\n", + " enable_tracing=True,\n", + " env_vars=env_vars,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e500ea11", + "metadata": {}, + "source": [ + "# 3. 
Test the Agent Engine locally" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "33be24e1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Session(id='12345', app_name='default-app-name', user_id='jwortz', state={}, events=[], last_update_time=1753822682.469465)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "session = my_agent.create_session(user_id=\"jwortz\", session_id=\"12345\")\n", + "session" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a1a96d83", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ListSessionsResponse(sessions=[Session(id='12345', app_name='default-app-name', user_id='jwortz', state={}, events=[], last_update_time=1753822682.469465)])" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "my_agent.list_sessions(user_id=\"jwortz\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d371bfe6", + "metadata": {}, + "outputs": [], + "source": [ + "from pprint import pprint\n", + "\n", + "\n", + "def stream_agent(agent, prompt: str, session) -> None | Exception:\n", + " try:\n", + " session_id = session[\"id\"]\n", + " # local prefers attributes vs. 
remote prefers dict keys\n", + " except TypeError:\n", + " session_id = session.id\n", + " except Exception as e:\n", + " return f\"Session Object not valid: {e}\"\n", + "\n", + " for event in agent.stream_query(\n", + " user_id=\"jwortz\",\n", + " session_id=session_id,\n", + " message=prompt,\n", + " ):\n", + " pprint(event)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "6a10dd01", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:\n", + "\n", + "Loading Initial State (empty): {'state': {'final_select_ad_copies': {'final_select_ad_copies': []}, 'final_select_vis_concepts': {'final_select_vis_concepts': []}, 'img_artifact_keys': {'img_artifact_keys': []}, 'vid_artifact_keys': {'vid_artifact_keys': []}, 'brand': '', 'target_product': '', 'target_audience': '', 'key_selling_points': '', 'target_search_trends': {'target_search_trends': []}, 'target_yt_trends': {'target_yt_trends': []}}}\n", + "\n", + "\n", + "INFO:google_adk.google.adk.models.google_llm:Sending out request, model: gemini-2.5-flash, backend: GoogleLLMVariant.VERTEX_AI, stream: False\n", + "INFO:google_adk.google.adk.models.google_llm:\n", + "LLM Request:\n", + "-----------------------------------------------------------\n", + "System Instruction:\n", + "\n", + "You are a helpful AI assistant, part of a multi-agent system designed for advanced web research and ad creative generation.\n", + "Do not perform any research yourself. Your job is to **delegate**.\n", + "\n", + "\n", + "You are an Expert AI Marketing Research & Strategy Assistant. \n", + "\n", + "Your primary function is to orchestrate a suite of **specialized tools and sub-agents** to provide users with comprehensive insights, trend analysis, and creative ideas for their marketing campaigns. \n", + "\n", + "\n", + "**Instructions:**\n", + "Start by greeting the user and giving them a high-level overview of what you do. 
Then proceed sequentially with the tasks below:\n", + "\n", + "1. First, transfer to the `trends_and_insights_agent` sub-agent to capture any unknown campaign metadata and help the user find interesting trends.\n", + "2. Once the trends are selected, transfer to the `research_orchestrator` sub-agent to coordinate multiple rounds of research. Strictly follow all the steps one-by-one. Do not skip any steps or execute them out of order.\n", + "3. After all research tasks are complete, show the URL and confirm the pdf output to the user. Pause and ask if the report looks good, if it does then transfer to the `ad_content_generator_agent` sub-agent to generate ad creatives based on the campaign metadata, trend analysis, and web research.\n", + "4. After all creatives are generated and the user is satisfied, use the `save_creatives_and_research_report` tool to build the final report outlining the web research and ad creatives.\n", + "\n", + "\n", + "**Sub-agents:**\n", + "- Use `trends_and_insights_agent` to gather inputs from the user e.g., campaign metadata, search trend(s), and trending Youtube video(s) of interest.\n", + "- Use `research_orchestrator` to coordinate and execute all research tasks.\n", + "- Use `ad_content_generator_agent` to help the user create visual concepts for ads.\n", + "\n", + "\n", + "**Tools:**\n", + "- Use `save_creatives_and_research_report` tool to build the final report, detailing research and creatives generated during a session, and save it as an artifact. Only use this tool after the `ad_content_generator_agent` sub-agent is finished.\n", + "\n", + "\n", + "**Campaign metadata:**\n", + "\n", + " \n", + "\n", + " \n", + "\n", + " \n", + " \n", + " \n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "\n", + "You are an agent. 
Your internal name is \"root_agent\".\n", + "\n", + " The description about you is \"A trend and insight assistant using the services of multiple sub-agents.\"\n", + "\n", + "\n", + "You have a list of other agents to transfer to:\n", + "\n", + "\n", + "Agent name: research_orchestrator\n", + "Agent description: Orchestrate comprehensive research for the campaign metadata and trending topics.\n", + "\n", + "\n", + "Agent name: trends_and_insights_agent\n", + "Agent description: Captures campaign metadata and displays trending topics from Google Search and trending videos from YouTube.\n", + "\n", + "\n", + "Agent name: ad_content_generator_agent\n", + "Agent description: Help users with ad generation; brainstorm and refine ad copy and visual concept ideas with actor-critic workflows; iterate with the user to generate final ad creatives.\n", + "\n", + "\n", + "If you are the best to answer the question according to your description, you\n", + "can answer it.\n", + "\n", + "If another agent is better for answering the question according to its\n", + "description, call `transfer_to_agent` function to transfer the\n", + "question to that agent. 
When transferring, do not generate any text other than\n", + "the function call.\n", + "\n", + "-----------------------------------------------------------\n", + "Contents:\n", + "{\"parts\":[{\"text\":\"Hello\"}],\"role\":\"user\"}\n", + "-----------------------------------------------------------\n", + "Functions:\n", + "transfer_to_agent: {'agent_name': {'type': }} \n", + "save_creatives_and_research_report: {} -> {'type': }\n", + "-----------------------------------------------------------\n", + "\n", + "INFO:google_genai.models:AFC is enabled with max remote calls: 10.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {'_state_init': True,\n", + " 'brand': '',\n", + " 'final_select_ad_copies': {'final_select_ad_copies': []},\n", + " 'final_select_vis_concepts': {'final_select_vis_concepts': []},\n", + " 'gcs_folder': '2025_07_29_20_58',\n", + " 'img_artifact_keys': {'img_artifact_keys': []},\n", + " 'key_selling_points': '',\n", + " 'target_audience': '',\n", + " 'target_product': '',\n", + " 'target_search_trends': {'target_search_trends': []},\n", + " 'target_yt_trends': {'target_yt_trends': []},\n", + " 'vid_artifact_keys': {'vid_artifact_keys': []}}},\n", + " 'author': 'root_agent',\n", + " 'id': '3cc14e05-2e5b-44ba-b813-46731215579a',\n", + " 'invocation_id': 'e-93e1dd23-03ec-4958-ac4a-9c043c66ab75',\n", + " 'timestamp': 1753822688.410064}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:google_genai.types:Warning: there are non-text parts in the response: ['function_call'], returning concatenated text result from text parts. Check the full candidates.content.parts accessor to get the full model response.\n", + "INFO:google_adk.google.adk.models.google_llm:\n", + "LLM Response:\n", + "-----------------------------------------------------------\n", + "Text:\n", + "Hello! 
I'm your AI Marketing Research & Strategy Assistant. I can help you with comprehensive insights, trend analysis, and creative ideas for your marketing campaigns.\n", + "\n", + "To get started, I'll transfer you to my `trends_and_insights_agent` sub-agent, who will help us capture your campaign metadata and identify interesting trends.\n", + "\n", + "-----------------------------------------------------------\n", + "Function calls:\n", + "name: transfer_to_agent, args: {'agent_name': 'trends_and_insights_agent'}\n", + "-----------------------------------------------------------\n", + "Raw response:\n", + "{\"sdk_http_response\":{\"headers\":{\"Content-Type\":\"application/json; charset=UTF-8\",\"Vary\":\"Referer\",\"Content-Encoding\":\"gzip\",\"Date\":\"Tue, 29 Jul 2025 20:58:11 GMT\",\"Server\":\"scaffolding on HTTPServer2\",\"X-XSS-Protection\":\"0\",\"X-Frame-Options\":\"SAMEORIGIN\",\"X-Content-Type-Options\":\"nosniff\",\"Alt-Svc\":\"h3=\\\":443\\\"; ma=2592000,h3-29=\\\":443\\\"; ma=2592000\",\"Transfer-Encoding\":\"chunked\"}},\"candidates\":[{\"content\":{\"parts\":[{\"text\":\"Hello! I'm your AI Marketing Research & Strategy Assistant. 
I can help you with comprehensive insights, trend analysis, and creative ideas for your marketing campaigns.\\n\\nTo get started, I'll transfer you to my `trends_and_insights_agent` sub-agent, who will help us capture your campaign metadata and identify interesting trends.\\n\"},{\"function_call\":{\"args\":{\"agent_name\":\"trends_and_insights_agent\"},\"name\":\"transfer_to_agent\"}}],\"role\":\"model\"},\"finish_reason\":\"STOP\",\"avg_logprobs\":-0.16866639603015987}],\"create_time\":\"2025-07-29T20:58:09.113600Z\",\"response_id\":\"4TWJaMD3Bs2eld8Pgs7B0Ak\",\"model_version\":\"gemini-2.5-flash\",\"usage_metadata\":{\"candidates_token_count\":86,\"candidates_tokens_details\":[{\"modality\":\"TEXT\",\"token_count\":86}],\"prompt_token_count\":876,\"prompt_tokens_details\":[{\"modality\":\"TEXT\",\"token_count\":876}],\"thoughts_token_count\":58,\"total_token_count\":1020,\"traffic_type\":\"ON_DEMAND\"},\"automatic_function_calling_history\":[]}\n", + "-----------------------------------------------------------\n", + "\n", + "INFO:google_adk.google.adk.models.google_llm:Sending out request, model: gemini-2.5-flash, backend: GoogleLLMVariant.VERTEX_AI, stream: False\n", + "INFO:google_adk.google.adk.models.google_llm:\n", + "LLM Request:\n", + "-----------------------------------------------------------\n", + "System Instruction:\n", + "\n", + "You are a helpful AI assistant, part of a multi-agent system designed for advanced web research and ad creative generation.\n", + "Do not perform any research yourself. Your job is to **delegate**.\n", + "\n", + "\n", + "\n", + "You are a planning agent who helps users create marketing campaign briefs that will guide and inform downstream research and creative processes.\n", + "- You do not conduct any research or creative processes. You are strictly helping users with their selections and preferences only.\n", + "- You want to gather specific campaign-related metadata from the user. 
The actual research will be handled by transferring to the `combined_research_merger` later.\n", + "\n", + "You are responsible for capturing three categories of information:\n", + "1. Campaign metadata e.g., brand, product, key selling points, and target audience.\n", + "2. Trending topics from Google Search.\n", + "3. Trending videos from YouTube.\n", + "\n", + "Your **objective** is to use the **available tools** to complete the **instructions** step-by-step.\n", + "\n", + "## Available Tools\n", + "* `get_daily_gtrends`: Use this tool to extract the top trends from Google Search for the current week.\n", + "* `get_youtube_trends`: Use this tool to query the YouTube Data API for the top trending YouTube videos.\n", + "* `save_yt_trends_to_session_state`: Use this tool to update the 'target_yt_trends' state variable with the user-selected video(s) trending on YouTube.\n", + "* `save_search_trends_to_session_state`: Use this tool to update the 'target_search_trends' state variable with the user-selected Search Trend.\n", + "* `memorize`: Use this tool to store user selections in the session state.\n", + "\n", + "## Instructions\n", + "1. Your goal is to help the user, by first completing the following information if any is blank:\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "2. Ask for missing information from the user.\n", + "3. Use the `memorize` tool to store campaign metadata into the following variables:\n", + " - `brand`, \n", + " - `target_audience`\n", + " - `target_product` and \n", + " - `key_selling_points`\n", + " To make sure everything is stored correctly, instead of calling memorize all at once, chain the calls such that \n", + " you only call another `memorize` after the last call has responded. \n", + "4. Use instructions from to find the user's desired Search trend.\n", + "5. Use instructions from to find the user's desired trending YouTube video.\n", + "6. 
Finally, once the above information is captured, reconfirm with user, if the user is satisfied, transfer to the `root_agent`.\n", + "\n", + "\n", + "- Use the `get_daily_gtrends` tool to display the top 25 trending Search terms to the user. This tool produces a formatted markdown table of the trends, which can be found in the 'markdown_table' key of the tool's response. You must display this markdown table to the user **in markdown format** \n", + "- Work with the user to understand which trending topic they'd like to proceed with. Do not proceed to the next step until the user has selected a Search trend topic.\n", + "- Once they choose a Search trend topic, use the `save_search_trends_to_session_state` tool to update the session state with the `term`, `rank`, and `refresh_date` from this Search trend topic.\n", + "\n", + "\n", + "\n", + "- Use the `get_youtube_trends` tool to extract the top trending videos on YouTube for the US. Display each trending video's title, duration, and URL to the user in a numbered list like this:\n", + " \n", + " 1. **Video Title** - Duration - URL\n", + " 2. **Video Title** - Duration - URL\n", + " 3. **Video Title** - Duration - URL\n", + " \n", + "\n", + "\n", + "\n", + "You are an agent. 
Your internal name is \"trends_and_insights_agent\".\n", + "\n", + " The description about you is \"Captures campaign metadata and displays trending topics from Google Search and trending videos from YouTube.\"\n", + "\n", + "\n", + "You have a list of other agents to transfer to:\n", + "\n", + "\n", + "Agent name: root_agent\n", + "Agent description: A trend and insight assistant using the services of multiple sub-agents.\n", + "\n", + "\n", + "Agent name: research_orchestrator\n", + "Agent description: Orchestrate comprehensive research for the campaign metadata and trending topics.\n", + "\n", + "\n", + "Agent name: ad_content_generator_agent\n", + "Agent description: Help users with ad generation; brainstorm and refine ad copy and visual concept ideas with actor-critic workflows; iterate with the user to generate final ad creatives.\n", + "\n", + "\n", + "If you are the best to answer the question according to your description, you\n", + "can answer it.\n", + "\n", + "If another agent is better for answering the question according to its\n", + "description, call `transfer_to_agent` function to transfer the\n", + "question to that agent. When transferring, do not generate any text other than\n", + "the function call.\n", + "\n", + "Your parent agent is root_agent. If neither the other agents nor\n", + "you are best for answering the question according to the descriptions, transfer\n", + "to your parent agent.\n", + "\n", + "-----------------------------------------------------------\n", + "Contents:\n", + "{\"parts\":[{\"text\":\"Hello\"}],\"role\":\"user\"}\n", + "{\"parts\":[{\"text\":\"For context:\"},{\"text\":\"[root_agent] said: Hello! I'm your AI Marketing Research & Strategy Assistant. 
I can help you with comprehensive insights, trend analysis, and creative ideas for your marketing campaigns.\\n\\nTo get started, I'll transfer you to my `trends_and_insights_agent` sub-agent, who will help us capture your campaign metadata and identify interesting trends.\\n\"},{\"text\":\"[root_agent] called tool `transfer_to_agent` with parameters: {'agent_name': 'trends_and_insights_agent'}\"}],\"role\":\"user\"}\n", + "{\"parts\":[{\"text\":\"For context:\"},{\"text\":\"[root_agent] `transfer_to_agent` tool returned result: {'result': None}\"}],\"role\":\"user\"}\n", + "-----------------------------------------------------------\n", + "Functions:\n", + "transfer_to_agent: {'agent_name': {'type': }} \n", + "memorize: {'key': {'type': }, 'value': {'type': }} \n", + "get_daily_gtrends: {'today_date': {'default': '07/28/2025', 'type': }} -> {'type': }\n", + "get_youtube_trends: {'region_code': {'default': 'US', 'type': }, 'max_results': {'default': 45, 'type': }} -> {'type': }\n", + "save_yt_trends_to_session_state: {'selected_trends': {'type': }} -> {'type': }\n", + "save_search_trends_to_session_state: {'new_trends': {'type': }} -> {'type': }\n", + "-----------------------------------------------------------\n", + "\n", + "INFO:google_genai.models:AFC is enabled with max remote calls: 10.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {'request_count': 1,\n", + " 'timer_start': 1753822688.412184}},\n", + " 'author': 'root_agent',\n", + " 'content': {'parts': [{'text': \"Hello! I'm your AI Marketing Research & \"\n", + " 'Strategy Assistant. 
I can help you with '\n", + " 'comprehensive insights, trend analysis, and '\n", + " 'creative ideas for your marketing campaigns.\\n'\n", + " '\\n'\n", + " \"To get started, I'll transfer you to my \"\n", + " '`trends_and_insights_agent` sub-agent, who '\n", + " 'will help us capture your campaign metadata '\n", + " 'and identify interesting trends.\\n'},\n", + " {'function_call': {'args': {'agent_name': 'trends_and_insights_agent'},\n", + " 'id': 'adk-20ead9ce-173d-42ae-939f-4ec7788068be',\n", + " 'name': 'transfer_to_agent'}}],\n", + " 'role': 'model'},\n", + " 'id': 'c877dee8-06a8-40e4-9aae-b8f0b4c7a1a0',\n", + " 'invocation_id': 'e-93e1dd23-03ec-4958-ac4a-9c043c66ab75',\n", + " 'long_running_tool_ids': set(),\n", + " 'timestamp': 1753822688.412144,\n", + " 'usage_metadata': {'candidates_token_count': 86,\n", + " 'candidates_tokens_details': [{'modality': ,\n", + " 'token_count': 86}],\n", + " 'prompt_token_count': 876,\n", + " 'prompt_tokens_details': [{'modality': ,\n", + " 'token_count': 876}],\n", + " 'thoughts_token_count': 58,\n", + " 'total_token_count': 1020,\n", + " 'traffic_type': }}\n", + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {},\n", + " 'transfer_to_agent': 'trends_and_insights_agent'},\n", + " 'author': 'root_agent',\n", + " 'content': {'parts': [{'function_response': {'id': 'adk-20ead9ce-173d-42ae-939f-4ec7788068be',\n", + " 'name': 'transfer_to_agent',\n", + " 'response': {'result': None}}}],\n", + " 'role': 'user'},\n", + " 'id': '369e547f-c225-41e5-a704-c4db98f87cc8',\n", + " 'invocation_id': 'e-93e1dd23-03ec-4958-ac4a-9c043c66ab75',\n", + " 'timestamp': 1753822691.095631}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:google_adk.google.adk.models.google_llm:\n", + "LLM Response:\n", + "-----------------------------------------------------------\n", + "Text:\n", + "Hello! I'm your Trends and Insights Assistant. 
I can help you by gathering information for your marketing campaign.\n", + "\n", + "To start, please tell me the following:\n", + "* **Brand Name:**\n", + "* **Target Audience:**\n", + "* **Target Product:**\n", + "* **Key Selling Points of the product:**\n", + "-----------------------------------------------------------\n", + "Function calls:\n", + "\n", + "-----------------------------------------------------------\n", + "Raw response:\n", + "{\"sdk_http_response\":{\"headers\":{\"Content-Type\":\"application/json; charset=UTF-8\",\"Vary\":\"Referer\",\"Content-Encoding\":\"gzip\",\"Date\":\"Tue, 29 Jul 2025 20:58:13 GMT\",\"Server\":\"scaffolding on HTTPServer2\",\"X-XSS-Protection\":\"0\",\"X-Frame-Options\":\"SAMEORIGIN\",\"X-Content-Type-Options\":\"nosniff\",\"Alt-Svc\":\"h3=\\\":443\\\"; ma=2592000,h3-29=\\\":443\\\"; ma=2592000\",\"Transfer-Encoding\":\"chunked\"}},\"candidates\":[{\"content\":{\"parts\":[{\"text\":\"Hello! I'm your Trends and Insights Assistant. I can help you by gathering information for your marketing campaign.\\n\\nTo start, please tell me the following:\\n* **Brand Name:**\\n* **Target Audience:**\\n* **Target Product:**\\n* **Key Selling Points of the product:**\"}],\"role\":\"model\"},\"finish_reason\":\"STOP\",\"avg_logprobs\":-0.4005987020639273}],\"create_time\":\"2025-07-29T20:58:11.789748Z\",\"response_id\":\"4zWJaPSZMI38ld8PhZnKuQM\",\"model_version\":\"gemini-2.5-flash\",\"usage_metadata\":{\"candidates_token_count\":65,\"candidates_tokens_details\":[{\"modality\":\"TEXT\",\"token_count\":65}],\"prompt_token_count\":2203,\"prompt_tokens_details\":[{\"modality\":\"TEXT\",\"token_count\":2203}],\"thoughts_token_count\":82,\"total_token_count\":2350,\"traffic_type\":\"ON_DEMAND\"},\"automatic_function_calling_history\":[]}\n", + "-----------------------------------------------------------\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'actions': {'artifact_delta': {},\n", + " 
'requested_auth_configs': {},\n", + " 'state_delta': {}},\n", + " 'author': 'trends_and_insights_agent',\n", + " 'content': {'parts': [{'text': \"Hello! I'm your Trends and Insights \"\n", + " 'Assistant. I can help you by gathering '\n", + " 'information for your marketing campaign.\\n'\n", + " '\\n'\n", + " 'To start, please tell me the following:\\n'\n", + " '* **Brand Name:**\\n'\n", + " '* **Target Audience:**\\n'\n", + " '* **Target Product:**\\n'\n", + " '* **Key Selling Points of the product:**'}],\n", + " 'role': 'model'},\n", + " 'id': 'f7798729-2f6a-4c9d-9cd5-006f955e8c7e',\n", + " 'invocation_id': 'e-93e1dd23-03ec-4958-ac4a-9c043c66ab75',\n", + " 'timestamp': 1753822691.101807,\n", + " 'usage_metadata': {'candidates_token_count': 65,\n", + " 'candidates_tokens_details': [{'modality': ,\n", + " 'token_count': 65}],\n", + " 'prompt_token_count': 2203,\n", + " 'prompt_tokens_details': [{'modality': ,\n", + " 'token_count': 2203}],\n", + " 'thoughts_token_count': 82,\n", + " 'total_token_count': 2350,\n", + " 'traffic_type': }}\n" + ] + } + ], + "source": [ + "first_prompt = f\"Hello\"\n", + "stream_agent(my_agent, first_prompt, session)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a3bc0a2", + "metadata": {}, + "outputs": [], + "source": [ + "followup_prompt = \"Create images from these great ideas!\"\n", + "stream_agent(my_agent, followup_prompt, session)" + ] + }, + { + "cell_type": "markdown", + "id": "2c97ad97", + "metadata": {}, + "source": [ + "## 4. 
Deploy to Vertex AI Agent Engine\n", + "To call and manage agents in production, deploy the agent to Vertex AI Agent Engine.\n", + "\n", + "**Important - run `poetry build` to package the agent**" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "3a022bf9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Building \u001b[36mtrends_and_insights_agent\u001b[39m (\u001b[39;1m0.1.0\u001b[39;22m)\n", + "Building \u001b[34mwheel\u001b[39m\n", + " - Building \u001b[34mwheel\u001b[39m\n", + " - Built \u001b[32mtrends_and_insights_agent-0.1.0-py3-none-any.whl\u001b[39m\n" + ] + } + ], + "source": [ + "! poetry build --format=wheel --output=notebooks/deployment" + ] + }, + { + "cell_type": "markdown", + "id": "76974f85", + "metadata": {}, + "source": [ + "#### Initialize the Vertex client, then create a `remote_agent` that is deployed to Vertex\n", + "\n", + "This also takes the packaged agent code. This is required for more complex agents that have nested dependencies and require packaging\n", + "\n", + "Also, before running - be sure to give Secret Manager access to the Agent Engine service account. This can simply be done by running `. 
setup_ae_sm_access.sh`.\n", + "\n", + "The code can also be run as follows (relative to repo root):\n", + "\n", + "```bash\n", + "source trends_and_insights_agent/.env\n", + "\n", + "export RE_SA=\"service-${GOOGLE_CLOUD_PROJECT_NUMBER}@gcp-sa-aiplatform-re.iam.gserviceaccount.com\"\n", + "gcloud secrets add-iam-policy-binding \"projects/$GOOGLE_CLOUD_PROJECT/secrets/$YT_SECRET_MNGR_NAME\" \\\n", + " --member=\"serviceAccount:$RE_SA\" \\\n", + " --role=\"roles/secretmanager.secretAccessor\"\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "f10d3166", + "metadata": {}, + "outputs": [], + "source": [ + "from vertexai import agent_engines # optional cleanup\n", + "\n", + "for agent in agent_engines.list():\n", + " agent.delete(force=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "1b7eea3c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Identified the following requirements: {'pydantic': '2.11.7', 'cloudpickle': '3.1.1', 'google-cloud-aiplatform': '1.105.0'}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Identified the following requirements: {'pydantic': '2.11.7', 'cloudpickle': '3.1.1', 'google-cloud-aiplatform': '1.105.0'}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Failed to parse constraint: deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl. Exception: Expected end or semicolon (after name and no valid version specifier)\n", + " deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl\n", + " ^\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:vertexai.agent_engines:Failed to parse constraint: deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl.
Exception: Expected end or semicolon (after name and no valid version specifier)\n", + " deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl\n", + " ^\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The following requirements are missing: {'pydantic', 'cloudpickle', 'google-cloud-aiplatform'}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:vertexai.agent_engines:The following requirements are missing: {'pydantic', 'cloudpickle', 'google-cloud-aiplatform'}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The following requirements are appended: {'pydantic==2.11.7', 'cloudpickle==3.1.1'}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:The following requirements are appended: {'pydantic==2.11.7', 'cloudpickle==3.1.1'}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The final list of requirements: ['deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl', 'db-dtypes', 'tabulate', 'pydantic==2.11.7', 'cloudpickle==3.1.1']\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:The final list of requirements: ['deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl', 'db-dtypes', 'tabulate', 'pydantic==2.11.7', 'cloudpickle==3.1.1']\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Using bucket zghost-media-center\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Using bucket zghost-media-center\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Wrote to gs://zghost-media-center/agent_engine/agent_engine.pkl\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Wrote to gs://zghost-media-center/agent_engine/agent_engine.pkl\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "Writing to gs://zghost-media-center/agent_engine/requirements.txt\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Writing to gs://zghost-media-center/agent_engine/requirements.txt\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating in-memory tarfile of extra_packages\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Creating in-memory tarfile of extra_packages\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing to gs://zghost-media-center/agent_engine/dependencies.tar.gz\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Writing to gs://zghost-media-center/agent_engine/dependencies.tar.gz\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating AgentEngine\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Creating AgentEngine\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Create AgentEngine backing LRO: projects/679926387543/locations/us-central1/reasoningEngines/3252927141007851520/operations/6194730939902328832\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:Create AgentEngine backing LRO: projects/679926387543/locations/us-central1/reasoningEngines/3252927141007851520/operations/6194730939902328832\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "View progress and logs at https://console.cloud.google.com/logs/query?project=wortz-project-352116\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:View progress and logs at https://console.cloud.google.com/logs/query?project=wortz-project-352116\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "AgentEngine created. Resource name: projects/679926387543/locations/us-central1/reasoningEngines/3252927141007851520\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:AgentEngine created. Resource name: projects/679926387543/locations/us-central1/reasoningEngines/3252927141007851520\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "To use this AgentEngine in another session:\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:To use this AgentEngine in another session:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "agent_engine = vertexai.agent_engines.get('projects/679926387543/locations/us-central1/reasoningEngines/3252927141007851520')\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:vertexai.agent_engines:agent_engine = vertexai.agent_engines.get('projects/679926387543/locations/us-central1/reasoningEngines/3252927141007851520')\n" + ] + } + ], + "source": [ + "import vertexai\n", + "from vertexai import agent_engines\n", + "\n", + "GOOGLE_CLOUD_PROJECT = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n", + "BUCKET = os.getenv(\"BUCKET\")\n", + "\n", + "vertexai.init(\n", + " project=GOOGLE_CLOUD_PROJECT,\n", + " location=\"us-central1\",\n", + " staging_bucket=BUCKET,\n", + ")\n", + "\n", + "remote_agent = agent_engines.create(\n", + " agent_engine=my_agent,\n", + " display_name=\"trends-and-insights\",\n", + " description=\"You are a helpful AI assistant, part of a multi-agent system designed for advanced web research and ad creative generation.\",\n", + " requirements=[\n", + " \"deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl\",\n", + " \"db-dtypes\",\n", + " \"tabulate\",\n", + " ],\n", + " extra_packages=[\n", + " \"deployment/trends_and_insights_agent-0.1.0-py3-none-any.whl\",\n", + " 
\"installation_scripts/install_opencv.sh\",\n", + " \"installation_scripts/install_ffmpeg.sh\",\n", + " ],\n", + " env_vars=env_vars,\n", + " build_options={\n", + " \"installation\": [\n", + " \"installation_scripts/install_opencv.sh\",\n", + " \"installation_scripts/install_ffmpeg.sh\",\n", + " ]\n", + " },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "b1687a38", + "metadata": {}, + "source": [ + "### Save the agent engine resource name\n", + "\n", + "This can be used by the command to deploy it to Agentspace\n", + "\n", + "```bash\n", + "agent_engine = vertexai.agent_engines.get('projects/679926387543/locations/us-central1/reasoningEngines/1093257605637210112')\n", + "```\n", + "\n", + "## 5. Try it remotely" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "592e9803", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'lastUpdateTime': 1754493816.518254,\n", + " 'events': [],\n", + " 'userId': 'jwortz',\n", + " 'appName': '3252927141007851520',\n", + " 'id': '2675871003157987328',\n", + " 'state': {}}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "online_session = remote_agent.create_session(user_id=\"jwortz\")\n", + "online_session" + ] + }, + { + "cell_type": "markdown", + "id": "5ecd44fa", + "metadata": {}, + "source": [ + "#### Same idea above applies to remote agents" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "63c680ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {'_state_init': True,\n", + " 'brand': '',\n", + " 'final_select_ad_copies': {'final_select_ad_copies': []},\n", + " 'final_select_vis_concepts': {'final_select_vis_concepts': []},\n", + " 'gcs_folder': '2025_08_06_15_23',\n", + " 'img_artifact_keys': {'img_artifact_keys': []},\n", + " 'key_selling_points': 
'',\n", + " 'target_audience': '',\n", + " 'target_product': '',\n", + " 'target_search_trends': {'target_search_trends': []},\n", + " 'target_yt_trends': {'target_yt_trends': []},\n", + " 'vid_artifact_keys': {'vid_artifact_keys': []}}},\n", + " 'author': 'root_agent',\n", + " 'id': 'd25979c5-5eb8-477a-ad87-14f5e0150bee',\n", + " 'invocation_id': 'e-d6b5a3bb-803a-4bf3-99d5-6c2aa33c1efd',\n", + " 'timestamp': 1754493831.309956}\n", + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {'request_count': 1,\n", + " 'timer_start': 1754493831.5061977}},\n", + " 'author': 'root_agent',\n", + " 'content': {'parts': [{'text': \"Hello! I'm your AI Marketing Research & \"\n", + " 'Strategy Assistant. I can help you with '\n", + " 'comprehensive insights, trend analysis, and '\n", + " 'creative ideas for your marketing campaigns.\\n'\n", + " '\\n'\n", + " \"To get started, I'll transfer you to my \"\n", + " '`trends_and_insights_agent` sub-agent, who '\n", + " 'will help us capture your campaign metadata '\n", + " 'and identify interesting trends.\\n'},\n", + " {'function_call': {'args': {'agent_name': 'trends_and_insights_agent'},\n", + " 'id': 'adk-d5516b8b-4ca4-4423-9869-75411ddaf2ad',\n", + " 'name': 'transfer_to_agent'}}],\n", + " 'role': 'model'},\n", + " 'id': '09803c0d-d416-43e4-a9d7-de3f051c66e9',\n", + " 'invocation_id': 'e-d6b5a3bb-803a-4bf3-99d5-6c2aa33c1efd',\n", + " 'long_running_tool_ids': [],\n", + " 'timestamp': 1754493831.506145,\n", + " 'usage_metadata': {'candidates_token_count': 86,\n", + " 'candidates_tokens_details': [{'modality': 'TEXT',\n", + " 'token_count': 86}],\n", + " 'prompt_token_count': 876,\n", + " 'prompt_tokens_details': [{'modality': 'TEXT',\n", + " 'token_count': 876}],\n", + " 'thoughts_token_count': 58,\n", + " 'total_token_count': 1020,\n", + " 'traffic_type': 'ON_DEMAND'}}\n", + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {},\n", + " 
'transfer_to_agent': 'trends_and_insights_agent'},\n", + " 'author': 'root_agent',\n", + " 'content': {'parts': [{'function_response': {'id': 'adk-d5516b8b-4ca4-4423-9869-75411ddaf2ad',\n", + " 'name': 'transfer_to_agent',\n", + " 'response': {'result': None}}}],\n", + " 'role': 'user'},\n", + " 'id': '760c8d26-6769-4340-b606-9cd86f664ca4',\n", + " 'invocation_id': 'e-d6b5a3bb-803a-4bf3-99d5-6c2aa33c1efd',\n", + " 'timestamp': 1754493833.417884}\n", + "{'actions': {'artifact_delta': {},\n", + " 'requested_auth_configs': {},\n", + " 'state_delta': {}},\n", + " 'author': 'trends_and_insights_agent',\n", + " 'content': {'parts': [{'text': 'Hello! I can help you with your marketing '\n", + " 'campaign. To start, please tell me the '\n", + " 'following:\\n'\n", + " '\\n'\n", + " '* **Brand Name:**\\n'\n", + " '* **Target Audience:**\\n'\n", + " '* **Target Product:**\\n'\n", + " '* **Key Selling Points of the product:**'}],\n", + " 'role': 'model'},\n", + " 'id': 'a4aa195e-1f3e-4079-895c-902a1fb872a7',\n", + " 'invocation_id': 'e-d6b5a3bb-803a-4bf3-99d5-6c2aa33c1efd',\n", + " 'timestamp': 1754493833.651914,\n", + " 'usage_metadata': {'candidates_token_count': 52,\n", + " 'candidates_tokens_details': [{'modality': 'TEXT',\n", + " 'token_count': 52}],\n", + " 'prompt_token_count': 2203,\n", + " 'prompt_tokens_details': [{'modality': 'TEXT',\n", + " 'token_count': 2203}],\n", + " 'thoughts_token_count': 78,\n", + " 'total_token_count': 2333,\n", + " 'traffic_type': 'ON_DEMAND'}}\n" + ] + } + ], + "source": [ + "first_prompt = f\"Hello\"\n", + "\n", + "stream_agent(remote_agent, first_prompt, online_session)" + ] + }, + { + "cell_type": "markdown", + "id": "7b0a204b", + "metadata": {}, + "source": [ + "# 6. 
Optional Cleanup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f77a789", + "metadata": {}, + "outputs": [], + "source": [ + "remote_agent.delete(force=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "trends-and-insights-agent-py3.12 (3.12.2)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/python/agents/trends-and-insights-agent/notebooks/hello_marketing_assets_w_gemini2.ipynb b/python/agents/trends-and-insights-agent/notebooks/hello_marketing_assets_w_gemini2.ipynb new file mode 100644 index 00000000..abdafe94 --- /dev/null +++ b/python/agents/trends-and-insights-agent/notebooks/hello_marketing_assets_w_gemini2.ipynb @@ -0,0 +1,843 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Creating marketing assets with Gemini 2.0\n", + "\n", + "> adapted from [this notebook example](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/use-cases/marketing/creating_marketing_assets_gemini_2_0.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import os\n", + "\n", + "from IPython.display import Markdown, display\n", + "from google import genai\n", + "from google.genai import types\n", + "from google.genai.types import GenerateContentConfig, GoogleSearch, Tool\n", + "from pydantic import BaseModel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### env setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [],
"source": [ + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv() # take environment variables\n", + "\n", + "PROJECT_ID = \"hybrid-vertex\"\n", + "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n", + " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n", + "\n", + "LOCATION = os.environ.get(\"GOOGLE_CLOUD_LOCATION\", \"us-central1\")\n", + "\n", + "# Instantiate client for Vertex AI\n", + "client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "MODEL_ID = \"gemini-2.0-flash-001\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating a marketing campaign brief using a past campaign as reference\n", + "\n", + "Let's have a look at a sample past campaign brief" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Click to view the sample file:\n", + "https://storage.googleapis.com/github-repo/generative-ai/gemini2/use-cases/marketing_example/sample_marketing_campaign_brief.pdf\n" + ] + } + ], + "source": [ + "# Set the Cloud Storage path\n", + "marketing_brief_file_path = \"github-repo/generative-ai/gemini2/use-cases/marketing_example/sample_marketing_campaign_brief.pdf\"\n", + "marketing_brief_file_uri = f\"gs://{marketing_brief_file_path}\"\n", + "marketing_brief_file_url = f\"https://storage.googleapis.com/{marketing_brief_file_path}\"\n", + "\n", + "print(\"Click to view the sample file:\")\n", + "print(marketing_brief_file_url)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Controlled Generation" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class MarketingCampaignBrief(BaseModel):\n", + " campaign_name: str\n", + " campaign_objectives: list[str]\n", + " 
target_audience: str\n", + " media_strategy: list[str]\n", + " timeline: str\n", + " target_countries: list[str]\n", + " performance_metrics: list[str]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Extract details from sample past campaign brief\n", + "\n", + "1. Send the prompt together with the sample past campaign brief PDF to Gemini 2.0 Flash\n", + "2. Specify that Gemini returns the response in the MarketingCampaignBrief schema you defined previously by including `response_schema=MarketingCampaignBrief` in the request" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"campaign_name\": \"Connect Beyond Limits with Pix Phone 5\",\n", + " \"campaign_objectives\": [\n", + " \"Increase awareness of the latest model of the Pix Phone\",\n", + " \"Generate leads and drive phone sales\",\n", + " \"Position Pix Phone and the trendy phone to have\"\n", + " ],\n", + " \"target_audience\": \"Individuals aged 20-40 in major markets such as US, France, Japan\",\n", + " \"media_strategy\": [\n", + " \"Social Media Marketing: Run targeted social media ads on platforms where the target audience is active.\",\n", + " \"Influencer Marketing: Partner with influencers in the tech industry to promote Pix Phone 5\",\n", + " \"Paid Advertising: Run targeted display ads on websites and apps frequented by the target audience. Use search engine marketing (SEM) to bid on relevant keywords and appear in search results when potential customers are looking for asset protection insurance.\"\n", + " ],\n", + " \"timeline\": \"Activity in the 3 major markets in at least the online channels by early Oct 2023, Start from US, followed by France then Japan. 
The campaign should use the US version as anchor and localized accordingly\",\n", + " \"target_countries\": [\n", + " \"US\",\n", + " \"France\",\n", + " \"Japan\"\n", + " ],\n", + " \"performance_metrics\": [\n", + " \"Track website traffic, lead generation, and phone sales to measure campaign effectiveness.\",\n", + " \"Use social media analytics to monitor engagement, reach, and sentiment.\"\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "prompt = \"\"\"\n", + " Extract the details from the sample marketing brief.\n", + "\"\"\"\n", + "\n", + "marketing_brief_file = types.Part.from_uri(\n", + " file_uri=marketing_brief_file_url, mime_type=\"application/pdf\"\n", + ")\n", + "contents = [marketing_brief_file, prompt]\n", + "\n", + "response = client.models.generate_content(\n", + " model=MODEL_ID,\n", + " contents=contents,\n", + " config=GenerateContentConfig(\n", + " response_mime_type=\"application/json\",\n", + " response_schema=MarketingCampaignBrief,\n", + " ),\n", + ")\n", + "\n", + "sample_marketing_brief = response.text\n", + "sample_marketing_brief_json = json.loads(sample_marketing_brief)\n", + "print(json.dumps(sample_marketing_brief_json, indent=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Google Search as a tool" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### helper functions: grounding with search" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def print_grounding_response(response):\n", + " \"\"\"Prints Gemini response with grounding citations.\"\"\"\n", + " grounding_metadata = response.candidates[0].grounding_metadata\n", + "\n", + " # Citation indices are in byte units\n", + " ENCODING = \"utf-8\"\n", + " text_bytes = response.text.encode(ENCODING)\n", + "\n", + " prev_index = 0\n", + " markdown_text = \"\"\n", + "\n", + " for grounding_support in grounding_metadata.grounding_supports:\n", + " text_segment = 
text_bytes[\n", + " prev_index : grounding_support.segment.end_index\n", + " ].decode(ENCODING)\n", + "\n", + " footnotes_text = \"\"\n", + " for grounding_chunk_index in grounding_support.grounding_chunk_indices:\n", + " footnotes_text += f\"[[{grounding_chunk_index + 1}]]({grounding_metadata.grounding_chunks[grounding_chunk_index].web.uri})\\n\"\n", + "\n", + " markdown_text += f\"{text_segment} {footnotes_text}\\n\"\n", + " prev_index = grounding_support.segment.end_index\n", + "\n", + " if prev_index < len(text_bytes):\n", + " markdown_text += str(text_bytes[prev_index:], encoding=ENCODING)\n", + "\n", + " markdown_text += \"\\n----\\n## Grounding Sources\\n\"\n", + "\n", + " if grounding_metadata.web_search_queries:\n", + " markdown_text += (\n", + " f\"\\n**Web Search Queries:** {grounding_metadata.web_search_queries}\\n\"\n", + " )\n", + " if grounding_metadata.search_entry_point:\n", + " markdown_text += f\"\\n**Search Entry Point:**\\n {grounding_metadata.search_entry_point.rendered_content}\\n\"\n", + " elif grounding_metadata.retrieval_queries:\n", + " markdown_text += (\n", + " f\"\\n**Retrieval Queries:** {grounding_metadata.retrieval_queries}\\n\"\n", + " )\n", + "\n", + " markdown_text += \"### Grounding Chunks\\n\"\n", + "\n", + " for index, grounding_chunk in enumerate(\n", + " grounding_metadata.grounding_chunks, start=1\n", + " ):\n", + " context = grounding_chunk.web or grounding_chunk.retrieved_context\n", + " if not context:\n", + " print(f\"Skipping Grounding Chunk {grounding_chunk}\")\n", + " continue\n", + "\n", + " markdown_text += f\"{index}. 
[{context.title}]({context.uri})\\n\"\n", + "\n", + " display(Markdown(markdown_text))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Grounding + Google Search for market research" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Here's a breakdown of the latest trends in the mobile phone industry, focusing on top manufacturers and public sentiment:\n", + "\n", + "**Latest Phone Models and Selling Points (Top 2 Makers)**\n", + "\n", + "Based on the search results, the top two mobile phone brands are generally considered to be **Apple** and **Samsung.**\n", + "\n", + "* **Apple:**\n", + " * **iPhone 16 Pro Max:** It is considered the best iPhone overall, known for its well-rounded user experience, luxurious design, premium services, and versatile camera. The iPhone 16 Pro and Pro Max models have identical cameras, offering a 5x optical zoom and more fine-tuning capabilities than previous iPhones. [[1]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrySkuVVX1lXKz1SzIIsC5ue9oi8pDeuL6IsdHyPH-E13XLAPrVWIPT8Q6O7yRNf5tpHoIPbfkV9nhpeLv-6Z3oABeEP2FXDMEVr8kVnq3AynILclBR2n5bgiEyOSturM3KT3Ym6)\n", + "\n", + "\n", + " * Apple is increasing its use of recycled materials, aiming for 100% recycled cobalt in batteries. [[2]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzllpx5CE4_gemVOmM-5_ObikLvnn6Duz50CMgoIn4pC85BEH21RgfhbmV8ogt6yDUUozt6dQtX6Wx6iXMnBLrSGeN3B_dGzoc8qqabkB07rPZ_ODHopk5aYF8q4wuXwxc0g0jE_BR93xJVoSwNcJZVvW-v3T1Y9j7glf3tWgn6KMlE3ci2wKeLgxpGUvOeVo6Qqtc4V6mUGzLaPVkk4awBX7keg9wkyw==)\n", + "\n", + "\n", + "\n", + "* **Samsung:**\n", + " * **Samsung Galaxy S25 Ultra:** As the best premium Android phone, it's like having a full camera bag in your pocket. While not a massive upgrade from the S24 Ultra, it boasts added megapixels and improved performance with the Snapdragon 8 Elite. 
[[1]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrySkuVVX1lXKz1SzIIsC5ue9oi8pDeuL6IsdHyPH-E13XLAPrVWIPT8Q6O7yRNf5tpHoIPbfkV9nhpeLv-6Z3oABeEP2FXDMEVr8kVnq3AynILclBR2n5bgiEyOSturM3KT3Ym6)\n", + "\n", + " It's considered Samsung's best camera phone and one of the best overall. [[1]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrySkuVVX1lXKz1SzIIsC5ue9oi8pDeuL6IsdHyPH-E13XLAPrVWIPT8Q6O7yRNf5tpHoIPbfkV9nhpeLv-6Z3oABeEP2FXDMEVr8kVnq3AynILclBR2n5bgiEyOSturM3KT3Ym6)\n", + "\n", + "\n", + " * **Galaxy A Series (e.g., A15, A35, A55):** These entry-level and mid-range phones prioritize affordability. They have received limited AI features via software updates. [[3]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQ8B4UBW7lRPfgu5rjc6ttTWhWuzgTacI1JoQvcZOZOnbDrQGD9CJJitg25P2EMoFwzVnaIcdIG3viAzmvAy87OI_wWhGb0tKIuNJ-Ef-OmARet_Mcs2NS8OXZpI9Dcn4fGVCG5f3sir0qk1eLnUwBNU20jRVIg0xCJugb1HCg3kFzjS91fPI3oxZo-yhsKRYn9x18)\n", + "\n", + "\n", + "\n", + "**Key Trends and Features to Watch:**\n", + "\n", + "* **AI Integration:** Artificial Intelligence (AI) is becoming more deeply integrated into mobile phones, personalizing user experiences, optimizing battery life, and automating tasks. [[4]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQCwugPsMpioZcwTmCNqtkqlzxiGIWX4ysGgDtzgAW5GEZLOmGoIANMWhGOxcn9zanpCcg7qYT5Xpu37J0vAaTHsNi-I4PzkM7LWx30E5l8lPbBrMC5UH3TzIxRzUcDMVxYfbdI5siCicbFWi6KnWrvcqg42529RHJnY_15672n9tN6oxOienmLhkdgblm0hsKPgbHKlPbPN96_judag==)\n", + "\n", + " Expect AI-powered cameras that recognize objects, scenes, and emotions for better photos. 
[[4]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQCwugPsMpioZcwTmCNqtkqlzxiGIWX4ysGgDtzgAW5GEZLOmGoIANMWhGOxcn9zanpCcg7qYT5Xpu37J0vAaTHsNi-I4PzkM7LWx30E5l8lPbBrMC5UH3TzIxRzUcDMVxYfbdI5siCicbFWi6KnWrvcqg42529RHJnY_15672n9tN6oxOienmLhkdgblm0hsKPgbHKlPbPN96_judag==)\n", + "\n", + "\n", + "* **Foldable and Rollable Screens:** These designs are expected to become more mainstream and refined, offering larger screens for multitasking. [[5]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzzI5O1UV7kBInBRdb9ITC5Xaeyq5ZA1Lhs1lGa77Z2u_MIUWXYSPSQ56awREzxK1HMYmueGbEpJOCjv_7us3FMWEq6KbnP8DDTM62ONmx4LKBx1qyzvTydhc4-pz7Hq0wPO-MKjr-h3y3HE-k4g1qYqS7izblmxN-JaVoOmb24lYia3Nxaw1QIuRGGSM_KqRuMeGIrEdfVfXhLz8kaYZhH3cCkDhyH)\n", + "\n", + "\n", + "* **5G and 6G Transition:** While 5G is becoming dominant, the transition to 6G technology could begin. [[4]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQCwugPsMpioZcwTmCNqtkqlzxiGIWX4ysGgDtzgAW5GEZLOmGoIANMWhGOxcn9zanpCcg7qYT5Xpu37J0vAaTHsNi-I4PzkM7LWx30E5l8lPbBrMC5UH3TzIxRzUcDMVxYfbdI5siCicbFWi6KnWrvcqg42529RHJnY_15672n9tN6oxOienmLhkdgblm0hsKPgbHKlPbPN96_judag==)\n", + "\n", + "\n", + "* **Extended Reality (XR) and Metaverse Integration:** Augmented reality (AR), virtual reality (VR), and mixed reality (MR) will be more integrated, offering immersive experiences for various applications. [[4]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQCwugPsMpioZcwTmCNqtkqlzxiGIWX4ysGgDtzgAW5GEZLOmGoIANMWhGOxcn9zanpCcg7qYT5Xpu37J0vAaTHsNi-I4PzkM7LWx30E5l8lPbBrMC5UH3TzIxRzUcDMVxYfbdI5siCicbFWi6KnWrvcqg42529RHJnY_15672n9tN6oxOienmLhkdgblm0hsKPgbHKlPbPN96_judag==)\n", + "\n", + "\n", + "* **Sustainability:** There's an increasing focus on sustainable design and materials in smartphone manufacturing.\n", + "* **Enhanced Security:** Features like theft detection are emerging, leveraging encryption, biometrics, and on-device AI to protect data. 
[[2]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzllpx5CE4_gemVOmM-5_ObikLvnn6Duz50CMgoIn4pC85BEH21RgfhbmV8ogt6yDUUozt6dQtX6Wx6iXMnBLrSGeN3B_dGzoc8qqabkB07rPZ_ODHopk5aYF8q4wuXwxc0g0jE_BR93xJVoSwNcJZVvW-v3T1Y9j7glf3tWgn6KMlE3ci2wKeLgxpGUvOeVo6Qqtc4V6mUGzLaPVkk4awBX7keg9wkyw==)\n", + "\n", + "\n", + "* **Better Battery Technology:** Improvements in battery technology are expected to enhance battery life.\n", + "\n", + "**General Public Sentiment about Mobile Phones** [[6]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrxwRlI0HwwgVOp0JLmq5tCfzUhTm1706pbmRU8akfMqwnNzo-dtDQZHoo_n5sDW7Y1tmOnmRq3ZzGQRBi-DCsgY0yij15ZQ5IoUf0iuWSMDg8_oYLn95bVc719nhof1xkuiw7GHd5YYfiBQ0HKFvuyqi6NsXA==)\n", + "[[7]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwbpqAHQA_7MWbcOataQSqiwKZsfSzvc9P9Ol017L0avBMdMIT41uLxelGdx4O72teAJzdIv3FOf42SAmTqaA81eqZR0zpnTcAKDWtk95JZYn0fvGdOHFond5mxQ0LxU2eisY5GiMsPHOCMVHPl-s-k3JGds-qknW0FOPs2d3AiJoM=)\n", + "\n", + "\n", + "\n", + "* **Addiction and Overuse:** There's a growing concern about smartphone addiction and the amount of time people spend on their phones. A significant percentage of people feel uncomfortable without their phones and even sleep with them. [[7]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwbpqAHQA_7MWbcOataQSqiwKZsfSzvc9P9Ol017L0avBMdMIT41uLxelGdx4O72teAJzdIv3FOf42SAmTqaA81eqZR0zpnTcAKDWtk95JZYn0fvGdOHFond5mxQ0LxU2eisY5GiMsPHOCMVHPl-s-k3JGds-qknW0FOPs2d3AiJoM=)\n", + "\n", + "\n", + "* **Negative Impact on Mental Health and Attention:** Many school leaders and policymakers believe cell phones negatively impact students' mental health, attention spans, and academic performance. 
[[6]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrxwRlI0HwwgVOp0JLmq5tCfzUhTm1706pbmRU8akfMqwnNzo-dtDQZHoo_n5sDW7Y1tmOnmRq3ZzGQRBi-DCsgY0yij15ZQ5IoUf0iuWSMDg8_oYLn95bVc719nhof1xkuiw7GHd5YYfiBQ0HKFvuyqi6NsXA==)\n", + "\n", + "\n", + "* **Desire for Security:** As phones become more essential, the importance of keeping them secure is increasing.\n", + "* **Positive View of AI:** AI is generally viewed as a key driver of innovation in the smartphone market.\n", + "* **Mobile payments will be easier as well** [[8]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzv2nedm7012Mzl5yUVYM_vUINrpQW2MS0zMzzngtLZ2FFMLHPXd_-02jDUOdrxX9n4qbIjy7VIYnUCbbb5OPcpnykJf51EbIZZ-4q_gnVPJMo5F-pWDrvHvXZkqsggruN8bIno75w6wZxOeIVDCcMHcbmqU_se1F4_WiY=)\n", + "\n", + "\n", + "* **High ownership:** Over 98% of Americans owned a cellphone in 2024 [[7]](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwbpqAHQA_7MWbcOataQSqiwKZsfSzvc9P9Ol017L0avBMdMIT41uLxelGdx4O72teAJzdIv3FOf42SAmTqaA81eqZR0zpnTcAKDWtk95JZYn0fvGdOHFond5mxQ0LxU2eisY5GiMsPHOCMVHPl-s-k3JGds-qknW0FOPs2d3AiJoM=)\n", + "\n", + "\n", + "\n", + "----\n", + "## Grounding Sources\n", + "\n", + "**Web Search Queries:** ['top 2 mobile phone makers 2024 2025 latest models selling points', 'latest mobile phone trends 2024 2025', 'public sentiment mobile phones 2024 2025', 'most popular mobile phones 2024 2025']\n", + "\n", + "**Search Entry Point:**\n", + " \n", + "
\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + "
\n", + " \n", + "
\n", + "\n", + "### Grounding Chunks\n", + "1. [techradar.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrySkuVVX1lXKz1SzIIsC5ue9oi8pDeuL6IsdHyPH-E13XLAPrVWIPT8Q6O7yRNf5tpHoIPbfkV9nhpeLv-6Z3oABeEP2FXDMEVr8kVnq3AynILclBR2n5bgiEyOSturM3KT3Ym6)\n", + "2. [forbes.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzllpx5CE4_gemVOmM-5_ObikLvnn6Duz50CMgoIn4pC85BEH21RgfhbmV8ogt6yDUUozt6dQtX6Wx6iXMnBLrSGeN3B_dGzoc8qqabkB07rPZ_ODHopk5aYF8q4wuXwxc0g0jE_BR93xJVoSwNcJZVvW-v3T1Y9j7glf3tWgn6KMlE3ci2wKeLgxpGUvOeVo6Qqtc4V6mUGzLaPVkk4awBX7keg9wkyw==)\n", + "3. [counterpointresearch.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQ8B4UBW7lRPfgu5rjc6ttTWhWuzgTacI1JoQvcZOZOnbDrQGD9CJJitg25P2EMoFwzVnaIcdIG3viAzmvAy87OI_wWhGb0tKIuNJ-Ef-OmARet_Mcs2NS8OXZpI9Dcn4fGVCG5f3sir0qk1eLnUwBNU20jRVIg0xCJugb1HCg3kFzjS91fPI3oxZo-yhsKRYn9x18)\n", + "4. [medium.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwQCwugPsMpioZcwTmCNqtkqlzxiGIWX4ysGgDtzgAW5GEZLOmGoIANMWhGOxcn9zanpCcg7qYT5Xpu37J0vAaTHsNi-I4PzkM7LWx30E5l8lPbBrMC5UH3TzIxRzUcDMVxYfbdI5siCicbFWi6KnWrvcqg42529RHJnY_15672n9tN6oxOienmLhkdgblm0hsKPgbHKlPbPN96_judag==)\n", + "5. [diyfixtool.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzzI5O1UV7kBInBRdb9ITC5Xaeyq5ZA1Lhs1lGa77Z2u_MIUWXYSPSQ56awREzxK1HMYmueGbEpJOCjv_7us3FMWEq6KbnP8DDTM62ONmx4LKBx1qyzvTydhc4-pz7Hq0wPO-MKjr-h3y3HE-k4g1qYqS7izblmxN-JaVoOmb24lYia3Nxaw1QIuRGGSM_KqRuMeGIrEdfVfXhLz8kaYZhH3cCkDhyH)\n", + "6. [ed.gov](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrxwRlI0HwwgVOp0JLmq5tCfzUhTm1706pbmRU8akfMqwnNzo-dtDQZHoo_n5sDW7Y1tmOnmRq3ZzGQRBi-DCsgY0yij15ZQ5IoUf0iuWSMDg8_oYLn95bVc719nhof1xkuiw7GHd5YYfiBQ0HKFvuyqi6NsXA==)\n", + "7. 
[consumeraffairs.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrwbpqAHQA_7MWbcOataQSqiwKZsfSzvc9P9Ol017L0avBMdMIT41uLxelGdx4O72teAJzdIv3FOf42SAmTqaA81eqZR0zpnTcAKDWtk95JZYn0fvGdOHFond5mxQ0LxU2eisY5GiMsPHOCMVHPl-s-k3JGds-qknW0FOPs2d3AiJoM=)\n", + "8. [keebos.com](https://vertexaisearch.cloud.google.com/grounding-api-redirect/AQXblrzv2nedm7012Mzl5yUVYM_vUINrpQW2MS0zMzzngtLZ2FFMLHPXd_-02jDUOdrxX9n4qbIjy7VIYnUCbbb5OPcpnykJf51EbIZZ-4q_gnVPJMo5F-pWDrvHvXZkqsggruN8bIno75w6wZxOeIVDCcMHcbmqU_se1F4_WiY=)\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "market_research_prompt = \"\"\"\n", + " I am planning to launch a mobile phone campaign and I want to understand the latest trends in the phone industry.\n", + " Please answer the following questions:\n", + " - What are the latest phone models and their selling point from the top 2 phone makers?\n", + " - What is the general public sentiment about mobile phones?\n", + "\"\"\"\n", + "\n", + "contents = [market_research_prompt]\n", + "\n", + "google_search_tool = Tool(google_search=GoogleSearch())\n", + "\n", + "response = client.models.generate_content(\n", + " model=MODEL_ID,\n", + " contents=contents,\n", + " config=GenerateContentConfig(tools=[google_search_tool]),\n", + ")\n", + "\n", + "market_research = response.text\n", + "print_grounding_response(response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## create marketing campaign\n", + "\n", + "1. Information about the phone that you're launching\n", + "2. Prompt to instruct Gemini to create a marketing campaign brief\n", + "3. Extracted information from the sample past campaign brief\n", + "4. Market research that was done with Grounding with Google Search\n", + "5. 
MarketingCampaignBrief schema that was defined previously" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"campaign_name\": \"Experience the Future with Pix Phone 10\",\n", + " \"campaign_objectives\": [\n", + " \"Generate excitement and awareness for the Pix Phone 10 launch.\",\n", + " \"Drive pre-orders and initial sales of Pix Phone 10.\",\n", + " \"Position Pix Phone 10 as the leading phone for AI-powered experiences and innovative design.\"\n", + " ],\n", + " \"target_audience\": \"Tech-savvy individuals aged 22-45 in the US, France, and Japan who value cutting-edge technology, sleek design, and advanced camera capabilities.\",\n", + " \"media_strategy\": [\n", + " \"Social Media Marketing: Utilize visually engaging content showcasing the phone's design and AI features on platforms like Instagram, TikTok, and YouTube. Run targeted ad campaigns focused on tech enthusiasts and early adopters.\",\n", + " \"Influencer Marketing: Partner with tech reviewers and lifestyle influencers to create authentic reviews and demonstrations of the Pix Phone 10's key features.\",\n", + " \"Online Advertising: Employ targeted display ads and search engine marketing (SEM) to reach potential customers actively searching for new phones or AI-powered devices.\",\n", + " \"Public Relations: Secure media coverage in leading tech publications and blogs to generate buzz and positive reviews.\",\n", + " \"Experiential Marketing: Organize launch events and hands-on demo experiences in major cities to allow consumers to interact with the phone firsthand.\"\n", + " ],\n", + " \"timeline\": \"Pre-launch campaign starting November 2024, leading up to the official launch in January 2025. 
Sustained marketing efforts throughout Q1 2025.\",\n", + " \"target_countries\": [\n", + " \"US\",\n", + " \"France\",\n", + " \"Japan\"\n", + " ],\n", + " \"performance_metrics\": [\n", + " \"Track website traffic, pre-order numbers, and initial sales figures to assess campaign effectiveness.\",\n", + " \"Monitor social media engagement (likes, shares, comments) and sentiment analysis to gauge public perception.\",\n", + " \"Analyze media coverage and reviews to evaluate the campaign's impact on brand reputation.\",\n", + " \"Measure website conversion rates from ad clicks to phone purchases.\",\n", + " \"Track the number of attendees at launch events and their feedback on the phone.\"\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "new_phone_details = \"\"\"\n", + " Phone Name: Pix Phone 10\n", + " Short description: Pix Phone 10 is the flagship phone with a focus on AI-powered features and a completely redesigned form factor.\n", + " Tech Specs:\n", + " - Camera: 50MP main sensor with 48MP ultrawide lens with autofocus for macro shots\n", + " - Performance: P5 processor for fast performance and AI capabilities\n", + " - Battery: 4700mAh battery for all-day usage\n", + " Key Highlights:\n", + " - Powerful camera system\n", + " - Redesigned software user experience to introduce more fun\n", + " - Compact form factor\n", + " Launch timeline: Jan 2025\n", + " Target countries: US, France and Japan\n", + "\"\"\"\n", + "\n", + "create_brief_prompt = f\"\"\"\n", + "Given the following details, create a marketing campaign brief for the new phone launch:\n", + "\n", + "Sample campaign brief:\n", + "{sample_marketing_brief}\n", + "\n", + "Market research:\n", + "{market_research}\n", + "\n", + "New phone details:\n", + "{new_phone_details}\n", + "\"\"\"\n", + "\n", + "contents = [create_brief_prompt]\n", + "\n", + "response = client.models.generate_content(\n", + " model=MODEL_ID,\n", + " contents=contents,\n", + " config=GenerateContentConfig(\n", + " 
response_mime_type=\"application/json\",\n", + " response_schema=MarketingCampaignBrief,\n", + " ),\n", + ")\n", + "\n", + "creative_brief = response.text\n", + "creative_brief_json = json.loads(creative_brief)\n", + "print(json.dumps(creative_brief_json, indent=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Creating Assets for the Marketing Campaign\n", + "\n", + "In the following sections, we will be looking at creating:\n", + "\n", + "* Social Media Ad Copy\n", + "* Storyboarding for short-form videos" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Social Media Ad Copy\n", + "\n", + "1. Define the JSON response schema for our ad copy\n", + "2. Send the prompt and response schema to Gemini 2.0 Flash" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# JSON response schema for an ad copy\n", + "\n", + "class AdCopy(BaseModel):\n", + " ad_copy_options: list[str]\n", + " localization_notes: list[str]\n", + " visual_description: list[str]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"ad_copy_options\": [\n", + " \"🇺🇸 US: Experience the future with Pix Phone 10! 🚀 AI-powered brilliance meets stunning design. Pre-order now and be among the first to own the next generation. #PixPhone10 #AIphone #Innovation #Tech\",\n", + " \"🇫🇷 France: Découvrez le futur avec le Pix Phone 10 ! 🚀 L'intelligence artificielle rencontre un design époustouflant. Précommandez maintenant et soyez parmi les premiers à posséder la prochaine génération. 
#PixPhone10 #IAphone #Innovation #Tech\",\n", + " \"🇯🇵 Japan: 未来を体験しよう!Pix Phone 10登場!🚀 AIパワーと美しいデザインの融合。予約受付中。次世代スマホをいち早く手に入れよう。#PixPhone10 #AIスマホ #イノベーション #テック\"\n", + " ],\n", + " \"localization_notes\": [\n", + " \"US: Ad copy uses American English and focuses on direct call to action (pre-order).\",\n", + " \"France: Ad copy is translated into French and maintains a similar tone to the US version, emphasizing the discovery of the future.\",\n", + " \"Japan: Ad copy is translated into Japanese, highlighting the fusion of AI power and beautiful design, and encouraging early adoption.\"\n", + " ],\n", + " \"visual_description\": [\n", + " \"US: Image of a diverse group of young adults using the Pix Phone 10 in a modern city setting. Focus on the phone's sleek design and camera capabilities. Use vibrant, eye-catching colors.\",\n", + " \"France: Image of a stylish individual using the Pix Phone 10 at a Parisian café. Focus on the phone's AI features and seamless integration into daily life. Use a sophisticated and elegant aesthetic.\",\n", + " \"Japan: Image of a tech-savvy person using the Pix Phone 10 on a bustling Tokyo street. Focus on the phone's advanced camera and connectivity. 
Use bright, energetic colors and incorporate elements of Japanese culture.\"\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "ad_copy_prompt = f\"\"\"\n", + " Given the marketing campaign brief, create an Instagram ad-copy for each target market: {creative_brief_json[\"target_countries\"]}\n", + " Please localize the ad-copy and the visuals to the target markets for better relevancy to the target audience.\n", + " Marketing Campaign Brief:\n", + " {creative_brief}\n", + "\"\"\"\n", + "\n", + "contents = [ad_copy_prompt]\n", + "\n", + "response = client.models.generate_content(\n", + " model=MODEL_ID,\n", + " contents=contents,\n", + " config=GenerateContentConfig(\n", + " response_mime_type=\"application/json\",\n", + " response_schema=AdCopy,\n", + " ),\n", + ")\n", + "\n", + "ad_copy = response.text\n", + "ad_copy_json = json.loads(ad_copy)\n", + "print(json.dumps(ad_copy_json, indent=2, ensure_ascii=False))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Storyboard for short-form videos\n", + "\n", + "> Gemini 2.0 Flash to help us brainstorm a storyboard for a short-form video to accompany the phone launch campaign" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Okay, here's a storyboard outline for a YouTube Shorts video promoting the Pix Phone 10, localized for the US, France, and Japan. The core concept is a fast-paced, visually driven showcase of the phone's AI capabilities and sleek design, adapted for each region's cultural nuances.\n", + "\n", + "**Core Concept:** \"Unlock the Future in Your Pocket\" - Each version highlights a specific AI feature and ties it into a relatable, everyday scenario. 
The focus is on speed, ease of use, and how the phone simplifies life.\n", + "\n", + "**Music:** Upbeat, trendy, and globally appealing electronic music with subtle regional variations (e.g., a slightly more pop-infused track for the US, a touch of French house for France, and J-Pop/electronic fusion for Japan).\n", + "\n", + "**Overall Style:** Clean, modern, and visually stunning. Use dynamic camera angles, quick cuts, and seamless transitions to keep viewers engaged. Emphasize the phone's screen and its UI.\n", + "\n", + "**Storyboard Structure (All Versions):**\n", + "\n", + "* **Opening Scene (0-3 seconds):** A close-up of someone's hand reaching for the Pix Phone 10. The screen lights up, showcasing a visually appealing lock screen or animation. Text overlay: \"Pix Phone 10.\"\n", + "* **Feature Highlight (3-7 seconds):** Demonstrate a key AI feature in action. This is where the localization comes into play.\n", + "* **Benefit Showcase (7-10 seconds):** Show how that feature solves a problem or makes life easier.\n", + "* **Design Showcase (10-12 seconds):** Quick cuts showcasing the phone's sleek design, various color options, and key physical features (camera, buttons, etc.).\n", + "* **Call to Action (12-15 seconds):** Text overlay: \"Pre-order Now!\" or \"Learn More at [Website]\" A simple animation of the phone rotating, highlighting the pre-order webpage on the phone's screen.\n", + "\n", + "**Localized Storyboard Details:**\n", + "\n", + "**1. US Version:**\n", + "\n", + "* **Feature Highlight:** AI-powered real-time translation.\n", + " * **Scene:** A tourist in a bustling New York City food truck. They're struggling to understand the menu. They use the Pix Phone 10's camera to instantly translate the menu into English.\n", + " * **Benefit Showcase:** They order confidently, enjoy delicious food, and smile. Text overlay: \"Break down barriers. 
Connect with anyone, anywhere.\"\n", + "* **Visual Cues:** Energetic city life, diverse ethnicities, fast-paced editing.\n", + "* **Music:** Upbeat pop-infused electronic track.\n", + "\n", + "**2. French Version:**\n", + "\n", + "* **Feature Highlight:** AI-powered photo enhancement.\n", + " * **Scene:** Someone taking a photo of the Eiffel Tower with the Pix Phone 10. The photo is slightly blurry or has poor lighting. The AI automatically enhances the image, making it stunning.\n", + " * **Benefit Showcase:** They instantly share the beautiful photo on social media and get lots of likes. Text overlay: \"Capture the beauty. Every shot, perfect.\"\n", + "* **Visual Cues:** Focus on artistry, elegance, and sophisticated visuals. Eiffel tower in the background.\n", + "* **Music:** French house-inspired electronic music with a touch of romance.\n", + "\n", + "**3. Japanese Version:**\n", + "\n", + "* **Feature Highlight:** AI-powered smart assistant for scheduling and organization.\n", + " * **Scene:** A busy professional in Tokyo juggling multiple tasks on their phone. The Pix Phone 10's AI assistant automatically organizes their schedule, sends reminders, and manages emails.\n", + " * **Benefit Showcase:** The professional is now calm, collected, and productive. They bow slightly in gratitude to their phone. Text overlay: \"Simplify your life. AI that works for you.\"\n", + "* **Visual Cues:** Clean, minimalist aesthetics, bright colors, and focus on efficiency. 
Modern Tokyo backdrop.\n", + "* **Music:** Upbeat J-Pop/electronic fusion.\n", + "\n", + "**Common Elements Across All Versions:**\n", + "\n", + "* **Color Palette:** Modern and sleek, using the brand colors of Pix Phone 10.\n", + "* **Font:** Clean and readable, with subtle variations for each region (e.g., a slightly more rounded font for Japan).\n", + "* **Sound Effects:** Crisp and modern sound effects to emphasize interactions with the phone.\n", + "* **Emphasis on User Experience:** Show the phone's interface as intuitive and easy to use.\n", + "\n", + "**Shooting Notes:**\n", + "\n", + "* Use professional lighting and high-quality camera equipment.\n", + "* Focus on capturing the phone's screen clearly and showcasing its vibrant display.\n", + "* Use close-ups to emphasize the phone's design details.\n", + "* Ensure all actors are diverse and relatable to the target audience.\n", + "\n", + "**Distribution Notes:**\n", + "\n", + "* Run targeted YouTube ad campaigns to reach tech-savvy individuals in each country.\n", + "* Optimize video titles, descriptions, and tags for each language.\n", + "* Consider using captions in each language to improve accessibility.\n", + "\n", + "By localizing the content and focusing on relatable scenarios, these YouTube Shorts videos will effectively generate excitement and drive pre-orders for the Pix Phone 10 in the US, France, and Japan.\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "short_video_prompt = f\"\"\"\n", + " Given the marketing campaign brief, create a storyboard for a YouTube Shorts video for target markets: {creative_brief_json[\"target_countries\"]}.\n", + " Please localize the content to the target markets for better relevancy to the target audience.\n", + " Marketing Campaign Brief:\n", + " {creative_brief}\n", + "\n", + "\"\"\"\n", + "\n", + "contents = [short_video_prompt]\n", + "\n", + "response = 
client.models.generate_content(model=MODEL_ID, contents=contents)\n", + "\n", + "short_video_response = response.text\n", + "display(Markdown(short_video_response))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/agents/trends-and-insights-agent/notebooks/hello_search_trends.ipynb b/python/agents/trends-and-insights-agent/notebooks/hello_search_trends.ipynb new file mode 100644 index 00000000..87c33d15 --- /dev/null +++ b/python/agents/trends-and-insights-agent/notebooks/hello_search_trends.ipynb @@ -0,0 +1,1855 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Get trending topics from Google Search and YouTube" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### TODOs\n", + "\n", + "1. check out the [YouTube Reporting API](https://pantheon.corp.google.com/apis/api/youtubereporting.googleapis.com/metrics?project=hybrid-vertex&e=13802955&inv=1&invt=AbtH2w&mods=-ai_platform_fake_service,-ai_platform_staging_service) in Cloud console \n", + " * Schedules reporting jobs containing your YouTube Analytics data and downloads the resulting bulk data reports in the form of CSV files.\n", + "\n", + "2. 
create helper functions for YouTube Search, YouTube Trends, and displaying their results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### env config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PROJECT_ID: None\n", + "LOCATION: None\n", + "PREFIX: None\n", + "BQ_DATASET: None\n" + ] + } + ], + "source": [ + "import os\n", + "import sys\n", + "import ipykernel\n", + "\n", + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv() # this loads the .env script for use below\n", + "PROJECT_ID = os.getenv(\"PROJECT_ID\")\n", + "LOCATION = os.getenv(\"LOCATION\")\n", + "PREFIX = os.getenv(\"PREFIX\")\n", + "BQ_DATASET = os.getenv(\"BQ_DATASET\")\n", + "\n", + "print(f\"PROJECT_ID: {PROJECT_ID}\")\n", + "print(f\"LOCATION: {LOCATION}\")\n", + "print(f\"PREFIX: {PREFIX}\")\n", + "print(f\"BQ_DATASET: {BQ_DATASET}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### imports" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "from typing import List\n", + "from pprint import pprint\n", + "from pandas import DataFrame\n", + "from datetime import date, datetime, timedelta\n", + "from IPython.display import HTML, Markdown, display, YouTubeVideo\n", + "\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "import logging\n", + "logging.disable(logging.WARNING)\n", + "\n", + "# google\n", + "from google.cloud import bigquery\n", + "bq_client = bigquery.Client(project = PROJECT_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Google Search Trends\n", + "\n", + "references\n", + "* overview and samples for the [Search 
Trends](https://pantheon.corp.google.com/marketplace/product/bigquery-public-datasets/google-search-trends?_ga=2.261190030.2019434361.1656948847-1975246695.1656948843&e=13802955&mods=-ai_platform_fake_service,-ai_platform_staging_service&inv=1&invt=AbtEjw&project=hybrid-vertex) (BigQuery public dataset)\n", + "* code for [generating time intervals](https://source.corp.google.com/piper///depot/google3/experimental/marketing/trendcrawler/trend_crawler/src/data/transformers.py;ws=jwortz%2F310)\n", + "\n", + "previous version\n", + "1) get date\n", + "2) get params\n", + "3) perform search w/ SERPAPI\n", + "4) convert SERPAPI search results to dataframe" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### top 25 search terms, most recent week" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + " SELECT\n", + " term,\n", + " refresh_date,\n", + " ARRAY_AGG(STRUCT(rank,week) ORDER BY week DESC LIMIT 1) x\n", + " FROM `bigquery-public-data.google_trends.top_terms`\n", + " WHERE refresh_date = PARSE_DATE('%m/%d/%Y', '03/25/2025')\n", + " GROUP BY term, refresh_date\n", + " ORDER BY (SELECT rank FROM UNNEST(x))\n", + "\n" + ] + } + ], + "source": [ + "TARGET_DATE = \"03/25/2025\"\n", + "\n", + "query = f\"\"\"\n", + " SELECT\n", + " term,\n", + " refresh_date,\n", + " ARRAY_AGG(STRUCT(rank,week) ORDER BY week DESC LIMIT 1) x\n", + " FROM `bigquery-public-data.google_trends.top_terms`\n", + " WHERE refresh_date = PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}')\n", + " GROUP BY term, refresh_date\n", + " ORDER BY (SELECT rank FROM UNNEST(x))\n", + "\"\"\"\n", + "print(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "bolivia vs uruguay\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
termrefresh_datex
0bolivia vs uruguay2025-03-25[{'rank': 1, 'week': 2025-03-23 00:00:00}]
1bolivia - uruguay2025-03-25[{'rank': 2, 'week': 2025-03-23 00:00:00}]
2jasmine crockett2025-03-25[{'rank': 3, 'week': 2025-03-23 00:00:00}]
3troy taylor2025-03-25[{'rank': 4, 'week': 2025-03-23 00:00:00}]
4outside lands 20252025-03-25[{'rank': 5, 'week': 2025-03-23 00:00:00}]
\n", + "
" + ], + "text/plain": [ + " term refresh_date x\n", + "0 bolivia vs uruguay 2025-03-25 [{'rank': 1, 'week': 2025-03-23 00:00:00}]\n", + "1 bolivia - uruguay 2025-03-25 [{'rank': 2, 'week': 2025-03-23 00:00:00}]\n", + "2 jasmine crockett 2025-03-25 [{'rank': 3, 'week': 2025-03-23 00:00:00}]\n", + "3 troy taylor 2025-03-25 [{'rank': 4, 'week': 2025-03-23 00:00:00}]\n", + "4 outside lands 2025 2025-03-25 [{'rank': 5, 'week': 2025-03-23 00:00:00}]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "target_trends_df = bq_client.query(query).to_dataframe()\n", + "print(target_trends_df['term'].iloc[0])\n", + "target_trends_df.head(5)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "TARGET_DATE = \"03/25/2025\"\n", + "\n", + "def get_daily_gtrends(target_date) -> pd.DataFrame:\n", + " \"\"\"\n", + " Get daily Google Trends data for a given date.\n", + "\n", + " Args:\n", + " target_date (str): The date for which to get daily Google Trends data.\n", + "\n", + " Returns:\n", + " pandas.DataFrame: A DataFrame containing the daily Google Trends data.\n", + " \"\"\"\n", + " \n", + " query = f\"\"\"\n", + " SELECT\n", + " term,\n", + " refresh_date,\n", + " ARRAY_AGG(STRUCT(rank,week) ORDER BY week DESC LIMIT 1) x\n", + " FROM `bigquery-public-data.google_trends.top_terms`\n", + " WHERE refresh_date = PARSE_DATE('%m/%d/%Y', '{target_date}')\n", + " GROUP BY term, refresh_date\n", + " ORDER BY (SELECT rank FROM UNNEST(x))\n", + " \"\"\"\n", + " df_t = bq_client.query(query).to_dataframe()\n", + " df_t.index += 1\n", + " \n", + " return df_t" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
termrefresh_datex
1bolivia vs uruguay2025-03-25[{'rank': 1, 'week': 2025-03-23 00:00:00}]
2bolivia - uruguay2025-03-25[{'rank': 2, 'week': 2025-03-23 00:00:00}]
3jasmine crockett2025-03-25[{'rank': 3, 'week': 2025-03-23 00:00:00}]
4troy taylor2025-03-25[{'rank': 4, 'week': 2025-03-23 00:00:00}]
5outside lands 20252025-03-25[{'rank': 5, 'week': 2025-03-23 00:00:00}]
6aaliyah chavez2025-03-25[{'rank': 6, 'week': 2025-03-23 00:00:00}]
7is chipotle closing2025-03-25[{'rank': 7, 'week': 2025-03-23 00:00:00}]
8erik per sullivan2025-03-25[{'rank': 8, 'week': 2025-03-23 00:00:00}]
9nigeria vs zimbabwe2025-03-25[{'rank': 9, 'week': 2025-03-23 00:00:00}]
10gme stock2025-03-25[{'rank': 10, 'week': 2025-03-23 00:00:00}]
11peru vs venezuela2025-03-25[{'rank': 11, 'week': 2025-03-23 00:00:00}]
12kick2025-03-25[{'rank': 12, 'week': 2025-03-23 00:00:00}]
13norway vs israel2025-03-25[{'rank': 13, 'week': 2025-03-23 00:00:00}]
14garcelle beauvais2025-03-25[{'rank': 14, 'week': 2025-03-23 00:00:00}]
15manoj bharathiraja2025-03-25[{'rank': 15, 'week': 2025-03-23 00:00:00}]
16e-zpass2025-03-25[{'rank': 16, 'week': 2025-03-23 00:00:00}]
17kris jenner2025-03-25[{'rank': 17, 'week': 2025-03-23 00:00:00}]
18final destination bloodlines trailer2025-03-25[{'rank': 18, 'week': 2025-03-23 00:00:00}]
19chelsea handler2025-03-25[{'rank': 19, 'week': 2025-03-23 00:00:00}]
20egypt vs sierra leone2025-03-25[{'rank': 20, 'week': 2025-03-23 00:00:00}]
21joshua blackledge2025-03-25[{'rank': 21, 'week': 2025-03-23 00:00:00}]
22mickey moniak2025-03-25[{'rank': 22, 'week': 2025-03-23 00:00:00}]
23elliot cadeau2025-03-25[{'rank': 23, 'week': 2025-03-23 00:00:00}]
24crossfire hurricane2025-03-25[{'rank': 24, 'week': 2025-03-23 00:00:00}]
25miami weather2025-03-25[{'rank': 25, 'week': 2025-03-23 00:00:00}]
\n", + "
" + ], + "text/plain": [ + " term refresh_date \\\n", + "1 bolivia vs uruguay 2025-03-25 \n", + "2 bolivia - uruguay 2025-03-25 \n", + "3 jasmine crockett 2025-03-25 \n", + "4 troy taylor 2025-03-25 \n", + "5 outside lands 2025 2025-03-25 \n", + "6 aaliyah chavez 2025-03-25 \n", + "7 is chipotle closing 2025-03-25 \n", + "8 erik per sullivan 2025-03-25 \n", + "9 nigeria vs zimbabwe 2025-03-25 \n", + "10 gme stock 2025-03-25 \n", + "11 peru vs venezuela 2025-03-25 \n", + "12 kick 2025-03-25 \n", + "13 norway vs israel 2025-03-25 \n", + "14 garcelle beauvais 2025-03-25 \n", + "15 manoj bharathiraja 2025-03-25 \n", + "16 e-zpass 2025-03-25 \n", + "17 kris jenner 2025-03-25 \n", + "18 final destination bloodlines trailer 2025-03-25 \n", + "19 chelsea handler 2025-03-25 \n", + "20 egypt vs sierra leone 2025-03-25 \n", + "21 joshua blackledge 2025-03-25 \n", + "22 mickey moniak 2025-03-25 \n", + "23 elliot cadeau 2025-03-25 \n", + "24 crossfire hurricane 2025-03-25 \n", + "25 miami weather 2025-03-25 \n", + "\n", + " x \n", + "1 [{'rank': 1, 'week': 2025-03-23 00:00:00}] \n", + "2 [{'rank': 2, 'week': 2025-03-23 00:00:00}] \n", + "3 [{'rank': 3, 'week': 2025-03-23 00:00:00}] \n", + "4 [{'rank': 4, 'week': 2025-03-23 00:00:00}] \n", + "5 [{'rank': 5, 'week': 2025-03-23 00:00:00}] \n", + "6 [{'rank': 6, 'week': 2025-03-23 00:00:00}] \n", + "7 [{'rank': 7, 'week': 2025-03-23 00:00:00}] \n", + "8 [{'rank': 8, 'week': 2025-03-23 00:00:00}] \n", + "9 [{'rank': 9, 'week': 2025-03-23 00:00:00}] \n", + "10 [{'rank': 10, 'week': 2025-03-23 00:00:00}] \n", + "11 [{'rank': 11, 'week': 2025-03-23 00:00:00}] \n", + "12 [{'rank': 12, 'week': 2025-03-23 00:00:00}] \n", + "13 [{'rank': 13, 'week': 2025-03-23 00:00:00}] \n", + "14 [{'rank': 14, 'week': 2025-03-23 00:00:00}] \n", + "15 [{'rank': 15, 'week': 2025-03-23 00:00:00}] \n", + "16 [{'rank': 16, 'week': 2025-03-23 00:00:00}] \n", + "17 [{'rank': 17, 'week': 2025-03-23 00:00:00}] \n", + "18 [{'rank': 18, 'week': 2025-03-23 
00:00:00}] \n", + "19 [{'rank': 19, 'week': 2025-03-23 00:00:00}] \n", + "20 [{'rank': 20, 'week': 2025-03-23 00:00:00}] \n", + "21 [{'rank': 21, 'week': 2025-03-23 00:00:00}] \n", + "22 [{'rank': 22, 'week': 2025-03-23 00:00:00}] \n", + "23 [{'rank': 23, 'week': 2025-03-23 00:00:00}] \n", + "24 [{'rank': 24, 'week': 2025-03-23 00:00:00}] \n", + "25 [{'rank': 25, 'week': 2025-03-23 00:00:00}] " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "trends_df = get_daily_gtrends(target_date=TARGET_DATE)\n", + "trends_df.shape\n", + "trends_df.head(25)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### get trends history\n", + "\n", + "TODOs\n", + "* Need to determine handling for `score` when aggregated (not yet normalized)\n", + "* add [anomaly detection](https://github.com/tottenjordan/social-pulse/blob/main/zeitghost/src/data/transformers.py)\n", + "* should visual be [stacking subplots](https://matplotlib.org/stable/gallery/subplots_axes_and_figures/subplots_demo.html) ?\n", + "\n", + "\n", + "example from zghost 1: [src](https://github.com/tottenjordan/social-pulse/blob/main/zeitghost/src/data/single_context_collector.py#L45C1-L67C15)\n", + "\n", + "```python\n", + "formatted_today = date.today().strftime(\"%m/%d/%Y\") # Month/Day/Year\n", + "print(formatted_today)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### determine key dates" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_date_x_days_ago(x):\n", + " \"\"\"\n", + " Calculates the date x days ago from today.\n", + " \n", + " Args:\n", + " x: An integer representing the number of days ago.\n", + "\n", + " Returns:\n", + " A datetime.date object representing the date x days ago.\n", + " \"\"\"\n", + " return date.today() - timedelta(days=x)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "TARGET_QUERY = \"widespread panic\"\n", + "TIMEFRAMES = [7, 30, 90] # days in past\n", + "q_dict = {}\n", + "interval_list = []\n", + "\n", + "_TODAY = date.today().strftime(\"%m/%d/%Y\")\n", + "\n", + "q_dict.update({\"topic\": TARGET_QUERY})\n", + "q_dict.update({\"target_date\": TARGET_DATE})\n", + "\n", + "for day in TIMEFRAMES:\n", + " prev_date = get_date_x_days_ago(day)\n", + " prev_date = prev_date.strftime(\"%m/%d/%Y\")\n", + " interval_list.append((prev_date, _TODAY))\n", + "\n", + "q_dict.update({f\"timeframes\": interval_list})\n", + "\n", + "pprint(q_dict)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### query trends public dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "test in BigQuery console" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "SELECT\n", + " term,\n", + " week,\n", + " SUM(score) AS score_agg,\n", + "FROM\n", + " `bigquery-public-data.google_trends.top_terms`\n", + "WHERE\n", + " refresh_date = PARSE_DATE('%m/%d/%Y', '03/25/2025')\n", + " AND term = \"gme stock\"\n", + " AND week BETWEEN DATE_ADD(PARSE_DATE('%m/%d/%Y', '03/25/2025'), INTERVAL -30 DAY)\n", + " AND PARSE_DATE('%m/%d/%Y', '03/25/2025')\n", + "GROUP BY\n", + " term,\n", + " week\n", + "ORDER BY\n", + " week ASC;\n", + "\n" + ] + } + ], + "source": [ + "TARGET_DATE = \"03/25/2025\"\n", + "TREND_TERM = \"gme stock\"\n", + "INTERVAL_GRANULARITY = \"DAY\"\n", + "INTERVAL_RANGE = \"-30\"\n", + "\n", + "query = f\"\"\"\n", + "SELECT\n", + " term,\n", + " week,\n", + " SUM(score) AS score_agg,\n", + "FROM\n", + " `bigquery-public-data.google_trends.top_terms`\n", + "WHERE\n", + " refresh_date = PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}')\n", + " AND term = \"{TREND_TERM}\"\n", + " AND week BETWEEN DATE_ADD(PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}'), 
INTERVAL {INTERVAL_RANGE} {INTERVAL_GRANULARITY})\n", + " AND PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}')\n", + "GROUP BY\n", + " term,\n", + " week\n", + "ORDER BY\n", + " week ASC;\n", + "\"\"\"\n", + "print(query)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "test BigQuery python client" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "keys: dict_keys(['-1095', '-365', '-120'])\n" + ] + } + ], + "source": [ + "frames = {}\n", + "intervals_test = [\"-1095\",\"-365\",\"-120\"]\n", + "\n", + "for interval in intervals_test:\n", + " # here\n", + " query = f\"\"\"\n", + " SELECT\n", + " term,\n", + " week,\n", + " SUM(score) AS score_agg,\n", + " FROM\n", + " `bigquery-public-data.google_trends.top_terms`\n", + " WHERE\n", + " refresh_date = PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}')\n", + " AND term = \"{TREND_TERM}\"\n", + " AND week BETWEEN DATE_ADD(PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}'), INTERVAL {interval} {INTERVAL_GRANULARITY})\n", + " AND PARSE_DATE('%m/%d/%Y', '{TARGET_DATE}')\n", + " GROUP BY\n", + " term,\n", + " week\n", + " ORDER BY\n", + " week ASC;\n", + " \"\"\"\n", + " df_temp = bq_client.query(query).to_dataframe()\n", + "\n", + " # df_list.append(df_temp)\n", + " frames[interval] = df_temp\n", + "\n", + "print(f\"keys: {frames.keys()}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "example helper function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_trend_history(\n", + " target_date: date,\n", + " term: str,\n", + " interval_uom: str,\n", + " intervals: list,\n", + " ) -> dict:\n", + " \"\"\"\n", + " Get historical time series for a given trend term\n", + " \"\"\"\n", + " frames = {}\n", + " \n", + " for interval in intervals:\n", + " # here\n", + " query = f\"\"\"\n", + " SELECT\n", + " term,\n", + " 
week,\n", + " SUM(score) AS score_agg,\n", + " FROM\n", + " `bigquery-public-data.google_trends.top_terms`\n", + " WHERE\n", + " refresh_date = PARSE_DATE('%m/%d/%Y', '{target_date}')\n", + " AND term = \"{term}\"\n", + " AND week BETWEEN DATE_ADD(PARSE_DATE('%m/%d/%Y', '{target_date}'), INTERVAL {interval} {interval_uom})\n", + " AND PARSE_DATE('%m/%d/%Y', '{target_date}')\n", + " GROUP BY\n", + " term,\n", + " week\n", + " ORDER BY\n", + " week ASC;\n", + " \"\"\"\n", + " df_temp = bq_client.query(query).to_dataframe()\n", + " frames[interval] = df_temp\n", + " \n", + " return frames" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "keys: dict_keys(['-1095', '-365', '-120'])\n" + ] + } + ], + "source": [ + "TARGET_DATE = \"03/25/2025\"\n", + "TREND_TERM = \"gme stock\"\n", + "INTERVAL_GRANULARITY = \"DAY\"\n", + "INTERVALS = [\"-1095\",\"-365\",\"-120\"]\n", + "\n", + "trend_hist_dict = get_trend_history(\n", + " target_date=TARGET_DATE, term=TREND_TERM,\n", + " interval_uom=INTERVAL_GRANULARITY, intervals=INTERVALS\n", + ")\n", + "\n", + "print(f\"keys: {trend_hist_dict.keys()}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### visualize trend history" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA24AAAGbCAYAAAC8kcPkAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAhWRJREFUeJzt3Xd4U+X7BvA7o00HHexZhuyNoiCIiMpyouIe4BbEiRN/iuDCjfMrKoqI4FZQEGUVFEH2RqZsaJndI+v9/fHkZLRJmpS2Gb0/15Wr6clJ8ua0ac+d5x06pZQCERERERERhS19qBtARERERERE/jG4ERERERERhTkGNyIiIiIiojDH4EZERERERBTmGNyIiIiIiIjCHIMbERERERFRmGNwIyIiIiIiCnMMbkRERERERGGOwY2IiIiIiCjMMbgREVFQdDodHnjggVA3gyJQ8+bNcfnll4e6GUREEYnBjYgoRDZt2oRrr70WzZo1Q1xcHBo3bowBAwbg/fffD3XTKs3hw4cxbtw4rF+/PtRNITe//fYbxo0bF+pmEBGRHwxuREQhsGzZMpx99tnYsGED7rnnHnzwwQe4++67odfr8e6774a6eZXm8OHDGD9+PINbmPntt98wfvz4UDeDiIj8MIa6AURE1dHLL7+MlJQUrFq1CqmpqR63HT16tMrbk5+fj8TExCp/3ooS6e0nIiIqCytuREQhsHv3bnTs2LFUaAOAevXqldr21VdfoXv37oiPj0etWrVw44034sCBAx77/PXXX7juuuvQtGlTmEwmpKWl4dFHH0VhYaHHfrfffjtq1KiB3bt349JLL0VSUhJuueUWAIDdbse7776Lzp07Iy4uDnXr1sXgwYOxevXqUm2aOXMmOnXqBJPJhI4dO+L333/3+5oXL16Mc845BwBwxx13QKfTQafT4YsvvnDus2LFCgwePBgpKSlISEjABRdcgL///tvjccaNGwedToetW7fi5ptvRs2aNdGnTx8ArjFUixcvxtlnn434+Hh07twZixcvBgD89NNPztfWvXt3rFu3zuOxLRYLtm3bhiNHjvh9LZrvv/8eHTp0QFxcHDp16oSff/4Zt99+O5o3b+7cZ+/evdDpdHjzzTfx4Ycf4owzzkBCQgIGDhyIAwcOQCmFF198EU2aNEF8fDyGDBmCkydPlnquuXPn4vzzz0diYiKSkpJw2WWXYcuWLWW20WKxYPz48WjdujXi4uJQu3Zt9OnTB/Pnzwcgvw8ffvghADh/Jjqdznn//Px8PPbYY0hLS4PJZELbtm3x5ptvQilV6rm++uor9OjRAwkJCahZsyb69u2LefPm+W3f1KlTYTQa8cQTT5T5WoiIqjNW3IiIQqBZs2ZYvnw5Nm/ejE6dOvnd9+WXX8Zzzz2H66+/HnfffTeOHTuG999/H3379sW6deuc4e/7779HQUEBRo4cidq1a2PlypV4//33cfDgQXz//fcej2m1WjFo0CD06dMHb775JhISEgAAd911F7744gtccskluPvuu2G1WvHXX3/hn3/+wdlnn+28/9KlS/HTTz/h/vvvR1JSEt577z0MHToU+/fvR+3atb2+jvbt2+OFF17A2LFjce+99+L8888HAPTu3RsAsGjRIlxyySXo3r07nn/+eej1ekyZMgUXXXQR/vrrL/To0cPj8a677jq0bt0ar7zyikeI2LVrF26++Wbcd999uPXWW/Hmm2/iiiuuwKRJk/DMM8/g/vvvBwBMmDAB119/PbZv3w69Xj7HPHToENq3b4/hw4d7BEpv5syZgxtuuAGdO3fGhAkTcOrUKdx1111o3Lix1/2nT58Os9mMBx98ECdPnsTrr7+O66+/HhdddBEWL16Mp556Crt27cL777+Pxx9/HJ9//rnzvtOmTcPw4cMxaNAgvPbaaygoKMBHH32EPn36YN26dR5BsaRx48ZhwoQJuPvuu9GjRw/k5ORg9erVWLt2LQYMGID77rsPhw8fxvz
58zFt2jSP+yqlcOWVVyI9PR133XUXunXrhj/++ANPPPEEDh06hIkTJzr3HT9+PMaNG4fevXvjhRdeQGxsLFasWIFFixZh4MCBXtv2ySefYMSIEXjmmWfw0ksv+T3eRETVniIioio3b948ZTAYlMFgUL169VJPPvmk+uOPP5TZbPbYb+/evcpgMKiXX37ZY/umTZuU0Wj02F5QUFDqeSZMmKB0Op3at2+fc9vw4cMVAPX000977Lto0SIFQD300EOlHsdutzuvA1CxsbFq165dzm0bNmxQANT777/v93WvWrVKAVBTpkwp9fitW7dWgwYN8niugoIC1aJFCzVgwADntueff14BUDfddFOpx2/WrJkCoJYtW+bc9scffygAKj4+3uM4fPzxxwqASk9Pd27bs2ePAqCGDx/u93UopVTnzp1VkyZNVG5urnPb4sWLFQDVrFmzUo9Zt25dlZWV5dw+ZswYBUB17dpVWSwW5/abbrpJxcbGqqKiIqWUUrm5uSo1NVXdc889Hs+fkZGhUlJSSm0vqWvXruqyyy7zu8+oUaOUt1OCmTNnKgDqpZde8th+7bXXKp1O5/wd2Llzp9Lr9erqq69WNpvNY1/3n2ezZs2cbXn33XeVTqdTL774ot+2ERGRYFdJIqIQGDBgAJYvX44rr7wSGzZswOuvv45BgwahcePG+OWXX5z7/fTTT7Db7bj++utx/Phx56VBgwZo3bo10tPTnfvGx8c7r+fn5+P48ePo3bs3lFKlugQCwMiRIz2+//HHH6HT6fD888+X2te96xwA9O/fHy1btnR+36VLFyQnJ+O///4L/mAAWL9+PXbu3Imbb74ZJ06ccL7O/Px8XHzxxfjzzz9ht9s97jNixAivj9WhQwf06tXL+X3Pnj0BABdddBGaNm1aart7m5s3bw6lVJnVtsOHD2PTpk0YNmwYatSo4dx+wQUXoHPnzl7vc9111yElJaXU8996660wGo0e281mMw4dOgQAmD9/PrKysnDTTTd5/A4YDAb07NnT43fAm9TUVGzZsgU7d+70u583v/32GwwGAx566CGP7Y899hiUUpg7dy4A6TZrt9sxduxYZ/VSU/J3BwBef/11PPzww3jttdfw7LPPBt0uIqLqiF0liYhC5JxzzsFPP/0Es9mMDRs24Oeff8bEiRNx7bXXYv369ejQoQN27twJpRRat27t9TFiYmKc1/fv34+xY8fil19+walTpzz2y87O9vjeaDSiSZMmHtt2796NRo0aoVatWmW23T0AaWrWrFnqeQOlhYrhw4f73Cc7Oxs1a9Z0ft+iRYuA2qaFpbS0NK/by9Pmffv2AQBatWpV6rZWrVph7dq1FdYu7dhcdNFFXtuSnJzst60vvPAChgwZgjZt2qBTp04YPHgwbrvtNnTp0sXv/QB5nY0aNUJSUpLH9vbt2ztvB+R3R6/Xo0OHDmU+5pIlSzBnzhw89dRTHNdGRBQEBjciohCLjY3FOeecg3POOQdt2rTBHXfcge+//x7PP/887HY7dDod5s6dC4PBUOq+WrXHZrNhwIABOHnyJJ566im0a9cOiYmJOHToEG6//fZS1SqTyVSqMhIMb20B4HXCikBo7XvjjTfQrVs3r/u4V7YAzwpjIG2r6DYHq7zt0o7NtGnT0KBBg1L7uVfrvOnbty92796NWbNmYd68eZg8eTImTpyISZMm4e677w7mJVSIjh07IisrC9OmTcN9993nM4ATEZEnBjciojCiTQCizWrYsmVLKKXQokULtGnTxuf9Nm3ahB07dmDq1KkYNmyYc7s2c2AgWrZsiT/++AMnT54MqOpWHt66zWnPDUj1qH///pXy3BWpWbNmAGQilJK8bTsd2rGpV69euY9NrVq1cMcdd+COO+5AXl4e+vbti3HjxjmDm6+fS7NmzbBgwQLk5uZ6VN22bdvmvF1ro91ux9atW30Gb02dOnXwww8/oE+fPrj44ouxdOlSNGrUqFyvi4ioOuE
YNyKiEEhPT/da6fntt98AAG3btgUAXHPNNTAYDBg/fnyp/ZVSOHHiBABX1cZ9H6VUUIt5Dx06FEoprwsxV1RVSltrLSsry2N79+7d0bJlS7z55pvIy8srdb9jx45VyPOXJdDlABo1aoROnTrhyy+/9GjvkiVLsGnTpgpt06BBg5CcnIxXXnkFFoul1O1lHRvtd0RTo0YNtGrVCsXFxc5tvn4ul156KWw2Gz744AOP7RMnToROp8Mll1wCALjqqqug1+vxwgsvlKruevvdadKkCRYsWIDCwkIMGDCgVBuJiKg0VtyIiELgwQcfREFBAa6++mq0a9cOZrMZy5Ytw7fffovmzZvjjjvuACCVjJdeegljxozB3r17cdVVVyEpKQl79uzBzz//jHvvvRePP/442rVrh5YtW+Lxxx/HoUOHkJycjB9//DGo8VsXXnghbrvtNrz33nvYuXMnBg8eDLvdjr/++gsXXnghHnjggdN+3S1btkRqaiomTZqEpKQkJCYmomfPnmjRogUmT56MSy65BB07dsQdd9yBxo0b49ChQ0hPT0dycjJ+/fXX037+sgSzHMArr7yCIUOG4LzzzsMdd9yBU6dO4YMPPkCnTp28hs/ySk5OxkcffYTbbrsNZ511Fm688UbUrVsX+/fvx5w5c3DeeeeVClbuOnTogH79+qF79+6oVasWVq9ejR9++MHj59m9e3cAwEMPPYRBgwbBYDDgxhtvxBVXXIELL7wQ//d//4e9e/eia9eumDdvHmbNmoVHHnnEWQ1s1aoV/u///g8vvvgizj//fFxzzTUwmUxYtWoVGjVqhAkTJpRqV6tWrTBv3jz069cPgwYNwqJFi8ocr0dEVK1V/USWREQ0d+5cdeedd6p27dqpGjVqqNjYWNWqVSv14IMPqszMzFL7//jjj6pPnz4qMTFRJSYmqnbt2qlRo0ap7du3O/fZunWr6t+/v6pRo4aqU6eOuueee5zT9LtPvz98+HCVmJjotV1Wq1W98cYbql27dio2NlbVrVtXXXLJJWrNmjXOfQCoUaNGlbpvs2bNAppGf9asWapDhw7KaDSWatu6devUNddco2rXrq1MJpNq1qyZuv7669XChQud+2jLARw7dsxrG7xNfe+tzdo0/W+88UapbYG8DqWU+uabb1S7du2UyWRSnTp1Ur/88osaOnSoateund/nUUqp9PR0BUB9//33HtunTJmiAKhVq1aV2n/QoEEqJSVFxcXFqZYtW6rbb79drV692m8bX3rpJdWjRw+Vmpqq4uPjVbt27dTLL7/ssfSE1WpVDz74oKpbt67S6XQeSwPk5uaqRx99VDVq1EjFxMSo1q1bqzfeeMNjmn/N559/rs4880xlMplUzZo11QUXXKDmz5/vvN3bz2fFihUqKSlJ9e3b1+uSFkREJHRKVdGobCIiomqgW7duqFu3blDjC4mIiMrCMW5ERETlYLFYYLVaPbYtXrwYGzZsQL9+/ULTKCIiilqsuBEREZXD3r170b9/f9x6661o1KgRtm3bhkmTJiElJQWbN29G7dq1Q91EIiKKIpychIiIqBxq1qyJ7t27Y/LkyTh27BgSExNx2WWX4dVXX2VoIyKiCseKGxERERERUZjjGDciIiIiIqIwx+BGREREREQU5hjciIiIiIiIwhyDGxERERERUZhjcCMiIiIiIgpzDG5ERERERERhjsGNiIiIiIgozDG4ERERERERhTkGNyIiIiIiojDH4EZERERERBTmGNyIiIiIiIjCHIMbERERERFRmGNwIyIiIiIiCnMMbkRERERERGGOwY2IiIiIiCjMMbgRERERERGFOQY3IiIiIiKiMMfgRkREREREFOYY3IiIiIiIiMIcgxsREREREVGYM4a6AZHAbrfj8OHDSEpKgk6nC3VziIiIiIgoRJRSyM3NRaNGjaDXV10djMEtAIcPH0ZaWlqom0FERERERGHiwIEDaNKkSZU9H4NbAJKSkgDIDyc5OTn
ErSEiIiIiolDJyclBWlqaMyNUFQa3AGjdI5OTkxnciIiIiIioyodQcXISIiIiIiKiMMfgRkREREREFOYY3IiIiIiIiMIcgxsREREREVGYY3AjIiIiIiIKcwxuREREREREYY7BjYiIiIiIKMwxuBEREREREYU5BjciIiIiIqIwx+BGREREFEGKC+z4+5NsFBfYQ90UIqpCDG5EREREEeS7Gw7Bct86/HDr4VA3hYiqEIMbERERUQQpPlAEACjcVRDilhBRVWJwIyIiIookViVf822hbQcRVSkGNyIiIqJI4ghuugJriBtCRFWJwY2IiIgoktgkuOmLWHEjqk4Y3IiIiIgiiVVmkzSaWXEjqk4Y3IiIiIgiiaPiFmthxY2oOmFwIyIiIookjuBmsrLiRlSdMLgRERERRRCdI7jFK1bciKoTBjciIiKiSGJ3VNxgR1GePcSNIaKqwuBGREREFEG0ihsAZB1id0mi6oLBjYiIiCgEbFaFz2tvwOTW24K6n97mqrLlHGF3SaLqwhjqBhARERFVR/vXFOOMk6dgPwkA7QK+n87uqrjlZLLiRlRdsOJGREREFAKWYsdC2gCsZuV/ZzfuwS3/KCtuRNUFgxsRERFRCNiKXQHMXBD4JCOewY0VN6LqIqTB7aOPgC5dgORkufTqBcyd67q9Xz9Ap/O8jBjh+Rj79wOXXQYkJAD16gFPPAGUXNZk8WLgrLMAkwlo1Qr44otKfmFEREREZbAUugJYcV7gFTe9cu1bdIIVN6LqIqRj3Jo0AV59FWjdGlAKmDoVGDIEWLcO6NhR9rnnHuCFF1z3SUhwXbfZJLQ1aAAsWwYcOQIMGwbExACvvCL77Nkj+4wYAUyfDixcCNx9N9CwITBoUNW9ViIiIiJ37t0jzYVBBDe3ilvxKVbciKqLkAa3K67w/P7ll6UK988/ruCWkCDBzJt584CtW4EFC4D69YFu3YAXXwSeegoYNw6IjQUmTQJatADeekvu0749sHQpMHEigxsRERGFjrXIFcAshYF3lXSvuFmyWHEjqi7CZoybzQZ88w2Qny9dJjXTpwN16gCdOgFjxgAFBa7bli8HOneW0KYZNAjIyQG2bHHt07+/53MNGiTbfSkuLkZOTo7HhYiIiKgiuY9xswRRcTPYXSHPms2KG1F1EfLlADZtkqBWVATUqAH8/DPQoYPcdvPNQLNmQKNGwMaNUknbvh346Se5PSPDM7QBru8zMvzvk5MDFBYC8fGl2zRhwgSMHz++4l4kERERUQnWYlcACya4uVfc7HkMbkTVRciDW9u2wPr1QHY28MMPwPDhwJIlEt7uvde1X+fOMi7t4ouB3buBli0rr01jxozB6NGjnd/n5OQgLS2t8p6QiIiIqh2buXwVN/fghnx2lSSqLkIe3GJjZaZHAOjeHVi1Cnj3XeDjj0vv27OnfN21S4JbgwbAypWe+2RmyldtXFyDBq5t7vskJ3uvtgGAyWSCyWQq3wsiIiIiCoC1SDlPxIJZDsAAV3DTFbDiRlRdhM0YN43dDhQXe79t/Xr52rChfO3VS7paHj3q2mf+fAllWnfLXr1kJkl38+d7jqMjIiIiqmruFTf3iUrKYnCruOmLWHEjqi5CWnEbMwa45BKgaVMgNxeYMUPWXPvjD+kOOWMGcOmlQO3aMsbt0UeBvn1l7TcAGDhQAtpttwGvvy7j2Z59Fhg1StZsA2QZgA8+AJ58ErjzTmDRIuC774A5c0L2somIiIhgs5QzuLlV3IzFrLgRVRchDW5Hj8q6a0eOACkpEsj++AMYMAA4cECm+X/nHZlpMi0NGDpUgpnGYABmzwZGjpQKWmKijJFzX/etRQsJaY8+Kl0wmzQBJk/mUgBEREQUWu6zSrpPVFIW9+AWa2XFjai6CGlw++wz37elpckkJWVp1gz47Tf/+/TrJ4t6ExEREYULezkqbna7gtEtuJmsrLgRVRdhN8aNiIiIqDrwrLgFFtwsJQJegmJwI6ouGNy
IiIiIQsC94mYLMLiVrMzFQqEwJ/BulkQUuRjciIiIiELAbnYFLmthYOHLXFA64GUfYdWNqDpgcCMiIiIKAY+KmznArpJulTkzdACAnCOcoISoOmBwIyIiIgoBuzn4rpKWQtd++foYAEAOK25E1QKDGxEREVEI2K2uEObebdIfi6NLpQU6FBsNAIC8o6y4EVUHDG5EREREIaBOo6ukDTqYY2RVp4LjrLgRVQcMbkREREQh4D7GzR5gcLMWuoKb1STBregEK25E1QGDGxEREVEIKPeuksWBdpWU+9h1OtjjpKtk8SlW3IiqAwY3IiIiohBQ5am4Ofaz6XRAglTcLAxuRNUCgxsRERFRCHhU3CzBLcBt0+mhqyEVN2sOu0oSVQcMbkREREQhoCx2t+uBBTdt2QC7Tgd9klTc7HmsuBFVBwxuRERERKFgc4U19xDnj9UxFs6u08GYLBU35LPiRlQdMLgRERERhYB7V8lAK25aV0m7TofYVKm46QpYcSOqDhjciIiIiELBPbhZA+wq6ZicxK7XwVRbgpu+iBU3ouqAwY2IiIgoFMrVVdIV3OJrS1dJYzErbkTVAYMbERERUSi4BTcE2FVSWzZA6XRIqCsVt1grK25E1QGDGxEREVEouAc3W5BdJQ161KgnFbc4KytuRNUBgxsRERFRKLiPawt0jJtjVkml1yGpgVTc4hWDG1F1wOBGREREFAp29+AW2Bg3baFupdchtYlU3GKhUJgT2P2JKHIxuBERERGFgM7mFrYC7SrpmJxEGXRIbWh0bs8+wqobUbRjcCMiIiIKAZ1bWNMFGNzsWpdKgw7GWB0KHady2Yc4QQlRtGNwIyIiIgoBnVtXSV2gXSXNroobABTppeqWm8mKG1G0Y3AjIiIiCgGP4GYPbjkA6B3BzSjBLe8oK25E0Y7BjYiIiCgEPIJboF0ltfXejHIKZ4mRCUoKjrPiRhTtGNyIiIiIQsA9uOkDrLgpi6NLpVEqblaTVNyKGNyIoh6DGxEREVEI6D26SgY2xk25TU4CAPY4qbgVn2JXSaJoF9Lg9tFHQJcuQHKyXHr1AubOdd1eVASMGgXUrg3UqAEMHQpkZno+xv79wGWXAQkJQL16wBNPANYSHzotXgycdRZgMgGtWgFffFHZr4yIiIjIP305Km6urpIS3JAoFTdLFituRNEupMGtSRPg1VeBNWuA1auBiy4ChgwBtmyR2x99FPj1V+D774ElS4DDh4FrrnHd32aT0GY2A8uWAVOnSigbO9a1z549ss+FFwLr1wOPPALcfTfwxx9V+EKJiIiIStCp8nSVlP10juCmS5SKmzWHFTeiaGcse5fKc8UVnt+//LJU4f75R0LdZ58BM2ZIoAOAKVOA9u3l9nPPBebNA7ZuBRYsAOrXB7p1A158EXjqKWDcOCA2Fpg0CWjRAnjrLXmM9u2BpUuBiROBQYOq8tUSERERuRjcukcaytlVUp8kp3L2XFbciKJd2Ixxs9mAb74B8vOly+SaNYDFAvTv79qnXTugaVNg+XL5fvlyoHNnCW2aQYOAnBxX1W75cs/H0PbRHsOb4uJi5OTkeFyIiIiIKpLeveKmAqu4wRHcdDFyCmdMloob8llxI4p2IQ9umzbJ+DWTCRgxAvj5Z6BDByAjQypmqame+9evL7cB8tU9tGm3a7f52ycnBygs9N6mCRMmICUlxXlJS0s7rddIREREVJJ7WDMEGNy0ipvWVTK2llTcdIWsuBFFu5AHt7ZtZezZihXAyJHA8OHS/TGUxowZg+zsbOflwIEDoW0QERERRZ3yBDdYpUulLkaCm6mmBDd9EStuRNEupGPcAKmqtWol17t3B1atAt59F7jhBpl0JCvLs+qWmQk0aCDXGzQAVq70fDxt1kn3fUrORJmZKbNYxsd7b5PJZILJZDqdl0VERETklwHuwS2wMW6urpIS3OJrS1dJYzErbkTRLuQVt5LsdqC4WEJcTAywcKHrtu3bZfr/Xr3k+16
9pKvl0aOufebPl1DWoYNrH/fH0PbRHoOIiIgoFNyrbO4hzi+b7Kd3dJVMqCufwZssDG5E0S6kFbcxY4BLLpEJR3JzZQbJxYtlqv6UFOCuu4DRo4FatSSMPfigBK5zz5X7DxwoAe2224DXX5fxbM8+K2u/aQWzESOADz4AnnwSuPNOYNEi4LvvgDlzQvayiYiIiKB3C2vGYINbrAS3GvUMyAZgsrGrJFG0C2lwO3oUGDYMOHJEglqXLhLaBgyQ2ydOBPR6WXi7uFhmg/zf/1z3NxiA2bNlbFyvXkBiooyRe+EF1z4tWkhIe/RR6YLZpAkweTKXAiAiIqLQMpYIbna7gl6v83+nEpOTpDQyIhtAvGLFjSjahTS4ffaZ/9vj4oAPP5SLL82aAb/95v9x+vUD1q0LunlERERElcJuV6WqbJYiBVNCGcHNrlXcZLRLciMZ4xYLhcIcO+KTw24UDBFVEL67iYiIiKqYzVy6a6S5oOzukjqbTGJicHSVTG3o+gw+6xCrbkTRjMGNiIiIqIpZikqHNEtAwc1RcXPMKmmM1aHQcTqXc4Tj3IiiGYMbERERURXzVl0zF5a9JIBO6yppcnWpLNRL1S03kxU3omjG4EZERERUxSzFXoJbfvAVNwAoNkpwyzvKihtRNGNwIyIiIqpiVreukmZICDMXBhDcHBU3o1vFzRIjE5QUHGXFjSiaMbgRERERVTGLI6TZAFgdp2PWorK7Suodwc1gcp3CWU1ScSs6yeBGFM0Y3IiIiIiqmLVYC2462HRSPbMEUnFTpbtK2uOl4lZ8il0liaIZgxsRERFRFXNV3NyCm5eZJkvS26UqZ4xzW+8tQSpulixW3IiiGYMbERERURVzVtx0eth0WlfJQIKbo6tkrCu46RKl4mbNZsWNKJoxuBERERFVMYtjPJsdOtgdFbeAxrip0pOT6JOk4mbPY8WNKJoxuBERERFVMatZq7gFN8bNoAU3t66SxhQJbshnxY0omjG4EREREVUxrVukXaeDXR9EV0mlzSrpCm6xNaWrpK6QFTeiaMbgRkRERFTFbMWuiptdr3NsK7urpFZxi4lzncKZakrFzcDgRhTVGNyIiIiIqpjN7F5xc4xxKw6kq6RjVkm3yUnia0vFzWhmV0miaMbgRkRERFTFtJBm1+ugnBW3ALpKwlFxi3cFt8T6UnGLtbDiRhTNGNyIiIiIqphWcVM6HewGOR0LJLgZvAS3GvWk4mayseJGFM0Y3IiIiIiqmM1rxS2AMW4ovRxAcgOpuMUrVtyIohmDGxEREVEVc1bc9Hoog4QwexkVN5tVweC47l5xS24kW2OhUJDFqhtRtGJwIyIiIqpiWnXNrtdBGR1dJc3+g5u5wHV7bLzrFC6prsF5Pf9U2VU7IopMDG5EREREVcxu0SpuOkCruJn9hy6L2zpv7gtwx8TpoNXZChnciKIWgxsRERFRFXN1lXQLbpYyKm6FrlAWm+AKbnq9DmbHKV1RDoMbUbRicCMiIiKqYnYtuBl0gFHnsc0Xq1vFLcat4gYAFh2DG1G0Y3AjIiIiqmJ2qyOEGXSAY4ybKqPiZimU220ADEbP4GZlcCOKegxuRERERFXMe8WtjDFuzuCmK32bQSYoKc5lcCOKVgxuRERERFXMOZ5Nr4MuRoJYWRU3a7Hv4GbTyymdOY/BjShaMbgRERERVTHneLbydJXUlT59sxkcwS2X67gRRauQBrcJE4BzzgGSkoB69YCrrgK2b/fcp18/QKfzvIwY4bnP/v3AZZcBCQnyOE88AVitnvssXgycdRZgMgGtWgFffFF5r4uIiIjIH6WNcTPqnRU3WMtaDsCx9pu3ipsj/FnyWXEjilYhDW5LlgCjRgH//APMnw9YLMDAgUB+vud+99wDHDniurz+uus2m01Cm9kMLFsGTJ0qoWzsWNc+e/bIPhdeCKxfDzzyCHD33cAff1TBiyQiIiIqQVkcAcvg1lXSWkZXSbNWcSsd3OyO4GYtYHA
jilbGUD757797fv/FF1IxW7MG6NvXtT0hAWjQwPtjzJsHbN0KLFgA1K8PdOsGvPgi8NRTwLhxQGwsMGkS0KIF8NZbcp/27YGlS4GJE4FBgyrhhRERERH54aq46aB3VtwCWw7AW3BTMay4EUW7sBrjlp0tX2vV8tw+fTpQpw7QqRMwZgxQUOC6bflyoHNnCW2aQYOAnBxgyxbXPv37ez7moEGy3Zvi4mLk5OR4XIiIiIgqinJbDkDnCF2w+A9dNsfkJHZvwS1WHsPGihtR1Appxc2d3S5dGM87TwKa5uabgWbNgEaNgI0bpZK2fTvw009ye0aGZ2gDXN9nZPjfJycHKCwE4uM9b5swYQLGjx9fYa+NiIiIyJ02EYkuRgd9bIAVNy246UsHNzjCn62QwY0oWoVNcBs1Cti8Wbowurv3Xtf1zp2Bhg2Biy8Gdu8GWrasnLaMGTMGo0ePdn6fk5ODtLS0ynkyIiIiqnaUzRHcDG7BzVZ2V0kDvFfcEMvgRhTtwiK4PfAAMHs28OefQJMm/vft2VO+7tolwa1BA2DlSs99MjPlqzYurkED1zb3fZKTS1fbAMBkMsFkMgX/QoiIiIgC4Flxc3SVLCO42cyO4KYvPdJFFyfb7EUMbkTRKqRj3JSS0Pbzz8CiRTKBSFnWr5evDRvK1169gE2bgKNHXfvMny+hrEMH1z4LF3o+zvz5sp2IiIioymkVN6MOBkfFTWcra4ybYzkAL10ldSbHWnDFDG5E0SqkwW3UKOCrr4AZM2Qtt4wMuRQWyu27d8sMkWvWAHv3Ar/8AgwbJjNOduki+wwcKAHtttuADRtkiv9nn5XH1opmI0YA//0HPPkksG0b8L//Ad99Bzz6aEheNhEREVV3Vldw05u04Oa/4mZ3VOmUl66S+niD3MaKG1HUCmlw++gjmUmyXz+poGmXb7+V22NjZZr/gQOBdu2Axx4Dhg4Ffv3V9RgGg3SzNBikgnbrrRLuXnjBtU+LFsCcOVJl69pVlgWYPJlLARAREVGIaBW3GL1bxS2wyUmUwUtwc3SVhJnBjShahXSMm/L/9wlpabJId1maNQN++83/Pv36AevWBdw0IiIiospjlYClM+pgcHRz1JfRVdKuBTcvXSUN8QxuRNEurNZxIyIiIqoWHNU1fawOhjhHxc0eYFdJL8HNmCCndDqzrSJbSURhhMGNiIiIqKo5Qpo+RgejY4ybvozgZjNrXSVLn745g1sZi3gTUeRicCMiIiKqYjr3ipvWVTLQipuXMW7GRMdjWBnciKIVgxsRERFRVdOCm1EHo6OrpF6VMcZNG7/mJbjF1GBwI4p2DG5EREREVUyruBlMgXeV1Cpu3oJbrKPiZihjghMiilwMbkRERERVTOc+xs1RcTOUMd228hfcajC4EUU7BjciIiKiKqYFN4NJjxjHxCKGsrpKasHNWDq4mZLlMWLsDG5E0YrBjYiIiKiK6d0qbjGBVtysvitupiQ5pTMyuBFFraAX4L76akBX+u8FdDogLg5o1Qq4+WagbduKaB4RERFR9NE5ApYhVoeY+CC7ShpLf+4ep1XcyqjaEVHkCrrilpICLFoErF0rYU2nA9atk21WK/Dtt0DXrsDff1dGc4mIiIgin97uNjlJnKOrJMoKbhLKdDGlP0GPTzEAAGLB4EYUrYKuuDVoIBW1Dz4A9I7YZ7cDDz8MJCUB33wDjBgBPPUUsHRpRTeXiIiIKPLpHNU1Q6wOsY6Km7GM0OWvq2RciqOrJBQsRXbExHE0DFG0Cfpd/dlnwCOPuEIbINcffBD45BOpwD3wALB5cwW2koiIiCiKaBU3o8mtqyQAm9V31U0Lbt4rbq4Ts4IsVt2IolHQwc1qBbZtK7192zbAZpPrcXHex8EREREREaB3q7jFJLhOmswFfrpLOtZ+03mZVTIh1XVKV5jN4EYUjYLuKnnbbcBddwHPPAOcc45sW7UKeOUVYNgw+X7JEqBjx4psJhEREVH00CYiMcb
pYEp0hS5zgR3xyT4+V7e6ZqIsyRirgwU6xEChKIfBjSgaBR3cJk4E6tcHXn8dyMyUbfXrA48+KuPaAGDgQGDw4IpsJhEREVH0cFbcTDrEulfcCv1U3JxdJb0HOwv0iIENhewqSRSVgg5uBgPwf/8nl5wc2Zac7LlP06YV0TQiIiKi6KRV3GLi9M513ADAUs6ukgBg1umRoGysuBFFqaCDm7uSgY2IiIiIyqZ3TP1vjNVBr9fBCp3MCOm34iaBTB/rPbhZ9XrABhTnMbgRRaOgg9uZZ5a9APfttwMXXlgBrSMiIiKKQgbHQtlGk5xUacHNXOQndNl8j3ED3IIbK25EUSnoWSUHDwb++w9ITJRwduGFQI0awO7dMlnJkSNA//7ArFmV0VwiIiKiyKcttq0tBWCDfLXk+6646RxLCPiquNkMclpnZsWNKCoFXXE7fhx47DHguec8t7/0ErBvHzBvHvD888CLLwJDhlRUM4mIiIiihxbctIqbTacHlA2WorLHuBnKCG4WBjeiqBR0xe2774Cbbiq9/cYb5TZAbt++/XSbRkRERBR97HYFg+O6s+LmGIdi8dNVUldGV0m7kRU3omgWdHCLiwOWLSu9fdkyuQ0A7HbXdSIiIiJyca+qGeM8g5vVT8XN1VXS++mbPUbioLWAwY0oGgXdVfLBB4ERI4A1azwX4J48WRblBoA//gC6davAVhIRERFFCbPblP+xjoqbPYDgprdJIPPVVdLuWN+NwY0oOgUd3J59FmjRAvjgA2DaNNnWti3w6afAzTfL9yNGACNHVmQziYiIiKKDezjT1nCz6fWlbitJq7j5Cm5wBDdbga0imklEYaZc67jdcotcfImPL29ziIiIiKKb2W2tttgECVt2vSPAFfsZ46YFN5OP4GZyBLdCVtyIolHQY9yIiIiIqPysxa7gpnd8hK51lfS3ALdelVFxc4x9s/tbC46IIlbQwc1mA958E+jRA2jQAKhVy/MSjAkTZJxcUhJQrx5w1VWlZ6MsKgJGjQJq15b14oYOBTIzPffZvx+47DIgIUEe54knAKvVc5/Fi4GzzgJMJlkk/IsvgnzhRERERBXA4qiIWaCD3lFpU46p/G3FfoKbo+KmTWhSks5RcbOz4kYUlYIObuPHA2+/DdxwA5CdDYweDVxzDaDXA+PGBfdYS5ZIKPvnH2D+fMBiAQYOBPLzXfs8+ijw66/A99/L/ocPy/NpbDYJbWazzGw5daqEsrFjXfvs2SP7XHghsH498MgjwN13yyQqRERERFXJ4ghn2qLbQGBdJcuquOniHMHNz2MQUeQKeozb9OkyEclll0lQu+kmoGVLoEsXCWAPPRT4Y/3+u+f3X3whFbM1a4C+fSUYfvYZMGMGcNFFss+UKUD79vJc554rC35v3QosWADUry+zWb74IvDUU9K+2Fhg0iSZUOWtt+Qx2rcHli4FJk4EBg0K9ggQERERlZ9WVbO7BTdlcAQ3c9ldJY0m75+76x3BDQxuRFEp6IpbRgbQubNcr1FDwhUAXH45MGfO6TVGeyyty+WaNVKF69/ftU+7dkDTpsDy5fL98uXSnvr1XfsMGgTk5ABbtrj2cX8MbR/tMYiIiIiqijaOTVu7DQCUo+Jm99NV0mCXQOarq6RBC25mBjeiaBR0cGvSBDhyRK63bCkVL0DWcjOZyt8Qu126MJ53HtCpk2zLyJCKWWqq577168tt2j7uoU27XbvN3z45OUBhYem2FBcXIycnx+NCREREVBGsXrpKOse4+au4wf+skoYEBjeiaBZ0cLv6amDhQrn+4IPAc88BrVsDw4YBd95Z/oaMGgVs3gx88035H6OiTJgwASkpKc5LWlpaqJtEREREUUJbq82u89JV0k83R4Ojq2SMj4qb0RHcdBYGN6JoFPQYt1dfdV2/4QagWTOZFKR1a+CKK8rXiAceAGbPBv78Uyp6mgYNZNKRrCzPqltmptym7bNypefjabNOuu9TcibKzEwgOdn
7mnNjxozB6NGjnd/n5OQwvBEREVGFsJpLBzcYHV0l/VTcnMEtnsGNqDoq1wLc7s49Vy7loZRU7X7+Wabrb9HC8/bu3YGYGKnwDR0q27Zvl+n/e/WS73v1Al5+GTh6VCY2AWSGyuRkoEMH1z6//eb52PPnux6jJJPJBNPp9PskIiIi8kGruNn0bh2fjI4ZIU+jq2RMDYPsZ2VwI4pGpx3cTseoUTJj5KxZspabNiYtJUUqYSkpwF13yZIDtWpJGHvwQQlcWlgcOFAC2m23Aa+/Lo/x7LPy2Fr2GjEC+OAD4MknpTvnokXAd9+d/mQqRERERMGy+am4KT/VMqMjuMXGeR/pEpMo2w0MbkRRKegxbhXpo49kJsl+/YCGDV2Xb7917TNxosxYOXSoLBHQoAHw00+u2w0G6WZpMEigu/VWGW/3wguufVq0kJA2fz7QtassCzB5MpcCICIioqqnjWPT1m4DADjGuNkt3itudrtyBjdfs0o6g5vNVlFNJaIwEtKKm/LdG8ApLg748EO5+NKsWemukCX16wesWxdU84iIiIgqnM2sYACg3CtuMVrFzfvJkftsk77GuJmS9bACMNpYcSOKRiGtuBERERFVN86ukm4VN51jjJuv4GYucG2P9RXcashjGO0MbkTRqFzBLStLuhqOGQOcPCnb1q4FDh2qwJYRERERRSGbYx035R7cYvyPcbMUuVXcfHSVNCXJaV2MYnAjikZBd5XcuBHo318mDtm7F7jnHpk45KefZLbHL7+shFYSERERRQmbxV9w81FxK3SruCV4D25xqQxuRNEs6Irb6NHA7bcDO3fK+DPNpZfKOmxERERE5Jvda8XNcUpm9R7crG4VN0Os9+AWnyyPEQs77PYAJhIgoogSdHBbtQq4777S2xs3dk3nT0RERETe2f1U3OBjKn+Lo+JmhQ56vY+KW4qc1unhOSaOiKJD0MHNZAJyckpv37EDqFu3IppEREREFL20yUmUwXUapncGNx9dJYsk0FnhPbQBQGJN1+MVZrG7JFG0CTq4XXmlrJFmscj3Op2MbXvqKVlrjYiIiIh8c1bcDK4Qpte6P9p8LAfg6F5p9xPc4pJdp3UFDG5EUSfo4PbWW0BeHlCvHlBYCFxwAdCqFZCUBLz8cmU0kYiIiCh62M2OUOUe3Jxj3Px3lbTpfAc3vV6HYsepXWE2gxtRtAl6VsmUFGD+fODvv4ENGyTEnXWWzDRJRERERP4prTuke3BzTPGv81Fx05YDsPmpuAGARaeHSdlRnMPgRhRtggpuFgsQHw+sXw+cd55ciIiIiChwWldJ9+BmCLSrpJ+KGyDBDQoozLGdfkOJKKwE1VUyJgZo2hSw8W8BERERUbl4C276WDkl81Vx05YDsOn9n7pZHbebc1lxI4o2QY9x+7//A555Bjh5sjKaQ0RERBTdvHWVNJjkut7mPXBZzYFV3JzBLY/BjSjaBD3G7YMPgF27gEaNgGbNgMREz9vXrq2ophERERFFH6VV3GJKd5XU+Vg421YsQays4GZzLDHAMW5E0Sfo4HbVVZXQCiIiIqJqQqu46bxU3HwFN2uRghGA3cfi2xqbUYKbJZ/BjSjaBB3cnn++MppBREREVD04K25G14gVY7xcN/joKmkzO4JbGRU3uxbcChjciKJN0MFNs2YN8O+/cr1jR+DMMyuqSURERERRzDEBic7oCmFGbYyb8j+rpCqj4qYFNyvHuBFFnaCD29GjwI03AosXA6mpsi0rC7jwQuCbb4C6dSu0fURERERRRTkW2da5jXEzxulgA6D3NcbNElhwU47ZKa2FDG5E0SboWSUffBDIzQW2bJGZJU+eBDZvBnJygIceqowmEhEREUURq7eKm5ySlVVxsxvKOHWLkdttDG5EUSfoitvvvwMLFgDt27u2degAfPghMHBgRTaNiIiIKAppXSVLVNyKARjs3gOX3Szby6q4wVFxs3GMG1HUCbriZrfLQtwlxcTIbURERETkh6P
ipjd6BjfAd8VNW7RbGcoIbiaD7F/EkzKiaBN0cLvoIuDhh4HDh13bDh0CHn0UuPjiimwaERERURTyUnGLiZfrRvjoKmkuvWi3NzpHl0sGN6LoE3Rw++ADGc/WvDnQsqVcWrSQbe+/XwktJCIiIoomjuCmdwtusQmO5QCUj66SAU5Ooo+Tx1HFttNuJhGFl6DHuKWlAWvXyji3bdtkW/v2QP/+Fd00IiIioijkJbhpFTeDj4qbPcCKm96xHhyKWXEjijblWsdNpwMGDJALEREREQVOpwW3WFfHp7K6SjqDm9F/ZymDFtzMDG5E0SborpIPPQS8917p7R98ADzySAW0iIiIiCiK6ezeKm6O5QAAWM2lw5uyBlZxY3Ajil5BB7cffwTOO6/09t69gR9+qIgmEREREUUvnU1ClSHWbYxbvOu62ctU/sri2Gb0H9yMjrFyOguDG1G0CTq4nTgBpKSU3p6cDBw/XhFNIiIiIopezoqbe3BLcA9u5a+4GRMdlTsrgxtRtAk6uLVqJYtwlzR3LnDGGcE91p9/AldcATRqJOPmZs70vP3222W7+2XwYM99Tp4EbrlFgmNqKnDXXUBenuc+GzcC558PxMXJ5Cqvvx5cO4mIiIgqihbc3CtuphquU7Li/NLBTZtVsqyKW4yj4qZnxY0o6gQ9Ocno0cADDwDHjsmabgCwcCHw1lvAO+8E91j5+UDXrsCddwLXXON9n8GDgSlTXN+bTJ6333ILcOQIMH8+YLEAd9wB3HsvMGOG3J6TAwwcKLNeTpoEbNokz5eaKvsRERERVSVvFTdjrA52yCfqlkJvXSUda7+VEdxikxzLCtgY3IiiTdDB7c47geJi4OWXgRdflG3NmwMffQQMGxbcY11yiVz8MZmABg283/bvv1L9W7UKOPts2fb++8CllwJvvimVvOnTAbMZ+PxzIDYW6NgRWL8eePttBjciIiKqes6KW4xnCLNCh1goWAq9zCxpCzC41WBwI4pWQXeVBICRI4GDB4HMTKlo/fdf8KEtUIsXA/XqAW3byvOeOOG6bflyqZxpoQ2QyppeD6xY4dqnb18JbZpBg4Dt24FTp7w/Z3FxMXJycjwuRERERBVBrySEGeM8Q5gN8r234OasuMX4P3XTgpvRzuBGFG2CDm6FhUBBgVyvW1eC1DvvAPPmVXDLIN0kv/xSumK+9hqwZIlU6Gw2uT0jQ0KdO6MRqFVLbtP2qV/fcx/te22fkiZMmICUlBTnJS0treJeFBEREVVrei9j3ADAppPTstOpuJmSDQAY3IiiUdDBbcgQCVMAkJUF9Ogh49uGDJHukhXpxhuBK68EOncGrroKmD1bukUuXlyxz1PSmDFjkJ2d7bwcOHCgcp+QiIiIqg2t4mYweZ6GOStuRV5Cl2OWSF2M/+AW5xjjFqMY3IiiTdDBbe1amaERkHXbGjQA9u2TMOdtYe6KdMYZQJ06wK5d8n2DBsDRo577WK0y06Q2Lq5BA+nS6U773tfYOZPJhOTkZI8LERERUUVwdpUsVXGT761FXipu1tKLdntjSpZTu1hlO91mElGYCTq4FRQASUlyfd48mQ1SrwfOPVcCXGU6eFC6ZjZsKN/36iVVvzVrXPssWgTY7UDPnq59/vxTZpzUzJ8vY+Zq1qzc9hIRERGV5HOMm97RVdLLOm7OrpJlBLeEVEdwg4Ld7uVxiChilWsdt5kzgQMHgD/+kKn2Aal8BVuYysuTGR7Xr5fv9+yR6/v3y21PPAH88w+wd6+McxsyRJ5/0CDZv317GQd3zz3AypXA33/LUgU33igzSgLAzTfLxCR33QVs2QJ8+y3w7ruyrAERERFRVTM4ujEaTJ4hzK5V3Iq9dHO0BVZxi09xndoVZrG7JFE0CTq4jR0LPP64LAHQs6dUtACpvp15ZnCPtXq13Ee73+jRcn3sWMBgkIWzr7wSaNNGglf37sBff3mu5TZ9OtCuHXDxxbIMQJ8+wCefuG5PSZG27dkj93/
sMXl8LgVAREREoaBV3GJKVNzsej9dJQMNbqmuU7uCbAY3omgS9Dpu114r4ejIEVk8W3PxxcDVVwf3WP36AcpPFf+PP8p+jFq1XItt+9KliwQ+IiIiolAzaF0lfVbcSp8c6bTgFlvGcgAJOtgAGAAUMbgRRZWggxsgk3qUnNijR4+KaA4RERFRdDNAm1WyZMVNQpnNa3BzdK+M9V9x0+t1sEAPA+zsKkkUZcq1ADcRERERlY8W3GLjvXeVtHkZ46azB9ZVEgAsjvXginIY3IiiCYMbERERURXSglvJrpLK4HuMmzO4mQIPbsW5DG5E0YTBjYiIiKiKWM3KefIVE+95GmY3+Osq6eheGUDFzapnxY0oGjG4EREREVURi1s1LcZHxc1u9tJVUnkfF+eN1REAzXkMbkTRhMGNiIiIqIp4BLeEEiHMEdxs5tIVN71dC25ln7rZDAZ5rnwGN6JowuBGREREVEXMha4wVXIdN1fFzU9wK2NWSQCwGR0Vt1xbudtJROGHwY2IiIioirhPPFIyuMHRxdFrcFMS+Iwl7+OF3RHcrAWsuBFFEwY3IiIioipiKZRQZgNgMJYIYUbfY9yCqbipGAY3omjE4BZh/vgDuOUW4J13Qt0SIiIiCpa1WAtuXgKY0U9XScfkJIFU3JzBjWPciKIKg1uE+e8/YMYMYMmSULeEiIiIguWquHkLbnJapiylg5shiOCGWMeyAoUMbkTRhMEtwtSuLV9PngxtO4iIiCh4WsXN7qfipixeukpqwS2ArpLO4FbE4EYUTRjcIkytWvL1xInQtoOIiIiCpy0HYNOVPgXTxWjBzUvFDbItJi6AUzfHkgF2VtyIogqDW4RhxY2IiChyOce46UpXznRaxc3qraukhLCY+LIrbnpHcFPFDG5E0YTBLcK4V9xU6b/rREREFMa0xbXt3oJbjJ8xbo6Km9EUQHCLZ3AjikYMbhFGq7iZzUBBQWjbQkRERMGxOsadeQ1u2vg1L2PcnF0lA6m4ad0pGdyIogqDW4RJTARiY+U6x7kRERFFFq3i5q2rpN4xxg02z4qbzapgcFwPpOJmSHDs7WU9OCKKXAxuEUan4wQlREREkcrqmJxE6X13lUSJMW7ahCYAEJMQQHBzdJX0VrkjosjF4BaBOEEJERFRZPI3xk2vdZW0egYuc4EruMXGl33qZkyQffRmW3mbSURhiMEtArHiRkREFJmc67h5qbg5g1uJrpJW94pbAAtwx9RwBDcrK25E0YTBLQKx4kZERBSZ7I6Km/JScTM4gpuuRHAzu63HFhtAV8mYBAY3omjE4BaBtODGihsREVFkcXaVNJQ+BTM41l8rGdycVToABmPgFTeDjcGNKJowuEUgraskK25ERESRRQtu3iYn0Zu0iptn4LIUyn2sKDu0AYApicGNKBoxuEUgVtyIiIgik93iO7j56iqpBTdbgMEt1hHcjHYGN6JowuAWgTg5CRERUWSyO9ZWUwYvwc3RVVJv99VVMsCKm6OrZAyDG1FUYXCLQJychIiIKDJpFTd4DW6OiluJwKWt42bTBXbaFp/qCG6KwY0omoQ0uP35J3DFFUCjRrKw9MyZnrcrBYwdCzRsCMTHA/37Azt3eu5z8iRwyy1AcjKQmgrcdReQl+e5z8aNwPnnA3FxQFoa8PrrlfmqKh8rbkRERJHJ3xg3oyO4lay4WYokgNm8zETpTVyynN7FgsGNKJqENLjl5wNduwIffuj99tdfB957D5g0CVixAkhMBAYNAoqKXPvccguwZQswfz4we7aEwXvvdd2ekwMMHAg0awasWQO88QYwbhzwySeV+tIqFStuREREkUn5qbgZ4713lbQV+1602xstuBmhnKGPiCKfMZRPfsklcvFGKeCdd4BnnwWGDJFtX34J1K8vlbkbbwT+/Rf4/Xdg1Srg7LNln/ffBy69FHjzTankTZ8OmM3A558DsbFAx47A+vXA2297BrxI4h7clJJqJREREYU/f10ljXE
6WAEYSnSVtDq7Sgb2Dz+hpsF5vTBHISaufG0lovAStmPc9uwBMjKke6QmJQXo2RNYvly+X75cukdqoQ2Q/fV6qdBp+/TtK6FNM2gQsH07cOpUpb+MSqF1lbTZpKJIREREkaGs4AYAelWi4mYOruIWn+zar+CUrTzNJKIwFLbBLSNDvtav77m9fn3XbRkZQL16nrcbjRJs3Pfx9hjuz1FScXExcnJyPC7hJC4OSEiQ6xznRkREFDmcXSWNpU/BtOBmUD5mlfQyLs6bmDi9c823ohx2lSSKFmEb3EJpwoQJSElJcV7S0tJC3aRSOEEJERFR5FFW3xW3mDjHwtklg5ujq6QKYmyE2XGKx+BGFD3CNrg1aCBfMzM9t2dmum5r0AA4etTzdqtVxn657+PtMdyfo6QxY8YgOzvbeTlw4ED5X0gl4QQlREREkccZ3IzegptWcfMMW1r3Srs+8NM2i2PpgMIsBjeiaBG2wa1FCwlWCxe6tuXkyNi1Xr3k+169gKwsmS1Ss2gRYLfLWDhtnz//BCwW1z7z5wNt2wI1a3p/bpPJhOTkZI9LuNGCGytuREREkUNZJEjpvAQ3Y7wjuKHkrJJyn0C7SgKA1RHyivMY3IiiRUiDW16ezPC4fr18v2ePXN+/X2ZKfOQR4KWXgF9+ATZtAoYNk5kir7pK9m/fHhg8GLjnHmDlSuDvv4EHHpAZJxs1kn1uvlkmJrnrLlk24NtvgXffBUaPrvKXW6G0rpKsuBEREUUOreLmLbiZEhxdJUsGNz9rv/licQQ3cy6DG1G0COlyAKtXAxde6PpeC1PDhwNffAE8+aSs9XbvvVJZ69NHpv+Pc5vWdvp0CWsXXyyzSQ4dKmu/aVJSgHnzgFGjgO7dgTp1ZFHvSF0KQMOKGxERUeTx21XSUXGLgYLdrqB3BDVncPMyLs4Xm0EPWIBiBjeiqBHS4Navn6xD5otOB7zwglx8qVULmDHD//N06QL89Ve5mhi2ODkJERFRBLLJiY8+xktwS3BtsxQpmBzf24uDr7jZDFJxs7CrJFHUCNsxbuQfJychIiKKQH66SsbGu7aZC1yfbGuTkwRTcbM7lhuwFDC4EUULBrcIxYobERFRBHJU3HQxpU/BYhNc2yxuwc3VVTLw0zZncGPFjShqMLhFKFbciIiIIpC/iptbV0lzoStwORftDqKrpHIEQysrbkRRg8EtQnFyEiIiogjkZ4ybwaiDzXHdUujeVdIRvryEPV9UrEGerpDBjShaMLhFKC4HQEREFIG04BbrPYRZHadmxfluwc3RVRJBjHFDrDyOrcBWxo5EFCkY3CKUVnHLygJs/JtMREQUEXQ2qYDpfVTPbJDt1iJXpcxuLX9wsxex4kYULRjcIlTNmvJVKeDUqdC2hYiIiAJk919xs+lku3tXSecYtyC6SuriGNyIog2DW4SKiQGSk+U6u0sSERFFBl0ZXSWdwa3ILbj5WbTbF32ijHGzZ1vL00wiCkMMbhGME5QQERFFFp2j4mbwGdwcs0EWla646YyBn7bVOjsRAGDal1uudhJR+GFwi2CcoISIiCiy6PzMKgkAdl3pMW6qHLNKdhwq3XIaFuYj/yQHwxNFAwa3CMaKGxERUWRxVtxM3k/BbHotuCm3jb7XfvPljF4mZOljYITCuu/zytlaIgonDG4RjBU3IiKiyKIvo6uk3dFV0mNyEm3Rbh9VOq/Po9fhRF2puu37PadcbSWi8MLgFsFYcSMiIoosOlVGV0lHxc1W7DYbpDX4ihsAGDtLcCtcx3FuRNGAwS2CaRU3BjciIqLIoFXcjHH+g5u1uHRXSV9hz5fGFycBAJIPseJGFA0Y3CKYVnFjV0kiIqLIoFdSSfPVVVI5K25uwc3ZVTK407ZuN0lwq2ctQsZ2c7BNJaIww+AWwdhVkoiIKLI4K24mHxU3g5ya2Sqg4lanWQwyYuMBAOu/YXdJokjH4BbBODkJERFRZNErbVZJHxU3g2y3m93GuNnkuq9
Fu/3JayLj3DLS2V2SKNIxuEUwVtyIiIgiixbcfI1x04KbzW05AOfab0FOTgIAid0luNm2suJGFOkY3CIYK25ERESRxaAFN19j3LSukmYvXSXLUXFrcZmMc6t7PAd2uypjbyIKZwxuEUyruOXmAmaOOSYiIgp7ekh4ionzcQrmpaukVnHz1b3Sn25Da8AMHZKVFTsXFwV9fyIKHwxuESw1FdA5/oaz6uaSnw8UF4e6FURERKU5K26+QpgW3CxuXSXt5ZucBADiauiRkVgDALD1R45zI4pkDG4RTK8HataU6wxu4uRJoGVLYODAULeEiIioNINWcYv3EcKMWsWtdHAzmMp32mZuKePcTi5jcCOKZAxuEY4TlHj6+28gMxP4808gOzvUrSEiIvKkBTefFTfHWm3KreKm12aVLEfFDQBqnSfBzbiLE5QQRTIGtwjHCUo8rV7tur55c+jaQUREVJLdrmAsI7jpEg0AAMthV59/XRlLCJSl/dUyQUnDvFwU5dnL2JuIwhWDW4Rjxc0TgxsREYUr95kiYxK8h7BGl6YCAFK3n3DOAlnWot1laXtxPHJ1RsRCYePM/HI9BhGFHoNbhGNwc1EKWLPG9T2DGxERhRNzgVtw8xHC+jxUE8XQo46tGBtnSchyLtpdjuUAAECv1+FYLam67Z7DcW5EkYrBLcKxq6TLoUMyvk3D4EZEROHE4raodqyPiltSHQMO1k0FAGz8RP65OytuPhbtDoSug4xzy1/F4EYUqcI6uI0bJ9Pdu1/atXPdXlQEjBolVacaNYChQz1P3AFg/37gssuAhASgXj3giScAq7VKX0alYsXNResmGR8vXzdtkiocERFRODAXulXc/ISwhIvkn7t9mfxz1zuXECj/aVvDiyS4JR7gBCVEkSqsgxsAdOwIHDniuixd6rrt0UeBX38Fvv8eWLIEOHwYuOYa1+02m4Q2sxlYtgyYOhX44gtg7NgqfxmVhhU3F62b5FVXyVIJJ04AR4+GtElEREROVreKm79uj+c8KMEtLScbR3dbXF0lyznGDQC63uCYoMRcgOP7LOV+HCIKnbAPbkYj0KCB61KnjmzPzgY++wx4+23goouA7t2BKVMkoP3zj+wzbx6wdSvw1VdAt27AJZcAL74IfPihhLlowIqbi1Zx69MHaNVKrm/aFLr2EBERubM4Km5W6KDX+w5hrc6Lw2FTIgwA/n77JAxKZoI8na6SDdvHIjNGuqSs/oLdJYkiUdgHt507gUaNgDPOAG65Rbo+AlJdsViA/v1d+7ZrBzRtCixfLt8vXw507gzUr+/aZ9AgICcH2LLF93MWFxcjJyfH4xKuWHETSrmC29lnA506yXWOcyMionBhKXYFt7IUnSmfzJ6Yc8LVVbKck5NocpunAAAOz806rcchotAI6+DWs6d0bfz9d+Cjj4A9e4Dzzwdyc4GMDCA2FkhN9bxP/fpyGyBf3UObdrt2my8TJkxASkqK85KWllZRL6nCseIm9u8Hjh+XCm2XLgxuREQUfmyO4GYPILi1vk0+ma2//yRiHBW3mPjTC26pF6QCAAxbs0/rcYgoNMI6uF1yCXDddXIiPmgQ8NtvQFYW8N13lfu8Y8aMQXZ2tvNy4MCByn3C08DgJrTxbZ07A3FxDG5ERBR+LEUSwGy6sgPYuXcmI1dnRJKyIh6OrpKnGdy63CIVt0a5ucg/aTutxyKiqhfWwa2k1FSgTRtg1y4Z72Y2S5Bzl5kptwHyteQsk9r32j7emEwmJCcne1zCldZVsqgIKCwMbVtCSesm2b27fNWC25YtgN0emjYRERG5szoqbrYAKm4xcXpkNq3lue00JicBgFZ943BSH4sYKKz6MnyHgRCRdxEV3PLygN27gYYN5QQ9JgZYuNB1+/bt0mWuVy/5vlcvmZzCfWbB+fOB5GSgQ4eqbXtlSUqS7oFA9a66uY9vA2RykthY+Z3RxkUSERGFkjarpD2AihsA1L6stsf3MfGnd9qm1+t
wskkqAGDvL+wuSRRpwjq4Pf64TPO/d6/MFnn11YDBANx0E5CSAtx1FzB6NJCeLl3l7rhDwtq558r9Bw6UgHbbbcCGDcAffwDPPitrv5lMIX1pFUanc3WXPHIktG0JFaVcXSW14BYT41rzL5CZJQsK5PfDxp4jRERUSazm4ILbeaNrwf3fkvE0K24AkNhbukuq9Vmn/VhEVLXCOrgdPCghrW1b4PrrJaD88w9Qt67cPnEicPnlsvB2377S/fGnn1z3NxiA2bPla69ewK23AsOGAS+8EJrXU1m6dpWvK1aEth2hsnevzKoZG+vqIgnIeDcgsHFuDz8MDB4MPPhgpTSRiIgo6IpbvZYxOJCc4vw+9jTHuAFA++vl8RqeykFxAccSEEWSsA5u33wji2oXF0uI++YboGVL1+1xcbIm28mTQH6+hLaSY9eaNZNJTQoKgGPHgDffdHUtjBbnny9f3Rcnr060bpKdO3tWUgOdoCQzE/jyS7n+0UfA3LkV30YiIiKbo+Jm0wd++qXv7eouGZtw+sGt8xWJyNUZEQc71n6dV+b+G2blY2ryWvzxQjUej0EUJsI6uFFg+vSRr3/9Jd0Gq5uS49s0gQa3jz+WiW60QH/nndV7vCAREVUOLbipACtuANDlXpmgxIbTW4BbYzDqcLS+VN12/ZxV5v6rRu1Bs9wcHJ/w32k/NxGdHga3KNCjh4zpOnxYug1WpV27ZCKQp56q2ud1V3J8m0YLbtu2yWLt3pjNUmUDgEmTZFxcRgYwYkT1DMFERFR5nF0l9UEEtyGJ2HtBMxwc2AKmhIo5bYs9OxUAYF7tf4KSA+uL0eLQcQBA46J8bJiVXyHPT0Tlw+AWBRISXNPgV3V3ySeflJk+3347NJOjKOW74ta0KVCjhoSzXbu83/+77ySoNWwok9h89ZVU3n74AZg+vXLbTkRE1YvdEnxw0+t1uH1xCwz/o1mFtaP1tVJxq3c0Gzar708pFz2RAYPb92tey/S5LxFVPga3KOHeXbKqLF0K/PyzXLdagcmTq+65Nbt3A9nZMratY0fP2/R61zZv3SWVAt59V66PGiWTm3TvDjz/vGsblxIgIqKKYiuWyUCC6SpZGbrfUAOF0CNJWbHRRxXNalZITD8MANjdQMbZJa/M9Bv0iKhyMbhFiaqeoEQp4Ikn5HqrVvL1448lwFUlrdrWtat0Fy1Jm1nS25IAy5bJ/U0m4N57XdufflqWlMjJkSUniIiIKoJzjJshtMEtJk6PI7Wk6rbtW+/dJRe8chJ1bMXI1Rlx5bJ2KIABdWzF+HsS138jChUGtyjRu7d8/fdf4Pjxyn++H3+UpRkSEoAFC2SJhkOHgF9/rfznduerm6TG3wQlWrXtlltcS0wA0lVy2jSpwC1YENg6cERWq++xlEREgFtwC6KrZGXRd5Pglv9PltfbD02SatvRMxugbosYHG4t/yh3fMjukkShwuAWJerUAdq3l+t//10xj7l3rwTCBx4A8txmDDabgTFj5Prjj8uSC1pl6n//8/5YBw7IkgwV6cQJYMoUua51FS3JV3A7cMC15t/DD5e+X6tWwKWXynWOdaOyFBXJxDY9enARdyLyTRvjFg7BrfmVqQCA2oeyYbd7dn/8b3kRmmfK9MrnvtQIANDinvoAgPrbj6Ewh+u/EYUCg1sUqejuko88AixfLmvldesm1wHpErlrF1C/vqu75H33ATqdVKh27PB8nO++A5o3B9q0ke6JFeXZZ2UNv86dgeuu876PFtx27QIKC13bP/xQTrAvvBDo0sX7fW+9Vb7OmAHY+T+K/Fi9WsZbrl8PrFwZ6tYQUbiyF4dHV0kAOOe2JJihQ027GTsXF3nctuSpIzAA2JOaio6XJAAALng4FSf1sUhSViyawDVziEKBwS2KVOQEJQsWALNmAQYD0KSJnJT26SPjv154QfYZP15mbQQkmF12mVyfNMn1OH/9JbM12u3SlfKCC2QGytOdan/tWgmQAPD++74
XVa9XT6qRSgFTpwITJgBXXQW8957c7q3aprnsMiAlRapzVTnpC0WeFStc12fPDl07iCi8OStuhtCffiXWMuBIUhIAYOXE486qW3GBHal/yzTRNYc1cu5vjNXh1Jn1AABHph2t4tZ62re6OKyrfqum5+Kzs3bh2B72n6eKFfq/HFRhtOC2Zs3pdUu0WqXaBsjMips3u8LXa6/JGLp27UpP3HH//fJ1yhR5/u3bgSFDpGvlkCHAjTfKYz/2GDB0KJCVVb72KQU8+KB8vekmCYO+6HSuqtvIkcAzz0ggLSyUStvll/u+b1wccO21cp3dJcmff/5xXWdwIyJf7NqMjGHQVRIAbJ1TAQBps3fjm4TVmD70IH4ZlYmadjOydTEY/HIdj/27PiHdJZseOo6T+6t4NjKHZZOzseuc5Zje6d+QPH8gtt27HS3XHcQv1+wNdVMoyjC4RZHmzYHGjSUceeuuFWh3v08+AbZsAWrVkqnxU1KAL7+ULo+1akkYeuON0lWuQYOAFi0kkL37LnDJJcCpU0DPntLdcMYM6aIYGyvLCJx9tlThgvXVV9LlMjFR2lGWoUOlzU2bSpfKt96S7qQrV0pF0Z9bbpGv338PFBcH31aqHtwrbhs3SpWWiKgku2NyEhjDI7hd+nkT7GrVAEXQo1FxPhr/tAt1v9gOADjRsyHianieJp51XQ0cMSUgFgoLnz8WiiZj68RMGACcceAYdiwuLHP/qrZhVj7SCmRigIbrM3DqYGgCLkUnBrcootN57y5ZUABccYWEqq1b/T/GqVPA2LFy/YUXJKhprrsO2LlTZln0VqnS64ERI+T6M88Ae/YAZ5wB/PKLzD6p00lV7u+/ZUKT3btdzxWonBzXuLrnnpOgWpYHHpDJI/btk/A5ejRw3nmyDEBZLrhAniMrC/jtt+DaStXD4cMS1PR6WZYCAObMCW2biCg8aV0lEQZj3ACgQdtY3L2zHc7e2wtHrmuNQ3GJAAAzdOgzoWGp/fV6HcwXSNUt/+cMWIqqtruizapQe5tMna0HsPSpw1X6/IFY/XKG83oCbJjzwJGA75v54xrkfLPA5+05e3KQaTwTxw0dsXrI57CZo3s2rBMbj8FuC98usaHA4BZltOCmTVBiNkvFafZsWUx66FAgN9f3/cePl9kaO3aUCUdKqlWr9ELX7u680xWIatUC5s6VcWbuzj4b+OYbuT51aunJTPx54QUgMxNo3drVnTMQsbGB7+tOrwduvlmuf/VV+R6DoptWbevUCbjhBrnO7pJE5I2yOE5CwyS4aeo0i8FN3zXGTflnI/Gr7qj7Q3e06Rfvdd/zxkpwa56djYXxSzE1aS0md9mJH+/NqPRxZ/98noOadrOr3auOoCArfMKL1axQa40sl7C7rnzybZpzCFZz2QP7M96bjbrX9kTMTTfh5JaTXvf574JnUd+2HnXsW3H2L3chN64tVl0+OSoD3Kqer6Nm1wY4EdsVu77YGOrmhA0GtyijzSy5bJl07bv1VuD334H4eKBBA2DbNhmb5m1ykH//BT74QK6/847vCT/8qVNHxrA1biyVtjZtvO937rlStbPZJCwGYssW19pr770XWMWsImjdJWfPLv+4PIpeWnDTfqcBYOHCil/+oipYLMBHH8mHPERU8ZQ1vCpuJen1OpxzSxLOHFrD5z6tzovD7u5NUAAD4mBHs7wctNp0CLU/3YYZrTeXWlqgIm2bLNW2XWl1cdxgQrKyYu7Toemy6c2iN06hpt2MXJ0RQ1a1R47OiLrWIsx9tuwFdudOboYiNEA8jiOj74Olbt/743Z0PiCzsu1OuAQWJCJV7cY5c+5Bblxb/PfVlgp/PaGy6orPcPbKp6GHHXXtm9H8jp5Y1eM1Vt/A4BZ1OnUCkpNl3bXBg2VsVmwsMHOmrFtmNMo2LQBp1q+XaoHNBlx5JdC/f/nb8PLLwMGD0h3RH212yq+/9r5Atju7XbphWq0y0cngweVvX7C6dJHjajYDP/x
Qdc9LkUGbmKRnT/k9adpUuuamp4e2XeXx7rvSnXngwMgMnkThZu0PefhvuWuqfWdXyTAZ41Zed61uhUGWPqgz5xycHNkeu89qAjN0aHn0JGY/UTlBym5XqLFeAlDda+si/0KZ8TJ/evh0l9z/qXSTzOxUD3WaxeB4L2njiY8P+r3f4S1mNNx0CjswGgDQ/uTX2PDkPI99bLc9AgPMyDB0R4uc2che9x/WN3sIFtRAqtoNde/jlfCK/LPb7Pi35o3YZ+qHwmMVM95w3YhZ6D57JHRQ2FFjCA4be8CIIpyz6mkcSuiHw4v2VcjzRCoGtyhjMLgC0+LF0tXv66/lRKxXL5mKH5BxYkuXAvn5cv3ss2XsWs2arn0q25lnyqyNSskkKP58/rm0NzFRpv+vSjqdq+rG2SXJndUqa7gBEtx0OlfVLdK6SyrlWtB++3ZgzJjQtofKR/uAKT/f9z5z5wJ168rfVao8i985hazrVmN775VY96NMVqFV3HQRHtwAwGDUodOlibjmf/Vx15pWONS3KQDA+s6uSpmQY+OsfNS3FMIMHfo9WQsXv9UAFujQNC8Hq6b7GQNSRU7utyJtnwTLTo81AAD0f78xLNCheU42VnyZ4/O+v997EHGwY1PCBdiefA10UGj+5ghnGFr/yG9oWfg7FHQ48fK70Bv0qNOtHrrtfRc7XpQB+C0KFyDz73LM+HYaNo1ZgPZZ36KZeQk23/jZaT/e5hf/ROePb4IeFuyJG4Azjv6A+vnLsLrTWNgQgzTzX6h9cVesvXfm6Tc+QjG4RSFtnBsgJ2LXXOP6/oEHXNPyX3edjFd7802ptF13nXRHbNmy6to6fryc7P70kyxj4M3Ro8CTT8r1F14A0tKqrn0abZzb4sWnP2Og2Sxd6YqKyt5Xc+KEdGP95JPTXwOPKs6WLXKCnJwMtG8v27T1DGfPjqyf1erVMnmR1kX6vfeARYtC2yYK3muvyd/yq67yPpPwyZPAHXfIsi7jxsnffm/sdlk65Whol+uKWNkZVhx7Yjv0AOJhx64bNyNzpwUqzCYnqUg3zGqKo8Y41LKb8dOVeyv88Td8KKHoQL1aSGlgRJMuJuxrWlduezH0VbcFzx2FCXYciU1Aj9tkfbymZ5mwr4UM9N801nvV7eR+K+ovk8BV84GmqLXofyhGKlLUHvzb8/9gM9uQ9t5jAIDtKdei41Oe3Zk6Pns+jus7QA8rDtwzqdTjV6akD1yf9Ddf/PFpPdauLzai9dghMKIQh4090GD/LBjjjTDEGnD2pvHYOXEpTupaw4hC1Oje9HSbHrEY3KLQ7bcDAwYAX3wBDBvmeZtOB3z6qZxkZmTITItNm8pJ5nffAQ1LTyJVqTp0cFWzfM0w+fjjMttlt27AQw9VWdM8NG0K9O0r1196qfwn5Lm5smxC//7y1V94UwpYskTGKTZuLGvX3XefLKVA4UEb33bOOVLdBoALL5QxpQcPShU7UkydKl+vu841O+zttwPZ2SFrEgVJKdfPccEC770nHn1UJngC5EOo33/3/lgTJkj4O+ussruyU2nfD/gPda1FOG4w4agxDnWtRfit5xYoxyyMupjoO/1KSDUgdZwMbG++7iBWf12xVTD9MgluyZe61pZr84R0RWyyPRPH94V2seuCmfLGslzUAHq3dfq6vtgEANB83zHsW116XaHZ9x5CImw4EpuAS16ug7rd62PjwAkAgG57PsCW5vejttomY9p+8d4lak+P2wEAbf79ssrGge39cTtaFM4HANhhQF37Zmx+8c9yPZbNbEOdO6+BCVk4oWuH+I1zEV/Xc3Kcdo/0QHzmBmx+7Be0ue+s025/xFJUpuzsbAVAZWdnh7opFWbbNqX69lXq8ceVys0NbVt27lTKYFAKUOrvvz1vW7BAtut0Sq1YEZr2aebMkXYASr32WvD3P3FCqR495P7aZcgQpSyW0vsuWKBUu3ae+9arJ1+bNVOqoOB0Xw1VhDvvlJ/J//2f5/Y
rrpDtL78cmnYFq6hIqZo1pc2//y5/E1q2lO+HDw9168JHVpZSnTvLz9duD3VrSlu50vX3ElAqJkap1atdt8+Z47q9f3+5fsUVpR8nL0+p2rVdf3tSUpRavLjKXkbEW/jmSZWOdJWOdLXgtRNq3U+5ag7+VOlIV3OxRKUjXX3e579QN7PSfJq2WaUjXU1NXK0sxRXzRtm5tFCOJ9LV4a3Fzu02m11NN61U6UhXX119oEKeqzy2pxc427d3VVGp26ekrFXpSFef9dzlsT3vhFX9rFuq0pGufrjviHO7zWpT+2P7eJwErGr/rM/nP7X9pLIgXilArX1gdqnbj284qlae+7o6tHDvabxKTxvr36UUoA4Ze6idCZcpBahtydeU67FWD/9eKUCZkaiOLAndzzEYocoG0feRDwWkbVup5rzxBlDD9+RRVaJVK+m6A8h6c9dfD/zvf8CGDcDIkbL9/vuBHj1C10YAuPRS1yfYTz0V3Hi3jAygXz9Z9LtWLRmnZzJJV6T77nNV8KxWWZ9uwACZATQxEbjnHmDVKuC//4AmTaRKGsjC4wCwdq103QuXSVV275auvCNHRkclx31iEneRNs5t9mypajdqJNXgGjWkcqPTydeZM0PdwvDwySdSRf311/D82c6YIV9vuEG6yFsswE03yWRV2dmuJV4efdQ1g/CcOaW7f3/+uXTPbtFC3q/Z2TJOOlz+jpyu7dtdVceKlnXYipNPbQMA7OrYCBc/WQvdrq4BNUb6UsdBq7hFX1dJzcCZrZAPA5rm5+L74RXThXHFW45ukikpaNjetb6PXq+D7mqpuhnmHKrUGS39WTZOJiXZV7smmp1desrruiOl6nbGigOY3OpfHNosSxr8MiIDqcqC4wYTLn/btXaS3qCHbcZkWBEHAMhFGjot/T+fz5/apiZ2pcg/nqTPP/K4zZJnhuWsQTjnnydR++JuWHPbt6fxSkXWjlNolynrOh257hHkj5LuUK1yZuPoygx/d/Wq7tfvAQC217kWDfo2Oe32RbUqjYkRKhorbuFm/36pJLlXmLRLw4bySXe4GD3a9Wn2ggWyzW6XT7vvvlteR58+So0apdTHHys1f75SrVu7XsumTXKfn39WSq+X7U8/rdShQ1IF1V73vfcqVfJX7ptv5La4OKX2lvHB2d9/K5WcLPsbjUotWlTRRyI4J096VhHT0lzHLxJlZbkqG5mZnrcdPOiqbBw9Gpr2BUOrED71lOf2p56S7XXrynu0OisuVqpxY9fv79lnh1fVzWqVvy+AUr/8IhV+rb133il/TwCppObny3369ZNtY8e6HsdsVqppU9n+0UdS3b/6atfv8/vvh+b1VZSlS6WHR2ysUvffX/G/15+236bSka6+My5XWUc8u1NMuWiPsxL3xYCKq3yEo+lDD6h0pKvZ+FNtmJV32o+nVay+uqr0D+zUIYuzovlZrfUV8nzBOLS5WH1nXK7Ska5+HJnhdR+rxa4+7bzd+fOfjT/VV1ftV98blql0pKsZNxz0er+Vfd9V+aiv1o36pcx2bBqXrhSgrIhRmStc1bu1TR8sdWK1sd6dqvBEYbler1JKreoyXilAZaOpshZblc1qUyd0bZUC1Op2Y4J6rN3TNis75J/pzikbyt2mqhaqbMDgFgAGt6pRVKTUn38q9eKLSl18sVLx8XKi8NNPoW6ZJ5tNqRtukL9/SUlKvfKKUl27eg+d7pfmzZXa5dlLQk2e7Lq9Rg3X16+/9v7cdrsr3F13ne82pqcrlZgo+2nhrWZNpXbsKL3v/PlK9eyp1IQJlXciajYrddFF0o4mTZQ64wzX6x41SrpmBcNiUerIETmZPl179yp17Fjw95s/X9rfooX32888U27/9NPTa19ly8hwdVXeutXztqIi1+92x45KnToVihaGh2nTXF2WExLk+pw5oW6Vy6JFrve59r5IT3d9uKBd3Ls8fv21bGvUyNVl2/11al2yrVYJOdpjLFxYpS+tQml/h7R
LbKxSI0eefoDL2GFW067c7zwxT594stQ+Npvd2Y1w1uhML48SPSzFdjW1xhqVjnT1s26pWj+z/GHq8NZitcBxXHf85X2cwLfDDqk/sNjZXfHTrjvU0f/M5X7OspzYZ1Hf33VYfVZ7vbNtc/Cnyjlm9Xu/ZZOz1ZcJq1y/J0hXP+uXqrwT/u8XCPfwtKqTfBqzduQsZyha2edttbn2bc5f/uP69mrn5PVBP4+lwKJyIJ8KrTzLNR5gZe83lQJULhopS4GXMSA+bKp3h1KAdA2NIAxuYYzBLTSKi0tXMsJFUZHr02rtYjIpdcstcjI3bZqMHxwwQKn69ZXq3l2pAz66bU+Y4HqMbt28hyt369e7KnXeqmhz50pFDpDnP35cghmgVNu2UvlSSk7Gxo3zPLG76SalCsv/IZxXdrtUIrVQun69jKFyPxFs3lyqjosXS8jT5OdLVe7ZZ5W67DIJQw0auF5/jRpKXXONUp9/LgEkWPPmSTUyOTn4DwheeknacOON3m9/+WW5vVYtpfbtC75tVeXtt6WdPXp4v33fPlcl54IL5He/urHb5b2pjVt8/HHXMQuXqpv2HrvnHs/tzzzjep/df7/nbUVFStWpI7fNnCkfSnXsKN+/8ornvna7PDagVIcOnu/TSLFkiau3xIwZ8vvs/vd7dumhQX6dOmRRP9x3RE2uu0HNd4SGdKSrTztv93kfm82uti8pUDZbmPziVKKMHWY1LV5CykzdUrXup/INqP/u9sMqHelqWvwqv/ttW1igPm24yflzmKX7S313+2Gfx9pqsat5r5xQu5cF909v2pD9zpCoXaYmrlYLXjsR0P0txXb1zS2H1C+6v+R1XVlxZd+VZ8sJRRaaq4ylB1UBapcae7bq+q9UEVKUApQderU/9ny18vLJquBoYIPnVw39UilAFSNJZf/nOi/O2ZejipEkVbebZgT0WFm7Tikz5FPrVTf7+MQ6TDG4hTEGN/Lm1Cn5x9+1q1LvvCNdk8rDbldq0iQ5IQw0NGmhp1MnCTc7dsikEq++KiclcEw6oD3ekSPSNRGQSQkOH1Zq4EDXScvgwRJgAKV69fIdgqxWpTZsUOrDD5W6+WaZXOXnn2W7L2+8IY+r15c+MZo3Typw7gE4KUmpK6+U7qbaawnkotMpde65Sn33nZyAlmXTJlc1Urs89ZT3yWK80boXTpzo/faiIulOp53gV0R1sDJ06SJt/PBD3/usXy8/F0Cp668P7PhGk4UL5bUnJMj7PCNDegQA8kFJqBUXuyaXKflhjtks79PzzlMqJ6f0fZ94Qu53ySVK/fqr6z3orbp68qQr6Pn6vQ9n2odt993n2rZ4sfyt0X6+q/xnA6f1M/PUTMekEtplWvwqNW3IflWUX83eIH6UDG9rf3CFt/3ritRvzx1TvzxxVJ08UPoPr81mV/MnnFAzTCtUOtLVlIv2BPScC147oaY77pOOdDW57ga1b43nJ05rvs9VXzgqgrPxp5o/IbB/4DMfyXQ+7ozYFWrKRXvUvwvyA7pvSZm7zCr9nVMVGuJPbj2hLJBPbk/pznCGOPeApZRSB+bsVgdizvP4B1iEFLW59m1q1ZWfqV1TNyprsfd/7EcM3ZUC1IaG95a6TZuw5EDMeQG1VwuaOWji8/nCFYNbGGNwo3Bz/LhUcnyFmOuuKx0U1q1zdZ80meRrfLxSX3whty9cqFRqqmxv1kw+nZ43T6kPPlDqoYekelcy6GiXVq3kxD8vT4LooUNK/fabVMq0it6773p/LTk5Sn31lVK33ipjqUo+duPGUsn88EMJfmvWSBC1WOQk6/nnpaLpfp+OHWU8oK9AefiwK8j27avUo4+67nvRRWVXeu12V1uXL/e93549rhPqBx/0/5iBsFrl5zRihFRYliw5vYrPunWu7mJlffCwYIErSI8eLduysyUAz5kjYyr9tcVm8x4cIsEll8jrHjXKtU37nTn33NBX3X75RdrSsKH/D1G82bnT9cFHhw5
y/cknfe//6aeyT3Jy+arcZSkulr87Dz2k1LBhFdfrIj3dVW0rWQE3m10fZNWrp9R/ZUz4eGBDkXNs0nfG5WrKBf+pzb+V7+S9OsjcZVZfOsLbLN1SNbneBvWT/m+P0PsHFqvJDTaqH0cckTFjdxx2zhapdUPcnh74dMrmQpv68tJ9zsrYL7q/1E/3Z6hThyxqcvedzu6Nztk/ka5m3Oh9nJlm2efZ6nfHrKCTu+8M26rptuRrnP/QrIhRW9/0/U9q5+T1an3jkaoAdUr987UgXh3Vd1a74wc5L3tMFysFKBsMav+vO0s9nvt4tV1TN/ptp81qc4ZLrWtnJGFwC2MMbhSO3MfHJSRI9e3KK2WpAl9Vo5kzXUGqbVulNpb4u7p9u4Qwf5WtGjUkxI0bJyd4WtgD5Lr7NOLaZdSowE5ubTaZvvyNN+T17doV+EnxoUMS4lJSXM/bvr2caB4/7tovL88V9Nq2dQWWb791BduGDaWi+PTTEhh//VVOcLW2/Pef6ySwrCrp7Nmu9nzzTWCvxV1+vlLLlin18MOuLovul7ZtlXrzzcDH6RUUKLV7t0zScOON8hjXXhvYfb/6yrMyWrItt9/ufamKzZtlGv3YWPnZ+qrY7dkjleP8MDoH3rLFFWzcx6gePuzqkjxvnv/H2LhRukSXNaFQeWk/x0cfLd/9L77Y9TOMjZXX5ovN5qok3357cM9z/Lj8DRo9Wj5cuvtuqfi98opU8IYOLf171aZNxRw3rVvkyJHeb8/Odo3nbNfO9wcZ2ZlW9WXCapWOdPVNzD8qY0cE9hkNgaP/mUuN7VqAdDXdtEJ9HfOPx3b3y29Yoj7tukNtX1K+NXA2zMrzeF5tEpN0pKtPG2+S7pVnbHVV57rt8LqEwZ6Vhc6wObnexgpb5qAybHxmofMNtPLsCQHdx1JgUatv/UZtSx6qjuvbKyv8d3fZmXCZz8fSljEoRrLaHTdQrTznVbV72mZls3r+4V87cpYzIB7fEAGzeJUQqmygU0qp0MxnWfU+/FCmUc/IALp2lSnZA5liPicnBykpKcjOzkZycnLlN5QoQLt3y9Tt9erJ1O2B+PFHWWrhiSeApKTSt584AQwfDixcKNOBt2njunTvDnTuDBiNrv3z8mSx94kTZckCQBajbttW3md9+8qSBu73qUxZWcC77wLvvCPXAXnuiy6SxaV/+UWmc69TR6bzb9nSdd+tW4GhQ2UpBm9q15a/GYmJMjX6OefIEg9leeYZWdC4Rg1g8WL5z7djh0xLvm+fq40xMfL11Ck5lnv2yN8rd6mp0kYA+OYbID/fdf+6deVnmpQkz2UwyKLveXnyNSdHLiXNni3LRgTi9ddlOQxNrVqyTMXmzYDdDnTrJr9jZ5whr/OTT4BHHvFcbH7QIFlmoH59+f7UKeDFF2WKeosFSEkBbrtNfm+6dAmsXb4oJcdxxQo5dn36eP+9z8oCVq+W19Otm2tB9bvvBj77TKbX//FHz/s8/DDw3nvAeecBf/1V+j148KAs7zF1qrQjIQF4/nmZjj8mxrXfvn1yXOfNk9+pm26SYxQbizLl58v7v6BAfhfPOSeIg+Pw/feyDAsgx/yTT/zvv2IFcO65cn3ZMqBXL+/7HTwI/PmnHJs//5T3VyDq15ffx4UL5dg0aSLHpn17z/0KC+VnW6uWvJ99/Y1JT5f3f2wssGsXkJbmfb9Dh+R1HTwIXNjHjrkL9DC5zexuNStMbbYZLTNOIEdnRJv5Z6HdxQmBvSjC8X0W/DrsIEx1Y9B0QBK6XF0DyfUMsNsVNvycj/XvHUPcP8fQ0FyAEwYTCgY1xiX/a4g6zWLKfnA/igvs+PqK/WiyaB+MUDhqjEPKc60xaGxtAIDdrjBt8H40m78HAPBf7Zqod1cjnDUsBY06xiLnqA2/NF+HJoV5OGxKxKX/nYnURlX0D60c7DY7NrR5AgDQdccb0BuCX/nLkmfGnmmbkD1rNXQZRz1uUyYTWnx8O+p0q+f
1vlte+gutnrsSJmR5bM9DI+yrfTGKrhyCDq9disy0K9C8eCH+rXkj2p/8Oug2hlqoskG1CW7ffgsMGwZMmiRrLr3zjvyz2r5d/un5w+BG1ZFSgYdBALDZ5CQuIQHo0AGIj6+8tgUiO1ve7zNmABs3et5mMsnJnLcTzvx8YO5cYO9eYP9+12XrVqC42HPfBx6QD4DKYrXK+mhLlpTvtaSmApdcIif0AwfCeTKZmwt8/bWcaK9ZE/jjxcXJmm2NGkkQff11CXmB2rRJgkdamoRYAFi0CLjxRuDYMWnvpEnyN1YLO4MGyWt4+mkJcfXqyXphu3cD48cDJ0/KfrVry4cHmh49ZK3HU6dcl7y80m2qVQto2tR1iYmRYL50qWf4NRjkA4gLLpAPF9askX02b5bfeQBo2FCCw4UXyhqTZjPw999A796ez3n4sATU4mJZe/GssyRodugg/3PeeccVWFu1ktAAyO0ffSQB5dVXga++kt8RdzVrSkAfNAho3lxeU926pd+TX38N3HyzfACxc2dw71mN2SzHIiND3iutW5d9nzvvBKZMkWO5YoX83LdulcuqVRLU9u4tfb/27eXDnPbt5UME7WeakwN07ChreXbvLsH54EH5ff/3X/m9+P13OcZLlgDTpsmHJ7m5nsesXj35nbn2Wtd75YILJDzef798gOvPpk3Apb3N+CxvOQ4mJAGdUtCgfwrOGpaMOTfvR8u1B2GGDvH/64bzR6YEdZwpMIc2m1HnDCNMCRW71PC6H/Owa3YuLn2rHhJrlf6D98tjRxH79jbnWnsAkBkTD4vRgCaFecjWxaDDn2ehdZ8Q/3OLAJY8M/59cQms389Fo/1LUM+2EXq4/shZEQ8DiqGDHdveW4l2D5bjE6cQY3CrZD17yieR2qKjdrucdDz4oJxI+MPgRhTZduyQk7zvv5dq2ldfuapWgTKbpVK5YoUEgiNH5CSwXbvA7p+RIX+H9u8HGjRwVTHPOEPChNXquiQkyIn4GWfIpWbNsh9//34JPO4VNpvNswKXlCRhISWlfCf4ZTl4UE6YV6xwbYuJkWrjo4/KyfiWLRLwNm/2vG/HjsBbb0kAWrAA+PRTWfi7ZKApj5gY4MwzgePHXVVhb1q0AI4edVUxNb16yYcS3jz3HPDSS74f8/zzpadHjx4SNh57TNoByM9A+w/cv79U95Yvl9BXstIKSAhp0kR+hnXrSkj55x8JG889B7zwgu92lCUzU173GWcEvn/btvIBSVKSZ4DS6PUStM4/X8Janz5SGQvG8ePApZdKGKxRQ94L7guG16gh7fZ2JpOUJM/9229Sbdu9W45fWRa+fByGZzf7vD3rwfa46r36wb0QighrvsvDunGHkPBfDhoU50OLjmbokDCpG/rcx7BeHtm7s7Dz6dmoMf8XNM9ehDjIp3OHjT3QyLKijHuHJwa3SmQ2y4nQDz8AV13l2j58uHSPmTXLc//i4mIUu320npOTg7S0NAY3oigQbCWxIhUXyyWa/4wUFwOjRwP/+59Umb7+Gjj7bM99CguBxx+XferVk26Sd95ZuqtbZqZU7IqK5IRduyQlef4MlZKKj3uFNC9PnrdPH/mqVYD375eKzZIlciJ/5pmyT+/eEqiLi+W2X3+VLqQHDkgFdsAA36955Upg7VoJ9hs3SpBq0UIC3eWXe7b15EnpOvvJJ9LuK6+U73v2dO1js0kbvv1WHnP/fgly/v5bb91auithZfvgA/nwE5DX2LKlVBO7dJHA1KuX926pwcrNlf/dixbJ9ykp0rXzttukm6pSclyPHZMPD377Tf7fHzrkeoxAq+MAoJTCnqWFWDM1B9lLs5G4JxsNzQUAgH0DW2D4H81O/0VR2Du2x4I1X+YgY3Euml6egoseC+ATNCqTzWzDv6/9jaKf/0KDl25Ck0sD/LQozDC4VaLDh4HGjUv3xX/ySfnnuKJE2B83bhzGjx9f6nEY3IiIArNrl1Q34uJ877Nzp3TX1Lpbhhu
lpOIXc3pDbLzaulUqrW3bBra/2SxB5OBBqQoeO+a6dOki1bqqppR0M01KktdRmd2ji4qk+22TJhKG/f1eAdKrZsUKqbJnZMhYxGCrfe4yd1qQ+a8ZXa4M019WIqpSDG6VKNjgxoobERERERF5E6rgFr7T4lSgOnXkk83MTM/tmZnSNaYkk8kEk/tUUkRERERERCFUsVP2hKnYWJmlauFC1za7Xb73NY0xERERERFRuKgWFTdABssPHy6D1Hv0kGma8/NlqmciIiIiIqJwVm2C2w03yCDusWNloHK3brImTH3O6EtERERERGGuWkxOcrq4jhsREREREQGhywbVYowbERERERFRJGNwIyIiIiIiCnMMbkRERERERGGOwY2IiIiIiCjMMbgRERERERGFOQY3IiIiIiKiMFdt1nE7HdqKCTk5OSFuCRERERERhZKWCap6VTUGtwDk5uYCANLS0kLcEiIiIiIiCge5ublISUmpsufjAtwBsNvtOHz4MJKSkqDT6ULdnEqTk5ODtLQ0HDhwgAuNlwOPX/nwuJUPj1v58diVH49d+fC4lR+PXfnx2AUv0GOmlEJubi4aNWoEvb7qRp6x4hYAvV6PJk2ahLoZVSY5OZlv8NPA41c+PG7lw+NWfjx25cdjVz48buXHY1d+PHbBC+SYVWWlTcPJSYiIiIiIiMIcgxsREREREVGYY3AjJ5PJhOeffx4mkynUTYlIPH7lw+NWPjxu5cdjV348duXD41Z+PHblx2MXvHA/ZpychIiIiIiIKMyx4kZERERERBTmGNyIiIiIiIjCHIMbERERERFRmGNwIyIiIiIiCnMMbhFgwoQJOOecc5CUlIR69erhqquuwvbt2z32KSoqwqhRo1C7dm3UqFEDQ4cORWZmpvP2DRs24KabbkJaWhri4+PRvn17vPvuux6P8dNPP2HAgAGoW7cukpOT0atXL/zxxx9ltk8phbFjx6Jhw4aIj49H//79sXPnTo99mjdvDp1O53F59dVXT+OoBCYajt3atWsxYMAApKamonbt2rj33nuRl5d3GkclMFV17JYuXYrzzjsPtWvXRnx8PNq1a4eJEyeW2b5Ajt3LL7+M3r17IyEhAampqeU/GAGKhmMW7e/Vyjx20f5edff333/DaDSiW7duZbYvHN+rQHQct2h/v7qr6GMX7e/XxYsXl/rd0Ol0yMjI8Nu+6v5+rczjViHvV0Vhb9CgQWrKlClq8+bNav369erSSy9VTZs2VXl5ec59RowYodLS0tTChQvV6tWr1bnnnqt69+7tvP2zzz5TDz30kFq8eLHavXu3mjZtmoqPj1fvv/++c5+HH35Yvfbaa2rlypVqx44dasyYMSomJkatXbvWb/teffVVlZKSombOnKk2bNigrrzyStWiRQtVWFjo3KdZs2bqhRdeUEeOHHFe3NtfWSL92B06dEjVrFlTjRgxQm3btk2tXLlS9e7dWw0dOrSCj1RpVXXs1q5dq2bMmKE2b96s9uzZo6ZNm6YSEhLUxx9/7Ld9gfzejR07Vr399ttq9OjRKiUlpeIOjg/RcMyi/b1aWceuOrxXNadOnVJnnHGGGjhwoOratWuZ7QvH96pS0XHcov39qqnoY1cd3q/p6ekKgNq+fbvH74fNZvPbvur+fq3M41YR71cGtwh09OhRBUAtWbJEKaVUVlaWiomJUd9//71zn3///VcBUMuXL/f5OPfff7+68MIL/T5Xhw4d1Pjx433ebrfbVYMGDdQbb7zh3JaVlaVMJpP6+uuvnduaNWumJk6cWNZLq3SRduw+/vhjVa9ePY8/GBs3blQA1M6dO/2/2ApWlcfu6quvVrfeeqvP2wP9vdNMmTKlyv65uIvEY1Yd36sVceyq03v1hhtuUM8++6x6/vnnyzyJjpT3qlKRedyqy/u1oo9ddXi/agHk1KlTAbeF79fKPW4V8X5lV8kIlJ2dDQCoVasWAGDNmjWwWCzo37+/c5927dqhadOmWL5
8ud/H0R7DG7vdjtzcXL/77NmzBxkZGR7PnZKSgp49e5Z67ldffRW1a9fGmWeeiTfeeANWq9X/C60EkXbsiouLERsbC73e9VaNj48HIF2+qlJVHbt169Zh2bJluOCCC3zuE8zvXShF6jGrTu/Vijp21eW9OmXKFPz33394/vnnA2pLpLxXgcg9btH+fq2MY1dd3q8A0K1bNzRs2BADBgzA33//7bctfL+6VNZxO933qzGovSnk7HY7HnnkEZx33nno1KkTACAjIwOxsbGl+hnXr1/fZ5/cZcuW4dtvv8WcOXN8Ptebb76JvLw8XH/99T730R6/fv36fp/7oYcewllnnYVatWph2bJlGDNmDI4cOYK3337b7+utSJF47C666CKMHj0ab7zxBh5++GHk5+fj6aefBgAcOXLE/wuuQFVx7Jo0aYJjx47BarVi3LhxuPvuu322J9Dfu1CK1GNWXd6rFX3sqsN7defOnXj66afx119/wWgM7PQhEt6rQOQet2h/v1bWsasO79eGDRti0qRJOPvss1FcXIzJkyejX79+WLFiBc466yyvj8P3a+Uet4p4vzK4RZhRo0Zh8+bNp/WJ0ObNmzFkyBA8//zzGDhwoNd9ZsyYgfHjx2PWrFmoV68eAGD69Om47777nPvMnTsXBoMhoOccPXq083qXLl0QGxuL++67DxMmTIDJZCr3awlGJB67jh07YurUqRg9ejTGjBkDg8GAhx56CPXr1/f4pLCyVcWx++uvv5CXl4d//vkHTz/9NFq1aoWbbrrptH7vQilSj1l1ea9W9LGL9veqzWbDzTffjPHjx6NNmzZe7xep71Ugco9bNL9fK/PYRfv7FQDatm2Ltm3bOr/v3bs3du/ejYkTJ2LatGl8v4bguFXI+/W0OlpSlRo1apRq0qSJ+u+//zy2L1y40Gt/3KZNm6q3337bY9uWLVtUvXr11DPPPOPzeb7++msVHx+vZs+e7bE9JydH7dy503kpKChQu3fvVgDUunXrPPbt27eveuihh3w+x+bNmxUAtW3bNj+vuOJEw7HLyMhQubm5Ki8vT+n1evXdd98F8MpPX1UdO3cvvviiatOmjVKqYo5dVffDj4ZjponW96q7ij520fhePXXqlAKgDAaD86LT6ZzbFi5cGJHvVaWi47hpoun9WlXHLhrfr748/vjj6txzz1VKReb/VqWi47hpyvN+ZXCLAHa7XY0aNUo1atRI7dixo9Tt2oDMH374wblt27ZtpQZkbt68WdWrV0898cQTPp9rxowZKi4uTs2cOTPgtjVo0EC9+eabzm3Z2dk+B7JqvvrqK6XX69XJkycDep7yisZj99lnn6mEhISgBs6WR1Ueu5LGjx+vmjVr5rdtwRy7qvrnEk3HTBON79WSKuvYRdN71WazqU2bNnlcRo4cqdq2bas2bdrkc2a0cH2vam2LluOmiab3a1Ufu2h6v/rSv39/dfXVV/ttW3V+v/pS0cdNU573K4NbBBg5cqRKSUlRixcv9phCtKCgwLnPiBEjVNOmTdWiRYvU6tWrVa9evVSvXr2ct2/atEnVrVtX3XrrrR6PcfToUec+06dPV0ajUX344Yce+2RlZflt36uvvqpSU1PVrFmz1MaNG9WQIUM8pkBdtmyZmjhxolq/fr3avXu3+uqrr1TdunXVsGHDKvhIlRbpx04ppd5//321Zs0atX37dvXBBx+o+Ph49e6771bgUfKuqo7dBx98oH755Re1Y8cOtWPHDjV58mSVlJSk/u///s9v+wI5dvv27VPr1q1T48ePVzVq1FDr1q1T69atU7m5uRV4pFwi/ZhVh/dqZf6+Rft7taRAZvhTKjzfq0pF/nGrDu/Xkirydy7a368TJ05UM2fOVDt37lSbNm1SDz/8sNLr9WrBggV+21fd36+Vddwq6v3K4BYBAHi9TJkyxblPYWGhuv/++1XNmjVVQkKCuvrqq9WRI0ectz///PNeH8P9U+YLLrjA6z7Dhw/
32z673a6ee+45Vb9+fWUymdTFF1+stm/f7rx9zZo1qmfPniolJUXFxcWp9u3bq1deeUUVFRVV1CHyKdKPnVJK3XbbbapWrVoqNjZWdenSRX355ZcVcWjKVFXH7r333lMdO3ZUCQkJKjk5WZ155pnqf//7X5lrpgRy7IYPH+71+dPT0yviEJUS6cesOrxXK/P3LdrfqyUFehIdju9VpSL/uFWH92tJFfk7F+3v19dee021bNlSxcXFqVq1aql+/fqpRYsWldm+6v5+razjVlHvV53jYBAREREREVGY4jpuREREREREYY7BjYiIiIiIKMwxuBEREREREYU5BjciIiIiIqIwx+BGREREREQU5hjciIiIiIiIwhyDGxERERERUZhjcCMiIiIiIgpzDG5ERFTt3H777dDpdNDpdIiJiUH9+vUxYMAAfP7557Db7QE/zhdffIHU1NTKaygREZEDgxsREVVLgwcPxpEjR7B3717MnTsXF154IR5++GFcfvnlsFqtoW4eERGRBwY3IiKqlkwmExo0aIDGjRvjrLPOwjPPPINZs2Zh7ty5+OKLLwAAb7/9Njp37ozExESkpaXh/vvvR15eHgBg8eLFuOOOO5Cdne2s3o0bNw4AUFxcjMcffxyNGzdGYmIievbsicWLF4fmhRIRUVRgcCMiInK46KKL0LVrV/z0008AAL1ej/feew9btmzB1KlTsWjRIjz55JMAgN69e+Odd95BcnIyjhw5giNHjuDxxx8HADzwwANYvnw5vvnmG2zcuBHXXXcdBg8ejJ07d4bstRERUWTTKaVUqBtBRERUlW6//XZkZWVh5syZpW678cYbsXHjRmzdurXUbT/88ANGjBiB48ePA5Axbo888giysrKc++zfvx9nnHEG9u/fj0aNGjm39+/fHz169MArr7xS4a+HiIiinzHUDSAiIgonSinodDoAwIIFCzBhwgRs27YNOTk5sFqtKCoqQkFBARISErzef9OmTbDZbGjTpo3H9uLiYtSuXbvS209ERNGJwY2IiMjNv//+ixYtWmDv3r24/PLLMXLkSLz88suoVasWli5dirvuugtms9lncMvLy4PBYMCaNWtgMBg8bqtRo0ZVvAQiIopCDG5EREQOixYtwqZNm/Doo49izZo1sNvteOutt6DXy5Dw7777zmP/2NhY2Gw2j21nnnkmbDYbjh49ivPPP7/K2k5ERNGNwY2IiKql4uJiZGRkwGazITMzE7///jsmTJiAyy+/HMOGDcPmzZthsVjw/vvv44orrsDff/+NSZMmeTxG8+bNkZeXh4ULF6Jr165ISEhAmzZtcMstt2DYsGF46623cOaZZ+LYsWNYuHAhunTpgssuuyxEr5iIiCIZZ5UkIqJq6ffff0fDhg3RvHlzDB48GOnp6Xjvvfcwa9YsGAwGdO3aFW+//TZee+01dOrUCdOnT8eECRM8HqN3794YMWIEbrjhBtStWxevv/46AGDKlCkYNmwYHnvsMbRt2xZXXXUVVq1ahaZNm4bipRIRURTgrJJERERERERhjhU3IiIiIiKiMMfgRkREREREFOYY3IiIiIiIiMIcgxsREREREVGYY3AjIiIiIiIKcwxuREREREREYY7BjYiIiIiIKMwxuBEREREREYU5BjciIiIiIqIwx+BGREREREQU5hjciIiIiIiIwhyDGxERERERUZj7f0BEwK3V7KgaAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "SCORE_COL = \"score_agg\"\n", + "TIME_COLUMN = \"week\"\n", + "\n", + "fig, ax = plt.subplots(figsize=(10,4))\n", + "ax.set_xlabel('Date')\n", + "ax.set_ylabel(\"score agg\", color ='b')\n", + "ax.tick_params(axis='y', labelcolor = 'b')\n", + "\n", + "for e, key in enumerate(trend_hist_dict.keys()):\n", + " colors = ['b','m','r']\n", + " ax.plot(\n", + " trend_hist_dict[key][TIME_COLUMN], \n", + " trend_hist_dict[key][SCORE_COL], \n", + " color = colors[e], label = key\n", + " )\n", + "\n", + "plt.suptitle(f\"Search term: {TREND_TERM}\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### helper functions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from google.cloud.bigquery import QueryJob\n", + "from google.cloud.bigquery.table import RowIterator\n", + "\n", + "\n", + "def _query_bq(query_string: str) -> QueryJob:\n", + " \"\"\"\n", + "\n", + " :param query_string: str\n", + " Full SQL query string to execute against BigQuery\n", + "\n", + " :return: google.cloud.bigquery.job.QueryJob\n", + " \"\"\"\n", + " return bq_client.query(query_string)\n", + "\n", + "def pull_term_data_from_bq(\n", + " term: tuple = ('mascara', 'makeup')\n", + " , project_id='bigquery-public-data'\n", + " , dataset='google_trends'\n", + " , table_id='top_rising_terms'\n", + " ) -> RowIterator:\n", + " \"\"\"\n", + " Pull terms based on `in` sql clause from term\n", + " takes a tuple of terms (str) and produces pandas dataset\n", + "\n", + " :param term: tuple(str)\n", + " A tuple of terms to query for\n", + " :param project_id: str\n", + " project_id that holds the dataset.\n", + " :param dataset: str\n", + " dataset name that holds the table.\n", + " :param table_id: str\n", + " table name\n", + "\n", + " :return: 
google.cloud.bigguqery.table.RowIterator\n", + " \"\"\"\n", + " query = f\"\"\"\n", + " SELECT\n", + " week,\n", + " term,\n", + " rank\n", + " FROM `{project_id}.{dataset}.{table_id}`\n", + " WHERE\n", + " lower(term) in {term}\n", + " order by term, 1\n", + " \"\"\"\n", + " return _query_bq(query_string=query).result()\n", + "\n", + "def pull_term_data_from_bq(\n", + " term: tuple = ('mascara', 'makeup')\n", + " , project_id='bigquery-public-data'\n", + " , dataset='google_trends'\n", + " , table_id='top_rising_terms'\n", + " ) -> RowIterator:\n", + " \"\"\"\n", + " Pull terms based on `in` sql clause from term\n", + " takes a tuple of terms (str) and produces pandas dataset\n", + "\n", + " :param term: tuple(str)\n", + " A tuple of terms to query for\n", + " :param project_id: str\n", + " project_id that holds the dataset.\n", + " :param dataset: str\n", + " dataset name that holds the table.\n", + " :param table_id: str\n", + " table name\n", + "\n", + " :return: google.cloud.bigguqery.table.RowIterator\n", + " \"\"\"\n", + " query = f\"\"\"\n", + " SELECT\n", + " week,\n", + " term,\n", + " rank\n", + " FROM `{project_id}.{dataset}.{table_id}`\n", + " WHERE\n", + " lower(term) in {term}\n", + " order by term, 1\n", + " \"\"\"\n", + "\n", + " return _query_bq(query_string=query).result()\n", + "\n", + "\n", + "def pull_term_data_from_bq_df(\n", + " term: tuple = ('mascara', 'makeup')\n", + " , project_id='bigquery-public-data'\n", + " , dataset='google_trends'\n", + " , table_id='top_rising_terms'\n", + " ) -> pd.DataFrame:\n", + " \"\"\"\n", + " Pull terms based on `in` sql clause from term\n", + " takes a tuple of terms (str) and produces pandas dataset\n", + "\n", + " :param term: tuple(str)\n", + " A tuple of terms to query for\n", + " :param project_id: str\n", + " project_id that holds the dataset.\n", + " :param dataset: str\n", + " dataset name that holds the table.\n", + " :param table_id: str\n", + " table name\n", + "\n", + " :return: 
pandas.DataFrame\n", + " \"\"\"\n", + " result = pull_term_data_from_bq(term, project_id, dataset, table_id)\n", + "\n", + " return result.to_dataframe()\n", + "\n", + "# extract trends from BQ, convert to pandas dataframe\n", + "def pull_regexp_term_data_from_bq(\n", + " term: str\n", + " , project_id='bigquery-public-data'\n", + " , dataset='google_trends'\n", + " , table_id='top_rising_terms'\n", + " ) -> RowIterator:\n", + " \"\"\"\n", + " Pull terms based on `in` sql clause from term\n", + " takes a tuple of terms (str) and produces pandas dataset\n", + "\n", + " :param term: tuple(str)\n", + " A tuple of terms to query for\n", + " :param project_id: str\n", + " project_id that holds the dataset.\n", + " :param dataset: str\n", + " dataset name that holds the table.\n", + " :param table_id: str\n", + " table name\n", + "\n", + " :return: google.cloud.bigguqery.table.RowIterator\n", + " \"\"\"\n", + " query = f\"\"\"\n", + " SELECT\n", + " week,\n", + " term,\n", + " rank\n", + " FROM `{project_id}.{dataset}.{table_id}`\n", + " WHERE (\n", + " REGEXP_CONTAINS(LOWER(term), r'{term}')\n", + " )\n", + " order by term\n", + " \"\"\"\n", + " return _query_bq(query_string=query).result()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# YouTube Data API" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "references\n", + "* API reference: [parameters](https://developers.google.com/youtube/v3/docs/videos/list#parameters)\n", + "* video resource [JSON structure](https://developers.google.com/youtube/v3/docs/videos#resource-representation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### config" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from vertexai.generative_models import GenerativeModel, Part\n", + "\n", + "# Set Gemini Flash and Pro models to be used in this notebook\n", + "GEMINI_FLASH_MODEL_ID = 
\"gemini-1.5-flash-002\"\n", + "GEMINI_PRO_MODEL_ID = \"gemini-1.5-pro-002\"\n", + "\n", + "gemini_flash_model = GenerativeModel(GEMINI_FLASH_MODEL_ID)\n", + "gemini_pro_model = GenerativeModel(GEMINI_PRO_MODEL_ID)\n", + "\n", + "# Disable OAuthlib's HTTPS verification when running locally.\n", + "# *DO NOT* leave this option enabled in production.\n", + "os.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "_SECRET_NAME: projects/hybrid-vertex/secrets/projects/934903580331/secrets/yt-data-api\n", + "\n" + ] + } + ], + "source": [ + "import pandas as pd\n", + "from IPython.display import HTML\n", + "\n", + "# import google_auth_oauthlib.flow\n", + "from google.cloud import secretmanager\n", + "import googleapiclient.discovery\n", + "import googleapiclient.errors\n", + "\n", + "scopes = [\"https://www.googleapis.com/auth/youtube.readonly\"]\n", + "sm_client = secretmanager.SecretManagerServiceClient()\n", + "\n", + "API_SERVICE_NAME = \"youtube\"\n", + "API_VERSION = \"v3\"\n", + "\n", + "_SECRET_ID = 'projects/934903580331/secrets/yt-data-api'\n", + "_SECRET_VERSION = '{}/versions/1'.format(_SECRET_ID)\n", + "_SECRET_NAME = sm_client.secret_path(PROJECT_ID, _SECRET_ID)\n", + "print(f\"_SECRET_NAME: {_SECRET_NAME}\\n\")\n", + "\n", + "response = sm_client.access_secret_version(request={\"name\": _SECRET_VERSION})\n", + "# print(f\"response: {response}\")\n", + "\n", + "YOUTUBE_DATA_API_KEY = response.payload.data.decode(\"UTF-8\")\n", + "# print(f\"YOUTUBE_DATA_API_KEY: {YOUTUBE_DATA_API_KEY}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### yt discovery client\n", + "\n", + "> see [src](https://github.com/googleapis/google-api-python-client/blob/main/googleapiclient/discovery.py)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { 
+ "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "youtube = googleapiclient.discovery.build(\n", + " serviceName=API_SERVICE_NAME, \n", + " version=API_VERSION, \n", + " developerKey=YOUTUBE_DATA_API_KEY\n", + ")\n", + "\n", + "youtube" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Search YouTube" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**duration** (minutes): ['any', 'long', 'medium', 'short']\n", + "* short: (-inf, 4)\n", + "* medium: [4, 20]\n", + "* long: (20, inf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "search_query: time travel\n", + "channelId: UC9MAhZQQd9egwWCxrwSIsJQ\n", + "video_duration_type: medium\n", + "published_after: 2025-01-26T20:00:36.471857+00:00\n", + "order_criteria: relevance\n", + "num_results: 5\n" + ] + } + ], + "source": [ + "TARGET_QUERY = \"time travel\"\n", + "\n", + "# CHANNEL_ID = \"UCvW9uSNy6Lytcnib1CdXrow\" # TPB\n", + "CHANNEL_ID = \"UC9MAhZQQd9egwWCxrwSIsJQ\" # AA\n", + "\n", + "NUM_RESULTS = 5\n", + "VIDEO_DURATION = \"medium\"\n", + "\n", + "# order resources in API response\n", + "ORDER_CRITERIA = \"relevance\" # ['date', 'rating', 'relevance', 'title', 'viewCount']\n", + "\n", + "MAX_DAYS_AGO = 60\n", + "PUBLISHED_AFTER_TIMESTAMP = (\n", + " (pd.Timestamp.now() - pd.DateOffset(days=MAX_DAYS_AGO))\n", + " .tz_localize(\"UTC\")\n", + " .isoformat()\n", + ")\n", + "\n", + "print(f\"search_query: {TARGET_QUERY}\")\n", + "print(f\"channelId: {CHANNEL_ID}\")\n", + "print(f\"video_duration_type: {VIDEO_DURATION}\")\n", + "print(f\"published_after: {PUBLISHED_AFTER_TIMESTAMP}\")\n", + "print(f\"order_criteria: {ORDER_CRITERIA}\")\n", + "print(f\"num_results: {NUM_RESULTS}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + 
"### youtube search parameters\n", + "\n", + "* `mostPopular` - Return the most popular videos for the specified content region and video category\n", + "\n", + "* see options for `part` parameter:\n", + " * [contentDetails](https://developers.google.com/youtube/v3/docs/videos#contentDetails)\n", + " * [statisitcs](https://developers.google.com/youtube/v3/docs/videos#statistics)\n", + " * [topicDetails](https://developers.google.com/youtube/v3/docs/videos#topicDetails)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
publishedAtvideoIdvideoTitlevideoDescriptionvideoThumbnailvideoURLchannelTitlechannelId
02025-03-25T15:01:18ZvaMi79TmqJsMASSIVE Investigations Launched into Hindenburg Disaster | History's Most Shocking (S1)The Hindenburg, the pride of Germany, explodes in flames while flying over New Jersey in 1937, sparking a mystery. See more in ...https://i.ytimg.com/vi/vaMi79TmqJs/hqdefault.jpghttps://www.youtube.com/watch?v=vaMi79TmqJsHISTORYUC9MAhZQQd9egwWCxrwSIsJQ
12025-03-23T18:00:10ZHWz-7gQiLBQInvestigating Cosmic Mysteries | Ancient AliensFollow David Childress as he investigates mysteries around the world. See more in this Ancient Aliens compilation. Watch all new ...https://i.ytimg.com/vi/HWz-7gQiLBQ/hqdefault.jpghttps://www.youtube.com/watch?v=HWz-7gQiLBQHISTORYUC9MAhZQQd9egwWCxrwSIsJQ
22025-03-17T18:00:06Z6Em9JoAuOyY3 Groundbreaking Gold Finds | The Curse of Oak IslandThe crew investigates major finds that could be made of gold. See more in this The Curse of Oak Island compilation. 0:00 Testing ...https://i.ytimg.com/vi/6Em9JoAuOyY/hqdefault.jpghttps://www.youtube.com/watch?v=6Em9JoAuOyYHISTORYUC9MAhZQQd9egwWCxrwSIsJQ
" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Using Search:list - https://developers.google.com/youtube/v3/docs/search/list\n", + "yt_data_api_request = youtube.search().list(\n", + " part=\"id,snippet\",\n", + " type=\"video\",\n", + " q=TARGET_QUERY,\n", + " videoDuration=VIDEO_DURATION,\n", + " maxResults=NUM_RESULTS,\n", + " publishedAfter=PUBLISHED_AFTER_TIMESTAMP,\n", + " channelId=CHANNEL_ID,\n", + " order=ORDER_CRITERIA,\n", + " videoCaption=\"closedCaption\" # only include videos with CC?\n", + ")\n", + "yt_data_api_response = yt_data_api_request.execute()\n", + "\n", + "# prepare results dataframe\n", + "data_dict = {}\n", + "i=1\n", + "\n", + "for video in yt_data_api_response['items']:\n", + " row_name = f\"row{i}\"\n", + " data_dict.update(\n", + " {\n", + " row_name: {\n", + " 'publishedAt': video['snippet']['publishedAt'],\n", + " 'videoId': video['id']['videoId'], \n", + " 'videoTitle': video['snippet']['title'],\n", + " 'videoDescription': video['snippet']['description'],\n", + " 'videoThumbnail': video['snippet']['thumbnails']['high']['url'],\n", + " 'videoURL': f\"https://www.youtube.com/watch?v={video['id']['videoId']}\",\n", + " 'channelTitle': video['snippet']['channelTitle'],\n", + " 'channelId': video['snippet']['channelId'],\n", + " }\n", + " }\n", + " )\n", + " i+=1\n", + "\n", + "yt_df = pd.DataFrame.from_dict(data_dict, orient='index')\n", + "yt_df = yt_df.reset_index(drop=True)\n", + "\n", + "HTML(yt_df.to_html(render_links=True, escape=False))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'etag': 'jisC5xfUqF3p86EwzB_7k-JZMO4',\n", + " 'items': [{'etag': 'DDj68gUpa-kr1vdX1STM5SxgPnU',\n", + " 'id': {'kind': 'youtube#video', 'videoId': 'NxpZ_g3sMCw'},\n", + " 'kind': 'youtube#searchResult',\n", + " 'snippet': {'channelId': 
'UC9MAhZQQd9egwWCxrwSIsJQ',\n", + " 'channelTitle': 'HISTORY',\n", + " 'description': 'Check out these groundbreaking time '\n", + " 'travel theories. See more in this '\n", + " 'Ancient Aliens compilation. 1) 0:00 2) '\n", + " '17:36 Watch all new ...',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-11T18:00:16Z',\n", + " 'publishedAt': '2025-03-11T18:00:16Z',\n", + " 'thumbnails': {'default': {'height': 90,\n", + " 'url': 'https://i.ytimg.com/vi/NxpZ_g3sMCw/default.jpg',\n", + " 'width': 120},\n", + " 'high': {'height': 360,\n", + " 'url': 'https://i.ytimg.com/vi/NxpZ_g3sMCw/hqdefault.jpg',\n", + " 'width': 480},\n", + " 'medium': {'height': 180,\n", + " 'url': 'https://i.ytimg.com/vi/NxpZ_g3sMCw/mqdefault.jpg',\n", + " 'width': 320}},\n", + " 'title': 'Time Travel Secrets Exposed! | Ancient '\n", + " 'Aliens'}},\n", + " {'etag': 'wIzrIdOnSGPbD3Hny7kzYwANxQY',\n", + " 'id': {'kind': 'youtube#video', 'videoId': '-UfqVbm3bGY'},\n", + " 'kind': 'youtube#searchResult',\n", + " 'snippet': {'channelId': 'UC9MAhZQQd9egwWCxrwSIsJQ',\n", + " 'channelTitle': 'HISTORY',\n", + " 'description': 'Highly advanced mathematical theories '\n", + " 'are discussed in this Ancient Aliens '\n", + " 'compilation. 
Watch all new episodes '\n", + " 'Fridays 9/8c; ...',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-06T19:00:00Z',\n", + " 'publishedAt': '2025-03-06T19:00:00Z',\n", + " 'thumbnails': {'default': {'height': 90,\n", + " 'url': 'https://i.ytimg.com/vi/-UfqVbm3bGY/default.jpg',\n", + " 'width': 120},\n", + " 'high': {'height': 360,\n", + " 'url': 'https://i.ytimg.com/vi/-UfqVbm3bGY/hqdefault.jpg',\n", + " 'width': 480},\n", + " 'medium': {'height': 180,\n", + " 'url': 'https://i.ytimg.com/vi/-UfqVbm3bGY/mqdefault.jpg',\n", + " 'width': 320}},\n", + " 'title': 'The Most Mysterious Theories in Mathematics '\n", + " '| Ancient Aliens'}},\n", + " {'etag': 'iW7s-pm-T-gLzG3Ui3GlVOPzhTQ',\n", + " 'id': {'kind': 'youtube#video', 'videoId': 'vaMi79TmqJs'},\n", + " 'kind': 'youtube#searchResult',\n", + " 'snippet': {'channelId': 'UC9MAhZQQd9egwWCxrwSIsJQ',\n", + " 'channelTitle': 'HISTORY',\n", + " 'description': 'The Hindenburg, the pride of Germany, '\n", + " 'explodes in flames while flying over '\n", + " 'New Jersey in 1937, sparking a '\n", + " 'mystery. 
See more in ...',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-25T15:01:18Z',\n", + " 'publishedAt': '2025-03-25T15:01:18Z',\n", + " 'thumbnails': {'default': {'height': 90,\n", + " 'url': 'https://i.ytimg.com/vi/vaMi79TmqJs/default.jpg',\n", + " 'width': 120},\n", + " 'high': {'height': 360,\n", + " 'url': 'https://i.ytimg.com/vi/vaMi79TmqJs/hqdefault.jpg',\n", + " 'width': 480},\n", + " 'medium': {'height': 180,\n", + " 'url': 'https://i.ytimg.com/vi/vaMi79TmqJs/mqdefault.jpg',\n", + " 'width': 320}},\n", + " 'title': 'MASSIVE Investigations Launched into '\n", + " 'Hindenburg Disaster | History's Most '\n", + " 'Shocking (S1)'}},\n", + " {'etag': '_3YLYtOWRryNiKSErZxhk_FW5TQ',\n", + " 'id': {'kind': 'youtube#video', 'videoId': 'HWz-7gQiLBQ'},\n", + " 'kind': 'youtube#searchResult',\n", + " 'snippet': {'channelId': 'UC9MAhZQQd9egwWCxrwSIsJQ',\n", + " 'channelTitle': 'HISTORY',\n", + " 'description': 'Follow David Childress as he '\n", + " 'investigates mysteries around the '\n", + " 'world. See more in this Ancient Aliens '\n", + " 'compilation. 
Watch all new ...',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-23T18:00:10Z',\n", + " 'publishedAt': '2025-03-23T18:00:10Z',\n", + " 'thumbnails': {'default': {'height': 90,\n", + " 'url': 'https://i.ytimg.com/vi/HWz-7gQiLBQ/default.jpg',\n", + " 'width': 120},\n", + " 'high': {'height': 360,\n", + " 'url': 'https://i.ytimg.com/vi/HWz-7gQiLBQ/hqdefault.jpg',\n", + " 'width': 480},\n", + " 'medium': {'height': 180,\n", + " 'url': 'https://i.ytimg.com/vi/HWz-7gQiLBQ/mqdefault.jpg',\n", + " 'width': 320}},\n", + " 'title': 'Investigating Cosmic Mysteries | Ancient '\n", + " 'Aliens'}},\n", + " {'etag': 'aqmJd-7STYG8PMslRg-p9OBfoSc',\n", + " 'id': {'kind': 'youtube#video', 'videoId': '6Em9JoAuOyY'},\n", + " 'kind': 'youtube#searchResult',\n", + " 'snippet': {'channelId': 'UC9MAhZQQd9egwWCxrwSIsJQ',\n", + " 'channelTitle': 'HISTORY',\n", + " 'description': 'The crew investigates major finds that '\n", + " 'could be made of gold. See more in '\n", + " 'this The Curse of Oak Island '\n", + " 'compilation. 
0:00 Testing ...',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-17T18:00:06Z',\n", + " 'publishedAt': '2025-03-17T18:00:06Z',\n", + " 'thumbnails': {'default': {'height': 90,\n", + " 'url': 'https://i.ytimg.com/vi/6Em9JoAuOyY/default.jpg',\n", + " 'width': 120},\n", + " 'high': {'height': 360,\n", + " 'url': 'https://i.ytimg.com/vi/6Em9JoAuOyY/hqdefault.jpg',\n", + " 'width': 480},\n", + " 'medium': {'height': 180,\n", + " 'url': 'https://i.ytimg.com/vi/6Em9JoAuOyY/mqdefault.jpg',\n", + " 'width': 320}},\n", + " 'title': '3 Groundbreaking Gold Finds | The Curse of '\n", + " 'Oak Island'}}],\n", + " 'kind': 'youtube#searchListResponse',\n", + " 'nextPageToken': 'CAUQAA',\n", + " 'pageInfo': {'resultsPerPage': 5, 'totalResults': 14},\n", + " 'regionCode': 'ZZ'}\n" + ] + } + ], + "source": [ + "# yt_data_api_response['items'][0]\n", + "# \n", + "# # pprint(yt_data_api_response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get summary from Gemini for each video" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
publishedAtvideoIdvideoTitlevideoDescriptionvideoThumbnailvideoURLchannelTitlechannelIdgeminiVideoSummary
02025-03-25T15:01:18ZvaMi79TmqJsMASSIVE Investigations Launched into Hindenburg Disaster | History's Most Shocking (S1)The Hindenburg, the pride of Germany, explodes in flames while flying over New Jersey in 1937, sparking a mystery. See more in ...https://i.ytimg.com/vi/vaMi79TmqJs/hqdefault.jpghttps://www.youtube.com/watch?v=vaMi79TmqJsHISTORYUC9MAhZQQd9egwWCxrwSIsJQHere is a summary of the provided video from History’s Most Shocking:\\n\\nThe Hindenburg was a German zeppelin that was the ultimate experience in luxury air travel during the last century. In 1937, it was on its way to the United States when it tragically exploded. The disaster happened over a naval air station in Lakehurst, New Jersey on May 6, 1937. It was carrying 36 passengers and 61 crew members. The rear of the Hindenburg exploded just four minutes after landing procedures were initiated, killing 36 people, including one ground worker. Miraculously, 62 passengers and crew members survived.\\n\\nThe cause of the accident was initially unknown, leading to speculation of sabotage by a passenger, Joseph Spah. German and American investigations launched after the event determined that hydrogen ignited within the balloon’s tail section.\\n\\nThe Hindenburg disaster became a viral moment, as images of the explosion were seen on newsreels and in movie theaters around the world. \\n\\nHowever, the exact cause of the spark remained unknown. In the video, Dr. Michio Kaku explains that the skin of the zeppelin was probably supercharged with electricity, but it had nowhere to go. A similar situation is like walking across carpet with socks, creating static electricity. The combination of aluminum powder and iron oxide on the fabric created a flammable mixture that was likely ignited by an electric spark caused by a buildup of static electricity and rainy, stormy conditions.
12025-03-23T18:00:10ZHWz-7gQiLBQInvestigating Cosmic Mysteries | Ancient AliensFollow David Childress as he investigates mysteries around the world. See more in this Ancient Aliens compilation. Watch all new ...https://i.ytimg.com/vi/HWz-7gQiLBQ/hqdefault.jpghttps://www.youtube.com/watch?v=HWz-7gQiLBQHISTORYUC9MAhZQQd9egwWCxrwSIsJQAncient astronaut theorists explore the mysterious stone structures of Carnac on the northwest coast of France. The megaliths are more than 3,000 granite rocks arranged in various shapes. Local legend tells of invading Roman soldiers turned into stone by Merlin the Magician. But the rocks also have magnetic properties, are cut on one side, and nearly all of them come to a point. Did their placement create some kind of geomagnetic field? These rock formations may be one of the places on earth that have been recognized as points with high magnetic forces. Other places such as the Giza pyramids, Machu Picchu, and Baalbek in Lebanon all have similar properties. These locations may have been chosen to harness an ancient world energy grid. The Carnac stones, according to the legend, were built to be viewed from above. Three similar locations that can be viewed from space are the Great Wall of China, the Nazca lines, and Carnac. Could these stone structures have been erected for or by extraterrestrials?
22025-03-17T18:00:06Z6Em9JoAuOyY3 Groundbreaking Gold Finds | The Curse of Oak IslandThe crew investigates major finds that could be made of gold. See more in this The Curse of Oak Island compilation. 0:00 Testing ...https://i.ytimg.com/vi/6Em9JoAuOyY/hqdefault.jpghttps://www.youtube.com/watch?v=6Em9JoAuOyYHISTORYUC9MAhZQQd9egwWCxrwSIsJQMarty Lagina, Alex Lagina and Craig Tester visit Saint Mary’s University in Halifax. They want to find out if an antique brooch found on lot 21 is partially made of gold. Dr. Christa Brosseau, an associate professor of chemistry, and her colleague, Dr. Xiangyang, examine the brooch with a high-powered scanning electron microscope, which magnifies the brooch tens of thousands of times its actual size. They are able to examine the chemical composition of the brooch and determine that it is indeed made of gold. Later, back on Oak Island, Marty Lagina informs the rest of the team of the discovery, which is believed to be a momentous development in the 223-year history of the Oak Island treasure hunt. \\n\\nIn another discovery, the team has extracted wood samples from a depth of fifty-five feet inside the Garden Shaft, containing high-trace evidence of gold. The wood was dried and scanned by Emma Culligan, an archaeo metallurgist, using an X-ray fluorescence spectrometer, or XRF, which bombarded it with gamma rays and confirmed the presence of gold.
" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def get_gemini_summary_from_youtube_video_url(video_url):\n", + " video_summary_prompt = \"Summarize this video.\"\n", + " \n", + " # Gemini Pro: highest quality\n", + " video_summary_response = gemini_pro_model.generate_content(\n", + " [\n", + " video_summary_prompt,\n", + " Part.from_uri(mime_type=\"video/webm\", uri=video_url)\n", + " ]\n", + " )\n", + " \n", + " # Gemini Flash: prioritize latency/cost\n", + " # video_summary_response = gemini_flash_model.generate_content(\n", + " # [\n", + " # video_summary_prompt,\n", + " # Part.from_uri(mime_type=\"video/webm\", uri=video_url)\n", + " # ]\n", + " # )\n", + "\n", + " summary_text = video_summary_response.text \n", + " return summary_text\n", + "\n", + "# generate summaries \n", + "yt_df[\"geminiVideoSummary\"] = yt_df[\"videoURL\"].apply(\n", + " get_gemini_summary_from_youtube_video_url\n", + ")\n", + "HTML(yt_df.to_html(render_links=True, escape=False, max_rows=3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load response dataframe to BigQuery" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "BQ_DATASET = 'youtube_video_analysis' # os.getenv(\"BQ_DATASET\")\n", + "TRENDING_YT_RESPONSE_TABLE = \"yt_search_responses_v2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "LoadJob" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "YOUTUBE_API_LOAD_JOB = bq_client.load_table_from_dataframe(\n", + " yt_df,\n", + " f\"{BQ_DATASET}.{TRENDING_YT_RESPONSE_TABLE}\",\n", + " # job_config=bigquery.LoadJobConfig(write_disposition=\"WRITE_TRUNCATE\"),\n", + ")\n", + "\n", + "# Wait for the load job to complete\n", + 
"YOUTUBE_API_LOAD_JOB.result()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## YouTube Trends" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "request = youtube.videos().list(\n", + " part=\"snippet,contentDetails,statistics\",\n", + " chart=\"mostPopular\",\n", + " regionCode=\"US\",\n", + " maxResults=7,\n", + ")\n", + "response = request.execute()\n", + "# print(response)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
publishedAtvideoIdvideoTitlevideoDescriptiondurationvideoTagsviewCountlikeCountfavoriteCountcommentCountvideoURLvideoThumbnailchannelTitlechannelId
02025-03-26T18:50:00ZvH0OetsEZ7AThe LeBron James Unfiltered Full Interview With The Pat McAfee ShowWelcome to The Pat McAfee Show LIVE from Noon-3PM EST Mon-Fri. You can also find us live on ESPN, ESPN+, & TikTok!\\n\\nBecome a #McAfeeMafia member! https://www.youtube.com/channel/UCxcTeAKWJca6XyJ37_ZoKIQ/join\\n\\nSUBSCRIBE:\\nhttps://www.youtube.com/channel/UCxcTeAKWJca6XyJ37_ZoKIQ?sub_confirmation=1\\n\\nMERCH: https://store.patmcafeeshow.com\\n\\nSubscribe to the Pat McAfee Show(s)!\\n @Hammer Dahn \\n @The CONCAFA Show \\n @The Pod \\n @The Pat McAfee Show \\n \\nMERCH: https://store.patmcafeeshow.com\\nSPONSOR THE SHOW: https://www.patmcafeeshow.com/partnerships\\n\\n#PatMcAfee #NFLPT1H12M42S[pat mcafee, podcast, sports, nfl, ncaa, football, for the brand, punter, kicker, pat mcafee show, american football, nfl news, pat mcafee clips, pat mcafee reacts, pat mcafee highlights, pat mcafee live, pat mcafee show live, pat mcafee podcast, the pat mcafee show, McAfeeESPN2023, lebron james, lebron james song, lebron james highlights, lebron james and bronny, lebron james stephen a smith, lebron james pat mcafee, pat mcafee lebron, lebron interview, lebron podcast]11351664505005340https://www.youtube.com/watch?v=vH0OetsEZ7Ahttps://i.ytimg.com/vi/vH0OetsEZ7A/hqdefault.jpgThe Pat McAfee ShowUCxcTeAKWJca6XyJ37_ZoKIQ
.............................................
62025-03-25T15:00:41ZUWMzKXsY9A4Final Destination Bloodlines | Official TrailerDeath is a relentless son of a *****. #FinalDestination #Bloodlines - Only in Theaters and IMAX May 16. #FilmedforIMAX\\n\\nThe newest chapter in New Line Cinema’s bloody successful franchise takes audiences back to the very beginning of Death’s twisted sense of justice—“Final Destination Bloodlines.” \\n\\nPlagued by a violent recurring nightmare, college student Stefanie heads home to track down the one person who might be able to break the cycle and save her family from the grisly demise that inevitably awaits them all. \\n“Final Destination Bloodlines” stars Kaitlyn Santa Juana, Teo Briones, Richard Harmon, Owen Patrick Joyner, Anna Lore, with Brec Bassinger, and Tony Todd.\\n\\nThe film is directed by Adam Stein & Zach Lipovsky. The screenplay is by Guy Busick & Lori Evans Taylor, and story is by Jon Watts and Guy Busick & Lori Evans Taylor. It is based on characters created by Jeffrey Reddick.\\n“Final Destination Bloodlines” is produced by Craig Perry, Sheila Hanahan Taylor, Jon Watts, Dianne McGunigle and Toby Emmerich. The executive producers are David Siegel and Warren Zide. The behind-the-camera talent includes director of photography Christian Sebaldt and production designer Rachel O’Toole. The film is edited by Sabrina Pitre. The music is by Tim Wynn. The costumes are designed by Michelle Hunter. The casting is by Rich Delia.\\n\\nNew Line Cinema presents A Practical Pictures / Freshman Year / Fireside Films Production: “Final Destination Bloodlines.” The film will be distributed in theaters and IMAX worldwide by Warner Bros. 
Pictures, in theaters only nationwide on May 16, 2025, and internationally beginning on 14 May 2025.PT2M25S[Adam B Stein, Anna Lore, Brec Bassinger, ComingSoon, Disaster Movie, FinalDestination, FinalDestinationBloodLines, Guy Busick, Horror, Jon Watts, Kaitlyn Santa Juana, Lori Evans Taylor, May2025, Owen Patrick Joyner, Richard Harmon, Rya Kihlstedt, Teo Briones, Tony Todd, Trailer, Trailer2025, WB, WBD, WarnerBros, WarnerBrothers, Zach Lipovsky]11938794170184014474https://www.youtube.com/watch?v=UWMzKXsY9A4https://i.ytimg.com/vi/UWMzKXsY9A4/hqdefault.jpgWarner Bros.UCjmJDM5pRKbUlVIzDYYWb6g
" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# prepare results dataframe\n", + "trend_dict = {}\n", + "i=1\n", + "\n", + "for video in response['items']:\n", + " row_name = f\"row{i}\"\n", + "\n", + " # the `tags` field is ommitted if video doesnt have tags\n", + " if \"tags\" in video['snippet'].keys():\n", + " TAGS_VALUE = video['snippet']['tags']\n", + " else:\n", + " TAGS_VALUE = []\n", + "\n", + " trend_dict.update(\n", + " {\n", + " row_name: {\n", + " 'publishedAt': video['snippet']['publishedAt'],\n", + " 'videoId': video['id'],\n", + " 'videoTitle': video['snippet']['title'],\n", + " 'videoDescription': video['snippet']['description'],\n", + " 'duration': video['contentDetails']['duration'],\n", + " # 'videoTags': video['snippet']['tags'],\n", + " 'videoTags': TAGS_VALUE,\n", + " 'viewCount': video['statistics']['viewCount'],\n", + " 'likeCount': video['statistics']['likeCount'],\n", + " 'favoriteCount': video['statistics']['favoriteCount'],\n", + " 'commentCount': video['statistics']['commentCount'],\n", + " 'videoURL': f\"https://www.youtube.com/watch?v={video['id']}\",\n", + " 'videoThumbnail': video['snippet']['thumbnails']['high']['url'],\n", + " 'channelTitle': video['snippet']['channelTitle'],\n", + " 'channelId': video['snippet']['channelId'],\n", + " }\n", + " }\n", + " )\n", + "\n", + " i+=1\n", + "\n", + "yt_trend_df = pd.DataFrame.from_dict(trend_dict, orient='index')\n", + "yt_trend_df = yt_trend_df.reset_index(drop=True)\n", + "\n", + "HTML(yt_trend_df.to_html(render_links=True, escape=False, max_rows=3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load response dataframe to BigQuery" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "LoadJob" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } 
+ ], + "source": [ + "# BQ_DATASET = 'youtube_video_analysis' # os.getenv(\"BQ_DATASET\")\n", + "YT_TRENDS_RESPONSE_TABLE = \"yt_trending_videos_v2\"\n", + "\n", + "YOUTUBE_API_LOAD_JOB_v2 = bq_client.load_table_from_dataframe(\n", + " yt_trend_df,\n", + " f\"{BQ_DATASET}.{YT_TRENDS_RESPONSE_TABLE}\",\n", + " # job_config=bigquery.LoadJobConfig(write_disposition=\"WRITE_TRUNCATE\"),\n", + ")\n", + "\n", + "# Wait for the load job to complete\n", + "YOUTUBE_API_LOAD_JOB_v2.result()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```python\n", + "{\n", + " \"kind\": \"youtube#videoListResponse\",\n", + " \"etag\": etag,\n", + " \"nextPageToken\": string,\n", + " \"prevPageToken\": string,\n", + " \"pageInfo\": {\n", + " \"totalResults\": integer,\n", + " \"resultsPerPage\": integer\n", + " },\n", + " \"items\": [\n", + " video Resource\n", + " ]\n", + "}\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/agents/trends-and-insights-agent/notebooks/hello_youtube_analysis_w_gemini2.ipynb b/python/agents/trends-and-insights-agent/notebooks/hello_youtube_analysis_w_gemini2.ipynb new file mode 100644 index 00000000..47b446c9 --- /dev/null +++ b/python/agents/trends-and-insights-agent/notebooks/hello_youtube_analysis_w_gemini2.ipynb @@ -0,0 +1,2345 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# YouTube Video Analysis with Gemini" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "steps to complete:\n", + "\n", + "* 
Use the [YouTube Data API](https://developers.google.com/youtube/v3/getting-started#before-you-start) to find videos of interest, including by search query and channel.\n", + "* Summarize YouTube videos from a specific query and channel using Gemini\n", + "* Use batch prediction to extract a specific set of structured outputs from a larger set of YouTube videos\n", + "* Get information about and extract insights from those videos by aggregating Gemini's extracted results in BigQuery" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### env config" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PROJECT_ID: hybrid-vertex\n", + "LOCATION: us-central1\n", + "PREFIX: zghost_v1\n" + ] + } + ], + "source": [ + "import os\n", + "import sys\n", + "import ipykernel\n", + "\n", + "# from dotenv import load_dotenv\n", + "# load_dotenv() # this loads the .env script for use below\n", + "\n", + "PROJECT_ID = os.getenv(\"PROJECT_ID\")\n", + "LOCATION = os.getenv(\"LOCATION\")\n", + "PREFIX = os.getenv(\"PREFIX\")\n", + "\n", + "print(f\"PROJECT_ID: {PROJECT_ID}\")\n", + "print(f\"LOCATION: {LOCATION}\")\n", + "print(f\"PREFIX: {PREFIX}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Vertex AI SDK version = 1.85.0\n" + ] + } + ], + "source": [ + "import json\n", + "import time\n", + "import pandas as pd\n", + "from pprint import pprint\n", + "\n", + "from IPython.display import HTML, Markdown, display\n", + "from google.cloud import bigquery, secretmanager\n", + "import googleapiclient.discovery\n", + "import googleapiclient.errors\n", + "\n", + "import vertexai\n", + "from vertexai.batch_prediction import BatchPredictionJob\n", + "from 
vertexai.generative_models import GenerativeModel, Part\n", + "\n", + "print(f\"Vertex AI SDK version = {vertexai.__version__}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Config Gemini models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set Gemini Flash and Pro models to be used in this notebook\n", + "GEMINI_FLASH_MODEL_ID = \"gemini-1.5-flash-002\" # \"gemini-2.0-flash-001\"\n", + "GEMINI_PRO_MODEL_ID = \"gemini-1.5-pro-002\"\n", + "\n", + "gemini_flash_model = GenerativeModel(GEMINI_FLASH_MODEL_ID)\n", + "gemini_pro_model = GenerativeModel(GEMINI_PRO_MODEL_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### YouTube Data API key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "_SECRET_NAME: projects/hybrid-vertex/secrets/projects/934903580331/secrets/yt-data-api\n", + "\n" + ] + } + ], + "source": [ + "from google.cloud import secretmanager\n", + "\n", + "# secret manager client\n", + "sm_client = secretmanager.SecretManagerServiceClient()\n", + "\n", + "_SECRET_ID = 'projects/934903580331/secrets/yt-data-api'\n", + "_SECRET_VERSION = '{}/versions/1'.format(_SECRET_ID)\n", + "_SECRET_NAME = sm_client.secret_path(PROJECT_ID, _SECRET_ID)\n", + "print(f\"_SECRET_NAME: {_SECRET_NAME}\\n\")\n", + "\n", + "response = sm_client.access_secret_version(request={\"name\": _SECRET_VERSION})\n", + "# print(f\"response: {response}\")\n", + "\n", + "YOUTUBE_DATA_API_KEY = response.payload.data.decode(\"UTF-8\")\n", + "# print(f\"YOUTUBE_DATA_API_KEY: {YOUTUBE_DATA_API_KEY}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup BigQuery client, dataset, and tables" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ 
+ "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + "
" + ], + "text/plain": [ + "Empty DataFrame\n", + "Columns: []\n", + "Index: []" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create BQ client\n", + "BQ_CLIENT = bigquery.Client(project=PROJECT_ID)\n", + "\n", + "# Function to run BQ query and return results as data frame\n", + "def get_bq_query_results_as_df(query_text):\n", + " bq_results_table = BQ_CLIENT.query(query_text).to_dataframe()\n", + " return bq_results_table\n", + "\n", + "# Names of BQ dataset and tables to be created/used\n", + "BQ_DATASET = \"youtube_video_analysis\"\n", + "BATCH_PREDICTION_REQUESTS_TABLE = (\n", + " \"video_analysis_batch_requests\"\n", + ")\n", + "BATCH_PREDICTION_RESULTS_TABLE = (\n", + " \"video_analysis_batch_results\"\n", + ")\n", + "\n", + "# Create BQ dataset if it doesn't already exist\n", + "create_dataset_if_nec_query = f\"\"\"\n", + " CREATE SCHEMA IF NOT EXISTS `{BQ_DATASET}`\n", + " OPTIONS(\n", + " location='{LOCATION}'\n", + " );\n", + " \"\"\"\n", + "\n", + "get_bq_query_results_as_df(create_dataset_if_nec_query)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# YouTube Data API \n", + "\n", + "* [API Reference](https://developers.google.com/youtube/v3/docs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## find videos by query" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "search_query: trailer park boys\n", + "video_duration_type: short\n", + "published_within_last_X_days: 30\n", + "order_criteria: relevance\n", + "num_results: 4\n" + ] + } + ], + "source": [ + "search_query = \"trailer park boys\"\n", + "\n", + "video_duration_type = (\n", + " \"short\" # ['any', 'long', 'medium', 'short']\n", + ")\n", + "\n", + "# To get newer/fresher videos, modify to lower # of days\n", + "published_within_last_X_days = 30 \n", + "\n", + 
"# Different ways to order results\n", + "order_criteria = \"relevance\" # ['date', 'rating', 'relevance', 'title', 'viewCount']\n", + "\n", + "# of results to be returned - max is 50 results on 1 API call\n", + "num_results = 3\n", + "\n", + "print(f\"search_query: {search_query}\")\n", + "print(f\"video_duration_type: {video_duration_type}\")\n", + "print(f\"published_within_last_X_days: {published_within_last_X_days}\")\n", + "print(f\"order_criteria: {order_criteria}\")\n", + "print(f\"num_results: {num_results}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_yt_data_api_response_for_search_query(\n", + " query, \n", + " video_duration, \n", + " max_num_days_ago,\n", + " channel_id, \n", + " video_order, \n", + " num_video_results\n", + "):\n", + " api_service_name = \"youtube\"\n", + " api_version = \"v3\"\n", + " developer_key = YOUTUBE_DATA_API_KEY\n", + " youtube = googleapiclient.discovery.build(\n", + " api_service_name, api_version, developerKey=developer_key\n", + " )\n", + " \n", + " published_after_timestamp = (\n", + " (pd.Timestamp.now() - pd.DateOffset(days=max_num_days_ago))\n", + " .tz_localize(\"UTC\")\n", + " .isoformat()\n", + " )\n", + "\n", + " # Using Search:list - https://developers.google.com/youtube/v3/docs/search/list\n", + " yt_data_api_request = youtube.search().list(\n", + " part=\"id,snippet\",\n", + " type=\"video\",\n", + " q=query,\n", + " videoDuration=video_duration,\n", + " maxResults=num_video_results,\n", + " publishedAfter=published_after_timestamp,\n", + " channelId=channel_id,\n", + " order=video_order,\n", + " )\n", + " yt_data_api_response = yt_data_api_request.execute()\n", + "\n", + " return yt_data_api_response" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'kind': 'youtube#searchListResponse', 'etag': 
'pdonbvkjq3OfwogTrzdXnEQsRg8', 'nextPageToken': 'CAQQAA', 'regionCode': 'ZZ', 'pageInfo': {'totalResults': 5381, 'resultsPerPage': 4}, 'items': [{'kind': 'youtube#searchResult', 'etag': 'Q48eg7EnewvJJe_vEz33obHOivU', 'id': {'kind': 'youtube#video', 'videoId': 'o8iYmeqXU20'}, 'snippet': {'publishedAt': '2025-03-18T16:00:06Z', 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow', 'title': 'Gettin' Cooked With Ricky - Sneak Preview!', 'description': \"Where the best setting is BAKED... Gettin' Cooked With Ricky launches on SwearNet Friday, March 28! #trailerparkboys ...\", 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg', 'width': 120, 'height': 90}, 'medium': {'url': 'https://i.ytimg.com/vi/o8iYmeqXU20/mqdefault.jpg', 'width': 320, 'height': 180}, 'high': {'url': 'https://i.ytimg.com/vi/o8iYmeqXU20/hqdefault.jpg', 'width': 480, 'height': 360}}, 'channelTitle': 'Trailer Park Boys', 'liveBroadcastContent': 'none', 'publishTime': '2025-03-18T16:00:06Z'}}, {'kind': 'youtube#searchResult', 'etag': 'h2OvGAmC6-rjoJuuE-iZZk8YO6Q', 'id': {'kind': 'youtube#video', 'videoId': 'Aei-BO5SmLM'}, 'snippet': {'publishedAt': '2025-03-14T11:30:06Z', 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow', 'title': 'Park After Dark S6E43 - Theory Of Fuckativity', 'description': \"Now streaming at https://bit.ly/PAD6-ep43 and the TPB SwearNet app: Spring is springin', hash seeds are growin', and Ricky is ...\", 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/Aei-BO5SmLM/default.jpg', 'width': 120, 'height': 90}, 'medium': {'url': 'https://i.ytimg.com/vi/Aei-BO5SmLM/mqdefault.jpg', 'width': 320, 'height': 180}, 'high': {'url': 'https://i.ytimg.com/vi/Aei-BO5SmLM/hqdefault.jpg', 'width': 480, 'height': 360}}, 'channelTitle': 'Trailer Park Boys', 'liveBroadcastContent': 'none', 'publishTime': '2025-03-14T11:30:06Z'}}, {'kind': 'youtube#searchResult', 'etag': 'V1OLkuzAvgcDi6_4bmOm8M6uKsY', 'id': {'kind': 'youtube#video', 'videoId': '5tZkwb3bsmw'}, 'snippet': {'publishedAt': 
'2025-02-28T12:19:32Z', 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow', 'title': 'Park After Dark S6E41 - The Fuck You Stick', 'description': 'Now streaming at https://bit.ly/PAD6-ep41 and the TPB SwearNet app: Fuck off February, and take your shitcicles with you! Bubs is ...', 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/5tZkwb3bsmw/default.jpg', 'width': 120, 'height': 90}, 'medium': {'url': 'https://i.ytimg.com/vi/5tZkwb3bsmw/mqdefault.jpg', 'width': 320, 'height': 180}, 'high': {'url': 'https://i.ytimg.com/vi/5tZkwb3bsmw/hqdefault.jpg', 'width': 480, 'height': 360}}, 'channelTitle': 'Trailer Park Boys', 'liveBroadcastContent': 'none', 'publishTime': '2025-02-28T12:19:32Z'}}, {'kind': 'youtube#searchResult', 'etag': 'SG8pJMWCK-Eix6X0_lIuA-SOq5E', 'id': {'kind': 'youtube#video', 'videoId': 'E55Pw55MvGA'}, 'snippet': {'publishedAt': '2025-03-25T01:15:01Z', 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow', 'title': 'Park After Dark S6E44 - Gimme The F**king Liquor!', 'description': \"Now streaming at https://bit.ly/PAD6-ep44 and the TPB SwearNet app: It's officially BOOZE MONDAY! 
The Boys are back at a new ...\", 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/E55Pw55MvGA/default.jpg', 'width': 120, 'height': 90}, 'medium': {'url': 'https://i.ytimg.com/vi/E55Pw55MvGA/mqdefault.jpg', 'width': 320, 'height': 180}, 'high': {'url': 'https://i.ytimg.com/vi/E55Pw55MvGA/hqdefault.jpg', 'width': 480, 'height': 360}}, 'channelTitle': 'Trailer Park Boys', 'liveBroadcastContent': 'none', 'publishTime': '2025-03-25T01:15:01Z'}}]}\n" + ] + } + ], + "source": [ + "yt_data_api_results = get_yt_data_api_response_for_search_query(\n", + " query=search_query,\n", + " video_duration=video_duration_type,\n", + " max_num_days_ago=published_within_last_X_days,\n", + " channel_id=None,\n", + " video_order=order_criteria,\n", + " num_video_results=num_results,\n", + ")\n", + "\n", + "print(yt_data_api_results)" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'kind': 'youtube#searchListResponse',\n", + " 'etag': 'pdonbvkjq3OfwogTrzdXnEQsRg8',\n", + " 'nextPageToken': 'CAQQAA',\n", + " 'regionCode': 'ZZ',\n", + " 'pageInfo': {'totalResults': 5381, 'resultsPerPage': 4},\n", + " 'items': [{'kind': 'youtube#searchResult',\n", + " 'etag': 'Q48eg7EnewvJJe_vEz33obHOivU',\n", + " 'id': {'kind': 'youtube#video', 'videoId': 'o8iYmeqXU20'},\n", + " 'snippet': {'publishedAt': '2025-03-18T16:00:06Z',\n", + " 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow',\n", + " 'title': 'Gettin' Cooked With Ricky - Sneak Preview!',\n", + " 'description': \"Where the best setting is BAKED... Gettin' Cooked With Ricky launches on SwearNet Friday, March 28! 
#trailerparkboys ...\",\n", + " 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg',\n", + " 'width': 120,\n", + " 'height': 90},\n", + " 'medium': {'url': 'https://i.ytimg.com/vi/o8iYmeqXU20/mqdefault.jpg',\n", + " 'width': 320,\n", + " 'height': 180},\n", + " 'high': {'url': 'https://i.ytimg.com/vi/o8iYmeqXU20/hqdefault.jpg',\n", + " 'width': 480,\n", + " 'height': 360}},\n", + " 'channelTitle': 'Trailer Park Boys',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-18T16:00:06Z'}},\n", + " {'kind': 'youtube#searchResult',\n", + " 'etag': 'h2OvGAmC6-rjoJuuE-iZZk8YO6Q',\n", + " 'id': {'kind': 'youtube#video', 'videoId': 'Aei-BO5SmLM'},\n", + " 'snippet': {'publishedAt': '2025-03-14T11:30:06Z',\n", + " 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow',\n", + " 'title': 'Park After Dark S6E43 - Theory Of Fuckativity',\n", + " 'description': \"Now streaming at https://bit.ly/PAD6-ep43 and the TPB SwearNet app: Spring is springin', hash seeds are growin', and Ricky is ...\",\n", + " 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/Aei-BO5SmLM/default.jpg',\n", + " 'width': 120,\n", + " 'height': 90},\n", + " 'medium': {'url': 'https://i.ytimg.com/vi/Aei-BO5SmLM/mqdefault.jpg',\n", + " 'width': 320,\n", + " 'height': 180},\n", + " 'high': {'url': 'https://i.ytimg.com/vi/Aei-BO5SmLM/hqdefault.jpg',\n", + " 'width': 480,\n", + " 'height': 360}},\n", + " 'channelTitle': 'Trailer Park Boys',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-14T11:30:06Z'}},\n", + " {'kind': 'youtube#searchResult',\n", + " 'etag': 'V1OLkuzAvgcDi6_4bmOm8M6uKsY',\n", + " 'id': {'kind': 'youtube#video', 'videoId': '5tZkwb3bsmw'},\n", + " 'snippet': {'publishedAt': '2025-02-28T12:19:32Z',\n", + " 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow',\n", + " 'title': 'Park After Dark S6E41 - The Fuck You Stick',\n", + " 'description': 'Now streaming at https://bit.ly/PAD6-ep41 and the TPB SwearNet app: Fuck off February, and take your 
shitcicles with you! Bubs is ...',\n", + " 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/5tZkwb3bsmw/default.jpg',\n", + " 'width': 120,\n", + " 'height': 90},\n", + " 'medium': {'url': 'https://i.ytimg.com/vi/5tZkwb3bsmw/mqdefault.jpg',\n", + " 'width': 320,\n", + " 'height': 180},\n", + " 'high': {'url': 'https://i.ytimg.com/vi/5tZkwb3bsmw/hqdefault.jpg',\n", + " 'width': 480,\n", + " 'height': 360}},\n", + " 'channelTitle': 'Trailer Park Boys',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-02-28T12:19:32Z'}},\n", + " {'kind': 'youtube#searchResult',\n", + " 'etag': 'SG8pJMWCK-Eix6X0_lIuA-SOq5E',\n", + " 'id': {'kind': 'youtube#video', 'videoId': 'E55Pw55MvGA'},\n", + " 'snippet': {'publishedAt': '2025-03-25T01:15:01Z',\n", + " 'channelId': 'UCvW9uSNy6Lytcnib1CdXrow',\n", + " 'title': 'Park After Dark S6E44 - Gimme The F**king Liquor!',\n", + " 'description': \"Now streaming at https://bit.ly/PAD6-ep44 and the TPB SwearNet app: It's officially BOOZE MONDAY! 
The Boys are back at a new ...\",\n", + " 'thumbnails': {'default': {'url': 'https://i.ytimg.com/vi/E55Pw55MvGA/default.jpg',\n", + " 'width': 120,\n", + " 'height': 90},\n", + " 'medium': {'url': 'https://i.ytimg.com/vi/E55Pw55MvGA/mqdefault.jpg',\n", + " 'width': 320,\n", + " 'height': 180},\n", + " 'high': {'url': 'https://i.ytimg.com/vi/E55Pw55MvGA/hqdefault.jpg',\n", + " 'width': 480,\n", + " 'height': 360}},\n", + " 'channelTitle': 'Trailer Park Boys',\n", + " 'liveBroadcastContent': 'none',\n", + " 'publishTime': '2025-03-25T01:15:01Z'}}]}" + ] + }, + "execution_count": 86, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "yt_data_api_results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert response to dataframe" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "def convert_yt_data_api_response_to_df(yt_data_api_response):\n", + "\n", + " # Convert API response into data frame for further analysis\n", + " yt_data_api_response_items_df = pd.json_normalize(yt_data_api_response[\"items\"])\n", + "\n", + " yt_data_api_response_df = yt_data_api_response_items_df.assign(\n", + " videoURL=\"https://www.youtube.com/watch?v=\"\n", + " + yt_data_api_response_items_df[\"id.videoId\"]\n", + " )[\n", + " [\n", + " \"id.videoId\",\n", + " \"videoURL\",\n", + " \"snippet.title\",\n", + " \"snippet.description\",\n", + " \"snippet.channelId\",\n", + " \"snippet.channelTitle\",\n", + " \"snippet.publishedAt\",\n", + " \"snippet.thumbnails.default.url\",\n", + " ]\n", + " ].rename(\n", + " columns={\n", + " \"id.videoId\": \"videoId\",\n", + " \"snippet.title\": \"videoTitle\",\n", + " \"snippet.description\": \"videoDescription\",\n", + " \"snippet.channelId\": \"channelId\",\n", + " \"snippet.channelTitle\": \"channelTitle\",\n", + " \"snippet.publishedAt\": \"publishedAt\",\n", + " \"snippet.thumbnails.default.url\": \"thumbnailURL\",\n", + " 
}\n", + " )\n", + "\n", + " return yt_data_api_response_df" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
videoIdvideoURLvideoTitlevideoDescriptionchannelIdchannelTitlepublishedAtthumbnailURL
0o8iYmeqXU20https://www.youtube.com/watch?v=o8iYmeqXU20Gettin&#39; Cooked With Ricky - Sneak Preview!Where the best setting is BAKED... Gettin' Coo...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-18T16:00:06Zhttps://i.ytimg.com/vi/o8iYmeqXU20/default.jpg
1Aei-BO5SmLMhttps://www.youtube.com/watch?v=Aei-BO5SmLMPark After Dark S6E43 - Theory Of FuckativityNow streaming at https://bit.ly/PAD6-ep43 and ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-14T11:30:06Zhttps://i.ytimg.com/vi/Aei-BO5SmLM/default.jpg
25tZkwb3bsmwhttps://www.youtube.com/watch?v=5tZkwb3bsmwPark After Dark S6E41 - The Fuck You StickNow streaming at https://bit.ly/PAD6-ep41 and ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-02-28T12:19:32Zhttps://i.ytimg.com/vi/5tZkwb3bsmw/default.jpg
3E55Pw55MvGAhttps://www.youtube.com/watch?v=E55Pw55MvGAPark After Dark S6E44 - Gimme The F**king Liquor!Now streaming at https://bit.ly/PAD6-ep44 and ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-25T01:15:01Zhttps://i.ytimg.com/vi/E55Pw55MvGA/default.jpg
\n", + "
" + ], + "text/plain": [ + " videoId videoURL \\\n", + "0 o8iYmeqXU20 https://www.youtube.com/watch?v=o8iYmeqXU20 \n", + "1 Aei-BO5SmLM https://www.youtube.com/watch?v=Aei-BO5SmLM \n", + "2 5tZkwb3bsmw https://www.youtube.com/watch?v=5tZkwb3bsmw \n", + "3 E55Pw55MvGA https://www.youtube.com/watch?v=E55Pw55MvGA \n", + "\n", + " videoTitle \\\n", + "0 Gettin' Cooked With Ricky - Sneak Preview! \n", + "1 Park After Dark S6E43 - Theory Of Fuckativity \n", + "2 Park After Dark S6E41 - The Fuck You Stick \n", + "3 Park After Dark S6E44 - Gimme The F**king Liquor! \n", + "\n", + " videoDescription \\\n", + "0 Where the best setting is BAKED... Gettin' Coo... \n", + "1 Now streaming at https://bit.ly/PAD6-ep43 and ... \n", + "2 Now streaming at https://bit.ly/PAD6-ep41 and ... \n", + "3 Now streaming at https://bit.ly/PAD6-ep44 and ... \n", + "\n", + " channelId channelTitle publishedAt \\\n", + "0 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-18T16:00:06Z \n", + "1 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-14T11:30:06Z \n", + "2 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-02-28T12:19:32Z \n", + "3 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-25T01:15:01Z \n", + "\n", + " thumbnailURL \n", + "0 https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg \n", + "1 https://i.ytimg.com/vi/Aei-BO5SmLM/default.jpg \n", + "2 https://i.ytimg.com/vi/5tZkwb3bsmw/default.jpg \n", + "3 https://i.ytimg.com/vi/E55Pw55MvGA/default.jpg " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "yt_data_api_results_df = convert_yt_data_api_response_to_df(yt_data_api_results)\n", + "\n", + "display(yt_data_api_results_df.head())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Get summary from Gemini for each video" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_gemini_summary_from_youtube_video_url(video_url):\n", + " video_summary_prompt = 
\"Summarize this video.\"\n", + "\n", + " # Gemini Pro for highest quality (change to Flash if latency/cost are of concern)\n", + " video_summary_response = gemini_pro_model.generate_content(\n", + " [\n", + " video_summary_prompt, \n", + " Part.from_uri(mime_type=\"video/webm\", uri=video_url)\n", + " ]\n", + " )\n", + "\n", + " summary_text = video_summary_response.text\n", + "\n", + " return summary_text" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
videoIdvideoURLvideoTitlevideoDescriptionchannelIdchannelTitlepublishedAtthumbnailURLgeminiVideoSummary
0o8iYmeqXU20https://www.youtube.com/watch?v=o8iYmeqXU20Gettin&#39; Cooked With Ricky - Sneak Preview!Where the best setting is BAKED... Gettin' Coo...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-18T16:00:06Zhttps://i.ytimg.com/vi/o8iYmeqXU20/default.jpgThis trailer introduces “Gettin’ Cooked with R...
1Aei-BO5SmLMhttps://www.youtube.com/watch?v=Aei-BO5SmLMPark After Dark S6E43 - Theory Of FuckativityNow streaming at https://bit.ly/PAD6-ep43 and ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-14T11:30:06Zhttps://i.ytimg.com/vi/Aei-BO5SmLM/default.jpgIn this clip from Trailer Park Boys, the chara...
25tZkwb3bsmwhttps://www.youtube.com/watch?v=5tZkwb3bsmwPark After Dark S6E41 - The Fuck You StickNow streaming at https://bit.ly/PAD6-ep41 and ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-02-28T12:19:32Zhttps://i.ytimg.com/vi/5tZkwb3bsmw/default.jpgThree men discuss using a Pringle’s can as a s...
3E55Pw55MvGAhttps://www.youtube.com/watch?v=E55Pw55MvGAPark After Dark S6E44 - Gimme The F**king Liquor!Now streaming at https://bit.ly/PAD6-ep44 and ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-25T01:15:01Zhttps://i.ytimg.com/vi/E55Pw55MvGA/default.jpgThis Trailer Park Boys ad features a three-pac...
\n", + "
" + ], + "text/plain": [ + " videoId videoURL \\\n", + "0 o8iYmeqXU20 https://www.youtube.com/watch?v=o8iYmeqXU20 \n", + "1 Aei-BO5SmLM https://www.youtube.com/watch?v=Aei-BO5SmLM \n", + "2 5tZkwb3bsmw https://www.youtube.com/watch?v=5tZkwb3bsmw \n", + "3 E55Pw55MvGA https://www.youtube.com/watch?v=E55Pw55MvGA \n", + "\n", + " videoTitle \\\n", + "0 Gettin' Cooked With Ricky - Sneak Preview! \n", + "1 Park After Dark S6E43 - Theory Of Fuckativity \n", + "2 Park After Dark S6E41 - The Fuck You Stick \n", + "3 Park After Dark S6E44 - Gimme The F**king Liquor! \n", + "\n", + " videoDescription \\\n", + "0 Where the best setting is BAKED... Gettin' Coo... \n", + "1 Now streaming at https://bit.ly/PAD6-ep43 and ... \n", + "2 Now streaming at https://bit.ly/PAD6-ep41 and ... \n", + "3 Now streaming at https://bit.ly/PAD6-ep44 and ... \n", + "\n", + " channelId channelTitle publishedAt \\\n", + "0 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-18T16:00:06Z \n", + "1 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-14T11:30:06Z \n", + "2 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-02-28T12:19:32Z \n", + "3 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-25T01:15:01Z \n", + "\n", + " thumbnailURL \\\n", + "0 https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg \n", + "1 https://i.ytimg.com/vi/Aei-BO5SmLM/default.jpg \n", + "2 https://i.ytimg.com/vi/5tZkwb3bsmw/default.jpg \n", + "3 https://i.ytimg.com/vi/E55Pw55MvGA/default.jpg \n", + "\n", + " geminiVideoSummary \n", + "0 This trailer introduces “Gettin’ Cooked with R... \n", + "1 In this clip from Trailer Park Boys, the chara... \n", + "2 Three men discuss using a Pringle’s can as a s... \n", + "3 This Trailer Park Boys ad features a three-pac... 
" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "yt_data_api_results_df[\"geminiVideoSummary\"] = yt_data_api_results_df[\"videoURL\"].apply(\n", + " get_gemini_summary_from_youtube_video_url\n", + ")\n", + "\n", + "yt_data_api_results_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('This trailer introduces “Gettin’ Cooked with Ricky” and Randy. The show is '\n", + " 'about cooking when you’re high. They make several dishes, including donair '\n", + " 'spaghetti, cheeseburger meatloaf, and pickle pizza. The cooking is often '\n", + " 'haphazard and involves unconventional kitchen implements, like a drill as a '\n", + " 'mixer, and a reciprocating saw to cut cheese. The trailer contains much '\n", + " 'profanity. The show is slated to start on March 28th.')\n" + ] + } + ], + "source": [ + "pprint(yt_data_api_results_df['geminiVideoSummary'].iloc[0])\n" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Summary of Video from Gemini:This trailer introduces “Gettin’ Cooked with Ricky” and Randy. The show is about cooking when you’re high. They make several dishes, including donair spaghetti, cheeseburger meatloaf, and pickle pizza. The cooking is often haphazard and involves unconventional kitchen implements, like a drill as a mixer, and a reciprocating saw to cut cheese. The trailer contains much profanity. The show is slated to start on March 28th." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pick 1 video above to display video and its summary together\n", + "sample_video = yt_data_api_results_df.sample(1).iloc[0].to_dict()\n", + "sample_video_embed_url = sample_video[\"videoURL\"].replace(\"/watch?v=\", \"/embed/\")\n", + "\n", + "# Create HTML code to directly embed video\n", + "sample_video_embed_html_code = f\"\"\"\n", + "\n", + "\n", + "\"\"\"\n", + "\n", + "# Display embedded YouTube video\n", + "display(HTML(sample_video_embed_html_code))\n", + "\n", + "display(\n", + " Markdown(\n", + " f\"Summary of Video from Gemini:{sample_video['geminiVideoSummary']}\"\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Analyze larger set of video in batch\n", + "\n", + "return top 50 videos from a given channel ID (e.g., `UCvW9uSNy6Lytcnib1CdXrow`)\n", + "\n", + "to get channel ID (manually):\n", + "* Browse to the channel page\n", + "* Press Ctrl-U to view source\n", + "* Search for `` should reliably give the unique ID." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "search_query: \n", + "video_duration_type: any\n", + "published_within_last_X_days: 365\n", + "order_criteria: viewCount\n", + "num_results: 50\n" + ] + } + ], + "source": [ + "# Intentionally leaving default empty to search for all videos w/in a channel\n", + "search_query = \"\"\n", + "\n", + "video_duration_type = (\n", + " \"any\" # ['any', 'long', 'medium', 'short']\n", + ")\n", + "\n", + "published_within_last_X_days = 365\n", + "\n", + "# for [Trailer Park Boys](https://www.youtube.com/@trailerparkboys)\n", + "channel_id = \"UCvW9uSNy6Lytcnib1CdXrow\"\n", + "\n", + "order_criteria = \"viewCount\" # ['date', 'rating', 'relevance', 'title', 'viewCount']\n", + "\n", + "# Max is 50 results on 1 API call\n", + "num_results = 50\n", + "\n", + "print(f\"search_query: {search_query}\")\n", + "print(f\"video_duration_type: {video_duration_type}\")\n", + "print(f\"published_within_last_X_days: {published_within_last_X_days}\")\n", + "print(f\"order_criteria: {order_criteria}\")\n", + "print(f\"num_results: {num_results}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dataframe shape: (50, 8)\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
videoIdvideoURLvideoTitlevideoDescriptionchannelIdchannelTitlepublishedAtthumbnailURL
0OnMX4fIgSnohttps://www.youtube.com/watch?v=OnMX4fIgSnoTrailer Park Boys Podcast Episode 55 - Ricky I...Another vintage Trailer Park Boys Podcast - wi...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-07-04T17:04:40Zhttps://i.ytimg.com/vi/OnMX4fIgSno/default.jpg
1PhIzGATStlshttps://www.youtube.com/watch?v=PhIzGATStlsBubbles And The Shitrockers - I Only Got Eyes ...Country music just got more DECENT!! Bubbles a...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-09-27T16:00:33Zhttps://i.ytimg.com/vi/PhIzGATStls/default.jpg
2mK5oSQObfSghttps://www.youtube.com/watch?v=mK5oSQObfSgStanding On The Shoulders Of Kitties - Now ava...Standing On The Shoulders Of Kitties now avail...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-10-17T15:49:31Zhttps://i.ytimg.com/vi/mK5oSQObfSg/default.jpg
3VuAICCz2iBUhttps://www.youtube.com/watch?v=VuAICCz2iBUTrailer Park Boys Chips at Giant Tiger!The Boys got their faces - and chips - on the ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-06-14T14:11:36Zhttps://i.ytimg.com/vi/VuAICCz2iBU/default.jpg
4o8iYmeqXU20https://www.youtube.com/watch?v=o8iYmeqXU20Gettin&#39; Cooked With Ricky - Sneak Preview!Where the best setting is BAKED... Gettin' Coo...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-18T16:00:06Zhttps://i.ytimg.com/vi/o8iYmeqXU20/default.jpg
\n", + "
" + ], + "text/plain": [ + " videoId videoURL \\\n", + "0 OnMX4fIgSno https://www.youtube.com/watch?v=OnMX4fIgSno \n", + "1 PhIzGATStls https://www.youtube.com/watch?v=PhIzGATStls \n", + "2 mK5oSQObfSg https://www.youtube.com/watch?v=mK5oSQObfSg \n", + "3 VuAICCz2iBU https://www.youtube.com/watch?v=VuAICCz2iBU \n", + "4 o8iYmeqXU20 https://www.youtube.com/watch?v=o8iYmeqXU20 \n", + "\n", + " videoTitle \\\n", + "0 Trailer Park Boys Podcast Episode 55 - Ricky I... \n", + "1 Bubbles And The Shitrockers - I Only Got Eyes ... \n", + "2 Standing On The Shoulders Of Kitties - Now ava... \n", + "3 Trailer Park Boys Chips at Giant Tiger! \n", + "4 Gettin' Cooked With Ricky - Sneak Preview! \n", + "\n", + " videoDescription \\\n", + "0 Another vintage Trailer Park Boys Podcast - wi... \n", + "1 Country music just got more DECENT!! Bubbles a... \n", + "2 Standing On The Shoulders Of Kitties now avail... \n", + "3 The Boys got their faces - and chips - on the ... \n", + "4 Where the best setting is BAKED... Gettin' Coo... 
\n", + "\n", + " channelId channelTitle publishedAt \\\n", + "0 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-07-04T17:04:40Z \n", + "1 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-09-27T16:00:33Z \n", + "2 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-10-17T15:49:31Z \n", + "3 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-06-14T14:11:36Z \n", + "4 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-18T16:00:06Z \n", + "\n", + " thumbnailURL \n", + "0 https://i.ytimg.com/vi/OnMX4fIgSno/default.jpg \n", + "1 https://i.ytimg.com/vi/PhIzGATStls/default.jpg \n", + "2 https://i.ytimg.com/vi/mK5oSQObfSg/default.jpg \n", + "3 https://i.ytimg.com/vi/VuAICCz2iBU/default.jpg \n", + "4 https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "yt_data_api_channel_results = get_yt_data_api_response_for_search_query(\n", + " query=search_query,\n", + " video_duration=video_duration_type,\n", + " max_num_days_ago=published_within_last_X_days,\n", + " channel_id=channel_id,\n", + " video_order=order_criteria,\n", + " num_video_results=num_results,\n", + ")\n", + "\n", + "yt_data_api_channel_results_df = convert_yt_data_api_response_to_df(\n", + " yt_data_api_channel_results\n", + ")\n", + "\n", + "print(f\"dataframe shape: {yt_data_api_channel_results_df.shape}\")\n", + "display(yt_data_api_channel_results_df.head())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Gemini video extraction task\n", + "\n", + "Specify `system instruction`, `prompt`, and `response schema` for [controlled generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output) (i.e. creating structured outputs for further analysis)\n", + "\n", + "Create single Gemini cURL request per row - 1 for each YouTube video - in order to set up for using batch prediction." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set up pieces (system instruction, prompt, response schema, config) for Gemini video extraction API calls\n", + "\n", + "video_extraction_system_instruction = \"\"\"You are a video analyst that carefully looks \n", + " through all frames of provided videos, extracting out the pieces necessary to respond to\n", + " user prompts. Make sure to look through and listen to the whole video, start to finish.\n", + " Only reference information in the video itself in your response.\"\"\"\n", + "\n", + "video_extraction_prompt = \"\"\"Provide a 2-3 sentence summary of the key themes from this video,\n", + " and also provide a list of each character, brand, and location that is referenced or shown.\n", + " Make sure to count only those involved in the actual video, and output only 1 entity per row.\"\"\"\n", + "\n", + "\n", + "video_extraction_response_schema = {\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"summary\": {\"type\": \"string\"},\n", + " \"references\": {\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"entity_name\": {\"type\": \"string\"},\n", + " \"entity_type\": {\n", + " \"type\": \"string\",\n", + " \"enum\": [\"character\", \"brand\", \"location\"],\n", + " },\n", + " },\n", + " },\n", + " },\n", + " },\n", + " },\n", + "}\n", + "\n", + "video_extraction_generation_config = {\n", + " \"temperature\": 0.0,\n", + " \"max_output_tokens\": 8192,\n", + " \"response_mime_type\": \"application/json\",\n", + " \"response_schema\": video_extraction_response_schema,\n", + "}\n", + "\n", + "# Function to build CURL request for given YT link, using pieces above\n", + "def get_video_extraction_curl_request_for_yt_video_link(youtube_video_link):\n", + " video_extraction_curl_request_dict = {\n", + " 
\"system_instruction\": {\n", + " \"parts\": [{\"text\": video_extraction_system_instruction}]\n", + " },\n", + " \"contents\": [\n", + " {\n", + " \"role\": \"user\",\n", + " \"parts\": [\n", + " {\"text\": video_extraction_prompt},\n", + " {\n", + " \"file_data\": {\n", + " \"mimeType\": \"video/*\",\n", + " \"fileUri\": youtube_video_link,\n", + " }\n", + " },\n", + " ],\n", + " }\n", + " ],\n", + " \"generation_config\": video_extraction_generation_config,\n", + " }\n", + "\n", + " video_extraction_curl_request = json.dumps(video_extraction_curl_request_dict)\n", + "\n", + " return video_extraction_curl_request" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Gemini API CURL request for each YT video" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
videoIdvideoURLvideoTitlevideoDescriptionchannelIdchannelTitlepublishedAtthumbnailURLrequest
0OnMX4fIgSnohttps://www.youtube.com/watch?v=OnMX4fIgSnoTrailer Park Boys Podcast Episode 55 - Ricky I...Another vintage Trailer Park Boys Podcast - wi...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-07-04T17:04:40Zhttps://i.ytimg.com/vi/OnMX4fIgSno/default.jpg{\"system_instruction\": {\"parts\": [{\"text\": \"Yo...
1PhIzGATStlshttps://www.youtube.com/watch?v=PhIzGATStlsBubbles And The Shitrockers - I Only Got Eyes ...Country music just got more DECENT!! Bubbles a...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-09-27T16:00:33Zhttps://i.ytimg.com/vi/PhIzGATStls/default.jpg{\"system_instruction\": {\"parts\": [{\"text\": \"Yo...
2mK5oSQObfSghttps://www.youtube.com/watch?v=mK5oSQObfSgStanding On The Shoulders Of Kitties - Now ava...Standing On The Shoulders Of Kitties now avail...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-10-17T15:49:31Zhttps://i.ytimg.com/vi/mK5oSQObfSg/default.jpg{\"system_instruction\": {\"parts\": [{\"text\": \"Yo...
3VuAICCz2iBUhttps://www.youtube.com/watch?v=VuAICCz2iBUTrailer Park Boys Chips at Giant Tiger!The Boys got their faces - and chips - on the ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-06-14T14:11:36Zhttps://i.ytimg.com/vi/VuAICCz2iBU/default.jpg{\"system_instruction\": {\"parts\": [{\"text\": \"Yo...
4o8iYmeqXU20https://www.youtube.com/watch?v=o8iYmeqXU20Gettin&#39; Cooked With Ricky - Sneak Preview!Where the best setting is BAKED... Gettin' Coo...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-18T16:00:06Zhttps://i.ytimg.com/vi/o8iYmeqXU20/default.jpg{\"system_instruction\": {\"parts\": [{\"text\": \"Yo...
\n", + "
" + ], + "text/plain": [ + " videoId videoURL \\\n", + "0 OnMX4fIgSno https://www.youtube.com/watch?v=OnMX4fIgSno \n", + "1 PhIzGATStls https://www.youtube.com/watch?v=PhIzGATStls \n", + "2 mK5oSQObfSg https://www.youtube.com/watch?v=mK5oSQObfSg \n", + "3 VuAICCz2iBU https://www.youtube.com/watch?v=VuAICCz2iBU \n", + "4 o8iYmeqXU20 https://www.youtube.com/watch?v=o8iYmeqXU20 \n", + "\n", + " videoTitle \\\n", + "0 Trailer Park Boys Podcast Episode 55 - Ricky I... \n", + "1 Bubbles And The Shitrockers - I Only Got Eyes ... \n", + "2 Standing On The Shoulders Of Kitties - Now ava... \n", + "3 Trailer Park Boys Chips at Giant Tiger! \n", + "4 Gettin' Cooked With Ricky - Sneak Preview! \n", + "\n", + " videoDescription \\\n", + "0 Another vintage Trailer Park Boys Podcast - wi... \n", + "1 Country music just got more DECENT!! Bubbles a... \n", + "2 Standing On The Shoulders Of Kitties now avail... \n", + "3 The Boys got their faces - and chips - on the ... \n", + "4 Where the best setting is BAKED... Gettin' Coo... \n", + "\n", + " channelId channelTitle publishedAt \\\n", + "0 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-07-04T17:04:40Z \n", + "1 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-09-27T16:00:33Z \n", + "2 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-10-17T15:49:31Z \n", + "3 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-06-14T14:11:36Z \n", + "4 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-18T16:00:06Z \n", + "\n", + " thumbnailURL \\\n", + "0 https://i.ytimg.com/vi/OnMX4fIgSno/default.jpg \n", + "1 https://i.ytimg.com/vi/PhIzGATStls/default.jpg \n", + "2 https://i.ytimg.com/vi/mK5oSQObfSg/default.jpg \n", + "3 https://i.ytimg.com/vi/VuAICCz2iBU/default.jpg \n", + "4 https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg \n", + "\n", + " request \n", + "0 {\"system_instruction\": {\"parts\": [{\"text\": \"Yo... \n", + "1 {\"system_instruction\": {\"parts\": [{\"text\": \"Yo... \n", + "2 {\"system_instruction\": {\"parts\": [{\"text\": \"Yo... 
\n", + "3 {\"system_instruction\": {\"parts\": [{\"text\": \"Yo... \n", + "4 {\"system_instruction\": {\"parts\": [{\"text\": \"Yo... " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "yt_data_api_channel_results_df[\"request\"] = yt_data_api_channel_results_df.apply(\n", + " lambda row: get_video_extraction_curl_request_for_yt_video_link(row[\"videoURL\"]),\n", + " axis=1,\n", + ")\n", + "\n", + "display(yt_data_api_channel_results_df.head())" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('{\"system_instruction\": {\"parts\": [{\"text\": \"You are a video analyst that '\n", + " 'carefully looks \\\\n through all frames of provided videos, extracting out '\n", + " 'the pieces necessary to respond to\\\\n user prompts. Make sure to look '\n", + " 'through and listen to the whole video, start to finish.\\\\n Only reference '\n", + " 'information in the video itself in your response.\"}]}, \"contents\": [{\"role\": '\n", + " '\"user\", \"parts\": [{\"text\": \"Provide a 2-3 sentence summary of the key themes '\n", + " 'from this video,\\\\n and also provide a list of each character, celebrity, '\n", + " 'and brand that is referenced or\\\\n shown. Refer to people by first name '\n", + " '(e.g., \\\\\"Ricky\\\\\" instead of \\\\\"Ricky LaFleur\\\\\") unless their last name '\n", + " 'is provided. 
\\\\n If a character or celebrity are referred to by first and '\n", + " 'last name, please use both of these.\\\\n Make sure to count only those '\n", + " 'involved in the actual video, and output only 1 entity per row.\"}, '\n", + " '{\"file_data\": {\"mimeType\": \"video/*\", \"fileUri\": '\n", + " '\"https://www.youtube.com/watch?v=OnMX4fIgSno\"}}]}], \"generation_config\": '\n", + " '{\"temperature\": 0.0, \"max_output_tokens\": 8192, \"response_mime_type\": '\n", + " '\"application/json\", \"response_schema\": {\"type\": \"array\", \"items\": {\"type\": '\n", + " '\"object\", \"properties\": {\"summary\": {\"type\": \"string\"}, \"references\": '\n", + " '{\"type\": \"array\", \"items\": {\"type\": \"object\", \"properties\": {\"entity_name\": '\n", + " '{\"type\": \"string\"}, \"entity_type\": {\"type\": \"string\", \"enum\": [\"character\", '\n", + " '\"celebrity\", \"brand\"]}}}}}}}}}')\n" + ] + } + ], + "source": [ + "pprint(yt_data_api_channel_results_df['request'].iloc[0])\n", + "# yt_data_api_channel_results_df['request'].iloc[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load API responses to BigQuery\n", + "\n", + "> Output table with YouTube API results and corresponding Gemini requests to BigQuery" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/google/home/jordantotten/repo/zghost/zghost/.venv/lib/python3.12/site-packages/google/cloud/bigquery/_pandas_helpers.py:489: FutureWarning: Loading pandas DataFrame into BigQuery will require pandas-gbq package version 0.26.1 or greater in the future. 
Tried to import pandas-gbq and got: No module named 'pandas_gbq'\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/plain": [ + "LoadJob" + ] + }, + "execution_count": 57, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "yt_api_results_with_bp_requests_table_load_job = BQ_CLIENT.load_table_from_dataframe(\n", + " yt_data_api_channel_results_df,\n", + " f\"{BQ_DATASET}.{BATCH_PREDICTION_REQUESTS_TABLE}\",\n", + " job_config=bigquery.LoadJobConfig(write_disposition=\"WRITE_TRUNCATE\"),\n", + ")\n", + "\n", + "# Wait for the load job to complete\n", + "yt_api_results_with_bp_requests_table_load_job.result()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Submit batch prediction job to analyze multiple YouTube videos at once\n", + "\n", + "You create a batch prediction job using the `BatchPredictionJob.submit()` method ([src](https://github.com/googleapis/python-aiplatform/blob/main/google/cloud/aiplatform/jobs.py#L321)), and specifying the `source model ID`, `input source`, and `output location` - either Cloud Storage or BigQuery. To learn more, see the [batch prediction API page](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/batch-prediction-api).\n", + "\n", + "Below, we'll use the BigQuery table with requests created in the previous section as input, and output results to another BigQuery table for further analysis." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INPUT_URI: bq://hybrid-vertex.youtube_video_analysis.video_analysis_batch_requests\n", + "OUTPUT_URI: bq://hybrid-vertex.youtube_video_analysis.video_analysis_batch_results\n", + "MODEL_ID: gemini-1.5-flash-002\n" + ] + } + ], + "source": [ + "# BQ URI of input table in form bq://PROJECT_ID.DATASET.TABLE\n", + "# or Cloud Storage bucket URI\n", + "INPUT_URI = f\"bq://{PROJECT_ID}.{BQ_DATASET}.{BATCH_PREDICTION_REQUESTS_TABLE}\"\n", + "\n", + "# BQ URI of target output table in form bq://PROJECT_ID.DATASET.TABLE\n", + "# If the table doesn't already exist, then it is created for you\n", + "OUTPUT_URI = f\"bq://{PROJECT_ID}.{BQ_DATASET}.{BATCH_PREDICTION_RESULTS_TABLE}\"\n", + "\n", + "# Pick which Gemini model to use here (default Flash)\n", + "MODEL_ID = GEMINI_FLASH_MODEL_ID # ['GEMINI_FLASH_MODEL_ID', 'GEMINI_PRO_MODEL_ID']\n", + "\n", + "print(f\"INPUT_URI: {INPUT_URI}\")\n", + "print(f\"OUTPUT_URI: {OUTPUT_URI}\")\n", + "print(f\"MODEL_ID: {MODEL_ID}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Submit batch prediction request using Vertex AI SDK\n", + "\n", + "If the batch prediction job goes through, the output above should contain a link you can use to monitor the job in the [Vertex AI Batch predictions page](https://console.cloud.google.com/vertex-ai/batch-predictions) in the Google Cloud console." + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "BatchPredictionJob created. 
Resource name: projects/934903580331/locations/us-central1/batchPredictionJobs/3824643792695197696\n", + "To use this BatchPredictionJob in another session:\n", + "job = batch_prediction.BatchPredictionJob('projects/934903580331/locations/us-central1/batchPredictionJobs/3824643792695197696')\n", + "View Batch Prediction Job:\n", + "https://console.cloud.google.com/ai/platform/locations/us-central1/batch-predictions/3824643792695197696?project=934903580331\n" + ] + } + ], + "source": [ + "# Submit batch prediction request using Vertex AI SDK\n", + "batch_prediction_job = BatchPredictionJob.submit(\n", + " source_model=MODEL_ID, \n", + " input_dataset=INPUT_URI, \n", + " output_uri_prefix=OUTPUT_URI\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also print out the job status and other properties:" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Job resource name: projects/934903580331/locations/us-central1/batchPredictionJobs/3824643792695197696\n", + "Model resource name: publishers/google/models/gemini-1.5-flash-002\n", + "Job state: JOB_STATE_RUNNING\n" + ] + } + ], + "source": [ + "print(f\"Job resource name: {batch_prediction_job.resource_name}\")\n", + "print(f\"Model resource name: {batch_prediction_job.model_name}\")\n", + "print(f\"Job state: {batch_prediction_job.state.name}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### check job status" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Job succeeded!\n" + ] + } + ], + "source": [ + "# Refresh batch prediction job until complete\n", + "while not batch_prediction_job.has_ended:\n", + " time.sleep(5)\n", + " batch_prediction_job.refresh()\n", + "\n", + "# Check if the job succeeds\n", + "if 
batch_prediction_job.has_succeeded:\n", + " print(\"Job succeeded!\")\n", + "else:\n", + " print(f\"Job failed: {batch_prediction_job.error}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Check sample of results in BigQuery\n", + "\n", + "> Once the batch prediction job has finished successfully, we can run the following cell to check a sample of our results" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/google/home/jordantotten/repo/zghost/zghost/.venv/lib/python3.12/site-packages/google/cloud/bigquery/table.py:1933: UserWarning: BigQuery Storage module not found, fetch data with the REST endpoint instead.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/markdown": [ + "Batch Prediction BigQuery Results Table" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
videoIdvideoURLvideoTitlevideoDescriptionchannelIdchannelTitlepublishedAtthumbnailURLstatusprocessed_timerequestresponse
0o8iYmeqXU20https://www.youtube.com/watch?v=o8iYmeqXU20Gettin&#39; Cooked With Ricky - Sneak Preview!Where the best setting is BAKED... Gettin' Coo...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-03-18T16:00:06Zhttps://i.ytimg.com/vi/o8iYmeqXU20/default.jpg2025-03-26 12:42:34.528000+00:00{\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ...{\"candidates\":[{\"avgLogprobs\":-0.0648541268848...
139f-BtsFffchttps://www.youtube.com/watch?v=39f-BtsFffcBubbles&#39; Lost Pet Appeal - Who&#39;s Getti...Bubbles needs your help! Send a photo or video...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-10-25T13:00:04Zhttps://i.ytimg.com/vi/39f-BtsFffc/default.jpg2025-03-26 12:42:38.573000+00:00{\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ...{\"candidates\":[{\"avgLogprobs\":-0.0811032310861...
2RXr4SAfR8pQhttps://www.youtube.com/watch?v=RXr4SAfR8pQBubbles has a message for ya! #bubblesandthesh...'I Only Got Eyes For You' now streaming on you...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-10-03T17:00:32Zhttps://i.ytimg.com/vi/RXr4SAfR8pQ/default.jpg2025-03-26 12:42:38.495000+00:00{\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ...{\"candidates\":[{\"avgLogprobs\":-0.0277483045718...
36NQCPCsO4g0https://www.youtube.com/watch?v=6NQCPCsO4g0Watch a Special Park After Dark on Christmas Day!Unwrap your presents, get your morning liquor ...UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2024-12-24T18:45:00Zhttps://i.ytimg.com/vi/6NQCPCsO4g0/default.jpg2025-03-26 12:42:34.486000+00:00{\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ...{\"candidates\":[{\"avgLogprobs\":-0.0996898842728...
4XpqlE5BDFUUhttps://www.youtube.com/watch?v=XpqlE5BDFUUGrocery Gank Gunfight #trailerparkboys #jrocJust another day in Sunnyvale Trailer Park!UCvW9uSNy6Lytcnib1CdXrowTrailer Park Boys2025-01-30T16:00:38Zhttps://i.ytimg.com/vi/XpqlE5BDFUU/default.jpg2025-03-26 12:42:36.252000+00:00{\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ...{\"candidates\":[{\"avgLogprobs\":-0.1039502750743...
\n", + "
" + ], + "text/plain": [ + " videoId videoURL \\\n", + "0 o8iYmeqXU20 https://www.youtube.com/watch?v=o8iYmeqXU20 \n", + "1 39f-BtsFffc https://www.youtube.com/watch?v=39f-BtsFffc \n", + "2 RXr4SAfR8pQ https://www.youtube.com/watch?v=RXr4SAfR8pQ \n", + "3 6NQCPCsO4g0 https://www.youtube.com/watch?v=6NQCPCsO4g0 \n", + "4 XpqlE5BDFUU https://www.youtube.com/watch?v=XpqlE5BDFUU \n", + "\n", + " videoTitle \\\n", + "0 Gettin' Cooked With Ricky - Sneak Preview! \n", + "1 Bubbles' Lost Pet Appeal - Who's Getti... \n", + "2 Bubbles has a message for ya! #bubblesandthesh... \n", + "3 Watch a Special Park After Dark on Christmas Day! \n", + "4 Grocery Gank Gunfight #trailerparkboys #jroc \n", + "\n", + " videoDescription \\\n", + "0 Where the best setting is BAKED... Gettin' Coo... \n", + "1 Bubbles needs your help! Send a photo or video... \n", + "2 'I Only Got Eyes For You' now streaming on you... \n", + "3 Unwrap your presents, get your morning liquor ... \n", + "4 Just another day in Sunnyvale Trailer Park! 
\n", + "\n", + " channelId channelTitle publishedAt \\\n", + "0 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-03-18T16:00:06Z \n", + "1 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-10-25T13:00:04Z \n", + "2 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-10-03T17:00:32Z \n", + "3 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2024-12-24T18:45:00Z \n", + "4 UCvW9uSNy6Lytcnib1CdXrow Trailer Park Boys 2025-01-30T16:00:38Z \n", + "\n", + " thumbnailURL status \\\n", + "0 https://i.ytimg.com/vi/o8iYmeqXU20/default.jpg \n", + "1 https://i.ytimg.com/vi/39f-BtsFffc/default.jpg \n", + "2 https://i.ytimg.com/vi/RXr4SAfR8pQ/default.jpg \n", + "3 https://i.ytimg.com/vi/6NQCPCsO4g0/default.jpg \n", + "4 https://i.ytimg.com/vi/XpqlE5BDFUU/default.jpg \n", + "\n", + " processed_time \\\n", + "0 2025-03-26 12:42:34.528000+00:00 \n", + "1 2025-03-26 12:42:38.573000+00:00 \n", + "2 2025-03-26 12:42:38.495000+00:00 \n", + "3 2025-03-26 12:42:34.486000+00:00 \n", + "4 2025-03-26 12:42:36.252000+00:00 \n", + "\n", + " request \\\n", + "0 {\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ... \n", + "1 {\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ... \n", + "2 {\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ... \n", + "3 {\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ... \n", + "4 {\"contents\":[{\"parts\":[{\"text\":\"Provide a 2-3 ... \n", + "\n", + " response \n", + "0 {\"candidates\":[{\"avgLogprobs\":-0.0648541268848... \n", + "1 {\"candidates\":[{\"avgLogprobs\":-0.0811032310861... \n", + "2 {\"candidates\":[{\"avgLogprobs\":-0.0277483045718... \n", + "3 {\"candidates\":[{\"avgLogprobs\":-0.0996898842728... \n", + "4 {\"candidates\":[{\"avgLogprobs\":-0.1039502750743... 
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pick sampling % and # of results for check of BQ results table - can leave\n", + "# 100% & total # of results for big tables, likely sample down for larger ones\n", + "\n", + "sampling_percentage = 100\n", + "\n", + "num_results = 50\n", + "\n", + "batch_prediction_results_sample_query = f\"\"\"\n", + " SELECT * \n", + " FROM `{BQ_DATASET}.{BATCH_PREDICTION_RESULTS_TABLE}`\n", + " TABLESAMPLE SYSTEM ({sampling_percentage} PERCENT)\n", + " LIMIT {num_results}\n", + " \"\"\"\n", + "\n", + "bq_results_table = get_bq_query_results_as_df(batch_prediction_results_sample_query)\n", + "\n", + "display(Markdown(\"Batch Prediction BigQuery Results Table\"))\n", + "\n", + "display(bq_results_table.head())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Further analysis of Gemini video extraction results\n", + "\n", + "With our results from Gemini video extraction in BigQuery, we can pull out various pieces that might interest us. It's possible to do this further analysis in Python or directly in BigQuery - we'll choose the latter here since the results are already there, and [BigQuery's native JSON functionality](https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions) provides convenient ways to pull out relevant outputs at scale." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Extract summaries for each YouTube video" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/google/home/jordantotten/repo/zghost/zghost/.venv/lib/python3.12/site-packages/google/cloud/bigquery/table.py:1933: UserWarning: BigQuery Storage module not found, fetch data with the REST endpoint instead.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/markdown": [ + "Batch YouTube Video Analysis Summary Results" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
urltitledescriptiongeminiSummarypublishedAt
0https://www.youtube.com/watch?v=o8iYmeqXU20Gettin&#39; Cooked With Ricky - Sneak Preview!Where the best setting is BAKED... Gettin' Cooked With Ricky launches on SwearNet Friday, March 28! #trailerparkboys ...Ricky and Randy are co-hosting a cooking show called \"Gettin' Cooked with Ricky.\" The show features the two of them making various dishes while intoxicated. The dishes include donair sauce, cheeseburger meatloaf, pickle pizza, cinnamon roll nachos, and chicken finger tacos.2025-03-18T16:00:06Z
1https://www.youtube.com/watch?v=zV1g0Ny7GtoGet Ready For The Trades S2 - Todd&#39;s RecapYou have one week to catch up #TheTrades ‍♂️ From the producers of #TrailerParkBoys, stream Season 1 of the Crave ...This video is a recap of the last season of The Trades. Todd's sister joins the trades, and Todd tries to improve refinery productivity through automation. After a protest and a pigeon incident, Todd and Chelsea work together to meet with corporate.2025-03-07T23:15:04Z
2https://www.youtube.com/watch?v=e5QY44t4sxcScrump-dilly! #trailerparkboys #maplesyrup #canadalifeBubbles spills his maple syrup production secrets... holy frig that looks decent.Ricky from the Trailer Park Boys filters out bugs and other debris from a container of maple syrup. He expresses frustration with the process and the camera filming him.2025-03-06T13:00:54Z
3https://www.youtube.com/watch?v=Fbqk9xgOsugHas the Green Bastard met his match?! #trailerparkboys #outofthepark #copenhagenStream Out Of The Park, Trailer Park Boys Seasons 1-12 and more for just one friggin' payment of $19.99 at ...Ricky and Brian are discussing a fight plan for the Green Bastard. The Green Bastard will fight Brian, who is a much larger opponent. The plan involves two uppercuts to the balls within 30 seconds.2025-02-25T14:00:16Z
4https://www.youtube.com/watch?v=utt8PCqnMnkThank you, your Majesty 🚬 🤬 #trailerparkboys #canadalifeIf I can't smoke and swear I'm f*****d!This video is a clip from the Canadian television series Trailer Park Boys. Ricky is in court and is arguing with the judge about his right to smoke and swear. He is frustrated because he feels that he cannot properly defend himself without these things.2025-02-20T14:15:06Z
\n", + "
" + ], + "text/plain": [ + " url \\\n", + "0 https://www.youtube.com/watch?v=o8iYmeqXU20 \n", + "1 https://www.youtube.com/watch?v=zV1g0Ny7Gto \n", + "2 https://www.youtube.com/watch?v=e5QY44t4sxc \n", + "3 https://www.youtube.com/watch?v=Fbqk9xgOsug \n", + "4 https://www.youtube.com/watch?v=utt8PCqnMnk \n", + "\n", + " title \\\n", + "0 Gettin' Cooked With Ricky - Sneak Preview! \n", + "1 Get Ready For The Trades S2 - Todd's Recap \n", + "2 Scrump-dilly! #trailerparkboys #maplesyrup #canadalife \n", + "3 Has the Green Bastard met his match?! #trailerparkboys #outofthepark #copenhagen \n", + "4 Thank you, your Majesty 🚬 🤬 #trailerparkboys #canadalife \n", + "\n", + " description \\\n", + "0 Where the best setting is BAKED... Gettin' Cooked With Ricky launches on SwearNet Friday, March 28! #trailerparkboys ... \n", + "1 You have one week to catch up #TheTrades ‍♂️ From the producers of #TrailerParkBoys, stream Season 1 of the Crave ... \n", + "2 Bubbles spills his maple syrup production secrets... holy frig that looks decent. \n", + "3 Stream Out Of The Park, Trailer Park Boys Seasons 1-12 and more for just one friggin' payment of $19.99 at ... \n", + "4 If I can't smoke and swear I'm f*****d! \n", + "\n", + " geminiSummary \\\n", + "0 Ricky and Randy are co-hosting a cooking show called \"Gettin' Cooked with Ricky.\" The show features the two of them making various dishes while intoxicated. The dishes include donair sauce, cheeseburger meatloaf, pickle pizza, cinnamon roll nachos, and chicken finger tacos. \n", + "1 This video is a recap of the last season of The Trades. Todd's sister joins the trades, and Todd tries to improve refinery productivity through automation. After a protest and a pigeon incident, Todd and Chelsea work together to meet with corporate. \n", + "2 Ricky from the Trailer Park Boys filters out bugs and other debris from a container of maple syrup. He expresses frustration with the process and the camera filming him. 
\n", + "3 Ricky and Brian are discussing a fight plan for the Green Bastard. The Green Bastard will fight Brian, who is a much larger opponent. The plan involves two uppercuts to the balls within 30 seconds. \n", + "4 This video is a clip from the Canadian television series Trailer Park Boys. Ricky is in court and is arguing with the judge about his right to smoke and swear. He is frustrated because he feels that he cannot properly defend himself without these things. \n", + "\n", + " publishedAt \n", + "0 2025-03-18T16:00:06Z \n", + "1 2025-03-07T23:15:04Z \n", + "2 2025-03-06T13:00:54Z \n", + "3 2025-02-25T14:00:16Z \n", + "4 2025-02-20T14:15:06Z " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Query to extract summary for each video from JSON Gemini API response\n", + "video_summaries_query = f\"\"\"\n", + " SELECT\n", + " videoUrl AS url,\n", + " videoTitle AS title,\n", + " videoDescription AS description,\n", + "\n", + " JSON_EXTRACT_SCALAR(\n", + " JSON_EXTRACT_ARRAY(\n", + " JSON_VALUE(response, '$.candidates[0].content.parts[0].text')\n", + " )[OFFSET(0)],\n", + " '$.summary'\n", + " ) AS geminiSummary,\n", + "\n", + " publishedAt\n", + "\n", + " FROM\n", + " `{BQ_DATASET}.{BATCH_PREDICTION_RESULTS_TABLE}`\n", + "\n", + " ORDER BY\n", + " publishedAt DESC\n", + " \"\"\"\n", + "\n", + "video_summaries = get_bq_query_results_as_df(video_summaries_query)\n", + "\n", + "# Change column width to be able to read all summary text for each row\n", + "pd.set_option(\"display.max_colwidth\", 500)\n", + "\n", + "# Display results\n", + "display(Markdown(\"Batch YouTube Video Analysis Summary Results\"))\n", + "\n", + "display(video_summaries.head())" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('Ricky and Randy are co-hosting a cooking show called \"Gettin\\' Cooked with '\n", + " 'Ricky.\" The show 
features the two of them making various dishes while '\n", + " 'intoxicated. The dishes include donair sauce, cheeseburger meatloaf, pickle '\n", + " 'pizza, cinnamon roll nachos, and chicken finger tacos.')\n" + ] + } + ], + "source": [ + "pprint(video_summaries['geminiSummary'].iloc[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Find most frequently appearing entities across videos\n", + "\n", + "> In the final step of our process of going from unstructured videos to structured data results from analyzing all those videos, we'll use BigQuery to count up the number of references to each entity across videos, and return those that appear most frequently" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/google/home/jordantotten/repo/zghost/zghost/.venv/lib/python3.12/site-packages/google/cloud/bigquery/table.py:1933: UserWarning: BigQuery Storage module not found, fetch data with the REST endpoint instead.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/markdown": [ + "Most Referenced Entities in Videos Analyzed" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
nametypenum_videos
0Bubblescharacter24
1Juliancharacter19
2Rickycharacter18
3Rickycelebrity13
4Randycharacter11
5Bubblescelebrity10
6Juliancelebrity8
7Swearnetbrand7
8Trailer Park Boysbrand7
9Billy Bob Thorntoncelebrity6
10Ronnie Woodcelebrity5
11Adidasbrand3
12Laheycharacter3
13Trailer Park Boyscelebrity3
14Amsterdambrand2
15Applebrand2
16Berlinbrand2
17Buttonscharacter2
18Cravebrand2
19Glasgowbrand2
20Google Playbrand2
21Liverpoolbrand2
22Londonbrand2
23Old Dutchbrand2
24Praguebrand2
\n", + "
" + ], + "text/plain": [ + " name type num_videos\n", + "0 Bubbles character 24\n", + "1 Julian character 19\n", + "2 Ricky character 18\n", + "3 Ricky celebrity 13\n", + "4 Randy character 11\n", + "5 Bubbles celebrity 10\n", + "6 Julian celebrity 8\n", + "7 Swearnet brand 7\n", + "8 Trailer Park Boys brand 7\n", + "9 Billy Bob Thornton celebrity 6\n", + "10 Ronnie Wood celebrity 5\n", + "11 Adidas brand 3\n", + "12 Lahey character 3\n", + "13 Trailer Park Boys celebrity 3\n", + "14 Amsterdam brand 2\n", + "15 Apple brand 2\n", + "16 Berlin brand 2\n", + "17 Buttons character 2\n", + "18 Crave brand 2\n", + "19 Glasgow brand 2\n", + "20 Google Play brand 2\n", + "21 Liverpool brand 2\n", + "22 London brand 2\n", + "23 Old Dutch brand 2\n", + "24 Prague brand 2" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Query to extract entity references from Gemini results, count most frequently appearing\n", + "most_referenced_entities_query = f\"\"\"\n", + " WITH\n", + " ExtractedText AS\n", + " (\n", + " SELECT\n", + " *,\n", + " JSON_EXTRACT_ARRAY(JSON_VALUE(response, '$.candidates[0].content.parts[0].text'))[OFFSET(0)]\n", + " AS extracted_text\n", + "\n", + " FROM\n", + " `youtube_video_analysis.video_analysis_batch_results`\n", + " ),\n", + "\n", + " ExtractedRows AS\n", + " (\n", + " SELECT\n", + " ARRAY(\n", + " SELECT AS STRUCT \n", + " JSON_EXTRACT_SCALAR(references, '$.entity_name') AS entity_name,\n", + " JSON_EXTRACT_SCALAR(references, '$.entity_type') AS entity_type\n", + "\n", + " FROM \n", + " UNNEST(JSON_EXTRACT_ARRAY(extracted_text, '$.references')) AS references\n", + " ) AS reference,\n", + "\n", + " FROM\n", + " ExtractedText\n", + " )\n", + "\n", + " SELECT\n", + " References.entity_name AS name,\n", + " LOWER(References.entity_type) AS type,\n", + " COUNT(*) AS num_videos\n", + "\n", + " FROM\n", + " ExtractedRows,\n", + " UNNEST(ExtractedRows.reference) AS References\n", + "\n", + " GROUP BY\n", + " entity_name, 
entity_type\n", + "\n", + " ORDER BY\n", + " num_videos DESC,\n", + " name\n", + " \"\"\"\n", + "\n", + "most_referenced_entities = get_bq_query_results_as_df(most_referenced_entities_query)\n", + "\n", + "# Display results\n", + "display(Markdown(\"Most Referenced Entities in Videos Analyzed\"))\n", + "\n", + "display(most_referenced_entities.head(25))\n", + " " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/agents/trends-and-insights-agent/notebooks/installation_scripts/install_ffmpeg.sh b/python/agents/trends-and-insights-agent/notebooks/installation_scripts/install_ffmpeg.sh new file mode 100755 index 00000000..3bee58c9 --- /dev/null +++ b/python/agents/trends-and-insights-agent/notebooks/installation_scripts/install_ffmpeg.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Install ffmpeg on Ubuntu +# This script installs ffmpeg and its dependencies + +set -e + +echo "Installing ffmpeg on Ubuntu..." + +# Update package list +apt-get update + +# Install ffmpeg +apt-get install -y ffmpeg + +# Verify installation +if command -v ffmpeg &> /dev/null; then + echo "ffmpeg installed successfully!" + ffmpeg -version +else + echo "ffmpeg installation failed!" 
+ exit 1 +fi \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/notebooks/installation_scripts/install_opencv.sh b/python/agents/trends-and-insights-agent/notebooks/installation_scripts/install_opencv.sh new file mode 100644 index 00000000..aae70098 --- /dev/null +++ b/python/agents/trends-and-insights-agent/notebooks/installation_scripts/install_opencv.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +apt-get update -y +apt-get install libgl1-mesa-glx libgl1-mesa-dev -y \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/publish_to_agentspace_v2.sh b/python/agents/trends-and-insights-agent/publish_to_agentspace_v2.sh new file mode 100755 index 00000000..bee1b34b --- /dev/null +++ b/python/agents/trends-and-insights-agent/publish_to_agentspace_v2.sh @@ -0,0 +1,394 @@ +#!/bin/bash + +# Script to create or update an agent in Agent Space +# Supports both JSON config files and command line arguments + +set -euo pipefail + +# Default values +ACTION="" +CONFIG_FILE="" +PROJECT_NUMBER="" +PROJECT_ID="" +AS_APP="" +REASONING_ENGINE_ID="" +AGENT_DISPLAY_NAME="" +AGENT_DESCRIPTION="" +AGENT_ID="" +AGENT_INSTRUCTIONS="" +ICON_URI="https://fonts.gstatic.com/s/i/short-term/release/googlesymbols/corporate_fare/default/24px.svg" +AGENTSPACE_LOCATION="us" +AGENT_ENGINE_LOCATION="us-central1" + +# Function to display usage +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " -a, --action Action to perform (required)" + echo " -c, --config JSON configuration file" + echo " -p, --project-id Google Cloud project ID" + echo " -n, --project-number Google Cloud project number" + echo " -e, --app-id Agent Space application ID" + echo " -r, --reasoning-engine Reasoning Engine ID (required for create/update)" + echo " -d, --display-name Agent display name (required for create/update)" + echo " -s, --description Agent description (required for create)" + echo " -i, --agent-id Agent ID (required for update/delete)" + echo " 
-t, --instructions Agent instructions/tool description (required for create)" + echo " -u, --icon-uri Icon URI (optional)" + echo " -asl, --agentspace-location Agentspace App Location (default: us)" + echo " -ael, --agent-engine-location Agent Engine Location (default: us-central1)" + echo " -h, --help Display this help message" + echo "" + echo "Example with config file:" + echo " $0 --action create --config agent_config.json" + echo " $0 --action update --config agent_config.json" + echo " $0 --action list --config agent_config.json" + echo " $0 --action delete --config agent_config.json" + echo "" + echo "Example with command line args:" + echo "" + echo " Create agent:" + echo " $0 --action create --project-id my-project --project-number 12345 \\" + echo " --app-id my-app --reasoning-engine 67890 --display-name 'My Agent' \\" + echo " --description 'Agent description' --instructions 'Agent instructions here'" + echo "" + echo " Update agent:" + echo " $0 --action update --project-id my-project --project-number 12345 \\" + echo " --app-id my-app --reasoning-engine 67890 --display-name 'My Agent' \\" + echo " --agent-id 123456789 --description 'Updated description'" + echo "" + echo " List agents:" + echo " $0 --action list --project-id my-project --project-number 12345 \\" + echo " --app-id my-app" + echo "" + echo " Delete agent:" + echo " $0 --action delete --project-id my-project --project-number 12345 \\" + echo " --app-id my-app --agent-id 123456789" + exit 1 +} + +# Function to load config from JSON file +load_config() { + local config_file=$1 + + if [[ ! -f "$config_file" ]]; then + echo "Error: Configuration file '$config_file' not found" >&2 + exit 1 + fi + + # Use jq to parse JSON config + if ! command -v jq &> /dev/null; then + echo "Error: 'jq' is required to parse JSON config files. Please install it." 
>&2 + exit 1 + fi + + # Load values from JSON, only if not already set via command line + [[ -z "$PROJECT_ID" ]] && PROJECT_ID=$(jq -r '.project_id // empty' "$config_file") + [[ -z "$PROJECT_NUMBER" ]] && PROJECT_NUMBER=$(jq -r '.project_number // empty' "$config_file") + [[ -z "$AS_APP" ]] && AS_APP=$(jq -r '.app_id // empty' "$config_file") + [[ -z "$REASONING_ENGINE_ID" ]] && REASONING_ENGINE_ID=$(jq -r '.reasoning_engine_id // empty' "$config_file") + [[ -z "$AGENT_DISPLAY_NAME" ]] && AGENT_DISPLAY_NAME=$(jq -r '.display_name // empty' "$config_file") + [[ -z "$AGENT_DESCRIPTION" ]] && AGENT_DESCRIPTION=$(jq -r '.description // empty' "$config_file") + [[ -z "$AGENT_ID" ]] && AGENT_ID=$(jq -r '.agent_id // empty' "$config_file") + [[ -z "$AGENT_INSTRUCTIONS" ]] && AGENT_INSTRUCTIONS=$(jq -r '.instructions // empty' "$config_file") + ICON_URI=$(jq -r --arg d "$ICON_URI" '.icon_uri // $d' "$config_file") + AGENTSPACE_LOCATION=$(jq -r --arg d "$AGENTSPACE_LOCATION" '.agentspace_location // $d' "$config_file") + AGENT_ENGINE_LOCATION=$(jq -r --arg d "$AGENT_ENGINE_LOCATION" '.agent_engine_location // $d' "$config_file") +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + -a|--action) + ACTION="$2" + shift 2 + ;; + -c|--config) + CONFIG_FILE="$2" + shift 2 + ;; + -p|--project-id) + PROJECT_ID="$2" + shift 2 + ;; + -n|--project-number) + PROJECT_NUMBER="$2" + shift 2 + ;; + -e|--app-id) + AS_APP="$2" + shift 2 + ;; + -r|--reasoning-engine) + REASONING_ENGINE_ID="$2" + shift 2 + ;; + -d|--display-name) + AGENT_DISPLAY_NAME="$2" + shift 2 + ;; + -s|--description) + AGENT_DESCRIPTION="$2" + shift 2 + ;; + -i|--agent-id) + AGENT_ID="$2" + shift 2 + ;; + -t|--instructions) + AGENT_INSTRUCTIONS="$2" + shift 2 + ;; + -u|--icon-uri) + ICON_URI="$2" + shift 2 + ;; + -asl|--agentspace-location) + AGENTSPACE_LOCATION="$2" + shift 2 + ;; + -ael|--agent-engine-location) + AGENT_ENGINE_LOCATION="$2" + shift 2 + ;; + -h|--help) + 
usage + ;; + *) + echo "Error: Unknown option $1" >&2 + usage + ;; + esac +done + +# Load config file if specified +if [[ -n "$CONFIG_FILE" ]]; then + echo "Loading configuration from: $CONFIG_FILE" + load_config "$CONFIG_FILE" +fi + +# Validate required parameters +if [[ -z "$ACTION" ]]; then + echo "Error: Action (--action) is required" >&2 + usage +fi + +if [[ "$ACTION" != "create" && "$ACTION" != "update" && "$ACTION" != "list" && "$ACTION" != "delete" ]]; then + echo "Error: Action must be 'create', 'update', 'list', or 'delete'" >&2 + usage +fi + +# Validate required fields based on action +missing_params=() + +if [[ "$ACTION" == "list" ]]; then + # For list action, we only need minimal parameters + [[ -z "$PROJECT_ID" ]] && missing_params+=("project-id") + [[ -z "$PROJECT_NUMBER" ]] && missing_params+=("project-number") + [[ -z "$AS_APP" ]] && missing_params+=("app-id") +elif [[ "$ACTION" == "delete" ]]; then + # For delete action, we need these parameters + [[ -z "$PROJECT_ID" ]] && missing_params+=("project-id") + [[ -z "$PROJECT_NUMBER" ]] && missing_params+=("project-number") + [[ -z "$AS_APP" ]] && missing_params+=("app-id") + [[ -z "$AGENT_ID" ]] && missing_params+=("agent-id") +elif [[ "$ACTION" == "update" ]]; then + # For create and update actions, we need all parameters + [[ -z "$PROJECT_ID" ]] && missing_params+=("project-id") + [[ -z "$PROJECT_NUMBER" ]] && missing_params+=("project-number") + [[ -z "$AS_APP" ]] && missing_params+=("app-id") + [[ -z "$REASONING_ENGINE_ID" ]] && missing_params+=("reasoning-engine") + [[ -z "$AGENT_DISPLAY_NAME" ]] && missing_params+=("display-name") + [[ -z "$AGENT_ID" ]] && missing_params+=("agent-id") +else + # For create and update actions, we need all parameters + [[ -z "$PROJECT_ID" ]] && missing_params+=("project-id") + [[ -z "$PROJECT_NUMBER" ]] && missing_params+=("project-number") + [[ -z "$AS_APP" ]] && missing_params+=("app-id") + [[ -z "$REASONING_ENGINE_ID" ]] && missing_params+=("reasoning-engine") + 
[[ -z "$AGENT_DISPLAY_NAME" ]] && missing_params+=("display-name") + + if [[ "$ACTION" == "create" ]]; then + [[ -z "$AGENT_INSTRUCTIONS" ]] && missing_params+=("instructions") + [[ -z "$AGENT_DESCRIPTION" ]] && missing_params+=("description") + fi +fi + +if [[ ${#missing_params[@]} -gt 0 ]]; then + echo "Error: Missing required parameters: ${missing_params[*]}" >&2 + echo "Use --help for usage information" >&2 + exit 1 +fi + +# Build reasoning engine path (only for create/update actions) +if [[ "$ACTION" == "create" || "$ACTION" == "update" ]]; then + REASONING_ENGINE="projects/${PROJECT_NUMBER}/locations/${AGENT_ENGINE_LOCATION}/reasoningEngines/${REASONING_ENGINE_ID}" +fi + +# Display configuration +echo "==================================" +echo "Agent Space Deployment Configuration" +echo "==================================" +echo "Action: $ACTION" +echo "Project ID: $PROJECT_ID" +echo "Project Number: $PROJECT_NUMBER" +echo "App ID: $AS_APP" +echo "Agentspace Location: $AGENTSPACE_LOCATION" +echo "Agent Engine Location: $AGENT_ENGINE_LOCATION" + +if [[ "$ACTION" == "create" || "$ACTION" == "update" ]]; then + echo "Reasoning Engine: $REASONING_ENGINE" + echo "Agent Display Name: $AGENT_DISPLAY_NAME" + if [[ -n "$AGENT_DESCRIPTION" ]]; then + echo "Agent Description: $AGENT_DESCRIPTION" + fi +fi + +if [[ "$ACTION" == "update" || "$ACTION" == "delete" ]]; then + echo "Agent ID: $AGENT_ID" +fi + +if [[ "$ACTION" == "create" && -n "$AGENT_INSTRUCTIONS" ]]; then + echo "Instructions: ${AGENT_INSTRUCTIONS:0:50}..." # Show first 50 chars +fi + +echo "==================================" +echo "" + +# Check if user is authenticated +if ! gcloud auth print-access-token &> /dev/null; then + echo "Error: Not authenticated with Google Cloud. Please run 'gcloud auth login'" >&2 + exit 1 +fi + +# Get access token +ACCESS_TOKEN=$(gcloud auth print-access-token) + +if [[ "$ACTION" == "create" ]]; then + echo "Creating new agent..." 
+ + # Create agent using POST request + response=$(curl -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H "Content-Type: application/json" \ + -H "X-Goog-User-Project: ${PROJECT_ID}" \ + "https://${AGENTSPACE_LOCATION}-discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/${AGENTSPACE_LOCATION}/collections/default_collection/engines/${AS_APP}/assistants/default_assistant/agents" \ + -d '{ + "displayName": "'"${AGENT_DISPLAY_NAME}"'", + "description": "'"${AGENT_DESCRIPTION}"'", + "icon": { + "uri": "'"${ICON_URI}"'" + }, + "adk_agent_definition": { + "tool_settings": { + "tool_description": "'"${AGENT_INSTRUCTIONS}"'" + }, + "provisioned_reasoning_engine": { + "reasoning_engine": "'"${REASONING_ENGINE}"'" + } + } + }' 2>&1) + + # Check if the request was successful + if echo "$response" | grep -q '"name"'; then + echo "Success! Agent created successfully." + echo "Response: $response" + else + echo "Error: Failed to create agent" >&2 + echo "Response: $response" >&2 + exit 1 + fi + +elif [[ "$ACTION" == "update" ]]; then + echo "Updating existing agent..." 
+ + # Build agent resource name + AGENT_RESOURCE_NAME="projects/${PROJECT_NUMBER}/locations/${AGENTSPACE_LOCATION}/collections/default_collection/engines/${AS_APP}/assistants/default_assistant/agents/${AGENT_ID}" + + # Update agent using PATCH request with new structure + response=$(curl -X PATCH \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H "Content-Type: application/json" \ + -H "X-Goog-User-Project: ${PROJECT_ID}" \ + "https://${AGENTSPACE_LOCATION}-discoveryengine.googleapis.com/v1alpha/${AGENT_RESOURCE_NAME}" \ + -d '{ + "displayName": "'"${AGENT_DISPLAY_NAME}"'", + "description": "'"${AGENT_DESCRIPTION}"'", + "adk_agent_definition": { + "tool_settings": { + "tool_description": "'"${AGENT_INSTRUCTIONS}"'" + }, + "provisioned_reasoning_engine": { + "reasoning_engine": "'"${REASONING_ENGINE}"'" + } + } + }' 2>&1) + + # Check if the request was successful + if echo "$response" | grep -q '"name"'; then + echo "Success! Agent updated successfully." + echo "Response: $response" + else + echo "Error: Failed to update agent" >&2 + echo "Response: $response" >&2 + exit 1 + fi + +elif [[ "$ACTION" == "list" ]]; then + echo "Listing agents..." + + # List agents using GET request + response=$(curl -X GET \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H "Content-Type: application/json" \ + -H "X-Goog-User-Project: ${PROJECT_ID}" \ + "https://${AGENTSPACE_LOCATION}-discoveryengine.googleapis.com/v1alpha/projects/${PROJECT_NUMBER}/locations/${AGENTSPACE_LOCATION}/collections/default_collection/engines/${AS_APP}/assistants/default_assistant/agents" 2>&1) + + # Check if the request was successful + if echo "$response" | grep -q '"agents"'; then + echo "Success! Agents listed successfully." + echo "" + echo "Response:" + # Pretty print the JSON response if jq is available + if command -v jq &> /dev/null; then + # Remove curl progress output and only pass JSON to jq + echo "$response" | grep -v "%" | jq . 
2>/dev/null || echo "$response" + else + echo "$response" + fi + else + echo "Error: Failed to list agents" >&2 + echo "Response: $response" >&2 + exit 1 + fi + +elif [[ "$ACTION" == "delete" ]]; then + echo "Deleting agent..." + + # Build agent resource name + AGENT_RESOURCE_NAME="projects/${PROJECT_NUMBER}/locations/${AGENTSPACE_LOCATION}/collections/default_collection/engines/${AS_APP}/assistants/default_assistant/agents/${AGENT_ID}" + + # Delete agent using DELETE request + response=$(curl -X DELETE \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H "Content-Type: application/json" \ + -H "X-Goog-User-Project: ${PROJECT_ID}" \ + "https://${AGENTSPACE_LOCATION}-discoveryengine.googleapis.com/v1alpha/${AGENT_RESOURCE_NAME}" 2>&1) + + # Check if the request was successful + # DELETE requests might return empty response on success + if [[ -z "$response" ]] || echo "$response" | grep -q '"name"'; then + echo "Success! Agent deleted successfully." + if [[ -n "$response" ]]; then + echo "Response: $response" + fi + else + echo "Error: Failed to delete agent" >&2 + echo "Response: $response" >&2 + exit 1 + fi +fi + +echo "" +echo "Operation complete!" 
diff --git a/python/agents/trends-and-insights-agent/pyproject.toml b/python/agents/trends-and-insights-agent/pyproject.toml new file mode 100644 index 00000000..c0879a9e --- /dev/null +++ b/python/agents/trends-and-insights-agent/pyproject.toml @@ -0,0 +1,32 @@ +[tool.poetry] +name = "trends_and_insights_agent" +version = "0.1.0" +description = "" +authors = [ + "Jeremy Wortz ", + "Jordan Totten ", +] +readme = "README.md" +package-mode = true + +[tool.poetry.dependencies] +python = ">=3.11,<4.0" +jupyter = "^1.1.1" +google-genai = "^1.19.0" +pillow = "^11.1.0" +google-adk = "1.7.0" +google-cloud-aiplatform = { extras = ["agent-engines"], version = "^1.104.0" } +google-cloud-bigquery = "^3.34.0" +pandas = "^2.2.3" +aiohttp = "^3.11.16" +markdown-pdf = "^1.7" +pytest = "^8.3.5" +ipython = "^9.2.0" +db-dtypes = "^1.4.3" +tabulate = "^0.9.0" +opencv-python = "^4.12.0.88" + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/python/agents/trends-and-insights-agent/setup_ae_sm_access.sh b/python/agents/trends-and-insights-agent/setup_ae_sm_access.sh new file mode 100644 index 00000000..728f517c --- /dev/null +++ b/python/agents/trends-and-insights-agent/setup_ae_sm_access.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# This is to run one-time to give Agent Engine access to the Youtube API Secret in Secret Manager + +source trends_and_insights_agent/.env + +export RE_SA="service-${GOOGLE_CLOUD_PROJECT_NUMBER}@gcp-sa-aiplatform-re.iam.gserviceaccount.com" +gcloud secrets add-iam-policy-binding "projects/$GOOGLE_CLOUD_PROJECT_NUMBER/secrets/$YT_SECRET_MNGR_NAME" \ + --member="serviceAccount:$RE_SA" \ + --role="roles/secretmanager.secretAccessor" diff --git a/python/agents/trends-and-insights-agent/tests/__init__.py b/python/agents/trends-and-insights-agent/tests/__init__.py new file mode 100644 index 00000000..311a9168 --- /dev/null +++ b/python/agents/trends-and-insights-agent/tests/__init__.py @@ -0,0 +1 @@ +from tests.test_init 
import * \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/tests/capability.test.json b/python/agents/trends-and-insights-agent/tests/capability.test.json new file mode 100644 index 00000000..da98c9e9 --- /dev/null +++ b/python/agents/trends-and-insights-agent/tests/capability.test.json @@ -0,0 +1,13 @@ +[ + { + "query": "What can you do?", + "expected_tool_use": [], + "expected_intermediate_agent_responses": [ + { + "author": "marketing_idea_generator_agent", + "text": "Hello! I'm your AI Marketing Research & Strategy Assistant. I can help you develop comprehensive marketing campaigns by providing insights, creative ideas, and trend analysis.\n\nTo get started, do you have a marketing campaign guide you can share? It could be a URL, a PDF, or even just the text of the guide.\n\nFor example, you could say:\n\n* \"I have a campaign guide at [URL].\"\n* \"Here's the text of my campaign guide: [paste text here].\"\n* \"I want to create a campaign for [product/service] targeting [target audience] with a budget of [budget].\"\n\nOnce I have your guide, I can extract the key information and then use my other tools to help you brainstorm ideas, research trends, and even generate ad content!\n" + } + ], + "reference": "Agent marketing_idea_generator_agent setting default values for state variables: campaign_guide, trends, insights." 
+ } +] \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/tests/env.py b/python/agents/trends-and-insights-agent/tests/env.py new file mode 100644 index 00000000..6f49acd4 --- /dev/null +++ b/python/agents/trends-and-insights-agent/tests/env.py @@ -0,0 +1,13 @@ +# Unit tests for env setup +import unittest +import os + + +class Env(unittest.TestCase): + def test_env_vars(self): + + self.assertIsNot(os.environ.get("GOOGLE_CLOUD_PROJECT_NUMBER"), None) + self.assertIsNot(os.environ.get("GOOGLE_GENAI_USE_VERTEXAI"), None) + self.assertIsNot(os.environ.get("BUCKET"), None) + self.assertIsNot(os.environ.get("GOOGLE_CLOUD_PROJECT"), None) + self.assertIsNot(os.environ.get("YT_SECRET_MNGR_NAME"), None) diff --git a/python/agents/trends-and-insights-agent/tests/gcs.py b/python/agents/trends-and-insights-agent/tests/gcs.py new file mode 100644 index 00000000..8beb6288 --- /dev/null +++ b/python/agents/trends-and-insights-agent/tests/gcs.py @@ -0,0 +1,44 @@ +# Unit tests for env setup +import unittest +import os + +BUCKET = os.environ.get("BUCKET") # get this after validation + + +# set envs before package imports +from trends_and_insights_agent.common_agents.ad_content_generator.tools import ( + download_blob, + upload_file_to_gcs, +) + +test_file_name = "test_file.txt" +file_contents = "This is a test file for gcs integration." 
+with open(test_file_name, "w") as f:
+    f.write(file_contents)
+
+# convert the file to bytes
+with open(test_file_name, "rb") as f:
+    file_bytes = f.read()
+
+
+class GCS(unittest.TestCase):
+    def test_upload_file_to_gcs(self):
+        # create a test file for upload to gcs
+
+        # upload the test file to gcs
+        upload_response = upload_file_to_gcs(
+            file_path=test_file_name, file_data=file_bytes
+        )
+
+        self.assertEqual(upload_response, f"{BUCKET}/{test_file_name}")
+
+    def test_download_blob(self):
+        # download the test file from gcs
+        download_response = download_blob(
+            bucket_name=BUCKET.replace("gs://", ""), source_blob_name=test_file_name
+        )
+        self.assertEqual(download_response, file_contents.encode())
+
+
+# delete the test file
+os.remove(test_file_name)
diff --git a/python/agents/trends-and-insights-agent/tests/simple_agent_eval.py b/python/agents/trends-and-insights-agent/tests/simple_agent_eval.py
new file mode 100644
index 00000000..d9fc8ad1
--- /dev/null
+++ b/python/agents/trends-and-insights-agent/tests/simple_agent_eval.py
@@ -0,0 +1,9 @@
+from google.adk.evaluation.agent_evaluator import AgentEvaluator
+# example of how to run this, using Pytest
+# Documentation links: https://google.github.io/adk-docs/evaluate/#2-pytest-run-tests-programmatically
+
+def test_capability_query():
+    AgentEvaluator.evaluate(
+        agent_module="trends_and_insights_agent",
+        eval_dataset_file_path_or_dir="tests/capability.test.json",
+    )
diff --git a/python/agents/trends-and-insights-agent/tests/test_init.py b/python/agents/trends-and-insights-agent/tests/test_init.py
new file mode 100644
index 00000000..4b28e8b4
--- /dev/null
+++ b/python/agents/trends-and-insights-agent/tests/test_init.py
@@ -0,0 +1,13 @@
+import os
+from dotenv import load_dotenv
+
+dotenv_path = os.path.join(
+    os.path.dirname(__file__), "../trends_and_insights_agent/.env"
+)
+if os.path.exists(dotenv_path):
+    load_dotenv(dotenv_path)
+else:
+    # error:
+    print("no .env file found")
+    raise FileNotFoundError("no .env file found")
+
diff --git a/python/agents/trends-and-insights-agent/tests/yt_web_tools.py b/python/agents/trends-and-insights-agent/tests/yt_web_tools.py
new file mode 100644
index 00000000..2c392c45
--- /dev/null
+++ b/python/agents/trends-and-insights-agent/tests/yt_web_tools.py
@@ -0,0 +1,34 @@
+# Unit tests for youtube and web LLM tools
+
+import unittest
+
+from trends_and_insights_agent.tools import query_youtube_api, query_web
+from trends_and_insights_agent.common_agents.trend_assistant.tools import (
+    get_youtube_trends,
+)
+import asyncio
+
+
+# unit test for query web
+class YT_Web_Tools(unittest.TestCase):
+    def test_query_web(self):
+        # test query web
+        query = "what is the latest in AI"
+        response = asyncio.run(query_web(query=query, num_results=1))
+        self.assertIsNot(response, None)
+        self.assertIsNot(response[0].get("website_text"), None)
+
+    def test_query_youtube_api(self):
+        # test query youtube api
+        query = "what is the latest in AI"
+        response = query_youtube_api(
+            query=query, region_code="US", video_duration="any"
+        )
+        self.assertIsNot(response, None)
+        self.assertIsNot(response.get("items"), None)
+
+    def test_get_youtube_trends(self):
+        # test get youtube trends
+        response = get_youtube_trends(region_code="US")
+        self.assertIsNot(response, None)
+        self.assertIsNot(response.get("items"), None)
diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/.env.example b/python/agents/trends-and-insights-agent/trends_and_insights_agent/.env.example
new file mode 100644
index 00000000..4b3f00f5
--- /dev/null
+++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/.env.example
@@ -0,0 +1,7 @@
+GOOGLE_GENAI_USE_VERTEXAI=1
+GOOGLE_CLOUD_PROJECT=this-my-project-id
+GOOGLE_CLOUD_LOCATION=us-central1
+GOOGLE_CLOUD_PROJECT_NUMBER=12345678910
+BUCKET=gs://zghost-media-center
+YT_SECRET_MNGR_NAME=yt-data-api
+# SESSION_STATE_JSON_PATH=example_state_pixel.json
\ No newline at end of file
diff --git
a/python/agents/trends-and-insights-agent/trends_and_insights_agent/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/__init__.py new file mode 100644 index 00000000..dfdf94f0 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/__init__.py @@ -0,0 +1,3 @@ +from . import agent + +__all__ = ["agent"] \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/agent.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/agent.py new file mode 100644 index 00000000..cbe0b93c --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/agent.py @@ -0,0 +1,36 @@ +from google.genai import types +from google.adk.agents import Agent + +from .common_agents.trend_assistant.agent import trends_and_insights_agent +from .common_agents.staged_researcher.agent import research_orchestrator +from .common_agents.ad_content_generator.agent import ad_content_generator_agent +from .common_agents.ad_content_generator.tools import save_creatives_and_research_report + +from .shared_libraries import callbacks +from .shared_libraries.config import config +from .prompts import ( + GLOBAL_INSTR, + ROOT_AGENT_INSTR, +) + +root_agent = Agent( + model=config.worker_model, + name="root_agent", + description="A trend and insight assistant using the services of multiple sub-agents.", + instruction=ROOT_AGENT_INSTR, + global_instruction=GLOBAL_INSTR, + sub_agents=[ + research_orchestrator, + trends_and_insights_agent, + ad_content_generator_agent, + ], + tools=[save_creatives_and_research_report], + generate_content_config=types.GenerateContentConfig( + temperature=0.01, + response_modalities=["TEXT"], + ), + before_agent_callback=[ + callbacks._load_session_state, + ], + before_model_callback=callbacks.rate_limit_callback, +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/__init__.py 
b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/__init__.py new file mode 100644 index 00000000..c4211e84 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/__init__.py @@ -0,0 +1,9 @@ +from .trend_assistant.agent import trends_and_insights_agent +from .ad_content_generator.agent import ad_content_generator_agent +from .staged_researcher.agent import combined_research_pipeline + +__all__ = [ + "trends_and_insights_agent", + "ad_content_generator_agent", + "combined_research_pipeline", +] diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/__init__.py new file mode 100644 index 00000000..e3d60cdc --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/__init__.py @@ -0,0 +1,3 @@ +from .agent import ad_content_generator_agent + +__all__ = ["ad_content_generator_agent"] diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/agent.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/agent.py new file mode 100644 index 00000000..f09f22b4 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/agent.py @@ -0,0 +1,341 @@ +from google.genai import types +from google.adk.planners import BuiltInPlanner +from google.adk.tools.agent_tool import AgentTool +from google.adk.agents import Agent, SequentialAgent +from google.adk.tools import google_search, load_artifacts + +from trends_and_insights_agent.shared_libraries.config import config +from trends_and_insights_agent.shared_libraries import callbacks +from .tools import ( + generate_image, + generate_video, + save_img_artifact_key, + 
save_vid_artifact_key, + save_select_ad_copy, + save_select_visual_concept, +) +from .prompts import ( + AD_CREATIVE_SUBAGENT_INSTR, + VEO3_INSTR, +) + + +# --- AD CREATIVE SUBAGENTS --- +ad_copy_drafter = Agent( + model=config.worker_model, + name="ad_copy_drafter", + description="Generate 10-12 initial ad copy ideas based on campaign guidelines and trends", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction="""You are a creative copywriter generating initial ad copy ideas. + + Your goal is to review the research and trends provided in the **Input Data** to generate 10-12 culturally relevant ad copy ideas. + + --- + ### Input Data + + + {target_yt_trends} + + + + {target_search_trends} + + + + {combined_final_cited_report} + + + --- + ### Instructions + + 1. Review the campaign and trend research in the 'combined_final_cited_report' state key. + 2. Using insights related to the campaign metadata, trending YouTube video(s), and trending Search term(s), generate 10-12 diverse ad copy ideas that: + - Incorporate key selling points for the {target_product} + - Vary in tone, style, and approach + - Are suitable for Instagram/TikTok platforms + - Reference at least one of the topics from the 'target_search_trends' or 'target_yt_trends' state keys. + 3. **Out of all the copy ideas you generate**, be sure to include: + - A few that reference the Search trend from the 'target_search_trends' state key, + - A few that reference the YouTube trend from the 'target_yt_trends' state key, + - And if possible, a few that combine ideas from both trends in the 'target_search_trends' and 'target_yt_trends' state keys. + 4. 
**Each ad copy should include:** + - Headline (attention-grabbing) + - Body text (concise and compelling) + - Call-to-action + - Which trend(s) it references (e.g., which trend from the 'target_search_trends' and 'target_yt_trends' state keys) + - Brief rationale for target audience appeal + - A candidate social media caption + + Use the `google_search` tool to support your decisions. + + """, + generate_content_config=types.GenerateContentConfig( + temperature=1.5, + ), + tools=[google_search], + output_key="ad_copy_draft", +) + + +ad_copy_critic = Agent( + model=config.critic_model, + name="ad_copy_critic", + description="Critique and narrow down ad copies based on product, audience, and trends", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction="""You are a strategic marketing critic evaluating ad copy ideas. + + Your goal is to review the proposed candidates in the 'ad_copy_draft' state key and select the 6-8 BEST ad copies based on: + 1. Alignment with target audience. + 2. Effective use of trending topics that feel authentic. + 3. Clear communication of key selling points. + 4. Platform-appropriate tone and length. + + Use the `google_search` tool to support your decisions + + Provide detailed rationale for your selections, explaining why these specific copies will perform best. 
+ + Each ad copy should include: + - Headline (attention-grabbing) + - Call-to-action + - A candidate social media caption + - Body text (concise and compelling) + - Which trend(s) it references (e.g., which trend from the 'target_search_trends' and 'target_yt_trends' state keys) + - Brief rationale for target audience appeal + - Detailed rationale explaining why this ad copy will perform well + + """, + tools=[google_search], + generate_content_config=types.GenerateContentConfig(temperature=0.7), + output_key="ad_copy_critique", +) + + +# ad_copy_finalizer = Agent( +# model=config.worker_model, +# name="ad_copy_finalizer", +# description="Finalize user-selected ad copy (or ad copies) to proceed with.", +# # planner=BuiltInPlanner(thinking_config=types.ThinkingConfig(include_thoughts=True)), +# instruction="""You are a senior copywriter finalizing ad campaigns. + +# 1. Display the ad copies from the 'ad_copy_critique' state key. +# 2. For each ad copy, be sure to include the following: +# - Headline (attention-grabbing) +# - Body text (concise and compelling) +# - Call-to-action +# - Which trend(s) it references (e.g., which trend from the 'target_search_trends' and 'target_yt_trends' state keys) +# - Brief rationale for target audience appeal +# - A candidate social media caption +# 3. Ask the user which ad copies they want to proceed with. They can choose one or multiple. 
+# +# """, +# generate_content_config=types.GenerateContentConfig(temperature=0.8), +# output_key="final_ad_copies", +# ) + + +# Sequential agent for ad creative generation +ad_creative_pipeline = SequentialAgent( + name="ad_creative_pipeline", + description="Generates ad copy drafts with an actor-critic workflow.", + sub_agents=[ + ad_copy_drafter, + ad_copy_critic, + # ad_copy_finalizer, + ], +) + + +# --- PROMPT GENERATION SUBAGENTS --- +visual_concept_drafter = Agent( + model=config.worker_model, + name="visual_concept_drafter", + description="Generate initial visual concepts for selected ad copies", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction=f"""You are a visual creative director generating initial concepts and an expert at creating AI prompts for {config.image_gen_model} and {config.video_gen_model}. + + Based on the user-selected ad copies in the 'final_select_ad_copies' state key, generate visual concepts that: + - Incorporate trending visual styles and themes. + - Consider platform-specific best practices. + - Find a clever way to market the 'target_product' + + Try generating at least one visual concept for each ad copy. + + In aggregate, the total set of visual concepts should: + - Balance the use of image and video creatives. + - Balance reference to the Search trend(s) and the trending Youtube video(s). + - Include a few concepts that attempt to combine both Search and YouTube trends + + For each visual concept, provide: + - Name (intuitive name of the concept) + - Type (image or video) + - Which trend(s) it relates to (e.g., which trend from the 'target_search_trends' and 'target_yt_trends' state keys) + - Which ad copy it connects to + - Creative concept explanation + - A draft {config.image_gen_model} or {config.video_gen_model} prompt. + - If this is a video concept: + - Consider generated videos are 8 seconds in length. + - Consider the prompting best practices in the block. 
+ + Use the `google_search` tool to support your decisions. + + + {VEO3_INSTR} + + """, + tools=[google_search], + generate_content_config=types.GenerateContentConfig(temperature=1.5), + output_key="visual_draft", +) + + +visual_concept_critic = Agent( + model=config.critic_model, + name="visual_concept_critic", + description="Critique and narrow down visual concepts", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction=f"""You are a creative director evaluating visual concepts and high quality prompts that result in high impact. + + Review the concepts in the 'visual_draft' state key and critique the draft prompts on: + 1. Visual appeal and stopping power for social media + 2. Alignment with ad copy messaging + 3. Alignment with trend + 4. Platform optimization (aspect ratios, duration) + 5. Diversity of visual approaches + 6. Utilize techniques to maintain continuity in the prompts + 7. Prompts are maximizing descriptive possibilities to match the intended tone + 8. Descriptions of scenes, characters, tone, emotion are all extremely verbose (100+ words) and leverage ideas from the prompting best practices + 9. These verbose descriptions are maintained scene to scene to avoid saying things like "the same person", instead use the same provided description + + **Critical Guidelines** + * Ensure a good mix of images and videos in your selections. + * Explain which trend(s) each concept relates to. + * Provide detailed rationale for your selections. + * Consider the prompting best practices in the block. + * Use the `google_search` tool to support your decisions. 
+ + **Final Output:** + Format the final output to include the following information for each visual concept: + - Name (intuitive name of the concept) + - Type (image or video) + - Which trend(s) it relates to (e.g., which trend from the 'target_search_trends' and 'target_yt_trends' state keys) + - Creative concept explanation + - Detailed rationale explaining why this concept will perform well + - A draft Imagen or Veo prompt + + + {VEO3_INSTR} + + """, + tools=[google_search], + generate_content_config=types.GenerateContentConfig(temperature=0.7), + output_key="visual_concept_critique", +) + + +visual_concept_finalizer = Agent( + model=config.worker_model, + name="visual_concept_finalizer", + description="Finalize visual concepts to proceed with.", + # planner=BuiltInPlanner(thinking_config=types.ThinkingConfig(include_thoughts=True)), + instruction="""You are a senior creative director finalizing visual concepts for ad creatives. + + 1. Review the 'visual_concept_critique' state key to understand the refined visual concepts. + 2. For each concept, provide the following: + - Name (intuitive name of the concept) + - Type (image or video) + - Which trend(s) it relates to (e.g., from the 'target_search_trends' and 'target_yt_trends' state keys) + - Headline (attention-grabbing) + - Call-to-action + - A candidate social media caption + - Creative concept explanation + - Brief rationale for target audience appeal + - Brief explanation of how this markets the target product + - A draft Imagen or Veo prompt. 
+ + """, + generate_content_config=types.GenerateContentConfig(temperature=0.8), + output_key="final_visual_concepts", +) + + +# Sequential agent for visual generation +visual_generation_pipeline = SequentialAgent( + name="visual_generation_pipeline", + description="Generates visual concepts with an actor-critic workflow.", + sub_agents=[ + visual_concept_drafter, + visual_concept_critic, + visual_concept_finalizer, + ], +) + + +visual_generator = Agent( + model=config.critic_model, + name="visual_generator", + description="Generate final visuals using image and video generation tools", + instruction=f"""You are a visual content producer creating final assets. + + **Objective:** Generate visual content options (images and videos) based on the user-selected visual concepts. + + **Available Tools:** + - `generate_image`: Generate images using Google's Imagen model. + - `generate_video`: Generate videos using Google's Veo model. + + **Instructions:** + 1. For each user-selected visual concept in the 'final_select_vis_concepts' state key, generate the creative visual using the appropriate tool (`generate_image` or `generate_video`). 
+ - For images, follow the instructions in the block, + - For videos, follow the instructions in the block and consider prompting best practices in the block, + + + - Create descriptive image prompts that visualize the ad copy concepts + - Include subject, context/background, and style elements + - Ensure prompts capture the essence of the trends and campaign highlights + - Generate diverse visual approaches (different styles, compositions, contexts) + + + + - Create dynamic video prompts that bring the ad copy to life + - Include subject, context, action, style, and optional camera/composition elements + - Consider continuity with the image concepts when appropriate + - Vary the approaches (different actions, camera angles, moods) + + + + {VEO3_INSTR} + + """, + tools=[ + generate_image, + generate_video, + ], + generate_content_config=types.GenerateContentConfig(temperature=1.2), + before_model_callback=callbacks.rate_limit_callback, +) + +# Main orchestrator agent +ad_content_generator_agent = Agent( + model=config.lite_planner_model, + name="ad_content_generator_agent", + description="Help users with ad generation; brainstorm and refine ad copy and visual concept ideas with actor-critic workflows; iterate with the user to generate final ad creatives.", + instruction=AD_CREATIVE_SUBAGENT_INSTR, + tools=[ + AgentTool(agent=ad_creative_pipeline), + AgentTool(agent=visual_generation_pipeline), + AgentTool(agent=visual_generator), + save_img_artifact_key, + save_vid_artifact_key, + save_select_ad_copy, + save_select_visual_concept, + load_artifacts, + ], + generate_content_config=types.GenerateContentConfig(temperature=1.0), +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/prompts.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/prompts.py new file mode 100644 index 00000000..85568a9b --- /dev/null +++ 
b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/prompts.py
@@ -0,0 +1,160 @@
+"""Prompts for ad content generator new agent and subagents"""
+
+AD_CREATIVE_SUBAGENT_INSTR = """**Role:** You are the orchestrator for a comprehensive ad content generation workflow.
+
+**Objective:** Your goal is to generate a complete set of ad creatives including ad copy, images, and videos. To achieve this, use the **specialized tools and sub-agents** available to complete the **instructions** below.
+
+**You have access to specialized tools and sub-agents:**
+1. Use the `ad_creative_pipeline` tool to generate ad copies for the user to review.
+2. Use the `visual_generation_pipeline` tool to create visual concepts for each ad copy.
+3. Use the `visual_generator` tool to generate image and video creatives.
+4. Use the `save_img_artifact_key` tool to update the 'img_artifact_keys' state key for each image generated with the `generate_image` tool.
+5. Use the `save_vid_artifact_key` tool to update the 'vid_artifact_keys' state key for each video generated with the `generate_video` tool.
+6. Use the `load_artifacts` tool to load artifacts such as files, images, and videos.
+
+**Instructions:**
+1. Greet the user and give them a high-level overview of what you do.
+2. Then, complete all steps in the block to generate ad creatives with the user. Strictly follow all the steps one-by-one. Don't proceed until they are complete.
+3. Once these steps are complete, transfer back to the `root_agent`.
+
+
+1. Call `ad_creative_pipeline` as a tool to generate a set of candidate ad copies.
+2. Once the previous step is complete, present the ad copies in the 'ad_copy_critique' state key to the user.
+ - For each ad copy, be sure to include: + - Headline (attention-grabbing) + - Call-to-action + - A candidate social media caption + - Body text (concise and compelling) + - Which trend(s) it references (e.g., which trend from the 'target_search_trends' and 'target_yt_trends' state keys) + - Brief rationale for target audience appeal + - How this markets the target product + - Work with the user to understand which ad copies they'd like to proceed with. +3. Once the user selects one or more ad copies, use the `save_select_ad_copy` tool to add these to the session state. + - To make sure everything is stored correctly, instead of calling `save_select_ad_copy` all at once, chain the calls such that you only call another `save_select_ad_copy` after the last call has responded. + - Once these complete, confirm with the user and then proceed to the next step. +4. Next, call the `visual_generation_pipeline` tool to generate visual concepts for each user-selected ad copy. +5. Once the previous step is complete, present the visual concepts in the 'final_visual_concepts' state key to the user. + - For each visual concept, be sure to include: + - Name (intuitive name of the concept) + - Type (image or video) + - Which trend(s) it relates to (e.g., from the 'target_search_trends' and 'target_yt_trends' state keys) + - Headline (attention-grabbing) + - Call-to-action + - A candidate social media caption + - Creative concept explanation + - Brief rationale for target audience appeal + - How this markets the target product + - A draft Imagen or Veo prompt + - Work with the user to understand which visual concepts they'd like to proceed with. +5. Once the user selects one or more visual concepts, use the `save_select_visual_concept` tool to add these to the session state. 
+ - To make sure everything is stored correctly, instead of calling `save_select_visual_concept` all at once, chain the calls such that you only call another `save_select_visual_concept` after the last call has responded. + - Once these complete, proceed to the next step. +6. Next, call the `visual_generator` tool to generate ad creatives from the selected visual concepts. + - For each image generated, call the `save_img_artifact_key` tool to update the 'img_artifact_keys' state key. + - For each video generated, call the `save_vid_artifact_key` tool to update the 'vid_artifact_keys' state key. +7. Lastly, do a quality assurance check on the generated artifacts using `load_artifacts` tool. Once the user confirms satisfaction, you may proceed to the next step. + + + +**Key Responsibilities:** +- Ensure smooth handoff between subagents. +- Maintain context about campaign guidelines throughout the process. +- Handle any user feedback or iteration requests. +""" + +VEO3_INSTR = """Here are some example best practices when creating prompts for VEO3: +SUPPRESS SUBTITLES + +People: Man, woman, child, elderly person, specific professions (e.g., "a seasoned detective", "a joyful baker", "a futuristic astronaut"), historical figures, mythical beings (e.g., "a mischievous fairy", "a stoic knight"). +Animals: Specific breeds (e.g., "a playful Golden Retriever puppy", "a majestic bald eagle", "a sleek black panther"), fantastical creatures (e.g., "a miniature dragon with iridescent scales", "a wise, ancient talking tree"). +Objects: Everyday items (e.g., "a vintage typewriter", "a steaming cup of coffee", "a worn leather-bound book"), vehicles (e.g., "a classic 1960s muscle car", "a futuristic hovercraft", "a weathered pirate ship"), abstract shapes ("glowing orbs", "crystalline structures"). 
+Multiple Subjects: You can combine people, animals, objects, or any mix of them in the same video (e.g., "A group of diverse friends laughing around a campfire while a curious fox watches from the shadows", "a busy marketplace scene with vendors and shoppers." + + +Basic Movements: Walking, running, jumping, flying, swimming, dancing, spinning, falling, standing still, sitting. +Interactions: Talking, laughing, arguing, hugging, fighting, playing a game, cooking, building, writing, reading, observing. +Emotional Expressions: Smiling, frowning, looking surprised, concentrating deeply, appearing thoughtful, showing excitement, crying. +Subtle Actions: A gentle breeze ruffling hair, leaves rustling, a subtle nod, fingers tapping impatiently, eyes blinking slowly. +Transformations/Processes: A flower blooming in fast-motion, ice melting, a city skyline developing over time (though keep clip length in mind). + + +Location (Interior): A cozy living room with a crackling fireplace, a sterile futuristic laboratory, a cluttered artist's studio, a grand ballroom, a dusty attic. +Location (Exterior): A sun-drenched tropical beach, a misty ancient forest, a bustling futuristic cityscape at night, a serene mountain peak at dawn, a desolate alien planet. +Time of Day: Golden hour, midday sun, twilight, deep night, pre-dawn. +Weather: Clear blue sky, overcast and gloomy, light drizzle, heavy thunderstorm with visible lightning, gentle snowfall, swirling fog. +Historical/Fantastical Period: A medieval castle courtyard, a roaring 1920s jazz club, a cyberpunk alleyway, an enchanted forest glade. +Atmospheric Details: Floating dust motes in a sunbeam, shimmering heat haze, reflections on wet pavement, leaves scattered by the wind. + + +Eye-Level Shot: Offers a neutral, common perspective, as if viewed from human height. "Eye-level shot of a woman sipping tea." +Low-Angle Shot: Positions the camera below the subject, looking up, making the subject appear powerful or imposing. 
"Low-angle tracking shot of a superhero landing." +High-Angle Shot: Places the camera above the subject, looking down, which can make the subject seem small, vulnerable, or part of a larger pattern. "High-angle shot of a child lost in a crowd." +Bird's-Eye View / Top-Down Shot: A shot taken directly from above, offering a map-like perspective of the scene. "Bird's-eye view of a bustling city intersection." +Worm's-Eye View: A very low-angle shot looking straight up from the ground, emphasizing height and grandeur. "Worm's-eye view of towering skyscrapers." +Dutch Angle / Canted Angle: The camera is tilted to one side, creating a skewed horizon line, often used to convey unease, disorientation, or dynamism. "Dutch angle shot of a character running down a hallway." +Close-Up: Frames a subject tightly, typically focusing on a face to emphasize emotions or a specific detail. "Close-up of a character's determined eyes." +Extreme Close-Up: Isolates a very small detail of the subject, such as an eye or a drop of water. "Extreme close-up of a drop of water landing on a leaf." +Medium Shot: Shows the subject from approximately the waist up, balancing detail with some environmental context, common for dialogue. "Medium shot of two people conversing." +Full Shot / Long Shot: Shows the entire subject from head to toe, with some of the surrounding environment visible. "Full shot of a dancer performing." +Wide Shot / Establishing Shot: Shows the subject within their broad environment, often used to establish location and context at the beginning of a sequence. "Wide shot of a lone cabin in a snowy landscape." +Over-the-Shoulder Shot: Frames the shot from behind one person, looking over their shoulder at another person or object, common in conversations. "Over-the-shoulder shot during a tense negotiation. " +Point-of-View Shot: Shows the scene from the direct visual perspective of a character, as if the audience is seeing through their eyes. 
"POV shot as someone rides a rollercoaster.” + + +Static Shot (or fixed): The camera remains completely still; there is no movement. "Static shot of a serene landscape." +Pan (left/right): The camera rotates horizontally left or right from a fixed position. "Slow pan left across a city skyline at dusk." +Tilt (up/down): The camera rotates vertically up or down from a fixed position. "Tilt down from the character's shocked face to the revealing letter in their hands." +Dolly (In/Out): The camera physically moves closer to the subject or further away. "Dolly out from the character to emphasize their isolation." +Truck (Left/Right): The camera physically moves horizontally (sideways) left or right, often parallel to the subject or scene. "Truck right, following a character as they walk along a busy sidewalk." +Pedestal (Up/Down): The camera physically moves vertically up or down while maintaining a level perspective. "Pedestal up to reveal the full height of an ancient, towering tree." +Zoom (In/Out): The camera's lens changes its focal length to magnify or de-magnify the subject. This is different from a dolly, as the camera itself does not move. "Slow zoom in on a mysterious artifact on a table." +Crane Shot: The camera is mounted on a crane and moves vertically (up or down) or in sweeping arcs, often used for dramatic reveals or high-angle perspectives. "Crane shot revealing a vast medieval battlefield." +Aerial Shot / Drone Shot: A shot taken from a high altitude, typically using an aircraft or drone, often involving smooth, flying movements. "Sweeping aerial drone shot flying over a tropical island chain." +Handheld / Shaky Cam: The camera is held by the operator, resulting in less stable, often jerky movements that can convey realism, immediacy, or unease. "Handheld camera shot during a chaotic marketplace chase." +Whip Pan: An extremely fast pan that blurs the image, often used as a transition or to convey rapid movement or disorientation. 
"Whip pan from one arguing character to another."
+Arc Shot: The camera moves in a circular or semi-circular path around the subject. "Arc shot around a couple embracing in the rain."
+
+
+Wide-Angle Lens (e.g., "18mm lens," "24mm lens"): Captures a broader field of view than a standard lens. It can exaggerate perspective, making foreground elements appear larger and creating a sense of grand scale or, at closer distances, distortion. "Wide-angle lens shot of a grand cathedral interior, emphasizing its soaring arches."
+Telephoto Lens (e.g., "85mm lens," "200mm lens"): Narrows the field of view and compresses perspective, making distant subjects appear closer and often isolating the subject by creating a shallow depth of field. "Telephoto lens shot capturing a distant eagle in flight against a mountain range."
+Shallow Depth of Field / Bokeh: An optical effect where only a narrow plane of the image is in sharp focus, while the foreground and/or background are blurred. The aesthetic quality of this blur is known as 'bokeh'. "Portrait of a man with a shallow depth of field, their face sharp against a softly blurred park background with beautiful bokeh."
+Deep Depth of Field: Keeps most or all of the image, from foreground to background, in sharp focus. "Landscape scene with deep depth of field, showing sharp detail from the wildflowers in the immediate foreground to the distant mountains."
+Lens Flare: An effect created when a bright light source directly strikes the camera lens, causing streaks, starbursts, or circles of light to appear in the image. Often used for dramatic or cinematic effect. "Cinematic lens flare as the sun dips below the horizon behind a silhouetted couple."
+Rack Focus: The technique of shifting the focus of the lens from one subject or plane of depth to another within a single, continuous shot. "Rack focus from a character's thoughtful face in the foreground to a significant photograph on the wall behind them."
+Fisheye Lens Effect: An ultra-wide-angle lens that produces extreme barrel distortion, creating a circular or strongly convex, wide panoramic image. "Fisheye lens view from inside a car, capturing the driver and the entire curved dashboard and windscreen."
+Vertigo Effect (Dolly Zoom): A camera effect achieved by dollying the camera towards or away from a subject while simultaneously zooming the lens in the opposite direction. This keeps the subject roughly the same size in the frame, but the background perspective changes dramatically, often conveying disorientation or unease. "Vertigo effect (dolly zoom) on a character standing at the edge of a cliff, the background rushing away."
+
+
+Natural Light: "Soft morning sunlight streaming through a window," "Overcast daylight," "Moonlight."
+Artificial Light: "Warm glow of a fireplace," "Flickering candlelight," "Harsh fluorescent office lighting," "Pulsating neon signs."
+Cinematic Lighting: "Rembrandt lighting on a portrait," "Film noir style with deep shadows and stark highlights," "High-key lighting for a bright, cheerful scene," "Low-key lighting for a dark, mysterious mood."
+Specific Effects: "Volumetric lighting creating visible light rays," "Backlighting to create a silhouette," "Golden hour glow," "Dramatic side lighting."
+Happy/Joyful: Bright, vibrant, cheerful, uplifting, whimsical.
+Sad/Melancholy: Somber, muted colors, slow pace, poignant, wistful.
+Suspenseful/Tense: Dark, shadowy, quick cuts (if implying edit), sense of unease, thrilling.
+Peaceful/Serene: Calm, tranquil, soft, gentle, meditative.
+Epic/Grandiose: Sweeping, majestic, dramatic, awe-inspiring.
+Futuristic/Sci-Fi: Sleek, metallic, neon, technological, dystopian, utopian.
+Vintage/Retro: Sepia tone, grainy film, specific era aesthetics (e.g., "1950s Americana," "1980s vaporwave").
+Romantic: Soft focus, warm colors, intimate.
+Horror: Dark, unsettling, eerie, gory (though be mindful of content filters).
+Photorealistic: "Ultra-realistic rendering," "Shot on 8K camera."
+Cinematic: "Cinematic film look," "Shot on 35mm film," "Anamorphic widescreen."
+Animation Styles: "Japanese anime style," "Classic Disney animation style," "Pixar-like 3D animation," "Claymation style," "Stop-motion animation," "Cel-shaded animation."
+Art Movements/Artists: "In the style of Van Gogh," "Surrealist painting," "Impressionistic," "Art Deco design," "Bauhaus aesthetic."
+Specific Looks: "Gritty graphic novel illustration," "Watercolor painting coming to life," "Charcoal sketch animation," "Blueprint schematic style."
+Color Palettes: "Monochromatic black and white," "Vibrant and saturated tropical colors," "Muted earthy tones," "Cool blue and silver futuristic palette," "Warm autumnal oranges and browns."
+Atmospheric Effects: "Thick fog rolling across a moor," "Swirling desert sands," "Gentle falling snow creating a soft blanket," "Heat haze shimmering above asphalt," "Magical glowing particles in the air," "Subsurface scattering on a translucent object."
+Textural Qualities: "Rough-hewn stone walls," "Smooth, polished chrome surfaces," "Soft, velvety fabric," "Dewdrops clinging to a spiderweb."
+
+
+Pacing: "Slow-motion," "Fast-paced action," "Time-lapse," "Hyperlapse."
+Evolution (subtle for short clips): "A flower bud slowly unfurling", "A candle burning down slightly", "Dawn breaking, the sky gradually lightening."
+Rhythm: "Pulsating light", "Rhythmic movement."
+ + +""" diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/tools.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/tools.py new file mode 100644 index 00000000..0dd495c9 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/ad_content_generator/tools.py @@ -0,0 +1,680 @@ +import cv2 +import logging +from PIL import Image +from io import BytesIO +import uuid, shutil, time, os +from markdown_pdf import MarkdownPdf, Section + +logging.basicConfig(level=logging.INFO) + +from google import genai +from google.genai import types +from google.cloud import storage +from google.adk.tools import ToolContext +from google.genai.types import GenerateVideosConfig + +from ...shared_libraries.config import config +from ...shared_libraries.utils import ( + download_blob, + upload_blob_to_gcs, + download_image_from_gcs, +) + +# Get the cloud storage bucket from the environment variable +try: + GCS_BUCKET = os.environ["BUCKET"] +except KeyError: + raise Exception("BUCKET environment variable not set") + +client = genai.Client() +storage_client = storage.Client() + + +def save_select_ad_copy(select_ad_copy_dict: dict, tool_context: ToolContext) -> dict: + """ + Tool to save `select_ad_copy_dict` to the 'final_select_ad_copies' state key. + Use this tool after the user has selected one or more ad copies to proceed with in ad generation. + + Args: + select_ad_copy_dict (dict): A dict representing an ad copy specifically selected by the user for ad generation. Use the `tool_context` to extract the following schema: + name (str): An intuitive name of the ad copy concept. + headline (str): A concise, attention-grabbing phrase. + call_to_action (str): A catchy, action-oriented phrase intended for the target audience. + caption (str): The candidate social media caption proposed for the ad copy. 
+ body_text (str): The main body of the ad copy. Should be compelling. + trend_ref (str): The trend(s) referenced in this ad copy (e.g., from the 'target_search_trends' and 'target_yt_trends' state keys). + rationale (str): A brief rationale explaining why this ad copy will perform well. + tool_context: The tool context. + + Returns: + A status message. + """ + existing_ad_copies = tool_context.state.get("final_select_ad_copies") + if existing_ad_copies is not {"final_select_ad_copies": []}: + existing_ad_copies["final_select_ad_copies"].append(select_ad_copy_dict) + tool_context.state["final_select_ad_copies"] = existing_ad_copies + return {"status": "ok"} + + +def save_select_visual_concept( + select_vis_concept_dict: dict, tool_context: ToolContext +) -> dict: + """ + Tool to save `select_vis_concept_dict` to the 'final_select_vis_concepts' state key. + Use this tool after the user has selected one or more visual concepts to proceed with in ad generation. + + Args: + select_vis_concept_dict (dict): A dict representing a visual concept specifically selected by the user for ad generation. Use the `tool_context` to extract the following schema: + name (str): An intuitive name of the visual concept. + type (str): the intended type of creative e.g., "image" or "video". + trend_ref (str): The trend(s) referenced in this visual concept (e.g., from the 'target_search_trends' and 'target_yt_trends' state keys). + headline (str): A concise, attention-grabbing phrase. + call_to_action (str): A catchy, action-oriented phrase intended for the target audience. + caption (str): The candidate social media caption proposed for the visual concept. + creative_explain (str): A brief explanation connecting the visual concept to the proposed creative direction. + rationale (str): A brief rationale explaining why this visual concept will perform well. + prompt (str): The suggested prompt to generate this creative. + tool_context: The tool context. + + Returns: + A status message. 
+ """ + existing_vis_concepts = tool_context.state.get("final_select_vis_concepts") + if existing_vis_concepts is not {"final_select_vis_concepts": []}: + existing_vis_concepts["final_select_vis_concepts"].append( + select_vis_concept_dict + ) + tool_context.state["final_select_vis_concepts"] = existing_vis_concepts + return {"status": "ok"} + + +async def generate_image( + prompt: str, + tool_context: ToolContext, + concept_name: str, + number_of_images: int = 1, +) -> dict: + f"""Generates an image based on the prompt for {config.image_gen_model} + + Args: + prompt (str): The prompt to generate the image from. + tool_context (ToolContext): The tool context. + concept_name (str, optional): The name of the concept. + number_of_images (int, optional): The number of images to generate. Defaults to 1. + + Returns: + dict: Status and the artifact_key of the generated image. + + """ + response = client.models.generate_images( + model=config.image_gen_model, + prompt=prompt, + config={"number_of_images": number_of_images}, + ) + if not response.generated_images: + return {"status": "failed"} + + # Create output filename + if concept_name: + filename_prefix = f"{concept_name.replace(",", "").replace(" ", "_")}" + else: + filename_prefix = f"{str(uuid.uuid4())[:8]}" + + DIR = "session_media" + SUBDIR = f"{DIR}/imgs" + if not os.path.exists(SUBDIR): + os.makedirs(SUBDIR) + + for index, image_results in enumerate(response.generated_images): + if image_results.image is not None: + if image_results.image.image_bytes is not None: + + image_bytes = image_results.image.image_bytes + artifact_key = f"{filename_prefix}_{index}.png" + + await tool_context.save_artifact( + filename=artifact_key, + artifact=types.Part.from_bytes( + data=image_bytes, mime_type="image/png" + ), + ) + local_filepath = f"{SUBDIR}/{artifact_key}" + + # save the file locally for gcs upload + image = Image.open(BytesIO(image_bytes)) + image.save(local_filepath) + gcs_folder = tool_context.state["gcs_folder"] 
+ artifact_path = os.path.join(gcs_folder, artifact_key) + logging.info(f"\n\n `generate_image` listdir: {os.listdir('.')}\n\n") + + upload_blob_to_gcs( + source_file_name=local_filepath, + destination_blob_name=artifact_path, + ) + logging.info( + f"Saved image artifact '{artifact_key}' to folder '{gcs_folder}'" + ) + + try: + shutil.rmtree(DIR) + logging.info(f"Directory '{DIR}' and its contents removed successfully") + except FileNotFoundError: + logging.exception(f"Directory '{DIR}' not found") + except OSError as e: + logging.exception(f"Error removing directory '{DIR}': {e}") + + return {"status": "ok", "artifact_key": f"{artifact_key}"} + + +async def generate_video( + prompt: str, + concept_name: str, + tool_context: ToolContext, + number_of_videos: int = 1, + # aspect_ratio: str = "16:9", + negative_prompt: str = "", + existing_image_filename: str = "", +): + f"""Generates a video based on the prompt for {config.video_gen_model}. + + Args: + prompt (str): The prompt to generate the video from. + concept_name (str, optional): The name of the creative/visual concept. + tool_context (ToolContext): The tool context. + number_of_videos (int, optional): The number of videos to generate. Defaults to 1. + negative_prompt (str, optional): The negative prompt to use. Defaults to "". + + Returns: + dict: Status and the `artifact_key` of the generated video. 
+ """ + # Create output filename + if concept_name: + filename_prefix = f"{concept_name.replace(",", "").replace(" ", "_")}" + else: + filename_prefix = f"{str(uuid.uuid4())[:8]}" + + gen_config = GenerateVideosConfig( + aspect_ratio="16:9", + number_of_videos=number_of_videos, + output_gcs_uri=os.environ["BUCKET"], + negative_prompt=negative_prompt, + ) + if existing_image_filename != "": + gcs_location = f"{os.environ['BUCKET']}/{existing_image_filename}" + existing_image = types.Image(gcs_uri=gcs_location, mime_type="image/png") + operation = client.models.generate_videos( + model=config.video_gen_model, + prompt=prompt, + image=existing_image, + config=gen_config, + ) + else: + operation = client.models.generate_videos( + model=config.video_gen_model, prompt=prompt, config=gen_config + ) + while not operation.done: + time.sleep(15) + operation = client.operations.get(operation) + logging.info(operation) + + if operation.error: + return {"status": f"failed due to error: {operation.error}"} + + if operation.response: + if ( + operation.result is not None + and operation.result.generated_videos is not None + ): + for index, generated_video in enumerate(operation.result.generated_videos): + if ( + generated_video.video is not None + and generated_video.video.uri is not None + ): + video_uri = generated_video.video.uri + artifact_key = f"{filename_prefix}_{index}.mp4" + + BUCKET = os.getenv("BUCKET") + if BUCKET is not None: + + BUCKET_NAME = BUCKET.replace("gs://", "") + SOURCE_BLOB = video_uri.replace(BUCKET, "")[1:] + + video_bytes = download_blob( + bucket_name=BUCKET_NAME, source_blob_name=SOURCE_BLOB + ) + logging.info( + f"The artifact key for this video is: {artifact_key}" + ) + await tool_context.save_artifact( + filename=artifact_key, + artifact=types.Part.from_bytes( + data=video_bytes, mime_type="video/mp4" + ), + ) + + # save to common gcs location + DESTINATION_BLOB_NAME = ( + f"{tool_context.state["gcs_folder"]}/{artifact_key}" + ) + bucket = 
storage_client.get_bucket(BUCKET_NAME) + source_blob = bucket.blob(SOURCE_BLOB) + destination_bucket = storage_client.get_bucket(BUCKET_NAME) + new_blob = bucket.copy_blob( + source_blob, + destination_bucket, + new_name=DESTINATION_BLOB_NAME, + ) + logging.info( + f"Blob {source_blob} copied to {destination_bucket}/{new_blob.name}" + ) + + return {"status": "ok", "artifact_key": f"{artifact_key}"} + + +async def save_img_artifact_key( + artifact_key_dict: dict, + tool_context: ToolContext, +) -> dict: + """ + Tool to save image artifact details to the session state. + Use this tool after generating an image with the `generate_image` tool. + + Args: + artifact_key_dict (dict): A dict representing a generated image artifact. Use the `tool_context` to extract the following schema: + artifact_key (str): The filename used to identify the image artifact; the value returned in `generate_image` tool response. + img_prompt (str): The prompt used to generate the image artifact. + concept (str): A brief explanation of the creative concept used to generate this artifact. + headline (str): The attention-grabbing headline proposed for the artifact's ad-copy. + caption (str): The candidate social media caption proposed for the artifact's ad-copy. + trend (str): The trend(s) referenced by this creative. + rationale_perf (str): A brief rationale explaining why this ad copy will perform well. + audience_appeal (str): A brief explanation for the target audience appeal. + markets_product (str): A brief explanation of how this markets the target product. + tool_context (ToolContext) The tool context. + + Returns: + dict: the status of this functions overall outcome. 
+ """ + existing_img_artifact_keys = tool_context.state.get("img_artifact_keys") + if existing_img_artifact_keys is not {"img_artifact_keys": []}: + existing_img_artifact_keys["img_artifact_keys"].append(artifact_key_dict) + tool_context.state["img_artifact_keys"] = existing_img_artifact_keys + return {"status": "ok"} + + +async def save_vid_artifact_key( + artifact_key_dict: dict, + tool_context: ToolContext, +) -> dict: + """ + Tool to save video artifact details to the session state. + Use this tool after generating an video with the `generate_video` tool. + + Args: + artifact_key_dict (dict): A dict representing a generated video artifact. Use the `tool_context` to extract the following schema: + artifact_key (str): The filename used to identify the video artifact; the value returned in `generate_video` tool response. + vid_prompt (str): The prompt used to generate the video artifact. + concept (str): A brief explanation of the creative concept used to generate this artifact. + headline (str): The attention-grabbing headline proposed for the artifact's ad-copy. + caption (str): The candidate social media caption proposed for the artifact's ad-copy. + trend (str): The trend(s) referenced by this creative. + rationale_perf (str): A brief rationale explaining why this ad copy will perform well. + audience_appeal (str): A brief explanation for the target audience appeal. + markets_product (str): A brief explanation of how this markets the target product. + tool_context (ToolContext) The tool context. + + Returns: + dict: the status of this functions overall outcome. 
+ """ + existing_vid_artifact_keys = tool_context.state.get("vid_artifact_keys") + if existing_vid_artifact_keys is not {"vid_artifact_keys": []}: + existing_vid_artifact_keys["vid_artifact_keys"].append(artifact_key_dict) + tool_context.state["vid_artifact_keys"] = existing_vid_artifact_keys + return {"status": "ok"} + + +def extract_single_frame(video_path, frame_number, output_image_path) -> str: + """ + Extracts a single frame from a video at a specified frame number. + + Args: + video_path (str): The path to the input MP4 video file. + frame_number (int): The number of the frame to extract (0-indexed). + output_image_path (str): The path to save the extracted image (e.g., 'frame.jpg'). + + Returns: + str: local path to the extracted image (i.e., frame) + """ + cap = cv2.VideoCapture(video_path) + + if not cap.isOpened(): + logging.info(f"Error: Could not open video file {video_path}") + return f"Error: Could not open video file {video_path}" + + # Set the frame position + cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number) + + ret, frame = cap.read() + + if ret: + cv2.imwrite(output_image_path, frame) + logging.info(f"Frame {frame_number} extracted and saved to {output_image_path}") + else: + logging.info(f"Error: Could not read frame {frame_number} from {video_path}") + + cap.release() + cv2.destroyAllWindows() + + return output_image_path + + +async def save_creatives_and_research_report(tool_context: ToolContext) -> dict: + """ + Saves generated PDF report bytes as an artifact. + + Args: + tool_context (ToolContext): The tool context. + + Returns: + dict: Status and the location of the PDF artifact file. 
+ """ + processed_report = tool_context.state["final_report_with_citations"] + gcs_folder = tool_context.state["gcs_folder"] + + try: + + DIR = f"report_creatives" + + # ==================== # + # get image creatives + # ==================== # + IMG_SUBDIR = f"{DIR}/imgs" + if not os.path.exists(IMG_SUBDIR): + os.makedirs(IMG_SUBDIR) + + # get artifact details + img_artifact_state_dict = tool_context.state.get("img_artifact_keys") + img_artifact_list = img_artifact_state_dict["img_artifact_keys"] + + IMG_CREATIVE_STRING = "" + for entry in img_artifact_list: + logging.info(entry) + LOCAL_FILE_PATH = os.path.join(IMG_SUBDIR, entry["artifact_key"]) + ARTIFACT_KEY_NAME = entry["artifact_key"].replace(".png", "") + # download locally + download_image_from_gcs( + source_blob_name=os.path.join(gcs_folder, entry["artifact_key"]), + destination_file_name=LOCAL_FILE_PATH, + ) + # TODO: optimize + path_str = f"![Example Image]({LOCAL_FILE_PATH})\n" + str_1 = f"## {entry["headline"]}\n" + str_2 = ( + f"*{os.path.join(GCS_BUCKET, gcs_folder, entry["artifact_key"])}*\n\n" + ) + str_3 = f"{path_str}\n\n" + str_4 = f"**{entry["caption"]}**\n\n" + str_5 = f"**Trend(s):** {entry["trend"]}\n\n" + str_6 = f"**Visual Concept:** {entry["concept"]}\n\n" + str_7 = f"**How it markets target product:** {entry["markets_product"]}\n\n" + str_8 = f"**Target audience appeal:** {entry["audience_appeal"]}\n\n" + str_9 = f"**Why this will perform well:** {entry["rationale_perf"]}\n\n" + str_10 = f"**Prompt:** {entry["img_prompt"]}\n\n" + result = ( + str_1 + + " " + + str_2 + + " " + + str_3 + + " " + + str_4 + + " " + + str_5 + + " " + + str_6 + + " " + + str_7 + + " " + + str_8 + + " " + + str_9 + + " " + + str_10 + ) + + IMG_CREATIVE_STRING += result + + # ==================== # + # get video creatives + # ==================== # + VID_SUBDIR = f"{DIR}/vids" + if not os.path.exists(VID_SUBDIR): + os.makedirs(VID_SUBDIR) + + # get artifact details + vid_artifact_state_dict = 
tool_context.state.get("vid_artifact_keys") + vid_artifact_list = vid_artifact_state_dict["vid_artifact_keys"] + + VID_CREATIVE_STRING = "" + for entry in vid_artifact_list: + logging.info(entry) + LOCAL_VID_PATH = os.path.join(VID_SUBDIR, entry["artifact_key"]) + ARTIFACT_KEY_NAME = entry["artifact_key"].replace(".mp4", "") + # download locally + download_image_from_gcs( + source_blob_name=os.path.join(gcs_folder, entry["artifact_key"]), + destination_file_name=LOCAL_VID_PATH, + ) + LOCAL_FRAME_PATH = os.path.join(VID_SUBDIR, f"{ARTIFACT_KEY_NAME}.png") + LOCAL_VID_FRAME = extract_single_frame(LOCAL_VID_PATH, 1, LOCAL_FRAME_PATH) + + path_str = f"![Thumbnail Image]({LOCAL_VID_FRAME})\n" + str_1 = f"## {entry["headline"]}\n" + str_2 = ( + f"*{os.path.join(GCS_BUCKET, gcs_folder, entry["artifact_key"])}*\n\n" + ) + str_3 = f"{path_str}\n\n" + str_4 = f"**{entry["caption"]}**\n\n" + str_5 = f"**Trend(s):** {entry["trend"]}\n\n" + str_6 = f"**Visual Concept:** {entry["concept"]}\n\n" + str_7 = f"**How it markets target product:** {entry["markets_product"]}\n\n" + str_8 = f"**Target audience appeal:** {entry["audience_appeal"]}\n\n" + str_9 = f"**Why this will perform well:** {entry["rationale_perf"]}\n\n" + str_10 = f"**Prompt:** {entry["vid_prompt"]}\n\n" + + result = ( + str_1 + + " " + + str_2 + + " " + + str_3 + + " " + + str_4 + + " " + + str_5 + + " " + + str_6 + + " " + + str_7 + + " " + + str_8 + + " " + + str_9 + + " " + + str_10 + ) + + VID_CREATIVE_STRING += result + + # ==================== # + # create local PDF file + # ==================== # + artifact_key = "final_trends_and_creatives_report.pdf" + report_filepath = f"{DIR}/{artifact_key}" + + # create PDF object + pdf = MarkdownPdf(toc_level=4) + pdf.add_section(Section(f" {processed_report}\n")) + pdf.add_section( + Section(f"# Ad Creatives\n\n{IMG_CREATIVE_STRING}\n\n{VID_CREATIVE_STRING}") + ) + pdf.meta["title"] = "[Final] trends-2-creatives Report" + pdf.save(report_filepath) + + # open pdf and 
read bytes for types.Part() object + with open(report_filepath, "rb") as f: + document_bytes = f.read() + + # artifact build + document_part = types.Part( + inline_data=types.Blob(data=document_bytes, mime_type="application/pdf") + ) + version = await tool_context.save_artifact( + filename=artifact_key, artifact=document_part + ) + logging.info( + f"\n\nSaved report artifact: '{artifact_key}' as version {version}\n\n" + ) + upload_blob_to_gcs( + source_file_name=report_filepath, + destination_blob_name=os.path.join(gcs_folder, artifact_key), + ) + logging.info( + f"\n\nSaved artifact doc '{artifact_key}', version {version}, to folder '{gcs_folder}'\n\n" + ) + # clean up + shutil.rmtree(DIR) + logging.info(f"Directory '{DIR}' and its contents removed successfully") + return { + "status": "ok", + "gcs_bucket": GCS_BUCKET, + "gcs_folder": gcs_folder, + "artifact_key": artifact_key, + } + except Exception as e: + logging.error(f"Error saving artifact: {e}") + return {"status": "failed", "error": str(e)} + + +# TODO: Get ffmpeg install working on agent engine +# async def concatenate_videos( +# video_filenames: List[str], +# tool_context: ToolContext, +# concept_name: str, +# ): +# """Concatenates multiple videos into a single longer video for a concept. + +# Args: +# video_filenames (List[str]): List of video filenames from tool_context artifacts. +# tool_context (ToolContext): The tool context. +# concept_name (str, optional): The name of the concept. + +# Returns: +# dict: Status and the location of the concatenated video file. 
+# """ +# if not video_filenames: +# return {"status": "failed", "error": "No video filenames provided"} + +# try: +# # Create temporary directory for processing +# with tempfile.TemporaryDirectory() as temp_dir: +# # Load videos from artifacts and save locally +# local_video_paths = [] +# for idx, video_filename in enumerate(video_filenames): +# # Load artifact +# video_part = await tool_context.load_artifact(video_filename) +# if not video_part: +# return { +# "status": "failed", +# "error": f"Could not load artifact: {video_filename}", +# } +# if not video_part.inline_data: +# return { +# "status": "failed", +# "error": f"Could not load artifact inline_data: {video_filename}", +# } +# if not video_part.inline_data.data: +# return { +# "status": "failed", +# "error": f"Could not load artifact inline_data.data: {video_filename}", +# } + +# # Extract bytes from the Part object +# video_bytes = video_part.inline_data.data + +# # Save locally for ffmpeg processing +# local_path = os.path.join(temp_dir, f"video_{idx}.mp4") +# with open(local_path, "wb") as f: +# f.write(video_bytes) +# local_video_paths.append(local_path) + +# # Create output filename +# if concept_name: +# output_filename = f"{concept_name}.mp4" +# else: +# output_filename = f"{uuid.uuid4()}.mp4" + +# output_path = os.path.join(temp_dir, output_filename) + +# if len(local_video_paths) == 1: +# # If only one video, just copy it +# subprocess.run(["cp", local_video_paths[0], output_path], check=True) +# else: +# # Create ffmpeg filter complex for concatenation with transitions +# # Simple concatenation without transitions +# concat_file = os.path.join(temp_dir, "concat_list.txt") +# with open(concat_file, "w") as f: +# for video_path in local_video_paths: +# f.write(f"file '{video_path}'\n") + +# subprocess.run( +# [ +# "ffmpeg", +# "-f", +# "concat", +# "-safe", +# "0", +# "-i", +# concat_file, +# "-c", +# "copy", +# output_path, +# ], +# check=True, +# capture_output=True, +# text=True, +# ) + +# # 
Read the output video +# with open(output_path, "rb") as f: +# video_bytes = f.read() + +# # Save as artifact +# await tool_context.save_artifact( +# output_filename, +# types.Part.from_bytes(data=video_bytes, mime_type="video/mp4"), +# ) + +# # Also upload to GCS for persistence +# gcs_uri = upload_file_to_gcs( +# file_path=output_filename, +# file_data=video_bytes, +# content_type="video/mp4", +# ) +# new_entry = {output_filename: gcs_uri} +# tool_context.state["artifact_keys"]["video_creatives"].update(new_entry) + +# return { +# "status": "ok", +# "video_filename": output_filename, +# "gcs_uri": gcs_uri, +# "num_videos_concatenated": len(video_filenames), +# } + +# except subprocess.CalledProcessError as e: +# return { +# "status": "failed", +# "error": f"FFmpeg error: {e.stderr if hasattr(e, 'stderr') else str(e)}", +# } +# except Exception as e: +# return {"status": "failed", "error": str(e)} diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/agent.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/agent.py new file mode 100644 index 00000000..d5b0034b --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/agent.py @@ -0,0 +1,204 @@ +import datetime +import logging + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.tools import google_search +from google.adk.planners import BuiltInPlanner +from google.adk.tools.agent_tool import AgentTool +from google.adk.agents import Agent, SequentialAgent, ParallelAgent + +from trends_and_insights_agent.shared_libraries.config import 
config +from trends_and_insights_agent.shared_libraries import callbacks, schema_types + +from .tools import save_draft_report_artifact +from .sub_agents.campaign_web_researcher.agent import ca_sequential_planner +from .sub_agents.search_web_researcher.agent import gs_sequential_planner +from .sub_agents.youtube_web_researcher.agent import yt_sequential_planner + + +# --- PARALLEL RESEARCH SUBAGENTS --- # +parallel_planner_agent = ParallelAgent( + name="parallel_planner_agent", + sub_agents=[yt_sequential_planner, gs_sequential_planner, ca_sequential_planner], + description="Runs multiple research planning agents in parallel.", +) + +merge_planners = Agent( + name="merge_planners", + model=config.worker_model, + # include_contents="none", + description="Combine results from state keys 'campaign_web_search_insights', 'gs_web_search_insights', and 'yt_web_search_insights'", + instruction="""You are an AI Assistant responsible for combining initial research findings into a comprehensive summary. + Your primary task is to organize the following research summaries, clearly attributing findings to their source areas. + Structure your response using headings for each topic. Ensure the report is coherent and integrates the key points smoothly. + + --- + **Output Format:** + + # Summary of Campaign and Trend Research + + ## Campaign Guide + {campaign_web_search_insights} + + ## Search Trend + {gs_web_search_insights} + + ## YouTube Trends Findings + {yt_web_search_insights} + + Output *only* the structured report following this format. Do not include introductory or concluding phrases outside this structure, and strictly adhere to using only the provided input summary content. 
+ """, + output_key="combined_web_search_insights", +) + +merge_parallel_insights = SequentialAgent( + name="merge_parallel_insights", + sub_agents=[parallel_planner_agent, merge_planners], + description="Coordinates parallel research and synthesizes the results.", +) + + +# --- COMBINED RESEARCH SUBAGENTS --- # +combined_web_evaluator = Agent( + model=config.critic_model, + name="combined_web_evaluator", + description="Critically evaluates research about the campaign guide and generates follow-up queries.", + instruction=f""" + You are a meticulous quality assurance analyst evaluating the research findings in 'combined_web_search_insights'. + + Be critical of the completeness of the research. + Consider the bigger picture and the intersection of the `target_product` and `target_audience`. + Consider the trends in each of the 'target_search_trends' and 'target_yt_trends' state keys. + + Look for any gaps in depth or coverage, as well as any areas that need more clarification. + - If you find significant gaps in depth or coverage, write a detailed comment about what's missing, and generate 5-7 specific follow-up queries to fill those gaps. + - If you don't find any significant gaps, write a detailed comment about any aspect of the campaign guide or trends to research further. Provide 5-7 related queries. + + Current date: {datetime.datetime.now().strftime("%Y-%m-%d")} + Your response must be a single, raw JSON object validating against the 'CampaignFeedback' schema. 
+ """, + output_schema=schema_types.CampaignFeedback, + disallow_transfer_to_parent=True, + disallow_transfer_to_peers=True, + output_key="combined_research_evaluation", + before_model_callback=callbacks.rate_limit_callback, +) + + +enhanced_combined_searcher = Agent( + model=config.worker_model, + name="enhanced_combined_searcher", + description="Executes follow-up searches and integrates new findings.", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction=""" + You are a specialist researcher executing a refinement pass. + You are tasked to conduct a second round of web research and gather insights related to the trending YouTube video, the trending Search terms, the target audience, and the target product. + + 1. Review the 'combined_research_evaluation' state key to understand the previous round of research. + 2. Execute EVERY query listed in 'follow_up_queries' using the 'google_search' tool. + 3. Synthesize the new findings and COMBINE them with the existing information in 'combined_web_search_insights'. + 4. Your output MUST be the new, complete, and improved set of research insights for the trending Search terms, trending YouTube video, and campaign guide. + """, + tools=[google_search], + output_key="combined_web_search_insights", + after_agent_callback=callbacks.collect_research_sources_callback, +) + + +combined_report_composer = Agent( + model=config.critic_model, + name="combined_report_composer", + include_contents="none", + description="Transforms research data and a markdown outline into a final, cited report.", + instruction=""" + Transform the provided data into a polished, professional, and meticulously cited research report. 
+ + --- + **INPUT DATA** + + * **Search Trends:** + {target_search_trends} + + * **YouTube Trends:** + {target_yt_trends} + + * **YouTube Video Analysis:** + {yt_video_analysis} + + * **Final Research:** + {combined_web_search_insights} + + * **Citation Sources:** + `{sources}` + + --- + **CRITICAL: Citation System** + To cite a source, you MUST insert a special citation tag directly after the claim it supports. + + **The only correct format is:** `` + + --- + **OUTPUT FORMAT** + Organize the output to include these sections: + * **Campaign Guide** + * **Search Trend** + * **YouTube Trend** + * **Key Insights from Research** + + You can use any format you prefer, but here's a suggested structure: + # Campaign Title + ## Section Name + An overview of what this section covers, including specific insights from web research. + Feel free to add subsections or bullet points if needed to better organize the content. + Make sure your outline is clear and easy to follow. + + --- + **Final Instructions** + Generate a comprehensive report using ONLY the `` tag system for all citations. + Ensure the final report follows a structure similar to the one proposed in the **OUTPUT FORMAT** + Do not include a "References" or "Sources" section; all citations must be in-line. + """, + output_key="combined_final_cited_report", + after_agent_callback=callbacks.citation_replacement_callback, + before_model_callback=callbacks.rate_limit_callback, +) + + +# --- COMPLETE RESEARCH PIPELINE SUBAGENT --- # +combined_research_pipeline = SequentialAgent( + name="combined_research_pipeline", + description="Executes a pipeline of web research. 
It performs iterative research, evaluation, and insight generation.", + sub_agents=[ + merge_parallel_insights, + combined_web_evaluator, + enhanced_combined_searcher, + combined_report_composer, + ], +) + + +# Main orchestrator agent +research_orchestrator = Agent( + model=config.worker_model, + name="research_orchestrator", + description="Orchestrate comprehensive research for the campaign metadata and trending topics.", + instruction="""**Role:** You are the orchestrator for a comprehensive research workflow. + + **Objective:** Your task is to facilitate several research tasks and produce a draft research report. + + **Workflow:** + 1. First, use the `combined_research_pipeline` tool (agent tool) to conduct web research on the campaign metadata and selected trends. + 2. Once all research tasks are complete, use the `save_draft_report_artifact` tool to save a PDF draft of the research. + 3. Finally, transfer back to the `root_agent`. + + """, + tools=[ + save_draft_report_artifact, + AgentTool(agent=combined_research_pipeline), + ], + generate_content_config=types.GenerateContentConfig(temperature=1.0), +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/campaign_web_researcher/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/campaign_web_researcher/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/campaign_web_researcher/agent.py 
b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/campaign_web_researcher/agent.py new file mode 100644 index 00000000..614be52c --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/campaign_web_researcher/agent.py @@ -0,0 +1,79 @@ +import logging + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.tools import google_search +from google.adk.planners import BuiltInPlanner +from google.adk.agents import Agent, SequentialAgent + +from trends_and_insights_agent.shared_libraries import callbacks +from trends_and_insights_agent.shared_libraries.config import config + + +campaign_web_planner = Agent( + model=config.lite_planner_model, + name="campaign_web_planner", + include_contents="none", + description="Generates initial queries to guide web research about concepts described in the campaign metadata.", + instruction="""You are a research strategist. + Your job is to create high-level queries that will help marketers better understand the 'target_audience', 'target_product', and 'key_selling_points' state keys. + + Review the campaign metadata provided in the **Input Data**, then generate a list of 4-6 web queries to better understand them. + + --- + ### Input Data + + + {target_audience} + + + + {target_product} + + + + {key_selling_points} + + + --- + ### Important Guidelines + The queries should help answer questions like: + * What's relevant, distinctive, or helpful about the {target_product}? + * What are some key attributes about the target audience? + * Which key selling points would the target audience best resonate with? Why? + * How could marketers make a culturally relevant advertisement related to product insights? + + --- + ### Final Instructions + Generate a list of web queries that address the **Important Guidelines**. 
+ **CRITICAL RULE: Your output should just include a numbered list of queries. Nothing else.** + """, + output_key="initial_campaign_queries", +) + + +campaign_web_searcher = Agent( + model=config.worker_model, + name="campaign_web_searcher", + description="Performs the crucial first pass of web research about the campaign guide.", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction=""" + You are a diligent and exhaustive researcher. Your task is to conduct initial web research for concepts described in the campaign guide. + You will be provided with a list of web queries in the 'initial_campaign_queries' state key. + Use the 'google_search' tool to execute all queries. + Synthesize the results into a detailed summary. + """, + tools=[google_search], + output_key="campaign_web_search_insights", + after_agent_callback=callbacks.collect_research_sources_callback, +) + + +ca_sequential_planner = SequentialAgent( + name="ca_sequential_planner", + description="Executes sequential research tasks for concepts described in the campaign guide.", + sub_agents=[campaign_web_planner, campaign_web_searcher], +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/search_web_researcher/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/search_web_researcher/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/search_web_researcher/agent.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/search_web_researcher/agent.py new file mode 100644 index 00000000..8f93102b --- /dev/null +++ 
b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/search_web_researcher/agent.py @@ -0,0 +1,69 @@ +import logging + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.tools import google_search +from google.adk.planners import BuiltInPlanner +from google.adk.agents import Agent, SequentialAgent + +from trends_and_insights_agent.shared_libraries import callbacks +from trends_and_insights_agent.shared_libraries.config import config + + +gs_web_planner = Agent( + model=config.lite_planner_model, + name="gs_web_planner", + include_contents="none", + description="Generates initial queries to understand why the 'target_search_trends' are trending.", + instruction="""You are a research strategist. + Your job is to create high-level queries that will help marketers better understand the cultural significance of Google Search trends in the 'target_search_trends' state key. + + Review the search trend provided in the **Input Data**, then proceed to the **Instructions**. + + --- + ### Input Data + + + {target_search_trends} + + + --- + ### Instructions + 1. Read the 'target_search_trends' state key to get the Search trend. + 2. Generate 4-5 queries that will provide more context for this trend, and answer questions like: + - Why are these search terms trending? Who is involved? + - Are there any related themes that would resonate with our target audience? + - Describe any key entities involved (i.e., people, places, organizations, named events, etc.), and the relationships between these key entities, especially in the context of the trending topic, or if possible the target product + - Explain the cultural significance of the trend. + + **CRITICAL RULE: Your output should just include a numbered list of queries. 
Nothing else.** + """, + output_key="initial_gs_queries", +) + + +gs_web_searcher = Agent( + model=config.worker_model, + name="gs_web_searcher", + description="Performs the crucial first pass of web research about the trending Search terms.", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction=""" + You are a diligent and exhaustive researcher. + Your task is to conduct initial web research for the trending Search terms. + Use the 'google_search' tool to execute all queries listed in 'initial_gs_queries'. + Synthesize the results into a detailed summary. + """, + tools=[google_search], + output_key="gs_web_search_insights", + after_agent_callback=callbacks.collect_research_sources_callback, +) + + +gs_sequential_planner = SequentialAgent( + name="gs_sequential_planner", + description="Executes sequential research tasks for trends in Google Search.", + sub_agents=[gs_web_planner, gs_web_searcher], +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/youtube_web_researcher/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/youtube_web_researcher/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/youtube_web_researcher/agent.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/youtube_web_researcher/agent.py new file mode 100644 index 00000000..0d817e40 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/sub_agents/youtube_web_researcher/agent.py @@ -0,0 +1,93 @@ +import logging + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.planners import BuiltInPlanner +from 
google.adk.agents import Agent, SequentialAgent +from google.adk.tools import google_search + +from trends_and_insights_agent.shared_libraries import callbacks +from trends_and_insights_agent.shared_libraries.config import config +from trends_and_insights_agent.tools import analyze_youtube_videos + + +yt_analysis_generator_agent = Agent( + model=config.worker_model, + name="yt_analysis_generator_agent", + description="Process YouTube videos, extract key details, and provide an overall summary.", + instruction=""" + Your goal is to **understand the content** of the trending YouTube video in the 'target_yt_trends' state key: + + + {target_yt_trends} + + + 1. Use the `analyze_youtube_videos` tool to analyze the `video_url` in the 'target_yt_trends' state variable. + 2. Provide a concise summary covering: + - **Main Thesis/Claim:** What is the video about? What is being discussed? + - **Key Entities:** Describe any key entities (e.g., people, places, things) involved and how they are related. + - **Trend Context:** Why might this video be trending? + - **Summary:** Provide a concise summary of the video content. + """, + tools=[analyze_youtube_videos], + output_key="yt_video_analysis", +) + + +yt_web_planner = Agent( + model=config.lite_planner_model, + name="yt_web_planner", + include_contents="none", + description="Generates initial queries to understand why the 'target_yt_trends' are trending.", + instruction="""You are a research strategist. + Your job is to create high-level queries that will help marketers better understand the cultural significance of the selected trending YouTube video(s) in the 'target_yt_trends' state key. + + Review the trending YouTube video and analysis provided in the **Input Data**, then proceed to the **Instructions**. + + --- + ### Input Data + + + {target_yt_trends} + + + + {yt_video_analysis} + + + --- + ### Instructions + 1. Read the 'target_yt_trends' and 'yt_video_analysis' state keys to understand the trending YouTube video. + 2. 
Generate 2-3 web queries to better understanding the context of the video. + + Your output should just include a numbered list of queries. Nothing else. + """, + output_key="initial_yt_queries", +) + + +yt_web_searcher = Agent( + model=config.worker_model, + name="yt_web_searcher", + description="Performs web research to better understand the context of the trending YouTube video.", + planner=BuiltInPlanner( + thinking_config=types.ThinkingConfig(include_thoughts=False) + ), + instruction=""" + You are a diligent and exhaustive researcher. + Your task is to conduct initial web research for concepts described in the 'yt_video_analysis' state key. + You will be provided with a list of web queries in the 'initial_yt_queries' state key. + Execute all of these queries using the 'google_search' tool and synthesize the results into a detailed summary + """, + tools=[google_search], + output_key="yt_web_search_insights", + after_agent_callback=callbacks.collect_research_sources_callback, +) + + +yt_sequential_planner = SequentialAgent( + name="yt_sequential_planner", + description="Executes sequential research tasks for trending YouTube videos.", + sub_agents=[yt_analysis_generator_agent, yt_web_planner, yt_web_searcher], +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/tools.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/tools.py new file mode 100644 index 00000000..90a8cdde --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/staged_researcher/tools.py @@ -0,0 +1,77 @@ +import os +import shutil +import logging +from markdown_pdf import MarkdownPdf, Section + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.tools import ToolContext + +from ...shared_libraries.utils import upload_blob_to_gcs + +# Get the cloud storage bucket from the environment variable +try: + 
    GCS_BUCKET = os.environ["BUCKET"]
except KeyError:
    raise Exception("BUCKET environment variable not set")


# --- Tools ---
async def save_draft_report_artifact(tool_context: ToolContext) -> dict:
    """
    Saves generated PDF report bytes as an artifact.

    Args:
        tool_context (ToolContext): The tool context.

    Returns:
        dict: Status and the location of the generated PDF artifact.
    """
    # NOTE: read before the try block — a missing 'final_report_with_citations'
    # state key raises KeyError to the caller instead of returning a failure dict.
    processed_report = tool_context.state["final_report_with_citations"]

    # create local dir to save PDF file
    try:
        DIR = "files"
        SUBDIR = f"{DIR}/research"
        if not os.path.exists(SUBDIR):
            os.makedirs(SUBDIR)

        artifact_key = "draft_research_report_with_citations.pdf"
        filepath = f"{SUBDIR}/{artifact_key}"

        # Render the markdown report to PDF on local disk first.
        pdf = MarkdownPdf(toc_level=4)
        pdf.add_section(Section(f" {processed_report}\n"))
        pdf.meta["title"] = "[Draft] Trend & Campaign Research Report"
        pdf.save(filepath)

        # open pdf and read bytes for types.Part() object
        with open(filepath, "rb") as f:
            document_bytes = f.read()

        document_part = types.Part(
            inline_data=types.Blob(data=document_bytes, mime_type="application/pdf")
        )
        # Save to the ADK artifact service; `version` is used only for logging below.
        version = await tool_context.save_artifact(
            filename=artifact_key, artifact=document_part
        )
        gcs_folder = tool_context.state["gcs_folder"]

        # Also persist the PDF to Cloud Storage under the session's folder.
        upload_blob_to_gcs(
            source_file_name=filepath,
            destination_blob_name=os.path.join(gcs_folder, artifact_key),
        )
        logging.info(
            f"\n\nSaved artifact doc '{artifact_key}', version {version}, to folder '{gcs_folder}' \n\n"
        )

        # NOTE(review): removes the entire "files" dir, not just files/research —
        # confirm no other tool writes under "files/" concurrently.
        shutil.rmtree(DIR)
        return {
            "status": "ok",
            "gcs_bucket": GCS_BUCKET,
            "gcs_folder": gcs_folder,
            "artifact_key": artifact_key,
        }
    # Broad catch is deliberate for a tool: report failure to the LLM rather than raise.
    except Exception as e:
        logging.error(f"Error saving artifact: {e}")
        return {"status": "failed", "error": str(e)}
diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/__init__.py
new file mode 100644 index 00000000..e69de29b diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/agent.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/agent.py new file mode 100644 index 00000000..7f6bfab3 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/agent.py @@ -0,0 +1,34 @@ +import logging + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.agents import Agent + +from .tools import ( + memorize, + get_daily_gtrends, + get_youtube_trends, + save_yt_trends_to_session_state, + save_search_trends_to_session_state, +) +from .prompts import AUTO_TREND_AGENT_INSTR +from ...shared_libraries.config import config + + +trends_and_insights_agent = Agent( + model=config.worker_model, + name="trends_and_insights_agent", + description="Captures campaign metadata and displays trending topics from Google Search and trending videos from YouTube.", + instruction=AUTO_TREND_AGENT_INSTR, + tools=[ + memorize, + get_daily_gtrends, + get_youtube_trends, + save_yt_trends_to_session_state, + save_search_trends_to_session_state, + ], + generate_content_config=types.GenerateContentConfig( + temperature=1.0, + ), +) diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/prompts.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/prompts.py new file mode 100644 index 00000000..67d87fae --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/prompts.py @@ -0,0 +1,81 @@ +"""Prompt for trend assistant sub-agent""" + +from ...shared_libraries.config import config + +N_YOUTUBE_TREND_VIDEOS = config.max_results_yt_trends +N_SEARCH_TREND_TOPICS = 25 + + +AUTO_TREND_AGENT_INSTR = """ +You are a planning agent who helps 
users create marketing campaign briefs that will guide and inform downstream research and creative processes. +- You do not conduct any research or creative processes. You are strictly helping users with their selections and preferences only. +- You want to gather specific campaign-related metadata from the user. The actual research will be handled by transferring to the `combined_research_merger` later. + +You are responsible for capturing three categories of information: +1. Campaign metadata e.g., brand, product, key selling points, and target audience. +2. Trending topics from Google Search. +3. Trending videos from YouTube. + +Your **objective** is to use the **available tools** to complete the **instructions** step-by-step. + +## Available Tools +* `get_daily_gtrends`: Use this tool to extract the top trends from Google Search for the current week. +* `get_youtube_trends`: Use this tool to query the YouTube Data API for the top trending YouTube videos. +* `save_yt_trends_to_session_state`: Use this tool to update the 'target_yt_trends' state variable with the user-selected video(s) trending on YouTube. +* `save_search_trends_to_session_state`: Use this tool to update the 'target_search_trends' state variable with the user-selected Search Trend. +* `memorize`: Use this tool to store user selections in the session state. + +## Instructions +1. Your goal is to help the user, by first completing the following information if any is blank: + {brand} + {target_audience} + {target_product} + {key_selling_points} + +2. Ask for missing information from the user. +3. Use the `memorize` tool to store campaign metadata into the following variables: + - `brand`, + - `target_audience` + - `target_product` and + - `key_selling_points` + To make sure everything is stored correctly, instead of calling memorize all at once, chain the calls such that + you only call another `memorize` after the last call has responded. +4. 
Use instructions from to find the user's desired Search trend. +5. Use instructions from to find the user's desired trending YouTube video. +6. Finally, once the above information is captured, reconfirm with user, if the user is satisfied, transfer to the `root_agent`. + + +- Use the `get_daily_gtrends` tool to display the top 25 trending Search terms to the user. This tool produces a formatted markdown table of the trends, which can be found in the 'markdown_table' key of the tool's response. You must display this markdown table to the user **in markdown format** +- Work with the user to understand which trending topic they'd like to proceed with. Do not proceed to the next step until the user has selected a Search trend topic. +- Once they choose a Search trend topic, use the `save_search_trends_to_session_state` tool to update the session state with the `term`, `rank`, and `refresh_date` from this Search trend topic. + + + +- Use the `get_youtube_trends` tool to extract the top trending videos on YouTube for the US. Display each trending video's title, duration, and URL to the user in a numbered list like this: + + 1. **Video Title** - Duration - URL + 2. **Video Title** - Duration - URL + 3. **Video Title** - Duration - URL + + +""" + +GUIDE_DATA_EXTRACT_INSTR = """ +Extract **ALL** text from the provided campaign guide. + +**Important:** Grab as much details as possible from the sections below: + +* campaign_name: [should be the title of the document] +* brand: [infer this from the target product] +* target_product: [should be explicitly defined] +* target_audience: [extract bulleted description] +* target_regions: [should be explicitly defined] +* campaign_objectives: [extract bulleted list of objectives] +* media_strategy: [extract bulleted list of media channels] +* key_selling_points: [extract bulleted list of features and their description] + +Your response must be a single, raw JSON object validating against the 'MarketingCampaignGuide' schema. 
+""" diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/tools.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/tools.py new file mode 100644 index 00000000..707d9910 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/common_agents/trend_assistant/tools.py @@ -0,0 +1,200 @@ +import os +import logging + +logging.basicConfig(level=logging.INFO) + +import googleapiclient.discovery +from google.cloud import bigquery +from google.adk.tools import ToolContext + +from ...shared_libraries.config import config +from ...shared_libraries.secrets import access_secret_version + + +# ======================== +# clients +# ======================== +try: + yt_secret_id = os.environ["YT_SECRET_MNGR_NAME"] +except KeyError: + raise Exception("YT_SECRET_MNGR_NAME environment variable not set") + +# youtube client +YOUTUBE_DATA_API_KEY = access_secret_version(secret_id=yt_secret_id, version_id="1") +youtube_client = googleapiclient.discovery.build( + serviceName="youtube", version="v3", developerKey=YOUTUBE_DATA_API_KEY +) + +BQ_PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +bq_client = bigquery.Client(project=BQ_PROJECT) + + +def memorize(key: str, value: str, tool_context: ToolContext): + """ + Memorize pieces of information, one key-value pair at a time. + + Args: + key: the label indexing the memory to store the value. + value: the information to be stored. + tool_context: The ADK tool context. + + Returns: + A status message. + """ + mem_dict = tool_context.state + mem_dict[key] = value + return {"status": f'Stored "{key}": "{value}"'} + + +async def save_yt_trends_to_session_state( + selected_trends: dict, tool_context: ToolContext +) -> dict: + """ + Tool to save `selected_trends` to the 'target_yt_trends' state key. + Use this tool after the user has selected trending YouTube content to target for the campaign. 
+ + Args: + selected_trends: dict -> The selected trends from the markdown table. + video_title: str -> The title of the user-selected video from YouTube Trends (`videoTitle`). + video_duration: str -> The user-selected video's duration (`duration`). + video_url: str -> The user-selected video's URL (`videoURL`). + tool_context: The tool context. + + Returns: + A status message. + """ + existing_target_yt_trends = tool_context.state.get("target_yt_trends") + if existing_target_yt_trends is not {"target_yt_trends": []}: + existing_target_yt_trends["target_yt_trends"].append(selected_trends) + tool_context.state["target_yt_trends"] = existing_target_yt_trends + return {"status": "ok"} + + +def get_youtube_trends( + region_code: str = "US", + max_results: int = config.max_results_yt_trends, +) -> dict: + """ + Makes request to YouTube Data API for most popular videos in a given region. + Returns a dictionary of videos that match the API request parameters e.g., trending videos + + Args: + region_code (str): selects a video chart available in the specified region. Values are ISO 3166-1 alpha-2 country codes. + For example, the region_code for the United Kingdom would be 'GB', whereas 'US' would represent The United States. + max_results (int): The number of video results to return. + + Returns: + dict: The response from the YouTube Data API. 
+ """ + + request = youtube_client.videos().list( + part="snippet,contentDetails", # statistics + chart="mostPopular", + regionCode=region_code, + maxResults=max_results, + ) + trend_response = request.execute() + # return trend_response + + # TODO: only return select fields + trend_dict = {} + i = 1 + for video in trend_response["items"]: + row_name = f"row_{i}" + trend_dict.update( + { + row_name: { + "videoId": video["id"], + "videoTitle": video["snippet"]["title"], + # 'videoDescription': video['snippet']['description'], + "duration": video["contentDetails"]["duration"], + "videoURL": f"https://www.youtube.com/watch?v={video['id']}", + } + } + ) + i += 1 + return trend_dict + + +async def save_search_trends_to_session_state( + new_trends: dict, tool_context: ToolContext +) -> dict: + """ + Tool to save `new_trends` to the 'target_search_trends' state key. + Use this tool after the user has selected a Trending Search topic to target for the campaign. + + Args: + new_trends: The selected trends from the markdown table. Use the `tool_context` to extract the following schema: + trend_title: str -> The trend's `term` from the markdown table. Should be the exact same words as seen in the markdown table. + trend_rank: int -> The trend's `rank` in the markdown table. Should be the exact same number as seen in the markdown table. + trend_refresh_date: str -> The trend's `refresh_date` from the markdown table. Should be the same date string as seen in the markdown table, and formatted as 'MM/DD/YYYY' + tool_context: The tool context. + + Returns: + A status message. 
+ """ + existing_target_search_trends = tool_context.state.get("target_search_trends") + if existing_target_search_trends is not {"target_search_trends": []}: + existing_target_search_trends["target_search_trends"].append(new_trends) + tool_context.state["target_search_trends"] = existing_target_search_trends + return {"status": "ok"} + + +# ============================== +# Google Search Trends (context) +# ============================= +def get_gtrends_max_date() -> str: + query = f""" + SELECT + MAX(refresh_date) as max_date + FROM `bigquery-public-data.google_trends.top_terms` + """ + max_date = bq_client.query(query).to_dataframe() + return max_date.iloc[0][0].strftime("%m/%d/%Y") + + +max_date = get_gtrends_max_date() + + +def get_daily_gtrends(today_date: str = max_date) -> dict: + """ + Retrieves the top 25 Google Search Trends (term, rank, refresh_date). + + Args: + today_date: Today's date in the format 'MM/DD/YYYY'. Use the default value provided. + + Returns: + dict: key is the latest date for the trends, the value is a markdown table containing the Google Search Trends. + The table includes columns for 'term', 'rank', and 'refresh_date'. + Returns 25 terms ordered by their rank (ascending order) for the current week. 
+ """ + # get latest refresh date + max_date = get_gtrends_max_date() + # max_date = "07/15/2025" + logging.info(f"\n\nmax_date in trends_assistant: {max_date}\n\n") + + query = f""" + SELECT + term, + refresh_date, + ARRAY_AGG(STRUCT(rank,week) ORDER BY week DESC LIMIT 1) x + FROM `bigquery-public-data.google_trends.top_terms` + WHERE refresh_date = PARSE_DATE('%m/%d/%Y', '{max_date}') + GROUP BY term, refresh_date + ORDER BY (SELECT rank FROM UNNEST(x)) + """ + try: + df_t = bq_client.query(query).to_dataframe() + df_t.index += 1 + df_t["rank"] = df_t.index + df_t = df_t.drop("x", axis=1) + new_order = ["term", "rank", "refresh_date"] + df_t = df_t[new_order] + markdown_string = df_t.to_markdown(index=True) + except Exception as e: + return {"status": "error", "error_message": str(e)} + + return { + "status": "ok", + f"markdown_table": markdown_string, + } diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/prompts.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/prompts.py new file mode 100644 index 00000000..08e0d241 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/prompts.py @@ -0,0 +1,46 @@ +"""Prompt for root agent""" + +GLOBAL_INSTR = """ +You are a helpful AI assistant, part of a multi-agent system designed for advanced web research and ad creative generation. +Do not perform any research yourself. Your job is to **delegate**. +""" + +ROOT_AGENT_INSTR = """You are an Expert AI Marketing Research & Strategy Assistant. + +Your primary function is to orchestrate a suite of **specialized tools and sub-agents** to provide users with comprehensive insights, trend analysis, and creative ideas for their marketing campaigns. + + +**Instructions:** +Start by greeting the user and giving them a high-level overview of what you do. Then proceed sequentially with the tasks below: + +1. 
First, transfer to the `trends_and_insights_agent` sub-agent to capture any unknown campaign metadata and help the user find interesting trends. +2. Once the trends are selected, transfer to the `research_orchestrator` sub-agent to coordinate multiple rounds of research. Strictly follow all the steps one-by-one. Do not skip any steps or execute them out of order. +3. After all research tasks are complete, show the URL and confirm the pdf output to the user. Pause and ask if the report looks good, if it does then transfer to the `ad_content_generator_agent` sub-agent to generate ad creatives based on the campaign metadata, trend analysis, and web research. +4. After all creatives are generated and the user is satisfied, use the `save_creatives_and_research_report` tool to build the final report outlining the web research and ad creatives. + + +**Sub-agents:** +- Use `trends_and_insights_agent` to gather inputs from the user e.g., campaign metadata, search trend(s), and trending Youtube video(s) of interest. +- Use `research_orchestrator` to coordinate and execute all research tasks. +- Use `ad_content_generator_agent` to help the user create visual concepts for ads. + + +**Tools:** +- Use `save_creatives_and_research_report` tool to build the final report, detailing research and creatives generated during a session, and save it as an artifact. Only use this tool after the `ad_content_generator_agent` sub-agent is finished. 
+ + +**Campaign metadata:** + + {brand} + + {target_product} + + + {key_selling_points} + + + + {target_audience} + + +""" diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/requirements.txt b/python/agents/trends-and-insights-agent/trends_and_insights_agent/requirements.txt new file mode 100644 index 00000000..88ab4fad --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/requirements.txt @@ -0,0 +1,190 @@ +aiohappyeyeballs==2.6.1 ; python_version >= "3.11" and python_version < "4.0" +aiohttp==3.12.13 ; python_version >= "3.11" and python_version < "4.0" +aiosignal==1.3.2 ; python_version >= "3.11" and python_version < "4.0" +annotated-types==0.7.0 ; python_version >= "3.11" and python_version < "4.0" +anyio==4.9.0 ; python_version >= "3.11" and python_version < "4.0" +appnope==0.1.4 ; python_version >= "3.11" and python_version < "4.0" and platform_system == "Darwin" +argon2-cffi-bindings==21.2.0 ; python_version >= "3.11" and python_version < "4.0" +argon2-cffi==25.1.0 ; python_version >= "3.11" and python_version < "4.0" +arrow==1.3.0 ; python_version >= "3.11" and python_version < "4.0" +asttokens==3.0.0 ; python_version >= "3.11" and python_version < "4.0" +async-lru==2.0.5 ; python_version >= "3.11" and python_version < "4.0" +attrs==25.3.0 ; python_version >= "3.11" and python_version < "4.0" +authlib==1.6.0 ; python_version >= "3.11" and python_version < "4.0" +babel==2.17.0 ; python_version >= "3.11" and python_version < "4.0" +beautifulsoup4==4.13.4 ; python_version >= "3.11" and python_version < "4.0" +bleach[css]==6.2.0 ; python_version >= "3.11" and python_version < "4.0" +cachetools==5.5.2 ; python_version >= "3.11" and python_version < "4.0" +certifi==2025.6.15 ; python_version >= "3.11" and python_version < "4.0" +cffi==1.17.1 ; python_version >= "3.11" and python_version < "4.0" +charset-normalizer==3.4.2 ; python_version >= "3.11" and python_version < "4.0" +click==8.2.1 ; python_version 
>= "3.11" and python_version < "4.0" +cloudpickle==3.1.1 ; python_version >= "3.11" and python_version < "4.0" +colorama==0.4.6 ; python_version >= "3.11" and python_version < "4.0" and (platform_system == "Windows" or sys_platform == "win32") +comm==0.2.2 ; python_version >= "3.11" and python_version < "4.0" +cryptography==45.0.4 ; python_version >= "3.11" and python_version < "4.0" +dateparser==1.2.1 ; python_version >= "3.11" and python_version < "4.0" +debugpy==1.8.14 ; python_version >= "3.11" and python_version < "4.0" +decorator==5.2.1 ; python_version >= "3.11" and python_version < "4.0" +defusedxml==0.7.1 ; python_version >= "3.11" and python_version < "4.0" +docstring-parser==0.16 ; python_version >= "3.11" and python_version < "4.0" +executing==2.2.0 ; python_version >= "3.11" and python_version < "4.0" +fastapi==0.115.13 ; python_version >= "3.11" and python_version < "4.0" +fastjsonschema==2.21.1 ; python_version >= "3.11" and python_version < "4.0" +fqdn==1.5.1 ; python_version >= "3.11" and python_version < "4" +frozenlist==1.7.0 ; python_version >= "3.11" and python_version < "4.0" +google-adk==1.5.0 ; python_version >= "3.11" and python_version < "4.0" +google-api-core==2.25.1 ; python_version >= "3.11" and python_version < "4.0" +google-api-core[grpc]==2.25.1 ; python_version >= "3.11" and python_version < "4.0" +google-api-python-client==2.172.0 ; python_version >= "3.11" and python_version < "4.0" +google-auth-httplib2==0.2.0 ; python_version >= "3.11" and python_version < "4.0" +google-auth==2.40.3 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-aiplatform[agent-engines]==1.97.0 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-appengine-logging==1.6.2 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-audit-log==0.3.2 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-bigquery==3.34.0 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-core==2.4.3 ; 
python_version >= "3.11" and python_version < "4.0" +google-cloud-logging==3.12.1 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-resource-manager==1.14.2 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-secret-manager==2.24.0 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-speech==2.33.0 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-storage==2.19.0 ; python_version >= "3.11" and python_version < "4.0" +google-cloud-trace==1.16.2 ; python_version >= "3.11" and python_version < "4.0" +google-crc32c==1.7.1 ; python_version >= "3.11" and python_version < "4.0" +google-genai==1.23.0 ; python_version >= "3.11" and python_version < "4.0" +google-resumable-media==2.7.2 ; python_version >= "3.11" and python_version < "4.0" +googleapis-common-protos==1.70.0 ; python_version >= "3.11" and python_version < "4.0" +googleapis-common-protos[grpc]==1.70.0 ; python_version >= "3.11" and python_version < "4.0" +googlenews==1.6.15 ; python_version >= "3.11" and python_version < "4.0" +graphviz==0.21 ; python_version >= "3.11" and python_version < "4.0" +greenlet==3.2.3 ; python_version < "3.14" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version >= "3.11" +grpc-google-iam-v1==0.14.2 ; python_version >= "3.11" and python_version < "4.0" +grpcio-status==1.73.0 ; python_version >= "3.11" and python_version < "4.0" +grpcio==1.73.0 ; python_version >= "3.11" and python_version < "4.0" +h11==0.16.0 ; python_version >= "3.11" and python_version < "4.0" +httpcore==1.0.9 ; python_version >= "3.11" and python_version < "4.0" +httplib2==0.22.0 ; python_version >= "3.11" and python_version < "4.0" +httpx-sse==0.4.0 ; python_version >= "3.11" and python_version < "4.0" +httpx==0.28.1 ; python_version >= "3.11" and 
python_version < "4.0" +idna==3.10 ; python_version >= "3.11" and python_version < "4.0" +importlib-metadata==8.7.0 ; python_version >= "3.11" and python_version < "4.0" +iniconfig==2.1.0 ; python_version >= "3.11" and python_version < "4.0" +ipykernel==6.29.5 ; python_version >= "3.11" and python_version < "4.0" +ipython-pygments-lexers==1.1.1 ; python_version >= "3.11" and python_version < "4.0" +ipython==9.3.0 ; python_version >= "3.11" and python_version < "4.0" +ipywidgets==8.1.7 ; python_version >= "3.11" and python_version < "4.0" +isoduration==20.11.0 ; python_version >= "3.11" and python_version < "4.0" +jedi==0.19.2 ; python_version >= "3.11" and python_version < "4.0" +jinja2==3.1.6 ; python_version >= "3.11" and python_version < "4.0" +json5==0.12.0 ; python_version >= "3.11" and python_version < "4.0" +jsonpointer==3.0.0 ; python_version >= "3.11" and python_version < "4.0" +jsonschema-specifications==2025.4.1 ; python_version >= "3.11" and python_version < "4.0" +jsonschema==4.24.0 ; python_version >= "3.11" and python_version < "4.0" +jsonschema[format-nongpl]==4.24.0 ; python_version >= "3.11" and python_version < "4.0" +jupyter-client==8.6.3 ; python_version >= "3.11" and python_version < "4.0" +jupyter-console==6.6.3 ; python_version >= "3.11" and python_version < "4.0" +jupyter-core==5.8.1 ; python_version >= "3.11" and python_version < "4.0" +jupyter-events==0.12.0 ; python_version >= "3.11" and python_version < "4.0" +jupyter-lsp==2.2.5 ; python_version >= "3.11" and python_version < "4.0" +jupyter-server-terminals==0.5.3 ; python_version >= "3.11" and python_version < "4.0" +jupyter-server==2.16.0 ; python_version >= "3.11" and python_version < "4.0" +jupyter==1.1.1 ; python_version >= "3.11" and python_version < "4.0" +jupyterlab-pygments==0.3.0 ; python_version >= "3.11" and python_version < "4.0" +jupyterlab-server==2.27.3 ; python_version >= "3.11" and python_version < "4.0" +jupyterlab-widgets==3.0.15 ; python_version >= "3.11" and 
python_version < "4.0" +jupyterlab==4.4.3 ; python_version >= "3.11" and python_version < "4.0" +markdown-it-py==3.0.0 ; python_version >= "3.11" and python_version < "4.0" +markdown-pdf==1.7 ; python_version >= "3.11" and python_version < "4.0" +markupsafe==3.0.2 ; python_version >= "3.11" and python_version < "4.0" +matplotlib-inline==0.1.7 ; python_version >= "3.11" and python_version < "4.0" +mcp==1.9.4 ; python_version >= "3.11" and python_version < "4.0" +mdurl==0.1.2 ; python_version >= "3.11" and python_version < "4.0" +mistune==3.1.3 ; python_version >= "3.11" and python_version < "4.0" +multidict==6.5.0 ; python_version >= "3.11" and python_version < "4.0" +nbclient==0.10.2 ; python_version >= "3.11" and python_version < "4.0" +nbconvert==7.16.6 ; python_version >= "3.11" and python_version < "4.0" +nbformat==5.10.4 ; python_version >= "3.11" and python_version < "4.0" +nest-asyncio==1.6.0 ; python_version >= "3.11" and python_version < "4.0" +notebook-shim==0.2.4 ; python_version >= "3.11" and python_version < "4.0" +notebook==7.4.3 ; python_version >= "3.11" and python_version < "4.0" +numpy==2.3.0 ; python_version >= "3.11" and python_version < "4.0" +opentelemetry-api==1.34.1 ; python_version >= "3.11" and python_version < "4.0" +opentelemetry-exporter-gcp-trace==1.9.0 ; python_version >= "3.11" and python_version < "4.0" +opentelemetry-resourcedetector-gcp==1.9.0a0 ; python_version >= "3.11" and python_version < "4.0" +opentelemetry-sdk==1.34.1 ; python_version >= "3.11" and python_version < "4.0" +opentelemetry-semantic-conventions==0.55b1 ; python_version >= "3.11" and python_version < "4.0" +overrides==7.7.0 ; python_version >= "3.11" and python_version < "4.0" +packaging==25.0 ; python_version >= "3.11" and python_version < "4.0" +pandas==2.3.0 ; python_version >= "3.11" and python_version < "4.0" +pandocfilters==1.5.1 ; python_version >= "3.11" and python_version < "4.0" +parso==0.8.4 ; python_version >= "3.11" and python_version < "4.0" 
+pexpect==4.9.0 ; python_version >= "3.11" and python_version < "4.0" and sys_platform != "win32" and sys_platform != "emscripten" +pillow==11.2.1 ; python_version >= "3.11" and python_version < "4.0" +platformdirs==4.3.8 ; python_version >= "3.11" and python_version < "4.0" +pluggy==1.6.0 ; python_version >= "3.11" and python_version < "4.0" +prometheus-client==0.22.1 ; python_version >= "3.11" and python_version < "4.0" +prompt-toolkit==3.0.51 ; python_version >= "3.11" and python_version < "4.0" +propcache==0.3.2 ; python_version >= "3.11" and python_version < "4.0" +proto-plus==1.26.1 ; python_version >= "3.11" and python_version < "4.0" +protobuf==6.31.1 ; python_version >= "3.11" and python_version < "4.0" +psutil==7.0.0 ; python_version >= "3.11" and python_version < "4.0" +ptyprocess==0.7.0 ; python_version >= "3.11" and python_version < "4.0" and (sys_platform != "win32" and sys_platform != "emscripten" or os_name != "nt") +pure-eval==0.2.3 ; python_version >= "3.11" and python_version < "4.0" +pyasn1-modules==0.4.2 ; python_version >= "3.11" and python_version < "4.0" +pyasn1==0.6.1 ; python_version >= "3.11" and python_version < "4.0" +pycparser==2.22 ; python_version >= "3.11" and python_version < "4.0" +pydantic-core==2.33.2 ; python_version >= "3.11" and python_version < "4.0" +pydantic-settings==2.9.1 ; python_version >= "3.11" and python_version < "4.0" +pydantic==2.11.7 ; python_version >= "3.11" and python_version < "4.0" +pygments==2.19.1 ; python_version >= "3.11" and python_version < "4.0" +pymupdf==1.25.3 ; python_version >= "3.11" and python_version < "4.0" +pyparsing==3.2.3 ; python_version >= "3.11" and python_version < "4.0" +pytest==8.4.1 ; python_version >= "3.11" and python_version < "4.0" +python-dateutil==2.9.0.post0 ; python_version >= "3.11" and python_version < "4.0" +python-dotenv==1.1.0 ; python_version >= "3.11" and python_version < "4.0" +python-json-logger==3.3.0 ; python_version >= "3.11" and python_version < "4.0" 
+python-multipart==0.0.20 ; python_version >= "3.11" and python_version < "4.0" +pytz==2025.2 ; python_version >= "3.11" and python_version < "4.0" +pywin32==310 ; sys_platform == "win32" and platform_python_implementation != "PyPy" and python_version >= "3.11" and python_version < "4.0" +pywinpty==2.0.15 ; python_version >= "3.11" and python_version < "4.0" and os_name == "nt" +pyyaml==6.0.2 ; python_version >= "3.11" and python_version < "4.0" +pyzmq==27.0.0 ; python_version >= "3.11" and python_version < "4.0" +referencing==0.36.2 ; python_version >= "3.11" and python_version < "4.0" +regex==2024.11.6 ; python_version >= "3.11" and python_version < "4.0" +requests==2.32.4 ; python_version >= "3.11" and python_version < "4.0" +rfc3339-validator==0.1.4 ; python_version >= "3.11" and python_version < "4.0" +rfc3986-validator==0.1.1 ; python_version >= "3.11" and python_version < "4.0" +rpds-py==0.25.1 ; python_version >= "3.11" and python_version < "4.0" +rsa==4.9.1 ; python_version >= "3.11" and python_version < "4" +send2trash==1.8.3 ; python_version >= "3.11" and python_version < "4.0" +setuptools==80.9.0 ; python_version >= "3.11" and python_version < "4.0" +shapely==2.1.1 ; python_version >= "3.11" and python_version < "4.0" +six==1.17.0 ; python_version >= "3.11" and python_version < "4.0" +sniffio==1.3.1 ; python_version >= "3.11" and python_version < "4.0" +soupsieve==2.7 ; python_version >= "3.11" and python_version < "4.0" +sqlalchemy==2.0.41 ; python_version >= "3.11" and python_version < "4.0" +sse-starlette==2.3.6 ; python_version >= "3.11" and python_version < "4.0" +stack-data==0.6.3 ; python_version >= "3.11" and python_version < "4.0" +starlette==0.46.2 ; python_version >= "3.11" and python_version < "4.0" +tenacity==8.5.0 ; python_version >= "3.11" and python_version < "4.0" +terminado==0.18.1 ; python_version >= "3.11" and python_version < "4.0" +tinycss2==1.4.0 ; python_version >= "3.11" and python_version < "4.0" +tornado==6.5.1 ; 
python_version >= "3.11" and python_version < "4.0" +traitlets==5.14.3 ; python_version >= "3.11" and python_version < "4.0" +types-python-dateutil==2.9.0.20250516 ; python_version >= "3.11" and python_version < "4.0" +typing-extensions==4.14.0 ; python_version >= "3.11" and python_version < "4.0" +typing-inspection==0.4.1 ; python_version >= "3.11" and python_version < "4.0" +tzdata==2025.2 ; python_version >= "3.11" and python_version < "4.0" +tzlocal==5.3.1 ; python_version >= "3.11" and python_version < "4.0" +uri-template==1.3.0 ; python_version >= "3.11" and python_version < "4.0" +uritemplate==4.2.0 ; python_version >= "3.11" and python_version < "4.0" +urllib3==2.5.0 ; python_version >= "3.11" and python_version < "4.0" +uvicorn==0.34.3 ; python_version >= "3.11" and python_version < "4.0" +wcwidth==0.2.13 ; python_version >= "3.11" and python_version < "4.0" +webcolors==24.11.1 ; python_version >= "3.11" and python_version < "4.0" +webencodings==0.5.1 ; python_version >= "3.11" and python_version < "4.0" +websocket-client==1.8.0 ; python_version >= "3.11" and python_version < "4.0" +websockets==15.0.1 ; python_version >= "3.11" and python_version < "4.0" +widgetsnbextension==4.0.14 ; python_version >= "3.11" and python_version < "4.0" +yarl==1.20.1 ; python_version >= "3.11" and python_version < "4.0" +zipp==3.23.0 ; python_version >= "3.11" and python_version < "4.0" diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/__init__.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/__init__.py new file mode 100644 index 00000000..b45a8380 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/__init__.py @@ -0,0 +1,9 @@ +from . import callbacks +from . import config +from . import secrets +from . import schema_types +from . 
import utils + + +__all__ = ["callbacks", "config", "secrets", "schema_types", "utils"] + diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/callbacks.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/callbacks.py new file mode 100644 index 00000000..8e645345 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/callbacks.py @@ -0,0 +1,396 @@ +"""callbacks - currently exploring how these work by observing log output""" + +from typing import Dict, Any, Optional +import os, re, json, time +import pandas as pd +import requests +import logging + +logging.basicConfig(level=logging.INFO) + +from google.genai import types +from google.adk.sessions.state import State +from google.adk.models.llm_request import LlmRequest +from google.adk.agents.callback_context import CallbackContext + +from .config import config, setup_config + + +# Get the cloud storage bucket from the environment variable +try: + GCS_BUCKET = os.environ["BUCKET"] +except KeyError: + raise Exception("BUCKET environment variable not set") + + +# get initial session state json +SESSION_STATE_JSON_PATH = os.getenv("SESSION_STATE_JSON_PATH", default=None) +logging.info(f"\n\n`SESSION_STATE_JSON_PATH`: {SESSION_STATE_JSON_PATH}\n\n") + +# TODO: this is a short term fix for deployment to agent space +if SESSION_STATE_JSON_PATH: + PROFILE_PATH = "http://raw.githubusercontent.com/tottenjordan/zghost/refs/heads/deployment-fix-july-25/trends_and_insights_agent/shared_libraries/profiles" + FULL_JSON_PATH = os.path.join(PROFILE_PATH, SESSION_STATE_JSON_PATH) +else: + FULL_JSON_PATH = None + + +def _set_initial_states(source: Dict[str, Any], target: State | dict[str, Any]): + """ + Setting the initial session state given a JSON object of states. + + Args: + source: A JSON object of states. + target: The session state object to insert into. 
+ """ + if setup_config.state_init not in target: + target[setup_config.state_init] = True + target["gcs_folder"] = pd.Timestamp.utcnow().strftime("%Y_%m_%d_%H_%M") + + target.update(source) + + +def _load_session_state(callback_context: CallbackContext): + """ + Sets up the initial state. + Set this as a callback as before_agent_call of the `root_agent`. + This gets called before the system instruction is constructed. + + Args: + callback_context: The callback context. + """ + data = {} + if FULL_JSON_PATH: + resp = requests.get(FULL_JSON_PATH) + data = json.loads(resp.text) + logging.info(f"\n\nLoading Initial State: {data}\n\n") + else: + data = setup_config.empty_session_state + logging.info(f"\n\nLoading Initial State (empty): {data}\n\n") + + _set_initial_states(data["state"], callback_context.state) + + +def rate_limit_callback( + callback_context: CallbackContext, llm_request: LlmRequest +) -> None: + # pylint: disable=unused-argument + """Callback function that implements a query rate limit. + + Args: + callback_context: A CallbackContext object representing the active + callback context. + llm_request: A LlmRequest object representing the active LLM request. 
+ """ + now = time.time() + if "timer_start" not in callback_context.state: + callback_context.state["timer_start"] = now + callback_context.state["request_count"] = 1 + logging.debug( + "rate_limit_callback [timestamp: %i, req_count: 1, " "elapsed_secs: 0]", + now, + ) + return + + request_count = callback_context.state["request_count"] + 1 + elapsed_secs = now - callback_context.state["timer_start"] + logging.debug( + "rate_limit_callback [timestamp: %i, request_count: %i," " elapsed_secs: %i]", + now, + request_count, + elapsed_secs, + ) + + if request_count > config.rpm_quota: + delay = config.rate_limit_seconds - elapsed_secs + 1 + if delay > 0: + logging.debug("Sleeping for %i seconds", delay) + time.sleep(delay) + callback_context.state["timer_start"] = now + callback_context.state["request_count"] = 1 + else: + callback_context.state["request_count"] = request_count + + return + + +def campaign_callback_function( + callback_context: CallbackContext, +) -> Optional[types.Content]: + """ + This sets default values for: + * brand + * target_audience + * target_product + * key_selling_points + * img_artifact_keys + * vid_artifact_keys + * target_search_trends + * target_yt_trends + * final_select_ad_copies + * final_select_vis_concepts + """ + + agent_name = callback_context.agent_name + # invocation_id = callback_context.invocation_id + # current_state = callback_context.state.to_dict() + + # Check the condition in session state dictionary + brand = callback_context.state.get("brand") + target_audience = callback_context.state.get("target_audience") + target_product = callback_context.state.get("target_product") + key_selling_points = callback_context.state.get("key_selling_points") + final_select_ad_copies = callback_context.state.get("final_select_ad_copies") + final_select_vis_concepts = callback_context.state.get("final_select_vis_concepts") + img_artifact_keys = callback_context.state.get("img_artifact_keys") + vid_artifact_keys = 
callback_context.state.get("vid_artifact_keys") + target_yt_trends = callback_context.state.get("target_yt_trends") + target_search_trends = callback_context.state.get("target_search_trends") + + return_content = None # placeholder for optional returned parts + + if brand is None: + return_content = "brand" + callback_context.state["brand"] = "" + + if target_audience is None: + callback_context.state["target_audience"] = "" + if return_content is None: + return_content = "target_audience" + else: + return_content += ", target_audience" + + if target_product is None: + callback_context.state["target_product"] = "" + if return_content is None: + return_content = "target_product" + else: + return_content += ", target_product" + + if key_selling_points is None: + callback_context.state["key_selling_points"] = "" + if return_content is None: + return_content = "key_selling_points" + else: + return_content += ", key_selling_points" + + if final_select_ad_copies is None: + callback_context.state["final_select_ad_copies"] = { + "final_select_ad_copies": [] + } + if return_content is None: + return_content = "final_select_ad_copies" + else: + return_content += ", final_select_ad_copies" + + if final_select_vis_concepts is None: + callback_context.state["final_select_vis_concepts"] = { + "final_select_vis_concepts": [] + } + if return_content is None: + return_content = "final_select_vis_concepts" + else: + return_content += ", final_select_vis_concepts" + + if img_artifact_keys is None: + callback_context.state["img_artifact_keys"] = {"img_artifact_keys": []} + if return_content is None: + return_content = "img_artifact_keys" + else: + return_content += ", img_artifact_keys" + + if vid_artifact_keys is None: + callback_context.state["vid_artifact_keys"] = {"vid_artifact_keys": []} + if return_content is None: + return_content = "vid_artifact_keys" + else: + return_content += ", vid_artifact_keys" + + if target_search_trends is None: + 
callback_context.state["target_search_trends"] = {"target_search_trends": []} + if return_content is None: + return_content = "target_search_trends" + else: + return_content += ", target_search_trends" + + if target_yt_trends is None: + callback_context.state["target_yt_trends"] = {"target_yt_trends": []} + if return_content is None: + return_content = "target_yt_trends" + else: + return_content += ", target_yt_trends" + + if return_content is not None: + return types.Content( + parts=[ + types.Part( + text=f"Agent {agent_name} setting default values for state variables: \n\n{return_content}." + ) + ], + role="model", # Assign model role to the overriding response + ) + + else: + return None + + +def collect_research_sources_callback(callback_context: CallbackContext) -> None: + """Collects and organizes web-based research sources and their supported claims from agent events. + + This function processes the agent's `session.events` to extract web source details (URLs, + titles, domains from `grounding_chunks`) and associated text segments with confidence scores + (from `grounding_supports`). The aggregated source information and a mapping of URLs to short + IDs are cumulatively stored in `callback_context.state`. + + Args: + callback_context (CallbackContext): The context object providing access to the agent's + session events and persistent state. 
+ """ + session = callback_context._invocation_context.session + url_to_short_id = callback_context.state.get("url_to_short_id", {}) + sources = callback_context.state.get("sources", {}) + id_counter = len(url_to_short_id) + 1 + for event in session.events: + if not (event.grounding_metadata and event.grounding_metadata.grounding_chunks): + continue + chunks_info = {} + for idx, chunk in enumerate(event.grounding_metadata.grounding_chunks): + if not chunk.web: + continue + url = chunk.web.uri + title = ( + chunk.web.title + if chunk.web.title != chunk.web.domain + else chunk.web.domain + ) + if url not in url_to_short_id: + short_id = f"src-{id_counter}" + url_to_short_id[url] = short_id + sources[short_id] = { + "short_id": short_id, + "title": title, + "url": url, + "domain": chunk.web.domain, + "supported_claims": [], + } + id_counter += 1 + chunks_info[idx] = url_to_short_id[url] + if event.grounding_metadata.grounding_supports: + for support in event.grounding_metadata.grounding_supports: + confidence_scores = support.confidence_scores or [] + chunk_indices = support.grounding_chunk_indices or [] + for i, chunk_idx in enumerate(chunk_indices): + if chunk_idx in chunks_info: + short_id = chunks_info[chunk_idx] + confidence = ( + confidence_scores[i] if i < len(confidence_scores) else 0.5 + ) + text_segment = support.segment.text if support.segment else "" + sources[short_id]["supported_claims"].append( + { + "text_segment": text_segment, + "confidence": confidence, + } + ) + callback_context.state["url_to_short_id"] = url_to_short_id + callback_context.state["sources"] = sources + + +def citation_replacement_callback( + callback_context: CallbackContext, +) -> Optional[types.Content]: + """Replaces citation tags in a report with Markdown-formatted links. + + Processes 'combined_final_cited_report' from context state, converting tags like + `` into hyperlinks using source information from + `callback_context.state["sources"]`. 
async def before_agent_get_user_file(
    callback_context: CallbackContext,
) -> Optional[types.Content]:
    """
    Intercepts user-uploaded files before the agent executes.

    Scans the incoming user message for inline binary attachments. When a PDF
    is found, it is persisted as the 'user_uploaded_file' artifact, the
    artifact key is recorded in session state, and a confirmation message is
    returned directly to the user (halting agent processing for this turn).
    A non-PDF upload produces an explanatory message instead. When no file is
    attached, None is returned so the agent proceeds normally.
    """
    uploaded_parts: list[types.Part] = []
    if callback_context.user_content and callback_context.user_content.parts:
        uploaded_parts = [
            part
            for part in callback_context.user_content.parts
            if part.inline_data is not None
        ]

    # No attachment: let the agent handle the turn as usual.
    if not uploaded_parts:
        return None

    # Only the most recent attachment is considered.
    candidate = uploaded_parts[-1]
    inline = candidate.inline_data
    if not (inline and inline.data and inline.mime_type):
        # Malformed/empty inline data — fall through to normal agent flow.
        return None

    file_bytes = inline.data
    file_type = inline.mime_type
    artifact_key = "user_uploaded_file"

    # Reject anything that is not a PDF and tell the user what is expected.
    if file_type not in ["application/pdf"]:
        issue_message = f"The file you provided is of type {file_type} which is not supported here. Please provide a PDF."
        return types.Content(parts=[types.Part(text=issue_message)], role="model")

    # Persist the upload as an artifact and remember its key in state.
    artifact = types.Part.from_bytes(data=file_bytes, mime_type=file_type)
    version = await callback_context.save_artifact(
        filename=artifact_key, artifact=artifact
    )
    callback_context.state["user_document_artifact_key"] = artifact_key

    confirmation_message = (
        f"Thank you! I've successfully processed your uploaded file.\n\n"
        f"It's now stored as an artifact with key "
        f"'{artifact_key}' (version: {version}, size: {len(file_bytes)} bytes).\n\n"
        f"What would you like to do with it?"
    )
    return types.Content(
        parts=[types.Part(text=confirmation_message)], role="model"
    )
@dataclass
class SetupConfiguration:
    """Configuration for general setup.

    Attributes:
        state_init (str): A key indicating the state dict is initialized.
        empty_session_state (dict): Fresh template dict with keys for the
            initial ADK session state (computed per access, see below).
    """

    # NOTE: the previous version declared these as un-annotated class
    # attributes, which @dataclass ignores entirely, and exposed a single
    # mutable dict shared by every consumer — any caller mutating its
    # "empty" state silently corrupted the template for everyone.
    state_init: str = "_state_init"

    @property
    def empty_session_state(self) -> dict:
        """Return a fresh, mutation-safe template for initial session state."""
        return {
            "state": {
                "final_select_ad_copies": {"final_select_ad_copies": []},
                "final_select_vis_concepts": {"final_select_vis_concepts": []},
                "img_artifact_keys": {"img_artifact_keys": []},
                "vid_artifact_keys": {"vid_artifact_keys": []},
                "brand": "",
                "target_product": "",
                "target_audience": "",
                "key_selling_points": "",
                "target_search_trends": {"target_search_trends": []},
                "target_yt_trends": {"target_yt_trends": []},
            }
        }


setup_config = SetupConfiguration()


def run_command(command, check=True):
    """Run a shell command and return (success, stdout, stderr).

    Args:
        command (str): The shell command line to execute.
        check (bool): If True, a non-zero exit raises CalledProcessError,
            which is caught and reported as a failure tuple.

    Returns:
        tuple[bool, str, str]: (exit code == 0, captured stdout, captured stderr).
    """
    try:
        result = subprocess.run(
            command,
            shell=True,
            check=check,
            capture_output=True,
            text=True,
        )
        return result.returncode == 0, result.stdout, result.stderr
    except subprocess.CalledProcessError as e:
        # CalledProcessError carries the captured streams when capture_output=True.
        return False, e.stdout, e.stderr


def check_ffmpeg_installed():
    """Check if ffmpeg is already installed (i.e., resolvable on PATH)."""
    return shutil.which("ffmpeg") is not None
def get_ffmpeg_version():
    """Get the installed ffmpeg version.

    Returns:
        str | None: The first line of `ffmpeg -version` output (the version
        banner), or None if the command fails or produces no output.
    """
    success, stdout, stderr = run_command("ffmpeg -version", check=False)
    if success and stdout:
        # The version banner is the first line of the -version output.
        return stdout.split('\n')[0]
    return None

def install_ffmpeg():
    """Install or verify ffmpeg installation.

    Tries platform-appropriate, sudo-free installers in order (snap, then
    conda on Linux; Homebrew on macOS) and falls back to logging manual
    installation instructions. Returns None in all cases; failure to
    install is reported via log messages only.
    """
    logger = logging.getLogger(__name__)
    system = platform.system()

    # First check if ffmpeg is already installed
    if check_ffmpeg_installed():
        version = get_ffmpeg_version()
        if version:
            logger.info(f"ffmpeg is already installed: {version}")
        else:
            logger.info("ffmpeg is already installed")
        return

    logger.info(f"ffmpeg not found. Attempting to install on {system}...")

    if system == "Linux":
        # Check if we can use snap (doesn't require sudo)
        if shutil.which("snap"):
            logger.info("Attempting to install ffmpeg via snap (no sudo required)...")
            success, stdout, stderr = run_command("snap install ffmpeg", check=False)
            if success:
                logger.info("ffmpeg installed successfully via snap!")
                return

        # Check for conda/mamba
        if shutil.which("conda"):
            logger.info("Attempting to install ffmpeg via conda (no sudo required)...")
            success, stdout, stderr = run_command("conda install -y -c conda-forge ffmpeg", check=False)
            if success:
                logger.info("ffmpeg installed successfully via conda!")
                return

        # Provide instructions for manual installation
        # (reached only when every sudo-free installer failed or was absent)
        logger.warning("Unable to install ffmpeg automatically without sudo.")
        logger.info("Please install ffmpeg using one of these methods:")
        logger.info("  1. With sudo: sudo apt-get install ffmpeg")
        logger.info("  2. With snap: snap install ffmpeg")
        logger.info("  3. With conda: conda install -c conda-forge ffmpeg")
        logger.info("  4. Download static build: https://johnvansickle.com/ffmpeg/")

    elif system == "Darwin":  # macOS
        if shutil.which("brew"):
            logger.info("Attempting to install ffmpeg via Homebrew...")
            success, stdout, stderr = run_command("brew install ffmpeg", check=False)
            if success:
                logger.info("ffmpeg installed successfully via Homebrew!")
                return

        logger.warning("Unable to install ffmpeg automatically.")
        logger.info("Please install ffmpeg using Homebrew: brew install ffmpeg")
        logger.info("Or download from: https://evermeet.cx/ffmpeg/")

    elif system == "Windows":
        # No sudo-free package manager is assumed on Windows; instructions only.
        logger.warning("Automatic installation not supported on Windows.")
        logger.info("Please download ffmpeg from: https://www.gyan.dev/ffmpeg/builds/")
        logger.info("Or use chocolatey: choco install ffmpeg")

    else:
        logger.warning(f"Unsupported system: {system}")
        logger.info("Please install ffmpeg manually for your system.")

if __name__ == "__main__":
    # Configure logging for standalone execution
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    install_ffmpeg()
profession: frequent travelers; spending most income on concert experiences.", + "Hobbies, interests, humor: music lovers, attend lots of jam band concerts.", + "Actively researching concert and music festival tickets; musical instruments." + + ], + "key_selling_points": [ + "Best Take - Group pics, perfected. Pixel’s Best Take combines similar photos into one fantastic picture where everyone looks their best. AI is able to blend multiple still images to give everyone their best look", + "Night Sight + Astrophotography - Capture the cosmos with Pixel’s signature 'Night Sight' feature. 'Night Sight' helps capture everything from the city lights to starry skies in the dark with Astrophotography.", + "Magic Editor - Magic Editor with Pixel lets you use generative AI to reimagine your photos. Remove distractions, improve background, and more. It's an intuitive new way to edit, so you're in control of your images, whether you want to better capture the moment or add your own creative touch. ", + "Call Screen - Goodbye, spam calls. With Call Screen, Pixel can now detect and filter out even more spam calls. For other calls, it can tell you who’s calling and why before you pick up. Detect and decline spam calls without distracting you.", + "Live Translate - Live Translate enables real-time translation without an app and without an internet connection - not just text-based but also spoken words, interpreting live audio from one speaker to another. You can read text in another language by pointing the camera at a sign or a menu, or watch a video that isn’t in your native tongue with Live Caption. ", + "Real Tone - this feature represents the nuances of more skin tones beautifully, authentically, and accurately in photos and video, with improvements for low light scenarios as well." 
+ ], + "target_search_trends": { + "target_search_trends": [] + }, + "target_yt_trends": { + "target_yt_trends": [] + } + } +} \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/profiles/example_state_pixel_w_surreal_meme.json b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/profiles/example_state_pixel_w_surreal_meme.json new file mode 100644 index 00000000..35fef54b --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/profiles/example_state_pixel_w_surreal_meme.json @@ -0,0 +1,31 @@ +{ + "state": { + "img_artifact_keys": { + "img_artifact_keys": [] + }, + "vid_artifact_keys": { + "vid_artifact_keys": [] + }, + "brand": "Google Pixel", + "target_product": "Pixel 9 smartphone", + "target_audience": [ + "Demographics: millennials who follow jam bands such as Widespread Panic and Phish.", + "Psychographics: millennials who respond positively to nostalgic messages.", + "Lifestyle or profession: frequent travelers; spending most income on concert experiences.", + "Hobbies, interests, humor: music lovers, attend lots of jam band concerts.", + "Actively researching concert and music festival tickets; musical instruments, love surreal memes (e.g., https://www.reddit.com/r/surrealmemes/)." + ], + "key_selling_points": [ + "Best Take - Group pics, perfected. Pixel’s Best Take combines similar photos into one fantastic picture where everyone looks their best. AI is able to blend multiple still images to give everyone their best look", + "Night Sight + Astrophotography - Capture the cosmos with Pixel’s signature 'Night Sight' feature. 'Night Sight' helps capture everything from the city lights to starry skies in the dark with Astrophotography.", + "Magic Editor - Magic Editor with Pixel lets you use generative AI to reimagine your photos. Remove distractions, improve background, and more. 
It's an intuitive new way to edit, so you're in control of your images, whether you want to better capture the moment or add your own creative touch. ", + "Call Screen - Goodbye, spam calls. With Call Screen, Pixel can now detect and filter out even more spam calls. For other calls, it can tell you who’s calling and why before you pick up. Detect and decline spam calls without distracting you." + ], + "target_search_trends": { + "target_search_trends": [] + }, + "target_yt_trends": { + "target_yt_trends": [] + } + } +} \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/profiles/example_state_prs.json b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/profiles/example_state_prs.json new file mode 100644 index 00000000..8c681450 --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/profiles/example_state_prs.json @@ -0,0 +1,31 @@ +{ + "state": { + "img_artifact_keys": { + "img_artifact_keys": [] + }, + "vid_artifact_keys": { + "vid_artifact_keys": [] + }, + "brand": "Paul Reed Smith (PRS)", + "target_product": "PRS SE CE24 Electric Guitar", + "target_audience": [ + "Demographics: millennials who follow jam bands such as Widespread Panic and Phish.", + "Psychographics: millennials who respond positively to nostalgic messages.", + "Lifestyle or profession: frequent travelers; spending most income on concert experiences.", + "Hobbies, interests, humor: music lovers, attend lots of jam band concerts.", + "Actively researching concert and music festival tickets; musical instruments, love surreal memes (e.g., https://www.reddit.com/r/surrealmemes/)." 
+ ], + "key_selling_points": [ + "Bolt-on Maple Neck - The bolt-on construction with a maple neck provides a distinct tone with enhanced brightness and a slightly more percussive attack", + "Wide Thin Profile- The Wide Thin neck carve offers a comfortable and fast playing experience, allowing for easy access to all 24 frets.", + "Satin Finish - The satin finish on the neck and body allows for a smooth, comfortable feel and a more intimate playing experience, as it doesn't stick to the hand like some gloss finishes.", + "85/15 S Humbuckers - These pickups deliver a wide tonal range, from thick humbucker tones to clear single-coil sounds, making the guitar suitable for various genres." + ], + "target_search_trends": { + "target_search_trends": [] + }, + "target_yt_trends": { + "target_yt_trends": [] + } + } +} \ No newline at end of file diff --git a/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/schema_types.py b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/schema_types.py new file mode 100644 index 00000000..0f8c210a --- /dev/null +++ b/python/agents/trends-and-insights-agent/trends_and_insights_agent/shared_libraries/schema_types.py @@ -0,0 +1,170 @@ +"""Common data schema and types for the Trends & Insights Agent""" + +from google.genai import types +from pydantic import BaseModel, Field + + +# Convenient declaration for controlled generation. +json_response_config = types.GenerateContentConfig( + response_mime_type="application/json" +) + + +# ============================= +# Research Structured Feedback +# ============================= +class CampaignSearchQuery(BaseModel): + """Model representing a specific search query for web search.""" + + search_query: str = Field( + description="A highly specific and targeted query for web search." 
class CampaignFeedback(BaseModel):
    """Model for providing evaluation feedback on research quality.

    Produced by the critic/evaluator step: a written assessment plus an
    optional list of follow-up queries when research gaps remain.
    """

    comment: str = Field(
        description="Detailed explanation of the evaluation, highlighting strengths and/or weaknesses of the research."
    )
    follow_up_queries: list[CampaignSearchQuery] | None = Field(
        default=None,
        description="A list of specific, targeted follow-up search queries needed to fix research gaps. This should be null or empty if no follow-up questions needed.",
    )


# ==========================
# Marketing Guide Data Gen
# ==========================
class MarketingCampaignGuide(BaseModel):
    """Data model for the marketing campaign guide.

    Structured extraction target for campaign metadata (e.g., parsed from an
    uploaded campaign brief).
    """

    campaign_name: str = Field(
        description="given name of campaign; could be title of uploaded campaign metadata"
    )
    brand: str = Field(description="target product's brand")
    target_product: str = Field(
        description="the subject of the marketing campaign objectives"
    )
    target_audience: list[str] = Field(
        description="specific group(s) we intended to reach. Typically described with demographic, psychographic, and behavioral profile of the ideal customer or user"
    )
    target_regions: list[str] = Field(
        description="specific cities and/or countries we intend to reach"
    )
    campaign_objectives: list[str] = Field(
        description="goals that define what we plan to achieve"
    )
    media_strategy: list[str] = Field(
        description="media channels or formats we intend to use to reach our audiences"
    )
    # key_insights: list[str] = Field(  # TODO: fix
    #     description="Referencable data points that show intersection between goals and broad information sources"
    # )
    key_selling_points: list[str] = Field(
        description="Aspects of the `target_product` that distinguish it from competitors and persuades customers to choose it"
    )


# ========================
# Insight Generation
# ========================
class Insight(BaseModel):
    """Data model for a single insight from Google and YouTube research."""

    insight_title: str = Field(
        description="Come up with a unique title for the insight."
    )
    insight_text: str = Field(
        description="Generate a summary of the insight from the web research."
    )
    insight_urls: list[str] = Field(
        description="Get the url(s) used to generate the insight."
    )
    key_entities: list[str] = Field(
        description="Extract any key entities discussed in the gathered context."
    )
    key_relationships: list[str] = Field(
        description="Describe the relationships between the Key Entities you have identified."
    )
    # NOTE: unlike the list-valued fields above, the two fields below are
    # single strings in this schema.
    key_audiences: str = Field(
        description="Considering the guide, how does this insight intersect with the audience?"
    )
    key_product_insights: str = Field(
        description="Referencable data points that show intersection between campaign goals, target product, target audience, and web research."
    )


class Insights(BaseModel):
    """Container for a collection of Insight records."""

    insights: list[Insight]


# ==========================
# YouTube Trends
# ==========================


class YT_Trend(BaseModel):
    """Data model for a single piece of trending content on YouTube."""

    # trend_title: str = Field(description="a unique title for the trend")
    video_title: str = Field(description="exact name of the trending YouTube video")
    trend_text: str = Field(
        description="source text from video or URL analysis e.g., output from the `analyze_youtube_videos` or `query_web` tools"
    )
    trend_urls: list[str] = Field(description="url for the trending video")
    key_entities: list[str] = Field(description="Key Entities discussed in the video")
    key_relationships: list[str] = Field(
        description="the relationships between any Key Entities"
    )
    key_audiences: list[str] = Field(
        description="ideas for relating this trend to the target audiences described in the campaign metadata"
    )
    key_product_insights: list[str] = Field(
        description="a few insights from the intersection of the trending content and the target product."
    )


class YT_Trends(BaseModel):
    """Container for all trending content gathered from YouTube."""

    yt_trends: list[YT_Trend]
def access_secret_version(
    secret_id: str, version_id: str
) -> Optional[sm.AccessSecretVersionResponse | str]:
    """
    Access the payload for the given secret version if one exists.

    Args:
        secret_id (str): Name of the secret in Secret Manager.
        version_id (str): A version number as a string (e.g. "5") or an
            alias (e.g. "latest").

    Returns:
        str: The UTF-8 decoded secret payload.

    Raises:
        ValueError: If the payload fails its CRC32C integrity check.
    """
    # Create the Secret Manager client.
    sm_client = sm.SecretManagerServiceClient()

    # Build the resource name of the secret version.
    name = f"projects/{project_number}/secrets/{secret_id}/versions/{version_id}"

    # Access the secret version.
    response = sm_client.access_secret_version(request={"name": name})

    # Verify payload checksum. A mismatch means the payload was corrupted in
    # transit, so fail loudly. (The previous behavior printed a message and
    # returned the raw response object, which callers expecting a `str`
    # payload — e.g. an API key — could not use.)
    crc32c = google_crc32c.Checksum()
    crc32c.update(response.payload.data)
    if response.payload.data_crc32c != int(crc32c.hexdigest(), 16):
        raise ValueError("Data corruption detected.")

    # WARNING: Do not print or log the secret material in a production
    # environment.
    payload = response.payload.data.decode("UTF-8")
    return payload
def download_image_from_gcs(
    source_blob_name: str,
    destination_file_name: str,
    gcs_bucket: str = os.environ.get("BUCKET", "tmp"),
):
    """
    Downloads a blob (image) from a GCS bucket to a local file.

    Args:
        source_blob_name (str): full path to file within bucket e.g., "path/to/your/image.png"
        destination_file_name (str): local path to save file e.g., "local_image.png"
        gcs_bucket (str): bucket name; a leading "gs://" prefix is tolerated
            and stripped. Defaults to the BUCKET env var (or "tmp").
    Returns:
        str: Message indicating local path to file
    """
    # Accept either a bare bucket name or a full "gs://bucket" URI.
    bucket_name = gcs_bucket.replace("gs://", "")
    client = storage.Client()
    client.bucket(bucket_name).blob(source_blob_name).download_to_filename(
        destination_file_name
    )
    return f"Downloaded gcs object {source_blob_name} from {bucket_name} to (local) {destination_file_name}."
def upload_file_to_gcs(
    file_path: str,
    file_data: bytes,
    content_type: str = "image/png",
    gcs_bucket: str = os.environ.get("BUCKET", "tmp"),
):
    """
    Uploads in-memory file bytes to a GCS bucket.

    The object is stored under the basename of `file_path` at the bucket
    root (any directory components are discarded).

    Args:
        file_path (str): Local path whose basename becomes the object name.
        file_data (bytes): The file bytes to upload.
        content_type (str): The file's mime type.
        gcs_bucket (str): The name of the GCS bucket; a leading "gs://"
            prefix is tolerated and stripped.
    Returns:
        str: The GCS URI of the uploaded file.
    """
    gcs_bucket = gcs_bucket.replace("gs://", "")
    storage_client = storage.Client()
    bucket = storage_client.bucket(gcs_bucket)
    # Compute the object name once; it is used for both the blob and the URI.
    blob_name = os.path.basename(file_path)
    blob = bucket.blob(blob_name)
    blob.upload_from_string(file_data, content_type=content_type)
    return f"gs://{gcs_bucket}/{blob_name}"
def query_youtube_api(
    query: str,
    video_duration: str,
    video_order: str = "relevance",
    num_video_results: int = 5,
    max_num_days_ago: int = 30,
    video_caption: str = "closedCaption",
    channel_type: Optional[str] = "any",
    channel_id: Optional[str] = None,
    event_type: Optional[str] = None,
) -> dict:
    """
    Gets a response from the YouTube Data API for a given search query.

    Uses the module-level `youtube_client` (Search:list endpoint), restricted
    to English-language videos in the US region.

    Args:
        query (str): The search query.
        video_duration (str): The duration (minutes) of the videos to search for.
            Must be one of: 'any', 'long', 'medium', 'short', where short=(-inf, 4),
            medium=[4, 20], long=(20, inf)
        video_order (str): The order in which the videos should be returned.
            Must be one of 'date', 'rating', 'relevance', 'title', 'viewCount'
        num_video_results (int): The number of video results to return.
        max_num_days_ago (int): The maximum number of days ago the videos
            should have been published.
        video_caption (str): whether the API should filter video search results
            based on whether they have captions.
            Must be one of "any", "closedCaption", "none"
            "any" = Do not filter results based on caption availability.
            "closedCaption" = Only include videos with closed captions.
            "none" = Only include videos that do not have captions.
        channel_type (Optional[str]): The type of channel to search within.
            Must be one of "show", "any", or "channelTypeUnspecified".
            Specifying "show" retrieves only TV shows.
        channel_id (Optional[str]): The ID of the channel to search within.
        event_type (Optional[str]): restricts a search to broadcast events.
            Must be one of "upcoming", "live", "completed", None
            None = does not restrict to broadcast events
            "completed" = Only include completed broadcasts.
            "live" = Only include active broadcasts.
            "upcoming" = Only include upcoming broadcasts.

    Returns:
        dict: The response from the YouTube Data API.
    """
    # Only consider videos published within the lookback window; the API
    # expects an RFC 3339 timestamp, hence the UTC-aware ISO format.
    published_after_timestamp = (
        (pd.Timestamp.now() - pd.DateOffset(days=max_num_days_ago))
        .tz_localize("UTC")
        .isoformat()
    )

    # Using Search:list - https://developers.google.com/youtube/v3/docs/search/list
    yt_data_api_request = youtube_client.search().list(
        type="video",
        part="id,snippet",
        relevanceLanguage="en",
        regionCode="US",
        q=query,
        videoDuration=video_duration,
        order=video_order,
        maxResults=num_video_results,
        videoCaption=video_caption,
        channelType=channel_type,
        channelId=channel_id,
        eventType=event_type,
        publishedAfter=published_after_timestamp,
    )
    yt_data_api_response = yt_data_api_request.execute()
    return yt_data_api_response


# region_code: str = "US",
# region_code (str): selects a video chart available in the specified region.
# Values are ISO 3166-1 alpha-2 country codes. For example, the region_code for the United Kingdom would be 'GB',
# whereas 'US' would represent The United States.


def analyze_youtube_videos(
    prompt: str,
    youtube_url: str,
) -> Optional[str]:
    """
    Analyzes youtube videos given a prompt and the video's URL.

    Args:
        prompt (str): The prompt to use for the analysis.
        youtube_url (str): The url of a YouTube video to analyze.
            The URL should be formatted similarly to:
            `https://www.youtube.com/watch?v=dmF8oJ5JAVE`, where 'dmF8oJ5JAVE'
            is the video's ID.
    Returns:
        Results from the youtube video analysis prompt, or None if the model
        returned no text.
    """
    if "youtube.com" not in youtube_url:
        return "Not a valid youtube URL"

    video = types.Part.from_uri(
        file_uri=youtube_url,
        mime_type="video/*",
    )
    contents = types.Content(
        role="user",
        parts=[types.Part.from_text(text=prompt), video],
    )
    # Low temperature: analysis should be grounded in the video, not creative.
    result = client.models.generate_content(
        model=config.video_analysis_model,
        contents=contents,
        config=types.GenerateContentConfig(
            temperature=0.1,
        ),
    )
    if result and result.text is not None:
        return result.text
    # Explicit fallback (previously an implicit fall-through returning None).
    return None