import os

from dotenv import load_dotenv
import google.generativeai as genai
from google.api_core import exceptions as gexceptions

account_inf_path = "d:/MyWebs/Database/accounts.env"
gemini_model = "gemini-1.5-pro-latest"

load_dotenv(dotenv_path=account_inf_path)
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)


def generate_with_grounding(prompt):
    """Generate text grounded with Google Search."""
    model = genai.GenerativeModel(gemini_model)
    # Enable Grounding with Google Search so the model can decide to consult
    # Google Search and return grounded (cited) results.
    # NOTE: GenerativeModel has no `use_google_search` / `use_grounding`
    # attributes; the `tools="google_search_retrieval"` shortcut below assumes
    # a recent google-generativeai release and a Gemini 1.5 model.
    response = model.generate_content(
        prompt,
        tools="google_search_retrieval",
        generation_config={
            "temperature": 0.0,  # a low temperature is recommended for grounding
            "top_p": 0.8,
            "top_k": 40,
        },
        safety_settings=[
            {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        ],
    )
    '''
    # Earlier variant (kept for reference): explicit error handling plus a
    # manually declared "google_search" function tool.
    try:
        response = model.generate_content(
            prompt,
            generation_config={
                "temperature": 0.2,  # adjusts the randomness of the generation
                "top_p": 0.8,
                "top_k": 40,
            },
            safety_settings=[
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            ],
            tools=[
                {
                    "function_declarations": [
                        {
                            "name": "google_search",
                            "description": "Runs a Google search to retrieve up-to-date information.",
                            "parameters": {
                                "type": "OBJECT",
                                "properties": {
                                    "query": {
                                        "type": "STRING",
                                        "description": "Search query",
                                    }
                                },
                                "required": ["query"],
                            },
                        }
                    ]
                }
            ],
        )
        # Handle a normal response (error checks are done here as well).
        if response and hasattr(response, "parts"):
            pass
            # print("Response:\n", response.text)
            # for part in response.parts:
            #     print(part.text)
        else:
            print("Error in generate_with_grounding(): no response was returned.")
    except gexceptions.ResourceExhausted as e:
        print(f"Error in generate_with_grounding(): Gemini API error: {e}")
        if e.code == 429:  # e.g. rate-limit error code
            print("  Output quota reached. Please wait a while and try again.")
            return None
        else:
            print(f"  Error details (code: {e.code}): {e.message}")
            # Inspect e.message, e.code, etc. and handle accordingly.
    except Exception as e:
        print(f"Error in generate_with_grounding(): unexpected error: {e}")  # , f" type(e)={type(e)}")
        return None

    if response.prompt_feedback:
        if response.prompt_feedback.block_reason:
            print(f"Error in generate_with_grounding(): the prompt was blocked. Reason: {response.prompt_feedback.block_reason}")
            if response.prompt_feedback.safety_ratings:
                print("Safety ratings:")
                for rating in response.prompt_feedback.safety_ratings:
                    print(f"- category: {rating.category}, probability: {rating.probability}")
            return None
    '''
    return response


if __name__ == "__main__":
    user_prompt = "Use Google Search to summarize President Trump's policies in 2025."
    response = generate_with_grounding(user_prompt)
    print()
    print(f"prompt: {user_prompt}")
    if response and hasattr(response, "parts"):
        print("Response:")
        for idx, part in enumerate(response.parts):
            print(f"idx {idx}: [{part.text}]")

        # Inspect the grounding sources (search citations).
        # NOTE: grounding results are reported in candidate.grounding_metadata,
        # not in prompt_feedback; field names may vary between SDK versions.
        metadata = getattr(response.candidates[0], "grounding_metadata", None)
        if metadata and metadata.web_search_queries:
            print(f"\nSearch queries: {list(metadata.web_search_queries)}")
        if metadata and metadata.grounding_chunks:
            print("Search results:")
            for chunk in metadata.grounding_chunks:
                if chunk.web and chunk.web.uri:
                    print(f"- Title: {chunk.web.title}")
                    print(f"  URL: {chunk.web.uri}")
    else:
        print("No response was returned.")
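

# A minimal retry sketch (optional addition; an assumption, not part of the
# original flow). The commented-out variant above handles
# gexceptions.ResourceExhausted (HTTP 429); this helper shows one way to retry
# with exponential backoff when the active, exception-free path hits a rate
# limit. The attempt count and delays are illustrative values only.
import time  # would normally sit with the other imports at the top of the file


def generate_with_retry(prompt, max_attempts=3, base_delay=5.0):
    """Call generate_with_grounding(), retrying on rate-limit (429) errors."""
    for attempt in range(1, max_attempts + 1):
        try:
            return generate_with_grounding(prompt)
        except gexceptions.ResourceExhausted:
            if attempt == max_attempts:
                raise  # give up after the final attempt
            delay = base_delay * 2 ** (attempt - 1)  # 5 s, 10 s, 20 s, ...
            print(f"Rate limit hit (attempt {attempt}/{max_attempts}); retrying in {delay:.0f} s")
            time.sleep(delay)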