-
Notifications
You must be signed in to change notification settings - Fork 22
Expand file tree
/
Copy pathconfig.yaml
More file actions
68 lines (63 loc) · 1.85 KB
/
config.yaml
File metadata and controls
68 lines (63 loc) · 1.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
---
# Configuration for Production Quant Paper Agent Example
# Uses Gemini 2.5 Pro & Flash for production-level performance

# Source configuration: where papers are fetched from.
source:
  type: "arxiv"
  config:
    max_results: 1
    # Seconds to wait between retries of the arXiv API.
    retry_delay: 1.0

# Parser configuration: how fetched documents are parsed.
parser:
  type: "llama"
  config:
    # Quoted so the ${...} substitution token survives YAML parsing verbatim.
    api_key: "${LLAMA_CLOUD_API_KEY}"
    # Folded scalar (>-): lines join with single spaces, no trailing newline,
    # so the parsed value is one long instruction string.
    parsing_instructions: >-
      Extract comprehensive quantitative finance content including
      methodologies, mathematical models, empirical findings, and
      practical implications.

# Storage configuration: where parsed results are written.
storage:
  type: "local"
  config:
    base_path: "./data"
    auto_create_dirs: true
    enable_indexing: true

# Flow configurations: each entry defines a processing pipeline.
flows:
  # Summary flow using Gemini models
  summary_flow:
    type: "summary"
    config:
      name: "summary_flow"
      prompt_templates_path: "flows/summary_flow/prompts.yaml"
      use_chunking: true
      chunk_size: 4000
      chunk_strategy: "by_size"
      llm_blocks:
        # Cheap model summarizes individual chunks.
        cheap_summarizer:
          model: "gemini/gemini-2.5-flash"
          temperature: 0.3
          max_tokens: 8192
          api_key: "${GOOGLE_API_KEY}"
        # Stronger model combines chunk summaries into the final output.
        powerful_combiner:
          model: "gemini/gemini-2.5-pro"
          temperature: 0.2
          max_tokens: 8192
          api_key: "${GOOGLE_API_KEY}"

  # QA flow using Gemini models for production
  qa_flow:
    type: "qa"
    config:
      name: "qa_flow"
      prompt_templates_path: "flows/qa_flow/prompts.yaml"
      max_questions: 7
      question_depth: "deep"
      focus_areas:
        - "methodology"
        - "findings"
        - "implications"
        - "applications"
        - "limitations"
      llm_blocks:
        # Higher temperature to diversify generated questions.
        question_generator:
          model: "gemini/gemini-2.5-pro"
          temperature: 0.8
          max_tokens: 8192
          api_key: "${GOOGLE_API_KEY}"
        # Lower temperature for deterministic refinement.
        question_refiner:
          model: "gemini/gemini-2.5-pro"
          temperature: 0.3
          max_tokens: 8192
          api_key: "${GOOGLE_API_KEY}"