Skip to content

Commit f13cc2b

Browse files
feat(api): api update
1 parent: 1c9099a · commit: f13cc2b

3 files changed

Lines changed: 7 additions & 16 deletions

File tree

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 20
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/brand-dev%2Fbrand.dev-e1e6c71c933578dccc0c110054a5fa21347c4795dff14fd48f5873536b4ce959.yml
3-
openapi_spec_hash: 12b5773c2b15c2f7cdce7590237a1125
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/brand-dev%2Fbrand.dev-2a7727122f4ea5f9240c4c701f70f91b48f861bf5d87ec64f6904d635be5aef8.yml
3+
openapi_spec_hash: 51884daf875aba2f255aaaf69ef1c42d
44
config_hash: 91cf2dcefb99c39eb9cd3e98e15d6011

src/brand/dev/resources/brand.py

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2539,13 +2539,10 @@ def web_scrape_sitemap(
25392539
timeout: float | httpx.Timeout | None | NotGiven = not_given,
25402540
) -> BrandWebScrapeSitemapResponse:
25412541
"""
2542-
Crawls the sitemap of the given domain and returns all discovered page URLs.
2543-
Supports sitemap index files (recursive), parallel fetching with concurrency
2544-
control, deduplication, and filters out non-page resources (images, PDFs, etc.).
2542+
Crawl an entire website's sitemap and return all discovered page URLs
25452543
25462544
Args:
2547-
domain: Domain name to crawl sitemaps for (e.g., 'example.com'). The domain will be
2548-
automatically normalized and validated.
2545+
domain: Domain to build a sitemap for
25492546
25502547
max_links: Maximum number of links to return from the sitemap crawl. Defaults to 10,000.
25512548
Minimum is 1, maximum is 100,000.
@@ -5054,13 +5051,10 @@ async def web_scrape_sitemap(
50545051
timeout: float | httpx.Timeout | None | NotGiven = not_given,
50555052
) -> BrandWebScrapeSitemapResponse:
50565053
"""
5057-
Crawls the sitemap of the given domain and returns all discovered page URLs.
5058-
Supports sitemap index files (recursive), parallel fetching with concurrency
5059-
control, deduplication, and filters out non-page resources (images, PDFs, etc.).
5054+
Crawl an entire website's sitemap and return all discovered page URLs
50605055
50615056
Args:
5062-
domain: Domain name to crawl sitemaps for (e.g., 'example.com'). The domain will be
5063-
automatically normalized and validated.
5057+
domain: Domain to build a sitemap for
50645058
50655059
max_links: Maximum number of links to return from the sitemap crawl. Defaults to 10,000.
50665060
Minimum is 1, maximum is 100,000.

src/brand/dev/types/brand_web_scrape_sitemap_params.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,7 @@
1111

1212
class BrandWebScrapeSitemapParams(TypedDict, total=False):
1313
domain: Required[str]
14-
"""Domain name to crawl sitemaps for (e.g., 'example.com').
15-
16-
The domain will be automatically normalized and validated.
17-
"""
14+
"""Domain to build a sitemap for"""
1815

1916
max_links: Annotated[int, PropertyInfo(alias="maxLinks")]
2017
"""Maximum number of links to return from the sitemap crawl.

0 commit comments

Comments (0)