test: increase coverage from 88% to 94% with comprehensive test suite (#143)

Adds ~6,400 lines of new tests across integration and unit test files,
raising total branch coverage from 88% to 93.61% and enforcing a 90%
minimum threshold in CI.

New test classes cover:
- _core.py: HTTP error codes (400/429/500), auth retry, FIFO cache eviction,
  source ID helpers
- _artifacts.py: ArtifactParseError paths, mind map parsing, revise_slide,
  download error paths, poll status variants, deprecated wait API
- _chat.py: ask() error handling, conversation ID edge cases, history parsing,
  citation/reference extraction chain (74 new tests)
- _sources.py: wait_until_ready, add_url/text/file error paths, drive wait,
  freshness checks, fulltext parsing, YouTube ID extraction (64 new tests)
- _notebooks.py: describe/share edge cases
- _research.py: poll edge cases, import source edge cases
- cli/generate.py: resolve_language, output helpers, revise-slide command
- cli/language.py: config error paths, server sync/get paths
- cli/source.py: auto-detect, fulltext, wait commands

pyproject.toml: raise coverage fail_under from 70 to 90
This commit is contained in:
Teng Lin 2026-03-03 11:18:33 -08:00 committed by GitHub
parent abe067a4e7
commit ac58d7f225
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 6385 additions and 4 deletions

View file

@@ -103,7 +103,7 @@ branch = true
 [tool.coverage.report]
 show_missing = true
-fail_under = 70
+fail_under = 90
[tool.mypy]
python_version = "3.10"

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,8 +1,22 @@
"""Integration tests for client initialization and core functionality."""
from unittest.mock import AsyncMock, MagicMock, patch
import httpx
import pytest
from notebooklm import NotebookLMClient
from notebooklm._core import MAX_CONVERSATION_CACHE_SIZE, ClientCore, is_auth_error
from notebooklm.rpc import (
AuthError,
ClientError,
NetworkError,
RateLimitError,
RPCError,
RPCMethod,
RPCTimeoutError,
ServerError,
)
class TestClientInitialization:
@ -23,3 +37,412 @@ class TestClientInitialization:
client = NotebookLMClient(auth_tokens)
with pytest.raises(RuntimeError, match="not initialized"):
await client.notebooks.list()
class TestIsAuthError:
    """Tests for the is_auth_error() helper function."""

    @staticmethod
    def _http_status_error(code: int) -> httpx.HTTPStatusError:
        # Build an HTTPStatusError whose mocked response reports the given status code.
        resp = MagicMock()
        resp.status_code = code
        return httpx.HTTPStatusError(str(code), request=MagicMock(), response=resp)

    def test_returns_true_for_auth_error(self):
        assert is_auth_error(AuthError("invalid credentials")) is True

    def test_returns_false_for_network_error(self):
        assert is_auth_error(NetworkError("network down")) is False

    def test_returns_false_for_rate_limit_error(self):
        assert is_auth_error(RateLimitError("rate limited")) is False

    def test_returns_false_for_server_error(self):
        assert is_auth_error(ServerError("500 error")) is False

    def test_returns_false_for_client_error(self):
        assert is_auth_error(ClientError("400 bad request")) is False

    def test_returns_false_for_rpc_timeout_error(self):
        assert is_auth_error(RPCTimeoutError("timed out")) is False

    def test_returns_true_for_401_http_status_error(self):
        assert is_auth_error(self._http_status_error(401)) is True

    def test_returns_true_for_403_http_status_error(self):
        assert is_auth_error(self._http_status_error(403)) is True

    def test_returns_false_for_500_http_status_error(self):
        assert is_auth_error(self._http_status_error(500)) is False

    def test_returns_true_for_rpc_error_with_auth_message(self):
        assert is_auth_error(RPCError("authentication expired")) is True

    def test_returns_false_for_rpc_error_with_generic_message(self):
        assert is_auth_error(RPCError("some generic error")) is False

    def test_returns_false_for_plain_exception(self):
        assert is_auth_error(ValueError("not an rpc error")) is False
class TestRPCCallHTTPErrors:
    """Tests for HTTP error handling in rpc_call()."""

    @staticmethod
    def _status_error(code, reason, headers=None):
        # HTTPStatusError backed by a mocked response; headers are only
        # assigned when the test explicitly supplies them (429 cases).
        resp = MagicMock()
        resp.status_code = code
        if headers is not None:
            resp.headers = headers
        resp.reason_phrase = reason
        return httpx.HTTPStatusError(str(code), request=MagicMock(), response=resp)

    async def _call_and_expect(self, auth_tokens, side_effect, exc_type):
        # Drive one LIST_NOTEBOOKS call with a failing POST and return the
        # exception that rpc_call() translated it into.
        async with NotebookLMClient(auth_tokens) as client:
            core = client._core
            with (
                patch.object(core._http_client, "post", side_effect=side_effect),
                pytest.raises(exc_type) as exc_info,
            ):
                await core.rpc_call(RPCMethod.LIST_NOTEBOOKS, [])
        return exc_info.value

    @pytest.mark.asyncio
    async def test_rate_limit_429_with_retry_after_header(self, auth_tokens):
        err = self._status_error(429, "Too Many Requests", {"retry-after": "60"})
        raised = await self._call_and_expect(auth_tokens, err, RateLimitError)
        assert raised.retry_after == 60

    @pytest.mark.asyncio
    async def test_rate_limit_429_without_retry_after_header(self, auth_tokens):
        err = self._status_error(429, "Too Many Requests", {})
        raised = await self._call_and_expect(auth_tokens, err, RateLimitError)
        assert raised.retry_after is None

    @pytest.mark.asyncio
    async def test_rate_limit_429_with_invalid_retry_after_header(self, auth_tokens):
        err = self._status_error(429, "Too Many Requests", {"retry-after": "not-a-number"})
        raised = await self._call_and_expect(auth_tokens, err, RateLimitError)
        assert raised.retry_after is None

    @pytest.mark.asyncio
    async def test_client_error_400(self, auth_tokens):
        err = self._status_error(400, "Bad Request")
        await self._call_and_expect(auth_tokens, err, ClientError)

    @pytest.mark.asyncio
    async def test_server_error_500(self, auth_tokens):
        err = self._status_error(500, "Internal Server Error")
        await self._call_and_expect(auth_tokens, err, ServerError)

    @pytest.mark.asyncio
    async def test_connect_timeout_raises_network_error(self, auth_tokens):
        await self._call_and_expect(
            auth_tokens, httpx.ConnectTimeout("connect timeout"), NetworkError
        )

    @pytest.mark.asyncio
    async def test_read_timeout_raises_rpc_timeout_error(self, auth_tokens):
        await self._call_and_expect(
            auth_tokens, httpx.ReadTimeout("read timeout"), RPCTimeoutError
        )

    @pytest.mark.asyncio
    async def test_connect_error_raises_network_error(self, auth_tokens):
        await self._call_and_expect(
            auth_tokens, httpx.ConnectError("connection refused"), NetworkError
        )

    @pytest.mark.asyncio
    async def test_generic_request_error_raises_network_error(self, auth_tokens):
        await self._call_and_expect(
            auth_tokens, httpx.RequestError("something went wrong"), NetworkError
        )
class TestRPCCallAuthRetry:
    """Tests for auth retry path after decode_response raises RPCError."""

    @pytest.mark.asyncio
    async def test_auth_retry_on_decode_rpc_error(self, auth_tokens):
        import asyncio

        async with NotebookLMClient(auth_tokens) as client:
            core = client._core
            # Install a refresh hook so the auth-failure path has something to call.
            refresh_cb = AsyncMock()
            core._refresh_callback = refresh_cb
            core._refresh_lock = asyncio.Lock()

            ok_response = MagicMock()
            ok_response.status_code = 200
            ok_response.text = "some_valid_response"

            # First decode raises an auth-flavoured RPCError, second succeeds —
            # rpc_call() should refresh credentials and retry transparently.
            decode_outcomes = [RPCError("authentication expired"), ["result_data"]]
            with (
                patch.object(core._http_client, "post", return_value=ok_response),
                patch("notebooklm._core.decode_response", side_effect=decode_outcomes),
            ):
                outcome = await core.rpc_call(RPCMethod.LIST_NOTEBOOKS, [])
            assert outcome == ["result_data"]
            refresh_cb.assert_called_once()
class TestGetHttpClient:
    """Tests for get_http_client() RuntimeError when not initialized."""

    def test_get_http_client_raises_when_not_initialized(self, auth_tokens):
        # A core that never entered its context has no HTTP client yet.
        with pytest.raises(RuntimeError, match="not initialized"):
            ClientCore(auth_tokens).get_http_client()

    @pytest.mark.asyncio
    async def test_get_http_client_returns_client_when_initialized(self, auth_tokens):
        async with NotebookLMClient(auth_tokens) as client:
            assert isinstance(client._core.get_http_client(), httpx.AsyncClient)
class TestConversationCacheFIFOEviction:
    """Tests for FIFO eviction when conversation cache exceeds MAX_CONVERSATION_CACHE_SIZE."""

    @staticmethod
    def _core_with_full_cache(auth_tokens):
        # Return a core whose cache holds exactly MAX_CONVERSATION_CACHE_SIZE
        # conversations, named conv_0 .. conv_{N-1} in insertion order.
        core = ClientCore(auth_tokens)
        for idx in range(MAX_CONVERSATION_CACHE_SIZE):
            core.cache_conversation_turn(f"conv_{idx}", f"q{idx}", f"a{idx}", idx)
        return core

    def test_fifo_eviction_when_cache_is_full(self, auth_tokens):
        core = self._core_with_full_cache(auth_tokens)
        assert len(core._conversation_cache) == MAX_CONVERSATION_CACHE_SIZE
        # One more conversation should push out the oldest entry (conv_0).
        core.cache_conversation_turn("conv_new", "q_new", "a_new", 0)
        assert len(core._conversation_cache) == MAX_CONVERSATION_CACHE_SIZE
        assert "conv_0" not in core._conversation_cache
        assert "conv_new" in core._conversation_cache

    def test_fifo_eviction_preserves_order(self, auth_tokens):
        core = self._core_with_full_cache(auth_tokens)
        # Two new conversations should evict conv_0 and then conv_1, in order.
        core.cache_conversation_turn("conv_new_1", "q1", "a1", 0)
        core.cache_conversation_turn("conv_new_2", "q2", "a2", 0)
        assert "conv_0" not in core._conversation_cache
        assert "conv_1" not in core._conversation_cache
        assert "conv_new_1" in core._conversation_cache
        assert "conv_new_2" in core._conversation_cache

    def test_adding_turns_to_existing_conversation_does_not_evict(self, auth_tokens):
        core = self._core_with_full_cache(auth_tokens)
        # A second turn on an EXISTING conversation must not trigger eviction.
        core.cache_conversation_turn("conv_0", "q_extra", "a_extra", 1)
        assert len(core._conversation_cache) == MAX_CONVERSATION_CACHE_SIZE
        assert len(core._conversation_cache["conv_0"]) == 2
class TestClearConversationCacheNotFound:
    """Tests for clear_conversation_cache() returning False when ID not found."""

    def test_clear_nonexistent_conversation_returns_false(self, auth_tokens):
        # Clearing an ID that was never cached reports failure.
        assert ClientCore(auth_tokens).clear_conversation_cache("nonexistent_id") is False

    def test_clear_existing_conversation_returns_true(self, auth_tokens):
        core = ClientCore(auth_tokens)
        core.cache_conversation_turn("conv_abc", "question", "answer", 1)
        assert core.clear_conversation_cache("conv_abc") is True
        assert "conv_abc" not in core._conversation_cache

    def test_clear_all_conversations_returns_true(self, auth_tokens):
        core = ClientCore(auth_tokens)
        core.cache_conversation_turn("conv_1", "q1", "a1", 1)
        core.cache_conversation_turn("conv_2", "q2", "a2", 1)
        # No ID argument clears the whole cache.
        assert core.clear_conversation_cache() is True
        assert len(core._conversation_cache) == 0
class TestGetSourceIds:
    """Tests for get_source_ids() extracting source IDs from notebook data."""

    @staticmethod
    async def _ids_for(auth_tokens, payload):
        # Stub rpc_call with the given raw payload and collect the extracted IDs.
        async with NotebookLMClient(auth_tokens) as client:
            core = client._core
            with patch.object(
                core, "rpc_call", new_callable=AsyncMock, return_value=payload
            ):
                return await core.get_source_ids("nb_123")

    @pytest.mark.asyncio
    async def test_returns_source_ids_from_nested_data(self, auth_tokens):
        payload = [
            [
                "notebook_title",
                [
                    [["src_id_1", "extra"]],
                    [["src_id_2", "extra"]],
                ],
            ]
        ]
        assert await self._ids_for(auth_tokens, payload) == ["src_id_1", "src_id_2"]

    @pytest.mark.asyncio
    async def test_returns_empty_list_when_data_is_none(self, auth_tokens):
        assert await self._ids_for(auth_tokens, None) == []

    @pytest.mark.asyncio
    async def test_returns_empty_list_when_data_is_empty_list(self, auth_tokens):
        assert await self._ids_for(auth_tokens, []) == []

    @pytest.mark.asyncio
    async def test_returns_empty_list_when_sources_list_is_empty(self, auth_tokens):
        # Notebook with no sources
        assert await self._ids_for(auth_tokens, [["notebook_title", []]]) == []

    @pytest.mark.asyncio
    async def test_returns_empty_list_when_data_is_not_list(self, auth_tokens):
        assert await self._ids_for(auth_tokens, "unexpected_string") == []

    @pytest.mark.asyncio
    async def test_returns_empty_list_when_notebook_info_missing_sources(self, auth_tokens):
        # notebook_data[0] exists but notebook_info[1] is missing
        assert await self._ids_for(auth_tokens, [["notebook_title_only"]]) == []

View file

@ -562,3 +562,133 @@ class TestNotebookEdgeCases:
# Should only include valid topics
assert len(description.suggested_topics) == 1
assert description.suggested_topics[0].question == "Valid question"
class TestDescribeEdgeCases:
    """Tests for get_description() branch edge cases."""

    async def _describe(self, auth_tokens, httpx_mock, build_rpc_response, result):
        # Serve a single canned SUMMARIZE response and return the parsed description.
        httpx_mock.add_response(
            content=build_rpc_response(RPCMethod.SUMMARIZE, result).encode()
        )
        async with NotebookLMClient(auth_tokens) as client:
            return await client.notebooks.get_description("nb_123")

    @pytest.mark.asyncio
    async def test_get_description_no_topics_key(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 171->188: result has only [0] (no result[1]) so topics stay empty."""
        # result = [["A summary"]] — len is 1, so result[1] branch is never entered
        description = await self._describe(
            auth_tokens, httpx_mock, build_rpc_response, [["A summary"]]
        )
        assert description.summary == "A summary"
        assert description.suggested_topics == []

    @pytest.mark.asyncio
    async def test_get_description_result_1_is_empty_list(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 173->177: result[1] exists but is an empty list, so inner block skipped."""
        # result = [["A summary"], []] — result[1] has len 0, so the inner if is false
        description = await self._describe(
            auth_tokens, httpx_mock, build_rpc_response, [["A summary"], []]
        )
        assert description.summary == "A summary"
        assert description.suggested_topics == []

    @pytest.mark.asyncio
    async def test_get_description_result_1_not_list(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 173->177: result[1] is present but not a list, inner block skipped."""
        # result = [["A summary"], "not-a-list"]
        description = await self._describe(
            auth_tokens, httpx_mock, build_rpc_response, [["A summary"], "not-a-list"]
        )
        assert description.summary == "A summary"
        assert description.suggested_topics == []
class TestShareEdgeCases:
    """Tests for share() and get_share_url() branch edge cases."""

    @pytest.mark.asyncio
    async def test_share_with_artifact_id(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 260: share() public=True with artifact_id builds deep-link URL."""
        httpx_mock.add_response(
            content=build_rpc_response(RPCMethod.SHARE_ARTIFACT, None).encode()
        )
        async with NotebookLMClient(auth_tokens) as client:
            shared = await client.notebooks.share(
                "nb_123", public=True, artifact_id="art_456"
            )
        assert shared["public"] is True
        assert shared["url"] == "https://notebooklm.google.com/notebook/nb_123?artifactId=art_456"
        assert shared["artifact_id"] == "art_456"

    @pytest.mark.asyncio
    async def test_share_public_false_returns_none_url(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 264: share() public=False sets url to None."""
        httpx_mock.add_response(
            content=build_rpc_response(RPCMethod.SHARE_ARTIFACT, None).encode()
        )
        async with NotebookLMClient(auth_tokens) as client:
            shared = await client.notebooks.share("nb_123", public=False)
        assert shared["public"] is False
        assert shared["url"] is None

    @pytest.mark.asyncio
    async def test_get_share_url_without_artifact(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
    ):
        """Line 288: get_share_url() without artifact_id returns base URL."""
        async with NotebookLMClient(auth_tokens) as client:
            assert (
                client.notebooks.get_share_url("nb_123")
                == "https://notebooklm.google.com/notebook/nb_123"
            )

    @pytest.mark.asyncio
    async def test_get_share_url_with_artifact(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
    ):
        """Lines 285-287: get_share_url() with artifact_id appends query param."""
        async with NotebookLMClient(auth_tokens) as client:
            assert (
                client.notebooks.get_share_url("nb_123", artifact_id="art_789")
                == "https://notebooklm.google.com/notebook/nb_123?artifactId=art_789"
            )

View file

@ -4,6 +4,7 @@ import pytest
from pytest_httpx import HTTPXMock
from notebooklm import NotebookLMClient
from notebooklm.rpc import RPCMethod
class TestResearchAPI:
@ -244,3 +245,454 @@ class TestResearchAPI:
result = await client.research.import_sources("nb_123", "task_123", [])
assert result == []
class TestPollEdgeCases:
    """Tests for poll() parsing branch edge cases."""

    async def _poll(self, auth_tokens, httpx_mock, build_rpc_response, payload):
        # Encode one raw POLL_RESEARCH payload as the RPC response and run poll().
        httpx_mock.add_response(
            content=build_rpc_response(RPCMethod.POLL_RESEARCH, payload).encode()
        )
        async with NotebookLMClient(auth_tokens) as client:
            return await client.research.poll("nb_123")

    @pytest.mark.asyncio
    async def test_poll_unwrap_nested_result(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 132: result[0] is a list whose first element is also a list — unwrap one level."""
        # Outer list wraps the inner task list: result[0][0] is a list → unwrap
        payload = [[["task_wrap", [None, ["wrapped query"], None, [], 1]]]]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["task_id"] == "task_wrap"
        assert result["query"] == "wrapped query"

    @pytest.mark.asyncio
    async def test_poll_skips_non_list_task_data(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 137: task_data is not a list — continue, eventually return no_research."""
        # Outer list contains a non-list item then a too-short list
        payload = ["not_a_list", ["only_one_elem"]]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "no_research"

    @pytest.mark.asyncio
    async def test_poll_skips_non_string_task_id(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 143: task_id is not str — continue, eventually return no_research."""
        # task_id is an integer (not str) and task_info is a list
        payload = [[42, [None, ["query"], None, [], 1]]]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "no_research"

    @pytest.mark.asyncio
    async def test_poll_skips_non_list_task_info(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 143: task_info is not a list — continue, eventually return no_research."""
        # task_id is str but task_info is a string, not list
        payload = [["task_bad", "not_a_list"]]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "no_research"

    @pytest.mark.asyncio
    async def test_poll_sources_and_summary_has_only_sources_no_summary(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 157->160: sources_and_summary has len 1 (sources only, no summary string)."""
        payload = [
            [
                "task_nosummary",
                [
                    None,
                    ["no summary query"],
                    None,
                    [
                        [
                            ["https://example.com", "Title", "desc"],
                        ]
                        # No second element — summary is absent
                    ],
                    2,
                ],
            ]
        ]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "completed"
        assert result["summary"] == ""
        assert len(result["sources"]) == 1

    @pytest.mark.asyncio
    async def test_poll_skips_short_source_entry(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 163: a source entry in sources_data is too short (len < 2) — skipped."""
        payload = [
            [
                "task_shortsrc",
                [
                    None,
                    ["short src query"],
                    None,
                    [
                        [
                            ["only_one_element"],  # len < 2 → skipped
                            ["https://valid.com", "Valid"],  # kept
                        ],
                        "Summary text",
                    ],
                    2,
                ],
            ]
        ]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        # Only the valid source is returned
        assert len(result["sources"]) == 1
        assert result["sources"][0]["url"] == "https://valid.com"

    @pytest.mark.asyncio
    async def test_poll_deep_research_source_none_first_element(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Lines 171-172: deep research source where src[0] is None — title extracted, url=''."""
        payload = [
            [
                "task_deep",
                [
                    None,
                    ["deep query"],
                    None,
                    [
                        [
                            [None, "Deep Research Title", None, "web"],
                        ],
                        "Deep summary",
                    ],
                    2,
                ],
            ]
        ]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "completed"
        assert len(result["sources"]) == 1
        assert result["sources"][0]["title"] == "Deep Research Title"
        assert result["sources"][0]["url"] == ""

    @pytest.mark.asyncio
    async def test_poll_fast_research_source_with_url(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Lines 173-175: fast research source where src[0] is a str URL."""
        payload = [
            [
                "task_fast",
                [
                    None,
                    ["fast query"],
                    None,
                    [
                        [
                            ["https://fast.example.com", "Fast Title", "desc", "web"],
                        ],
                        "Fast summary",
                    ],
                    1,
                ],
            ]
        ]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "in_progress"
        assert result["sources"][0]["url"] == "https://fast.example.com"
        assert result["sources"][0]["title"] == "Fast Title"

    @pytest.mark.asyncio
    async def test_poll_source_with_no_title_or_url_skipped(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 177->161: src has two elements but neither is title nor url — not appended."""
        payload = [
            [
                "task_empty_src",
                [
                    None,
                    ["empty src query"],
                    None,
                    [
                        [
                            # src[0] is not None and not str (e.g. integer), len < 3
                            # so url="", title="" and nothing is appended
                            [42, 99],
                        ],
                        "summary here",
                    ],
                    2,
                ],
            ]
        ]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "completed"
        assert result["sources"] == []

    @pytest.mark.asyncio
    async def test_poll_all_tasks_invalid_returns_no_research(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 193: all items in the loop fail validation — final no_research is returned."""
        # All task_data entries are short lists (len < 2) so every iteration hits `continue`
        payload = [["only_one"], ["also_one"]]
        result = await self._poll(auth_tokens, httpx_mock, build_rpc_response, payload)
        assert result["status"] == "no_research"
class TestImportSourcesEdgeCases:
    """Tests for import_sources() parsing branch edge cases."""

    async def _import(self, auth_tokens, httpx_mock, build_rpc_response, payload, sources):
        # Serve one IMPORT_RESEARCH response and run import_sources() with `sources`.
        httpx_mock.add_response(
            content=build_rpc_response(RPCMethod.IMPORT_RESEARCH, payload).encode()
        )
        async with NotebookLMClient(auth_tokens) as client:
            return await client.research.import_sources("nb_123", "task_123", sources)

    @pytest.mark.asyncio
    async def test_import_sources_skips_no_url_sources(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Lines 226, 228: sources without URLs are skipped; if ALL lack URLs, return []."""
        # No HTTP call should be made when all sources lack URLs
        async with NotebookLMClient(auth_tokens) as client:
            result = await client.research.import_sources(
                "nb_123",
                "task_123",
                [{"title": "No URL source"}, {"title": "Also no URL"}],
            )
        assert result == []

    @pytest.mark.asyncio
    async def test_import_sources_filters_some_no_url(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 226: sources without URLs are filtered, valid ones are imported."""
        # Double-wrap so the unwrap logic peels one layer: result[0][0] is a list
        payload = [
            [
                [["src_good"], "Good Source"],
            ]
        ]
        result = await self._import(
            auth_tokens,
            httpx_mock,
            build_rpc_response,
            payload,
            [
                {"url": "https://good.com", "title": "Good Source"},
                {"title": "No URL source"},  # filtered out
            ],
        )
        assert len(result) == 1
        assert result[0]["id"] == "src_good"

    @pytest.mark.asyncio
    async def test_import_sources_no_double_nesting(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 257->265: result[0][0] is not a list — no unwrap, loop runs on original result.

        The unwrap condition requires result[0][0] to be a list. Here result[0] is a
        string, so isinstance(result[0], list) is False, the unwrap is skipped, and
        the for loop processes each item of the original result directly: the string
        entry is ignored and the valid [["src_nw"], "No-Wrap Title"] entry is kept.
        """
        payload = ["string_not_list", [["src_nw"], "No-Wrap Title"]]
        result = await self._import(
            auth_tokens,
            httpx_mock,
            build_rpc_response,
            payload,
            [{"url": "https://nowrap.example.com", "title": "No-Wrap Title"}],
        )
        # "string_not_list" is not a list → skipped; [["src_nw"], "No-Wrap Title"] is valid
        assert len(result) == 1
        assert result[0]["id"] == "src_nw"
        assert result[0]["title"] == "No-Wrap Title"

    @pytest.mark.asyncio
    async def test_import_sources_src_data_too_short_skipped(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 266->265: src_data in result has len < 2 — skipped in loop."""
        # First entry is too short (len 1), second is valid
        payload = [
            ["short_only"],  # len 1 — skipped
            [["src_valid"], "Valid"],  # len 2 — kept
        ]
        result = await self._import(
            auth_tokens,
            httpx_mock,
            build_rpc_response,
            payload,
            [{"url": "https://example.com", "title": "Valid"}],
        )
        assert len(result) == 1
        assert result[0]["id"] == "src_valid"

    @pytest.mark.asyncio
    async def test_import_sources_src_id_none_skipped(
        self,
        auth_tokens,
        httpx_mock: HTTPXMock,
        build_rpc_response,
    ):
        """Line 270->265: src_data[0] is None (not a list) — src_id is None, entry skipped."""
        payload = [
            [None, "Title with no ID"],  # src_data[0] is None → skipped
            [["src_real"], "Real Title"],  # valid
        ]
        result = await self._import(
            auth_tokens,
            httpx_mock,
            build_rpc_response,
            payload,
            [{"url": "https://example.com", "title": "anything"}],
        )
        assert len(result) == 1
        assert result[0]["id"] == "src_real"

File diff suppressed because it is too large Load diff

View file

@ -893,3 +893,691 @@ class TestRateLimitDetection:
data = json.loads(result.output)
assert data["error"] is True
assert data["code"] == "RATE_LIMITED"
# =============================================================================
# RESOLVE_LANGUAGE DIRECT TESTS
# =============================================================================
class TestResolveLanguageDirect:
    """Direct tests for resolve_language() covering uncovered branches."""

    @staticmethod
    def _generate_module():
        # Late import so module resolution happens at test time, not collection time.
        import importlib

        return importlib.import_module("notebooklm.cli.generate")

    def test_invalid_language_raises_bad_parameter(self):
        """Line 111: language not in SUPPORTED_LANGUAGES raises click.BadParameter."""
        import click

        mod = self._generate_module()
        with pytest.raises(click.BadParameter) as exc_info:
            mod.resolve_language("xx_INVALID")
        message = str(exc_info.value)
        assert "Unknown language code: xx_INVALID" in message
        assert "notebooklm language list" in message

    def test_none_language_with_config_returns_config(self):
        """Line 118: language is None, config_lang is not None → returns config_lang."""
        mod = self._generate_module()
        with patch.object(mod, "get_language", return_value="fr"):
            assert mod.resolve_language(None) == "fr"

    def test_none_language_no_config_returns_default(self):
        """Line 139: language is None and config_lang is None → returns DEFAULT_LANGUAGE."""
        mod = self._generate_module()
        with patch.object(mod, "get_language", return_value=None):
            assert mod.resolve_language(None) == "en"
# =============================================================================
# _OUTPUT_GENERATION_STATUS DIRECT TESTS
# =============================================================================
class TestOutputGenerationStatusDirect:
    """Direct tests for _output_generation_status() covering uncovered branches.

    Line numbers in the test docstrings refer to notebooklm/cli/generate.py.
    """

    def setup_method(self):
        # Re-import the module per test so patch.object targets a fresh handle.
        import importlib

        self.generate_module = importlib.import_module("notebooklm.cli.generate")

    def _make_status(
        self, *, is_complete=False, is_failed=False, task_id=None, url=None, error=None
    ):
        # Build a MagicMock stand-in exposing the attributes the function reads.
        status = MagicMock()
        status.is_complete = is_complete
        status.is_failed = is_failed
        status.task_id = task_id
        status.url = url
        status.error = error
        return status

    def test_json_completed_with_url(self):
        """Lines 200-201, 243: JSON output for completed status with URL."""
        status = self._make_status(
            is_complete=True, task_id="task_123", url="https://example.com/audio.mp3"
        )
        with patch.object(self.generate_module, "json_output_response") as mock_json:
            self.generate_module._output_generation_status(status, "audio", json_output=True)
            mock_json.assert_called_once_with(
                {"task_id": "task_123", "status": "completed", "url": "https://example.com/audio.mp3"}
            )

    def test_json_failed(self):
        """Line 251: JSON output for failed status."""
        status = self._make_status(is_failed=True, error="Something went wrong")
        with patch.object(self.generate_module, "json_error_response") as mock_err:
            self.generate_module._output_generation_status(status, "audio", json_output=True)
            mock_err.assert_called_once_with("GENERATION_FAILED", "Something went wrong")

    def test_json_failed_no_error_message(self):
        """Line 251: JSON failed output falls back to default message when error is None."""
        status = self._make_status(is_failed=True, error=None)
        with patch.object(self.generate_module, "json_error_response") as mock_err:
            self.generate_module._output_generation_status(status, "audio", json_output=True)
            mock_err.assert_called_once_with("GENERATION_FAILED", "Audio generation failed")

    def test_json_pending_with_task_id(self):
        """Lines 205-207, 257: JSON output for pending status extracts task_id from list."""
        # Use a list result (lines 205-207: list path in handle_generation_result)
        # and pending path in _output_generation_status (lines 255-257)
        status = MagicMock()
        status.is_complete = False
        status.is_failed = False
        status.task_id = "task_456"
        with patch.object(self.generate_module, "json_output_response") as mock_json:
            self.generate_module._output_generation_status(status, "audio", json_output=True)
            mock_json.assert_called_once_with({"task_id": "task_456", "status": "pending"})

    def test_text_completed_with_url(self):
        """Line 262: Text output for completed status with URL."""
        status = self._make_status(
            is_complete=True, task_id="task_123", url="https://example.com/audio.mp3"
        )
        with patch.object(self.generate_module, "console") as mock_console:
            self.generate_module._output_generation_status(status, "audio", json_output=False)
            mock_console.print.assert_called_once_with(
                "[green]Audio ready:[/green] https://example.com/audio.mp3"
            )

    def test_text_completed_without_url(self):
        """Line 264: Text output for completed status without URL."""
        status = self._make_status(is_complete=True, task_id="task_123", url=None)
        with patch.object(self.generate_module, "console") as mock_console:
            self.generate_module._output_generation_status(status, "audio", json_output=False)
            mock_console.print.assert_called_once_with("[green]Audio ready[/green]")

    def test_text_failed(self):
        """Line 266: Text output for failed status."""
        status = self._make_status(is_failed=True, error="Transcription error")
        with patch.object(self.generate_module, "console") as mock_console:
            self.generate_module._output_generation_status(status, "audio", json_output=False)
            mock_console.print.assert_called_once_with("[red]Failed:[/red] Transcription error")

    def test_text_pending_with_task_id(self):
        """Line 268: Text output for pending status shows task_id."""
        status = self._make_status(task_id="task_789")
        with patch.object(self.generate_module, "console") as mock_console:
            self.generate_module._output_generation_status(status, "audio", json_output=False)
            mock_console.print.assert_called_once_with("[yellow]Started:[/yellow] task_789")

    def test_text_pending_without_task_id_shows_status(self):
        """Line 268: Text output for pending status shows status object when no task_id."""
        status = MagicMock()
        status.is_complete = False
        status.is_failed = False
        # Make _extract_task_id return None by having no task_id attr and not a dict/list
        # (deleting a MagicMock attribute makes subsequent access raise AttributeError).
        del status.task_id
        with (
            patch.object(self.generate_module, "_extract_task_id", return_value=None),
            patch.object(self.generate_module, "console") as mock_console,
        ):
            self.generate_module._output_generation_status(status, "audio", json_output=False)
            mock_console.print.assert_called_once()
            call_args = mock_console.print.call_args[0][0]
            assert "[yellow]Started:[/yellow]" in call_args
class TestExtractTaskIdDirect:
    """Drive _extract_task_id() directly across every supported input shape."""

    def setup_method(self):
        # Resolve the CLI generate module freshly for each test.
        import importlib

        self.generate_module = importlib.import_module("notebooklm.cli.generate")

    def _extract(self, value):
        """Shorthand for the function under test."""
        return self.generate_module._extract_task_id(value)

    def test_extract_from_list_first_string(self):
        """Lines 231-232: a list whose first element is a string yields it."""
        assert self._extract(["task_abc", "other"]) == "task_abc"

    def test_extract_from_list_first_not_string(self):
        """Line 233: a list headed by a non-string yields None."""
        assert self._extract([123, "other"]) is None

    def test_extract_from_empty_list(self):
        """Line 233: an empty list yields None."""
        assert self._extract([]) is None

    def test_extract_from_dict_task_id(self):
        """Line 228: a dict exposing 'task_id' yields that value."""
        assert self._extract({"task_id": "t1", "status": "pending"}) == "t1"

    def test_extract_from_dict_artifact_id(self):
        """Line 228: without 'task_id', a dict's 'artifact_id' is used instead."""
        assert self._extract({"artifact_id": "a1"}) == "a1"

    def test_extract_from_object_with_task_id(self):
        """Line 228: an object carrying a task_id attribute yields it."""
        carrier = MagicMock()
        carrier.task_id = "task_obj"
        assert self._extract(carrier) == "task_obj"
# =============================================================================
# _OUTPUT_MIND_MAP_RESULT DIRECT TESTS
# =============================================================================
class TestOutputMindMapResultDirect:
    """Direct tests for _output_mind_map_result() output branches."""

    def setup_method(self):
        import importlib

        self.generate_module = importlib.import_module("notebooklm.cli.generate")

    def test_falsy_result_json_calls_error(self):
        """Lines 624-626: a falsy result in JSON mode reports GENERATION_FAILED."""
        with patch.object(self.generate_module, "json_error_response") as err_mock:
            self.generate_module._output_mind_map_result(None, json_output=True)
        err_mock.assert_called_once_with("GENERATION_FAILED", "Mind map generation failed")

    def test_falsy_result_no_json_prints_message(self):
        """Lines 627-628: a falsy result in text mode prints a yellow notice."""
        with patch.object(self.generate_module, "console") as console_mock:
            self.generate_module._output_mind_map_result(None, json_output=False)
        console_mock.print.assert_called_with("[yellow]No result[/yellow]")

    def test_truthy_result_json_calls_output(self):
        """Line 631: a truthy result in JSON mode is forwarded verbatim."""
        payload = {"note_id": "n1", "mind_map": {"name": "Root", "children": []}}
        with patch.object(self.generate_module, "json_output_response") as json_mock:
            self.generate_module._output_mind_map_result(payload, json_output=True)
        json_mock.assert_called_once_with(payload)

    def test_truthy_result_dict_text_output(self):
        """Lines 633-635: text mode prints note id, root name, and child count."""
        payload = {
            "note_id": "n1",
            "mind_map": {"name": "Root", "children": [{"label": "Child1"}, {"label": "Child2"}]},
        }
        with patch.object(self.generate_module, "console") as console_mock:
            self.generate_module._output_mind_map_result(payload, json_output=False)
        printed = [call[0][0] for call in console_mock.print.call_args_list]
        assert any("n1" in line for line in printed)
        assert any("Root" in line for line in printed)
        assert any("2" in line for line in printed)

    def test_truthy_result_non_dict_text_output(self):
        """A truthy non-dict result in text mode is printed as-is."""
        with patch.object(self.generate_module, "console") as console_mock:
            self.generate_module._output_mind_map_result("some-string-result", json_output=False)
        printed = [call[0][0] for call in console_mock.print.call_args_list]
        assert any("some-string-result" in str(line) for line in printed)
# =============================================================================
# GENERATE REVISE-SLIDE CLI TESTS
# =============================================================================
class TestGenerateReviseSlide:
    """Tests for the 'generate revise-slide' CLI command (lines 971-989).

    All tests stub the client factory and token fetch so no network I/O occurs.
    """

    def test_revise_slide_basic(self, runner, mock_auth):
        """Lines 971-975: revise-slide command invokes client.artifacts.revise_slide."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            # Stub the API call; the CLI only needs a processing-state payload.
            mock_client.artifacts.revise_slide = AsyncMock(
                return_value={"artifact_id": "art_rev_1", "status": "processing"}
            )
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "revise-slide",
                        "Make the title bigger",
                        "--artifact",
                        "art_1",
                        "--slide",
                        "0",
                        "-n",
                        "nb_123",
                    ],
                )
                assert result.exit_code == 0
                mock_client.artifacts.revise_slide.assert_called_once()

    def test_revise_slide_passes_correct_args(self, runner, mock_auth):
        """Lines 985-989: verify artifact_id, slide_index, and prompt are forwarded."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.revise_slide = AsyncMock(
                return_value={"artifact_id": "art_rev_2", "status": "processing"}
            )
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "revise-slide",
                        "Remove taxonomy",
                        "--artifact",
                        "art_1",
                        "--slide",
                        "3",
                        "-n",
                        "nb_123",
                    ],
                )
                assert result.exit_code == 0
                # Inspect the recorded call to confirm kwarg forwarding
                # (including str→int conversion of --slide).
                call_kwargs = mock_client.artifacts.revise_slide.call_args
                assert call_kwargs is not None, "revise_slide was not called"
                assert call_kwargs.kwargs.get("artifact_id") == "art_1"
                assert call_kwargs.kwargs.get("slide_index") == 3
                assert call_kwargs.kwargs.get("prompt") == "Remove taxonomy"

    def test_revise_slide_missing_artifact_fails(self, runner, mock_auth):
        """revise-slide requires --artifact option."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "revise-slide",
                        "Make bigger",
                        "--slide",
                        "0",
                        "-n",
                        "nb_123",
                    ],
                )
                # Click rejects the missing required option before any client call.
                assert result.exit_code != 0

    def test_revise_slide_missing_slide_fails(self, runner, mock_auth):
        """revise-slide requires --slide option."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "revise-slide",
                        "Make bigger",
                        "--artifact",
                        "art_1",
                        "-n",
                        "nb_123",
                    ],
                )
                assert result.exit_code != 0

    def test_revise_slide_json_output(self, runner, mock_auth):
        """revise-slide with --json flag produces JSON output."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.revise_slide = AsyncMock(
                return_value={"artifact_id": "art_rev_3", "status": "processing"}
            )
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "revise-slide",
                        "Bold the title",
                        "--artifact",
                        "art_1",
                        "--slide",
                        "1",
                        "-n",
                        "nb_123",
                        "--json",
                    ],
                )
                assert result.exit_code == 0
                # Output must be valid JSON carrying at least one status field.
                data = json.loads(result.output)
                assert "task_id" in data or "artifact_id" in data or "status" in data
# =============================================================================
# GENERATE REPORT WITH DESCRIPTION (LINE 1057)
# =============================================================================
class TestGenerateReportWithNonBriefingFormat:
    """Test generate report when description is provided with non-briefing-doc format.

    Line 1057: the else-branch that sets custom_prompt = description when
    report_format != 'briefing-doc' and description is provided.
    """

    def test_report_description_with_study_guide_format(self, runner, mock_auth):
        """Line 1057: description + non-default format → custom_prompt = description."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_report = AsyncMock(
                return_value={"artifact_id": "report_xyz", "status": "processing"}
            )
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "report",
                        "Focus on beginners",
                        "--format",
                        "study-guide",
                        "-n",
                        "nb_123",
                    ],
                )
                assert result.exit_code == 0
                mock_client.artifacts.generate_report.assert_called_once()
                call_kwargs = mock_client.artifacts.generate_report.call_args.kwargs
                # custom_prompt should be the description argument
                assert call_kwargs.get("custom_prompt") == "Focus on beginners"

    def test_report_description_with_blog_post_format(self, runner, mock_auth):
        """Line 1057: description + blog-post format → custom_prompt set."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_report = AsyncMock(
                return_value={"artifact_id": "report_abc", "status": "processing"}
            )
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(
                    cli,
                    [
                        "generate",
                        "report",
                        "Write in casual tone",
                        "--format",
                        "blog-post",
                        "-n",
                        "nb_123",
                    ],
                )
                assert result.exit_code == 0
                mock_client.artifacts.generate_report.assert_called_once()
                call_kwargs = mock_client.artifacts.generate_report.call_args.kwargs
                assert call_kwargs.get("custom_prompt") == "Write in casual tone"
# =============================================================================
# HANDLE_GENERATION_RESULT PATHS (GenerationStatus and list result formats)
# =============================================================================
class TestHandleGenerationResultPaths:
    """Test handle_generation_result branches: GenerationStatus input and list input."""

    def test_generation_result_with_generation_status_object(self, runner, mock_auth):
        """Lines 200-201: result is a GenerationStatus → task_id = result.task_id."""
        from notebooklm.types import GenerationStatus

        status = GenerationStatus(
            task_id="task_gen_1", status="pending", error=None, error_code=None
        )
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_audio = AsyncMock(return_value=status)
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123"])
                assert result.exit_code == 0
                assert "task_gen_1" in result.output or "Started" in result.output

    def test_generation_result_with_list_input(self, runner, mock_auth):
        """Lines 205-207: result is a list → task_id from first element."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_audio = AsyncMock(return_value=["task_list_1", "extra"])
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123"])
                assert result.exit_code == 0
                assert "task_list_1" in result.output or "Started" in result.output

    def test_generation_result_falsy_shows_failed_message(self, runner, mock_auth):
        """Line 173: falsy result → text error message."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            # None return simulates the backend giving no generation handle.
            mock_client.artifacts.generate_audio = AsyncMock(return_value=None)
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123"])
                assert result.exit_code == 0
                assert "generation failed" in result.output.lower()

    def test_generation_result_falsy_json_shows_error(self, runner, mock_auth):
        """Line 173: falsy result with --json → json_error_response (exits with code 1)."""
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_audio = AsyncMock(return_value=None)
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123", "--json"])
                # json_error_response calls sys.exit(1), so exit_code is 1
                data = json.loads(result.output)
                assert data["error"] is True
                assert data["code"] == "GENERATION_FAILED"

    def test_generation_with_wait_and_generation_status(self, runner, mock_auth):
        """Line 213: wait=True with GenerationStatus triggers wait_for_completion."""
        from notebooklm.types import GenerationStatus

        initial_status = GenerationStatus(
            task_id="task_wait_1", status="pending", error=None, error_code=None
        )
        completed_status = GenerationStatus(
            task_id="task_wait_1",
            status="completed",
            error=None,
            error_code=None,
            url="https://example.com/result.mp3",
        )
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_audio = AsyncMock(return_value=initial_status)
            mock_client.artifacts.wait_for_completion = AsyncMock(return_value=completed_status)
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123", "--wait"])
                assert result.exit_code == 0
                mock_client.artifacts.wait_for_completion.assert_called_once()
# =============================================================================
# ADDITIONAL TARGETED COVERAGE TESTS
# =============================================================================
class TestGenerateWithRetryConsoleOutput:
    """Test generate_with_retry console output branch (line 111)."""

    @pytest.mark.asyncio
    async def test_retry_shows_console_message_when_not_json(self):
        """Line 111: console.print shown during retry when json_output=False."""
        import importlib

        from notebooklm.types import GenerationStatus

        generate_module = importlib.import_module("notebooklm.cli.generate")
        # First attempt fails rate-limited (USER_DISPLAYABLE_ERROR), second succeeds.
        rate_limited = GenerationStatus(
            task_id="", status="failed", error="Rate limited", error_code="USER_DISPLAYABLE_ERROR"
        )
        success_result = GenerationStatus(
            task_id="task_123", status="pending", error=None, error_code=None
        )
        generate_fn = AsyncMock(side_effect=[rate_limited, success_result])
        # Patch asyncio.sleep so the retry back-off does not slow the test.
        with (
            patch.object(generate_module, "console") as mock_console,
            patch("asyncio.sleep", new_callable=AsyncMock),
        ):
            result = await generate_module.generate_with_retry(
                generate_fn, max_retries=1, artifact_type="audio", json_output=False
            )
            assert result == success_result
            # Console should have been called with the retry message
            mock_console.print.assert_called_once()
            call_text = mock_console.print.call_args[0][0]
            assert "rate limited" in call_text.lower() or "Retrying" in call_text
class TestHandleGenerationResultListPathAndWait:
    """Test handle_generation_result: list path and wait with console message."""

    def test_wait_with_task_id_shows_generating_message(self, runner, mock_auth):
        """Line 211->213: wait=True, task_id present, not json → console.print generating."""
        from notebooklm.types import GenerationStatus

        initial_status = GenerationStatus(
            task_id="task_console_1", status="pending", error=None, error_code=None
        )
        completed_status = GenerationStatus(
            task_id="task_console_1",
            status="completed",
            error=None,
            error_code=None,
            url="https://example.com/audio.mp3",
        )
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            mock_client.artifacts.generate_audio = AsyncMock(return_value=initial_status)
            mock_client.artifacts.wait_for_completion = AsyncMock(return_value=completed_status)
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123", "--wait"])
                assert result.exit_code == 0
                # The console message "Generating audio... Task: task_console_1" should appear
                assert "task_console_1" in result.output or "Generating" in result.output
                mock_client.artifacts.wait_for_completion.assert_called_once()

    def test_list_result_extracts_task_id_for_wait(self, runner, mock_auth):
        """Lines 205->210, 213: list result + wait=True → task_id from list[0]."""
        from notebooklm.types import GenerationStatus

        completed_status = GenerationStatus(
            task_id="task_list_wait",
            status="completed",
            error=None,
            error_code=None,
            url="https://example.com/audio.mp3",
        )
        with patch_client_for_module("generate") as mock_client_cls:
            mock_client = create_mock_client()
            # A bare list result forces _extract_task_id's list branch.
            mock_client.artifacts.generate_audio = AsyncMock(
                return_value=["task_list_wait", "extra"]
            )
            mock_client.artifacts.wait_for_completion = AsyncMock(return_value=completed_status)
            mock_client_cls.return_value = mock_client
            with patch("notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock) as mock_fetch:
                mock_fetch.return_value = ("csrf", "session")
                result = runner.invoke(cli, ["generate", "audio", "-n", "nb_123", "--wait"])
                assert result.exit_code == 0
                mock_client.artifacts.wait_for_completion.assert_called_once()
class TestOutputMindMapNonDictMindMap:
    """_output_mind_map_result() when the mind_map payload is not a dict (line 985->else)."""

    def setup_method(self):
        import importlib

        self.generate_module = importlib.import_module("notebooklm.cli.generate")

    def test_mind_map_non_dict_value_prints_directly(self):
        """Line 985->else (988-989): non-dict mind_map → console.print(result)."""
        payload = {
            "note_id": "n1",
            "mind_map": ["node1", "node2"],  # a list, not a dict, forces the else branch
        }
        with patch.object(self.generate_module, "console") as fake_console:
            self.generate_module._output_mind_map_result(payload, json_output=False)
        # Header and Note ID are printed, followed by the raw result.
        printed = [args[0][0] for args in fake_console.print.call_args_list]
        assert any("n1" in str(item) for item in printed)

View file

@ -2,7 +2,7 @@
import importlib
import json
from unittest.mock import patch
from unittest.mock import MagicMock, patch
import pytest
from click.testing import CliRunner
@ -180,3 +180,233 @@ class TestGenerateUsesConfigLanguage:
assert result.exit_code == 0
assert "--language" in result.output
assert "from config" in result.output.lower() or "default" in result.output.lower()
# =============================================================================
# GET_CONFIG ERROR PATHS (lines 116-121)
# =============================================================================
class TestGetConfigErrorPaths:
    """get_config() must degrade to an empty dict when the config is unreadable."""

    def test_get_config_json_decode_error(self, tmp_path):
        """Malformed JSON in the config file yields {} instead of raising."""
        broken = tmp_path / "config.json"
        broken.write_text("this is not valid json{{{")
        with patch.object(language_module, "get_config_path", return_value=broken):
            assert language_module.get_config() == {}

    def test_get_config_oserror(self, tmp_path):
        """An OSError while reading the file also yields {}."""
        cfg = tmp_path / "config.json"
        # The file must exist so exists() passes; then read_text is made to fail.
        cfg.write_text('{"language": "en"}')
        with (
            patch.object(language_module, "get_config_path", return_value=cfg),
            patch.object(cfg.__class__, "read_text", side_effect=OSError("permission denied")),
        ):
            assert language_module.get_config() == {}
# =============================================================================
# _SYNC_LANGUAGE_TO_SERVER AND _GET_LANGUAGE_FROM_SERVER (lines 162-164, 176-186)
# =============================================================================
class TestSyncLanguageToServer:
    """_sync_language_to_server() returns the RPC result, or None on any failure."""

    @staticmethod
    def _ctx(auth=None):
        # Minimal click-context stand-in; only .obj is consulted.
        ctx = MagicMock()
        ctx.obj = {} if auth is None else {"auth": auth}
        return ctx

    def test_sync_language_to_server_success(self):
        """On success the run_async result is passed straight through."""
        ctx = self._ctx({"SID": "test", "HSID": "test", "SSID": "test"})
        with (
            patch.object(language_module, "get_auth_tokens", return_value={"SID": "test"}),
            patch.object(language_module, "run_async", return_value="en") as run_mock,
        ):
            assert language_module._sync_language_to_server("en", ctx) == "en"
        run_mock.assert_called_once()

    def test_sync_language_to_server_exception_returns_none(self):
        """Auth-token lookup failures are swallowed and reported as None."""
        with patch.object(language_module, "get_auth_tokens", side_effect=Exception("no auth")):
            assert language_module._sync_language_to_server("en", self._ctx()) is None

    def test_sync_language_to_server_run_async_exception(self):
        """RPC-level failures are likewise reported as None."""
        with (
            patch.object(language_module, "get_auth_tokens", return_value={"SID": "test"}),
            patch.object(language_module, "run_async", side_effect=Exception("connection error")),
        ):
            assert language_module._sync_language_to_server("en", self._ctx()) is None
class TestGetLanguageFromServer:
    """_get_language_from_server() returns the server value, or None on any failure."""

    @staticmethod
    def _ctx(auth=None):
        # Minimal click-context stand-in; only .obj is consulted.
        ctx = MagicMock()
        ctx.obj = {} if auth is None else {"auth": auth}
        return ctx

    def test_get_language_from_server_success(self):
        """On success the server-reported language is returned."""
        ctx = self._ctx({"SID": "test"})
        with (
            patch.object(language_module, "get_auth_tokens", return_value={"SID": "test"}),
            patch.object(language_module, "run_async", return_value="fr") as run_mock,
        ):
            assert language_module._get_language_from_server(ctx) == "fr"
        run_mock.assert_called_once()

    def test_get_language_from_server_exception_returns_none(self):
        """Auth-token lookup failures are swallowed and reported as None."""
        with patch.object(language_module, "get_auth_tokens", side_effect=Exception("no auth")):
            assert language_module._get_language_from_server(self._ctx()) is None

    def test_get_language_from_server_run_async_exception(self):
        """RPC-level failures are likewise reported as None."""
        with (
            patch.object(language_module, "get_auth_tokens", return_value={"SID": "test"}),
            patch.object(language_module, "run_async", side_effect=Exception("rpc error")),
        ):
            assert language_module._get_language_from_server(self._ctx()) is None
# =============================================================================
# LANGUAGE GET SERVER SYNC PATHS (lines 244-250, 270)
# =============================================================================
class TestLanguageGetServerSyncPaths:
    """'language get' reconciles the local config with the server-reported value."""

    def test_language_get_server_has_different_value_updates_local(self, runner, mock_config_file):
        """Test 'language get' updates local config when server has a different value."""
        # Local is "en", server returns "fr" → local should be updated to "fr"
        mock_config_file.write_text(json.dumps({"language": "en"}))
        with patch.object(language_module, "_get_language_from_server", return_value="fr"):
            result = runner.invoke(cli, ["language", "get"])
            assert result.exit_code == 0
            # Local config should be updated to "fr"
            config = json.loads(mock_config_file.read_text())
            assert config["language"] == "fr"
            # Output should show "fr" (the server value)
            assert "fr" in result.output

    def test_language_get_server_different_shows_synced(self, runner, mock_config_file):
        """Test 'language get' shows synced message when server differs from local."""
        mock_config_file.write_text(json.dumps({"language": "en"}))
        with patch.object(language_module, "_get_language_from_server", return_value="ja"):
            result = runner.invoke(cli, ["language", "get"])
            assert result.exit_code == 0
            assert "synced" in result.output.lower()

    def test_language_get_server_same_value_no_update(self, runner, mock_config_file):
        """Test 'language get' does not update local when server value matches."""
        mock_config_file.write_text(json.dumps({"language": "en"}))
        with (
            patch.object(language_module, "_get_language_from_server", return_value="en"),
            patch.object(language_module, "set_language") as mock_set,
        ):
            result = runner.invoke(cli, ["language", "get"])
            assert result.exit_code == 0
            # No write-back when local and server already agree.
            mock_set.assert_not_called()

    def test_language_get_no_language_shows_not_set(self, runner, mock_config_file):
        """Test 'language get' shows 'not set' when no language is configured and server returns None."""
        # No language configured locally
        with patch.object(language_module, "_get_language_from_server", return_value=None):
            result = runner.invoke(cli, ["language", "get"])
            assert result.exit_code == 0
            assert "not set" in result.output

    def test_language_get_server_sync_json_output(self, runner, mock_config_file):
        """Test 'language get --json' reflects synced_from_server when values differ."""
        mock_config_file.write_text(json.dumps({"language": "en"}))
        with patch.object(language_module, "_get_language_from_server", return_value="de"):
            result = runner.invoke(cli, ["language", "get", "--json"])
            assert result.exit_code == 0
            data = json.loads(result.output)
            assert data["language"] == "de"
            assert data["synced_from_server"] is True
# =============================================================================
# LANGUAGE SET SYNC FAILED AND JSON PATHS (lines 316-320, 335-336)
# =============================================================================
class TestLanguageSetSyncFailedAndJsonPaths:
    """'language set' messaging and JSON fields depend on server-sync success."""

    def test_language_set_sync_failed_shows_local_only_message(self, runner, mock_config_file):
        """Test 'language set' shows local-only message when server sync fails."""
        with patch.object(language_module, "_sync_language_to_server", return_value=None):
            result = runner.invoke(cli, ["language", "set", "en"])
            assert result.exit_code == 0
            assert "saved locally" in result.output or "server sync failed" in result.output

    def test_language_set_sync_success_shows_synced_message(self, runner, mock_config_file):
        """Test 'language set' shows synced message when server sync succeeds."""
        with patch.object(language_module, "_sync_language_to_server", return_value="en"):
            result = runner.invoke(cli, ["language", "set", "en"])
            assert result.exit_code == 0
            assert "synced" in result.output.lower()
            # Should NOT show "server sync failed"
            assert "server sync failed" not in result.output

    def test_language_set_json_output_with_server_sync(self, runner, mock_config_file):
        """Test 'language set --json' includes synced_to_server field."""
        with patch.object(language_module, "_sync_language_to_server", return_value="fr"):
            result = runner.invoke(cli, ["language", "set", "fr", "--json"])
            assert result.exit_code == 0
            data = json.loads(result.output)
            assert data["language"] == "fr"
            assert data["name"] == "Français"
            assert "synced_to_server" in data
            assert data["synced_to_server"] is True

    def test_language_set_json_output_sync_failed(self, runner, mock_config_file):
        """Test 'language set --json' shows synced_to_server=False when sync fails."""
        with patch.object(language_module, "_sync_language_to_server", return_value=None):
            result = runner.invoke(cli, ["language", "set", "ko", "--json"])
            assert result.exit_code == 0
            data = json.loads(result.output)
            assert data["language"] == "ko"
            assert "synced_to_server" in data
            assert data["synced_to_server"] is False

View file

@ -8,7 +8,13 @@ import pytest
from click.testing import CliRunner
from notebooklm.notebooklm_cli import cli
from notebooklm.types import Source
from notebooklm.types import (
Source,
SourceFulltext,
SourceNotFoundError,
SourceProcessingError,
SourceTimeoutError,
)
from .conftest import create_mock_client, patch_client_for_module
@ -630,3 +636,428 @@ class TestSourceCommandsExist:
assert result.exit_code == 0
assert "SOURCE_ID" in result.output
assert "exit code" in result.output.lower()
# =============================================================================
# SOURCE ADD AUTO-DETECT TESTS
# =============================================================================
class TestSourceAddAutoDetect:
    """'source add' without --type should infer the source kind from the argument."""

    @staticmethod
    def _run_add(runner, fake, args):
        # Invoke 'source add' with the client class and token fetch patched in.
        with patch_client_for_module("source") as client_cls, patch(
            "notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock
        ) as token_fetch:
            client_cls.return_value = fake
            token_fetch.return_value = ("csrf", "session")
            return runner.invoke(cli, ["source", "add", *args])

    def test_source_add_autodetect_file(self, runner, mock_auth, tmp_path):
        """A real file path with no --type is treated as a file source."""
        test_file = tmp_path / "notes.txt"
        test_file.write_text("Some file content")
        fake = create_mock_client()
        fake.sources.add_file = AsyncMock(
            return_value=Source(id="src_file", title="notes.txt")
        )
        outcome = self._run_add(runner, fake, [str(test_file), "-n", "nb_123"])
        assert outcome.exit_code == 0
        fake.sources.add_file.assert_called_once()

    def test_source_add_autodetect_plain_text(self, runner, mock_auth):
        """Plain text (not a URL or existing path) becomes a text source titled 'Pasted Text'."""
        fake = create_mock_client()
        fake.sources.add_text = AsyncMock(
            return_value=Source(id="src_text", title="Pasted Text")
        )
        outcome = self._run_add(
            runner, fake, ["This is just some plain text content", "-n", "nb_123"]
        )
        assert outcome.exit_code == 0
        fake.sources.add_text.assert_called_once()
        # Second positional argument to add_text is the title.
        assert fake.sources.add_text.call_args[0][1] == "Pasted Text"

    def test_source_add_autodetect_text_with_custom_title(self, runner, mock_auth):
        """--title overrides the default 'Pasted Text' title for auto-detected text."""
        fake = create_mock_client()
        fake.sources.add_text = AsyncMock(
            return_value=Source(id="src_text", title="Custom Title")
        )
        outcome = self._run_add(
            runner,
            fake,
            [
                "This is just some plain text content",
                "--title",
                "Custom Title",
                "-n",
                "nb_123",
            ],
        )
        assert outcome.exit_code == 0
        fake.sources.add_text.assert_called_once()
        assert fake.sources.add_text.call_args[0][1] == "Custom Title"
# =============================================================================
# SOURCE FULLTEXT TESTS
# =============================================================================
class TestSourceFulltext:
    """'source fulltext' display, truncation, file output, URL, and JSON modes."""

    @staticmethod
    def _run_fulltext(runner, fulltext, list_title, extra_args=()):
        # Invoke 'source fulltext src_123' with a client returning *fulltext*.
        # list_title is kept separate: one test deliberately uses differing titles.
        fake = create_mock_client()
        fake.sources.list = AsyncMock(
            return_value=[Source(id="src_123", title=list_title)]
        )
        fake.sources.get_fulltext = AsyncMock(return_value=fulltext)
        with patch_client_for_module("source") as client_cls, patch(
            "notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock
        ) as token_fetch:
            client_cls.return_value = fake
            token_fetch.return_value = ("csrf", "session")
            return runner.invoke(
                cli, ["source", "fulltext", "src_123", "-n", "nb_123", *extra_args]
            )

    def test_source_fulltext_console_output(self, runner, mock_auth):
        """Short content (<= 2000 chars) is displayed in full, untruncated."""
        fulltext = SourceFulltext(
            source_id="src_123",
            title="Test Source",
            content="This is the full text content.",
            char_count=30,
            url=None,
        )
        outcome = self._run_fulltext(runner, fulltext, "Test Source")
        assert outcome.exit_code == 0
        assert "src_123" in outcome.output
        assert "Test Source" in outcome.output
        assert "This is the full text content." in outcome.output
        # No truncation notice for short content.
        assert "more chars" not in outcome.output

    def test_source_fulltext_truncated_output(self, runner, mock_auth):
        """Long content (> 2000 chars) is truncated with a 'more chars' message."""
        fulltext = SourceFulltext(
            source_id="src_123",
            title="Long Source",
            content="A" * 3000,
            char_count=3000,
            url=None,
        )
        outcome = self._run_fulltext(runner, fulltext, "Test Source")
        assert outcome.exit_code == 0
        assert "more chars" in outcome.output

    def test_source_fulltext_save_to_file(self, runner, mock_auth, tmp_path):
        """-o flag writes the content to the given file."""
        output_file = tmp_path / "output.txt"
        content = "Full text content to save."
        fulltext = SourceFulltext(
            source_id="src_123",
            title="Test Source",
            content=content,
            char_count=len(content),
            url=None,
        )
        outcome = self._run_fulltext(
            runner, fulltext, "Test Source", ["-o", str(output_file)]
        )
        assert outcome.exit_code == 0
        assert "Saved" in outcome.output
        assert output_file.read_text(encoding="utf-8") == content

    def test_source_fulltext_json_output(self, runner, mock_auth):
        """--json emits a JSON object with the fulltext fields."""
        fulltext = SourceFulltext(
            source_id="src_123",
            title="Test Source",
            content="Some content",
            char_count=12,
            url=None,
        )
        outcome = self._run_fulltext(runner, fulltext, "Test Source", ["--json"])
        assert outcome.exit_code == 0
        payload = json.loads(outcome.output)
        assert payload["source_id"] == "src_123"
        assert payload["title"] == "Test Source"
        assert payload["content"] == "Some content"
        assert payload["char_count"] == 12

    def test_source_fulltext_with_url(self, runner, mock_auth):
        """The URL is shown when the fulltext carries one."""
        fulltext = SourceFulltext(
            source_id="src_123",
            title="Web Source",
            content="Web page content.",
            char_count=17,
            url="https://example.com/page",
        )
        outcome = self._run_fulltext(runner, fulltext, "Web Source")
        assert outcome.exit_code == 0
        assert "https://example.com/page" in outcome.output
# =============================================================================
# SOURCE WAIT TESTS
# =============================================================================
class TestSourceWait:
    """'source wait' exit codes and output for ready, not-found, error, and timeout."""

    @staticmethod
    def _run_wait(runner, wait_outcome, list_title="Test Source", extra_args=()):
        # Invoke 'source wait src_123'. wait_outcome is either a Source to return
        # from wait_until_ready or an exception instance to raise from it.
        fake = create_mock_client()
        fake.sources.list = AsyncMock(
            return_value=[Source(id="src_123", title=list_title)]
        )
        if isinstance(wait_outcome, Exception):
            fake.sources.wait_until_ready = AsyncMock(side_effect=wait_outcome)
        else:
            fake.sources.wait_until_ready = AsyncMock(return_value=wait_outcome)
        with patch_client_for_module("source") as client_cls, patch(
            "notebooklm.cli.helpers.fetch_tokens", new_callable=AsyncMock
        ) as token_fetch:
            client_cls.return_value = fake
            token_fetch.return_value = ("csrf", "session")
            return runner.invoke(
                cli, ["source", "wait", "src_123", "-n", "nb_123", *extra_args]
            )

    def test_source_wait_success(self, runner, mock_auth):
        """wait_until_ready returning a Source prints 'ready'."""
        ready = Source(id="src_123", title="Test Source", status=2)
        outcome = self._run_wait(runner, ready)
        assert outcome.exit_code == 0
        assert "ready" in outcome.output.lower()

    def test_source_wait_success_with_title(self, runner, mock_auth):
        """A titled source has its title printed with the 'ready' message."""
        ready = Source(id="src_123", title="My Source Title", status=2)
        outcome = self._run_wait(runner, ready, list_title="My Source Title")
        assert outcome.exit_code == 0
        assert "My Source Title" in outcome.output

    def test_source_wait_success_json(self, runner, mock_auth):
        """--json output on a successful wait."""
        ready = Source(id="src_123", title="Test Source", status=2)
        outcome = self._run_wait(runner, ready, extra_args=["--json"])
        assert outcome.exit_code == 0
        payload = json.loads(outcome.output)
        assert payload["source_id"] == "src_123"
        assert payload["status"] == "ready"

    def test_source_wait_not_found(self, runner, mock_auth):
        """SourceNotFoundError exits with code 1 and a 'not found' message."""
        outcome = self._run_wait(runner, SourceNotFoundError("src_123"))
        assert outcome.exit_code == 1
        assert "not found" in outcome.output.lower()

    def test_source_wait_not_found_json(self, runner, mock_auth):
        """--json on SourceNotFoundError emits status 'not_found' and exits 1."""
        outcome = self._run_wait(
            runner, SourceNotFoundError("src_123"), extra_args=["--json"]
        )
        assert outcome.exit_code == 1
        payload = json.loads(outcome.output)
        assert payload["status"] == "not_found"
        assert payload["source_id"] == "src_123"

    def test_source_wait_processing_error(self, runner, mock_auth):
        """SourceProcessingError exits with code 1 and a failure message."""
        outcome = self._run_wait(runner, SourceProcessingError("src_123", status=3))
        assert outcome.exit_code == 1
        assert "processing failed" in outcome.output.lower()

    def test_source_wait_processing_error_json(self, runner, mock_auth):
        """--json on SourceProcessingError emits status 'error' and exits 1."""
        outcome = self._run_wait(
            runner, SourceProcessingError("src_123", status=3), extra_args=["--json"]
        )
        assert outcome.exit_code == 1
        payload = json.loads(outcome.output)
        assert payload["status"] == "error"
        assert payload["source_id"] == "src_123"
        assert payload["status_code"] == 3

    def test_source_wait_timeout(self, runner, mock_auth):
        """SourceTimeoutError exits with code 2 and a timeout message."""
        outcome = self._run_wait(
            runner, SourceTimeoutError("src_123", timeout=30.0, last_status=1)
        )
        assert outcome.exit_code == 2
        assert "timeout" in outcome.output.lower()

    def test_source_wait_timeout_json(self, runner, mock_auth):
        """--json on SourceTimeoutError emits status 'timeout' and exits 2."""
        outcome = self._run_wait(
            runner,
            SourceTimeoutError("src_123", timeout=30.0, last_status=1),
            extra_args=["--json"],
        )
        assert outcome.exit_code == 2
        payload = json.loads(outcome.output)
        assert payload["status"] == "timeout"
        assert payload["source_id"] == "src_123"
        assert payload["timeout_seconds"] == 30
        assert payload["last_status_code"] == 1