
Commit bb41c54 (parent: a93c2bc)

chore: format
7 files changed: +225 -124 lines changed

extensions/cli/src/commands/chat.ts

Lines changed: 4 additions & 1 deletion
@@ -174,7 +174,10 @@ async function handleAutoCompaction(
   // Custom callbacks for headless mode console output
   const callbacks = {
     onSystemMessage: (message: string) => {
-      if (message.includes("Auto-compacting") || message.includes("Approaching")) {
+      if (
+        message.includes("Auto-compacting") ||
+        message.includes("Approaching")
+      ) {
         if (!isHeadless) {
           console.info(chalk.yellow(`\n${message}`));
         } else if (format === "json") {

extensions/cli/src/compaction.infiniteLoop.test.ts

Lines changed: 37 additions & 25 deletions
@@ -24,25 +24,29 @@ describe("compaction infinite loop prevention", () => {
     defaultCompletionOptions: {
       maxTokens: 1000,
       contextLength: 4000,
-    }
+    },
   } as ModelConfig;
 
   const mockLlmApi = {} as BaseLlmApi;
 
   it("should not loop infinitely when pruning doesn't reduce history size", async () => {
-    const { countChatHistoryTokens, getModelContextLimit } = await import("./util/tokenizer.js");
+    const { countChatHistoryTokens, getModelContextLimit } = await import(
+      "./util/tokenizer.js"
+    );
     const mockStreamResponse = vi.mocked(streamChatResponse);
     const mockCountTokens = vi.mocked(countChatHistoryTokens);
     const mockGetContextLimit = vi.mocked(getModelContextLimit);
 
     // Setup mocks
     mockGetContextLimit.mockReturnValue(4000);
     mockCountTokens.mockReturnValue(5000); // Always too big
-    mockStreamResponse.mockImplementation(async (history, model, api, controller, callbacks) => {
-      callbacks?.onContent?.("Summary");
-      callbacks?.onContentComplete?.();
-      return "Summary";
-    });
+    mockStreamResponse.mockImplementation(
+      async (history, model, api, controller, callbacks) => {
+        callbacks?.onContent?.("Summary");
+        callbacks?.onContentComplete?.();
+        return "Summary";
+      },
+    );
 
     // History that can't be pruned further (only system message)
     const history: ChatCompletionMessageParam[] = [
@@ -51,26 +55,30 @@ describe("compaction infinite loop prevention", () => {
 
     // This should not hang - it should break out of the loop
     const result = await compactChatHistory(history, mockModel, mockLlmApi);
-
+
     // Should complete successfully even though token count is still too high
     expect(result.compactedHistory).toBeDefined();
     expect(mockCountTokens).toHaveBeenCalled();
   });
 
   it("should not loop infinitely with history ending in assistant message", async () => {
-    const { countChatHistoryTokens, getModelContextLimit } = await import("./util/tokenizer.js");
+    const { countChatHistoryTokens, getModelContextLimit } = await import(
+      "./util/tokenizer.js"
+    );
     const mockStreamResponse = vi.mocked(streamChatResponse);
     const mockCountTokens = vi.mocked(countChatHistoryTokens);
     const mockGetContextLimit = vi.mocked(getModelContextLimit);
 
     // Setup mocks
     mockGetContextLimit.mockReturnValue(4000);
     mockCountTokens.mockReturnValue(5000); // Always too big
-    mockStreamResponse.mockImplementation(async (history, model, api, controller, callbacks) => {
-      callbacks?.onContent?.("Summary");
-      callbacks?.onContentComplete?.();
-      return "Summary";
-    });
+    mockStreamResponse.mockImplementation(
+      async (history, model, api, controller, callbacks) => {
+        callbacks?.onContent?.("Summary");
+        callbacks?.onContentComplete?.();
+        return "Summary";
+      },
+    );
 
     // History that ends with assistant - pruning won't change it
     const history: ChatCompletionMessageParam[] = [
@@ -81,19 +89,21 @@ describe("compaction infinite loop prevention", () => {
 
     // This should not hang
     const result = await compactChatHistory(history, mockModel, mockLlmApi);
-
+
     expect(result.compactedHistory).toBeDefined();
   });
 
   it("should successfully prune when pruning actually reduces size", async () => {
-    const { countChatHistoryTokens, getModelContextLimit } = await import("./util/tokenizer.js");
+    const { countChatHistoryTokens, getModelContextLimit } = await import(
+      "./util/tokenizer.js"
+    );
     const mockStreamResponse = vi.mocked(streamChatResponse);
     const mockCountTokens = vi.mocked(countChatHistoryTokens);
     const mockGetContextLimit = vi.mocked(getModelContextLimit);
 
     // Setup mocks
     mockGetContextLimit.mockReturnValue(4000);
-
+
     // Mock token counting to show reduction after pruning
     let callCount = 0;
     mockCountTokens.mockImplementation(() => {
@@ -102,12 +112,14 @@ describe("compaction infinite loop prevention", () => {
       if (callCount === 2) return 3000; // After pruning, fits
       return 2000; // Subsequent calls
     });
-
-    mockStreamResponse.mockImplementation(async (history, model, api, controller, callbacks) => {
-      callbacks?.onContent?.("Summary");
-      callbacks?.onContentComplete?.();
-      return "Summary";
-    });
+
+    mockStreamResponse.mockImplementation(
+      async (history, model, api, controller, callbacks) => {
+        callbacks?.onContent?.("Summary");
+        callbacks?.onContentComplete?.();
+        return "Summary";
+      },
+    );
 
     // History that can be successfully pruned
     const history: ChatCompletionMessageParam[] = [
@@ -118,9 +130,9 @@ describe("compaction infinite loop prevention", () => {
     ];
 
     const result = await compactChatHistory(history, mockModel, mockLlmApi);
-
+
     expect(result.compactedHistory).toBeDefined();
     // The function will call countTokens multiple times during the process
     expect(mockCountTokens).toHaveBeenCalled();
   });
-});
+});

extensions/cli/src/compaction.pruneLastMessage.test.ts

Lines changed: 28 additions & 12 deletions
@@ -53,16 +53,18 @@ describe("pruneLastMessage", () => {
     ];
 
     const result = pruneLastMessage(history);
-    expect(result).toEqual([
-      { role: "system", content: "System" },
-    ]);
+    expect(result).toEqual([{ role: "system", content: "System" }]);
   });
 
   it("should remove assistant+tool sequence when second-to-last is assistant with tool calls", () => {
     const history: ChatCompletionMessageParam[] = [
       { role: "system", content: "System" },
       { role: "user", content: "Run command" },
-      { role: "assistant", content: "Running...", tool_calls: [{ id: "1" } as any] },
+      {
+        role: "assistant",
+        content: "Running...",
+        tool_calls: [{ id: "1" } as any],
+      },
       { role: "tool", content: "Command output", tool_call_id: "1" },
     ];
 
@@ -95,16 +97,18 @@ describe("pruneLastMessage", () => {
     ];
 
     const result = pruneLastMessage(history);
-    expect(result).toEqual([
-      { role: "system", content: "System" },
-    ]);
+    expect(result).toEqual([{ role: "system", content: "System" }]);
   });
 
   it("should handle tool call sequences by removing user after tool", () => {
     const history: ChatCompletionMessageParam[] = [
       { role: "system", content: "System" },
       { role: "user", content: "Do something" },
-      { role: "assistant", content: "I'll help", tool_calls: [{ id: "1" } as any] },
+      {
+        role: "assistant",
+        content: "I'll help",
+        tool_calls: [{ id: "1" } as any],
+      },
       { role: "tool", content: "Tool result", tool_call_id: "1" },
       { role: "user", content: "Follow up question" },
     ];
@@ -113,7 +117,11 @@ describe("pruneLastMessage", () => {
     expect(result).toEqual([
       { role: "system", content: "System" },
      { role: "user", content: "Do something" },
-      { role: "assistant", content: "I'll help", tool_calls: [{ id: "1" } as any] },
+      {
+        role: "assistant",
+        content: "I'll help",
+        tool_calls: [{ id: "1" } as any],
+      },
       { role: "tool", content: "Tool result", tool_call_id: "1" },
     ]);
   });
@@ -134,7 +142,11 @@ describe("pruneLastMessage", () => {
       { role: "user", content: "Request 1" },
       { role: "assistant", content: "Response 1" },
       { role: "user", content: "Request 2" },
-      { role: "assistant", content: "Using tool", tool_calls: [{ id: "1" } as any] },
+      {
+        role: "assistant",
+        content: "Using tool",
+        tool_calls: [{ id: "1" } as any],
+      },
       { role: "tool", content: "Tool result", tool_call_id: "1" },
       { role: "assistant", content: "Final response" },
       { role: "user", content: "Follow up 1" },
@@ -147,7 +159,11 @@ describe("pruneLastMessage", () => {
       { role: "user", content: "Request 1" },
       { role: "assistant", content: "Response 1" },
       { role: "user", content: "Request 2" },
-      { role: "assistant", content: "Using tool", tool_calls: [{ id: "1" } as any] },
+      {
+        role: "assistant",
+        content: "Using tool",
+        tool_calls: [{ id: "1" } as any],
+      },
       { role: "tool", content: "Tool result", tool_call_id: "1" },
       { role: "assistant", content: "Final response" },
     ]);
@@ -166,4 +182,4 @@ describe("pruneLastMessage", () => {
       { role: "assistant", content: "Hi" },
     ]);
   });
-});
+});
