Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,43 @@
],
"type": "llm"
},
{
"input": {
"kind": "undefined"
},
"metadata": {
"operation": "stream-fixture"
},
"metrics": {
"has_time_to_first_token": false
},
"name": "openai-stream-fixture-operation",
"output": null,
"type": null
},
{
"input": [
{
"content_kind": "string",
"role": "user"
}
],
"metadata": {
"model": "gpt-4o-mini-2024-07-18",
"provider": "openai"
},
"metrics": {
"has_time_to_first_token": true
},
"name": "Chat Completion",
"output": [
{
"json_keys": [],
"role": "assistant"
}
],
"type": "llm"
},
{
"input": {
"kind": "undefined"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,31 @@
"name": "Chat Completion",
"type": "llm"
},
{
"has_input": false,
"has_output": false,
"metadata": {
"operation": "stream-fixture"
},
"metrics": {
"has_time_to_first_token": false
},
"name": "openai-stream-fixture-operation",
"type": null
},
{
"has_input": true,
"has_output": true,
"metadata": {
"model": "gpt-4o-mini-2024-07-18",
"provider": "openai"
},
"metrics": {
"has_time_to_first_token": true
},
"name": "Chat Completion",
"type": "llm"
},
{
"has_input": false,
"has_output": false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,43 @@
],
"type": "llm"
},
{
"input": {
"kind": "undefined"
},
"metadata": {
"operation": "stream-fixture"
},
"metrics": {
"has_time_to_first_token": false
},
"name": "openai-stream-fixture-operation",
"output": null,
"type": null
},
{
"input": [
{
"content_kind": "string",
"role": "user"
}
],
"metadata": {
"model": "gpt-4o-mini-2024-07-18",
"provider": "openai"
},
"metrics": {
"has_time_to_first_token": true
},
"name": "Chat Completion",
"output": [
{
"json_keys": [],
"role": "assistant"
}
],
"type": "llm"
},
{
"input": {
"kind": "undefined"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,31 @@
"name": "Chat Completion",
"type": "llm"
},
{
"has_input": false,
"has_output": false,
"metadata": {
"operation": "stream-fixture"
},
"metrics": {
"has_time_to_first_token": false
},
"name": "openai-stream-fixture-operation",
"type": null
},
{
"has_input": true,
"has_output": true,
"metadata": {
"model": "gpt-4o-mini-2024-07-18",
"provider": "openai"
},
"metrics": {
"has_time_to_first_token": true
},
"name": "Chat Completion",
"type": "llm"
},
{
"has_input": false,
"has_output": false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,43 @@
],
"type": "llm"
},
{
"input": {
"kind": "undefined"
},
"metadata": {
"operation": "stream-fixture"
},
"metrics": {
"has_time_to_first_token": false
},
"name": "openai-stream-fixture-operation",
"output": null,
"type": null
},
{
"input": [
{
"content_kind": "string",
"role": "user"
}
],
"metadata": {
"model": "gpt-4o-mini-2024-07-18",
"provider": "openai"
},
"metrics": {
"has_time_to_first_token": true
},
"name": "Chat Completion",
"output": [
{
"json_keys": [],
"role": "assistant"
}
],
"type": "llm"
},
{
"input": {
"kind": "undefined"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,31 @@
"name": "Chat Completion",
"type": "llm"
},
{
"has_input": false,
"has_output": false,
"metadata": {
"operation": "stream-fixture"
},
"metrics": {
"has_time_to_first_token": false
},
"name": "openai-stream-fixture-operation",
"type": null
},
{
"has_input": true,
"has_output": true,
"metadata": {
"model": "gpt-4o-mini-2024-07-18",
"provider": "openai"
},
"metrics": {
"has_time_to_first_token": true
},
"name": "Chat Completion",
"type": "llm"
},
{
"has_input": false,
"has_output": false,
Expand Down
32 changes: 32 additions & 0 deletions e2e/scenarios/openai-instrumentation/assertions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,28 @@ type OperationSpec = {
validate?: (span: CapturedLogEvent | undefined) => void;
};

/**
 * Narrows an unknown value to a plain object record.
 * Arrays and null are rejected; anything that is not a non-null,
 * non-array object yields undefined.
 */
function asRecord(value: unknown): Record<string, unknown> | undefined {
  if (value === null || Array.isArray(value) || typeof value !== "object") {
    return undefined;
  }
  return value as Record<string, unknown>;
}

/**
 * Asserts that the stream-fixture span's captured output preserved the
 * streamed logprobs (one entry each for tokens "NO" and "PE") and that
 * the assistant message's refusal was aggregated into "NOPE".
 *
 * NOTE(review): assumes span.output mirrors the OpenAI choices array
 * shape (choice.logprobs / choice.message) — confirm against the
 * snapshot fixtures if the capture format ever changes.
 */
function validateStreamFixtureOutput(span: CapturedLogEvent | undefined): void {
  const output = span?.output;
  const choice = asRecord(Array.isArray(output) ? output[0] : undefined);
  const message = asRecord(choice?.message);

  const expectedTokens = ["NO", "PE"].map((token) =>
    expect.objectContaining({ token }),
  );
  expect(choice?.logprobs).toEqual(
    expect.objectContaining({
      content: expect.arrayContaining(expectedTokens),
    }),
  );
  expect(message?.refusal).toBe("NOPE");
}

const OPERATION_SPECS: readonly OperationSpec[] = [
{
childNames: ["Chat Completion"],
Expand Down Expand Up @@ -77,6 +99,16 @@ const OPERATION_SPECS: readonly OperationSpec[] = [
testName:
"captures trace for streamed chat completion with response metadata",
},
{
childNames: ["Chat Completion"],
expectsOutput: true,
expectsTimeToFirstToken: true,
name: "openai-stream-fixture-operation",
operation: "stream-fixture",
testName:
"captures trace for streamed chat completion with logprobs and refusal",
validate: validateStreamFixtureOutput,
},
{
childNames: ["Chat Completion"],
expectsOutput: true,
Expand Down
51 changes: 51 additions & 0 deletions e2e/scenarios/openai-instrumentation/scenario.impl.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,16 @@ const EMBEDDING_MODEL = "text-embedding-3-small";
const MODERATION_MODEL = "omni-moderation-2024-09-26";
const ROOT_NAME = "openai-instrumentation-root";
const SCENARIO_NAME = "openai-instrumentation";
// Canned server-sent-events body for a mocked streamed chat completion.
// Three data events followed by the [DONE] sentinel; the interleaved empty
// strings produce the blank line that terminates each SSE event:
//   1) role-only delta (assistant), logprobs null
//   2) refusal delta "NO" with a logprob entry for token "NO"
//   3) refusal delta "PE" with token "PE" and finish_reason "stop"
const MOCK_CHAT_STREAM_SSE = [
'data: {"id":"chatcmpl-fixture","object":"chat.completion.chunk","created":1740000000,"model":"gpt-4o-mini","choices":[{"index":0,"delta":{"role":"assistant"},"logprobs":null,"finish_reason":null}]}',
"",
'data: {"id":"chatcmpl-fixture","object":"chat.completion.chunk","created":1740000000,"model":"gpt-4o-mini","choices":[{"index":0,"delta":{"refusal":"NO"},"logprobs":{"content":[{"token":"NO","logprob":-0.1,"bytes":[78,79],"top_logprobs":[{"token":"NO","logprob":-0.1,"bytes":[78,79]}]}]},"finish_reason":null}]}',
"",
'data: {"id":"chatcmpl-fixture","object":"chat.completion.chunk","created":1740000000,"model":"gpt-4o-mini","choices":[{"index":0,"delta":{"refusal":"PE"},"logprobs":{"content":[{"token":"PE","logprob":-0.2,"bytes":[80,69],"top_logprobs":[{"token":"PE","logprob":-0.2,"bytes":[80,69]}]}]},"finish_reason":"stop"}]}',
"",
"data: [DONE]",
"",
].join("\n");

const CHAT_PARSE_SCHEMA = {
type: "object",
Expand Down Expand Up @@ -53,6 +63,24 @@ function parseMajorVersion(version) {
return Number.isNaN(major) ? null : major;
}

/**
 * Builds an OpenAI client whose fetch always answers with the canned SSE
 * stream fixture, so the streamed-completion code path can be exercised
 * without any network access. The API key falls back to a dummy value and
 * the base URL points at a non-routable test host. When the caller supplies
 * options.decorateClient, the client is passed through it before returning.
 */
function createMockStreamingClient(options) {
  const respondWithFixture = async () =>
    new Response(MOCK_CHAT_STREAM_SSE, {
      headers: { "content-type": "text/event-stream" },
      status: 200,
    });

  const client = new options.OpenAI({
    apiKey: process.env.OPENAI_API_KEY ?? "test-openai-key",
    baseURL: "https://example.test/v1",
    fetch: respondWithFixture,
  });

  return options.decorateClient ? options.decorateClient(client) : client;
}

export async function runOpenAIInstrumentationScenario(options) {
const baseClient = new options.OpenAI({
apiKey: process.env.OPENAI_API_KEY,
Expand All @@ -61,6 +89,7 @@ export async function runOpenAIInstrumentationScenario(options) {
const client = options.decorateClient
? options.decorateClient(baseClient)
: baseClient;
const streamFixtureClient = createMockStreamingClient(options);
const openAIMajorVersion = parseMajorVersion(options.openaiSdkVersion);
const shouldCheckPrivateFieldMethods =
typeof options.decorateClient === "function" &&
Expand Down Expand Up @@ -169,6 +198,28 @@ export async function runOpenAIInstrumentationScenario(options) {
},
);

await runOperation(
"openai-stream-fixture-operation",
"stream-fixture",
async () => {
const chatStream = await streamFixtureClient.chat.completions.create({
model: OPENAI_MODEL,
messages: [
{
role: "user",
content: "Reply with a refusal stream fixture.",
},
],
stream: true,
logprobs: true,
top_logprobs: 2,
max_tokens: 12,
temperature: 0,
});
await collectAsync(chatStream);
},
);

await runOperation("openai-parse-operation", "parse", async () => {
const parseArgs = {
messages: [{ role: "user", content: "What is 2 + 2?" }],
Expand Down
Loading
Loading