Skip to content

Commit

Permalink
Update to semantic-conventions 1.26.0
Browse files · Browse the repository at this point in the history
  • Loading branch information
crossoverJie committed Jun 3, 2024
1 parent af962ee commit c0d1218
Show file tree
Hide file tree
Showing 2 changed files with 104 additions and 8 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/

package io.opentelemetry.semconv.incubating;

import static io.opentelemetry.api.common.AttributeKey.doubleKey;
import static io.opentelemetry.api.common.AttributeKey.longKey;
import static io.opentelemetry.api.common.AttributeKey.stringArrayKey;
import static io.opentelemetry.api.common.AttributeKey.stringKey;

import io.opentelemetry.api.common.AttributeKey;
import java.util.List;

// DO NOT EDIT, this is an Auto-generated file from
// buildscripts/templates/SemanticAttributes.java.j2
@SuppressWarnings("unused")
public final class GenAiIncubatingAttributes {

  /**
   * The full response received from the LLM.
   *
   * <p>Completions SHOULD preferably be serialized as a JSON string matching the <a
   * href="https://platform.openai.com/docs/guides/text-generation">OpenAI messages format</a>.
   */
  public static final AttributeKey<String> GEN_AI_COMPLETION = stringKey("gen_ai.completion");

  /**
   * The full prompt sent to an LLM.
   *
   * <p>Prompts SHOULD preferably be serialized as a JSON string matching the <a
   * href="https://platform.openai.com/docs/guides/text-generation">OpenAI messages format</a>.
   */
  public static final AttributeKey<String> GEN_AI_PROMPT = stringKey("gen_ai.prompt");

  /** Upper bound on the number of tokens the LLM generates for a request. */
  public static final AttributeKey<Long> GEN_AI_REQUEST_MAX_TOKENS =
      longKey("gen_ai.request.max_tokens");

  /** Name of the LLM model the request is being made to. */
  public static final AttributeKey<String> GEN_AI_REQUEST_MODEL = stringKey("gen_ai.request.model");

  /** Sampling temperature configured for the LLM request. */
  public static final AttributeKey<Double> GEN_AI_REQUEST_TEMPERATURE =
      doubleKey("gen_ai.request.temperature");

  /** Nucleus-sampling (top_p) setting configured for the LLM request. */
  public static final AttributeKey<Double> GEN_AI_REQUEST_TOP_P = doubleKey("gen_ai.request.top_p");

  /**
   * Reasons the model stopped generating tokens; one entry per generation received.
   */
  public static final AttributeKey<List<String>> GEN_AI_RESPONSE_FINISH_REASONS =
      stringArrayKey("gen_ai.response.finish_reasons");

  /** Unique identifier of the completion. */
  public static final AttributeKey<String> GEN_AI_RESPONSE_ID = stringKey("gen_ai.response.id");

  /** Name of the LLM model the response was generated from. */
  public static final AttributeKey<String> GEN_AI_RESPONSE_MODEL =
      stringKey("gen_ai.response.model");

  /**
   * The Generative AI product as identified by the client instrumentation.
   *
   * <p>The actual GenAI product may differ from the one the client identifies: for example, when
   * OpenAI client libraries are used to talk to Mistral, {@code gen_ai.system} is still set to
   * {@code openai}, based on the instrumentation's best knowledge.
   */
  public static final AttributeKey<String> GEN_AI_SYSTEM = stringKey("gen_ai.system");

  /** Number of tokens used in the LLM response (completion). */
  public static final AttributeKey<Long> GEN_AI_USAGE_COMPLETION_TOKENS =
      longKey("gen_ai.usage.completion_tokens");

  /** Number of tokens used in the LLM prompt. */
  public static final AttributeKey<Long> GEN_AI_USAGE_PROMPT_TOKENS =
      longKey("gen_ai.usage.prompt_tokens");

  // Enum definitions
  /** Values for {@link #GEN_AI_SYSTEM}. */
  public static final class GenAiSystemValues {
    /** OpenAI. */
    public static final String OPENAI = "openai";

    // Not instantiable: constant holder only.
    private GenAiSystemValues() {}
  }

  // Not instantiable: constant holder only.
  private GenAiIncubatingAttributes() {}
}
Original file line number Diff line number Diff line change
Expand Up @@ -37,14 +37,6 @@ public final class MessagingIncubatingAttributes {
  /**
   * A unique identifier for the client that consumes or produces a message.
   *
   * <p>Recorded under the key {@code messaging.client.id}, which supersedes the deprecated {@code
   * messaging.client_id} key.
   */
  public static final AttributeKey<String> MESSAGING_CLIENT_ID = stringKey("messaging.client.id");

/**
* Deprecated, use {@code messaging.client.id} instead.
*
* @deprecated Deprecated, use `messaging.client.id` instead.
*/
@Deprecated
public static final AttributeKey<String> MESSAGING_CLIENT_ID = stringKey("messaging.client_id");

/**
* A boolean that is true if the message destination is anonymous (could be unnamed or have
* auto-generated name).
Expand Down

0 comments on commit c0d1218

Please sign in to comment.