Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
423 changes: 234 additions & 189 deletions README.md

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "@samchon/openapi",
"version": "5.1.0",
"description": "OpenAPI definitions and converters for 'typia' and 'nestia'.",
"version": "6.0.0",
"description": "Universal OpenAPI to LLM function calling schemas. Transform any Swagger/OpenAPI document into type-safe schemas for OpenAI, Claude, Qwen, and more.",
"main": "./lib/index.js",
"module": "./lib/index.mjs",
"typings": "./lib/index.d.ts",
Expand All @@ -26,7 +26,7 @@
"openai",
"chatgpt",
"claude",
"gemini",
"qwen",
"llama"
],
"repository": {
Expand Down
110 changes: 38 additions & 72 deletions src/HttpLlm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,13 @@ import { OpenApiV3 } from "./OpenApiV3";
import { OpenApiV3_1 } from "./OpenApiV3_1";
import { SwaggerV2 } from "./SwaggerV2";
import { HttpLlmComposer } from "./composers/HttpLlmApplicationComposer";
import { LlmSchemaComposer } from "./composers/LlmSchemaComposer";
import { HttpLlmFunctionFetcher } from "./http/HttpLlmFunctionFetcher";
import { IHttpConnection } from "./structures/IHttpConnection";
import { IHttpLlmApplication } from "./structures/IHttpLlmApplication";
import { IHttpLlmFunction } from "./structures/IHttpLlmFunction";
import { IHttpMigrateApplication } from "./structures/IHttpMigrateApplication";
import { IHttpResponse } from "./structures/IHttpResponse";
import { ILlmFunction } from "./structures/ILlmFunction";
import { ILlmSchema } from "./structures/ILlmSchema";
import { LlmDataMerger } from "./utils/LlmDataMerger";

/**
Expand All @@ -30,36 +28,28 @@ import { LlmDataMerger } from "./utils/LlmDataMerger";
* {@link HttpLlm.propagate HttpLlm.propagate()}.
*
* By the way, if you have configured the
* {@link IHttpLlmApplication.IOptions.separate} option to separate the
* parameters into human and LLM sides, you can merge these human and LLM sides'
* parameters into one through
* {@link HttpLlm.mergeParameters HttpLlm.mergeParameters()} before the actual
* LLM function call execution.
* {@link IHttpLlmApplication.IConfig.separate} option to separate the parameters
* into human and LLM sides, you can merge these human and LLM sides' parameters
* into one through {@link HttpLlm.mergeParameters HttpLlm.mergeParameters()}
* before the actual LLM function call execution.
*
* @author Jeongho Nam - https://github.com/samchon
*/
export namespace HttpLlm {
/* -----------------------------------------------------------
COMPOSERS
----------------------------------------------------------- */
/**
* Properties for the LLM function calling application composer.
*
* @template Model Target LLM model
*/
export interface IApplicationProps<Model extends ILlmSchema.Model> {
/** Target LLM model. */
model: Model;

/** Properties for the LLM function calling application composer. */
export interface IApplicationProps {
/** OpenAPI document to convert. */
document:
| OpenApi.IDocument
| SwaggerV2.IDocument
| OpenApiV3.IDocument
| OpenApiV3_1.IDocument;

/** Options for the LLM function calling schema conversion. */
options?: Partial<IHttpLlmApplication.IOptions<Model>>;
/** Configuration for the LLM function calling schema conversion. */
config?: Partial<IHttpLlmApplication.IConfig>;
}

/**
Expand All @@ -72,57 +62,44 @@ export namespace HttpLlm {
* converted to the {@link IHttpLlmFunction LLM function} type, and they would
* be used for the LLM function calling.
*
* If you have configured the {@link IHttpLlmApplication.IOptions.separate}
* If you have configured the {@link IHttpLlmApplication.IConfig.separate}
* option, every parameters in the {@link IHttpLlmFunction} would be separated
* into both human and LLM sides. In that case, you can merge these human and
* LLM sides' parameters into one through {@link HttpLlm.mergeParameters}
* before the actual LLM function call execution.
*
* Additionally, if you have configured the
* {@link IHttpLlmApplication.IOptions.keyword} as `true`, the number of
* {@link IHttpLlmFunction.parameters} are always 1 and the first parameter
* type is always {@link ILlmSchemaV3.IObject}. I recommend this option because
* LLM can understand the keyword arguments more easily.
*
* @param props Properties for composition
* @returns LLM function calling application
*/
export const application = <Model extends ILlmSchema.Model>(
props: IApplicationProps<Model>,
): IHttpLlmApplication<Model> => {
export const application = (
props: IApplicationProps,
): IHttpLlmApplication => {
// MIGRATE
const migrate: IHttpMigrateApplication = HttpMigration.application(
props.document,
);
const defaultConfig: ILlmSchema.IConfig<Model> =
LlmSchemaComposer.defaultConfig(props.model);
return HttpLlmComposer.application<Model>({
return HttpLlmComposer.application({
migrate,
model: props.model,
options: {
...Object.fromEntries(
Object.entries(defaultConfig).map(
([key, value]) =>
[key, (props.options as any)?.[key] ?? value] as const,
),
),
separate: props.options?.separate ?? null,
maxLength: props.options?.maxLength ?? 64,
equals: props.options?.equals ?? false,
} as any as IHttpLlmApplication.IOptions<Model>,
config: {
reference: props.config?.reference ?? true,
strict: props.config?.strict ?? false,
separate: props.config?.separate ?? null,
maxLength: props.config?.maxLength ?? 64,
equals: props.config?.equals ?? false,
},
});
};

/* -----------------------------------------------------------
FETCHERS
----------------------------------------------------------- */
/** Properties for the LLM function call. */
export interface IFetchProps<Model extends ILlmSchema.Model> {
export interface IFetchProps {
/** Application of the LLM function calling. */
application: IHttpLlmApplication<Model>;
application: IHttpLlmApplication;

/** LLM function schema to call. */
function: IHttpLlmFunction<ILlmSchema.Model>;
function: IHttpLlmFunction;

/** Connection info to the HTTP server. */
connection: IHttpConnection;
Expand All @@ -140,16 +117,12 @@ export namespace HttpLlm {
* sometimes).
*
* By the way, if you've configured the
* {@link IHttpLlmApplication.IOptions.separate}, so that the parameters are
* separated to human and LLM sides, you have to merge these humand and LLM
* {@link IHttpLlmApplication.IConfig.separate}, so that the parameters are
* separated to human and LLM sides, you have to merge these human and LLM
* sides' parameters into one through {@link HttpLlm.mergeParameters}
* function.
*
* About the {@link IHttpLlmApplication.IOptions.keyword} option, don't worry
* anything. This `HttmLlm.execute()` function will automatically recognize
* the keyword arguments and convert them to the proper sequence.
*
* For reference, if the target API endpoinnt responds none 200/201 status,
* For reference, if the target API endpoint responds none 200/201 status,
* this would be considered as an error and the {@link HttpError} would be
* thrown. If you don't want such a rule, you can use the
* {@link HttpLlm.propagate} function instead.
Expand All @@ -158,9 +131,8 @@ export namespace HttpLlm {
* @returns Return value (response body) from the API endpoint
* @throws HttpError when the API endpoint responds with a non-200/201 status
*/
export const execute = <Model extends ILlmSchema.Model>(
props: IFetchProps<Model>,
): Promise<unknown> => HttpLlmFunctionFetcher.execute<Model>(props);
export const execute = (props: IFetchProps): Promise<unknown> =>
HttpLlmFunctionFetcher.execute(props);

/**
* Propagate the LLM function call.
Expand All @@ -171,15 +143,11 @@ export namespace HttpLlm {
* sometimes).
*
* By the way, if you've configured the
* {@link IHttpLlmApplication.IOptions.separate}, so that the parameters are
* {@link IHttpLlmApplication.IConfig.separate}, so that the parameters are
* separated to human and LLM sides, you have to merge these human and LLM
* sides' parameters into one through {@link HttpLlm.mergeParameters}
* function.
*
* About the {@link IHttpLlmApplication.IOptions.keyword} option, don't worry
* anything. This `HttmLlm.propagate()` function will automatically recognize
* the keyword arguments and convert them to the proper sequence.
*
* For reference, the propagation means always returning the response from the
* API endpoint, even if the status is not 200/201. This is useful when you
* want to handle the response by yourself.
Expand All @@ -188,17 +156,16 @@ export namespace HttpLlm {
* @returns Response from the API endpoint
* @throws Error only when the connection is failed
*/
export const propagate = <Model extends ILlmSchema.Model>(
props: IFetchProps<Model>,
): Promise<IHttpResponse> => HttpLlmFunctionFetcher.propagate<Model>(props);
export const propagate = (props: IFetchProps): Promise<IHttpResponse> =>
HttpLlmFunctionFetcher.propagate(props);

/* -----------------------------------------------------------
MERGERS
----------------------------------------------------------- */
/** Properties for the parameters' merging. */
export interface IMergeProps<Model extends ILlmSchema.Model> {
export interface IMergeProps {
/** Metadata of the target function. */
function: ILlmFunction<Model>;
function: ILlmFunction;

/** Arguments composed by the LLM. */
llm: object | null;
Expand All @@ -210,22 +177,21 @@ export namespace HttpLlm {
/**
* Merge the parameters.
*
* If you've configured the {@link IHttpLlmApplication.IOptions.separate}
* If you've configured the {@link IHttpLlmApplication.IConfig.separate}
* option, so that the parameters are separated to human and LLM sides, you
* can merge these human and LLM sides' parameters into one through this
* `HttpLlm.mergeParameters()` function before the actual LLM function call
* wexecution.
* execution.
*
* On the contrary, if you've not configured the
* {@link IHttpLlmApplication.IOptions.separate} option, this function would
* {@link IHttpLlmApplication.IConfig.separate} option, this function would
* throw an error.
*
* @param props Properties for the parameters' merging
* @returns Merged parameter values
*/
export const mergeParameters = <Model extends ILlmSchema.Model>(
props: IMergeProps<Model>,
): object => LlmDataMerger.parameters(props);
export const mergeParameters = (props: IMergeProps): object =>
LlmDataMerger.parameters(props);

/**
* Merge two values.
Expand Down
2 changes: 1 addition & 1 deletion src/HttpMigration.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ import { OpenApi } from "./OpenApi";
import { OpenApiV3 } from "./OpenApiV3";
import { OpenApiV3_1 } from "./OpenApiV3_1";
import { SwaggerV2 } from "./SwaggerV2";
import { HttpMigrateApplicationComposer } from "./composers/migrate/HttpMigrateApplicationComposer";
import { HttpMigrateApplicationComposer } from "./composers/HttpMigrateApplicationComposer";
import { HttpMigrateRouteFetcher } from "./http/HttpMigrateRouteFetcher";
import { IHttpConnection } from "./structures/IHttpConnection";
import { IHttpMigrateApplication } from "./structures/IHttpMigrateApplication";
Expand Down
61 changes: 23 additions & 38 deletions src/McpLlm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,8 @@ import { OpenApiValidator } from "./utils/OpenApiValidator";
* @author Jeongho Nam - https://github.com/samchon
*/
export namespace McpLlm {
/**
* Properties for the LLM function calling application composer.
*
* @template Model Target LLM model
*/
export interface IApplicationProps<Model extends ILlmSchema.Model> {
/** Target LLM model. */
model: Model;

/** Properties for the LLM function calling application composer. */
export interface IApplicationProps {
/**
* List of tools.
*
Expand All @@ -49,8 +42,8 @@ export namespace McpLlm {
*/
tools: Array<IMcpTool>;

/** Options for the LLM function calling schema conversion. */
options?: Partial<IMcpLlmApplication.IOptions<Model>>;
/** Configuration for the LLM function calling schema conversion. */
config?: Partial<IMcpLlmApplication.IConfig>;
}

/**
Expand All @@ -72,19 +65,14 @@ export namespace McpLlm {
* @param props Properties for composition
* @returns LLM function calling application
*/
export const application = <Model extends ILlmSchema.Model>(
props: IApplicationProps<Model>,
): IMcpLlmApplication<Model> => {
const options: IMcpLlmApplication.IOptions<Model> = {
...Object.fromEntries(
Object.entries(LlmSchemaComposer.defaultConfig(props.model)).map(
([key, value]) =>
[key, (props.options as any)?.[key] ?? value] as const,
),
),
maxLength: props.options?.maxLength ?? 64,
} as IMcpLlmApplication.IOptions<Model>;
const functions: IMcpLlmFunction<Model>[] = [];
export const application = (props: IApplicationProps): IMcpLlmApplication => {
const config: IMcpLlmApplication.IConfig = {
reference: props.config?.reference ?? true,
strict: props.config?.strict ?? false,
maxLength: props.config?.maxLength ?? 64,
equals: props.config?.equals ?? false,
};
const functions: IMcpLlmFunction[] = [];
const errors: IMcpLlmApplication.IError[] = [];

props.tools.forEach((tool, i) => {
Expand Down Expand Up @@ -114,17 +102,15 @@ export namespace McpLlm {
}

// CONVERT TO LLM PARAMETERS
const parameters: IResult<
ILlmSchema.IParameters<Model>,
IOpenApiSchemaError
> = LlmSchemaComposer.parameters(props.model)({
config: options as any,
components,
schema: schema as
| OpenApi.IJsonSchema.IObject
| OpenApi.IJsonSchema.IReference,
accessor: `$input.tools[${i}].inputSchema`,
}) as IResult<ILlmSchema.IParameters<Model>, IOpenApiSchemaError>;
const parameters: IResult<ILlmSchema.IParameters, IOpenApiSchemaError> =
LlmSchemaComposer.parameters({
config,
components,
schema: schema as
| OpenApi.IJsonSchema.IObject
| OpenApi.IJsonSchema.IReference,
accessor: `$input.tools[${i}].inputSchema`,
});
if (parameters.success)
functions.push({
name: tool.name,
Expand All @@ -134,7 +120,7 @@ export namespace McpLlm {
components,
schema,
required: true,
equals: options.equals,
equals: config.equals,
}),
});
else
Expand All @@ -149,9 +135,8 @@ export namespace McpLlm {
});
});
return {
model: props.model,
functions,
options,
config,
errors,
};
};
Expand Down
Loading
Loading