import type { LanguageModelV1 } from 'ai';
import type { ProviderInfo, ProviderConfig, ModelInfo } from './types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import { LLMManager } from './manager';

export abstract class BaseProvider implements ProviderInfo {
  abstract name: string;
  abstract staticModels: ModelInfo[];
  abstract config: ProviderConfig;

  cachedDynamicModels?: {
    cacheId: string;
    models: ModelInfo[];
  };

  getApiKeyLink?: string;
  labelForGetApiKey?: string;
  icon?: string;
  /**
   * Resolves the provider's base URL and API key, preferring user settings,
   * then server env, process env, and the LLMManager env, and finally the
   * provider's static config.
   */
  getProviderBaseUrlAndKey(options: {
    apiKeys?: Record<string, string>;
    providerSettings?: IProviderSetting;
    serverEnv?: Record<string, string>;
    defaultBaseUrlKey: string;
    defaultApiTokenKey: string;
  }) {
    const { apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options;

    let settingsBaseUrl = providerSettings?.baseUrl;
    const manager = LLMManager.getInstance();

    // Treat an empty string from settings as "not configured".
    if (settingsBaseUrl?.length === 0) {
      settingsBaseUrl = undefined;
    }

    const baseUrlKey = this.config.baseUrlKey || defaultBaseUrlKey;
    let baseUrl =
      settingsBaseUrl ||
      serverEnv?.[baseUrlKey] ||
      process?.env?.[baseUrlKey] ||
      manager.env?.[baseUrlKey] ||
      this.config.baseUrl;

    // Strip a trailing slash so callers can safely append paths.
    if (baseUrl && baseUrl.endsWith('/')) {
      baseUrl = baseUrl.slice(0, -1);
    }

    const apiTokenKey = this.config.apiTokenKey || defaultApiTokenKey;
    const apiKey =
      apiKeys?.[this.name] || serverEnv?.[apiTokenKey] || process?.env?.[apiTokenKey] || manager.env?.[apiTokenKey];

    return {
      baseUrl,
      apiKey,
    };
  }
  /**
   * Returns the cached dynamic models if the cache key still matches the
   * current options; otherwise invalidates the cache and returns null.
   */
  getModelsFromCache(options: {
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
    serverEnv?: Record<string, string>;
  }): ModelInfo[] | null {
    if (!this.cachedDynamicModels) {
      // console.log('no dynamic models', this.name);
      return null;
    }

    const cacheKey = this.cachedDynamicModels.cacheId;
    const generatedCacheKey = this.getDynamicModelsCacheKey(options);

    if (cacheKey !== generatedCacheKey) {
      // console.log('cache key mismatch', this.name, cacheKey, generatedCacheKey);
      this.cachedDynamicModels = undefined;
      return null;
    }

    return this.cachedDynamicModels.models;
  }
  /** Builds a cache key from the inputs that can affect dynamic model discovery. */
  getDynamicModelsCacheKey(options: {
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
    serverEnv?: Record<string, string>;
  }) {
    return JSON.stringify({
      apiKeys: options.apiKeys?.[this.name],
      providerSettings: options.providerSettings?.[this.name],
      serverEnv: options.serverEnv,
    });
  }
  storeDynamicModels(
    options: {
      apiKeys?: Record<string, string>;
      providerSettings?: Record<string, IProviderSetting>;
      serverEnv?: Record<string, string>;
    },
    models: ModelInfo[],
  ) {
    const cacheId = this.getDynamicModelsCacheKey(options);

    // console.log('caching dynamic models', this.name, cacheId);
    this.cachedDynamicModels = {
      cacheId,
      models,
    };
  }
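
  /*
   * Cache flow sketch (illustrative, not part of this module): one way a
   * caller might combine the cache helpers above with the optional
   * getDynamicModels hook declared below. `options` is assumed to carry
   * apiKeys/providerSettings/serverEnv as in the method signatures above.
   *
   *   const cached = provider.getModelsFromCache(options);
   *   if (cached) {
   *     return cached;
   *   }
   *
   *   const models =
   *     (await provider.getDynamicModels?.(
   *       options.apiKeys,
   *       options.providerSettings?.[provider.name],
   *       options.serverEnv,
   *     )) ?? [];
   *
   *   provider.storeDynamicModels(options, models);
   *   return models;
   */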
  // Optional hook: providers that can discover models at runtime implement this.
  getDynamicModels?(
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv?: Record<string, string>,
  ): Promise<ModelInfo[]>;

  abstract getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1;
}
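
/*
 * Subclass sketch (illustrative, not part of this module): how a concrete
 * provider typically wires getProviderBaseUrlAndKey into getModelInstance.
 * All names here (ExampleProvider, EXAMPLE_API_BASE_URL, EXAMPLE_API_KEY,
 * api.example.com) are hypothetical placeholders.
 *
 *   class ExampleProvider extends BaseProvider {
 *     name = 'Example';
 *     staticModels: ModelInfo[] = [];
 *     config = { apiTokenKey: 'EXAMPLE_API_KEY' };
 *
 *     getModelInstance(options: {
 *       model: string;
 *       serverEnv: Env;
 *       apiKeys?: Record<string, string>;
 *       providerSettings?: Record<string, IProviderSetting>;
 *     }): LanguageModelV1 {
 *       const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
 *         apiKeys: options.apiKeys,
 *         providerSettings: options.providerSettings?.[this.name],
 *         serverEnv: options.serverEnv as unknown as Record<string, string>,
 *         defaultBaseUrlKey: 'EXAMPLE_API_BASE_URL',
 *         defaultApiTokenKey: 'EXAMPLE_API_KEY',
 *       });
 *
 *       if (!baseUrl) {
 *         throw new Error(`Missing base URL for ${this.name} provider`);
 *       }
 *
 *       return getOpenAILikeModel(baseUrl, apiKey, options.model);
 *     }
 *   }
 */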
type OptionalApiKey = string | undefined;

/** Creates a LanguageModelV1 for any OpenAI-compatible endpoint via @ai-sdk/openai. */
export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL,
    apiKey,
  });

  return openai(model);
}