AI plug issue with embeddings - v2

I tried to set up embeddings with OpenAI as a provider. But when I run the connectivity test, it always tells me it doesn't find the AI API key.

If I comment out the embeddings part, the connectivity test passes for the chat part.

Below is my setup (CONFIG page).

local openai_key = "sk-********************"

config.set { 
 ai = {
    -- Provider-level configuration
    providers = {
      openai = {
        apiKey = openai_key,
        -- Could also be something like: apiKey = config.get("ai.keys.OPENAI_API_KEY"),
        useProxy = false,
        preferredModels = {"gpt-4o", "gpt-4o-mini"}
      }
--      , ollama = {
--        baseUrl = "http://host.docker.internal:11434",
--        preferredModels = {"bge-m3"}
--     }
--      ,gemini = {
--        apiKey = "your-gemini-key",
--        preferredModels = {"gemini-2.0-flash"}
--      }
    },
    -- Default model to use (format: "provider:modelName")
    -- This is auto-selected on startup if no model is already selected on this client
    defaultTextModel = "openai:gpt-4o-mini",

    -- Embedding
    indexEmbeddings = true,
    --indexEmbeddingsExcludePages = {"my_passwords"},
    --indexEmbeddingsExcludeStrings = {"**user**:", "Daily Quote:"},
    embeddingModels = {
      -- Only the first model is currently used
      {
        name = "openaiemb",
        modelName = "text-embedding-3-small",
        provider = "openai",
 --       baseUrl = "http://localhost:11434",
        requireAuth = true,
        secretName = ai.keys,
        useProxy = false
      }
    },
    -- Chat settings
    chat = {
      -- Enable AI tools (read/write notes, search, etc.)
      enableTools = true,

      -- Parse [[wiki-links]] and include their content as context
      parseWikiLinks = true,

      -- Search embeddings for relevant context (requires indexEmbeddings)
      searchEmbeddings = true,
      bakeMessages = true,
      -- Default agent to use (by name)
      defaultAgent = nil,
      -- When using chat, the userInformation and userInstructions
      -- are included in the system prompt.
      userInformation = "I'm a italian IT Architect who likes taking notes.",
      userInstructions = "Please give short and concise responses. When providing code, do so in python unless requested otherwise with comment in line in english. All other word must be in italian. ",
      -- Dynamic context (Lua expression evaluated at chat time)
      customContext = [["Today is " .. os.date("%Y-%m-%d")]],

      -- Custom enrichment functions to run on messages
      customEnrichFunctions = {},

      -- Skip tool approval prompts (useful for benchmarks/automation)
      skipToolApproval = false

    }
  }
}

Thanks in advance

Cc @justyns

1 Like

hey @disoardi ,

The new “providers” top-level key is still pretty new, so I haven’t integrated the embeddings or image model stuff into it yet. That means we’re kind of re-defining the provider just for the embedding config.

I think what you’re missing is the secretName in your embedding config:

secretName = ai.keys

should be something like

secretName = "OPENAI_API_KEY"

and then you still need to also set:

config.set("ai.keys.OPENAI_API_KEY", "sk-xxxxxx")

Let me know if that works for you. I’ll probably try to improve that error message too so that it’s more obvious where it’s looking for the key

ok, if I set config.set("ai.keys.OPENAI_API_KEY", "sk-xxxxxx") the embedding works, but the chat stops working and tells me there is no API key for the chat

Just to confirm, you have both secretName = "OPENAI_API_KEY" in the embedding config and also still have apiKey = openai_key in your openai provider config? You might also need to refresh the page a couple times to make sure the new config is loaded.

e.g. something like this config:

local openai_key = "sk-********************"
config.set("ai.keys.OPENAI_API_KEY", openai_key)

config.set { 
 ai = {
    providers = {
      openai = {
        apiKey = openai_key,
        useProxy = false,
        preferredModels = {"gpt-4o", "gpt-4o-mini"}
      }
    },
    defaultTextModel = "openai:gpt-4o-mini",
    indexEmbeddings = true,
    embeddingModels = {
      -- Only the first model is currently used
      {
        name = "openaiemb",
        modelName = "text-embedding-3-small",
        provider = "openai",
        requireAuth = true,
        secretName = "OPENAI_API_KEY",
        useProxy = false
      }
    },
  }
}

If so, there might be a bug somewhere. I’ll do some testing, but I’m also working on just getting rid of the separate embeddingModels option that would simplify the config a bit too.

local openai_key = "sk-svcacct-***"

config.set("ai.keys.OPENAI_API_KEY", "sk-svcacct-****")

config.set {
ai = {
    -- Provider-level configuration
    providers = {
      openai = {
        apiKey = openai_key,
        -- Could also be something like: apiKey = config.get("ai.keys.OPENAI_API_KEY"),
        useProxy = false,
        preferredModels = {"gpt-4o", "gpt-4o-mini"}
      }
--      , ollama = {
--        baseUrl = "http://host.docker.internal:11434",
--        preferredModels = {"bge-m3"}
--     }
--      ,gemini = {
--        apiKey = "your-gemini-key",
--        preferredModels = {"gemini-2.0-flash"}
--      }
    },
    -- Default model to use (format: "provider:modelName")
    -- This is auto-selected on startup if no model is already selected on this client
    defaultTextModel = "openai:gpt-4o-mini",

    -- Embedding
    indexEmbeddings = true,
    --indexEmbeddingsExcludePages = {"my_passwords"},
    --indexEmbeddingsExcludeStrings = {"**user**:", "Daily Quote:"},
    embeddingModels = {
      -- Only the first model is currently used
      {
        name = "openaiemb",
        modelName = "text-embedding-3-small",
        provider = "openai",
 --       baseUrl = "http://localhost:11434",
        requireAuth = true,
        secretName = ai.keys.OPENAI_API_KEY,
        useProxy = false
      }
    },
    -- Chat settings
    chat = {
      -- Enable AI tools (read/write notes, search, etc.)
      enableTools = true,

      -- Parse [[wiki-links]] and include their content as context
      parseWikiLinks = true,

      -- Search embeddings for relevant context (requires indexEmbeddings)
      searchEmbeddings = true,
      bakeMessages = true,
      -- Default agent to use (by name)
      defaultAgent = nil,
      -- When using chat, the userInformation and userInstructions
      -- are included in the system prompt.
      userInformation = "I'm a italian IT Architect who likes taking notes.",
      userInstructions = "Please give short and concise responses. When providing code, do so in python unless requested otherwise with comment in line in english. All other word must be in italian. ",
      -- Dynamic context (Lua expression evaluated at chat time)
      customContext = [["Today is " .. os.date("%Y-%m-%d")]],

      -- Custom enrichment functions to run on messages
      customEnrichFunctions = {},

      -- Skip tool approval prompts (useful for benchmarks/automation)
      skipToolApproval = false

    }
  }
}

With this configuration the embeddings work, but it gives an error on the chat (when running the Connectivity test it says it does not have the API key for chat).

If I comment out the embedding configuration, the Connectivity test is successful.

Just a heads-up, it looks like you used an actual OpenAI key in that post? You’ll likely want to revoke that and get a new one.

2 Likes

Sorry for the oversight.

I deleted the API keys from the openai platform.

Thank you for the notice and I confirm that we are all good people because no one took advantage of it.

1 Like

This part should be:

secretName = "OPENAI_API_KEY"

But! I just released version 0.6.4 - can you update to it? And then you no longer need embeddingModels at all. Just set defaultEmbeddingModel like this:

config.set {
  ai = {
    providers = {
      openai = {
        apiKey = "your-openai-key-here"
      }
    },
    defaultTextModel = "openai:gpt-4o",
    defaultEmbeddingModel = "openai:text-embedding-3-small",
    indexEmbeddings = true
  }
}

perfect, now it works. you’re a Great

1 Like