[GH-ISSUE #10633] Possible to add LoRA folder for some kind of auto detection/triggering? #53508

Open
opened 2026-04-29 03:28:27 -05:00 by GiteaMirror · 1 comment

Originally created by @AncientMystic on GitHub (May 9, 2025).
Original GitHub issue: https://github.com/ollama/ollama/issues/10633

I know LoRAs can be added manually to a Modelfile, but would it be possible to just point at a folder of LoRAs and enable or disable them per model?

Maybe even allow models to trigger LoRAs automatically based on keywords, so a collection of LoRAs could be used with a model and activate automatically based on context to enhance the response?

(This could also allow more adapters to sit in the folder without all of them having to be loaded every time, even when they are irrelevant to the prompt.)

This seems like a potential performance boost for everyone running CPU-only, low-VRAM, or smaller-model setups, and a potentially huge quality improvement for anyone running local LLMs, regardless of VRAM.
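
For context, the manual route today is the Modelfile ADAPTER instruction; a minimal sketch (base model and adapter path are illustrative):

FROM llama3
ADAPTER ./loras/my-adapter.gguf

The request is essentially to replace this hand-written, per-model step with a directory convention that can be scanned and applied on demand.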

GiteaMirror added the feature request label 2026-04-29 03:28:27 -05:00

@galoisgroupcn commented on GitHub (May 10, 2025):

@AncientMystic I'll start by creating a loras/ directory convention and adding code to scan it for LoRA adapters and keyword mappings, then update the model logic to auto-activate LoRAs by keyword when relevant.
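
Under that convention, an adapter plus its optional keyword sidecar might look like this (names and keywords are illustrative):

loras/
    medical.bin
    medical.json

where medical.json contains:

{
    "keywords": ["diagnosis", "symptom", "dosage"]
}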

package llm

import (
    "encoding/json"
    "io/fs"
    "os"
    "path/filepath"
    "strings"
)

// LoRAInfo holds the LoRA adapter path and its trigger keywords.
type LoRAInfo struct {
    Path     string   // Path to the LoRA adapter file
    Keywords []string // List of keywords that trigger this LoRA
}

// ScanLoRAFolder scans the loras/ directory for LoRA adapters and optional keyword mapping.
// Expects: loras/<lora_name>.bin and (optionally) loras/<lora_name>.json (with "keywords": [])
func ScanLoRAFolder(loraDir string) ([]LoRAInfo, error) {
    var loras []LoRAInfo

    err := filepath.WalkDir(loraDir, func(path string, d fs.DirEntry, err error) error {
        if err != nil {
            return err
        }
        if d.IsDir() || !strings.HasSuffix(d.Name(), ".bin") {
            return nil
        }
        info := LoRAInfo{Path: path}

        // Look for optional <name>.json metadata next to the adapter file
        // itself (not in loraDir), so the convention still works when
        // WalkDir descends into subdirectories of loras/.
        base := strings.TrimSuffix(d.Name(), ".bin")
        metaFile := filepath.Join(filepath.Dir(path), base+".json")
        if b, err := os.ReadFile(metaFile); err == nil {
            var meta struct {
                Keywords []string `json:"keywords"`
            }
            if json.Unmarshal(b, &meta) == nil {
                info.Keywords = meta.Keywords
            }
        }

        loras = append(loras, info)
        return nil
    })

    return loras, err
}

// SelectLoRAsForPrompt returns the paths of LoRAs whose keywords appear in the prompt.
func SelectLoRAsForPrompt(loras []LoRAInfo, prompt string) []string {
    var active []string
    promptLower := strings.ToLower(prompt)
    for _, lora := range loras {
        for _, kw := range lora.Keywords {
            if strings.Contains(promptLower, strings.ToLower(kw)) {
                active = append(active, lora.Path)
                break
            }
        }
    }
    return active
}
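
A minimal sketch of a call site in the same package (the function and wiring are hypothetical, not existing Ollama code): scan the folder once, then pick adapters per request.

// selectAdaptersFor scans the loras/ directory and returns the adapter
// paths whose keywords match the prompt. Hypothetical wiring only.
func selectAdaptersFor(prompt string) ([]string, error) {
    loras, err := ScanLoRAFolder("loras")
    if err != nil {
        return nil, err
    }
    // With the medical.json sidecar above, a prompt like
    // "what dosage is safe?" would activate loras/medical.bin.
    return SelectLoRAsForPrompt(loras, prompt), nil
}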

Reference: github-starred/ollama#53508