diff --git a/README.md b/README.md
index d526d2b3..8a4d5d87 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ Since November 2024: Work on RAG (integration of your data and files) has begun.
 - [x] ~~App: Implement dialog for checking & handling [pandoc](https://pandoc.org/) installation ([PR #393](https://github.com/MindWorkAI/AI-Studio/pull/393), [PR #487](https://github.com/MindWorkAI/AI-Studio/pull/487))~~
 - [ ] App: Implement external embedding providers
 - [ ] App: Implement the process to vectorize one local file using embeddings
-- [ ] Runtime: Integration of the vector database [LanceDB](https://github.com/lancedb/lancedb)
+- [ ] Runtime: Integration of the vector database [Qdrant](https://github.com/qdrant/qdrant)
 - [ ] App: Implement the continuous process of vectorizing data
 - [x] ~~App: Define a common retrieval context interface for the integration of RAG processes in chats (PR [#281](https://github.com/MindWorkAI/AI-Studio/pull/281), [#284](https://github.com/MindWorkAI/AI-Studio/pull/284), [#286](https://github.com/MindWorkAI/AI-Studio/pull/286), [#287](https://github.com/MindWorkAI/AI-Studio/pull/287))~~
 - [x] ~~App: Define a common augmentation interface for the integration of RAG processes in chats (PR [#288](https://github.com/MindWorkAI/AI-Studio/pull/288), [#289](https://github.com/MindWorkAI/AI-Studio/pull/289))~~
@@ -78,7 +78,7 @@ Since March 2025: We have started developing the plugin system. There will be la
 Features we have recently released
-
+- v0.9.55: Added support for newer models like Mistral 3 & GPT 5.2, OpenRouter as LLM and embedding provider, and the possibility to use file attachments in chats.
 - v0.9.51: Added support for [Perplexity](https://www.perplexity.ai/); citations added so that LLMs can provide source references (e.g., some OpenAI models, Perplexity); added support for OpenAI's Responses API so that all text LLMs from OpenAI now work in MindWork AI Studio, including Deep Research models; web searches are now possible (some OpenAI models, Perplexity).
 - v0.9.50: Added support for self-hosted LLMs using [vLLM](https://blog.vllm.ai/2023/06/20/vllm.html).
 - v0.9.46: Released our plugin system, a German language plugin, early support for enterprise environments, and configuration plugins. Additionally, we added the Pandoc integration for future data processing and file generation.
@@ -114,6 +114,7 @@ MindWork AI Studio is a free desktop app for macOS, Windows, and Linux. It provi
 - [xAI](https://x.ai/) (Grok)
 - [DeepSeek](https://www.deepseek.com/en)
 - [Alibaba Cloud](https://www.alibabacloud.com) (Qwen)
+- [OpenRouter](https://openrouter.ai/)
 - [Hugging Face](https://huggingface.co/) using their [inference providers](https://huggingface.co/docs/inference-providers/index) such as Cerebras, Nebius, Sambanova, Novita, Hyperbolic, Together AI, Fireworks, Hugging Face
 - Self-hosted models using [llama.cpp](https://github.com/ggerganov/llama.cpp), [ollama](https://github.com/ollama/ollama), [LM Studio](https://lmstudio.ai/), and [vLLM](https://github.com/vllm-project/vllm)
 - [Groq](https://groq.com/)
diff --git a/app/MindWork AI Studio/Pages/Home.razor.cs b/app/MindWork AI Studio/Pages/Home.razor.cs
index bdd46e06..7facd6e3 100644
--- a/app/MindWork AI Studio/Pages/Home.razor.cs
+++ b/app/MindWork AI Studio/Pages/Home.razor.cs
@@ -31,7 +31,7 @@ public partial class Home : MSGComponentBase
     {
         this.itemsAdvantages = [
             new(this.T("Free of charge"), this.T("The app is free to use, both for personal and commercial purposes.")),
-            new(this.T("Independence"), this.T("You are not tied to any single provider. Instead, you might choose the provider that best suits your needs. Right now, we support OpenAI (GPT5, o1, etc.), Perplexity, Mistral, Anthropic (Claude), Google Gemini, xAI (Grok), DeepSeek, Alibaba Cloud (Qwen), Hugging Face, and self-hosted models using vLLM, llama.cpp, ollama, LM Studio, Groq, or Fireworks. For scientists and employees of research institutions, we also support Helmholtz and GWDG AI services. These are available through federated logins like eduGAIN to all 18 Helmholtz Centers, the Max Planck Society, most German, and many international universities.")),
+            new(this.T("Independence"), this.T("You are not tied to any single provider. Instead, you might choose the provider that best suits your needs. Right now, we support OpenAI (GPT5, o1, etc.), Perplexity, Mistral, Anthropic (Claude), Google Gemini, xAI (Grok), DeepSeek, Alibaba Cloud (Qwen), OpenRouter, Hugging Face, and self-hosted models using vLLM, llama.cpp, ollama, LM Studio, Groq, or Fireworks. For scientists and employees of research institutions, we also support Helmholtz and GWDG AI services. These are available through federated logins like eduGAIN to all 18 Helmholtz Centers, the Max Planck Society, most German, and many international universities.")),
             new(this.T("Assistants"), this.T("You just want to quickly translate a text? AI Studio has so-called assistants for such and other tasks. No prompting is necessary when working with these assistants.")),
             new(this.T("Unrestricted usage"), this.T("Unlike services like ChatGPT, which impose limits after intensive use, MindWork AI Studio offers unlimited usage through the providers API.")),
             new(this.T("Cost-effective"), this.T("You only pay for what you use, which can be cheaper than monthly subscription services like ChatGPT Plus, especially if used infrequently. But beware, here be dragons: For extremely intensive usage, the API costs can be significantly higher. Unfortunately, providers currently do not offer a way to display current costs in the app. Therefore, check your account with the respective provider to see how your costs are developing. When available, use prepaid and set a cost limit.")),