<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>LLM-Agents on Synaptech Blog</title>
    <link>https://9d274f8c.synaptech-blog.pages.dev/tags/llm-agents/</link>
    <description>Recent content in LLM-Agents on Synaptech Blog</description>
    <generator>Hugo</generator>
    <language>en</language>
    <managingEditor>g.martinez@synaptech-solutions.com (German Martinez)</managingEditor>
    <webMaster>g.martinez@synaptech-solutions.com (German Martinez)</webMaster>
    <lastBuildDate>Fri, 09 Jan 2026 00:00:00 +0000</lastBuildDate>
    <atom:link href="https://9d274f8c.synaptech-blog.pages.dev/tags/llm-agents/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>Graph Memory for Good</title>
      <link>https://9d274f8c.synaptech-blog.pages.dev/post/graph-memory-for-good/</link>
      <pubDate>Fri, 09 Jan 2026 00:00:00 +0000</pubDate>
      <author>g.martinez@synaptech-solutions.com (German Martinez)</author>
      <guid>https://9d274f8c.synaptech-blog.pages.dev/post/graph-memory-for-good/</guid>
      <description>&lt;h1 id=&#34;1-introduction&#34;&gt;1. Introduction&lt;/h1&gt;&#xA;&lt;p&gt;AI agents powered by large language models (LLMs) hold great promise for revolutionizing how knowledge workers do their job, but often fall short in one crucial area: memory. They may answer accurately in the moment, only to &amp;ldquo;forget&amp;rdquo; vital context in the next interaction. Retrieval-Augmented Generation (RAG) emerged as a popular workaround—pairing LLMs with external knowledge stored in vector databases—but standard RAG pipelines frequently struggle with accuracy and continuity.&lt;/p&gt;</description>
    </item>
  </channel>
</rss>
