Optimiser la gestion des procédures internes grâce à l’automatisation

				
					{
  "id": "HnqGW0eq5asKfZxf",
  "meta": {
    "instanceId": "03907a25f048377a8789a4332f28148522ba31ee907fababf704f1d88130b1b6",
    "templateCredsSetupCompleted": true
  },
  "name": "\ud83d\udd0d\ud83d\udee0\ufe0fPerplexity Researcher to HTML Web Page",
  "tags": [],
  "nodes": [
    {
      "id": "ad5d96c6-941a-4ab3-b349-10bae99e5988",
      "name": "Sticky Note",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        320,
        1360
      ],
      "parameters": {
        "color": 3,
        "width": 625.851492623043,
        "height": 465.2493344282225,
        "content": "## Create Article from Perplexity Research"
      },
      "typeVersion": 1
    },
    {
      "id": "19b3ca66-5fd2-4d04-b25a-a17fb38642f8",
      "name": "Sticky Note1",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        1240,
        1360
      ],
      "parameters": {
        "color": 4,
        "width": 479.02028317328745,
        "height": 464.14912719677955,
        "content": "## Convert Article into HTML"
      },
      "typeVersion": 1
    },
    {
      "id": "7fad54e8-5a50-42da-b38d-08f6912615ab",
      "name": "gpt-4o-mini",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        1380,
        1660
      ],
      "parameters": {
        "model": "gpt-4o-mini-2024-07-18",
        "options": {
          "responseFormat": "text"
        }
      },
      "credentials": {
        "openAiApi": {
          "id": "h597GY4ZJQD47RQd",
          "name": "OpenAi account"
        }
      },
      "typeVersion": 1
    },
    {
      "id": "5291869f-3ac6-4ce2-88f3-b572924b6082",
      "name": "gpt-4o-mini1",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        1560,
        1040
      ],
      "parameters": {
        "options": {
          "topP": 1,
          "timeout": 60000,
          "maxTokens": -1,
          "maxRetries": 2,
          "temperature": 0,
          "responseFormat": "text",
          "presencePenalty": 0,
          "frequencyPenalty": 0
        }
      },
      "credentials": {
        "openAiApi": {
          "id": "h597GY4ZJQD47RQd",
          "name": "OpenAi account"
        }
      },
      "typeVersion": 1
    },
    {
      "id": "a232f6ca-ad4c-40fa-a641-f0dd83c8f18a",
      "name": "Structured Output Parser1",
      "type": "@n8n/n8n-nodes-langchain.outputParserStructured",
      "position": [
        640,
        1660
      ],
      "parameters": {
        "schemaType": "manual",
        "inputSchema": "{\n  \"type\": \"object\",\n  \"properties\": {\n    \"article\": {\n      \"type\": \"object\",\n      \"required\": [\"category\", \"title\", \"metadata\", \"content\", \"hashtags\"],\n      \"properties\": {\n        \"category\": {\n          \"type\": \"string\",\n          \"description\": \"Article category\"\n        },\n        \"title\": {\n          \"type\": \"string\",\n          \"description\": \"Article title\"\n        },\n        \"metadata\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"timePosted\": {\n              \"type\": \"string\",\n              \"description\": \"Time since article was posted\"\n            },\n            \"author\": {\n              \"type\": \"string\",\n              \"description\": \"Article author name\"\n            },\n            \"tag\": {\n              \"type\": \"string\",\n              \"description\": \"Article primary tag\"\n            }\n          },\n          \"required\": [\"timePosted\", \"author\", \"tag\"]\n        },\n        \"content\": {\n          \"type\": \"object\",\n          \"properties\": {\n            \"mainText\": {\n              \"type\": \"string\",\n              \"description\": \"Main article content\"\n            },\n            \"sections\": {\n              \"type\": \"array\",\n              \"items\": {\n                \"type\": \"object\",\n                \"properties\": {\n                  \"title\": {\n                    \"type\": \"string\",\n                    \"description\": \"Section title\"\n                  },\n                  \"text\": {\n                    \"type\": \"string\",\n                    \"description\": \"Section content\"\n                  },\n                  \"quote\": {\n                    \"type\": \"string\",\n                    \"description\": \"Blockquote text\"\n                  }\n                },\n                \"required\": [\"title\", \"text\", \"quote\"]\n              }\n            }\n          },\n          \"required\": [\"mainText\", \"sections\"]\n        },\n        \"hashtags\": {\n          \"type\": \"array\",\n          \"items\": {\n            \"type\": \"string\"\n          },\n          \"description\": \"Article hashtags\"\n        }\n      }\n    }\n  }\n}"
      },
      "typeVersion": 1.2
    },
    {
      "id": "e7d1adac-88aa-4f76-92bf-bbac3aa6386a",
      "name": "gpt-4o-mini2",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        420,
        1660
      ],
      "parameters": {
        "options": {
          "topP": 1,
          "timeout": 60000,
          "maxTokens": -1,
          "maxRetries": 2,
          "temperature": 0,
          "responseFormat": "json_object",
          "presencePenalty": 0,
          "frequencyPenalty": 0
        }
      },
      "credentials": {
        "openAiApi": {
          "id": "h597GY4ZJQD47RQd",
          "name": "OpenAi account"
        }
      },
      "typeVersion": 1
    },
    {
      "id": "156e51db-03f7-4099-afe8-6f0361c5b497",
      "name": "Webhook",
      "type": "n8n-nodes-base.webhook",
      "position": [
        160,
        860
      ],
      "webhookId": "6a8e3ae7-02ae-4663-a27a-07df448550ab",
      "parameters": {
        "path": "pblog",
        "options": {},
        "responseMode": "responseNode"
      },
      "typeVersion": 2
    },
    {
      "id": "6dd3eba7-e779-4e4a-960e-c5a7b6b3a929",
      "name": "Respond to Webhook",
      "type": "n8n-nodes-base.respondToWebhook",
      "position": [
        2820,
        1480
      ],
      "parameters": {
        "options": {},
        "respondWith": "text",
        "responseBody": "={{ $json.text }}"
      },
      "typeVersion": 1.1
    },
    {
      "id": "27ee681e-4259-4323-b4fe-629f99cb33d0",
      "name": "Telegram",
      "type": "n8n-nodes-base.telegram",
      "position": [
        2320,
        880
      ],
      "parameters": {
        "text": "={{ $('Perplexity Topic Agent').item.json.output.slice(0, 300) }}",
        "chatId": "={{ $json.telegram_chat_id }}",
        "additionalFields": {
          "parse_mode": "HTML",
          "appendAttribution": false
        }
      },
      "credentials": {
        "telegramApi": {
          "id": "BIE64nzfpGeesXUn",
          "name": "Telegram account"
        }
      },
      "typeVersion": 1.2
    },
    {
      "id": "f437d40c-2bf6-43e2-b77b-e5c2cdc35055",
      "name": "gpt-4o-mini5",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        2480,
        1660
      ],
      "parameters": {
        "options": {
          "topP": 1,
          "timeout": 60000,
          "maxTokens": -1,
          "maxRetries": 2,
          "temperature": 0,
          "responseFormat": "text",
          "presencePenalty": 0,
          "frequencyPenalty": 0
        }
      },
      "credentials": {
        "openAiApi": {
          "id": "h597GY4ZJQD47RQd",
          "name": "OpenAi account"
        }
      },
      "typeVersion": 1
    },
    {
      "id": "275bce4a-4252-41d4-bcba-174f0c51bf4a",
      "name": "Basic LLM Chain",
      "type": "@n8n/n8n-nodes-langchain.chainLlm",
      "position": [
        2340,
        1480
      ],
      "parameters": {
        "text": "=Create a modern, responsive single-line HTML document. Convert any markdown to Tailwind CSS classes. Replace markdown lists with proper HTML list elements. Remove all newline characters while preserving </br> tags in content. Enhance the layout with Tailwind CSS cards where appropriate. Use the following base structure, but improve the styling and responsiveness:nnnnnn    n    n    <title>Comprehensive Overview of DeepSeek V3</title>n    nnnn    <div class="relative p-4">n        <div class="max-w-3xl mx-auto text-sm">n            <div class="mt-3 bg-white rounded-lg shadow-lg flex flex-col justify-between leading-normal">n                <div class="p-6">n                    <h1 class="text-gray-900 font-bold text-4xl mb-4">Comprehensive Overview of DeepSeek V3</h1>n                    <div class="mb-4">n                        <p class="leading-8"><strong>Time Posted:</strong> Just now</p>n                        <p class="leading-8"><strong>Author:</strong> AI Research Team</p>n                        <p class="leading-8"><strong>Tag:</strong> AI Models</p>n                    </div>n                    <p class="leading-8 my-4"><strong>DeepSeek V3</strong> is a state-of-the-art AI model that leveragesn                        advanced architectures and techniques to deliver high performance across various applications.n                        This overview covers its key concepts, practical applications, advantages, limitations, and bestn                        practices for implementation.</p>n                    <section class="mb-6">n                        <h2 class="text-2xl font-bold my-3">Key Concepts and Core Components</h2>n                        <p class="leading-8 my-3"><strong>1. Mixture-of-Experts (MoE) Architecture:</strong> DeepSeek V3n                            employs a Mixture-of-Experts (MoE) architecture, which consists of multiple neural networks,n                            each optimized for different tasks. 
This architecture allows for efficient processing byn                            activating only a portion of the network for each task, reducing hardware costs.</p>n                        <p class="leading-8 my-3"><strong>2. Parameters:</strong> The model boasts a total of 671n                            billionn                            parameters, with 37 billion active parameters for each token during processing. The additionn                            ofn                            the Multi-Token Prediction (MTP) module increases the total parameters to 685 billion,n                            making itn                            significantly larger than other models like Meta's Llama 3.1 (405B).</p>n                        <p class="leading-8 my-3"><strong>3. Multi-head Latent Attention (MLA):</strong> DeepSeek V3n                            usesn                            Multi-head Latent Attention (MLA) to extract key details from text multiple times, improvingn                            itsn                            accuracy.</p>n                        <p class="leading-8 my-3"><strong>4. 
Multi-Token Prediction (MTP):</strong> The model utilizesn                            Multi-Token Prediction to generate several tokens at once, speeding up inference andn                            enablingn                            speculative decoding.</p>n                        n                            DeepSeek V3 employs a Mixture-of-Experts architecture for efficient processing.</blockquote>n                    </section>n                    <section class="mb-6">n                        <h2 class="text-2xl font-bold my-3">Practical Applications</h2>n                        <ol class="list-decimal pl-5">n                            <li class="leading-8 my-3"><strong>Translation, Coding, and Content Generation:</strong>n                                DeepSeek V3 is designed for a wide range of tasks including translation, coding, contentn                                generation, and reasoning. It excels in English, Chinese, coding, and mathematics,n                                rivaling leading commercial models like OpenAI's GPT-4.</li>n                            <li class="leading-8 my-3"><strong>Research and Development:</strong> The open-source naturen                                of DeepSeek V3 fuels innovation, allowing researchers to experiment with and build uponn                                its technology.</li>n                            <li class="leading-8 my-3"><strong>Commercial Applications:</strong> The licensing ofn                                DeepSeek V3 makes it permissible for commercial use, opening it up to numerousn                                applications across different industries.</li>n                            <li class="leading-8 my-3"><strong>Democratization of AI:</strong> By making powerful AIn                                accessible, DeepSeek V3 levels the playing field, allowing smaller organizations ton                                compete with larger ones.</li>n                        </ol>n     
                   n                            DeepSeek V3 democratizes AI access for smaller organizations.</blockquote>n                    </section>n                    <section class="mb-6">n                        <h2 class="text-2xl font-bold my-3">Advantages</h2>n                        <ol class="list-decimal pl-5">n                            <li class="leading-8 my-3"><strong>Speed and Efficiency:</strong> DeepSeek V3 processesn                                information at a blistering 60 tokens per second, a threefold increase over itsn                                predecessor. It uses advanced inference capabilities, deploying 32 H800 GPUs for prefilln                                and 320 H800 GPUs for decoding.</li>n                            <li class="leading-8 my-3"><strong>Cost-Effectiveness:</strong> The model was trained for an                                mere $5.5 million, a fraction of the estimated over $100 million invested by OpenAI inn                                GPT-4. 
DeepSeek V3 offers significantly lower prices for its online services, with 1n                                million tokens priced at just $1.1, currently offered at a promotional rate of $0.28.n                            </li>n                            <li class="leading-8 my-3"><strong>Innovation in Inference:</strong> The model's advancedn                                inference capabilities set the standard for future model deployment, making it an                                powerful tool in the digital realm.</li>n                        </ol>n                        n                            DeepSeek V3 processes information at 60 tokens per second.</blockquote>n                    </section>n                    <section class="mb-6">n                        <h2 class="text-2xl font-bold my-3">Limitations</h2>n                        <ol class="list-decimal pl-5">n                            <li class="leading-8 my-3"><strong>Deployment Complexity:</strong> Deploying DeepSeek V3n                                requires advanced hardware and a deployment strategy that separates the prefilling andn                                decoding stages, which might be unachievable for small companies due to a lack ofn                                resources. 
The recommended deployment unit for DeepSeek V3 is relatively large, posing an                                burden for small-sized teams.</li>n                            <li class="leading-8 my-3"><strong>Potential for Further Enhancement:</strong> Althoughn                                DeepSeek V3 has achieved an end-to-end generation speed of more than two times that ofn                                DeepSeek V2, there still remains potential for further enhancement with the developmentn                                of more advanced hardware.</li>n                        </ol>n                        n                            Deployment of DeepSeek V3 may be complex for small companies.</blockquote>n                    </section>n                    <section class="mb-6">n                        <h2 class="text-2xl font-bold my-3">Best Practices for Implementation</h2>n                        <ol class="list-decimal pl-5">n                            <li class="leading-8 my-3"><strong>Hardware Requirements:</strong> Ensure that then                                deployment environment has the necessary advanced hardware to handle the model'sn                                requirements, including multiple GPUs for prefill and decoding.</li>n                            <li class="leading-8 my-3"><strong>Deployment Strategy:</strong> Implement a deploymentn                                strategy that separates the prefilling and decoding stages to optimize performance andn                                efficiency.</li>n                            <li class="leading-8 my-3"><strong>Monitoring and Optimization:</strong> Continuouslyn                                monitor the model's performance and optimize it as needed to address any limitations andn                                improve efficiency.</li>n                            <li class="leading-8 my-3"><strong>Community Engagement:</strong> Engage with then                                
open-source community to leverage the collective knowledge and resources available,n                                which can help in addressing any challenges and improving the model further.</li>n                        </ol>n                        n                            Engage with the open-source community for better implementation.</blockquote>n                    </section>n                    <p class="leading-8 my-6"><strong>Hashtags:</strong> #DeepSeekV3</span> <span class="text-indigo-600">#AI</span> #MachineLearning</span> #OpenSource</span></p>n                </div>n            </div>n        </div>n    </div>nnnnn-------nnRequirements:n- Output must be a single line of HTMLn- Enhanced with modern Tailwind CSS stylingn- Proper HTML list structuresn- Responsive designn- No newlines except </br> in contentn- No markdown formattingn- Clean, readable layoutn- Properly formatted hashtagsn- No explanation or additional text in outputn- No code block markers or escape charactersn- Wnsure Metadata, Title and Content are included in HTMLnnMetadata: {{ $('Article').item.json.article.metadata.toJsonString() }}nTitle: {{ $json.title }}nContent: {{ $json.html }}n",
        "promptType": "define"
      },
      "typeVersion": 1.4
    },
    {
      "id": "cddd9324-8471-4dcb-a46b-836015db9833",
      "name": "Do Nothing1",
      "type": "n8n-nodes-base.noOp",
      "position": [
        560,
        1080
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "432a0ae9-451a-4830-b065-8b0593de92ea",
      "name": "gpt-4o-mini3",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        1020,
        1040
      ],
      "parameters": {
        "options": {
          "topP": 1,
          "timeout": 60000,
          "maxTokens": -1,
          "maxRetries": 2,
          "temperature": 0,
          "responseFormat": "text",
          "presencePenalty": 0,
          "frequencyPenalty": 0
        }
      },
      "credentials": {
        "openAiApi": {
          "id": "h597GY4ZJQD47RQd",
          "name": "OpenAi account"
        }
      },
      "typeVersion": 1
    },
    {
      "id": "55e00886-b6c1-4f7a-81ae-e8e0d4102cab",
      "name": "Sticky Note4",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        2200,
        1360
      ],
      "parameters": {
        "color": 6,
        "width": 531,
        "height": 465,
        "content": "## Create HTML Page with TailwindCSS Styling"
      },
      "typeVersion": 1
    },
    {
      "id": "1ed7f754-1279-4511-a085-6ed4e4c36de1",
      "name": "Sticky Note2",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        320,
        760
      ],
      "parameters": {
        "width": 450.54438902818094,
        "height": 489.5271576259337,
        "content": "## Parse Topic from Get Request"
      },
      "typeVersion": 1
    },
    {
      "id": "e9dcb568-7f8d-40c5-94cb-6f25386436cf",
      "name": "Sticky Note5",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        820,
        760
      ],
      "parameters": {
        "color": 5,
        "width": 380,
        "height": 488,
        "content": "## Improve the Users Topic"
      },
      "typeVersion": 1
    },
    {
      "id": "a7fdaddb-d6fc-4d45-85cc-a372cfb90327",
      "name": "If2",
      "type": "n8n-nodes-base.if",
      "position": [
        2120,
        1140
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "8e35de0a-ac16-4555-94f4-24e97bdf4b33",
              "operator": {
                "type": "string",
                "operation": "notEmpty",
                "singleValue": true
              },
              "leftValue": "={{ $json.output }}",
              "rightValue": ""
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "57d056b8-7e91-41e4-8b74-dce15847a09b",
      "name": "Prompts",
      "type": "n8n-nodes-base.set",
      "position": [
        1300,
        2080
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "efbe7563-8502-407e-bfa0-a4a26d8cddd4",
              "name": "user",
              "type": "string",
              "value": "={{ $('Execute Workflow Trigger').item.json.topic }}"
            },
            {
              "id": "05e0b629-bb9f-4010-96a8-10872764705a",
              "name": "system",
              "type": "string",
              "value": "Assistant is a large language model.  Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.  Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.  Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.  "
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "8209cece-fde4-485f-81a1-2d24a6eac474",
      "name": "Execute Workflow Trigger",
      "type": "n8n-nodes-base.executeWorkflowTrigger",
      "position": [
        420,
        2180
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "445e4d15-c2b0-4152-a0f8-d6b93ad5bae6",
      "name": "Telegram2",
      "type": "n8n-nodes-base.telegram",
      "position": [
        860,
        2180
      ],
      "parameters": {
        "text": "=<i>{{ $('Execute Workflow Trigger').item.json.topic }}</i>",
        "chatId": "={{ $json.telegram_chat_id }}",
        "additionalFields": {
          "parse_mode": "HTML",
          "appendAttribution": false
        }
      },
      "credentials": {
        "telegramApi": {
          "id": "BIE64nzfpGeesXUn",
          "name": "Telegram account"
        }
      },
      "typeVersion": 1.2
    },
    {
      "id": "57a5b3ce-5490-4d50-91cc-c36e508eee4d",
      "name": "If",
      "type": "n8n-nodes-base.if",
      "position": [
        1080,
        2180
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "7e2679dc-c898-415d-a693-c2c1e7259b6a",
              "operator": {
                "type": "string",
                "operation": "notContains"
              },
              "leftValue": "={{ $('Execute Workflow Trigger').item.json.topic }}",
              "rightValue": "undefined"
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "fdf827dc-96b1-4ed3-895b-2a0f5f4c41a3",
      "name": "No Operation, do nothing",
      "type": "n8n-nodes-base.noOp",
      "position": [
        1300,
        2300
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "944aa564-f449-47a6-9d9c-c20a48946ab6",
      "name": "Sticky Note6",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        320,
        1940
      ],
      "parameters": {
        "color": 5,
        "width": 1614,
        "height": 623,
        "content": "## \ud83d\udee0\ufe0fperplexity_research_tool\n\n"
      },
      "typeVersion": 1
    },
    {
      "id": "3806c079-8c08-48b7-a3ed-a26f6d86c67f",
      "name": "Perplexity Topic Agent",
      "type": "@n8n/n8n-nodes-langchain.agent",
      "position": [
        1580,
        860
      ],
      "parameters": {
        "text": "=Topic: {{ $json.text }}",
        "options": {
          "systemMessage": "Use the perplexity_research_tool to provide research on the users topic.\n\n"
        },
        "promptType": "define",
        "hasOutputParser": true
      },
      "typeVersion": 1.6
    },
    {
      "id": "cfc55dbb-78e6-47ef-bf55-810311bd37e8",
      "name": "Call Perplexity Researcher",
      "type": "@n8n/n8n-nodes-langchain.toolWorkflow",
      "position": [
        1780,
        1040
      ],
      "parameters": {
        "name": "perplexity_research_tool",
        "fields": {
          "values": [
            {
              "name": "topic",
              "stringValue": "={{ $json.text }}"
            }
          ]
        },
        "workflowId": {
          "__rl": true,
          "mode": "id",
          "value": "HnqGW0eq5asKfZxf"
        },
        "description": "Call this tool to perform Perplexity research.",
        "jsonSchemaExample": "{\n  \"topic\": \"\"\n}"
      },
      "typeVersion": 1.2
    },
    {
      "id": "5ca35a40-506d-4768-a65c-a331718040bc",
      "name": "Do Nothing",
      "type": "n8n-nodes-base.noOp",
      "position": [
        2320,
        1140
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "17028837-4706-43f3-8291-f150860caa4c",
      "name": "Do Nothing2",
      "type": "n8n-nodes-base.noOp",
      "position": [
        1020,
        1700
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "adebf1ad-62d9-4b79-b9a1-4a9395067803",
      "name": "Do Nothing3",
      "type": "n8n-nodes-base.noOp",
      "position": [
        2000,
        1700
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "fe19e472-3b2b-4c07-b957-fb2afc426998",
      "name": "Do Nothing4",
      "type": "n8n-nodes-base.noOp",
      "position": [
        1260,
        1080
      ],
      "parameters": {},
      "typeVersion": 1
    },
    {
      "id": "41e23462-a7fa-42a8-adbc-83a662f63f0c",
      "name": "Sticky Note7",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        1460,
        760
      ],
      "parameters": {
        "color": 3,
        "width": 480,
        "height": 488,
        "content": "## \ud83e\udd16Perform Perplexity Research"
      },
      "typeVersion": 1
    },
    {
      "id": "dcc3bd83-1f8c-4000-a832-c2c6e7c157ba",
      "name": "Get Topic",
      "type": "n8n-nodes-base.set",
      "position": [
        380,
        860
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "57f0eab2-ef1b-408c-82d5-a8c54c4084a6",
              "name": "topic",
              "type": "string",
              "value": "={{ $json.query.topic }}"
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "5572e5b1-0b4c-4e6d-b413-5592aab59571",
      "name": "If Topic Exists",
      "type": "n8n-nodes-base.if",
      "position": [
        560,
        860
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "2c565aa5-0d11-47fb-8621-6db592579fa8",
              "operator": {
                "type": "string",
                "operation": "notEmpty",
                "singleValue": true
              },
              "leftValue": "={{ $json.topic }}",
              "rightValue": ""
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "509ee61f-defb-41e8-84cf-70ac5a7448d0",
      "name": "Improve Users Topic",
      "type": "@n8n/n8n-nodes-langchain.chainLlm",
      "position": [
        880,
        860
      ],
      "parameters": {
        "text": "=How would you improve the following prompt as of {{ $now }}, focusing on:\n\n1. Key Concepts & Definitions\n   - Main terminology and foundational concepts\n   - Technical background and context\n\n2. Core Components\n   - Essential elements and their relationships\n   - Critical processes and workflows\n\n3. Practical Applications\n   - Real-world use cases\n   - Implementation considerations\n\n4. Analysis & Insights\n   - Advantages and limitations\n   - Best practices and recommendations\n\nThe final output should be a maximum 2 sentence pure text prompt without any preamble or further explanation.  The final output will be provided to Perplexity as a research prompt.\n\nPrompt to analyze: {{ $json.topic }}",
        "promptType": "define"
      },
      "typeVersion": 1.4
    },
    {
      "id": "69ee4c6a-f6ef-47a2-bd5c-ccaf49ec7c94",
      "name": "If Topic",
      "type": "n8n-nodes-base.if",
      "position": [
        1260,
        860
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "329653d4-330f-4b41-96e7-4652c1448902",
              "operator": {
                "type": "string",
                "operation": "notEmpty",
                "singleValue": true
              },
              "leftValue": "={{ $json.text }}",
              "rightValue": ""
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "daa3027b-774d-44b1-b0a5-27008768c65d",
      "name": "Chat Id",
      "type": "n8n-nodes-base.set",
      "position": [
        2120,
        880
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "0aa8fcc9-26f4-485c-8fc1-a5c13d0dd279",
              "name": "telegram_chat_id",
              "type": "number",
              "value": 1234567890
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "97f32ad1-f91e-4ccc-8248-d10da823b26a",
      "name": "Article",
      "type": "n8n-nodes-base.set",
      "position": [
        780,
        1480
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "0eb5952b-c133-4b63-8102-d4b8ec7b9b5a",
              "name": "article",
              "type": "object",
              "value": "={{ $json.output.article }}"
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "e223dee3-c79f-421d-b2b8-2f3551a45f71",
      "name": "Extract JSON",
      "type": "@n8n/n8n-nodes-langchain.agent",
      "position": [
        440,
        1480
      ],
      "parameters": {
        "text": "=Extract a JSON object from this content: {{ $json.output }}",
        "options": {},
        "promptType": "define",
        "hasOutputParser": true
      },
      "retryOnFail": true,
      "typeVersion": 1.6
    },
    {
      "id": "de8aafb6-b05d-4278-8719-9b3c266fcf3a",
      "name": "If Article",
      "type": "n8n-nodes-base.if",
      "position": [
        1020,
        1480
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "329653d4-330f-4b41-96e7-4652c1448902",
              "operator": {
                "type": "string",
                "operation": "notEmpty",
                "singleValue": true
              },
              "leftValue": "{{ $json.article }}",
              "rightValue": ""
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "f9450b58-3b81-4b61-8cbf-2cdf5a2f56a0",
      "name": "Create HTML Article",
      "type": "@n8n/n8n-nodes-langchain.agent",
      "position": [
        1360,
        1480
      ],
      "parameters": {
        "text": "=Convert this verbatim into HTML: {{ $json.article.toJsonString() }}nn## Formatting Guidelinesn- HTML document must be single line document without tabs or line breaksn- Use proper HTML tags throughoutn- Do not use these tags:     n- Use <h1> tag for main titlen- Use <h2> tags for secondary titlesn- Structure with <p> tags for paragraphsn- Include appropriate spacingn- Use <blockquote> for direct quotesn- Maintain consistent formattingn- Write in clear, professional tonen- Break up long paragraphsn- Use engaging subheadingsn- Include transitional phrasesnnThe final JSON response should contain only the title and content fields, with the content including all HTML formatting.n{nt"title": "the title",nt"content": "the HTML"n}",
        "agent": "conversationalAgent",
        "options": {},
        "promptType": "define"
      },
      "retryOnFail": true,
      "typeVersion": 1.6
    },
    {
      "id": "53cbaa6e-6508-48e3-9a5a-58f5bc111c2d",
      "name": "If HTML",
      "type": "n8n-nodes-base.if",
      "position": [
        1780,
        1480
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "329653d4-330f-4b41-96e7-4652c1448902",
              "operator": {
                "type": "string",
                "operation": "notEmpty",
                "singleValue": true
              },
              "leftValue": "={{ $json.output.parseJson().title }}",
              "rightValue": ""
            },
            {
              "id": "0a05f73a-2901-4157-8194-cb81d259ce71",
              "operator": {
                "type": "string",
                "operation": "notEmpty",
                "singleValue": true
              },
              "leftValue": "={{ $json.output.parseJson().content }}",
              "rightValue": ""
            },
            {
              "id": "b61c1d25-a010-42d3-9f9d-fa927c483bae",
              "operator": {
                "name": "filter.operator.equals",
                "type": "string",
                "operation": "equals"
              },
              "leftValue": "",
              "rightValue": ""
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "33e4e2cd-be0c-4fc9-b705-b0e8aac496f9",
      "name": "Contents",
      "type": "n8n-nodes-base.set",
      "position": [
        2000,
        1480
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "af335333-acb8-4c9e-8184-d20cd03e08f6",
              "name": "title",
              "type": "string",
              "value": "={{ $json.output.parseJson().title }}"
            },
            {
              "id": "7fbd2264-c0e1-4bdc-b754-b0faa538879c",
              "name": "content",
              "type": "string",
              "value": "={{ $json.output.parseJson().content }}"
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "8bf36853-8a04-4a0b-8715-e03a8fc8359d",
      "name": "Chat Id1",
      "type": "n8n-nodes-base.set",
      "position": [
        660,
        2180
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "0aa8fcc9-26f4-485c-8fc1-a5c13d0dd279",
              "name": "telegram_chat_id",
              "type": "number",
              "value": 1234567890
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "a3fe75d1-8db0-45cb-87f6-76fc27cb59f6",
      "name": "Sticky Note3",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        600,
        2080
      ],
      "parameters": {
        "width": 420,
        "height": 340,
        "content": "## Optional"
      },
      "typeVersion": 1
    },
    {
      "id": "22e9edbc-7aa6-4549-ae9f-2c31ad7d0542",
      "name": "Sticky Note8",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        2060,
        760
      ],
      "parameters": {
        "width": 420,
        "height": 340,
        "content": "## Optional"
      },
      "typeVersion": 1
    },
    {
      "id": "e62ff7d5-bd54-434c-b048-0dc7cd2c7f9b",
      "name": "Success Response",
      "type": "n8n-nodes-base.set",
      "position": [
        1700,
        2080
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "eb89464a-5919-4962-880c-3f5903e267de",
              "name": "response",
              "type": "string",
              "value": "={{ $('Perplexity').item.json.choices[0].message.content }}"
            }
          ]
        },
        "includeOtherFields": true
      },
      "typeVersion": 3.4
    },
    {
      "id": "c6ba0613-47c6-442f-99e8-0eaec8cacc20",
      "name": "Error Response",
      "type": "n8n-nodes-base.set",
      "position": [
        1700,
        2300
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "eb89464a-5919-4962-880c-3f5903e267de",
              "name": "response",
              "type": "string",
              "value": "=Error.  No topic provided."
            }
          ]
        },
        "includeOtherFields": true
      },
      "typeVersion": 3.4
    },
    {
      "id": "30d8065c-55d8-4099-abb2-ddb01635129d",
      "name": "Perplexity",
      "type": "n8n-nodes-base.httpRequest",
      "position": [
        1500,
        2080
      ],
      "parameters": {
        "url": "https://api.perplexity.ai/chat/completions",
        "method": "POST",
        "options": {},
        "jsonBody": "={n  "model": "llama-3.1-sonar-small-128k-online",n  "messages": [n    {n      "role": "system",n      "content": "{{ $json.system }}"n    },n    {n      "role": "user",n      "content": "{{ $json.user }}"n    }n  ],n  "max_tokens": "4000",n  "temperature": 0.2,n  "top_p": 0.9,n  "return_citations": true,n  "search_domain_filter": [n    "perplexity.ai"n  ],n  "return_images": false,n  "return_related_questions": false,n  "search_recency_filter": "month",n  "top_k": 0,n  "stream": false,n  "presence_penalty": 0,n  "frequency_penalty": 1n}",
        "sendBody": true,
        "specifyBody": "json",
        "authentication": "genericCredentialType",
        "genericAuthType": "httpHeaderAuth"
      },
      "credentials": {
        "httpCustomAuth": {
          "id": "vxjFugFpr4Od6gws",
          "name": "Confluence REST API"
        },
        "httpHeaderAuth": {
          "id": "wokWVLDQUDi0DC7I",
          "name": "Perplexity"
        }
      },
      "typeVersion": 4.2
    }
  ],
  "active": false,
  "pinData": {},
  "settings": {
    "executionOrder": "v1"
  },
  "versionId": "9ebf0569-4d9d-4783-b797-e5df2a8e8415",
  "connections": {
    "If": {
      "main": [
        [
          {
            "node": "Prompts",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "No Operation, do nothing",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "If2": {
      "main": [
        [
          {
            "node": "Extract JSON",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Do Nothing",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Article": {
      "main": [
        [
          {
            "node": "If Article",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Chat Id": {
      "main": [
        [
          {
            "node": "Telegram",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "If HTML": {
      "main": [
        [
          {
            "node": "Contents",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Do Nothing3",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Prompts": {
      "main": [
        [
          {
            "node": "Perplexity",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Webhook": {
      "main": [
        [
          {
            "node": "Get Topic",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Chat Id1": {
      "main": [
        [
          {
            "node": "Telegram2",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Contents": {
      "main": [
        [
          {
            "node": "Basic LLM Chain",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "If Topic": {
      "main": [
        [
          {
            "node": "Perplexity Topic Agent",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Do Nothing4",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Get Topic": {
      "main": [
        [
          {
            "node": "If Topic Exists",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Telegram2": {
      "main": [
        [
          {
            "node": "If",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "If Article": {
      "main": [
        [
          {
            "node": "Create HTML Article",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Do Nothing2",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Perplexity": {
      "main": [
        [
          {
            "node": "Success Response",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "gpt-4o-mini": {
      "ai_languageModel": [
        [
          {
            "node": "Create HTML Article",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Extract JSON": {
      "main": [
        [
          {
            "node": "Article",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "gpt-4o-mini1": {
      "ai_languageModel": [
        [
          {
            "node": "Perplexity Topic Agent",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "gpt-4o-mini2": {
      "ai_languageModel": [
        [
          {
            "node": "Extract JSON",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "gpt-4o-mini3": {
      "ai_languageModel": [
        [
          {
            "node": "Improve Users Topic",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "gpt-4o-mini5": {
      "ai_languageModel": [
        [
          {
            "node": "Basic LLM Chain",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Basic LLM Chain": {
      "main": [
        [
          {
            "node": "Respond to Webhook",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "If Topic Exists": {
      "main": [
        [
          {
            "node": "Improve Users Topic",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Do Nothing1",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Create HTML Article": {
      "main": [
        [
          {
            "node": "If HTML",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Improve Users Topic": {
      "main": [
        [
          {
            "node": "If Topic",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Perplexity Topic Agent": {
      "main": [
        [
          {
            "node": "If2",
            "type": "main",
            "index": 0
          },
          {
            "node": "Chat Id",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Execute Workflow Trigger": {
      "main": [
        [
          {
            "node": "Chat Id1",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "No Operation, do nothing": {
      "main": [
        [
          {
            "node": "Error Response",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Structured Output Parser1": {
      "ai_outputParser": [
        [
          {
            "node": "Extract JSON",
            "type": "ai_outputParser",
            "index": 0
          }
        ]
      ]
    },
    "Call Perplexity Researcher": {
      "ai_tool": [
        [
          {
            "node": "Perplexity Topic Agent",
            "type": "ai_tool",
            "index": 0
          }
        ]
      ]
    }
  }
}