Create an automated operational incident tracking database

Managing operational incidents is critical to ensuring business continuity and quality of service. An automated database makes it possible to log each incident quickly, assign it to the right team, and track its resolution step by step. Notifications keep everyone involved informed of progress, while statistical analyses make it easier to identify recurring causes and optimize processes. Structured archiving guarantees full traceability for audits and post-incident reviews. By automating this tracking, the company limits downtime, capitalizes on field feedback, and improves its resilience to the unexpected.
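The export below is an n8n workflow in its standard JSON form: a "nodes" array (each entry carrying an "id", "name", "type", "position" and "parameters") plus a "connections" map that wires node names to their downstream targets. As a quick orientation aid, here is a minimal Node.js sketch, assuming the export has been saved to a hypothetical "workflow.json", that lists every node and the main data flow between them:

// Sketch: inspect an n8n workflow export (nodes + connections).
// Assumes the JSON below has been saved to workflow.json (hypothetical path).
const fs = require('fs');

const workflow = JSON.parse(fs.readFileSync('workflow.json', 'utf8'));

// List every node with its type.
for (const node of workflow.nodes) {
  console.log(`${node.name}  (${node.type})`);
}

// Show the main data flow: which node feeds which.
for (const [source, outputs] of Object.entries(workflow.connections)) {
  for (const branch of outputs.main ?? []) {
    for (const target of branch) {
      console.log(`${source} -> ${target.node}`);
    }
  }
}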

				
{
  "id": "h2uiciRa1D3ntSTT",
  "meta": {
    "instanceId": "ddfdf733df99a65c801a91865dba5b7c087c95cc22a459ff3647e6deddf2aee6"
  },
  "name": "My workflow",
  "tags": [],
  "nodes": [
    {
      "id": "4b885b7d-0976-4dd3-bc1c-091ab0dff437",
      "name": "Split Topics into Items",
      "type": "n8n-nodes-base.code",
      "position": [
        420,
        420
      ],
      "parameters": {
        "jsCode": "// Input data (from $json.Topics)nconst topicsString = $json.Topics;nn// Split the string by newlines and trim whitespacenconst topicsArray = topicsString.split('\n').map(topic => topic.trim());nn// Create an array of items for each topicnconst items = topicsArray.map(topic => {n  return { json: { Topic: topic } };n});nn// Output the new array of itemsnreturn items;n"
      },
      "typeVersion": 2
    },
    {
      "id": "935d0266-feda-48cb-b441-b4da19d8b163",
      "name": "Search Posts",
      "type": "n8n-nodes-base.reddit",
      "position": [
        620,
        420
      ],
      "parameters": {
        "keyword": "meta",
        "location": "allReddit",
        "operation": "search",
        "returnAll": true,
        "additionalFields": {
          "sort": "hot"
        }
      },
      "typeVersion": 1
    },
    {
      "id": "cea577c8-c025-4132-926a-74d6946d81b8",
      "name": "Upvotes Requirement Filtering",
      "type": "n8n-nodes-base.if",
      "position": [
        800,
        420
      ],
      "parameters": {
        "options": {},
        "conditions": {
          "options": {
            "version": 2,
            "leftValue": "",
            "caseSensitive": true,
            "typeValidation": "strict"
          },
          "combinator": "and",
          "conditions": [
            {
              "id": "f767f7a8-a2e8-4566-be80-bd735249e069",
              "operator": {
                "type": "number",
                "operation": "gt"
              },
              "leftValue": "={{ $json.ups }}",
              "rightValue": 100
            },
            {
              "id": "3af82bef-5a78-4e6e-91ef-a5bd0141c87f",
              "operator": {
                "name": "filter.operator.equals",
                "type": "string",
                "operation": "equals"
              },
              "leftValue": "={{ $json.post_hint }}",
              "rightValue": "link"
            },
            {
              "id": "980a84ed-d640-47a7-b49a-bf638e811f20",
              "operator": {
                "type": "string",
                "operation": "notContains"
              },
              "leftValue": "={{ $json.url }}",
              "rightValue": "bsky.app"
            }
          ]
        }
      },
      "typeVersion": 2.2
    },
    {
      "id": "eec2d833-9a63-4cf6-a6bd-56b300ede5e0",
      "name": "Set Reddit Posts",
      "type": "n8n-nodes-base.set",
      "position": [
        1040,
        420
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "8d5ae4fa-2f54-48d7-8f61-766f4ecf9d96",
              "name": "Title",
              "type": "string",
              "value": "={{ $json.title }}"
            },
            {
              "id": "8eb33a06-d8e7-4eea-bcd3-f956e20e06e6",
              "name": "Subreddit",
              "type": "string",
              "value": "={{ $json.subreddit }}"
            },
            {
              "id": "5ff8c76e-a8d5-4f76-a7d0-faa69b7960e4",
              "name": "Upvotes",
              "type": "string",
              "value": "={{ $json.ups }}"
            },
            {
              "id": "05a2b453-0e29-4a81-8f10-5934ae721f64",
              "name": "Comments",
              "type": "string",
              "value": "={{ $json.num_comments }}"
            },
            {
              "id": "78f73e89-19a7-4dd5-9db0-ead55dfd5606",
              "name": "Reddit URL",
              "type": "string",
              "value": "=https://www.reddit.com{{ $json.permalink }}"
            },
            {
              "id": "6f92bce7-2dc5-4dfd-b216-efc12c5411bb",
              "name": "URL",
              "type": "string",
              "value": "={{ $json.url }}"
            },
            {
              "id": "0b20d78c-1d6b-4c84-99ef-978ee39fd35e",
              "name": "Is_URL",
              "type": "string",
              "value": "={{ $json.post_hint }}"
            },
            {
              "id": "489807f6-25ef-47d5-bd47-711ca75dedea",
              "name": "Date",
              "type": "string",
              "value": "={{ new Date($json.created * 1000).toISOString().split('T')[0] }}"
            },
            {
              "id": "0a9fb817-bfb7-4ea7-9182-1eddc404035f",
              "name": "Post ID",
              "type": "string",
              "value": "={{ $json.id }}"
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "9b45abb0-866a-47f4-b2b3-03e4cf41c988",
      "name": "Remove Duplicates",
      "type": "n8n-nodes-base.code",
      "position": [
        1220,
        420
      ],
      "parameters": {
        "jsCode": "// Get all input itemsnconst inputItems = $input.all();nn// Create a Map to store the most upvoted item for each URLnconst uniqueItemsMap = new Map();nnfor (const item of inputItems) {n  const url = item.json.URL;n  n  // Skip items where URL contains "redd.it"n  if (url && url.includes("redd.it")) {n    continue;n  }n  n  const upvotes = parseInt(item.json.Upvotes, 10) || 0; // Ensure upvotes is a numbernn  if (!uniqueItemsMap.has(url)) {n    // Add the first occurrence of the URLn    uniqueItemsMap.set(url, item);n  } else {n    // Compare upvotes and keep the item with the most upvotesn    const existingItem = uniqueItemsMap.get(url);n    const existingUpvotes = parseInt(existingItem.json.Upvotes, 10) || 0;n    if (upvotes > existingUpvotes) {n      uniqueItemsMap.set(url, item);n    }n  }n}nn// Extract all unique itemsnconst uniqueItems = Array.from(uniqueItemsMap.values());nn// Return each unique item as a separate outputnreturn uniqueItems;"
      },
      "typeVersion": 2
    },
    {
      "id": "39672fd4-3f8c-4cdb-acd5-bb862ae5eddd",
      "name": "Loop Over Items",
      "type": "n8n-nodes-base.splitInBatches",
      "position": [
        40,
        660
      ],
      "parameters": {
        "options": {}
      },
      "typeVersion": 3
    },
    {
      "id": "ad70aec7-a610-42f8-b87c-0d3dbee00e7b",
      "name": "Get Comments",
      "type": "n8n-nodes-base.reddit",
      "position": [
        480,
        640
      ],
      "parameters": {
        "postId": "={{ $json["Post ID"] }}",
        "resource": "postComment",
        "operation": "getAll",
        "subreddit": "={{ $json.Subreddit }}"
      },
      "typeVersion": 1
    },
    {
      "id": "af7f0b35-4250-49e5-afa7-608155df0fd5",
      "name": "Extract Top Comments",
      "type": "n8n-nodes-base.code",
      "position": [
        660,
        640
      ],
      "parameters": {
        "jsCode": "/**n * n8n Code Node for filtering top 30 Reddit-style comments by score/upsn * and ensuring replies are included in the comment tree.n * Excludes deleted comments.n */nn// Get all input itemsnconst inputItems = $input.all();nconst commentsArray = inputItems.flatMap(item => item.json);nn/**n * Checks if a comment is deleted.n * @param {Object} commentObj - The comment to check.n * @returns {boolean} - True if the comment is deleted, false otherwise.n */nfunction isDeletedComment(commentObj) {n  return commentObj.author === "[deleted]" && commentObj.body === "[removed]";n}nn// Function to recursively flatten a comment and its repliesnfunction flattenCommentTree(commentObj) {n  // Skip deleted commentsn  if (isDeletedComment(commentObj)) {n    return null;n  }nn  const { body, ups, score, replies, author } = commentObj;nn  // Calculate scoren  const finalScore = typeof ups === 'number' ? ups : (score || 0);nn  // Process commentn  const flatComment = {n    body: body || '',n    score: finalScore,n    author: author || 'Unknown',n    replies: [],n  };nn  // Process repliesn  if (n    replies &&n    replies.data &&n    Array.isArray(replies.data.children)n  ) {n    flatComment.replies = replies.data.childrenn      .filter(child => child.kind === 't1' && child.data)n      .map(child => flattenCommentTree(child.data)) // Recursively flatten repliesn      .filter(reply => reply !== null); // Filter out null replies (deleted comments)n  }nn  return flatComment;n}nn// Flatten all comments, preserving hierarchynconst allComments = commentsArrayn  .map(flattenCommentTree)n  .filter(comment => comment !== null); // Filter out null comments (deleted comments)nn// Flatten the hierarchy to a list for scoring and filteringnfunction flattenForScoring(tree) {n  const result = [];n  tree.forEach(comment => {n    result.push(comment); // Add current commentn    if (comment.replies && comment.replies.length > 0) {n      result.push(...flattenForScoring(comment.replies)); // Add replies recursivelyn    }n  });n  return result;n}nn// Flatten the hierarchy and sort by scorenconst flatList = flattenForScoring(allComments);nflatList.sort((a, b) => b.score - a.score);nn// Select the top 30 commentsnconst top30 = flatList.slice(0, 30);nn// Rebuild the hierarchy from the top 30nfunction filterHierarchy(tree, allowedBodies) {n  return treen    .filter(comment => allowedBodies.has(comment.body))n    .map(comment => ({n      ...comment,n      replies: filterHierarchy(comment.replies || [], allowedBodies), // Recurse for repliesn    }));n}nnconst allowedBodies = new Set(top30.map(comment => comment.body));nconst filteredHierarchy = filterHierarchy(allComments, allowedBodies);nn// Return in n8n formatnreturn [n  {n    json: {n      comments: filteredHierarchy,n    },n  },n];"
      },
      "executeOnce": true,
      "typeVersion": 2
    },
    {
      "id": "e709d131-b8fa-42d5-bc66-479cb13574e6",
      "name": "Format Comments",
      "type": "n8n-nodes-base.code",
      "position": [
        840,
        640
      ],
      "parameters": {
        "jsCode": "/**n * Convert comments data into Markdown format with accurate hierarchy visualization.n * Excludes deleted comments.n */nn// Input data (replace this with your actual comments data)nconst data = $input.all()[0].json.comments;nn/**n * Checks if a comment is deleted.n * @param {Object} comment - The comment to check.n * @returns {boolean} - True if the comment is deleted, false otherwise.n */nfunction isDeletedComment(comment) {n  return comment.author === "[deleted]" && comment.body === "[removed]";n}nn/**n * Filters out deleted comments and their replies.n * @param {Array} comments - Array of comments.n * @returns {Array} - Filtered array of comments.n */nfunction filterDeletedComments(comments) {n  if (!comments || !comments.length) return [];n  n  return commentsn    .filter(comment => !isDeletedComment(comment))n    .map(comment => {n      if (comment.replies && comment.replies.length > 0) {n        comment.replies = filterDeletedComments(comment.replies);n      }n      return comment;n    });n}nn/**n * Recursive function to format comments and replies into Markdown.n * @param {Array} comments - Array of comments.n * @param {number} level - Current level of the comment hierarchy for indentation.n * @returns {string} - Formatted Markdown string.n */nfunction formatCommentsToMarkdown(comments, level = 0) {n  let markdown = '';n  const indent = '  '.repeat(level); // Indentation for repliesnn  for (const comment of comments) {n    // Format the main commentn    markdown += `${indent}- **Author**: ${comment.author}\n`;n    markdown += `${indent}  **Score**: ${comment.score}\n`;n    markdown += `${indent}  **Comment**:\n\n`;n    markdown += `${indent}    > ${comment.body.replace(/\n/g, `\n${indent}    > `)}\n\n`;nn    // Process replies if they existn    if (comment.replies && comment.replies.length > 0) {n      markdown += `${indent}  **Replies:**\n\n`;n      markdown += formatCommentsToMarkdown(comment.replies, level + 1);n    }n  }nn  return markdown;n}nn// Filter out deleted comments firstnconst filteredData = filterDeletedComments(data);nn// Generate the Markdownnconst markdownOutput = formatCommentsToMarkdown(filteredData);nn// Return the Markdown as an output for n8nnreturn [n  {n    json: {n      markdown: markdownOutput,n    },n  },n];"
      },
      "typeVersion": 2
    },
    {
      "id": "284d511b-7d80-46ba-add0-6ff59aff176c",
      "name": "Set for Loop",
      "type": "n8n-nodes-base.set",
      "position": [
        280,
        640
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "ac7c257d-544f-44e5-abc6-d0436f12517f",
              "name": "Title",
              "type": "string",
              "value": "={{ $json.Title }}"
            },
            {
              "id": "fb22c6a5-a809-4588-9f6e-49c3e11f5ed2",
              "name": "Subreddit",
              "type": "string",
              "value": "={{ $json.Subreddit }}"
            },
            {
              "id": "4bfcc849-539b-48cd-856f-1b7f3be113ed",
              "name": "Upvotes",
              "type": "string",
              "value": "={{ $json.Upvotes }}"
            },
            {
              "id": "9a3a3a2a-8f43-4419-9203-bc83f5b0c0bc",
              "name": "Comments",
              "type": "string",
              "value": "={{ $json.Comments }}"
            },
            {
              "id": "2d31f321-fbdc-43d3-8a92-a78f418f112f",
              "name": "Reddit URL",
              "type": "string",
              "value": "={{ $json["Reddit URL"] }}"
            },
            {
              "id": "f224323a-79ef-4f66-ae10-d77c8fddbccd",
              "name": "URL",
              "type": "string",
              "value": "={{ $json.URL }}"
            },
            {
              "id": "dbbc5a98-b5e2-45bb-bc18-2c438522d683",
              "name": "Date",
              "type": "string",
              "value": "={{ $json.Date }}"
            },
            {
              "id": "837cae4e-858a-48ba-bab9-bb66a2e51837",
              "name": "Post ID",
              "type": "string",
              "value": "={{ $json["Post ID"] }}"
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "b88fad49-edc4-4749-8984-a8e81f6a2899",
      "name": "Get News Content",
      "type": "n8n-nodes-base.httpRequest",
      "maxTries": 5,
      "position": [
        1360,
        640
      ],
      "parameters": {
        "url": "=https://r.jina.ai/{{ $('Set for Loop').first().json.URL }}",
        "options": {},
        "sendHeaders": true,
        "headerParameters": {
          "parameters": [
            {
              "name": "Accept",
              "value": "text/event-stream"
            },
            {
              "name": "Authorization",
              "value": "=Bearer {{ $('Set Data').first().json['Jina API Key'] }}"
            },
            {
              "name": "X-Retain-Images",
              "value": "none"
            },
            {
              "name": "X-Respond-With",
              "value": "readerlm-v2"
            },
            {
              "name": "X-Remove-Selector",
              "value": "header, footer, sidebar"
            }
          ]
        }
      },
      "retryOnFail": true,
      "typeVersion": 4.2,
      "waitBetweenTries": 5000
    },
    {
      "id": "26a8906c-2966-4ebf-8465-18a48b359f7d",
      "name": "Set Final Report",
      "type": "n8n-nodes-base.set",
      "position": [
        2400,
        640
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "0782b9a6-d659-4695-8696-6ff0e574f77a",
              "name": "Final Report",
              "type": "string",
              "value": "=// Reddit Metrics:nPost Link: {{ $('Set for Loop').first().json['Reddit URL'] }}nUpvotes: {{ $('Set for Loop').first().json.Upvotes }}nComments: {{ $('Set for Loop').first().json.Comments }}nn# FINAL REPORTn{{ $json.text.replace(/[\s\S]*/, '').replace(/[\s\S]*/, '') }}nn# RAW ANALYSIS DATA (FOR FURTHER ANALYSIS)nn## NEWS CONTENT ANALYSISn{{ $('News Analysis').item.json.text.replace(/[\s\S]*/, '').replace(/[\s\S]*/, '') }}nn## REDDIT COMMENTS ANALYSISn{{ $('Comments Analysis').first().json.text.replace(/[\s\S]*/, '').replace(/[\s\S]*/, '') }}"
            }
          ]
        }
      },
      "typeVersion": 3.4
    },
    {
      "id": "219ccb20-1b36-4c70-866a-0fded9c9b9fd",
      "name": "Convert to File",
      "type": "n8n-nodes-base.convertToFile",
      "position": [
        2580,
        640
      ],
      "parameters": {
        "options": {
          "encoding": "utf8",
          "fileName": "={{ $json["Final Report"].match(/Headline:\s*["u201c](.*?)["u201d]/i)?.[1] }}.txt"
        },
        "operation": "toText",
        "sourceProperty": "Final Report"
      },
      "typeVersion": 1.1
    },
    {
      "id": "427d5a2d-6927-4427-9902-e033736410ca",
      "name": "Compress files",
      "type": "n8n-nodes-base.compression",
      "position": [
        600,
        940
      ],
      "parameters": {
        "fileName": "=Trending_Stories_{{$now.format("yyyy_MM_dd")}}_{{Math.floor(Math.random() * 10000).toString().padStart(4, '0')}}.zip",
        "operation": "compress",
        "outputFormat": "zip",
        "binaryPropertyName": "={{ $json["binary_keys"] }}",
        "binaryPropertyOutput": "files_combined"
      },
      "typeVersion": 1
    },
    {
      "id": "7f6ef656-0f76-433f-95a8-782de21caa53",
      "name": "Merge Binary Files",
      "type": "n8n-nodes-base.code",
      "position": [
        420,
        940
      ],
      "parameters": {
        "jsCode": "// Get the first (and only) item since you're using Aggregatenconst item = items[0];nlet binary_keys = [];nn// Generate the list of binary keys from your aggregated itemnfor (let key in item.binary) {n    binary_keys.push(key);n}nnreturn [{n    json: {n        binary_keys: binary_keys.join(',')n    },n    binary: item.binary  // Keep the original binary datan}];"
      },
      "executeOnce": true,
      "typeVersion": 2
    },
    {
      "id": "20411444-5ce8-452b-869c-97928200b205",
      "name": "Google Drive6",
      "type": "n8n-nodes-base.googleDrive",
      "position": [
        780,
        940
      ],
      "parameters": {
        "driveId": {
          "__rl": true,
          "mode": "list",
          "value": "My Drive",
          "cachedResultUrl": "https://drive.google.com/drive/my-drive",
          "cachedResultName": "My Drive"
        },
        "options": {},
        "folderId": {
          "__rl": true,
          "mode": "id",
          "value": "1HCTq5YupRHcgRd7FIlSeUMMjqqOZ4Q9x"
        },
        "inputDataFieldName": "files_combined"
      },
      "typeVersion": 3
    },
    {
      "id": "2eb8112a-8655-4f06-998f-a9ffef74d72a",
      "name": "Google Drive7",
      "type": "n8n-nodes-base.googleDrive",
      "position": [
        960,
        940
      ],
      "parameters": {
        "fileId": {
          "__rl": true,
          "mode": "id",
          "value": "={{ $json.id }}"
        },
        "options": {},
        "operation": "share",
        "permissionsUi": {
          "permissionsValues": {
            "role": "reader",
            "type": "anyone"
          }
        }
      },
      "typeVersion": 3
    },
    {
      "id": "7f4e5e0c-49cc-4024-b62b-f7e099d4867d",
      "name": "Send files to Mattermost3",
      "type": "n8n-nodes-base.httpRequest",
      "position": [
        1140,
        940
      ],
      "parameters": {
        "url": "https://team.YOUR_DOMAIN.com/hooks/REPLACE_THIS_WITH_YOUR_HOOK_ID",
        "method": "POST",
        "options": {},
        "jsonBody": "={n    "channel": "digital-pr",n    "username": "NotifyBot",n    "icon_url": "https://team.YOUR_DOMAIN.com/api/v4/users/YOUR_USER_ID/image?_=0",n    "text": "@channel New trending stories have been generated ud83cudf89\n\n\n You can download it here: https://drive.google.com/file/d/{{ $('Google Drive6').item.json.id }}/view?usp=drive_link"n}",
        "sendBody": true,
        "specifyBody": "json"
      },
      "typeVersion": 4.2
    },
    {
      "id": "3c47f58d-8006-4565-b220-033d71239126",
      "name": "Aggregate",
      "type": "n8n-nodes-base.aggregate",
      "position": [
        260,
        940
      ],
      "parameters": {
        "options": {
          "includeBinaries": true
        },
        "aggregate": "aggregateAllItemData"
      },
      "executeOnce": false,
      "typeVersion": 1
    },
    {
      "id": "5611cdce-91ae-4037-9479-3b513eb07b77",
      "name": "Schedule Trigger",
      "type": "n8n-nodes-base.scheduleTrigger",
      "position": [
        40,
        420
      ],
      "parameters": {
        "rule": {
          "interval": [
            {
              "field": "weeks",
              "triggerAtDay": [
                1
              ],
              "triggerAtHour": 6
            }
          ]
        }
      },
      "typeVersion": 1.2
    },
    {
      "id": "5cfeb9ea-45b6-4a0a-8702-34539738f280",
      "name": "Anthropic Chat Model",
      "type": "@n8n/n8n-nodes-langchain.lmChatAnthropic",
      "position": [
        960,
        800
      ],
      "parameters": {
        "model": "=claude-3-7-sonnet-20250219",
        "options": {
          "temperature": 0.5,
          "maxTokensToSample": 8096
        }
      },
      "typeVersion": 1.2
    },
    {
      "id": "b11b2fa6-f92a-4791-b255-51ce1b07181b",
      "name": "Anthropic Chat Model1",
      "type": "@n8n/n8n-nodes-langchain.lmChatAnthropic",
      "position": [
        1640,
        800
      ],
      "parameters": {
        "model": "=claude-3-7-sonnet-20250219",
        "options": {
          "temperature": 0.5,
          "maxTokensToSample": 8096
        }
      },
      "typeVersion": 1.2
    },
    {
      "id": "ffa45242-1dd4-46be-bacc-55bde63d0227",
      "name": "Keep Last",
      "type": "n8n-nodes-base.code",
      "position": [
        1540,
        640
      ],
      "parameters": {
        "jsCode": "// Extract input data from n8nnconst inputData = $json.data;nn// Ensure input is validnif (!inputData || typeof inputData !== 'string') {n    return [{ error: "Invalid input data" }];n}nn// Split the data into linesnlet lines = inputData.split("\n");nn// Extract only JSON entriesnlet jsonEntries = linesn    .map(line => line.trim()) // Remove spacesn    .filter(line => line.startsWith('data: {')) // Keep valid JSON objectsn    .map(line => line.replace('data: ', '')); // Remove the prefixnn// Ensure there are entriesnif (jsonEntries.length === 0) {n    return [{ error: "No valid JSON entries found" }];n}nn// Get only the LAST entrynlet lastEntry = jsonEntries[jsonEntries.length - 1];nntry {n    // Parse the last entry as JSONn    let jsonObject = JSON.parse(lastEntry);nn    // Extract title and contentn    return [{n        title: jsonObject.title || "No Title",n        content: jsonObject.content || "No Content"n    }];n} catch (error) {n    return [{ error: "JSON parsing failed", raw: lastEntry }];n}"
      },
      "typeVersion": 2
    },
    {
      "id": "956672cc-8ceb-4a2c-93e8-bad2b9497043",
      "name": "Anthropic Chat Model2",
      "type": "@n8n/n8n-nodes-langchain.lmChatAnthropic",
      "position": [
        1980,
        800
      ],
      "parameters": {
        "model": "=claude-3-7-sonnet-20250219",
        "options": {
          "temperature": 0.5,
          "maxTokensToSample": 8096
        }
      },
      "typeVersion": 1.2
    },
    {
      "id": "b55df80f-dbdf-4d8d-8b62-93533d1fb6ef",
      "name": "Sticky Note",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        0,
        0
      ],
      "parameters": {
        "width": 1020,
        "height": 340,
        "content": "## Automatic Weekly Digital PR Stories SuggestionsnA weekly automated system that identifies trending news on Reddit, evaluates public sentiment through comment analysis, extracts key information from source articles, and generates strategic angles for potential digital PR campaigns. This workflow delivers curated, sentiment-analyzed news opportunities based on current social media trends. The final comprehensive report is automatically uploaded to Google Drive for storage and simultaneously shared with team members via a dedicated Mattermost channel for immediate collaboration.nn### Set up instructions:n1. Add a new credential "Reddit OAuth2 API" by following this [guide](https://docs.n8n.io/integrations/builtin/credentials/reddit/). Assign your Reddit OAuth2 account to the Reddit nodes.n2. Add a new credential "Anthropic Account" by following this [guide]n(https://docs.n8n.io/integrations/builtin/credentials/anthropic/). Assign your Anthropic account to the nodes "Anthropic Chat Model".n3. Add a new credential "Google Drive OAuth2 API" by following this [guide](https://docs.n8n.io/integrations/builtin/credentials/google/oauth-single-service/). Assign your Google Drive OAuth2 account to the node "Gmail Drive" nodes.n4. Set your interested topics (one per line) and Jina API key in the "Set Data" node. You can obtain your Jina API key [here](https://jina.ai/api-dashboard/key-manager).n5. Update your Mattermost information (Mattermost instance URL, Webhook ID and Channel) in the Mattermost node. You can follow this [guide](https://developers.mattermost.com/integrate/webhooks/incoming/).n6. You can adjust the cron if needed. It currently run every Monday at 6am."
      },
      "typeVersion": 1
    },
    {
      "id": "07f1e0ff-892c-4aaf-ad77-e636138570a1",
      "name": "Comments Analysis",
      "type": "@n8n/n8n-nodes-langchain.chainLlm",
      "position": [
        1020,
        640
      ],
      "parameters": {
        "text": "=Please analyze the following Reddit post and its comments:nnCONTEXT:nnPost Title: {{ $('Set for Loop').first().json.Title.replace(/\"/g, '\\\"') }}nPost Date: {{ $('Set for Loop').first().json.Date }}nShared URL: {{ $('Set for Loop').first().json.URL }}nTotal Upvotes: {{ $('Set for Loop').first().json.Upvotes }}nTotal Comments: {{ $('Set for Loop').first().json.Comments }}nnnComment Thread Data:nn{{ $json.markdown.replace(/\"/g, '\\\"') }}nnnAnalyze this discussion through these dimensions:nn1. CONTENT CONTEXT:n   u2022 Main topic/subject mattern   u2022 Why this is trending (based on engagement metrics)n   u2022 News cycle timing implicationsn   u2022 Relationship to broader industry/market trendsnn2. SENTIMENT ANALYSIS:n   u2022 Overall sentiment score (Scale: -5 to +5)n   u2022 Primary emotional undertonesn   u2022 Sentiment progression in discussion threadsn   u2022 Consensus vs. controversial viewpointsn   u2022 Changes in sentiment based on comment depthnn3. ENGAGEMENT INSIGHTS:n   u2022 Most upvoted perspectives (with exact scores)n   u2022 Controversial discussion pointsn   u2022 Comment chains with deepest engagementn   u2022 Types of responses generating most interactionnn4. NARRATIVE MAPPING:n   u2022 Dominant narrativesn   u2022 Counter-narrativesn   u2022 Emerging sub-themesn   u2022 Unexplored anglesn   u2022 Missing perspectivesnnOutput Format (Place inside XML tags ):nnPOST OVERVIEW:nTitle: [Original title]nEngagement Metrics:nu2022 Upvotes: [count]nu2022 Comments: [count]nu2022 Virality Assessment: [analysis of why this gained traction]nnSENTIMENT ANALYSIS:nu2022 Overall Score: [numerical score with explanation]nu2022 Sentiment Distribution: [percentage breakdown]nu2022 Key Emotional Drivers:n  - Primary: [emotion]n  - Secondary: [emotion]n  - Notable Shifts: [pattern analysis]nnTOP NARRATIVES:n[List 3-5 dominant narratives]nFor each narrative:nu2022 Key Pointsnu2022 Supporting Comments [with scores]nu2022 Counter-Argumentsnu2022 Engagement LevelnnAUDIENCE INSIGHTS:nu2022 Knowledge Level: [assessment]nu2022 Pain Points: [list key concerns]nu2022 Misconceptions: [list with evidence]nu2022 Information Gaps: [identified missing information]nnPR IMPLICATIONS:n1. Story Opportunities:n   u2022 [List potential angles]n   u2022 [Supporting evidence from comments]nn2. Risk Factors:n   u2022 [List potential PR risks]n   u2022 [Supporting evidence from comments]nn3. Narrative Recommendations:n   u2022 [Strategic guidance for messaging]n   u2022 [Areas to address/avoid]nnNEXT STEPS CONSIDERATIONS:nu2022 Key data points for content analysisnu2022 Suggested focus areas for PR story developmentnu2022 Critical elements to address in messagingnu2022 Potential expert perspectives needednnMETA INSIGHTS:nu2022 Pattern connections to similar discussionsnu2022 Unique aspects of this conversationnu2022 Viral elements to notenu2022 Community-specific nuancesnnFocus on extracting insights that will:n1. Inform the subsequent content analysis stepn2. Guide PR story developmentn3. Identify unique angles and opportunitiesn4. Highlight potential risks and challengesn5. Suggest effective narrative approachesnnNote: Prioritize insights that will be valuable for the following workflow steps of content analysis and PR story development. Flag any particularly unique or compelling elements that could inform breakthrough story angles.",
        "messages": {
          "messageValues": [
            {
              "message": "=You are an expert Social Media Intelligence Analyst specialized in Reddit discourse analysis. Your task is to analyze Reddit posts and comments to extract meaningful patterns, sentiments, and insights for PR strategy development."
            }
          ]
        },
        "promptType": "define"
      },
      "typeVersion": 1.5
    },
    {
      "id": "4cdc4e49-6aae-4e6a-844e-c3c339638950",
      "name": "News Analysis",
      "type": "@n8n/n8n-nodes-langchain.chainLlm",
      "position": [
        1720,
        640
      ],
      "parameters": {
        "text": "=CONTEXT IMPORTANCE:nReddit data is used as a critical indicator of news story potential because:nu2022 High upvotes indicate strong public interestnu2022 Comment volume shows discussion engagementnu2022 Comment sentiment reveals public perceptionnu2022 Discussion threads expose knowledge gaps and controversiesnu2022 Community reaction predicts potential viral spreadnu2022 Sub-discussions highlight unexplored anglesnu2022 Engagement patterns suggest story longevitynnINPUT CONTEXT:nNews URL: {{ $('Set for Loop').first().json.URL }}nNews Content:nn{{ $json.content }}nnReddit Metrics:nu2022 Post Title (Understanding how the story was shared): {{ $('Set for Loop').first().json.Title }}nu2022 Upvotes (Indicator of initial interest): {{ $('Set for Loop').first().json.Upvotes }}nu2022 Total Comments (Engagement level): {{ $('Set for Loop').first().json.Comments }}nReddit Sentiment Analysis:nn{{ $('Comments Analysis').first().json.text.replace(/[\s\S]*/, '').replace(/[\s\S]*/, '') }}nnnFor each story, analyze through these dimensions:nn1. POPULARITY ASSESSMENT:n   A. Reddit Performance:n      u2022 Upvote ratio and volumen      u2022 Comment engagement raten      u2022 Discussion quality metricsn      u2022 Viral spread indicatorsn      n   B. Audience Reception:n      u2022 Initial reaction patternsn      u2022 Discussion evolutionn      u2022 Community consensus vs. debaten      u2022 Information seeking behaviornn1. CONTENT ANALYSIS:n   A. Core Story Elements:n      u2022 Central narrativen      u2022 Key stakeholdersn      u2022 Market implicationsn      u2022 Industry impactn      n   B. Technical Analysis:n      u2022 Writing stylen      u2022 Data presentationn      u2022 Expert citationsn      u2022 Supporting evidencenn2. SOCIAL PROOF INTEGRATION:n   A. Engagement Metrics:n      u2022 Reddit performance metricsn      u2022 Discussion quality indicatorsn      u2022 Viral spread patternsn      n   B. Sentiment Patterns:n      u2022 Primary audience reactionsn      u2022 Controversial elementsn      u2022 Support vs. criticism ration      u2022 Knowledge gaps identifiednn3. NARRATIVE OPPORTUNITY MAPPING:n   A. Current Coverage:n      u2022 Main angles coveredn      u2022 Supporting argumentsn      u2022 Counter-argumentsn      u2022 Expert perspectivesn      n   B. Gap Analysis:n      u2022 Unexplored perspectivesn      u2022 Missing stakeholder voicesn      u2022 Underutilized data pointsn      u2022 Potential counter-narrativesnnOUTPUT FORMAT (Place inside XML tags ):nnSTORY OVERVIEW:nTitle: [Most compelling angle]nURL: [Source]nCategory: [Industry/Topic]nnCONTENT SUMMARY:nTLDR: [3-5 sentences emphasizing viral potential]nCore Message: [One-line essence]nnKEY POINTS:nu2022 [Strategic point 1]nu2022 [Strategic point 2]nu2022 [Continue as needed]nnSOCIAL PROOF ANALYSIS:nEngagement Metrics:nu2022 Reddit Performance: [Metrics + Interpretation]nu2022 Discussion Quality: [Analysis of conversation depth]nu2022 Sentiment Distribution: [From sentiment analysis]nnVIRAL ELEMENTS:n1. Current Drivers:n   u2022 [What's making it spread]n   u2022 [Why people are engaging]n   u2022 [Emotional triggers identified]nn2. Potential Amplifiers:n   u2022 [Untapped viral elements]n   u2022 [Engagement opportunities]n   u2022 [Emotional hooks not yet used]nnNARRATIVE OPPORTUNITIES:n1. Unexplored Angles:n   u2022 [Angle 1 + Why it matters]n   u2022 [Angle 2 + Why it matters]n   u2022 [Angle 3 + Why it matters]nn2. 
Content Gaps:n   u2022 [Missing perspectives]n   u2022 [Underutilized data]n   u2022 [Stakeholder voices needed]nn3. Controversy Points:n   u2022 [Debate opportunities]n   u2022 [Conflicting viewpoints]n   u2022 [Areas of misconception]nnSTRATEGIC RECOMMENDATIONS:n1. Immediate Opportunities:n   u2022 [Quick-win suggestions]n   u2022 [Timing considerations]nn2. Development Needs:n   u2022 [Required research]n   u2022 [Expert input needed]n   u2022 [Data gaps to fill]nnPR POTENTIAL SCORE: [1-10 scale with explanation]nnFocus on elements that:nu2022 Show strong viral potentialnu2022 Address identified audience concernsnu2022 Fill gaps in current coveragenu2022 Leverage positive sentiment patternsnu2022 Address or utilize controversial elementsnu2022 Can be developed into unique anglesnnNote: Prioritize insights that:n1. Build on identified sentiment patternsn2. Address audience knowledge gapsn3. Leverage existing engagement driversn4. Can create breakthrough narrativesn5. Have immediate PR potential",
        "messages": {
          "messageValues": [
            {
              "message": "=You are an expert PR Content Analyst specialized in identifying viral potential in news stories. Your mission is to analyze news content while leveraging Reddit engagement metrics and sentiment data to evaluate news popularity and potential PR opportunities."
            }
          ]
        },
        "promptType": "define"
      },
      "typeVersion": 1.5
    },
    {
      "id": "c4905ed1-324a-4b08-a1f4-f5465229b56c",
      "name": "Stories Report",
      "type": "@n8n/n8n-nodes-langchain.chainLlm",
      "position": [
        2060,
        640
      ],
      "parameters": {
        "text": "=INPUT CONTEXT:nNews Analysis: nn{{ $json.text.replace(/[\s\S]*/, '').replace(/[\s\S]*/, '') }}nnReddit Metrics:nu2022 Post Title (Understanding how the story was shared): {{ $('Set for Loop').first().json.Title }}nu2022 Upvotes (Indicator of initial interest): {{ $('Set for Loop').first().json.Upvotes }}nu2022 Total Comments (Engagement level): {{ $('Set for Loop').first().json.Comments }}nReddit Sentiment Analysis:nn{{ $('Comments Analysis').first().json.text.replace(/[\s\S]*/, '').replace(/[\s\S]*/, '') }}nnnOUTPUT FORMAT (Place inside XML tags ):nnTREND ANALYSIS SUMMARY:nTopic: [News topic/category]nCurrent Coverage Status: [Overview of existing coverage]nAudience Reception: [From Reddit/sentiment analysis]nMarket Timing: [Why now is relevant]nnSTORY OPPORTUNITIES:nn1. FIRST-MOVER STORIES:n[For each story idea (2-3)]nnStory #1:nu2022 Headline: [Compelling title]nu2022 Hook: [One-line grabber]nu2022 Story Summary: [2-3 sentences]nu2022 Why It Works:n  - Audience Evidence: [From Reddit data]n  - Market Gap: [From news analysis]n  - Timing Advantage: [Why now]nu2022 Development Needs:n  - Research Required: [List]n  - Expert Input: [Specific needs]n  - Supporting Data: [What's needed]nu2022 Media Strategy:n  - Primary Targets: [Publications]n  - Secondary Targets: [Publications]n  - Exclusive Potential: [Yes/No + Rationale]nu2022 Success Metrics:n  - Coverage Goals: [Specific targets]n  - Engagement Expectations: [Based on Reddit data]nn2. TREND-AMPLIFIER STORIES:n[Same format as above for 2-3 stories]nnPRIORITY RANKING:n1. [Story Title] - Score: [X/10]n   u2022 Impact Potential: [Score + Rationale]n   u2022 Resource Requirements: [High/Medium/Low]n   u2022 Timeline: [Immediate/Short-term/Long-term]n   n2. [Continue for all stories]nnEXECUTION ROADMAP:nu2022 Immediate Actions (24-48 hours)nu2022 Week 1 Prioritiesnu2022 Risk Managementnu2022 Contingency PlansnnSTRATEGIC RECOMMENDATIONS:nu2022 Core Strategynu2022 Alternative Anglesnu2022 Resource Requirementsnu2022 Timeline ConsiderationsnnANALYTICAL FRAMEWORK:nn1. TREND VALIDATION:n   A. Story Performance Indicators:n      u2022 Reddit engagement metricsn      u2022 Public sentiment patternsn      u2022 Discussion qualityn      u2022 Viral elements identifiednn   B. Current Narrative Landscape:n      u2022 Dominant themes from news analysisn      u2022 Public perception gapsn      u2022 Controversial elementsn      u2022 Underserved perspectivesnn2. OPPORTUNITY MAPPING:n   A. Content Gap Analysis:n      u2022 Unexplored angles from news analysisn      u2022 Audience questions from commentsn      u2022 Missing expert perspectivesn      u2022 Data/research opportunitiesnn   B. Timing Assessment:n      u2022 News cycle positionn      u2022 Trend trajectoryn      u2022 Optimal launch windown      u2022 Competition considerationnnPR STORY OPPORTUNITIES:nGenerate 4-6 high-potential story ideas, categorized as:nnA. \"FIRST-MOVER\" OPPORTUNITIES (2-3 ideas):nFor each idea:nn1. Story Concept:n   u2022 Headlinen   u2022 Sub-headlinen   u2022 Key messagen   u2022 Unique selling pointnn2. Why It Works:n   u2022 Gap in current coveragen   u2022 Evidence from Reddit discussionsn   u2022 Sentiment analysis supportn   u2022 Market timing rationalenn3. Development Requirements:n   u2022 Required data/researchn   u2022 Expert perspectives neededn   u2022 Supporting elementsn   u2022 Potential challengesnn4. 
Media Strategy:n   u2022 Target publicationsn   u2022 Journalist appeal factorsn   u2022 Exclusive potentialn   u2022 Supporting assets needednnB. \"TREND-AMPLIFIER\" OPPORTUNITIES (2-3 ideas):n[Same structure as above, but focused on enhancing existing narratives]nnSTORY PRIORITIZATION MATRIX:nFor each story idea:n1. Impact Potential (1-10):n   u2022 Audience interest indicatorsn   u2022 Media appeal factorsn   u2022 Viral potentialn   u2022 Business valuenn2. Resource Requirements:n   u2022 Time to developn   u2022 Research needsn   u2022 Expert inputn   u2022 Asset creationnn3. Risk Assessment:n   u2022 Competition factorsn   u2022 Timing risksn   u2022 Narrative challengesn   u2022 Mitigation strategiesnnEXECUTION ROADMAP:n1. Immediate Actions (Next 24-48 hours):n   u2022 Priority research needsn   u2022 Expert outreachn   u2022 Data gatheringn   u2022 Asset developmentnn2. Development Timeline:n   u2022 Story development sequencen   u2022 Key milestonesn   u2022 Decision pointsn   u2022 Launch windowsnn3. Success Metrics:n   u2022 Coverage targetsn   u2022 Engagement goalsn   u2022 Share of voice objectivesn   u2022 Impact measurementsnnSTRATEGIC RECOMMENDATIONS:n1. Primary Strategy:n   u2022 Core approachn   u2022 Key differentiatorsn   u2022 Critical success factorsn   u2022 Risk mitigationnn2. Alternative Approaches:n   u2022 Backup anglesn   u2022 Pivot opportunitiesn   u2022 Alternative narrativesn   u2022 Contingency plansnnFocus on creating stories that:nu2022 Address identified audience interests (from Reddit data)nu2022 Fill gaps in current coveragenu2022 Leverage positive sentiment patternsnu2022 Solve for identified pain pointsnu2022 Offer unique, data-backed perspectivesnu2022 Present clear competitive advantagesnnBased on the provided news analysis, Reddit metrics, and sentiment analysis, please generate a comprehensive PR strategy report following the format above.",
        "messages": {
          "messageValues": [
            {
              "message": "=You are an elite PR Strategy Consultant specialized in crafting breakthrough story angles that capture media attention. Your mission is to analyze trending story patterns and develop high-impact PR opportunities based on comprehensive data analysis.nnCONTEXT IMPORTANCE:nThis analysis combines three critical data sources:n1. Reddit Engagement Data:n   u2022 Indicates public interest levelsn   u2022 Shows organic discussion patternsn   u2022 Reveals audience sentimentn   u2022 Highlights knowledge gapsn   u2022 Demonstrates viral potentialnn2. News Content Analysis:n   u2022 Provides core story elementsn   u2022 Shows current media anglesn   u2022 Identifies market implicationsn   u2022 Reveals coverage gapsn   u2022 Maps expert perspectivesnn3. Sentiment Analysis:n   u2022 Reveals public perceptionn   u2022 Identifies controversy pointsn   u2022 Shows emotional triggersn   u2022 Highlights audience concernsn   u2022 Indicates story longevitynnThis combined data helps us:nu2022 Validate story potentialnu2022 Identify unexplored anglesnu2022 Understand audience needsnu2022 Predict media interestnu2022 Craft compelling narratives"
            }
          ]
        },
        "promptType": "define"
      },
      "typeVersion": 1.5
    },
    {
      "id": "1379c60b-387c-4eba-a7c2-2bcb1cda48fd",
      "name": "Set Data",
      "type": "n8n-nodes-base.set",
      "position": [
        240,
        420
      ],
      "parameters": {
        "options": {},
        "assignments": {
          "assignments": [
            {
              "id": "b4da0605-b5e1-47e1-8e7e-00158ecaba33",
              "name": "Topics",
              "type": "string",
              "value": "=Donald TrumpnPolitics"
            },
            {
              "id": "d7602355-7082-4e98-a0b5-a400fade6dbc",
              "name": "Jina API Key",
              "type": "string",
              "value": "YOUR_API_KEY"
            }
          ]
        }
      },
      "typeVersion": 3.4
    }
  ],
  "active": false,
  "pinData": {},
  "settings": {
    "executionOrder": "v1"
  },
  "versionId": "dad1fb7a-599f-4b98-9461-8b27baa774d9",
  "connections": {
    "Set Data": {
      "main": [
        [
          {
            "node": "Split Topics into Items",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Aggregate": {
      "main": [
        [
          {
            "node": "Merge Binary Files",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Keep Last": {
      "main": [
        [
          {
            "node": "News Analysis",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Get Comments": {
      "main": [
        [
          {
            "node": "Extract Top Comments",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Search Posts": {
      "main": [
        [
          {
            "node": "Upvotes Requirement Filtering",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Set for Loop": {
      "main": [
        [
          {
            "node": "Get Comments",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Google Drive6": {
      "main": [
        [
          {
            "node": "Google Drive7",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Google Drive7": {
      "main": [
        [
          {
            "node": "Send files to Mattermost3",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "News Analysis": {
      "main": [
        [
          {
            "node": "Stories Report",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Compress files": {
      "main": [
        [
          {
            "node": "Google Drive6",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Stories Report": {
      "main": [
        [
          {
            "node": "Set Final Report",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Convert to File": {
      "main": [
        [
          {
            "node": "Loop Over Items",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Format Comments": {
      "main": [
        [
          {
            "node": "Comments Analysis",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Loop Over Items": {
      "main": [
        [
          {
            "node": "Aggregate",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Set for Loop",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Get News Content": {
      "main": [
        [
          {
            "node": "Keep Last",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Schedule Trigger": {
      "main": [
        [
          {
            "node": "Set Data",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Set Final Report": {
      "main": [
        [
          {
            "node": "Convert to File",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Set Reddit Posts": {
      "main": [
        [
          {
            "node": "Remove Duplicates",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Comments Analysis": {
      "main": [
        [
          {
            "node": "Get News Content",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Remove Duplicates": {
      "main": [
        [
          {
            "node": "Loop Over Items",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Merge Binary Files": {
      "main": [
        [
          {
            "node": "Compress files",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Anthropic Chat Model": {
      "ai_languageModel": [
        [
          {
            "node": "Comments Analysis",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Extract Top Comments": {
      "main": [
        [
          {
            "node": "Format Comments",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Anthropic Chat Model1": {
      "ai_languageModel": [
        [
          {
            "node": "News Analysis",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Anthropic Chat Model2": {
      "ai_languageModel": [
        [
          {
            "node": "Stories Report",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Split Topics into Items": {
      "main": [
        [
          {
            "node": "Search Posts",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Upvotes Requirement Filtering": {
      "main": [
        [
          {
            "node": "Set Reddit Posts",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  }
}
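Because the "Get News Content" node requests Jina Reader with an Accept: text/event-stream header, the response body arrives as a series of data: {...} lines, and only the final event carries the complete article; the "Keep Last" code node exists to pull that last event out. Here is a minimal standalone restatement of that parsing step, runnable outside n8n with a hypothetical sample payload:

// Sketch: keep only the last "data: {...}" event from an SSE response body,
// mirroring the "Keep Last" code node. The sample payload is hypothetical;
// a real Jina Reader event stream has the same line-oriented shape.
const sample = [
  'data: {"title":"Partial","content":"First chunk..."}',
  'data: {"title":"Example Story","content":"Full article text."}',
].join('\n');

function keepLastEvent(body) {
  const entries = body
    .split('\n')
    .map(line => line.trim())
    .filter(line => line.startsWith('data: {'))
    .map(line => line.replace('data: ', ''));
  if (entries.length === 0) return { error: 'No valid JSON entries found' };
  try {
    const last = JSON.parse(entries[entries.length - 1]);
    return { title: last.title || 'No Title', content: last.content || 'No Content' };
  } catch (err) {
    return { error: 'JSON parsing failed', raw: entries[entries.length - 1] };
  }
}

console.log(keepLastEvent(sample));
// -> { title: 'Example Story', content: 'Full article text.' }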