path: root/src/data/composite/wiki-data/withParsedContentEntries.js
import {input, templateCompositeFrom} from '#composite';
import {stitchArrays} from '#sugar';
import {isContentString, validateInstanceOf} from '#validators';

import {withPropertiesFromList} from '#composite/data';

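// Matches the given case-sensitive regex against a content string, splitting
// it into parallel lists of entry headings and entry bodies. Each heading is
// the named capture groups of one match; each body is the text between that
// match and the next one (or the end of the string), trimmed of surrounding
// whitespace.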
export default templateCompositeFrom({
  annotation: `withParsedContentEntries`,

  inputs: {
    // TODO: Is there any way to validate this input based on the *other*
    // inputs provided, i.e. regexes? This kind of just assumes the string
    // has already been validated according to the form the regex expects,
    // which *is* always the case (as used), but it seems a bit awkward.
    from: input({validate: isContentString}),

    caseSensitiveRegex: input({
      validate: validateInstanceOf(RegExp),
    }),
  },

  outputs: [
    '#parsedContentEntryHeadings',
    '#parsedContentEntryBodies',
  ],

  steps: () => [
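    // Collect every match of the provided regex across the content string.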
    {
      dependencies: [
        input('from'),
        input('caseSensitiveRegex'),
      ],

      compute: (continuation, {
        [input('from')]: commentaryText,
        [input('caseSensitiveRegex')]: caseSensitiveRegex,
      }) => continuation({
        ['#rawMatches']:
          Array.from(commentaryText.matchAll(caseSensitiveRegex)),
      }),
    },

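    // Pull the matched text, capture groups, and start index of each match
    // out into parallel lists. The capture groups serve directly as the
    // parsed entry headings.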
    withPropertiesFromList({
      list: '#rawMatches',
      properties: input.value([
        '0', // The entire match as a string.
        'groups',
        'index',
      ]),
    }).outputs({
      '#rawMatches.0': '#rawMatches.text',
      '#rawMatches.groups': '#parsedContentEntryHeadings',
      '#rawMatches.index': '#rawMatches.startIndex',
    }),

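    // A heading ends where its matched text ends: start index plus the
    // length of the matched text.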
    {
      dependencies: [
        '#rawMatches.text',
        '#rawMatches.startIndex',
      ],

      compute: (continuation, {
        ['#rawMatches.text']: text,
        ['#rawMatches.startIndex']: startIndex,
      }) => continuation({
        ['#rawMatches.endIndex']:
          stitchArrays({text, startIndex})
            .map(({text, startIndex}) => startIndex + text.length),
      }),
    },

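    // Each entry's body runs from the end of its own heading to the start
    // of the next heading, or to the end of the content string for the
    // final entry, trimmed of surrounding whitespace.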
    {
      dependencies: [
        input('from'),
        '#rawMatches.startIndex',
        '#rawMatches.endIndex',
      ],

      compute: (continuation, {
        [input('from')]: commentaryText,
        ['#rawMatches.startIndex']: startIndex,
        ['#rawMatches.endIndex']: endIndex,
      }) => continuation({
        ['#parsedContentEntryBodies']:
          stitchArrays({startIndex, endIndex})
            .map(({endIndex}, index, stitched) =>
              (index === stitched.length - 1
                ? commentaryText.slice(endIndex)
                : commentaryText.slice(
                    endIndex,
                    stitched[index + 1].startIndex)))
            .map(body => body.trim()),
      }),
    },

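    // Pass the parsed headings and bodies through unchanged, providing them
    // under the names declared in this composition's outputs.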
    {
      dependencies: [
        '#parsedContentEntryHeadings',
        '#parsedContentEntryBodies',
      ],

      compute: (continuation, {
        ['#parsedContentEntryHeadings']: parsedContentEntryHeadings,
        ['#parsedContentEntryBodies']: parsedContentEntryBodies,
      }) => continuation({
        ['#parsedContentEntryHeadings']: parsedContentEntryHeadings,
        ['#parsedContentEntryBodies']: parsedContentEntryBodies,
      }),
    },
  ],
});