mtui - Music Text User Interface - user-friendly command line music player

-rw-r--r--  crawlers.js      | 126
-rw-r--r--  general-util.js  |   4
-rwxr-xr-x  index.js         |  22
-rw-r--r--  todo.txt         |   7
4 files changed, 131 insertions, 28 deletions
diff --git a/crawlers.js b/crawlers.js
index 82ef78e..caf3c0e 100644
--- a/crawlers.js
+++ b/crawlers.js
@@ -11,6 +11,15 @@ const { promisify } = require('util')
 const readDir = promisify(fs.readdir)
 const stat = promisify(fs.stat)
 
+// Each value in allCrawlers is a function with these additional properties:
+// * crawlerName: The name of the crawler, such as "crawl-http". Used by
+//   getCrawlerByName.
+// * isAppropriateForArg: A function returning whether an argument is valid for
+//   the crawler. For example, crawlHTTP.isAppropriateForArg returns whether or
+//   not the passed argument is a valid URL of the HTTP/HTTPS protocol. Used by
+//   getAllCrawlersForArg.
+const allCrawlers = {}
+
 function sortIgnoreCase(sortFunction) {
   return function(a, b) {
     return sortFunction(a.toLowerCase(), b.toLowerCase())
@@ -173,6 +182,39 @@ function crawlHTTP(absURL, opts = {}, internals = {}) {
     })
 }
 
+crawlHTTP.crawlerName = 'crawl-http'
+
+crawlHTTP.isAppropriateForArg = function(arg) {
+  // It is only used for HTTP(S) servers:
+  if (!(arg.startsWith('http://') || arg.startsWith('https://'))) {
+    return false
+  }
+
+  // It will definitely only work for valid URLs:
+  let url
+  try {
+    url = new URL(arg)
+  } catch (error) {
+    return false
+  }
+
+  // If the URL ends with a .json, it is probably meant to be used for a direct
+  // playlist download, not to be crawled.
+  if (path.extname(url.pathname) === '.json') {
+    return false
+  }
+
+  // Just to avoid conflict with crawl-youtube, assume crawl-http is not used
+  // for URLs on YouTube:
+  if (crawlYouTube.isAppropriateForArg(arg)) {
+    return false
+  }
+
+  return true
+}
+
+allCrawlers.crawlHTTP = crawlHTTP
+
 function getHTMLLinks(text) {
   // Never parse HTML with a regex!
   const $ = cheerio.load(text)
@@ -183,8 +225,6 @@ function getHTMLLinks(text) {
   })
 }
 
-
-
 function crawlLocal(dirPath, extensions = [
   'ogg', 'oga',
   'wav', 'mp3', 'mp4', 'm4a', 'aac',
@@ -238,6 +278,28 @@ function crawlLocal(dirPath, extensions = [
     .then(filteredItems => ({items: filteredItems}))
 }
 
+crawlLocal.crawlerName = 'crawl-local'
+
+crawlLocal.isAppropriateForArg = function(arg) {
+  // When the passed argument is a valid URL, it is only used for file://
+  // URLs:
+  try {
+    const url = new URL(arg)
+    if (url.protocol !== 'file:') {
+      return false
+    }
+  } catch (error) {}
+
+  // If the passed argument ends with .json, it is probably not a directory.
+  if (path.extname(arg) === '.json') {
+    return false
+  }
+
+  return true
+}
+
+allCrawlers.crawlLocal = crawlLocal
+
 async function crawlYouTube(url) {
   const ytdl = spawn('youtube-dl', [
     '-j', // Output as JSON
@@ -266,23 +328,53 @@ async function crawlYouTube(url) {
   }
 }
 
+crawlYouTube.crawlerName = 'crawl-youtube'
+
+crawlYouTube.isAppropriateForArg = function(arg) {
+  // It is definitely not used for arguments that are not URLs:
+  let url
+  try {
+    url = new URL(arg)
+  } catch (error) {
+    return false
+  }
+
+  // It is only used for URLs on the YouTube domain:
+  if (!(url.hostname === 'youtube.com' || url.hostname === 'www.youtube.com')) {
+    return false
+  }
+
+  // It is only used for playlist pages:
+  if (url.pathname !== '/playlist') {
+    return false
+  }
+
+  return true
+}
+
+allCrawlers.crawlYouTube = crawlYouTube
+
 async function openFile(input) {
   return JSON.parse(await downloadPlaylistFromOptionValue(input))
 }
 
-module.exports = {
-  crawlHTTP,
-  crawlLocal,
-  crawlYouTube,
-  openFile,
-
-  getCrawlerByName: function(name) {
-    switch (name) {
-      case 'crawl-http': return crawlHTTP
-      case 'crawl-local': return crawlLocal
-      case 'crawl-youtube': return crawlYouTube
-      case 'open-file': return openFile
-      default: return null
-    }
-  }
+openFile.crawlerName = 'open-file'
+
+openFile.isAppropriateForArg = function(arg) {
+  // It is only valid for arguments that end with .json:
+  return path.extname(arg) === '.json'
+}
+
+allCrawlers.openFile = openFile
+
+// Actual module.exports stuff:
+
+Object.assign(module.exports, allCrawlers)
+
+module.exports.getCrawlerByName = function(name) {
+  return Object.values(allCrawlers).find(fn => fn.crawlerName === name)
+}
+
+module.exports.getAllCrawlersForArg = function(arg) {
+  return Object.values(allCrawlers).filter(fn => fn.isAppropriateForArg(arg))
 }
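
For context, a minimal sketch of how the new registry might be consumed from another module; the require path and the example arguments are illustrative assumptions, not part of this commit:

// Hypothetical usage of the crawler registry added above (arguments are made up):
const { getCrawlerByName, getAllCrawlersForArg } = require('./crawlers')

// Matching is driven entirely by each crawler's isAppropriateForArg check:
getAllCrawlersForArg('/home/user/Music')                          // [crawlLocal]
getAllCrawlersForArg('https://example.com/music/')                // [crawlHTTP]
getAllCrawlersForArg('https://www.youtube.com/playlist?list=xyz') // [crawlYouTube]
getAllCrawlersForArg('https://example.com/playlist.json')         // [openFile]

// Looking a crawler up by its user-facing name also works:
getCrawlerByName('crawl-local')('/home/user/Music')
  .then(({items}) => { /* ... */ })

index.js (further below) relies on getAllCrawlersForArg in exactly this way, taking the first matching crawler for its command-line argument.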
diff --git a/general-util.js b/general-util.js
index 2ce7ed4..7156274 100644
--- a/general-util.js
+++ b/general-util.js
@@ -1,6 +1,10 @@
 const { spawn } = require('child_process')
+const { promisify } = require('util')
+const fs = require('fs')
 const npmCommandExists = require('command-exists')
 
+const readFile = promisify(fs.readFile)
+
 module.exports.promisifyProcess = function(proc, showLogging = true) {
   // Takes a process (from the child_process module) and returns a promise
   // that resolves when the process exits (or rejects, if the exit code is
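
The promisifyProcess helper shown in the context lines above appears here only through its doc comment; as a rough illustration of the behavior that comment describes (an assumption, not the file's actual body), such a helper usually looks something like this:

// Hypothetical sketch of a promisifyProcess-style helper (not the real implementation):
function promisifyProcess(proc, showLogging = true) {
  return new Promise((resolve, reject) => {
    if (showLogging) {
      // Forward the child's output so the user can see what it is doing.
      proc.stdout.pipe(process.stdout)
      proc.stderr.pipe(process.stderr)
    }
    proc.on('exit', code => {
      if (code === 0) {
        resolve()
      } else {
        reject(new Error('Process exited with code ' + code))
      }
    })
  })
}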
diff --git a/index.js b/index.js
index 3abbe83..59c91ec 100755
--- a/index.js
+++ b/index.js
@@ -4,6 +4,7 @@
 
 const { AppElement } = require('./ui')
 const { updatePlaylistFormat } = require('./playlist-utils')
+const { getAllCrawlersForArg } = require('./crawlers')
 const fs = require('fs')
 const util = require('util')
 const processSmartPlaylist = require('./smart-playlist')
@@ -67,21 +68,20 @@ async function main() {
   if (process.argv[2]) {
     console.log('Opening playlist...')
 
-    let grouplikeText
-    try {
-      grouplikeText = await readFile(process.argv[2])
-    } catch (error) {
-      console.error('Error opening the passed file "' + process.argv[2] + '"!')
+    const crawlers = getAllCrawlersForArg(process.argv[2])
+
+    if (crawlers.length === 0) {
+      console.error(`No suitable playlist crawler for "${process.argv[2]}".`)
       process.exit(1)
     }
 
-    try {
-      grouplike = JSON.parse(grouplikeText)
-    } catch (error) {
-      flushable.write('Error parsing the passed file as JSON!')
-      flushable.flush()
-      process.exit(1)
+    const crawler = crawlers[0]
+
+    if (crawlers.length > 1) {
+      console.warn(`More than one suitable crawler for "${process.argv[2]}" - using ${crawler.crawlerName}.`)
     }
+
+    grouplike = await crawler(process.argv[2])
   }
 
   grouplike = await processSmartPlaylist(grouplike)
diff --git a/todo.txt b/todo.txt
index b06ef7d..f5bff02 100644
--- a/todo.txt
+++ b/todo.txt
@@ -32,3 +32,10 @@ TODO: Warn if no mkfifo (means controls won't work).
 
 TODO: file:// support for crawl-local.
       (Done!)
+
+TODO: Pass YouTube playlist or http://.../playlist.json-like URLs to use them
+      as playlists (crawl automatically).
+      (Done!)
+
+TODO: There's some weird glitch where, if downloaderArg is missing (=== ""),
+      it'll play.. something by Jake Chudnow??