2016-11-24 01:11:22 +01:00
|
|
|
<?php
|
|
|
|
/**
|
2018-01-04 18:03:15 +01:00
|
|
|
* @file src/Util/ParseUrl.php
|
2016-11-24 01:11:22 +01:00
|
|
|
 * @brief Get information about a given URL
|
|
|
|
*/
|
2018-01-04 18:03:15 +01:00
|
|
|
namespace Friendica\Util;
|
2016-11-24 01:11:22 +01:00
|
|
|
|
2018-01-01 02:58:09 +01:00
|
|
|
use Friendica\Content\OEmbed;
|
2017-12-07 14:56:11 +01:00
|
|
|
use Friendica\Object\Image;
|
2017-11-10 13:45:33 +01:00
|
|
|
use Friendica\Util\XML;
|
2016-11-24 01:11:22 +01:00
|
|
|
|
2017-08-10 01:08:25 +02:00
|
|
|
use dba;
|
2017-12-17 21:24:57 +01:00
|
|
|
use DOMXPath;
|
2017-05-11 17:53:04 +02:00
|
|
|
use DOMDocument;
|
|
|
|
|
2017-12-17 21:24:57 +01:00
|
|
|
require_once 'include/dba.php';
|
2017-11-08 23:02:50 +01:00
|
|
|
require_once "include/network.php";
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Class with methods for extracting certain content from an url
|
|
|
|
*/
|
2017-11-08 23:02:50 +01:00
|
|
|
class ParseUrl
|
|
|
|
{
|
2016-11-27 23:41:55 +01:00
|
|
|
/**
|
|
|
|
	 * @brief Search for cached embeddable data of a URL, otherwise fetch it
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2017-12-17 21:27:50 +01:00
|
|
|
* @param string $url The url of the page which should be scraped
|
|
|
|
	 * @param bool $no_guessing If true the parser doesn't search for
|
2017-11-08 23:02:50 +01:00
|
|
|
* preview pictures
|
2017-12-17 21:27:50 +01:00
|
|
|
* @param bool $do_oembed The false option is used by the function fetch_oembed()
|
2017-11-08 23:02:50 +01:00
|
|
|
* to avoid endless loops
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* @return array which contains needed data for embedding
|
|
|
|
* string 'url' => The url of the parsed page
|
|
|
|
* string 'type' => Content type
|
|
|
|
* string 'title' => The title of the content
|
|
|
|
* string 'text' => The description for the content
|
|
|
|
* string 'image' => A preview image of the content (only available
|
|
|
|
	 * if $no_guessing = false)
|
|
|
|
	 * array 'images' => Array of preview pictures
|
|
|
|
* string 'keywords' => The tags which belong to the content
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* @see ParseUrl::getSiteinfo() for more information about scraping
|
2017-02-18 04:32:33 +01:00
|
|
|
* embeddable content
|
2016-11-27 23:41:55 +01:00
|
|
|
*/
|
2017-11-08 23:02:50 +01:00
|
|
|
public static function getSiteinfoCached($url, $no_guessing = false, $do_oembed = true)
|
|
|
|
{
|
2016-11-24 01:11:22 +01:00
|
|
|
if ($url == "") {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-11-08 23:02:50 +01:00
|
|
|
$r = q(
|
|
|
|
"SELECT * FROM `parsed_url` WHERE `url` = '%s' AND `guessing` = %d AND `oembed` = %d",
|
|
|
|
dbesc(normalise_link($url)),
|
|
|
|
intval(!$no_guessing),
|
|
|
|
intval($do_oembed)
|
|
|
|
);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
if ($r) {
|
|
|
|
$data = $r[0]["content"];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_null($data)) {
|
|
|
|
$data = unserialize($data);
|
|
|
|
return $data;
|
|
|
|
}
|
|
|
|
|
|
|
|
$data = self::getSiteinfo($url, $no_guessing, $do_oembed);
|
|
|
|
|
2017-11-08 23:02:50 +01:00
|
|
|
dba::insert(
|
|
|
|
'parsed_url',
|
|
|
|
array(
|
|
|
|
'url' => normalise_link($url), 'guessing' => !$no_guessing,
|
2017-08-10 01:02:57 +02:00
|
|
|
'oembed' => $do_oembed, 'content' => serialize($data),
|
2017-11-08 23:02:50 +01:00
|
|
|
'created' => datetime_convert()),
|
|
|
|
true
|
|
|
|
);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
return $data;
|
|
|
|
}
|
2016-11-27 23:41:55 +01:00
|
|
|
/**
|
|
|
|
* @brief Parse a page for embeddable content information
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
	 * This method parses the url for meta data which can be used to embed
|
|
|
|
* the content. If available it prioritizes Open Graph meta tags.
|
|
|
|
* If this is not available it uses the twitter cards meta tags.
|
|
|
|
	 * As fallback it uses standard html elements with meta information
|
|
|
|
* like \<title\>Awesome Title\</title\> or
|
|
|
|
* \<meta name="description" content="An awesome description"\>
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2017-12-17 21:27:50 +01:00
|
|
|
* @param string $url The url of the page which should be scraped
|
|
|
|
	 * @param bool $no_guessing If true the parser doesn't search for
|
2017-11-08 23:02:50 +01:00
|
|
|
* preview pictures
|
2017-12-17 21:27:50 +01:00
|
|
|
* @param bool $do_oembed The false option is used by the function fetch_oembed()
|
2017-11-08 23:02:50 +01:00
|
|
|
* to avoid endless loops
|
2017-12-17 21:27:50 +01:00
|
|
|
* @param int $count Internal counter to avoid endless loops
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* @return array which contains needed data for embedding
|
|
|
|
* string 'url' => The url of the parsed page
|
|
|
|
* string 'type' => Content type
|
|
|
|
* string 'title' => The title of the content
|
|
|
|
* string 'text' => The description for the content
|
|
|
|
* string 'image' => A preview image of the content (only available
|
|
|
|
	 * if $no_guessing = false)
|
|
|
|
	 * array 'images' => Array of preview pictures
|
|
|
|
* string 'keywords' => The tags which belong to the content
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* @todo https://developers.google.com/+/plugins/snippet/
|
|
|
|
* @verbatim
|
|
|
|
* <meta itemprop="name" content="Awesome title">
|
|
|
|
* <meta itemprop="description" content="An awesome description">
|
|
|
|
* <meta itemprop="image" content="http://maple.libertreeproject.org/images/tree-icon.png">
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* <body itemscope itemtype="http://schema.org/Product">
|
|
|
|
* <h1 itemprop="name">Shiny Trinket</h1>
|
|
|
|
* <img itemprop="image" src="{image-url}" />
|
|
|
|
* <p itemprop="description">Shiny trinkets are shiny.</p>
|
|
|
|
* </body>
|
|
|
|
* @endverbatim
|
|
|
|
*/
|
2017-11-08 23:02:50 +01:00
|
|
|
public static function getSiteinfo($url, $no_guessing = false, $do_oembed = true, $count = 1)
|
|
|
|
{
|
2016-11-24 01:11:22 +01:00
|
|
|
$a = get_app();
|
|
|
|
|
|
|
|
$siteinfo = array();
|
|
|
|
|
|
|
|
// Check if the URL does contain a scheme
|
|
|
|
$scheme = parse_url($url, PHP_URL_SCHEME);
|
|
|
|
|
|
|
|
if ($scheme == "") {
|
|
|
|
$url = "http://".trim($url, "/");
|
|
|
|
}
|
|
|
|
|
|
|
|
if ($count > 10) {
|
|
|
|
logger("parseurl_getsiteinfo: Endless loop detected for ".$url, LOGGER_DEBUG);
|
|
|
|
return($siteinfo);
|
|
|
|
}
|
|
|
|
|
|
|
|
$url = trim($url, "'");
|
|
|
|
$url = trim($url, '"');
|
|
|
|
|
2017-02-18 04:35:46 +01:00
|
|
|
$url = strip_tracking_query_params($url);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
$siteinfo["url"] = $url;
|
|
|
|
$siteinfo["type"] = "link";
|
|
|
|
|
2017-10-15 21:29:58 +02:00
|
|
|
$data = z_fetch_url($url);
|
|
|
|
if (!$data['success']) {
|
2016-11-24 01:11:22 +01:00
|
|
|
return($siteinfo);
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the file is too large then exit
|
2017-10-15 21:29:58 +02:00
|
|
|
if ($data["info"]["download_content_length"] > 1000000) {
|
2016-11-24 01:11:22 +01:00
|
|
|
return($siteinfo);
|
|
|
|
}
|
|
|
|
|
|
|
|
// If it isn't a HTML file then exit
|
2017-10-15 21:29:58 +02:00
|
|
|
if (($data["info"]["content_type"] != "") && !strstr(strtolower($data["info"]["content_type"]), "html")) {
|
2016-11-24 01:11:22 +01:00
|
|
|
return($siteinfo);
|
|
|
|
}
|
|
|
|
|
2017-10-15 21:29:58 +02:00
|
|
|
$header = $data["header"];
|
|
|
|
$body = $data["body"];
|
|
|
|
|
2016-11-24 01:11:22 +01:00
|
|
|
if ($do_oembed) {
|
2018-01-01 02:58:09 +01:00
|
|
|
$oembed_data = OEmbed::fetchURL($url);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
2017-11-07 22:53:11 +01:00
|
|
|
if (!in_array($oembed_data->type, array("error", "rich", ""))) {
|
2016-11-24 01:11:22 +01:00
|
|
|
$siteinfo["type"] = $oembed_data->type;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (($oembed_data->type == "link") && ($siteinfo["type"] != "photo")) {
|
|
|
|
if (isset($oembed_data->title)) {
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["title"] = trim($oembed_data->title);
|
2016-11-24 01:11:22 +01:00
|
|
|
}
|
|
|
|
if (isset($oembed_data->description)) {
|
|
|
|
$siteinfo["text"] = trim($oembed_data->description);
|
|
|
|
}
|
|
|
|
if (isset($oembed_data->thumbnail_url)) {
|
|
|
|
$siteinfo["image"] = $oembed_data->thumbnail_url;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the first mentioned charset. Can be in body or header
|
|
|
|
$charset = "";
|
|
|
|
if (preg_match('/charset=(.*?)['."'".'"\s\n]/', $header, $matches)) {
|
|
|
|
$charset = trim(trim(trim(array_pop($matches)), ';,'));
|
|
|
|
}
|
|
|
|
|
|
|
|
if ($charset == "") {
|
|
|
|
$charset = "utf-8";
|
|
|
|
}
|
|
|
|
|
|
|
|
if (($charset != "") && (strtoupper($charset) != "UTF-8")) {
|
|
|
|
logger("parseurl_getsiteinfo: detected charset ".$charset, LOGGER_DEBUG);
|
|
|
|
//$body = mb_convert_encoding($body, "UTF-8", $charset);
|
|
|
|
$body = iconv($charset, "UTF-8//TRANSLIT", $body);
|
|
|
|
}
|
|
|
|
|
|
|
|
$body = mb_convert_encoding($body, 'HTML-ENTITIES', "UTF-8");
|
|
|
|
|
2017-05-11 17:53:04 +02:00
|
|
|
$doc = new DOMDocument();
|
2016-11-24 01:11:22 +01:00
|
|
|
@$doc->loadHTML($body);
|
|
|
|
|
2017-11-10 13:45:33 +01:00
|
|
|
XML::deleteNode($doc, "style");
|
|
|
|
XML::deleteNode($doc, "script");
|
|
|
|
XML::deleteNode($doc, "option");
|
|
|
|
XML::deleteNode($doc, "h1");
|
|
|
|
XML::deleteNode($doc, "h2");
|
|
|
|
XML::deleteNode($doc, "h3");
|
|
|
|
XML::deleteNode($doc, "h4");
|
|
|
|
XML::deleteNode($doc, "h5");
|
|
|
|
XML::deleteNode($doc, "h6");
|
|
|
|
XML::deleteNode($doc, "ol");
|
|
|
|
XML::deleteNode($doc, "ul");
|
2017-05-11 17:53:04 +02:00
|
|
|
|
2017-12-17 21:24:57 +01:00
|
|
|
$xpath = new DOMXPath($doc);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
$list = $xpath->query("//meta[@content]");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
$attr = array();
|
|
|
|
if ($node->attributes->length) {
|
|
|
|
foreach ($node->attributes as $attribute) {
|
|
|
|
$attr[$attribute->name] = $attribute->value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (@$attr["http-equiv"] == "refresh") {
|
|
|
|
$path = $attr["content"];
|
|
|
|
$pathinfo = explode(";", $path);
|
|
|
|
$content = "";
|
|
|
|
foreach ($pathinfo as $value) {
|
|
|
|
if (substr(strtolower($value), 0, 4) == "url=") {
|
|
|
|
$content = substr($value, 4);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if ($content != "") {
|
|
|
|
$siteinfo = self::getSiteinfo($content, $no_guessing, $do_oembed, ++$count);
|
|
|
|
return($siteinfo);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
$list = $xpath->query("//title");
|
|
|
|
if ($list->length > 0) {
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["title"] = trim($list->item(0)->nodeValue);
|
2016-11-24 01:11:22 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
//$list = $xpath->query("head/meta[@name]");
|
|
|
|
$list = $xpath->query("//meta[@name]");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
$attr = array();
|
|
|
|
if ($node->attributes->length) {
|
|
|
|
foreach ($node->attributes as $attribute) {
|
|
|
|
$attr[$attribute->name] = $attribute->value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
$attr["content"] = trim(html_entity_decode($attr["content"], ENT_QUOTES, "UTF-8"));
|
|
|
|
|
|
|
|
if ($attr["content"] != "") {
|
|
|
|
switch (strtolower($attr["name"])) {
|
|
|
|
case "fulltitle":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["title"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "description":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["text"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "thumbnail":
|
|
|
|
$siteinfo["image"] = $attr["content"];
|
|
|
|
break;
|
|
|
|
case "twitter:image":
|
|
|
|
$siteinfo["image"] = $attr["content"];
|
|
|
|
break;
|
|
|
|
case "twitter:image:src":
|
|
|
|
$siteinfo["image"] = $attr["content"];
|
|
|
|
break;
|
|
|
|
case "twitter:card":
|
|
|
|
if (($siteinfo["type"] == "") || ($attr["content"] == "photo")) {
|
|
|
|
$siteinfo["type"] = $attr["content"];
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case "twitter:description":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["text"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "twitter:title":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["title"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "dc.title":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["title"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "dc.description":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["text"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "keywords":
|
|
|
|
$keywords = explode(",", $attr["content"]);
|
|
|
|
break;
|
|
|
|
case "news_keywords":
|
|
|
|
$keywords = explode(",", $attr["content"]);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if ($siteinfo["type"] == "summary") {
|
|
|
|
$siteinfo["type"] = "link";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isset($keywords)) {
|
|
|
|
$siteinfo["keywords"] = array();
|
|
|
|
foreach ($keywords as $keyword) {
|
|
|
|
if (!in_array(trim($keyword), $siteinfo["keywords"])) {
|
|
|
|
$siteinfo["keywords"][] = trim($keyword);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//$list = $xpath->query("head/meta[@property]");
|
|
|
|
$list = $xpath->query("//meta[@property]");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
$attr = array();
|
|
|
|
if ($node->attributes->length) {
|
|
|
|
foreach ($node->attributes as $attribute) {
|
|
|
|
$attr[$attribute->name] = $attribute->value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
$attr["content"] = trim(html_entity_decode($attr["content"], ENT_QUOTES, "UTF-8"));
|
|
|
|
|
|
|
|
if ($attr["content"] != "") {
|
|
|
|
switch (strtolower($attr["property"])) {
|
|
|
|
case "og:image":
|
|
|
|
$siteinfo["image"] = $attr["content"];
|
|
|
|
break;
|
|
|
|
case "og:title":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["title"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
case "og:description":
|
2017-11-07 22:53:11 +01:00
|
|
|
$siteinfo["text"] = trim($attr["content"]);
|
2016-11-24 01:11:22 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((@$siteinfo["image"] == "") && !$no_guessing) {
|
|
|
|
$list = $xpath->query("//img[@src]");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
$attr = array();
|
|
|
|
if ($node->attributes->length) {
|
|
|
|
foreach ($node->attributes as $attribute) {
|
|
|
|
$attr[$attribute->name] = $attribute->value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
$src = self::completeUrl($attr["src"], $url);
|
2017-12-07 14:56:11 +01:00
|
|
|
$photodata = Image::getInfoFromURL($src);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
if (($photodata) && ($photodata[0] > 150) && ($photodata[1] > 150)) {
|
|
|
|
if ($photodata[0] > 300) {
|
|
|
|
$photodata[1] = round($photodata[1] * (300 / $photodata[0]));
|
|
|
|
$photodata[0] = 300;
|
|
|
|
}
|
|
|
|
if ($photodata[1] > 300) {
|
|
|
|
$photodata[0] = round($photodata[0] * (300 / $photodata[1]));
|
|
|
|
$photodata[1] = 300;
|
|
|
|
}
|
|
|
|
$siteinfo["images"][] = array("src" => $src,
|
|
|
|
"width" => $photodata[0],
|
|
|
|
"height" => $photodata[1]);
|
|
|
|
}
|
2017-11-08 23:02:50 +01:00
|
|
|
}
|
2016-11-24 01:11:22 +01:00
|
|
|
} elseif ($siteinfo["image"] != "") {
|
|
|
|
$src = self::completeUrl($siteinfo["image"], $url);
|
|
|
|
|
|
|
|
unset($siteinfo["image"]);
|
|
|
|
|
2017-12-07 14:56:11 +01:00
|
|
|
$photodata = Image::getInfoFromURL($src);
|
2016-11-24 01:11:22 +01:00
|
|
|
|
|
|
|
if (($photodata) && ($photodata[0] > 10) && ($photodata[1] > 10)) {
|
|
|
|
$siteinfo["images"][] = array("src" => $src,
|
|
|
|
"width" => $photodata[0],
|
|
|
|
"height" => $photodata[1]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((@$siteinfo["text"] == "") && (@$siteinfo["title"] != "") && !$no_guessing) {
|
|
|
|
$text = "";
|
|
|
|
|
|
|
|
$list = $xpath->query("//div[@class='article']");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
if (strlen($node->nodeValue) > 40) {
|
|
|
|
$text .= " ".trim($node->nodeValue);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ($text == "") {
|
|
|
|
$list = $xpath->query("//div[@class='content']");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
if (strlen($node->nodeValue) > 40) {
|
|
|
|
$text .= " ".trim($node->nodeValue);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If none text was found then take the paragraph content
|
|
|
|
if ($text == "") {
|
|
|
|
$list = $xpath->query("//p");
|
|
|
|
foreach ($list as $node) {
|
|
|
|
if (strlen($node->nodeValue) > 40) {
|
|
|
|
$text .= " ".trim($node->nodeValue);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ($text != "") {
|
|
|
|
$text = trim(str_replace(array("\n", "\r"), array(" ", " "), $text));
|
|
|
|
|
|
|
|
while (strpos($text, " ")) {
|
|
|
|
$text = trim(str_replace(" ", " ", $text));
|
|
|
|
}
|
|
|
|
|
|
|
|
$siteinfo["text"] = trim(html_entity_decode(substr($text, 0, 350), ENT_QUOTES, "UTF-8").'...');
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
logger("parseurl_getsiteinfo: Siteinfo for ".$url." ".print_r($siteinfo, true), LOGGER_DEBUG);
|
|
|
|
|
|
|
|
call_hooks("getsiteinfo", $siteinfo);
|
|
|
|
|
|
|
|
return($siteinfo);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Convert tags from CSV to an array
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-24 01:11:22 +01:00
|
|
|
* @param string $string Tags
|
|
|
|
* @return array with formatted Hashtags
|
|
|
|
*/
|
2017-11-08 23:02:50 +01:00
|
|
|
public static function convertTagsToArray($string)
|
|
|
|
{
|
2016-11-24 01:11:22 +01:00
|
|
|
$arr_tags = str_getcsv($string);
|
|
|
|
if (count($arr_tags)) {
|
|
|
|
// add the # sign to every tag
|
|
|
|
array_walk($arr_tags, array("self", "arrAddHashes"));
|
|
|
|
|
|
|
|
return $arr_tags;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
	 * @brief Add a hash sign to a string
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-24 01:11:22 +01:00
|
|
|
* This method is used as callback function
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-24 01:11:22 +01:00
|
|
|
* @param string $tag The pure tag name
|
2017-11-08 23:02:50 +01:00
|
|
|
* @param int $k Counter for internal use
|
2017-11-19 23:04:40 +01:00
|
|
|
* @return void
|
2016-11-24 01:11:22 +01:00
|
|
|
*/
|
2017-11-08 23:02:50 +01:00
|
|
|
private static function arrAddHashes(&$tag, $k)
|
|
|
|
{
|
2016-11-24 01:11:22 +01:00
|
|
|
$tag = "#" . $tag;
|
|
|
|
}
|
|
|
|
|
2016-11-27 23:41:55 +01:00
|
|
|
/**
|
|
|
|
	 * @brief Add a scheme to a URL
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* The src attribute of some html elements (e.g. images)
|
|
|
|
* can miss the scheme so we need to add the correct
|
|
|
|
* scheme
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2017-11-08 23:02:50 +01:00
|
|
|
* @param string $url The url which possibly does have
|
|
|
|
* a missing scheme (a link to an image)
|
2016-11-27 23:41:55 +01:00
|
|
|
* @param string $scheme The url with a correct scheme
|
2017-11-08 23:02:50 +01:00
|
|
|
* (e.g. the url from the webpage which does contain the image)
|
2017-02-18 04:32:33 +01:00
|
|
|
*
|
2016-11-27 23:41:55 +01:00
|
|
|
* @return string The url with a scheme
|
|
|
|
*/
|
2017-11-08 23:02:50 +01:00
|
|
|
private static function completeUrl($url, $scheme)
|
|
|
|
{
|
2016-11-24 01:11:22 +01:00
|
|
|
$urlarr = parse_url($url);
|
|
|
|
|
2016-11-27 23:41:55 +01:00
|
|
|
// If the url does allready have an scheme
|
|
|
|
// we can stop the process here
|
2016-11-24 01:11:22 +01:00
|
|
|
if (isset($urlarr["scheme"])) {
|
|
|
|
return($url);
|
|
|
|
}
|
|
|
|
|
|
|
|
$schemearr = parse_url($scheme);
|
|
|
|
|
|
|
|
$complete = $schemearr["scheme"]."://".$schemearr["host"];
|
|
|
|
|
|
|
|
if (@$schemearr["port"] != "") {
|
|
|
|
$complete .= ":".$schemearr["port"];
|
|
|
|
}
|
|
|
|
|
2017-11-08 23:02:50 +01:00
|
|
|
if (strpos($urlarr["path"], "/") !== 0) {
|
2016-11-24 01:11:22 +01:00
|
|
|
$complete .= "/";
|
|
|
|
}
|
|
|
|
|
|
|
|
$complete .= $urlarr["path"];
|
|
|
|
|
|
|
|
if (@$urlarr["query"] != "") {
|
|
|
|
$complete .= "?".$urlarr["query"];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (@$urlarr["fragment"] != "") {
|
|
|
|
$complete .= "#".$urlarr["fragment"];
|
|
|
|
}
|
|
|
|
|
|
|
|
return($complete);
|
|
|
|
}
|
|
|
|
}
|