Remove unused parameter

This commit is contained in:
Michael 2021-03-16 07:15:20 +00:00
parent d498d15200
commit 08771d96c2
5 changed files with 13 additions and 18 deletions

View File

@ -151,7 +151,7 @@ class OEmbed
// Improve the OEmbed data with data from OpenGraph, Twitter cards and other sources
if ($use_parseurl) {
$data = ParseUrl::getSiteinfoCached($embedurl, true, false);
$data = ParseUrl::getSiteinfoCached($embedurl, false);
if (($oembed->type == 'error') && empty($data['title']) && empty($data['text'])) {
return $oembed;

View File

@ -187,7 +187,7 @@ class PageInfo
*/
public static function queryUrl(string $url, string $photo = '', bool $keywords = false, string $keyword_denylist = '')
{
$data = ParseUrl::getSiteinfoCached($url, true);
$data = ParseUrl::getSiteinfoCached($url);
if ($photo != '') {
$data['images'][0]['src'] = $photo;

View File

@ -322,14 +322,14 @@ class BBCode
$data = ['url' => $url, 'type' => 'photo'];
} else {
// Checking, if the link goes to a picture
$data = ParseUrl::getSiteinfoCached($pictures[0][1], true);
$data = ParseUrl::getSiteinfoCached($pictures[0][1]);
}
// Workaround:
// Sometimes photo posts to the own album are not detected at the start.
// So it seems we cannot use the cache for these cases. That's strange.
if (($data['type'] != 'photo') && strstr($pictures[0][1], "/photos/")) {
$data = ParseUrl::getSiteinfo($pictures[0][1], true);
$data = ParseUrl::getSiteinfo($pictures[0][1]);
}
if ($data['type'] == 'photo') {
@ -416,7 +416,7 @@ class BBCode
$post['text'] = trim($body);
}
} elseif (isset($post['url']) && ($post['type'] == 'video')) {
$data = ParseUrl::getSiteinfoCached($post['url'], true);
$data = ParseUrl::getSiteinfoCached($post['url']);
if (isset($data['images'][0])) {
$post['image'] = $data['images'][0]['src'];

View File

@ -484,7 +484,7 @@ class Feed
$item["body"] = trim($item["title"]);
}
$data = ParseUrl::getSiteinfoCached($item['plink'], true);
$data = ParseUrl::getSiteinfoCached($item['plink']);
if (!empty($data['text']) && !empty($data['title']) && (mb_strlen($item['body']) < mb_strlen($data['text']))) {
// When the fetched page info text is longer than the body, we do try to enhance the body
if (!empty($item['body']) && (strpos($data['title'], $item['body']) === false) && (strpos($data['text'], $item['body']) === false)) {

View File

@ -75,8 +75,6 @@ class ParseUrl
* Search for cached embeddable data of a URL, otherwise fetch it
*
* @param string $url The url of the page which should be scraped
* @param bool $no_guessing If true the parse doens't search for
* preview pictures
* @param bool $do_oembed The false option is used by the function fetch_oembed()
* to avoid endless loops
*
@ -85,7 +83,7 @@ class ParseUrl
* string 'type' => Content type
* string 'title' => (optional) The title of the content
* string 'text' => (optional) The description for the content
* string 'image' => (optional) A preview image of the content (only available if $no_geuessing = false)
* string 'image' => (optional) A preview image of the content
* array 'images' => (optional) Array of preview pictures
* string 'keywords' => (optional) The tags which belong to the content
*
@ -93,7 +91,7 @@ class ParseUrl
* @see ParseUrl::getSiteinfo() for more information about scraping
* embeddable content
*/
public static function getSiteinfoCached($url, $no_guessing = false, $do_oembed = true): array
public static function getSiteinfoCached($url, $do_oembed = true): array
{
if (empty($url)) {
return [
@ -105,14 +103,14 @@ class ParseUrl
$urlHash = hash('sha256', $url);
$parsed_url = DBA::selectFirst('parsed_url', ['content'],
['url_hash' => $urlHash, 'guessing' => !$no_guessing, 'oembed' => $do_oembed]
['url_hash' => $urlHash, 'oembed' => $do_oembed]
);
if (!empty($parsed_url['content'])) {
$data = unserialize($parsed_url['content']);
return $data;
}
$data = self::getSiteinfo($url, $no_guessing, $do_oembed);
$data = self::getSiteinfo($url, $do_oembed);
$expires = $data['expires'];
@ -122,7 +120,6 @@ class ParseUrl
'parsed_url',
[
'url_hash' => $urlHash,
'guessing' => !$no_guessing,
'oembed' => $do_oembed,
'url' => $url,
'content' => serialize($data),
@ -146,8 +143,6 @@ class ParseUrl
* \<meta name="description" content="An awesome description"\>
*
* @param string $url The url of the page which should be scraped
* @param bool $no_guessing If true the parse doens't search for
* preview pictures
* @param bool $do_oembed The false option is used by the function fetch_oembed()
* to avoid endless loops
* @param int $count Internal counter to avoid endless loops
@ -157,7 +152,7 @@ class ParseUrl
* string 'type' => Content type (error, link, photo, image, audio, video)
* string 'title' => (optional) The title of the content
* string 'text' => (optional) The description for the content
* string 'image' => (optional) A preview image of the content (only available if $no_guessing = false)
* string 'image' => (optional) A preview image of the content
* array 'images' => (optional) Array of preview pictures
* string 'keywords' => (optional) The tags which belong to the content
*
@ -175,7 +170,7 @@ class ParseUrl
* </body>
* @endverbatim
*/
public static function getSiteinfo($url, $no_guessing = false, $do_oembed = true, $count = 1)
public static function getSiteinfo($url, $do_oembed = true, $count = 1)
{
if (empty($url)) {
return [
@ -343,7 +338,7 @@ class ParseUrl
}
}
if ($content != '') {
$siteinfo = self::getSiteinfo($content, $no_guessing, $do_oembed, ++$count);
$siteinfo = self::getSiteinfo($content, $do_oembed, ++$count);
return $siteinfo;
}
}