Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 719 additions and 317 deletions.
@@ -23,6 +23,6 @@ def main(command, **kwargs):
total = users.count()
command.stdout.write(
"Updating {} users with {} permission...".format(total, user_permission)
f"Updating {total} users with {user_permission} permission..."
)
users.update(**{"permission_{}".format(user_permission): True})
users.update(**{f"permission_{user_permission}": True})
@@ -12,12 +12,12 @@ This command will also generate federation ids for existing resources.
"""
from django.conf import settings
from django.db.models import functions, CharField, F, Value
from django.db.models import CharField, F, Value, functions
from funkwhale_api.common import preferences
from funkwhale_api.federation import models as federation_models
from funkwhale_api.music import models
from funkwhale_api.users.models import User
from funkwhale_api.federation import models as federation_models
from funkwhale_api.common import preferences
def create_libraries(open_api, stdout):
@@ -36,9 +36,7 @@ def create_libraries(open_api, stdout):
)
libraries_by_user[library.actor.user.pk] = library.pk
if created:
stdout.write(
" * Created library {} for user {}".format(library.pk, a.user.pk)
)
stdout.write(f" * Created library {library.pk} for user {a.user.pk}")
else:
stdout.write(
" * Found existing library {} for user {}".format(
@@ -60,13 +58,9 @@ def update_uploads(libraries_by_user, stdout):
)
total = candidates.update(library=library_id, import_status="finished")
if total:
stdout.write(
" * Assigned {} uploads to user {}'s library".format(total, user_id)
)
stdout.write(f" * Assigned {total} uploads to user {user_id}'s library")
else:
stdout.write(
" * No uploads to assign to user {}'s library".format(user_id)
)
stdout.write(f" * No uploads to assign to user {user_id}'s library")
def update_orphan_uploads(open_api, stdout):
@@ -105,14 +99,12 @@
def set_fid(queryset, path, stdout):
model = queryset.model._meta.label
qs = queryset.filter(fid=None)
base_url = "{}{}".format(settings.FUNKWHALE_URL, path)
stdout.write(
"* Assigning federation ids to {} entries (path: {})".format(model, base_url)
)
base_url = f"{settings.FUNKWHALE_URL}{path}"
stdout.write(f"* Assigning federation ids to {model} entries (path: {base_url})")
new_fid = functions.Concat(Value(base_url), F("uuid"), output_field=CharField())
total = qs.update(fid=new_fid)
stdout.write(" * {} entries updated".format(total))
stdout.write(f" * {total} entries updated")
def update_shared_inbox_url(stdout):
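set_fid backfills federation ids in a single UPDATE by concatenating the base URL with each row's uuid inside the database. A minimal sketch of the same ORM pattern, assuming a model with fid and uuid columns:

from django.db.models import CharField, F, Value, functions

def backfill_fid(queryset, base_url):
    # Build "<base_url><uuid>" per row in SQL and update every row
    # missing a fid in one query, with no Python-side loop.
    new_fid = functions.Concat(Value(base_url), F("uuid"), output_field=CharField())
    return queryset.filter(fid=None).update(fid=new_fid)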
@@ -123,16 +115,16 @@
def generate_actor_urls(part, stdout):
field = "{}_url".format(part)
stdout.write("* Update {} for local actors...".format(field))
field = f"{part}_url"
stdout.write(f"* Update {field} for local actors...")
queryset = federation_models.Actor.objects.local().filter(**{field: None})
base_url = "{}/federation/actors/".format(settings.FUNKWHALE_URL)
base_url = f"{settings.FUNKWHALE_URL}/federation/actors/"
new_field = functions.Concat(
Value(base_url),
F("preferred_username"),
Value("/{}".format(part)),
Value(f"/{part}"),
output_field=CharField(),
)
@@ -5,14 +5,13 @@ from django.db.models import Q
from . import utils
QUERY_REGEX = re.compile(r'(((?P<key>\w+):)?(?P<value>"[^"]+"|[\S]+))')
def parse_query(query):
"""
Given a search query such as "hello is:issue status:opened",
returns a list of dictionnaries discribing each query token
returns a list of dictionaries describing each query token
"""
matches = [m.groupdict() for m in QUERY_REGEX.finditer(query.lower())]
for m in matches:
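For reference, QUERY_REGEX splits a query into bare terms and key:value tokens, keeping quoted values whole. What the matches look like on a sample query:

import re

QUERY_REGEX = re.compile(r'(((?P<key>\w+):)?(?P<value>"[^"]+"|[\S]+))')

tokens = [m.groupdict() for m in QUERY_REGEX.finditer('hello is:issue status:opened')]
# [{'key': None, 'value': 'hello'},
#  {'key': 'is', 'value': 'issue'},
#  {'key': 'status', 'value': 'opened'}]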
@@ -26,7 +25,7 @@ def normalize_query(
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r"\s{2,}").sub,
):
""" Splits the query string in invidual keywords, getting rid of unecessary spaces
"""Splits the query string in individual keywords, getting rid of unnecessary spaces
and grouping quoted words together.
Example:
@@ -73,7 +72,7 @@ def get_fts_query(query_string, fts_fields=["body_text"], model=None):
else:
query_string = remove_chars(query_string, ['"', "&", "(", ")", "!", "'"])
parts = query_string.replace(":", "").split(" ")
parts = ["{}:*".format(p) for p in parts if p]
parts = [f"{p}:*" for p in parts if p]
if not parts:
return Q(pk=None)
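The cleaned tokens get a ":*" suffix so Postgres full-text search matches word prefixes. A sketch of how such a list becomes a query, assuming the tokens are ANDed into a raw tsquery via django.contrib.postgres (the " & " join is the usual pattern, not quoted from this diff):

from django.contrib.postgres.search import SearchQuery

def prefix_search(query_string):
    # "hel wor" -> "hel:* & wor:*": every token must match as a prefix
    parts = [f"{p}:*" for p in query_string.replace(":", "").split(" ") if p]
    if not parts:
        return None
    return SearchQuery(" & ".join(parts), search_type="raw")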
@@ -98,7 +97,7 @@ def get_fts_query(query_string, fts_fields=["body_text"], model=None):
)
}
).values_list("pk", flat=True)
new_query = Q(**{"{}__in".format(fk_field_name): list(subquery)})
new_query = Q(**{f"{fk_field_name}__in": list(subquery)})
else:
new_query = Q(
**{
@@ -181,7 +180,7 @@ class SearchConfig:
except KeyError:
# no cleaning to apply
value = token["value"]
q = Q(**{"{}__icontains".format(to): value})
q = Q(**{f"{to}__icontains": value})
if not specific_field_query:
specific_field_query = q
else:
......
import collections
import io
import PIL
import os
from rest_framework import serializers
import PIL
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema_field
from rest_framework import serializers
from . import models
from . import utils
from . import models, utils
class RelatedField(serializers.RelatedField):
@@ -52,7 +52,7 @@ class RelatedField(serializers.RelatedField):
self.fail(
"does_not_exist",
related_field_name=self.related_field_name,
value=smart_text(data),
value=smart_str(data),
)
except (TypeError, ValueError):
self.fail("invalid")
@@ -82,14 +82,14 @@ class RelatedField(serializers.RelatedField):
)
class Action(object):
class Action:
def __init__(self, name, allow_all=False, qs_filter=None):
self.name = name
self.allow_all = allow_all
self.qs_filter = qs_filter
def __repr__(self):
return "<Action {}>".format(self.name)
return f"<Action {self.name}>"
class ActionSerializer(serializers.Serializer):
@@ -113,7 +113,7 @@
)
for action in self.actions_by_name.keys():
handler_name = "handle_{}".format(action)
handler_name = f"handle_{action}"
assert hasattr(self, handler_name), "{} miss a {} method".format(
self.__class__.__name__, handler_name
)
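ActionSerializer resolves each declared action to a handle_<name> method and asserts at construction time that every handler exists, so a misconfigured serializer fails immediately instead of on first dispatch. A condensed, hypothetical sketch of the convention:

class BulkActions:
    actions = ["delete", "approve"]

    def __init__(self):
        for action in self.actions:
            handler_name = f"handle_{action}"
            # Fail fast: every declared action needs a matching handler.
            assert hasattr(self, handler_name), f"missing {handler_name}"

    def run(self, action, objects):
        return getattr(self, f"handle_{action}")(objects)

    def handle_delete(self, objects):
        return f"deleted {len(objects)}"

    def handle_approve(self, objects):
        return f"approved {len(objects)}"

BulkActions().run("delete", [1, 2, 3])  # 'deleted 3'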
@@ -133,9 +133,9 @@
if value == "all":
return self.queryset.all().order_by("id")
if type(value) in [list, tuple]:
return self.queryset.filter(
**{"{}__in".format(self.pk_field): value}
).order_by(self.pk_field)
return self.queryset.filter(**{f"{self.pk_field}__in": value}).order_by(
self.pk_field
)
raise serializers.ValidationError(
"{} is not a valid value for objects. You must provide either a "
@@ -270,6 +270,7 @@ class APIMutationSerializer(serializers.ModelSerializer):
"previous_state",
]
@extend_schema_field(OpenApiTypes.OBJECT)
def get_target(self, obj):
target = obj.target
if not target:
@@ -280,7 +281,7 @@
def validate_type(self, value):
if value not in self.context["registry"]:
raise serializers.ValidationError("Invalid mutation type {}".format(value))
raise serializers.ValidationError(f"Invalid mutation type {value}")
return value
@@ -292,10 +293,22 @@ class AttachmentSerializer(serializers.Serializer):
file = StripExifImageField(write_only=True)
urls = serializers.SerializerMethodField()
@extend_schema_field(
{
"type": "object",
"properties": {
"original": {"type": "string"},
"small_square_crop": {"type": "string"},
"medium_square_crop": {"type": "string"},
"large_square_crop": {"type": "string"},
},
}
)
def get_urls(self, o):
urls = {}
urls["source"] = o.url
urls["original"] = o.download_url_original
urls["small_square_crop"] = o.download_url_small_square_crop
urls["medium_square_crop"] = o.download_url_medium_square_crop
urls["large_square_crop"] = o.download_url_large_square_crop
return urls
@@ -308,16 +321,20 @@
class ContentSerializer(serializers.Serializer):
text = serializers.CharField(
max_length=models.CONTENT_TEXT_MAX_LENGTH, allow_null=True
max_length=models.CONTENT_TEXT_MAX_LENGTH,
allow_null=True,
allow_blank=True,
)
content_type = serializers.ChoiceField(
choices=models.CONTENT_TEXT_SUPPORTED_TYPES,
)
content_type = serializers.ChoiceField(choices=models.CONTENT_TEXT_SUPPORTED_TYPES,)
html = serializers.SerializerMethodField()
def get_html(self, o):
def get_html(self, o) -> str:
return utils.render_html(o.text, o.content_type)
class NullToEmptDict(object):
class NullToEmptDict:
def get_attribute(self, o):
attr = super().get_attribute(o)
if attr is None:
@@ -328,3 +345,36 @@ class NullToEmptDict(object):
if not v:
return v
return super().to_representation(v)
class ScopesSerializer(serializers.Serializer):
id = serializers.CharField()
rate = serializers.CharField()
description = serializers.CharField()
limit = serializers.IntegerField()
duration = serializers.IntegerField()
remaining = serializers.IntegerField()
available = serializers.IntegerField()
available_seconds = serializers.IntegerField()
reset = serializers.IntegerField()
reset_seconds = serializers.IntegerField()
class IdentSerializer(serializers.Serializer):
type = serializers.CharField()
id = serializers.CharField()
class RateLimitSerializer(serializers.Serializer):
enabled = serializers.BooleanField()
ident = IdentSerializer()
scopes = serializers.ListField(child=ScopesSerializer())
class ErrorDetailSerializer(serializers.Serializer):
detail = serializers.CharField(source="*")
class TextPreviewSerializer(serializers.Serializer):
rendered = serializers.CharField(read_only=True, source="*")
text = serializers.CharField(write_only=True)
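Both ErrorDetailSerializer and TextPreviewSerializer rely on source="*", which binds a field to the entire object being serialized rather than one of its attributes; this is what lets the views further down wrap a bare string into a response shape. A small demonstration:

from rest_framework import serializers

class ErrorDetailSerializer(serializers.Serializer):
    detail = serializers.CharField(source="*")

# source="*" hands the whole instance to the field, so a plain string
# serializes straight into the expected error shape:
ErrorDetailSerializer("Invalid input").data  # {'detail': 'Invalid input'}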
import django.dispatch
mutation_created = django.dispatch.Signal(providing_args=["mutation"])
mutation_updated = django.dispatch.Signal(
providing_args=["mutation", "old_is_approved", "new_is_approved"]
)
""" Required args: mutation """
mutation_created = django.dispatch.Signal()
""" Required args: mutation, old_is_approved, new_is_approved """
mutation_updated = django.dispatch.Signal()
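providing_args was deprecated in Django 3.0 and removed in 4.0; it was documentation only, so the expected kwargs move into docstrings and the send/receive contract stays the same. A self-contained sketch:

import django.dispatch

mutation_created = django.dispatch.Signal()  # required kwarg: mutation

def on_created(sender, mutation, **kwargs):
    # Receivers still get the kwargs exactly as before.
    print("mutation created:", mutation)

mutation_created.connect(on_created)
mutation_created.send(sender=None, mutation={"type": "update"})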
import os
import shutil
import slugify
import slugify
from django.core.files.storage import FileSystemStorage
from storages.backends.s3boto3 import S3Boto3Storage
@@ -11,10 +11,7 @@ from django.utils import timezone
from funkwhale_api.common import channels
from funkwhale_api.taskapp import celery
from . import models
from . import serializers
from . import session
from . import signals
from . import models, serializers, session, signals
logger = logging.getLogger(__name__)
import collections
from django.conf import settings
from django.core.cache import cache
from rest_framework import throttling as rest_throttling
from django.conf import settings
def get_ident(user, request):
if user and user.is_authenticated:
return {"type": "authenticated", "id": user.pk}
return {"type": "authenticated", "id": f"{user.pk}"}
ident = rest_throttling.BaseThrottle().get_ident(request)
return {"type": "anonymous", "id": ident}
import datetime
import hashlib
from django.core.files.base import ContentFile
from django.http import request
from django.utils.deconstruct import deconstructible
import bleach.sanitizer
import logging
import markdown
import os
import shutil
import uuid
import xml.etree.ElementTree as ET
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
from django.conf import settings
import bleach.sanitizer
import markdown
from django import urls
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models, transaction
from django.http import request
from django.utils import timezone
from django.utils.deconstruct import deconstructible
logger = logging.getLogger(__name__)
@@ -39,7 +36,7 @@ def rename_file(instance, field_name, new_name, allow_missing_file=False):
field = getattr(instance, field_name)
current_name, extension = os.path.splitext(field.name)
new_name_with_extension = "{}{}".format(new_name, extension)
new_name_with_extension = f"{new_name}{extension}"
try:
shutil.move(field.path, new_name_with_extension)
except FileNotFoundError:
@@ -74,7 +71,7 @@ def set_query_parameter(url, **kwargs):
@deconstructible
class ChunkedPath(object):
class ChunkedPath:
def sanitize_filename(self, filename):
return filename.replace("/", "-")
@@ -91,7 +88,7 @@ class ChunkedPath(object):
parts = chunks[:3] + [filename]
else:
ext = os.path.splitext(filename)[1][1:].lower()
new_filename = "".join(chunks[3:]) + ".{}".format(ext)
new_filename = "".join(chunks[3:]) + f".{ext}"
parts = chunks[:3] + [new_filename]
return os.path.join(self.root, *parts)
@@ -141,7 +138,7 @@ def chunk_queryset(source_qs, chunk_size):
def join_url(start, end):
if end.startswith("http://") or end.startswith("https://"):
# alread a full URL, joining makes no sense
# already a full URL, joining makes no sense
return end
if start.endswith("/") and end.startswith("/"):
return start + end[1:]
@@ -187,7 +184,7 @@ def order_for_search(qs, field):
When searching, it's often more useful to have short results first,
this function will order the given qs based on the length of the given field
"""
return qs.annotate(__size=models.functions.Length(field)).order_by("__size")
return qs.annotate(__size=models.functions.Length(field)).order_by("__size", "pk")
def recursive_getattr(obj, key, permissive=False):
@@ -230,7 +227,7 @@ def replace_prefix(queryset, field, old, new):
on a whole table with a single query.
"""
qs = queryset.filter(**{"{}__startswith".format(field): old})
qs = queryset.filter(**{f"{field}__startswith": old})
# we extract the part after the old prefix, and Concat it with our new prefix
update = models.functions.Concat(
models.Value(new),
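The Concat expression is cut off by the diff context here; the full technique pairs Concat with Substr so the prefix swap happens in one UPDATE. A sketch of the complete pattern reconstructed from the visible filter and comment (the Substr completion is an assumption):

from django.db import models

def replace_prefix(queryset, field, old, new):
    qs = queryset.filter(**{f"{field}__startswith": old})
    # New value = new prefix + everything after the old prefix
    # (Substr positions are 1-indexed, hence len(old) + 1).
    update = models.functions.Concat(
        models.Value(new),
        models.functions.Substr(field, len(old) + 1, output_field=models.CharField()),
    )
    return qs.update(**{field: update})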
@@ -311,7 +308,7 @@ HTMl_CLEANER = bleach.sanitizer.Cleaner(strip=True, tags=SAFE_TAGS)
HTML_PERMISSIVE_CLEANER = bleach.sanitizer.Cleaner(
strip=True,
tags=SAFE_TAGS + ["h1", "h2", "h3", "h4", "h5", "h6", "div", "section", "article"],
attributes=["class", "rel", "alt", "title"],
attributes=["class", "rel", "alt", "title", "href"],
)
# support for additional tlds
@@ -356,7 +353,7 @@ def attach_content(obj, field, content_data):
from . import models
content_data = content_data or {}
existing = getattr(obj, "{}_id".format(field))
existing = getattr(obj, f"{field}_id")
if existing:
if same_content(getattr(obj, field), **content_data):
@@ -379,10 +376,9 @@ def attach_content(obj, field, content_data):
@transaction.atomic
def attach_file(obj, field, file_data, fetch=False):
from . import models
from . import tasks
from . import models, tasks
existing = getattr(obj, "{}_id".format(field))
existing = getattr(obj, f"{field}_id")
if existing:
getattr(obj, field).delete()
@@ -399,7 +395,7 @@ def attach_file(obj, field, file_data, fetch=False):
name = [
getattr(obj, field) for field in name_fields if getattr(obj, field, None)
][0]
filename = "{}-{}.{}".format(field, name, extension)
filename = f"{field}-{name}.{extension}"
if "url" in file_data:
attachment.url = file_data["url"]
else:
@@ -481,14 +477,13 @@ def monkey_patch_request_build_absolute_uri():
def get_file_hash(file, algo=None, chunk_size=None, full_read=False):
algo = algo or settings.HASHING_ALGORITHM
chunk_size = chunk_size or settings.HASHING_CHUNK_SIZE
handler = getattr(hashlib, algo)
hash = handler()
hasher = hashlib.new(algo)
file.seek(0)
if full_read:
for byte_block in iter(lambda: file.read(chunk_size), b""):
hash.update(byte_block)
hasher.update(byte_block)
else:
# sometimes, it's useful to only hash the beginning of the file, e.g
# to avoid a lot of I/O when crawling large libraries
hash.update(file.read(chunk_size))
return "{}:{}".format(algo, hash.hexdigest())
hasher.update(file.read(chunk_size))
return f"{algo}:{hasher.hexdigest()}"
@@ -6,7 +7,7 @@ from django.core.exceptions import ValidationError
from django.core.files.images import get_image_dimensions
from django.template.defaultfilters import filesizeformat
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import gettext_lazy as _
@deconstructible
@@ -72,7 +72,7 @@ class ImageDimensionsValidator:
@deconstructible
class FileValidator(object):
class FileValidator:
"""
Taken from https://gist.github.com/jrosebr1/2140738
Validator for files, checking the size, extension and mimetype.
@@ -97,7 +97,7 @@ class FileValidator(object):
"MIME type '%(mimetype)s' is not valid. Allowed types are: %(allowed_mimetypes)s."
)
min_size_message = _(
"The current file %(size)s, which is too small. The minumum file size is %(allowed_size)s."
"The current file %(size)s, which is too small. The minimum file size is %(allowed_size)s."
)
max_size_message = _(
"The current file %(size)s, which is too large. The maximum file size is %(allowed_size)s."
@@ -163,5 +163,5 @@ class DomainValidator(validators.URLValidator):
If it fails, we know the domain is not valid.
"""
super().__call__("http://{}".format(value))
super().__call__(f"http://{value}")
return value
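DomainValidator delegates to Django's URLValidator by faking a scheme: if "http://<value>" is not a valid URL, the bare domain cannot be valid either. The trick in isolation, as a standalone sketch (the function name is hypothetical):

from django.core.validators import URLValidator

def validate_domain(value):
    # URLValidator raises ValidationError when "http://<value>" is not a
    # valid URL, which is exactly when <value> is not a valid domain.
    URLValidator()(f"http://{value}")
    return value

validate_domain("funkwhale.audio")   # ok
# validate_domain("not a domain")    -> raises ValidationError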
@@ -3,28 +3,26 @@ import time
from django.conf import settings
from django.db import transaction
from drf_spectacular.utils import extend_schema
from rest_framework import (
exceptions,
generics,
mixins,
permissions,
response,
views,
viewsets,
)
from rest_framework.decorators import action
from rest_framework import exceptions
from rest_framework import mixins
from rest_framework import permissions
from rest_framework import response
from rest_framework import views
from rest_framework import viewsets
from config import plugins
from funkwhale_api.common.serializers import (
ErrorDetailSerializer,
TextPreviewSerializer,
)
from funkwhale_api.users.oauth import permissions as oauth_permissions
from . import filters
from . import models
from . import mutations
from . import serializers
from . import signals
from . import tasks
from . import throttling
from . import utils
from . import filters, models, mutations, serializers, signals, tasks, throttling, utils
logger = logging.getLogger(__name__)
@@ -78,6 +76,7 @@ class MutationViewSet(
return super().perform_destroy(instance)
@extend_schema(operation_id="approve_mutation")
@action(detail=True, methods=["post"])
@transaction.atomic
def approve(self, request, *args, **kwargs):
@@ -107,6 +106,7 @@
)
return response.Response({}, status=200)
@extend_schema(operation_id="reject_mutation")
@action(detail=True, methods=["post"])
@transaction.atomic
def reject(self, request, *args, **kwargs):
@@ -139,6 +139,7 @@
class RateLimitView(views.APIView):
permission_classes = []
throttle_classes = []
serializer_class = serializers.RateLimitSerializer
def get(self, request, *args, **kwargs):
ident = throttling.get_ident(getattr(request, "user", None), request)
@@ -147,7 +148,7 @@
"ident": ident,
"scopes": throttling.get_status(ident, time.time()),
}
return response.Response(data, status=200)
return response.Response(serializers.RateLimitSerializer(data).data, status=200)
class AttachmentViewSet(
@@ -175,7 +176,12 @@ class AttachmentViewSet(
return r
size = request.GET.get("next", "original").lower()
if size not in ["original", "medium_square_crop", "large_square_crop"]:
if size not in [
"original",
"small_square_crop",
"medium_square_crop",
"large_square_crop",
]:
size = "original"
try:
@@ -197,20 +203,25 @@ class AttachmentViewSet(
instance.delete()
class TextPreviewView(views.APIView):
class TextPreviewView(generics.GenericAPIView):
permission_classes = []
serializer_class = TextPreviewSerializer
@extend_schema(
operation_id="preview_text",
responses={200: TextPreviewSerializer, 400: ErrorDetailSerializer},
)
def post(self, request, *args, **kwargs):
payload = request.data
if "text" not in payload:
return response.Response({"detail": "Invalid input"}, status=400)
return response.Response(
ErrorDetailSerializer("Invalid input").data, status=400
)
permissive = payload.get("permissive", False)
data = {
"rendered": utils.render_html(
payload["text"], "text/markdown", permissive=permissive
)
}
data = TextPreviewSerializer(
utils.render_html(payload["text"], "text/markdown", permissive=permissive)
).data
return response.Response(data, status=200)
@@ -272,6 +283,7 @@ class PluginViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
user.plugins.filter(code=kwargs["pk"]).delete()
return response.Response(status=204)
@extend_schema(operation_id="enable_plugin")
@action(detail=True, methods=["post"])
def enable(self, request, *args, **kwargs):
user = request.user
@@ -280,6 +292,7 @@ class PluginViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
plugins.enable_conf(kwargs["pk"], True, user)
return response.Response({}, status=200)
@extend_schema(operation_id="disable_plugin")
@action(detail=True, methods=["post"])
def disable(self, request, *args, **kwargs):
user = request.user
# -*- coding: utf-8 -*-
import logging
from config import plugins
from funkwhale_api.contrib.archivedl import tasks
from .funkwhale_startup import PLUGIN
logger = logging.getLogger(__name__)
@plugins.register_hook(plugins.TRIGGER_THIRD_PARTY_UPLOAD, PLUGIN)
def lauch_download(track, conf={}):
tasks.archive_download.delay(track_id=track.pk, conf=conf)
from config import plugins
PLUGIN = plugins.get_plugin_config(
name="archivedl",
label="Archive-dl",
description="",
version="0.1",
user=False,
conf=[],
)
import asyncio
import hashlib
import logging
import os
import tempfile
import time
import urllib.parse
from datetime import timedelta
import requests
from django.core.files import File
from django.utils import timezone
from funkwhale_api.federation import actors
from funkwhale_api.music import models, utils
from funkwhale_api.taskapp import celery
logger = logging.getLogger(__name__)
class TooManyQueriesError(Exception):
pass
def check_existing_download_task(track):
if models.Upload.objects.filter(
track=track,
import_status__in=["pending", "finished"],
third_party_provider="archive-dl",
).exists():
raise TooManyQueriesError(
"Upload for this track already exist or is pending. Stopping task."
)
def check_last_third_party_queries(track, count):
# 15 per minute according to their docs = one every 4 seconds
time_threshold = timezone.now() - timedelta(seconds=5)
if models.Upload.objects.filter(
third_party_provider="archive-dl",
import_status__in=["pending", "finished"],
creation_date__gte=time_threshold,
).exists():
logger.info(
"Last archive.org query was too recent. Trying to wait 2 seconds..."
)
time.sleep(2)
count += 1
if count > 3:
raise TooManyQueriesError(
"Probably too many archivedl tasks are queue, stopping this task"
)
check_last_third_party_queries(track, count)
def create_upload(url, track, files_data):
mimetype = f"audio/{files_data.get('format', 'unknown')}"
duration = files_data.get("mtime", 0)
filesize = files_data.get("size", 0)
bitrate = files_data.get("bitrate", 0)
service_library = models.Library.objects.create(
privacy_level="everyone",
actor=actors.get_service_actor(),
)
return models.Upload.objects.create(
mimetype=mimetype,
source=url,
third_party_provider="archive-dl",
creation_date=timezone.now(),
track=track,
duration=duration,
size=filesize,
bitrate=bitrate,
library=service_library,
from_activity=None,
import_status="pending",
)
@celery.app.task(name="archivedl.archive_download")
@celery.require_instance(models.Track.objects.select_related(), "track")
def archive_download(track, conf):
try:
check_existing_download_task(track)
check_last_third_party_queries(track, 0)
except TooManyQueriesError as e:
logger.error(e)
return
artist_name = utils.get_artist_credit_string(track)
query = f"mediatype:audio AND title:{track.title} AND creator:{artist_name}"
with requests.Session() as session:
url = get_search_url(query, page_size=1, page=1)
page_data = fetch_json(url, session)
for obj in page_data["response"]["docs"]:
logger.info(f"launching download item for {str(obj)}")
download_item(
item_data=obj,
session=session,
allowed_extensions=utils.SUPPORTED_EXTENSIONS,
track=track,
)
def fetch_json(url, session):
logger.info(f"Fetching {url}...")
with session.get(url) as response:
return response.json()
def download_item(
item_data,
session,
allowed_extensions,
track,
):
files_data = get_files_data(item_data["identifier"], session)
to_download = list(
filter_files(
files_data["result"],
allowed_extensions=allowed_extensions,
)
)
url = f"https://archive.org/download/{item_data['identifier']}/{to_download[0]['name']}"
upload = create_upload(url, track, to_download[0])
try:
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, to_download[0]["name"])
download_file(
path,
url=url,
session=session,
checksum=to_download[0]["sha1"],
upload=upload,
to_download=to_download,
)
logger.info(f"Finished to download item {item_data['identifier']}...")
except Exception as e:
upload.delete()
raise e
def check_integrity(path, expected_checksum):
with open(path, mode="rb") as f:
hash = hashlib.sha1()
hash.update(f.read())
return expected_checksum == hash.hexdigest()
def get_files_data(identifier, session):
url = f"https://archive.org/metadata/{identifier}/files"
logger.info(f"Fetching files data at {url}...")
with session.get(url) as response:
return response.json()
def download_file(path, url, session, checksum, upload, to_download):
if os.path.exists(path) and check_integrity(path, checksum):
logger.info(f"Skipping already downloaded file at {path}")
return
logger.info(f"Downloading file {url}...")
with open(path, mode="wb") as f:
try:
with session.get(url) as response:
f.write(response.content)
except asyncio.TimeoutError as e:
logger.error(f"Timeout error while downloading {url}: {e}")
with open(path, "rb") as f:
upload.audio_file.save(f"{to_download['name']}", File(f))
upload.import_status = "finished"
upload.url = url
upload.save()
return upload
def filter_files(files, allowed_extensions):
for f in files:
if allowed_extensions:
extension = os.path.splitext(f["name"])[-1][1:]
if extension not in allowed_extensions:
continue
yield f
def get_search_url(query, page_size, page):
q = urllib.parse.urlencode({"q": query})
return f"https://archive.org/advancedsearch.php?{q}&sort[]=addeddate+desc&rows={page_size}\
&page={page}&output=json"
# Copyright (c) 2018 Philipp Wolfer <ph.wolfer@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import logging
import ssl
import time
from http.client import HTTPSConnection
HOST_NAME = "api.listenbrainz.org"
PATH_SUBMIT = "/1/submit-listens"
SSL_CONTEXT = ssl.create_default_context()
class Track:
"""
Represents a single track to submit.
See https://listenbrainz.readthedocs.io/en/latest/dev/json.html
"""
def __init__(self, artist_name, track_name, release_name=None, additional_info={}):
"""
Create a new Track instance
@param artist_name as str
@param track_name as str
@param release_name as str
@param additional_info as dict
"""
self.artist_name = artist_name
self.track_name = track_name
self.release_name = release_name
self.additional_info = additional_info
@staticmethod
def from_dict(data):
return Track(
data["artist_name"],
data["track_name"],
data.get("release_name", None),
data.get("additional_info", {}),
)
def to_dict(self):
return {
"artist_name": self.artist_name,
"track_name": self.track_name,
"release_name": self.release_name,
"additional_info": self.additional_info,
}
def __repr__(self):
return "Track(%s, %s)" % (self.artist_name, self.track_name)
class ListenBrainzClient:
"""
Submit listens to ListenBrainz.org.
See https://listenbrainz.readthedocs.io/en/latest/dev/api.html
"""
def __init__(self, user_token, logger=logging.getLogger(__name__)):
self.__next_request_time = 0
self.user_token = user_token
self.logger = logger
def listen(self, listened_at, track):
"""
Submit a listen for a track
@param listened_at as int
@param entry as Track
"""
payload = _get_payload(track, listened_at)
return self._submit("single", [payload])
def playing_now(self, track):
"""
Submit a playing now notification for a track
@param track as Track
"""
payload = _get_payload(track)
return self._submit("playing_now", [payload])
def import_tracks(self, tracks):
"""
Import a list of tracks as (listened_at, Track) pairs
@param track as [(int, Track)]
"""
payload = _get_payload_many(tracks)
return self._submit("import", payload)
def _submit(self, listen_type, payload, retry=0):
self._wait_for_ratelimit()
self.logger.debug("ListenBrainz %s: %r", listen_type, payload)
data = {"listen_type": listen_type, "payload": payload}
headers = {
"Authorization": "Token %s" % self.user_token,
"Content-Type": "application/json",
}
body = json.dumps(data)
conn = HTTPSConnection(HOST_NAME, context=SSL_CONTEXT)
conn.request("POST", PATH_SUBMIT, body, headers)
response = conn.getresponse()
response_text = response.read()
try:
response_data = json.loads(response_text)
except json.decoder.JSONDecodeError:
response_data = response_text
self._handle_ratelimit(response)
log_msg = "Response %s: %r" % (response.status, response_data)
if response.status == 429 and retry < 5: # Too Many Requests
self.logger.warning(log_msg)
return self._submit(listen_type, payload, retry + 1)
elif response.status == 200:
self.logger.debug(log_msg)
else:
self.logger.error(log_msg)
return response
def _wait_for_ratelimit(self):
now = time.time()
if self.__next_request_time > now:
delay = self.__next_request_time - now
self.logger.debug("Rate limit applies, delay %d", delay)
time.sleep(delay)
def _handle_ratelimit(self, response):
remaining = int(response.getheader("X-RateLimit-Remaining", 0))
reset_in = int(response.getheader("X-RateLimit-Reset-In", 0))
self.logger.debug("X-RateLimit-Remaining: %i", remaining)
self.logger.debug("X-RateLimit-Reset-In: %i", reset_in)
if remaining == 0:
self.__next_request_time = time.time() + reset_in
def _get_payload_many(tracks):
payload = []
for (listened_at, track) in tracks:
data = _get_payload(track, listened_at)
payload.append(data)
return payload
def _get_payload(track, listened_at=None):
data = {"track_metadata": track.to_dict()}
if listened_at is not None:
data["listened_at"] = listened_at
return data
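Putting the helpers together: for a single listen, the JSON body POSTed to /1/submit-listens has this shape (values are illustrative):

track = Track("Nina Simone", "Feeling Good", release_name="I Put a Spell on You")
body = {"listen_type": "single", "payload": [_get_payload(track, listened_at=1700000000)]}
# {
#   "listen_type": "single",
#   "payload": [{
#     "track_metadata": {
#       "artist_name": "Nina Simone",
#       "track_name": "Feeling Good",
#       "release_name": "I Put a Spell on You",
#       "additional_info": {}
#     },
#     "listened_at": 1700000000
#   }]
# }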
import liblistenbrainz
import funkwhale_api
from config import plugins
from funkwhale_api.favorites import models as favorites_models
from funkwhale_api.history import models as history_models
from . import tasks
from .funkwhale_startup import PLUGIN
from .client import ListenBrainzClient, Track
@plugins.register_hook(plugins.LISTENING_CREATED, PLUGIN)
def submit_listen(listening, conf, **kwargs):
user_token = conf["user_token"]
if not user_token:
if not user_token and not conf["submit_listenings"]:
return
logger = PLUGIN["logger"]
logger.info("Submitting listen to ListenBrainz")
client = ListenBrainzClient(user_token=user_token, logger=logger)
track = get_track(listening.track)
client.listen(int(listening.creation_date.timestamp()), track)
client = liblistenbrainz.ListenBrainz()
client.set_auth_token(user_token)
listen = get_lb_listen(listening)
client.submit_single_listen(listen)
def get_track(track):
artist = track.artist.name
title = track.title
album = None
def get_lb_listen(listening):
track = listening.track
additional_info = {
"listening_from": "Funkwhale",
"media_player": "Funkwhale",
"media_player_version": funkwhale_api.__version__,
"submission_client": "Funkwhale ListenBrainz plugin",
"submission_client_version": PLUGIN["version"],
"tracknumber": track.position,
"discnumber": track.disc_number,
}
@@ -31,11 +40,97 @@ def get_track(track):
if track.album:
if track.album.title:
album = track.album.title
release_name = track.album.title
if track.album.mbid:
additional_info["release_mbid"] = str(track.album.mbid)
mbids = [ac.artist.mbid for ac in track.artist_credit.all() if ac.artist.mbid]
if mbids:
additional_info["artist_mbids"] = mbids
upload = track.uploads.filter(duration__gte=0).first()
if upload:
additional_info["duration"] = upload.duration
return liblistenbrainz.Listen(
track_name=track.title,
listened_at=listening.creation_date.timestamp(),
artist_name=track.get_artist_credit_string,
release_name=release_name,
additional_info=additional_info,
)
@plugins.register_hook(plugins.FAVORITE_CREATED, PLUGIN)
def submit_favorite_creation(track_favorite, conf, **kwargs):
user_token = conf["user_token"]
if not user_token or not conf["submit_favorites"]:
return
logger = PLUGIN["logger"]
logger.info("Submitting favorite to ListenBrainz")
client = liblistenbrainz.ListenBrainz()
track = track_favorite.track
if not track.mbid:
logger.warning(
"This tracks doesn't have a mbid. Feedback will not be submitted to Listenbrainz"
)
return
client.submit_user_feedback(1, track.mbid)
@plugins.register_hook(plugins.FAVORITE_DELETED, PLUGIN)
def submit_favorite_deletion(track_favorite, conf, **kwargs):
user_token = conf["user_token"]
if not user_token or not conf["submit_favorites"]:
return
logger = PLUGIN["logger"]
logger.info("Submitting favorite deletion to ListenBrainz")
client = liblistenbrainz.ListenBrainz()
track = track_favorite.track
if not track.mbid:
logger.warning(
"This tracks doesn't have a mbid. Feedback will not be submitted to Listenbrainz"
)
return
client.submit_user_feedback(0, track.mbid)
@plugins.register_hook(plugins.LISTENING_SYNC, PLUGIN)
def sync_listenings_from_listenbrainz(user, conf):
user_name = conf["user_name"]
if not user_name or not conf["sync_listenings"]:
return
logger = PLUGIN["logger"]
logger.info("Getting listenings from ListenBrainz")
try:
last_ts = (
history_models.Listening.objects.filter(actor=user.actor)
.filter(source="Listenbrainz")
.latest("creation_date")
.values_list("creation_date", flat=True)
).timestamp()
except funkwhale_api.history.models.Listening.DoesNotExist:
tasks.import_listenbrainz_listenings(user, user_name, 0)
return
tasks.import_listenbrainz_listenings(user, user_name, last_ts)
if track.artist.mbid:
additional_info["artist_mbids"] = [str(track.artist.mbid)]
@plugins.register_hook(plugins.FAVORITE_SYNC, PLUGIN)
def sync_favorites_from_listenbrainz(user, conf):
user_name = conf["user_name"]
if not user_name or not conf["sync_favorites"]:
return
try:
last_ts = (
favorites_models.TrackFavorite.objects.filter(actor=user.actor)
.filter(source="Listenbrainz")
.latest("creation_date")
.creation_date.timestamp()
)
except favorites_models.TrackFavorite.DoesNotExist:
tasks.import_listenbrainz_favorites(user, user_name, 0)
return
return Track(artist, title, album, additional_info)
tasks.import_listenbrainz_favorites(user, user_name, last_ts)
from config import plugins
PLUGIN = plugins.get_plugin_config(
name="listenbrainz",
label="ListenBrainz",
description="A plugin that allows you to submit your listens to ListenBrainz.",
description="A plugin that allows you to submit or sync your listens and favorites to ListenBrainz.",
homepage="https://docs.funkwhale.audio/users/builtinplugins.html#listenbrainz-plugin", # noqa
version="0.1",
version="0.3",
user=True,
conf=[
{
@@ -14,6 +13,45 @@ PLUGIN = plugins.get_plugin_config(
"type": "text",
"label": "Your ListenBrainz user token",
"help": "You can find your user token in your ListenBrainz profile at https://listenbrainz.org/profile/",
}
},
{
"name": "user_name",
"type": "text",
"required": False,
"label": "Your ListenBrainz user name.",
"help": "Required for importing listenings and favorites with ListenBrainz \
but not to send activities",
},
{
"name": "submit_listenings",
"type": "boolean",
"default": True,
"label": "Enable listening submission to ListenBrainz",
"help": "If enabled, your listenings from Funkwhale will be imported into ListenBrainz.",
},
{
"name": "sync_listenings",
"type": "boolean",
"default": False,
"label": "Enable listenings sync",
"help": "If enabled, your listening from ListenBrainz will be imported into Funkwhale. This means they \
will be used along with Funkwhale listenings to filter out recently listened content or \
generate recommendations",
},
{
"name": "sync_favorites",
"type": "boolean",
"default": False,
"label": "Enable favorite sync",
"help": "If enabled, your favorites from ListenBrainz will be imported into Funkwhale. This means they \
will be used along with Funkwhale favorites (UI display, federation activity)",
},
{
"name": "submit_favorites",
"type": "boolean",
"default": False,
"label": "Enable favorite submission to ListenBrainz services",
"help": "If enabled, your favorites from Funkwhale will be submitted to ListenBrainz",
},
],
)
import datetime
import liblistenbrainz
from django.utils import timezone
from config import plugins
from funkwhale_api.favorites import models as favorites_models
from funkwhale_api.history import models as history_models
from funkwhale_api.music import models as music_models
from funkwhale_api.taskapp import celery
from funkwhale_api.users import models
from .funkwhale_startup import PLUGIN
@celery.app.task(name="listenbrainz.trigger_listening_sync_with_listenbrainz")
def trigger_listening_sync_with_listenbrainz():
now = timezone.now()
active_month = now - datetime.timedelta(days=30)
users = (
models.User.objects.filter(plugins__code="listenbrainz")
.filter(plugins__conf__sync_listenings=True)
.filter(last_activity__gte=active_month)
)
for user in users:
plugins.trigger_hook(
plugins.LISTENING_SYNC,
user=user,
confs=plugins.get_confs(user),
)
@celery.app.task(name="listenbrainz.trigger_favorite_sync_with_listenbrainz")
def trigger_favorite_sync_with_listenbrainz():
now = timezone.now()
active_month = now - datetime.timedelta(days=30)
users = (
models.User.objects.filter(plugins__code="listenbrainz")
.filter(plugins__conf__sync_listenings=True)
.filter(last_activity__gte=active_month)
)
for user in users:
plugins.trigger_hook(
plugins.FAVORITE_SYNC,
user=user,
confs=plugins.get_confs(user),
)
@celery.app.task(name="listenbrainz.import_listenbrainz_listenings")
def import_listenbrainz_listenings(user, user_name, since):
client = liblistenbrainz.ListenBrainz()
response = client.get_listens(username=user_name, min_ts=since, count=100)
listens = response["payload"]["listens"]
while listens:
add_lb_listenings_to_db(listens, user)
new_ts = max(
listens,
key=lambda obj: datetime.datetime.fromtimestamp(
obj.listened_at, datetime.timezone.utc
),
)
response = client.get_listens(username=user_name, min_ts=new_ts, count=100)
listens = response["payload"]["listens"]
def add_lb_listenings_to_db(listens, user):
logger = PLUGIN["logger"]
fw_listens = []
for listen in listens:
if (
listen.additional_info.get("submission_client")
and listen.additional_info.get("submission_client")
== "Funkwhale ListenBrainz plugin"
and history_models.Listening.objects.filter(
creation_date=datetime.datetime.fromtimestamp(
listen.listened_at, datetime.timezone.utc
)
).exists()
):
logger.info(
f"Listen with ts {listen.listened_at} skipped because already in db"
)
continue
mbid = (
listen.mbid_mapping
if hasattr(listen, "mbid_mapping")
else listen.recording_mbid
)
if not mbid:
logger.info("Received listening that doesn't have a mbid. Skipping...")
try:
track = music_models.Track.objects.get(mbid=mbid)
except music_models.Track.DoesNotExist:
logger.info(
"Received listening that doesn't exist in fw database. Skipping..."
)
continue
user = user
fw_listen = history_models.Listening(
creation_date=datetime.datetime.fromtimestamp(
listen.listened_at, datetime.timezone.utc
),
track=track,
actor=user.actor,
source="Listenbrainz",
)
fw_listens.append(fw_listen)
history_models.Listening.objects.bulk_create(fw_listens)
@celery.app.task(name="listenbrainz.import_listenbrainz_favorites")
def import_listenbrainz_favorites(user, user_name, since):
client = liblistenbrainz.ListenBrainz()
response = client.get_user_feedback(username=user_name)
offset = 0
while response["feedback"]:
count = response["count"]
offset = offset + count
last_sync = min(
response["feedback"],
key=lambda obj: datetime.datetime.fromtimestamp(
obj["created"], datetime.timezone.utc
),
)["created"]
add_lb_feedback_to_db(response["feedback"], user)
if last_sync <= since or count == 0:
return
response = client.get_user_feedback(username=user_name, offset=offset)
def add_lb_feedback_to_db(feedbacks, user):
logger = PLUGIN["logger"]
for feedback in feedbacks:
try:
track = music_models.Track.objects.get(mbid=feedback["recording_mbid"])
except music_models.Track.DoesNotExist:
logger.info(
"Received feedback track that doesn't exist in fw database. Skipping..."
)
continue
if feedback["score"] == 1:
favorites_models.TrackFavorite.objects.get_or_create(
actor=user.actor,
creation_date=datetime.datetime.fromtimestamp(
feedback["created"], datetime.timezone.utc
),
track=track,
source="Listenbrainz",
)
elif feedback["score"] == 0:
try:
favorites_models.TrackFavorite.objects.get(
actor=user.actor, track=track
).delete()
except favorites_models.TrackFavorite.DoesNotExist:
continue
elif feedback["score"] == -1:
logger.info("Funkwhale doesn't support disliked tracks")
import json
from config import plugins
from .funkwhale_startup import PLUGIN
@@ -16,30 +17,42 @@ def submit_listen(listening, conf, **kwargs):
return
logger = PLUGIN["logger"]
logger.info("Submitting listening to Majola at %s", server_url)
payload = get_payload(listening, api_key)
logger.debug("Majola payload: %r", payload)
logger.info("Submitting listening to Maloja at %s", server_url)
payload = get_payload(listening, api_key, conf)
logger.debug("Maloja payload: %r", payload)
url = server_url.rstrip("/") + "/apis/mlj_1/newscrobble"
session = plugins.get_session()
response = session.post(url, payload)
response = session.post(url, json=payload)
response.raise_for_status()
details = json.loads(response.text)
if details["status"] == "success":
logger.info("Majola listening submitted successfully")
logger.info("Maloja listening submitted successfully")
else:
raise MalojaException(response.text)
def get_payload(listening, api_key):
def get_payload(listening, api_key, conf):
track = listening.track
# See https://github.com/krateng/maloja/blob/master/API.md
payload = {
"key": api_key,
"artists": track.artist.name,
"artists": [artist.name for artist in track.artist_credit.get_artists_list()],
"title": track.title,
"time": int(listening.creation_date.timestamp()),
"nofix": bool(conf.get("nofix")),
}
if track.album:
if track.album.title:
payload["album"] = track.album.title
if track.album.artist_credit.all():
payload["albumartists"] = [
artist.name for artist in track.album.artist_credit.get_artists_list()
]
upload = track.uploads.filter(duration__gte=0).first()
if upload:
payload["length"] = upload.duration
return payload
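For a track with album data and a known upload duration, the resulting body sent to /apis/mlj_1/newscrobble would look like this (values are illustrative; the key placeholder stands for the user's Maloja API key):

payload = {
    "key": "<maloja-api-key>",
    "artists": ["Nina Simone"],
    "title": "Feeling Good",
    "time": 1700000000,
    "nofix": False,
    "album": "I Put a Spell on You",
    "albumartists": ["Nina Simone"],
    "length": 174,  # seconds, from the first upload with a duration
}
# session.post(url, json=payload); Maloja answers with {"status": "success"}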