Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
75 commits
Select commit Hold shift + click to select a range
12d7e7a
added POST request
sh1nkey Feb 7, 2024
73bdd39
added GET request for rating of projects
sh1nkey Feb 7, 2024
09bc493
used black, did naming corrections, fixed admin panel
sh1nkey Feb 9, 2024
afc342a
added serializer for GET
sh1nkey Feb 9, 2024
6c4171c
linter update
sh1nkey Feb 9, 2024
d1b9a68
linter fix
sh1nkey Feb 9, 2024
975bf2c
linter and naming update
sh1nkey Feb 9, 2024
751c9a6
added comment, changed namings, fixed models
sh1nkey Feb 10, 2024
7e41257
updated serializer code, naming and model fixes
sh1nkey Feb 10, 2024
bafe334
changed code to one value field
sh1nkey Feb 10, 2024
1929b54
changed code to one value field
sh1nkey Feb 10, 2024
d8fac38
changed model to one value field
sh1nkey Feb 10, 2024
33b5f73
added permissions for selected experts for selected programs
sh1nkey Feb 12, 2024
cbd1567
validation error fixed
sh1nkey Feb 12, 2024
d7a3bfe
pagination added, serializers changed
sh1nkey Feb 17, 2024
7670a27
added serializer that forgor to add previously
sh1nkey Feb 17, 2024
bd43735
added serializer that forgor to add previously x2
sh1nkey Feb 17, 2024
7d60f02
typhints corrected
sh1nkey Feb 17, 2024
3be2338
bug fixed
sh1nkey Feb 17, 2024
9dc37f2
new_fixed
sh1nkey Feb 19, 2024
5f612b2
linter fix
sh1nkey Feb 19, 2024
03ac396
minor fixes
sh1nkey Feb 19, 2024
a693473
strange old bugs from prod fixed
sh1nkey Feb 19, 2024
3b4fa17
naming corrected
sh1nkey Feb 19, 2024
1f3ce2c
POST project rate correction
sh1nkey Feb 19, 2024
f72f120
minor description fix
sh1nkey Feb 19, 2024
92aa188
minor grammar fix 💀
sh1nkey Feb 19, 2024
96f1748
error processing code uncommented
sh1nkey Feb 19, 2024
32fa001
mapping corrected
sh1nkey Feb 19, 2024
b12a4c2
corrections are made
sh1nkey Feb 19, 2024
90bb031
deleted serializer, changed code in helpers.py
sh1nkey Feb 19, 2024
55ea579
naming corrected again
sh1nkey Feb 19, 2024
d619863
minor fixes
sh1nkey Feb 19, 2024
65f1ac5
Merge pull request #281 from PROCOLLAB-github/flexivanov237-pro-173
sh1nkey Feb 19, 2024
151555c
hotfix feed
sh1nkey Feb 20, 2024
a773a4d
Merge branch 'dev' of https://github.com/PROCOLLAB-github/api
sh1nkey Feb 20, 2024
7c0efb7
Merge pull request #282 from PROCOLLAB-github/flexivanov237-pro-173
gregor-tokarev Feb 20, 2024
61c3029
feed fixed pagination offset
sh1nkey Feb 20, 2024
1a5d7ee
Merge pull request #283 from PROCOLLAB-github/flexivanov237-pro-173
sh1nkey Feb 20, 2024
2644543
fixed no limit and no offset bug
sh1nkey Feb 20, 2024
5abf8cb
fix of fix of feed
sh1nkey Feb 20, 2024
205ec48
pagination added
sh1nkey Feb 21, 2024
e2fe20f
fix serializer
sh1nkey Feb 21, 2024
76a5c0f
minor fix
sh1nkey Feb 21, 2024
1dd1e49
Merge pull request #284 from PROCOLLAB-github/flexivanov237-pro-173
sh1nkey Feb 21, 2024
6aa7dfc
fixed vacancy skills
sh1nkey Feb 21, 2024
a6b0475
comment added
sh1nkey Feb 21, 2024
6e46a8c
comment added x2
sh1nkey Feb 21, 2024
743c950
comment added x2
sh1nkey Feb 21, 2024
1158a71
Merge pull request #285 from PROCOLLAB-github/flexivanov237-pro-173
sh1nkey Feb 21, 2024
57827d2
merge conflicts fixed
sh1nkey Feb 21, 2024
662b33e
Merge pull request #263 from PROCOLLAB-github/flexivanov237-pro-144
sh1nkey Feb 21, 2024
e3f4646
naming fixed
sh1nkey Feb 21, 2024
6d9f3a3
naming fixed x2
sh1nkey Feb 21, 2024
94ddeb9
naming fixed in urls.py
sh1nkey Feb 21, 2024
b52d219
Merge pull request #286 from PROCOLLAB-github/flexivanov237-pro-144
sh1nkey Feb 21, 2024
1a4f294
merge problems fixed
sh1nkey Feb 21, 2024
5f6288a
Merge pull request #264 from PROCOLLAB-github/flexivanov237-pro-143
sh1nkey Feb 21, 2024
0eb7f54
wrong naming again bruh
sh1nkey Feb 21, 2024
9f41094
imports fixes
sh1nkey Feb 21, 2024
d0e5f0e
Merge pull request #287 from PROCOLLAB-github/fix_naming
sh1nkey Feb 21, 2024
4315dda
Merge pull request #266 from PROCOLLAB-github/flexivanov237-pro-148
sh1nkey Feb 21, 2024
f0eb064
undo fix
sh1nkey Feb 21, 2024
45b771a
Merge pull request #288 from PROCOLLAB-github/revert-merge
sh1nkey Feb 21, 2024
88381c1
get details added
sh1nkey Feb 21, 2024
82f1a49
Merge pull request #289 from PROCOLLAB-github/rates_get_detail
sh1nkey Feb 21, 2024
bb1fe15
Revert "get details added"
sh1nkey Feb 21, 2024
24c3254
Merge pull request #290 from PROCOLLAB-github/revert-289-rates_get_de…
sh1nkey Feb 21, 2024
411f641
get details added 1
sh1nkey Feb 21, 2024
61c0b18
Merge branch 'dev' into rates_get_detail
sh1nkey Feb 21, 2024
ebaa042
Merge pull request #291 from PROCOLLAB-github/rates_get_detail
sh1nkey Feb 21, 2024
99fda6b
get details added 2
sh1nkey Feb 21, 2024
8fcc5ac
Merge branch 'dev' of https://github.com/PROCOLLAB-github/api into dev
sh1nkey Feb 21, 2024
d50092b
get details added 2
sh1nkey Feb 21, 2024
3fdaad1
Merge pull request #292 from PROCOLLAB-github/rates_get_detail
sh1nkey Feb 21, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions core/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,9 @@ class SkillToObject(models.Model):
class SpecializationCategory(models.Model):
name = models.TextField()

def __str__(self):
    # Human-readable label used by the Django admin and shell output.
    return self.name

class Meta:
verbose_name = "Категория специализации"
verbose_name_plural = "Категории специализаций"
Expand All @@ -166,6 +169,9 @@ class Specialization(models.Model):
SpecializationCategory, related_name="specializations", on_delete=models.CASCADE
)

def __str__(self):
    # Human-readable label used by the Django admin and shell output.
    return self.name

class Meta:
verbose_name = "Специализация"
verbose_name_plural = "Специализации"
Expand Down
19 changes: 17 additions & 2 deletions feed/constants.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
import enum

from django.db.models import QuerySet
from rest_framework import serializers

from news.serializers import NewsListSerializer
from news.models import News
from news.serializers import NewsFeedListSerializer
from projects.models import Project
from projects.serializers import ProjectListSerializer
from vacancy.models import Vacancy
from vacancy.serializers import VacancyDetailSerializer


Expand All @@ -15,6 +19,17 @@ class FeedItemType(enum.Enum):

FEED_SERIALIZER_MAPPING: dict[FeedItemType, serializers.Serializer] = {
FeedItemType.PROJECT.value: ProjectListSerializer,
FeedItemType.NEWS.value: NewsListSerializer,
FeedItemType.NEWS.value: NewsFeedListSerializer,
FeedItemType.VACANCY.value: VacancyDetailSerializer,
}

# Type aliases for the three content models the feed endpoint can serve.
SupportedModel = News | Project | Vacancy
SupportedQuerySet = QuerySet[News | Project | Vacancy]

# Maps each FeedItemType value to the Django model backing it; used by the
# feed view to resolve the ``?type=`` filter into model classes.
model_mapping = {
    FeedItemType.NEWS.value: News,
    FeedItemType.PROJECT.value: Project,
    FeedItemType.VACANCY.value: Vacancy,
}

LIMIT_PAGINATION_CONSTANT = 10
153 changes: 129 additions & 24 deletions feed/helpers.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,143 @@
import random
import typing
from random import shuffle
from typing import Iterable

from rest_framework.request import Request
from rest_framework.views import APIView

from feed import constants
from feed.constants import (
SupportedModel,
SupportedQuerySet,
LIMIT_PAGINATION_CONSTANT
)
from feed.pagination import FeedPagination
from feed.serializers import FeedItemSerializer
from news.models import News
from projects.models import Project

from django.db.models import Count

def collect_feed(models_list: typing.List, num) -> list[dict]:
get_model_data = {
model.__name__: collect_querysets(model, num) for model in models_list
}
from vacancy.models import Vacancy


def add_pagination(results: list[SupportedQuerySet], count: int) -> dict:
    """Wrap already-paginated feed results in a pagination envelope.

    ``previous``/``next`` links are not used by the custom feed paginator,
    so they are always ``None``.
    """
    return {
        "count": count,        # total items across all selected querysets
        "previous": None,
        "next": None,
        "results": results,
    }


def paginate_serialize_feed(
model_data: dict[SupportedQuerySet],
paginator: FeedPagination,
request: Request,
view: APIView,
) -> tuple[list[SupportedQuerySet], int]:
result = []
for model in get_model_data:
result.extend(to_feed_items(model, get_model_data[model]))
random.shuffle(result)
return result
pages_count = 0

if len(model_data) == 0:
return [], 0

offset = request.query_params.get("offset", 0)
request.query_params._mutable = True

def collect_querysets(model, num):
if model.__name__ == Project.__class__.__name__:
return set(get_n_random_projects(num) + get_n_latest_created_projects(num))
if isinstance(offset, str) and offset.isdigit():
offset = int(offset)
else:
return list(model.objects.order_by("-datetime_created")[:num])
offset = 0

request.query_params["offset"] = offset

models_counts = {
model_name: model_data[model_name].count() for model_name in model_data.keys()
}
offset_numbers = offset_distribution(offset, models_counts)

for model_name in model_data.keys():
request.query_params["offset"] = offset_numbers[model_name]

paginated_part: dict = paginate_serialize_feed_queryset(
model_data, paginator, request, model_name, models_counts[model_name], view
)

result += paginated_part["paginated_data"]
pages_count += paginated_part["page_count"]

limit = request.query_params.get("limit", LIMIT_PAGINATION_CONSTANT)

if limit == "":
limit = LIMIT_PAGINATION_CONSTANT
else:
limit = int(limit)

shuffle(result)
return result[:limit], pages_count


def to_feed_items(type_: constants.FeedItemType, items: typing.Iterable) -> list[dict]:
def offset_distribution(offset: int, models_counts: dict) -> dict:
    """Split a global feed offset into per-model offsets.

    The offset is divided evenly across all models and the remainder is added
    to the model that was registered last.  Keys are then visited from the
    model with the fewest items to the one with the most, and any share that
    exceeds a model's item count is pushed onto the next (larger) model.
    """
    original_keys = list(models_counts)
    base_share, remainder = divmod(offset, len(original_keys))

    # Even split, re-ordered so that smaller collections come first.
    shares = {name: base_share for name in original_keys}
    shares = dict(sorted(shares.items(), key=lambda kv: models_counts[kv[0]]))

    # NOTE: the remainder goes to the *last registered* key, not the largest one.
    shares[original_keys[-1]] += remainder

    # Push overflow from smaller models onto the next larger one; overflow on
    # the final (largest) model is simply dropped.
    ordered = list(shares)
    for position, name in enumerate(ordered):
        overflow = shares[name] - models_counts[name]
        if overflow > 0:
            shares[name] = models_counts[name]
            if position + 1 < len(ordered):
                shares[ordered[position + 1]] += overflow

    return shares


def paginate_serialize_feed_queryset(
    model_data: dict[SupportedQuerySet],
    paginator: FeedPagination,
    request: Request,
    model: SupportedModel,
    count: int,
    view: APIView,
) -> dict:
    """Paginate one model's queryset and serialize the page into feed items.

    Returns a dict with ``paginated_data`` (the serialized feed items for the
    requested page slice) and ``page_count`` (the count reported back by the
    paginator for this queryset).
    """
    page = paginator.custom_paginate_queryset(
        model_data[model], request, count, view=view
    )
    return {
        "paginated_data": to_feed_items(model, page["queryset_ready"]),
        "page_count": page["count"],
    }


def collect_querysets(model: SupportedModel) -> SupportedQuerySet:
    """Build the base queryset for one feed model, newest first.

    Each supported model gets the select/prefetch hints its feed serializer
    needs, then the queryset is ordered by ``-datetime_created``.

    Raises:
        ValueError: if *model* is not one of the supported feed models.
            (Previously an unsupported model fell through every branch and
            crashed with an ``UnboundLocalError`` on ``queryset``.)
    """
    if model is Project:
        # Draft projects never appear in the feed.
        queryset = model.objects.select_related("leader", "industry").filter(draft=False)
    elif model is Vacancy:
        queryset = model.objects.select_related("project")
    elif model is News:
        queryset = (
            model.objects.select_related("content_type")
            .prefetch_related("content_object", "files")
            .annotate(likes_count=Count("likes"), views_count=Count("views"))
        )
    else:
        # Fail loudly and descriptively instead of UnboundLocalError below.
        raise ValueError(f"Unsupported feed model: {model!r}")

    return queryset.order_by("-datetime_created")


def to_feed_items(type_: constants.FeedItemType, items: Iterable) -> list[dict]:
feed_items = []
for item in items:
serializer = to_feed_item(type_, item)
Expand All @@ -33,14 +146,6 @@ def to_feed_items(type_: constants.FeedItemType, items: typing.Iterable) -> list
return feed_items


def get_n_random_projects(num: int) -> list[Project]:
return list(Project.objects.filter(draft=False).order_by("?").distinct()[:num])


def get_n_latest_created_projects(num: int) -> list[Project]:
return list(Project.objects.filter(draft=False).order_by("-datetime_created")[:num])


def to_feed_item(type_: constants.FeedItemType, data):
serializer = constants.FEED_SERIALIZER_MAPPING[type_](data)
return FeedItemSerializer(data={"type": type_, "content": serializer.data})
return FeedItemSerializer(data={"type_model": type_, "content": serializer.data})
32 changes: 32 additions & 0 deletions feed/pagination.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
from rest_framework import pagination
from rest_framework.request import Request

from feed.constants import SupportedQuerySet


class FeedPagination(pagination.LimitOffsetPagination):
    """Limit/offset paginator whose total count is supplied by the caller
    instead of being computed from the queryset."""

    # Defaults; the feed views read limit/offset from these query params.
    default_limit = 10
    limit_query_param = "limit"
    offset_query_param = "offset"

    def custom_paginate_queryset(
        self, queryset: SupportedQuerySet, request: Request, count: int, view=None
    ) -> dict:
        """Slice ``queryset`` for the current page.

        Unlike the stock ``paginate_queryset``, the total ``count`` is passed
        in (it was computed earlier per model), so the queryset is not
        re-counted here.  Returns ``{"queryset_ready": <page>, "count": N}``.
        """
        self.limit = self.get_limit(request)
        if self.limit is None:
            # NOTE(review): callers index into the returned dict, so this None
            # would raise there; with default_limit set, get_limit should not
            # return None in practice — confirm before relying on this branch.
            return None

        self.count = count
        self.offset = self.get_offset(request)
        self.request = request
        if self.count > self.limit and self.template is not None:
            self.display_page_controls = True

        if self.count == 0 or self.offset > self.count:
            # Nothing to show on this page; keep the envelope shape.
            return {"queryset_ready": [], "count": self.count}

        queryset_ready = queryset[self.offset : self.offset + self.limit]  # noqa: E203
        return {
            "queryset_ready": queryset_ready,
            "count": self.count,
        }
2 changes: 1 addition & 1 deletion feed/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,5 @@


class FeedItemSerializer(serializers.Serializer):
    """Envelope for one feed entry: its item type plus serialized content."""

    # Diff residue kept both the old "type" field line and its replacement;
    # the merged code uses "type_model" only.
    type_model = serializers.ChoiceField(choices=constants.FeedItemType, required=True)
    content = serializers.JSONField(required=True)
50 changes: 36 additions & 14 deletions feed/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,14 @@
from rest_framework.response import Response
from rest_framework.views import APIView

from feed.helpers import collect_feed
from news.models import News
from projects.models import Project
from vacancy.models import Vacancy
from feed.constants import SupportedModel, SupportedQuerySet, FeedItemType, model_mapping
from feed.helpers import collect_querysets, paginate_serialize_feed, add_pagination
from feed.pagination import FeedPagination


class FeedList(APIView):
pagination_class = FeedPagination

@swagger_auto_schema(
responses={
200: openapi.Response(
Expand All @@ -31,13 +32,34 @@ class FeedList(APIView):
}
)
def get(self, request: Request, *args, **kwargs) -> Response:
models = []
filter = request.query_params.get("type")
if "news" in filter:
models.append(News)
if "project" in filter:
models.append(Project)
if "vacancy" in filter:
models.append(Vacancy)

return Response(status=status.HTTP_200_OK, data=collect_feed(models, 3))
prepared_data, sum_pages = self.paginate_serialize_data(
self.get_response_data(self.get_request_data())
)
for obj in prepared_data:
obj["type_model"] = obj["type_model"].lower()
return Response(
status=status.HTTP_200_OK,
data=add_pagination(prepared_data, sum_pages),
)

def get_request_data(self) -> list[SupportedModel]:
    """Resolve the ``?type=`` query parameter into a list of model classes.

    A model is selected when its mapping key, lowercased, is a substring of
    the raw ``type`` value; a missing parameter selects no models.
    """
    filter_queries = self.request.query_params.get("type")
    filter_queries = filter_queries if filter_queries else ""  # existence check

    models = [
        model_mapping[model_name]
        for model_name in model_mapping.keys()
        if model_name.lower() in filter_queries
    ]
    return models

def get_response_data(
    self, models: list[SupportedModel]
) -> dict[FeedItemType, SupportedQuerySet]:
    """Build each selected model's feed queryset, keyed by model name.

    NOTE(review): keys here are ``model.__name__`` strings, not FeedItemType
    members — the return annotation looks stale; verify against callers.
    """
    return {model.__name__: collect_querysets(model) for model in models}

def paginate_serialize_data(
    self, get_model_data: dict[FeedItemType, SupportedQuerySet]
) -> tuple[list[dict], int]:
    """Paginate and serialize all selected querysets into shuffled feed items
    plus the summed item count (see ``paginate_serialize_feed``)."""
    paginator = self.pagination_class()
    return paginate_serialize_feed(get_model_data, paginator, self.request, self)
Loading