Compare commits

...

73 Commits

Author SHA1 Message Date
pluja
99cb730bc0 Release 202506111039 2025-06-11 10:39:20 +00:00
pluja
d43402e162 Release 202506111007 2025-06-11 10:07:51 +00:00
pluja
9bb316b85f Release 202506102027 2025-06-10 20:27:48 +00:00
pluja
4aea68ee58 Release 202506101914 2025-06-10 19:14:10 +00:00
pluja
2f88c43236 Release 202506101800 2025-06-10 18:00:24 +00:00
pluja
ad3c561419 Release 202506101748 2025-06-10 17:48:58 +00:00
pluja
812937d2c7 Release 202506101742 2025-06-10 17:42:42 +00:00
pluja
459d7c91f7 Release 202506091901 2025-06-09 19:01:08 +00:00
pluja
b8b2dee4a4 Release 202506091433 2025-06-09 14:33:57 +00:00
pluja
eb0af871e1 Release 202506091118 2025-06-09 11:18:06 +00:00
pluja
3ccd7fd395 Release 202506091053 2025-06-09 10:53:52 +00:00
pluja
87f0f36aa1 Release 202506091000 2025-06-09 10:00:55 +00:00
pluja
8b90b3eef6 Release 202506061009 2025-06-06 10:09:59 +00:00
pluja
2489e94b0e Release 202506042153 2025-06-04 21:53:07 +00:00
pluja
144af17a70 Release 202506042038 2025-06-04 20:38:49 +00:00
pluja
02e52d7351 Release 202506041937 2025-06-04 19:37:33 +00:00
pluja
dacf73a804 Release 202506041641 2025-06-04 16:41:32 +00:00
pluja
5812399e29 Release 202506031821 2025-06-03 18:21:55 +00:00
pluja
6a6908518d Release 202506020353 2025-06-02 03:53:03 +00:00
pluja
d065910ff3 Release 202506011533 2025-06-01 15:33:44 +00:00
pluja
490433b002 Release 202506011511 2025-06-01 15:11:37 +00:00
pluja
e17bc8a521 Release 202505312236 2025-05-31 22:36:39 +00:00
pluja
ec1215f2ae Release 202505311848 2025-05-31 18:48:33 +00:00
pluja
3afa824c18 Release 202505311149 2025-05-31 11:49:38 +00:00
pluja
9a68112e24 Release 202505311113 2025-05-31 11:13:24 +00:00
pluja
0c40d8eec5 Release 202505311002 2025-05-31 10:02:50 +00:00
pluja
e16c9b64ed Release 202505311001 2025-05-31 10:01:35 +00:00
pluja
22944fcdb3 Release 202505310921 2025-05-31 09:21:32 +00:00
pluja
f7f380c591 Release 202505302056 2025-05-30 20:56:04 +00:00
pluja
577c524ca2 Release 202505302029 2025-05-30 20:29:01 +00:00
pluja
da12e8de79 add basic API plus minor updates and fixes 2025-05-30 08:17:23 +00:00
pluja
ea40f17d3c Release 202505281348 2025-05-28 13:48:27 +00:00
pluja
7e0d41cc7a Release 202505280851 2025-05-28 08:51:59 +00:00
pluja
70a097054b Release 202505271800 2025-05-27 18:00:08 +00:00
pluja
e536ca6519 Release 202505261804 2025-05-26 18:04:45 +00:00
pluja
b361ed3aa8 Release 202505261604 2025-05-26 16:04:25 +00:00
pluja
50ede46d50 release-34 2025-05-26 14:49:37 +00:00
pluja
ba809840c6 Release 202505261445 2025-05-26 14:45:22 +00:00
pluja
f2021a3027 Release 202505261035 2025-05-26 10:35:43 +00:00
pluja
6b86a72d1e Release 2025-05-25-GgNU 2025-05-25 12:28:30 +00:00
pluja
8f2b2c34ff Release 2025-05-25-irZj 2025-05-25 11:21:35 +00:00
pluja
ac9a2f428a Release 2025-05-25-ELtG 2025-05-25 10:07:02 +00:00
pluja
970622d061 Release 2025-05-23-xzNR 2025-05-23 21:50:03 +00:00
pluja
4806a7fd4e Release 2025-05-23-nwlb 2025-05-23 18:23:14 +00:00
pluja
85605de8aa Release 2025-05-23-S6tT 2025-05-23 14:56:00 +00:00
pluja
7a22629c55 Release 2025-05-23-JSHi 2025-05-23 12:25:07 +00:00
pluja
8deb9acb93 Release 2025-05-23-9Gto 2025-05-23 12:09:33 +00:00
pluja
61a5448ff5 Release 2025-05-23-5vNZ 2025-05-23 12:05:29 +00:00
pluja
cdfdcfc122 Release 2025-05-23-R3WZ 2025-05-23 11:52:16 +00:00
pluja
f4525e3d32 Release 2025-05-22-SwZ1 2025-05-22 23:07:55 +00:00
pluja
ecc8f67fc4 Release 2025-05-22-XDxe 2025-05-22 22:58:18 +00:00
pluja
72c238a4dc Release 2025-05-22-16vM 2025-05-22 22:38:41 +00:00
pluja
d79bedf219 Release 2025-05-22-5X5Q 2025-05-22 19:43:20 +00:00
pluja
2362d2cc73 Release 2025-05-22-Uvv4 2025-05-22 19:19:07 +00:00
pluja
a69c0aeed4 Release 2025-05-22-GmO6 2025-05-22 11:10:18 +00:00
pluja
ed86f863e3 Release 2025-05-21-MXjT 2025-05-21 14:31:33 +00:00
pluja
845aa1185c Release 2025-05-21-AQ5C 2025-05-21 07:03:39 +00:00
pluja
17b3642f7e Update favicon 2025-05-20 11:27:55 +00:00
pluja
d64268d396 fix logout issue 2025-05-20 11:12:55 +00:00
pluja
9c289753dd fix generate 2025-05-20 11:00:28 +00:00
pluja
8bdbe8ea36 small updates 2025-05-20 10:29:03 +00:00
pluja
af7ebe813b announcements style 2025-05-20 10:20:09 +00:00
pluja
dabc4e5c47 donation component 2025-05-20 08:02:55 +00:00
pluja
af3df8f79a Release 2025-05-20-0D8p 2025-05-20 01:47:50 +00:00
pluja
587480d140 pyworker fixes and ogimages fixes 2025-05-19 22:13:13 +00:00
pluja
74e6a50f14 fix karma trigger 2025-05-19 21:51:45 +00:00
pluja
3eb9b28ea0 triggers fix and ogimages 2025-05-19 21:38:37 +00:00
pluja
a21dc81099 updates 2025-05-19 21:31:29 +00:00
pluja
636057f8e0 announcements 2025-05-19 16:57:10 +00:00
pluja
205b6e8ea0 add karma transaction 2025-05-19 11:51:08 +00:00
pluja
60912def4e Release 2025-05-19-2257 2025-05-19 11:37:36 +00:00
pluja
22579d10c5 Just a test release 2025-05-19 11:22:11 +00:00
pluja
565e9a0ad1 Release 2025-05-19 2025-05-19 10:23:36 +00:00
370 changed files with 62494 additions and 0 deletions

View File

@@ -0,0 +1,20 @@
---
description:
globs: web/src/actions,web/src/pages
alwaysApply: false
---
- In the astro actions return, generally don't return anything unless the caller needs it. Especially don't `return { success: true }`, or similar. If needed, just return an object with the newly created/edited objects (Like: `return { newService: service }` or don't return anything if not needed).
- When importing actions, use `import { actions } from 'astro:actions'`. Example:
```ts
import { actions } from 'astro:actions'; /* CORRECT */
import { server } from '~/actions'; /* WRONG!!!! DON'T DO THIS */
import { adminAttributeActions } from '~/actions/admin/attribute.ts'; /* WRONG!!!! DON'T DO THIS */
const result = Astro.getActionResult(actions.admin.attribute.create);
```
- When adding actions, don't create and export a new variable called actions. Notice that Astro already provides that variable from `import { actions } from 'astro:actions'`. So just add the new actions to the `server` variable in `web/src/actions/index.ts` and that's it.
- When throwing errors in Astro actions use ActionError.
- Always use Astro actions instead of with API routes and instead of `if (Astro.request.method === "POST")`.
- Generally call the actions using html forms. But if you need to, you can call them from the server-side code with Astro.callAction(), or [callActionWithUrlParams.ts](mdc:web/src/lib/callActionWithUrlParams.ts).
- The admin actions go into a separate folder.

View File

@@ -0,0 +1,26 @@
---
description:
globs: web/src/pages,web/src/components
alwaysApply: false
---
- Avoid sending JavaScript to the client. The JS sent should always be optional.
- Avoid using client-side JavaScript as much as possible. And if it has to be done, make it optional.
- To avoid using JavaScript, you can use HTML and CSS features such as hidden checkboxes, the details tag, etc.
- The admin pages can use client-side JavaScript.
- When adding clientside JS do it with HTMX.
- When adding HTMX, the layout component BaseLayout [BaseLayout.astro](mdc:web/src/layouts/BaseLayout.astro) [BaseHead.astro](mdc:web/src/components/BaseHead.astro) accepts a prop htmx to load it in that page. No need to use a cdn.
- When adding client scripts remember to use the event `astro:page-load`, `querySelectorAll<Type>` and add an explanation comment, like so:
```tsx
<script>
////////////////////////////////////////////////////////////
// Optional script for __________. //
// Description goes here... //
////////////////////////////////////////////////////////////
document.addEventListener('astro:page-load', () => {
document.querySelectorAll<HTMLDivElement>('[data-my-div]').forEach((myDiv) => {
// Do something
})
})
</script>
```

View File

@@ -0,0 +1,8 @@
---
description:
globs:
alwaysApply: true
---
- Instead of using the syntax `Array<T>`, use `T[]`.
- Use TypeScript `type` over `interface`.
- You should never add unnecessary or unuseful comments, if you add a comment it must provide some value to the code.

View File

@@ -0,0 +1,55 @@
---
description: Querying the database, editing the database, needing to import types from the database, or anything related to the database or Prisma.
globs:
alwaysApply: false
---
- We use Prisma as ORM.
- Remember to check the prisma schema [schema.prisma](mdc:web/prisma/schema.prisma) when doing things related to the database.
- After making changes to the [schema.prisma](mdc:web/prisma/schema.prisma) database or [seed.ts](mdc:web/prisma/seed.ts), you run `npm run db-reset` (from `/web/` folder) [package.json](mdc:web/package.json). And never do the migrations manually.
- Import the types from prisma instead of hardcoding duplicates. Especially use the Prisma.___GetPayload type and the enums. Like this:
```ts
type Props = {
user: Prisma.UserGetPayload<{
select: {
name: true
displayName: true
picture: true
}
}>
}
```
- Only `select` the necessary fields, no more.
- In prisma prefer `select` over `include` when making queries.
- Avoid hardcoding enums from the database, import them from prisma.
- To query the database from Astro pages, use Astro.locals.banners.try() or Astro.locals.banners.tryMany([]) [errorBanners.ts](mdc:web/src/lib/errorBanners.ts) [middleware.ts](mdc:web/src/middleware.ts) , like so:
```ts
const [user, services] = await Astro.locals.banners.tryMany([
[
'Error fetching user',
() =>
prisma.user.findUnique({
where: { id: userId },
select: {
name: true,
displayName: true,
picture: true,
},
}),
],
[
'Error fetching services',
() =>
prisma.service.findMany({
where: { categories: { some: { id: categoryId } } },
select: {
id: true,
name: true,
slug: true,
},
}),
[] as [],
],
])
```
- When editing the database, remember to edit the db seeding file [seed.ts](mdc:web/prisma/seed.ts) to generate data for the new schema.

View File

@@ -0,0 +1,98 @@
---
description:
globs:
alwaysApply: true
---
- The main libraries used are: Astro, TypeScript, Tailwind 4, HTMX, Prisma, npm, zod, lodash-es, date-fns, ts-toolbelt. Full list in: [package.json](mdc:web/package.json)
- When creating constants or enums, use the `makeHelpersForOptions` function [makeHelpersForOptions.ts](mdc:web/src/lib/makeHelpersForOptions.ts) like in this example. Save the file in the `web/src/constants` folder (like [attributeTypes.ts](mdc:web/src/constants/attributeTypes.ts)). Note that it's not necessary to use all the options or export all the variables that the example has, just the ones you need.
```ts
import { makeHelpersForOptions } from '../lib/makeHelpersForOptions';
import { transformCase } from '../lib/strings';
import type { AttributeType } from '@prisma/client';
type AttributeTypeInfo<T extends string | null | undefined = string> = {
value: T;
slug: string;
label: string;
icon: string;
order: number;
classNames: {
text: string;
icon: string;
};
};
export const {
dataArray: attributeTypes,
dataObject: attributeTypesById,
getFn: getAttributeTypeInfo,
getFnSlug: getAttributeTypeInfoBySlug,
zodEnumBySlug: attributeTypesZodEnumBySlug,
zodEnumById: attributeTypesZodEnumById,
keyToSlug: attributeTypeIdToSlug,
slugToKey: attributeTypeSlugToId,
} = makeHelpersForOptions(
'value',
(value): AttributeTypeInfo<typeof value> => ({
value,
slug: value ? value.toLowerCase() : '',
label: value
? transformCase(value.replace('_', ' '), 'title')
: String(value),
icon: 'ri:question-line',
order: Infinity,
classNames: {
text: 'text-current/60',
icon: 'text-current/60',
},
}),
[
{
value: 'BAD',
slug: 'bad',
label: 'Bad',
icon: 'ri:close-line',
order: 1,
classNames: {
text: 'text-red-200',
icon: 'text-red-400',
},
},
{
value: 'WARNING',
slug: 'warning',
label: 'Warning',
icon: 'ri:alert-line',
order: 2,
classNames: {
text: 'text-yellow-200',
icon: 'text-yellow-400',
},
},
{
value: 'GOOD',
slug: 'good',
label: 'Good',
icon: 'ri:check-line',
order: 3,
classNames: {
text: 'text-green-200',
icon: 'text-green-400',
},
},
{
value: 'INFO',
slug: 'info',
label: 'Info',
icon: 'ri:information-line',
order: 4,
classNames: {
text: 'text-blue-200',
icon: 'text-blue-400',
},
},
] as const satisfies AttributeTypeInfo<AttributeType>[]
);
```

View File

@@ -0,0 +1,161 @@
---
description:
globs: web/src/pages,web/src/components
alwaysApply: false
---
- On .astro files, don't forget to include the three dashes (`---`) at the beginning of the file and where the server js ends. I noticed that sometimes you forget them.
- For icons use the `Icon` component from `astro-icon/components`.
- For icons use the Remix Icon library preferably.
- Use the `MyPicture` component from `src/components/MyPicture.astro` for images.
- When redirecting to login use the `makeLoginUrl` function from [redirectUrls.ts](mdc:web/src/lib/redirectUrls.ts) and if the link is for an `<a>` tag, use the `data-astro-reload` attribute. Similar for the logout and impersonate.
- Don't use the `web/src/pages/admin` pages as example unless explicitly stated or you're creating/editing an admin page.
- Checkout the @errorBanners.ts @middleware.ts @env.d.ts to see the available Astro.locals values.
- Avoid duplicating similar html code. You can use jsx for loops, create variables in the constants folder, or create separate components.
- When redirecting to the 404 not found page, use `Astro.rewrite` (Like this example: `if (!user) return Astro.rewrite('/404')`)
- Include schema markup in the pages when it makes sense. Examples: [[slug].astro](mdc:web/src/pages/service/[slug].astro)
- When creating forms, we already have utilities, components and established design patterns. Follow this example. (Note that this example may come slightly outdated, but the overall philosophy doesn't change)
```astro
---
import { actions, isInputError } from 'astro:actions'
import { z } from 'astro:content'
import Captcha from '../../components/Captcha.astro'
import InputCardGroup from '../../components/InputCardGroup.astro'
import InputCheckboxGroup from '../../components/InputCheckboxGroup.astro'
import InputHoneypotTrap from '../../components/InputHoneypotTrap.astro'
import InputImageFile from '../../components/InputImageFile.astro'
import InputSubmitButton from '../../components/InputSubmitButton.astro'
import InputText from '../../components/InputText.astro'
import InputTextArea from '../../components/InputTextArea.astro'
import { kycLevels } from '../../constants/kycLevels'
import BaseLayout from '../../layouts/BaseLayout.astro'
import { zodParseQueryParamsStoringErrors } from '../../lib/parseUrlFilters'
import { prisma } from '../../lib/prisma'
import { makeLoginUrl } from '../../lib/redirectUrls'
const user = Astro.locals.user
if (!user) {
return Astro.redirect(makeLoginUrl(Astro.url, { message: 'Login to suggest a new service' }))
}
const result = Astro.getActionResult(actions.serviceSuggestion.editService)
if (result && !result.error) {
return Astro.redirect(`/service-suggestion/${result.data.serviceSuggestion.id}`)
}
const inputErrors = isInputError(result?.error) ? result.error.fields : {}
const { data: params } = zodParseQueryParamsStoringErrors(
{
serviceId: z.coerce.number().int().positive(),
notes: z.string().default(''),
},
Astro
)
if (!params.serviceId) return Astro.rewrite('/404')
const service = await Astro.locals.banners.try(
'Failed to fetch service',
async () =>
prisma.service.findUnique({
select: {
id: true,
name: true,
slug: true,
description: true,
overallScore: true,
kycLevel: true,
imageUrl: true,
verificationStatus: true,
acceptedCurrencies: true,
categories: {
select: {
name: true,
icon: true,
},
},
},
where: { id: params.serviceId },
}),
null
)
if (!service) return Astro.rewrite('/404')
---
<BaseLayout
pageTitle="Edit service"
description="Suggest an edit to service"
ogImage={{
template: 'generic',
title: 'Edit service',
description: 'Suggest an edit to service',
icon: 'ri:edit-line',
}}
widthClassName="max-w-screen-md"
>
<h1 class="font-title mt-12 mb-6 text-center text-3xl font-bold">Edit service</h1>
<form method="POST" action={actions.serviceSuggestion.editService} class="space-y-6">
<input type="hidden" name="serviceId" value={params.serviceId} />
<InputText
label="Service name"
name="name"
value={service.name}
error={inputErrors.name}
inputProps={{ 'data-custom-value': true, required: true }}
/>
<InputCardGroup
name="kycLevel"
label="KYC Level"
options={kycLevels.map((kycLevel) => ({
label: kycLevel.name,
value: kycLevel.id.toString(),
icon: kycLevel.icon,
description: `${kycLevel.description}\n\n_KYC Level ${kycLevel.value}/5_`,
}))}
iconSize="md"
cardSize="md"
required
error={inputErrors.kycLevel}
/>
<InputCheckboxGroup
name="categories"
label="Categories"
required
options={categories.map((category) => ({
label: category.name,
value: category.id.toString(),
icon: category.icon,
}))}
error={inputErrors.categories}
/>
<InputImageFile
label="Service Image"
name="imageFile"
description="Square image. At least 192x192px. Transparency supported."
error={inputErrors.imageFile}
square
required
/>
<InputTextArea
label="Note for Moderators"
name="notes"
value={params.notes}
inputProps={{ rows: 10 }}
error={inputErrors.notes}
/>
<Captcha action={actions.serviceSuggestion.createService} />
<InputHoneypotTrap name="message" />
<InputSubmitButton hideCancel />
</form>
</BaseLayout>
```

10
.cursor/rules/styles.mdc Normal file
View File

@@ -0,0 +1,10 @@
---
description:
globs: /web/src/pages,/web/src/components,/web/src/constants
alwaysApply: false
---
- We use Tailwind 4 (the latest version), make sure to not use outdated classes from Tailwind 3.
- Checkout the custom tailwind theme [global.css](mdc:web/src/styles/global.css).
- When adding conditional styles or merging tailwind classes, use the `cn` function. Never use template strings. [cn.ts](mdc:web/src/lib/cn.ts)
- For the grayscale colors, try to use the custom color `day` for the light/foreground colors (50-500) and `night` for the dark/background (500-950).
- Generally avoid using opacity modifiers (In `text-red-500/50` the `/50`), but it's fine to also use it.

46
.env.example Normal file
View File

@@ -0,0 +1,46 @@
# Database
POSTGRES_USER=your_db_user
POSTGRES_PASSWORD=your_db_password
POSTGRES_DATABASE=your_db_name
DATABASE_URL="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@database:5432/${POSTGRES_DATABASE}?schema=public"
DATABASE_UI_URL="https://db.example.com"
# Generic Config
UPLOAD_DIR=/app/uploads
SITE_URL="https://your-site.example.com"
SOURCE_CODE_URL="https://your-source-code.example.com"
TIME_TRAP_SECRET=your_time_trap_secret
LOGS_UI_URL="https://logs.example.com"
# Release Info
RELEASE_NUMBER=
RELEASE_DATE=
# Redis
REDIS_URL="redis://redis:6379"
# Crawl4AI
CRAWL4AI_BASE_URL="http://crawl4ai:11235"
CRAWL4AI_API_TOKEN=your_crawl4ai_token
# Tor and I2P
ONION_ADDRESS="http://youronionaddress.onion"
I2P_ADDRESS="http://youri2paddress.b32.i2p"
I2P_PASS=your_i2p_password
# Push Notifications
VAPID_PUBLIC_KEY=your_vapid_public_key
VAPID_PRIVATE_KEY=your_vapid_private_key
VAPID_SUBJECT="mailto:your-email@example.com"
# OpenAI
OPENAI_API_KEY=your_openai_api_key
OPENAI_BASE_URL="https://your-openai-base-url.example.com"
OPENAI_MODEL=your_openai_model
OPENAI_RETRY=3
# Pyworker Crons
CRON_TOSREVIEW_TASK="0 0 1 * *" # Every month
CRON_USER_SENTIMENT_TASK="0 0 * * *" # Every day
CRON_COMMENT_MODERATION_TASK="0 * * * *" # Every hour
CRON_FORCE_TRIGGERS_TASK="0 2 * * *" # Every day

17
.gitignore vendored Normal file
View File

@@ -0,0 +1,17 @@
local_data/
TODO.md
webhook
docker-compose.override.yml
web/public/uploads/
.env
backups/
loki*
grafana*
dump*.sql
*.dump
*.log
*.bak
migrate.py
sync-all.sh
.DS_Store

1
.npmrc Normal file
View File

@@ -0,0 +1 @@
save-exact=true

64
.platform/README.md Normal file
View File

@@ -0,0 +1,64 @@
# .platform Hooks
This directory contains deployment hooks that are executed during the deployment process. The structure follows AWS Elastic Beanstalk's `.platform` hooks pattern, although we are not using AWS we think it is a good practice to use this standard.
## Directory Structure
```
.platform/
├── hooks/
│ ├── predeploy/ # Scripts executed before staging deployment
│ └── postdeploy/ # Scripts executed after successful production deployment
```
## Hook Execution
- Scripts in each hook directory are executed in alphabetical order
- If any hook fails (returns non-zero), the deployment process is aborted
- Hook failures are reported through the notification system
## Available Hooks
### Predeploy Hooks
Located in `.platform/hooks/predeploy/`
- Executed before the staging deployment starts
- Use for tasks like:
- Environment validation
- Resource preparation
- Database migrations
- Asset compilation
### Postdeploy Hooks
Located in `.platform/hooks/postdeploy/`
- Executed after successful production deployment
- Use for tasks like:
- Cache warming
- Service notifications
- Cleanup operations
- Import triggers (current implementation)
## Example Hook
```bash
#!/bin/bash
# .platform/hooks/postdeploy/01_import_triggers.sh
cd ../../../
just import-triggers
```
## Environment
Hooks have access to all environment variables available to the deployment script, including:
- `HOOK_PUSHER`
- `HOOK_MESSAGE`
- `GITEA_USERNAME`
- `GITEA_TOKEN`
- `GITEA_SERVER`
- `GITEA_REPO_USERNAME`
- `GITEA_REPO_NAME`

View File

@@ -0,0 +1,4 @@
#!/bin/bash
pwd
just import-triggers

View File

@@ -0,0 +1,4 @@
#!/bin/bash
pwd
just dump-db

13
.vscode/extensions.json vendored Normal file
View File

@@ -0,0 +1,13 @@
{
"recommendations": [
"astro-build.astro-vscode",
"esbenp.prettier-vscode",
"dbaeumer.vscode-eslint",
"davidanson.vscode-markdownlint",
"golang.go",
"bradlc.vscode-tailwindcss",
"craigrbroughton.htmx-attributes",
"nefrob.vscode-just-syntax"
],
"unwantedRecommendations": []
}

12
.vscode/launch.json vendored Normal file
View File

@@ -0,0 +1,12 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "npm run dev",
"request": "launch",
"type": "node-terminal",
"cwd": "${workspaceFolder}/web",
"command": "npm run dev"
}
]
}

60
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,60 @@
{
"editor.formatOnSave": true,
"editor.tabSize": 2,
"editor.insertSpaces": true,
"editor.wordWrap": "wordWrapColumn",
"editor.wordWrapColumn": 110,
"editor.rulers": [110],
"prettier.documentSelectors": ["**/*.astro"],
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[astro]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[prisma]": {
"editor.wordWrap": "off"
},
"files.exclude": {
"**/node_modules": true
},
"eslint.validate": [
"javascript",
"javascriptreact",
"astro",
"typescript",
"typescriptreact"
],
"editor.codeActionsOnSave": {
"source.fixAll": "explicit",
"source.organizeImports": "never",
"source.fixAll.eslint": "explicit"
},
"eslint.enable": true,
"typescript.preferences.importModuleSpecifier": "non-relative",
"debug.javascript.autoAttachFilter": "always",
"tailwindCSS.classAttributes": [
"class",
"className",
"classNames",
"ngClass",
"class:list",
".*classNames?"
],
"tailwindCSS.classFunctions": ["tv", "cn"],
"tailwindCSS.experimental.classRegex": [
["([\"'`][^\"'`]*.*?[\"'`])", "[\"'`]([^\"'`]*).*?[\"'`]"]
]
}

53
.vscode/tasks.json vendored Normal file
View File

@@ -0,0 +1,53 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "install",
"type": "shell",
"command": "cd web && npm i",
"icon": {
"id": "package",
"color": "terminal.ansiGreen"
},
"detail": "Install npm dependencies"
},
{
"label": "web",
"type": "shell",
"command": "cd web && npm run dev",
"icon": {
"id": "browser",
"color": "terminal.ansiBlue"
},
"detail": "Start web development server",
"problemMatcher": ["$tsc-watch"],
"isBackground": true
},
{
"label": "db",
"type": "shell",
"command": "docker compose -f docker-compose.yml -f docker-compose.dev.yml up database redis db-admin",
"runOptions": {
"runOn": "folderOpen"
},
"icon": {
"id": "database",
"color": "terminal.ansiYellow"
},
"detail": "Start database services"
},
{
"label": "Install and run",
"dependsOrder": "sequence",
"dependsOn": ["install", "web"],
"runOptions": {
"runOn": "folderOpen"
},
"icon": {
"id": "play",
"color": "terminal.ansiMagenta"
},
"detail": "Setup and launch development environment"
}
]
}

61
README.md Normal file
View File

@@ -0,0 +1,61 @@
# KYCnot.me
[KYCnot.me](https://kycnot.me)
## Development
### Installations
Install the following tools:
- [nvm](https://github.com/nvm-sh/nvm) (or [node](https://nodejs.org/en/download/))
- [docker](https://docs.docker.com/get-docker/)
- [just](https://just.systems)
### Initialization
Run this the first time you setup the project:
```zsh
# you can alternatively use `just dev-database`
docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait database redis
cd web
nvm install
npm i
cp -n .env.example .env
npm run db-reset
```
Now open the [.env](web/.env) file and fill in the missing values.
> Default users are created with tokens: `admin`, `moderator`, `verified`, `normal` (configurable via env vars)
### Running the project
In separate terminals, run the following commands:
- Database
```zsh
# you can alternatively use `just dev-database`
docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait database redis
```
- Website <http://localhost:4321>
```zsh
cd web
nvm use
npm run dev
```
- Database Admin (Optional) <http://localhost:5555>
```zsh
cd web
nvm use
npm run db-admin
```
> [!TIP]
> VS Code will run the project in development mode automatically when you open the project.

45
docker-compose.dev.yml Normal file
View File

@@ -0,0 +1,45 @@
services:
database:
volumes:
- ./local_data/postgres:/var/lib/postgresql/data:z
ports:
- 3399:5432
restart: no
environment:
POSTGRES_USER: kycnot
POSTGRES_PASSWORD: kycnot
POSTGRES_DB: kycnot
healthcheck:
test: ["CMD-SHELL", "pg_isready -U kycnot -d kycnot"]
interval: 10s
timeout: 5s
retries: 5
db-admin:
image: node:20
working_dir: /app
volumes:
- ./web:/app
restart: unless-stopped
environment:
POSTGRES_USER: ${POSTGRES_USER:-kycnot}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-kycnot}
POSTGRES_DB: ${POSTGRES_DATABASE:-kycnot}
DATABASE_URL: "postgresql://${POSTGRES_USER:-kycnot}:${POSTGRES_PASSWORD:-kycnot}@database:5432/${POSTGRES_DATABASE:-kycnot}?schema=public"
depends_on:
database:
condition: service_healthy
expose:
- 5555
ports:
- "5555:5555"
command: ["npm", "run", "db-admin"]
healthcheck:
test: ["CMD", "curl", "-k", "--silent", "--fail", "http://localhost:5555"]
interval: 10s
timeout: 5s
retries: 5
redis:
ports:
- "6379:6379"

80
docker-compose.yml Normal file
View File

@@ -0,0 +1,80 @@
volumes:
database:
services:
database:
image: postgres:latest
volumes:
- database:/var/lib/postgresql/data:z
restart: unless-stopped
env_file:
- .env
healthcheck:
test:
[
'CMD-SHELL',
'pg_isready -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot}',
]
interval: 10s
timeout: 5s
retries: 5
pyworker:
build:
context: ./pyworker
image: kycnotme/pyworker:${PYWORKER_IMAGE_TAG:-latest}
restart: always
env_file:
- .env
crawl4ai:
image: unclecode/crawl4ai:basic-amd64
expose:
- '11235'
env_file:
- .env
volumes:
- /dev/shm:/dev/shm
deploy:
resources:
limits:
memory: 4G
reservations:
memory: 1G
redis:
image: redis:latest
restart: unless-stopped
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
interval: 10s
timeout: 5s
retries: 5
astro:
build:
dockerfile: web/Dockerfile
image: kycnotme/astro:${ASTRO_IMAGE_TAG:-latest}
restart: unless-stopped
env_file:
- .env
depends_on:
database:
condition: service_healthy
redis:
condition: service_healthy
expose:
- 4321
healthcheck:
test:
[
'CMD',
'curl',
'-k',
'--silent',
'--fail',
'http://localhost:4321/internal-api/healthcheck',
]
interval: 10s
timeout: 5s
retries: 5

113
justfile Normal file
View File

@@ -0,0 +1,113 @@
set dotenv-load
@default:
just --list
# Start the development database and redis services
dev-database:
docker compose -f docker-compose.yml -f docker-compose.dev.yml up database redis db-admin
# Import all triggers to the database
import-triggers:
#!/bin/bash
for sql_file in web/prisma/triggers/*.sql; do
echo "Importing $sql_file..."
docker compose exec -T database psql -U ${DATABASE_USER:-kycnot} -d ${DATABASE_NAME:-kycnot} < "$sql_file"
done
# Create a database backup that includes the Prisma migrations table (recommended)
dump-db:
#!/bin/bash
mkdir -p backups
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
echo "Creating complete database backup (including _prisma_migrations table)..."
docker compose exec -T database pg_dump -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot} -c -F c > backups/db_backup_${TIMESTAMP}.dump
echo "Backup saved to backups/db_backup_${TIMESTAMP}.dump"
# Create a database backup without the migrations table (legacy format)
dump-db-no-migrations:
#!/bin/bash
mkdir -p backups
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
echo "Creating database backup (excluding _prisma_migrations table)..."
docker compose exec -T database pg_dump -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot} -c -F c -T _prisma_migrations > backups/db_backup_no_migrations_${TIMESTAMP}.dump
echo "Backup saved to backups/db_backup_no_migrations_${TIMESTAMP}.dump"
# Import a database backup. Usage: just import-db [filename]
# If no filename is provided, it will use the most recent backup
import-db file="":
#!/bin/bash
if [ -z "{{file}}" ]; then
BACKUP_FILE=$(find backups/ -name 'db_backup_*.dump' | sort -r | head -n 1)
if [ -z "$BACKUP_FILE" ]; then
echo "Error: No backup files found in the backups directory"
exit 1
fi
else
BACKUP_FILE="{{file}}"
if [ ! -f "$BACKUP_FILE" ]; then
echo "Error: Backup file '$BACKUP_FILE' not found"
exit 1
fi
fi
echo "=== STEP 1: PREPARING DATABASE ==="
# Drop all connections to the database
docker compose exec -T database psql -U ${POSTGRES_USER:-kycnot} -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '${POSTGRES_DATABASE:-kycnot}' AND pid <> pg_backend_pid();" postgres
# Drop and recreate database
echo "Dropping and recreating the database..."
docker compose exec -T database psql -U ${POSTGRES_USER:-kycnot} -c "DROP DATABASE IF EXISTS ${POSTGRES_DATABASE:-kycnot};" postgres
docker compose exec -T database psql -U ${POSTGRES_USER:-kycnot} -c "CREATE DATABASE ${POSTGRES_DATABASE:-kycnot};" postgres
echo "=== STEP 2: RESTORING PRODUCTION DATA ==="
# Restore the database
cat "$BACKUP_FILE" | docker compose exec -T database pg_restore -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot} --no-owner
echo "Database data restored successfully!"
echo "=== STEP 3: CREATING PRISMA MIGRATIONS TABLE ==="
# Create the _prisma_migrations table if it doesn't exist
docker compose exec -T database psql -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot} -c "
CREATE TABLE IF NOT EXISTS _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMP WITH TIME ZONE,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMP WITH TIME ZONE,
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);"
echo "=== STEP 4: REGISTERING MIGRATIONS ==="
# Only register migrations if the table is empty
migration_count=$(docker compose exec -T database psql -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot} -t -c "SELECT COUNT(*) FROM _prisma_migrations;")
if [ "$migration_count" -eq "0" ]; then
# For each migration, insert a record into _prisma_migrations
for migration_dir in $(find web/prisma/migrations -maxdepth 1 -mindepth 1 -type d | sort); do
migration_name=$(basename "$migration_dir")
sql_file="$migration_dir/migration.sql"
if [ -f "$sql_file" ]; then
echo "Registering migration: $migration_name"
checksum=$(sha256sum "$sql_file" | cut -d' ' -f1)
uuid=$(uuidgen)
now=$(date -u +"%Y-%m-%d %H:%M:%S")
docker compose exec -T database psql -U ${POSTGRES_USER:-kycnot} -d ${POSTGRES_DATABASE:-kycnot} -c "
INSERT INTO _prisma_migrations (id, checksum, migration_name, logs, started_at, finished_at, applied_steps_count)
VALUES ('$uuid', '$checksum', '$migration_name', 'Registered during import', '$now', '$now', 1)
ON CONFLICT (migration_name) DO NOTHING;"
fi
done
else
echo "Migrations table already has entries. Skipping registration."
fi
echo "=== STEP 5: IMPORTING TRIGGERS ==="
just import-triggers
echo "Production database import completed successfully!"
echo "Migration status:"
cd web && npx prisma migrate status

21
pyworker/.env.example Normal file
View File

@@ -0,0 +1,21 @@
# Database connection
DATABASE_URL=postgresql://kycnot:kycnot@localhost:3399/kycnot
# API settings
TOS_API_BASE_URL=https://r.jina.ai
# Logging
LOG_LEVEL=INFO
LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
# OpenAI
OPENAI_API_KEY="xxxxxxxxx"
OPENAI_BASE_URL="https://xxxxxx/api/v1"
OPENAI_MODEL="xxxxxxxxx"
OPENAI_RETRY=3
CRON_TOSREVIEW_TASK=0 0 1 * * # Every month
CRON_USER_SENTIMENT_TASK=0 0 * * * # Every day
CRON_COMMENT_MODERATION_TASK=0 0 * * * # Every day
CRON_FORCE_TRIGGERS_TASK=0 2 * * * # Every day
CRON_SERVICE_SCORE_RECALC_TASK=*/5 * * * * # Every 5 minutes

174
pyworker/.gitignore vendored Normal file
View File

@@ -0,0 +1,174 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc

1
pyworker/.python-version Normal file
View File

@@ -0,0 +1 @@
3.13

10
pyworker/Dockerfile Normal file
View File

@@ -0,0 +1,10 @@
# uv-on-Alpine base image: provides uv and a Python toolchain.
FROM ghcr.io/astral-sh/uv:alpine
WORKDIR /app
COPY . .
# Install exactly the dependency versions pinned in uv.lock.
RUN uv sync --frozen
# NOTE(review): port 8000 is exposed but the entrypoint below runs the
# scheduler worker, not an HTTP server — confirm this EXPOSE is needed.
EXPOSE 8000
# Run the package in worker mode (scheduled task execution).
CMD ["uv", "run", "-m", "pyworker", "--worker"]

149
pyworker/README.md Normal file
View File

@@ -0,0 +1,149 @@
# KYC Not Worker
A Python worker for processing and analyzing data for the KYC Not project.
## Features
- TOS (Terms of Service) text retrieval and analysis
- User sentiment analysis from comments
- Comment moderation
- Service score recalculation
- Database trigger maintenance
- Scheduled task execution
- Database operations for services and comments
## Installation
1. Clone the repository
2. Sync dependencies with [uv](https://docs.astral.sh/uv/):
```bash
uv sync
```
## Configuration
Copy `.env.example` to `.env` and fill in the required values:
```bash
cp .env.example .env
```
Required environment variables:
- `DATABASE_URL`: PostgreSQL connection string
- `OPENAI_API_KEY`: OpenAI API key for AI tasks
- `CRON_TOSREVIEW_TASK`: Cron expression for TOS review task
- `CRON_USER_SENTIMENT_TASK`: Cron expression for user sentiment analysis task
- `CRON_COMMENT_MODERATION_TASK`: Cron expression for comment moderation task
- `CRON_FORCE_TRIGGERS_TASK`: Cron expression for force triggers task
- `CRON_SERVICE_SCORE_RECALC_TASK`: Cron expression for service score recalculation task
## Usage
### Command Line Interface
Run tasks directly:
```bash
# Run TOS review task
uv run -m pyworker tos [--service-id ID]
# Run user sentiment analysis task
uv run -m pyworker sentiment [--service-id ID]
# Run comment moderation task
uv run -m pyworker moderation [--service-id ID]
# Run force triggers task
uv run -m pyworker force-triggers
# Run service score recalculation task
uv run -m pyworker service-score-recalc [--service-id ID]
```
### Worker Mode
Run in worker mode to execute tasks on a schedule:
```bash
uv run -m pyworker --worker
```
Tasks will run according to their configured cron schedules.
## Tasks
### TOS Review Task
- Retrieves and analyzes Terms of Service documents
- Updates service records with TOS information
- Scheduled via `CRON_TOSREVIEW_TASK`
### User Sentiment Task
- Analyzes user comments to determine overall sentiment
- Updates service records with sentiment analysis
- Scheduled via `CRON_USER_SENTIMENT_TASK`
### Comment Moderation Task
- Makes a basic first moderation of comments
- Flags comments as needed
- Adds content if needed
- Scheduled via `CRON_COMMENT_MODERATION_TASK`
### Force Triggers Task
- Maintains database triggers by forcing them to run under certain conditions
- Currently handles updating the "isRecentlyListed" flag for services after 15 days
- Scheduled via `CRON_FORCE_TRIGGERS_TASK`
### Service Score Recalculation Task
- Recalculates service scores based on attribute changes
- Processes jobs from the ServiceScoreRecalculationJob table
- Calculates privacy, trust, and overall scores
- Scheduled via `CRON_SERVICE_SCORE_RECALC_TASK`
## Development
### Project Structure
```text
pyworker/
├── pyworker/
│ ├── __init__.py
│ ├── __main__.py
│ ├── cli.py
│ ├── config.py
│ ├── database.py
│ ├── scheduler.py
│ ├── tasks/
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── comment_moderation.py
│ │ ├── force_triggers.py
│ │ ├── service_score_recalc.py
│ │ ├── tos_review.py
│ │ └── user_sentiment.py
│ └── utils/
│ ├── __init__.py
│ ├── ai.py
│ └── logging.py
├── tests/
├── pyproject.toml
└── README.md
```
### Adding New Tasks
1. Create a new task class in `pyworker/tasks/`
2. Implement the `run` method
3. Add the task to `pyworker/tasks/__init__.py`
4. Update the CLI and scheduler to handle the new task
## License
MIT

View File

@@ -0,0 +1,12 @@
services:
  pyworker:
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    # Bulk configuration comes from the local .env file.
    env_file:
      - .env
    # Explicit pass-throughs: these can be supplied by the deployment
    # environment and take precedence over the .env file values.
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - OPENAI_MODEL=${OPENAI_MODEL}
      - DATABASE_URL=${DATABASE_URL}

24
pyworker/pyproject.toml Normal file
View File

@@ -0,0 +1,24 @@
[project]
name = "pyworker"
version = "0.1.0"
description = "AI workers for kycnot.me"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
"croniter>=6.0.0",
"json-repair>=0.41.1",
"openai>=1.74.0",
"psycopg[binary,pool]>=3.2.6",
"python-dotenv>=1.1.0",
"requests>=2.32.3",
]
[project.scripts]
pyworker = "pyworker.cli:main"
[tool.setuptools]
packages = ["pyworker"]
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

View File

@@ -0,0 +1,7 @@
"""
KYC Not Worker Package
A package for worker tasks related to the KYC Not platform.
"""
__version__ = "0.1.0"

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python3
"""
Entry point for the pyworker package when executed as a module.
"""
import sys
from pyworker.cli import main
if __name__ == "__main__":
sys.exit(main())

437
pyworker/pyworker/cli.py Normal file
View File

@@ -0,0 +1,437 @@
"""
Command line interface for the pyworker package.
"""
import argparse
import sys
import time
from typing import List, Optional, Dict, Any
from pyworker.config import config
from pyworker.database import (
close_db_pool,
fetch_all_services,
fetch_services_with_pending_comments,
)
from pyworker.scheduler import TaskScheduler
from .tasks import (
CommentModerationTask,
ForceTriggersTask,
ServiceScoreRecalculationTask,
TosReviewTask,
UserSentimentTask,
)
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)
def parse_args(args: List[str]) -> argparse.Namespace:
    """Parse command line arguments.

    Args:
        args: Raw argument list, typically ``sys.argv[1:]``.

    Returns:
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description="KYC Not Worker")

    # Global flag: run continuously on cron schedules instead of one-shot.
    parser.add_argument(
        "--worker",
        action="store_true",
        help="Run in worker mode (schedule tasks to run periodically)",
    )

    subparsers = parser.add_subparsers(dest="task", help="Task to run")

    # Every subcommand takes the same optional --service-id flag, so the
    # subparsers are built from a small (name, help) table.
    task_specs = [
        ("tos", "Retrieve Terms of Service (TOS) text"),
        ("sentiment", "Analyze user sentiment from comments"),
        ("moderation", "Moderate pending comments"),
        ("force-triggers", "Force triggers to run under certain conditions"),
        (
            "service-score-recalc",
            "Recalculate service scores based on attribute changes",
        ),
    ]
    for task_name, task_help in task_specs:
        sub = subparsers.add_parser(task_name, help=task_help)
        sub.add_argument(
            "--service-id", type=int, help="Specific service ID to process (optional)"
        )

    return parser.parse_args(args)
def run_tos_task(service_id: Optional[int] = None) -> int:
    """Run the TOS retrieval task once.

    Args:
        service_id: If given, only the service with this ID is processed.

    Returns:
        Process exit code (0 on success, 1 on error).
    """
    logger.info("Starting TOS retrieval task")
    try:
        candidates = fetch_all_services()
        if not candidates:
            logger.error("No services found")
            return 1

        if service_id:
            candidates = [svc for svc in candidates if svc["id"] == service_id]
            if not candidates:
                logger.error(f"Service with ID {service_id} not found")
                return 1

        # One task instance (and therefore one DB session) for all services.
        with TosReviewTask() as task:  # type: ignore
            for svc in candidates:
                # Nothing to retrieve for services without TOS URLs.
                if not svc.get("tosUrls"):
                    logger.info(
                        f"Skipping service {svc['name']} (ID: {svc['id']}) - no TOS URLs"
                    )
                    continue
                if task.run(svc):  # type: ignore
                    logger.info(
                        f"Successfully retrieved TOS for service {svc['name']}"
                    )
                else:
                    logger.warning(
                        f"Failed to retrieve TOS for service {svc['name']}"
                    )
        logger.info("TOS retrieval task completed")
        return 0
    finally:
        # Always release pooled connections, even on error.
        close_db_pool()
def run_sentiment_task(service_id: Optional[int] = None) -> int:
    """Run the user sentiment analysis task once.

    Args:
        service_id: If given, only the service with this ID is processed.

    Returns:
        Process exit code (0 on success, 1 on error).
    """
    logger.info("Starting user sentiment analysis task")
    try:
        targets = fetch_all_services()
        if not targets:
            logger.error("No services found")
            return 1

        if service_id:
            targets = [svc for svc in targets if svc["id"] == service_id]
            if not targets:
                logger.error(f"Service with ID {service_id} not found")
                return 1

        # Reuse a single task instance (one DB session) for every service.
        with UserSentimentTask() as task:  # type: ignore
            for svc in targets:
                # None signals failure; any other value counts as success.
                if task.run(svc) is not None:  # type: ignore
                    logger.info(
                        f"Successfully analyzed sentiment for service {svc['name']}"
                    )
        logger.info("User sentiment analysis task completed")
        return 0
    finally:
        # Always release pooled connections, even on error.
        close_db_pool()
def run_moderation_task(service_id: Optional[int] = None) -> int:
    """Run the comment moderation task once.

    Args:
        service_id: If given, only the service with this ID is processed;
            otherwise only services that actually have pending comments are
            fetched and checked.

    Returns:
        Process exit code (0 on success, 1 on error).
    """
    logger.info("Starting comment moderation task")
    try:
        services_to_process: List[Dict[str, Any]] = []
        if service_id:
            # Fetch specific service if ID is provided.
            # Consider creating a fetch_service_by_id for efficiency if this
            # path is common.
            all_services = fetch_all_services()
            services_to_process = [s for s in all_services if s["id"] == service_id]
            if not services_to_process:
                logger.error(
                    f"Service with ID {service_id} not found or does not meet general fetch criteria."
                )
                return 1
            logger.info(f"Processing specifically for service ID: {service_id}")
        else:
            # No specific service ID: only look at services that actually
            # have pending comments.
            logger.info(
                "No specific service ID provided. Querying for services with pending comments."
            )
            services_to_process = fetch_services_with_pending_comments()
            if not services_to_process:
                logger.info(
                    "No services found with pending comments for moderation at this time."
                )

        # (An earlier `elif not services_to_process and service_id` branch was
        # unreachable here — that case already returned 1 above — and has been
        # removed.)
        if services_to_process:
            logger.info(
                f"Identified {len(services_to_process)} service(s) to check for comment moderation."
            )

        any_service_had_comments_processed = False
        # Initialize task and use as context manager.
        with CommentModerationTask() as task:  # type: ignore
            for service in services_to_process:
                # CommentModerationTask.run() returns True when it actually
                # moderated comments for this service, and handles its own
                # per-comment logging.
                if task.run(service):  # type: ignore
                    logger.info(
                        f"Comment moderation task ran for service {service['name']} (ID: {service['id']}) and processed comments."
                    )
                    any_service_had_comments_processed = True
                else:
                    logger.info(
                        f"Comment moderation task ran for service {service['name']} (ID: {service['id']}), but no new comments were moderated."
                    )

        if services_to_process and not any_service_had_comments_processed:
            logger.info(
                "Completed iterating through services; no comments were moderated in this run."
            )
        logger.info("Comment moderation task completed")
        return 0
    finally:
        # Ensure connection pool is closed even if an error occurs.
        close_db_pool()
def run_force_triggers_task() -> int:
    """Run the force triggers task once.

    Returns:
        Process exit code (0 on success, 1 on failure).
    """
    logger.info("Starting force triggers task")
    try:
        # A single task instance owns the DB session for the whole run.
        with ForceTriggersTask() as task:  # type: ignore
            if task.run():  # type: ignore
                logger.info("Force triggers task completed successfully.")
                return 0
            logger.error("Force triggers task failed.")
            return 1
    finally:
        # Always release pooled connections, even on error.
        close_db_pool()
def run_service_score_recalc_task(service_id: Optional[int] = None) -> int:
    """Run the service score recalculation task once.

    Args:
        service_id: If given, only this service's pending jobs are processed.

    Returns:
        Process exit code (0 on success, 1 on error).
    """
    logger.info("Starting service score recalculation task")
    try:
        # The task drains pending recalculation jobs inside one session.
        with ServiceScoreRecalculationTask() as task:  # type: ignore
            if task.run(service_id):  # type: ignore
                logger.info("Successfully recalculated service scores")
            else:
                logger.warning("Failed to recalculate service scores")
        logger.info("Service score recalculation task completed")
        return 0
    finally:
        # Always release pooled connections, even on error.
        close_db_pool()
def run_worker_mode() -> int:
    """Run in worker mode, scheduling tasks to run periodically.

    Schedules come from ``CRON_<TASKNAME>_TASK`` environment variables (see
    ``Config.task_schedules``). A built-in fallback schedule is applied for
    the service score recalculation task when it is not configured.

    Returns:
        Process exit code (0 on clean shutdown, 1 on error).
    """
    logger.info("Starting worker mode")

    task_schedules = config.task_schedules
    if not task_schedules:
        logger.error(
            "No task schedules defined. Set CRON_TASKNAME_TASK environment variables."
        )
        return 1
    logger.info(
        f"Found {len(task_schedules)} scheduled tasks: {', '.join(task_schedules.keys())}"
    )

    scheduler = TaskScheduler()

    # Map configured task names (lower-cased) to their runner functions.
    runners = {
        "tosreview": run_tos_task,
        "user_sentiment": run_sentiment_task,
        "comment_moderation": run_moderation_task,
        "force_triggers": run_force_triggers_task,
        "service_score_recalc": run_service_score_recalc_task,
    }
    for task_name, cron_expression in task_schedules.items():
        runner = runners.get(task_name.lower())
        if runner is None:
            logger.warning(f"Unknown task '{task_name}', skipping")
            continue
        scheduler.register_task(task_name, cron_expression, runner)

    # Fallback: ensure score recalculation still runs when no
    # CRON_SERVICE_SCORE_RECALC_TASK variable is set. BUGFIX: this used to be
    # registered unconditionally, which double-scheduled the task whenever the
    # environment variable was also present.
    if "service_score_recalc" not in {name.lower() for name in task_schedules}:
        scheduler.register_task(
            "service-score-recalc",
            "*/5 * * * *",
            run_service_score_recalc_task,
        )

    if not scheduler.tasks:
        logger.error("No valid tasks registered")
        return 1

    try:
        scheduler.start()
        logger.info("Worker started, press Ctrl+C to stop")
        # Keep the main thread alive while the scheduler threads work.
        while scheduler.is_running():
            time.sleep(1)
        return 0
    except KeyboardInterrupt:
        logger.info("Keyboard interrupt received, shutting down...")
        scheduler.stop()
        return 0
    except Exception as e:
        logger.exception(f"Error in worker mode: {e}")
        scheduler.stop()
        return 1
def main() -> int:
    """Main entry point.

    Returns:
        Exit code.
    """
    args = parse_args(sys.argv[1:])
    try:
        # Worker mode takes precedence over one-shot task execution.
        if args.worker:
            return run_worker_mode()

        # One-shot execution of a single named task.
        if args.task == "tos":
            return run_tos_task(args.service_id)
        if args.task == "sentiment":
            return run_sentiment_task(args.service_id)
        if args.task == "moderation":
            return run_moderation_task(args.service_id)
        if args.task == "force-triggers":
            return run_force_triggers_task()
        if args.task == "service-score-recalc":
            return run_service_score_recalc_task(args.service_id)
        if args.task:
            logger.error(f"Unknown task: {args.task}")
            return 1

        logger.error(
            "No task specified. Use --worker for scheduled execution or specify a task to run once."
        )
        return 1
    except Exception as e:
        logger.exception(f"Error running task: {e}")
        return 1
if __name__ == "__main__":
sys.exit(main())

View File

@@ -0,0 +1,67 @@
"""
Configuration module for pyworker.
Handles loading environment variables and configuration settings.
"""
import os
import re
from typing import Dict
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class Config:
    """Configuration class for the worker application."""

    # Database settings. Query parameters (e.g. ?schema=public) are stripped
    # by db_connection_string before the URL is handed to the driver.
    DATABASE_URL: str = os.getenv(
        "DATABASE_URL", "postgresql://kycnot:kycnot@localhost:3399/kycnot"
    )

    @property
    def db_connection_string(self) -> str:
        """Get the clean database connection string without query parameters."""
        base_url, _sep, _query = self.DATABASE_URL.partition("?")
        return base_url

    # API settings
    TOS_API_BASE_URL: str = os.getenv("TOS_API_BASE_URL", "https://r.jina.ai")

    # Logging settings
    LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
    LOG_FORMAT: str = os.getenv(
        "LOG_FORMAT", "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )

    @property
    def task_schedules(self) -> Dict[str, str]:
        """
        Get cron schedules for tasks from environment variables.

        Looks for environment variables with the pattern CRON_TASKNAME_TASK
        and returns a dictionary mapping lower-cased task names to cron
        schedules.

        Returns:
            Dictionary mapping task names to cron schedules.
        """
        cron_pattern = re.compile(r"^CRON_(\w+)_TASK$")
        return {
            match.group(1).lower(): value
            for key, value in os.environ.items()
            if (match := cron_pattern.match(key)) is not None
        }
# Create a singleton instance
config = Config()

View File

@@ -0,0 +1,737 @@
"""
Database operations for the pyworker package.
"""
import json
from contextlib import contextmanager
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional, TypedDict, Union
from typing import Literal as TypeLiteral
import psycopg
from psycopg.rows import dict_row
from psycopg.sql import SQL, Composed, Literal
from psycopg_pool import ConnectionPool # Proper import for the connection pool
from pyworker.config import config
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)
# --- Type Definitions ---
# Moved from tasks/comment_moderation.py
class CommentType(TypedDict):
    """Shape of a Comment row as consumed by the moderation task.

    Field names are camelCase because they mirror the quoted database
    column names used by this module's SQL queries.
    """

    id: int
    upvotes: int
    status: str  # Assuming CommentStatus Enum isn't used across modules yet
    suspicious: bool
    requiresAdminReview: bool  # presumably set when a human pass is needed — confirm
    communityNote: Optional[str]
    internalNote: Optional[str]
    privateContext: Optional[str]
    content: str  # the comment body text
    rating: Optional[float]
    createdAt: datetime
    updatedAt: datetime
    authorId: int
    serviceId: int
    parentId: Optional[int]  # None for root comments, set for replies
    # Add author/service/reply fields if needed by update_comment
# Moved from utils/ai.py
# Severity level shared by all highlight entry types below.
RatingType = TypeLiteral["info", "warning", "alert"]


class UserRightType(TypedDict):
    """Highlight entry about user rights: text plus a severity rating."""

    text: str
    rating: RatingType


class DataSharingType(TypedDict):
    """Highlight entry about data sharing: text plus a severity rating."""

    text: str
    rating: RatingType


class DataCollectedType(TypedDict):
    """Highlight entry about collected data: text plus a severity rating."""

    text: str
    rating: RatingType


class KycOrSourceOfFundsType(TypedDict):
    """Highlight entry about KYC / source-of-funds: text plus a rating."""

    text: str
    rating: RatingType


class TosReviewType(TypedDict, total=False):
    """TOS review payload persisted by save_tos_review (all keys optional)."""

    contentHash: str
    kycLevel: int
    summary: str
    complexity: TypeLiteral["low", "medium", "high"]
    highlights: List[Dict[str, Any]]


class CommentSentimentSummaryType(TypedDict):
    """Aggregated sentiment summary derived from a service's comments."""

    summary: str
    sentiment: TypeLiteral["positive", "negative", "neutral"]
    whatUsersLike: List[str]
    whatUsersDislike: List[str]


class CommentModerationType(TypedDict):
    """Moderation verdict for a single comment."""

    isSpam: bool
    requiresAdminReview: bool
    contextNote: str
    internalNote: str
    commentQuality: int


# Any query value accepted by cursor.execute in this module.
QueryType = Union[str, bytes, SQL, Composed, Literal]
# --- Database Connection Pool ---
_db_pool: Optional[ConnectionPool] = None
def get_db_pool() -> ConnectionPool:
    """Get or create the database connection pool.

    The pool is created lazily on first use and cached in the module-level
    ``_db_pool`` singleton.

    Returns:
        A connection pool object.
    """
    global _db_pool
    if _db_pool is not None:
        return _db_pool
    try:
        # 2 warm connections, growing to at most 10 under load; connections
        # are handed out in transactional (non-autocommit) mode.
        _db_pool = ConnectionPool(
            conninfo=config.db_connection_string,
            min_size=2,
            max_size=10,
            kwargs={"autocommit": False},
        )
        logger.info("Database connection pool initialized")
    except Exception as e:
        logger.error(f"Error creating database connection pool: {e}")
        raise
    return _db_pool
def close_db_pool():
    """Close the database connection pool.

    Safe to call when no pool exists. This should be called when the
    application is shutting down.
    """
    global _db_pool
    if _db_pool is None:
        return
    logger.info("Closing database connection pool")
    _db_pool.close()
    _db_pool = None
@contextmanager
def get_db_connection() -> Generator[psycopg.Connection, None, None]:
    """
    Context manager for database connections.

    Checks a connection out of the shared pool, pins its search_path to the
    public schema, and returns it to the pool when the block exits.

    Yields:
        A database connection object from the pool.
    """
    pool = get_db_pool()
    try:
        with pool.connection() as conn:
            # Pin the schema so object resolution does not depend on the
            # role's default search_path.
            with conn.cursor() as cursor:
                cursor.execute("SET search_path TO public")
            yield conn
            # Leaving the with-block hands the connection back to the pool.
    except Exception as e:
        logger.error(f"Error connecting to the database: {e}")
        raise
# --- Database Functions ---
def fetch_all_services() -> List[Dict[str, Any]]:
    """
    Fetch all public and verified services from the database.

    "Verified" here means a verificationStatus of VERIFICATION_SUCCESS,
    COMMUNITY_CONTRIBUTED or APPROVED; only PUBLIC services are included.

    Returns:
        A list of service dictionaries (empty when none match or the query
        fails).
    """
    services = []
    try:
        with get_db_connection() as conn:
            # dict_row yields each service as a plain dict keyed by column name.
            with conn.cursor(row_factory=dict_row) as cursor:
                cursor.execute("""
                    SELECT id, name, slug, description, "kycLevel", "overallScore",
                           "privacyScore", "trustScore", "verificationStatus",
                           "serviceVisibility", "tosUrls", "serviceUrls", "onionUrls", "i2pUrls",
                           "tosReview", "tosReviewAt", "userSentiment", "userSentimentAt"
                    FROM "Service"
                    WHERE "serviceVisibility" = 'PUBLIC'
                    AND ("verificationStatus" = 'VERIFICATION_SUCCESS'
                         OR "verificationStatus" = 'COMMUNITY_CONTRIBUTED'
                         OR "verificationStatus" = 'APPROVED')
                    ORDER BY id
                """)
                services = cursor.fetchall()
        logger.info(f"Fetched {len(services)} services from the database")
    except Exception as e:
        # Errors are swallowed deliberately: callers treat "no services" and
        # "query failed" uniformly via the empty list.
        logger.error(f"Error fetching services: {e}")
    return services
def fetch_services_with_pending_comments() -> List[Dict[str, Any]]:
    """
    Fetch all public and verified services that have at least one pending comment.

    Same visibility/verification filter as fetch_all_services, restricted to
    services joined to at least one Comment with status PENDING (DISTINCT
    collapses the one-row-per-comment join fan-out).

    Returns:
        A list of service dictionaries (empty when none match or the query
        fails).
    """
    services = []
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                cursor.execute("""
                    SELECT DISTINCT s.id, s.name, s.slug, s.description, s."kycLevel", s."overallScore",
                           s."privacyScore", s."trustScore", s."verificationStatus",
                           s."serviceVisibility", s."tosUrls", s."serviceUrls", s."onionUrls", s."i2pUrls",
                           s."tosReview", s."tosReviewAt", s."userSentiment", s."userSentimentAt"
                    FROM "Service" s
                    JOIN "Comment" c ON s.id = c."serviceId"
                    WHERE c.status = 'PENDING'
                    AND s."serviceVisibility" = 'PUBLIC'
                    AND (s."verificationStatus" = 'VERIFICATION_SUCCESS'
                         OR s."verificationStatus" = 'COMMUNITY_CONTRIBUTED'
                         OR s."verificationStatus" = 'APPROVED')
                    ORDER BY s.id
                """)
                services = cursor.fetchall()
        logger.info(
            f"Fetched {len(services)} services with pending comments from the database"
        )
    except Exception as e:
        # Swallowed so callers can treat failures as "nothing to process".
        logger.error(f"Error fetching services with pending comments: {e}")
    return services
def fetch_service_attributes(service_id: int) -> List[Dict[str, Any]]:
    """
    Fetch attributes for a specific service.

    Args:
        service_id: The ID of the service.

    Returns:
        A list of attribute dictionaries (empty when none exist or the
        query fails).
    """
    attributes = []
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                # Join through the ServiceAttribute link table.
                cursor.execute(
                    """
                    SELECT a.id, a.slug, a.title, a.description, a.category, a.type
                    FROM "Attribute" a
                    JOIN "ServiceAttribute" sa ON a.id = sa."attributeId"
                    WHERE sa."serviceId" = %s
                    """,
                    (service_id,),
                )
                attributes = cursor.fetchall()
    except Exception as e:
        logger.error(f"Error fetching attributes for service {service_id}: {e}")
    return attributes
def get_attribute_id_by_slug(slug: str) -> Optional[int]:
    """Look up an Attribute's primary key by its slug.

    Args:
        slug: The attribute slug to look up.

    Returns:
        The attribute id, or None when no such attribute exists or the
        query fails.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                cursor.execute('SELECT id FROM "Attribute" WHERE slug = %s', (slug,))
                row = cursor.fetchone()
                return row["id"] if row else None
    except Exception as e:
        logger.error(f"Error fetching attribute id for slug '{slug}': {e}")
        return None
def add_service_attribute(service_id: int, attribute_id: int) -> bool:
    """Attach an attribute to a service (idempotent).

    Args:
        service_id: The service to modify.
        attribute_id: The attribute to attach.

    Returns:
        True when the link exists afterwards (already present or newly
        inserted), False when the operation failed.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                # Short-circuit when the link already exists.
                cursor.execute(
                    'SELECT 1 FROM "ServiceAttribute" WHERE "serviceId" = %s AND "attributeId" = %s',
                    (service_id, attribute_id),
                )
                if cursor.fetchone():
                    return True
                cursor.execute(
                    'INSERT INTO "ServiceAttribute" ("serviceId", "attributeId", "createdAt") VALUES (%s, %s, NOW())',
                    (service_id, attribute_id),
                )
                conn.commit()
                logger.info(
                    f"Added attribute id {attribute_id} to service {service_id}"
                )
                return True
    except Exception as e:
        logger.error(
            f"Error adding attribute id {attribute_id} to service {service_id}: {e}"
        )
        return False
def remove_service_attribute(service_id: int, attribute_id: int) -> bool:
    """Detach an attribute from a service.

    Args:
        service_id: The service to modify.
        attribute_id: The attribute to detach.

    Returns:
        True on success (including when no link existed), False on failure.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                cursor.execute(
                    'DELETE FROM "ServiceAttribute" WHERE "serviceId" = %s AND "attributeId" = %s',
                    (service_id, attribute_id),
                )
                conn.commit()
                logger.info(
                    f"Removed attribute id {attribute_id} from service {service_id}"
                )
                return True
    except Exception as e:
        logger.error(
            f"Error removing attribute id {attribute_id} from service {service_id}: {e}"
        )
        return False
def add_service_attribute_by_slug(service_id: int, attribute_slug: str) -> bool:
    """Attach an attribute, identified by slug, to a service.

    Returns:
        True on success, False when the slug is unknown or the link fails.
    """
    attr_id = get_attribute_id_by_slug(attribute_slug)
    if attr_id is None:
        logger.error(f"Attribute with slug '{attribute_slug}' not found.")
        return False
    return add_service_attribute(service_id, attr_id)
def remove_service_attribute_by_slug(service_id: int, attribute_slug: str) -> bool:
    """Detach an attribute, identified by slug, from a service.

    Returns:
        True on success, False when the slug is unknown or the delete fails.
    """
    attr_id = get_attribute_id_by_slug(attribute_slug)
    if attr_id is None:
        logger.error(f"Attribute with slug '{attribute_slug}' not found.")
        return False
    return remove_service_attribute(service_id, attr_id)
def save_tos_review(service_id: int, review: Optional[TosReviewType]):
    """Persist a TOS review and/or update the timestamp for a service.

    If *review* is ``None`` the existing review (if any) is preserved while
    only the ``tosReviewAt`` column is updated. This ensures we still track
    when the review task last ran even if the review generation failed or
    produced no changes.

    Args:
        service_id: The ID of the service to update.
        review: The review payload to store as JSON, or None to only bump
            the timestamp.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                if review is None:
                    # NOTE(review): the IS NULL guard means the timestamp is
                    # only bumped for services that have never had a review;
                    # services with an existing review keep their previous
                    # tosReviewAt — confirm this is the intended behavior.
                    cursor.execute(
                        'UPDATE "Service" SET "tosReviewAt" = NOW() WHERE id = %s AND "tosReview" IS NULL',
                        (service_id,),
                    )
                else:
                    # Serialize for storage in the tosReview column.
                    review_json = json.dumps(review)
                    cursor.execute(
                        'UPDATE "Service" SET "tosReview" = %s, "tosReviewAt" = NOW() WHERE id = %s',
                        (review_json, service_id),
                    )
                conn.commit()
        # "updated" reflects whether a new review payload was written.
        logger.info(
            f"Successfully saved TOS review (updated={review is not None}) for service {service_id}"
        )
    except Exception as e:
        logger.error(f"Error saving TOS review for service {service_id}: {e}")
def update_kyc_level(service_id: int, kyc_level: int) -> bool:
    """
    Update the KYC level for a specific service.

    Args:
        service_id: The ID of the service.
        kyc_level: The new KYC level (0-4).

    Returns:
        bool: True if the update was successful, False otherwise.
    """
    try:
        # Reject out-of-range levels before touching the database.
        if not 0 <= kyc_level <= 4:
            logger.error(
                f"Invalid KYC level ({kyc_level}) for service {service_id}. Must be between 0 and 4."
            )
            return False
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                cursor.execute(
                    """
                    UPDATE "Service"
                    SET "kycLevel" = %s, "updatedAt" = NOW()
                    WHERE id = %s
                    """,
                    (kyc_level, service_id),
                )
                conn.commit()
                logger.info(
                    f"Successfully updated KYC level to {kyc_level} for service {service_id}"
                )
                return True
    except Exception as e:
        logger.error(f"Error updating KYC level for service {service_id}: {e}")
        return False
def get_comments(service_id: int, status: str = "APPROVED") -> List[Dict[str, Any]]:
    """
    Get all comments for a specific service with the specified status.

    Args:
        service_id: The ID of the service.
        status: The status of comments to fetch (e.g. 'APPROVED', 'PENDING'). Defaults to 'APPROVED'.

    Returns:
        A list of comment dictionaries (flat, NOT nested by parent — each row
        carries a "parentId" and a computed "depth" so callers can rebuild the
        thread tree themselves). Empty list on error.

    NOTE: The structure returned by the SQL query might be different from CommentType.
    Adjust CommentType or parsing if needed elsewhere.
    """
    comments: List[Dict[str, Any]] = []
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                # Recursive CTE: the base branch selects root comments
                # (parentId IS NULL) for the service, the recursive branch
                # walks replies via parentId. Both branches filter on the
                # same status, so replies to a comment with a different
                # status are excluded from the tree.
                # NOTE(review): the final ORDER BY ("createdAt" DESC,
                # depth ASC) does not interleave replies under their
                # parents — confirm callers expect a flat list.
                cursor.execute(
                    """
                    WITH RECURSIVE comment_tree AS (
                        -- Base case: get all root comments (no parent)
                        SELECT
                            c.id,
                            c.content,
                            c.rating,
                            c.upvotes,
                            c."createdAt",
                            c."updatedAt",
                            c."parentId",
                            c.status,
                            u.id as "authorId",
                            u.name as "authorName",
                            u."displayName" as "authorDisplayName",
                            u.picture as "authorPicture",
                            u.verified as "authorVerified",
                            0 as depth
                        FROM "Comment" c
                        JOIN "User" u ON c."authorId" = u.id
                        WHERE c."serviceId" = %s
                        AND c.status = %s
                        AND c."parentId" IS NULL
                        UNION ALL
                        -- Recursive case: get all replies
                        SELECT
                            c.id,
                            c.content,
                            c.rating,
                            c.upvotes,
                            c."createdAt",
                            c."updatedAt",
                            c."parentId",
                            c.status,
                            u.id as "authorId",
                            u.name as "authorName",
                            u."displayName" as "authorDisplayName",
                            u.picture as "authorPicture",
                            u.verified as "authorVerified",
                            ct.depth + 1
                        FROM "Comment" c
                        JOIN "User" u ON c."authorId" = u.id
                        JOIN comment_tree ct ON c."parentId" = ct.id
                        WHERE c.status = %s
                    )
                    SELECT * FROM comment_tree
                    ORDER BY "createdAt" DESC, depth ASC
                    """,
                    (service_id, status, status),
                )
                comments = cursor.fetchall()
    except Exception as e:
        logger.error(
            f"Error fetching comments for service {service_id} with status {status}: {e}"
        )
    return comments
def get_max_comment_updated_at(
    service_id: int, status: str = "APPROVED"
) -> Optional[datetime]:
    """Return the newest 'updatedAt' among a service's comments with *status*.

    Args:
        service_id: The ID of the service.
        status: The status of comments to consider.

    Returns:
        The maximum 'updatedAt' timestamp as a datetime object, or None when
        no comments match (or on database error).
    """
    latest: Optional[datetime] = None
    try:
        with get_db_connection() as conn:
            # A plain tuple cursor is enough for a single scalar result.
            with conn.cursor() as cursor:
                cursor.execute(
                    """
                    SELECT MAX("updatedAt")
                    FROM "Comment"
                    WHERE "serviceId" = %s AND status = %s
                    """,
                    (service_id, status),
                )
                row = cursor.fetchone()
        # MAX() over zero rows yields a single NULL — treat it as "no value".
        if row is not None and row[0] is not None:
            latest = row[0]
    except Exception as e:
        logger.error(
            f"Error fetching max comment updatedAt for service {service_id} with status {status}: {e}"
        )
    return latest
def save_user_sentiment(
    service_id: int,
    sentiment: Optional[CommentSentimentSummaryType],
    last_processed_comment_timestamp: Optional[datetime],
):
    """Persist (or clear) the user sentiment summary for a service.

    Args:
        service_id: The ID of the service.
        sentiment: Sentiment data to store, or None to clear the column.
        last_processed_comment_timestamp: 'updatedAt' of the most recent
            comment covered by this analysis. Can be None.
    """
    try:
        payload = json.dumps(sentiment) if sentiment is not None else None
        with get_db_connection() as conn:
            # Plain cursor: an UPDATE returns no rows to map.
            with conn.cursor() as cursor:
                cursor.execute(
                    """
                    UPDATE "Service"
                    SET "userSentiment" = %s, "userSentimentAt" = %s
                    WHERE id = %s
                    """,
                    (payload, last_processed_comment_timestamp, service_id),
                )
            conn.commit()
            if sentiment:
                logger.info(
                    f"Successfully saved user sentiment for service {service_id} with last comment processed at {last_processed_comment_timestamp}"
                )
            else:
                logger.info(
                    f"Successfully cleared user sentiment for service {service_id}, last comment processed at set to {last_processed_comment_timestamp}"
                )
    except Exception as e:
        logger.error(f"Error saving user sentiment for service {service_id}: {e}")
def update_comment_moderation(comment_data: CommentType):
    """Apply moderation results to an existing comment row.

    Args:
        comment_data: Comment fields to persist. Must contain 'id' plus every
            named parameter used by the UPDATE below (status,
            requiresAdminReview, communityNote, internalNote).
    """
    comment_id = comment_data.get("id")
    if not comment_id:
        logger.error("Cannot update comment: 'id' is missing from comment_data.")
        return
    query = """
        UPDATE "Comment"
        SET
            status = %(status)s,
            "requiresAdminReview" = %(requiresAdminReview)s,
            "communityNote" = %(communityNote)s,
            "internalNote" = %(internalNote)s,
            "updatedAt" = NOW()
        WHERE id = %(id)s
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # Named placeholders pull values straight from the dict.
                cursor.execute(query, comment_data)
            conn.commit()
            logger.info(f"Successfully updated comment {comment_id}")
    except Exception as e:
        logger.error(f"Error updating comment {comment_id}: {e}")
def touch_service_updated_at(service_id: int) -> bool:
    """Bump a service's "updatedAt" column to the current time.

    Args:
        service_id: The ID of the service.

    Returns:
        bool: True if the update was successful, False otherwise.
    """
    query = """
        UPDATE "Service"
        SET "updatedAt" = NOW()
        WHERE id = %s
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                cursor.execute(query, (service_id,))
            conn.commit()
            logger.info(f"Successfully touched updatedAt for service {service_id}")
            return True
    except Exception as e:
        logger.error(f"Error touching updatedAt for service {service_id}: {e}")
        return False
def run_db_query(query: Any, params: Optional[Any] = None) -> List[Dict[str, Any]]:
    """Run a read query and return every matching row as a dict.

    Args:
        query: SQL to execute.
        params: Optional bind parameters.

    Returns:
        All matching rows as dictionaries; empty list on error.
    """
    rows: List[Dict[str, Any]] = []
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                # Only pass params when provided: a bare execute(query) skips
                # placeholder processing entirely.
                if params is None:
                    cursor.execute(query)
                else:
                    cursor.execute(query, params)
                rows = cursor.fetchall()
    except Exception as e:
        logger.error(f"Error running query: {e}")
    return rows
def execute_db_command(command: str, params: Optional[Any] = None) -> int:
    """Execute a write command (INSERT, UPDATE, DELETE) and report affected rows.

    Args:
        command: The SQL command string.
        params: Optional parameters for the command.

    Returns:
        The number of rows affected by the command (0 on error).
    """
    affected_rows = 0
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cursor:
                # The type stub expects a prepared-query type here; at
                # runtime a plain string behaves identically.
                cursor.execute(command, params)  # type: ignore
                affected_rows = cursor.rowcount
            conn.commit()
            logger.info(f"Executed command, {affected_rows} rows affected.")
    except Exception as e:
        logger.error(f"Error executing command: {e}")
    return affected_rows
def create_attribute(
    slug: str,
    title: str,
    description: str,
    category: str,
    type: str,
    privacy_points: float = 0,
    trust_points: float = 0,
    overall_points: float = 0,
) -> Optional[int]:
    """
    Create a new attribute in the database if it doesn't already exist.

    Args:
        slug: The unique slug for the attribute.
        title: The display title of the attribute.
        description: The description of the attribute.
        category: The category of the attribute (e.g., 'TRUST', 'PRIVACY').
        type: The type of the attribute (e.g., 'WARNING', 'FEATURE').
        privacy_points: Points affecting privacy score (default: 0).
        trust_points: Points affecting trust score (default: 0).
        overall_points: Points affecting overall score (default: 0).
            NOTE(review): accepted and documented but never written by the
            INSERT below — confirm whether "Attribute" has an "overallPoints"
            column that should be populated.

    Returns:
        The ID of the created (or existing) attribute, or None if creation failed.
    """
    try:
        with get_db_connection() as conn:
            with conn.cursor(row_factory=dict_row) as cursor:
                # First check if the attribute already exists
                cursor.execute('SELECT id FROM "Attribute" WHERE slug = %s', (slug,))
                row = cursor.fetchone()
                if row:
                    logger.info(
                        f"Attribute with slug '{slug}' already exists, id: {row['id']}"
                    )
                    return row["id"]
                # Create the attribute if it doesn't exist
                cursor.execute(
                    """
                    INSERT INTO "Attribute" (
                        slug, title, description, "privacyPoints", "trustPoints",
                        category, type, "createdAt", "updatedAt"
                    ) VALUES (
                        %s, %s, %s, %s, %s, %s, %s, NOW(), NOW()
                    ) RETURNING id
                    """,
                    (
                        slug,
                        title,
                        description,
                        privacy_points,
                        trust_points,
                        category,
                        type,
                    ),
                )
                # Fetch the RETURNING row BEFORE committing: reading results
                # from a cursor after commit relies on client-side buffering
                # and is not guaranteed to work across driver versions.
                result = cursor.fetchone()
                conn.commit()
                if result is None:
                    logger.error(
                        f"Failed to retrieve ID for newly created attribute with slug '{slug}'"
                    )
                    return None
                attribute_id = result["id"]
                logger.info(
                    f"Created new attribute with slug '{slug}', id: {attribute_id}"
                )
                return attribute_id
    except Exception as e:
        logger.error(f"Error creating attribute with slug '{slug}': {e}")
        return None

View File

@@ -0,0 +1,184 @@
"""
Scheduler module for managing task execution with cron.
"""
import signal
import threading
from datetime import datetime
from types import FrameType
from typing import Any, Callable, Dict, List, ParamSpec, TypeVar
from croniter import croniter
from pyworker.database import close_db_pool
from .tasks import (
CommentModerationTask,
ForceTriggersTask,
ServiceScoreRecalculationTask,
TosReviewTask,
UserSentimentTask,
)
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)
P = ParamSpec("P")
R = TypeVar("R")
class TaskScheduler:
    """Task scheduler for running tasks on a cron schedule.

    Each registered task gets its own daemon thread that sleeps until the
    next cron fire time, runs the task, and repeats until stop() is called
    (or SIGINT/SIGTERM is received).
    """

    def __init__(self):
        """Initialize the task scheduler."""
        # task name -> {"cron", "func", "instance", "args", "kwargs"}
        self.tasks: Dict[str, Dict[str, Any]] = {}
        self.running = False
        self.threads: List[threading.Thread] = []
        # Set once to ask every scheduler thread to wake up and exit.
        self.stop_event = threading.Event()
        self.logger = logger
        # Set up signal handlers so SIGINT/SIGTERM trigger a clean shutdown
        signal.signal(signal.SIGINT, self._handle_signal)
        signal.signal(signal.SIGTERM, self._handle_signal)

    def _handle_signal(self, signum: int, frame: FrameType | None) -> None:
        """Handle termination signals by stopping the scheduler."""
        self.logger.info(f"Received signal {signum}, shutting down...")
        self.stop()

    def register_task(
        self,
        task_name: str,
        cron_expression: str,
        task_func: Callable[P, R],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> None:
        """
        Register a task to be scheduled.

        Args:
            task_name: Name of the task. Must match one of the known task
                keys below; unknown names are logged and ignored.
            cron_expression: Cron expression defining the schedule.
            task_func: Function to execute.
            *args: Arguments to pass to the task function.
            **kwargs: Keyword arguments to pass to the task function.
        """
        # Declare task_instance variable with type annotation upfront
        task_instance: Any = None
        # Initialize the appropriate task class based on the task name
        if task_name.lower() == "tosreview":
            task_instance = TosReviewTask()
        elif task_name.lower() == "user_sentiment":
            task_instance = UserSentimentTask()
        elif task_name.lower() == "comment_moderation":
            task_instance = CommentModerationTask()
        elif task_name.lower() == "force_triggers":
            task_instance = ForceTriggersTask()
        elif task_name.lower() == "service_score_recalc":
            task_instance = ServiceScoreRecalculationTask()
        else:
            # Unknown names are skipped rather than raising, so one bad
            # config entry cannot keep the rest of the scheduler from running.
            self.logger.warning(f"Unknown task '{task_name}', skipping")
            return
        self.tasks[task_name] = {
            "cron": cron_expression,
            "func": task_func,
            "instance": task_instance,
            "args": args,
            "kwargs": kwargs,
        }
        self.logger.info(
            f"Registered task '{task_name}' with schedule: {cron_expression}"
        )

    def _run_task(self, task_name: str, task_info: Dict[str, Any]):
        """
        Run a task on its schedule. Executed in a dedicated thread per task.

        Args:
            task_name: Name of the task.
            task_info: Task information including function and schedule.
        """
        self.logger.info(f"Starting scheduler for task '{task_name}'")
        # Parse the cron expression
        cron = croniter(task_info["cron"], datetime.now())
        while not self.stop_event.is_set():
            # Get the next run time
            next_run = cron.get_next(datetime)
            self.logger.info(f"Next run for task '{task_name}': {next_run}")
            # Sleep until the next run time
            now = datetime.now()
            sleep_seconds = (next_run - now).total_seconds()
            if sleep_seconds > 0:
                # Wait until next run time or until stop event is set
                if self.stop_event.wait(sleep_seconds):
                    break
            # Run the task if we haven't been stopped
            if not self.stop_event.is_set():
                try:
                    self.logger.info(f"Running task '{task_name}'")
                    # Use task instance as a context manager to ensure
                    # a single database connection is used for the entire task
                    with task_info["instance"]:
                        # Execute the registered task function with its arguments
                        task_info["func"](*task_info["args"], **task_info["kwargs"])
                    self.logger.info(f"Task '{task_name}' completed")
                except Exception as e:
                    self.logger.exception(f"Error running task '{task_name}': {e}")
                finally:
                    # Close the database pool after task execution
                    # NOTE(review): every task thread closes the shared pool
                    # after each run; confirm this cannot race with another
                    # task thread still holding a pooled connection.
                    close_db_pool()

    def start(self):
        """Start the scheduler: spawn one daemon thread per registered task."""
        if self.running:
            self.logger.warning("Scheduler is already running")
            return
        self.logger.info("Starting scheduler")
        self.running = True
        self.stop_event.clear()
        # Start a thread for each task
        for task_name, task_info in self.tasks.items():
            thread = threading.Thread(
                target=self._run_task,
                args=(task_name, task_info),
                name=f"scheduler-{task_name}",
            )
            # Daemon threads won't block interpreter exit.
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
        self.logger.info(f"Started {len(self.threads)} scheduler threads")

    def stop(self):
        """Stop the scheduler and wait (bounded) for worker threads to exit."""
        if not self.running:
            return
        self.logger.info("Stopping scheduler")
        self.running = False
        self.stop_event.set()
        # Wait for all threads to terminate (5s grace per thread)
        for thread in self.threads:
            thread.join(timeout=5.0)
        self.threads = []
        # Close database pool when the scheduler stops
        close_db_pool()
        self.logger.info("Scheduler stopped")

    def is_running(self) -> bool:
        """Check if the scheduler is running."""
        return self.running

View File

@@ -0,0 +1,17 @@
"""Task modules for the pyworker package."""
from .base import Task
from .comment_moderation import CommentModerationTask
from .force_triggers import ForceTriggersTask
from .service_score_recalc import ServiceScoreRecalculationTask
from .tos_review import TosReviewTask
from .user_sentiment import UserSentimentTask
__all__ = [
"Task",
"CommentModerationTask",
"ForceTriggersTask",
"ServiceScoreRecalculationTask",
"TosReviewTask",
"UserSentimentTask",
]

View File

@@ -0,0 +1,64 @@
"""
Base task module for the pyworker package.
"""
from abc import ABC, abstractmethod
from contextlib import AbstractContextManager
from typing import Any, Optional, Type
from pyworker.database import get_db_connection
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)
class Task(ABC):
    """Base class for all worker tasks.

    A Task is also a context manager: entering it acquires a database
    connection (exposed as ``self.conn``) and exiting it hands the
    connection back via ``get_db_connection``'s context manager.
    """

    def __init__(self, name: str):
        """
        Initialize a task.

        Args:
            name: The name of the task.
        """
        self.name = name
        self.logger = setup_logging(f"pyworker.task.{name}")
        # Populated on __enter__, shared with subclasses via self.conn.
        self.conn: Optional[Any] = None
        self._context: Optional[AbstractContextManager[Any]] = None

    def __enter__(self):
        """Acquire a database connection for the duration of the task."""
        self._context = get_db_connection()
        self.conn = self._context.__enter__()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[Any],
    ) -> Optional[bool]:
        """Release the database connection acquired in __enter__."""
        if self._context is None:
            return None
        return self._context.__exit__(exc_type, exc_val, exc_tb)

    @abstractmethod
    def run(self, *args: Any, **kwargs: Any) -> Any:
        """
        Run the task. Subclasses must implement this.

        Args:
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            The result of the task.
        """
        ...

    def __str__(self) -> str:
        return f"{self.__class__.__name__}(name={self.name})"

View File

@@ -0,0 +1,112 @@
"""
Task for summarizing comments and getting overal sentiment
"""
import json
from datetime import datetime
from typing import Any, Dict, List
# Import types from database.py
from pyworker.database import ( # type: ignore
CommentType,
get_comments,
update_comment_moderation,
)
from pyworker.tasks.base import Task # type: ignore
from pyworker.utils.ai import prompt_comment_moderation
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as ISO-8601 strings."""

    def default(self, o: Any) -> Any:
        # Datetimes become ISO strings; everything else defers to the base
        # class, which raises TypeError for unsupported types.
        return o.isoformat() if isinstance(o, datetime) else super().default(o)
class CommentModerationTask(Task):
    """Task that AI-moderates a service's PENDING comments and persists the verdicts."""

    def __init__(self):
        """Initialize the comment moderation task."""
        super().__init__("comment_moderation")

    def run(self, service: Dict[str, Any]) -> bool:
        """
        Run the comment moderation task for one service.

        Args:
            service: Service dict; must contain "id" and "name".

        Returns:
            True if comments were processed, False otherwise.
        """
        service_id = service["id"]
        service_name = service["name"]
        # Query the approved comments for the service
        # get_comments is type ignored, so we assume it returns List[Dict[str, Any]]
        comments: List[Dict[str, Any]] = get_comments(service_id, status="PENDING")
        if not comments:
            self.logger.info(
                f"No pending comments found for service {service_name} (ID: {service_id}) during task run."
            )
            return False
        self.logger.info(
            f"Found {len(comments)} pending comments for service {service_name} (ID: {service_id}). Starting processing."
        )
        processed_at_least_one = False
        for comment_data in comments:
            # Assert the type for the individual dictionary for type checking within the loop
            comment: CommentType = comment_data  # type: ignore
            # Query OpenAI to get the sentiment summary
            # NOTE(review): "\\n" in this f-string yields a literal
            # backslash-n in the prompt text, not a newline — confirm intended.
            moderation = prompt_comment_moderation(
                f"Information about the service: {service}\\nCurrent time: {datetime.now()}\\n\\nComment to moderate: {json.dumps(comment, cls=DateTimeEncoder)}"
            )
            modstring = f"Comment {comment['id']} "
            # Spam policy: high-quality spam goes to a human, low-quality spam
            # is rejected outright; non-spam skips both branches here.
            if moderation["isSpam"] and moderation["commentQuality"] > 5:
                comment["status"] = "HUMAN_PENDING"
                modstring += " marked as HUMAN_PENDING"
            elif moderation["isSpam"] and moderation["commentQuality"] <= 5:
                comment["status"] = "REJECTED"
                modstring += " marked as REJECTED"
            if moderation["requiresAdminReview"]:
                comment["requiresAdminReview"] = True
                modstring += " requires admin review"
                # Ensure status is HUMAN_PENDING if admin review is required, unless already REJECTED
                if comment.get("status") != "REJECTED":
                    comment["status"] = "HUMAN_PENDING"
                    if (
                        "marked as HUMAN_PENDING" not in modstring
                    ):  # Avoid duplicate message
                        modstring += " marked as HUMAN_PENDING"
            else:
                comment["requiresAdminReview"] = False
                if (
                    comment.get("status") != "HUMAN_PENDING"
                    and comment.get("status") != "REJECTED"
                ):
                    comment["status"] = "APPROVED"
                    modstring += " marked as APPROVED"
            # NOTE(review): the guard checks key "moderationNote" but the
            # value read is "contextNote" — confirm which key the AI
            # response actually uses; a mismatch would raise KeyError or
            # silently drop the note.
            if moderation.get("moderationNote"):  # Check if key exists
                comment["communityNote"] = moderation["contextNote"]
                modstring += " with moderation note: " + moderation["contextNote"]
            else:
                comment["communityNote"] = None
            if moderation.get("internalNote"):  # Check if key exists
                comment["internalNote"] = moderation["internalNote"]
                modstring += (
                    " with internal note: " + moderation["internalNote"]
                )  # Changed from spam reason for clarity
            else:
                comment["internalNote"] = None
            # Save the sentiment summary to the database
            self.logger.info(f"{modstring}")
            update_comment_moderation(comment)
            processed_at_least_one = True
        return processed_at_least_one

View File

@@ -0,0 +1,43 @@
from pyworker.tasks.base import Task
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)
class ForceTriggersTask(Task):
    """
    Force triggers to run under certain conditions.
    """

    # Services listed more than this many days ago lose the
    # "recently listed" flag.
    RECENT_LISTED_INTERVAL_DAYS = 15

    def __init__(self):
        super().__init__("force_triggers")

    def run(self) -> bool:
        """Clear isRecentlyListed on services whose listing has expired.

        Returns:
            True on success, False when no connection is available or the
            update fails.
        """
        logger.info(f"Starting {self.name} task.")
        # Use the connection provided by the base Task class
        if not self.conn:
            logger.error("No database connection available")
            return False
        # The interval comes from a class constant (never user input), so the
        # f-string interpolation is not an injection risk.
        update_query = f"""
            UPDATE "Service"
            SET "isRecentlyListed" = FALSE, "updatedAt" = NOW()
            WHERE "isRecentlyListed" = TRUE
            AND "listedAt" IS NOT NULL
            AND "listedAt" < NOW() - INTERVAL '{self.RECENT_LISTED_INTERVAL_DAYS} days'
        """
        try:
            with self.conn.cursor() as cursor:
                cursor.execute(update_query)
                self.conn.commit()
                flipped = cursor.rowcount
            logger.info(f"Updated {flipped} services.")
        except Exception as e:
            logger.error(f"Error updating services: {e}")
            return False
        logger.info(f"{self.name} task completed successfully.")
        return True

View File

@@ -0,0 +1,325 @@
"""
Task to recalculate service scores based on attribute changes.
"""
from typing import Optional
from pyworker.tasks.base import Task
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)
class ServiceScoreRecalculationTask(Task):
    """
    Process pending service score recalculation jobs.

    This task fetches jobs from the ServiceScoreRecalculationJob table
    and recalculates service scores using the PostgreSQL functions.
    """

    def __init__(self):
        super().__init__("service_score_recalc")

    def run(self, service_id: Optional[int] = None) -> bool:
        """
        Process score recalculation jobs from the ServiceScoreRecalculationJob table.

        Args:
            service_id: Optional service ID to process only that specific service

        Returns:
            bool: True if successful, False otherwise
        """
        logger.info(f"Starting {self.name} task.")
        processed_count = 0
        error_count = 0
        batch_size = 50  # cap per run when draining the whole queue
        # Use the connection provided by the base Task class
        if not self.conn:
            logger.error("No database connection available")
            return False
        try:
            # Build query - either for a specific service or all pending jobs
            if service_id:
                select_query = """
                    SELECT id, "serviceId"
                    FROM "ServiceScoreRecalculationJob"
                    WHERE "serviceId" = %s AND "processedAt" IS NULL
                    ORDER BY "createdAt" ASC
                """
                params = [service_id]
            else:
                select_query = """
                    SELECT id, "serviceId"
                    FROM "ServiceScoreRecalculationJob"
                    WHERE "processedAt" IS NULL
                    ORDER BY "createdAt" ASC
                    LIMIT %s
                """
                params = [batch_size]
            # Fetch jobs
            with self.conn.cursor() as cursor:
                cursor.execute(select_query, params)
                unprocessed_jobs = cursor.fetchall()
            if not unprocessed_jobs:
                logger.info("No pending service score recalculation jobs found.")
                return True
            logger.info(
                f"Processing {len(unprocessed_jobs)} service score recalculation jobs."
            )
            # Process each job independently so one failure doesn't abort the batch
            for job in unprocessed_jobs:
                job_id = job[0]  # First column is id
                svc_id = job[1]  # Second column is serviceId
                try:
                    self._process_service_score(svc_id, job_id)
                    processed_count += 1
                    logger.debug(
                        f"Successfully processed job {job_id} for service {svc_id}"
                    )
                except Exception as e:
                    if self.conn:
                        self.conn.rollback()
                    error_count += 1
                    logger.error(
                        f"Error processing job {job_id} for service {svc_id}: {str(e)}",
                        exc_info=True,
                    )
            logger.info(
                f"{self.name} task completed. Processed: {processed_count}, Errors: {error_count}"
            )
            return processed_count > 0 or error_count == 0
        except Exception as e:
            if self.conn:
                self.conn.rollback()
            logger.error(f"Failed to run {self.name} task: {str(e)}", exc_info=True)
            return False

    def _process_service_score(self, service_id: int, job_id: int) -> None:
        """
        Process a single service score recalculation job.

        Args:
            service_id: The service ID to recalculate scores for
            job_id: The job ID to mark as processed
        """
        if not self.conn:
            raise ValueError("No database connection available")
        with self.conn.cursor() as cursor:
            # 1. Calculate privacy score
            cursor.execute("SELECT calculate_privacy_score(%s)", [service_id])
            privacy_score = cursor.fetchone()[0]
            # 2. Calculate trust score
            cursor.execute("SELECT calculate_trust_score(%s)", [service_id])
            trust_score = cursor.fetchone()[0]
            # 3. Calculate overall score
            cursor.execute(
                "SELECT calculate_overall_score(%s, %s, %s)",
                [service_id, privacy_score, trust_score],
            )
            overall_score = cursor.fetchone()[0]
            # 4. Check for verification status and cap score if needed
            cursor.execute(
                'SELECT "verificationStatus" FROM "Service" WHERE id = %s',
                [service_id],
            )
            result = cursor.fetchone()
            if result is None:
                logger.warning(
                    f"Service with ID {service_id} not found. Deleting job {job_id}."
                )
                # Delete the job if the service is gone
                cursor.execute(
                    """
                    DELETE FROM "ServiceScoreRecalculationJob"
                    WHERE id = %s
                    """,
                    [job_id],
                )
                self.conn.commit()
                return  # Skip the rest of the processing for this job
            status = result[0]
            # Failed verification clamps the overall score into [0, 3].
            if status == "VERIFICATION_FAILED":
                if overall_score > 3:
                    overall_score = 3
                elif overall_score < 0:
                    overall_score = 0
            # 5. Update the service with recalculated scores
            cursor.execute(
                """
                UPDATE "Service"
                SET "privacyScore" = %s, "trustScore" = %s, "overallScore" = %s
                WHERE id = %s
                """,
                [privacy_score, trust_score, overall_score, service_id],
            )
            # 6. Mark the job as processed
            cursor.execute(
                """
                UPDATE "ServiceScoreRecalculationJob"
                SET "processedAt" = NOW()
                WHERE id = %s
                """,
                [job_id],
            )
        # Commit the transaction
        if self.conn:
            self.conn.commit()

    def _queue_recalc_jobs(self, services) -> int:
        """
        Queue (or reset) one recalculation job per service row.

        Shared by recalculate_all_services and recalculate_for_attribute,
        which previously duplicated this loop.

        Args:
            services: Sequence of rows whose first column is the service id.

        Returns:
            Number of jobs successfully queued; per-service failures are
            logged and rolled back without aborting the loop.
        """
        inserted_count = 0
        for service in services:
            svc_id = service[0]
            try:
                if self.conn:
                    with self.conn.cursor() as cursor:
                        # Upsert: a pending job per service, reset if it exists.
                        cursor.execute(
                            """
                            INSERT INTO "ServiceScoreRecalculationJob" ("serviceId", "createdAt", "processedAt")
                            VALUES (%s, NOW(), NULL)
                            ON CONFLICT ("serviceId") DO UPDATE
                            SET "processedAt" = NULL, "createdAt" = NOW()
                            """,
                            [svc_id],
                        )
                    self.conn.commit()
                    inserted_count += 1
            except Exception as e:
                if self.conn:
                    self.conn.rollback()
                logger.error(f"Error queueing job for service {svc_id}: {str(e)}")
        return inserted_count

    def recalculate_all_services(self) -> bool:
        """
        Recalculate scores for all active services.
        Useful for batch updates after attribute changes.

        Returns:
            bool: True if successful, False otherwise
        """
        logger.info("Starting recalculation for all active services.")
        if not self.conn:
            logger.error("No database connection available")
            return False
        try:
            # Get all active service IDs
            with self.conn.cursor() as cursor:
                cursor.execute(
                    """
                    SELECT id
                    FROM "Service"
                    WHERE "isActive" = TRUE
                    """
                )
                services = cursor.fetchall()
            if not services:
                logger.info("No active services found.")
                return True
            logger.info(f"Found {len(services)} active services to recalculate.")
            # Queue recalculation jobs for all services
            inserted_count = self._queue_recalc_jobs(services)
            logger.info(f"Successfully queued {inserted_count} recalculation jobs.")
            return True
        except Exception as e:
            if self.conn:
                self.conn.rollback()
            logger.error(f"Failed to queue recalculation jobs: {str(e)}", exc_info=True)
            return False

    def recalculate_for_attribute(self, attribute_id: int) -> bool:
        """
        Recalculate scores for all services associated with a specific attribute.

        Args:
            attribute_id: The attribute ID to recalculate scores for

        Returns:
            bool: True if successful, False otherwise
        """
        logger.info(
            f"Starting recalculation for services with attribute ID {attribute_id}."
        )
        if not self.conn:
            logger.error("No database connection available")
            return False
        try:
            # Get all services associated with this attribute
            with self.conn.cursor() as cursor:
                cursor.execute(
                    """
                    SELECT DISTINCT sa."serviceId"
                    FROM "ServiceAttribute" sa
                    WHERE sa."attributeId" = %s
                    """,
                    [attribute_id],
                )
                services = cursor.fetchall()
            if not services:
                logger.info(f"No services found with attribute ID {attribute_id}.")
                return True
            logger.info(
                f"Found {len(services)} services with attribute ID {attribute_id}."
            )
            # Queue recalculation jobs for all services with this attribute
            inserted_count = self._queue_recalc_jobs(services)
            logger.info(f"Successfully queued {inserted_count} recalculation jobs.")
            return True
        except Exception as e:
            if self.conn:
                self.conn.rollback()
            logger.error(f"Failed to queue recalculation jobs: {str(e)}", exc_info=True)
            return False

View File

@@ -0,0 +1,150 @@
"""
Task for retrieving Terms of Service (TOS) text.
"""
import hashlib
from typing import Any, Dict, Optional
import requests
from pyworker.database import TosReviewType, save_tos_review, update_kyc_level
from pyworker.tasks.base import Task
from pyworker.utils.ai import prompt_check_tos_review, prompt_tos_review
from pyworker.utils.crawl import fetch_markdown
class TosReviewTask(Task):
    """Task for retrieving Terms of Service (TOS) text."""

    def __init__(self):
        """Initialize the TOS review task."""
        super().__init__("tos_review")

    def run(self, service: Dict[str, Any]) -> Optional[TosReviewType]:
        """
        Review TOS text for a service.

        Args:
            service: A dictionary containing service information; must
                contain "id" and "name", and may contain
                "verificationStatus", "tosUrls", "tosReview", "kycLevel".

        Returns:
            The generated (or preserved) review dict, or None when the
            service is skipped, has no TOS URLs, or no review could be made.
        """
        service_id = service["id"]
        service_name = service["name"]
        verification_status = service.get("verificationStatus")
        # Only process verified or approved services
        if verification_status not in ["VERIFICATION_SUCCESS", "APPROVED"]:
            self.logger.info(
                f"Skipping TOS review for service: {service_name} (ID: {service_id}) - Status: {verification_status}"
            )
            return None
        tos_urls = service.get("tosUrls", [])
        if not tos_urls:
            self.logger.info(
                f"No TOS URLs found for service: {service_name} (ID: {service_id})"
            )
            return None
        self.logger.info(
            f"Reviewing TOS for service: {service_name} (ID: {service_id})"
        )
        self.logger.info(f"TOS URLs: {tos_urls}")
        review = self.get_tos_review(tos_urls, service.get("tosReview"))
        # Always update the processed timestamp, even if review is None
        save_tos_review(service_id, review)
        if review is None:
            self.logger.warning(
                f"TOS review could not be generated for service {service_name} (ID: {service_id})"
            )
            return None
        # Update the KYC level based on the review, when present
        if "kycLevel" in review:
            new_level = review["kycLevel"]
            old_level = service.get("kycLevel")
            # Update DB
            if update_kyc_level(service_id, new_level):
                msg = f"{service.get('slug', service_name)}: kycLevel {old_level} -> {new_level}"
                # Log to console
                self.logger.info(msg)
                # Send notification via ntfy
                # NOTE(review): hard-coded public ntfy topic — confirm it
                # shouldn't be a configuration value.
                try:
                    requests.post(
                        "https://ntfy.sh/knm-kyc-lvl-changes-knm", data=msg.encode()
                    )
                except requests.RequestException as e:
                    self.logger.error(
                        f"Failed to send ntfy notification for KYC level change: {e}"
                    )
        return review

    def get_tos_review(
        self, tos_urls: list[str], current_review: Optional[TosReviewType]
    ) -> Optional[TosReviewType]:
        """
        Get TOS review from a list of URLs.

        Args:
            tos_urls: List of TOS URLs to check
            current_review: Current review data from the database

        Returns:
            The review for the first URL whose content is complete and not
            previously processed (with "contentHash" set); the unchanged
            *current_review* when every URL was skipped as already seen;
            otherwise None.
        """
        # True only while every URL so far was skipped as already processed.
        all_skipped = True
        for tos_url in tos_urls:
            # NOTE(review): f-string here is a no-op copy of tos_url.
            api_url = f"{tos_url}"
            self.logger.info(f"Fetching TOS from URL: {api_url}")
            content = fetch_markdown(api_url)
            if not content:
                self.logger.warning(
                    f"Failed to retrieve TOS content for URL: {tos_url}"
                )
                # NOTE(review): a failed fetch clears all_skipped, so a
                # single unreachable URL makes this method return None
                # instead of preserving current_review — confirm intended.
                all_skipped = False
                continue
            # Hash the content to avoid repeating the same content
            content_hash = hashlib.sha256(content.encode()).hexdigest()
            self.logger.info(f"Content hash: {content_hash}")
            # Skip processing if we've seen this content before
            if current_review and current_review.get("contentHash") == content_hash:
                self.logger.info(
                    f"Skipping already processed TOS content with hash: {content_hash}"
                )
                continue
            all_skipped = False
            # Skip incomplete TOS content
            check = prompt_check_tos_review(content)
            if not check or not check["isComplete"]:
                continue
            # Query OpenAI to summarize the content
            review = prompt_tos_review(content)
            if review:
                review["contentHash"] = content_hash
                return review
        if all_skipped:
            return current_review
        return None

View File

@@ -0,0 +1,134 @@
"""
Task for summarizing comments and getting overal sentiment
"""
import json
from datetime import datetime
from typing import Any, Dict, Optional
from pyworker.database import (
CommentSentimentSummaryType,
get_comments,
get_max_comment_updated_at,
save_user_sentiment,
)
from pyworker.tasks.base import Task
from pyworker.utils.ai import (
prompt_comment_sentiment_summary,
)
class DateTimeEncoder(json.JSONEncoder):
    """json.JSONEncoder that renders datetime values as ISO-8601 strings."""

    def default(self, o: Any) -> Any:
        # Only datetimes get special handling; the base implementation
        # raises TypeError for anything else it cannot serialize.
        if not isinstance(o, datetime):
            return super().default(o)
        return o.isoformat()
class UserSentimentTask(Task):
    """Task for summarizing comments and getting overall sentiment.

    Reads the approved comments of a service, asks the AI for a sentiment
    summary, and persists it together with the timestamp of the newest
    comment so subsequent runs can skip services with no new activity.
    """

    def __init__(self):
        """Initialize the comment sentiment summary task."""
        super().__init__("comment_sentiment_summary")

    def run(self, service: Dict[str, Any]) -> Optional[CommentSentimentSummaryType]:
        """
        Run the comment sentiment summary task.
        Skips execution if no new comments are found since the last run.
        Clears sentiment if all comments are removed.

        Args:
            service: Service record. Must contain "id" and "name"; may carry
                "userSentimentAt" (datetime or ISO-8601 string) and
                "userSentiment" (the previously stored summary, if any).

        Returns:
            The newly generated sentiment summary, or None when the run was
            skipped, cleared a stale sentiment, or failed.
        """
        service_id = service["id"]
        service_name = service["name"]
        current_user_sentiment_at: Optional[datetime] = service.get("userSentimentAt")
        # The DB layer may return the timestamp as an ISO string; normalize it.
        # A trailing "Z" is mapped to "+00:00" because datetime.fromisoformat
        # does not accept the "Z" suffix on all supported Python versions.
        if isinstance(current_user_sentiment_at, str):
            try:
                current_user_sentiment_at = datetime.fromisoformat(
                    str(current_user_sentiment_at).replace("Z", "+00:00")
                )
            except ValueError:
                self.logger.warning(
                    f"Could not parse userSentimentAt string '{current_user_sentiment_at}' for service {service_id}. Treating as None."
                )
                current_user_sentiment_at = None
        # Get the timestamp of the most recent approved comment
        max_comment_updated_at = get_max_comment_updated_at(
            service_id, status="APPROVED"
        )
        self.logger.info(
            f"Service {service_name} (ID: {service_id}): Current userSentimentAt: {current_user_sentiment_at}, Max approved comment updatedAt: {max_comment_updated_at}"
        )
        if max_comment_updated_at is None:
            self.logger.info(
                f"No approved comments found for service {service_name} (ID: {service_id})."
            )
            # If there was a sentiment before and now no comments, clear it.
            if service.get("userSentiment") is not None:
                self.logger.info(
                    f"Clearing existing sentiment for service {service_name} (ID: {service_id}) as no approved comments are present."
                )
                save_user_sentiment(service_id, None, None)
            return None
        # Nothing changed since the last analysis — skip the (paid) AI call.
        if (
            current_user_sentiment_at is not None
            and max_comment_updated_at <= current_user_sentiment_at
        ):
            self.logger.info(
                f"No new approved comments for service {service_name} (ID: {service_id}) since last sentiment analysis ({current_user_sentiment_at}). Skipping."
            )
            # Optionally, return the existing sentiment if needed:
            # existing_sentiment = service.get("userSentiment")
            # return existing_sentiment if isinstance(existing_sentiment, dict) else None
            return None
        # Query the approved comments for the service
        # get_comments defaults to status="APPROVED"
        comments = get_comments(service_id)
        self.logger.info(
            f"Found {len(comments)} comments for service {service_name} (ID: {service_id}) to process."
        )
        if not comments:
            # This case could occur if max_comment_updated_at found a comment,
            # but get_comments filters it out or it was deleted just before get_comments ran.
            self.logger.info(
                f"No comments to process for service {service_name} (ID: {service_id}) after fetching (e.g. due to filtering or deletion)."
            )
            if service.get("userSentiment") is not None:
                self.logger.info(
                    f"Clearing existing sentiment for service {service_name} (ID: {service_id}) as no processable comments found."
                )
                # Use max_comment_updated_at as the reference point for when this check was made.
                save_user_sentiment(service_id, None, max_comment_updated_at)
            return None
        # Query OpenAI to get the sentiment summary
        try:
            sentiment_summary = prompt_comment_sentiment_summary(
                json.dumps(comments, cls=DateTimeEncoder)
            )
        except Exception as e:
            self.logger.error(
                f"Failed to generate sentiment summary for service {service_name} (ID: {service_id}): {e}"
            )
            return None
        if not sentiment_summary:  # Defensive check if prompt could return None/empty
            self.logger.warning(
                f"Sentiment summary generation returned empty for service {service_name} (ID: {service_id})."
            )
            return None
        # Save the sentiment summary to the database, using max_comment_updated_at
        # so the skip check above stays in sync with the newest processed comment.
        save_user_sentiment(service_id, sentiment_summary, max_comment_updated_at)
        self.logger.info(
            f"Successfully processed and saved user sentiment for service {service_name} (ID: {service_id})."
        )
        return sentiment_summary

View File

@@ -0,0 +1 @@
"""Utility modules for the pyworker package."""

View File

@@ -0,0 +1,261 @@
import os
import time
from typing import Any, Dict, List, Literal, TypedDict, cast
from json_repair import repair_json
from openai import OpenAI, OpenAIError
from openai.types.chat import ChatCompletionMessageParam
from pyworker.database import (
CommentModerationType,
CommentSentimentSummaryType,
TosReviewType,
)
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)

# Module-wide OpenAI client. Base URL and key come from the environment so the
# worker can target any OpenAI-compatible endpoint (proxy, gateway, etc.).
client = OpenAI(
    base_url=os.environ.get("OPENAI_BASE_URL"),
    api_key=os.environ.get("OPENAI_API_KEY"),
)
def query_openai_json(
    messages: List[ChatCompletionMessageParam],
    model: str = os.environ.get("OPENAI_MODEL", "deepseek-chat-cheaper"),
) -> Dict[str, Any]:
    """Send a chat-completion request and parse the reply as a JSON object.

    Retries transient failures with exponential backoff (30s, 60s, ...).
    Malformed JSON replies are repaired with json_repair before parsing.

    Args:
        messages: Chat messages to send to the model.
        model: Model identifier; the default is read from OPENAI_MODEL at
            import time.

    Returns:
        The model's response parsed into a dict.

    Raises:
        OpenAIError, ValueError, TypeError: The last error seen once all
            retry attempts are exhausted.
    """
    import json  # stdlib; used to finish parsing after json_repair

    # Clamp to at least one attempt: OPENAI_RETRY <= 0 previously made the
    # loop body never run and the trailing `raise last_error` raise None,
    # which is itself a TypeError at runtime.
    max_retries = max(1, int(os.environ.get("OPENAI_RETRY", 3)))
    retry_delay = 30
    last_error = None
    for attempt in range(max_retries):
        try:
            completion = client.chat.completions.create(
                model=model,
                messages=messages,
            )
            content = completion.choices[0].message.content
            if content is None:
                raise ValueError("OpenAI response content is None")
            logger.debug(f"Raw AI response content: {content}")
            try:
                # repair_json may return either a parsed object or a
                # repaired JSON string; normalize to a dict.
                result = repair_json(content)
                if isinstance(result, str):
                    result = json.loads(result)
                if not isinstance(result, dict):
                    logger.error(
                        f"Repaired JSON is not a dictionary. Type: {type(result)}, Value: {result}"
                    )
                    raise TypeError(
                        f"Expected a dictionary from AI response, but got {type(result)}"
                    )
                return result
            except Exception as e:
                logger.error(f"Failed to process JSON response: {e}")
                logger.error(f"Raw content was: {content}")
                raise
        except (OpenAIError, ValueError, TypeError) as e:
            last_error = e
            if attempt == max_retries - 1:  # Last attempt
                logger.error(f"Failed after {max_retries} attempts. Last error: {e}")
                raise last_error
            logger.warning(
                f"Attempt {attempt + 1} failed: {e}. Retrying in {retry_delay} seconds..."
            )
            time.sleep(retry_delay)
            retry_delay *= 2  # Exponential backoff
    # Unreachable: the final attempt either returned or re-raised above.
    raise RuntimeError("query_openai_json exhausted retries without raising")
# Category for why a fetch could not produce usable content.
ReasonType = Literal["js_required", "firewalled", "other"]


class TosReviewCheck(TypedDict):
    """Result of the completeness pre-check run on fetched TOS content."""

    # True when the fetched page looks like real, unblocked policy content.
    isComplete: bool
def prompt_check_tos_review(content: str) -> TosReviewCheck:
    """Ask the model whether fetched TOS content is complete and usable."""
    conversation: List[ChatCompletionMessageParam] = [
        {"role": "system", "content": PROMPT_CHECK_TOS_REVIEW},
        {"role": "user", "content": content},
    ]
    # A cheaper model is sufficient for this boolean pre-check.
    response = query_openai_json(conversation, model="openai/gpt-4.1-mini")
    return cast(TosReviewCheck, response)
def prompt_tos_review(content: str) -> TosReviewType:
    """Summarize a TOS document into a structured TosReviewType via the AI."""
    conversation: List[ChatCompletionMessageParam] = [
        {"role": "system", "content": PROMPT_TOS_REVIEW},
        {"role": "user", "content": content},
    ]
    return cast(TosReviewType, query_openai_json(conversation))
def prompt_comment_sentiment_summary(content: str) -> CommentSentimentSummaryType:
    """Summarize user comments into a CommentSentimentSummaryType via the AI."""
    conversation: List[ChatCompletionMessageParam] = [
        {"role": "system", "content": PROMPT_COMMENT_SENTIMENT_SUMMARY},
        {"role": "user", "content": content},
    ]
    return cast(CommentSentimentSummaryType, query_openai_json(conversation))
def prompt_comment_moderation(content: str) -> CommentModerationType:
    """Run automated moderation on a comment and return the structured verdict."""
    conversation: List[ChatCompletionMessageParam] = [
        {"role": "system", "content": PROMPT_COMMENT_MODERATION},
        {"role": "user", "content": content},
    ]
    return cast(CommentModerationType, query_openai_json(conversation))
# System prompt for the cheap pre-check that decides whether fetched page
# content is genuine, complete TOS/policy text (vs. blocked/JS-gated pages).
# Fixes the garbled instruction "Determine if the page is a complete." and the
# missing conjunction before "irrelevant".
PROMPT_CHECK_TOS_REVIEW = """
You will receive the Markdown content of a website page. Determine if the page is complete. If the page was blocked (e.g. by Cloudflare or similar), incomplete (e.g. requires JavaScript), or irrelevant (login/signup/CAPTCHA), set isComplete to false.
If the page contains meaningful, coherent, valid service information or policy content, with no obvious blocking or truncation, set isComplete to true.
Return only this JSON and nothing else:
{"isComplete": true} or {"isComplete": false}
"""
# System prompt for the full TOS review. Two internal contradictions fixed:
# the KYC scale now reads "0 to 4" (it lists Level 0 through Level 4 and the
# schema declares 0|1|2|3|4), and the stale "rating is a number between 0 and
# 2" sentence is replaced to match the schema's 'negative'|'neutral'|'positive'.
PROMPT_TOS_REVIEW = """
You are a privacy analysis AI tasked with reviewing Terms of Service documents.
Your goal is to identify key information about data collection, privacy implications, and user rights.
You are a privacy advocate and you are looking for the most important information for the user in regards to privacy, kyc, self-sovereignity, anonymity, etc.
Analyze the provided Terms of Service and extract the following information:
1. KYC level is on a scale of 0 to 4:
- **Guaranteed no KYC (Level 0)**: Terms explicitly state KYC will never be requested.
- **No KYC mention (Level 1)**: No mention of current or future KYC requirements. The document does not mention KYC at all.
- **KYC on authorities request (Level 2)**: No routine KYC, but may share data, block funds or reject transactions. Cooperates with authorities.
- **Shotgun KYC (Level 3)**: May request KYC and block funds based on automated transaction flagging system. It is not mandatory by default, but can be requested at any time, for any reason.
- **Mandatory KYC (Level 4)**: Required for key features or for user registration.
2. Overall summary of the terms of service, must be concise and to the point, no more than 250 characters. Use markdown formatting to highlight the most important information. Plain english.
3. Complexity of the terms of service text for a non-technical user, must be a string of 'low', 'medium', 'high'.
4. 'highlights': The important bits of information from the ToS document for the user to know. Always related to privacy, kyc, self-sovereignity, anonymity, custody, censorship resistance, etc. No need to mention these topics, just the important bits of information from the ToS document.
- important things to look for: automated transaction scanning, rejection or block of funds, refund policy (does it require KYC?), data sharing, logging, kyc requirements, etc.
- if No reference to KYC or proof of funds checks is mentioned or required, you don't need to mention it in the highlights, it is already implied from the kycLevel.
- Try to avoid obvious statements that can be infered from other, more important, highlights. Keep it short and concise only with the most important information for the user.
- You must strictly adhere to the document information, do not make up or infer information, do not make assumptions, do not add any information that is not explicitly stated in the document.
Format your response as a valid JSON object with the following structure:
type TosReview = {
kycLevel: 0 | 1 | 2 | 3 | 4
/** Less than 200 characters */
summary: MarkdownString
complexity: 'high' | 'low' | 'medium'
highlights: {
/** Very short title, max 2-3 words */
title: string
/** Less than 200 characters. Highlight the most important information with markdown formatting. */
content: MarkdownString
/** In regards to KYC, Privacy, Anonymity, Self-Sovereignity, etc. */
/** anything that could harm the user's privacy, identity, self-sovereignity or anonymity is negative, anything that otherwise helps is positive. else it is neutral. */
rating: 'negative' | 'neutral' | 'positive'
}[] // max 8 highlights, try to provide at least 3.
}
The rating of each highlight must be one of 'negative', 'neutral' or 'positive', as described in the schema above.
Focus on the most important information for the user. Be concise and thorough, and make sure your output is properly formatted JSON.
"""
# System prompt for comment sentiment summarization. The word limit was
# stated as both 100 and 150 words in the same prompt; unified to 100 to
# match the schema comment ("Concise, 100 words max").
PROMPT_COMMENT_SENTIMENT_SUMMARY = """
You will be given a list of user comments to a service.
Your task is to summarize the comments in a way that is easy to understand and to the point.
The summary should be concise and to the point, no more than 100 words. Keep it short and concise.
Use markdown formatting to highlight in bold the most important information. Only bold is allowed.
You must format your response as a valid JSON object with the following structure:
interface CommentSummary {
summary: string; // Concise, 100 words max
sentiment: 'positive'|'negative'|'neutral';
whatUsersLike: string[]; // Concise, 2-3 words max
whatUsersDislike: string[]; // Concise, 2-3 words max
}
Always avoid repeating information in the list of what users like or dislike. Also, make sure you keep the summary short and concise, no more than 100 words. Ignore irrelevant comments. Make an item for each like/dislike, avoid something like 'No logs / Audited', it should be 'No logs' and 'Audited' as separate items.
You must return a valid raw JSON object, without any other text or formatting.
"""
# System prompt for automated comment moderation. The model must return a raw
# JSON object matching the CommentModeration interface embedded below.
# NOTE(review): "kycnot.mes" on the first line looks like a lost apostrophe
# ("kycnot.me's") — confirm before changing, since this text is sent verbatim
# to the model.
PROMPT_COMMENT_MODERATION = """
You are kycnot.mes comment moderation API. Your sole responsibility is to analyze user comments on directory listings (cryptocurrency, anonymity, privacy services) and decide, in strict accordance with the schema and rules below, whether each comment is spam, needs admin review, and its overall quality for our platform. Output ONLY a plain, valid JSON object, with NO markdown, extra text, annotations, or code blocks.
## Output Schema
interface CommentModeration {
isSpam: boolean;
requiresAdminReview: boolean;
contextNote: string;
internalNote: string;
commentQuality: 0|1|2|3|4|5|6|7|8|9|10;
}
## FIELD EXPLANATION
- isSpam: Mark true if the comment is spam, irrelevant, repetitive, misleading, self-promoting, or fails minimum quality standards.
- requiresAdminReview: Mark true ONLY if the comment reports: service non-functionality, listing inaccuracies, clear scams, exit-scams, critical policy changes, malfunctions, service outages, or sensitive platform issues. If true, always add internalNote to explain why you made this decision.
- contextNote: Optional, visible to users. Add ONLY when clarification or warning is necessary―e.g., unsubstantiated claims or potential spam.
- internalNote: Internal note that is not visible to users. Example: explain why you marked a comment as spam or low quality. You should leave this empty if no relevant information would be added.
- commentQuality: 0 (lowest) to 10 (highest). Rate purely on informativeness, relevance, helpfulness, and evidence.
## STRICT MODERATION RULES
- Reject ALL comments that are generic, extremely short, or meaningless on their own, unless replying with added value or genuine context. Examples: "hey", "hello", "hi", "ok", "good", "great", "thanks", "test", "scam"—these are LOW quality and must generally be flagged as spam or rated VERY low, unless context justifies.
- Exception: Replies allowed if they significantly clarify, elaborate, or engage with a previous comment, and ADD new value.
- Comments must provide context, detail, experience, a clear perspective, or evidence. Approve only if the comment adds meaningful insight to the listings discussion.
- Mark as spam:
- Meaningless, contextless, very short comments (“hi”, “hey”).
- Comments entirely self-promotional, containing excessive emojis, special characters, random text, or multiple unrelated links.
- Use the surrounding context (such as parent comments, service description, previous discussions) to evaluate if a short comment is a valid reply, or still too low quality to approve.
- Rate "commentQuality" based on:
- 0-2: Meaningless, off-topic, one-word, no value.
- 3-5: Vague, minimal, only slightly relevant, lacking evidence.
- 6-8: Detailed, relevant, some insight or evidence, well-explained.
- 9-10: Exceptionally thorough, informative, well-documented experience.
- For claims (positive or negative) without evidence, add a warning context note: "This comment makes claims without supporting evidence."
- For extended, unstructured, or incoherent text (e.g. spam, or AI-generated nonsense), mark as spam.
## EXAMPLES
- "hello":
isSpam: true, internalNote: "Comment provides no value or context.", commentQuality: 0
- "works":
isSpam: true, internalNote: "Comment too short and contextless.", commentQuality: 0
- "Service did not work on my device—got error 503.":
isSpam: false, requiresAdminReview: true, commentQuality: 7
- "Scam!":
isSpam: true, internalNote: "Unsubstantiated, one-word negative claim.", commentQuality: 0, contextNote: "This is a one-word claim without details or evidence."
- "Instant transactions, responsive customer support. Used for 6 months.":
isSpam: false, commentQuality: 8
## INSTRUCTIONS
- Always evaluate if a comment stands on its own, adds value, and has relevance to the listing. Reject one-word, contextless, or “drive-by” comments.
- Replies: Only approve short replies if they directly answer or clarify something above and ADD useful new information.
Format your output EXACTLY as a raw JSON object using the schema, with NO extra formatting, markdown, or text.
"""

View File

@@ -0,0 +1,31 @@
"""
HTTP utilities for the pyworker package.
"""
from typing import Optional
import requests
from pyworker.utils.app_logging import setup_logging
logger = setup_logging(__name__)  # module-level logger shared by helpers in this file
def fetch_url(url: str, timeout: int = 30) -> Optional[str]:
    """
    Fetch content from a URL.

    Args:
        url: The URL to fetch.
        timeout: Request timeout in seconds.

    Returns:
        The response body as text, or None when the request fails
        (connection error, timeout, or non-2xx status).
    """
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
    except requests.RequestException as e:
        logger.error(f"Error fetching URL {url}: {e}")
        return None
    return resp.text

View File

@@ -0,0 +1,36 @@
"""
Logging utilities for the pyworker package.
"""
import logging
import sys
from pyworker.config import config
def setup_logging(name: str = "pyworker") -> logging.Logger:
"""
Set up logging for the application.
Args:
name: The name of the logger.
Returns:
A configured logger instance.
"""
logger = logging.getLogger(name)
# Set log level from configuration
log_level = getattr(logging, config.LOG_LEVEL.upper(), logging.INFO)
logger.setLevel(log_level)
# Create console handler
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(log_level)
# Create formatter
formatter = logging.Formatter(config.LOG_FORMAT)
handler.setFormatter(formatter)
# Add handler to logger
logger.addHandler(handler)
return logger

View File

@@ -0,0 +1,100 @@
import argparse
import os
import time
import requests
from dotenv import load_dotenv
from pyworker.utils.app_logging import setup_logging
from typing import Any
logger = setup_logging(__name__)

# Load environment variables from .env file
load_dotenv()

# Include API token header if set; an empty token means no auth header is sent.
CRAWL4AI_API_TOKEN = os.environ.get("CRAWL4AI_API_TOKEN", "")
HEADERS = (
    {"Authorization": f"Bearer {CRAWL4AI_API_TOKEN}"} if CRAWL4AI_API_TOKEN else {}
)
# Crawl4AI service endpoint and polling behavior (both durations in seconds).
CRAWL4AI_BASE_URL = os.environ.get("CRAWL4AI_BASE_URL", "http://crawl4ai:11235")
CRAWL4AI_TIMEOUT = int(os.environ.get("CRAWL4AI_TIMEOUT", 300))
CRAWL4AI_POLL_INTERVAL = int(os.environ.get("CRAWL4AI_POLL_INTERVAL", 2))
def fetch_fallback(url: str) -> str:
    """Fetch a page as readable text via the r.jina.ai reader service.

    Args:
        url: The page to fetch; must be non-empty.

    Returns:
        The reader service's response body.

    Raises:
        ValueError: If url is empty.
        requests.HTTPError: If the reader responds with an error status.
    """
    if not url:
        raise ValueError("URL must not be empty")
    logger.info(f"Fetching fallback for {url}")
    target = f"https://r.jina.ai/{url.lstrip('/')}"
    resp = requests.get(target, timeout=80)
    resp.raise_for_status()
    return resp.text
def fetch_markdown(url: str, wait_for_dynamic_content: bool = True) -> str:
    """Crawl a URL via Crawl4AI and return its content as markdown.

    Falls back to the r.jina.ai reader when no API token is configured, or
    when the Crawl4AI request fails, times out, or reports a failed task.

    Args:
        url: The URL to crawl.
        wait_for_dynamic_content: When True, ask Crawl4AI to wait for
            images/network idle and scroll the page to trigger lazy loading.

    Returns:
        A text blob containing the URL, page metadata and markdown content.
    """
    if not CRAWL4AI_API_TOKEN:
        return fetch_fallback(url)
    try:
        payload: dict[str, Any] = {"urls": url}
        if wait_for_dynamic_content:
            # According to Crawl4AI docs, wait_for_images=True also waits for
            # network idle state, which is helpful for JS-generated content.
            # Adding scan_full_page and scroll_delay helps trigger lazy-loaded
            # content.
            payload["config"] = {
                "wait_for_images": True,
                "scan_full_page": True,
                "scroll_delay": 0.5,
                "magic": True,
            }
        response = requests.post(
            f"{CRAWL4AI_BASE_URL}/crawl",
            json=payload,
            headers=HEADERS,
        )
        response.raise_for_status()
        task_id = response.json().get("task_id")
        if not task_id:
            # Without a task id the poll loop below would query a bogus URL.
            raise RuntimeError("crawl4ai did not return a task_id")
        start_time = time.time()
        # Poll the task endpoint until completion, failure, or timeout.
        while True:
            if time.time() - start_time > CRAWL4AI_TIMEOUT:
                raise TimeoutError(f"Task {task_id} timeout")
            status_resp = requests.get(
                f"{CRAWL4AI_BASE_URL}/task/{task_id}",
                headers=HEADERS,
            )
            status_resp.raise_for_status()
            status = status_resp.json()
            state = status.get("status")
            if state == "completed":
                markdown = status["result"].get("markdown", "")
                metadata = status["result"].get("metadata", {})
                return f"""
URL: {url}
Page Metadata: `{metadata}`
Markdown Content
----------------
{markdown}
"""
            if state == "failed":
                # Previously a failed task was polled until CRAWL4AI_TIMEOUT
                # elapsed; bail out immediately and use the fallback instead.
                raise RuntimeError(f"Task {task_id} failed")
            time.sleep(CRAWL4AI_POLL_INTERVAL)
    except (requests.exceptions.RequestException, TimeoutError, RuntimeError):
        return fetch_fallback(url)
def main():
    """CLI entry point: crawl one URL and print its markdown content."""
    parser = argparse.ArgumentParser(
        description="Crawl a URL and print its markdown content."
    )
    parser.add_argument("--url", required=True, help="The URL to crawl")
    args = parser.parse_args()

    print(f"Crawling {args.url}...")
    content = fetch_markdown(args.url)
    print("\n--- Markdown Content ---")
    print(content)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1 @@
"""Test package for the pyworker."""

View File

@@ -0,0 +1,74 @@
"""
Tests for task modules.
"""
import unittest
from unittest.mock import patch, MagicMock
from typing import Dict, Any
from pyworker.tasks import TosReviewTask
class TestTosRetrievalTask(unittest.TestCase):
    """Tests for the TOS retrieval task.

    NOTE(review): these tests patch `pyworker.tasks.tos_review.fetch_url` and
    expect `run` to return a dict of raw contents, which looks like an older
    contract of TosReviewTask (the current implementation appears to use
    fetch_markdown plus an AI review step) — confirm against the task code.
    """

    def setUp(self):
        """Set up test fixtures."""
        # Fixture uses bare path fragments as tosUrls; the task is expected to
        # prefix them with the fetcher's base URL.
        self.task = TosReviewTask()
        self.service = {
            'id': 1,
            'name': 'Test Service',
            'tosUrls': ['test1', 'test2']
        }

    @patch('pyworker.tasks.tos_review.fetch_url')
    def test_run_success(self, mock_fetch_url: MagicMock) -> None:
        """Test successful TOS retrieval."""
        # Mock the fetch_url function to return test responses
        mock_fetch_url.side_effect = ["Test TOS 1", "Test TOS 2"]
        # Run the task
        result = self.task.run(self.service)
        # Check that the function was called twice with the correct arguments
        self.assertEqual(mock_fetch_url.call_count, 2)
        mock_fetch_url.assert_any_call('https://r.jina.ai/test1')
        mock_fetch_url.assert_any_call('https://r.jina.ai/test2')
        # Check that the result contains the expected content
        self.assertEqual(result, {
            'test1': 'Test TOS 1',
            'test2': 'Test TOS 2'
        })

    @patch('pyworker.tasks.tos_review.fetch_url')
    def test_run_failure(self, mock_fetch_url: MagicMock) -> None:
        """Test TOS retrieval failure."""
        # Mock the fetch_url function to return None (failure)
        mock_fetch_url.return_value = None
        # Run the task
        result = self.task.run(self.service)
        # Check that the function was called twice
        self.assertEqual(mock_fetch_url.call_count, 2)
        # Check that the result is None since all fetches failed
        self.assertIsNone(result)

    def test_run_no_urls(self):
        """Test TOS retrieval with no URLs."""
        # Create a service with no TOS URLs
        service_no_urls: Dict[str, Any] = {
            'id': 2,
            'name': 'Service With No TOS',
            'tosUrls': []
        }
        # Run the task
        result = self.task.run(service_no_urls)
        # Check that the result is None
        self.assertIsNone(result)


if __name__ == '__main__':
    unittest.main()

414
pyworker/uv.lock generated Normal file
View File

@@ -0,0 +1,414 @@
version = 1
revision = 1
requires-python = ">=3.13"
[[package]]
name = "annotated-types"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
]
[[package]]
name = "anyio"
version = "4.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "idna" },
{ name = "sniffio" },
]
sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916 },
]
[[package]]
name = "certifi"
version = "2025.1.31"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 },
]
[[package]]
name = "charset-normalizer"
version = "3.4.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 },
{ url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 },
{ url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 },
{ url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 },
{ url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 },
{ url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 },
{ url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 },
{ url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 },
{ url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 },
{ url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 },
{ url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 },
{ url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 },
{ url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 },
{ url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 },
]
[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
]
[[package]]
name = "croniter"
version = "6.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "python-dateutil" },
{ name = "pytz" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ad/2f/44d1ae153a0e27be56be43465e5cb39b9650c781e001e7864389deb25090/croniter-6.0.0.tar.gz", hash = "sha256:37c504b313956114a983ece2c2b07790b1f1094fe9d81cc94739214748255577", size = 64481 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/07/4b/290b4c3efd6417a8b0c284896de19b1d5855e6dbdb97d2a35e68fa42de85/croniter-6.0.0-py2.py3-none-any.whl", hash = "sha256:2f878c3856f17896979b2a4379ba1f09c83e374931ea15cc835c5dd2eee9b368", size = 25468 },
]
[[package]]
name = "distro"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277 },
]
[[package]]
name = "h11"
version = "0.14.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 },
]
[[package]]
name = "httpcore"
version = "1.0.8"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "h11" },
]
sdist = { url = "https://files.pythonhosted.org/packages/9f/45/ad3e1b4d448f22c0cff4f5692f5ed0666658578e358b8d58a19846048059/httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad", size = 85385 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/8d/f052b1e336bb2c1fc7ed1aaed898aa570c0b61a09707b108979d9fc6e308/httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be", size = 78732 },
]
[[package]]
name = "httpx"
version = "0.28.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "certifi" },
{ name = "httpcore" },
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 },
]
[[package]]
name = "idna"
version = "3.10"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
]
[[package]]
name = "jiter"
version = "0.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1e/c2/e4562507f52f0af7036da125bb699602ead37a2332af0788f8e0a3417f36/jiter-0.9.0.tar.gz", hash = "sha256:aadba0964deb424daa24492abc3d229c60c4a31bfee205aedbf1acc7639d7893", size = 162604 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e7/1b/4cd165c362e8f2f520fdb43245e2b414f42a255921248b4f8b9c8d871ff1/jiter-0.9.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2764891d3f3e8b18dce2cff24949153ee30c9239da7c00f032511091ba688ff7", size = 308197 },
{ url = "https://files.pythonhosted.org/packages/13/aa/7a890dfe29c84c9a82064a9fe36079c7c0309c91b70c380dc138f9bea44a/jiter-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:387b22fbfd7a62418d5212b4638026d01723761c75c1c8232a8b8c37c2f1003b", size = 318160 },
{ url = "https://files.pythonhosted.org/packages/6a/38/5888b43fc01102f733f085673c4f0be5a298f69808ec63de55051754e390/jiter-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d8da8629ccae3606c61d9184970423655fb4e33d03330bcdfe52d234d32f69", size = 341259 },
{ url = "https://files.pythonhosted.org/packages/3d/5e/bbdbb63305bcc01006de683b6228cd061458b9b7bb9b8d9bc348a58e5dc2/jiter-0.9.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1be73d8982bdc278b7b9377426a4b44ceb5c7952073dd7488e4ae96b88e1103", size = 363730 },
{ url = "https://files.pythonhosted.org/packages/75/85/53a3edc616992fe4af6814c25f91ee3b1e22f7678e979b6ea82d3bc0667e/jiter-0.9.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2228eaaaa111ec54b9e89f7481bffb3972e9059301a878d085b2b449fbbde635", size = 405126 },
{ url = "https://files.pythonhosted.org/packages/ae/b3/1ee26b12b2693bd3f0b71d3188e4e5d817b12e3c630a09e099e0a89e28fa/jiter-0.9.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11509bfecbc319459647d4ac3fd391d26fdf530dad00c13c4dadabf5b81f01a4", size = 393668 },
{ url = "https://files.pythonhosted.org/packages/11/87/e084ce261950c1861773ab534d49127d1517b629478304d328493f980791/jiter-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f22238da568be8bbd8e0650e12feeb2cfea15eda4f9fc271d3b362a4fa0604d", size = 352350 },
{ url = "https://files.pythonhosted.org/packages/f0/06/7dca84b04987e9df563610aa0bc154ea176e50358af532ab40ffb87434df/jiter-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17f5d55eb856597607562257c8e36c42bc87f16bef52ef7129b7da11afc779f3", size = 384204 },
{ url = "https://files.pythonhosted.org/packages/16/2f/82e1c6020db72f397dd070eec0c85ebc4df7c88967bc86d3ce9864148f28/jiter-0.9.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:6a99bed9fbb02f5bed416d137944419a69aa4c423e44189bc49718859ea83bc5", size = 520322 },
{ url = "https://files.pythonhosted.org/packages/36/fd/4f0cd3abe83ce208991ca61e7e5df915aa35b67f1c0633eb7cf2f2e88ec7/jiter-0.9.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e057adb0cd1bd39606100be0eafe742de2de88c79df632955b9ab53a086b3c8d", size = 512184 },
{ url = "https://files.pythonhosted.org/packages/a0/3c/8a56f6d547731a0b4410a2d9d16bf39c861046f91f57c98f7cab3d2aa9ce/jiter-0.9.0-cp313-cp313-win32.whl", hash = "sha256:f7e6850991f3940f62d387ccfa54d1a92bd4bb9f89690b53aea36b4364bcab53", size = 206504 },
{ url = "https://files.pythonhosted.org/packages/f4/1c/0c996fd90639acda75ed7fa698ee5fd7d80243057185dc2f63d4c1c9f6b9/jiter-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:c8ae3bf27cd1ac5e6e8b7a27487bf3ab5f82318211ec2e1346a5b058756361f7", size = 204943 },
{ url = "https://files.pythonhosted.org/packages/78/0f/77a63ca7aa5fed9a1b9135af57e190d905bcd3702b36aca46a01090d39ad/jiter-0.9.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f0b2827fb88dda2cbecbbc3e596ef08d69bda06c6f57930aec8e79505dc17001", size = 317281 },
{ url = "https://files.pythonhosted.org/packages/f9/39/a3a1571712c2bf6ec4c657f0d66da114a63a2e32b7e4eb8e0b83295ee034/jiter-0.9.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062b756ceb1d40b0b28f326cba26cfd575a4918415b036464a52f08632731e5a", size = 350273 },
{ url = "https://files.pythonhosted.org/packages/ee/47/3729f00f35a696e68da15d64eb9283c330e776f3b5789bac7f2c0c4df209/jiter-0.9.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6f7838bc467ab7e8ef9f387bd6de195c43bad82a569c1699cb822f6609dd4cdf", size = 206867 },
]
[[package]]
name = "json-repair"
version = "0.41.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6d/6a/6c7a75a10da6dc807b582f2449034da1ed74415e8899746bdfff97109012/json_repair-0.41.1.tar.gz", hash = "sha256:bba404b0888c84a6b86ecc02ec43b71b673cfee463baf6da94e079c55b136565", size = 31208 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/10/5c/abd7495c934d9af5c263c2245ae30cfaa716c3c0cf027b2b8fa686ee7bd4/json_repair-0.41.1-py3-none-any.whl", hash = "sha256:0e181fd43a696887881fe19fed23422a54b3e4c558b6ff27a86a8c3ddde9ae79", size = 21578 },
]
[[package]]
name = "openai"
version = "1.74.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/75/86/c605a6e84da0248f2cebfcd864b5a6076ecf78849245af5e11d2a5ec7977/openai-1.74.0.tar.gz", hash = "sha256:592c25b8747a7cad33a841958f5eb859a785caea9ee22b9e4f4a2ec062236526", size = 427571 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a9/91/8c150f16a96367e14bd7d20e86e0bbbec3080e3eb593e63f21a7f013f8e4/openai-1.74.0-py3-none-any.whl", hash = "sha256:aff3e0f9fb209836382ec112778667027f4fd6ae38bdb2334bc9e173598b092a", size = 644790 },
]
[[package]]
name = "psycopg"
version = "3.2.6"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "tzdata", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/67/97/eea08f74f1c6dd2a02ee81b4ebfe5b558beb468ebbd11031adbf58d31be0/psycopg-3.2.6.tar.gz", hash = "sha256:16fa094efa2698f260f2af74f3710f781e4a6f226efe9d1fd0c37f384639ed8a", size = 156322 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d7/7d/0ba52deff71f65df8ec8038adad86ba09368c945424a9bd8145d679a2c6a/psycopg-3.2.6-py3-none-any.whl", hash = "sha256:f3ff5488525890abb0566c429146add66b329e20d6d4835662b920cbbf90ac58", size = 199077 },
]
[package.optional-dependencies]
binary = [
{ name = "psycopg-binary", marker = "implementation_name != 'pypy'" },
]
pool = [
{ name = "psycopg-pool" },
]
[[package]]
name = "psycopg-binary"
version = "3.2.6"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bf/32/3d06c478fd3070ac25a49c2e8ca46b6d76b0048fa9fa255b99ee32f32312/psycopg_binary-3.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54af3fbf871baa2eb19df96fd7dc0cbd88e628a692063c3d1ab5cdd00aa04322", size = 3852672 },
{ url = "https://files.pythonhosted.org/packages/34/97/e581030e279500ede3096adb510f0e6071874b97cfc047a9a87b7d71fc77/psycopg_binary-3.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ad5da1e4636776c21eaeacdec42f25fa4612631a12f25cd9ab34ddf2c346ffb9", size = 3936562 },
{ url = "https://files.pythonhosted.org/packages/74/b6/6a8df4cb23c3d327403a83406c06c9140f311cb56c4e4d720ee7abf6fddc/psycopg_binary-3.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7956b9ea56f79cd86eddcfbfc65ae2af1e4fe7932fa400755005d903c709370", size = 4499167 },
{ url = "https://files.pythonhosted.org/packages/e4/5b/950eafef61e5e0b8ddb5afc5b6b279756411aa4bf70a346a6f091ad679bb/psycopg_binary-3.2.6-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e2efb763188008cf2914820dcb9fb23c10fe2be0d2c97ef0fac7cec28e281d8", size = 4311651 },
{ url = "https://files.pythonhosted.org/packages/72/b9/b366c49afc854c26b3053d4d35376046eea9aebdc48ded18ea249ea1f80c/psycopg_binary-3.2.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b3aab3451679f1e7932270e950259ed48c3b79390022d3f660491c0e65e4838", size = 4547852 },
{ url = "https://files.pythonhosted.org/packages/ab/d4/0e047360e2ea387dc7171ca017ffcee5214a0762f74b9dd982035f2e52fb/psycopg_binary-3.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:849a370ac4e125f55f2ad37f928e588291a67ccf91fa33d0b1e042bb3ee1f986", size = 4261725 },
{ url = "https://files.pythonhosted.org/packages/e3/ea/a1b969804250183900959ebe845d86be7fed2cbd9be58f64cd0fc24b2892/psycopg_binary-3.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:566d4ace928419d91f1eb3227fc9ef7b41cf0ad22e93dd2c3368d693cf144408", size = 3850073 },
{ url = "https://files.pythonhosted.org/packages/e5/71/ec2907342f0675092b76aea74365b56f38d960c4c635984dcfe25d8178c8/psycopg_binary-3.2.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f1981f13b10de2f11cfa2f99a8738b35b3f0a0f3075861446894a8d3042430c0", size = 3320323 },
{ url = "https://files.pythonhosted.org/packages/d7/d7/0d2cb4b42f231e2efe8ea1799ce917973d47486212a2c4d33cd331e7ac28/psycopg_binary-3.2.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:36f598300b55b3c983ae8df06473ad27333d2fd9f3e2cfdb913b3a5aaa3a8bcf", size = 3402335 },
{ url = "https://files.pythonhosted.org/packages/66/92/7050c372f78e53eba14695cec6c3a91b2d9ca56feaf0bfe95fe90facf730/psycopg_binary-3.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0f4699fa5fe1fffb0d6b2d14b31fd8c29b7ea7375f89d5989f002aaf21728b21", size = 3440442 },
{ url = "https://files.pythonhosted.org/packages/5f/4c/bebcaf754189283b2f3d457822a3d9b233d08ff50973d8f1e8d51f4d35ed/psycopg_binary-3.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:afe697b8b0071f497c5d4c0f41df9e038391534f5614f7fb3a8c1ca32d66e860", size = 2783465 },
]
[[package]]
name = "psycopg-pool"
version = "3.2.6"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/cf/13/1e7850bb2c69a63267c3dbf37387d3f71a00fd0e2fa55c5db14d64ba1af4/psycopg_pool-3.2.6.tar.gz", hash = "sha256:0f92a7817719517212fbfe2fd58b8c35c1850cdd2a80d36b581ba2085d9148e5", size = 29770 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/47/fd/4feb52a55c1a4bd748f2acaed1903ab54a723c47f6d0242780f4d97104d4/psycopg_pool-3.2.6-py3-none-any.whl", hash = "sha256:5887318a9f6af906d041a0b1dc1c60f8f0dda8340c2572b74e10907b51ed5da7", size = 38252 },
]
[[package]]
name = "pydantic"
version = "2.11.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
{ name = "pydantic-core" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/10/2e/ca897f093ee6c5f3b0bee123ee4465c50e75431c3d5b6a3b44a47134e891/pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3", size = 785513 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b0/1d/407b29780a289868ed696d1616f4aad49d6388e5a77f567dcd2629dcd7b8/pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f", size = 443591 },
]
[[package]]
name = "pydantic-core"
version = "2.33.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/17/19/ed6a078a5287aea7922de6841ef4c06157931622c89c2a47940837b5eecd/pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df", size = 434395 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7a/24/eed3466a4308d79155f1cdd5c7432c80ddcc4530ba8623b79d5ced021641/pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a", size = 2033551 },
{ url = "https://files.pythonhosted.org/packages/ab/14/df54b1a0bc9b6ded9b758b73139d2c11b4e8eb43e8ab9c5847c0a2913ada/pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266", size = 1852785 },
{ url = "https://files.pythonhosted.org/packages/fa/96/e275f15ff3d34bb04b0125d9bc8848bf69f25d784d92a63676112451bfb9/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3", size = 1897758 },
{ url = "https://files.pythonhosted.org/packages/b7/d8/96bc536e975b69e3a924b507d2a19aedbf50b24e08c80fb00e35f9baaed8/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a", size = 1986109 },
{ url = "https://files.pythonhosted.org/packages/90/72/ab58e43ce7e900b88cb571ed057b2fcd0e95b708a2e0bed475b10130393e/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516", size = 2129159 },
{ url = "https://files.pythonhosted.org/packages/dc/3f/52d85781406886c6870ac995ec0ba7ccc028b530b0798c9080531b409fdb/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764", size = 2680222 },
{ url = "https://files.pythonhosted.org/packages/f4/56/6e2ef42f363a0eec0fd92f74a91e0ac48cd2e49b695aac1509ad81eee86a/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d", size = 2006980 },
{ url = "https://files.pythonhosted.org/packages/4c/c0/604536c4379cc78359f9ee0aa319f4aedf6b652ec2854953f5a14fc38c5a/pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4", size = 2120840 },
{ url = "https://files.pythonhosted.org/packages/1f/46/9eb764814f508f0edfb291a0f75d10854d78113fa13900ce13729aaec3ae/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde", size = 2072518 },
{ url = "https://files.pythonhosted.org/packages/42/e3/fb6b2a732b82d1666fa6bf53e3627867ea3131c5f39f98ce92141e3e3dc1/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e", size = 2248025 },
{ url = "https://files.pythonhosted.org/packages/5c/9d/fbe8fe9d1aa4dac88723f10a921bc7418bd3378a567cb5e21193a3c48b43/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd", size = 2254991 },
{ url = "https://files.pythonhosted.org/packages/aa/99/07e2237b8a66438d9b26482332cda99a9acccb58d284af7bc7c946a42fd3/pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f", size = 1915262 },
{ url = "https://files.pythonhosted.org/packages/8a/f4/e457a7849beeed1e5defbcf5051c6f7b3c91a0624dd31543a64fc9adcf52/pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40", size = 1956626 },
{ url = "https://files.pythonhosted.org/packages/20/d0/e8d567a7cff7b04e017ae164d98011f1e1894269fe8e90ea187a3cbfb562/pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523", size = 1909590 },
{ url = "https://files.pythonhosted.org/packages/ef/fd/24ea4302d7a527d672c5be06e17df16aabfb4e9fdc6e0b345c21580f3d2a/pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d", size = 1812963 },
{ url = "https://files.pythonhosted.org/packages/5f/95/4fbc2ecdeb5c1c53f1175a32d870250194eb2fdf6291b795ab08c8646d5d/pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c", size = 1986896 },
{ url = "https://files.pythonhosted.org/packages/71/ae/fe31e7f4a62431222d8f65a3bd02e3fa7e6026d154a00818e6d30520ea77/pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18", size = 1931810 },
]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "six" },
]
sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 },
]
[[package]]
name = "python-dotenv"
version = "1.1.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256 },
]
[[package]]
name = "pytz"
version = "2025.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225 },
]
[[package]]
name = "pyworker"
version = "0.1.0"
source = { editable = "." }
dependencies = [
{ name = "croniter" },
{ name = "json-repair" },
{ name = "openai" },
{ name = "psycopg", extra = ["binary", "pool"] },
{ name = "python-dotenv" },
{ name = "requests" },
]
[package.metadata]
requires-dist = [
{ name = "croniter", specifier = ">=6.0.0" },
{ name = "json-repair", specifier = ">=0.41.1" },
{ name = "openai", specifier = ">=1.74.0" },
{ name = "psycopg", extras = ["binary", "pool"], specifier = ">=3.2.6" },
{ name = "python-dotenv", specifier = ">=1.1.0" },
{ name = "requests", specifier = ">=2.32.3" },
]
[[package]]
name = "requests"
version = "2.32.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "charset-normalizer" },
{ name = "idna" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 },
]
[[package]]
name = "six"
version = "1.17.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 },
]
[[package]]
name = "sniffio"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
]
[[package]]
name = "tqdm"
version = "4.67.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 },
]
[[package]]
name = "typing-extensions"
version = "4.13.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 },
]
[[package]]
name = "typing-inspection"
version = "0.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125 },
]
[[package]]
name = "tzdata"
version = "2025.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839 },
]
[[package]]
name = "urllib3"
version = "2.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680 },
]

28
web/.dockerignore Normal file
View File

@@ -0,0 +1,28 @@
# build output
dist/
# generated types
.astro/
# dependencies
node_modules/
# local only data
local_data/
# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# environment variables
.env.production
# macOS-specific files
.DS_Store
# jetbrains setting folder
.idea/
*.example

13
web/.env.example Normal file
View File

@@ -0,0 +1,13 @@
DATABASE_URL="postgresql://kycnot:kycnot@localhost:3399/kycnot?schema=public"
REDIS_URL="redis://localhost:6379"
SOURCE_CODE_URL="https://github.com"
DATABASE_UI_URL="http://localhost:5555"
SITE_URL="http://localhost:4321"
ONION_ADDRESS="http://kycnotmezdiftahfmc34pqbpicxlnx3jbf5p7jypge7gdvduu7i6qjqd.onion"
I2P_ADDRESS="http://nti3rj4j4disjcm2kvp4eno7otcejbbxv3ggxwr5tpfk4jucah7q.b32.i2p"
RELEASE_NUMBER=123
RELEASE_DATE="2025-05-23T19:00:00.000Z"
# Example VAPID keys only — generate your own with `npx web-push generate-vapid-keys`; never reuse these committed keys in production
VAPID_PUBLIC_KEY="BPmJbRXzG9zT181vyg1GlpyV8qu7rjVjfg6vkkOgtqeTZECyt6lR4MuzmlarEHSBF6gPpc77ZA0_tTVtmYh65iM"
VAPID_PRIVATE_KEY="eN_S2SMXDB2hpwVXbgDkDrPIPMqirllZaJcUgYTt9w0"
VAPID_SUBJECT="mailto:no-reply@kycnot.me"

31
web/.gitignore vendored Normal file
View File

@@ -0,0 +1,31 @@
# build output
dist/
# generated types
.astro/
# dependencies
node_modules/
# local only data
local_data/
# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# environment variables
.env
.env.production
# macOS-specific files
.DS_Store
# jetbrains setting folder
.idea/
local_uploads/
!local_uploads/.gitkeep
uploads/

1
web/.npmrc Normal file
View File

@@ -0,0 +1 @@
save-exact=true

1
web/.nvmrc Normal file
View File

@@ -0,0 +1 @@
23

5
web/.prettierignore Normal file
View File

@@ -0,0 +1,5 @@
web/public/
.git/
package-lock.json
local_data/
.astro/

22
web/.prettierrc.mjs Normal file
View File

@@ -0,0 +1,22 @@
// @ts-check
/**
 * Prettier configuration for the web project.
 * Loads the Astro plugin plus Tailwind class sorting; .astro files get the
 * Astro parser via an override.
 * @type {import("prettier").Config}
 */
const config = {
  // Base formatting rules
  printWidth: 110,
  tabWidth: 2,
  semi: false,
  singleQuote: true,
  trailingComma: 'es5',
  bracketSpacing: true,
  endOfLine: 'lf',
  // Plugins: Astro syntax support and Tailwind class ordering
  plugins: ['prettier-plugin-astro', 'prettier-plugin-tailwindcss'],
  // Also sort Tailwind classes inside these helper calls
  tailwindFunctions: ['cn', 'clsx', 'tv'],
  // .astro files need the dedicated Astro parser
  overrides: [
    {
      files: '*.astro',
      options: { parser: 'astro' },
    },
  ],
}

export default config

29
web/Dockerfile Normal file
View File

@@ -0,0 +1,29 @@
# Runtime image for the KYCnot.me Astro web app.
# Build context is the repository root: paths below are prefixed with web/.
FROM node:lts AS runtime
WORKDIR /app
# NOTE(review): this bakes the root .env file (and any secrets in it) into an
# image layer — confirm this image is never pushed to a public registry.
COPY .env .env
# Copy manifests first so the npm ci layer is cached across source changes.
COPY web/package.json web/package-lock.json ./
COPY web/.npmrc .npmrc
RUN npm ci
COPY web/ .
# Astro build mode; defaults to production, overridable with --build-arg.
ARG ASTRO_BUILD_MODE=production
# Generate Prisma client
RUN npx prisma generate
# Build the application
RUN npm run build:astro -- --mode ${ASTRO_BUILD_MODE} && npm run build:server-init
ENV HOST=0.0.0.0
ENV PORT=4321
EXPOSE 4321
# Add knm-migrate command script and make it executable
COPY web/migrate.sh /usr/local/bin/knm-migrate
RUN chmod +x /usr/local/bin/knm-migrate
# Starts server-init in the background alongside the main entry process.
# NOTE(review): if the backgrounded server-init exits, the container keeps
# running on entry.mjs alone — confirm that is the intended failure mode.
CMD ["sh", "-c", "node ./dist/server/server-init.js & node ./dist/server/entry.mjs"]

28
web/README.md Normal file
View File

@@ -0,0 +1,28 @@
# KYCnot.me website
[KYCnot.me](https://kycnot.me)
## Commands
All commands are run from the root of the project, from a terminal:
| Command | Action |
| :------------------------ | :------------------------------------------------------------------ |
| `nvm install` | Installs and uses the correct version of node |
| `npm install` | Installs dependencies |
| `npm run dev` | Starts local dev server at `localhost:4321` |
| `npm run build` | Build your production site to `./dist/` |
| `npm run preview` | Preview your build locally, before deploying |
| `npm run astro ...` | Run CLI commands like `astro add`, `astro check` |
| `npm run astro -- --help` | Get help using the Astro CLI |
| `npm run db-admin` | Runs Prisma Studio (database admin) |
| `npm run db-gen` | Generates the Prisma client without running migrations |
| `npm run db-push` | Updates the database schema with latest changes (development mode). |
| `npm run db-seed` | Seeds the database with fake data (development mode) |
| `npm run format` | Formats the code with Prettier |
| `npm run lint` | Lints the code with ESLint |
| `npm run lint-fix` | Lints the code with ESLint and fixes the issues |
> **Note**: `db-seed` supports the `-- --services=n` flag, where n is the number of fake services to add. It defaults to 10. For example, `npm run db-seed -- --services=5` will add 5 fake services.
> **Note**: `db-seed` creates default users with tokens: `admin`, `moderator`, `verified`, `normal` (override with `DEV_*****_USER_SECRET_TOKEN` env vars)

245
web/astro.config.mjs Normal file
View File

@@ -0,0 +1,245 @@
// @ts-check
// Astro configuration for the KYCnot.me web app: Vite/Tailwind setup, PWA,
// sitemap, legacy-URL redirects, and the typed astro:env schema.
import mdx from '@astrojs/mdx'
import node from '@astrojs/node'
import sitemap from '@astrojs/sitemap'
import tailwindcss from '@tailwindcss/vite'
import { minimal2023Preset } from '@vite-pwa/assets-generator/config'
import AstroPWA from '@vite-pwa/astro'
import { defineConfig, envField } from 'astro/config'
import icon from 'astro-icon'
import devtoolsJson from 'vite-plugin-devtools-json'
import { postgresListener } from './src/lib/postgresListenerIntegration'
import { getServerEnvVariable } from './src/lib/serverEnvVariables'
// Resolved once at config-load time; reused below for allowedHosts and image domains.
const SITE_URL = getServerEnvVariable('SITE_URL')
export default defineConfig({
site: SITE_URL,
vite: {
build: {
sourcemap: true, // Enable sourcemaps on production, so users can inspect the code
},
plugins: [devtoolsJson(), tailwindcss()],
},
integrations: [
// Project-local integration — presumably subscribes to Postgres notifications; see src/lib.
postgresListener(),
icon(),
mdx(),
// PWA: manifest, generated icons from favicon.svg, and an injected service worker (src/sw.ts).
AstroPWA({
// NOTE(review): mode 'development' plus devOptions.enabled=true applies even
// to production builds — confirm this is intentional.
mode: 'development',
base: '/',
scope: '/',
registerType: 'autoUpdate',
manifest: {
name: 'KYCnot.me',
description: 'Find services that respect your privacy',
theme_color: '#040505',
background_color: '#171c1b',
},
pwaAssets: {
image: './public/favicon.svg',
preset: {
...minimal2023Preset,
maskable: {
...minimal2023Preset.maskable,
padding: 0.1,
resizeOptions: {
...minimal2023Preset.maskable.resizeOptions,
background: '#3bdb78',
},
},
apple: {
...minimal2023Preset.apple,
padding: 0.1,
resizeOptions: {
...minimal2023Preset.apple.resizeOptions,
background: '#3bdb78',
},
},
},
},
workbox: {
navigateFallback: '/404',
globPatterns: ['**/*.{js,css,html,ico,jpg,jpeg,png,svg,webp,avif}'],
},
strategies: 'injectManifest',
srcDir: 'src',
filename: 'sw.ts',
devOptions: {
enabled: true,
type: 'module',
},
experimental: {
directoryAndTrailingSlashHandler: true,
},
}),
// Keep admin and impersonation pages out of the public sitemap.
sitemap({
filter: (page) => {
const url = new URL(page)
return !url.pathname.startsWith('/admin') && !url.pathname.startsWith('/account/impersonate')
},
}),
],
adapter: node({
mode: 'standalone',
}),
output: 'server',
devToolbar: {
enabled: false,
},
server: {
open: false,
allowedHosts: [new URL(SITE_URL).hostname],
},
image: {
domains: [new URL(SITE_URL).hostname],
remotePatterns: [{ protocol: 'https' }],
},
redirects: {
// #region Redirects from old website
'/pending': '/?verification=verified&verification=approved&verification=community',
'/changelog': '/events',
'/request': '/service-suggestion/new',
'/service/[...slug]/summary': '/service/[...slug]/#scores',
'/service/[...slug]/proof': '/service/[...slug]/#verification',
'/attribute/[...slug]': '/attributes',
'/attr/[...slug]': '/attributes',
'/service/[...slug]/review': '/service/[...slug]#comments',
// #endregion
},
// Typed environment-variable schema (astro:env): validated at build/start time.
env: {
schema: {
// Database (server-only, secret)
DATABASE_URL: envField.string({
context: 'server',
access: 'secret',
url: true,
startsWith: 'postgresql://',
default: 'postgresql://kycnot:kycnot@database:5432/kycnot?schema=public',
}),
// Public URLs (can be accessed from both server and client)
SOURCE_CODE_URL: envField.string({
context: 'server',
access: 'public',
url: true,
optional: false,
}),
I2P_ADDRESS: envField.string({
context: 'server',
access: 'public',
url: true,
optional: false,
}),
ONION_ADDRESS: envField.string({
context: 'server',
access: 'public',
url: true,
optional: false,
}),
REDIS_URL: envField.string({
context: 'server',
access: 'secret',
url: true,
startsWith: 'redis://',
default: 'redis://redis:6379',
}),
// Development tokens
DEV_ADMIN_USER_SECRET_TOKEN: envField.string({
context: 'server',
access: 'secret',
min: 1,
default: 'admin',
}),
DEV_MODERATOR_USER_SECRET_TOKEN: envField.string({
context: 'server',
access: 'secret',
min: 1,
default: 'moderator',
}),
DEV_VERIFIED_USER_SECRET_TOKEN: envField.string({
context: 'server',
access: 'secret',
min: 1,
default: 'verified',
}),
DEV_NORMAL_USER_SECRET_TOKEN: envField.string({
context: 'server',
access: 'secret',
min: 1,
default: 'normal',
}),
DEV_SPAM_USER_SECRET_TOKEN: envField.string({
context: 'server',
access: 'secret',
min: 1,
default: 'spam',
}),
// Upload directory configuration
UPLOAD_DIR: envField.string({
context: 'server',
access: 'secret',
min: 1,
default: './local_uploads',
}),
SITE_URL: envField.string({
context: 'client',
access: 'public',
url: true,
optional: false,
}),
DATABASE_UI_URL: envField.string({
context: 'server',
access: 'secret',
url: true,
optional: false,
}),
LOGS_UI_URL: envField.string({
context: 'server',
access: 'secret',
url: true,
optional: true,
}),
RELEASE_NUMBER: envField.number({
context: 'server',
access: 'public',
int: true,
optional: true,
}),
RELEASE_DATE: envField.string({
context: 'server',
access: 'public',
optional: true,
}),
// Generated with `npx web-push generate-vapid-keys`
VAPID_PUBLIC_KEY: envField.string({
context: 'server',
access: 'public',
min: 1,
optional: false,
}),
// Generated with `npx web-push generate-vapid-keys`
VAPID_PRIVATE_KEY: envField.string({
context: 'server',
access: 'secret',
min: 1,
optional: false,
}),
VAPID_SUBJECT: envField.string({
context: 'server',
access: 'secret',
min: 1,
optional: false,
}),
},
},
})

147
web/eslint.config.js Normal file
View File

@@ -0,0 +1,147 @@
// @ts-check
// Flat ESLint config: JS recommended + strict type-checked TS + import hygiene
// + Astro / jsx-a11y rules, with Astro-specific false positives disabled.
import pluginJs from '@eslint/js'
import stylistic from '@stylistic/eslint-plugin'
import { configs as eslintAstroPluginConfig } from 'eslint-plugin-astro'
import importPlugin from 'eslint-plugin-import'
import globals from 'globals'
import { without } from 'lodash-es'
import tseslint, { configs as tseslintConfigs } from 'typescript-eslint'
export default tseslint.config(
// Paths never linted: build output, generated types, vendored/static assets.
{
ignores: [
'**/node_modules/**',
'.astro/**',
'dist/**',
'coverage/**',
'build/**',
'public/**',
'.prettierrc.mjs',
],
},
{
files: ['**/*.{js,ts,mjs,cjs,tsx,jsx,astro}'],
},
// Resolve imports through the TypeScript resolver (tsconfig path mappings).
{
settings: {
'import/resolver': {
typescript: {
alwaysTryTypes: true,
project: 'tsconfig.json',
},
},
},
},
// Shared preset configs, applied in order (later entries can override earlier ones).
pluginJs.configs.recommended,
tseslintConfigs.strictTypeChecked,
tseslintConfigs.stylisticTypeChecked,
// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
importPlugin.flatConfigs.recommended,
// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
importPlugin.flatConfigs.typescript,
eslintAstroPluginConfig['flat/recommended'],
eslintAstroPluginConfig['flat/jsx-a11y-strict'],
[
// These rules don't work with Astro and produce false positives
{
files: ['**/*.astro'],
rules: {
'@typescript-eslint/no-misused-promises': 'off',
'@typescript-eslint/no-unsafe-return': 'off',
'@typescript-eslint/no-redundant-type-constituents': 'off',
'@typescript-eslint/no-unsafe-member-access': 'off',
'@typescript-eslint/no-unsafe-call': 'off',
'@typescript-eslint/no-unsafe-assignment': 'off',
'@typescript-eslint/no-unsafe-argument': 'off',
'@typescript-eslint/restrict-template-expressions': 'off',
},
},
{
rules: {
'@typescript-eslint/no-unsafe-assignment': 'off',
},
},
],
// Project-wide language options and rule customizations.
{
languageOptions: {
globals: {
...globals.browser,
...globals.node,
},
parserOptions: {
project: true,
tsconfigRootDir: import.meta.dirname,
},
},
plugins: {
'@stylistic': stylistic,
},
rules: {
'@typescript-eslint/unbound-method': 'off',
'@typescript-eslint/no-unnecessary-type-parameters': 'off',
'@typescript-eslint/no-deprecated': 'warn',
'@typescript-eslint/prefer-nullish-coalescing': 'warn',
'@typescript-eslint/consistent-type-definitions': ['warn', 'type'],
// Unused names are allowed only with a leading underscore.
'@typescript-eslint/no-unused-vars': [
'warn',
{
args: 'all',
argsIgnorePattern: '^_',
caughtErrors: 'all',
caughtErrorsIgnorePattern: '^_',
destructuredArrayIgnorePattern: '^_',
varsIgnorePattern: '^_',
ignoreRestSiblings: true,
},
],
'@typescript-eslint/consistent-type-imports': [
'error',
{ prefer: 'type-imports', fixStyle: 'separate-type-imports' },
],
'@typescript-eslint/sort-type-constituents': 'error',
// Enforce grouped, alphabetized imports with react hoisted first.
'import/order': [
'warn',
{
groups: ['builtin', 'external', 'internal', 'parent', 'sibling', 'index', 'object', 'type'],
pathGroups: [
{
pattern: 'react',
group: 'external',
position: 'before',
},
],
pathGroupsExcludedImportTypes: ['react'],
'newlines-between': 'always',
alphabetize: {
order: 'asc',
caseInsensitive: true,
},
},
],
'import/first': 'error',
'import/newline-after-import': 'error',
'import/no-duplicates': 'error',
// astro: and virtual: specifiers are resolved by Astro/Vite, not the FS resolver.
'import/no-unresolved': ['error', { ignore: ['^astro:', '^virtual:'] }],
'@typescript-eslint/no-explicit-any': 'warn',
// Every console method is allowed except console.log.
'no-console': ['warn', { allow: without(Object.keys(console), 'log') }],
'import/namespace': 'off',
'object-shorthand': ['warn', 'always', { avoidExplicitReturnArrows: false }],
'no-useless-rename': 'warn',
curly: ['error', 'multi-line'],
'@stylistic/quotes': [
'error',
'single',
{
avoidEscape: true,
allowTemplateLiterals: false,
},
],
},
},
// Declaration files may use `any` freely.
{
files: ['**/*.d.ts'],
rules: {
'@typescript-eslint/no-explicit-any': 'off',
},
}
)

19
web/migrate.sh Normal file
View File

@@ -0,0 +1,19 @@
#!/bin/sh
# Applies pending Prisma migrations, then re-imports the SQL trigger files.
# Installed into the web image as the `knm-migrate` command (see web/Dockerfile).
# Exits non-zero on the first failing command.
set -e
# Apply migrations
echo "Applying database migrations..."
npx prisma migrate deploy
# Apply triggers
echo "Applying database triggers..."
for trigger_file in prisma/triggers/*.sql; do
# POSIX sh: when the glob matches nothing it stays a literal pattern, so the
# -f test fails and the else branch reports the absence exactly once.
if [ -f "$trigger_file" ]; then
echo "Applying trigger: $trigger_file"
npx prisma db execute --file "$trigger_file" --schema=./prisma/schema.prisma
else
echo "No trigger files found in prisma/triggers/ or $trigger_file is not a file."
fi
done
echo "Migrations completed."

19207
web/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

110
web/package.json Normal file
View File

@@ -0,0 +1,110 @@
{
"name": "kycnot.me",
"type": "module",
"version": "0.0.1",
"scripts": {
"dev": "astro dev",
"build": "npm run build:astro && npm run build:server-init",
"build:astro": "astro build --remote",
"build:server-init": "esbuild src/server-init.ts --bundle --platform=node --format=esm --packages=external --outfile=dist/server/server-init.js",
"preview": "node dist/server/server-init.js & astro preview",
"astro": "astro",
"db-admin": "prisma studio --browser=none",
"db-gen": "prisma generate",
"db-push": "prisma migrate dev",
"db-triggers": "just import-triggers",
"db-update": "prisma migrate dev && just import-triggers",
"db-reset": "prisma migrate reset -f && prisma migrate dev",
"db-seed": "prisma db seed",
"format": "prettier --write .",
"lint": "eslint .",
"lint-fix": "eslint . --fix && prettier --write ."
},
"prisma": {
"seed": "tsx prisma/seed.ts"
},
"dependencies": {
"@astrojs/check": "0.9.4",
"@astrojs/db": "0.15.0",
"@astrojs/mdx": "4.3.0",
"@astrojs/node": "9.2.2",
"@astrojs/rss": "4.0.12",
"@astrojs/sitemap": "3.4.1",
"@fontsource-variable/space-grotesk": "5.2.8",
"@fontsource/inter": "5.2.5",
"@fontsource/space-grotesk": "5.2.8",
"@prisma/client": "6.9.0",
"@tailwindcss/vite": "4.1.8",
"@types/mime-types": "3.0.0",
"@types/pg": "8.15.4",
"@vercel/og": "0.6.8",
"astro": "5.9.0",
"astro-loading-indicator": "0.7.0",
"astro-remote": "0.3.4",
"astro-seo-schema": "5.0.0",
"canvas": "3.1.0",
"clsx": "2.1.1",
"htmx.org": "1.9.12",
"javascript-time-ago": "2.5.11",
"libphonenumber-js": "1.12.9",
"lodash-es": "4.17.21",
"mime-types": "3.0.1",
"object-to-formdata": "4.5.1",
"pg": "8.16.0",
"qrcode": "1.5.4",
"react": "19.1.0",
"redis": "5.5.6",
"schema-dts": "1.1.5",
"seedrandom": "3.0.5",
"sharp": "0.34.2",
"slugify": "1.6.6",
"tailwind-merge": "3.3.0",
"tailwind-variants": "1.0.0",
"tailwindcss": "4.1.8",
"typescript": "5.8.3",
"unique-username-generator": "1.4.0",
"web-push": "3.6.7",
"zod-form-data": "2.0.7"
},
"devDependencies": {
"@eslint/js": "9.28.0",
"@faker-js/faker": "9.8.0",
"@iconify-json/material-symbols": "1.2.24",
"@iconify-json/mdi": "1.2.3",
"@iconify-json/ri": "1.2.5",
"@stylistic/eslint-plugin": "4.4.1",
"@tailwindcss/forms": "0.5.10",
"@tailwindcss/typography": "0.5.16",
"@types/eslint__js": "9.14.0",
"@types/lodash-es": "4.17.12",
"@types/qrcode": "1.5.5",
"@types/react": "19.1.6",
"@types/seedrandom": "3.0.8",
"@types/web-push": "3.6.4",
"@typescript-eslint/parser": "8.33.1",
"@vite-pwa/assets-generator": "1.0.0",
"@vite-pwa/astro": "1.1.0",
"astro-icon": "1.1.5",
"date-fns": "4.1.0",
"esbuild": "0.25.5",
"eslint": "9.28.0",
"eslint-import-resolver-typescript": "4.4.3",
"eslint-plugin-astro": "1.3.1",
"eslint-plugin-import": "2.31.0",
"eslint-plugin-jsx-a11y": "6.10.2",
"globals": "16.2.0",
"prettier": "3.5.3",
"prettier-plugin-astro": "0.14.1",
"prettier-plugin-tailwindcss": "0.6.12",
"prisma": "6.9.0",
"prisma-json-types-generator": "3.4.2",
"tailwind-htmx": "0.1.2",
"ts-essentials": "10.0.4",
"ts-toolbelt": "9.6.0",
"tsx": "4.19.4",
"typescript-eslint": "8.33.1",
"vite-plugin-devtools-json": "0.1.1",
"workbox-core": "7.3.0",
"workbox-precaching": "7.3.0"
}
}

View File

@@ -0,0 +1,798 @@
-- CreateEnum
CREATE TYPE "CommentStatus" AS ENUM ('PENDING', 'HUMAN_PENDING', 'APPROVED', 'VERIFIED', 'REJECTED');
-- CreateEnum
CREATE TYPE "OrderIdStatus" AS ENUM ('PENDING', 'APPROVED', 'REJECTED');
-- CreateEnum
CREATE TYPE "VerificationStatus" AS ENUM ('COMMUNITY_CONTRIBUTED', 'APPROVED', 'VERIFICATION_SUCCESS', 'VERIFICATION_FAILED');
-- CreateEnum
CREATE TYPE "ServiceInfoBanner" AS ENUM ('NONE', 'NO_LONGER_OPERATIONAL');
-- CreateEnum
CREATE TYPE "ServiceVisibility" AS ENUM ('PUBLIC', 'UNLISTED', 'HIDDEN');
-- CreateEnum
CREATE TYPE "Currency" AS ENUM ('MONERO', 'BITCOIN', 'LIGHTNING', 'FIAT', 'CASH');
-- CreateEnum
CREATE TYPE "EventType" AS ENUM ('WARNING', 'WARNING_SOLVED', 'ALERT', 'ALERT_SOLVED', 'INFO', 'NORMAL', 'UPDATE');
-- CreateEnum
CREATE TYPE "ServiceUserRole" AS ENUM ('OWNER', 'ADMIN', 'MODERATOR', 'SUPPORT', 'TEAM_MEMBER');
-- CreateEnum
CREATE TYPE "AccountStatusChange" AS ENUM ('ADMIN_TRUE', 'ADMIN_FALSE', 'VERIFIED_TRUE', 'VERIFIED_FALSE', 'VERIFIER_TRUE', 'VERIFIER_FALSE', 'SPAMMER_TRUE', 'SPAMMER_FALSE');
-- CreateEnum
CREATE TYPE "NotificationType" AS ENUM ('COMMENT_STATUS_CHANGE', 'REPLY_COMMENT_CREATED', 'COMMUNITY_NOTE_ADDED', 'ROOT_COMMENT_CREATED', 'SUGGESTION_MESSAGE', 'SUGGESTION_STATUS_CHANGE', 'ACCOUNT_STATUS_CHANGE', 'EVENT_CREATED', 'SERVICE_VERIFICATION_STATUS_CHANGE');
-- CreateEnum
CREATE TYPE "CommentStatusChange" AS ENUM ('MARKED_AS_SPAM', 'UNMARKED_AS_SPAM', 'MARKED_FOR_ADMIN_REVIEW', 'UNMARKED_FOR_ADMIN_REVIEW', 'STATUS_CHANGED_TO_APPROVED', 'STATUS_CHANGED_TO_VERIFIED', 'STATUS_CHANGED_TO_REJECTED', 'STATUS_CHANGED_TO_PENDING');
-- CreateEnum
CREATE TYPE "ServiceVerificationStatusChange" AS ENUM ('STATUS_CHANGED_TO_COMMUNITY_CONTRIBUTED', 'STATUS_CHANGED_TO_APPROVED', 'STATUS_CHANGED_TO_VERIFICATION_SUCCESS', 'STATUS_CHANGED_TO_VERIFICATION_FAILED');
-- CreateEnum
CREATE TYPE "ServiceSuggestionStatusChange" AS ENUM ('STATUS_CHANGED_TO_PENDING', 'STATUS_CHANGED_TO_APPROVED', 'STATUS_CHANGED_TO_REJECTED', 'STATUS_CHANGED_TO_WITHDRAWN');
-- CreateEnum
CREATE TYPE "ServiceSuggestionStatus" AS ENUM ('PENDING', 'APPROVED', 'REJECTED', 'WITHDRAWN');
-- CreateEnum
CREATE TYPE "ServiceSuggestionType" AS ENUM ('CREATE_SERVICE', 'EDIT_SERVICE');
-- CreateEnum
CREATE TYPE "AttributeCategory" AS ENUM ('PRIVACY', 'TRUST');
-- CreateEnum
CREATE TYPE "AttributeType" AS ENUM ('GOOD', 'BAD', 'WARNING', 'INFO');
-- CreateEnum
CREATE TYPE "VerificationStepStatus" AS ENUM ('PENDING', 'IN_PROGRESS', 'PASSED', 'FAILED');
-- CreateTable
CREATE TABLE "Comment" (
"id" SERIAL NOT NULL,
"upvotes" INTEGER NOT NULL DEFAULT 0,
"status" "CommentStatus" NOT NULL DEFAULT 'PENDING',
"suspicious" BOOLEAN NOT NULL DEFAULT false,
"requiresAdminReview" BOOLEAN NOT NULL DEFAULT false,
"communityNote" TEXT,
"verificationNote" TEXT,
"internalNote" TEXT,
"privateContext" TEXT,
"orderId" VARCHAR(100),
"orderIdStatus" "OrderIdStatus" DEFAULT 'PENDING',
"kycRequested" BOOLEAN NOT NULL DEFAULT false,
"fundsBlocked" BOOLEAN NOT NULL DEFAULT false,
"content" TEXT NOT NULL,
"rating" SMALLINT,
"ratingActive" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"authorId" INTEGER NOT NULL,
"serviceId" INTEGER NOT NULL,
"parentId" INTEGER,
CONSTRAINT "Comment_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Notification" (
"id" SERIAL NOT NULL,
"userId" INTEGER NOT NULL,
"type" "NotificationType" NOT NULL,
"read" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"aboutCommentId" INTEGER,
"aboutEventId" INTEGER,
"aboutServiceId" INTEGER,
"aboutServiceSuggestionId" INTEGER,
"aboutServiceSuggestionMessageId" INTEGER,
"aboutAccountStatusChange" "AccountStatusChange",
"aboutCommentStatusChange" "CommentStatusChange",
"aboutServiceVerificationStatusChange" "ServiceVerificationStatusChange",
"aboutSuggestionStatusChange" "ServiceSuggestionStatusChange",
CONSTRAINT "Notification_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "NotificationPreferences" (
"id" SERIAL NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"userId" INTEGER NOT NULL,
"enableOnMyCommentStatusChange" BOOLEAN NOT NULL DEFAULT true,
"enableAutowatchMyComments" BOOLEAN NOT NULL DEFAULT true,
"enableNotifyPendingRepliesOnWatch" BOOLEAN NOT NULL DEFAULT false,
CONSTRAINT "NotificationPreferences_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "NotificationPreferenceOnServiceVerificationChangeFilterFilter" (
"id" SERIAL NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"verificationStatus" "VerificationStepStatus" NOT NULL,
"notificationPreferencesId" INTEGER NOT NULL,
"currencies" "Currency"[],
"scores" INTEGER[],
CONSTRAINT "NotificationPreferenceOnServiceVerificationChangeFilterFil_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Event" (
"id" SERIAL NOT NULL,
"title" TEXT NOT NULL,
"content" TEXT NOT NULL,
"source" TEXT,
"type" "EventType" NOT NULL,
"visible" BOOLEAN NOT NULL DEFAULT true,
"startedAt" TIMESTAMP(3) NOT NULL,
"endedAt" TIMESTAMP(3),
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"serviceId" INTEGER NOT NULL,
CONSTRAINT "Event_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceSuggestion" (
"id" SERIAL NOT NULL,
"type" "ServiceSuggestionType" NOT NULL,
"status" "ServiceSuggestionStatus" NOT NULL DEFAULT 'PENDING',
"notes" TEXT,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"userId" INTEGER NOT NULL,
"serviceId" INTEGER NOT NULL,
CONSTRAINT "ServiceSuggestion_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceSuggestionMessage" (
"id" SERIAL NOT NULL,
"content" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"userId" INTEGER NOT NULL,
"suggestionId" INTEGER NOT NULL,
CONSTRAINT "ServiceSuggestionMessage_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Service" (
"id" SERIAL NOT NULL,
"name" TEXT NOT NULL,
"slug" TEXT NOT NULL,
"description" TEXT NOT NULL,
"kycLevel" INTEGER NOT NULL DEFAULT 4,
"overallScore" INTEGER NOT NULL DEFAULT 0,
"privacyScore" INTEGER NOT NULL DEFAULT 0,
"trustScore" INTEGER NOT NULL DEFAULT 0,
"isRecentlyListed" BOOLEAN NOT NULL DEFAULT false,
"averageUserRating" DOUBLE PRECISION,
"serviceVisibility" "ServiceVisibility" NOT NULL DEFAULT 'PUBLIC',
"serviceInfoBanner" "ServiceInfoBanner" NOT NULL DEFAULT 'NONE',
"serviceInfoBannerNotes" TEXT,
"verificationStatus" "VerificationStatus" NOT NULL DEFAULT 'COMMUNITY_CONTRIBUTED',
"verificationSummary" TEXT,
"verificationProofMd" TEXT,
"verifiedAt" TIMESTAMP(3),
"userSentiment" JSONB,
"userSentimentAt" TIMESTAMP(3),
"referral" TEXT,
"acceptedCurrencies" "Currency"[] DEFAULT ARRAY[]::"Currency"[],
"serviceUrls" TEXT[],
"tosUrls" TEXT[] DEFAULT ARRAY[]::TEXT[],
"onionUrls" TEXT[] DEFAULT ARRAY[]::TEXT[],
"i2pUrls" TEXT[] DEFAULT ARRAY[]::TEXT[],
"imageUrl" TEXT,
"tosReview" JSONB,
"tosReviewAt" TIMESTAMP(3),
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"listedAt" TIMESTAMP(3),
CONSTRAINT "Service_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceContactMethod" (
"id" SERIAL NOT NULL,
"label" TEXT NOT NULL,
"value" TEXT NOT NULL,
"iconId" TEXT NOT NULL,
"info" TEXT NOT NULL,
"serviceId" INTEGER NOT NULL,
CONSTRAINT "ServiceContactMethod_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Attribute" (
"id" SERIAL NOT NULL,
"slug" TEXT NOT NULL,
"title" TEXT NOT NULL,
"description" TEXT NOT NULL,
"privacyPoints" INTEGER NOT NULL DEFAULT 0,
"trustPoints" INTEGER NOT NULL DEFAULT 0,
"category" "AttributeCategory" NOT NULL,
"type" "AttributeType" NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Attribute_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "InternalUserNote" (
"id" SERIAL NOT NULL,
"content" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"userId" INTEGER NOT NULL,
"addedByUserId" INTEGER,
CONSTRAINT "InternalUserNote_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "User" (
"id" SERIAL NOT NULL,
"name" TEXT NOT NULL,
"displayName" TEXT,
"link" TEXT,
"picture" TEXT,
"spammer" BOOLEAN NOT NULL DEFAULT false,
"verified" BOOLEAN NOT NULL DEFAULT false,
"admin" BOOLEAN NOT NULL DEFAULT false,
"verifier" BOOLEAN NOT NULL DEFAULT false,
"verifiedLink" TEXT,
"secretTokenHash" TEXT NOT NULL,
"totalKarma" INTEGER NOT NULL DEFAULT 0,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "User_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "CommentVote" (
"id" SERIAL NOT NULL,
"downvote" BOOLEAN NOT NULL DEFAULT false,
"commentId" INTEGER NOT NULL,
"userId" INTEGER NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "CommentVote_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceAttribute" (
"serviceId" INTEGER NOT NULL,
"attributeId" INTEGER NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "ServiceAttribute_pkey" PRIMARY KEY ("serviceId","attributeId")
);
-- CreateTable
CREATE TABLE "KarmaTransaction" (
"id" SERIAL NOT NULL,
"userId" INTEGER NOT NULL,
"action" TEXT NOT NULL,
"points" INTEGER NOT NULL DEFAULT 0,
"commentId" INTEGER,
"suggestionId" INTEGER,
"description" TEXT NOT NULL,
"processed" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "KarmaTransaction_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "VerificationStep" (
"id" SERIAL NOT NULL,
"title" TEXT NOT NULL,
"description" TEXT NOT NULL,
"status" "VerificationStepStatus" NOT NULL DEFAULT 'PENDING',
"evidenceMd" TEXT,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"serviceId" INTEGER NOT NULL,
CONSTRAINT "VerificationStep_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Category" (
"id" SERIAL NOT NULL,
"name" TEXT NOT NULL,
"icon" TEXT NOT NULL,
"slug" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Category_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceVerificationRequest" (
"id" SERIAL NOT NULL,
"serviceId" INTEGER NOT NULL,
"userId" INTEGER NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "ServiceVerificationRequest_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceScoreRecalculationJob" (
"id" SERIAL NOT NULL,
"serviceId" INTEGER NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"processedAt" TIMESTAMP(3),
CONSTRAINT "ServiceScoreRecalculationJob_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ServiceUser" (
"id" SERIAL NOT NULL,
"userId" INTEGER NOT NULL,
"serviceId" INTEGER NOT NULL,
"role" "ServiceUserRole" NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "ServiceUser_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "_watchedComments" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_watchedComments_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateTable
CREATE TABLE "_onEventCreatedForServices" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_onEventCreatedForServices_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateTable
CREATE TABLE "_onRootCommentCreatedForServices" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_onRootCommentCreatedForServices_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateTable
CREATE TABLE "_onVerificationChangeForServices" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_onVerificationChangeForServices_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateTable
CREATE TABLE "_AttributeToNotificationPreferenceOnServiceVerificationChangeFi" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_AttributeToNotificationPreferenceOnServiceVerification_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateTable
CREATE TABLE "_ServiceToCategory" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_ServiceToCategory_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateTable
CREATE TABLE "_CategoryToNotificationPreferenceOnServiceVerificationChangeFil" (
"A" INTEGER NOT NULL,
"B" INTEGER NOT NULL,
CONSTRAINT "_CategoryToNotificationPreferenceOnServiceVerificationC_AB_pkey" PRIMARY KEY ("A","B")
);
-- CreateIndex
CREATE INDEX "Comment_status_idx" ON "Comment"("status");
-- CreateIndex
CREATE INDEX "Comment_createdAt_idx" ON "Comment"("createdAt");
-- CreateIndex
CREATE INDEX "Comment_serviceId_idx" ON "Comment"("serviceId");
-- CreateIndex
CREATE INDEX "Comment_authorId_idx" ON "Comment"("authorId");
-- CreateIndex
CREATE INDEX "Comment_upvotes_idx" ON "Comment"("upvotes");
-- CreateIndex
CREATE INDEX "Comment_rating_idx" ON "Comment"("rating");
-- CreateIndex
CREATE INDEX "Comment_ratingActive_idx" ON "Comment"("ratingActive");
-- CreateIndex
CREATE UNIQUE INDEX "Comment_serviceId_orderId_key" ON "Comment"("serviceId", "orderId");
-- CreateIndex
CREATE INDEX "Notification_userId_idx" ON "Notification"("userId");
-- CreateIndex
CREATE INDEX "Notification_read_idx" ON "Notification"("read");
-- CreateIndex
CREATE INDEX "Notification_createdAt_idx" ON "Notification"("createdAt");
-- CreateIndex
CREATE INDEX "Notification_userId_read_createdAt_idx" ON "Notification"("userId", "read", "createdAt");
-- CreateIndex
CREATE INDEX "Notification_userId_type_aboutCommentId_idx" ON "Notification"("userId", "type", "aboutCommentId");
-- CreateIndex
CREATE INDEX "idx_notification_suggestion_message" ON "Notification"("userId", "type", "aboutServiceSuggestionMessageId");
-- CreateIndex
CREATE INDEX "idx_notification_suggestion_status" ON "Notification"("userId", "type", "aboutServiceSuggestionId");
-- CreateIndex
CREATE INDEX "idx_notification_account_status" ON "Notification"("userId", "type", "aboutAccountStatusChange");
-- CreateIndex
CREATE UNIQUE INDEX "NotificationPreferences_userId_key" ON "NotificationPreferences"("userId");
-- CreateIndex
CREATE UNIQUE INDEX "NotificationPreferenceOnServiceVerificationChangeFilterFilt_key" ON "NotificationPreferenceOnServiceVerificationChangeFilterFilter"("verificationStatus", "notificationPreferencesId");
-- CreateIndex
CREATE INDEX "Event_visible_idx" ON "Event"("visible");
-- CreateIndex
CREATE INDEX "Event_startedAt_idx" ON "Event"("startedAt");
-- CreateIndex
CREATE INDEX "Event_createdAt_idx" ON "Event"("createdAt");
-- CreateIndex
CREATE INDEX "Event_endedAt_idx" ON "Event"("endedAt");
-- CreateIndex
CREATE INDEX "Event_type_idx" ON "Event"("type");
-- CreateIndex
CREATE INDEX "Event_serviceId_idx" ON "Event"("serviceId");
-- CreateIndex
CREATE INDEX "ServiceSuggestion_userId_idx" ON "ServiceSuggestion"("userId");
-- CreateIndex
CREATE INDEX "ServiceSuggestion_serviceId_idx" ON "ServiceSuggestion"("serviceId");
-- CreateIndex
CREATE INDEX "ServiceSuggestionMessage_userId_idx" ON "ServiceSuggestionMessage"("userId");
-- CreateIndex
CREATE INDEX "ServiceSuggestionMessage_suggestionId_idx" ON "ServiceSuggestionMessage"("suggestionId");
-- CreateIndex
CREATE INDEX "ServiceSuggestionMessage_createdAt_idx" ON "ServiceSuggestionMessage"("createdAt");
-- CreateIndex
CREATE UNIQUE INDEX "Service_slug_key" ON "Service"("slug");
-- CreateIndex
CREATE INDEX "Service_listedAt_idx" ON "Service"("listedAt");
-- CreateIndex
CREATE INDEX "Service_overallScore_idx" ON "Service"("overallScore");
-- CreateIndex
CREATE INDEX "Service_privacyScore_idx" ON "Service"("privacyScore");
-- CreateIndex
CREATE INDEX "Service_trustScore_idx" ON "Service"("trustScore");
-- CreateIndex
CREATE INDEX "Service_averageUserRating_idx" ON "Service"("averageUserRating");
-- CreateIndex
CREATE INDEX "Service_name_idx" ON "Service"("name");
-- CreateIndex
CREATE INDEX "Service_verificationStatus_idx" ON "Service"("verificationStatus");
-- CreateIndex
CREATE INDEX "Service_kycLevel_idx" ON "Service"("kycLevel");
-- CreateIndex
CREATE INDEX "Service_createdAt_idx" ON "Service"("createdAt");
-- CreateIndex
CREATE INDEX "Service_updatedAt_idx" ON "Service"("updatedAt");
-- CreateIndex
CREATE INDEX "Service_slug_idx" ON "Service"("slug");
-- CreateIndex
CREATE UNIQUE INDEX "Attribute_slug_key" ON "Attribute"("slug");
-- CreateIndex
CREATE INDEX "InternalUserNote_userId_idx" ON "InternalUserNote"("userId");
-- CreateIndex
CREATE INDEX "InternalUserNote_addedByUserId_idx" ON "InternalUserNote"("addedByUserId");
-- CreateIndex
CREATE INDEX "InternalUserNote_createdAt_idx" ON "InternalUserNote"("createdAt");
-- CreateIndex
CREATE UNIQUE INDEX "User_name_key" ON "User"("name");
-- CreateIndex
CREATE UNIQUE INDEX "User_secretTokenHash_key" ON "User"("secretTokenHash");
-- CreateIndex
CREATE INDEX "User_createdAt_idx" ON "User"("createdAt");
-- CreateIndex
CREATE INDEX "User_totalKarma_idx" ON "User"("totalKarma");
-- CreateIndex
CREATE INDEX "CommentVote_commentId_idx" ON "CommentVote"("commentId");
-- CreateIndex
CREATE INDEX "CommentVote_userId_idx" ON "CommentVote"("userId");
-- CreateIndex
CREATE UNIQUE INDEX "CommentVote_commentId_userId_key" ON "CommentVote"("commentId", "userId");
-- CreateIndex
CREATE INDEX "KarmaTransaction_createdAt_idx" ON "KarmaTransaction"("createdAt");
-- CreateIndex
CREATE INDEX "KarmaTransaction_userId_idx" ON "KarmaTransaction"("userId");
-- CreateIndex
CREATE INDEX "KarmaTransaction_processed_idx" ON "KarmaTransaction"("processed");
-- CreateIndex
CREATE INDEX "KarmaTransaction_suggestionId_idx" ON "KarmaTransaction"("suggestionId");
-- CreateIndex
CREATE INDEX "KarmaTransaction_commentId_idx" ON "KarmaTransaction"("commentId");
-- CreateIndex
CREATE INDEX "VerificationStep_serviceId_idx" ON "VerificationStep"("serviceId");
-- CreateIndex
CREATE INDEX "VerificationStep_status_idx" ON "VerificationStep"("status");
-- CreateIndex
CREATE INDEX "VerificationStep_createdAt_idx" ON "VerificationStep"("createdAt");
-- CreateIndex
CREATE UNIQUE INDEX "Category_name_key" ON "Category"("name");
-- CreateIndex
CREATE UNIQUE INDEX "Category_slug_key" ON "Category"("slug");
-- CreateIndex
CREATE INDEX "Category_name_idx" ON "Category"("name");
-- CreateIndex
CREATE INDEX "Category_slug_idx" ON "Category"("slug");
-- CreateIndex
CREATE INDEX "ServiceVerificationRequest_serviceId_idx" ON "ServiceVerificationRequest"("serviceId");
-- CreateIndex
CREATE INDEX "ServiceVerificationRequest_userId_idx" ON "ServiceVerificationRequest"("userId");
-- CreateIndex
CREATE INDEX "ServiceVerificationRequest_createdAt_idx" ON "ServiceVerificationRequest"("createdAt");
-- CreateIndex
CREATE UNIQUE INDEX "ServiceVerificationRequest_serviceId_userId_key" ON "ServiceVerificationRequest"("serviceId", "userId");
-- CreateIndex
CREATE UNIQUE INDEX "ServiceScoreRecalculationJob_serviceId_key" ON "ServiceScoreRecalculationJob"("serviceId");
-- CreateIndex
CREATE INDEX "ServiceScoreRecalculationJob_processedAt_idx" ON "ServiceScoreRecalculationJob"("processedAt");
-- CreateIndex
CREATE INDEX "ServiceScoreRecalculationJob_createdAt_idx" ON "ServiceScoreRecalculationJob"("createdAt");
-- CreateIndex
CREATE INDEX "ServiceUser_userId_idx" ON "ServiceUser"("userId");
-- CreateIndex
CREATE INDEX "ServiceUser_serviceId_idx" ON "ServiceUser"("serviceId");
-- CreateIndex
CREATE INDEX "ServiceUser_role_idx" ON "ServiceUser"("role");
-- CreateIndex
CREATE UNIQUE INDEX "ServiceUser_userId_serviceId_key" ON "ServiceUser"("userId", "serviceId");
-- CreateIndex
CREATE INDEX "_watchedComments_B_index" ON "_watchedComments"("B");
-- CreateIndex
CREATE INDEX "_onEventCreatedForServices_B_index" ON "_onEventCreatedForServices"("B");
-- CreateIndex
CREATE INDEX "_onRootCommentCreatedForServices_B_index" ON "_onRootCommentCreatedForServices"("B");
-- CreateIndex
CREATE INDEX "_onVerificationChangeForServices_B_index" ON "_onVerificationChangeForServices"("B");
-- CreateIndex
CREATE INDEX "_AttributeToNotificationPreferenceOnServiceVerification_B_index" ON "_AttributeToNotificationPreferenceOnServiceVerificationChangeFi"("B");
-- CreateIndex
CREATE INDEX "_ServiceToCategory_B_index" ON "_ServiceToCategory"("B");
-- CreateIndex
CREATE INDEX "_CategoryToNotificationPreferenceOnServiceVerificationC_B_index" ON "_CategoryToNotificationPreferenceOnServiceVerificationChangeFil"("B");
-- AddForeignKey
ALTER TABLE "Comment" ADD CONSTRAINT "Comment_authorId_fkey" FOREIGN KEY ("authorId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Comment" ADD CONSTRAINT "Comment_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Comment" ADD CONSTRAINT "Comment_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "Comment"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_aboutCommentId_fkey" FOREIGN KEY ("aboutCommentId") REFERENCES "Comment"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_aboutEventId_fkey" FOREIGN KEY ("aboutEventId") REFERENCES "Event"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_aboutServiceId_fkey" FOREIGN KEY ("aboutServiceId") REFERENCES "Service"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_aboutServiceSuggestionId_fkey" FOREIGN KEY ("aboutServiceSuggestionId") REFERENCES "ServiceSuggestion"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_aboutServiceSuggestionMessageId_fkey" FOREIGN KEY ("aboutServiceSuggestionMessageId") REFERENCES "ServiceSuggestionMessage"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "NotificationPreferences" ADD CONSTRAINT "NotificationPreferences_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "NotificationPreferenceOnServiceVerificationChangeFilterFilter" ADD CONSTRAINT "NotificationPreferenceOnServiceVerificationChangeFilterFil_fkey" FOREIGN KEY ("notificationPreferencesId") REFERENCES "NotificationPreferences"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Event" ADD CONSTRAINT "Event_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceSuggestion" ADD CONSTRAINT "ServiceSuggestion_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceSuggestion" ADD CONSTRAINT "ServiceSuggestion_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceSuggestionMessage" ADD CONSTRAINT "ServiceSuggestionMessage_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceSuggestionMessage" ADD CONSTRAINT "ServiceSuggestionMessage_suggestionId_fkey" FOREIGN KEY ("suggestionId") REFERENCES "ServiceSuggestion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceContactMethod" ADD CONSTRAINT "ServiceContactMethod_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "InternalUserNote" ADD CONSTRAINT "InternalUserNote_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "InternalUserNote" ADD CONSTRAINT "InternalUserNote_addedByUserId_fkey" FOREIGN KEY ("addedByUserId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "CommentVote" ADD CONSTRAINT "CommentVote_commentId_fkey" FOREIGN KEY ("commentId") REFERENCES "Comment"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "CommentVote" ADD CONSTRAINT "CommentVote_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceAttribute" ADD CONSTRAINT "ServiceAttribute_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceAttribute" ADD CONSTRAINT "ServiceAttribute_attributeId_fkey" FOREIGN KEY ("attributeId") REFERENCES "Attribute"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "KarmaTransaction" ADD CONSTRAINT "KarmaTransaction_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "KarmaTransaction" ADD CONSTRAINT "KarmaTransaction_commentId_fkey" FOREIGN KEY ("commentId") REFERENCES "Comment"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "KarmaTransaction" ADD CONSTRAINT "KarmaTransaction_suggestionId_fkey" FOREIGN KEY ("suggestionId") REFERENCES "ServiceSuggestion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "VerificationStep" ADD CONSTRAINT "VerificationStep_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceVerificationRequest" ADD CONSTRAINT "ServiceVerificationRequest_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceVerificationRequest" ADD CONSTRAINT "ServiceVerificationRequest_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceUser" ADD CONSTRAINT "ServiceUser_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ServiceUser" ADD CONSTRAINT "ServiceUser_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_watchedComments" ADD CONSTRAINT "_watchedComments_A_fkey" FOREIGN KEY ("A") REFERENCES "Comment"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_watchedComments" ADD CONSTRAINT "_watchedComments_B_fkey" FOREIGN KEY ("B") REFERENCES "NotificationPreferences"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_onEventCreatedForServices" ADD CONSTRAINT "_onEventCreatedForServices_A_fkey" FOREIGN KEY ("A") REFERENCES "NotificationPreferences"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_onEventCreatedForServices" ADD CONSTRAINT "_onEventCreatedForServices_B_fkey" FOREIGN KEY ("B") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_onRootCommentCreatedForServices" ADD CONSTRAINT "_onRootCommentCreatedForServices_A_fkey" FOREIGN KEY ("A") REFERENCES "NotificationPreferences"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_onRootCommentCreatedForServices" ADD CONSTRAINT "_onRootCommentCreatedForServices_B_fkey" FOREIGN KEY ("B") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_onVerificationChangeForServices" ADD CONSTRAINT "_onVerificationChangeForServices_A_fkey" FOREIGN KEY ("A") REFERENCES "NotificationPreferences"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_onVerificationChangeForServices" ADD CONSTRAINT "_onVerificationChangeForServices_B_fkey" FOREIGN KEY ("B") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_AttributeToNotificationPreferenceOnServiceVerificationChangeFi" ADD CONSTRAINT "_AttributeToNotificationPreferenceOnServiceVerificationC_A_fkey" FOREIGN KEY ("A") REFERENCES "Attribute"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_AttributeToNotificationPreferenceOnServiceVerificationChangeFi" ADD CONSTRAINT "_AttributeToNotificationPreferenceOnServiceVerificationC_B_fkey" FOREIGN KEY ("B") REFERENCES "NotificationPreferenceOnServiceVerificationChangeFilterFilter"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ServiceToCategory" ADD CONSTRAINT "_ServiceToCategory_A_fkey" FOREIGN KEY ("A") REFERENCES "Category"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ServiceToCategory" ADD CONSTRAINT "_ServiceToCategory_B_fkey" FOREIGN KEY ("B") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_CategoryToNotificationPreferenceOnServiceVerificationChangeFil" ADD CONSTRAINT "_CategoryToNotificationPreferenceOnServiceVerificationCh_A_fkey" FOREIGN KEY ("A") REFERENCES "Category"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_CategoryToNotificationPreferenceOnServiceVerificationChangeFil" ADD CONSTRAINT "_CategoryToNotificationPreferenceOnServiceVerificationCh_B_fkey" FOREIGN KEY ("B") REFERENCES "NotificationPreferenceOnServiceVerificationChangeFilterFilter"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -0,0 +1,2 @@
-- Adds User.lastLoginAt for tracking the most recent sign-in.
-- NOTE(review): DEFAULT CURRENT_TIMESTAMP backfills existing rows with the
-- migration run time, not a real login time — presumably acceptable; confirm.
-- AlterTable
ALTER TABLE "User" ADD COLUMN "lastLoginAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP;

View File

@@ -0,0 +1,19 @@
/*
Warnings:
- Changed the type of `action` on the `KarmaTransaction` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
*/
-- Converts KarmaTransaction.action from its previous type to a dedicated enum
-- and adds an optional grantedByUserId audit column.
-- NOTE(review): this is destructive — DROP/ADD of "action" loses existing
-- values, and the NOT NULL re-add fails if the table has rows (per the
-- Prisma-generated warning above). Presumably the table was empty when this
-- was applied; confirm before reusing on another environment.
-- CreateEnum
CREATE TYPE "KarmaTransactionAction" AS ENUM ('COMMENT_APPROVED', 'COMMENT_VERIFIED', 'COMMENT_SPAM', 'COMMENT_SPAM_REVERTED', 'COMMENT_UPVOTE', 'COMMENT_DOWNVOTE', 'COMMENT_VOTE_REMOVED', 'SUGGESTION_APPROVED', 'MANUAL_ADJUSTMENT');
-- AlterTable
ALTER TABLE "KarmaTransaction" ADD COLUMN "grantedByUserId" INTEGER,
DROP COLUMN "action",
ADD COLUMN "action" "KarmaTransactionAction" NOT NULL;
-- CreateIndex
CREATE INDEX "KarmaTransaction_grantedByUserId_idx" ON "KarmaTransaction"("grantedByUserId");
-- AddForeignKey
-- ON DELETE SET NULL: keep the karma record even if the granting admin is deleted.
ALTER TABLE "KarmaTransaction" ADD CONSTRAINT "KarmaTransaction_grantedByUserId_fkey" FOREIGN KEY ("grantedByUserId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE;

View File

@@ -0,0 +1,20 @@
-- Introduces site-wide Announcement banners with a type, display window
-- (startDate..endDate, endDate nullable = open-ended) and an active flag.
-- CreateEnum
CREATE TYPE "AnnouncementType" AS ENUM ('INFO', 'WARNING', 'ALERT');
-- CreateTable
CREATE TABLE "Announcement" (
"id" SERIAL NOT NULL,
"title" TEXT NOT NULL,
"content" TEXT NOT NULL,
"type" "AnnouncementType" NOT NULL,
"startDate" TIMESTAMP(3) NOT NULL,
"endDate" TIMESTAMP(3),
"isActive" BOOLEAN NOT NULL DEFAULT true,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- NOTE(review): DEFAULT only sets updatedAt on INSERT; presumably Prisma's
-- @updatedAt maintains it on UPDATE at the client level — confirm.
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Announcement_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
-- Composite index matching the expected "currently visible announcements" filter.
CREATE INDEX "Announcement_isActive_startDate_endDate_idx" ON "Announcement"("isActive", "startDate", "endDate");

View File

@@ -0,0 +1,2 @@
-- Optional call-to-action URL for an announcement.
-- AlterTable
ALTER TABLE "Announcement" ADD COLUMN "link" TEXT;

View File

@@ -0,0 +1,8 @@
/*
Warnings:
- You are about to drop the column `title` on the `Announcement` table. All the data in the column will be lost.
*/
-- Announcements are content-only from here on; the title column is removed.
-- AlterTable
ALTER TABLE "Announcement" DROP COLUMN "title";

View File

@@ -0,0 +1,2 @@
-- Optional label for the announcement's link (added in an earlier migration).
-- AlterTable
ALTER TABLE "Announcement" ADD COLUMN "linkText" TEXT;

View File

@@ -0,0 +1,11 @@
-- Wires karma changes into the notification system: new notification type,
-- an optional link from Notification to the triggering KarmaTransaction, and
-- a per-user threshold below which karma changes are not notified.
-- NOTE(review): ALTER TYPE ... ADD VALUE inside a transaction requires
-- PostgreSQL >= 12 — presumably the deployment target; confirm.
-- AlterEnum
ALTER TYPE "NotificationType" ADD VALUE 'KARMA_CHANGE';
-- AlterTable
ALTER TABLE "Notification" ADD COLUMN "aboutKarmaTransactionId" INTEGER;
-- AlterTable
ALTER TABLE "NotificationPreferences" ADD COLUMN "karmaNotificationThreshold" INTEGER NOT NULL DEFAULT 10;
-- AddForeignKey
-- SET NULL keeps the notification if the karma transaction is deleted.
ALTER TABLE "Notification" ADD CONSTRAINT "Notification_aboutKarmaTransactionId_fkey" FOREIGN KEY ("aboutKarmaTransactionId") REFERENCES "KarmaTransaction"("id") ON DELETE SET NULL ON UPDATE CASCADE;

View File

@@ -0,0 +1,13 @@
/*
Warnings:
- You are about to drop the column `iconId` on the `ServiceContactMethod` table. All the data in the column will be lost.
- You are about to drop the column `info` on the `ServiceContactMethod` table. All the data in the column will be lost.
*/
-- Reworks ServiceContactMethod: drops icon/info, adds timestamps, and makes
-- the label optional (label now only overrides a client-formatted value).
-- AlterTable
ALTER TABLE "ServiceContactMethod" DROP COLUMN "iconId",
DROP COLUMN "info",
ADD COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
ADD COLUMN "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
ALTER COLUMN "label" DROP NOT NULL;

View File

@@ -0,0 +1,13 @@
/*
Manually edited to be a rename migration: Prisma's generated migration would
have dropped and recreated the enum values and the column, losing data.
*/
-- AlterEnum
-- Rename the enum values in place so existing rows keep their meaning.
BEGIN;
ALTER TYPE "AccountStatusChange" RENAME VALUE 'VERIFIER_TRUE' TO 'MODERATOR_TRUE';
ALTER TYPE "AccountStatusChange" RENAME VALUE 'VERIFIER_FALSE' TO 'MODERATOR_FALSE';
COMMIT;
-- AlterTable
-- Rename the boolean flag to match the new terminology.
ALTER TABLE "User"
RENAME COLUMN "verifier" TO "moderator";

View File

@@ -0,0 +1,2 @@
-- Adds an ARCHIVED state to service visibility.
-- AlterEnum
ALTER TYPE "ServiceVisibility" ADD VALUE 'ARCHIVED';

View File

@@ -0,0 +1,26 @@
-- Staff-only notes attached to a service, mirroring InternalUserNote:
-- notes cascade with their service, but survive deletion of their author
-- (addedByUserId is nullable and SET NULL on delete).
-- CreateTable
CREATE TABLE "InternalServiceNote" (
"id" SERIAL NOT NULL,
"content" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"serviceId" INTEGER NOT NULL,
"addedByUserId" INTEGER,
CONSTRAINT "InternalServiceNote_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE INDEX "InternalServiceNote_serviceId_idx" ON "InternalServiceNote"("serviceId");
-- CreateIndex
CREATE INDEX "InternalServiceNote_addedByUserId_idx" ON "InternalServiceNote"("addedByUserId");
-- CreateIndex
CREATE INDEX "InternalServiceNote_createdAt_idx" ON "InternalServiceNote"("createdAt");
-- AddForeignKey
ALTER TABLE "InternalServiceNote" ADD CONSTRAINT "InternalServiceNote_serviceId_fkey" FOREIGN KEY ("serviceId") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "InternalServiceNote" ADD CONSTRAINT "InternalServiceNote_addedByUserId_fkey" FOREIGN KEY ("addedByUserId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE;

View File

@@ -0,0 +1,2 @@
-- Enable pg_trgm extension for similarity functions
-- NOTE(review): CREATE EXTENSION needs superuser or appropriate privileges on
-- the target database — confirm the migration role has them in all envs.
CREATE EXTENSION IF NOT EXISTS pg_trgm;

View File

@@ -0,0 +1,5 @@
-- Keeps a history of a service's former slugs (e.g. to redirect old URLs).
-- AlterTable
ALTER TABLE "Service" ADD COLUMN "previousSlugs" TEXT[] DEFAULT ARRAY[]::TEXT[];
-- CreateIndex
-- NOTE(review): a default btree index on TEXT[] only serves whole-array
-- comparisons; lookups like "previousSlugs contains X" (@>, ANY) need a GIN
-- index instead — verify how this column is actually queried.
CREATE INDEX "Service_previousSlugs_idx" ON "Service"("previousSlugs");

View File

@@ -0,0 +1,6 @@
-- Adds an optional qualifier for a service's KYC level (e.g. it may depend on
-- the service's partners) plus a kycLevelDetailsId column (dropped again in a
-- later migration).
-- CreateEnum
CREATE TYPE "KycLevelClarification" AS ENUM ('NONE', 'DEPENDS_ON_PARTNERS');
-- AlterTable
ALTER TABLE "Service" ADD COLUMN "kycLevelClarification" "KycLevelClarification",
ADD COLUMN "kycLevelDetailsId" INTEGER;

View File

@@ -0,0 +1,25 @@
-- Web Push subscriptions per user: endpoint plus the p256dh/auth keys from the
-- browser's PushSubscription object. Subscriptions cascade with their user.
-- CreateTable
CREATE TABLE "PushSubscription" (
"id" SERIAL NOT NULL,
"userId" INTEGER NOT NULL,
"endpoint" TEXT NOT NULL,
"p256dh" TEXT NOT NULL,
"auth" TEXT NOT NULL,
"userAgent" TEXT,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "PushSubscription_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "PushSubscription_endpoint_key" ON "PushSubscription"("endpoint");
-- CreateIndex
CREATE INDEX "PushSubscription_userId_idx" ON "PushSubscription"("userId");
-- CreateIndex
-- NOTE(review): redundant — the unique index above already covers equality
-- lookups on "endpoint". Dropping it would require a schema change too
-- (remove the @@index in schema.prisma), so it is only flagged here.
CREATE INDEX "PushSubscription_endpoint_idx" ON "PushSubscription"("endpoint");
-- AddForeignKey
ALTER TABLE "PushSubscription" ADD CONSTRAINT "PushSubscription_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -0,0 +1,11 @@
/*
Warnings:
- You are about to drop the column `kycLevelDetailsId` on the `Service` table. All the data in the column will be lost.
- Made the column `kycLevelClarification` on table `Service` required. This step will fail if there are existing NULL values in that column.
*/
-- Abandons the kycLevelDetailsId approach and makes the clarification enum
-- mandatory with NONE as the default.
-- NOTE(review): SET NOT NULL without a prior backfill fails if any row still
-- has NULL — presumably all rows were populated before this ran; confirm.
-- AlterTable
ALTER TABLE "Service" DROP COLUMN "kycLevelDetailsId",
ALTER COLUMN "kycLevelClarification" SET NOT NULL,
ALTER COLUMN "kycLevelClarification" SET DEFAULT 'NONE';

View File

@@ -0,0 +1,2 @@
-- Adds a TEST notification type (used to let users trigger test notifications).
-- AlterEnum
ALTER TYPE "NotificationType" ADD VALUE 'TEST';

View File

@@ -0,0 +1,2 @@
-- Allows a comment's order-ID verification to be withdrawn by its author.
-- AlterEnum
ALTER TYPE "OrderIdStatus" ADD VALUE 'WITHDRAWN';

View File

@@ -0,0 +1,2 @@
-- Notifies (presumably moderators) when a new service suggestion is created.
-- AlterEnum
ALTER TYPE "NotificationType" ADD VALUE 'SUGGESTION_CREATED';

View File

@@ -0,0 +1,11 @@
/*
Warnings:
- A unique constraint covering the columns `[feedId]` on the table `User` will be added. If there are existing duplicate values, this will fail.
*/
-- Adds a nullable per-user feed token; made NOT NULL in the next migration
-- once existing users are backfilled. Unique so a feed URL maps to one user.
-- AlterTable
ALTER TABLE "User" ADD COLUMN "feedId" TEXT;
-- CreateIndex
CREATE UNIQUE INDEX "User_feedId_key" ON "User"("feedId");

View File

@@ -0,0 +1,8 @@
/*
Warnings:
- Made the column `feedId` on table `User` required. This step will fail if there are existing NULL values in that column.
*/
-- Finalizes the feedId rollout: every user must now have a feed token.
-- NOTE(review): assumes all existing rows were backfilled between the previous
-- migration and this one — confirm the backfill step exists in deploy scripts.
-- AlterTable
ALTER TABLE "User" ALTER COLUMN "feedId" SET NOT NULL;

View File

@@ -0,0 +1,3 @@
# Please do not edit this file manually
# It should be added in your version-control system (e.g., Git)
provider = "postgresql"

687
web/prisma/schema.prisma Normal file
View File

@@ -0,0 +1,687 @@
// This is your Prisma schema file
// Database connection. NOTE(review): "postgres" is an accepted alias of the
// canonical provider name "postgresql" recorded in migration_lock.toml —
// harmless, but aligning the two strings would avoid confusion.
datasource db {
provider = "postgres"
url = env("DATABASE_URL")
}
// Standard Prisma client generator.
generator client {
provider = "prisma-client-js"
}
// Generates typed Json fields from the /// [TypeName] annotations used below.
generator json {
provider = "prisma-json-types-generator"
}
// Moderation lifecycle of a comment. HUMAN_PENDING indicates it awaits a
// human (as opposed to automated) review — TODO confirm.
enum CommentStatus {
PENDING
HUMAN_PENDING
APPROVED
VERIFIED
REJECTED
}
// Lifecycle of a comment's attached order-ID verification.
enum OrderIdStatus {
PENDING
APPROVED
REJECTED
WITHDRAWN
}
// A user review/comment on a service. Comments form a tree via the
// self-relation "CommentReplies"; root comments may carry a rating.
model Comment {
id Int @id @default(autoincrement())
/// Computed via trigger. Do not update through prisma.
upvotes Int @default(0)
status CommentStatus @default(PENDING)
suspicious Boolean @default(false)
requiresAdminReview Boolean @default(false)
communityNote String?
verificationNote String?
internalNote String?
privateContext String?
orderId String? @db.VarChar(100)
orderIdStatus OrderIdStatus? @default(PENDING)
kycRequested Boolean @default(false)
fundsBlocked Boolean @default(false)
content String
rating Int? @db.SmallInt
ratingActive Boolean @default(false)
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
// Deleting a user or service cascades to its comments (and, via the
// self-relation below, to whole reply subtrees).
author User @relation(fields: [authorId], references: [id], onDelete: Cascade)
authorId Int
service Service @relation(fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
parentId Int?
parent Comment? @relation("CommentReplies", fields: [parentId], references: [id], onDelete: Cascade)
replies Comment[] @relation("CommentReplies")
karmaTransactions KarmaTransaction[]
votes CommentVote[]
notificationPreferenceswatchedComments NotificationPreferences[] @relation("watchedComments")
Notification Notification[]
// One order ID per service. NOTE(review): orderId is nullable, and Postgres
// allows any number of NULLs under a unique constraint — so this only
// constrains comments that actually carry an orderId.
@@unique([serviceId, orderId], name: "unique_orderId_per_service")
@@index([status])
@@index([createdAt])
@@index([serviceId])
@@index([authorId])
@@index([upvotes])
@@index([rating])
@@index([ratingActive])
}
// Verification lifecycle of a service listing.
enum VerificationStatus {
COMMUNITY_CONTRIBUTED
// COMMUNITY_VERIFIED
APPROVED
VERIFICATION_SUCCESS
VERIFICATION_FAILED
}
// Banner shown on a service page (currently only an operational-status notice).
enum ServiceInfoBanner {
NONE
NO_LONGER_OPERATIONAL
}
// Listing visibility; ARCHIVED was added by a later migration.
enum ServiceVisibility {
PUBLIC
UNLISTED
HIDDEN
ARCHIVED
}
// Payment currencies a service accepts.
enum Currency {
MONERO
BITCOIN
LIGHTNING
FIAT
CASH
}
// Severity/kind of a service timeline event; *_SOLVED closes the matching open event.
enum EventType {
WARNING
WARNING_SOLVED
ALERT
ALERT_SOLVED
INFO
NORMAL
UPDATE
}
// Role of a user affiliated with a service (see ServiceUser join table).
enum ServiceUserRole {
OWNER
ADMIN
MODERATOR
SUPPORT
TEAM_MEMBER
}
// Account flag transitions used in ACCOUNT_STATUS_CHANGE notifications;
// each *_TRUE/*_FALSE pair mirrors a boolean flag on User.
enum AccountStatusChange {
ADMIN_TRUE
ADMIN_FALSE
VERIFIED_TRUE
VERIFIED_FALSE
MODERATOR_TRUE
MODERATOR_FALSE
SPAMMER_TRUE
SPAMMER_FALSE
}
// Discriminator for Notification rows; each type pairs with one of the
// optional about* columns on the Notification model.
enum NotificationType {
TEST
COMMENT_STATUS_CHANGE
REPLY_COMMENT_CREATED
COMMUNITY_NOTE_ADDED
/// Comment that is not a reply. May include a rating.
ROOT_COMMENT_CREATED
SUGGESTION_CREATED
SUGGESTION_MESSAGE
SUGGESTION_STATUS_CHANGE
// KARMA_UNLOCK // TODO: [KARMA_UNLOCK] Will be added later, when karma unloks are in the database, not in the code.
KARMA_CHANGE
/// Marked as spammer, promoted to admin, etc.
ACCOUNT_STATUS_CHANGE
EVENT_CREATED
SERVICE_VERIFICATION_STATUS_CHANGE
}
// What changed about a comment, for COMMENT_STATUS_CHANGE notifications.
enum CommentStatusChange {
MARKED_AS_SPAM
UNMARKED_AS_SPAM
MARKED_FOR_ADMIN_REVIEW
UNMARKED_FOR_ADMIN_REVIEW
STATUS_CHANGED_TO_APPROVED
STATUS_CHANGED_TO_VERIFIED
STATUS_CHANGED_TO_REJECTED
STATUS_CHANGED_TO_PENDING
}
// Mirrors VerificationStatus transitions for notification payloads.
enum ServiceVerificationStatusChange {
STATUS_CHANGED_TO_COMMUNITY_CONTRIBUTED
STATUS_CHANGED_TO_APPROVED
STATUS_CHANGED_TO_VERIFICATION_SUCCESS
STATUS_CHANGED_TO_VERIFICATION_FAILED
}
// Mirrors ServiceSuggestionStatus transitions for notification payloads.
enum ServiceSuggestionStatusChange {
STATUS_CHANGED_TO_PENDING
STATUS_CHANGED_TO_APPROVED
STATUS_CHANGED_TO_REJECTED
STATUS_CHANGED_TO_WITHDRAWN
}
// Why karma was granted or removed (see KarmaTransaction).
enum KarmaTransactionAction {
COMMENT_APPROVED
COMMENT_VERIFIED
COMMENT_SPAM
COMMENT_SPAM_REVERTED
COMMENT_UPVOTE
COMMENT_DOWNVOTE
COMMENT_VOTE_REMOVED
SUGGESTION_APPROVED
MANUAL_ADJUSTMENT
}
// Severity of a site-wide announcement banner.
enum AnnouncementType {
INFO
WARNING
ALERT
}
// A single in-app notification. Exactly which about* field is populated
// depends on `type`; the about* relations use the default (SET NULL) delete
// behavior so notifications outlive their subjects, while the owning user's
// deletion cascades.
model Notification {
id Int @id @default(autoincrement())
userId Int
user User @relation("NotificationOwner", fields: [userId], references: [id], onDelete: Cascade)
type NotificationType
read Boolean @default(false)
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
aboutComment Comment? @relation(fields: [aboutCommentId], references: [id])
aboutCommentId Int?
aboutEvent Event? @relation(fields: [aboutEventId], references: [id])
aboutEventId Int?
aboutService Service? @relation(fields: [aboutServiceId], references: [id])
aboutServiceId Int?
aboutServiceSuggestion ServiceSuggestion? @relation(fields: [aboutServiceSuggestionId], references: [id])
aboutServiceSuggestionId Int?
aboutServiceSuggestionMessage ServiceSuggestionMessage? @relation(fields: [aboutServiceSuggestionMessageId], references: [id])
aboutServiceSuggestionMessageId Int?
aboutAccountStatusChange AccountStatusChange?
aboutCommentStatusChange CommentStatusChange?
aboutServiceVerificationStatusChange ServiceVerificationStatusChange?
aboutSuggestionStatusChange ServiceSuggestionStatusChange?
aboutKarmaTransaction KarmaTransaction? @relation(fields: [aboutKarmaTransactionId], references: [id])
aboutKarmaTransactionId Int?
@@index([userId])
@@index([read])
@@index([createdAt])
// Covers the inbox query (a user's unread items, newest first).
@@index([userId, read, createdAt])
// The composite indexes below support dedup/lookup of notifications about a
// specific subject; `map:` names were shortened to fit Postgres' 63-char
// identifier limit.
@@index([userId, type, aboutCommentId])
@@index([userId, type, aboutServiceSuggestionMessageId], map: "idx_notification_suggestion_message")
@@index([userId, type, aboutServiceSuggestionId], map: "idx_notification_suggestion_status")
@@index([userId, type, aboutAccountStatusChange], map: "idx_notification_account_status")
}
// One-per-user (userId is @unique) notification settings: global toggles plus
// per-service and per-comment watch lists (implicit many-to-many tables).
model NotificationPreferences {
id Int @id @default(autoincrement())
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
userId Int @unique
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
enableOnMyCommentStatusChange Boolean @default(true)
enableAutowatchMyComments Boolean @default(true)
enableNotifyPendingRepliesOnWatch Boolean @default(false)
// Minimum absolute karma change that triggers a KARMA_CHANGE notification.
karmaNotificationThreshold Int @default(10)
onEventCreatedForServices Service[] @relation("onEventCreatedForServices")
onRootCommentCreatedForServices Service[] @relation("onRootCommentCreatedForServices")
onVerificationChangeForServices Service[] @relation("onVerificationChangeForServices")
watchedComments Comment[] @relation("watchedComments")
onServiceVerificationChangeFilter NotificationPreferenceOnServiceVerificationChangeFilterFilter[]
}
// A per-verification-status filter on "notify me about service verification
// changes": restricts matches by category, attribute, currency and score.
// NOTE(review): the doubled "FilterFilter" suffix looks like a naming slip,
// but renaming the model would require a table-rename migration — flag only.
model NotificationPreferenceOnServiceVerificationChangeFilterFilter {
id Int @id @default(autoincrement())
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
verificationStatus VerificationStepStatus
notificationPreferences NotificationPreferences @relation(fields: [notificationPreferencesId], references: [id], onDelete: Cascade)
notificationPreferencesId Int
categories Category[]
attributes Attribute[]
currencies Currency[]
/// 0-10
scores Int[]
// At most one filter row per (status, preferences) pair.
@@unique([verificationStatus, notificationPreferencesId])
}
// A timeline entry on a service's page (warnings, alerts, updates, ...).
model Event {
id Int @id @default(autoincrement())
title String
content String
source String?
type EventType
visible Boolean @default(true)
startedAt DateTime
/// If null, the event is ongoing. If same as startedAt, the event is a one-time event. If startedAt is in the future, the event is upcoming.
endedAt DateTime?
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
service Service @relation(fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
Notification Notification[]
@@index([visible])
@@index([startedAt])
@@index([createdAt])
@@index([endedAt])
@@index([type])
@@index([serviceId])
}
// Review state of a service suggestion; WITHDRAWN is set by the suggester.
enum ServiceSuggestionStatus {
PENDING
APPROVED
REJECTED
WITHDRAWN
}
// Whether a suggestion creates a brand-new service or edits an existing one.
enum ServiceSuggestionType {
CREATE_SERVICE
EDIT_SERVICE
}
// Qualifier on a service's KYC level (e.g. depends on the service's partners).
enum KycLevelClarification {
NONE
DEPENDS_ON_PARTNERS
}
// A user-submitted proposal to create or edit a service, with a message
// thread (ServiceSuggestionMessage) and karma granted on approval.
model ServiceSuggestion {
id Int @id @default(autoincrement())
type ServiceSuggestionType
status ServiceSuggestionStatus @default(PENDING)
notes String?
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
userId Int
serviceId Int
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
service Service @relation(fields: [serviceId], references: [id], onDelete: Cascade)
messages ServiceSuggestionMessage[]
Notification Notification[]
KarmaTransaction KarmaTransaction[]
@@index([userId])
@@index([serviceId])
}
// A message in the discussion thread of a ServiceSuggestion; cascades with
// both its author and its suggestion.
model ServiceSuggestionMessage {
id Int @id @default(autoincrement())
content String
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
userId Int
suggestionId Int
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
suggestion ServiceSuggestion @relation(fields: [suggestionId], references: [id], onDelete: Cascade)
notifications Notification[]
@@index([userId])
@@index([suggestionId])
@@index([createdAt])
}
// Central entity: a listed service, its scores, verification state, URLs and
// all relations fanning out from it. Several columns are maintained by DB
// triggers (see the /// notes) and must never be written through Prisma.
model Service {
id Int @id @default(autoincrement())
name String
slug String @unique
// Former slugs, kept so old URLs can be resolved/redirected.
previousSlugs String[] @default([])
description String
categories Category[] @relation("ServiceToCategory")
// 4 is the default (presumably worst) KYC level — TODO confirm scale direction.
kycLevel Int @default(4)
kycLevelClarification KycLevelClarification @default(NONE)
overallScore Int @default(0)
privacyScore Int @default(0)
trustScore Int @default(0)
/// Computed via trigger. Do not update through prisma.
isRecentlyListed Boolean @default(false)
/// Computed via trigger. Do not update through prisma.
averageUserRating Float?
serviceVisibility ServiceVisibility @default(PUBLIC)
serviceInfoBanner ServiceInfoBanner @default(NONE)
serviceInfoBannerNotes String?
verificationStatus VerificationStatus @default(COMMUNITY_CONTRIBUTED)
verificationSummary String?
verificationRequests ServiceVerificationRequest[]
verificationProofMd String?
/// Computed via trigger when the service status is VERIFICATION_SUCCESS. Do not update through prisma.
verifiedAt DateTime?
/// [UserSentiment]
userSentiment Json?
userSentimentAt DateTime?
referral String?
acceptedCurrencies Currency[] @default([])
serviceUrls String[]
tosUrls String[] @default([])
onionUrls String[] @default([])
i2pUrls String[] @default([])
imageUrl String?
/// [TosReview]
tosReview Json?
tosReviewAt DateTime?
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
listedAt DateTime?
comments Comment[]
events Event[]
contactMethods ServiceContactMethod[] @relation("ServiceToContactMethod")
attributes ServiceAttribute[]
verificationSteps VerificationStep[]
suggestions ServiceSuggestion[]
// NOTE(review): relation name "ServiceRecievedNotes" misspells "Received";
// renaming it is safe at the Prisma level (relation names are client-side)
// but should be coordinated with the other side of the relation.
internalNotes InternalServiceNote[] @relation("ServiceRecievedNotes")
onEventCreatedForServices NotificationPreferences[] @relation("onEventCreatedForServices")
onRootCommentCreatedForServices NotificationPreferences[] @relation("onRootCommentCreatedForServices")
onVerificationChangeForServices NotificationPreferences[] @relation("onVerificationChangeForServices")
Notification Notification[]
affiliatedUsers ServiceUser[] @relation("ServiceUsers")
@@index([listedAt])
@@index([overallScore])
@@index([privacyScore])
@@index([trustScore])
@@index([averageUserRating])
@@index([name])
@@index([verificationStatus])
@@index([kycLevel])
@@index([createdAt])
@@index([updatedAt])
// NOTE(review): @@index([slug]) duplicates the @unique index on slug; the
// btree index on previousSlugs (TEXT[]) does not help containment queries —
// both flagged only, since fixing them requires a migration.
@@index([slug])
@@index([previousSlugs])
}
/// A way to contact a service (email, phone, URL, ...).
model ServiceContactMethod {
id Int @id @default(autoincrement())
/// Only include it if you want to override the formatted value.
label String?
/// Including the protocol (e.g. "mailto:", "tel:", "https://")
value String
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
services Service @relation("ServiceToContactMethod", fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
}
/// Which score an attribute's points feed into (see the score triggers).
enum AttributeCategory {
PRIVACY
TRUST
}
/// How an attribute is presented (its sentiment/severity).
enum AttributeType {
GOOD
BAD
WARNING
INFO
}
/// A reusable scored trait that can be attached to many services.
/// Changing its points queues score-recalculation jobs via a DB trigger.
model Attribute {
id Int @id @default(autoincrement())
slug String @unique
title String
/// Markdown
description String
/// Signed contribution to the privacy score when category = PRIVACY.
privacyPoints Int @default(0)
/// Signed contribution to the trust score when category = TRUST.
trustPoints Int @default(0)
category AttributeCategory
type AttributeType
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
services ServiceAttribute[]
notificationPreferencesOnServiceVerificationChange NotificationPreferenceOnServiceVerificationChangeFilterFilter[]
}
/// Staff-only note about a user. Survives deletion of its author
/// (addedByUserId is set to NULL) but is removed with the target user.
model InternalUserNote {
id Int @id @default(autoincrement())
/// Markdown
content String
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
user User @relation("UserRecievedNotes", fields: [userId], references: [id], onDelete: Cascade)
userId Int
addedByUser User? @relation("UserAddedNotes", fields: [addedByUserId], references: [id], onDelete: SetNull)
addedByUserId Int?
@@index([userId])
@@index([addedByUserId])
@@index([createdAt])
}
/// Staff-only note about a service; mirrors InternalUserNote.
model InternalServiceNote {
id Int @id @default(autoincrement())
/// Markdown
content String
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
service Service @relation("ServiceRecievedNotes", fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
addedByUser User? @relation("UserAddedServiceNotes", fields: [addedByUserId], references: [id], onDelete: SetNull)
addedByUserId Int?
@@index([serviceId])
@@index([addedByUserId])
@@index([createdAt])
}
/// An account. Authentication appears token-based (secretTokenHash);
/// totalKarma is denormalized and maintained by the karma triggers.
model User {
id Int @id @default(autoincrement())
name String @unique
displayName String?
link String?
picture String?
spammer Boolean @default(false)
verified Boolean @default(false)
admin Boolean @default(false)
moderator Boolean @default(false)
verifiedLink String?
/// Hash of the user's login token; never store the token itself.
secretTokenHash String @unique
/// Opaque id used for personal feeds.
feedId String @unique @default(cuid(2))
/// Computed via trigger. Do not update through prisma.
totalKarma Int @default(0)
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
lastLoginAt DateTime @default(now())
comments Comment[]
karmaTransactions KarmaTransaction[]
grantedKarmaTransactions KarmaTransaction[] @relation("KarmaGrantedBy")
commentVotes CommentVote[]
suggestions ServiceSuggestion[]
suggestionMessages ServiceSuggestionMessage[]
internalNotes InternalUserNote[] @relation("UserRecievedNotes")
addedInternalNotes InternalUserNote[] @relation("UserAddedNotes")
addedServiceNotes InternalServiceNote[] @relation("UserAddedServiceNotes")
verificationRequests ServiceVerificationRequest[]
notifications Notification[] @relation("NotificationOwner")
notificationPreferences NotificationPreferences?
serviceAffiliations ServiceUser[] @relation("UserServices")
pushSubscriptions PushSubscription[]
@@index([createdAt])
@@index([totalKarma])
}
/// One user's vote on a comment. Vote changes feed the karma trigger,
/// which also keeps the comment's upvote counter in sync.
model CommentVote {
id Int @id @default(autoincrement())
downvote Boolean @default(false) // false = upvote, true = downvote
comment Comment @relation(fields: [commentId], references: [id], onDelete: Cascade)
commentId Int
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
userId Int
createdAt DateTime @default(now())
@@unique([commentId, userId]) // Ensure one vote per user per comment
@@index([commentId])
@@index([userId])
}
/// Join table linking services to attributes; composite primary key,
/// so a service can carry each attribute at most once.
model ServiceAttribute {
service Service @relation(fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
attribute Attribute @relation(fields: [attributeId], references: [id], onDelete: Cascade)
attributeId Int
createdAt DateTime @default(now())
@@id([serviceId, attributeId])
}
/// Ledger of karma changes. Rows written by the DB triggers arrive with
/// processed = true; unprocessed MANUAL_ADJUSTMENT rows are applied by the
/// manual-adjustment trigger on insert.
model KarmaTransaction {
id Int @id @default(autoincrement())
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
userId Int
action KarmaTransactionAction
points Int @default(0)
/// Source comment, when the transaction came from a comment event.
comment Comment? @relation(fields: [commentId], references: [id], onDelete: Cascade)
commentId Int?
/// Source suggestion, when the transaction came from a suggestion approval.
suggestion ServiceSuggestion? @relation(fields: [suggestionId], references: [id], onDelete: Cascade)
suggestionId Int?
description String
processed Boolean @default(false)
createdAt DateTime @default(now())
/// Staff member who issued a manual grant, if any.
grantedBy User? @relation("KarmaGrantedBy", fields: [grantedByUserId], references: [id], onDelete: SetNull)
grantedByUserId Int?
Notification Notification[]
@@index([createdAt])
@@index([userId])
@@index([processed])
@@index([suggestionId])
@@index([commentId])
@@index([grantedByUserId])
}
/// Progress state of one step of a service's verification checklist.
enum VerificationStepStatus {
PENDING
IN_PROGRESS
PASSED
FAILED
}
/// One item in a service's verification checklist, with optional evidence.
model VerificationStep {
id Int @id @default(autoincrement())
title String
description String
status VerificationStepStatus @default(PENDING)
/// Markdown evidence supporting the step's outcome.
evidenceMd String?
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
service Service @relation(fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
@@index([serviceId])
@@index([status])
@@index([createdAt])
}
/// A browsable category that services belong to (many-to-many).
model Category {
id Int @id @default(autoincrement())
name String @unique
icon String
slug String @unique
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
services Service[] @relation("ServiceToCategory")
notificationPreferencesOnServiceVerificationChange NotificationPreferenceOnServiceVerificationChangeFilterFilter[]
@@index([name])
@@index([slug])
}
/// A user asking for a service to be verified; at most one request per
/// (service, user) pair.
model ServiceVerificationRequest {
id Int @id @default(autoincrement())
service Service @relation(fields: [serviceId], references: [id], onDelete: Cascade)
serviceId Int
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
userId Int
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
@@unique([serviceId, userId])
@@index([serviceId])
@@index([userId])
@@index([createdAt])
}
/// Work-queue row asking for a service's scores to be recalculated
/// asynchronously; filled by the attribute-change trigger. serviceId is
/// unique so re-queuing the same service just resets the existing job.
model ServiceScoreRecalculationJob {
id Int @id @default(autoincrement())
serviceId Int @unique
createdAt DateTime @default(now())
/// NULL while the job is still pending.
processedAt DateTime? @updatedAt
@@index([processedAt])
@@index([createdAt])
}
/// Affiliation between a user and a service (e.g. staff/owner role);
/// one row per (user, service) pair.
model ServiceUser {
id Int @id @default(autoincrement())
userId Int
user User @relation("UserServices", fields: [userId], references: [id], onDelete: Cascade)
serviceId Int
service Service @relation("ServiceUsers", fields: [serviceId], references: [id], onDelete: Cascade)
role ServiceUserRole
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@unique([userId, serviceId])
@@index([userId])
@@index([serviceId])
@@index([role])
}
/// Site-wide banner shown between startDate and endDate while isActive.
model Announcement {
id Int @id @default(autoincrement())
content String
type AnnouncementType
link String?
linkText String?
startDate DateTime
/// NULL means no scheduled end.
endDate DateTime?
isActive Boolean @default(true)
createdAt DateTime @default(now())
updatedAt DateTime @default(now()) @updatedAt
@@index([isActive, startDate, endDate])
}
/// A Web Push subscription for one of a user's devices.
model PushSubscription {
id Int @id @default(autoincrement())
userId Int
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
/// Push-service endpoint URL; unique per device subscription.
endpoint String @unique
/// Public key for encryption
p256dh String
/// Authentication secret
auth String
/// To identify different devices
userAgent String?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@index([userId])
@@index([endpoint])
}

1419
web/prisma/seed.ts Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,291 @@
-- This script manages user karma based on comment interactions. It handles karma points
-- for comment approvals, verifications, spam status changes, and votes (upvotes/downvotes).
-- Karma transactions are recorded, and user karma totals are updated accordingly.
-- Drop existing triggers first (must go before the functions they reference).
DROP TRIGGER IF EXISTS comment_status_change_trigger ON "Comment";
DROP TRIGGER IF EXISTS comment_suspicious_change_trigger ON "Comment";
DROP TRIGGER IF EXISTS comment_upvote_change_trigger ON "Comment";
DROP TRIGGER IF EXISTS comment_vote_change_trigger ON "CommentVote";
DROP TRIGGER IF EXISTS suggestion_status_change_trigger ON "ServiceSuggestion";
DROP TRIGGER IF EXISTS manual_karma_adjustment_trigger ON "KarmaTransaction";
-- Drop existing functions.
-- NOTE: the previous "name()" form only matches a ZERO-argument overload, so
-- for the functions below that take arguments (insert_karma_transaction,
-- update_user_karma, the NEW/OLD helpers) it silently dropped nothing.
-- Omitting the argument list (PostgreSQL 10+) drops the function regardless
-- of its signature, as long as the name is unambiguous.
DROP FUNCTION IF EXISTS handle_comment_upvote_change;
DROP FUNCTION IF EXISTS handle_comment_status_change;
DROP FUNCTION IF EXISTS handle_comment_approval;
DROP FUNCTION IF EXISTS handle_comment_verification;
DROP FUNCTION IF EXISTS handle_comment_spam_status;
DROP FUNCTION IF EXISTS handle_comment_vote_change;
DROP FUNCTION IF EXISTS insert_karma_transaction;
DROP FUNCTION IF EXISTS update_user_karma;
DROP FUNCTION IF EXISTS handle_suggestion_status_change;
DROP FUNCTION IF EXISTS handle_manual_karma_adjustment;
-- Helper: record a karma transaction for a user.
-- p_comment_id / p_suggestion_id link the row to its source (either may be
-- NULL). The row is inserted already marked processed = true, so the
-- manual-adjustment trigger below leaves it alone.
CREATE OR REPLACE FUNCTION insert_karma_transaction(
p_user_id INT,
p_points INT,
p_action TEXT,
p_comment_id INT,
p_description TEXT,
p_suggestion_id INT DEFAULT NULL
) RETURNS VOID AS $$
BEGIN
INSERT INTO "KarmaTransaction" (
"userId",
"action",
"points",
"commentId",
"suggestionId",
"description",
"processed",
"createdAt"
)
VALUES (
p_user_id,
p_action::"KarmaTransactionAction",
p_points,
p_comment_id,
p_suggestion_id,
p_description,
true,
NOW()
);
END;
$$ LANGUAGE plpgsql;
-- Helper: apply a signed karma delta to a user's cached "totalKarma".
CREATE OR REPLACE FUNCTION update_user_karma(
p_user_id INT,
p_karma_change INT
) RETURNS VOID AS $$
BEGIN
UPDATE "User" AS u
SET "totalKarma" = u."totalKarma" + p_karma_change
WHERE u.id = p_user_id;
END;
$$ LANGUAGE plpgsql;
-- Handle comment approval.
-- Plain helper (NOT a trigger function): handle_comment_status_change()
-- passes the trigger's NEW/OLD "Comment" rows in as RECORD arguments.
-- Awards +1 karma to the comment's author on the PENDING -> APPROVED
-- transition and records a matching, already-processed KarmaTransaction.
CREATE OR REPLACE FUNCTION handle_comment_approval(
NEW RECORD,
OLD RECORD
) RETURNS VOID AS $$
BEGIN
IF OLD.status = 'PENDING' AND NEW.status = 'APPROVED' THEN
PERFORM insert_karma_transaction(
NEW."authorId",
1,
'COMMENT_APPROVED',
NEW.id,
format('Your comment #comment-%s in %s has been approved!',
NEW.id,
(SELECT name FROM "Service" WHERE id = NEW."serviceId"))
);
PERFORM update_user_karma(NEW."authorId", 1);
END IF;
END;
$$ LANGUAGE plpgsql;
-- Handle comment verification.
-- Plain helper called from handle_comment_status_change() with NEW/OLD rows.
-- Awards +5 karma to the author whenever status transitions into VERIFIED
-- from any other status.
CREATE OR REPLACE FUNCTION handle_comment_verification(
NEW RECORD,
OLD RECORD
) RETURNS VOID AS $$
BEGIN
IF NEW.status = 'VERIFIED' AND OLD.status != 'VERIFIED' THEN
PERFORM insert_karma_transaction(
NEW."authorId",
5,
'COMMENT_VERIFIED',
NEW.id,
format('Your comment #comment-%s in %s has been verified!',
NEW.id,
(SELECT name FROM "Service" WHERE id = NEW."serviceId"))
);
PERFORM update_user_karma(NEW."authorId", 5);
END IF;
END;
$$ LANGUAGE plpgsql;
-- Handle spam status changes.
-- Plain helper called from handle_comment_status_change() with NEW/OLD rows.
-- The penalty is symmetric: -10 karma when a comment is marked suspicious,
-- +10 when the mark is reverted, so flag/unflag cycles net to zero.
CREATE OR REPLACE FUNCTION handle_comment_spam_status(
NEW RECORD,
OLD RECORD
) RETURNS VOID AS $$
BEGIN
-- Handle marking as spam
IF NEW.suspicious = true AND OLD.suspicious = false THEN
PERFORM insert_karma_transaction(
NEW."authorId",
-10,
'COMMENT_SPAM',
NEW.id,
format('Your comment #comment-%s in %s has been marked as spam.',
NEW.id,
(SELECT name FROM "Service" WHERE id = NEW."serviceId"))
);
PERFORM update_user_karma(NEW."authorId", -10);
-- Handle unmarking as spam
ELSIF NEW.suspicious = false AND OLD.suspicious = true THEN
PERFORM insert_karma_transaction(
NEW."authorId",
10,
'COMMENT_SPAM_REVERTED',
NEW.id,
format('Your comment #comment-%s in %s is no longer marked as spam.',
NEW.id,
(SELECT name FROM "Service" WHERE id = NEW."serviceId"))
);
PERFORM update_user_karma(NEW."authorId", 10);
END IF;
END;
$$ LANGUAGE plpgsql;
-- Trigger function for "CommentVote" INSERT/UPDATE/DELETE.
-- Awards or removes karma for the comment's author, records a
-- KarmaTransaction, and keeps the comment's denormalized "upvotes" counter
-- in sync (+/-1 on insert/delete, +/-2 when a vote flips direction).
CREATE OR REPLACE FUNCTION handle_comment_vote_change()
RETURNS TRIGGER AS $$
DECLARE
karma_points INT;
vote_action "KarmaTransactionAction";
vote_description TEXT;
comment_author_id INT;
service_name TEXT;
upvote_change INT := 0; -- Variable to track change in upvotes
BEGIN
-- FIX: ignore UPDATEs that do not actually flip the vote direction.
-- Without this guard a no-op write (e.g. an upsert's ON CONFLICT DO UPDATE
-- re-writing the same value) granted +/-2 karma every time it ran.
IF TG_OP = 'UPDATE' AND NEW.downvote = OLD.downvote THEN
RETURN NEW;
END IF;
-- Get comment author and service info
SELECT c."authorId", s.name INTO comment_author_id, service_name
FROM "Comment" c
JOIN "Service" s ON c.id = COALESCE(NEW."commentId", OLD."commentId") AND c."serviceId" = s.id;
-- FIX: if the comment row is no longer visible (e.g. votes cascading away
-- while the comment itself is deleted) there is nobody to award karma to;
-- inserting a transaction with a NULL userId would abort the statement.
IF comment_author_id IS NULL THEN
RETURN NEW;
END IF;
-- Calculate karma impact based on vote type
IF TG_OP = 'INSERT' THEN
-- New vote
karma_points := CASE WHEN NEW.downvote THEN -1 ELSE 1 END;
vote_action := CASE WHEN NEW.downvote THEN 'COMMENT_DOWNVOTE' ELSE 'COMMENT_UPVOTE' END;
vote_description := format('Your comment #comment-%s in %s received %s',
NEW."commentId",
service_name,
CASE WHEN NEW.downvote THEN 'a downvote' ELSE 'an upvote' END);
upvote_change := CASE WHEN NEW.downvote THEN -1 ELSE 1 END; -- -1 for downvote, +1 for upvote
ELSIF TG_OP = 'DELETE' THEN
-- Removed vote
karma_points := CASE WHEN OLD.downvote THEN 1 ELSE -1 END;
vote_action := 'COMMENT_VOTE_REMOVED';
vote_description := format('A vote was removed from your comment #comment-%s in %s',
OLD."commentId",
service_name);
upvote_change := CASE WHEN OLD.downvote THEN 1 ELSE -1 END; -- +1 if downvote removed, -1 if upvote removed
ELSIF TG_OP = 'UPDATE' THEN
-- Changed vote (from upvote to downvote or vice versa)
karma_points := CASE WHEN NEW.downvote THEN -2 ELSE 2 END;
vote_action := CASE WHEN NEW.downvote THEN 'COMMENT_DOWNVOTE' ELSE 'COMMENT_UPVOTE' END;
vote_description := format('Your comment #comment-%s in %s vote changed to %s',
NEW."commentId",
service_name,
CASE WHEN NEW.downvote THEN 'downvote' ELSE 'upvote' END);
upvote_change := CASE WHEN NEW.downvote THEN -2 ELSE 2 END; -- -2 if upvote->downvote, +2 if downvote->upvote
END IF;
-- Record karma transaction and update user karma
PERFORM insert_karma_transaction(
comment_author_id,
karma_points,
vote_action,
COALESCE(NEW."commentId", OLD."commentId"),
vote_description
);
PERFORM update_user_karma(comment_author_id, karma_points);
-- Update comment's upvotes count incrementally
UPDATE "Comment"
SET upvotes = upvotes + upvote_change
WHERE id = COALESCE(NEW."commentId", OLD."commentId");
-- AFTER triggers ignore the return value (NEW is NULL on DELETE; harmless).
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Main trigger function for "Comment" status/suspicious updates.
-- Fans out to the three plain helpers above, passing the trigger's NEW/OLD
-- rows; each helper decides for itself whether its transition applies.
CREATE OR REPLACE FUNCTION handle_comment_status_change()
RETURNS TRIGGER AS $$
BEGIN
PERFORM handle_comment_approval(NEW, OLD);
PERFORM handle_comment_verification(NEW, OLD);
PERFORM handle_comment_spam_status(NEW, OLD);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create triggers.
-- Both column-scoped "Comment" triggers share handle_comment_status_change();
-- the helpers it calls ignore transitions that don't concern them.
CREATE TRIGGER comment_status_change_trigger
AFTER UPDATE OF status
ON "Comment"
FOR EACH ROW
EXECUTE FUNCTION handle_comment_status_change();
CREATE TRIGGER comment_suspicious_change_trigger
AFTER UPDATE OF suspicious
ON "Comment"
FOR EACH ROW
EXECUTE FUNCTION handle_comment_status_change();
-- Vote rows fire on every DML operation; the function inspects TG_OP.
CREATE TRIGGER comment_vote_change_trigger
AFTER INSERT OR UPDATE OR DELETE
ON "CommentVote"
FOR EACH ROW
EXECUTE FUNCTION handle_comment_vote_change();
-- Function to handle suggestion status changes and award karma.
-- Trigger function for "ServiceSuggestion" status updates: awards 10 karma
-- to the suggester the first time the suggestion becomes APPROVED.
CREATE OR REPLACE FUNCTION handle_suggestion_status_change()
RETURNS TRIGGER AS $$
DECLARE
service_name TEXT;
BEGIN
-- Award karma for first approval.
-- IS DISTINCT FROM is NULL-safe, so a NULL old status also counts as
-- "not yet approved"; re-approving an already-APPROVED row does nothing.
IF OLD.status IS DISTINCT FROM 'APPROVED' AND NEW.status = 'APPROVED' THEN
-- Fetch service name for the description
SELECT name INTO service_name FROM "Service" WHERE id = NEW."serviceId";
-- Insert karma transaction, linking it to the suggestion
PERFORM insert_karma_transaction(
NEW."userId",
10,
'SUGGESTION_APPROVED',
NULL, -- p_comment_id (not applicable)
format('Your suggestion for service ''%s'' has been approved!', service_name),
NEW.id -- p_suggestion_id
);
-- Update user's total karma
PERFORM update_user_karma(NEW."userId", 10);
END IF;
RETURN NEW; -- Result is ignored since this is an AFTER trigger
END;
$$ LANGUAGE plpgsql;
-- Create trigger: fire only when the status column is written.
CREATE TRIGGER suggestion_status_change_trigger
AFTER UPDATE OF status
ON "ServiceSuggestion"
FOR EACH ROW
EXECUTE FUNCTION handle_suggestion_status_change();
-- Function to handle manual karma adjustments.
-- AFTER INSERT trigger on "KarmaTransaction": applies unprocessed
-- MANUAL_ADJUSTMENT rows to the user's cached totalKarma. Rows written by
-- insert_karma_transaction() arrive with processed = true and are skipped.
-- NOTE(review): the row is left with processed = false even after its karma
-- has been applied here; if any background worker also consumes unprocessed
-- transactions this could double-apply the adjustment -- confirm.
CREATE OR REPLACE FUNCTION handle_manual_karma_adjustment()
RETURNS TRIGGER AS $$
BEGIN
-- Only process MANUAL_ADJUSTMENT transactions that are not yet processed
IF NEW.processed = false AND NEW.action = 'MANUAL_ADJUSTMENT' THEN
-- Update user's total karma
PERFORM update_user_karma(NEW."userId", NEW.points);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create trigger for manual karma adjustments
CREATE TRIGGER manual_karma_adjustment_trigger
AFTER INSERT
ON "KarmaTransaction"
FOR EACH ROW
EXECUTE FUNCTION handle_manual_karma_adjustment();

View File

@@ -0,0 +1,277 @@
-- This script defines PostgreSQL functions and triggers for managing service scores:
-- 1. Automatically calculates and updates privacy, trust, and overall scores
-- for services when services or their attributes change.
-- 2. Updates the isRecentlyListed flag for services listed within the last 15 days.
-- 3. Queues asynchronous score recalculation in "ServiceScoreRecalculationJob"
-- when an "Attribute" definition (e.g., points) is updated, ensuring
-- efficient handling of widespread score updates.
-- Drop existing triggers first (they reference the functions dropped below).
DROP TRIGGER IF EXISTS service_score_update_trigger ON "Service";
DROP TRIGGER IF EXISTS service_attribute_change_trigger ON "ServiceAttribute";
DROP TRIGGER IF EXISTS attribute_change_trigger ON "Attribute";
-- Drop existing functions.
-- NOTE: the previous "name()" form only matches a ZERO-argument overload, so
-- for the score helpers (which take INT arguments) it silently dropped
-- nothing. Omitting the argument list (PostgreSQL 10+) drops the function
-- regardless of signature, as long as the name is unambiguous.
DROP FUNCTION IF EXISTS calculate_service_scores;
DROP FUNCTION IF EXISTS calculate_privacy_score;
DROP FUNCTION IF EXISTS calculate_trust_score;
DROP FUNCTION IF EXISTS calculate_overall_score;
-- Legacy name kept for cleanup of older databases...
DROP FUNCTION IF EXISTS recalculate_scores_for_attribute;
-- ...plus the function this script actually creates, which was never dropped.
DROP FUNCTION IF EXISTS queue_service_score_recalculation_for_attribute;
-- Compute a 0-100 privacy score for one service.
-- Starts from a neutral baseline of 50, then adjusts for the KYC level,
-- Tor/I2P availability, Monero acceptance, and the summed points of all
-- PRIVACY-category attributes, clamping the result to [0, 100].
CREATE OR REPLACE FUNCTION calculate_privacy_score(service_id INT)
RETURNS INT AS $$
DECLARE
kyc_factor INT;
onion_factor INT;
i2p_factor INT;
monero_factor INT;
attributes_score INT := 0;
privacy_score INT;
BEGIN
-- Read every per-service signal in a single pass over "Service"
-- (if the service does not exist, all factors stay NULL and the final
-- clamp behaves exactly as before).
SELECT
CASE "kycLevel"
WHEN 0 THEN 25 -- No KYC is best for privacy
WHEN 1 THEN 10 -- Minimal KYC
WHEN 2 THEN -5 -- Moderate KYC
WHEN 3 THEN -15 -- More KYC
WHEN 4 THEN -25 -- Full mandatory KYC
ELSE 0 -- Default to no change
END,
-- array_length() is NULL for an empty array, hence the COALESCE.
CASE WHEN COALESCE(array_length("onionUrls", 1), 0) > 0 THEN 5 ELSE 0 END,
CASE WHEN COALESCE(array_length("i2pUrls", 1), 0) > 0 THEN 5 ELSE 0 END,
CASE WHEN 'MONERO' = ANY("acceptedCurrencies") THEN 5 ELSE 0 END
INTO kyc_factor, onion_factor, i2p_factor, monero_factor
FROM "Service"
WHERE "id" = service_id;
-- Sum the points of all PRIVACY attributes attached to the service.
SELECT COALESCE(SUM(a."privacyPoints"), 0)
INTO attributes_score
FROM "ServiceAttribute" sa
JOIN "Attribute" a ON sa."attributeId" = a."id"
WHERE sa."serviceId" = service_id AND a."category" = 'PRIVACY';
-- Base 50, then clamp to [0, 100].
privacy_score := 50 + kyc_factor + onion_factor + i2p_factor + monero_factor + attributes_score;
RETURN GREATEST(0, LEAST(100, privacy_score));
END;
$$ LANGUAGE plpgsql;
-- Calculate trust score based on service attributes and verification status.
-- NOTE: despite the name, this function also has a side effect -- it keeps
-- "Service"."isRecentlyListed" in sync while computing the recently-listed
-- penalty, so it must run in a context where that UPDATE is acceptable.
CREATE OR REPLACE FUNCTION calculate_trust_score(service_id INT)
RETURNS INT AS $$
DECLARE
trust_score INT := 0;
verification_factor INT;
attributes_score INT := 0;
recently_listed_factor INT := 0;
tos_penalty_factor INT := 0;
BEGIN
-- Get verification status factor
SELECT
CASE
WHEN "verificationStatus" = 'VERIFICATION_SUCCESS' THEN 10
WHEN "verificationStatus" = 'APPROVED' THEN 5
WHEN "verificationStatus" = 'COMMUNITY_CONTRIBUTED' THEN 0
WHEN "verificationStatus" = 'VERIFICATION_FAILED' THEN -50
ELSE 0
END
INTO verification_factor
FROM "Service"
WHERE id = service_id;
-- Calculate score from trust attributes - directly use the points
SELECT COALESCE(SUM(a."trustPoints"), 0)
INTO attributes_score
FROM "ServiceAttribute" sa
JOIN "Attribute" a ON sa."attributeId" = a.id
WHERE sa."serviceId" = service_id AND a.category = 'TRUST';
-- Apply penalty if service was listed within the last 15 days.
-- Only APPROVED (not yet fully verified) services get the penalty.
IF EXISTS (
SELECT 1
FROM "Service"
WHERE id = service_id
AND "listedAt" IS NOT NULL
AND "verificationStatus" = 'APPROVED'
AND (NOW() - "listedAt") <= INTERVAL '15 days'
) THEN
recently_listed_factor := -10;
-- Update the isRecentlyListed flag to true
UPDATE "Service"
SET "isRecentlyListed" = TRUE
WHERE id = service_id;
ELSE
-- Update the isRecentlyListed flag to false
UPDATE "Service"
SET "isRecentlyListed" = FALSE
WHERE id = service_id;
END IF;
-- Apply penalty if ToS cannot be analyzed
-- (a review was attempted -- tosReviewAt set -- but produced no result).
IF EXISTS (
SELECT 1
FROM "Service"
WHERE id = service_id
AND "tosReviewAt" IS NOT NULL
AND "tosReview" IS NULL
) THEN
tos_penalty_factor := -3;
END IF;
-- Base 50, then clamp the final trust score to [0, 100].
trust_score := 50 + verification_factor + attributes_score + recently_listed_factor + tos_penalty_factor;
trust_score := GREATEST(0, LEAST(100, trust_score));
RETURN trust_score;
END;
$$ LANGUAGE plpgsql;
-- Blend privacy (60%) and trust (40%) into an overall 0-10 score.
-- The service_id parameter is currently unused; it is kept so the call
-- sites and any stored signature stay stable.
CREATE OR REPLACE FUNCTION calculate_overall_score(service_id INT, privacy_score INT, trust_score INT)
RETURNS INT AS $$
BEGIN
RETURN GREATEST(0, LEAST(10, CAST(ROUND((privacy_score * 0.6 + trust_score * 0.4) / 10.0) AS INT)));
END;
$$ LANGUAGE plpgsql;
-- Main trigger function to calculate all scores for a service.
-- Shared by the "Service" and "ServiceAttribute" triggers below; it works
-- out which service row is affected from TG_TABLE_NAME/TG_OP, recomputes
-- the three scores, and writes them back (the writing UPDATE re-fires the
-- Service trigger once; the pg_trigger_depth() guard on the triggers stops
-- the recursion there).
CREATE OR REPLACE FUNCTION calculate_service_scores()
RETURNS TRIGGER AS $$
DECLARE
privacy_score INT;
trust_score INT;
overall_score INT;
service_id INT;
BEGIN
-- Determine which service ID to use based on the trigger context and table
IF TG_TABLE_NAME = 'Service' THEN
IF TG_OP = 'INSERT' OR TG_OP = 'UPDATE' THEN
service_id := NEW."id";
END IF;
ELSIF TG_TABLE_NAME = 'ServiceAttribute' THEN
IF TG_OP = 'DELETE' THEN
service_id := OLD."serviceId";
ELSE -- INSERT or UPDATE
service_id := NEW."serviceId";
END IF;
END IF;
-- Calculate each score
privacy_score := calculate_privacy_score(service_id);
trust_score := calculate_trust_score(service_id);
overall_score := calculate_overall_score(service_id, privacy_score, trust_score);
-- Cap score if service is flagged as scam (verificationStatus = 'VERIFICATION_FAILED')
IF (SELECT "verificationStatus" FROM "Service" WHERE "id" = service_id) = 'VERIFICATION_FAILED' THEN
IF overall_score > 3 THEN
overall_score := 3;
ELSIF overall_score < 0 THEN
overall_score := 0;
END IF;
END IF;
-- Update the service with the new scores
UPDATE "Service"
SET
"privacyScore" = privacy_score,
"trustScore" = trust_score,
"overallScore" = overall_score
WHERE "id" = service_id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create trigger to recalculate scores when a service is created or updated.
CREATE TRIGGER service_score_update_trigger
AFTER INSERT OR UPDATE
ON "Service"
FOR EACH ROW
WHEN (pg_trigger_depth() < 2) -- Prevent recursive triggering
EXECUTE FUNCTION calculate_service_scores();
-- Create trigger to recalculate scores when service attributes change.
CREATE TRIGGER service_attribute_change_trigger
AFTER INSERT OR UPDATE OR DELETE
ON "ServiceAttribute"
FOR EACH ROW
WHEN (pg_trigger_depth() < 2) -- Prevent recursive triggering
EXECUTE FUNCTION calculate_service_scores();
-- Trigger function for "Attribute" UPDATE: when a field that influences
-- scoring changes, queue a "ServiceScoreRecalculationJob" for every service
-- that carries the attribute, so the (potentially large) recalculation can
-- happen asynchronously.
CREATE OR REPLACE FUNCTION queue_service_score_recalculation_for_attribute()
RETURNS TRIGGER AS $$
BEGIN
-- Only queue work when a scoring-relevant field actually changed.
-- IS DISTINCT FROM is NULL-safe, unlike the previous != comparisons.
IF (TG_OP = 'UPDATE' AND (
OLD."privacyPoints" IS DISTINCT FROM NEW."privacyPoints" OR
OLD."trustPoints" IS DISTINCT FROM NEW."trustPoints" OR
OLD."type" IS DISTINCT FROM NEW."type" OR
OLD."category" IS DISTINCT FROM NEW."category"
)) THEN
-- Single set-based upsert instead of the previous per-service loop.
-- DISTINCT matters: ON CONFLICT DO UPDATE may not touch the same row
-- twice within one statement. Re-queuing an already-queued service
-- simply resets its job to unprocessed with a fresh timestamp.
INSERT INTO "ServiceScoreRecalculationJob" ("serviceId", "createdAt", "processedAt")
SELECT DISTINCT sa."serviceId", NOW(), NULL
FROM "ServiceAttribute" sa
WHERE sa."attributeId" = NEW.id
ON CONFLICT ("serviceId") DO UPDATE SET "processedAt" = NULL, "createdAt" = NOW();
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create constraint trigger to queue score recalculation when attributes are
-- updated. Deferred to commit time so one job batch is queued per
-- transaction rather than mid-transaction.
-- (This DROP repeats the one at the top of the script; harmless.)
DROP TRIGGER IF EXISTS attribute_change_trigger ON "Attribute";
CREATE CONSTRAINT TRIGGER attribute_change_trigger
AFTER UPDATE
ON "Attribute"
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW
WHEN (pg_trigger_depth() < 2)
EXECUTE FUNCTION queue_service_score_recalculation_for_attribute();

View File

@@ -0,0 +1,57 @@
-- This script defines a PostgreSQL function and trigger to automatically calculate
-- and update the average user rating for services based on associated comments.
-- The average rating is recalculated whenever comments are added, updated, or deleted.
-- Drop existing triggers first
DROP TRIGGER IF EXISTS comment_average_rating_trigger ON "Comment";
-- Drop existing functions (the zero-argument signature is correct here:
-- trigger functions are declared with no arguments).
DROP FUNCTION IF EXISTS calculate_average_rating();
-- Trigger function: recompute "Service"."averageUserRating" from the
-- service's eligible comments whenever a comment row changes.
CREATE OR REPLACE FUNCTION calculate_average_rating()
RETURNS TRIGGER AS $$
DECLARE
affected_service_id INT;
new_average DECIMAL;
BEGIN
-- On DELETE only OLD is populated; otherwise use NEW.
affected_service_id := CASE TG_OP
WHEN 'DELETE' THEN OLD."serviceId"
ELSE NEW."serviceId"
END;
-- Average over top-level (non-reply), rated, non-suspicious comments whose
-- status is APPROVED or VERIFIED and whose rating is still active.
-- AVG over zero qualifying rows yields NULL, which clears the rating.
SELECT AVG(rating)
INTO new_average
FROM "Comment"
WHERE "serviceId" = affected_service_id
AND "parentId" IS NULL
AND rating IS NOT NULL
AND status IN ('APPROVED', 'VERIFIED')
AND "ratingActive" = true
AND suspicious = false;
UPDATE "Service"
SET "averageUserRating" = new_average
WHERE "id" = affected_service_id;
-- AFTER triggers ignore the result, but return the row for convention.
IF TG_OP = 'DELETE' THEN
RETURN OLD;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create trigger to recalculate the average rating when comments are
-- created, updated, or deleted.
CREATE TRIGGER comment_average_rating_trigger
AFTER INSERT OR UPDATE OR DELETE
ON "Comment"
FOR EACH ROW
WHEN (pg_trigger_depth() < 2) -- Prevent recursive triggering
EXECUTE FUNCTION calculate_average_rating();

View File

@@ -0,0 +1,48 @@
-- This script manages the `listedAt`, `verifiedAt`, and `isRecentlyListed` timestamps
-- for services based on changes to their `verificationStatus`. It ensures these timestamps
-- are set or cleared appropriately when a service's verification status is updated.
CREATE OR REPLACE FUNCTION manage_service_timestamps()
RETURNS TRIGGER AS $$
BEGIN
-- Manage listedAt timestamp
IF NEW."verificationStatus" IN ('APPROVED', 'VERIFICATION_SUCCESS') THEN
-- Set listedAt only on the first time status becomes APPROVED or VERIFICATION_SUCCESS
IF OLD."listedAt" IS NULL THEN
NEW."listedAt" := NOW();
NEW."isRecentlyListed" := TRUE;
END IF;
ELSIF OLD."verificationStatus" IN ('APPROVED', 'VERIFICATION_SUCCESS') THEN
-- Clear listedAt if the status changes FROM APPROVED or VERIFICATION_SUCCESS to something else
-- The trigger's WHEN clause ensures NEW."verificationStatus" is different.
NEW."listedAt" := NULL;
NEW."isRecentlyListed" := FALSE;
END IF;
-- Manage verifiedAt timestamp
IF NEW."verificationStatus" = 'VERIFICATION_SUCCESS' THEN
-- Set verifiedAt when status changes TO VERIFICATION_SUCCESS
NEW."verifiedAt" := NOW();
NEW."isRecentlyListed" := FALSE;
ELSIF OLD."verificationStatus" = 'VERIFICATION_SUCCESS' THEN
-- Clear verifiedAt when status changes FROM VERIFICATION_SUCCESS
-- The trigger's WHEN clause ensures NEW."verificationStatus" is different.
NEW."verifiedAt" := NULL;
NEW."isRecentlyListed" := FALSE;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Drop the old trigger first if it exists under the old name
DROP TRIGGER IF EXISTS trigger_set_service_listed_at ON "Service";
-- Drop the trigger if it exists under the new name
DROP TRIGGER IF EXISTS trigger_manage_service_timestamps ON "Service";
-- BEFORE UPDATE so the function can rewrite NEW in place.
CREATE TRIGGER trigger_manage_service_timestamps
BEFORE UPDATE OF "verificationStatus" ON "Service"
FOR EACH ROW
-- Only execute the function if the verificationStatus value has actually changed
WHEN (OLD."verificationStatus" IS DISTINCT FROM NEW."verificationStatus")
EXECUTE FUNCTION manage_service_timestamps();

View File

@@ -0,0 +1,399 @@
-- Service Events Trigger
-- This trigger automatically creates events when services are updated
-- to track important changes over time
--
-- Fired AFTER UPDATE of the columns listed in the trigger definition below.
-- Collects one human-readable description per changed column and inserts a
-- single visible "Event" row summarising the whole UPDATE.
CREATE OR REPLACE FUNCTION trigger_service_events()
RETURNS TRIGGER AS $$
DECLARE
-- One description string per detected change
change_descriptions TEXT[] := '{}';
event_title TEXT;
event_content TEXT;
-- First change category detected; used as the title for single-change events
change_type TEXT := NULL;
-- transaction_timestamp() so every event in one transaction shares a time
event_time TIMESTAMP WITH TIME ZONE := transaction_timestamp();
currency_desc TEXT;
BEGIN
-- Only proceed if this is an UPDATE operation
IF TG_OP <> 'UPDATE' THEN
RETURN NEW;
END IF;
-- Check for domain/URL changes
IF OLD."serviceUrls" IS DISTINCT FROM NEW."serviceUrls" THEN
change_descriptions := array_append(change_descriptions,
'Service URLs updated from ' || array_to_string(OLD."serviceUrls", ', ') ||
' to ' || array_to_string(NEW."serviceUrls", ', ')
);
change_type := COALESCE(change_type, 'Domain change');
END IF;
-- Check for KYC level changes
IF OLD."kycLevel" IS DISTINCT FROM NEW."kycLevel" THEN
change_descriptions := array_append(change_descriptions,
'KYC level changed from ' || OLD."kycLevel"::TEXT || ' to ' || NEW."kycLevel"::TEXT
);
change_type := COALESCE(change_type, 'KYC update');
END IF;
-- Check for verification status changes
IF OLD."verificationStatus" IS DISTINCT FROM NEW."verificationStatus" THEN
change_descriptions := array_append(change_descriptions,
'Verification status changed from ' || OLD."verificationStatus"::TEXT || ' to ' || NEW."verificationStatus"::TEXT
);
change_type := COALESCE(change_type, 'Verification update');
END IF;
-- Check for description changes
IF OLD.description IS DISTINCT FROM NEW.description THEN
change_descriptions := array_append(change_descriptions, 'Description was updated');
change_type := COALESCE(change_type, 'Description update');
END IF;
-- Check for currency changes
IF OLD."acceptedCurrencies" IS DISTINCT FROM NEW."acceptedCurrencies" THEN
-- Find currencies added
-- (set differences in both directions via EXCEPT; PL/pgSQL supports the
-- WITH ... SELECT ... INTO form used here)
WITH
old_currencies AS (SELECT unnest(OLD."acceptedCurrencies") AS currency),
new_currencies AS (SELECT unnest(NEW."acceptedCurrencies") AS currency),
added_currencies AS (
SELECT currency FROM new_currencies
EXCEPT
SELECT currency FROM old_currencies
),
removed_currencies AS (
SELECT currency FROM old_currencies
EXCEPT
SELECT currency FROM new_currencies
)
-- Temp variable for currency description
SELECT
CASE
WHEN (SELECT COUNT(*) FROM added_currencies) > 0 AND (SELECT COUNT(*) FROM removed_currencies) > 0 THEN
'Currencies updated: added ' || array_to_string(ARRAY(SELECT currency FROM added_currencies), ', ') ||
', removed ' || array_to_string(ARRAY(SELECT currency FROM removed_currencies), ', ')
WHEN (SELECT COUNT(*) FROM added_currencies) > 0 THEN
'Added currencies: ' || array_to_string(ARRAY(SELECT currency FROM added_currencies), ', ')
WHEN (SELECT COUNT(*) FROM removed_currencies) > 0 THEN
'Removed currencies: ' || array_to_string(ARRAY(SELECT currency FROM removed_currencies), ', ')
ELSE
-- Reached when the arrays differ but no element was added/removed
-- (e.g. only the element order changed)
'Currencies changed'
END
INTO currency_desc;
IF currency_desc IS NOT NULL AND currency_desc <> '' THEN
change_descriptions := array_append(change_descriptions, currency_desc);
change_type := COALESCE(change_type, 'Currency update');
END IF;
END IF;
-- If there are changes, create an event
-- (array_length of an empty array is NULL, so NULL > 0 is not true and no
-- event is inserted when nothing relevant changed)
IF array_length(change_descriptions, 1) > 0 THEN
-- Create a title based on number of changes
IF array_length(change_descriptions, 1) = 1 THEN
event_title := COALESCE(change_type, 'Service updated'); -- Ensure title is not null
ELSE
event_title := 'Service updated';
END IF;
-- Create content with all changes
event_content := array_to_string(change_descriptions, '. ');
-- Ensure content is not null or empty
IF event_content IS NULL OR event_content = '' THEN
event_content := 'Service details changed (content unavailable)';
END IF;
-- Insert the event
INSERT INTO "Event" (
"title",
"content",
"type",
"visible",
"startedAt",
"endedAt",
"serviceId"
) VALUES (
event_title,
event_content,
'UPDATE',
TRUE,
event_time,
event_time,
NEW.id
);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create a trigger for service updates
DROP TRIGGER IF EXISTS service_events_trigger ON "Service";
CREATE TRIGGER service_events_trigger
AFTER UPDATE OF "serviceUrls", "kycLevel", "verificationStatus", "description", "acceptedCurrencies" ON "Service"
FOR EACH ROW
EXECUTE FUNCTION trigger_service_events();
-- Additional trigger to monitor changes to ServiceAttribute
--
-- Creates a timeline "Event" when an attribute is attached to / detached from
-- a service. Suppresses events for services created less than a minute ago
-- (initial attribute population) and for services that no longer exist.
CREATE OR REPLACE FUNCTION trigger_service_attribute_events()
RETURNS TRIGGER AS $$
DECLARE
attribute_name TEXT;
service_name TEXT;
event_title TEXT := 'Attribute change'; -- Default title
event_content TEXT;
event_time TIMESTAMP WITH TIME ZONE := transaction_timestamp();
target_service_id INT;
-- NULL until explicitly set; on the INSERT path it is only populated by the
-- re-check just before the final insert below.
service_exists BOOLEAN;
service_created_at TIMESTAMP WITH TIME ZONE;
is_new_service BOOLEAN := FALSE;
BEGIN
-- Determine target service ID and operation type
IF TG_OP = 'INSERT' THEN
target_service_id := NEW."serviceId";
-- Check if this is a new service (created within the last minute)
-- This helps prevent events when attributes are initially added to a new service
SELECT "createdAt" INTO service_created_at FROM "Service" WHERE id = target_service_id;
IF service_created_at IS NOT NULL AND (event_time - service_created_at) < INTERVAL '1 minute' THEN
-- NOTE: returns immediately, so the "NOT is_new_service" guard further
-- down is purely defensive — this flag never reaches it as TRUE.
is_new_service := TRUE;
RETURN NEW; -- Skip event creation for new services
END IF;
SELECT title INTO attribute_name FROM "Attribute" WHERE id = NEW."attributeId";
SELECT name INTO service_name FROM "Service" WHERE id = target_service_id;
IF attribute_name IS NOT NULL AND service_name IS NOT NULL THEN
event_title := 'Attribute added';
event_content := 'Attribute "' || attribute_name || '" was added to ' || service_name;
ELSE
event_content := 'An attribute was added (details unavailable)';
END IF;
ELSIF TG_OP = 'DELETE' THEN
target_service_id := OLD."serviceId";
-- Check if the service still exists before trying to fetch its name or create an event
SELECT EXISTS (SELECT 1 FROM "Service" WHERE id = target_service_id) INTO service_exists;
IF service_exists THEN
SELECT title INTO attribute_name FROM "Attribute" WHERE id = OLD."attributeId";
SELECT name INTO service_name FROM "Service" WHERE id = target_service_id;
IF attribute_name IS NOT NULL AND service_name IS NOT NULL THEN
event_title := 'Attribute removed';
event_content := 'Attribute "' || attribute_name || '" was removed from ' || service_name;
ELSE
-- This case might happen if attribute was deleted concurrently
event_content := 'An attribute was removed (details unavailable)';
END IF;
ELSE
-- Service was deleted, don't create an event
RETURN OLD;
END IF;
END IF;
-- Ensure content is not null/empty and insert
IF event_content IS NOT NULL AND event_content <> '' AND target_service_id IS NOT NULL AND NOT is_new_service THEN
-- Re-check service existence right before insert just in case of concurrency on INSERT
IF TG_OP = 'INSERT' THEN
SELECT EXISTS (SELECT 1 FROM "Service" WHERE id = target_service_id) INTO service_exists;
END IF;
IF service_exists THEN
INSERT INTO "Event" (
"title",
"content",
"type",
"visible",
"startedAt",
"endedAt",
"serviceId"
) VALUES (
event_title,
event_content,
'UPDATE',
TRUE,
event_time,
event_time,
target_service_id
);
END IF;
END IF;
-- Return appropriate record
-- (AFTER row trigger: the return value is ignored by PostgreSQL, but
-- returning the matching record is conventional.)
IF TG_OP = 'INSERT' THEN
RETURN NEW;
ELSE
RETURN OLD;
END IF;
END;
$$ LANGUAGE plpgsql;
-- Create a trigger for service attribute changes
DROP TRIGGER IF EXISTS service_attribute_events_trigger ON "ServiceAttribute";
CREATE TRIGGER service_attribute_events_trigger
AFTER INSERT OR DELETE ON "ServiceAttribute"
FOR EACH ROW
EXECUTE FUNCTION trigger_service_attribute_events();
-- Additional trigger to monitor changes to service categories
--
-- Mirrors trigger_service_attribute_events, but for the Service<->Category
-- junction table "_ServiceToCategory".
-- Assumes junction column "A" = Service.id and "B" = Category.id (consistent
-- with the lookups below) — TODO confirm against the Prisma schema.
CREATE OR REPLACE FUNCTION trigger_service_category_events()
RETURNS TRIGGER AS $$
DECLARE
category_name TEXT;
service_name TEXT;
event_title TEXT := 'Category change'; -- Default title
event_content TEXT;
event_time TIMESTAMP WITH TIME ZONE := transaction_timestamp();
target_service_id INT;
-- NULL until explicitly set; on the INSERT path it is only populated by the
-- re-check just before the final insert below.
service_exists BOOLEAN;
service_created_at TIMESTAMP WITH TIME ZONE;
is_new_service BOOLEAN := FALSE;
BEGIN
-- Determine target service ID and operation type
IF TG_OP = 'INSERT' THEN
target_service_id := NEW."A";
-- Check if this is a new service (created within the last minute)
-- This helps prevent events when categories are initially added to a new service
SELECT "createdAt" INTO service_created_at FROM "Service" WHERE id = target_service_id;
IF service_created_at IS NOT NULL AND (event_time - service_created_at) < INTERVAL '1 minute' THEN
-- NOTE: returns immediately, so the "NOT is_new_service" guard further
-- down is purely defensive — this flag never reaches it as TRUE.
is_new_service := TRUE;
RETURN NEW; -- Skip event creation for new services
END IF;
SELECT name INTO category_name FROM "Category" WHERE id = NEW."B";
SELECT name INTO service_name FROM "Service" WHERE id = target_service_id;
IF category_name IS NOT NULL AND service_name IS NOT NULL THEN
event_title := 'Category added';
event_content := 'Category "' || category_name || '" was added to ' || service_name;
ELSE
event_content := 'A category was added (details unavailable)';
END IF;
ELSIF TG_OP = 'DELETE' THEN
target_service_id := OLD."A";
-- Check if the service still exists before trying to fetch its name or create an event
SELECT EXISTS (SELECT 1 FROM "Service" WHERE id = target_service_id) INTO service_exists;
IF service_exists THEN
SELECT name INTO category_name FROM "Category" WHERE id = OLD."B";
SELECT name INTO service_name FROM "Service" WHERE id = target_service_id;
IF category_name IS NOT NULL AND service_name IS NOT NULL THEN
event_title := 'Category removed';
event_content := 'Category "' || category_name || '" was removed from ' || service_name;
ELSE
-- This case might happen if category was deleted concurrently
event_content := 'A category was removed (details unavailable)';
END IF;
ELSE
-- Service was deleted, don't create an event
RETURN OLD;
END IF;
END IF;
-- Ensure content is not null/empty and insert
IF event_content IS NOT NULL AND event_content <> '' AND target_service_id IS NOT NULL AND NOT is_new_service THEN
-- Re-check service existence right before insert just in case of concurrency on INSERT
IF TG_OP = 'INSERT' THEN
SELECT EXISTS (SELECT 1 FROM "Service" WHERE id = target_service_id) INTO service_exists;
END IF;
IF service_exists THEN
INSERT INTO "Event" (
"title",
"content",
"type",
"visible",
"startedAt",
"endedAt",
"serviceId"
) VALUES (
event_title,
event_content,
'UPDATE',
TRUE,
event_time,
event_time,
target_service_id
);
END IF;
END IF;
-- Return appropriate record
-- (AFTER row trigger: the return value is ignored by PostgreSQL.)
IF TG_OP = 'INSERT' THEN
RETURN NEW;
ELSE
RETURN OLD;
END IF;
END;
$$ LANGUAGE plpgsql;
-- Create a trigger for service category changes (on the junction table)
DROP TRIGGER IF EXISTS service_category_events_trigger ON "_ServiceToCategory";
CREATE TRIGGER service_category_events_trigger
AFTER INSERT OR DELETE ON "_ServiceToCategory"
FOR EACH ROW
EXECUTE FUNCTION trigger_service_category_events();
-- Verification Steps Trigger
-- This trigger creates events when verification steps are added or status changes
--
-- CLEANUP: removed the unused `service_name` variable and its extra SELECT —
-- neither event string below ever referenced it, so it was a dead per-row query.
CREATE OR REPLACE FUNCTION trigger_verification_step_events()
RETURNS TRIGGER AS $$
DECLARE
event_title TEXT;
event_content TEXT;
-- transaction_timestamp() so all events in one transaction share a time
event_time TIMESTAMP WITH TIME ZONE := transaction_timestamp();
service_exists BOOLEAN;
BEGIN
-- Check if the service exists
SELECT EXISTS (SELECT 1 FROM "Service" WHERE id = NEW."serviceId") INTO service_exists;
IF NOT service_exists THEN
-- Service was deleted or doesn't exist, don't create an event
RETURN NEW;
END IF;
-- Handle different operations
IF TG_OP = 'INSERT' THEN
event_title := 'Verification step added';
event_content := '"' || NEW.title || '" was added';
ELSIF TG_OP = 'UPDATE' AND OLD.status IS DISTINCT FROM NEW.status THEN
-- Enum value becomes human-readable, e.g. IN_PROGRESS -> "in progress"
event_title := 'Verification step ' || replace(lower(NEW.status::TEXT), '_', ' ');
event_content := '"' || NEW.title || '" status changed from ' ||
replace(lower(OLD.status::TEXT), '_', ' ') || ' to ' || replace(lower(NEW.status::TEXT), '_', ' ');
ELSE
-- No relevant changes, exit
RETURN NEW;
END IF;
-- Insert the event on the service's timeline
INSERT INTO "Event" (
"title",
"content",
"type",
"visible",
"startedAt",
"endedAt",
"serviceId"
) VALUES (
event_title,
event_content,
'UPDATE',
TRUE,
event_time,
event_time,
NEW."serviceId"
);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create trigger for verification step changes
DROP TRIGGER IF EXISTS verification_step_events_trigger ON "VerificationStep";
CREATE TRIGGER verification_step_events_trigger
AFTER INSERT OR UPDATE OF status ON "VerificationStep"
FOR EACH ROW
EXECUTE FUNCTION trigger_verification_step_events();

View File

@@ -0,0 +1,227 @@
-- Function & Trigger for Root Comment Insertions (Approved/Verified)
--
-- Notifies users watching a service for new root comments. PENDING comments
-- only notify watchers who opted into pending-comment notifications; the
-- comment's author is never notified.
CREATE OR REPLACE FUNCTION notify_root_comment_inserted()
RETURNS TRIGGER AS $$
DECLARE
watcher_count INT;
BEGIN
RAISE NOTICE '[notify_root_comment_inserted] Trigger fired for comment ID: %', NEW.id;
WITH watchers AS (
SELECT np."userId", np."enableNotifyPendingRepliesOnWatch"
FROM "_onRootCommentCreatedForServices" rc
JOIN "NotificationPreferences" np ON rc."A" = np."id"
WHERE rc."B" = NEW."serviceId"
AND np."userId" <> NEW."authorId"
)
INSERT INTO "Notification" ("userId", "type", "aboutCommentId")
SELECT w."userId",
'ROOT_COMMENT_CREATED',
NEW."id"
FROM watchers w
WHERE (
NEW.status IN ('APPROVED', 'VERIFIED')
OR (NEW.status = 'PENDING' AND w."enableNotifyPendingRepliesOnWatch")
)
ON CONFLICT DO NOTHING;
-- BUG FIX: the notice previously interpolated FOUND, which is a boolean and
-- renders as "t"/"f" instead of a count. Use ROW_COUNT to report the actual
-- number of inserted notifications (also puts the declared watcher_count to use).
GET DIAGNOSTICS watcher_count = ROW_COUNT;
RAISE NOTICE '[notify_root_comment_inserted] Inserted % notifications for comment ID: %', watcher_count, NEW.id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_notify_root_comment_inserted ON "Comment";
CREATE TRIGGER trg_notify_root_comment_inserted
AFTER INSERT ON "Comment"
FOR EACH ROW
WHEN (NEW."parentId" IS NULL)
EXECUTE FUNCTION notify_root_comment_inserted();
-- Function & Trigger for Reply Comment Insertions
--
-- Notifies every user watching the parent comment when a reply is posted.
-- PENDING replies only reach watchers who opted into pending-reply alerts,
-- and the reply's own author is always excluded.
CREATE OR REPLACE FUNCTION notify_reply_comment_inserted()
RETURNS TRIGGER AS $$
BEGIN
-- One notification per qualifying watcher of the parent comment.
INSERT INTO "Notification" ("userId", "type", "aboutCommentId")
SELECT prefs."userId",
'REPLY_COMMENT_CREATED',
NEW."id"
FROM "_watchedComments" watched
JOIN "NotificationPreferences" prefs ON watched."B" = prefs."id"
WHERE watched."A" = NEW."parentId"
AND prefs."userId" <> NEW."authorId"
AND (
NEW.status IN ('APPROVED', 'VERIFIED')
OR (NEW.status = 'PENDING' AND prefs."enableNotifyPendingRepliesOnWatch")
)
ON CONFLICT DO NOTHING;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_notify_reply_comment_inserted ON "Comment";
CREATE TRIGGER trg_notify_reply_comment_inserted
AFTER INSERT ON "Comment"
FOR EACH ROW
WHEN (NEW."parentId" IS NOT NULL)
EXECUTE FUNCTION notify_reply_comment_inserted();
-- Function & Trigger for Reply Approval/Verification
--
-- When a reply transitions into APPROVED/VERIFIED, notify watchers of the
-- parent comment. ON CONFLICT DO NOTHING suppresses duplicates for watchers
-- already notified at insert time (pending opt-in).
CREATE OR REPLACE FUNCTION notify_reply_approved()
RETURNS TRIGGER AS $$
BEGIN
INSERT INTO "Notification" ("userId", "type", "aboutCommentId")
SELECT prefs."userId",
'REPLY_COMMENT_CREATED',
NEW."id"
FROM "_watchedComments" watched
JOIN "NotificationPreferences" prefs ON watched."B" = prefs."id"
WHERE watched."A" = NEW."parentId"
AND prefs."userId" <> NEW."authorId"
ON CONFLICT DO NOTHING;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_notify_reply_approved ON "Comment";
CREATE TRIGGER trg_notify_reply_approved
AFTER UPDATE OF status ON "Comment"
FOR EACH ROW
WHEN (NEW."parentId" IS NOT NULL AND NEW.status IN ('APPROVED', 'VERIFIED') AND OLD.status NOT IN ('APPROVED', 'VERIFIED'))
EXECUTE FUNCTION notify_reply_approved();
DROP TRIGGER IF EXISTS trg_notify_root_approved ON "Comment";
-- Notify service watchers when a root comment becomes APPROVED/VERIFIED.
-- Only watchers who did NOT opt into pending notifications are targeted here;
-- opted-in watchers were already notified when the comment was inserted.
CREATE OR REPLACE FUNCTION notify_root_approved()
RETURNS TRIGGER AS $$
BEGIN
INSERT INTO "Notification" ("userId", "type", "aboutCommentId")
SELECT prefs."userId",
'ROOT_COMMENT_CREATED',
NEW."id"
FROM "_onRootCommentCreatedForServices" root_watch
JOIN "NotificationPreferences" prefs ON root_watch."A" = prefs."id"
WHERE root_watch."B" = NEW."serviceId"
AND prefs."userId" <> NEW."authorId"
AND NOT prefs."enableNotifyPendingRepliesOnWatch"
ON CONFLICT DO NOTHING;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_notify_root_approved
AFTER UPDATE OF status ON "Comment"
FOR EACH ROW
WHEN (NEW."parentId" IS NULL AND NEW.status IN ('APPROVED', 'VERIFIED') AND OLD.status NOT IN ('APPROVED', 'VERIFIED'))
EXECUTE FUNCTION notify_root_approved();
-- Function & Trigger for Comment Status Changes (Status, Suspicious, AdminReview)
--
-- Maps a comment UPDATE to a single "CommentStatusChange" value (status takes
-- precedence over suspicious, which takes precedence over requiresAdminReview
-- — the ELSIF chain below encodes that order) and notifies both watchers of
-- the comment and, if opted in, its author.
CREATE OR REPLACE FUNCTION notify_comment_status_changed()
RETURNS TRIGGER AS $$
DECLARE
v_status_change "CommentStatusChange" := NULL;
BEGIN
-- Determine the status change type
IF NEW.status <> OLD.status THEN
IF NEW.status = 'APPROVED' THEN v_status_change := 'STATUS_CHANGED_TO_APPROVED';
ELSIF NEW.status = 'VERIFIED' THEN v_status_change := 'STATUS_CHANGED_TO_VERIFIED';
ELSIF NEW.status = 'REJECTED' THEN v_status_change := 'STATUS_CHANGED_TO_REJECTED';
-- PENDING and HUMAN_PENDING are treated as one "pending" state: only a
-- transition from outside that pair counts as a change to pending.
ELSIF (NEW.status = 'PENDING' OR NEW.status = 'HUMAN_PENDING') AND (OLD.status <> 'PENDING' AND OLD.status <> 'HUMAN_PENDING') THEN v_status_change := 'STATUS_CHANGED_TO_PENDING';
END IF;
ELSIF NEW.suspicious <> OLD.suspicious THEN
IF NEW.suspicious = true THEN v_status_change := 'MARKED_AS_SPAM';
ELSE v_status_change := 'UNMARKED_AS_SPAM';
END IF;
ELSIF NEW."requiresAdminReview" <> OLD."requiresAdminReview" THEN
IF NEW."requiresAdminReview" = true THEN v_status_change := 'MARKED_FOR_ADMIN_REVIEW';
ELSE v_status_change := 'UNMARKED_FOR_ADMIN_REVIEW';
END IF;
END IF;
-- If a relevant status change occurred, notify watchers of THIS comment
IF v_status_change IS NOT NULL THEN
WITH watchers AS (
-- Get all watchers excluding author
-- NOTE(review): watchers are also filtered on
-- "enableOnMyCommentStatusChange" — a preference whose name suggests it
-- governs the user's OWN comments; confirm it is meant to gate watcher
-- notifications as well.
SELECT np."userId"
FROM "_watchedComments" w
JOIN "NotificationPreferences" np ON w."B" = np."id"
WHERE w."A" = NEW."id"
AND np."userId" <> NEW."authorId"
AND np."enableOnMyCommentStatusChange"
UNION ALL
-- Add author if they have enabled notifications for their own comments
SELECT np."userId"
FROM "NotificationPreferences" np
WHERE np."userId" = NEW."authorId"
AND np."enableOnMyCommentStatusChange"
)
INSERT INTO "Notification" ("userId", "type", "aboutCommentId", "aboutCommentStatusChange")
SELECT w."userId",
'COMMENT_STATUS_CHANGE',
NEW."id",
v_status_change
FROM watchers w
ON CONFLICT DO NOTHING;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_notify_comment_status_changed ON "Comment";
CREATE TRIGGER trg_notify_comment_status_changed
AFTER UPDATE OF status, suspicious, "requiresAdminReview" ON "Comment"
FOR EACH ROW
WHEN (NEW.status <> OLD.status OR NEW.suspicious <> OLD.suspicious OR NEW."requiresAdminReview" <> OLD."requiresAdminReview")
EXECUTE FUNCTION notify_comment_status_changed();
-- Function & Trigger for Community Note Added
--
-- Fires only when "communityNote" transitions from NULL/empty to non-empty
-- (see the trigger's WHEN clause). Watchers are notified first (author
-- excluded), then the author is always notified.
-- NOTE(review): ON CONFLICT DO NOTHING only dedupes if "Notification" has a
-- unique constraint covering these columns — confirm against the schema.
CREATE OR REPLACE FUNCTION notify_community_note_added()
RETURNS TRIGGER AS $$
BEGIN
-- Notify watchers of this specific comment (excluding author)
WITH watchers AS (
SELECT np."userId"
FROM "_watchedComments" w
JOIN "NotificationPreferences" np ON w."B" = np."id"
WHERE w."A" = NEW."id"
AND np."userId" <> NEW."authorId"
)
INSERT INTO "Notification" ("userId", "type", "aboutCommentId")
SELECT w."userId",
'COMMUNITY_NOTE_ADDED',
NEW."id"
FROM watchers w
ON CONFLICT DO NOTHING;
-- Always notify the author
INSERT INTO "Notification" ("userId", "type", "aboutCommentId")
VALUES (NEW."authorId", 'COMMUNITY_NOTE_ADDED', NEW."id")
ON CONFLICT DO NOTHING;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_notify_community_note_added ON "Comment";
CREATE TRIGGER trg_notify_community_note_added
AFTER UPDATE OF "communityNote" ON "Comment"
FOR EACH ROW
WHEN (NEW."communityNote" IS NOT NULL AND NEW."communityNote" <> '' AND (OLD."communityNote" IS NULL OR OLD."communityNote" = ''))
EXECUTE FUNCTION notify_community_note_added();
-- Remove the old monolithic trigger and function definition if they still exist
DROP TRIGGER IF EXISTS comment_notifications_trigger ON "Comment";
DROP FUNCTION IF EXISTS trigger_comment_notifications();

View File

@@ -0,0 +1,92 @@
-- Service-suggestion notification fan-out.
-- One function shared by the three triggers below; TG_OP / TG_TABLE_NAME
-- select the branch: new suggestion, new message, or status change.
CREATE OR REPLACE FUNCTION trigger_service_suggestion_notifications()
RETURNS TRIGGER AS $$
DECLARE
suggestion_status_change "ServiceSuggestionStatusChange";
BEGIN
IF TG_OP = 'INSERT' AND TG_TABLE_NAME = 'ServiceSuggestion' THEN -- Corresponds to ServiceSuggestion insert
-- Notify all admins when a new suggestion is created
-- (NOT EXISTS keeps this idempotent if the trigger ever re-fires)
INSERT INTO "Notification" ("userId", "type", "aboutServiceSuggestionId")
SELECT u."id", 'SUGGESTION_CREATED', NEW."id"
FROM "User" u
WHERE u."admin" = true
AND NOT EXISTS (
SELECT 1 FROM "Notification" n
WHERE n."userId" = u."id"
AND n."type" = 'SUGGESTION_CREATED'
AND n."aboutServiceSuggestionId" = NEW."id"
);
ELSIF TG_OP = 'INSERT' AND TG_TABLE_NAME = 'ServiceSuggestionMessage' THEN -- Corresponds to ServiceSuggestionMessage insert
-- Notify suggestion author (if not the sender)
INSERT INTO "Notification" ("userId", "type", "aboutServiceSuggestionId", "aboutServiceSuggestionMessageId")
SELECT s."userId", 'SUGGESTION_MESSAGE', NEW."suggestionId", NEW."id"
FROM "ServiceSuggestion" s
WHERE s."id" = NEW."suggestionId"
AND s."userId" <> NEW."userId"
AND NOT EXISTS (
SELECT 1 FROM "Notification" n
WHERE n."userId" = s."userId"
AND n."type" = 'SUGGESTION_MESSAGE'
AND n."aboutServiceSuggestionMessageId" = NEW."id"
);
-- Notify all admins (except the sender), but only if sender is not admin
INSERT INTO "Notification" ("userId", "type", "aboutServiceSuggestionId", "aboutServiceSuggestionMessageId")
SELECT u."id", 'SUGGESTION_MESSAGE', NEW."suggestionId", NEW."id"
FROM "User" u
WHERE u."admin" = true
AND u."id" <> NEW."userId"
-- Only notify admins if the message sender is not an admin
AND NOT EXISTS (SELECT 1 FROM "User" WHERE "id" = NEW."userId" AND "admin" = true)
AND NOT EXISTS (
SELECT 1 FROM "Notification" n
WHERE n."userId" = u."id"
AND n."type" = 'SUGGESTION_MESSAGE'
AND n."aboutServiceSuggestionMessageId" = NEW."id"
);
ELSIF TG_OP = 'UPDATE' THEN -- Corresponds to ServiceSuggestion status update
-- Notify suggestion author about status change
IF NEW.status <> OLD.status THEN
IF NEW.status = 'PENDING' THEN
suggestion_status_change := 'STATUS_CHANGED_TO_PENDING';
ELSIF NEW.status = 'APPROVED' THEN
suggestion_status_change := 'STATUS_CHANGED_TO_APPROVED';
ELSIF NEW.status = 'REJECTED' THEN
suggestion_status_change := 'STATUS_CHANGED_TO_REJECTED';
ELSIF NEW.status = 'WITHDRAWN' THEN
suggestion_status_change := 'STATUS_CHANGED_TO_WITHDRAWN';
END IF;
-- BUG FIX: guard against a status value outside the four handled cases.
-- Previously such a transition inserted a notification whose
-- "aboutSuggestionStatusChange" was NULL; now the insert is skipped,
-- matching the IS NOT NULL guard used by notify_comment_status_changed.
IF suggestion_status_change IS NOT NULL THEN
INSERT INTO "Notification" ("userId", "type", "aboutServiceSuggestionId", "aboutSuggestionStatusChange")
VALUES (NEW."userId", 'SUGGESTION_STATUS_CHANGE', NEW."id", suggestion_status_change);
END IF;
END IF;
END IF;
-- Use RETURN NULL for AFTER triggers as the return value is ignored.
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Trigger for new suggestions
DROP TRIGGER IF EXISTS service_suggestion_created_notifications_trigger ON "ServiceSuggestion";
CREATE TRIGGER service_suggestion_created_notifications_trigger
AFTER INSERT ON "ServiceSuggestion"
FOR EACH ROW
EXECUTE FUNCTION trigger_service_suggestion_notifications();
-- Trigger for new messages
DROP TRIGGER IF EXISTS service_suggestion_message_notifications_trigger ON "ServiceSuggestionMessage";
CREATE TRIGGER service_suggestion_message_notifications_trigger
AFTER INSERT ON "ServiceSuggestionMessage"
FOR EACH ROW
EXECUTE FUNCTION trigger_service_suggestion_notifications();
-- Trigger for status updates
DROP TRIGGER IF EXISTS service_suggestion_status_notifications_trigger ON "ServiceSuggestion";
CREATE TRIGGER service_suggestion_status_notifications_trigger
AFTER UPDATE OF status ON "ServiceSuggestion"
FOR EACH ROW
-- Only run the function if the status actually changed
WHEN (OLD.status IS DISTINCT FROM NEW.status)
EXECUTE FUNCTION trigger_service_suggestion_notifications();

View File

@@ -0,0 +1,28 @@
-- Event-creation notification fan-out: notify every user watching the service
-- for new events. Junction "_onEventCreatedForServices":
-- "A" -> NotificationPreferences.id, "B" -> Service.id (per the join below).
CREATE OR REPLACE FUNCTION trigger_service_events_notifications()
RETURNS TRIGGER AS $$
BEGIN
-- Handle new Event insertions
IF TG_TABLE_NAME = 'Event' AND TG_OP = 'INSERT' THEN
INSERT INTO "Notification" ("userId", "type", "aboutServiceId", "aboutEventId")
SELECT np."userId", 'EVENT_CREATED', NEW."serviceId", NEW.id
FROM "_onEventCreatedForServices" oes
JOIN "NotificationPreferences" np ON oes."A" = np.id
WHERE oes."B" = NEW."serviceId"
-- Idempotency guard: never duplicate an EVENT_CREATED notification
AND NOT EXISTS (
SELECT 1 FROM "Notification" n
WHERE n."userId" = np."userId"
AND n."type" = 'EVENT_CREATED'
AND n."aboutEventId" = NEW.id
);
END IF;
-- AFTER trigger: the return value is ignored.
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Trigger for new Events
-- FIX: normalized the previously garbled mixed-case name
-- "eVENT_CREATED_notifications_trigger". Unquoted identifiers fold to lower
-- case in PostgreSQL, so this DROP/CREATE addresses exactly the same trigger
-- object — the rename is purely cosmetic.
DROP TRIGGER IF EXISTS event_created_notifications_trigger ON "Event";
CREATE TRIGGER event_created_notifications_trigger
AFTER INSERT ON "Event"
FOR EACH ROW
EXECUTE FUNCTION trigger_service_events_notifications();

View File

@@ -0,0 +1,37 @@
-- Notify watchers when a service's verification status changes.
-- Junction "_onVerificationChangeForServices":
-- "A" -> NotificationPreferences.id, "B" -> Service.id (see join below).
CREATE OR REPLACE FUNCTION trigger_service_verification_status_change_notifications()
RETURNS TRIGGER AS $$
DECLARE
v_status_change "ServiceVerificationStatusChange";
BEGIN
-- Check if verificationStatus actually changed (kept as defence in depth
-- even though the trigger's WHEN clause below enforces the same condition)
IF OLD."verificationStatus" IS DISTINCT FROM NEW."verificationStatus" THEN
-- Determine the correct ServiceVerificationStatusChange enum value
SELECT CASE NEW."verificationStatus"
WHEN 'COMMUNITY_CONTRIBUTED' THEN 'STATUS_CHANGED_TO_COMMUNITY_CONTRIBUTED'
WHEN 'APPROVED' THEN 'STATUS_CHANGED_TO_APPROVED'
WHEN 'VERIFICATION_SUCCESS' THEN 'STATUS_CHANGED_TO_VERIFICATION_SUCCESS'
WHEN 'VERIFICATION_FAILED' THEN 'STATUS_CHANGED_TO_VERIFICATION_FAILED'
ELSE NULL
END
INTO v_status_change;
-- Only insert if we determined a valid status change enum
IF v_status_change IS NOT NULL THEN
INSERT INTO "Notification" ("userId", "type", "aboutServiceId", "aboutServiceVerificationStatusChange")
SELECT np."userId", 'SERVICE_VERIFICATION_STATUS_CHANGE', NEW.id, v_status_change
FROM "_onVerificationChangeForServices" oes
JOIN "NotificationPreferences" np ON oes."A" = np.id -- A -> NotificationPreferences.id
WHERE oes."B" = NEW.id; -- B -> Service.id
END IF;
END IF;
RETURN NULL; -- Return NULL for AFTER trigger
END;
$$ LANGUAGE plpgsql;
-- Trigger for Service verificationStatus updates
DROP TRIGGER IF EXISTS service_verification_status_change_notifications_trigger ON "Service";
-- IMPROVED: previously this trigger fired for EVERY update on "Service".
-- Restrict it to updates of "verificationStatus" that actually change the
-- value, matching the other Service triggers in this migration set; the
-- function's own IS DISTINCT FROM check is unchanged, so behavior for
-- status-changing updates is identical.
CREATE TRIGGER service_verification_status_change_notifications_trigger
AFTER UPDATE OF "verificationStatus" ON "Service"
FOR EACH ROW
WHEN (OLD."verificationStatus" IS DISTINCT FROM NEW."verificationStatus")
EXECUTE FUNCTION trigger_service_verification_status_change_notifications();

View File

@@ -0,0 +1,62 @@
-- Account-status-change notifications for "User" rows.
-- Each of the four monitored boolean flags (admin, verified, moderator,
-- spammer) is checked independently, so one UPDATE touching several flags
-- produces one ACCOUNT_STATUS_CHANGE notification per changed flag.
-- IS DISTINCT FROM makes NULL <-> value transitions count as changes.
CREATE OR REPLACE FUNCTION trigger_user_status_change_notifications()
RETURNS TRIGGER AS $$
DECLARE
-- Reused across the four branches; each branch assigns then inserts.
status_change "AccountStatusChange";
BEGIN
-- Check for admin status change
IF OLD.admin IS DISTINCT FROM NEW.admin THEN
IF NEW.admin = true THEN
status_change := 'ADMIN_TRUE';
ELSE
status_change := 'ADMIN_FALSE';
END IF;
INSERT INTO "Notification" ("userId", "type", "aboutAccountStatusChange")
VALUES (NEW.id, 'ACCOUNT_STATUS_CHANGE', status_change);
END IF;
-- Check for verified status change
IF OLD.verified IS DISTINCT FROM NEW.verified THEN
IF NEW.verified = true THEN
status_change := 'VERIFIED_TRUE';
ELSE
status_change := 'VERIFIED_FALSE';
END IF;
INSERT INTO "Notification" ("userId", "type", "aboutAccountStatusChange")
VALUES (NEW.id, 'ACCOUNT_STATUS_CHANGE', status_change);
END IF;
-- Check for moderator status change
IF OLD.moderator IS DISTINCT FROM NEW.moderator THEN
IF NEW.moderator = true THEN
status_change := 'MODERATOR_TRUE';
ELSE
status_change := 'MODERATOR_FALSE';
END IF;
INSERT INTO "Notification" ("userId", "type", "aboutAccountStatusChange")
VALUES (NEW.id, 'ACCOUNT_STATUS_CHANGE', status_change);
END IF;
-- Check for spammer status change
IF OLD.spammer IS DISTINCT FROM NEW.spammer THEN
IF NEW.spammer = true THEN
status_change := 'SPAMMER_TRUE';
ELSE
status_change := 'SPAMMER_FALSE';
END IF;
INSERT INTO "Notification" ("userId", "type", "aboutAccountStatusChange")
VALUES (NEW.id, 'ACCOUNT_STATUS_CHANGE', status_change);
END IF;
-- Return NULL for AFTER triggers as the return value is ignored.
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Drop the trigger if it exists to ensure a clean setup
DROP TRIGGER IF EXISTS user_status_change_notifications_trigger ON "User";
-- Create the trigger to fire after updates on specific status columns
CREATE TRIGGER user_status_change_notifications_trigger
AFTER UPDATE OF admin, verified, moderator, spammer ON "User"
FOR EACH ROW
EXECUTE FUNCTION trigger_user_status_change_notifications();

View File

@@ -0,0 +1,29 @@
-- Karma-change notifications.
-- After a KarmaTransaction insert, create a KARMA_CHANGE notification for the
-- transaction's user when the absolute point delta meets that user's
-- configured threshold (default 10 when unset). The NOT EXISTS guard keeps
-- the insert idempotent per transaction.
CREATE OR REPLACE FUNCTION trigger_karma_notifications()
RETURNS TRIGGER AS $$
BEGIN
INSERT INTO "Notification" ("userId", "type", "aboutKarmaTransactionId")
SELECT NEW."userId",
'KARMA_CHANGE',
NEW.id
FROM "NotificationPreferences" prefs
WHERE prefs."userId" = NEW."userId"
AND ABS(NEW.points) >= COALESCE(prefs."karmaNotificationThreshold", 10)
AND NOT EXISTS (
SELECT 1
FROM "Notification" existing
WHERE existing."userId" = NEW."userId"
AND existing."type" = 'KARMA_CHANGE'
AND existing."aboutKarmaTransactionId" = NEW.id
);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Recreate the trigger idempotently
DROP TRIGGER IF EXISTS karma_notifications_trigger ON "KarmaTransaction";
CREATE TRIGGER karma_notifications_trigger
AFTER INSERT ON "KarmaTransaction"
FOR EACH ROW
EXECUTE FUNCTION trigger_karma_notifications();

Some files were not shown because too many files have changed in this diff Show More